/*-
 * Copyright (c) 2004-2006 Kip Macy
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: releng/10.2/sys/dev/xen/netfront/netfront.c 285737 2015-07-21 07:20:02Z royger $");

#include "opt_inet.h"
#include "opt_inet6.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/sockio.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/module.h>
#include <sys/kernel.h>
#include <sys/socket.h>
#include <sys/sysctl.h>
#include <sys/queue.h>
#include <sys/lock.h>
#include <sys/sx.h>
#include <sys/limits.h>

#include <net/if.h>
#include <net/if_arp.h>
#include <net/ethernet.h>
#include <net/if_dl.h>
#include <net/if_media.h>

#include <net/bpf.h>

#include <net/if_types.h>

#include <netinet/in_systm.h>
#include <netinet/in.h>
#include <netinet/ip.h>
#include <netinet/if_ether.h>
#if __FreeBSD_version >= 700000
#include <netinet/tcp.h>
#include <netinet/tcp_lro.h>
#endif

#include <vm/vm.h>
#include <vm/pmap.h>

#include <machine/clock.h>      /* for DELAY */
#include <machine/bus.h>
#include <machine/resource.h>
#include <machine/frame.h>
#include <machine/vmparam.h>

#include <sys/bus.h>
#include <sys/rman.h>

#include <machine/intr_machdep.h>

#include <xen/xen-os.h>
#include <xen/hypervisor.h>
#include <xen/xen_intr.h>
#include <xen/gnttab.h>
#include <xen/interface/memory.h>
#include <xen/interface/io/netif.h>
#include <xen/xenbus/xenbusvar.h>

#include <machine/xen/xenvar.h>

#include <dev/xen/netfront/mbufq.h>

#include "xenbus_if.h"

/* Features supported by all backends.  TSO and LRO can be negotiated */
#define XN_CSUM_FEATURES	(CSUM_TCP | CSUM_UDP)

#define NET_TX_RING_SIZE __RING_SIZE((netif_tx_sring_t *)0, PAGE_SIZE)
#define NET_RX_RING_SIZE __RING_SIZE((netif_rx_sring_t *)0, PAGE_SIZE)
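
/*
 * Each shared ring occupies a single page.  __RING_SIZE rounds the usable
 * space down to a power of two; with the standard 4 KB page size this
 * works out to 256 entries for both the Tx and Rx rings.
 */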

#if __FreeBSD_version >= 700000
/*
 * Should the driver do LRO on the RX end?  This can be toggled on the fly,
 * but the interface must be reset (down/up) for the change to take effect.
 */
static int xn_enable_lro = 1;
TUNABLE_INT("hw.xn.enable_lro", &xn_enable_lro);
#else

#define IFCAP_TSO4	0
#define CSUM_TSO	0

#endif
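
/*
 * On pre-7.0 kernels, which lack TSO and LRO support, the capability and
 * checksum flags are defined away above so that the rest of the driver
 * can test them unconditionally.
 */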

#ifdef CONFIG_XEN
static int MODPARM_rx_copy = 0;
module_param_named(rx_copy, MODPARM_rx_copy, bool, 0);
MODULE_PARM_DESC(rx_copy, "Copy packets from network card (rather than flip)");
static int MODPARM_rx_flip = 0;
module_param_named(rx_flip, MODPARM_rx_flip, bool, 0);
MODULE_PARM_DESC(rx_flip, "Flip packets from network card (rather than copy)");
#else
static const int MODPARM_rx_copy = 1;
static const int MODPARM_rx_flip = 0;
#endif
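
/*
 * The Linux module-parameter machinery is unavailable on FreeBSD, so
 * receive-by-copy is hardwired on and page flipping off; the CONFIG_XEN
 * branch above is retained from the Xenolinux origins of this driver.
 */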

/**
 * \brief The maximum allowed data fragments in a single transmit
 *        request.
 *
 * This limit is imposed by the backend driver.  We assume here that
 * we are dealing with a Linux driver domain and have set our limit
 * to mirror the Linux MAX_SKB_FRAGS constant.
 */
#define	MAX_TX_REQ_FRAGS (65536 / PAGE_SIZE + 2)
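
/* With 4 KB pages this evaluates to 16 + 2 = 18 fragments per request. */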

#define RX_COPY_THRESHOLD 256

#define net_ratelimit() 0
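
/*
 * Rate limiting is stubbed out: net_ratelimit() always returns 0, so any
 * warning guarded by it is never printed.
 */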

struct netfront_info;
struct netfront_rx_info;

static void xn_txeof(struct netfront_info *);
static void xn_rxeof(struct netfront_info *);
static void network_alloc_rx_buffers(struct netfront_info *);

static void xn_tick_locked(struct netfront_info *);
static void xn_tick(void *);

static void xn_intr(void *);
static inline int xn_count_frags(struct mbuf *m);
static int  xn_assemble_tx_request(struct netfront_info *sc,
				   struct mbuf *m_head);
static void xn_start_locked(struct ifnet *);
static void xn_start(struct ifnet *);
static int  xn_ioctl(struct ifnet *, u_long, caddr_t);
static void xn_ifinit_locked(struct netfront_info *);
static void xn_ifinit(void *);
static void xn_stop(struct netfront_info *);
static void xn_query_features(struct netfront_info *np);
static int  xn_configure_features(struct netfront_info *np);
#ifdef notyet
static void xn_watchdog(struct ifnet *);
#endif

#ifdef notyet
static void netfront_closing(device_t dev);
#endif
static void netif_free(struct netfront_info *info);
static int netfront_detach(device_t dev);

static int talk_to_backend(device_t dev, struct netfront_info *info);
static int create_netdev(device_t dev);
static void netif_disconnect_backend(struct netfront_info *info);
static int setup_device(device_t dev, struct netfront_info *info);
static void free_ring(int *ref, void *ring_ptr_ref);

static int  xn_ifmedia_upd(struct ifnet *ifp);
static void xn_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr);

/* Xenolinux helper functions */
int network_connect(struct netfront_info *);

static void xn_free_rx_ring(struct netfront_info *);

static void xn_free_tx_ring(struct netfront_info *);

static int xennet_get_responses(struct netfront_info *np,
	struct netfront_rx_info *rinfo, RING_IDX rp, RING_IDX *cons,
	struct mbuf **list, int *pages_flipped_p);

#define virt_to_mfn(x) (vtomach(x) >> PAGE_SHIFT)

#define INVALID_P2M_ENTRY (~0UL)
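
/*
 * Xen distinguishes a guest's pseudo-physical frame numbers (PFNs) from
 * the underlying machine frame numbers (MFNs); the grant and page-flip
 * paths below operate on MFNs, hence the conversion macro above.
 */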

/*
 * Mbuf pointers. We need these to keep track of the virtual addresses
 * of our mbuf chains since we can only convert from virtual to physical,
 * not the other way around.  The size must track the free index arrays.
 */
struct xn_chain_data {
	struct mbuf    *xn_tx_chain[NET_TX_RING_SIZE+1];
	int		xn_tx_chain_cnt;
	struct mbuf    *xn_rx_chain[NET_RX_RING_SIZE+1];
};

struct net_device_stats
{
	u_long	rx_packets;		/* total packets received	*/
	u_long	tx_packets;		/* total packets transmitted	*/
	u_long	rx_bytes;		/* total bytes received 	*/
	u_long	tx_bytes;		/* total bytes transmitted	*/
	u_long	rx_errors;		/* bad packets received		*/
	u_long	tx_errors;		/* packet transmit problems	*/
	u_long	rx_dropped;		/* no space in linux buffers	*/
	u_long	tx_dropped;		/* no space available in linux	*/
	u_long	multicast;		/* multicast packets received	*/
	u_long	collisions;

	/* detailed rx_errors: */
	u_long	rx_length_errors;
	u_long	rx_over_errors;		/* receiver ring buff overflow	*/
	u_long	rx_crc_errors;		/* recved pkt with crc error	*/
	u_long	rx_frame_errors;	/* recv'd frame alignment error */
	u_long	rx_fifo_errors;		/* recv'r fifo overrun		*/
	u_long	rx_missed_errors;	/* receiver missed packet	*/

	/* detailed tx_errors */
	u_long	tx_aborted_errors;
	u_long	tx_carrier_errors;
	u_long	tx_fifo_errors;
	u_long	tx_heartbeat_errors;
	u_long	tx_window_errors;

	/* for cslip etc */
	u_long	rx_compressed;
	u_long	tx_compressed;
};
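
/*
 * The Linux-style statistics block above is another Xenolinux remnant;
 * only a subset of these counters is ever updated by this driver.
 */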

struct netfront_info {
	struct ifnet *xn_ifp;
#if __FreeBSD_version >= 700000
	struct lro_ctrl xn_lro;
#endif

	struct net_device_stats stats;
	u_int tx_full;

	netif_tx_front_ring_t tx;
	netif_rx_front_ring_t rx;

	struct mtx   tx_lock;
	struct mtx   rx_lock;
	struct mtx   sc_lock;

	xen_intr_handle_t xen_intr_handle;
	u_int copying_receiver;
	u_int carrier;
	u_int maxfrags;

	/* Receive-ring batched refills. */
#define RX_MIN_TARGET 32
#define RX_MAX_TARGET NET_RX_RING_SIZE
	int rx_min_target;
	int rx_max_target;
	int rx_target;

	grant_ref_t gref_tx_head;
	grant_ref_t grant_tx_ref[NET_TX_RING_SIZE + 1];
	grant_ref_t gref_rx_head;
	grant_ref_t grant_rx_ref[NET_RX_RING_SIZE + 1];

	device_t		xbdev;
	int			tx_ring_ref;
	int			rx_ring_ref;
	uint8_t			mac[ETHER_ADDR_LEN];
	struct xn_chain_data	xn_cdata;	/* mbufs */
	struct mbuf_head	xn_rx_batch;	/* head of the batch queue */

	int			xn_if_flags;
	struct callout	        xn_stat_ch;

	u_long			rx_pfn_array[NET_RX_RING_SIZE];
	multicall_entry_t	rx_mcl[NET_RX_RING_SIZE+1];
	mmu_update_t		rx_mmu[NET_RX_RING_SIZE];
	struct ifmedia		sc_media;

	bool			xn_resume;
};

#define rx_mbufs xn_cdata.xn_rx_chain
#define tx_mbufs xn_cdata.xn_tx_chain

#define XN_LOCK_INIT(_sc, _name) \
        mtx_init(&(_sc)->tx_lock, #_name"_tx", "network transmit lock", MTX_DEF); \
        mtx_init(&(_sc)->rx_lock, #_name"_rx", "network receive lock", MTX_DEF);  \
        mtx_init(&(_sc)->sc_lock, #_name"_sc", "netfront softc lock", MTX_DEF)

#define XN_RX_LOCK(_sc)           mtx_lock(&(_sc)->rx_lock)
#define XN_RX_UNLOCK(_sc)         mtx_unlock(&(_sc)->rx_lock)

#define XN_TX_LOCK(_sc)           mtx_lock(&(_sc)->tx_lock)
#define XN_TX_UNLOCK(_sc)         mtx_unlock(&(_sc)->tx_lock)

#define XN_LOCK(_sc)           mtx_lock(&(_sc)->sc_lock);
#define XN_UNLOCK(_sc)         mtx_unlock(&(_sc)->sc_lock);

#define XN_LOCK_ASSERT(_sc)    mtx_assert(&(_sc)->sc_lock, MA_OWNED);
#define XN_RX_LOCK_ASSERT(_sc)    mtx_assert(&(_sc)->rx_lock, MA_OWNED);
#define XN_TX_LOCK_ASSERT(_sc)    mtx_assert(&(_sc)->tx_lock, MA_OWNED);
#define XN_LOCK_DESTROY(_sc)   mtx_destroy(&(_sc)->rx_lock); \
                               mtx_destroy(&(_sc)->tx_lock); \
                               mtx_destroy(&(_sc)->sc_lock);

struct netfront_rx_info {
	struct netif_rx_response rx;
	struct netif_extra_info extras[XEN_NETIF_EXTRA_TYPE_MAX - 1];
};

#define netfront_carrier_on(netif)	((netif)->carrier = 1)
#define netfront_carrier_off(netif)	((netif)->carrier = 0)
#define netfront_carrier_ok(netif)	((netif)->carrier)

/* Access macros for acquiring and freeing slots in the tx/rx mbuf chains. */

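/*
 * The free list is threaded through the mbuf pointer arrays themselves:
 * entry 0 is the list head, and a free entry stores the index of the next
 * free entry cast to a pointer.  Because real mbuf addresses are always
 * larger than NET_TX_RING_SIZE, small values unambiguously mark free
 * slots (see netif_release_tx_bufs() below).
 */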
static inline void
add_id_to_freelist(struct mbuf **list, uintptr_t id)
{
	KASSERT(id != 0,
		("%s: the head item (0) must always be free.", __func__));
	list[id] = list[0];
	list[0]  = (struct mbuf *)id;
}

static inline unsigned short
get_id_from_freelist(struct mbuf **list)
{
	uintptr_t id;

	id = (uintptr_t)list[0];
	KASSERT(id != 0,
		("%s: the head item (0) must always remain free.", __func__));
	list[0] = list[id];
	return (id);
}

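/* Ring index to array slot; relies on NET_RX_RING_SIZE being a power of 2. */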
static inline int
xennet_rxidx(RING_IDX idx)
{
	return idx & (NET_RX_RING_SIZE - 1);
}

static inline struct mbuf *
xennet_get_rx_mbuf(struct netfront_info *np, RING_IDX ri)
{
	int i = xennet_rxidx(ri);
	struct mbuf *m;

	m = np->rx_mbufs[i];
	np->rx_mbufs[i] = NULL;
	return (m);
}

static inline grant_ref_t
xennet_get_rx_ref(struct netfront_info *np, RING_IDX ri)
{
	int i = xennet_rxidx(ri);
	grant_ref_t ref = np->grant_rx_ref[i];
	KASSERT(ref != GRANT_REF_INVALID, ("Invalid grant reference!\n"));
	np->grant_rx_ref[i] = GRANT_REF_INVALID;
	return ref;
}

#define IPRINTK(fmt, args...) \
    printf("[XEN] " fmt, ##args)
#ifdef INVARIANTS
#define WPRINTK(fmt, args...) \
    printf("[XEN] " fmt, ##args)
#else
#define WPRINTK(fmt, args...)
#endif
#ifdef DEBUG
#define DPRINTK(fmt, args...) \
    printf("[XEN] %s: " fmt, __func__, ##args)
#else
#define DPRINTK(fmt, args...)
#endif

/**
 * Read the 'mac' node at the given device's node in the store, and parse
 * that as colon-separated octets, placing the result in the given mac
 * array.  mac must be a preallocated array of length ETHER_ADDR_LEN.
 * Return 0 on success, or errno on error.
 */
static int
xen_net_read_mac(device_t dev, uint8_t mac[])
{
	int error, i;
	char *s, *e, *macstr;
	const char *path;

	path = xenbus_get_node(dev);
	error = xs_read(XST_NIL, path, "mac", NULL, (void **) &macstr);
	if (error == ENOENT) {
		/*
		 * Deal with missing mac XenStore nodes on devices with
		 * HVM emulation (the 'ioemu' configuration attribute)
		 * enabled.
		 *
		 * The HVM emulator may execute in a stub device model
		 * domain which lacks the permission, only given to Dom0,
		 * to update the guest's XenStore tree.  For this reason,
		 * the HVM emulator doesn't even attempt to write the
		 * front-side mac node, even when operating in Dom0.
		 * However, there should always be a mac listed in the
		 * backend tree.  Fall back to this version if our query
		 * of the front side XenStore location doesn't find
		 * anything.
		 */
		path = xenbus_get_otherend_path(dev);
		error = xs_read(XST_NIL, path, "mac", NULL, (void **) &macstr);
	}
	if (error != 0) {
		xenbus_dev_fatal(dev, error, "parsing %s/mac", path);
		return (error);
	}

	s = macstr;
	for (i = 0; i < ETHER_ADDR_LEN; i++) {
		mac[i] = strtoul(s, &e, 16);
		if (s == e || (e[0] != ':' && e[0] != 0)) {
			free(macstr, M_XENBUS);
			return (ENOENT);
		}
		s = &e[1];
	}
	free(macstr, M_XENBUS);
	return (0);
}

/**
 * Entry point to this code when a new device is created.  Allocate the basic
 * structures and the ring buffers for communication with the backend, and
 * inform the backend of the appropriate details for those.  Switch to
 * Connected state.
 */
static int
netfront_probe(device_t dev)
{

	if (!strcmp(xenbus_get_type(dev), "vif")) {
		device_set_desc(dev, "Virtual Network Interface");
		return (0);
	}

	return (ENXIO);
}

static int
netfront_attach(device_t dev)
{
	int err;

	err = create_netdev(dev);
	if (err) {
		xenbus_dev_fatal(dev, err, "creating netdev");
		return (err);
	}

#if __FreeBSD_version >= 700000
	SYSCTL_ADD_INT(device_get_sysctl_ctx(dev),
	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
	    OID_AUTO, "enable_lro", CTLFLAG_RW,
	    &xn_enable_lro, 0, "Large Receive Offload");
#endif

	return (0);
}

static int
netfront_suspend(device_t dev)
{
	struct netfront_info *info = device_get_softc(dev);

	XN_RX_LOCK(info);
	XN_TX_LOCK(info);
	netfront_carrier_off(info);
	XN_TX_UNLOCK(info);
	XN_RX_UNLOCK(info);
	return (0);
}

/**
 * We are reconnecting to the backend, due to a suspend/resume, or a backend
 * driver restart.  We tear down our netif structure and recreate it, but
 * leave the device-layer structures intact so that this is transparent to
 * the rest of the kernel.
 */
static int
netfront_resume(device_t dev)
{
	struct netfront_info *info = device_get_softc(dev);

	info->xn_resume = true;
	netif_disconnect_backend(info);
	return (0);
}

/* Common code used when first setting up, and when resuming. */
static int
talk_to_backend(device_t dev, struct netfront_info *info)
{
	const char *message;
	struct xs_transaction xst;
	const char *node = xenbus_get_node(dev);
	int err;

	err = xen_net_read_mac(dev, info->mac);
	if (err) {
		xenbus_dev_fatal(dev, err, "parsing %s/mac", node);
		goto out;
	}

	/* Create shared ring, alloc event channel. */
	err = setup_device(dev, info);
	if (err)
		goto out;

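	/*
	 * XenStore transactions may race with concurrent updates, in which
	 * case ending the transaction fails with EAGAIN and the whole batch
	 * of writes must be retried from the start.
	 */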
 again:
	err = xs_transaction_start(&xst);
	if (err) {
		xenbus_dev_fatal(dev, err, "starting transaction");
		goto destroy_ring;
	}
	err = xs_printf(xst, node, "tx-ring-ref", "%u",
			info->tx_ring_ref);
	if (err) {
		message = "writing tx ring-ref";
		goto abort_transaction;
	}
	err = xs_printf(xst, node, "rx-ring-ref", "%u",
			info->rx_ring_ref);
	if (err) {
		message = "writing rx ring-ref";
		goto abort_transaction;
	}
	err = xs_printf(xst, node,
			"event-channel", "%u",
			xen_intr_port(info->xen_intr_handle));
	if (err) {
		message = "writing event-channel";
		goto abort_transaction;
	}
	err = xs_printf(xst, node, "request-rx-copy", "%u",
			info->copying_receiver);
	if (err) {
		message = "writing request-rx-copy";
		goto abort_transaction;
	}
	err = xs_printf(xst, node, "feature-rx-notify", "%d", 1);
	if (err) {
		message = "writing feature-rx-notify";
		goto abort_transaction;
	}
	err = xs_printf(xst, node, "feature-sg", "%d", 1);
	if (err) {
		message = "writing feature-sg";
		goto abort_transaction;
	}
#if __FreeBSD_version >= 700000
	err = xs_printf(xst, node, "feature-gso-tcpv4", "%d", 1);
	if (err) {
		message = "writing feature-gso-tcpv4";
		goto abort_transaction;
	}
#endif

	err = xs_transaction_end(xst, 0);
	if (err) {
		if (err == EAGAIN)
			goto again;
		xenbus_dev_fatal(dev, err, "completing transaction");
		goto destroy_ring;
	}

	return 0;

 abort_transaction:
	xs_transaction_end(xst, 1);
	xenbus_dev_fatal(dev, err, "%s", message);
 destroy_ring:
	netif_free(info);
 out:
	return err;
}

static int
setup_device(device_t dev, struct netfront_info *info)
{
	netif_tx_sring_t *txs;
	netif_rx_sring_t *rxs;
	int error;
	struct ifnet *ifp;

	ifp = info->xn_ifp;

	info->tx_ring_ref = GRANT_REF_INVALID;
	info->rx_ring_ref = GRANT_REF_INVALID;
	info->rx.sring = NULL;
	info->tx.sring = NULL;

	txs = (netif_tx_sring_t *)malloc(PAGE_SIZE, M_DEVBUF, M_NOWAIT|M_ZERO);
	if (!txs) {
		error = ENOMEM;
		xenbus_dev_fatal(dev, error, "allocating tx ring page");
		goto fail;
	}
	SHARED_RING_INIT(txs);
	FRONT_RING_INIT(&info->tx, txs, PAGE_SIZE);
	error = xenbus_grant_ring(dev, virt_to_mfn(txs), &info->tx_ring_ref);
	if (error)
		goto fail;

	rxs = (netif_rx_sring_t *)malloc(PAGE_SIZE, M_DEVBUF, M_NOWAIT|M_ZERO);
	if (!rxs) {
		error = ENOMEM;
		xenbus_dev_fatal(dev, error, "allocating rx ring page");
		goto fail;
	}
	SHARED_RING_INIT(rxs);
	FRONT_RING_INIT(&info->rx, rxs, PAGE_SIZE);

	error = xenbus_grant_ring(dev, virt_to_mfn(rxs), &info->rx_ring_ref);
	if (error)
		goto fail;

	error = xen_intr_alloc_and_bind_local_port(dev,
	    xenbus_get_otherend_id(dev), /*filter*/NULL, xn_intr, info,
	    INTR_TYPE_NET | INTR_MPSAFE | INTR_ENTROPY, &info->xen_intr_handle);

	if (error) {
		xenbus_dev_fatal(dev, error,
				 "xen_intr_alloc_and_bind_local_port failed");
		goto fail;
	}

	return (0);

 fail:
	netif_free(info);
	return (error);
}

#ifdef INET
/**
 * If this interface has an IPv4 address, send an ARP for it.  This
 * helps to get the network going again after migrating hosts.
 */
static void
netfront_send_fake_arp(device_t dev, struct netfront_info *info)
{
	struct ifnet *ifp;
	struct ifaddr *ifa;

	ifp = info->xn_ifp;
	TAILQ_FOREACH(ifa, &ifp->if_addrhead, ifa_link) {
		if (ifa->ifa_addr->sa_family == AF_INET) {
			arp_ifinit(ifp, ifa);
		}
	}
}
#endif

/**
 * Callback received when the backend's state changes.
 */
static void
netfront_backend_changed(device_t dev, XenbusState newstate)
{
	struct netfront_info *sc = device_get_softc(dev);

	DPRINTK("newstate=%d\n", newstate);

	switch (newstate) {
	case XenbusStateInitialising:
	case XenbusStateInitialised:
	case XenbusStateUnknown:
	case XenbusStateClosed:
	case XenbusStateReconfigured:
	case XenbusStateReconfiguring:
		break;
	case XenbusStateInitWait:
		if (xenbus_get_state(dev) != XenbusStateInitialising)
			break;
		if (network_connect(sc) != 0)
			break;
		xenbus_set_state(dev, XenbusStateConnected);
		break;
	case XenbusStateClosing:
		xenbus_set_state(dev, XenbusStateClosed);
		break;
	case XenbusStateConnected:
#ifdef INET
		netfront_send_fake_arp(dev, sc);
#endif
		break;
	}
}

static void
xn_free_rx_ring(struct netfront_info *sc)
{
#if 0
	int i;

	for (i = 0; i < NET_RX_RING_SIZE; i++) {
		if (sc->xn_cdata.rx_mbufs[i] != NULL) {
			m_freem(sc->rx_mbufs[i]);
			sc->rx_mbufs[i] = NULL;
		}
	}

	sc->rx.rsp_cons = 0;
	sc->xn_rx_if->req_prod = 0;
	sc->xn_rx_if->event = sc->rx.rsp_cons;
#endif
}

static void
xn_free_tx_ring(struct netfront_info *sc)
{
#if 0
	int i;

	for (i = 0; i < NET_TX_RING_SIZE; i++) {
		if (sc->tx_mbufs[i] != NULL) {
			m_freem(sc->tx_mbufs[i]);
			sc->xn_cdata.xn_tx_chain[i] = NULL;
		}
	}

	return;
#endif
}

/**
 * \brief Verify that there is sufficient space in the Tx ring
 *        buffer for a maximally sized request to be enqueued.
 *
 * A transmit request requires a transmit descriptor for each packet
 * fragment, plus up to 2 entries for "options" (e.g. TSO).
 */
static inline int
xn_tx_slot_available(struct netfront_info *np)
{
	return (RING_FREE_REQUESTS(&np->tx) > (MAX_TX_REQ_FRAGS + 2));
}

static void
netif_release_tx_bufs(struct netfront_info *np)
{
	int i;

	for (i = 1; i <= NET_TX_RING_SIZE; i++) {
		struct mbuf *m;

		m = np->tx_mbufs[i];

		/*
		 * We assume that no kernel addresses are
		 * less than NET_TX_RING_SIZE.  Any entry
		 * in the table that is below this number
		 * must be an index from free-list tracking.
		 */
		if (((uintptr_t)m) <= NET_TX_RING_SIZE)
			continue;
		gnttab_end_foreign_access_ref(np->grant_tx_ref[i]);
		gnttab_release_grant_reference(&np->gref_tx_head,
		    np->grant_tx_ref[i]);
		np->grant_tx_ref[i] = GRANT_REF_INVALID;
		add_id_to_freelist(np->tx_mbufs, i);
		np->xn_cdata.xn_tx_chain_cnt--;
		if (np->xn_cdata.xn_tx_chain_cnt < 0) {
			panic("%s: tx_chain_cnt must be >= 0", __func__);
		}
		m_free(m);
	}
}

static void
network_alloc_rx_buffers(struct netfront_info *sc)
{
	int otherend_id = xenbus_get_otherend_id(sc->xbdev);
	unsigned short id;
	struct mbuf *m_new;
	int i, batch_target, notify;
	RING_IDX req_prod;
	struct xen_memory_reservation reservation;
	grant_ref_t ref;
	int nr_flips;
	netif_rx_request_t *req;
	vm_offset_t vaddr;
	u_long pfn;

	req_prod = sc->rx.req_prod_pvt;

	if (__predict_false(sc->carrier == 0))
		return;

	/*
	 * Allocate mbufs greedily, even though we batch updates to the
	 * receive ring. This creates a less bursty demand on the memory
	 * allocator, and so should reduce the chance of failed allocation
	 * requests both for ourself and for other kernel subsystems.
	 *
	 * Here we attempt to maintain rx_target buffers in flight, counting
	 * buffers that we have yet to process in the receive ring.
	 */
	batch_target = sc->rx_target - (req_prod - sc->rx.rsp_cons);
	for (i = mbufq_len(&sc->xn_rx_batch); i < batch_target; i++) {
		MGETHDR(m_new, M_NOWAIT, MT_DATA);
		if (m_new == NULL) {
			printf("%s: MGETHDR failed\n", __func__);
			goto no_mbuf;
		}

		m_cljget(m_new, M_NOWAIT, MJUMPAGESIZE);
		if ((m_new->m_flags & M_EXT) == 0) {
			printf("%s: m_cljget failed\n", __func__);
			m_freem(m_new);

no_mbuf:
			if (i != 0)
				goto refill;
			/*
			 * XXX set timer
			 */
			break;
		}
		m_new->m_len = m_new->m_pkthdr.len = MJUMPAGESIZE;

		/* queue the mbufs allocated */
		mbufq_tail(&sc->xn_rx_batch, m_new);
	}

	/*
	 * If we've allocated at least half of our target number of entries,
	 * submit them to the backend - we have enough to make the overhead
	 * of submission worthwhile.  Otherwise wait for more mbufs and
	 * request entries to become available.
	 */
	if (i < (sc->rx_target/2)) {
		if (req_prod > sc->rx.sring->req_prod)
			goto push;
		return;
	}

	/*
	 * Double floating fill target if we risked having the backend
	 * run out of empty buffers for receive traffic.  We define "running
	 * low" as having less than a fourth of our target buffers free
	 * at the time we refilled the queue.
	 */
	if ((req_prod - sc->rx.sring->rsp_prod) < (sc->rx_target / 4)) {
		sc->rx_target *= 2;
		if (sc->rx_target > sc->rx_max_target)
			sc->rx_target = sc->rx_max_target;
	}

refill:
	for (nr_flips = i = 0; ; i++) {
		if ((m_new = mbufq_dequeue(&sc->xn_rx_batch)) == NULL)
			break;

		m_new->m_ext.ext_arg1 = (vm_paddr_t *)(uintptr_t)(
				vtophys(m_new->m_ext.ext_buf) >> PAGE_SHIFT);

		id = xennet_rxidx(req_prod + i);

		KASSERT(sc->rx_mbufs[id] == NULL, ("non-NULL xn_rx_chain"));
		sc->rx_mbufs[id] = m_new;

		ref = gnttab_claim_grant_reference(&sc->gref_rx_head);
		KASSERT(ref != GNTTAB_LIST_END,
			("reserved grant references exhausted"));
		sc->grant_rx_ref[id] = ref;

		vaddr = mtod(m_new, vm_offset_t);
		pfn = vtophys(vaddr) >> PAGE_SHIFT;
		req = RING_GET_REQUEST(&sc->rx, req_prod + i);

		if (sc->copying_receiver == 0) {
			gnttab_grant_foreign_transfer_ref(ref,
			    otherend_id, pfn);
			sc->rx_pfn_array[nr_flips] = PFNTOMFN(pfn);
			if (!xen_feature(XENFEAT_auto_translated_physmap)) {
				/* Remove this page before passing
				 * back to Xen.
				 */
				set_phys_to_machine(pfn, INVALID_P2M_ENTRY);
				MULTI_update_va_mapping(&sc->rx_mcl[i],
				    vaddr, 0, 0);
			}
			nr_flips++;
		} else {
			gnttab_grant_foreign_access_ref(ref,
			    otherend_id,
			    PFNTOMFN(pfn), 0);
		}
		req->id = id;
		req->gref = ref;

		sc->rx_pfn_array[i] =
		    vtomach(mtod(m_new,vm_offset_t)) >> PAGE_SHIFT;
	}

	KASSERT(i, ("no mbufs processed")); /* should have returned earlier */
	KASSERT(mbufq_len(&sc->xn_rx_batch) == 0, ("not all mbufs processed"));
	/*
	 * We may have allocated buffers which have entries outstanding
	 * in the page update queue -- make sure we flush those first!
	 */
	PT_UPDATES_FLUSH();
	if (nr_flips != 0) {
#ifdef notyet
		/* Tell the balloon driver what is going on. */
		balloon_update_driver_allowance(i);
#endif
		set_xen_guest_handle(reservation.extent_start, sc->rx_pfn_array);
		reservation.nr_extents   = i;
		reservation.extent_order = 0;
		reservation.address_bits = 0;
		reservation.domid        = DOMID_SELF;

		if (!xen_feature(XENFEAT_auto_translated_physmap)) {
			/* After all PTEs have been zapped, flush the TLB. */
			sc->rx_mcl[i-1].args[MULTI_UVMFLAGS_INDEX] =
			    UVMF_TLB_FLUSH|UVMF_ALL;

			/* Give away a batch of pages. */
			sc->rx_mcl[i].op = __HYPERVISOR_memory_op;
			sc->rx_mcl[i].args[0] = XENMEM_decrease_reservation;
			sc->rx_mcl[i].args[1] =  (u_long)&reservation;
			/* Zap PTEs and give away pages in one big multicall. */
			(void)HYPERVISOR_multicall(sc->rx_mcl, i+1);

			if (__predict_false(sc->rx_mcl[i].result != i ||
			    HYPERVISOR_memory_op(XENMEM_decrease_reservation,
			    &reservation) != i))
				panic("%s: unable to reduce memory "
				    "reservation\n", __func__);
		}
	} else {
		wmb();
	}

	/* Above is a suitable barrier to ensure backend will see requests. */
	sc->rx.req_prod_pvt = req_prod + i;
push:
	RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&sc->rx, notify);
	if (notify)
		xen_intr_signal(sc->xen_intr_handle);
}

static void
xn_rxeof(struct netfront_info *np)
{
	struct ifnet *ifp;
#if __FreeBSD_version >= 700000 && (defined(INET) || defined(INET6))
	struct lro_ctrl *lro = &np->xn_lro;
	struct lro_entry *queued;
#endif
	struct netfront_rx_info rinfo;
	struct netif_rx_response *rx = &rinfo.rx;
	struct netif_extra_info *extras = rinfo.extras;
	RING_IDX i, rp;
	multicall_entry_t *mcl;
	struct mbuf *m;
	struct mbuf_head rxq, errq;
	int err, pages_flipped = 0, work_to_do;

	do {
		XN_RX_LOCK_ASSERT(np);
		if (!netfront_carrier_ok(np))
			return;

		mbufq_init(&errq);
		mbufq_init(&rxq);

		ifp = np->xn_ifp;

		rp = np->rx.sring->rsp_prod;
		rmb();	/* Ensure we see queued responses up to 'rp'. */

		i = np->rx.rsp_cons;
		while ((i != rp)) {
			memcpy(rx, RING_GET_RESPONSE(&np->rx, i), sizeof(*rx));
			memset(extras, 0, sizeof(rinfo.extras));

			m = NULL;
			err = xennet_get_responses(np, &rinfo, rp, &i, &m,
			    &pages_flipped);

			if (__predict_false(err)) {
				if (m)
					mbufq_tail(&errq, m);
				np->stats.rx_errors++;
				continue;
			}

			m->m_pkthdr.rcvif = ifp;
			if (rx->flags & NETRXF_data_validated) {
				/* Tell the stack the checksums are okay */
				/*
				 * XXX this isn't necessarily the case - need to add
				 * check
				 */

				m->m_pkthdr.csum_flags |=
					(CSUM_IP_CHECKED | CSUM_IP_VALID | CSUM_DATA_VALID
					    | CSUM_PSEUDO_HDR);
				m->m_pkthdr.csum_data = 0xffff;
			}

			np->stats.rx_packets++;
			np->stats.rx_bytes += m->m_pkthdr.len;

			mbufq_tail(&rxq, m);
			np->rx.rsp_cons = i;
		}

		if (pages_flipped) {
			/* Some pages are no longer absent... */
#ifdef notyet
			balloon_update_driver_allowance(-pages_flipped);
#endif
			/* Do all the remapping work, and M->P updates, in one big
			 * hypercall.
			 */
			if (!xen_feature(XENFEAT_auto_translated_physmap)) {
				mcl = np->rx_mcl + pages_flipped;
				mcl->op = __HYPERVISOR_mmu_update;
				mcl->args[0] = (u_long)np->rx_mmu;
				mcl->args[1] = pages_flipped;
				mcl->args[2] = 0;
				mcl->args[3] = DOMID_SELF;
				(void)HYPERVISOR_multicall(np->rx_mcl,
				    pages_flipped + 1);
			}
		}

		while ((m = mbufq_dequeue(&errq)))
			m_freem(m);

		/*
		 * Process all the mbufs after the remapping is complete.
		 * Break the mbuf chain first though.
		 */
		while ((m = mbufq_dequeue(&rxq)) != NULL) {
			ifp->if_ipackets++;

			/*
			 * Do we really need to drop the rx lock?
			 */
			XN_RX_UNLOCK(np);
#if __FreeBSD_version >= 700000 && (defined(INET) || defined(INET6))
			/* Use LRO if possible */
			if ((ifp->if_capenable & IFCAP_LRO) == 0 ||
			    lro->lro_cnt == 0 || tcp_lro_rx(lro, m, 0)) {
				/*
				 * If LRO fails, pass up to the stack
				 * directly.
				 */
				(*ifp->if_input)(ifp, m);
			}
#else
			(*ifp->if_input)(ifp, m);
#endif
			XN_RX_LOCK(np);
		}

		np->rx.rsp_cons = i;

#if __FreeBSD_version >= 700000 && (defined(INET) || defined(INET6))
		/*
		 * Flush any outstanding LRO work
		 */
		while (!SLIST_EMPTY(&lro->lro_active)) {
			queued = SLIST_FIRST(&lro->lro_active);
			SLIST_REMOVE_HEAD(&lro->lro_active, next);
			tcp_lro_flush(lro, queued);
		}
#endif

#if 0
		/* If we get a callback with very few responses, reduce fill target. */
		/* NB. Note exponential increase, linear decrease. */
		if (((np->rx.req_prod_pvt - np->rx.sring->rsp_prod) >
			((3*np->rx_target) / 4)) && (--np->rx_target < np->rx_min_target))
			np->rx_target = np->rx_min_target;
#endif

		network_alloc_rx_buffers(np);

		RING_FINAL_CHECK_FOR_RESPONSES(&np->rx, work_to_do);
	} while (work_to_do);
}

static void
xn_txeof(struct netfront_info *np)
{
	RING_IDX i, prod;
	unsigned short id;
	struct ifnet *ifp;
	netif_tx_response_t *txr;
	struct mbuf *m;

	XN_TX_LOCK_ASSERT(np);

	if (!netfront_carrier_ok(np))
		return;

	ifp = np->xn_ifp;

	do {
		prod = np->tx.sring->rsp_prod;
		rmb(); /* Ensure we see responses up to 'prod'. */

		for (i = np->tx.rsp_cons; i != prod; i++) {
			txr = RING_GET_RESPONSE(&np->tx, i);
			if (txr->status == NETIF_RSP_NULL)
				continue;

			if (txr->status != NETIF_RSP_OKAY) {
				printf("%s: WARNING: response is %d!\n",
				       __func__, txr->status);
			}
			id = txr->id;
			m = np->tx_mbufs[id];
			KASSERT(m != NULL, ("mbuf not found in xn_tx_chain"));
			KASSERT((uintptr_t)m > NET_TX_RING_SIZE,
				("mbuf already on the free list, but we're "
				"trying to free it again!"));
			M_ASSERTVALID(m);

			/*
			 * Increment packet count if this is the last
			 * mbuf of the chain.
			 */
			if (!m->m_next)
				ifp->if_opackets++;
			if (__predict_false(gnttab_query_foreign_access(
			    np->grant_tx_ref[id]) != 0)) {
				panic("%s: grant id %u still in use by the "
				    "backend", __func__, id);
			}
			gnttab_end_foreign_access_ref(
				np->grant_tx_ref[id]);
			gnttab_release_grant_reference(
				&np->gref_tx_head, np->grant_tx_ref[id]);
			np->grant_tx_ref[id] = GRANT_REF_INVALID;

			np->tx_mbufs[id] = NULL;
			add_id_to_freelist(np->tx_mbufs, id);
			np->xn_cdata.xn_tx_chain_cnt--;
			m_free(m);
			/* Only mark the queue active if we've freed up at
			 * least one slot to try */
			ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
		}
		np->tx.rsp_cons = prod;

		/*
		 * Set a new event, then check for race with update of
		 * tx_cons. Note that it is essential to schedule a
		 * callback, no matter how few buffers are pending. Even if
		 * there is space in the transmit ring, higher layers may
		 * be blocked because too much data is outstanding: in such
		 * cases notification from Xen is likely to be the only kick
		 * that we'll get.
		 */
		np->tx.sring->rsp_event =
		    prod + ((np->tx.sring->req_prod - prod) >> 1) + 1;

		mb();
	} while (prod != np->tx.sring->rsp_prod);

	if (np->tx_full &&
	    ((np->tx.sring->req_prod - prod) < NET_TX_RING_SIZE)) {
		np->tx_full = 0;
#if 0
		if (np->user_state == UST_OPEN)
			netif_wake_queue(dev);
#endif
	}
}

static void
xn_intr(void *xsc)
{
	struct netfront_info *np = xsc;
	struct ifnet *ifp = np->xn_ifp;

#if 0
	if (!(np->rx.rsp_cons != np->rx.sring->rsp_prod &&
	    likely(netfront_carrier_ok(np)) &&
	    ifp->if_drv_flags & IFF_DRV_RUNNING))
		return;
#endif
	if (RING_HAS_UNCONSUMED_RESPONSES(&np->tx)) {
		XN_TX_LOCK(np);
		xn_txeof(np);
		XN_TX_UNLOCK(np);
	}

	XN_RX_LOCK(np);
	xn_rxeof(np);
	XN_RX_UNLOCK(np);

	if (ifp->if_drv_flags & IFF_DRV_RUNNING &&
	    !IFQ_DRV_IS_EMPTY(&ifp->if_snd))
		xn_start(ifp);
}

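/*
 * Return an unconsumed slot to the receive ring: requeue the mbuf and its
 * grant reference under a fresh request so the backend can use them again.
 */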
static void
xennet_move_rx_slot(struct netfront_info *np, struct mbuf *m,
	grant_ref_t ref)
{
	int new = xennet_rxidx(np->rx.req_prod_pvt);

	KASSERT(np->rx_mbufs[new] == NULL, ("rx_mbufs != NULL"));
	np->rx_mbufs[new] = m;
	np->grant_rx_ref[new] = ref;
	RING_GET_REQUEST(&np->rx, np->rx.req_prod_pvt)->id = new;
	RING_GET_REQUEST(&np->rx, np->rx.req_prod_pvt)->gref = ref;
	np->rx.req_prod_pvt++;
}

static int
xennet_get_extras(struct netfront_info *np,
    struct netif_extra_info *extras, RING_IDX rp, RING_IDX *cons)
{
	struct netif_extra_info *extra;

	int err = 0;

	do {
		struct mbuf *m;
		grant_ref_t ref;

		if (__predict_false(*cons + 1 == rp)) {
#if 0
			if (net_ratelimit())
				WPRINTK("Missing extra info\n");
#endif
			err = EINVAL;
			break;
		}

		extra = (struct netif_extra_info *)
		RING_GET_RESPONSE(&np->rx, ++(*cons));

		if (__predict_false(!extra->type ||
			extra->type >= XEN_NETIF_EXTRA_TYPE_MAX)) {
#if 0
			if (net_ratelimit())
				WPRINTK("Invalid extra type: %d\n",
					extra->type);
#endif
			err = EINVAL;
		} else {
			memcpy(&extras[extra->type - 1], extra, sizeof(*extra));
		}

		m = xennet_get_rx_mbuf(np, *cons);
		ref = xennet_get_rx_ref(np, *cons);
		xennet_move_rx_slot(np, m, ref);
	} while (extra->flags & XEN_NETIF_EXTRA_FLAG_MORE);

	return err;
}
1284181643Skmacy
1285181643Skmacystatic int
1286181643Skmacyxennet_get_responses(struct netfront_info *np,
1287208901Sken	struct netfront_rx_info *rinfo, RING_IDX rp, RING_IDX *cons,
1288181945Skmacy	struct mbuf  **list,
1289181643Skmacy	int *pages_flipped_p)
1290181643Skmacy{
1291181643Skmacy	int pages_flipped = *pages_flipped_p;
1292181643Skmacy	struct mmu_update *mmu;
1293181643Skmacy	struct multicall_entry *mcl;
1294181643Skmacy	struct netif_rx_response *rx = &rinfo->rx;
1295181643Skmacy	struct netif_extra_info *extras = rinfo->extras;
1296181945Skmacy	struct mbuf *m, *m0, *m_prev;
1297208901Sken	grant_ref_t ref = xennet_get_rx_ref(np, *cons);
1298208901Sken	RING_IDX ref_cons = *cons;
1299181643Skmacy	int frags = 1;
1300181643Skmacy	int err = 0;
1301181643Skmacy	u_long ret;
1302181643Skmacy
1303208901Sken	m0 = m = m_prev = xennet_get_rx_mbuf(np, *cons);
1304181945Skmacy
1305181643Skmacy	if (rx->flags & NETRXF_extra_info) {
1306208901Sken		err = xennet_get_extras(np, extras, rp, cons);
1307181643Skmacy	}
1308181643Skmacy
1309181945Skmacy	if (m0 != NULL) {
1310208901Sken		m0->m_pkthdr.len = 0;
1311208901Sken		m0->m_next = NULL;
1312181945Skmacy	}
1313244991Smarius
1314181643Skmacy	for (;;) {
1315181643Skmacy		u_long mfn;
1316181643Skmacy
1317181945Skmacy#if 0
1318204158Skmacy		DPRINTK("rx->status=%hd rx->offset=%hu frags=%u\n",
1319181945Skmacy			rx->status, rx->offset, frags);
1320181945Skmacy#endif
1321255040Sgibbs		if (__predict_false(rx->status < 0 ||
1322181643Skmacy			rx->offset + rx->status > PAGE_SIZE)) {
1323208901Sken
1324181643Skmacy#if 0
1325181643Skmacy			if (net_ratelimit())
1326181643Skmacy				WPRINTK("rx->offset: %x, size: %u\n",
1327181643Skmacy					rx->offset, rx->status);
1328181643Skmacy#endif
1329181643Skmacy			xennet_move_rx_slot(np, m, ref);
1330208901Sken			if (m0 == m)
1331208901Sken				m0 = NULL;
1332208901Sken			m = NULL;
1333208901Sken			err = EINVAL;
1334208901Sken			goto next_skip_queue;
1335181643Skmacy		}
1336181945Skmacy
1337181643Skmacy		/*
1338181643Skmacy		 * This definitely indicates a bug, either in this driver or in
1339181643Skmacy		 * the backend driver. In future this should flag the bad
1340181643Skmacy		 * situation to the system controller to reboot the backed.
1341181643Skmacy		 */
1342214077Sgibbs		if (ref == GRANT_REF_INVALID) {
1343208901Sken
1344181643Skmacy#if 0
1345181643Skmacy			if (net_ratelimit())
1346181643Skmacy				WPRINTK("Bad rx response id %d.\n", rx->id);
1347181643Skmacy#endif
1348214077Sgibbs			printf("%s: Bad rx response id %d.\n", __func__,rx->id);
1349208901Sken			err = EINVAL;
1350181643Skmacy			goto next;
1351181643Skmacy		}

		if (!np->copying_receiver) {
			/*
			 * Memory pressure, insufficient buffer
			 * headroom, ...
			 */
			if (!(mfn = gnttab_end_foreign_transfer_ref(ref))) {
				WPRINTK("Unfulfilled rx req (id=%d, st=%d).\n",
					rx->id, rx->status);
				xennet_move_rx_slot(np, m, ref);
				err = ENOMEM;
				goto next;
			}

			if (!xen_feature(XENFEAT_auto_translated_physmap)) {
				/* Remap the page. */
				void *vaddr = mtod(m, void *);
				uint32_t pfn;

				mcl = np->rx_mcl + pages_flipped;
				mmu = np->rx_mmu + pages_flipped;

				MULTI_update_va_mapping(mcl, (u_long)vaddr,
				    (((vm_paddr_t)mfn) << PAGE_SHIFT) | PG_RW |
				    PG_V | PG_M | PG_A, 0);
				pfn = (uintptr_t)m->m_ext.ext_arg1;
				mmu->ptr = ((vm_paddr_t)mfn << PAGE_SHIFT) |
				    MMU_MACHPHYS_UPDATE;
				mmu->val = pfn;

				set_phys_to_machine(pfn, mfn);
			}
			pages_flipped++;
		} else {
			ret = gnttab_end_foreign_access_ref(ref);
			KASSERT(ret, ("ret != 0"));
		}

		gnttab_release_grant_reference(&np->gref_rx_head, ref);

next:
		if (m == NULL)
			break;

		m->m_len = rx->status;
		m->m_data += rx->offset;
		m0->m_pkthdr.len += rx->status;

next_skip_queue:
		if (!(rx->flags & NETRXF_more_data))
			break;

		if (*cons + frags == rp) {
			if (net_ratelimit())
				WPRINTK("Need more frags\n");
			err = ENOENT;
			printf("%s: cons %u frags %u rp %u, not enough frags\n",
			       __func__, *cons, frags, rp);
			break;
		}
		/*
		 * Note that m can be NULL, if rx->status < 0 or if
		 * rx->offset + rx->status > PAGE_SIZE above.
		 */
		m_prev = m;

		rx = RING_GET_RESPONSE(&np->rx, *cons + frags);
		m = xennet_get_rx_mbuf(np, *cons + frags);

		/*
		 * m_prev == NULL can happen if rx->status < 0 or if
		 * rx->offset + rx->status > PAGE_SIZE above.
		 */
		if (m_prev != NULL)
			m_prev->m_next = m;

		/*
		 * m0 can be NULL if rx->status < 0 or if rx->offset +
		 * rx->status > PAGE_SIZE above.
		 */
		if (m0 == NULL)
			m0 = m;
		m->m_next = NULL;
		ref = xennet_get_rx_ref(np, *cons + frags);
		ref_cons = *cons + frags;
		frags++;
	}
	*list = m0;
	*cons += frags;
	*pages_flipped_p = pages_flipped;

	return (err);
}

static void
xn_tick_locked(struct netfront_info *sc)
{
	XN_RX_LOCK_ASSERT(sc);
	callout_reset(&sc->xn_stat_ch, hz, xn_tick, sc);

	/* XXX placeholder for printing debug information */
}

static void
xn_tick(void *xsc)
{
	struct netfront_info *sc;

	sc = xsc;
	XN_RX_LOCK(sc);
	xn_tick_locked(sc);
	XN_RX_UNLOCK(sc);
}

/**
 * \brief Count the number of fragments in an mbuf chain.
 *
 * Surprisingly, there isn't an M* macro for this.
 */
static inline int
xn_count_frags(struct mbuf *m)
{
	int nfrags;

	for (nfrags = 0; m != NULL; m = m->m_next)
		nfrags++;

	return (nfrags);
}

/**
 * Given an mbuf chain, make sure we have enough room and then push
 * it onto the transmit ring.
 */
static int
xn_assemble_tx_request(struct netfront_info *sc, struct mbuf *m_head)
{
	struct ifnet *ifp;
	struct mbuf *m;
	u_int nfrags;
	netif_extra_info_t *extra;
	int otherend_id;

	ifp = sc->xn_ifp;

	/* Count the fragments in the mbuf chain. */
	nfrags = xn_count_frags(m_head);

	/*
	 * Check to see whether this request is longer than netback
	 * can handle, and try to defragment it.  It is a bit lame, but
	 * the netback driver in Linux can't deal with
	 * nfrags > MAX_TX_REQ_FRAGS, which is a quirk of the Linux
	 * network stack.
	 */
	if (nfrags > sc->maxfrags) {
		m = m_defrag(m_head, M_NOWAIT);
		if (m == NULL) {
			/*
			 * Defrag failed, so free the mbuf and
			 * therefore drop the packet.
			 */
			m_freem(m_head);
			return (EMSGSIZE);
		}
		m_head = m;
	}

	/* Determine how many fragments now exist. */
	nfrags = xn_count_frags(m_head);

	/*
	 * Check to see whether the defragmented packet has too many
	 * segments for the Linux netback driver.  The FreeBSD TCP
	 * stack, with TSO enabled, can produce a chain of mbufs longer
	 * than Linux can handle.  Make sure we don't pass a too-long
	 * chain over to the other side by dropping the packet.  It
	 * doesn't look like there is currently a way to tell the TCP
	 * stack to generate a shorter chain of packets.
	 */
	if (nfrags > MAX_TX_REQ_FRAGS) {
#ifdef DEBUG
		printf("%s: nfrags %d > MAX_TX_REQ_FRAGS %d, netback "
		       "won't be able to handle it, dropping\n",
		       __func__, nfrags, MAX_TX_REQ_FRAGS);
#endif
		m_freem(m_head);
		return (EMSGSIZE);
	}

	/*
	 * This check should be redundant.  We've already verified that we
	 * have enough slots in the ring to handle a packet of maximum
	 * size, and that our packet is less than the maximum size.  Keep
	 * it in here as an assert for now just to make certain that
	 * xn_tx_chain_cnt is accurate.
	 */
	KASSERT((sc->xn_cdata.xn_tx_chain_cnt + nfrags) <= NET_TX_RING_SIZE,
		("%s: xn_tx_chain_cnt (%d) + nfrags (%d) > NET_TX_RING_SIZE "
		 "(%d)!", __func__, (int) sc->xn_cdata.xn_tx_chain_cnt,
		 (int) nfrags, (int) NET_TX_RING_SIZE));

	/*
	 * Start packing the mbufs in this chain into
	 * the fragment pointers. Stop when we run out
	 * of fragments or hit the end of the mbuf chain.
	 */
	m = m_head;
	extra = NULL;
	otherend_id = xenbus_get_otherend_id(sc->xbdev);
	for (m = m_head; m; m = m->m_next) {
		netif_tx_request_t *tx;
		uintptr_t id;
		grant_ref_t ref;
		u_long mfn; /* XXX Wrong type? */

		tx = RING_GET_REQUEST(&sc->tx, sc->tx.req_prod_pvt);
		id = get_id_from_freelist(sc->tx_mbufs);
		if (id == 0)
			panic("%s: was allocated the freelist head!\n",
			    __func__);
		sc->xn_cdata.xn_tx_chain_cnt++;
		if (sc->xn_cdata.xn_tx_chain_cnt > NET_TX_RING_SIZE)
			panic("%s: tx_chain_cnt must be <= NET_TX_RING_SIZE\n",
			    __func__);
		sc->tx_mbufs[id] = m;
		tx->id = id;
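		/*
		 * Claim a grant reference for this fragment's page and
		 * grant the backend read-only access to it.
		 */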
		ref = gnttab_claim_grant_reference(&sc->gref_tx_head);
		KASSERT((short)ref >= 0, ("Negative ref"));
		mfn = virt_to_mfn(mtod(m, vm_offset_t));
		gnttab_grant_foreign_access_ref(ref, otherend_id,
		    mfn, GNTMAP_readonly);
		tx->gref = sc->grant_tx_ref[id] = ref;
		tx->offset = mtod(m, vm_offset_t) & (PAGE_SIZE - 1);
		tx->flags = 0;
		if (m == m_head) {
			/*
			 * The first fragment has the entire packet
			 * size, subsequent fragments have just the
			 * fragment size. The backend works out the
			 * true size of the first fragment by
			 * subtracting the sizes of the other
			 * fragments.
			 */
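			/*
			 * For example (illustrative numbers only): a
			 * 3000-byte packet split into fragments of 2000
			 * and 1000 bytes is posted with tx->size values
			 * of 3000 and 1000, and the backend recovers the
			 * first fragment's true length as 3000 - 1000.
			 */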
			tx->size = m->m_pkthdr.len;

			/*
			 * The first fragment contains the checksum flags
			 * and is optionally followed by extra data for
			 * TSO etc.  Note that CSUM_TSO requires checksum
			 * offloading, but some versions of FreeBSD fail
			 * to set CSUM_TCP in the CSUM_TSO case, so we
			 * have to test for CSUM_TSO explicitly.
			 */
			if (m->m_pkthdr.csum_flags
			    & (CSUM_DELAY_DATA | CSUM_TSO)) {
				tx->flags |= (NETTXF_csum_blank
				    | NETTXF_data_validated);
			}
#if __FreeBSD_version >= 700000
			if (m->m_pkthdr.csum_flags & CSUM_TSO) {
				struct netif_extra_info *gso =
					(struct netif_extra_info *)
					RING_GET_REQUEST(&sc->tx,
							 ++sc->tx.req_prod_pvt);

				tx->flags |= NETTXF_extra_info;

				gso->u.gso.size = m->m_pkthdr.tso_segsz;
				gso->u.gso.type =
					XEN_NETIF_GSO_TYPE_TCPV4;
				gso->u.gso.pad = 0;
				gso->u.gso.features = 0;

				gso->type = XEN_NETIF_EXTRA_TYPE_GSO;
				gso->flags = 0;
			}
#endif
		} else {
			tx->size = m->m_len;
		}
		if (m->m_next)
			tx->flags |= NETTXF_more_data;

		sc->tx.req_prod_pvt++;
	}
	BPF_MTAP(ifp, m_head);

	sc->stats.tx_bytes += m_head->m_pkthdr.len;
	sc->stats.tx_packets++;

	return (0);
}

static void
xn_start_locked(struct ifnet *ifp)
{
	struct netfront_info *sc;
	struct mbuf *m_head;
	int notify;

	sc = ifp->if_softc;

	if (!netfront_carrier_ok(sc))
		return;

	/*
	 * While we have enough transmit slots available for at least one
	 * maximum-sized packet, pull mbufs off the queue and put them on
	 * the transmit ring.
	 */
	while (xn_tx_slot_available(sc)) {
		IF_DEQUEUE(&ifp->if_snd, m_head);
		if (m_head == NULL)
			break;

		if (xn_assemble_tx_request(sc, m_head) != 0)
			break;
	}

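	/*
	 * Publish the queued requests on the shared ring and send the
	 * backend an event-channel notification if it asked for one.
	 */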
	RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&sc->tx, notify);
	if (notify)
		xen_intr_signal(sc->xen_intr_handle);

	if (RING_FULL(&sc->tx)) {
		sc->tx_full = 1;
#if 0
		netif_stop_queue(dev);
#endif
	}
}

static void
xn_start(struct ifnet *ifp)
{
	struct netfront_info *sc;

	sc = ifp->if_softc;
	XN_TX_LOCK(sc);
	xn_start_locked(ifp);
	XN_TX_UNLOCK(sc);
}

/* equivalent of network_open() in Linux */
static void
xn_ifinit_locked(struct netfront_info *sc)
{
	struct ifnet *ifp;

	XN_LOCK_ASSERT(sc);

	ifp = sc->xn_ifp;

	if (ifp->if_drv_flags & IFF_DRV_RUNNING)
		return;

	xn_stop(sc);

	network_alloc_rx_buffers(sc);
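	/*
	 * Ask the backend to raise an event as soon as the next RX
	 * response is produced (rsp_event holds the index of the
	 * response at which we next want to be notified).
	 */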
	sc->rx.sring->rsp_event = sc->rx.rsp_cons + 1;

	ifp->if_drv_flags |= IFF_DRV_RUNNING;
	ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
	if_link_state_change(ifp, LINK_STATE_UP);

	callout_reset(&sc->xn_stat_ch, hz, xn_tick, sc);
}

static void
xn_ifinit(void *xsc)
{
	struct netfront_info *sc = xsc;

	XN_LOCK(sc);
	xn_ifinit_locked(sc);
	XN_UNLOCK(sc);
}

static int
xn_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
{
	struct netfront_info *sc = ifp->if_softc;
	struct ifreq *ifr = (struct ifreq *) data;
#ifdef INET
	struct ifaddr *ifa = (struct ifaddr *)data;
#endif
	int mask, error = 0;

	switch (cmd) {
	case SIOCSIFADDR:
	case SIOCGIFADDR:
#ifdef INET
		XN_LOCK(sc);
		if (ifa->ifa_addr->sa_family == AF_INET) {
			ifp->if_flags |= IFF_UP;
			if (!(ifp->if_drv_flags & IFF_DRV_RUNNING))
				xn_ifinit_locked(sc);
			arp_ifinit(ifp, ifa);
			XN_UNLOCK(sc);
		} else {
			XN_UNLOCK(sc);
#endif
			error = ether_ioctl(ifp, cmd, data);
#ifdef INET
		}
#endif
		break;
	case SIOCSIFMTU:
		/* XXX can we alter the MTU on a VN? */
#ifdef notyet
		if (ifr->ifr_mtu > XN_JUMBO_MTU)
			error = EINVAL;
		else
#endif
		{
			ifp->if_mtu = ifr->ifr_mtu;
			ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
			xn_ifinit(sc);
		}
		break;
	case SIOCSIFFLAGS:
		XN_LOCK(sc);
		if (ifp->if_flags & IFF_UP) {
			/*
			 * If only the state of the PROMISC flag changed,
			 * then just use the 'set promisc mode' command
			 * instead of reinitializing the entire NIC. Doing
			 * a full re-init means reloading the firmware and
			 * waiting for it to start up, which may take a
			 * second or two.
			 */
#ifdef notyet
			/* No promiscuous mode with Xen */
			if (ifp->if_drv_flags & IFF_DRV_RUNNING &&
			    ifp->if_flags & IFF_PROMISC &&
			    !(sc->xn_if_flags & IFF_PROMISC)) {
				XN_SETBIT(sc, XN_RX_MODE,
					  XN_RXMODE_RX_PROMISC);
			} else if (ifp->if_drv_flags & IFF_DRV_RUNNING &&
				   !(ifp->if_flags & IFF_PROMISC) &&
				   sc->xn_if_flags & IFF_PROMISC) {
				XN_CLRBIT(sc, XN_RX_MODE,
					  XN_RXMODE_RX_PROMISC);
			} else
#endif
				xn_ifinit_locked(sc);
		} else {
			if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
				xn_stop(sc);
			}
		}
		sc->xn_if_flags = ifp->if_flags;
		XN_UNLOCK(sc);
		error = 0;
		break;
	case SIOCSIFCAP:
		mask = ifr->ifr_reqcap ^ ifp->if_capenable;
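		/*
		 * mask now holds exactly the capability bits whose state
		 * the caller wants toggled; handle each supported bit in
		 * turn.
		 */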
		if (mask & IFCAP_TXCSUM) {
			if (IFCAP_TXCSUM & ifp->if_capenable) {
				ifp->if_capenable &= ~(IFCAP_TXCSUM|IFCAP_TSO4);
				ifp->if_hwassist &= ~(CSUM_TCP | CSUM_UDP
				    | CSUM_IP | CSUM_TSO);
			} else {
				ifp->if_capenable |= IFCAP_TXCSUM;
				ifp->if_hwassist |= (CSUM_TCP | CSUM_UDP
				    | CSUM_IP);
			}
		}
		if (mask & IFCAP_RXCSUM) {
			ifp->if_capenable ^= IFCAP_RXCSUM;
		}
#if __FreeBSD_version >= 700000
		if (mask & IFCAP_TSO4) {
			if (IFCAP_TSO4 & ifp->if_capenable) {
				ifp->if_capenable &= ~IFCAP_TSO4;
				ifp->if_hwassist &= ~CSUM_TSO;
			} else if (IFCAP_TXCSUM & ifp->if_capenable) {
				ifp->if_capenable |= IFCAP_TSO4;
				ifp->if_hwassist |= CSUM_TSO;
			} else {
				IPRINTK("Xen requires tx checksum offload"
				    " be enabled to use TSO\n");
				error = EINVAL;
			}
		}
		if (mask & IFCAP_LRO) {
			ifp->if_capenable ^= IFCAP_LRO;
		}
#endif
		break;
	case SIOCADDMULTI:
	case SIOCDELMULTI:
#ifdef notyet
		if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
			XN_LOCK(sc);
			xn_setmulti(sc);
			XN_UNLOCK(sc);
			error = 0;
		}
#endif
		/* FALLTHROUGH */
	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		error = ifmedia_ioctl(ifp, ifr, &sc->sc_media, cmd);
		break;
	default:
		error = ether_ioctl(ifp, cmd, data);
	}

	return (error);
}

static void
xn_stop(struct netfront_info *sc)
{
	struct ifnet *ifp;

	XN_LOCK_ASSERT(sc);

	ifp = sc->xn_ifp;

	callout_stop(&sc->xn_stat_ch);

	xn_free_rx_ring(sc);
	xn_free_tx_ring(sc);

	ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
	if_link_state_change(ifp, LINK_STATE_DOWN);
}

/* START of Xenolinux helper functions adapted to FreeBSD */
int
network_connect(struct netfront_info *np)
{
	int i, requeue_idx, error;
	grant_ref_t ref;
	netif_rx_request_t *req;
	u_int feature_rx_copy, feature_rx_flip;

	error = xs_scanf(XST_NIL, xenbus_get_otherend_path(np->xbdev),
	    "feature-rx-copy", NULL, "%u", &feature_rx_copy);
	if (error)
		feature_rx_copy = 0;
	error = xs_scanf(XST_NIL, xenbus_get_otherend_path(np->xbdev),
	    "feature-rx-flip", NULL, "%u", &feature_rx_flip);
	if (error)
		feature_rx_flip = 1;

	/*
	 * Copy packets on receive path if:
	 *  (a) This was requested by user, and the backend supports it; or
	 *  (b) Flipping was requested, but this is unsupported by the backend.
	 */
	np->copying_receiver = ((MODPARM_rx_copy && feature_rx_copy) ||
				(MODPARM_rx_flip && !feature_rx_flip));

	/* Recovery procedure: */
	error = talk_to_backend(np->xbdev, np);
	if (error)
		return (error);

	/* Step 1: Reinitialise variables. */
	xn_query_features(np);
	xn_configure_features(np);
	netif_release_tx_bufs(np);

	/* Step 2: Rebuild the RX buffer freelist and the RX ring itself. */
	for (requeue_idx = 0, i = 0; i < NET_RX_RING_SIZE; i++) {
		struct mbuf *m;
		u_long pfn;

		if (np->rx_mbufs[i] == NULL)
			continue;

		m = np->rx_mbufs[requeue_idx] = xennet_get_rx_mbuf(np, i);
		ref = np->grant_rx_ref[requeue_idx] = xennet_get_rx_ref(np, i);

		req = RING_GET_REQUEST(&np->rx, requeue_idx);
		pfn = vtophys(mtod(m, vm_offset_t)) >> PAGE_SHIFT;

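		/*
		 * Re-grant the page to the backend: as a page transfer
		 * when flipping, or as shared access when copying.
		 */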
		if (!np->copying_receiver) {
			gnttab_grant_foreign_transfer_ref(ref,
			    xenbus_get_otherend_id(np->xbdev),
			    pfn);
		} else {
			gnttab_grant_foreign_access_ref(ref,
			    xenbus_get_otherend_id(np->xbdev),
			    PFNTOMFN(pfn), 0);
		}
		req->gref = ref;
		req->id   = requeue_idx;

		requeue_idx++;
	}

	np->rx.req_prod_pvt = requeue_idx;

	/*
	 * Step 3: All public and private state should now be sane.  Get
	 * ready to start sending and receiving packets and give the driver
	 * domain a kick because we've probably just requeued some
	 * packets.
	 */
	netfront_carrier_on(np);
	xen_intr_signal(np->xen_intr_handle);
	XN_TX_LOCK(np);
	xn_txeof(np);
	XN_TX_UNLOCK(np);
	network_alloc_rx_buffers(np);

	return (0);
}

static void
xn_query_features(struct netfront_info *np)
{
	int val;

	device_printf(np->xbdev, "backend features:");

	if (xs_scanf(XST_NIL, xenbus_get_otherend_path(np->xbdev),
		"feature-sg", NULL, "%d", &val) < 0)
		val = 0;

	np->maxfrags = 1;
	if (val) {
		np->maxfrags = MAX_TX_REQ_FRAGS;
		printf(" feature-sg");
	}

	if (xs_scanf(XST_NIL, xenbus_get_otherend_path(np->xbdev),
		"feature-gso-tcpv4", NULL, "%d", &val) < 0)
		val = 0;

	np->xn_ifp->if_capabilities &= ~(IFCAP_TSO4|IFCAP_LRO);
	if (val) {
		np->xn_ifp->if_capabilities |= IFCAP_TSO4|IFCAP_LRO;
		printf(" feature-gso-tcpv4");
	}

	printf("\n");
}

static int
xn_configure_features(struct netfront_info *np)
{
	int err, cap_enabled;

	err = 0;

	if (np->xn_resume &&
	    ((np->xn_ifp->if_capenable & np->xn_ifp->if_capabilities)
	    == np->xn_ifp->if_capenable)) {
		/* Current options are available, no need to do anything. */
		return (0);
	}

	/* Try to preserve as many options as possible. */
	if (np->xn_resume)
		cap_enabled = np->xn_ifp->if_capenable;
	else
		cap_enabled = UINT_MAX;

#if __FreeBSD_version >= 700000 && (defined(INET) || defined(INET6))
	if ((np->xn_ifp->if_capenable & IFCAP_LRO) == (cap_enabled & IFCAP_LRO))
		tcp_lro_free(&np->xn_lro);
#endif
	np->xn_ifp->if_capenable =
	    np->xn_ifp->if_capabilities & ~(IFCAP_LRO|IFCAP_TSO4) & cap_enabled;
	np->xn_ifp->if_hwassist &= ~CSUM_TSO;
#if __FreeBSD_version >= 700000 && (defined(INET) || defined(INET6))
	if (xn_enable_lro && (np->xn_ifp->if_capabilities & IFCAP_LRO) ==
	    (cap_enabled & IFCAP_LRO)) {
		err = tcp_lro_init(&np->xn_lro);
		if (err) {
			device_printf(np->xbdev, "LRO initialization failed\n");
		} else {
			np->xn_lro.ifp = np->xn_ifp;
			np->xn_ifp->if_capenable |= IFCAP_LRO;
		}
	}
	if ((np->xn_ifp->if_capabilities & IFCAP_TSO4) ==
	    (cap_enabled & IFCAP_TSO4)) {
		np->xn_ifp->if_capenable |= IFCAP_TSO4;
		np->xn_ifp->if_hwassist |= CSUM_TSO;
	}
#endif
	return (err);
}

/**
 * Create a network device.
 * @param dev  Newbus device representing this virtual NIC.
 */
int
create_netdev(device_t dev)
{
	int i;
	struct netfront_info *np;
	int err;
	struct ifnet *ifp;

	np = device_get_softc(dev);

	np->xbdev         = dev;

	XN_LOCK_INIT(np, xennetif);

	ifmedia_init(&np->sc_media, 0, xn_ifmedia_upd, xn_ifmedia_sts);
	ifmedia_add(&np->sc_media, IFM_ETHER|IFM_MANUAL, 0, NULL);
	ifmedia_set(&np->sc_media, IFM_ETHER|IFM_MANUAL);

	np->rx_target     = RX_MIN_TARGET;
	np->rx_min_target = RX_MIN_TARGET;
	np->rx_max_target = RX_MAX_TARGET;

	/*
	 * Initialise tx_mbufs and rx_mbufs as free chains containing
	 * every entry.
	 */
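	/*
	 * The tx freelist is encoded in the array itself: each free
	 * slot stores the index of the next free slot, with slot 0
	 * serving as the head (see get_id_from_freelist()).
	 */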
	for (i = 0; i <= NET_TX_RING_SIZE; i++) {
		np->tx_mbufs[i] = (void *) ((u_long) i+1);
		np->grant_tx_ref[i] = GRANT_REF_INVALID;
	}
	np->tx_mbufs[NET_TX_RING_SIZE] = (void *)0;

	for (i = 0; i <= NET_RX_RING_SIZE; i++) {
		np->rx_mbufs[i] = NULL;
		np->grant_rx_ref[i] = GRANT_REF_INVALID;
	}
	/* A grant for every tx ring slot */
	if (gnttab_alloc_grant_references(NET_TX_RING_SIZE,
					  &np->gref_tx_head) != 0) {
		IPRINTK("#### netfront can't alloc tx grant refs\n");
		err = ENOMEM;
		goto exit;
	}
	/* A grant for every rx ring slot */
	if (gnttab_alloc_grant_references(RX_MAX_TARGET,
					  &np->gref_rx_head) != 0) {
		WPRINTK("#### netfront can't alloc rx grant refs\n");
		gnttab_free_grant_references(np->gref_tx_head);
		err = ENOMEM;
		goto exit;
	}

	err = xen_net_read_mac(dev, np->mac);
	if (err)
		goto out;

	/* Set up ifnet structure */
	ifp = np->xn_ifp = if_alloc(IFT_ETHER);
	ifp->if_softc = np;
	if_initname(ifp, "xn", device_get_unit(dev));
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = xn_ioctl;
	ifp->if_output = ether_output;
	ifp->if_start = xn_start;
#ifdef notyet
	ifp->if_watchdog = xn_watchdog;
#endif
	ifp->if_init = xn_ifinit;
	ifp->if_snd.ifq_maxlen = NET_TX_RING_SIZE - 1;

	ifp->if_hwassist = XN_CSUM_FEATURES;
	ifp->if_capabilities = IFCAP_HWCSUM;
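	/*
	 * The TSO limits below leave room for an Ethernet header and a
	 * VLAN tag in each 64KB TSO burst, and cap the number of
	 * segments at what a single tx request chain can carry.
	 */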
	ifp->if_hw_tsomax = 65536 - (ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN);
	ifp->if_hw_tsomaxsegcount = MAX_TX_REQ_FRAGS;
	ifp->if_hw_tsomaxsegsize = PAGE_SIZE;

	ether_ifattach(ifp, np->mac);
	callout_init(&np->xn_stat_ch, CALLOUT_MPSAFE);
	netfront_carrier_off(np);

	return (0);

exit:
	gnttab_free_grant_references(np->gref_tx_head);
out:
	return (err);
}

/**
 * Handle the change of state of the backend to Closing.  We must delete our
 * device-layer structures now, to ensure that writes are flushed through to
 * the backend.  Once this is done, we can switch to Closed in
 * acknowledgement.
 */
#if 0
static void
netfront_closing(device_t dev)
{
#if 0
	struct netfront_info *info = dev->dev_driver_data;

	DPRINTK("netfront_closing: %s removed\n", dev->nodename);

	close_netdev(info);
#endif
	xenbus_switch_state(dev, XenbusStateClosed);
}
#endif

static int
netfront_detach(device_t dev)
{
	struct netfront_info *info = device_get_softc(dev);

	DPRINTK("%s\n", xenbus_get_node(dev));

	netif_free(info);

	return (0);
}

static void
netif_free(struct netfront_info *info)
{
	XN_LOCK(info);
	xn_stop(info);
	XN_UNLOCK(info);
	callout_drain(&info->xn_stat_ch);
	netif_disconnect_backend(info);
	if (info->xn_ifp != NULL) {
		ether_ifdetach(info->xn_ifp);
		if_free(info->xn_ifp);
		info->xn_ifp = NULL;
	}
	ifmedia_removeall(&info->sc_media);
}

static void
netif_disconnect_backend(struct netfront_info *info)
{
	XN_RX_LOCK(info);
	XN_TX_LOCK(info);
	netfront_carrier_off(info);
	XN_TX_UNLOCK(info);
	XN_RX_UNLOCK(info);

	free_ring(&info->tx_ring_ref, &info->tx.sring);
	free_ring(&info->rx_ring_ref, &info->rx.sring);

	xen_intr_unbind(&info->xen_intr_handle);
}

static void
free_ring(int *ref, void *ring_ptr_ref)
{
	void **ring_ptr_ptr = ring_ptr_ref;

	if (*ref != GRANT_REF_INVALID) {
		/* This API frees the associated storage. */
		gnttab_end_foreign_access(*ref, *ring_ptr_ptr);
		*ref = GRANT_REF_INVALID;
	}
	*ring_ptr_ptr = NULL;
}

static int
xn_ifmedia_upd(struct ifnet *ifp)
{
	return (0);
}

static void
xn_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	ifmr->ifm_status = IFM_AVALID|IFM_ACTIVE;
	ifmr->ifm_active = IFM_ETHER|IFM_MANUAL;
}

/* ** Driver registration ** */
static device_method_t netfront_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,         netfront_probe),
	DEVMETHOD(device_attach,        netfront_attach),
	DEVMETHOD(device_detach,        netfront_detach),
	DEVMETHOD(device_shutdown,      bus_generic_shutdown),
	DEVMETHOD(device_suspend,       netfront_suspend),
	DEVMETHOD(device_resume,        netfront_resume),

	/* Xenbus interface */
	DEVMETHOD(xenbus_otherend_changed, netfront_backend_changed),

	DEVMETHOD_END
};

static driver_t netfront_driver = {
	"xn",
	netfront_methods,
	sizeof(struct netfront_info),
};
devclass_t netfront_devclass;

DRIVER_MODULE(xe, xenbusb_front, netfront_driver, netfront_devclass, NULL,
    NULL);