netback_unit_tests.c revision 240521
1156230Smux/*-
2156230Smux * Copyright (c) 2009-2011 Spectra Logic Corporation
3156230Smux * All rights reserved.
4156230Smux *
5156230Smux * Redistribution and use in source and binary forms, with or without
6156230Smux * modification, are permitted provided that the following conditions
7156230Smux * are met:
8156230Smux * 1. Redistributions of source code must retain the above copyright
9156230Smux *    notice, this list of conditions, and the following disclaimer,
10156230Smux *    without modification.
11156230Smux * 2. Redistributions in binary form must reproduce at minimum a disclaimer
12156230Smux *    substantially similar to the "NO WARRANTY" disclaimer below
13156230Smux *    ("Disclaimer") and any redistribution must be conditioned upon
14156230Smux *    including a substantially similar Disclaimer requirement for further
15156230Smux *    binary redistribution.
16156230Smux *
17156230Smux * NO WARRANTY
18156230Smux * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
19156230Smux * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
20156230Smux * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
21156230Smux * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
22156230Smux * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
23156230Smux * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
24156230Smux * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
25156230Smux * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
26156230Smux * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
27156230Smux * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
28156230Smux * POSSIBILITY OF SUCH DAMAGES.
29156230Smux *
30156230Smux * Authors: Justin T. Gibbs     (Spectra Logic Corporation)
31156230Smux *          Alan Somers         (Spectra Logic Corporation)
32156230Smux *          John Suykerbuyk     (Spectra Logic Corporation)
33156230Smux */
34156230Smux
35156230Smux#include <sys/cdefs.h>
36156230Smux__FBSDID("$FreeBSD: head/sys/dev/xen/netback/netback_unit_tests.c 240521 2012-09-14 22:00:03Z eadler $");
37156230Smux
38156230Smux/**
39156230Smux * \file netback_unit_tests.c
40156230Smux *
41156230Smux * \brief Unit tests for the Xen netback driver.
42156230Smux *
43156230Smux * Due to the driver's use of static functions, these tests cannot be compiled
44156230Smux * standalone; they must be #include'd from the driver's .c file.
45156230Smux */
46156230Smux
47156230Smux
/**
 * Helper macro used to snprintf to a buffer and update the buffer pointer.
 * Implied contract: "buffer" and "buflen" are modifiable lvalues tracking
 * the remaining space.  Once buflen reaches 0 further calls are no-ops.
 */
#define	SNCATF(buffer, buflen, ...) do {				\
	int _snc_rc = snprintf(buffer, buflen, __VA_ARGS__);		\
	/*								\
	 * snprintf returns a negative value on output error and a	\
	 * value >= buflen on truncation.  Clamp so the pointer never	\
	 * moves past the buffer and the length never underflows.	\
	 */								\
	size_t new_chars = (_snc_rc < 0) ? 0 :				\
	    MIN((size_t)_snc_rc, buflen);				\
	buffer += new_chars;						\
	buflen -= new_chars;						\
} while (0)
55156230Smux
/*
 * STRINGIFY and TOSTRING are used only to help turn __LINE__ into a string.
 * TOSTRING macro-expands its argument first (so __LINE__ becomes, e.g., 42);
 * STRINGIFY then turns the expanded token into a string literal.
 */
#define	STRINGIFY(x) #x
#define	TOSTRING(x) STRINGIFY(x)
59156230Smux
/**
 * Writes an error message to buffer if cond is false, and returns true
 * iff the assertion failed.  Note the implied parameters buffer and
 * buflen, which every testcase_t receives.  Implemented as a GCC/Clang
 * statement expression; strlcat bounds all writes by _buflen, and
 * TOSTRING(__LINE__) embeds the failing line number in the message.
 */
#define	XNB_ASSERT(cond) ({						\
	int passed = (cond);						\
	char *_buffer = (buffer);					\
	size_t _buflen = (buflen);					\
	if (! passed) {							\
		strlcat(_buffer, __func__, _buflen);			\
		strlcat(_buffer, ":" TOSTRING(__LINE__) 		\
		  " Assertion Error: " #cond "\n", _buflen);		\
	}								\
 ! passed; })
75156230Smux
76156230Smux
/**
 * The signature used by all testcases.  If the test writes anything
 * to buffer, then it will be considered a failure
 * \param buffer	Return storage for error messages
 * \param buflen	The space available in the buffer
 */
typedef void testcase_t(char *buffer, size_t buflen);

/**
 * Signature used by setup functions
 * \return nonzero on error
 */
typedef int setup_t(void);

/** Signature used by teardown functions; must tolerate a failed setup */
typedef void teardown_t(void);

/** A simple test fixture comprising setup, teardown, and test */
struct test_fixture {
	/** Will be run before the test to allocate and initialize variables */
	setup_t *setup;

	/** Will be run if setup succeeds */
	testcase_t *test;

	/** Cleans up test data whether or not the setup suceeded*/
	teardown_t *teardown;
};

typedef struct test_fixture test_fixture_t;
106156230Smux
107156230Smuxstatic void	xnb_fill_eh_and_ip(struct mbuf *m, uint16_t ip_len,
108156230Smux				   uint16_t ip_id, uint16_t ip_p,
109156230Smux				   uint16_t ip_off, uint16_t ip_sum);
110156230Smuxstatic void	xnb_fill_tcp(struct mbuf *m);
111156230Smuxstatic int	xnb_get1pkt(struct xnb_pkt *pkt, size_t size, uint16_t flags);
112156230Smuxstatic int	xnb_unit_test_runner(test_fixture_t const tests[], int ntests,
113156230Smux				     char *buffer, size_t buflen);
114156230Smux
115156230Smuxstatic int __unused
116156230Smuxnull_setup(void) { return 0; }
117156230Smux
118156230Smuxstatic void __unused
119156230Smuxnull_teardown(void) { }
120156230Smux
121156230Smuxstatic setup_t setup_pvt_data;
122156230Smuxstatic teardown_t teardown_pvt_data;
123156230Smuxstatic testcase_t xnb_ring2pkt_emptyring;
124156230Smuxstatic testcase_t xnb_ring2pkt_1req;
125156230Smuxstatic testcase_t xnb_ring2pkt_2req;
126156230Smuxstatic testcase_t xnb_ring2pkt_3req;
127156230Smuxstatic testcase_t xnb_ring2pkt_extra;
128156230Smuxstatic testcase_t xnb_ring2pkt_partial;
129156230Smuxstatic testcase_t xnb_ring2pkt_wraps;
130156230Smuxstatic testcase_t xnb_txpkt2rsp_emptypkt;
131156230Smuxstatic testcase_t xnb_txpkt2rsp_1req;
132156230Smuxstatic testcase_t xnb_txpkt2rsp_extra;
133156230Smuxstatic testcase_t xnb_txpkt2rsp_long;
134156230Smuxstatic testcase_t xnb_txpkt2rsp_invalid;
135156230Smuxstatic testcase_t xnb_txpkt2rsp_error;
136156230Smuxstatic testcase_t xnb_txpkt2rsp_wraps;
137156230Smuxstatic testcase_t xnb_pkt2mbufc_empty;
138156230Smuxstatic testcase_t xnb_pkt2mbufc_short;
139156230Smuxstatic testcase_t xnb_pkt2mbufc_csum;
140156230Smuxstatic testcase_t xnb_pkt2mbufc_1cluster;
141156230Smuxstatic testcase_t xnb_pkt2mbufc_largecluster;
142156230Smuxstatic testcase_t xnb_pkt2mbufc_2cluster;
143156230Smuxstatic testcase_t xnb_txpkt2gnttab_empty;
144156230Smuxstatic testcase_t xnb_txpkt2gnttab_short;
145156230Smuxstatic testcase_t xnb_txpkt2gnttab_2req;
146156230Smuxstatic testcase_t xnb_txpkt2gnttab_2cluster;
147156230Smuxstatic testcase_t xnb_update_mbufc_short;
148156230Smuxstatic testcase_t xnb_update_mbufc_2req;
149156230Smuxstatic testcase_t xnb_update_mbufc_2cluster;
150156230Smuxstatic testcase_t xnb_mbufc2pkt_empty;
151156230Smuxstatic testcase_t xnb_mbufc2pkt_short;
152156230Smuxstatic testcase_t xnb_mbufc2pkt_1cluster;
153156230Smuxstatic testcase_t xnb_mbufc2pkt_2short;
154156230Smuxstatic testcase_t xnb_mbufc2pkt_long;
155156230Smuxstatic testcase_t xnb_mbufc2pkt_extra;
156156230Smuxstatic testcase_t xnb_mbufc2pkt_nospace;
157156230Smuxstatic testcase_t xnb_rxpkt2gnttab_empty;
158156230Smuxstatic testcase_t xnb_rxpkt2gnttab_short;
159156230Smuxstatic testcase_t xnb_rxpkt2gnttab_2req;
160156230Smuxstatic testcase_t xnb_rxpkt2rsp_empty;
161156230Smuxstatic testcase_t xnb_rxpkt2rsp_short;
162156230Smuxstatic testcase_t xnb_rxpkt2rsp_extra;
163156230Smuxstatic testcase_t xnb_rxpkt2rsp_2short;
164156230Smuxstatic testcase_t xnb_rxpkt2rsp_2slots;
165156230Smuxstatic testcase_t xnb_rxpkt2rsp_copyerror;
166156230Smux/* TODO: add test cases for xnb_add_mbuf_cksum for IPV6 tcp and udp */
167156230Smuxstatic testcase_t xnb_add_mbuf_cksum_arp;
168156230Smuxstatic testcase_t xnb_add_mbuf_cksum_tcp;
169156230Smuxstatic testcase_t xnb_add_mbuf_cksum_udp;
170156230Smuxstatic testcase_t xnb_add_mbuf_cksum_icmp;
171156230Smuxstatic testcase_t xnb_add_mbuf_cksum_tcp_swcksum;
172156230Smuxstatic testcase_t xnb_sscanf_llu;
173156230Smuxstatic testcase_t xnb_sscanf_lld;
174156230Smuxstatic testcase_t xnb_sscanf_hhu;
175156230Smuxstatic testcase_t xnb_sscanf_hhd;
176156230Smuxstatic testcase_t xnb_sscanf_hhn;
177156230Smux
/** Private data used by unit tests */
static struct {
	/* Scratch grant-copy table, zeroed by setup_pvt_data() */
	gnttab_copy_table 	gnttab;
	/* Back and front views of the fake RX ring */
	netif_rx_back_ring_t	rxb;
	netif_rx_front_ring_t	rxf;
	/* Back and front views of the fake TX ring */
	netif_tx_back_ring_t	txb;
	netif_tx_front_ring_t	txf;
	/* Fake ethernet interface from if_alloc() */
	struct ifnet*		ifp;
	/* Shared-ring pages backing the rings above */
	netif_rx_sring_t*	rxs;
	netif_tx_sring_t*	txs;
} xnb_unit_pvt;
189156230Smux
190156230Smuxstatic inline void safe_m_freem(struct mbuf **ppMbuf) {
191156230Smux	if (*ppMbuf != NULL) {
192156230Smux		m_freem(*ppMbuf);
193156230Smux		*ppMbuf = NULL;
194156230Smux	}
195156230Smux}
196156230Smux
/**
 * The unit test runner.  It will run every supplied test and return an
 * output message as a string
 * \param tests		An array of tests.  Every test will be attempted.
 * \param ntests	The length of tests
 * \param buffer	Return storage for the result string
 * \param buflen	The length of buffer
 * \return		The number of tests that failed
 */
static int
xnb_unit_test_runner(test_fixture_t const tests[], int ntests, char *buffer,
    		     size_t buflen)
{
	int i;
	int n_passes;
	int n_failures = 0;

	for (i = 0; i < ntests; i++) {
		int error = tests[i].setup();
		if (error != 0) {
			SNCATF(buffer, buflen,
			    "Setup failed for test idx %d\n", i);
			n_failures++;
		} else {
			size_t new_chars;

			/* A test "fails" iff it wrote anything to buffer */
			tests[i].test(buffer, buflen);
			new_chars = strnlen(buffer, buflen);
			buffer += new_chars;
			buflen -= new_chars;

			if (new_chars > 0) {
				n_failures++;
			}
		}
		/* teardown runs whether or not setup succeeded */
		tests[i].teardown();
	}

	n_passes = ntests - n_failures;
	if (n_passes > 0) {
		SNCATF(buffer, buflen, "%d Tests Passed\n", n_passes);
	}
	if (n_failures > 0) {
		SNCATF(buffer, buflen, "%d Tests FAILED\n", n_failures);
	}

	return n_failures;
}
245156230Smux
/** Number of unit tests.  Must match the length of the tests array below */
#define	TOTAL_TESTS	(53)
/**
 * Max memory available for returning results.  400 chars/test should give
 * enough space for a five line error message for every test
 */
#define	TOTAL_BUFLEN	(400 * TOTAL_TESTS + 2)
253156230Smux
/**
 * Called from userspace by a sysctl.  Runs all internal unit tests, and
 * returns the results to userspace as a string
 * \param oidp	unused
 * \param arg1	pointer to an xnb_softc for a specific xnb device
 * \param arg2	unused
 * \param req	sysctl access structure
 * \return a string via the special SYSCTL_OUT macro.
 */

static int
xnb_unit_test_main(SYSCTL_HANDLER_ARGS) {
	/* Fixture table; its length must equal TOTAL_TESTS */
	test_fixture_t const tests[TOTAL_TESTS] = {
		{setup_pvt_data, xnb_ring2pkt_emptyring, teardown_pvt_data},
		{setup_pvt_data, xnb_ring2pkt_1req, teardown_pvt_data},
		{setup_pvt_data, xnb_ring2pkt_2req, teardown_pvt_data},
		{setup_pvt_data, xnb_ring2pkt_3req, teardown_pvt_data},
		{setup_pvt_data, xnb_ring2pkt_extra, teardown_pvt_data},
		{setup_pvt_data, xnb_ring2pkt_partial, teardown_pvt_data},
		{setup_pvt_data, xnb_ring2pkt_wraps, teardown_pvt_data},
		{setup_pvt_data, xnb_txpkt2rsp_emptypkt, teardown_pvt_data},
		{setup_pvt_data, xnb_txpkt2rsp_1req, teardown_pvt_data},
		{setup_pvt_data, xnb_txpkt2rsp_extra, teardown_pvt_data},
		{setup_pvt_data, xnb_txpkt2rsp_long, teardown_pvt_data},
		{setup_pvt_data, xnb_txpkt2rsp_invalid, teardown_pvt_data},
		{setup_pvt_data, xnb_txpkt2rsp_error, teardown_pvt_data},
		{setup_pvt_data, xnb_txpkt2rsp_wraps, teardown_pvt_data},
		{setup_pvt_data, xnb_pkt2mbufc_empty, teardown_pvt_data},
		{setup_pvt_data, xnb_pkt2mbufc_short, teardown_pvt_data},
		{setup_pvt_data, xnb_pkt2mbufc_csum, teardown_pvt_data},
		{setup_pvt_data, xnb_pkt2mbufc_1cluster, teardown_pvt_data},
		{setup_pvt_data, xnb_pkt2mbufc_largecluster, teardown_pvt_data},
		{setup_pvt_data, xnb_pkt2mbufc_2cluster, teardown_pvt_data},
		{setup_pvt_data, xnb_txpkt2gnttab_empty, teardown_pvt_data},
		{setup_pvt_data, xnb_txpkt2gnttab_short, teardown_pvt_data},
		{setup_pvt_data, xnb_txpkt2gnttab_2req, teardown_pvt_data},
		{setup_pvt_data, xnb_txpkt2gnttab_2cluster, teardown_pvt_data},
		{setup_pvt_data, xnb_update_mbufc_short, teardown_pvt_data},
		{setup_pvt_data, xnb_update_mbufc_2req, teardown_pvt_data},
		{setup_pvt_data, xnb_update_mbufc_2cluster, teardown_pvt_data},
		{setup_pvt_data, xnb_mbufc2pkt_empty, teardown_pvt_data},
		{setup_pvt_data, xnb_mbufc2pkt_short, teardown_pvt_data},
		{setup_pvt_data, xnb_mbufc2pkt_1cluster, teardown_pvt_data},
		{setup_pvt_data, xnb_mbufc2pkt_2short, teardown_pvt_data},
		{setup_pvt_data, xnb_mbufc2pkt_long, teardown_pvt_data},
		{setup_pvt_data, xnb_mbufc2pkt_extra, teardown_pvt_data},
		{setup_pvt_data, xnb_mbufc2pkt_nospace, teardown_pvt_data},
		{setup_pvt_data, xnb_rxpkt2gnttab_empty, teardown_pvt_data},
		{setup_pvt_data, xnb_rxpkt2gnttab_short, teardown_pvt_data},
		{setup_pvt_data, xnb_rxpkt2gnttab_2req, teardown_pvt_data},
		{setup_pvt_data, xnb_rxpkt2rsp_empty, teardown_pvt_data},
		{setup_pvt_data, xnb_rxpkt2rsp_short, teardown_pvt_data},
		{setup_pvt_data, xnb_rxpkt2rsp_extra, teardown_pvt_data},
		{setup_pvt_data, xnb_rxpkt2rsp_2short, teardown_pvt_data},
		{setup_pvt_data, xnb_rxpkt2rsp_2slots, teardown_pvt_data},
		{setup_pvt_data, xnb_rxpkt2rsp_copyerror, teardown_pvt_data},
		{null_setup, xnb_add_mbuf_cksum_arp, null_teardown},
		{null_setup, xnb_add_mbuf_cksum_icmp, null_teardown},
		{null_setup, xnb_add_mbuf_cksum_tcp, null_teardown},
		{null_setup, xnb_add_mbuf_cksum_tcp_swcksum, null_teardown},
		{null_setup, xnb_add_mbuf_cksum_udp, null_teardown},
		{null_setup, xnb_sscanf_hhd, null_teardown},
		{null_setup, xnb_sscanf_hhu, null_teardown},
		{null_setup, xnb_sscanf_lld, null_teardown},
		{null_setup, xnb_sscanf_llu, null_teardown},
		{null_setup, xnb_sscanf_hhn, null_teardown},
	};
	/**
	 * results is static so that the data will persist after this function
	 * returns.  The sysctl code expects us to return a constant string.
	 * \todo: the static variable is not thread safe.  Put a mutex around
	 * it.
	 */
	static char results[TOTAL_BUFLEN];

	/* empty the result strings */
	results[0] = 0;
	xnb_unit_test_runner(tests, TOTAL_TESTS, results, TOTAL_BUFLEN);

	return (SYSCTL_OUT(req, results, strnlen(results, TOTAL_BUFLEN)));
}
335156230Smux
/**
 * Build the shared fixture in xnb_unit_pvt: a zeroed grant table, fake TX
 * and RX rings (both front and back views), and a fake ethernet interface.
 * \return nonzero on error; partially-created state is left for
 *	   teardown_pvt_data() to release.
 */
static int
setup_pvt_data(void)
{
	int error = 0;

	bzero(xnb_unit_pvt.gnttab, sizeof(xnb_unit_pvt.gnttab));

	/* NOTE(review): with M_WAITOK this malloc presumably cannot return
	 * NULL, so the error branch looks unreachable — confirm */
	xnb_unit_pvt.txs = malloc(PAGE_SIZE, M_XENNETBACK, M_WAITOK|M_ZERO);
	if (xnb_unit_pvt.txs != NULL) {
		SHARED_RING_INIT(xnb_unit_pvt.txs);
		BACK_RING_INIT(&xnb_unit_pvt.txb, xnb_unit_pvt.txs, PAGE_SIZE);
		FRONT_RING_INIT(&xnb_unit_pvt.txf, xnb_unit_pvt.txs, PAGE_SIZE);
	} else {
		error = 1;
	}

	xnb_unit_pvt.ifp = if_alloc(IFT_ETHER);
	if (xnb_unit_pvt.ifp == NULL) {
		error = 1;
	}

	xnb_unit_pvt.rxs = malloc(PAGE_SIZE, M_XENNETBACK, M_WAITOK|M_ZERO);
	if (xnb_unit_pvt.rxs != NULL) {
		SHARED_RING_INIT(xnb_unit_pvt.rxs);
		BACK_RING_INIT(&xnb_unit_pvt.rxb, xnb_unit_pvt.rxs, PAGE_SIZE);
		FRONT_RING_INIT(&xnb_unit_pvt.rxf, xnb_unit_pvt.rxs, PAGE_SIZE);
	} else {
		error = 1;
	}

	return error;
}
368156230Smux
369156230Smuxstatic void
370156230Smuxteardown_pvt_data(void)
371156230Smux{
372156230Smux	if (xnb_unit_pvt.txs != NULL) {
373156230Smux		free(xnb_unit_pvt.txs, M_XENNETBACK);
374156230Smux	}
375156230Smux	if (xnb_unit_pvt.rxs != NULL) {
376156230Smux		free(xnb_unit_pvt.rxs, M_XENNETBACK);
377156230Smux	}
378156230Smux	if (xnb_unit_pvt.ifp != NULL) {
379156230Smux		if_free(xnb_unit_pvt.ifp);
380156230Smux	}
381156230Smux}
382156230Smux
/**
 * Verify that xnb_ring2pkt will not consume any requests from an empty ring
 */
static void
xnb_ring2pkt_emptyring(char *buffer, size_t buflen)
{
	struct xnb_pkt pkt;
	int num_consumed;

	/* No requests were pushed, so nothing should be consumed */
	num_consumed = xnb_ring2pkt(&pkt, &xnb_unit_pvt.txb,
	                            xnb_unit_pvt.txb.req_cons);
	XNB_ASSERT(num_consumed == 0);
}
396156230Smux
/**
 * Verify that xnb_ring2pkt can convert a single request packet correctly
 */
static void
xnb_ring2pkt_1req(char *buffer, size_t buflen)
{
	struct xnb_pkt pkt;
	int num_consumed;
	struct netif_tx_request *req;

	/* Push one standalone request onto the TX ring */
	req = RING_GET_REQUEST(&xnb_unit_pvt.txf,
	    xnb_unit_pvt.txf.req_prod_pvt);

	req->flags = 0;
	req->size = 69;	/* arbitrary number for test */
	xnb_unit_pvt.txf.req_prod_pvt++;

	RING_PUSH_REQUESTS(&xnb_unit_pvt.txf);

	num_consumed = xnb_ring2pkt(&pkt, &xnb_unit_pvt.txb,
	                            xnb_unit_pvt.txb.req_cons);
	XNB_ASSERT(num_consumed == 1);
	/* A 1-request packet: car carries the whole size, no cdr */
	XNB_ASSERT(pkt.size == 69);
	XNB_ASSERT(pkt.car_size == 69);
	XNB_ASSERT(pkt.flags == 0);
	XNB_ASSERT(xnb_pkt_is_valid(&pkt));
	XNB_ASSERT(pkt.list_len == 1);
	XNB_ASSERT(pkt.car == 0);
}
426156230Smux
/**
 * Verify that xnb_ring2pkt can convert a two request packet correctly.
 * This tests handling of the MORE_DATA flag and cdr
 */
static void
xnb_ring2pkt_2req(char *buffer, size_t buflen)
{
	struct xnb_pkt pkt;
	int num_consumed;
	struct netif_tx_request *req;
	RING_IDX start_idx = xnb_unit_pvt.txf.req_prod_pvt;

	/* First request: total packet size, chained via NETTXF_more_data */
	req = RING_GET_REQUEST(&xnb_unit_pvt.txf,
	    xnb_unit_pvt.txf.req_prod_pvt);
	req->flags = NETTXF_more_data;
	req->size = 100;
	xnb_unit_pvt.txf.req_prod_pvt++;

	/* Second (final) request: size of its own fragment only */
	req = RING_GET_REQUEST(&xnb_unit_pvt.txf,
	    xnb_unit_pvt.txf.req_prod_pvt);
	req->flags = 0;
	req->size = 40;
	xnb_unit_pvt.txf.req_prod_pvt++;

	RING_PUSH_REQUESTS(&xnb_unit_pvt.txf);

	num_consumed = xnb_ring2pkt(&pkt, &xnb_unit_pvt.txb,
	                            xnb_unit_pvt.txb.req_cons);
	XNB_ASSERT(num_consumed == 2);
	/* car_size is the total (100) minus the final fragment (40) */
	XNB_ASSERT(pkt.size == 100);
	XNB_ASSERT(pkt.car_size == 60);
	XNB_ASSERT(pkt.flags == 0);
	XNB_ASSERT(xnb_pkt_is_valid(&pkt));
	XNB_ASSERT(pkt.list_len == 2);
	XNB_ASSERT(pkt.car == start_idx);
	XNB_ASSERT(pkt.cdr == start_idx + 1);
}
464156230Smux
/**
 * Verify that xnb_ring2pkt can convert a three request packet correctly
 */
static void
xnb_ring2pkt_3req(char *buffer, size_t buflen)
{
	struct xnb_pkt pkt;
	int num_consumed;
	struct netif_tx_request *req;
	RING_IDX start_idx = xnb_unit_pvt.txf.req_prod_pvt;

	/* First request carries the total packet size (200) */
	req = RING_GET_REQUEST(&xnb_unit_pvt.txf,
	    xnb_unit_pvt.txf.req_prod_pvt);
	req->flags = NETTXF_more_data;
	req->size = 200;
	xnb_unit_pvt.txf.req_prod_pvt++;

	/* Middle request: 40-byte fragment, chain continues */
	req = RING_GET_REQUEST(&xnb_unit_pvt.txf,
	    xnb_unit_pvt.txf.req_prod_pvt);
	req->flags = NETTXF_more_data;
	req->size = 40;
	xnb_unit_pvt.txf.req_prod_pvt++;

	/* Final request: 50-byte fragment terminates the chain */
	req = RING_GET_REQUEST(&xnb_unit_pvt.txf,
	    xnb_unit_pvt.txf.req_prod_pvt);
	req->flags = 0;
	req->size = 50;
	xnb_unit_pvt.txf.req_prod_pvt++;

	RING_PUSH_REQUESTS(&xnb_unit_pvt.txf);

	num_consumed = xnb_ring2pkt(&pkt, &xnb_unit_pvt.txb,
	                            xnb_unit_pvt.txb.req_cons);
	XNB_ASSERT(num_consumed == 3);
	/* car_size = 200 total - 40 - 50 from the trailing fragments */
	XNB_ASSERT(pkt.size == 200);
	XNB_ASSERT(pkt.car_size == 110);
	XNB_ASSERT(pkt.flags == 0);
	XNB_ASSERT(xnb_pkt_is_valid(&pkt));
	XNB_ASSERT(pkt.list_len == 3);
	XNB_ASSERT(pkt.car == start_idx);
	XNB_ASSERT(pkt.cdr == start_idx + 1);
	/* req still points at the last request we filled in */
	XNB_ASSERT(RING_GET_REQUEST(&xnb_unit_pvt.txb, pkt.cdr + 1) == req);
}
508156230Smux
509156230Smux/**
510156230Smux * Verify that xnb_ring2pkt can read extra inf
511156230Smux */
512156230Smuxstatic void
513156230Smuxxnb_ring2pkt_extra(char *buffer, size_t buflen)
514156230Smux{
515156230Smux	struct xnb_pkt pkt;
516156230Smux	int num_consumed;
517156230Smux	struct netif_tx_request *req;
518156230Smux	struct netif_extra_info *ext;
519156230Smux	RING_IDX start_idx = xnb_unit_pvt.txf.req_prod_pvt;
520156230Smux
521156230Smux	req = RING_GET_REQUEST(&xnb_unit_pvt.txf,
522156230Smux	    xnb_unit_pvt.txf.req_prod_pvt);
523156230Smux	req->flags = NETTXF_extra_info | NETTXF_more_data;
524156230Smux	req->size = 150;
525156230Smux	xnb_unit_pvt.txf.req_prod_pvt++;
526156230Smux
527156230Smux	ext = (struct netif_extra_info*) RING_GET_REQUEST(&xnb_unit_pvt.txf,
528156230Smux	    xnb_unit_pvt.txf.req_prod_pvt);
529156230Smux	ext->flags = 0;
530156230Smux	ext->type = XEN_NETIF_EXTRA_TYPE_GSO;
531156230Smux	ext->u.gso.size = 250;
532156230Smux	ext->u.gso.type = XEN_NETIF_GSO_TYPE_TCPV4;
533156230Smux	ext->u.gso.features = 0;
534156230Smux	xnb_unit_pvt.txf.req_prod_pvt++;
535156230Smux
536156230Smux	req = RING_GET_REQUEST(&xnb_unit_pvt.txf,
537156230Smux	    xnb_unit_pvt.txf.req_prod_pvt);
538156230Smux	req->flags = 0;
539156230Smux	req->size = 50;
540156230Smux	xnb_unit_pvt.txf.req_prod_pvt++;
541156230Smux
542156230Smux	RING_PUSH_REQUESTS(&xnb_unit_pvt.txf);
543156230Smux
544156230Smux	num_consumed = xnb_ring2pkt(&pkt, &xnb_unit_pvt.txb,
545156230Smux	                            xnb_unit_pvt.txb.req_cons);
546156230Smux	XNB_ASSERT(num_consumed == 3);
547156230Smux	XNB_ASSERT(pkt.extra.flags == 0);
548156230Smux	XNB_ASSERT(pkt.extra.type == XEN_NETIF_EXTRA_TYPE_GSO);
549156230Smux	XNB_ASSERT(pkt.extra.u.gso.size == 250);
550156230Smux	XNB_ASSERT(pkt.extra.u.gso.type = XEN_NETIF_GSO_TYPE_TCPV4);
551156230Smux	XNB_ASSERT(pkt.size == 150);
552156230Smux	XNB_ASSERT(pkt.car_size == 100);
553156230Smux	XNB_ASSERT(pkt.flags == NETTXF_extra_info);
554156230Smux	XNB_ASSERT(xnb_pkt_is_valid(&pkt));
555156230Smux	XNB_ASSERT(pkt.list_len == 2);
556156230Smux	XNB_ASSERT(pkt.car == start_idx);
557156230Smux	XNB_ASSERT(pkt.cdr == start_idx + 2);
558156230Smux	XNB_ASSERT(RING_GET_REQUEST(&xnb_unit_pvt.txb, pkt.cdr) == req);
559156230Smux}
560156230Smux
/**
 * Verify that xnb_ring2pkt will consume no requests if the entire packet is
 * not yet in the ring
 */
static void
xnb_ring2pkt_partial(char *buffer, size_t buflen)
{
	struct xnb_pkt pkt;
	int num_consumed;
	struct netif_tx_request *req;

	/* NETTXF_more_data promises a follow-up request we never push */
	req = RING_GET_REQUEST(&xnb_unit_pvt.txf,
	    xnb_unit_pvt.txf.req_prod_pvt);
	req->flags = NETTXF_more_data;
	req->size = 150;
	xnb_unit_pvt.txf.req_prod_pvt++;

	RING_PUSH_REQUESTS(&xnb_unit_pvt.txf);

	num_consumed = xnb_ring2pkt(&pkt, &xnb_unit_pvt.txb,
	                            xnb_unit_pvt.txb.req_cons);
	XNB_ASSERT(num_consumed == 0);
	XNB_ASSERT(! xnb_pkt_is_valid(&pkt));
}
585156230Smux
/**
 * Verity that xnb_ring2pkt can read a packet whose requests wrap around
 * the end of the ring
 */
static void
xnb_ring2pkt_wraps(char *buffer, size_t buflen)
{
	struct xnb_pkt pkt;
	int num_consumed;
	struct netif_tx_request *req;
	unsigned int rsize;

	/*
	 * Manually tweak the ring indices to create a ring with no responses
	 * and the next request slot at position 2 from the end
	 */
	rsize = RING_SIZE(&xnb_unit_pvt.txf);
	xnb_unit_pvt.txf.req_prod_pvt = rsize - 2;
	xnb_unit_pvt.txf.rsp_cons = rsize - 2;
	xnb_unit_pvt.txs->req_prod = rsize - 2;
	xnb_unit_pvt.txs->req_event = rsize - 1;
	xnb_unit_pvt.txs->rsp_prod = rsize - 2;
	xnb_unit_pvt.txs->rsp_event = rsize - 1;
	xnb_unit_pvt.txb.rsp_prod_pvt = rsize - 2;
	xnb_unit_pvt.txb.req_cons = rsize - 2;

	/* Three chained requests: the third lands past the wrap point */
	req = RING_GET_REQUEST(&xnb_unit_pvt.txf,
	    xnb_unit_pvt.txf.req_prod_pvt);
	req->flags = NETTXF_more_data;
	req->size = 550;
	xnb_unit_pvt.txf.req_prod_pvt++;

	req = RING_GET_REQUEST(&xnb_unit_pvt.txf,
	    xnb_unit_pvt.txf.req_prod_pvt);
	req->flags = NETTXF_more_data;
	req->size = 100;
	xnb_unit_pvt.txf.req_prod_pvt++;

	req = RING_GET_REQUEST(&xnb_unit_pvt.txf,
	    xnb_unit_pvt.txf.req_prod_pvt);
	req->flags = 0;
	req->size = 50;
	xnb_unit_pvt.txf.req_prod_pvt++;

	RING_PUSH_REQUESTS(&xnb_unit_pvt.txf);

	num_consumed = xnb_ring2pkt(&pkt, &xnb_unit_pvt.txb,
	                            xnb_unit_pvt.txb.req_cons);
	XNB_ASSERT(num_consumed == 3);
	XNB_ASSERT(xnb_pkt_is_valid(&pkt));
	XNB_ASSERT(pkt.list_len == 3);
	XNB_ASSERT(RING_GET_REQUEST(&xnb_unit_pvt.txb, pkt.cdr + 1) == req);
}
639156230Smux
640156230Smux
/**
 * xnb_txpkt2rsp should do nothing for an empty packet
 */
static void
xnb_txpkt2rsp_emptypkt(char *buffer, size_t buflen)
{
	int num_consumed;
	struct xnb_pkt pkt;
	/* Snapshot both ring views so we can prove nothing changed */
	netif_tx_back_ring_t txb_backup = xnb_unit_pvt.txb;
	netif_tx_sring_t txs_backup = *xnb_unit_pvt.txs;
	pkt.list_len = 0;

	/* must call xnb_ring2pkt just to intialize pkt */
	num_consumed = xnb_ring2pkt(&pkt, &xnb_unit_pvt.txb,
	                            xnb_unit_pvt.txb.req_cons);
	xnb_txpkt2rsp(&pkt, &xnb_unit_pvt.txb, 0);
	XNB_ASSERT(
	    memcmp(&txb_backup, &xnb_unit_pvt.txb, sizeof(txb_backup)) == 0);
	XNB_ASSERT(
	    memcmp(&txs_backup, xnb_unit_pvt.txs, sizeof(txs_backup)) == 0);
}
662156230Smux
663156230Smux/**
664156230Smux * xnb_txpkt2rsp responding to one request
665156230Smux */
666156230Smuxstatic void
667156230Smuxxnb_txpkt2rsp_1req(char *buffer, size_t buflen)
668156230Smux{
669156230Smux	uint16_t num_consumed;
670156230Smux	struct xnb_pkt pkt;
671156230Smux	struct netif_tx_request *req;
672156230Smux	struct netif_tx_response *rsp;
673156230Smux
674156230Smux	req = RING_GET_REQUEST(&xnb_unit_pvt.txf,
675156230Smux	    xnb_unit_pvt.txf.req_prod_pvt);
676156230Smux	req->size = 1000;
677156230Smux	req->flags = 0;
678156230Smux	xnb_unit_pvt.txf.req_prod_pvt++;
679156230Smux
680156230Smux	RING_PUSH_REQUESTS(&xnb_unit_pvt.txf);
681156230Smux
682156230Smux	num_consumed = xnb_ring2pkt(&pkt, &xnb_unit_pvt.txb,
683156230Smux	                            xnb_unit_pvt.txb.req_cons);
684156230Smux	xnb_unit_pvt.txb.req_cons += num_consumed;
685156230Smux
686156230Smux	xnb_txpkt2rsp(&pkt, &xnb_unit_pvt.txb, 0);
687156230Smux	rsp = RING_GET_RESPONSE(&xnb_unit_pvt.txb, xnb_unit_pvt.txf.rsp_cons);
688156230Smux
689156230Smux	XNB_ASSERT(
690156230Smux	    xnb_unit_pvt.txb.rsp_prod_pvt == xnb_unit_pvt.txs->req_prod);
691156230Smux	XNB_ASSERT(rsp->id == req->id);
692156230Smux	XNB_ASSERT(rsp->status == NETIF_RSP_OKAY);
693156230Smux};
694156230Smux
695156230Smux/**
696156230Smux * xnb_txpkt2rsp responding to 1 data request and 1 extra info
697156230Smux */
698156230Smuxstatic void
699156230Smuxxnb_txpkt2rsp_extra(char *buffer, size_t buflen)
700156230Smux{
701156230Smux	uint16_t num_consumed;
702156230Smux	struct xnb_pkt pkt;
703156230Smux	struct netif_tx_request *req;
704156230Smux	netif_extra_info_t *ext;
705156230Smux	struct netif_tx_response *rsp;
706156230Smux
707156230Smux	req = RING_GET_REQUEST(&xnb_unit_pvt.txf,
708156230Smux	    xnb_unit_pvt.txf.req_prod_pvt);
709156230Smux	req->size = 1000;
710156230Smux	req->flags = NETTXF_extra_info;
711156230Smux	req->id = 69;
712156230Smux	xnb_unit_pvt.txf.req_prod_pvt++;
713156230Smux
714156230Smux	ext = (netif_extra_info_t*) RING_GET_REQUEST(&xnb_unit_pvt.txf,
715156230Smux	    xnb_unit_pvt.txf.req_prod_pvt);
716156230Smux	ext->type = XEN_NETIF_EXTRA_TYPE_GSO;
717156230Smux	ext->flags = 0;
718156230Smux	xnb_unit_pvt.txf.req_prod_pvt++;
719156230Smux
720156230Smux	RING_PUSH_REQUESTS(&xnb_unit_pvt.txf);
721156230Smux
722156230Smux	num_consumed = xnb_ring2pkt(&pkt, &xnb_unit_pvt.txb,
723156230Smux	                            xnb_unit_pvt.txb.req_cons);
724156230Smux	xnb_unit_pvt.txb.req_cons += num_consumed;
725156230Smux
726156230Smux	xnb_txpkt2rsp(&pkt, &xnb_unit_pvt.txb, 0);
727156230Smux
728156230Smux	XNB_ASSERT(
729156230Smux	    xnb_unit_pvt.txb.rsp_prod_pvt == xnb_unit_pvt.txs->req_prod);
730156230Smux
731156230Smux	rsp = RING_GET_RESPONSE(&xnb_unit_pvt.txb, xnb_unit_pvt.txf.rsp_cons);
732156230Smux	XNB_ASSERT(rsp->id == req->id);
733156230Smux	XNB_ASSERT(rsp->status == NETIF_RSP_OKAY);
734156230Smux
735156230Smux	rsp = RING_GET_RESPONSE(&xnb_unit_pvt.txb,
736156230Smux	    xnb_unit_pvt.txf.rsp_cons + 1);
737156230Smux	XNB_ASSERT(rsp->status == NETIF_RSP_NULL);
738156230Smux};
739156230Smux
740156230Smux/**
 * xnb_txpkt2rsp responding to 3 data requests and 1 extra info
742156230Smux */
static void
xnb_txpkt2rsp_long(char *buffer, size_t buflen)
{
	uint16_t num_consumed;
	struct xnb_pkt pkt;
	struct netif_tx_request *req;
	netif_extra_info_t *ext;
	struct netif_tx_response *rsp;

	/* Slot 0: first data request, advertises extra info and more data */
	req = RING_GET_REQUEST(&xnb_unit_pvt.txf,
	    xnb_unit_pvt.txf.req_prod_pvt);
	req->size = 1000;
	req->flags = NETTXF_extra_info | NETTXF_more_data;
	req->id = 254;
	xnb_unit_pvt.txf.req_prod_pvt++;

	/* Slot 1: the extra info advertised by the first request */
	ext = (netif_extra_info_t*) RING_GET_REQUEST(&xnb_unit_pvt.txf,
	    xnb_unit_pvt.txf.req_prod_pvt);
	ext->type = XEN_NETIF_EXTRA_TYPE_GSO;
	ext->flags = 0;
	xnb_unit_pvt.txf.req_prod_pvt++;

	/* Slot 2: middle data request, still more data to come */
	req = RING_GET_REQUEST(&xnb_unit_pvt.txf,
	    xnb_unit_pvt.txf.req_prod_pvt);
	req->size = 300;
	req->flags = NETTXF_more_data;
	req->id = 1034;
	xnb_unit_pvt.txf.req_prod_pvt++;

	/* Slot 3: final data request terminates the packet */
	req = RING_GET_REQUEST(&xnb_unit_pvt.txf,
	    xnb_unit_pvt.txf.req_prod_pvt);
	req->size = 400;
	req->flags = 0;
	req->id = 34;
	xnb_unit_pvt.txf.req_prod_pvt++;

	RING_PUSH_REQUESTS(&xnb_unit_pvt.txf);

	num_consumed = xnb_ring2pkt(&pkt, &xnb_unit_pvt.txb,
	                            xnb_unit_pvt.txb.req_cons);
	xnb_unit_pvt.txb.req_cons += num_consumed;

	xnb_txpkt2rsp(&pkt, &xnb_unit_pvt.txb, 0);

	/* Every consumed request slot should now have a response */
	XNB_ASSERT(
	    xnb_unit_pvt.txb.rsp_prod_pvt == xnb_unit_pvt.txs->req_prod);

	/* Response 0: OKAY for the first data request */
	rsp = RING_GET_RESPONSE(&xnb_unit_pvt.txb, xnb_unit_pvt.txf.rsp_cons);
	XNB_ASSERT(rsp->id ==
	    RING_GET_REQUEST(&xnb_unit_pvt.txf, 0)->id);
	XNB_ASSERT(rsp->status == NETIF_RSP_OKAY);

	/* Response 1: NULL placeholder for the extra info slot */
	rsp = RING_GET_RESPONSE(&xnb_unit_pvt.txb,
	    xnb_unit_pvt.txf.rsp_cons + 1);
	XNB_ASSERT(rsp->status == NETIF_RSP_NULL);

	/* Response 2: OKAY for the middle data request */
	rsp = RING_GET_RESPONSE(&xnb_unit_pvt.txb,
	    xnb_unit_pvt.txf.rsp_cons + 2);
	XNB_ASSERT(rsp->id ==
	    RING_GET_REQUEST(&xnb_unit_pvt.txf, 2)->id);
	XNB_ASSERT(rsp->status == NETIF_RSP_OKAY);

	/* Response 3: OKAY for the final data request */
	rsp = RING_GET_RESPONSE(&xnb_unit_pvt.txb,
	    xnb_unit_pvt.txf.rsp_cons + 3);
	XNB_ASSERT(rsp->id ==
	    RING_GET_REQUEST(&xnb_unit_pvt.txf, 3)->id);
	XNB_ASSERT(rsp->status == NETIF_RSP_OKAY);
}
811156230Smux
812156230Smux/**
813156230Smux * xnb_txpkt2rsp responding to an invalid packet.
814156230Smux * Note: this test will result in an error message being printed to the console
815156230Smux * such as:
816156230Smux * xnb(xnb_ring2pkt:1306): Unknown extra info type 255.  Discarding packet
817156230Smux */
818156230Smuxstatic void
819156230Smuxxnb_txpkt2rsp_invalid(char *buffer, size_t buflen)
820156230Smux{
821156230Smux	uint16_t num_consumed;
822156230Smux	struct xnb_pkt pkt;
823156230Smux	struct netif_tx_request *req;
824156230Smux	netif_extra_info_t *ext;
825156230Smux	struct netif_tx_response *rsp;
826156230Smux
827156230Smux	req = RING_GET_REQUEST(&xnb_unit_pvt.txf,
828156230Smux	    xnb_unit_pvt.txf.req_prod_pvt);
829156230Smux	req->size = 1000;
830156230Smux	req->flags = NETTXF_extra_info;
831156230Smux	req->id = 69;
832156230Smux	xnb_unit_pvt.txf.req_prod_pvt++;
833156230Smux
834156230Smux	ext = (netif_extra_info_t*) RING_GET_REQUEST(&xnb_unit_pvt.txf,
835156230Smux	    xnb_unit_pvt.txf.req_prod_pvt);
836156230Smux	ext->type = 0xFF;	/* Invalid extra type */
837156230Smux	ext->flags = 0;
838156230Smux	xnb_unit_pvt.txf.req_prod_pvt++;
839156230Smux
840156230Smux	RING_PUSH_REQUESTS(&xnb_unit_pvt.txf);
841156230Smux
842156230Smux	num_consumed = xnb_ring2pkt(&pkt, &xnb_unit_pvt.txb,
843156230Smux	                            xnb_unit_pvt.txb.req_cons);
844156230Smux	xnb_unit_pvt.txb.req_cons += num_consumed;
845156230Smux	XNB_ASSERT(! xnb_pkt_is_valid(&pkt));
846156230Smux
847156230Smux	xnb_txpkt2rsp(&pkt, &xnb_unit_pvt.txb, 0);
848156230Smux
849156230Smux	XNB_ASSERT(
850156230Smux	    xnb_unit_pvt.txb.rsp_prod_pvt == xnb_unit_pvt.txs->req_prod);
851156230Smux
852156230Smux	rsp = RING_GET_RESPONSE(&xnb_unit_pvt.txb, xnb_unit_pvt.txf.rsp_cons);
853156230Smux	XNB_ASSERT(rsp->id == req->id);
854156230Smux	XNB_ASSERT(rsp->status == NETIF_RSP_ERROR);
855156230Smux
856156230Smux	rsp = RING_GET_RESPONSE(&xnb_unit_pvt.txb,
857156230Smux	    xnb_unit_pvt.txf.rsp_cons + 1);
858156230Smux	XNB_ASSERT(rsp->status == NETIF_RSP_NULL);
859156230Smux};
860156230Smux
861156230Smux/**
862156230Smux * xnb_txpkt2rsp responding to one request which caused an error
863156230Smux */
864156230Smuxstatic void
865156230Smuxxnb_txpkt2rsp_error(char *buffer, size_t buflen)
866156230Smux{
867156230Smux	uint16_t num_consumed;
868156230Smux	struct xnb_pkt pkt;
869156230Smux	struct netif_tx_request *req;
870156230Smux	struct netif_tx_response *rsp;
871156230Smux
872156230Smux	req = RING_GET_REQUEST(&xnb_unit_pvt.txf,
873156230Smux	    xnb_unit_pvt.txf.req_prod_pvt);
874156230Smux	req->size = 1000;
875156230Smux	req->flags = 0;
876156230Smux	xnb_unit_pvt.txf.req_prod_pvt++;
877156230Smux
878156230Smux	RING_PUSH_REQUESTS(&xnb_unit_pvt.txf);
879156230Smux
880156230Smux	num_consumed = xnb_ring2pkt(&pkt, &xnb_unit_pvt.txb,
881156230Smux	                            xnb_unit_pvt.txb.req_cons);
882156230Smux	xnb_unit_pvt.txb.req_cons += num_consumed;
883156230Smux
884156230Smux	xnb_txpkt2rsp(&pkt, &xnb_unit_pvt.txb, 1);
885156230Smux	rsp = RING_GET_RESPONSE(&xnb_unit_pvt.txb, xnb_unit_pvt.txf.rsp_cons);
886156230Smux
887156230Smux	XNB_ASSERT(
888156230Smux	    xnb_unit_pvt.txb.rsp_prod_pvt == xnb_unit_pvt.txs->req_prod);
889156230Smux	XNB_ASSERT(rsp->id == req->id);
890156230Smux	XNB_ASSERT(rsp->status == NETIF_RSP_ERROR);
891156230Smux};
892156230Smux
893156230Smux/**
894156230Smux * xnb_txpkt2rsp's responses wrap around the end of the ring
895156230Smux */
static void
xnb_txpkt2rsp_wraps(char *buffer, size_t buflen)
{
	struct xnb_pkt pkt;
	int num_consumed;
	struct netif_tx_request *req;
	struct netif_tx_response *rsp;
	unsigned int rsize;

	/*
	 * Manually tweak the ring indices to create a ring with no responses
	 * and the next request slot at position 2 from the end
	 */
	rsize = RING_SIZE(&xnb_unit_pvt.txf);
	xnb_unit_pvt.txf.req_prod_pvt = rsize - 2;
	xnb_unit_pvt.txf.rsp_cons = rsize - 2;
	xnb_unit_pvt.txs->req_prod = rsize - 2;
	xnb_unit_pvt.txs->req_event = rsize - 1;
	xnb_unit_pvt.txs->rsp_prod = rsize - 2;
	xnb_unit_pvt.txs->rsp_event = rsize - 1;
	xnb_unit_pvt.txb.rsp_prod_pvt = rsize - 2;
	xnb_unit_pvt.txb.req_cons = rsize - 2;

	/* Three chained requests; the later ones wrap past the ring's end */
	req = RING_GET_REQUEST(&xnb_unit_pvt.txf,
	    xnb_unit_pvt.txf.req_prod_pvt);
	req->flags = NETTXF_more_data;
	req->size = 550;
	req->id = 1;
	xnb_unit_pvt.txf.req_prod_pvt++;

	req = RING_GET_REQUEST(&xnb_unit_pvt.txf,
	    xnb_unit_pvt.txf.req_prod_pvt);
	req->flags = NETTXF_more_data;
	req->size = 100;
	req->id = 2;
	xnb_unit_pvt.txf.req_prod_pvt++;

	/* Final request of the packet: no more-data flag */
	req = RING_GET_REQUEST(&xnb_unit_pvt.txf,
	    xnb_unit_pvt.txf.req_prod_pvt);
	req->flags = 0;
	req->size = 50;
	req->id = 3;
	xnb_unit_pvt.txf.req_prod_pvt++;

	RING_PUSH_REQUESTS(&xnb_unit_pvt.txf);

	num_consumed = xnb_ring2pkt(&pkt, &xnb_unit_pvt.txb,
	                            xnb_unit_pvt.txb.req_cons);

	xnb_txpkt2rsp(&pkt, &xnb_unit_pvt.txb, 0);

	/* All three requests should be answered despite the wraparound */
	XNB_ASSERT(
	    xnb_unit_pvt.txb.rsp_prod_pvt == xnb_unit_pvt.txs->req_prod);
	/* The last response lands 2 slots past rsp_cons, across the wrap */
	rsp = RING_GET_RESPONSE(&xnb_unit_pvt.txb,
	    xnb_unit_pvt.txf.rsp_cons + 2);
	XNB_ASSERT(rsp->id == req->id);
	XNB_ASSERT(rsp->status == NETIF_RSP_OKAY);
}
954156230Smux
955156230Smux
956156230Smux/**
957156230Smux * Helper function used to setup pkt2mbufc tests
958156230Smux * \param size     size in bytes of the single request to push to the ring
959156230Smux * \param flags		optional flags to put in the netif request
960156230Smux * \param[out] pkt the returned packet object
961156230Smux * \return number of requests consumed from the ring
962156230Smux */
963156230Smuxstatic int
964156230Smuxxnb_get1pkt(struct xnb_pkt *pkt, size_t size, uint16_t flags)
965156230Smux{
966156230Smux	struct netif_tx_request *req;
967156230Smux
968156230Smux	req = RING_GET_REQUEST(&xnb_unit_pvt.txf,
969156230Smux	    xnb_unit_pvt.txf.req_prod_pvt);
970156230Smux	req->flags = flags;
971156230Smux	req->size = size;
972156230Smux	xnb_unit_pvt.txf.req_prod_pvt++;
973156230Smux
974156230Smux	RING_PUSH_REQUESTS(&xnb_unit_pvt.txf);
975156230Smux
976156230Smux	return xnb_ring2pkt(pkt, &xnb_unit_pvt.txb,
977156230Smux	                            xnb_unit_pvt.txb.req_cons);
978156230Smux}
979156230Smux
980156230Smux/**
981156230Smux * xnb_pkt2mbufc on an empty packet
982156230Smux */
983156230Smuxstatic void
984156230Smuxxnb_pkt2mbufc_empty(char *buffer, size_t buflen)
985156230Smux{
986156230Smux	int num_consumed;
987156230Smux	struct xnb_pkt pkt;
988156230Smux	struct mbuf *pMbuf;
989156230Smux	pkt.list_len = 0;
990156230Smux
991156230Smux	/* must call xnb_ring2pkt just to intialize pkt */
992156230Smux	num_consumed = xnb_ring2pkt(&pkt, &xnb_unit_pvt.txb,
993156230Smux	                            xnb_unit_pvt.txb.req_cons);
994156230Smux	pkt.size = 0;
995156230Smux	pMbuf = xnb_pkt2mbufc(&pkt, xnb_unit_pvt.ifp);
996156230Smux	safe_m_freem(&pMbuf);
997156230Smux}
998156230Smux
999156230Smux/**
1000156230Smux * xnb_pkt2mbufc on short packet that can fit in an mbuf internal buffer
1001156230Smux */
1002156230Smuxstatic void
1003156230Smuxxnb_pkt2mbufc_short(char *buffer, size_t buflen)
1004156230Smux{
1005156230Smux	const size_t size = MINCLSIZE - 1;
1006156230Smux	struct xnb_pkt pkt;
1007156230Smux	struct mbuf *pMbuf;
1008156230Smux
1009156230Smux	xnb_get1pkt(&pkt, size, 0);
1010156230Smux
1011156230Smux	pMbuf = xnb_pkt2mbufc(&pkt, xnb_unit_pvt.ifp);
1012156230Smux	XNB_ASSERT(M_TRAILINGSPACE(pMbuf) >= size);
1013156230Smux	safe_m_freem(&pMbuf);
1014156230Smux}
1015156230Smux
1016156230Smux/**
 * xnb_pkt2mbufc on short packet whose checksum was validated by the netfront
1018156230Smux */
1019156230Smuxstatic void
1020156230Smuxxnb_pkt2mbufc_csum(char *buffer, size_t buflen)
1021156230Smux{
1022156230Smux	const size_t size = MINCLSIZE - 1;
1023156230Smux	struct xnb_pkt pkt;
1024156230Smux	struct mbuf *pMbuf;
1025156230Smux
1026156230Smux	xnb_get1pkt(&pkt, size, NETTXF_data_validated);
1027156230Smux
1028156230Smux	pMbuf = xnb_pkt2mbufc(&pkt, xnb_unit_pvt.ifp);
1029156230Smux	XNB_ASSERT(M_TRAILINGSPACE(pMbuf) >= size);
1030156230Smux	XNB_ASSERT(pMbuf->m_pkthdr.csum_flags & CSUM_IP_CHECKED);
1031156230Smux	XNB_ASSERT(pMbuf->m_pkthdr.csum_flags & CSUM_IP_VALID);
1032156230Smux	XNB_ASSERT(pMbuf->m_pkthdr.csum_flags & CSUM_DATA_VALID);
1033156230Smux	XNB_ASSERT(pMbuf->m_pkthdr.csum_flags & CSUM_PSEUDO_HDR);
1034156230Smux	safe_m_freem(&pMbuf);
1035156230Smux}
1036156230Smux
1037156230Smux/**
1038156230Smux * xnb_pkt2mbufc on packet that can fit in one cluster
1039156230Smux */
1040156230Smuxstatic void
1041156230Smuxxnb_pkt2mbufc_1cluster(char *buffer, size_t buflen)
1042156230Smux{
1043156230Smux	const size_t size = MINCLSIZE;
1044156230Smux	struct xnb_pkt pkt;
1045156230Smux	struct mbuf *pMbuf;
1046156230Smux
1047156230Smux	xnb_get1pkt(&pkt, size, 0);
1048156230Smux
1049156230Smux	pMbuf = xnb_pkt2mbufc(&pkt, xnb_unit_pvt.ifp);
1050156230Smux	XNB_ASSERT(M_TRAILINGSPACE(pMbuf) >= size);
1051156230Smux	safe_m_freem(&pMbuf);
1052156230Smux}
1053156230Smux
1054156230Smux/**
1055156230Smux * xnb_pkt2mbufc on packet that cannot fit in one regular cluster
1056156230Smux */
1057156230Smuxstatic void
1058156230Smuxxnb_pkt2mbufc_largecluster(char *buffer, size_t buflen)
1059156230Smux{
1060156230Smux	const size_t size = MCLBYTES + 1;
1061156230Smux	struct xnb_pkt pkt;
1062156230Smux	struct mbuf *pMbuf;
1063156230Smux
1064156230Smux	xnb_get1pkt(&pkt, size, 0);
1065156230Smux
1066156230Smux	pMbuf = xnb_pkt2mbufc(&pkt, xnb_unit_pvt.ifp);
1067156230Smux	XNB_ASSERT(M_TRAILINGSPACE(pMbuf) >= size);
1068156230Smux	safe_m_freem(&pMbuf);
1069156230Smux}
1070156230Smux
1071156230Smux/**
1072156230Smux * xnb_pkt2mbufc on packet that cannot fit in one clusters
1073156230Smux */
1074156230Smuxstatic void
1075156230Smuxxnb_pkt2mbufc_2cluster(char *buffer, size_t buflen)
1076156230Smux{
1077156230Smux	const size_t size = 2 * MCLBYTES + 1;
1078156230Smux	size_t space = 0;
1079156230Smux	struct xnb_pkt pkt;
1080156230Smux	struct mbuf *pMbuf;
1081156230Smux	struct mbuf *m;
1082156230Smux
1083156230Smux	xnb_get1pkt(&pkt, size, 0);
1084156230Smux
1085156230Smux	pMbuf = xnb_pkt2mbufc(&pkt, xnb_unit_pvt.ifp);
1086156230Smux
1087156230Smux	for (m = pMbuf; m != NULL; m = m->m_next) {
1088156230Smux		space += M_TRAILINGSPACE(m);
1089156230Smux	}
1090156230Smux	XNB_ASSERT(space >= size);
1091156230Smux	safe_m_freem(&pMbuf);
1092156230Smux}
1093156230Smux
1094156230Smux/**
1095156230Smux * xnb_txpkt2gnttab on an empty packet.  Should return empty gnttab
1096156230Smux */
static void
xnb_txpkt2gnttab_empty(char *buffer, size_t buflen)
{
	int n_entries;
	struct xnb_pkt pkt;
	struct mbuf *pMbuf;
	pkt.list_len = 0;

	/* must call xnb_ring2pkt just to initialize pkt */
	xnb_ring2pkt(&pkt, &xnb_unit_pvt.txb, xnb_unit_pvt.txb.req_cons);
	pkt.size = 0;
	pMbuf = xnb_pkt2mbufc(&pkt, xnb_unit_pvt.ifp);
	n_entries = xnb_txpkt2gnttab(&pkt, pMbuf, xnb_unit_pvt.gnttab,
	    &xnb_unit_pvt.txb, DOMID_FIRST_RESERVED);
	/* An empty packet must generate no grant-copy entries */
	XNB_ASSERT(n_entries == 0);
	safe_m_freem(&pMbuf);
}
1114156230Smux
1115156230Smux/**
1116156230Smux * xnb_txpkt2gnttab on a short packet, that can fit in one mbuf internal buffer
1117156230Smux * and has one request
1118156230Smux */
static void
xnb_txpkt2gnttab_short(char *buffer, size_t buflen)
{
	const size_t size = MINCLSIZE - 1;
	int n_entries;
	struct xnb_pkt pkt;
	struct mbuf *pMbuf;

	/* One request with a distinctive gref and a nonzero page offset */
	struct netif_tx_request *req = RING_GET_REQUEST(&xnb_unit_pvt.txf,
	    xnb_unit_pvt.txf.req_prod_pvt);
	req->flags = 0;
	req->size = size;
	req->gref = 7;
	req->offset = 17;
	xnb_unit_pvt.txf.req_prod_pvt++;

	RING_PUSH_REQUESTS(&xnb_unit_pvt.txf);

	xnb_ring2pkt(&pkt, &xnb_unit_pvt.txb, xnb_unit_pvt.txb.req_cons);

	pMbuf = xnb_pkt2mbufc(&pkt, xnb_unit_pvt.ifp);
	n_entries = xnb_txpkt2gnttab(&pkt, pMbuf, xnb_unit_pvt.gnttab,
	    &xnb_unit_pvt.txb, DOMID_FIRST_RESERVED);
	/* A single short request maps to exactly one grant-copy entry */
	XNB_ASSERT(n_entries == 1);
	XNB_ASSERT(xnb_unit_pvt.gnttab[0].len == size);
	/* flags should indicate gref's for source */
	XNB_ASSERT(xnb_unit_pvt.gnttab[0].flags & GNTCOPY_source_gref);
	/* Source mirrors the request's offset, copied as ourselves */
	XNB_ASSERT(xnb_unit_pvt.gnttab[0].source.offset == req->offset);
	XNB_ASSERT(xnb_unit_pvt.gnttab[0].source.domid == DOMID_SELF);
	/* Destination points at the mbuf's data area in the peer domain */
	XNB_ASSERT(xnb_unit_pvt.gnttab[0].dest.offset == virt_to_offset(
	      mtod(pMbuf, vm_offset_t)));
	XNB_ASSERT(xnb_unit_pvt.gnttab[0].dest.u.gmfn ==
		virt_to_mfn(mtod(pMbuf, vm_offset_t)));
	XNB_ASSERT(xnb_unit_pvt.gnttab[0].dest.domid == DOMID_FIRST_RESERVED);
	safe_m_freem(&pMbuf);
}
1155156230Smux
1156156230Smux/**
1157156230Smux * xnb_txpkt2gnttab on a packet with two requests, that can fit into a single
1158156230Smux * mbuf cluster
1159156230Smux */
static void
xnb_txpkt2gnttab_2req(char *buffer, size_t buflen)
{
	int n_entries;
	struct xnb_pkt pkt;
	struct mbuf *pMbuf;

	/* First request: total packet size 1900, chained to a second slot */
	struct netif_tx_request *req = RING_GET_REQUEST(&xnb_unit_pvt.txf,
	    xnb_unit_pvt.txf.req_prod_pvt);
	req->flags = NETTXF_more_data;
	req->size = 1900;
	req->gref = 7;
	req->offset = 0;
	xnb_unit_pvt.txf.req_prod_pvt++;

	/* Second, final request carries the trailing 500 bytes */
	req = RING_GET_REQUEST(&xnb_unit_pvt.txf,
	    xnb_unit_pvt.txf.req_prod_pvt);
	req->flags = 0;
	req->size = 500;
	req->gref = 8;
	req->offset = 0;
	xnb_unit_pvt.txf.req_prod_pvt++;

	RING_PUSH_REQUESTS(&xnb_unit_pvt.txf);

	xnb_ring2pkt(&pkt, &xnb_unit_pvt.txb, xnb_unit_pvt.txb.req_cons);

	pMbuf = xnb_pkt2mbufc(&pkt, xnb_unit_pvt.ifp);
	n_entries = xnb_txpkt2gnttab(&pkt, pMbuf, xnb_unit_pvt.gnttab,
	    &xnb_unit_pvt.txb, DOMID_FIRST_RESERVED);

	XNB_ASSERT(n_entries == 2);
	/* First entry: 1900 total minus the 500 in the second request */
	XNB_ASSERT(xnb_unit_pvt.gnttab[0].len == 1400);
	XNB_ASSERT(xnb_unit_pvt.gnttab[0].dest.offset == virt_to_offset(
	      mtod(pMbuf, vm_offset_t)));

	/* Second entry lands immediately after the first 1400 bytes */
	XNB_ASSERT(xnb_unit_pvt.gnttab[1].len == 500);
	XNB_ASSERT(xnb_unit_pvt.gnttab[1].dest.offset == virt_to_offset(
	      mtod(pMbuf, vm_offset_t) + 1400));
	safe_m_freem(&pMbuf);
}
1201156230Smux
1202156230Smux/**
1203 * xnb_txpkt2gnttab on a single request that spans two mbuf clusters
1204 */
1205static void
1206xnb_txpkt2gnttab_2cluster(char *buffer, size_t buflen)
1207{
1208	int n_entries;
1209	struct xnb_pkt pkt;
1210	struct mbuf *pMbuf;
1211	const uint16_t data_this_transaction = (MCLBYTES*2) + 1;
1212
1213	struct netif_tx_request *req = RING_GET_REQUEST(&xnb_unit_pvt.txf,
1214	    xnb_unit_pvt.txf.req_prod_pvt);
1215	req->flags = 0;
1216	req->size = data_this_transaction;
1217	req->gref = 8;
1218	req->offset = 0;
1219	xnb_unit_pvt.txf.req_prod_pvt++;
1220
1221	RING_PUSH_REQUESTS(&xnb_unit_pvt.txf);
1222	xnb_ring2pkt(&pkt, &xnb_unit_pvt.txb, xnb_unit_pvt.txb.req_cons);
1223
1224	pMbuf = xnb_pkt2mbufc(&pkt, xnb_unit_pvt.ifp);
1225	n_entries = xnb_txpkt2gnttab(&pkt, pMbuf, xnb_unit_pvt.gnttab,
1226	    &xnb_unit_pvt.txb, DOMID_FIRST_RESERVED);
1227
1228	if (M_TRAILINGSPACE(pMbuf) == MCLBYTES) {
1229		/* there should be three mbufs and three gnttab entries */
1230		XNB_ASSERT(n_entries == 3);
1231		XNB_ASSERT(xnb_unit_pvt.gnttab[0].len == MCLBYTES);
1232		XNB_ASSERT(
1233		    xnb_unit_pvt.gnttab[0].dest.offset == virt_to_offset(
1234		      mtod(pMbuf, vm_offset_t)));
1235		XNB_ASSERT(xnb_unit_pvt.gnttab[0].source.offset == 0);
1236
1237		XNB_ASSERT(xnb_unit_pvt.gnttab[1].len == MCLBYTES);
1238		XNB_ASSERT(
1239		    xnb_unit_pvt.gnttab[1].dest.offset == virt_to_offset(
1240		      mtod(pMbuf->m_next, vm_offset_t)));
1241		XNB_ASSERT(xnb_unit_pvt.gnttab[1].source.offset == MCLBYTES);
1242
1243		XNB_ASSERT(xnb_unit_pvt.gnttab[2].len == 1);
1244		XNB_ASSERT(
1245		    xnb_unit_pvt.gnttab[2].dest.offset == virt_to_offset(
1246		      mtod(pMbuf->m_next, vm_offset_t)));
1247		XNB_ASSERT(xnb_unit_pvt.gnttab[2].source.offset == 2 *
1248			    MCLBYTES);
1249	} else if (M_TRAILINGSPACE(pMbuf) == 2 * MCLBYTES) {
1250		/* there should be two mbufs and two gnttab entries */
1251		XNB_ASSERT(n_entries == 2);
1252		XNB_ASSERT(xnb_unit_pvt.gnttab[0].len == 2 * MCLBYTES);
1253		XNB_ASSERT(
1254		    xnb_unit_pvt.gnttab[0].dest.offset == virt_to_offset(
1255		      mtod(pMbuf, vm_offset_t)));
1256		XNB_ASSERT(xnb_unit_pvt.gnttab[0].source.offset == 0);
1257
1258		XNB_ASSERT(xnb_unit_pvt.gnttab[1].len == 1);
1259		XNB_ASSERT(
1260		    xnb_unit_pvt.gnttab[1].dest.offset == virt_to_offset(
1261		      mtod(pMbuf->m_next, vm_offset_t)));
1262		XNB_ASSERT(
1263		    xnb_unit_pvt.gnttab[1].source.offset == 2 * MCLBYTES);
1264
1265	} else {
1266		/* should never get here */
1267		XNB_ASSERT(0);
1268	}
1269	if (pMbuf != NULL)
1270		m_freem(pMbuf);
1271}
1272
1273
1274/**
1275 * xnb_update_mbufc on a short packet that only has one gnttab entry
1276 */
static void
xnb_update_mbufc_short(char *buffer, size_t buflen)
{
	const size_t size = MINCLSIZE - 1;
	int n_entries;
	struct xnb_pkt pkt;
	struct mbuf *pMbuf;

	/* One short request with a nonzero page offset */
	struct netif_tx_request *req = RING_GET_REQUEST(&xnb_unit_pvt.txf,
	    xnb_unit_pvt.txf.req_prod_pvt);
	req->flags = 0;
	req->size = size;
	req->gref = 7;
	req->offset = 17;
	xnb_unit_pvt.txf.req_prod_pvt++;

	RING_PUSH_REQUESTS(&xnb_unit_pvt.txf);

	xnb_ring2pkt(&pkt, &xnb_unit_pvt.txb, xnb_unit_pvt.txb.req_cons);

	pMbuf = xnb_pkt2mbufc(&pkt, xnb_unit_pvt.ifp);
	n_entries = xnb_txpkt2gnttab(&pkt, pMbuf, xnb_unit_pvt.gnttab,
	    &xnb_unit_pvt.txb, DOMID_FIRST_RESERVED);

	/* Update grant table's status fields as the hypervisor call would */
	xnb_unit_pvt.gnttab[0].status = GNTST_okay;

	/* After the update the mbuf lengths must reflect the copied data */
	xnb_update_mbufc(pMbuf, xnb_unit_pvt.gnttab, n_entries);
	XNB_ASSERT(pMbuf->m_len == size);
	XNB_ASSERT(pMbuf->m_pkthdr.len == size);
	safe_m_freem(&pMbuf);
}
1309
1310/**
1311 * xnb_update_mbufc on a packet with two requests, that can fit into a single
1312 * mbuf cluster
1313 */
static void
xnb_update_mbufc_2req(char *buffer, size_t buflen)
{
	int n_entries;
	struct xnb_pkt pkt;
	struct mbuf *pMbuf;

	/* First request: total packet size 1900, chained to a second slot */
	struct netif_tx_request *req = RING_GET_REQUEST(&xnb_unit_pvt.txf,
	    xnb_unit_pvt.txf.req_prod_pvt);
	req->flags = NETTXF_more_data;
	req->size = 1900;
	req->gref = 7;
	req->offset = 0;
	xnb_unit_pvt.txf.req_prod_pvt++;

	/* Second, final request carries the trailing 500 bytes */
	req = RING_GET_REQUEST(&xnb_unit_pvt.txf,
	    xnb_unit_pvt.txf.req_prod_pvt);
	req->flags = 0;
	req->size = 500;
	req->gref = 8;
	req->offset = 0;
	xnb_unit_pvt.txf.req_prod_pvt++;

	RING_PUSH_REQUESTS(&xnb_unit_pvt.txf);

	xnb_ring2pkt(&pkt, &xnb_unit_pvt.txb, xnb_unit_pvt.txb.req_cons);

	pMbuf = xnb_pkt2mbufc(&pkt, xnb_unit_pvt.ifp);
	n_entries = xnb_txpkt2gnttab(&pkt, pMbuf, xnb_unit_pvt.gnttab,
	    &xnb_unit_pvt.txb, DOMID_FIRST_RESERVED);

	/* Update grant table's status fields as the hypervisor call would */
	xnb_unit_pvt.gnttab[0].status = GNTST_okay;
	xnb_unit_pvt.gnttab[1].status = GNTST_okay;

	/* Both fragments fit in one cluster, so a single mbuf holds 1900 */
	xnb_update_mbufc(pMbuf, xnb_unit_pvt.gnttab, n_entries);
	XNB_ASSERT(n_entries == 2);
	XNB_ASSERT(pMbuf->m_pkthdr.len == 1900);
	XNB_ASSERT(pMbuf->m_len == 1900);

	safe_m_freem(&pMbuf);
}
1356
1357/**
1358 * xnb_update_mbufc on a single request that spans two mbuf clusters
1359 */
1360static void
1361xnb_update_mbufc_2cluster(char *buffer, size_t buflen)
1362{
1363	int i;
1364	int n_entries;
1365	struct xnb_pkt pkt;
1366	struct mbuf *pMbuf;
1367	const uint16_t data_this_transaction = (MCLBYTES*2) + 1;
1368
1369	struct netif_tx_request *req = RING_GET_REQUEST(&xnb_unit_pvt.txf,
1370	    xnb_unit_pvt.txf.req_prod_pvt);
1371	req->flags = 0;
1372	req->size = data_this_transaction;
1373	req->gref = 8;
1374	req->offset = 0;
1375	xnb_unit_pvt.txf.req_prod_pvt++;
1376
1377	RING_PUSH_REQUESTS(&xnb_unit_pvt.txf);
1378	xnb_ring2pkt(&pkt, &xnb_unit_pvt.txb, xnb_unit_pvt.txb.req_cons);
1379
1380	pMbuf = xnb_pkt2mbufc(&pkt, xnb_unit_pvt.ifp);
1381	n_entries = xnb_txpkt2gnttab(&pkt, pMbuf, xnb_unit_pvt.gnttab,
1382	    &xnb_unit_pvt.txb, DOMID_FIRST_RESERVED);
1383
1384	/* Update grant table's status fields */
1385	for (i = 0; i < n_entries; i++) {
1386		xnb_unit_pvt.gnttab[0].status = GNTST_okay;
1387	}
1388	xnb_update_mbufc(pMbuf, xnb_unit_pvt.gnttab, n_entries);
1389
1390	if (n_entries == 3) {
1391		/* there should be three mbufs and three gnttab entries */
1392		XNB_ASSERT(pMbuf->m_pkthdr.len == data_this_transaction);
1393		XNB_ASSERT(pMbuf->m_len == MCLBYTES);
1394		XNB_ASSERT(pMbuf->m_next->m_len == MCLBYTES);
1395		XNB_ASSERT(pMbuf->m_next->m_next->m_len == 1);
1396	} else if (n_entries == 2) {
1397		/* there should be two mbufs and two gnttab entries */
1398		XNB_ASSERT(n_entries == 2);
1399		XNB_ASSERT(pMbuf->m_pkthdr.len == data_this_transaction);
1400		XNB_ASSERT(pMbuf->m_len == 2 * MCLBYTES);
1401		XNB_ASSERT(pMbuf->m_next->m_len == 1);
1402	} else {
1403		/* should never get here */
1404		XNB_ASSERT(0);
1405	}
1406	safe_m_freem(&pMbuf);
1407}
1408
1409/** xnb_mbufc2pkt on an empty mbufc */
1410static void
1411xnb_mbufc2pkt_empty(char *buffer, size_t buflen) {
1412	struct xnb_pkt pkt;
1413	int free_slots = 64;
1414	struct mbuf *mbuf;
1415
1416	mbuf = m_get(M_WAITOK, MT_DATA);
1417	/*
1418	 * note: it is illegal to set M_PKTHDR on a mbuf with no data.  Doing so
1419	 * will cause m_freem to segfault
1420	 */
1421	XNB_ASSERT(mbuf->m_len == 0);
1422
1423	xnb_mbufc2pkt(mbuf, &pkt, 0, free_slots);
1424	XNB_ASSERT(! xnb_pkt_is_valid(&pkt));
1425
1426	safe_m_freem(&mbuf);
1427}
1428
1429/** xnb_mbufc2pkt on a short mbufc */
1430static void
1431xnb_mbufc2pkt_short(char *buffer, size_t buflen) {
1432	struct xnb_pkt pkt;
1433	size_t size = 128;
1434	int free_slots = 64;
1435	RING_IDX start = 9;
1436	struct mbuf *mbuf;
1437
1438	mbuf = m_getm(NULL, size, M_WAITOK, MT_DATA);
1439	mbuf->m_flags |= M_PKTHDR;
1440	mbuf->m_pkthdr.len = size;
1441	mbuf->m_len = size;
1442
1443	xnb_mbufc2pkt(mbuf, &pkt, start, free_slots);
1444	XNB_ASSERT(xnb_pkt_is_valid(&pkt));
1445	XNB_ASSERT(pkt.size == size);
1446	XNB_ASSERT(pkt.car_size == size);
1447	XNB_ASSERT(! (pkt.flags &
1448	      (NETRXF_more_data | NETRXF_extra_info)));
1449	XNB_ASSERT(pkt.list_len == 1);
1450	XNB_ASSERT(pkt.car == start);
1451
1452	safe_m_freem(&mbuf);
1453}
1454
1455/** xnb_mbufc2pkt on a single mbuf with an mbuf cluster */
1456static void
1457xnb_mbufc2pkt_1cluster(char *buffer, size_t buflen) {
1458	struct xnb_pkt pkt;
1459	size_t size = MCLBYTES;
1460	int free_slots = 32;
1461	RING_IDX start = 12;
1462	struct mbuf *mbuf;
1463
1464	mbuf = m_getm(NULL, size, M_WAITOK, MT_DATA);
1465	mbuf->m_flags |= M_PKTHDR;
1466	mbuf->m_pkthdr.len = size;
1467	mbuf->m_len = size;
1468
1469	xnb_mbufc2pkt(mbuf, &pkt, start, free_slots);
1470	XNB_ASSERT(xnb_pkt_is_valid(&pkt));
1471	XNB_ASSERT(pkt.size == size);
1472	XNB_ASSERT(pkt.car_size == size);
1473	XNB_ASSERT(! (pkt.flags &
1474	      (NETRXF_more_data | NETRXF_extra_info)));
1475	XNB_ASSERT(pkt.list_len == 1);
1476	XNB_ASSERT(pkt.car == start);
1477
1478	safe_m_freem(&mbuf);
1479}
1480
1481/** xnb_mbufc2pkt on a two-mbuf chain with short data regions */
1482static void
1483xnb_mbufc2pkt_2short(char *buffer, size_t buflen) {
1484	struct xnb_pkt pkt;
1485	size_t size1 = MHLEN - 5;
1486	size_t size2 = MHLEN - 15;
1487	int free_slots = 32;
1488	RING_IDX start = 14;
1489	struct mbuf *mbufc, *mbufc2;
1490
1491	mbufc = m_getm(NULL, size1, M_WAITOK, MT_DATA);
1492	mbufc->m_flags |= M_PKTHDR;
1493	if (mbufc == NULL) {
1494		XNB_ASSERT(mbufc != NULL);
1495		return;
1496	}
1497
1498	mbufc2 = m_getm(mbufc, size2, M_WAITOK, MT_DATA);
1499	if (mbufc2 == NULL) {
1500		XNB_ASSERT(mbufc2 != NULL);
1501		safe_m_freem(&mbufc);
1502		return;
1503	}
1504	mbufc2->m_pkthdr.len = size1 + size2;
1505	mbufc2->m_len = size1;
1506
1507	xnb_mbufc2pkt(mbufc2, &pkt, start, free_slots);
1508	XNB_ASSERT(xnb_pkt_is_valid(&pkt));
1509	XNB_ASSERT(pkt.size == size1 + size2);
1510	XNB_ASSERT(pkt.car == start);
1511	/*
1512	 * The second m_getm may allocate a new mbuf and append
1513	 * it to the chain, or it may simply extend the first mbuf.
1514	 */
1515	if (mbufc2->m_next != NULL) {
1516		XNB_ASSERT(pkt.car_size == size1);
1517		XNB_ASSERT(pkt.list_len == 1);
1518		XNB_ASSERT(pkt.cdr == start + 1);
1519	}
1520
1521	safe_m_freem(&mbufc2);
1522}
1523
1524/** xnb_mbufc2pkt on a mbuf chain with >1 mbuf cluster */
1525static void
1526xnb_mbufc2pkt_long(char *buffer, size_t buflen) {
1527	struct xnb_pkt pkt;
1528	size_t size = 14 * MCLBYTES / 3;
1529	size_t size_remaining;
1530	int free_slots = 15;
1531	RING_IDX start = 3;
1532	struct mbuf *mbufc, *m;
1533
1534	mbufc = m_getm(NULL, size, M_WAITOK, MT_DATA);
1535	mbufc->m_flags |= M_PKTHDR;
1536	if (mbufc == NULL) {
1537		XNB_ASSERT(mbufc != NULL);
1538		return;
1539	}
1540
1541	mbufc->m_pkthdr.len = size;
1542	size_remaining = size;
1543	for (m = mbufc; m != NULL; m = m->m_next) {
1544		m->m_len = MAX(M_TRAILINGSPACE(m), size_remaining);
1545		size_remaining -= m->m_len;
1546	}
1547
1548	xnb_mbufc2pkt(mbufc, &pkt, start, free_slots);
1549	XNB_ASSERT(xnb_pkt_is_valid(&pkt));
1550	XNB_ASSERT(pkt.size == size);
1551	XNB_ASSERT(pkt.car == start);
1552	XNB_ASSERT(pkt.car_size = mbufc->m_len);
1553	/*
1554	 * There should be >1 response in the packet, and there is no
1555	 * extra info.
1556	 */
1557	XNB_ASSERT(! (pkt.flags & NETRXF_extra_info));
1558	XNB_ASSERT(pkt.cdr == pkt.car + 1);
1559
1560	safe_m_freem(&mbufc);
1561}
1562
1563/** xnb_mbufc2pkt on a mbuf chain with >1 mbuf cluster and extra info */
1564static void
1565xnb_mbufc2pkt_extra(char *buffer, size_t buflen) {
1566	struct xnb_pkt pkt;
1567	size_t size = 14 * MCLBYTES / 3;
1568	size_t size_remaining;
1569	int free_slots = 15;
1570	RING_IDX start = 3;
1571	struct mbuf *mbufc, *m;
1572
1573	mbufc = m_getm(NULL, size, M_WAITOK, MT_DATA);
1574	if (mbufc == NULL) {
1575		XNB_ASSERT(mbufc != NULL);
1576		return;
1577	}
1578
1579	mbufc->m_flags |= M_PKTHDR;
1580	mbufc->m_pkthdr.len = size;
1581	mbufc->m_pkthdr.csum_flags |= CSUM_TSO;
1582	mbufc->m_pkthdr.tso_segsz = TCP_MSS - 40;
1583	size_remaining = size;
1584	for (m = mbufc; m != NULL; m = m->m_next) {
1585		m->m_len = MAX(M_TRAILINGSPACE(m), size_remaining);
1586		size_remaining -= m->m_len;
1587	}
1588
1589	xnb_mbufc2pkt(mbufc, &pkt, start, free_slots);
1590	XNB_ASSERT(xnb_pkt_is_valid(&pkt));
1591	XNB_ASSERT(pkt.size == size);
1592	XNB_ASSERT(pkt.car == start);
1593	XNB_ASSERT(pkt.car_size = mbufc->m_len);
1594	/* There should be >1 response in the packet, there is extra info */
1595	XNB_ASSERT(pkt.flags & NETRXF_extra_info);
1596	XNB_ASSERT(pkt.flags & NETRXF_data_validated);
1597	XNB_ASSERT(pkt.cdr == pkt.car + 2);
1598	XNB_ASSERT(pkt.extra.u.gso.size = mbufc->m_pkthdr.tso_segsz);
1599	XNB_ASSERT(pkt.extra.type == XEN_NETIF_EXTRA_TYPE_GSO);
1600	XNB_ASSERT(! (pkt.extra.flags & XEN_NETIF_EXTRA_FLAG_MORE));
1601
1602	safe_m_freem(&mbufc);
1603}
1604
1605/** xnb_mbufc2pkt with insufficient space in the ring */
1606static void
1607xnb_mbufc2pkt_nospace(char *buffer, size_t buflen) {
1608	struct xnb_pkt pkt;
1609	size_t size = 14 * MCLBYTES / 3;
1610	size_t size_remaining;
1611	int free_slots = 2;
1612	RING_IDX start = 3;
1613	struct mbuf *mbufc, *m;
1614	int error;
1615
1616	mbufc = m_getm(NULL, size, M_WAITOK, MT_DATA);
1617	mbufc->m_flags |= M_PKTHDR;
1618	if (mbufc == NULL) {
1619		XNB_ASSERT(mbufc != NULL);
1620		return;
1621	}
1622
1623	mbufc->m_pkthdr.len = size;
1624	size_remaining = size;
1625	for (m = mbufc; m != NULL; m = m->m_next) {
1626		m->m_len = MAX(M_TRAILINGSPACE(m), size_remaining);
1627		size_remaining -= m->m_len;
1628	}
1629
1630	error = xnb_mbufc2pkt(mbufc, &pkt, start, free_slots);
1631	XNB_ASSERT(error == EAGAIN);
1632	XNB_ASSERT(! xnb_pkt_is_valid(&pkt));
1633
1634	safe_m_freem(&mbufc);
1635}
1636
1637/**
1638 * xnb_rxpkt2gnttab on an empty packet.  Should return empty gnttab
1639 */
1640static void
1641xnb_rxpkt2gnttab_empty(char *buffer, size_t buflen)
1642{
1643	struct xnb_pkt pkt;
1644	int nr_entries;
1645	int free_slots = 60;
1646	struct mbuf *mbuf;
1647
1648	mbuf = m_get(M_WAITOK, MT_DATA);
1649
1650	xnb_mbufc2pkt(mbuf, &pkt, 0, free_slots);
1651	nr_entries = xnb_rxpkt2gnttab(&pkt, mbuf, xnb_unit_pvt.gnttab,
1652			&xnb_unit_pvt.rxb, DOMID_FIRST_RESERVED);
1653
1654	XNB_ASSERT(nr_entries == 0);
1655
1656	safe_m_freem(&mbuf);
1657}
1658
/** xnb_rxpkt2gnttab on a short packet without extra data */
static void
xnb_rxpkt2gnttab_short(char *buffer, size_t buflen) {
	struct xnb_pkt pkt;
	int nr_entries;
	size_t size = 128;
	int free_slots = 60;
	RING_IDX start = 9;
	struct netif_rx_request *req;
	struct mbuf *mbuf;

	/* A single small mbuf: the whole payload fits in one page */
	mbuf = m_getm(NULL, size, M_WAITOK, MT_DATA);
	mbuf->m_flags |= M_PKTHDR;
	mbuf->m_pkthdr.len = size;
	mbuf->m_len = size;

	xnb_mbufc2pkt(mbuf, &pkt, start, free_slots);
	/*
	 * NOTE(review): this indexes the rx ring with the TX ring's private
	 * producer index (txf, not rxf).  It looks like a copy/paste slip
	 * that only works if both private indices happen to coincide --
	 * confirm against the fixture setup before relying on it.
	 */
	req = RING_GET_REQUEST(&xnb_unit_pvt.rxf,
			       xnb_unit_pvt.txf.req_prod_pvt);
	req->gref = 7;

	nr_entries = xnb_rxpkt2gnttab(&pkt, mbuf, xnb_unit_pvt.gnttab,
				      &xnb_unit_pvt.rxb, DOMID_FIRST_RESERVED);

	/* Expect exactly one sub-page grant-copy entry covering the data */
	XNB_ASSERT(nr_entries == 1);
	XNB_ASSERT(xnb_unit_pvt.gnttab[0].len == size);
	/* flags should indicate gref's for dest */
	XNB_ASSERT(xnb_unit_pvt.gnttab[0].flags & GNTCOPY_dest_gref);
	XNB_ASSERT(xnb_unit_pvt.gnttab[0].dest.offset == 0);
	XNB_ASSERT(xnb_unit_pvt.gnttab[0].source.domid == DOMID_SELF);
	/* The source must point at the mbuf's data within its page */
	XNB_ASSERT(xnb_unit_pvt.gnttab[0].source.offset == virt_to_offset(
		   mtod(mbuf, vm_offset_t)));
	XNB_ASSERT(xnb_unit_pvt.gnttab[0].source.u.gmfn ==
		   virt_to_mfn(mtod(mbuf, vm_offset_t)));
	XNB_ASSERT(xnb_unit_pvt.gnttab[0].dest.domid == DOMID_FIRST_RESERVED);

	safe_m_freem(&mbuf);
}
1697
1698/**
1699 * xnb_rxpkt2gnttab on a packet with two different mbufs in a single chai
1700 */
1701static void
1702xnb_rxpkt2gnttab_2req(char *buffer, size_t buflen)
1703{
1704	struct xnb_pkt pkt;
1705	int nr_entries;
1706	int i, num_mbufs;
1707	size_t total_granted_size = 0;
1708	size_t size = MJUMPAGESIZE + 1;
1709	int free_slots = 60;
1710	RING_IDX start = 11;
1711	struct netif_rx_request *req;
1712	struct mbuf *mbuf, *m;
1713
1714	mbuf = m_getm(NULL, size, M_WAITOK, MT_DATA);
1715	mbuf->m_flags |= M_PKTHDR;
1716	mbuf->m_pkthdr.len = size;
1717	mbuf->m_len = size;
1718
1719	xnb_mbufc2pkt(mbuf, &pkt, start, free_slots);
1720
1721	for (i = 0, m=mbuf; m != NULL; i++, m = m->m_next) {
1722		req = RING_GET_REQUEST(&xnb_unit_pvt.rxf,
1723		    xnb_unit_pvt.txf.req_prod_pvt);
1724		req->gref = i;
1725		req->id = 5;
1726	}
1727	num_mbufs = i;
1728
1729	nr_entries = xnb_rxpkt2gnttab(&pkt, mbuf, xnb_unit_pvt.gnttab,
1730			&xnb_unit_pvt.rxb, DOMID_FIRST_RESERVED);
1731
1732	XNB_ASSERT(nr_entries >= num_mbufs);
1733	for (i = 0; i < nr_entries; i++) {
1734		int end_offset = xnb_unit_pvt.gnttab[i].len +
1735			xnb_unit_pvt.gnttab[i].dest.offset;
1736		XNB_ASSERT(end_offset <= PAGE_SIZE);
1737		total_granted_size += xnb_unit_pvt.gnttab[i].len;
1738	}
1739	XNB_ASSERT(total_granted_size == size);
1740}
1741
1742/**
1743 * xnb_rxpkt2rsp on an empty packet.  Shouldn't make any response
1744 */
1745static void
1746xnb_rxpkt2rsp_empty(char *buffer, size_t buflen)
1747{
1748	struct xnb_pkt pkt;
1749	int nr_entries;
1750	int nr_reqs;
1751	int free_slots = 60;
1752	netif_rx_back_ring_t rxb_backup = xnb_unit_pvt.rxb;
1753	netif_rx_sring_t rxs_backup = *xnb_unit_pvt.rxs;
1754	struct mbuf *mbuf;
1755
1756	mbuf = m_get(M_WAITOK, MT_DATA);
1757
1758	xnb_mbufc2pkt(mbuf, &pkt, 0, free_slots);
1759	nr_entries = xnb_rxpkt2gnttab(&pkt, mbuf, xnb_unit_pvt.gnttab,
1760			&xnb_unit_pvt.rxb, DOMID_FIRST_RESERVED);
1761
1762	nr_reqs = xnb_rxpkt2rsp(&pkt, xnb_unit_pvt.gnttab, nr_entries,
1763	    &xnb_unit_pvt.rxb);
1764	XNB_ASSERT(nr_reqs == 0);
1765	XNB_ASSERT(
1766	    memcmp(&rxb_backup, &xnb_unit_pvt.rxb, sizeof(rxb_backup)) == 0);
1767	XNB_ASSERT(
1768	    memcmp(&rxs_backup, xnb_unit_pvt.rxs, sizeof(rxs_backup)) == 0);
1769
1770	safe_m_freem(&mbuf);
1771}
1772
1773/**
1774 * xnb_rxpkt2rsp on a short packet with no extras
1775 */
1776static void
1777xnb_rxpkt2rsp_short(char *buffer, size_t buflen)
1778{
1779	struct xnb_pkt pkt;
1780	int nr_entries, nr_reqs;
1781	size_t size = 128;
1782	int free_slots = 60;
1783	RING_IDX start = 5;
1784	struct netif_rx_request *req;
1785	struct netif_rx_response *rsp;
1786	struct mbuf *mbuf;
1787
1788	mbuf = m_getm(NULL, size, M_WAITOK, MT_DATA);
1789	mbuf->m_flags |= M_PKTHDR;
1790	mbuf->m_pkthdr.len = size;
1791	mbuf->m_len = size;
1792
1793	xnb_mbufc2pkt(mbuf, &pkt, start, free_slots);
1794	req = RING_GET_REQUEST(&xnb_unit_pvt.rxf, start);
1795	req->gref = 7;
1796	xnb_unit_pvt.rxb.req_cons = start;
1797	xnb_unit_pvt.rxb.rsp_prod_pvt = start;
1798	xnb_unit_pvt.rxs->req_prod = start + 1;
1799	xnb_unit_pvt.rxs->rsp_prod = start;
1800
1801	nr_entries = xnb_rxpkt2gnttab(&pkt, mbuf, xnb_unit_pvt.gnttab,
1802			&xnb_unit_pvt.rxb, DOMID_FIRST_RESERVED);
1803
1804	nr_reqs = xnb_rxpkt2rsp(&pkt, xnb_unit_pvt.gnttab, nr_entries,
1805	    &xnb_unit_pvt.rxb);
1806
1807	XNB_ASSERT(nr_reqs == 1);
1808	XNB_ASSERT(xnb_unit_pvt.rxb.rsp_prod_pvt == start + 1);
1809	rsp = RING_GET_RESPONSE(&xnb_unit_pvt.rxb, start);
1810	XNB_ASSERT(rsp->id == req->id);
1811	XNB_ASSERT(rsp->offset == 0);
1812	XNB_ASSERT((rsp->flags & (NETRXF_more_data | NETRXF_extra_info)) == 0);
1813	XNB_ASSERT(rsp->status == size);
1814
1815	safe_m_freem(&mbuf);
1816}
1817
1818/**
1819 * xnb_rxpkt2rsp with extra data
1820 */
1821static void
1822xnb_rxpkt2rsp_extra(char *buffer, size_t buflen)
1823{
1824	struct xnb_pkt pkt;
1825	int nr_entries, nr_reqs;
1826	size_t size = 14;
1827	int free_slots = 15;
1828	RING_IDX start = 3;
1829	uint16_t id = 49;
1830	uint16_t gref = 65;
1831	uint16_t mss = TCP_MSS - 40;
1832	struct mbuf *mbufc;
1833	struct netif_rx_request *req;
1834	struct netif_rx_response *rsp;
1835	struct netif_extra_info *ext;
1836
1837	mbufc = m_getm(NULL, size, M_WAITOK, MT_DATA);
1838	if (mbufc == NULL) {
1839		XNB_ASSERT(mbufc != NULL);
1840		return;
1841	}
1842
1843	mbufc->m_flags |= M_PKTHDR;
1844	mbufc->m_pkthdr.len = size;
1845	mbufc->m_pkthdr.csum_flags |= CSUM_TSO;
1846	mbufc->m_pkthdr.tso_segsz = mss;
1847	mbufc->m_len = size;
1848
1849	xnb_mbufc2pkt(mbufc, &pkt, start, free_slots);
1850	req = RING_GET_REQUEST(&xnb_unit_pvt.rxf, start);
1851	req->id = id;
1852	req->gref = gref;
1853	req = RING_GET_REQUEST(&xnb_unit_pvt.rxf, start + 1);
1854	req->id = id + 1;
1855	req->gref = gref + 1;
1856	xnb_unit_pvt.rxb.req_cons = start;
1857	xnb_unit_pvt.rxb.rsp_prod_pvt = start;
1858	xnb_unit_pvt.rxs->req_prod = start + 2;
1859	xnb_unit_pvt.rxs->rsp_prod = start;
1860
1861	nr_entries = xnb_rxpkt2gnttab(&pkt, mbufc, xnb_unit_pvt.gnttab,
1862			&xnb_unit_pvt.rxb, DOMID_FIRST_RESERVED);
1863
1864	nr_reqs = xnb_rxpkt2rsp(&pkt, xnb_unit_pvt.gnttab, nr_entries,
1865	    &xnb_unit_pvt.rxb);
1866
1867	XNB_ASSERT(nr_reqs == 2);
1868	XNB_ASSERT(xnb_unit_pvt.rxb.rsp_prod_pvt == start + 2);
1869	rsp = RING_GET_RESPONSE(&xnb_unit_pvt.rxb, start);
1870	XNB_ASSERT(rsp->id == id);
1871	XNB_ASSERT((rsp->flags & NETRXF_more_data) == 0);
1872	XNB_ASSERT((rsp->flags & NETRXF_extra_info));
1873	XNB_ASSERT((rsp->flags & NETRXF_data_validated));
1874	XNB_ASSERT((rsp->flags & NETRXF_csum_blank));
1875	XNB_ASSERT(rsp->status == size);
1876
1877	ext = (struct netif_extra_info*)
1878		RING_GET_RESPONSE(&xnb_unit_pvt.rxb, start + 1);
1879	XNB_ASSERT(ext->type == XEN_NETIF_EXTRA_TYPE_GSO);
1880	XNB_ASSERT(! (ext->flags & XEN_NETIF_EXTRA_FLAG_MORE));
1881	XNB_ASSERT(ext->u.gso.size == mss);
1882	XNB_ASSERT(ext->u.gso.type == XEN_NETIF_EXTRA_TYPE_GSO);
1883
1884	safe_m_freem(&mbufc);
1885}
1886
1887/**
1888 * xnb_rxpkt2rsp on a packet with more than a pages's worth of data.  It should
1889 * generate two response slot
1890 */
1891static void
1892xnb_rxpkt2rsp_2slots(char *buffer, size_t buflen)
1893{
1894	struct xnb_pkt pkt;
1895	int nr_entries, nr_reqs;
1896	size_t size = PAGE_SIZE + 100;
1897	int free_slots = 3;
1898	uint16_t id1 = 17;
1899	uint16_t id2 = 37;
1900	uint16_t gref1 = 24;
1901	uint16_t gref2 = 34;
1902	RING_IDX start = 15;
1903	struct netif_rx_request *req;
1904	struct netif_rx_response *rsp;
1905	struct mbuf *mbuf;
1906
1907	mbuf = m_getm(NULL, size, M_WAITOK, MT_DATA);
1908	mbuf->m_flags |= M_PKTHDR;
1909	mbuf->m_pkthdr.len = size;
1910	if (mbuf->m_next != NULL) {
1911		size_t first_len = MIN(M_TRAILINGSPACE(mbuf), size);
1912		mbuf->m_len = first_len;
1913		mbuf->m_next->m_len = size - first_len;
1914
1915	} else {
1916		mbuf->m_len = size;
1917	}
1918
1919	xnb_mbufc2pkt(mbuf, &pkt, start, free_slots);
1920	req = RING_GET_REQUEST(&xnb_unit_pvt.rxf, start);
1921	req->gref = gref1;
1922	req->id = id1;
1923	req = RING_GET_REQUEST(&xnb_unit_pvt.rxf, start + 1);
1924	req->gref = gref2;
1925	req->id = id2;
1926	xnb_unit_pvt.rxb.req_cons = start;
1927	xnb_unit_pvt.rxb.rsp_prod_pvt = start;
1928	xnb_unit_pvt.rxs->req_prod = start + 2;
1929	xnb_unit_pvt.rxs->rsp_prod = start;
1930
1931	nr_entries = xnb_rxpkt2gnttab(&pkt, mbuf, xnb_unit_pvt.gnttab,
1932			&xnb_unit_pvt.rxb, DOMID_FIRST_RESERVED);
1933
1934	nr_reqs = xnb_rxpkt2rsp(&pkt, xnb_unit_pvt.gnttab, nr_entries,
1935	    &xnb_unit_pvt.rxb);
1936
1937	XNB_ASSERT(nr_reqs == 2);
1938	XNB_ASSERT(xnb_unit_pvt.rxb.rsp_prod_pvt == start + 2);
1939	rsp = RING_GET_RESPONSE(&xnb_unit_pvt.rxb, start);
1940	XNB_ASSERT(rsp->id == id1);
1941	XNB_ASSERT(rsp->offset == 0);
1942	XNB_ASSERT((rsp->flags & NETRXF_extra_info) == 0);
1943	XNB_ASSERT(rsp->flags & NETRXF_more_data);
1944	XNB_ASSERT(rsp->status == PAGE_SIZE);
1945
1946	rsp = RING_GET_RESPONSE(&xnb_unit_pvt.rxb, start + 1);
1947	XNB_ASSERT(rsp->id == id2);
1948	XNB_ASSERT(rsp->offset == 0);
1949	XNB_ASSERT((rsp->flags & NETRXF_extra_info) == 0);
1950	XNB_ASSERT(! (rsp->flags & NETRXF_more_data));
1951	XNB_ASSERT(rsp->status == size - PAGE_SIZE);
1952
1953	safe_m_freem(&mbuf);
1954}
1955
1956/** xnb_rxpkt2rsp on a grant table with two sub-page entries */
1957static void
1958xnb_rxpkt2rsp_2short(char *buffer, size_t buflen) {
1959	struct xnb_pkt pkt;
1960	int nr_reqs, nr_entries;
1961	size_t size1 = MHLEN - 5;
1962	size_t size2 = MHLEN - 15;
1963	int free_slots = 32;
1964	RING_IDX start = 14;
1965	uint16_t id = 47;
1966	uint16_t gref = 54;
1967	struct netif_rx_request *req;
1968	struct netif_rx_response *rsp;
1969	struct mbuf *mbufc;
1970
1971	mbufc = m_getm(NULL, size1, M_WAITOK, MT_DATA);
1972	mbufc->m_flags |= M_PKTHDR;
1973	if (mbufc == NULL) {
1974		XNB_ASSERT(mbufc != NULL);
1975		return;
1976	}
1977
1978	m_getm(mbufc, size2, M_WAITOK, MT_DATA);
1979	XNB_ASSERT(mbufc->m_next != NULL);
1980	mbufc->m_pkthdr.len = size1 + size2;
1981	mbufc->m_len = size1;
1982	mbufc->m_next->m_len = size2;
1983
1984	xnb_mbufc2pkt(mbufc, &pkt, start, free_slots);
1985
1986	req = RING_GET_REQUEST(&xnb_unit_pvt.rxf, start);
1987	req->gref = gref;
1988	req->id = id;
1989	xnb_unit_pvt.rxb.req_cons = start;
1990	xnb_unit_pvt.rxb.rsp_prod_pvt = start;
1991	xnb_unit_pvt.rxs->req_prod = start + 1;
1992	xnb_unit_pvt.rxs->rsp_prod = start;
1993
1994	nr_entries = xnb_rxpkt2gnttab(&pkt, mbufc, xnb_unit_pvt.gnttab,
1995			&xnb_unit_pvt.rxb, DOMID_FIRST_RESERVED);
1996
1997	nr_reqs = xnb_rxpkt2rsp(&pkt, xnb_unit_pvt.gnttab, nr_entries,
1998	    &xnb_unit_pvt.rxb);
1999
2000	XNB_ASSERT(nr_entries == 2);
2001	XNB_ASSERT(nr_reqs == 1);
2002	rsp = RING_GET_RESPONSE(&xnb_unit_pvt.rxb, start);
2003	XNB_ASSERT(rsp->id == id);
2004	XNB_ASSERT(rsp->status == size1 + size2);
2005	XNB_ASSERT(rsp->offset == 0);
2006	XNB_ASSERT(! (rsp->flags & (NETRXF_more_data | NETRXF_extra_info)));
2007
2008	safe_m_freem(&mbufc);
2009}
2010
2011/**
2012 * xnb_rxpkt2rsp on a long packet with a hypervisor gnttab_copy error
2013 * Note: this test will result in an error message being printed to the console
2014 * such as:
2015 * xnb(xnb_rxpkt2rsp:1720): Got error -1 for hypervisor gnttab_copy status
2016 */
2017static void
2018xnb_rxpkt2rsp_copyerror(char *buffer, size_t buflen)
2019{
2020	struct xnb_pkt pkt;
2021	int nr_entries, nr_reqs;
2022	int id = 7;
2023	int gref = 42;
2024	uint16_t canary = 6859;
2025	size_t size = 7 * MCLBYTES;
2026	int free_slots = 9;
2027	RING_IDX start = 2;
2028	struct netif_rx_request *req;
2029	struct netif_rx_response *rsp;
2030	struct mbuf *mbuf;
2031
2032	mbuf = m_getm(NULL, size, M_WAITOK, MT_DATA);
2033	mbuf->m_flags |= M_PKTHDR;
2034	mbuf->m_pkthdr.len = size;
2035	mbuf->m_len = size;
2036
2037	xnb_mbufc2pkt(mbuf, &pkt, start, free_slots);
2038	req = RING_GET_REQUEST(&xnb_unit_pvt.rxf, start);
2039	req->gref = gref;
2040	req->id = id;
2041	xnb_unit_pvt.rxb.req_cons = start;
2042	xnb_unit_pvt.rxb.rsp_prod_pvt = start;
2043	xnb_unit_pvt.rxs->req_prod = start + 1;
2044	xnb_unit_pvt.rxs->rsp_prod = start;
2045	req = RING_GET_REQUEST(&xnb_unit_pvt.rxf, start + 1);
2046	req->gref = canary;
2047	req->id = canary;
2048
2049	nr_entries = xnb_rxpkt2gnttab(&pkt, mbuf, xnb_unit_pvt.gnttab,
2050			&xnb_unit_pvt.rxb, DOMID_FIRST_RESERVED);
2051	/* Inject the error*/
2052	xnb_unit_pvt.gnttab[2].status = GNTST_general_error;
2053
2054	nr_reqs = xnb_rxpkt2rsp(&pkt, xnb_unit_pvt.gnttab, nr_entries,
2055	    &xnb_unit_pvt.rxb);
2056
2057	XNB_ASSERT(nr_reqs == 1);
2058	XNB_ASSERT(xnb_unit_pvt.rxb.rsp_prod_pvt == start + 1);
2059	rsp = RING_GET_RESPONSE(&xnb_unit_pvt.rxb, start);
2060	XNB_ASSERT(rsp->id == id);
2061	XNB_ASSERT(rsp->status == NETIF_RSP_ERROR);
2062	req = RING_GET_REQUEST(&xnb_unit_pvt.rxf, start + 1);
2063	XNB_ASSERT(req->gref == canary);
2064	XNB_ASSERT(req->id == canary);
2065
2066	safe_m_freem(&mbuf);
2067}
2068
2069/**
2070 * xnb_add_mbuf_cksum on an ARP request packet
2071 */
2072static void
2073xnb_add_mbuf_cksum_arp(char *buffer, size_t buflen)
2074{
2075	const size_t pkt_len = sizeof(struct ether_header) +
2076		sizeof(struct ether_arp);
2077	struct mbuf *mbufc;
2078	struct ether_header *eh;
2079	struct ether_arp *ep;
2080	unsigned char pkt_orig[pkt_len];
2081
2082	mbufc = m_getm(NULL, pkt_len, M_WAITOK, MT_DATA);
2083	/* Fill in an example arp request */
2084	eh = mtod(mbufc, struct ether_header*);
2085	eh->ether_dhost[0] = 0xff;
2086	eh->ether_dhost[1] = 0xff;
2087	eh->ether_dhost[2] = 0xff;
2088	eh->ether_dhost[3] = 0xff;
2089	eh->ether_dhost[4] = 0xff;
2090	eh->ether_dhost[5] = 0xff;
2091	eh->ether_shost[0] = 0x00;
2092	eh->ether_shost[1] = 0x15;
2093	eh->ether_shost[2] = 0x17;
2094	eh->ether_shost[3] = 0xe9;
2095	eh->ether_shost[4] = 0x30;
2096	eh->ether_shost[5] = 0x68;
2097	eh->ether_type = htons(ETHERTYPE_ARP);
2098	ep = (struct ether_arp*)(eh + 1);
2099	ep->ea_hdr.ar_hrd = htons(ARPHRD_ETHER);
2100	ep->ea_hdr.ar_pro = htons(ETHERTYPE_IP);
2101	ep->ea_hdr.ar_hln = 6;
2102	ep->ea_hdr.ar_pln = 4;
2103	ep->ea_hdr.ar_op = htons(ARPOP_REQUEST);
2104	ep->arp_sha[0] = 0x00;
2105	ep->arp_sha[1] = 0x15;
2106	ep->arp_sha[2] = 0x17;
2107	ep->arp_sha[3] = 0xe9;
2108	ep->arp_sha[4] = 0x30;
2109	ep->arp_sha[5] = 0x68;
2110	ep->arp_spa[0] = 0xc0;
2111	ep->arp_spa[1] = 0xa8;
2112	ep->arp_spa[2] = 0x0a;
2113	ep->arp_spa[3] = 0x04;
2114	bzero(&(ep->arp_tha), ETHER_ADDR_LEN);
2115	ep->arp_tpa[0] = 0xc0;
2116	ep->arp_tpa[1] = 0xa8;
2117	ep->arp_tpa[2] = 0x0a;
2118	ep->arp_tpa[3] = 0x06;
2119
2120	/* fill in the length field */
2121	mbufc->m_len = pkt_len;
2122	mbufc->m_pkthdr.len = pkt_len;
2123	/* indicate that the netfront uses hw-assisted checksums */
2124	mbufc->m_pkthdr.csum_flags = CSUM_IP_CHECKED | CSUM_IP_VALID   |
2125				CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
2126
2127	/* Make a backup copy of the packet */
2128	bcopy(mtod(mbufc, const void*), pkt_orig, pkt_len);
2129
2130	/* Function under test */
2131	xnb_add_mbuf_cksum(mbufc);
2132
2133	/* Verify that the packet's data did not change */
2134	XNB_ASSERT(bcmp(mtod(mbufc, const void*), pkt_orig, pkt_len) == 0);
2135	m_freem(mbufc);
2136}
2137
2138/**
2139 * Helper function that populates the ethernet header and IP header used by
2140 * some of the xnb_add_mbuf_cksum unit tests.  m must already be allocated
2141 * and must be large enough
2142 */
2143static void
2144xnb_fill_eh_and_ip(struct mbuf *m, uint16_t ip_len, uint16_t ip_id,
2145		   uint16_t ip_p, uint16_t ip_off, uint16_t ip_sum)
2146{
2147	struct ether_header *eh;
2148	struct ip *iph;
2149
2150	eh = mtod(m, struct ether_header*);
2151	eh->ether_dhost[0] = 0x00;
2152	eh->ether_dhost[1] = 0x16;
2153	eh->ether_dhost[2] = 0x3e;
2154	eh->ether_dhost[3] = 0x23;
2155	eh->ether_dhost[4] = 0x50;
2156	eh->ether_dhost[5] = 0x0b;
2157	eh->ether_shost[0] = 0x00;
2158	eh->ether_shost[1] = 0x16;
2159	eh->ether_shost[2] = 0x30;
2160	eh->ether_shost[3] = 0x00;
2161	eh->ether_shost[4] = 0x00;
2162	eh->ether_shost[5] = 0x00;
2163	eh->ether_type = htons(ETHERTYPE_IP);
2164	iph = (struct ip*)(eh + 1);
2165	iph->ip_hl = 0x5;	/* 5 dwords == 20 bytes */
2166	iph->ip_v = 4;		/* IP v4 */
2167	iph->ip_tos = 0;
2168	iph->ip_len = htons(ip_len);
2169	iph->ip_id = htons(ip_id);
2170	iph->ip_off = htons(ip_off);
2171	iph->ip_ttl = 64;
2172	iph->ip_p = ip_p;
2173	iph->ip_sum = htons(ip_sum);
2174	iph->ip_src.s_addr = htonl(0xc0a80a04);
2175	iph->ip_dst.s_addr = htonl(0xc0a80a05);
2176}
2177
2178/**
2179 * xnb_add_mbuf_cksum on an ICMP packet, based on a tcpdump of an actual
2180 * ICMP packet
2181 */
2182static void
2183xnb_add_mbuf_cksum_icmp(char *buffer, size_t buflen)
2184{
2185	const size_t icmp_len = 64;	/* set by ping(1) */
2186	const size_t pkt_len = sizeof(struct ether_header) +
2187		sizeof(struct ip) + icmp_len;
2188	struct mbuf *mbufc;
2189	struct ether_header *eh;
2190	struct ip *iph;
2191	struct icmp *icmph;
2192	unsigned char pkt_orig[icmp_len];
2193	uint32_t *tv_field;
2194	uint8_t *data_payload;
2195	int i;
2196	const uint16_t ICMP_CSUM = 0xaed7;
2197	const uint16_t IP_CSUM = 0xe533;
2198
2199	mbufc = m_getm(NULL, pkt_len, M_WAITOK, MT_DATA);
2200	/* Fill in an example ICMP ping request */
2201	eh = mtod(mbufc, struct ether_header*);
2202	xnb_fill_eh_and_ip(mbufc, 84, 28, IPPROTO_ICMP, 0, 0);
2203	iph = (struct ip*)(eh + 1);
2204	icmph = (struct icmp*)(iph + 1);
2205	icmph->icmp_type = ICMP_ECHO;
2206	icmph->icmp_code = 0;
2207	icmph->icmp_cksum = htons(ICMP_CSUM);
2208	icmph->icmp_id = htons(31492);
2209	icmph->icmp_seq = htons(0);
2210	/*
2211	 * ping(1) uses bcopy to insert a native-endian timeval after icmp_seq.
2212	 * For this test, we will set the bytes individually for portability.
2213	 */
2214	tv_field = (uint32_t*)(&(icmph->icmp_hun));
2215	tv_field[0] = 0x4f02cfac;
2216	tv_field[1] = 0x0007c46a;
2217	/*
2218	 * Remainder of packet is an incrmenting 8 bit integer, starting with 8
2219	 */
2220	data_payload = (uint8_t*)(&tv_field[2]);
2221	for (i = 8; i < 37; i++) {
2222		*data_payload++ = i;
2223	}
2224
2225	/* fill in the length field */
2226	mbufc->m_len = pkt_len;
2227	mbufc->m_pkthdr.len = pkt_len;
2228	/* indicate that the netfront uses hw-assisted checksums */
2229	mbufc->m_pkthdr.csum_flags = CSUM_IP_CHECKED | CSUM_IP_VALID   |
2230				CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
2231
2232	bcopy(mtod(mbufc, const void*), pkt_orig, icmp_len);
2233	/* Function under test */
2234	xnb_add_mbuf_cksum(mbufc);
2235
2236	/* Check the IP checksum */
2237	XNB_ASSERT(iph->ip_sum == htons(IP_CSUM));
2238
2239	/* Check that the ICMP packet did not change */
2240	XNB_ASSERT(bcmp(icmph, pkt_orig, icmp_len));
2241	m_freem(mbufc);
2242}
2243
2244/**
2245 * xnb_add_mbuf_cksum on a UDP packet, based on a tcpdump of an actual
2246 * UDP packet
2247 */
2248static void
2249xnb_add_mbuf_cksum_udp(char *buffer, size_t buflen)
2250{
2251	const size_t udp_len = 16;
2252	const size_t pkt_len = sizeof(struct ether_header) +
2253		sizeof(struct ip) + udp_len;
2254	struct mbuf *mbufc;
2255	struct ether_header *eh;
2256	struct ip *iph;
2257	struct udphdr *udp;
2258	uint8_t *data_payload;
2259	const uint16_t IP_CSUM = 0xe56b;
2260	const uint16_t UDP_CSUM = 0xdde2;
2261
2262	mbufc = m_getm(NULL, pkt_len, M_WAITOK, MT_DATA);
2263	/* Fill in an example UDP packet made by 'uname | nc -u <host> 2222 */
2264	eh = mtod(mbufc, struct ether_header*);
2265	xnb_fill_eh_and_ip(mbufc, 36, 4, IPPROTO_UDP, 0, 0xbaad);
2266	iph = (struct ip*)(eh + 1);
2267	udp = (struct udphdr*)(iph + 1);
2268	udp->uh_sport = htons(0x51ae);
2269	udp->uh_dport = htons(0x08ae);
2270	udp->uh_ulen = htons(udp_len);
2271	udp->uh_sum = htons(0xbaad);  /* xnb_add_mbuf_cksum will fill this in */
2272	data_payload = (uint8_t*)(udp + 1);
2273	data_payload[0] = 'F';
2274	data_payload[1] = 'r';
2275	data_payload[2] = 'e';
2276	data_payload[3] = 'e';
2277	data_payload[4] = 'B';
2278	data_payload[5] = 'S';
2279	data_payload[6] = 'D';
2280	data_payload[7] = '\n';
2281
2282	/* fill in the length field */
2283	mbufc->m_len = pkt_len;
2284	mbufc->m_pkthdr.len = pkt_len;
2285	/* indicate that the netfront uses hw-assisted checksums */
2286	mbufc->m_pkthdr.csum_flags = CSUM_IP_CHECKED | CSUM_IP_VALID   |
2287				CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
2288
2289	/* Function under test */
2290	xnb_add_mbuf_cksum(mbufc);
2291
2292	/* Check the checksums */
2293	XNB_ASSERT(iph->ip_sum == htons(IP_CSUM));
2294	XNB_ASSERT(udp->uh_sum == htons(UDP_CSUM));
2295
2296	m_freem(mbufc);
2297}
2298
2299/**
2300 * Helper function that populates a TCP packet used by all of the
2301 * xnb_add_mbuf_cksum tcp unit tests.  m must already be allocated and must be
2302 * large enough
2303 */
2304static void
2305xnb_fill_tcp(struct mbuf *m)
2306{
2307	struct ether_header *eh;
2308	struct ip *iph;
2309	struct tcphdr *tcp;
2310	uint32_t *options;
2311	uint8_t *data_payload;
2312
2313	/* Fill in an example TCP packet made by 'uname | nc <host> 2222' */
2314	eh = mtod(m, struct ether_header*);
2315	xnb_fill_eh_and_ip(m, 60, 8, IPPROTO_TCP, IP_DF, 0);
2316	iph = (struct ip*)(eh + 1);
2317	tcp = (struct tcphdr*)(iph + 1);
2318	tcp->th_sport = htons(0x9cd9);
2319	tcp->th_dport = htons(2222);
2320	tcp->th_seq = htonl(0x00f72b10);
2321	tcp->th_ack = htonl(0x7f37ba6c);
2322	tcp->th_x2 = 0;
2323	tcp->th_off = 8;
2324	tcp->th_flags = 0x18;
2325	tcp->th_win = htons(0x410);
2326	/* th_sum is incorrect; will be inserted by function under test */
2327	tcp->th_sum = htons(0xbaad);
2328	tcp->th_urp = htons(0);
2329	/*
2330	 * The following 12 bytes of options encode:
2331	 * [nop, nop, TS val 33247 ecr 3457687679]
2332	 */
2333	options = (uint32_t*)(tcp + 1);
2334	options[0] = htonl(0x0101080a);
2335	options[1] = htonl(0x000081df);
2336	options[2] = htonl(0xce18207f);
2337	data_payload = (uint8_t*)(&options[3]);
2338	data_payload[0] = 'F';
2339	data_payload[1] = 'r';
2340	data_payload[2] = 'e';
2341	data_payload[3] = 'e';
2342	data_payload[4] = 'B';
2343	data_payload[5] = 'S';
2344	data_payload[6] = 'D';
2345	data_payload[7] = '\n';
2346}
2347
2348/**
2349 * xnb_add_mbuf_cksum on a TCP packet, based on a tcpdump of an actual TCP
2350 * packet
2351 */
2352static void
2353xnb_add_mbuf_cksum_tcp(char *buffer, size_t buflen)
2354{
2355	const size_t payload_len = 8;
2356	const size_t tcp_options_len = 12;
2357	const size_t pkt_len = sizeof(struct ether_header) + sizeof(struct ip) +
2358	    sizeof(struct tcphdr) + tcp_options_len + payload_len;
2359	struct mbuf *mbufc;
2360	struct ether_header *eh;
2361	struct ip *iph;
2362	struct tcphdr *tcp;
2363	const uint16_t IP_CSUM = 0xa55a;
2364	const uint16_t TCP_CSUM = 0x2f64;
2365
2366	mbufc = m_getm(NULL, pkt_len, M_WAITOK, MT_DATA);
2367	/* Fill in an example TCP packet made by 'uname | nc <host> 2222' */
2368	xnb_fill_tcp(mbufc);
2369	eh = mtod(mbufc, struct ether_header*);
2370	iph = (struct ip*)(eh + 1);
2371	tcp = (struct tcphdr*)(iph + 1);
2372
2373	/* fill in the length field */
2374	mbufc->m_len = pkt_len;
2375	mbufc->m_pkthdr.len = pkt_len;
2376	/* indicate that the netfront uses hw-assisted checksums */
2377	mbufc->m_pkthdr.csum_flags = CSUM_IP_CHECKED | CSUM_IP_VALID   |
2378				CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
2379
2380	/* Function under test */
2381	xnb_add_mbuf_cksum(mbufc);
2382
2383	/* Check the checksums */
2384	XNB_ASSERT(iph->ip_sum == htons(IP_CSUM));
2385	XNB_ASSERT(tcp->th_sum == htons(TCP_CSUM));
2386
2387	m_freem(mbufc);
2388}
2389
2390/**
2391 * xnb_add_mbuf_cksum on a TCP packet that does not use HW assisted checksums
2392 */
2393static void
2394xnb_add_mbuf_cksum_tcp_swcksum(char *buffer, size_t buflen)
2395{
2396	const size_t payload_len = 8;
2397	const size_t tcp_options_len = 12;
2398	const size_t pkt_len = sizeof(struct ether_header) + sizeof(struct ip) +
2399	    sizeof(struct tcphdr) + tcp_options_len + payload_len;
2400	struct mbuf *mbufc;
2401	struct ether_header *eh;
2402	struct ip *iph;
2403	struct tcphdr *tcp;
2404	/* Use deliberately bad checksums, and verify that they don't get */
2405	/* corrected by xnb_add_mbuf_cksum */
2406	const uint16_t IP_CSUM = 0xdead;
2407	const uint16_t TCP_CSUM = 0xbeef;
2408
2409	mbufc = m_getm(NULL, pkt_len, M_WAITOK, MT_DATA);
2410	/* Fill in an example TCP packet made by 'uname | nc <host> 2222' */
2411	xnb_fill_tcp(mbufc);
2412	eh = mtod(mbufc, struct ether_header*);
2413	iph = (struct ip*)(eh + 1);
2414	iph->ip_sum = htons(IP_CSUM);
2415	tcp = (struct tcphdr*)(iph + 1);
2416	tcp->th_sum = htons(TCP_CSUM);
2417
2418	/* fill in the length field */
2419	mbufc->m_len = pkt_len;
2420	mbufc->m_pkthdr.len = pkt_len;
2421	/* indicate that the netfront does not use hw-assisted checksums */
2422	mbufc->m_pkthdr.csum_flags = 0;
2423
2424	/* Function under test */
2425	xnb_add_mbuf_cksum(mbufc);
2426
2427	/* Check that the checksums didn't change */
2428	XNB_ASSERT(iph->ip_sum == htons(IP_CSUM));
2429	XNB_ASSERT(tcp->th_sum == htons(TCP_CSUM));
2430
2431	m_freem(mbufc);
2432}
2433
2434/**
2435 * sscanf on unsigned chars
2436 */
2437static void
2438xnb_sscanf_hhu(char *buffer, size_t buflen)
2439{
2440	const char mystr[] = "137";
2441	uint8_t dest[12];
2442	int i;
2443
2444	for (i = 0; i < 12; i++)
2445		dest[i] = 'X';
2446
2447	sscanf(mystr, "%hhu", &dest[4]);
2448	for (i = 0; i < 12; i++)
2449		XNB_ASSERT(dest[i] == (i == 4 ? 137 : 'X'));
2450}
2451
2452/**
2453 * sscanf on signed chars
2454 */
2455static void
2456xnb_sscanf_hhd(char *buffer, size_t buflen)
2457{
2458	const char mystr[] = "-27";
2459	int8_t dest[12];
2460	int i;
2461
2462	for (i = 0; i < 12; i++)
2463		dest[i] = 'X';
2464
2465	sscanf(mystr, "%hhd", &dest[4]);
2466	for (i = 0; i < 12; i++)
2467		XNB_ASSERT(dest[i] == (i == 4 ? -27 : 'X'));
2468}
2469
2470/**
2471 * sscanf on signed long longs
2472 */
2473static void
2474xnb_sscanf_lld(char *buffer, size_t buflen)
2475{
2476	const char mystr[] = "-123456789012345";	/* about -2**47 */
2477	long long dest[3];
2478	int i;
2479
2480	for (i = 0; i < 3; i++)
2481		dest[i] = (long long)0xdeadbeefdeadbeef;
2482
2483	sscanf(mystr, "%lld", &dest[1]);
2484	for (i = 0; i < 3; i++)
2485		XNB_ASSERT(dest[i] == (i != 1 ? (long long)0xdeadbeefdeadbeef :
2486		    -123456789012345));
2487}
2488
2489/**
2490 * sscanf on unsigned long longs
2491 */
2492static void
2493xnb_sscanf_llu(char *buffer, size_t buflen)
2494{
2495	const char mystr[] = "12802747070103273189";
2496	unsigned long long dest[3];
2497	int i;
2498
2499	for (i = 0; i < 3; i++)
2500		dest[i] = (long long)0xdeadbeefdeadbeef;
2501
2502	sscanf(mystr, "%llu", &dest[1]);
2503	for (i = 0; i < 3; i++)
2504		XNB_ASSERT(dest[i] == (i != 1 ? (long long)0xdeadbeefdeadbeef :
2505		    12802747070103273189ull));
2506}
2507
2508/**
2509 * sscanf on unsigned short short n's
2510 */
2511static void
2512xnb_sscanf_hhn(char *buffer, size_t buflen)
2513{
2514	const char mystr[] =
2515	    "000102030405060708090a0b0c0d0e0f101112131415161718191a1b1c1d1e1f"
2516	    "202122232425262728292a2b2c2d2e2f303132333435363738393a3b3c3d3e3f"
2517	    "404142434445464748494a4b4c4d4e4f505152535455565758595a5b5c5d5e5f";
2518	unsigned char dest[12];
2519	int i;
2520
2521	for (i = 0; i < 12; i++)
2522		dest[i] = (unsigned char)'X';
2523
2524	sscanf(mystr,
2525	    "000102030405060708090a0b0c0d0e0f101112131415161718191a1b1c1d1e1f"
2526	    "202122232425262728292a2b2c2d2e2f303132333435363738393a3b3c3d3e3f"
2527	    "404142434445464748494a4b4c4d4e4f%hhn", &dest[4]);
2528	for (i = 0; i < 12; i++)
2529		XNB_ASSERT(dest[i] == (i == 4 ? 160 : 'X'));
2530}
2531