/*-
 * Copyright (c) 2009-2011 Spectra Logic Corporation
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions, and the following disclaimer,
 *    without modification.
 * 2. Redistributions in binary form must reproduce at minimum a disclaimer
 *    substantially similar to the "NO WARRANTY" disclaimer below
 *    ("Disclaimer") and any redistribution must be conditioned upon
 *    including a substantially similar Disclaimer requirement for further
 *    binary redistribution.
 *
 * NO WARRANTY
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
 * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGES.
 *
 * Authors: Justin T. Gibbs     (Spectra Logic Corporation)
 *          Alan Somers         (Spectra Logic Corporation)
 *          John Suykerbuyk     (Spectra Logic Corporation)
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/dev/xen/netback/netback_unit_tests.c 242889 2012-11-11 10:42:34Z rdivacky $");

/**
 * \file netback_unit_tests.c
 *
 * \brief Unit tests for the Xen netback driver.
 *
 * Due to the driver's use of static functions, these tests cannot be compiled
 * standalone; they must be #include'd from the driver's .c file.
 */

/** Helper macro used to snprintf to a buffer and update the buffer pointer */
#define	SNCATF(buffer, buflen, ...) do {				\
	size_t new_chars = snprintf(buffer, buflen, __VA_ARGS__);	\
	buffer += new_chars;						\
	/* be careful; snprintf's return value can be > buflen */	\
	buflen -= MIN(buflen, new_chars);				\
} while (0)
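
/*
 * Illustrative use (editor's sketch, not part of the driver): each call
 * appends to the buffer and advances the cursor, so successive calls
 * concatenate, exactly as the test runner below does:
 *
 *	char results[80], *buf = results;
 *	size_t len = sizeof(results);
 *	SNCATF(buf, len, "%d Tests Passed\n", n_passes);
 *	SNCATF(buf, len, "%d Tests FAILED\n", n_failures);
 */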

/* STRINGIFY and TOSTRING are used only to help turn __LINE__ into a string */
#define	STRINGIFY(x) #x
#define	TOSTRING(x) STRINGIFY(x)
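
/*
 * For example, if the current line number were 57, TOSTRING(__LINE__)
 * would expand to STRINGIFY(57) and then to the string literal "57",
 * which can be pasted onto adjacent string literals.
 */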

/**
 * Writes an error message to buffer if cond is false.  Note the implied
 * parameters buffer and buflen.
 */
#define	XNB_ASSERT(cond) do {						\
	int passed = (cond);						\
	char *_buffer = (buffer);					\
	size_t _buflen = (buflen);					\
	if (! passed) {							\
		strlcat(_buffer, __func__, _buflen);			\
		strlcat(_buffer, ":" TOSTRING(__LINE__)			\
		  " Assertion Error: " #cond "\n", _buflen);		\
	}								\
} while (0)
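
/*
 * Typical use inside a testcase (editor's sketch): because the macro
 * refers to `buffer' and `buflen' by name, it can only be used inside
 * functions with the testcase_t signature declared below:
 *
 *	static void
 *	xnb_example_test(char *buffer, size_t buflen)
 *	{
 *		XNB_ASSERT(1 + 1 == 2);	(passes; buffer stays empty)
 *	}
 */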

/**
 * The signature used by all testcases.  If the test writes anything to
 * buffer, it is considered a failure.
 * \param buffer	Return storage for error messages
 * \param buflen	The space available in the buffer
 */
typedef void testcase_t(char *buffer, size_t buflen);

/**
 * Signature used by setup functions
 * \return nonzero on error
 */
typedef int setup_t(void);

typedef void teardown_t(void);

/** A simple test fixture comprising setup, teardown, and test */
struct test_fixture {
	/** Will be run before the test to allocate and initialize variables */
	setup_t *setup;

	/** Will be run if setup succeeds */
	testcase_t *test;

	/** Cleans up test data whether or not the setup succeeded */
	teardown_t *teardown;
};

typedef struct test_fixture test_fixture_t;
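
/*
 * Example entry, as used in the tests[] table in xnb_unit_test_main below:
 *
 *	{setup_pvt_data, xnb_ring2pkt_1req, teardown_pvt_data}
 */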

static void	xnb_fill_eh_and_ip(struct mbuf *m, uint16_t ip_len,
				   uint16_t ip_id, uint16_t ip_p,
				   uint16_t ip_off, uint16_t ip_sum);
static void	xnb_fill_tcp(struct mbuf *m);
static int	xnb_get1pkt(struct xnb_pkt *pkt, size_t size, uint16_t flags);
static int	xnb_unit_test_runner(test_fixture_t const tests[], int ntests,
				     char *buffer, size_t buflen);

static int __unused
null_setup(void) { return 0; }

static void __unused
null_teardown(void) { }

static setup_t setup_pvt_data;
static teardown_t teardown_pvt_data;
static testcase_t xnb_ring2pkt_emptyring;
static testcase_t xnb_ring2pkt_1req;
static testcase_t xnb_ring2pkt_2req;
static testcase_t xnb_ring2pkt_3req;
static testcase_t xnb_ring2pkt_extra;
static testcase_t xnb_ring2pkt_partial;
static testcase_t xnb_ring2pkt_wraps;
static testcase_t xnb_txpkt2rsp_emptypkt;
static testcase_t xnb_txpkt2rsp_1req;
static testcase_t xnb_txpkt2rsp_extra;
static testcase_t xnb_txpkt2rsp_long;
static testcase_t xnb_txpkt2rsp_invalid;
static testcase_t xnb_txpkt2rsp_error;
static testcase_t xnb_txpkt2rsp_wraps;
static testcase_t xnb_pkt2mbufc_empty;
static testcase_t xnb_pkt2mbufc_short;
static testcase_t xnb_pkt2mbufc_csum;
static testcase_t xnb_pkt2mbufc_1cluster;
static testcase_t xnb_pkt2mbufc_largecluster;
static testcase_t xnb_pkt2mbufc_2cluster;
static testcase_t xnb_txpkt2gnttab_empty;
static testcase_t xnb_txpkt2gnttab_short;
static testcase_t xnb_txpkt2gnttab_2req;
static testcase_t xnb_txpkt2gnttab_2cluster;
static testcase_t xnb_update_mbufc_short;
static testcase_t xnb_update_mbufc_2req;
static testcase_t xnb_update_mbufc_2cluster;
static testcase_t xnb_mbufc2pkt_empty;
static testcase_t xnb_mbufc2pkt_short;
static testcase_t xnb_mbufc2pkt_1cluster;
static testcase_t xnb_mbufc2pkt_2short;
static testcase_t xnb_mbufc2pkt_long;
static testcase_t xnb_mbufc2pkt_extra;
static testcase_t xnb_mbufc2pkt_nospace;
static testcase_t xnb_rxpkt2gnttab_empty;
static testcase_t xnb_rxpkt2gnttab_short;
static testcase_t xnb_rxpkt2gnttab_2req;
static testcase_t xnb_rxpkt2rsp_empty;
static testcase_t xnb_rxpkt2rsp_short;
static testcase_t xnb_rxpkt2rsp_extra;
static testcase_t xnb_rxpkt2rsp_2short;
static testcase_t xnb_rxpkt2rsp_2slots;
static testcase_t xnb_rxpkt2rsp_copyerror;
/* TODO: add test cases for xnb_add_mbuf_cksum for IPV6 tcp and udp */
static testcase_t xnb_add_mbuf_cksum_arp;
static testcase_t xnb_add_mbuf_cksum_tcp;
static testcase_t xnb_add_mbuf_cksum_udp;
static testcase_t xnb_add_mbuf_cksum_icmp;
static testcase_t xnb_add_mbuf_cksum_tcp_swcksum;
static testcase_t xnb_sscanf_llu;
static testcase_t xnb_sscanf_lld;
static testcase_t xnb_sscanf_hhu;
static testcase_t xnb_sscanf_hhd;
static testcase_t xnb_sscanf_hhn;

/** Private data used by unit tests */
static struct {
	gnttab_copy_table	gnttab;
	netif_rx_back_ring_t	rxb;
	netif_rx_front_ring_t	rxf;
	netif_tx_back_ring_t	txb;
	netif_tx_front_ring_t	txf;
	struct ifnet*		ifp;
	netif_rx_sring_t*	rxs;
	netif_tx_sring_t*	txs;
} xnb_unit_pvt;

static inline void
safe_m_freem(struct mbuf **ppMbuf)
{
	if (*ppMbuf != NULL) {
		m_freem(*ppMbuf);
		*ppMbuf = NULL;
	}
}
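
/*
 * Usage sketch (editor's illustration): passing the mbuf pointer by
 * reference lets the helper NULL it out, so a repeated free becomes a
 * harmless no-op:
 *
 *	struct mbuf *m = m_get(M_WAITOK, MT_DATA);
 *	safe_m_freem(&m);	(m is now NULL)
 *	safe_m_freem(&m);	(does nothing)
 */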

/**
 * The unit test runner.  It will run every supplied test and return an
 * output message as a string
 * \param tests		An array of tests.  Every test will be attempted.
 * \param ntests	The length of tests
 * \param buffer	Return storage for the result string
 * \param buflen	The length of buffer
 * \return		The number of tests that failed
 */
static int
xnb_unit_test_runner(test_fixture_t const tests[], int ntests, char *buffer,
		     size_t buflen)
{
	int i;
	int n_passes;
	int n_failures = 0;

	for (i = 0; i < ntests; i++) {
		int error = tests[i].setup();
		if (error != 0) {
			SNCATF(buffer, buflen,
			    "Setup failed for test idx %d\n", i);
			n_failures++;
		} else {
			size_t new_chars;

			tests[i].test(buffer, buflen);
			new_chars = strnlen(buffer, buflen);
			buffer += new_chars;
			buflen -= new_chars;

			if (new_chars > 0) {
				n_failures++;
			}
		}
		tests[i].teardown();
	}

	n_passes = ntests - n_failures;
	if (n_passes > 0) {
		SNCATF(buffer, buflen, "%d Tests Passed\n", n_passes);
	}
	if (n_failures > 0) {
		SNCATF(buffer, buflen, "%d Tests FAILED\n", n_failures);
	}

	return n_failures;
}
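
/*
 * Editor's note on the output format: a fully passing run of the suite
 * below yields a buffer reading "53 Tests Passed\n".  On failure, each
 * failed assertion contributes a line of the form
 * "<function>:<line> Assertion Error: <condition>" (produced by
 * XNB_ASSERT), followed by the pass/fail tallies appended by SNCATF.
 */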

/** Number of unit tests.  Must match the length of the tests array below */
#define	TOTAL_TESTS	(53)
/**
 * Max memory available for returning results.  400 chars/test should give
 * enough space for a five-line error message for every test
 */
#define	TOTAL_BUFLEN	(400 * TOTAL_TESTS + 2)

/**
 * Called from userspace by a sysctl.  Runs all internal unit tests, and
 * returns the results to userspace as a string
 * \param oidp	unused
 * \param arg1	pointer to an xnb_softc for a specific xnb device
 * \param arg2	unused
 * \param req	sysctl access structure
 * \return a string via the special SYSCTL_OUT macro.
 */
static int
xnb_unit_test_main(SYSCTL_HANDLER_ARGS)
{
	test_fixture_t const tests[TOTAL_TESTS] = {
		{setup_pvt_data, xnb_ring2pkt_emptyring, teardown_pvt_data},
		{setup_pvt_data, xnb_ring2pkt_1req, teardown_pvt_data},
		{setup_pvt_data, xnb_ring2pkt_2req, teardown_pvt_data},
		{setup_pvt_data, xnb_ring2pkt_3req, teardown_pvt_data},
		{setup_pvt_data, xnb_ring2pkt_extra, teardown_pvt_data},
		{setup_pvt_data, xnb_ring2pkt_partial, teardown_pvt_data},
		{setup_pvt_data, xnb_ring2pkt_wraps, teardown_pvt_data},
		{setup_pvt_data, xnb_txpkt2rsp_emptypkt, teardown_pvt_data},
		{setup_pvt_data, xnb_txpkt2rsp_1req, teardown_pvt_data},
		{setup_pvt_data, xnb_txpkt2rsp_extra, teardown_pvt_data},
		{setup_pvt_data, xnb_txpkt2rsp_long, teardown_pvt_data},
		{setup_pvt_data, xnb_txpkt2rsp_invalid, teardown_pvt_data},
		{setup_pvt_data, xnb_txpkt2rsp_error, teardown_pvt_data},
		{setup_pvt_data, xnb_txpkt2rsp_wraps, teardown_pvt_data},
		{setup_pvt_data, xnb_pkt2mbufc_empty, teardown_pvt_data},
		{setup_pvt_data, xnb_pkt2mbufc_short, teardown_pvt_data},
		{setup_pvt_data, xnb_pkt2mbufc_csum, teardown_pvt_data},
		{setup_pvt_data, xnb_pkt2mbufc_1cluster, teardown_pvt_data},
		{setup_pvt_data, xnb_pkt2mbufc_largecluster, teardown_pvt_data},
		{setup_pvt_data, xnb_pkt2mbufc_2cluster, teardown_pvt_data},
		{setup_pvt_data, xnb_txpkt2gnttab_empty, teardown_pvt_data},
		{setup_pvt_data, xnb_txpkt2gnttab_short, teardown_pvt_data},
		{setup_pvt_data, xnb_txpkt2gnttab_2req, teardown_pvt_data},
		{setup_pvt_data, xnb_txpkt2gnttab_2cluster, teardown_pvt_data},
		{setup_pvt_data, xnb_update_mbufc_short, teardown_pvt_data},
		{setup_pvt_data, xnb_update_mbufc_2req, teardown_pvt_data},
		{setup_pvt_data, xnb_update_mbufc_2cluster, teardown_pvt_data},
		{setup_pvt_data, xnb_mbufc2pkt_empty, teardown_pvt_data},
		{setup_pvt_data, xnb_mbufc2pkt_short, teardown_pvt_data},
		{setup_pvt_data, xnb_mbufc2pkt_1cluster, teardown_pvt_data},
		{setup_pvt_data, xnb_mbufc2pkt_2short, teardown_pvt_data},
		{setup_pvt_data, xnb_mbufc2pkt_long, teardown_pvt_data},
		{setup_pvt_data, xnb_mbufc2pkt_extra, teardown_pvt_data},
		{setup_pvt_data, xnb_mbufc2pkt_nospace, teardown_pvt_data},
		{setup_pvt_data, xnb_rxpkt2gnttab_empty, teardown_pvt_data},
		{setup_pvt_data, xnb_rxpkt2gnttab_short, teardown_pvt_data},
		{setup_pvt_data, xnb_rxpkt2gnttab_2req, teardown_pvt_data},
		{setup_pvt_data, xnb_rxpkt2rsp_empty, teardown_pvt_data},
		{setup_pvt_data, xnb_rxpkt2rsp_short, teardown_pvt_data},
		{setup_pvt_data, xnb_rxpkt2rsp_extra, teardown_pvt_data},
		{setup_pvt_data, xnb_rxpkt2rsp_2short, teardown_pvt_data},
		{setup_pvt_data, xnb_rxpkt2rsp_2slots, teardown_pvt_data},
		{setup_pvt_data, xnb_rxpkt2rsp_copyerror, teardown_pvt_data},
		{null_setup, xnb_add_mbuf_cksum_arp, null_teardown},
		{null_setup, xnb_add_mbuf_cksum_icmp, null_teardown},
		{null_setup, xnb_add_mbuf_cksum_tcp, null_teardown},
		{null_setup, xnb_add_mbuf_cksum_tcp_swcksum, null_teardown},
		{null_setup, xnb_add_mbuf_cksum_udp, null_teardown},
		{null_setup, xnb_sscanf_hhd, null_teardown},
		{null_setup, xnb_sscanf_hhu, null_teardown},
		{null_setup, xnb_sscanf_lld, null_teardown},
		{null_setup, xnb_sscanf_llu, null_teardown},
		{null_setup, xnb_sscanf_hhn, null_teardown},
	};
	/**
	 * results is static so that the data will persist after this function
	 * returns.  The sysctl code expects us to return a constant string.
	 * \todo: the static variable is not thread safe.  Put a mutex around
	 * it.
	 */
	static char results[TOTAL_BUFLEN];

	/* empty the result strings */
	results[0] = 0;
	xnb_unit_test_runner(tests, TOTAL_TESTS, results, TOTAL_BUFLEN);

	return (SYSCTL_OUT(req, results, strnlen(results, TOTAL_BUFLEN)));
}
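
/*
 * Editor's note: this handler is meant to be wired to a read-only string
 * sysctl by the enclosing driver, so the suite could be run from userland
 * with something like (hypothetical node name):
 *
 *	sysctl dev.xnb.0.unit_test_results
 */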

static int
setup_pvt_data(void)
{
	int error = 0;

	bzero(xnb_unit_pvt.gnttab, sizeof(xnb_unit_pvt.gnttab));

	xnb_unit_pvt.txs = malloc(PAGE_SIZE, M_XENNETBACK, M_WAITOK|M_ZERO);
	if (xnb_unit_pvt.txs != NULL) {
		SHARED_RING_INIT(xnb_unit_pvt.txs);
		BACK_RING_INIT(&xnb_unit_pvt.txb, xnb_unit_pvt.txs, PAGE_SIZE);
		FRONT_RING_INIT(&xnb_unit_pvt.txf, xnb_unit_pvt.txs, PAGE_SIZE);
	} else {
		error = 1;
	}

	xnb_unit_pvt.ifp = if_alloc(IFT_ETHER);
	if (xnb_unit_pvt.ifp == NULL) {
		error = 1;
	}

	xnb_unit_pvt.rxs = malloc(PAGE_SIZE, M_XENNETBACK, M_WAITOK|M_ZERO);
	if (xnb_unit_pvt.rxs != NULL) {
		SHARED_RING_INIT(xnb_unit_pvt.rxs);
		BACK_RING_INIT(&xnb_unit_pvt.rxb, xnb_unit_pvt.rxs, PAGE_SIZE);
		FRONT_RING_INIT(&xnb_unit_pvt.rxf, xnb_unit_pvt.rxs, PAGE_SIZE);
	} else {
		error = 1;
	}

	return error;
}

static void
teardown_pvt_data(void)
{
	if (xnb_unit_pvt.txs != NULL) {
		free(xnb_unit_pvt.txs, M_XENNETBACK);
	}
	if (xnb_unit_pvt.rxs != NULL) {
		free(xnb_unit_pvt.rxs, M_XENNETBACK);
	}
	if (xnb_unit_pvt.ifp != NULL) {
		if_free(xnb_unit_pvt.ifp);
	}
}

/**
 * Verify that xnb_ring2pkt will not consume any requests from an empty ring
 */
static void
xnb_ring2pkt_emptyring(char *buffer, size_t buflen)
{
	struct xnb_pkt pkt;
	int num_consumed;

	num_consumed = xnb_ring2pkt(&pkt, &xnb_unit_pvt.txb,
	                            xnb_unit_pvt.txb.req_cons);
	XNB_ASSERT(num_consumed == 0);
}

/**
 * Verify that xnb_ring2pkt can convert a single request packet correctly
 */
static void
xnb_ring2pkt_1req(char *buffer, size_t buflen)
{
	struct xnb_pkt pkt;
	int num_consumed;
	struct netif_tx_request *req;

	req = RING_GET_REQUEST(&xnb_unit_pvt.txf,
	    xnb_unit_pvt.txf.req_prod_pvt);

	req->flags = 0;
	req->size = 69;	/* arbitrary number for test */
	xnb_unit_pvt.txf.req_prod_pvt++;

	RING_PUSH_REQUESTS(&xnb_unit_pvt.txf);

	num_consumed = xnb_ring2pkt(&pkt, &xnb_unit_pvt.txb,
	                            xnb_unit_pvt.txb.req_cons);
	XNB_ASSERT(num_consumed == 1);
	XNB_ASSERT(pkt.size == 69);
	XNB_ASSERT(pkt.car_size == 69);
	XNB_ASSERT(pkt.flags == 0);
	XNB_ASSERT(xnb_pkt_is_valid(&pkt));
	XNB_ASSERT(pkt.list_len == 1);
	XNB_ASSERT(pkt.car == 0);
}

/**
 * Verify that xnb_ring2pkt can convert a two request packet correctly.
 * This tests handling of the MORE_DATA flag and cdr
 */
static void
xnb_ring2pkt_2req(char *buffer, size_t buflen)
{
	struct xnb_pkt pkt;
	int num_consumed;
	struct netif_tx_request *req;
	RING_IDX start_idx = xnb_unit_pvt.txf.req_prod_pvt;

	req = RING_GET_REQUEST(&xnb_unit_pvt.txf,
	    xnb_unit_pvt.txf.req_prod_pvt);
	req->flags = NETTXF_more_data;
	req->size = 100;
	xnb_unit_pvt.txf.req_prod_pvt++;

	req = RING_GET_REQUEST(&xnb_unit_pvt.txf,
	    xnb_unit_pvt.txf.req_prod_pvt);
	req->flags = 0;
	req->size = 40;
	xnb_unit_pvt.txf.req_prod_pvt++;

	RING_PUSH_REQUESTS(&xnb_unit_pvt.txf);

	num_consumed = xnb_ring2pkt(&pkt, &xnb_unit_pvt.txb,
	                            xnb_unit_pvt.txb.req_cons);
	XNB_ASSERT(num_consumed == 2);
	XNB_ASSERT(pkt.size == 100);
	XNB_ASSERT(pkt.car_size == 60);
	XNB_ASSERT(pkt.flags == 0);
	XNB_ASSERT(xnb_pkt_is_valid(&pkt));
	XNB_ASSERT(pkt.list_len == 2);
	XNB_ASSERT(pkt.car == start_idx);
	XNB_ASSERT(pkt.cdr == start_idx + 1);
}

/**
 * Verify that xnb_ring2pkt can convert a three request packet correctly
 */
static void
xnb_ring2pkt_3req(char *buffer, size_t buflen)
{
	struct xnb_pkt pkt;
	int num_consumed;
	struct netif_tx_request *req;
	RING_IDX start_idx = xnb_unit_pvt.txf.req_prod_pvt;

	req = RING_GET_REQUEST(&xnb_unit_pvt.txf,
	    xnb_unit_pvt.txf.req_prod_pvt);
	req->flags = NETTXF_more_data;
	req->size = 200;
	xnb_unit_pvt.txf.req_prod_pvt++;

	req = RING_GET_REQUEST(&xnb_unit_pvt.txf,
	    xnb_unit_pvt.txf.req_prod_pvt);
	req->flags = NETTXF_more_data;
	req->size = 40;
	xnb_unit_pvt.txf.req_prod_pvt++;

	req = RING_GET_REQUEST(&xnb_unit_pvt.txf,
	    xnb_unit_pvt.txf.req_prod_pvt);
	req->flags = 0;
	req->size = 50;
	xnb_unit_pvt.txf.req_prod_pvt++;

	RING_PUSH_REQUESTS(&xnb_unit_pvt.txf);

	num_consumed = xnb_ring2pkt(&pkt, &xnb_unit_pvt.txb,
	                            xnb_unit_pvt.txb.req_cons);
	XNB_ASSERT(num_consumed == 3);
	XNB_ASSERT(pkt.size == 200);
	XNB_ASSERT(pkt.car_size == 110);
	XNB_ASSERT(pkt.flags == 0);
	XNB_ASSERT(xnb_pkt_is_valid(&pkt));
	XNB_ASSERT(pkt.list_len == 3);
	XNB_ASSERT(pkt.car == start_idx);
	XNB_ASSERT(pkt.cdr == start_idx + 1);
	XNB_ASSERT(RING_GET_REQUEST(&xnb_unit_pvt.txb, pkt.cdr + 1) == req);
}

/**
 * Verify that xnb_ring2pkt can read extra info
 */
static void
xnb_ring2pkt_extra(char *buffer, size_t buflen)
{
	struct xnb_pkt pkt;
	int num_consumed;
	struct netif_tx_request *req;
	struct netif_extra_info *ext;
	RING_IDX start_idx = xnb_unit_pvt.txf.req_prod_pvt;

	req = RING_GET_REQUEST(&xnb_unit_pvt.txf,
	    xnb_unit_pvt.txf.req_prod_pvt);
	req->flags = NETTXF_extra_info | NETTXF_more_data;
	req->size = 150;
	xnb_unit_pvt.txf.req_prod_pvt++;

	ext = (struct netif_extra_info*) RING_GET_REQUEST(&xnb_unit_pvt.txf,
	    xnb_unit_pvt.txf.req_prod_pvt);
	ext->flags = 0;
	ext->type = XEN_NETIF_EXTRA_TYPE_GSO;
	ext->u.gso.size = 250;
	ext->u.gso.type = XEN_NETIF_GSO_TYPE_TCPV4;
	ext->u.gso.features = 0;
	xnb_unit_pvt.txf.req_prod_pvt++;

	req = RING_GET_REQUEST(&xnb_unit_pvt.txf,
	    xnb_unit_pvt.txf.req_prod_pvt);
	req->flags = 0;
	req->size = 50;
	xnb_unit_pvt.txf.req_prod_pvt++;

	RING_PUSH_REQUESTS(&xnb_unit_pvt.txf);

	num_consumed = xnb_ring2pkt(&pkt, &xnb_unit_pvt.txb,
	                            xnb_unit_pvt.txb.req_cons);
	XNB_ASSERT(num_consumed == 3);
	XNB_ASSERT(pkt.extra.flags == 0);
	XNB_ASSERT(pkt.extra.type == XEN_NETIF_EXTRA_TYPE_GSO);
	XNB_ASSERT(pkt.extra.u.gso.size == 250);
	XNB_ASSERT(pkt.extra.u.gso.type == XEN_NETIF_GSO_TYPE_TCPV4);
	XNB_ASSERT(pkt.size == 150);
	XNB_ASSERT(pkt.car_size == 100);
	XNB_ASSERT(pkt.flags == NETTXF_extra_info);
	XNB_ASSERT(xnb_pkt_is_valid(&pkt));
	XNB_ASSERT(pkt.list_len == 2);
	XNB_ASSERT(pkt.car == start_idx);
	XNB_ASSERT(pkt.cdr == start_idx + 2);
	XNB_ASSERT(RING_GET_REQUEST(&xnb_unit_pvt.txb, pkt.cdr) == req);
}

/**
 * Verify that xnb_ring2pkt will consume no requests if the entire packet is
 * not yet in the ring
 */
static void
xnb_ring2pkt_partial(char *buffer, size_t buflen)
{
	struct xnb_pkt pkt;
	int num_consumed;
	struct netif_tx_request *req;

	req = RING_GET_REQUEST(&xnb_unit_pvt.txf,
	    xnb_unit_pvt.txf.req_prod_pvt);
	req->flags = NETTXF_more_data;
	req->size = 150;
	xnb_unit_pvt.txf.req_prod_pvt++;

	RING_PUSH_REQUESTS(&xnb_unit_pvt.txf);

	num_consumed = xnb_ring2pkt(&pkt, &xnb_unit_pvt.txb,
	                            xnb_unit_pvt.txb.req_cons);
	XNB_ASSERT(num_consumed == 0);
	XNB_ASSERT(! xnb_pkt_is_valid(&pkt));
}

/**
 * Verify that xnb_ring2pkt can read a packet whose requests wrap around
 * the end of the ring
 */
static void
xnb_ring2pkt_wraps(char *buffer, size_t buflen)
{
	struct xnb_pkt pkt;
	int num_consumed;
	struct netif_tx_request *req;
	unsigned int rsize;

	/*
	 * Manually tweak the ring indices to create a ring with no responses
	 * and the next request slot at position 2 from the end
	 */
	rsize = RING_SIZE(&xnb_unit_pvt.txf);
	xnb_unit_pvt.txf.req_prod_pvt = rsize - 2;
	xnb_unit_pvt.txf.rsp_cons = rsize - 2;
	xnb_unit_pvt.txs->req_prod = rsize - 2;
	xnb_unit_pvt.txs->req_event = rsize - 1;
	xnb_unit_pvt.txs->rsp_prod = rsize - 2;
	xnb_unit_pvt.txs->rsp_event = rsize - 1;
	xnb_unit_pvt.txb.rsp_prod_pvt = rsize - 2;
	xnb_unit_pvt.txb.req_cons = rsize - 2;

	req = RING_GET_REQUEST(&xnb_unit_pvt.txf,
	    xnb_unit_pvt.txf.req_prod_pvt);
	req->flags = NETTXF_more_data;
	req->size = 550;
	xnb_unit_pvt.txf.req_prod_pvt++;

	req = RING_GET_REQUEST(&xnb_unit_pvt.txf,
	    xnb_unit_pvt.txf.req_prod_pvt);
	req->flags = NETTXF_more_data;
	req->size = 100;
	xnb_unit_pvt.txf.req_prod_pvt++;

	req = RING_GET_REQUEST(&xnb_unit_pvt.txf,
	    xnb_unit_pvt.txf.req_prod_pvt);
	req->flags = 0;
	req->size = 50;
	xnb_unit_pvt.txf.req_prod_pvt++;

	RING_PUSH_REQUESTS(&xnb_unit_pvt.txf);

	num_consumed = xnb_ring2pkt(&pkt, &xnb_unit_pvt.txb,
	                            xnb_unit_pvt.txb.req_cons);
	XNB_ASSERT(num_consumed == 3);
	XNB_ASSERT(xnb_pkt_is_valid(&pkt));
	XNB_ASSERT(pkt.list_len == 3);
	XNB_ASSERT(RING_GET_REQUEST(&xnb_unit_pvt.txb, pkt.cdr + 1) == req);
}

/**
 * xnb_txpkt2rsp should do nothing for an empty packet
 */
static void
xnb_txpkt2rsp_emptypkt(char *buffer, size_t buflen)
{
	int num_consumed;
	struct xnb_pkt pkt;
	netif_tx_back_ring_t txb_backup = xnb_unit_pvt.txb;
	netif_tx_sring_t txs_backup = *xnb_unit_pvt.txs;
	pkt.list_len = 0;

	/* must call xnb_ring2pkt just to initialize pkt */
	num_consumed = xnb_ring2pkt(&pkt, &xnb_unit_pvt.txb,
	                            xnb_unit_pvt.txb.req_cons);
	xnb_txpkt2rsp(&pkt, &xnb_unit_pvt.txb, 0);
	XNB_ASSERT(
	    memcmp(&txb_backup, &xnb_unit_pvt.txb, sizeof(txb_backup)) == 0);
	XNB_ASSERT(
	    memcmp(&txs_backup, xnb_unit_pvt.txs, sizeof(txs_backup)) == 0);
}

/**
 * xnb_txpkt2rsp responding to one request
 */
static void
xnb_txpkt2rsp_1req(char *buffer, size_t buflen)
{
	uint16_t num_consumed;
	struct xnb_pkt pkt;
	struct netif_tx_request *req;
	struct netif_tx_response *rsp;

	req = RING_GET_REQUEST(&xnb_unit_pvt.txf,
	    xnb_unit_pvt.txf.req_prod_pvt);
	req->size = 1000;
	req->flags = 0;
	xnb_unit_pvt.txf.req_prod_pvt++;

	RING_PUSH_REQUESTS(&xnb_unit_pvt.txf);

	num_consumed = xnb_ring2pkt(&pkt, &xnb_unit_pvt.txb,
	                            xnb_unit_pvt.txb.req_cons);
	xnb_unit_pvt.txb.req_cons += num_consumed;

	xnb_txpkt2rsp(&pkt, &xnb_unit_pvt.txb, 0);
	rsp = RING_GET_RESPONSE(&xnb_unit_pvt.txb, xnb_unit_pvt.txf.rsp_cons);

	XNB_ASSERT(
	    xnb_unit_pvt.txb.rsp_prod_pvt == xnb_unit_pvt.txs->req_prod);
	XNB_ASSERT(rsp->id == req->id);
	XNB_ASSERT(rsp->status == NETIF_RSP_OKAY);
}

/**
 * xnb_txpkt2rsp responding to 1 data request and 1 extra info
 */
static void
xnb_txpkt2rsp_extra(char *buffer, size_t buflen)
{
	uint16_t num_consumed;
	struct xnb_pkt pkt;
	struct netif_tx_request *req;
	netif_extra_info_t *ext;
	struct netif_tx_response *rsp;

	req = RING_GET_REQUEST(&xnb_unit_pvt.txf,
	    xnb_unit_pvt.txf.req_prod_pvt);
	req->size = 1000;
	req->flags = NETTXF_extra_info;
	req->id = 69;
	xnb_unit_pvt.txf.req_prod_pvt++;

	ext = (netif_extra_info_t*) RING_GET_REQUEST(&xnb_unit_pvt.txf,
	    xnb_unit_pvt.txf.req_prod_pvt);
	ext->type = XEN_NETIF_EXTRA_TYPE_GSO;
	ext->flags = 0;
	xnb_unit_pvt.txf.req_prod_pvt++;

	RING_PUSH_REQUESTS(&xnb_unit_pvt.txf);

	num_consumed = xnb_ring2pkt(&pkt, &xnb_unit_pvt.txb,
	                            xnb_unit_pvt.txb.req_cons);
	xnb_unit_pvt.txb.req_cons += num_consumed;

	xnb_txpkt2rsp(&pkt, &xnb_unit_pvt.txb, 0);

	XNB_ASSERT(
	    xnb_unit_pvt.txb.rsp_prod_pvt == xnb_unit_pvt.txs->req_prod);

	rsp = RING_GET_RESPONSE(&xnb_unit_pvt.txb, xnb_unit_pvt.txf.rsp_cons);
	XNB_ASSERT(rsp->id == req->id);
	XNB_ASSERT(rsp->status == NETIF_RSP_OKAY);

	rsp = RING_GET_RESPONSE(&xnb_unit_pvt.txb,
	    xnb_unit_pvt.txf.rsp_cons + 1);
	XNB_ASSERT(rsp->status == NETIF_RSP_NULL);
}

/**
 * xnb_txpkt2rsp responding to 3 data requests and 1 extra info
 */
static void
xnb_txpkt2rsp_long(char *buffer, size_t buflen)
{
	uint16_t num_consumed;
	struct xnb_pkt pkt;
	struct netif_tx_request *req;
	netif_extra_info_t *ext;
	struct netif_tx_response *rsp;

	req = RING_GET_REQUEST(&xnb_unit_pvt.txf,
	    xnb_unit_pvt.txf.req_prod_pvt);
	req->size = 1000;
	req->flags = NETTXF_extra_info | NETTXF_more_data;
	req->id = 254;
	xnb_unit_pvt.txf.req_prod_pvt++;

	ext = (netif_extra_info_t*) RING_GET_REQUEST(&xnb_unit_pvt.txf,
	    xnb_unit_pvt.txf.req_prod_pvt);
	ext->type = XEN_NETIF_EXTRA_TYPE_GSO;
	ext->flags = 0;
	xnb_unit_pvt.txf.req_prod_pvt++;

	req = RING_GET_REQUEST(&xnb_unit_pvt.txf,
	    xnb_unit_pvt.txf.req_prod_pvt);
	req->size = 300;
	req->flags = NETTXF_more_data;
	req->id = 1034;
	xnb_unit_pvt.txf.req_prod_pvt++;

	req = RING_GET_REQUEST(&xnb_unit_pvt.txf,
	    xnb_unit_pvt.txf.req_prod_pvt);
	req->size = 400;
	req->flags = 0;
	req->id = 34;
	xnb_unit_pvt.txf.req_prod_pvt++;

	RING_PUSH_REQUESTS(&xnb_unit_pvt.txf);

	num_consumed = xnb_ring2pkt(&pkt, &xnb_unit_pvt.txb,
	                            xnb_unit_pvt.txb.req_cons);
	xnb_unit_pvt.txb.req_cons += num_consumed;

	xnb_txpkt2rsp(&pkt, &xnb_unit_pvt.txb, 0);

	XNB_ASSERT(
	    xnb_unit_pvt.txb.rsp_prod_pvt == xnb_unit_pvt.txs->req_prod);

	rsp = RING_GET_RESPONSE(&xnb_unit_pvt.txb, xnb_unit_pvt.txf.rsp_cons);
	XNB_ASSERT(rsp->id ==
	    RING_GET_REQUEST(&xnb_unit_pvt.txf, 0)->id);
	XNB_ASSERT(rsp->status == NETIF_RSP_OKAY);

	rsp = RING_GET_RESPONSE(&xnb_unit_pvt.txb,
	    xnb_unit_pvt.txf.rsp_cons + 1);
	XNB_ASSERT(rsp->status == NETIF_RSP_NULL);

	rsp = RING_GET_RESPONSE(&xnb_unit_pvt.txb,
	    xnb_unit_pvt.txf.rsp_cons + 2);
	XNB_ASSERT(rsp->id ==
	    RING_GET_REQUEST(&xnb_unit_pvt.txf, 2)->id);
	XNB_ASSERT(rsp->status == NETIF_RSP_OKAY);

	rsp = RING_GET_RESPONSE(&xnb_unit_pvt.txb,
	    xnb_unit_pvt.txf.rsp_cons + 3);
	XNB_ASSERT(rsp->id ==
	    RING_GET_REQUEST(&xnb_unit_pvt.txf, 3)->id);
	XNB_ASSERT(rsp->status == NETIF_RSP_OKAY);
}

/**
 * xnb_txpkt2rsp responding to an invalid packet.
 * Note: this test will result in an error message being printed to the console
 * such as:
 * xnb(xnb_ring2pkt:1306): Unknown extra info type 255.  Discarding packet
 */
static void
xnb_txpkt2rsp_invalid(char *buffer, size_t buflen)
{
	uint16_t num_consumed;
	struct xnb_pkt pkt;
	struct netif_tx_request *req;
	netif_extra_info_t *ext;
	struct netif_tx_response *rsp;

	req = RING_GET_REQUEST(&xnb_unit_pvt.txf,
	    xnb_unit_pvt.txf.req_prod_pvt);
	req->size = 1000;
	req->flags = NETTXF_extra_info;
	req->id = 69;
	xnb_unit_pvt.txf.req_prod_pvt++;

	ext = (netif_extra_info_t*) RING_GET_REQUEST(&xnb_unit_pvt.txf,
	    xnb_unit_pvt.txf.req_prod_pvt);
	ext->type = 0xFF;	/* Invalid extra type */
	ext->flags = 0;
	xnb_unit_pvt.txf.req_prod_pvt++;

	RING_PUSH_REQUESTS(&xnb_unit_pvt.txf);

	num_consumed = xnb_ring2pkt(&pkt, &xnb_unit_pvt.txb,
	                            xnb_unit_pvt.txb.req_cons);
	xnb_unit_pvt.txb.req_cons += num_consumed;
	XNB_ASSERT(! xnb_pkt_is_valid(&pkt));

	xnb_txpkt2rsp(&pkt, &xnb_unit_pvt.txb, 0);

	XNB_ASSERT(
	    xnb_unit_pvt.txb.rsp_prod_pvt == xnb_unit_pvt.txs->req_prod);

	rsp = RING_GET_RESPONSE(&xnb_unit_pvt.txb, xnb_unit_pvt.txf.rsp_cons);
	XNB_ASSERT(rsp->id == req->id);
	XNB_ASSERT(rsp->status == NETIF_RSP_ERROR);

	rsp = RING_GET_RESPONSE(&xnb_unit_pvt.txb,
	    xnb_unit_pvt.txf.rsp_cons + 1);
	XNB_ASSERT(rsp->status == NETIF_RSP_NULL);
}

/**
 * xnb_txpkt2rsp responding to one request which caused an error
 */
static void
xnb_txpkt2rsp_error(char *buffer, size_t buflen)
{
	uint16_t num_consumed;
	struct xnb_pkt pkt;
	struct netif_tx_request *req;
	struct netif_tx_response *rsp;

	req = RING_GET_REQUEST(&xnb_unit_pvt.txf,
	    xnb_unit_pvt.txf.req_prod_pvt);
	req->size = 1000;
	req->flags = 0;
	xnb_unit_pvt.txf.req_prod_pvt++;

	RING_PUSH_REQUESTS(&xnb_unit_pvt.txf);

	num_consumed = xnb_ring2pkt(&pkt, &xnb_unit_pvt.txb,
	                            xnb_unit_pvt.txb.req_cons);
	xnb_unit_pvt.txb.req_cons += num_consumed;

	xnb_txpkt2rsp(&pkt, &xnb_unit_pvt.txb, 1);
	rsp = RING_GET_RESPONSE(&xnb_unit_pvt.txb, xnb_unit_pvt.txf.rsp_cons);

	XNB_ASSERT(
	    xnb_unit_pvt.txb.rsp_prod_pvt == xnb_unit_pvt.txs->req_prod);
	XNB_ASSERT(rsp->id == req->id);
	XNB_ASSERT(rsp->status == NETIF_RSP_ERROR);
}

/**
 * xnb_txpkt2rsp's responses wrap around the end of the ring
 */
static void
xnb_txpkt2rsp_wraps(char *buffer, size_t buflen)
{
	struct xnb_pkt pkt;
	int num_consumed;
	struct netif_tx_request *req;
	struct netif_tx_response *rsp;
	unsigned int rsize;

	/*
	 * Manually tweak the ring indices to create a ring with no responses
	 * and the next request slot at position 2 from the end
	 */
	rsize = RING_SIZE(&xnb_unit_pvt.txf);
	xnb_unit_pvt.txf.req_prod_pvt = rsize - 2;
	xnb_unit_pvt.txf.rsp_cons = rsize - 2;
	xnb_unit_pvt.txs->req_prod = rsize - 2;
	xnb_unit_pvt.txs->req_event = rsize - 1;
	xnb_unit_pvt.txs->rsp_prod = rsize - 2;
	xnb_unit_pvt.txs->rsp_event = rsize - 1;
	xnb_unit_pvt.txb.rsp_prod_pvt = rsize - 2;
	xnb_unit_pvt.txb.req_cons = rsize - 2;

	req = RING_GET_REQUEST(&xnb_unit_pvt.txf,
	    xnb_unit_pvt.txf.req_prod_pvt);
	req->flags = NETTXF_more_data;
	req->size = 550;
	req->id = 1;
	xnb_unit_pvt.txf.req_prod_pvt++;

	req = RING_GET_REQUEST(&xnb_unit_pvt.txf,
	    xnb_unit_pvt.txf.req_prod_pvt);
	req->flags = NETTXF_more_data;
	req->size = 100;
	req->id = 2;
	xnb_unit_pvt.txf.req_prod_pvt++;

	req = RING_GET_REQUEST(&xnb_unit_pvt.txf,
	    xnb_unit_pvt.txf.req_prod_pvt);
	req->flags = 0;
	req->size = 50;
	req->id = 3;
	xnb_unit_pvt.txf.req_prod_pvt++;

	RING_PUSH_REQUESTS(&xnb_unit_pvt.txf);

	num_consumed = xnb_ring2pkt(&pkt, &xnb_unit_pvt.txb,
	                            xnb_unit_pvt.txb.req_cons);

	xnb_txpkt2rsp(&pkt, &xnb_unit_pvt.txb, 0);

	XNB_ASSERT(
	    xnb_unit_pvt.txb.rsp_prod_pvt == xnb_unit_pvt.txs->req_prod);
	rsp = RING_GET_RESPONSE(&xnb_unit_pvt.txb,
	    xnb_unit_pvt.txf.rsp_cons + 2);
	XNB_ASSERT(rsp->id == req->id);
	XNB_ASSERT(rsp->status == NETIF_RSP_OKAY);
}

/**
 * Helper function used to set up pkt2mbufc tests
 * \param size		size in bytes of the single request to push to the ring
 * \param flags		optional flags to put in the netif request
 * \param[out] pkt	the returned packet object
 * \return number of requests consumed from the ring
 */
static int
xnb_get1pkt(struct xnb_pkt *pkt, size_t size, uint16_t flags)
{
	struct netif_tx_request *req;

	req = RING_GET_REQUEST(&xnb_unit_pvt.txf,
	    xnb_unit_pvt.txf.req_prod_pvt);
	req->flags = flags;
	req->size = size;
	xnb_unit_pvt.txf.req_prod_pvt++;

	RING_PUSH_REQUESTS(&xnb_unit_pvt.txf);

	return xnb_ring2pkt(pkt, &xnb_unit_pvt.txb,
	    xnb_unit_pvt.txb.req_cons);
}

/**
 * xnb_pkt2mbufc on an empty packet
 */
static void
xnb_pkt2mbufc_empty(char *buffer, size_t buflen)
{
	int num_consumed;
	struct xnb_pkt pkt;
	struct mbuf *pMbuf;
	pkt.list_len = 0;

	/* must call xnb_ring2pkt just to initialize pkt */
	num_consumed = xnb_ring2pkt(&pkt, &xnb_unit_pvt.txb,
	                            xnb_unit_pvt.txb.req_cons);
	pkt.size = 0;
	pMbuf = xnb_pkt2mbufc(&pkt, xnb_unit_pvt.ifp);
	safe_m_freem(&pMbuf);
}

/**
 * xnb_pkt2mbufc on a short packet that can fit in an mbuf internal buffer
 */
static void
xnb_pkt2mbufc_short(char *buffer, size_t buflen)
{
	const size_t size = MINCLSIZE - 1;
	struct xnb_pkt pkt;
	struct mbuf *pMbuf;

	xnb_get1pkt(&pkt, size, 0);

	pMbuf = xnb_pkt2mbufc(&pkt, xnb_unit_pvt.ifp);
	XNB_ASSERT(M_TRAILINGSPACE(pMbuf) >= size);
	safe_m_freem(&pMbuf);
}

/**
 * xnb_pkt2mbufc on a short packet whose checksum was validated by the
 * netfront
 */
static void
xnb_pkt2mbufc_csum(char *buffer, size_t buflen)
{
	const size_t size = MINCLSIZE - 1;
	struct xnb_pkt pkt;
	struct mbuf *pMbuf;

	xnb_get1pkt(&pkt, size, NETTXF_data_validated);

	pMbuf = xnb_pkt2mbufc(&pkt, xnb_unit_pvt.ifp);
	XNB_ASSERT(M_TRAILINGSPACE(pMbuf) >= size);
	XNB_ASSERT(pMbuf->m_pkthdr.csum_flags & CSUM_IP_CHECKED);
	XNB_ASSERT(pMbuf->m_pkthdr.csum_flags & CSUM_IP_VALID);
	XNB_ASSERT(pMbuf->m_pkthdr.csum_flags & CSUM_DATA_VALID);
	XNB_ASSERT(pMbuf->m_pkthdr.csum_flags & CSUM_PSEUDO_HDR);
	safe_m_freem(&pMbuf);
}

/**
 * xnb_pkt2mbufc on a packet that can fit in one cluster
 */
static void
xnb_pkt2mbufc_1cluster(char *buffer, size_t buflen)
{
	const size_t size = MINCLSIZE;
	struct xnb_pkt pkt;
	struct mbuf *pMbuf;

	xnb_get1pkt(&pkt, size, 0);

	pMbuf = xnb_pkt2mbufc(&pkt, xnb_unit_pvt.ifp);
	XNB_ASSERT(M_TRAILINGSPACE(pMbuf) >= size);
	safe_m_freem(&pMbuf);
}

/**
 * xnb_pkt2mbufc on a packet that cannot fit in one regular cluster
 */
static void
xnb_pkt2mbufc_largecluster(char *buffer, size_t buflen)
{
	const size_t size = MCLBYTES + 1;
	struct xnb_pkt pkt;
	struct mbuf *pMbuf;

	xnb_get1pkt(&pkt, size, 0);

	pMbuf = xnb_pkt2mbufc(&pkt, xnb_unit_pvt.ifp);
	XNB_ASSERT(M_TRAILINGSPACE(pMbuf) >= size);
	safe_m_freem(&pMbuf);
}

/**
 * xnb_pkt2mbufc on a packet that cannot fit in two regular clusters
 */
static void
xnb_pkt2mbufc_2cluster(char *buffer, size_t buflen)
{
	const size_t size = 2 * MCLBYTES + 1;
	size_t space = 0;
	struct xnb_pkt pkt;
	struct mbuf *pMbuf;
	struct mbuf *m;

	xnb_get1pkt(&pkt, size, 0);

	pMbuf = xnb_pkt2mbufc(&pkt, xnb_unit_pvt.ifp);

	for (m = pMbuf; m != NULL; m = m->m_next) {
		space += M_TRAILINGSPACE(m);
	}
	XNB_ASSERT(space >= size);
	safe_m_freem(&pMbuf);
}

/**
 * xnb_txpkt2gnttab on an empty packet.  Should return an empty gnttab
 */
static void
xnb_txpkt2gnttab_empty(char *buffer, size_t buflen)
{
	int n_entries;
	struct xnb_pkt pkt;
	struct mbuf *pMbuf;
	pkt.list_len = 0;

	/* must call xnb_ring2pkt just to initialize pkt */
	xnb_ring2pkt(&pkt, &xnb_unit_pvt.txb, xnb_unit_pvt.txb.req_cons);
	pkt.size = 0;
	pMbuf = xnb_pkt2mbufc(&pkt, xnb_unit_pvt.ifp);
	n_entries = xnb_txpkt2gnttab(&pkt, pMbuf, xnb_unit_pvt.gnttab,
	    &xnb_unit_pvt.txb, DOMID_FIRST_RESERVED);
	XNB_ASSERT(n_entries == 0);
	safe_m_freem(&pMbuf);
}

/**
 * xnb_txpkt2gnttab on a short packet that can fit in one mbuf internal buffer
 * and has one request
 */
static void
xnb_txpkt2gnttab_short(char *buffer, size_t buflen)
{
	const size_t size = MINCLSIZE - 1;
	int n_entries;
	struct xnb_pkt pkt;
	struct mbuf *pMbuf;

	struct netif_tx_request *req = RING_GET_REQUEST(&xnb_unit_pvt.txf,
	    xnb_unit_pvt.txf.req_prod_pvt);
	req->flags = 0;
	req->size = size;
	req->gref = 7;
	req->offset = 17;
	xnb_unit_pvt.txf.req_prod_pvt++;

	RING_PUSH_REQUESTS(&xnb_unit_pvt.txf);

	xnb_ring2pkt(&pkt, &xnb_unit_pvt.txb, xnb_unit_pvt.txb.req_cons);

	pMbuf = xnb_pkt2mbufc(&pkt, xnb_unit_pvt.ifp);
	n_entries = xnb_txpkt2gnttab(&pkt, pMbuf, xnb_unit_pvt.gnttab,
	    &xnb_unit_pvt.txb, DOMID_FIRST_RESERVED);
	XNB_ASSERT(n_entries == 1);
	XNB_ASSERT(xnb_unit_pvt.gnttab[0].len == size);
	/* flags should indicate gref's for source */
	XNB_ASSERT(xnb_unit_pvt.gnttab[0].flags & GNTCOPY_source_gref);
	XNB_ASSERT(xnb_unit_pvt.gnttab[0].source.offset == req->offset);
	XNB_ASSERT(xnb_unit_pvt.gnttab[0].source.domid == DOMID_SELF);
	XNB_ASSERT(xnb_unit_pvt.gnttab[0].dest.offset == virt_to_offset(
	      mtod(pMbuf, vm_offset_t)));
	XNB_ASSERT(xnb_unit_pvt.gnttab[0].dest.u.gmfn ==
		virt_to_mfn(mtod(pMbuf, vm_offset_t)));
	XNB_ASSERT(xnb_unit_pvt.gnttab[0].dest.domid == DOMID_FIRST_RESERVED);
	safe_m_freem(&pMbuf);
}

/**
 * xnb_txpkt2gnttab on a packet with two requests, that can fit into a single
 * mbuf cluster
 */
static void
xnb_txpkt2gnttab_2req(char *buffer, size_t buflen)
{
	int n_entries;
	struct xnb_pkt pkt;
	struct mbuf *pMbuf;

	struct netif_tx_request *req = RING_GET_REQUEST(&xnb_unit_pvt.txf,
	    xnb_unit_pvt.txf.req_prod_pvt);
	req->flags = NETTXF_more_data;
	req->size = 1900;
	req->gref = 7;
	req->offset = 0;
	xnb_unit_pvt.txf.req_prod_pvt++;

	req = RING_GET_REQUEST(&xnb_unit_pvt.txf,
	    xnb_unit_pvt.txf.req_prod_pvt);
	req->flags = 0;
	req->size = 500;
	req->gref = 8;
	req->offset = 0;
	xnb_unit_pvt.txf.req_prod_pvt++;

	RING_PUSH_REQUESTS(&xnb_unit_pvt.txf);

	xnb_ring2pkt(&pkt, &xnb_unit_pvt.txb, xnb_unit_pvt.txb.req_cons);

	pMbuf = xnb_pkt2mbufc(&pkt, xnb_unit_pvt.ifp);
	n_entries = xnb_txpkt2gnttab(&pkt, pMbuf, xnb_unit_pvt.gnttab,
	    &xnb_unit_pvt.txb, DOMID_FIRST_RESERVED);

	XNB_ASSERT(n_entries == 2);
	XNB_ASSERT(xnb_unit_pvt.gnttab[0].len == 1400);
	XNB_ASSERT(xnb_unit_pvt.gnttab[0].dest.offset == virt_to_offset(
	      mtod(pMbuf, vm_offset_t)));

	XNB_ASSERT(xnb_unit_pvt.gnttab[1].len == 500);
	XNB_ASSERT(xnb_unit_pvt.gnttab[1].dest.offset == virt_to_offset(
	      mtod(pMbuf, vm_offset_t) + 1400));
	safe_m_freem(&pMbuf);
}

/**
 * xnb_txpkt2gnttab on a single request that spans two mbuf clusters
 */
static void
xnb_txpkt2gnttab_2cluster(char *buffer, size_t buflen)
{
	int n_entries;
	struct xnb_pkt pkt;
	struct mbuf *pMbuf;
	const uint16_t data_this_transaction = (MCLBYTES*2) + 1;

	struct netif_tx_request *req = RING_GET_REQUEST(&xnb_unit_pvt.txf,
	    xnb_unit_pvt.txf.req_prod_pvt);
	req->flags = 0;
	req->size = data_this_transaction;
	req->gref = 8;
	req->offset = 0;
	xnb_unit_pvt.txf.req_prod_pvt++;

	RING_PUSH_REQUESTS(&xnb_unit_pvt.txf);
	xnb_ring2pkt(&pkt, &xnb_unit_pvt.txb, xnb_unit_pvt.txb.req_cons);

	pMbuf = xnb_pkt2mbufc(&pkt, xnb_unit_pvt.ifp);
	n_entries = xnb_txpkt2gnttab(&pkt, pMbuf, xnb_unit_pvt.gnttab,
	    &xnb_unit_pvt.txb, DOMID_FIRST_RESERVED);

	if (M_TRAILINGSPACE(pMbuf) == MCLBYTES) {
		/* there should be three mbufs and three gnttab entries */
		XNB_ASSERT(n_entries == 3);
		XNB_ASSERT(xnb_unit_pvt.gnttab[0].len == MCLBYTES);
		XNB_ASSERT(
		    xnb_unit_pvt.gnttab[0].dest.offset == virt_to_offset(
		      mtod(pMbuf, vm_offset_t)));
		XNB_ASSERT(xnb_unit_pvt.gnttab[0].source.offset == 0);

		XNB_ASSERT(xnb_unit_pvt.gnttab[1].len == MCLBYTES);
		XNB_ASSERT(
		    xnb_unit_pvt.gnttab[1].dest.offset == virt_to_offset(
		      mtod(pMbuf->m_next, vm_offset_t)));
		XNB_ASSERT(xnb_unit_pvt.gnttab[1].source.offset == MCLBYTES);

		XNB_ASSERT(xnb_unit_pvt.gnttab[2].len == 1);
		XNB_ASSERT(
		    xnb_unit_pvt.gnttab[2].dest.offset == virt_to_offset(
		      mtod(pMbuf->m_next, vm_offset_t)));
		XNB_ASSERT(xnb_unit_pvt.gnttab[2].source.offset == 2 *
			    MCLBYTES);
	} else if (M_TRAILINGSPACE(pMbuf) == 2 * MCLBYTES) {
		/* there should be two mbufs and two gnttab entries */
		XNB_ASSERT(n_entries == 2);
		XNB_ASSERT(xnb_unit_pvt.gnttab[0].len == 2 * MCLBYTES);
		XNB_ASSERT(
		    xnb_unit_pvt.gnttab[0].dest.offset == virt_to_offset(
		      mtod(pMbuf, vm_offset_t)));
		XNB_ASSERT(xnb_unit_pvt.gnttab[0].source.offset == 0);

		XNB_ASSERT(xnb_unit_pvt.gnttab[1].len == 1);
		XNB_ASSERT(
		    xnb_unit_pvt.gnttab[1].dest.offset == virt_to_offset(
		      mtod(pMbuf->m_next, vm_offset_t)));
		XNB_ASSERT(
		    xnb_unit_pvt.gnttab[1].source.offset == 2 * MCLBYTES);

	} else {
		/* should never get here */
		XNB_ASSERT(0);
	}
	if (pMbuf != NULL)
		m_freem(pMbuf);
}

/**
 * xnb_update_mbufc on a short packet that only has one gnttab entry
 */
static void
xnb_update_mbufc_short(char *buffer, size_t buflen)
{
	const size_t size = MINCLSIZE - 1;
	int n_entries;
	struct xnb_pkt pkt;
	struct mbuf *pMbuf;

	struct netif_tx_request *req = RING_GET_REQUEST(&xnb_unit_pvt.txf,
	    xnb_unit_pvt.txf.req_prod_pvt);
	req->flags = 0;
	req->size = size;
	req->gref = 7;
	req->offset = 17;
	xnb_unit_pvt.txf.req_prod_pvt++;

	RING_PUSH_REQUESTS(&xnb_unit_pvt.txf);

	xnb_ring2pkt(&pkt, &xnb_unit_pvt.txb, xnb_unit_pvt.txb.req_cons);

	pMbuf = xnb_pkt2mbufc(&pkt, xnb_unit_pvt.ifp);
	n_entries = xnb_txpkt2gnttab(&pkt, pMbuf, xnb_unit_pvt.gnttab,
	    &xnb_unit_pvt.txb, DOMID_FIRST_RESERVED);

	/* Update grant table's status fields as the hypervisor call would */
	xnb_unit_pvt.gnttab[0].status = GNTST_okay;

	xnb_update_mbufc(pMbuf, xnb_unit_pvt.gnttab, n_entries);
	XNB_ASSERT(pMbuf->m_len == size);
	XNB_ASSERT(pMbuf->m_pkthdr.len == size);
	safe_m_freem(&pMbuf);
}

/**
 * xnb_update_mbufc on a packet with two requests, that can fit into a single
 * mbuf cluster
 */
static void
xnb_update_mbufc_2req(char *buffer, size_t buflen)
{
	int n_entries;
	struct xnb_pkt pkt;
	struct mbuf *pMbuf;

	struct netif_tx_request *req = RING_GET_REQUEST(&xnb_unit_pvt.txf,
	    xnb_unit_pvt.txf.req_prod_pvt);
	req->flags = NETTXF_more_data;
	req->size = 1900;
	req->gref = 7;
	req->offset = 0;
	xnb_unit_pvt.txf.req_prod_pvt++;

	req = RING_GET_REQUEST(&xnb_unit_pvt.txf,
	    xnb_unit_pvt.txf.req_prod_pvt);
	req->flags = 0;
	req->size = 500;
	req->gref = 8;
	req->offset = 0;
	xnb_unit_pvt.txf.req_prod_pvt++;

	RING_PUSH_REQUESTS(&xnb_unit_pvt.txf);

	xnb_ring2pkt(&pkt, &xnb_unit_pvt.txb, xnb_unit_pvt.txb.req_cons);

	pMbuf = xnb_pkt2mbufc(&pkt, xnb_unit_pvt.ifp);
	n_entries = xnb_txpkt2gnttab(&pkt, pMbuf, xnb_unit_pvt.gnttab,
	    &xnb_unit_pvt.txb, DOMID_FIRST_RESERVED);

	/* Update grant table's status fields as the hypervisor call would */
	xnb_unit_pvt.gnttab[0].status = GNTST_okay;
	xnb_unit_pvt.gnttab[1].status = GNTST_okay;

	xnb_update_mbufc(pMbuf, xnb_unit_pvt.gnttab, n_entries);
	XNB_ASSERT(n_entries == 2);
	XNB_ASSERT(pMbuf->m_pkthdr.len == 1900);
	XNB_ASSERT(pMbuf->m_len == 1900);

	safe_m_freem(&pMbuf);
}

/**
 * xnb_update_mbufc on a single request that spans two mbuf clusters
 */
static void
xnb_update_mbufc_2cluster(char *buffer, size_t buflen)
{
	int i;
	int n_entries;
	struct xnb_pkt pkt;
	struct mbuf *pMbuf;
	const uint16_t data_this_transaction = (MCLBYTES*2) + 1;

	struct netif_tx_request *req = RING_GET_REQUEST(&xnb_unit_pvt.txf,
	    xnb_unit_pvt.txf.req_prod_pvt);
	req->flags = 0;
	req->size = data_this_transaction;
	req->gref = 8;
	req->offset = 0;
	xnb_unit_pvt.txf.req_prod_pvt++;

	RING_PUSH_REQUESTS(&xnb_unit_pvt.txf);
	xnb_ring2pkt(&pkt, &xnb_unit_pvt.txb, xnb_unit_pvt.txb.req_cons);

	pMbuf = xnb_pkt2mbufc(&pkt, xnb_unit_pvt.ifp);
	n_entries = xnb_txpkt2gnttab(&pkt, pMbuf, xnb_unit_pvt.gnttab,
	    &xnb_unit_pvt.txb, DOMID_FIRST_RESERVED);

	/* Update grant table's status fields */
	for (i = 0; i < n_entries; i++) {
		xnb_unit_pvt.gnttab[i].status = GNTST_okay;
	}
	xnb_update_mbufc(pMbuf, xnb_unit_pvt.gnttab, n_entries);

	if (n_entries == 3) {
		/* there should be three mbufs and three gnttab entries */
		XNB_ASSERT(pMbuf->m_pkthdr.len == data_this_transaction);
		XNB_ASSERT(pMbuf->m_len == MCLBYTES);
		XNB_ASSERT(pMbuf->m_next->m_len == MCLBYTES);
		XNB_ASSERT(pMbuf->m_next->m_next->m_len == 1);
	} else if (n_entries == 2) {
		/* there should be two mbufs and two gnttab entries */
		XNB_ASSERT(n_entries == 2);
		XNB_ASSERT(pMbuf->m_pkthdr.len == data_this_transaction);
		XNB_ASSERT(pMbuf->m_len == 2 * MCLBYTES);
		XNB_ASSERT(pMbuf->m_next->m_len == 1);
	} else {
		/* should never get here */
		XNB_ASSERT(0);
	}
	safe_m_freem(&pMbuf);
}

/** xnb_mbufc2pkt on an empty mbufc */
static void
xnb_mbufc2pkt_empty(char *buffer, size_t buflen)
{
	struct xnb_pkt pkt;
	int free_slots = 64;
	struct mbuf *mbuf;

	mbuf = m_get(M_WAITOK, MT_DATA);
	/*
	 * note: it is illegal to set M_PKTHDR on a mbuf with no data.  Doing so
	 * will cause m_freem to segfault
	 */
	XNB_ASSERT(mbuf->m_len == 0);

	xnb_mbufc2pkt(mbuf, &pkt, 0, free_slots);
	XNB_ASSERT(! xnb_pkt_is_valid(&pkt));

	safe_m_freem(&mbuf);
}

/** xnb_mbufc2pkt on a short mbufc */
static void
xnb_mbufc2pkt_short(char *buffer, size_t buflen)
{
	struct xnb_pkt pkt;
	size_t size = 128;
	int free_slots = 64;
	RING_IDX start = 9;
	struct mbuf *mbuf;

	mbuf = m_getm(NULL, size, M_WAITOK, MT_DATA);
	mbuf->m_flags |= M_PKTHDR;
	mbuf->m_pkthdr.len = size;
	mbuf->m_len = size;

	xnb_mbufc2pkt(mbuf, &pkt, start, free_slots);
	XNB_ASSERT(xnb_pkt_is_valid(&pkt));
	XNB_ASSERT(pkt.size == size);
	XNB_ASSERT(pkt.car_size == size);
	XNB_ASSERT(! (pkt.flags &
	      (NETRXF_more_data | NETRXF_extra_info)));
	XNB_ASSERT(pkt.list_len == 1);
	XNB_ASSERT(pkt.car == start);

	safe_m_freem(&mbuf);
}

/** xnb_mbufc2pkt on a single mbuf with an mbuf cluster */
static void
xnb_mbufc2pkt_1cluster(char *buffer, size_t buflen)
{
	struct xnb_pkt pkt;
	size_t size = MCLBYTES;
	int free_slots = 32;
	RING_IDX start = 12;
	struct mbuf *mbuf;

	mbuf = m_getm(NULL, size, M_WAITOK, MT_DATA);
	mbuf->m_flags |= M_PKTHDR;
	mbuf->m_pkthdr.len = size;
	mbuf->m_len = size;

	xnb_mbufc2pkt(mbuf, &pkt, start, free_slots);
	XNB_ASSERT(xnb_pkt_is_valid(&pkt));
	XNB_ASSERT(pkt.size == size);
	XNB_ASSERT(pkt.car_size == size);
	XNB_ASSERT(! (pkt.flags &
	      (NETRXF_more_data | NETRXF_extra_info)));
	XNB_ASSERT(pkt.list_len == 1);
	XNB_ASSERT(pkt.car == start);

	safe_m_freem(&mbuf);
}

/** xnb_mbufc2pkt on a two-mbuf chain with short data regions */
static void
xnb_mbufc2pkt_2short(char *buffer, size_t buflen)
{
	struct xnb_pkt pkt;
	size_t size1 = MHLEN - 5;
	size_t size2 = MHLEN - 15;
	int free_slots = 32;
	RING_IDX start = 14;
	struct mbuf *mbufc, *mbufc2;

	mbufc = m_getm(NULL, size1, M_WAITOK, MT_DATA);
	if (mbufc == NULL) {
		XNB_ASSERT(mbufc != NULL);
		return;
	}
	mbufc->m_flags |= M_PKTHDR;

	mbufc2 = m_getm(mbufc, size2, M_WAITOK, MT_DATA);
	if (mbufc2 == NULL) {
		XNB_ASSERT(mbufc2 != NULL);
		safe_m_freem(&mbufc);
		return;
	}
	mbufc2->m_pkthdr.len = size1 + size2;
	mbufc2->m_len = size1;

	xnb_mbufc2pkt(mbufc2, &pkt, start, free_slots);
	XNB_ASSERT(xnb_pkt_is_valid(&pkt));
	XNB_ASSERT(pkt.size == size1 + size2);
	XNB_ASSERT(pkt.car == start);
	/*
	 * The second m_getm may allocate a new mbuf and append
	 * it to the chain, or it may simply extend the first mbuf.
	 */
	if (mbufc2->m_next != NULL) {
		XNB_ASSERT(pkt.car_size == size1);
		XNB_ASSERT(pkt.list_len == 1);
		XNB_ASSERT(pkt.cdr == start + 1);
	}

	safe_m_freem(&mbufc2);
}

/** xnb_mbufc2pkt on a mbuf chain with >1 mbuf cluster */
static void
xnb_mbufc2pkt_long(char *buffer, size_t buflen)
{
	struct xnb_pkt pkt;
	size_t size = 14 * MCLBYTES / 3;
	size_t size_remaining;
	int free_slots = 15;
	RING_IDX start = 3;
	struct mbuf *mbufc, *m;

	mbufc = m_getm(NULL, size, M_WAITOK, MT_DATA);
	if (mbufc == NULL) {
		XNB_ASSERT(mbufc != NULL);
		return;
	}
	mbufc->m_flags |= M_PKTHDR;

	mbufc->m_pkthdr.len = size;
	size_remaining = size;
	for (m = mbufc; m != NULL; m = m->m_next) {
		m->m_len = MIN(M_TRAILINGSPACE(m), size_remaining);
		size_remaining -= m->m_len;
	}

	xnb_mbufc2pkt(mbufc, &pkt, start, free_slots);
	XNB_ASSERT(xnb_pkt_is_valid(&pkt));
	XNB_ASSERT(pkt.size == size);
	XNB_ASSERT(pkt.car == start);
	XNB_ASSERT(pkt.car_size == mbufc->m_len);
	/*
	 * There should be >1 response in the packet, and there is no
	 * extra info.
	 */
	XNB_ASSERT(! (pkt.flags & NETRXF_extra_info));
	XNB_ASSERT(pkt.cdr == pkt.car + 1);

	safe_m_freem(&mbufc);
}

/** xnb_mbufc2pkt on a mbuf chain with >1 mbuf cluster and extra info */
static void
xnb_mbufc2pkt_extra(char *buffer, size_t buflen)
{
	struct xnb_pkt pkt;
	size_t size = 14 * MCLBYTES / 3;
	size_t size_remaining;
	int free_slots = 15;
	RING_IDX start = 3;
	struct mbuf *mbufc, *m;

	mbufc = m_getm(NULL, size, M_WAITOK, MT_DATA);
	if (mbufc == NULL) {
		XNB_ASSERT(mbufc != NULL);
		return;
	}

	mbufc->m_flags |= M_PKTHDR;
	mbufc->m_pkthdr.len = size;
	mbufc->m_pkthdr.csum_flags |= CSUM_TSO;
	mbufc->m_pkthdr.tso_segsz = TCP_MSS - 40;
	size_remaining = size;
	for (m = mbufc; m != NULL; m = m->m_next) {
		m->m_len = MIN(M_TRAILINGSPACE(m), size_remaining);
		size_remaining -= m->m_len;
	}

	xnb_mbufc2pkt(mbufc, &pkt, start, free_slots);
	XNB_ASSERT(xnb_pkt_is_valid(&pkt));
	XNB_ASSERT(pkt.size == size);
	XNB_ASSERT(pkt.car == start);
	XNB_ASSERT(pkt.car_size == mbufc->m_len);
	/* There should be >1 response in the packet, there is extra info */
	XNB_ASSERT(pkt.flags & NETRXF_extra_info);
	XNB_ASSERT(pkt.flags & NETRXF_data_validated);
	XNB_ASSERT(pkt.cdr == pkt.car + 2);
	XNB_ASSERT(pkt.extra.u.gso.size == mbufc->m_pkthdr.tso_segsz);
	XNB_ASSERT(pkt.extra.type == XEN_NETIF_EXTRA_TYPE_GSO);
	XNB_ASSERT(! (pkt.extra.flags & XEN_NETIF_EXTRA_FLAG_MORE));

	safe_m_freem(&mbufc);
}

/** xnb_mbufc2pkt with insufficient space in the ring */
static void
xnb_mbufc2pkt_nospace(char *buffer, size_t buflen)
{
	struct xnb_pkt pkt;
	size_t size = 14 * MCLBYTES / 3;
	size_t size_remaining;
	int free_slots = 2;
	RING_IDX start = 3;
	struct mbuf *mbufc, *m;
	int error;

	mbufc = m_getm(NULL, size, M_WAITOK, MT_DATA);
	if (mbufc == NULL) {
		XNB_ASSERT(mbufc != NULL);
		return;
	}
	mbufc->m_flags |= M_PKTHDR;

	mbufc->m_pkthdr.len = size;
	size_remaining = size;
	for (m = mbufc; m != NULL; m = m->m_next) {
		m->m_len = MIN(M_TRAILINGSPACE(m), size_remaining);
		size_remaining -= m->m_len;
	}

	error = xnb_mbufc2pkt(mbufc, &pkt, start, free_slots);
	XNB_ASSERT(error == EAGAIN);
	XNB_ASSERT(! xnb_pkt_is_valid(&pkt));

	safe_m_freem(&mbufc);
}

/**
 * xnb_rxpkt2gnttab on an empty packet.  Should return an empty gnttab
 */
static void
xnb_rxpkt2gnttab_empty(char *buffer, size_t buflen)
{
	struct xnb_pkt pkt;
	int nr_entries;
	int free_slots = 60;
	struct mbuf *mbuf;

	mbuf = m_get(M_WAITOK, MT_DATA);

	xnb_mbufc2pkt(mbuf, &pkt, 0, free_slots);
	nr_entries = xnb_rxpkt2gnttab(&pkt, mbuf, xnb_unit_pvt.gnttab,
			&xnb_unit_pvt.rxb, DOMID_FIRST_RESERVED);

	XNB_ASSERT(nr_entries == 0);

	safe_m_freem(&mbuf);
}

/** xnb_rxpkt2gnttab on a short packet without extra data */
static void
xnb_rxpkt2gnttab_short(char *buffer, size_t buflen)
{
	struct xnb_pkt pkt;
	int nr_entries;
	size_t size = 128;
	int free_slots = 60;
	RING_IDX start = 9;
	struct netif_rx_request *req;
	struct mbuf *mbuf;

	mbuf = m_getm(NULL, size, M_WAITOK, MT_DATA);
	mbuf->m_flags |= M_PKTHDR;
	mbuf->m_pkthdr.len = size;
	mbuf->m_len = size;

	xnb_mbufc2pkt(mbuf, &pkt, start, free_slots);
	req = RING_GET_REQUEST(&xnb_unit_pvt.rxf,
			       xnb_unit_pvt.txf.req_prod_pvt);
	req->gref = 7;

	nr_entries = xnb_rxpkt2gnttab(&pkt, mbuf, xnb_unit_pvt.gnttab,
				      &xnb_unit_pvt.rxb, DOMID_FIRST_RESERVED);

	XNB_ASSERT(nr_entries == 1);
	XNB_ASSERT(xnb_unit_pvt.gnttab[0].len == size);
	/* flags should indicate gref's for dest */
	XNB_ASSERT(xnb_unit_pvt.gnttab[0].flags & GNTCOPY_dest_gref);
	XNB_ASSERT(xnb_unit_pvt.gnttab[0].dest.offset == 0);
	XNB_ASSERT(xnb_unit_pvt.gnttab[0].source.domid == DOMID_SELF);
	XNB_ASSERT(xnb_unit_pvt.gnttab[0].source.offset == virt_to_offset(
		   mtod(mbuf, vm_offset_t)));
	XNB_ASSERT(xnb_unit_pvt.gnttab[0].source.u.gmfn ==
		   virt_to_mfn(mtod(mbuf, vm_offset_t)));
	XNB_ASSERT(xnb_unit_pvt.gnttab[0].dest.domid == DOMID_FIRST_RESERVED);

	safe_m_freem(&mbuf);
}

/**
 * xnb_rxpkt2gnttab on a packet with two different mbufs in a single chain
 */
static void
xnb_rxpkt2gnttab_2req(char *buffer, size_t buflen)
{
	struct xnb_pkt pkt;
	int nr_entries;
	int i, num_mbufs;
	size_t total_granted_size = 0;
	size_t size = MJUMPAGESIZE + 1;
	int free_slots = 60;
	RING_IDX start = 11;
	struct netif_rx_request *req;
	struct mbuf *mbuf, *m;

	mbuf = m_getm(NULL, size, M_WAITOK, MT_DATA);
	mbuf->m_flags |= M_PKTHDR;
	mbuf->m_pkthdr.len = size;
	mbuf->m_len = size;

	xnb_mbufc2pkt(mbuf, &pkt, start, free_slots);

	for (i = 0, m = mbuf; m != NULL; i++, m = m->m_next) {
		req = RING_GET_REQUEST(&xnb_unit_pvt.rxf,
		    xnb_unit_pvt.txf.req_prod_pvt);
		req->gref = i;
		req->id = 5;
	}
	num_mbufs = i;

	nr_entries = xnb_rxpkt2gnttab(&pkt, mbuf, xnb_unit_pvt.gnttab,
			&xnb_unit_pvt.rxb, DOMID_FIRST_RESERVED);

	XNB_ASSERT(nr_entries >= num_mbufs);
	for (i = 0; i < nr_entries; i++) {
		int end_offset = xnb_unit_pvt.gnttab[i].len +
			xnb_unit_pvt.gnttab[i].dest.offset;
		XNB_ASSERT(end_offset <= PAGE_SIZE);
		total_granted_size += xnb_unit_pvt.gnttab[i].len;
	}
	XNB_ASSERT(total_granted_size == size);

	safe_m_freem(&mbuf);
}
1741
1742/**
1743 * xnb_rxpkt2rsp on an empty packet.  Shouldn't make any response
1744 */
1745static void
1746xnb_rxpkt2rsp_empty(char *buffer, size_t buflen)
1747{
1748	struct xnb_pkt pkt;
1749	int nr_entries;
1750	int nr_reqs;
1751	int free_slots = 60;
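	/* Snapshot the ring state; an empty packet must leave it untouched */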
1752	netif_rx_back_ring_t rxb_backup = xnb_unit_pvt.rxb;
1753	netif_rx_sring_t rxs_backup = *xnb_unit_pvt.rxs;
1754	struct mbuf *mbuf;
1755
1756	mbuf = m_get(M_WAITOK, MT_DATA);
1757
1758	xnb_mbufc2pkt(mbuf, &pkt, 0, free_slots);
1759	nr_entries = xnb_rxpkt2gnttab(&pkt, mbuf, xnb_unit_pvt.gnttab,
1760			&xnb_unit_pvt.rxb, DOMID_FIRST_RESERVED);
1761
1762	nr_reqs = xnb_rxpkt2rsp(&pkt, xnb_unit_pvt.gnttab, nr_entries,
1763	    &xnb_unit_pvt.rxb);
1764	XNB_ASSERT(nr_reqs == 0);
1765	XNB_ASSERT(
1766	    memcmp(&rxb_backup, &xnb_unit_pvt.rxb, sizeof(rxb_backup)) == 0);
1767	XNB_ASSERT(
1768	    memcmp(&rxs_backup, xnb_unit_pvt.rxs, sizeof(rxs_backup)) == 0);
1769
1770	safe_m_freem(&mbuf);
1771}
1772
1773/**
1774 * xnb_rxpkt2rsp on a short packet with no extras
1775 */
1776static void
1777xnb_rxpkt2rsp_short(char *buffer, size_t buflen)
1778{
1779	struct xnb_pkt pkt;
1780	int nr_entries, nr_reqs;
1781	size_t size = 128;
1782	int free_slots = 60;
1783	RING_IDX start = 5;
1784	struct netif_rx_request *req;
1785	struct netif_rx_response *rsp;
1786	struct mbuf *mbuf;
1787
1788	mbuf = m_getm(NULL, size, M_WAITOK, MT_DATA);
1789	mbuf->m_flags |= M_PKTHDR;
1790	mbuf->m_pkthdr.len = size;
1791	mbuf->m_len = size;
1792
1793	xnb_mbufc2pkt(mbuf, &pkt, start, free_slots);
1794	req = RING_GET_REQUEST(&xnb_unit_pvt.rxf, start);
1795	req->gref = 7;
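	/*
	 * Make the shared ring look like the frontend has posted exactly one
	 * request and consumed no responses.
	 */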
1796	xnb_unit_pvt.rxb.req_cons = start;
1797	xnb_unit_pvt.rxb.rsp_prod_pvt = start;
1798	xnb_unit_pvt.rxs->req_prod = start + 1;
1799	xnb_unit_pvt.rxs->rsp_prod = start;
1800
1801	nr_entries = xnb_rxpkt2gnttab(&pkt, mbuf, xnb_unit_pvt.gnttab,
1802			&xnb_unit_pvt.rxb, DOMID_FIRST_RESERVED);
1803
1804	nr_reqs = xnb_rxpkt2rsp(&pkt, xnb_unit_pvt.gnttab, nr_entries,
1805	    &xnb_unit_pvt.rxb);
1806
1807	XNB_ASSERT(nr_reqs == 1);
1808	XNB_ASSERT(xnb_unit_pvt.rxb.rsp_prod_pvt == start + 1);
1809	rsp = RING_GET_RESPONSE(&xnb_unit_pvt.rxb, start);
1810	XNB_ASSERT(rsp->id == req->id);
1811	XNB_ASSERT(rsp->offset == 0);
1812	XNB_ASSERT((rsp->flags & (NETRXF_more_data | NETRXF_extra_info)) == 0);
1813	XNB_ASSERT(rsp->status == size);
1814
1815	safe_m_freem(&mbuf);
1816}
1817
1818/**
1819 * xnb_rxpkt2rsp with extra data
1820 */
1821static void
1822xnb_rxpkt2rsp_extra(char *buffer, size_t buflen)
1823{
1824	struct xnb_pkt pkt;
1825	int nr_entries, nr_reqs;
1826	size_t size = 14;
1827	int free_slots = 15;
1828	RING_IDX start = 3;
1829	uint16_t id = 49;
1830	uint16_t gref = 65;
1831	uint16_t mss = TCP_MSS - 40;
1832	struct mbuf *mbufc;
1833	struct netif_rx_request *req;
1834	struct netif_rx_response *rsp;
1835	struct netif_extra_info *ext;
1836
1837	mbufc = m_getm(NULL, size, M_WAITOK, MT_DATA);
1838	if (mbufc == NULL) {
1839		XNB_ASSERT(mbufc != NULL);
1840		return;
1841	}
1842
1843	mbufc->m_flags |= M_PKTHDR;
1844	mbufc->m_pkthdr.len = size;
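	/* Mark the mbuf TSO so that the packet needs a GSO extra-info slot */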
1845	mbufc->m_pkthdr.csum_flags |= CSUM_TSO;
1846	mbufc->m_pkthdr.tso_segsz = mss;
1847	mbufc->m_len = size;
1848
1849	xnb_mbufc2pkt(mbufc, &pkt, start, free_slots);
1850	req = RING_GET_REQUEST(&xnb_unit_pvt.rxf, start);
1851	req->id = id;
1852	req->gref = gref;
1853	req = RING_GET_REQUEST(&xnb_unit_pvt.rxf, start + 1);
1854	req->id = id + 1;
1855	req->gref = gref + 1;
1856	xnb_unit_pvt.rxb.req_cons = start;
1857	xnb_unit_pvt.rxb.rsp_prod_pvt = start;
1858	xnb_unit_pvt.rxs->req_prod = start + 2;
1859	xnb_unit_pvt.rxs->rsp_prod = start;
1860
1861	nr_entries = xnb_rxpkt2gnttab(&pkt, mbufc, xnb_unit_pvt.gnttab,
1862			&xnb_unit_pvt.rxb, DOMID_FIRST_RESERVED);
1863
1864	nr_reqs = xnb_rxpkt2rsp(&pkt, xnb_unit_pvt.gnttab, nr_entries,
1865	    &xnb_unit_pvt.rxb);
1866
1867	XNB_ASSERT(nr_reqs == 2);
1868	XNB_ASSERT(xnb_unit_pvt.rxb.rsp_prod_pvt == start + 2);
1869	rsp = RING_GET_RESPONSE(&xnb_unit_pvt.rxb, start);
1870	XNB_ASSERT(rsp->id == id);
1871	XNB_ASSERT((rsp->flags & NETRXF_more_data) == 0);
1872	XNB_ASSERT((rsp->flags & NETRXF_extra_info));
1873	XNB_ASSERT((rsp->flags & NETRXF_data_validated));
1874	XNB_ASSERT((rsp->flags & NETRXF_csum_blank));
1875	XNB_ASSERT(rsp->status == size);
1876
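	/* The second slot carries the GSO extra info, not a normal response */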
1877	ext = (struct netif_extra_info*)
1878		RING_GET_RESPONSE(&xnb_unit_pvt.rxb, start + 1);
1879	XNB_ASSERT(ext->type == XEN_NETIF_EXTRA_TYPE_GSO);
1880	XNB_ASSERT(! (ext->flags & XEN_NETIF_EXTRA_FLAG_MORE));
1881	XNB_ASSERT(ext->u.gso.size == mss);
1882	XNB_ASSERT(ext->u.gso.type == XEN_NETIF_GSO_TYPE_TCPV4);
1883
1884	safe_m_freem(&mbufc);
1885}
1886
1887/**
1888 * xnb_rxpkt2rsp on a packet with more than a page's worth of data.  It should
1889 * generate two response slots
1890 */
1891static void
1892xnb_rxpkt2rsp_2slots(char *buffer, size_t buflen)
1893{
1894	struct xnb_pkt pkt;
1895	int nr_entries, nr_reqs;
1896	size_t size = PAGE_SIZE + 100;
1897	int free_slots = 3;
1898	uint16_t id1 = 17;
1899	uint16_t id2 = 37;
1900	uint16_t gref1 = 24;
1901	uint16_t gref2 = 34;
1902	RING_IDX start = 15;
1903	struct netif_rx_request *req;
1904	struct netif_rx_response *rsp;
1905	struct mbuf *mbuf;
1906
1907	mbuf = m_getm(NULL, size, M_WAITOK, MT_DATA);
1908	mbuf->m_flags |= M_PKTHDR;
1909	mbuf->m_pkthdr.len = size;
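	/* m_getm may have returned a chain; spread the data over its members */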
1910	if (mbuf->m_next != NULL) {
1911		size_t first_len = MIN(M_TRAILINGSPACE(mbuf), size);
1912		mbuf->m_len = first_len;
1913		mbuf->m_next->m_len = size - first_len;
1914
1915	} else {
1916		mbuf->m_len = size;
1917	}
1918
1919	xnb_mbufc2pkt(mbuf, &pkt, start, free_slots);
1920	req = RING_GET_REQUEST(&xnb_unit_pvt.rxf, start);
1921	req->gref = gref1;
1922	req->id = id1;
1923	req = RING_GET_REQUEST(&xnb_unit_pvt.rxf, start + 1);
1924	req->gref = gref2;
1925	req->id = id2;
1926	xnb_unit_pvt.rxb.req_cons = start;
1927	xnb_unit_pvt.rxb.rsp_prod_pvt = start;
1928	xnb_unit_pvt.rxs->req_prod = start + 2;
1929	xnb_unit_pvt.rxs->rsp_prod = start;
1930
1931	nr_entries = xnb_rxpkt2gnttab(&pkt, mbuf, xnb_unit_pvt.gnttab,
1932			&xnb_unit_pvt.rxb, DOMID_FIRST_RESERVED);
1933
1934	nr_reqs = xnb_rxpkt2rsp(&pkt, xnb_unit_pvt.gnttab, nr_entries,
1935	    &xnb_unit_pvt.rxb);
1936
1937	XNB_ASSERT(nr_reqs == 2);
1938	XNB_ASSERT(xnb_unit_pvt.rxb.rsp_prod_pvt == start + 2);
1939	rsp = RING_GET_RESPONSE(&xnb_unit_pvt.rxb, start);
1940	XNB_ASSERT(rsp->id == id1);
1941	XNB_ASSERT(rsp->offset == 0);
1942	XNB_ASSERT((rsp->flags & NETRXF_extra_info) == 0);
1943	XNB_ASSERT(rsp->flags & NETRXF_more_data);
1944	XNB_ASSERT(rsp->status == PAGE_SIZE);
1945
1946	rsp = RING_GET_RESPONSE(&xnb_unit_pvt.rxb, start + 1);
1947	XNB_ASSERT(rsp->id == id2);
1948	XNB_ASSERT(rsp->offset == 0);
1949	XNB_ASSERT((rsp->flags & NETRXF_extra_info) == 0);
1950	XNB_ASSERT(! (rsp->flags & NETRXF_more_data));
1951	XNB_ASSERT(rsp->status == size - PAGE_SIZE);
1952
1953	safe_m_freem(&mbuf);
1954}
1955
1956/** xnb_rxpkt2rsp on a grant table with two sub-page entries */
1957static void
1958xnb_rxpkt2rsp_2short(char *buffer, size_t buflen)
{
1959	struct xnb_pkt pkt;
1960	int nr_reqs, nr_entries;
1961	size_t size1 = MHLEN - 5;
1962	size_t size2 = MHLEN - 15;
1963	int free_slots = 32;
1964	RING_IDX start = 14;
1965	uint16_t id = 47;
1966	uint16_t gref = 54;
1967	struct netif_rx_request *req;
1968	struct netif_rx_response *rsp;
1969	struct mbuf *mbufc;
1970
1971	mbufc = m_getm(NULL, size1, M_WAITOK, MT_DATA);
1972	if (mbufc == NULL) {
1973		XNB_ASSERT(mbufc != NULL);
1974		return;
1975	}
1976	mbufc->m_flags |= M_PKTHDR;
1977
1978	m_getm(mbufc, size2, M_WAITOK, MT_DATA);
1979	XNB_ASSERT(mbufc->m_next != NULL);
1980	mbufc->m_pkthdr.len = size1 + size2;
1981	mbufc->m_len = size1;
1982	mbufc->m_next->m_len = size2;
1983
1984	xnb_mbufc2pkt(mbufc, &pkt, start, free_slots);
1985
1986	req = RING_GET_REQUEST(&xnb_unit_pvt.rxf, start);
1987	req->gref = gref;
1988	req->id = id;
1989	xnb_unit_pvt.rxb.req_cons = start;
1990	xnb_unit_pvt.rxb.rsp_prod_pvt = start;
1991	xnb_unit_pvt.rxs->req_prod = start + 1;
1992	xnb_unit_pvt.rxs->rsp_prod = start;
1993
1994	nr_entries = xnb_rxpkt2gnttab(&pkt, mbufc, xnb_unit_pvt.gnttab,
1995			&xnb_unit_pvt.rxb, DOMID_FIRST_RESERVED);
1996
1997	nr_reqs = xnb_rxpkt2rsp(&pkt, xnb_unit_pvt.gnttab, nr_entries,
1998	    &xnb_unit_pvt.rxb);
1999
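	/*
	 * Two sub-page grant entries describe a single packet, so they should
	 * collapse into a single response slot.
	 */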
2000	XNB_ASSERT(nr_entries == 2);
2001	XNB_ASSERT(nr_reqs == 1);
2002	rsp = RING_GET_RESPONSE(&xnb_unit_pvt.rxb, start);
2003	XNB_ASSERT(rsp->id == id);
2004	XNB_ASSERT(rsp->status == size1 + size2);
2005	XNB_ASSERT(rsp->offset == 0);
2006	XNB_ASSERT(! (rsp->flags & (NETRXF_more_data | NETRXF_extra_info)));
2007
2008	safe_m_freem(&mbufc);
2009}
2010
2011/**
2012 * xnb_rxpkt2rsp on a long packet with a hypervisor gnttab_copy error
2013 * Note: this test will result in an error message being printed to the console
2014 * such as:
2015 * xnb(xnb_rxpkt2rsp:1720): Got error -1 for hypervisor gnttab_copy status
2016 */
2017static void
2018xnb_rxpkt2rsp_copyerror(char *buffer, size_t buflen)
2019{
2020	struct xnb_pkt pkt;
2021	int nr_entries, nr_reqs;
2022	int id = 7;
2023	int gref = 42;
2024	uint16_t canary = 6859;
2025	size_t size = 7 * MCLBYTES;
2026	int free_slots = 9;
2027	RING_IDX start = 2;
2028	struct netif_rx_request *req;
2029	struct netif_rx_response *rsp;
2030	struct mbuf *mbuf;
2031
2032	mbuf = m_getm(NULL, size, M_WAITOK, MT_DATA);
2033	mbuf->m_flags |= M_PKTHDR;
2034	mbuf->m_pkthdr.len = size;
2035	mbuf->m_len = size;
2036
2037	xnb_mbufc2pkt(mbuf, &pkt, start, free_slots);
2038	req = RING_GET_REQUEST(&xnb_unit_pvt.rxf, start);
2039	req->gref = gref;
2040	req->id = id;
2041	xnb_unit_pvt.rxb.req_cons = start;
2042	xnb_unit_pvt.rxb.rsp_prod_pvt = start;
2043	xnb_unit_pvt.rxs->req_prod = start + 1;
2044	xnb_unit_pvt.rxs->rsp_prod = start;
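	/* Post a canary request just past the packet; the error path must not
	 * touch it. */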
2045	req = RING_GET_REQUEST(&xnb_unit_pvt.rxf, start + 1);
2046	req->gref = canary;
2047	req->id = canary;
2048
2049	nr_entries = xnb_rxpkt2gnttab(&pkt, mbuf, xnb_unit_pvt.gnttab,
2050			&xnb_unit_pvt.rxb, DOMID_FIRST_RESERVED);
2051	/* Inject the error */
2052	xnb_unit_pvt.gnttab[2].status = GNTST_general_error;
2053
2054	nr_reqs = xnb_rxpkt2rsp(&pkt, xnb_unit_pvt.gnttab, nr_entries,
2055	    &xnb_unit_pvt.rxb);
2056
2057	XNB_ASSERT(nr_reqs == 1);
2058	XNB_ASSERT(xnb_unit_pvt.rxb.rsp_prod_pvt == start + 1);
2059	rsp = RING_GET_RESPONSE(&xnb_unit_pvt.rxb, start);
2060	XNB_ASSERT(rsp->id == id);
2061	XNB_ASSERT(rsp->status == NETIF_RSP_ERROR);
2062	req = RING_GET_REQUEST(&xnb_unit_pvt.rxf, start + 1);
2063	XNB_ASSERT(req->gref == canary);
2064	XNB_ASSERT(req->id == canary);
2065
2066	safe_m_freem(&mbuf);
2067}
2068
2069/**
2070 * xnb_add_mbuf_cksum on an ARP request packet
2071 */
2072static void
2073xnb_add_mbuf_cksum_arp(char *buffer, size_t buflen)
2074{
2075	const size_t pkt_len = sizeof(struct ether_header) +
2076		sizeof(struct ether_arp);
2077	struct mbuf *mbufc;
2078	struct ether_header *eh;
2079	struct ether_arp *ep;
2080	unsigned char pkt_orig[pkt_len];
2081
2082	mbufc = m_getm(NULL, pkt_len, M_WAITOK, MT_DATA);
2083	/* Fill in an example arp request */
2084	eh = mtod(mbufc, struct ether_header*);
2085	eh->ether_dhost[0] = 0xff;
2086	eh->ether_dhost[1] = 0xff;
2087	eh->ether_dhost[2] = 0xff;
2088	eh->ether_dhost[3] = 0xff;
2089	eh->ether_dhost[4] = 0xff;
2090	eh->ether_dhost[5] = 0xff;
2091	eh->ether_shost[0] = 0x00;
2092	eh->ether_shost[1] = 0x15;
2093	eh->ether_shost[2] = 0x17;
2094	eh->ether_shost[3] = 0xe9;
2095	eh->ether_shost[4] = 0x30;
2096	eh->ether_shost[5] = 0x68;
2097	eh->ether_type = htons(ETHERTYPE_ARP);
2098	ep = (struct ether_arp*)(eh + 1);
2099	ep->ea_hdr.ar_hrd = htons(ARPHRD_ETHER);
2100	ep->ea_hdr.ar_pro = htons(ETHERTYPE_IP);
2101	ep->ea_hdr.ar_hln = 6;
2102	ep->ea_hdr.ar_pln = 4;
2103	ep->ea_hdr.ar_op = htons(ARPOP_REQUEST);
2104	ep->arp_sha[0] = 0x00;
2105	ep->arp_sha[1] = 0x15;
2106	ep->arp_sha[2] = 0x17;
2107	ep->arp_sha[3] = 0xe9;
2108	ep->arp_sha[4] = 0x30;
2109	ep->arp_sha[5] = 0x68;
2110	ep->arp_spa[0] = 0xc0;
2111	ep->arp_spa[1] = 0xa8;
2112	ep->arp_spa[2] = 0x0a;
2113	ep->arp_spa[3] = 0x04;
2114	bzero(&(ep->arp_tha), ETHER_ADDR_LEN);
2115	ep->arp_tpa[0] = 0xc0;
2116	ep->arp_tpa[1] = 0xa8;
2117	ep->arp_tpa[2] = 0x0a;
2118	ep->arp_tpa[3] = 0x06;
2119
2120	/* fill in the length field */
2121	mbufc->m_len = pkt_len;
2122	mbufc->m_pkthdr.len = pkt_len;
2123	/* indicate that the netfront uses hw-assisted checksums */
2124	mbufc->m_pkthdr.csum_flags = CSUM_IP_CHECKED | CSUM_IP_VALID   |
2125				CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
2126
2127	/* Make a backup copy of the packet */
2128	bcopy(mtod(mbufc, const void*), pkt_orig, pkt_len);
2129
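	/* ARP is not an IP packet, so no checksum should be inserted */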
2130	/* Function under test */
2131	xnb_add_mbuf_cksum(mbufc);
2132
2133	/* Verify that the packet's data did not change */
2134	XNB_ASSERT(bcmp(mtod(mbufc, const void*), pkt_orig, pkt_len) == 0);
2135	m_freem(mbufc);
2136}
2137
2138/**
2139 * Helper function that populates the ethernet header and IP header used by
2140 * some of the xnb_add_mbuf_cksum unit tests.  m must already be allocated
2141 * and must be large enough
2142 */
2143static void
2144xnb_fill_eh_and_ip(struct mbuf *m, uint16_t ip_len, uint16_t ip_id,
2145		   uint16_t ip_p, uint16_t ip_off, uint16_t ip_sum)
2146{
2147	struct ether_header *eh;
2148	struct ip *iph;
2149
2150	eh = mtod(m, struct ether_header*);
2151	eh->ether_dhost[0] = 0x00;
2152	eh->ether_dhost[1] = 0x16;
2153	eh->ether_dhost[2] = 0x3e;
2154	eh->ether_dhost[3] = 0x23;
2155	eh->ether_dhost[4] = 0x50;
2156	eh->ether_dhost[5] = 0x0b;
2157	eh->ether_shost[0] = 0x00;
2158	eh->ether_shost[1] = 0x16;
2159	eh->ether_shost[2] = 0x30;
2160	eh->ether_shost[3] = 0x00;
2161	eh->ether_shost[4] = 0x00;
2162	eh->ether_shost[5] = 0x00;
2163	eh->ether_type = htons(ETHERTYPE_IP);
2164	iph = (struct ip*)(eh + 1);
2165	iph->ip_hl = 0x5;	/* 5 dwords == 20 bytes */
2166	iph->ip_v = 4;		/* IP v4 */
2167	iph->ip_tos = 0;
2168	iph->ip_len = htons(ip_len);
2169	iph->ip_id = htons(ip_id);
2170	iph->ip_off = htons(ip_off);
2171	iph->ip_ttl = 64;
2172	iph->ip_p = ip_p;
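	/* ip_sum is caller-supplied: zero, or a deliberately bad value */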
2173	iph->ip_sum = htons(ip_sum);
2174	iph->ip_src.s_addr = htonl(0xc0a80a04);
2175	iph->ip_dst.s_addr = htonl(0xc0a80a05);
2176}
2177
2178/**
2179 * xnb_add_mbuf_cksum on an ICMP packet, based on a tcpdump of an actual
2180 * ICMP packet
2181 */
2182static void
2183xnb_add_mbuf_cksum_icmp(char *buffer, size_t buflen)
2184{
2185	const size_t icmp_len = 64;	/* set by ping(1) */
2186	const size_t pkt_len = sizeof(struct ether_header) +
2187		sizeof(struct ip) + icmp_len;
2188	struct mbuf *mbufc;
2189	struct ether_header *eh;
2190	struct ip *iph;
2191	struct icmp *icmph;
2192	unsigned char pkt_orig[icmp_len];
2193	uint32_t *tv_field;
2194	uint8_t *data_payload;
2195	int i;
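	/* Expected checksums, from the capture this test packet is based on */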
2196	const uint16_t ICMP_CSUM = 0xaed7;
2197	const uint16_t IP_CSUM = 0xe533;
2198
2199	mbufc = m_getm(NULL, pkt_len, M_WAITOK, MT_DATA);
2200	/* Fill in an example ICMP ping request */
2201	eh = mtod(mbufc, struct ether_header*);
2202	xnb_fill_eh_and_ip(mbufc, 84, 28, IPPROTO_ICMP, 0, 0);
2203	iph = (struct ip*)(eh + 1);
2204	icmph = (struct icmp*)(iph + 1);
2205	icmph->icmp_type = ICMP_ECHO;
2206	icmph->icmp_code = 0;
2207	icmph->icmp_cksum = htons(ICMP_CSUM);
2208	icmph->icmp_id = htons(31492);
2209	icmph->icmp_seq = htons(0);
2210	/*
2211	 * ping(1) uses bcopy to insert a native-endian timeval after icmp_seq.
2212	 * For this test, we will set the bytes individually for portability.
2213	 */
2214	tv_field = (uint32_t*)(&(icmph->icmp_hun));
2215	tv_field[0] = 0x4f02cfac;
2216	tv_field[1] = 0x0007c46a;
2217	/*
2218	 * Remainder of packet is an incrementing 8-bit integer, starting with 8
2219	 */
2220	data_payload = (uint8_t*)(&tv_field[2]);
2221	for (i = 8; i < 37; i++) {
2222		*data_payload++ = i;
2223	}
2224
2225	/* fill in the length field */
2226	mbufc->m_len = pkt_len;
2227	mbufc->m_pkthdr.len = pkt_len;
2228	/* indicate that the netfront uses hw-assisted checksums */
2229	mbufc->m_pkthdr.csum_flags = CSUM_IP_CHECKED | CSUM_IP_VALID   |
2230				CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
2231
	/* Back up the ICMP portion so we can check that it is unchanged */
2232	bcopy(icmph, pkt_orig, icmp_len);
2233	/* Function under test */
2234	xnb_add_mbuf_cksum(mbufc);
2235
2236	/* Check the IP checksum */
2237	XNB_ASSERT(iph->ip_sum == htons(IP_CSUM));
2238
2239	/* Check that the ICMP packet did not change */
2240	XNB_ASSERT(bcmp(icmph, pkt_orig, icmp_len) == 0);
2241	m_freem(mbufc);
2242}
2243
2244/**
2245 * xnb_add_mbuf_cksum on a UDP packet, based on a tcpdump of an actual
2246 * UDP packet
2247 */
2248static void
2249xnb_add_mbuf_cksum_udp(char *buffer, size_t buflen)
2250{
2251	const size_t udp_len = 16;
2252	const size_t pkt_len = sizeof(struct ether_header) +
2253		sizeof(struct ip) + udp_len;
2254	struct mbuf *mbufc;
2255	struct ether_header *eh;
2256	struct ip *iph;
2257	struct udphdr *udp;
2258	uint8_t *data_payload;
2259	const uint16_t IP_CSUM = 0xe56b;
2260	const uint16_t UDP_CSUM = 0xdde2;
2261
2262	mbufc = m_getm(NULL, pkt_len, M_WAITOK, MT_DATA);
2263	/* Fill in an example UDP packet made by 'uname | nc -u <host> 2222' */
2264	eh = mtod(mbufc, struct ether_header*);
2265	xnb_fill_eh_and_ip(mbufc, 36, 4, IPPROTO_UDP, 0, 0xbaad);
2266	iph = (struct ip*)(eh + 1);
2267	udp = (struct udphdr*)(iph + 1);
2268	udp->uh_sport = htons(0x51ae);
2269	udp->uh_dport = htons(0x08ae);
2270	udp->uh_ulen = htons(udp_len);
2271	udp->uh_sum = htons(0xbaad);  /* xnb_add_mbuf_cksum will fill this in */
2272	data_payload = (uint8_t*)(udp + 1);
2273	data_payload[0] = 'F';
2274	data_payload[1] = 'r';
2275	data_payload[2] = 'e';
2276	data_payload[3] = 'e';
2277	data_payload[4] = 'B';
2278	data_payload[5] = 'S';
2279	data_payload[6] = 'D';
2280	data_payload[7] = '\n';
2281
2282	/* fill in the length field */
2283	mbufc->m_len = pkt_len;
2284	mbufc->m_pkthdr.len = pkt_len;
2285	/* indicate that the netfront uses hw-assisted checksums */
2286	mbufc->m_pkthdr.csum_flags = CSUM_IP_CHECKED | CSUM_IP_VALID   |
2287				CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
2288
2289	/* Function under test */
2290	xnb_add_mbuf_cksum(mbufc);
2291
2292	/* Check the checksums */
2293	XNB_ASSERT(iph->ip_sum == htons(IP_CSUM));
2294	XNB_ASSERT(udp->uh_sum == htons(UDP_CSUM));
2295
2296	m_freem(mbufc);
2297}
2298
2299/**
2300 * Helper function that populates a TCP packet used by all of the
2301 * xnb_add_mbuf_cksum tcp unit tests.  m must already be allocated and must be
2302 * large enough
2303 */
2304static void
2305xnb_fill_tcp(struct mbuf *m)
2306{
2307	struct ether_header *eh;
2308	struct ip *iph;
2309	struct tcphdr *tcp;
2310	uint32_t *options;
2311	uint8_t *data_payload;
2312
2313	/* Fill in an example TCP packet made by 'uname | nc <host> 2222' */
2314	eh = mtod(m, struct ether_header*);
2315	xnb_fill_eh_and_ip(m, 60, 8, IPPROTO_TCP, IP_DF, 0);
2316	iph = (struct ip*)(eh + 1);
2317	tcp = (struct tcphdr*)(iph + 1);
2318	tcp->th_sport = htons(0x9cd9);
2319	tcp->th_dport = htons(2222);
2320	tcp->th_seq = htonl(0x00f72b10);
2321	tcp->th_ack = htonl(0x7f37ba6c);
2322	tcp->th_x2 = 0;
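	/* 8 32-bit words == 32 bytes: 20-byte header plus 12 bytes of options */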
2323	tcp->th_off = 8;
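	/* 0x18 == PSH | ACK */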
2324	tcp->th_flags = 0x18;
2325	tcp->th_win = htons(0x410);
2326	/* th_sum is incorrect; will be inserted by function under test */
2327	tcp->th_sum = htons(0xbaad);
2328	tcp->th_urp = htons(0);
2329	/*
2330	 * The following 12 bytes of options encode:
2331	 * [nop, nop, TS val 33247 ecr 3457687679]
2332	 */
2333	options = (uint32_t*)(tcp + 1);
2334	options[0] = htonl(0x0101080a);
2335	options[1] = htonl(0x000081df);
2336	options[2] = htonl(0xce18207f);
2337	data_payload = (uint8_t*)(&options[3]);
2338	data_payload[0] = 'F';
2339	data_payload[1] = 'r';
2340	data_payload[2] = 'e';
2341	data_payload[3] = 'e';
2342	data_payload[4] = 'B';
2343	data_payload[5] = 'S';
2344	data_payload[6] = 'D';
2345	data_payload[7] = '\n';
2346}
2347
2348/**
2349 * xnb_add_mbuf_cksum on a TCP packet, based on a tcpdump of an actual TCP
2350 * packet
2351 */
2352static void
2353xnb_add_mbuf_cksum_tcp(char *buffer, size_t buflen)
2354{
2355	const size_t payload_len = 8;
2356	const size_t tcp_options_len = 12;
2357	const size_t pkt_len = sizeof(struct ether_header) + sizeof(struct ip) +
2358	    sizeof(struct tcphdr) + tcp_options_len + payload_len;
2359	struct mbuf *mbufc;
2360	struct ether_header *eh;
2361	struct ip *iph;
2362	struct tcphdr *tcp;
2363	const uint16_t IP_CSUM = 0xa55a;
2364	const uint16_t TCP_CSUM = 0x2f64;
2365
2366	mbufc = m_getm(NULL, pkt_len, M_WAITOK, MT_DATA);
2367	/* Fill in an example TCP packet made by 'uname | nc <host> 2222' */
2368	xnb_fill_tcp(mbufc);
2369	eh = mtod(mbufc, struct ether_header*);
2370	iph = (struct ip*)(eh + 1);
2371	tcp = (struct tcphdr*)(iph + 1);
2372
2373	/* fill in the length field */
2374	mbufc->m_len = pkt_len;
2375	mbufc->m_pkthdr.len = pkt_len;
2376	/* indicate that the netfront uses hw-assisted checksums */
2377	mbufc->m_pkthdr.csum_flags = CSUM_IP_CHECKED | CSUM_IP_VALID   |
2378				CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
2379
2380	/* Function under test */
2381	xnb_add_mbuf_cksum(mbufc);
2382
2383	/* Check the checksums */
2384	XNB_ASSERT(iph->ip_sum == htons(IP_CSUM));
2385	XNB_ASSERT(tcp->th_sum == htons(TCP_CSUM));
2386
2387	m_freem(mbufc);
2388}
2389
2390/**
2391 * xnb_add_mbuf_cksum on a TCP packet that does not use HW assisted checksums
2392 */
2393static void
2394xnb_add_mbuf_cksum_tcp_swcksum(char *buffer, size_t buflen)
2395{
2396	const size_t payload_len = 8;
2397	const size_t tcp_options_len = 12;
2398	const size_t pkt_len = sizeof(struct ether_header) + sizeof(struct ip) +
2399	    sizeof(struct tcphdr) + tcp_options_len + payload_len;
2400	struct mbuf *mbufc;
2401	struct ether_header *eh;
2402	struct ip *iph;
2403	struct tcphdr *tcp;
2404	/* Use deliberately bad checksums, and verify that they don't get */
2405	/* corrected by xnb_add_mbuf_cksum */
2406	const uint16_t IP_CSUM = 0xdead;
2407	const uint16_t TCP_CSUM = 0xbeef;
2408
2409	mbufc = m_getm(NULL, pkt_len, M_WAITOK, MT_DATA);
2410	/* Fill in an example TCP packet made by 'uname | nc <host> 2222' */
2411	xnb_fill_tcp(mbufc);
2412	eh = mtod(mbufc, struct ether_header*);
2413	iph = (struct ip*)(eh + 1);
2414	iph->ip_sum = htons(IP_CSUM);
2415	tcp = (struct tcphdr*)(iph + 1);
2416	tcp->th_sum = htons(TCP_CSUM);
2417
2418	/* fill in the length field */
2419	mbufc->m_len = pkt_len;
2420	mbufc->m_pkthdr.len = pkt_len;
2421	/* indicate that the netfront does not use hw-assisted checksums */
2422	mbufc->m_pkthdr.csum_flags = 0;
2423
2424	/* Function under test */
2425	xnb_add_mbuf_cksum(mbufc);
2426
2427	/* Check that the checksums didn't change */
2428	XNB_ASSERT(iph->ip_sum == htons(IP_CSUM));
2429	XNB_ASSERT(tcp->th_sum == htons(TCP_CSUM));
2430
2431	m_freem(mbufc);
2432}
2433
2434/**
2435 * sscanf on unsigned chars
2436 */
2437static void
2438xnb_sscanf_hhu(char *buffer, size_t buflen)
2439{
2440	const char mystr[] = "137";
2441	uint8_t dest[12];
2442	int i;
2443
2444	for (i = 0; i < 12; i++)
2445		dest[i] = 'X';
2446
2447	sscanf(mystr, "%hhu", &dest[4]);
2448	for (i = 0; i < 12; i++)
2449		XNB_ASSERT(dest[i] == (i == 4 ? 137 : 'X'));
2450}
2451
2452/**
2453 * sscanf on signed chars
2454 */
2455static void
2456xnb_sscanf_hhd(char *buffer, size_t buflen)
2457{
2458	const char mystr[] = "-27";
2459	int8_t dest[12];
2460	int i;
2461
2462	for (i = 0; i < 12; i++)
2463		dest[i] = 'X';
2464
2465	sscanf(mystr, "%hhd", &dest[4]);
2466	for (i = 0; i < 12; i++)
2467		XNB_ASSERT(dest[i] == (i == 4 ? -27 : 'X'));
2468}
2469
2470/**
2471 * sscanf on signed long longs
2472 */
2473static void
2474xnb_sscanf_lld(char *buffer, size_t buflen)
2475{
2476	const char mystr[] = "-123456789012345";	/* about -2**47 */
2477	long long dest[3];
2478	int i;
2479
2480	for (i = 0; i < 3; i++)
2481		dest[i] = (long long)0xdeadbeefdeadbeef;
2482
2483	sscanf(mystr, "%lld", &dest[1]);
2484	for (i = 0; i < 3; i++)
2485		XNB_ASSERT(dest[i] == (i != 1 ? (long long)0xdeadbeefdeadbeef :
2486		    -123456789012345));
2487}
2488
2489/**
2490 * sscanf on unsigned long longs
2491 */
2492static void
2493xnb_sscanf_llu(char *buffer, size_t buflen)
2494{
2495	const char mystr[] = "12802747070103273189";
2496	unsigned long long dest[3];
2497	int i;
2498
2499	for (i = 0; i < 3; i++)
2500		dest[i] = (long long)0xdeadbeefdeadbeef;
2501
2502	sscanf(mystr, "%llu", &dest[1]);
2503	for (i = 0; i < 3; i++)
2504		XNB_ASSERT(dest[i] == (i != 1 ? (long long)0xdeadbeefdeadbeef :
2505		    12802747070103273189ull));
2506}
2507
2508/**
2509 * sscanf with %hhn (count of consumed characters stored into an unsigned char)
2510 */
2511static void
2512xnb_sscanf_hhn(char *buffer, size_t buflen)
2513{
2514	const char mystr[] =
2515	    "000102030405060708090a0b0c0d0e0f101112131415161718191a1b1c1d1e1f"
2516	    "202122232425262728292a2b2c2d2e2f303132333435363738393a3b3c3d3e3f"
2517	    "404142434445464748494a4b4c4d4e4f505152535455565758595a5b5c5d5e5f";
2518	unsigned char dest[12];
2519	int i;
2520
2521	for (i = 0; i < 12; i++)
2522		dest[i] = (unsigned char)'X';
2523
2524	sscanf(mystr,
2525	    "000102030405060708090a0b0c0d0e0f101112131415161718191a1b1c1d1e1f"
2526	    "202122232425262728292a2b2c2d2e2f303132333435363738393a3b3c3d3e3f"
2527	    "404142434445464748494a4b4c4d4e4f%hhn", &dest[4]);
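	/* %hhn stores the number of characters consumed: 64 + 64 + 32 == 160 */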
2528	for (i = 0; i < 12; i++)
2529		XNB_ASSERT(dest[i] == (i == 4 ? 160 : 'X'));
2530}
2531