// SPDX-License-Identifier: GPL-2.0-only
/* I/O iterator tests.  This can only test kernel-backed iterator types.
 *
 * Copyright (C) 2023 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/vmalloc.h>
#include <linux/mm.h>
#include <linux/uio.h>
#include <linux/bvec.h>
#include <kunit/test.h>

MODULE_DESCRIPTION("iov_iter testing");
MODULE_AUTHOR("David Howells <dhowells@redhat.com>");
MODULE_LICENSE("GPL");

struct kvec_test_range {
	int	from, to;
};

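/*
 * Ranges of a notional 1MiB buffer from which the test segments are built.
 * A negative 'from' terminates the list; zero-length ranges are included to
 * check that they are handled correctly.
 */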
static const struct kvec_test_range kvec_test_ranges[] = {
	{ 0x00002, 0x00002 },
	{ 0x00027, 0x03000 },
	{ 0x05193, 0x18794 },
	{ 0x20000, 0x20000 },
	{ 0x20000, 0x24000 },
	{ 0x24000, 0x27001 },
	{ 0x29000, 0xffffb },
	{ 0xffffd, 0xffffe },
	{ -1 }
};

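/* Generate a recognisable byte pattern for a given buffer offset. */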
static inline u8 pattern(unsigned long x)
{
	return x & 0xff;
}

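/* KUnit cleanup action: unmap the vmap'd test buffer. */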
static void iov_kunit_unmap(void *data)
{
	vunmap(data);
}

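/*
 * Create a test buffer by allocating npages pages and vmapping them into a
 * contiguous kernel address range.  The page array is returned via *ppages
 * and the mapping is torn down automatically when the test completes.
 */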
static void *__init iov_kunit_create_buffer(struct kunit *test,
					    struct page ***ppages,
					    size_t npages)
{
	struct page **pages;
	unsigned long got;
	void *buffer;

	pages = kunit_kcalloc(test, npages, sizeof(struct page *), GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, pages);
	*ppages = pages;

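	/* Allocate the pages in bulk; release anything we did get if the
	 * allocation comes up short.
	 */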
	got = alloc_pages_bulk_array(GFP_KERNEL, npages, pages);
	if (got != npages) {
		release_pages(pages, got);
		KUNIT_ASSERT_EQ(test, got, npages);
	}

	buffer = vmap(pages, npages, VM_MAP | VM_MAP_PUT_PAGES, PAGE_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, buffer);

	kunit_add_action_or_reset(test, iov_kunit_unmap, buffer);
	return buffer;
}

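/*
 * Fill in a kvec array from the test ranges over the given buffer and attach
 * it to the iterator.  Loading stops at the first negative 'from' or when
 * kvmax entries have been used.
 */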
static void __init iov_kunit_load_kvec(struct kunit *test,
				       struct iov_iter *iter, int dir,
				       struct kvec *kvec, unsigned int kvmax,
				       void *buffer, size_t bufsize,
				       const struct kvec_test_range *pr)
{
	size_t size = 0;
	int i;

	for (i = 0; i < kvmax; i++, pr++) {
		if (pr->from < 0)
			break;
		KUNIT_ASSERT_GE(test, pr->to, pr->from);
		KUNIT_ASSERT_LE(test, pr->to, bufsize);
		kvec[i].iov_base = buffer + pr->from;
		kvec[i].iov_len = pr->to - pr->from;
		size += pr->to - pr->from;
	}
	KUNIT_ASSERT_LE(test, size, bufsize);

	iov_iter_kvec(iter, dir, kvec, i, size);
}

/*
 * Test copying to an ITER_KVEC-type iterator.
 */
static void __init iov_kunit_copy_to_kvec(struct kunit *test)
{
	const struct kvec_test_range *pr;
	struct iov_iter iter;
	struct page **spages, **bpages;
	struct kvec kvec[8];
	u8 *scratch, *buffer;
	size_t bufsize, npages, size, copied;
	int i, patt;

	bufsize = 0x100000;
	npages = bufsize / PAGE_SIZE;

	scratch = iov_kunit_create_buffer(test, &spages, npages);
	for (i = 0; i < bufsize; i++)
		scratch[i] = pattern(i);

	buffer = iov_kunit_create_buffer(test, &bpages, npages);
	memset(buffer, 0, bufsize);

	iov_kunit_load_kvec(test, &iter, READ, kvec, ARRAY_SIZE(kvec),
			    buffer, bufsize, kvec_test_ranges);
	size = iter.count;

	copied = copy_to_iter(scratch, size, &iter);

	KUNIT_EXPECT_EQ(test, copied, size);
	KUNIT_EXPECT_EQ(test, iter.count, 0);
	KUNIT_EXPECT_EQ(test, iter.nr_segs, 0);

	/* Build the expected image in the scratch buffer. */
	patt = 0;
	memset(scratch, 0, bufsize);
	for (pr = kvec_test_ranges; pr->from >= 0; pr++)
		for (i = pr->from; i < pr->to; i++)
			scratch[i] = pattern(patt++);

	/* Compare the images */
	for (i = 0; i < bufsize; i++) {
		KUNIT_EXPECT_EQ_MSG(test, buffer[i], scratch[i], "at i=%x", i);
		if (buffer[i] != scratch[i])
			return;
	}

	KUNIT_SUCCEED();
}

/*
 * Test copying from an ITER_KVEC-type iterator.
 */
static void __init iov_kunit_copy_from_kvec(struct kunit *test)
{
	const struct kvec_test_range *pr;
	struct iov_iter iter;
	struct page **spages, **bpages;
	struct kvec kvec[8];
	u8 *scratch, *buffer;
	size_t bufsize, npages, size, copied;
	int i, j;

	bufsize = 0x100000;
	npages = bufsize / PAGE_SIZE;

	buffer = iov_kunit_create_buffer(test, &bpages, npages);
	for (i = 0; i < bufsize; i++)
		buffer[i] = pattern(i);

	scratch = iov_kunit_create_buffer(test, &spages, npages);
	memset(scratch, 0, bufsize);

	iov_kunit_load_kvec(test, &iter, WRITE, kvec, ARRAY_SIZE(kvec),
			    buffer, bufsize, kvec_test_ranges);
	size = min(iter.count, bufsize);

	copied = copy_from_iter(scratch, size, &iter);

	KUNIT_EXPECT_EQ(test, copied, size);
	KUNIT_EXPECT_EQ(test, iter.count, 0);
	KUNIT_EXPECT_EQ(test, iter.nr_segs, 0);

	/* Build the expected image in the main buffer. */
	i = 0;
	memset(buffer, 0, bufsize);
	for (pr = kvec_test_ranges; pr->from >= 0; pr++) {
		for (j = pr->from; j < pr->to; j++) {
			buffer[i++] = pattern(j);
			if (i >= bufsize)
				goto stop;
		}
	}
stop:

	/* Compare the images */
	for (i = 0; i < bufsize; i++) {
		KUNIT_EXPECT_EQ_MSG(test, scratch[i], buffer[i], "at i=%x", i);
		if (scratch[i] != buffer[i])
			return;
	}

	KUNIT_SUCCEED();
}

struct bvec_test_range {
	int	page, from, to;
};

static const struct bvec_test_range bvec_test_ranges[] = {
	{ 0, 0x0002, 0x0002 },
	{ 1, 0x0027, 0x0893 },
	{ 2, 0x0193, 0x0794 },
	{ 3, 0x0000, 0x1000 },
	{ 4, 0x0000, 0x1000 },
	{ 5, 0x0000, 0x1000 },
	{ 6, 0x0000, 0x0ffb },
	{ 6, 0x0ffd, 0x0ffe },
	{ -1, -1, -1 }
};

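/*
 * Fill in a bio_vec array from the test ranges, merging a range that starts
 * at offset 0 into the preceding bvec when the pages are contiguous, and
 * attach the result to the iterator.
 */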
static void __init iov_kunit_load_bvec(struct kunit *test,
				       struct iov_iter *iter, int dir,
				       struct bio_vec *bvec, unsigned int bvmax,
				       struct page **pages, size_t npages,
				       size_t bufsize,
				       const struct bvec_test_range *pr)
{
	struct page *can_merge = NULL, *page;
	size_t size = 0;
	int i;

	for (i = 0; i < bvmax; i++, pr++) {
		if (pr->from < 0)
			break;
		KUNIT_ASSERT_LT(test, pr->page, npages);
		KUNIT_ASSERT_LT(test, pr->page * PAGE_SIZE, bufsize);
		KUNIT_ASSERT_GE(test, pr->from, 0);
		KUNIT_ASSERT_GE(test, pr->to, pr->from);
		KUNIT_ASSERT_LE(test, pr->to, PAGE_SIZE);

		page = pages[pr->page];
		if (pr->from == 0 && pr->from != pr->to && page == can_merge) {
			i--;
			bvec[i].bv_len += pr->to;
		} else {
			bvec_set_page(&bvec[i], page, pr->to - pr->from, pr->from);
		}

		size += pr->to - pr->from;
		if ((pr->to & ~PAGE_MASK) == 0)
			can_merge = page + pr->to / PAGE_SIZE;
		else
			can_merge = NULL;
	}

	iov_iter_bvec(iter, dir, bvec, i, size);
}

/*
 * Test copying to an ITER_BVEC-type iterator.
 */
static void __init iov_kunit_copy_to_bvec(struct kunit *test)
{
	const struct bvec_test_range *pr;
	struct iov_iter iter;
	struct bio_vec bvec[8];
	struct page **spages, **bpages;
	u8 *scratch, *buffer;
	size_t bufsize, npages, size, copied;
	int i, b, patt;

	bufsize = 0x100000;
	npages = bufsize / PAGE_SIZE;

	scratch = iov_kunit_create_buffer(test, &spages, npages);
	for (i = 0; i < bufsize; i++)
		scratch[i] = pattern(i);

	buffer = iov_kunit_create_buffer(test, &bpages, npages);
	memset(buffer, 0, bufsize);

	iov_kunit_load_bvec(test, &iter, READ, bvec, ARRAY_SIZE(bvec),
			    bpages, npages, bufsize, bvec_test_ranges);
	size = iter.count;

	copied = copy_to_iter(scratch, size, &iter);

	KUNIT_EXPECT_EQ(test, copied, size);
	KUNIT_EXPECT_EQ(test, iter.count, 0);
	KUNIT_EXPECT_EQ(test, iter.nr_segs, 0);

	/* Build the expected image in the scratch buffer. */
	b = 0;
	patt = 0;
	memset(scratch, 0, bufsize);
	for (pr = bvec_test_ranges; pr->from >= 0; pr++, b++) {
		u8 *p = scratch + pr->page * PAGE_SIZE;

		for (i = pr->from; i < pr->to; i++)
			p[i] = pattern(patt++);
	}

	/* Compare the images */
	for (i = 0; i < bufsize; i++) {
		KUNIT_EXPECT_EQ_MSG(test, buffer[i], scratch[i], "at i=%x", i);
		if (buffer[i] != scratch[i])
			return;
	}

	KUNIT_SUCCEED();
}

/*
 * Test copying from an ITER_BVEC-type iterator.
 */
static void __init iov_kunit_copy_from_bvec(struct kunit *test)
{
	const struct bvec_test_range *pr;
	struct iov_iter iter;
	struct bio_vec bvec[8];
	struct page **spages, **bpages;
	u8 *scratch, *buffer;
	size_t bufsize, npages, size, copied;
	int i, j;

	bufsize = 0x100000;
	npages = bufsize / PAGE_SIZE;

	buffer = iov_kunit_create_buffer(test, &bpages, npages);
	for (i = 0; i < bufsize; i++)
		buffer[i] = pattern(i);

	scratch = iov_kunit_create_buffer(test, &spages, npages);
	memset(scratch, 0, bufsize);

	iov_kunit_load_bvec(test, &iter, WRITE, bvec, ARRAY_SIZE(bvec),
			    bpages, npages, bufsize, bvec_test_ranges);
	size = iter.count;

	copied = copy_from_iter(scratch, size, &iter);

	KUNIT_EXPECT_EQ(test, copied, size);
	KUNIT_EXPECT_EQ(test, iter.count, 0);
	KUNIT_EXPECT_EQ(test, iter.nr_segs, 0);

	/* Build the expected image in the main buffer. */
	i = 0;
	memset(buffer, 0, bufsize);
	for (pr = bvec_test_ranges; pr->from >= 0; pr++) {
		size_t patt = pr->page * PAGE_SIZE;

		for (j = pr->from; j < pr->to; j++) {
			buffer[i++] = pattern(patt + j);
			if (i >= bufsize)
				goto stop;
		}
	}
stop:

	/* Compare the images */
	for (i = 0; i < bufsize; i++) {
		KUNIT_EXPECT_EQ_MSG(test, scratch[i], buffer[i], "at i=%x", i);
		if (scratch[i] != buffer[i])
			return;
	}

	KUNIT_SUCCEED();
}

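/* KUnit cleanup action: dispose of the xarray used to build test iterators. */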
static void iov_kunit_destroy_xarray(void *data)
{
	struct xarray *xarray = data;

	xa_destroy(xarray);
	kfree(xarray);
}

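/*
 * Insert the buffer pages into the xarray at consecutive indices and set up
 * an iterator that covers the whole lot.
 */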
static void __init iov_kunit_load_xarray(struct kunit *test,
					 struct iov_iter *iter, int dir,
					 struct xarray *xarray,
					 struct page **pages, size_t npages)
{
	size_t size = 0;
	int i;

	for (i = 0; i < npages; i++) {
		void *x = xa_store(xarray, i, pages[i], GFP_KERNEL);

		KUNIT_ASSERT_FALSE(test, xa_is_err(x));
		size += PAGE_SIZE;
	}
	iov_iter_xarray(iter, dir, xarray, 0, size);
}

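/*
 * Allocate an xarray for a test and arrange for it to be destroyed
 * automatically when the test completes.
 */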
static struct xarray *iov_kunit_create_xarray(struct kunit *test)
{
	struct xarray *xarray;

	xarray = kzalloc(sizeof(struct xarray), GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, xarray);
	xa_init(xarray);
	kunit_add_action_or_reset(test, iov_kunit_destroy_xarray, xarray);
	return xarray;
}

/*
 * Test copying to an ITER_XARRAY-type iterator.
 */
static void __init iov_kunit_copy_to_xarray(struct kunit *test)
{
	const struct kvec_test_range *pr;
	struct iov_iter iter;
	struct xarray *xarray;
	struct page **spages, **bpages;
	u8 *scratch, *buffer;
	size_t bufsize, npages, size, copied;
	int i, patt;

	bufsize = 0x100000;
	npages = bufsize / PAGE_SIZE;

	xarray = iov_kunit_create_xarray(test);

	scratch = iov_kunit_create_buffer(test, &spages, npages);
	for (i = 0; i < bufsize; i++)
		scratch[i] = pattern(i);

	buffer = iov_kunit_create_buffer(test, &bpages, npages);
	memset(buffer, 0, bufsize);

	iov_kunit_load_xarray(test, &iter, READ, xarray, bpages, npages);

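	/* Copy the scratch data into the xarray-backed buffer one test range
	 * at a time, resetting the iterator to the appropriate offset for
	 * each range.
	 */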
	i = 0;
	for (pr = kvec_test_ranges; pr->from >= 0; pr++) {
		size = pr->to - pr->from;
		KUNIT_ASSERT_LE(test, pr->to, bufsize);

		iov_iter_xarray(&iter, READ, xarray, pr->from, size);
		copied = copy_to_iter(scratch + i, size, &iter);

		KUNIT_EXPECT_EQ(test, copied, size);
		KUNIT_EXPECT_EQ(test, iter.count, 0);
		KUNIT_EXPECT_EQ(test, iter.iov_offset, size);
		i += size;
	}

	/* Build the expected image in the scratch buffer. */
	patt = 0;
	memset(scratch, 0, bufsize);
	for (pr = kvec_test_ranges; pr->from >= 0; pr++)
		for (i = pr->from; i < pr->to; i++)
			scratch[i] = pattern(patt++);

	/* Compare the images */
	for (i = 0; i < bufsize; i++) {
		KUNIT_EXPECT_EQ_MSG(test, buffer[i], scratch[i], "at i=%x", i);
		if (buffer[i] != scratch[i])
			return;
	}

	KUNIT_SUCCEED();
}

/*
 * Test copying from an ITER_XARRAY-type iterator.
 */
static void __init iov_kunit_copy_from_xarray(struct kunit *test)
{
	const struct kvec_test_range *pr;
	struct iov_iter iter;
	struct xarray *xarray;
	struct page **spages, **bpages;
	u8 *scratch, *buffer;
	size_t bufsize, npages, size, copied;
	int i, j;

	bufsize = 0x100000;
	npages = bufsize / PAGE_SIZE;

	xarray = iov_kunit_create_xarray(test);

	buffer = iov_kunit_create_buffer(test, &bpages, npages);
	for (i = 0; i < bufsize; i++)
		buffer[i] = pattern(i);

	scratch = iov_kunit_create_buffer(test, &spages, npages);
	memset(scratch, 0, bufsize);

	iov_kunit_load_xarray(test, &iter, READ, xarray, bpages, npages);

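	/* Copy each test range out of the xarray-backed buffer into the
	 * scratch buffer, resetting the iterator to the appropriate offset
	 * for each range.
	 */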
	i = 0;
	for (pr = kvec_test_ranges; pr->from >= 0; pr++) {
		size = pr->to - pr->from;
		KUNIT_ASSERT_LE(test, pr->to, bufsize);

		iov_iter_xarray(&iter, WRITE, xarray, pr->from, size);
		copied = copy_from_iter(scratch + i, size, &iter);

		KUNIT_EXPECT_EQ(test, copied, size);
		KUNIT_EXPECT_EQ(test, iter.count, 0);
		KUNIT_EXPECT_EQ(test, iter.iov_offset, size);
		i += size;
	}

	/* Build the expected image in the main buffer. */
	i = 0;
	memset(buffer, 0, bufsize);
	for (pr = kvec_test_ranges; pr->from >= 0; pr++) {
		for (j = pr->from; j < pr->to; j++) {
			buffer[i++] = pattern(j);
			if (i >= bufsize)
				goto stop;
		}
	}
stop:

	/* Compare the images */
	for (i = 0; i < bufsize; i++) {
		KUNIT_EXPECT_EQ_MSG(test, scratch[i], buffer[i], "at i=%x", i);
		if (scratch[i] != buffer[i])
			return;
	}

	KUNIT_SUCCEED();
}

/*
 * Test the extraction of ITER_KVEC-type iterators.
 */
static void __init iov_kunit_extract_pages_kvec(struct kunit *test)
{
	const struct kvec_test_range *pr;
	struct iov_iter iter;
	struct page **bpages, *pagelist[8], **pages = pagelist;
	struct kvec kvec[8];
	u8 *buffer;
	ssize_t len;
	size_t bufsize, size = 0, npages;
	int i, from;

	bufsize = 0x100000;
	npages = bufsize / PAGE_SIZE;

	buffer = iov_kunit_create_buffer(test, &bpages, npages);

	iov_kunit_load_kvec(test, &iter, READ, kvec, ARRAY_SIZE(kvec),
			    buffer, bufsize, kvec_test_ranges);
	size = iter.count;

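	/* Extract the pages in batches of up to 100KiB or 8 pages and check
	 * each returned page and the initial offset against the source
	 * ranges.
	 */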
	pr = kvec_test_ranges;
	from = pr->from;
	do {
		size_t offset0 = LONG_MAX;

		for (i = 0; i < ARRAY_SIZE(pagelist); i++)
			pagelist[i] = (void *)(unsigned long)0xaa55aa55aa55aa55ULL;

		len = iov_iter_extract_pages(&iter, &pages, 100 * 1024,
					     ARRAY_SIZE(pagelist), 0, &offset0);
		KUNIT_EXPECT_GE(test, len, 0);
		if (len < 0)
			break;
		KUNIT_EXPECT_GE(test, (ssize_t)offset0, 0);
		KUNIT_EXPECT_LT(test, offset0, PAGE_SIZE);
		KUNIT_EXPECT_LE(test, len, size);
		KUNIT_EXPECT_EQ(test, iter.count, size - len);
		size -= len;

		if (len == 0)
			break;

		for (i = 0; i < ARRAY_SIZE(pagelist); i++) {
			struct page *p;
			ssize_t part = min_t(ssize_t, len, PAGE_SIZE - offset0);
			int ix;

			KUNIT_ASSERT_GE(test, part, 0);
			while (from == pr->to) {
				pr++;
				from = pr->from;
				if (from < 0)
					goto stop;
			}
			ix = from / PAGE_SIZE;
			KUNIT_ASSERT_LT(test, ix, npages);
			p = bpages[ix];
			KUNIT_EXPECT_PTR_EQ(test, pagelist[i], p);
			KUNIT_EXPECT_EQ(test, offset0, from % PAGE_SIZE);
			from += part;
			len -= part;
			KUNIT_ASSERT_GE(test, len, 0);
			if (len == 0)
				break;
			offset0 = 0;
		}

		if (test->status == KUNIT_FAILURE)
			break;
	} while (iov_iter_count(&iter) > 0);

stop:
	KUNIT_EXPECT_EQ(test, size, 0);
	KUNIT_EXPECT_EQ(test, iter.count, 0);
	KUNIT_SUCCEED();
}

/*
 * Test the extraction of ITER_BVEC-type iterators.
 */
static void __init iov_kunit_extract_pages_bvec(struct kunit *test)
{
	const struct bvec_test_range *pr;
	struct iov_iter iter;
	struct page **bpages, *pagelist[8], **pages = pagelist;
	struct bio_vec bvec[8];
	ssize_t len;
	size_t bufsize, size = 0, npages;
	int i, from;

	bufsize = 0x100000;
	npages = bufsize / PAGE_SIZE;

	iov_kunit_create_buffer(test, &bpages, npages);
	iov_kunit_load_bvec(test, &iter, READ, bvec, ARRAY_SIZE(bvec),
			    bpages, npages, bufsize, bvec_test_ranges);
	size = iter.count;

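	/* Extract the pages in batches of up to 100KiB or 8 pages and check
	 * each returned page and the initial offset against the source
	 * ranges.
	 */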
	pr = bvec_test_ranges;
	from = pr->from;
	do {
		size_t offset0 = LONG_MAX;

		for (i = 0; i < ARRAY_SIZE(pagelist); i++)
			pagelist[i] = (void *)(unsigned long)0xaa55aa55aa55aa55ULL;

		len = iov_iter_extract_pages(&iter, &pages, 100 * 1024,
					     ARRAY_SIZE(pagelist), 0, &offset0);
		KUNIT_EXPECT_GE(test, len, 0);
		if (len < 0)
			break;
		KUNIT_EXPECT_GE(test, (ssize_t)offset0, 0);
		KUNIT_EXPECT_LT(test, offset0, PAGE_SIZE);
		KUNIT_EXPECT_LE(test, len, size);
		KUNIT_EXPECT_EQ(test, iter.count, size - len);
		size -= len;

		if (len == 0)
			break;

		for (i = 0; i < ARRAY_SIZE(pagelist); i++) {
			struct page *p;
			ssize_t part = min_t(ssize_t, len, PAGE_SIZE - offset0);
			int ix;

			KUNIT_ASSERT_GE(test, part, 0);
			while (from == pr->to) {
				pr++;
				from = pr->from;
				if (from < 0)
					goto stop;
			}
			ix = pr->page + from / PAGE_SIZE;
			KUNIT_ASSERT_LT(test, ix, npages);
			p = bpages[ix];
			KUNIT_EXPECT_PTR_EQ(test, pagelist[i], p);
			KUNIT_EXPECT_EQ(test, offset0, from % PAGE_SIZE);
			from += part;
			len -= part;
			KUNIT_ASSERT_GE(test, len, 0);
			if (len == 0)
				break;
			offset0 = 0;
		}

		if (test->status == KUNIT_FAILURE)
			break;
	} while (iov_iter_count(&iter) > 0);

stop:
	KUNIT_EXPECT_EQ(test, size, 0);
	KUNIT_EXPECT_EQ(test, iter.count, 0);
	KUNIT_SUCCEED();
}

/*
 * Test the extraction of ITER_XARRAY-type iterators.
 */
static void __init iov_kunit_extract_pages_xarray(struct kunit *test)
{
	const struct kvec_test_range *pr;
	struct iov_iter iter;
	struct xarray *xarray;
	struct page **bpages, *pagelist[8], **pages = pagelist;
	ssize_t len;
	size_t bufsize, size = 0, npages;
	int i, from;

	bufsize = 0x100000;
	npages = bufsize / PAGE_SIZE;

	xarray = iov_kunit_create_xarray(test);

	iov_kunit_create_buffer(test, &bpages, npages);
	iov_kunit_load_xarray(test, &iter, READ, xarray, bpages, npages);

	for (pr = kvec_test_ranges; pr->from >= 0; pr++) {
		from = pr->from;
		size = pr->to - from;
		KUNIT_ASSERT_LE(test, pr->to, bufsize);

		iov_iter_xarray(&iter, WRITE, xarray, from, size);

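		/* Extract this range in batches of up to 100KiB or 8 pages
		 * and check each returned page and the initial offset.
		 */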
		do {
			size_t offset0 = LONG_MAX;

			for (i = 0; i < ARRAY_SIZE(pagelist); i++)
				pagelist[i] = (void *)(unsigned long)0xaa55aa55aa55aa55ULL;

			len = iov_iter_extract_pages(&iter, &pages, 100 * 1024,
						     ARRAY_SIZE(pagelist), 0, &offset0);
			KUNIT_EXPECT_GE(test, len, 0);
			if (len < 0)
				break;
			KUNIT_EXPECT_LE(test, len, size);
			KUNIT_EXPECT_EQ(test, iter.count, size - len);
			if (len == 0)
				break;
			size -= len;
			KUNIT_EXPECT_GE(test, (ssize_t)offset0, 0);
			KUNIT_EXPECT_LT(test, offset0, PAGE_SIZE);

			for (i = 0; i < ARRAY_SIZE(pagelist); i++) {
				struct page *p;
				ssize_t part = min_t(ssize_t, len, PAGE_SIZE - offset0);
				int ix;

				KUNIT_ASSERT_GE(test, part, 0);
				ix = from / PAGE_SIZE;
				KUNIT_ASSERT_LT(test, ix, npages);
				p = bpages[ix];
				KUNIT_EXPECT_PTR_EQ(test, pagelist[i], p);
				KUNIT_EXPECT_EQ(test, offset0, from % PAGE_SIZE);
				from += part;
				len -= part;
				KUNIT_ASSERT_GE(test, len, 0);
				if (len == 0)
					break;
				offset0 = 0;
			}

			if (test->status == KUNIT_FAILURE)
				goto stop;
		} while (iov_iter_count(&iter) > 0);

		KUNIT_EXPECT_EQ(test, size, 0);
		KUNIT_EXPECT_EQ(test, iter.count, 0);
		KUNIT_EXPECT_EQ(test, iter.iov_offset, pr->to - pr->from);
	}

stop:
	KUNIT_SUCCEED();
}

static struct kunit_case __refdata iov_kunit_cases[] = {
	KUNIT_CASE(iov_kunit_copy_to_kvec),
	KUNIT_CASE(iov_kunit_copy_from_kvec),
	KUNIT_CASE(iov_kunit_copy_to_bvec),
	KUNIT_CASE(iov_kunit_copy_from_bvec),
	KUNIT_CASE(iov_kunit_copy_to_xarray),
	KUNIT_CASE(iov_kunit_copy_from_xarray),
	KUNIT_CASE(iov_kunit_extract_pages_kvec),
	KUNIT_CASE(iov_kunit_extract_pages_bvec),
	KUNIT_CASE(iov_kunit_extract_pages_xarray),
	{}
};

static struct kunit_suite iov_kunit_suite = {
	.name = "iov_iter",
	.test_cases = iov_kunit_cases,
};

kunit_test_suites(&iov_kunit_suite);