/*-
 * Copyright (c) 2010 Isilon Systems, Inc.
 * Copyright (c) 2010 iX Systems, Inc.
 * Copyright (c) 2010 Panasas, Inc.
 * Copyright (c) 2013-2017 Mellanox Technologies, Ltd.
 * Copyright (c) 2015 Matthew Dillon <dillon@backplane.com>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * $FreeBSD: stable/11/sys/compat/linuxkpi/common/include/linux/scatterlist.h 345939 2019-04-05 11:32:57Z hselasky $
 */
#ifndef	_LINUX_SCATTERLIST_H_
#define	_LINUX_SCATTERLIST_H_

#include <linux/page.h>
#include <linux/slab.h>
#include <linux/mm.h>

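/*
 * A scatterlist entry describes one contiguous segment of memory as a
 * page pointer, an offset into that page and a byte length.  The low two
 * bits of "page_link" are used as flags: SG_PAGE_LINK_CHAIN marks an
 * entry that is really a chain pointer to another scatterlist array, and
 * SG_PAGE_LINK_LAST marks the final entry of a list.  The CTASSERT below
 * checks that the structure size is a multiple of SG_PAGE_LINK_MASK + 1,
 * so chain pointers into an array always have those bits clear.
 * "address" holds the DMA address of the segment (see sg_dma_address()).
 */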
struct scatterlist {
	unsigned long page_link;
#define	SG_PAGE_LINK_CHAIN	0x1UL
#define	SG_PAGE_LINK_LAST	0x2UL
#define	SG_PAGE_LINK_MASK	0x3UL
	unsigned int offset;
	unsigned int length;
	dma_addr_t address;
};

CTASSERT((sizeof(struct scatterlist) & SG_PAGE_LINK_MASK) == 0);

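/*
 * An sg_table wraps a (possibly chained) scatterlist.  "orig_nents" is
 * the number of entries that were allocated; "nents" is the number of
 * entries that carry data.  The allocation helpers below keep the two
 * equal.
 */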
struct sg_table {
	struct scatterlist *sgl;
	unsigned int nents;
	unsigned int orig_nents;
};

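/*
 * Iterator state for walking a scatterlist one page at a time.  "sg" is
 * the current entry, "sg_pgoffset" the page index within that entry and
 * "maxents" the number of entries left to visit (used by the _sg_iter_*()
 * helpers behind for_each_sg_page()).  The "internal" members serve the
 * Linux-style __sg_page_iter_start()/__sg_page_iter_next() pair.
 */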
struct sg_page_iter {
	struct scatterlist *sg;
	unsigned int sg_pgoffset;
	unsigned int maxents;
	struct {
		unsigned int nents;
		int	pg_advance;
	} internal;
};

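/*
 * SCATTERLIST_MAX_SEGMENT is the largest segment length that fits in the
 * unsigned int "length" field, rounded down to a whole number of pages.
 * SG_MAX_SINGLE_ALLOC is the number of entries that fit into a single
 * page; longer lists are built by chaining page-sized chunks together.
 */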
#define	SCATTERLIST_MAX_SEGMENT	(-1U & ~(PAGE_SIZE - 1))

#define	SG_MAX_SINGLE_ALLOC	(PAGE_SIZE / sizeof(struct scatterlist))

#define	SG_MAGIC		0x87654321UL
#define	SG_CHAIN		SG_PAGE_LINK_CHAIN
#define	SG_END			SG_PAGE_LINK_LAST

#define	sg_is_chain(sg)		((sg)->page_link & SG_PAGE_LINK_CHAIN)
#define	sg_is_last(sg)		((sg)->page_link & SG_PAGE_LINK_LAST)
#define	sg_chain_ptr(sg)	\
	((struct scatterlist *) ((sg)->page_link & ~SG_PAGE_LINK_MASK))

#define	sg_dma_address(sg)	(sg)->address
#define	sg_dma_len(sg)		(sg)->length

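/*
 * for_each_sg() visits the scatterlist entries themselves, following
 * chain pointers via sg_next(); for_each_sg_page() visits every page
 * covered by the list.  A minimal usage sketch, where "sgt" stands for a
 * previously populated struct sg_table:
 *
 *	struct scatterlist *sg;
 *	int i;
 *
 *	for_each_sg(sgt.sgl, sg, sgt.nents, i)
 *		printf("segment %d: %u bytes\n", i, sg_dma_len(sg));
 */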
#define	for_each_sg_page(sgl, iter, nents, pgoffset)			\
	for (_sg_iter_init(sgl, iter, nents, pgoffset);			\
	     (iter)->sg; _sg_iter_next(iter))

#define	for_each_sg(sglist, sg, sgmax, iter)				\
	for (iter = 0, sg = (sglist); iter < (sgmax); iter++, sg = sg_next(sg))

typedef struct scatterlist *(sg_alloc_fn) (unsigned int, gfp_t);
typedef void (sg_free_fn) (struct scatterlist *, unsigned int);

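/*
 * sg_assign_page() stores a page pointer in an entry while preserving the
 * chain/last flag bits; sg_set_page() additionally records the offset and
 * length of the data within that page.  sg_page() returns the stored page
 * with the flag bits masked off, and sg_set_buf() points an entry at the
 * page backing a kernel virtual buffer.
 */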
static inline void
sg_assign_page(struct scatterlist *sg, struct page *page)
{
	unsigned long page_link = sg->page_link & SG_PAGE_LINK_MASK;

	sg->page_link = page_link | (unsigned long)page;
}

static inline void
sg_set_page(struct scatterlist *sg, struct page *page, unsigned int len,
    unsigned int offset)
{
	sg_assign_page(sg, page);
	sg->offset = offset;
	sg->length = len;
}

static inline struct page *
sg_page(struct scatterlist *sg)
{
	return ((struct page *)((sg)->page_link & ~SG_PAGE_LINK_MASK));
}

static inline void
sg_set_buf(struct scatterlist *sg, const void *buf, unsigned int buflen)
{
	sg_set_page(sg, virt_to_page(buf), buflen,
	    ((uintptr_t)buf) & (PAGE_SIZE - 1));
}

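/*
 * Return the entry that follows "sg", transparently stepping over chain
 * entries onto the next chunk, or NULL if "sg" is the last entry.
 */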
static inline struct scatterlist *
sg_next(struct scatterlist *sg)
{
	if (sg_is_last(sg))
		return (NULL);
	sg++;
	if (sg_is_chain(sg))
		sg = sg_chain_ptr(sg);
	return (sg);
}

static inline vm_paddr_t
sg_phys(struct scatterlist *sg)
{
	return (VM_PAGE_TO_PHYS(sg_page(sg)) + sg->offset);
}

static inline void *
sg_virt(struct scatterlist *sg)
{

	return ((void *)((unsigned long)page_address(sg_page(sg)) + sg->offset));
}

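/*
 * Chain two scatterlist chunks together by turning the last entry of
 * "prv" into a chain pointer that refers to "sgl".  The chain entry
 * carries no data of its own.
 */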
static inline void
sg_chain(struct scatterlist *prv, unsigned int prv_nents,
    struct scatterlist *sgl)
{
	struct scatterlist *sg = &prv[prv_nents - 1];

	sg->offset = 0;
	sg->length = 0;
	sg->page_link = ((unsigned long)sgl |
	    SG_PAGE_LINK_CHAIN) & ~SG_PAGE_LINK_LAST;
}

static inline void
sg_mark_end(struct scatterlist *sg)
{
	sg->page_link |= SG_PAGE_LINK_LAST;
	sg->page_link &= ~SG_PAGE_LINK_CHAIN;
}

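/*
 * Zero an array of "nents" entries and mark the last one as the end of
 * the list.
 */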
static inline void
sg_init_table(struct scatterlist *sg, unsigned int nents)
{
	bzero(sg, sizeof(*sg) * nents);
	sg_mark_end(&sg[nents - 1]);
}

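/*
 * Allocation back-ends used by sg_alloc_table() and sg_free_table().
 * Full-sized chunks of SG_MAX_SINGLE_ALLOC entries are backed by whole
 * pages; smaller tail chunks come from kmalloc().
 */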
static inline struct scatterlist *
sg_kmalloc(unsigned int nents, gfp_t gfp_mask)
{
	if (nents == SG_MAX_SINGLE_ALLOC) {
		return ((void *)__get_free_page(gfp_mask));
	} else
		return (kmalloc(nents * sizeof(struct scatterlist), gfp_mask));
}

static inline void
sg_kfree(struct scatterlist *sg, unsigned int nents)
{
	if (nents == SG_MAX_SINGLE_ALLOC) {
		free_page((unsigned long)sg);
	} else
		kfree(sg);
}

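/*
 * Walk the chained chunks of a table and release each one through
 * "free_fn".  When "skip_first_chunk" is set, the first chunk is treated
 * as caller-provided storage and is not freed.
 */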
static inline void
__sg_free_table(struct sg_table *table, unsigned int max_ents,
    bool skip_first_chunk, sg_free_fn * free_fn)
{
	struct scatterlist *sgl, *next;

	if (unlikely(!table->sgl))
		return;

	sgl = table->sgl;
	while (table->orig_nents) {
		unsigned int alloc_size = table->orig_nents;
		unsigned int sg_size;

		if (alloc_size > max_ents) {
			next = sg_chain_ptr(&sgl[max_ents - 1]);
			alloc_size = max_ents;
			sg_size = alloc_size - 1;
		} else {
			sg_size = alloc_size;
			next = NULL;
		}

		table->orig_nents -= sg_size;
		if (skip_first_chunk)
			skip_first_chunk = 0;
		else
			free_fn(sgl, alloc_size);
		sgl = next;
	}

	table->sgl = NULL;
}

static inline void
sg_free_table(struct sg_table *table)
{
	__sg_free_table(table, SG_MAX_SINGLE_ALLOC, 0, sg_kfree);
}

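/*
 * Allocate a scatterlist of "nents" entries in chunks of at most
 * "max_ents" entries each.  When more than one chunk is needed, the last
 * entry of every non-final chunk is sacrificed as a chain pointer to the
 * next chunk, which is why only "alloc_size - 1" entries of such a chunk
 * hold data.  An optional "first_chunk" supplies caller-provided storage
 * for the first chunk.
 */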
static inline int
__sg_alloc_table(struct sg_table *table, unsigned int nents,
    unsigned int max_ents, struct scatterlist *first_chunk,
    gfp_t gfp_mask, sg_alloc_fn *alloc_fn)
{
	struct scatterlist *sg, *prv;
	unsigned int left;

	memset(table, 0, sizeof(*table));

	if (nents == 0)
		return (-EINVAL);
	left = nents;
	prv = NULL;
	do {
		unsigned int sg_size;
		unsigned int alloc_size = left;

		if (alloc_size > max_ents) {
			alloc_size = max_ents;
			sg_size = alloc_size - 1;
		} else
			sg_size = alloc_size;

		left -= sg_size;

		if (first_chunk) {
			sg = first_chunk;
			first_chunk = NULL;
		} else {
			sg = alloc_fn(alloc_size, gfp_mask);
		}
		if (unlikely(!sg)) {
			if (prv)
				table->nents = ++table->orig_nents;

			return (-ENOMEM);
		}
		sg_init_table(sg, alloc_size);
		table->nents = table->orig_nents += sg_size;

		if (prv)
			sg_chain(prv, max_ents, sg);
		else
			table->sgl = sg;

		if (!left)
			sg_mark_end(&sg[sg_size - 1]);

		prv = sg;
	} while (left);

	return (0);
}

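/*
 * Convenience wrapper around __sg_alloc_table() using page-sized chunks
 * and the kmalloc()-based back-ends above; on failure the partially built
 * table is torn down again.  A minimal usage sketch, where "pages" and
 * "npages" are only illustrative caller-supplied inputs:
 *
 *	struct sg_table sgt;
 *	struct scatterlist *sg;
 *	int i;
 *
 *	if (sg_alloc_table(&sgt, npages, GFP_KERNEL) == 0) {
 *		for_each_sg(sgt.sgl, sg, sgt.nents, i)
 *			sg_set_page(sg, pages[i], PAGE_SIZE, 0);
 *		...
 *		sg_free_table(&sgt);
 *	}
 */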
static inline int
sg_alloc_table(struct sg_table *table, unsigned int nents, gfp_t gfp_mask)
{
	int ret;

	ret = __sg_alloc_table(table, nents, SG_MAX_SINGLE_ALLOC,
	    NULL, gfp_mask, sg_kmalloc);
	if (unlikely(ret))
		__sg_free_table(table, SG_MAX_SINGLE_ALLOC, 0, sg_kfree);

	return (ret);
}

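/*
 * Build a table from an array of pages, merging physically contiguous
 * pages into single segments of at most "max_segment" bytes (which must
 * be a non-zero multiple of PAGE_SIZE).  "off" is the offset of the data
 * in the first page and "size" the total number of bytes described by
 * the page array.
 */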
static inline int
__sg_alloc_table_from_pages(struct sg_table *sgt,
    struct page **pages, unsigned int count,
    unsigned long off, unsigned long size,
    unsigned int max_segment, gfp_t gfp_mask)
{
	unsigned int i, segs, cur, len;
	int rc;
	struct scatterlist *s;

	if (__predict_false(!max_segment || offset_in_page(max_segment)))
		return (-EINVAL);

	len = 0;
	for (segs = i = 1; i < count; ++i) {
		len += PAGE_SIZE;
		if (len >= max_segment ||
		    page_to_pfn(pages[i]) != page_to_pfn(pages[i - 1]) + 1) {
			++segs;
			len = 0;
		}
	}
	if (__predict_false((rc = sg_alloc_table(sgt, segs, gfp_mask))))
		return (rc);

	cur = 0;
	for_each_sg(sgt->sgl, s, sgt->orig_nents, i) {
		unsigned long seg_size;
		unsigned int j;

		len = 0;
		for (j = cur + 1; j < count; ++j) {
			len += PAGE_SIZE;
			if (len >= max_segment || page_to_pfn(pages[j]) !=
			    page_to_pfn(pages[j - 1]) + 1)
				break;
		}

		seg_size = ((j - cur) << PAGE_SHIFT) - off;
		sg_set_page(s, pages[cur], min(size, seg_size), off);
		size -= seg_size;
		off = 0;
		cur = j;
	}
	return (0);
}

static inline int
sg_alloc_table_from_pages(struct sg_table *sgt,
    struct page **pages, unsigned int count,
    unsigned long off, unsigned long size,
    gfp_t gfp_mask)
{

	return (__sg_alloc_table_from_pages(sgt, pages, count, off, size,
	    SCATTERLIST_MAX_SEGMENT, gfp_mask));
}

static inline int
sg_nents(struct scatterlist *sg)
{
	int nents;

	for (nents = 0; sg; sg = sg_next(sg))
		nents++;
	return (nents);
}

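/*
 * Page iterators.  __sg_page_iter_start() and __sg_page_iter_next() form
 * the Linux-style interface, limited to "nents" entries.  _sg_iter_init()
 * and _sg_iter_next() are the internal helpers behind the
 * for_each_sg_page() macro above.
 */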
static inline void
__sg_page_iter_start(struct sg_page_iter *piter,
    struct scatterlist *sglist, unsigned int nents,
    unsigned long pgoffset)
{
	piter->internal.pg_advance = 0;
	piter->internal.nents = nents;

	piter->sg = sglist;
	piter->sg_pgoffset = pgoffset;
}

static inline void
_sg_iter_next(struct sg_page_iter *iter)
{
	struct scatterlist *sg;
	unsigned int pgcount;

	sg = iter->sg;
	pgcount = (sg->offset + sg->length + PAGE_SIZE - 1) >> PAGE_SHIFT;

	++iter->sg_pgoffset;
	while (iter->sg_pgoffset >= pgcount) {
		iter->sg_pgoffset -= pgcount;
		sg = sg_next(sg);
		--iter->maxents;
		if (sg == NULL || iter->maxents == 0)
			break;
		pgcount = (sg->offset + sg->length + PAGE_SIZE - 1) >> PAGE_SHIFT;
	}
	iter->sg = sg;
}

static inline int
sg_page_count(struct scatterlist *sg)
{
	return (PAGE_ALIGN(sg->offset + sg->length) >> PAGE_SHIFT);
}

static inline bool
__sg_page_iter_next(struct sg_page_iter *piter)
{
	if (piter->internal.nents == 0)
		return (0);
	if (piter->sg == NULL)
		return (0);

	piter->sg_pgoffset += piter->internal.pg_advance;
	piter->internal.pg_advance = 1;

	while (piter->sg_pgoffset >= sg_page_count(piter->sg)) {
		piter->sg_pgoffset -= sg_page_count(piter->sg);
		piter->sg = sg_next(piter->sg);
		if (--piter->internal.nents == 0)
			return (0);
		if (piter->sg == NULL)
			return (0);
	}
	return (1);
}

static inline void
_sg_iter_init(struct scatterlist *sgl, struct sg_page_iter *iter,
    unsigned int nents, unsigned long pgoffset)
{
	if (nents) {
		iter->sg = sgl;
		iter->sg_pgoffset = pgoffset - 1;
		iter->maxents = nents;
		_sg_iter_next(iter);
	} else {
		iter->sg = NULL;
		iter->sg_pgoffset = 0;
		iter->maxents = 0;
	}
}

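/*
 * sg_page_iter_dma_address() returns the DMA address of the iterator's
 * current page; it assumes the entry has already been DMA mapped so that
 * "address" is valid.  sg_page_iter_page() returns the corresponding
 * struct page.
 */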
static inline dma_addr_t
sg_page_iter_dma_address(struct sg_page_iter *spi)
{
	return (spi->sg->address + (spi->sg_pgoffset << PAGE_SHIFT));
}

static inline struct page *
sg_page_iter_page(struct sg_page_iter *piter)
{
	return (nth_page(sg_page(piter->sg), piter->sg_pgoffset));
}

#endif					/* _LINUX_SCATTERLIST_H_ */