/*-
 * Copyright (c) 2010 Isilon Systems, Inc.
 * Copyright (c) 2010 iX Systems, Inc.
 * Copyright (c) 2010 Panasas, Inc.
 * Copyright (c) 2013-2017 Mellanox Technologies, Ltd.
 * Copyright (c) 2015 Matthew Dillon <dillon@backplane.com>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * $FreeBSD: stable/11/sys/compat/linuxkpi/common/include/linux/scatterlist.h 368828 2020-12-30 01:11:14Z hselasky $
 */
#ifndef	_LINUX_SCATTERLIST_H_
#define	_LINUX_SCATTERLIST_H_

#include <linux/page.h>
#include <linux/slab.h>
#include <linux/mm.h>

struct scatterlist {
	unsigned long page_link;
#define	SG_PAGE_LINK_CHAIN	0x1UL
#define	SG_PAGE_LINK_LAST	0x2UL
#define	SG_PAGE_LINK_MASK	0x3UL
	unsigned int offset;
	unsigned int length;
	dma_addr_t address;
};
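/*
 * The low two bits of page_link in the structure above carry the
 * SG_PAGE_LINK_* flags; the remaining bits hold either the page pointer
 * for this entry or, for a chain entry, a pointer to the next
 * scatterlist array.  The CTASSERT below keeps the structure size a
 * multiple of SG_PAGE_LINK_MASK + 1 so that pointers into scatterlist
 * arrays always have those low bits free for the flags.
 */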

CTASSERT((sizeof(struct scatterlist) & SG_PAGE_LINK_MASK) == 0);

struct sg_table {
	struct scatterlist *sgl;
	unsigned int nents;
	unsigned int orig_nents;
};
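/*
 * As in the Linux structure this mirrors, orig_nents is the number of
 * entries allocated for the table, while nents is the number of entries
 * in active use (for example after DMA mapping); the allocation helpers
 * below initialize both to the same value.
 */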

struct sg_page_iter {
	struct scatterlist *sg;
	unsigned int sg_pgoffset;
	unsigned int maxents;
	struct {
		unsigned int nents;
		int	pg_advance;
	} internal;
};

struct sg_dma_page_iter {
	struct sg_page_iter base;
};

#define	SCATTERLIST_MAX_SEGMENT	(-1U & ~(PAGE_SIZE - 1))

#define	SG_MAX_SINGLE_ALLOC	(PAGE_SIZE / sizeof(struct scatterlist))

#define	SG_MAGIC		0x87654321UL
#define	SG_CHAIN		SG_PAGE_LINK_CHAIN
#define	SG_END			SG_PAGE_LINK_LAST

#define	sg_is_chain(sg)		((sg)->page_link & SG_PAGE_LINK_CHAIN)
#define	sg_is_last(sg)		((sg)->page_link & SG_PAGE_LINK_LAST)
#define	sg_chain_ptr(sg)	\
	((struct scatterlist *) ((sg)->page_link & ~SG_PAGE_LINK_MASK))

#define	sg_dma_address(sg)	(sg)->address
#define	sg_dma_len(sg)		(sg)->length

#define	for_each_sg_page(sgl, iter, nents, pgoffset)			\
	for (_sg_iter_init(sgl, iter, nents, pgoffset);			\
	     (iter)->sg; _sg_iter_next(iter))
#define	for_each_sg_dma_page(sgl, iter, nents, pgoffset)		\
	for_each_sg_page(sgl, &(iter)->base, nents, pgoffset)

#define	for_each_sg(sglist, sg, sgmax, iter)				\
	for (iter = 0, sg = (sglist); iter < (sgmax); iter++, sg = sg_next(sg))
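/*
 * Minimal usage sketch for the for_each_sg() iterator above; "table",
 * "sg" and "i" are hypothetical locals:
 *
 *	struct scatterlist *sg;
 *	int i;
 *
 *	for_each_sg(table.sgl, sg, table.nents, i)
 *		printf("entry %d: %u bytes\n", i, sg->length);
 */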

typedef struct scatterlist *(sg_alloc_fn) (unsigned int, gfp_t);
typedef void (sg_free_fn) (struct scatterlist *, unsigned int);

static inline void
sg_assign_page(struct scatterlist *sg, struct page *page)
{
	unsigned long page_link = sg->page_link & SG_PAGE_LINK_MASK;

	sg->page_link = page_link | (unsigned long)page;
}

static inline void
sg_set_page(struct scatterlist *sg, struct page *page, unsigned int len,
    unsigned int offset)
{
	sg_assign_page(sg, page);
	sg->offset = offset;
	sg->length = len;
}

static inline struct page *
sg_page(struct scatterlist *sg)
{
	return ((struct page *)((sg)->page_link & ~SG_PAGE_LINK_MASK));
}

static inline void
sg_set_buf(struct scatterlist *sg, const void *buf, unsigned int buflen)
{
	sg_set_page(sg, virt_to_page(buf), buflen,
	    ((uintptr_t)buf) & (PAGE_SIZE - 1));
}
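/*
 * Example sketch for sg_set_buf() above; "hdr" and "payload" are
 * hypothetical, physically contiguous kernel virtual buffers (e.g. from
 * kmalloc()) so that virt_to_page() is valid for them:
 *
 *	struct scatterlist sg[2];
 *
 *	sg_init_table(sg, 2);
 *	sg_set_buf(&sg[0], hdr, sizeof(*hdr));
 *	sg_set_buf(&sg[1], payload, payload_len);
 */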

static inline struct scatterlist *
sg_next(struct scatterlist *sg)
{
	if (sg_is_last(sg))
		return (NULL);
	sg++;
	if (sg_is_chain(sg))
		sg = sg_chain_ptr(sg);
	return (sg);
}

static inline vm_paddr_t
sg_phys(struct scatterlist *sg)
{
	return (VM_PAGE_TO_PHYS(sg_page(sg)) + sg->offset);
}

static inline void *
sg_virt(struct scatterlist *sg)
{

	return ((void *)((unsigned long)page_address(sg_page(sg)) + sg->offset));
}

static inline void
sg_chain(struct scatterlist *prv, unsigned int prv_nents,
    struct scatterlist *sgl)
{
	struct scatterlist *sg = &prv[prv_nents - 1];

	sg->offset = 0;
	sg->length = 0;
	sg->page_link = ((unsigned long)sgl |
	    SG_PAGE_LINK_CHAIN) & ~SG_PAGE_LINK_LAST;
}
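/*
 * Chaining sketch for sg_chain() above: the last slot of the first
 * array is consumed as the link, so only prv_nents - 1 of its entries
 * remain usable for data.
 *
 *	struct scatterlist first[4], second[4];
 *
 *	sg_init_table(first, 4);
 *	sg_init_table(second, 4);
 *	sg_chain(first, 4, second);
 */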

static inline void
sg_mark_end(struct scatterlist *sg)
{
	sg->page_link |= SG_PAGE_LINK_LAST;
	sg->page_link &= ~SG_PAGE_LINK_CHAIN;
}

static inline void
sg_init_table(struct scatterlist *sg, unsigned int nents)
{
	bzero(sg, sizeof(*sg) * nents);
	sg_mark_end(&sg[nents - 1]);
}

static inline struct scatterlist *
sg_kmalloc(unsigned int nents, gfp_t gfp_mask)
{
	/*
	 * A full-sized chunk is allocated as a whole page so that it
	 * can later be released with free_page() in sg_kfree().
	 */
	if (nents == SG_MAX_SINGLE_ALLOC) {
		return ((void *)__get_free_page(gfp_mask));
	} else
		return (kmalloc(nents * sizeof(struct scatterlist), gfp_mask));
}

static inline void
sg_kfree(struct scatterlist *sg, unsigned int nents)
{
	if (nents == SG_MAX_SINGLE_ALLOC) {
		free_page((unsigned long)sg);
	} else
		kfree(sg);
}

static inline void
__sg_free_table(struct sg_table *table, unsigned int max_ents,
    bool skip_first_chunk, sg_free_fn * free_fn)
{
	struct scatterlist *sgl, *next;

	if (unlikely(!table->sgl))
		return;

	sgl = table->sgl;
	while (table->orig_nents) {
		unsigned int alloc_size = table->orig_nents;
		unsigned int sg_size;

		if (alloc_size > max_ents) {
			next = sg_chain_ptr(&sgl[max_ents - 1]);
			alloc_size = max_ents;
			sg_size = alloc_size - 1;
		} else {
			sg_size = alloc_size;
			next = NULL;
		}

		table->orig_nents -= sg_size;
		if (skip_first_chunk)
			skip_first_chunk = 0;
		else
			free_fn(sgl, alloc_size);
		sgl = next;
	}

	table->sgl = NULL;
}

static inline void
sg_free_table(struct sg_table *table)
{
	__sg_free_table(table, SG_MAX_SINGLE_ALLOC, 0, sg_kfree);
}

static inline int
__sg_alloc_table(struct sg_table *table, unsigned int nents,
    unsigned int max_ents, struct scatterlist *first_chunk,
    gfp_t gfp_mask, sg_alloc_fn *alloc_fn)
{
	struct scatterlist *sg, *prv;
	unsigned int left;

	memset(table, 0, sizeof(*table));

	if (nents == 0)
		return (-EINVAL);
	left = nents;
	prv = NULL;
	do {
		unsigned int sg_size;
		unsigned int alloc_size = left;

		if (alloc_size > max_ents) {
			alloc_size = max_ents;
			sg_size = alloc_size - 1;
		} else
			sg_size = alloc_size;

		left -= sg_size;

		if (first_chunk) {
			sg = first_chunk;
			first_chunk = NULL;
		} else {
			sg = alloc_fn(alloc_size, gfp_mask);
		}
		if (unlikely(!sg)) {
			if (prv)
				table->nents = ++table->orig_nents;

			return (-ENOMEM);
		}
		sg_init_table(sg, alloc_size);
		table->nents = table->orig_nents += sg_size;

		if (prv)
			sg_chain(prv, max_ents, sg);
		else
			table->sgl = sg;

		if (!left)
			sg_mark_end(&sg[sg_size - 1]);

		prv = sg;
	} while (left);

	return (0);
}
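/*
 * Note on __sg_alloc_table() above: when nents exceeds max_ents the
 * table is built from several chunks of at most max_ents entries, and
 * sg_chain() turns the last entry of every full chunk into a chain
 * link.  On allocation failure a partially built table is left behind
 * for the caller to release with __sg_free_table().
 */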

static inline int
sg_alloc_table(struct sg_table *table, unsigned int nents, gfp_t gfp_mask)
{
	int ret;

	ret = __sg_alloc_table(table, nents, SG_MAX_SINGLE_ALLOC,
	    NULL, gfp_mask, sg_kmalloc);
	if (unlikely(ret))
		__sg_free_table(table, SG_MAX_SINGLE_ALLOC, 0, sg_kfree);

	return (ret);
}
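/*
 * Allocation/teardown sketch for sg_alloc_table() above; error handling
 * is abbreviated and "pages"/"npages" are hypothetical:
 *
 *	struct sg_table table;
 *	struct scatterlist *sg;
 *	int i, error;
 *
 *	error = sg_alloc_table(&table, npages, GFP_KERNEL);
 *	if (error)
 *		return (error);
 *	for_each_sg(table.sgl, sg, table.nents, i)
 *		sg_set_page(sg, pages[i], PAGE_SIZE, 0);
 *	...
 *	sg_free_table(&table);
 */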

static inline int
__sg_alloc_table_from_pages(struct sg_table *sgt,
    struct page **pages, unsigned int count,
    unsigned long off, unsigned long size,
    unsigned int max_segment, gfp_t gfp_mask)
{
	unsigned int i, segs, cur, len;
	int rc;
	struct scatterlist *s;

	if (__predict_false(!max_segment || offset_in_page(max_segment)))
		return (-EINVAL);

	len = 0;
	for (segs = i = 1; i < count; ++i) {
		len += PAGE_SIZE;
		if (len >= max_segment ||
		    page_to_pfn(pages[i]) != page_to_pfn(pages[i - 1]) + 1) {
			++segs;
			len = 0;
		}
	}
	if (__predict_false((rc = sg_alloc_table(sgt, segs, gfp_mask))))
		return (rc);

	cur = 0;
	for_each_sg(sgt->sgl, s, sgt->orig_nents, i) {
		unsigned long seg_size;
		unsigned int j;

		len = 0;
		for (j = cur + 1; j < count; ++j) {
			len += PAGE_SIZE;
			if (len >= max_segment || page_to_pfn(pages[j]) !=
			    page_to_pfn(pages[j - 1]) + 1)
				break;
		}

		seg_size = ((j - cur) << PAGE_SHIFT) - off;
		sg_set_page(s, pages[cur], MIN(size, seg_size), off);
		size -= seg_size;
		off = 0;
		cur = j;
	}
	return (0);
}

static inline int
sg_alloc_table_from_pages(struct sg_table *sgt,
    struct page **pages, unsigned int count,
    unsigned long off, unsigned long size,
    gfp_t gfp_mask)
{

	return (__sg_alloc_table_from_pages(sgt, pages, count, off, size,
	    SCATTERLIST_MAX_SEGMENT, gfp_mask));
}
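/*
 * Sketch for sg_alloc_table_from_pages() above: runs of physically
 * contiguous pages are coalesced into single entries, so sgt.orig_nents
 * may end up smaller than the page count.  "pages" and "npages" are
 * hypothetical.
 *
 *	struct sg_table sgt;
 *	int error;
 *
 *	error = sg_alloc_table_from_pages(&sgt, pages, npages, 0,
 *	    (unsigned long)npages << PAGE_SHIFT, GFP_KERNEL);
 */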

static inline int
sg_nents(struct scatterlist *sg)
{
	int nents;

	for (nents = 0; sg; sg = sg_next(sg))
		nents++;
	return (nents);
}

static inline void
__sg_page_iter_start(struct sg_page_iter *piter,
    struct scatterlist *sglist, unsigned int nents,
    unsigned long pgoffset)
{
	piter->internal.pg_advance = 0;
	piter->internal.nents = nents;

	piter->sg = sglist;
	piter->sg_pgoffset = pgoffset;
}

static inline void
_sg_iter_next(struct sg_page_iter *iter)
{
	struct scatterlist *sg;
	unsigned int pgcount;

	sg = iter->sg;
	pgcount = (sg->offset + sg->length + PAGE_SIZE - 1) >> PAGE_SHIFT;

	++iter->sg_pgoffset;
	while (iter->sg_pgoffset >= pgcount) {
		iter->sg_pgoffset -= pgcount;
		sg = sg_next(sg);
		--iter->maxents;
		if (sg == NULL || iter->maxents == 0)
			break;
		pgcount = (sg->offset + sg->length + PAGE_SIZE - 1) >> PAGE_SHIFT;
	}
	iter->sg = sg;
}

static inline int
sg_page_count(struct scatterlist *sg)
{
	return (PAGE_ALIGN(sg->offset + sg->length) >> PAGE_SHIFT);
}
#define	sg_dma_page_count(sg) \
	sg_page_count(sg)

static inline bool
__sg_page_iter_next(struct sg_page_iter *piter)
{
	unsigned int pgcount;

	if (piter->internal.nents == 0)
		return (0);
	if (piter->sg == NULL)
		return (0);

	piter->sg_pgoffset += piter->internal.pg_advance;
	piter->internal.pg_advance = 1;

	while (1) {
		pgcount = sg_page_count(piter->sg);
		if (likely(piter->sg_pgoffset < pgcount))
			break;
		piter->sg_pgoffset -= pgcount;
		piter->sg = sg_next(piter->sg);
		if (--piter->internal.nents == 0)
			return (0);
		if (piter->sg == NULL)
			return (0);
	}
	return (1);
}
#define	__sg_page_iter_dma_next(itr) \
	__sg_page_iter_next(&(itr)->base)
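/*
 * Page-walk sketch using the iterator above; "table" is hypothetical:
 *
 *	struct sg_page_iter piter;
 *	struct page *page;
 *
 *	__sg_page_iter_start(&piter, table.sgl, table.nents, 0);
 *	while (__sg_page_iter_next(&piter)) {
 *		page = sg_page_iter_page(&piter);
 *		...
 *	}
 */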

static inline void
_sg_iter_init(struct scatterlist *sgl, struct sg_page_iter *iter,
    unsigned int nents, unsigned long pgoffset)
{
	if (nents) {
		iter->sg = sgl;
		/*
		 * Start one page before the requested offset; the
		 * _sg_iter_next() call below advances the iterator to
		 * the first valid page.
		 */
		iter->sg_pgoffset = pgoffset - 1;
		iter->maxents = nents;
		_sg_iter_next(iter);
	} else {
		iter->sg = NULL;
		iter->sg_pgoffset = 0;
		iter->maxents = 0;
	}
}

/*
 * sg_page_iter_dma_address() is implemented as a macro because it
 * needs to accept two different structure types with identical
 * layout.  This allows both old and new code to co-exist.  The
 * compile-time assert adds some safety by checking that the structure
 * sizes match.
 */
#define	sg_page_iter_dma_address(spi) ({		\
	struct sg_page_iter *__spi = (void *)(spi);	\
	dma_addr_t __dma_address;			\
	CTASSERT(sizeof(*(spi)) == sizeof(*__spi));	\
	__dma_address = __spi->sg->address +	\
	    (__spi->sg_pgoffset << PAGE_SHIFT);		\
	__dma_address;					\
})
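/*
 * DMA-side sketch: once the scatterlist addresses have been filled in
 * (typically by dma_map_sg()), the bus address of each page can be
 * walked with the helpers above.  "table" is hypothetical:
 *
 *	struct sg_dma_page_iter diter;
 *	dma_addr_t daddr;
 *
 *	for_each_sg_dma_page(table.sgl, &diter, table.nents, 0) {
 *		daddr = sg_page_iter_dma_address(&diter);
 *		...
 *	}
 */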

static inline struct page *
sg_page_iter_page(struct sg_page_iter *piter)
{
	return (nth_page(sg_page(piter->sg), piter->sg_pgoffset));
}

#endif					/* _LINUX_SCATTERLIST_H_ */