1/*-
2 * Copyright (c) 2010 Isilon Systems, Inc.
3 * Copyright (c) 2010 iX Systems, Inc.
4 * Copyright (c) 2010 Panasas, Inc.
5 * Copyright (c) 2013-2017 Mellanox Technologies, Ltd.
6 * Copyright (c) 2015 Matthew Dillon <dillon@backplane.com>
7 * All rights reserved.
8 *
9 * Redistribution and use in source and binary forms, with or without
10 * modification, are permitted provided that the following conditions
11 * are met:
12 * 1. Redistributions of source code must retain the above copyright
13 *    notice unmodified, this list of conditions, and the following
14 *    disclaimer.
15 * 2. Redistributions in binary form must reproduce the above copyright
16 *    notice, this list of conditions and the following disclaimer in the
17 *    documentation and/or other materials provided with the distribution.
18 *
19 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
20 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
21 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
22 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
23 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
24 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
25 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
26 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
27 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
28 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
29 *
30 * $FreeBSD$
31 */
32#ifndef	_LINUX_SCATTERLIST_H_
33#define	_LINUX_SCATTERLIST_H_
34
35#include <linux/page.h>
36#include <linux/slab.h>
37#include <linux/mm.h>
38
struct bus_dmamap;

/*
 * One entry of a scatter/gather list.  The low two bits of page_link
 * encode the chain/last flags; the remaining bits hold a struct page
 * pointer or, for a chain entry, a pointer to the next entry array
 * (see sg_chain_ptr()).
 */
struct scatterlist {
	unsigned long page_link;	/* page pointer plus flag bits below */
#define	SG_PAGE_LINK_CHAIN	0x1UL	/* entry chains to another array */
#define	SG_PAGE_LINK_LAST	0x2UL	/* entry terminates the list */
#define	SG_PAGE_LINK_MASK	0x3UL	/* mask covering both flag bits */
	unsigned int offset;		/* byte offset into the page */
	unsigned int length;		/* segment length in bytes */
	dma_addr_t dma_address;		/* bus address after DMA mapping */
	struct bus_dmamap *dma_map;	/* FreeBSD specific */
};
50
/*
 * The flag bits stored in page_link require chained scatterlist
 * pointers to have their low two bits clear; keeping the structure
 * size a multiple of SG_PAGE_LINK_MASK + 1 preserves that alignment
 * for array elements.
 */
CTASSERT((sizeof(struct scatterlist) & SG_PAGE_LINK_MASK) == 0);
52
/*
 * A scatterlist table, possibly built from several chained chunks
 * (see __sg_alloc_table()).
 */
struct sg_table {
	struct scatterlist *sgl;	/* first entry of the list */
	unsigned int nents;		/* number of usable entries */
	unsigned int orig_nents;	/* number of entries allocated */
};
58
/*
 * State for page-at-a-time iteration over a scatterlist.  The top
 * fields serve the legacy for_each_sg_page() iterator; the "internal"
 * fields serve __sg_page_iter_next().
 */
struct sg_page_iter {
	struct scatterlist *sg;		/* current list entry */
	unsigned int sg_pgoffset;	/* page index within *sg */
	unsigned int maxents;		/* entries left (legacy iterator) */
	struct {
		unsigned int nents;	/* entries left (__sg_page_iter_next) */
		int	pg_advance;	/* pages to advance on the next step */
	} internal;
};
68
/*
 * DMA flavour of the page iterator; must remain layout-identical to
 * struct sg_page_iter (see sg_page_iter_dma_address()).
 */
struct sg_dma_page_iter {
	struct sg_page_iter base;
};
72
/* Largest supported segment size, rounded down to a page boundary. */
#define	SCATTERLIST_MAX_SEGMENT	(-1U & ~(PAGE_SIZE - 1))

/* Number of entries that exactly fill one page-sized allocation. */
#define	SG_MAX_SINGLE_ALLOC	(PAGE_SIZE / sizeof(struct scatterlist))

#define	SG_MAGIC		0x87654321UL
#define	SG_CHAIN		SG_PAGE_LINK_CHAIN
#define	SG_END			SG_PAGE_LINK_LAST

/* Tests for the flag bits kept in the low bits of page_link. */
#define	sg_is_chain(sg)		((sg)->page_link & SG_PAGE_LINK_CHAIN)
#define	sg_is_last(sg)		((sg)->page_link & SG_PAGE_LINK_LAST)
/* Strip the flag bits to recover the chained entry pointer. */
#define	sg_chain_ptr(sg)	\
	((struct scatterlist *) ((sg)->page_link & ~SG_PAGE_LINK_MASK))

/* Accessors for the DMA mapping of an entry. */
#define	sg_dma_address(sg)	(sg)->dma_address
#define	sg_dma_len(sg)		(sg)->length

/* Visit every page of the first "nents" entries, one page per step. */
#define	for_each_sg_page(sgl, iter, nents, pgoffset)			\
	for (_sg_iter_init(sgl, iter, nents, pgoffset);			\
	     (iter)->sg; _sg_iter_next(iter))
#define	for_each_sg_dma_page(sgl, iter, nents, pgoffset) 		\
	for_each_sg_page(sgl, &(iter)->base, nents, pgoffset)

/* Visit entries one by one; "iter" is a plain integer counter. */
#define	for_each_sg(sglist, sg, sgmax, iter)				\
	for (iter = 0, sg = (sglist); iter < (sgmax); iter++, sg = sg_next(sg))

/* Allocator callbacks used by __sg_alloc_table()/__sg_free_table(). */
typedef struct scatterlist *(sg_alloc_fn) (unsigned int, gfp_t);
typedef void (sg_free_fn) (struct scatterlist *, unsigned int);
100
101static inline void
102sg_assign_page(struct scatterlist *sg, struct page *page)
103{
104	unsigned long page_link = sg->page_link & SG_PAGE_LINK_MASK;
105
106	sg->page_link = page_link | (unsigned long)page;
107}
108
109static inline void
110sg_set_page(struct scatterlist *sg, struct page *page, unsigned int len,
111    unsigned int offset)
112{
113	sg_assign_page(sg, page);
114	sg->offset = offset;
115	sg->length = len;
116}
117
118static inline struct page *
119sg_page(struct scatterlist *sg)
120{
121	return ((struct page *)((sg)->page_link & ~SG_PAGE_LINK_MASK));
122}
123
124static inline void
125sg_set_buf(struct scatterlist *sg, const void *buf, unsigned int buflen)
126{
127	sg_set_page(sg, virt_to_page(buf), buflen,
128	    ((uintptr_t)buf) & (PAGE_SIZE - 1));
129}
130
131static inline struct scatterlist *
132sg_next(struct scatterlist *sg)
133{
134	if (sg_is_last(sg))
135		return (NULL);
136	sg++;
137	if (sg_is_chain(sg))
138		sg = sg_chain_ptr(sg);
139	return (sg);
140}
141
142static inline vm_paddr_t
143sg_phys(struct scatterlist *sg)
144{
145	return (VM_PAGE_TO_PHYS(sg_page(sg)) + sg->offset);
146}
147
148static inline void *
149sg_virt(struct scatterlist *sg)
150{
151
152	return ((void *)((unsigned long)page_address(sg_page(sg)) + sg->offset));
153}
154
155static inline void
156sg_chain(struct scatterlist *prv, unsigned int prv_nents,
157    struct scatterlist *sgl)
158{
159	struct scatterlist *sg = &prv[prv_nents - 1];
160
161	sg->offset = 0;
162	sg->length = 0;
163	sg->page_link = ((unsigned long)sgl |
164	    SG_PAGE_LINK_CHAIN) & ~SG_PAGE_LINK_LAST;
165}
166
167static inline void
168sg_mark_end(struct scatterlist *sg)
169{
170	sg->page_link |= SG_PAGE_LINK_LAST;
171	sg->page_link &= ~SG_PAGE_LINK_CHAIN;
172}
173
174static inline void
175sg_init_table(struct scatterlist *sg, unsigned int nents)
176{
177	bzero(sg, sizeof(*sg) * nents);
178	sg_mark_end(&sg[nents - 1]);
179}
180
181static struct scatterlist *
182sg_kmalloc(unsigned int nents, gfp_t gfp_mask)
183{
184	if (nents == SG_MAX_SINGLE_ALLOC) {
185		return ((void *)__get_free_page(gfp_mask));
186	} else
187		return (kmalloc(nents * sizeof(struct scatterlist), gfp_mask));
188}
189
190static inline void
191sg_kfree(struct scatterlist *sg, unsigned int nents)
192{
193	if (nents == SG_MAX_SINGLE_ALLOC) {
194		free_page((unsigned long)sg);
195	} else
196		kfree(sg);
197}
198
/*
 * Walk every chunk of a (possibly chained) scatterlist table and hand
 * each one back to "free_fn".  When "skip_first_chunk" is set, the
 * first chunk is left alone (it was supplied by the caller to
 * __sg_alloc_table() rather than allocated).  Resets table->sgl to
 * NULL but leaves nents/orig_nents as the loop left them.
 */
static inline void
__sg_free_table(struct sg_table *table, unsigned int max_ents,
    bool skip_first_chunk, sg_free_fn * free_fn)
{
	struct scatterlist *sgl, *next;

	if (unlikely(!table->sgl))
		return;

	sgl = table->sgl;
	while (table->orig_nents) {
		unsigned int alloc_size = table->orig_nents;
		unsigned int sg_size;

		/*
		 * A full chunk holds max_ents entries, the last of
		 * which is a chain link to the next chunk rather than
		 * a data entry; grab the link before freeing.
		 */
		if (alloc_size > max_ents) {
			next = sg_chain_ptr(&sgl[max_ents - 1]);
			alloc_size = max_ents;
			sg_size = alloc_size - 1;
		} else {
			sg_size = alloc_size;
			next = NULL;
		}

		table->orig_nents -= sg_size;
		if (skip_first_chunk)
			skip_first_chunk = 0;
		else
			free_fn(sgl, alloc_size);
		sgl = next;
	}

	table->sgl = NULL;
}
232
233static inline void
234sg_free_table(struct sg_table *table)
235{
236	__sg_free_table(table, SG_MAX_SINGLE_ALLOC, 0, sg_kfree);
237}
238
/*
 * Allocate a scatterlist table for "nents" entries, built from chunks
 * of at most "max_ents" entries that are linked together with chain
 * entries.  When "first_chunk" is non-NULL it is used as the first
 * chunk instead of calling "alloc_fn".
 *
 * Returns 0 on success, -EINVAL for nents == 0, or -ENOMEM; on
 * -ENOMEM the table still describes whatever was allocated so that
 * __sg_free_table() can release it.
 */
static inline int
__sg_alloc_table(struct sg_table *table, unsigned int nents,
    unsigned int max_ents, struct scatterlist *first_chunk,
    gfp_t gfp_mask, sg_alloc_fn *alloc_fn)
{
	struct scatterlist *sg, *prv;
	unsigned int left;

	memset(table, 0, sizeof(*table));

	if (nents == 0)
		return (-EINVAL);
	left = nents;
	prv = NULL;
	do {
		unsigned int sg_size;
		unsigned int alloc_size = left;

		/*
		 * A full chunk sacrifices its last entry as a chain
		 * link, so it only contributes max_ents - 1 data
		 * entries.
		 */
		if (alloc_size > max_ents) {
			alloc_size = max_ents;
			sg_size = alloc_size - 1;
		} else
			sg_size = alloc_size;

		left -= sg_size;

		if (first_chunk) {
			sg = first_chunk;
			first_chunk = NULL;
		} else {
			sg = alloc_fn(alloc_size, gfp_mask);
		}
		if (unlikely(!sg)) {
			/*
			 * Count the chain entry of the previous chunk
			 * as well, so that __sg_free_table() walks and
			 * frees everything allocated so far.
			 */
			if (prv)
				table->nents = ++table->orig_nents;

			return (-ENOMEM);
		}
		sg_init_table(sg, alloc_size);
		table->nents = table->orig_nents += sg_size;

		/* Hook the new chunk onto the previous one, if any. */
		if (prv)
			sg_chain(prv, max_ents, sg);
		else
			table->sgl = sg;

		/* Terminate the list at the last entry actually used. */
		if (!left)
			sg_mark_end(&sg[sg_size - 1]);

		prv = sg;
	} while (left);

	return (0);
}
293
294static inline int
295sg_alloc_table(struct sg_table *table, unsigned int nents, gfp_t gfp_mask)
296{
297	int ret;
298
299	ret = __sg_alloc_table(table, nents, SG_MAX_SINGLE_ALLOC,
300	    NULL, gfp_mask, sg_kmalloc);
301	if (unlikely(ret))
302		__sg_free_table(table, SG_MAX_SINGLE_ALLOC, 0, sg_kfree);
303
304	return (ret);
305}
306
/*
 * Build a scatterlist table describing "count" pages, coalescing runs
 * of physically contiguous pages into segments of at most
 * "max_segment" bytes.  "off" is the byte offset into the first page
 * and "size" the total number of bytes described.  "max_segment" must
 * be a non-zero multiple of PAGE_SIZE.  Returns 0 or a negative errno.
 */
static inline int
__sg_alloc_table_from_pages(struct sg_table *sgt,
    struct page **pages, unsigned int count,
    unsigned long off, unsigned long size,
    unsigned int max_segment, gfp_t gfp_mask)
{
	unsigned int i, segs, cur, len;
	int rc;
	struct scatterlist *s;

	if (__predict_false(!max_segment || offset_in_page(max_segment)))
		return (-EINVAL);

	/*
	 * First pass: count how many segments are needed; a new segment
	 * starts whenever the size cap is hit or the next page is not
	 * physically adjacent to the previous one.
	 */
	len = 0;
	for (segs = i = 1; i < count; ++i) {
		len += PAGE_SIZE;
		if (len >= max_segment ||
		    page_to_pfn(pages[i]) != page_to_pfn(pages[i - 1]) + 1) {
			++segs;
			len = 0;
		}
	}
	if (__predict_false((rc = sg_alloc_table(sgt, segs, gfp_mask))))
		return (rc);

	/*
	 * Second pass: fill each entry with one contiguous run, using
	 * the same break conditions as the counting pass.
	 */
	cur = 0;
	for_each_sg(sgt->sgl, s, sgt->orig_nents, i) {
		unsigned long seg_size;
		unsigned int j;

		len = 0;
		for (j = cur + 1; j < count; ++j) {
			len += PAGE_SIZE;
			if (len >= max_segment || page_to_pfn(pages[j]) !=
			    page_to_pfn(pages[j - 1]) + 1)
				break;
		}

		/* Only the first segment carries the initial offset. */
		seg_size = ((j - cur) << PAGE_SHIFT) - off;
		sg_set_page(s, pages[cur], MIN(size, seg_size), off);
		size -= seg_size;
		off = 0;
		cur = j;
	}
	return (0);
}
353
354static inline int
355sg_alloc_table_from_pages(struct sg_table *sgt,
356    struct page **pages, unsigned int count,
357    unsigned long off, unsigned long size,
358    gfp_t gfp_mask)
359{
360
361	return (__sg_alloc_table_from_pages(sgt, pages, count, off, size,
362	    SCATTERLIST_MAX_SEGMENT, gfp_mask));
363}
364
365static inline int
366sg_nents(struct scatterlist *sg)
367{
368	int nents;
369
370	for (nents = 0; sg; sg = sg_next(sg))
371		nents++;
372	return (nents);
373}
374
375static inline void
376__sg_page_iter_start(struct sg_page_iter *piter,
377    struct scatterlist *sglist, unsigned int nents,
378    unsigned long pgoffset)
379{
380	piter->internal.pg_advance = 0;
381	piter->internal.nents = nents;
382
383	piter->sg = sglist;
384	piter->sg_pgoffset = pgoffset;
385}
386
/*
 * Advance the legacy page iterator (for_each_sg_page()) by one page,
 * stepping to the following scatterlist entry when the current one is
 * exhausted.  Terminates the iteration by setting iter->sg to NULL.
 */
static inline void
_sg_iter_next(struct sg_page_iter *iter)
{
	struct scatterlist *sg;
	unsigned int pgcount;

	sg = iter->sg;
	/* Number of pages spanned by the current entry. */
	pgcount = (sg->offset + sg->length + PAGE_SIZE - 1) >> PAGE_SHIFT;

	++iter->sg_pgoffset;
	while (iter->sg_pgoffset >= pgcount) {
		iter->sg_pgoffset -= pgcount;
		sg = sg_next(sg);
		--iter->maxents;
		/*
		 * NOTE(review): if maxents reaches zero while sg is
		 * still non-NULL, iter->sg stays non-NULL and the
		 * for_each_sg_page() condition does not stop here;
		 * callers appear to rely on the list's end marker
		 * matching nents — confirm.
		 */
		if (sg == NULL || iter->maxents == 0)
			break;
		pgcount = (sg->offset + sg->length + PAGE_SIZE - 1) >> PAGE_SHIFT;
	}
	iter->sg = sg;
}
407
/* Number of pages spanned by an entry (offset + length, page aligned). */
static inline int
sg_page_count(struct scatterlist *sg)
{
	return (PAGE_ALIGN(sg->offset + sg->length) >> PAGE_SHIFT);
}
/* The DMA variant is identical in this implementation. */
#define	sg_dma_page_count(sg) \
	sg_page_count(sg)
415
/*
 * Advance the modern page iterator by internal.pg_advance pages (zero
 * on the first call after __sg_page_iter_start(), one afterwards).
 * Returns true while there is a page to visit, false once
 * internal.nents entries or the end of the list has been reached.
 */
static inline bool
__sg_page_iter_next(struct sg_page_iter *piter)
{
	unsigned int pgcount;

	if (piter->internal.nents == 0)
		return (0);
	if (piter->sg == NULL)
		return (0);

	piter->sg_pgoffset += piter->internal.pg_advance;
	piter->internal.pg_advance = 1;

	/* Skip whole entries until the page offset lands inside one. */
	while (1) {
		pgcount = sg_page_count(piter->sg);
		if (likely(piter->sg_pgoffset < pgcount))
			break;
		piter->sg_pgoffset -= pgcount;
		piter->sg = sg_next(piter->sg);
		if (--piter->internal.nents == 0)
			return (0);
		if (piter->sg == NULL)
			return (0);
	}
	return (1);
}
/* The DMA flavour shares the implementation via the embedded base. */
#define	__sg_page_iter_dma_next(itr) \
	__sg_page_iter_next(&(itr)->base)
444
/*
 * Initialize the legacy page iterator at page "pgoffset" of "sgl",
 * covering at most "nents" entries.  The offset is seeded one low and
 * immediately advanced by _sg_iter_next() so that entry-crossing is
 * handled in a single place; nents == 0 yields an empty iteration.
 */
static inline void
_sg_iter_init(struct scatterlist *sgl, struct sg_page_iter *iter,
    unsigned int nents, unsigned long pgoffset)
{
	if (nents) {
		iter->sg = sgl;
		iter->sg_pgoffset = pgoffset - 1;	/* see comment above */
		iter->maxents = nents;
		_sg_iter_next(iter);
	} else {
		iter->sg = NULL;
		iter->sg_pgoffset = 0;
		iter->maxents = 0;
	}
}
460
/*
 * sg_page_iter_dma_address() is implemented as a macro because it
 * needs to accept two different and identical structure types. This
 * allows both old and new code to co-exist. The compile time assert
 * adds some safety, that the structure sizes match.
 *
 * NOTE(review): this assumes each scatterlist entry maps to a single
 * physically contiguous DMA range starting at sg->dma_address —
 * confirm against the dma_map_sg() implementation.
 */
#define	sg_page_iter_dma_address(spi) ({		\
	struct sg_page_iter *__spi = (void *)(spi);	\
	dma_addr_t __dma_address;			\
	CTASSERT(sizeof(*(spi)) == sizeof(*__spi));	\
	__dma_address = __spi->sg->dma_address +	\
	    (__spi->sg_pgoffset << PAGE_SHIFT);		\
	__dma_address;					\
})
475
476static inline struct page *
477sg_page_iter_page(struct sg_page_iter *piter)
478{
479	return (nth_page(sg_page(piter->sg), piter->sg_pgoffset));
480}
481
482
483#endif					/* _LINUX_SCATTERLIST_H_ */
484