scatterlist.h revision 289567
/*-
 * Copyright (c) 2010 Isilon Systems, Inc.
 * Copyright (c) 2010 iX Systems, Inc.
 * Copyright (c) 2010 Panasas, Inc.
 * Copyright (c) 2013, 2014 Mellanox Technologies, Ltd.
 * Copyright (c) 2015 Matthew Dillon <dillon@backplane.com>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef	_LINUX_SCATTERLIST_H_
#define	_LINUX_SCATTERLIST_H_

#include <linux/page.h>
#include <linux/slab.h>

/*
 * SG table design.
 *
 * If the SG_CHAIN flag is set, then the sg field of the entry contains a
 * pointer to the next scatterlist chunk instead of a page; otherwise the
 * next entry is simply at sg + 1.
 *
 * If the SG_END flag is set, then this sg entry is the last element in
 * the list and sg_next() will return NULL.
 *
 * See sg_next().
 */

struct scatterlist {
	union {
		struct page		*page;
		struct scatterlist	*sg;
	} sl_un;
	dma_addr_t	address;
	unsigned long	offset;
	uint32_t	length;
	uint32_t	flags;
};

struct sg_table {
	struct scatterlist *sgl;        /* the list */
	unsigned int nents;             /* number of mapped entries */
	unsigned int orig_nents;        /* original size of list */
};

struct sg_page_iter {
	struct scatterlist	*sg;
	unsigned int		sg_pgoffset;	/* page index */
	unsigned int		maxents;
};

/*
 * Maximum number of entries that will be allocated in one piece.  If a
 * list larger than this is required, chaining will be utilized.
 */
#define SG_MAX_SINGLE_ALLOC             (PAGE_SIZE / sizeof(struct scatterlist))

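/*
 * Illustrative arithmetic (not from the original header): on a 64-bit
 * build with 4 KB pages, struct scatterlist is typically 32 bytes
 * (8-byte pointer union, 8-byte dma_addr_t, 8-byte offset, two 4-byte
 * fields), so SG_MAX_SINGLE_ALLOC works out to 4096 / 32 = 128 entries
 * per chunk.
 */
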
#define	sg_dma_address(sg)	(sg)->address
#define	sg_dma_len(sg)		(sg)->length
#define	sg_page(sg)		(sg)->sl_un.page
#define	sg_scatternext(sg)	(sg)->sl_un.sg

#define	SG_END		0x01
#define	SG_CHAIN	0x02

static inline void
sg_set_page(struct scatterlist *sg, struct page *page, unsigned int len,
    unsigned int offset)
{
	sg_page(sg) = page;
	sg_dma_len(sg) = len;
	sg->offset = offset;
	if (offset > PAGE_SIZE)
		panic("sg_set_page: Invalid offset %u\n", offset);
}

static inline void
sg_set_buf(struct scatterlist *sg, const void *buf, unsigned int buflen)
{
	sg_set_page(sg, virt_to_page(buf), buflen,
	    ((uintptr_t)buf) & (PAGE_SIZE - 1));
}

static inline void
sg_init_table(struct scatterlist *sg, unsigned int nents)
{
	bzero(sg, sizeof(*sg) * nents);
	sg[nents - 1].flags = SG_END;
}

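/*
 * Example (illustrative sketch, not part of the original header): build a
 * two-entry scatterlist over two physically contiguous kernel buffers.
 * "buf0", "buf1" and their lengths are hypothetical names supplied by the
 * caller.
 *
 *	struct scatterlist sg[2];
 *
 *	sg_init_table(sg, 2);
 *	sg_set_buf(&sg[0], buf0, len0);
 *	sg_set_buf(&sg[1], buf1, len1);
 *
 * sg_init_table() zeroes both entries and marks sg[1] with SG_END, so a
 * walk via sg_next() terminates after the second entry.
 */
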
static inline struct scatterlist *
sg_next(struct scatterlist *sg)
{
	if (sg->flags & SG_END)
		return (NULL);
	sg++;
	if (sg->flags & SG_CHAIN)
		sg = sg_scatternext(sg);
	return (sg);
}

static inline vm_paddr_t
sg_phys(struct scatterlist *sg)
{
	return sg_page(sg)->phys_addr + sg->offset;
}

/**
 * sg_chain - Chain two sglists together
 * @prv:        First scatterlist
 * @prv_nents:  Number of entries in prv
 * @sgl:        Second scatterlist
 *
 * Description:
 *   Links @prv@ and @sgl@ together, to form a longer scatterlist.
 *
 **/
static inline void
sg_chain(struct scatterlist *prv, unsigned int prv_nents,
					struct scatterlist *sgl)
{
	struct scatterlist *sg = &prv[prv_nents - 1];

	/*
	 * offset and length are unused for a chain entry.  Clear them.
	 */
	sg->offset = 0;
	sg->length = 0;

	/*
	 * Indicate a link pointer, and set the link to the second list.
	 */
	sg->flags = SG_CHAIN;
	sg->sl_un.sg = sgl;
}

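/*
 * Example (illustrative sketch, not part of the original header): chain two
 * statically sized scatterlist arrays.  The last entry of the first array
 * is consumed as the link, so only first[0..6] carry data.
 *
 *	struct scatterlist first[8], second[4];
 *
 *	sg_init_table(first, 8);
 *	sg_init_table(second, 4);
 *	... fill first[0..6] and second[0..3] with sg_set_buf() ...
 *	sg_chain(first, 8, second);
 *
 * sg_next() then walks first[0..6] followed by second[0..3], stopping at
 * second[3], which sg_init_table() marked with SG_END.
 */
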
/**
 * sg_mark_end - Mark the end of the scatterlist
 * @sg:          SG entry
 *
 * Description:
 *   Marks the passed in sg entry as the termination point for the sg
 *   table. A call to sg_next() on this entry will return NULL.
 *
 **/
static inline void sg_mark_end(struct scatterlist *sg)
{
	sg->flags = SG_END;
}

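/*
 * Example (illustrative sketch, not part of the original header): shorten a
 * previously initialized list when fewer entries end up being used.
 * "used" is a hypothetical count supplied by the caller.
 *
 *	sg_init_table(sg, 16);
 *	... fill in only the first "used" entries (used <= 16) ...
 *	sg_mark_end(&sg[used - 1]);
 */
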
/**
 * __sg_free_table - Free a previously mapped sg table
 * @table:      The sg table header to use
 * @max_ents:   The maximum number of entries per single scatterlist
 *
 *  Description:
 *    Free an sg table previously allocated and set up with
 *    __sg_alloc_table().  The @max_ents value must be identical to
 *    that previously used with __sg_alloc_table().
 *
 **/
static inline void
__sg_free_table(struct sg_table *table, unsigned int max_ents)
{
	struct scatterlist *sgl, *next;

	if (unlikely(!table->sgl))
		return;

	sgl = table->sgl;
	while (table->orig_nents) {
		unsigned int alloc_size = table->orig_nents;
		unsigned int sg_size;

		/*
		 * If we have more than max_ents segments left,
		 * then assign 'next' to the sg table after the current one.
		 * sg_size is then one less than alloc size, since the last
		 * element is the chain pointer.
		 */
		if (alloc_size > max_ents) {
			next = sgl[max_ents - 1].sl_un.sg;
			alloc_size = max_ents;
			sg_size = alloc_size - 1;
		} else {
			sg_size = alloc_size;
			next = NULL;
		}

		table->orig_nents -= sg_size;
		kfree(sgl);
		sgl = next;
	}

	table->sgl = NULL;
}

/**
 * sg_free_table - Free a previously allocated sg table
 * @table:      The mapped sg table header
 *
 **/
static inline void
sg_free_table(struct sg_table *table)
{
	__sg_free_table(table, SG_MAX_SINGLE_ALLOC);
}

/**
 * __sg_alloc_table - Allocate and initialize an sg table with given allocator
 * @table:      The sg table header to use
 * @nents:      Number of entries in sg list
 * @max_ents:   The maximum number of entries the allocator returns per call
 * @gfp_mask:   GFP allocation mask
 *
 * Description:
 *   This function returns a @table @nents long. The allocator is
 *   defined to return scatterlist chunks of maximum size @max_ents.
 *   Thus if @nents is bigger than @max_ents, the scatterlists will be
 *   chained in units of @max_ents.
 *
 * Notes:
 *   If this function returns non-0 (e.g. failure), the caller must call
 *   __sg_free_table() to clean up any leftover allocations.
 *
 **/
static inline int
__sg_alloc_table(struct sg_table *table, unsigned int nents,
		unsigned int max_ents, gfp_t gfp_mask)
{
	struct scatterlist *sg, *prv;
	unsigned int left;

	memset(table, 0, sizeof(*table));

	if (nents == 0)
		return -EINVAL;
	left = nents;
	prv = NULL;
	do {
		unsigned int sg_size, alloc_size = left;

		if (alloc_size > max_ents) {
			alloc_size = max_ents;
			sg_size = alloc_size - 1;
		} else
			sg_size = alloc_size;

		left -= sg_size;

		sg = kmalloc(alloc_size * sizeof(struct scatterlist), gfp_mask);
		if (unlikely(!sg)) {
			/*
			 * Adjust entry count to reflect that the last
			 * entry of the previous table won't be used for
			 * linkage.  Without this, sg_kfree() may get
			 * confused.
			 */
			if (prv)
				table->nents = ++table->orig_nents;

			return -ENOMEM;
		}

		sg_init_table(sg, alloc_size);
		table->nents = table->orig_nents += sg_size;

		/*
		 * If this is the first mapping, assign the sg table header.
		 * If this is not the first mapping, chain previous part.
		 */
		if (prv)
			sg_chain(prv, max_ents, sg);
		else
			table->sgl = sg;

		/*
		 * If no more entries after this one, mark the end.
		 */
		if (!left)
			sg_mark_end(&sg[sg_size - 1]);

		prv = sg;
	} while (left);

	return 0;
}

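/*
 * Illustrative walk-through (not from the original header) of the chaining
 * arithmetic above: with max_ents = 8 and nents = 20, the loop allocates
 * chunks of 8, 8 and 6 entries.  The first two chunks give up their last
 * entry as a chain link (7 usable entries each), so 7 + 7 + 6 = 20 usable
 * entries and table->orig_nents ends up as 20.
 */
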
/**
 * sg_alloc_table - Allocate and initialize an sg table
 * @table:      The sg table header to use
 * @nents:      Number of entries in sg list
 * @gfp_mask:   GFP allocation mask
 *
 *  Description:
 *    Allocate and initialize an sg table. If @nents@ is larger than
 *    SG_MAX_SINGLE_ALLOC a chained sg table will be set up.
 *
 **/
static inline int
sg_alloc_table(struct sg_table *table, unsigned int nents, gfp_t gfp_mask)
{
	int ret;

	ret = __sg_alloc_table(table, nents, SG_MAX_SINGLE_ALLOC,
		gfp_mask);
	if (unlikely(ret))
		__sg_free_table(table, SG_MAX_SINGLE_ALLOC);

	return ret;
}

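/*
 * Example (illustrative sketch, not part of the original header): allocate
 * a table, fill it, and free it.  "GFP_KERNEL", "nbufs", "bufs[]" and
 * "lens[]" are assumed to be provided by the caller/environment.
 *
 *	struct sg_table st;
 *	struct scatterlist *sg;
 *	int i, error;
 *
 *	error = sg_alloc_table(&st, nbufs, GFP_KERNEL);
 *	if (error)
 *		return (error);
 *	for_each_sg(st.sgl, sg, nbufs, i)
 *		sg_set_buf(sg, bufs[i], lens[i]);
 *	...
 *	sg_free_table(&st);
 *
 * sg_set_buf() does not touch the flags field, so the SG_END/SG_CHAIN
 * markers placed by sg_alloc_table() are preserved.
 */
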
/*
 * Iterate pages in sg list.
 */
static inline void
_sg_iter_next(struct sg_page_iter *iter)
{
	struct scatterlist *sg;
	unsigned int pgcount;

	sg = iter->sg;
	pgcount = (sg->offset + sg->length + PAGE_SIZE - 1) >> PAGE_SHIFT;

	++iter->sg_pgoffset;
	while (iter->sg_pgoffset >= pgcount) {
		iter->sg_pgoffset -= pgcount;
		sg = sg_next(sg);
		--iter->maxents;
		if (sg == NULL || iter->maxents == 0)
			break;
		pgcount = (sg->offset + sg->length + PAGE_SIZE - 1) >> PAGE_SHIFT;
	}
	iter->sg = sg;
}

/*
 * NOTE: pgoffset is really a page index, not a byte offset.
 */
static inline void
_sg_iter_init(struct scatterlist *sgl, struct sg_page_iter *iter,
	      unsigned int nents, unsigned long pgoffset)
{
	if (nents) {
		/*
		 * Nominal case.  Note subtract 1 from starting page index
		 * for initial _sg_iter_next() call.
		 */
		iter->sg = sgl;
		iter->sg_pgoffset = pgoffset - 1;
		iter->maxents = nents;
		_sg_iter_next(iter);
	} else {
		/*
		 * Degenerate case
		 */
		iter->sg = NULL;
		iter->sg_pgoffset = 0;
		iter->maxents = 0;
	}
}

static inline dma_addr_t
sg_page_iter_dma_address(struct sg_page_iter *spi)
{
	return spi->sg->address + (spi->sg_pgoffset << PAGE_SHIFT);
}

#define	for_each_sg_page(sgl, iter, nents, pgoffset)			\
	for (_sg_iter_init(sgl, iter, nents, pgoffset);			\
	     (iter)->sg; _sg_iter_next(iter))

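/*
 * Example (illustrative sketch, not part of the original header): walk every
 * page covered by a mapped scatterlist and collect its bus address.  "st" is
 * a previously allocated struct sg_table whose entries are assumed to have
 * had their address fields filled in by the DMA mapping code, since
 * sg_page_iter_dma_address() reads sg->address.
 *
 *	struct sg_page_iter iter;
 *	dma_addr_t busaddr;
 *
 *	for_each_sg_page(st.sgl, &iter, st.nents, 0) {
 *		busaddr = sg_page_iter_dma_address(&iter);
 *		... program busaddr into the device ...
 *	}
 */
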
#define	for_each_sg(sglist, sg, sgmax, _itr)				\
	for (_itr = 0, sg = (sglist); _itr < (sgmax); _itr++, sg = sg_next(sg))

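/*
 * Example (illustrative sketch, not part of the original header): sum the
 * total byte length described by a scatterlist, where "st" is a hypothetical
 * struct sg_table filled in by the caller.
 *
 *	struct scatterlist *sg;
 *	size_t total = 0;
 *	int i;
 *
 *	for_each_sg(st.sgl, sg, st.orig_nents, i)
 *		total += sg_dma_len(sg);
 */
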
#endif	/* _LINUX_SCATTERLIST_H_ */