scatterlist.h revision 273135
/*-
 * Copyright (c) 2010 Isilon Systems, Inc.
 * Copyright (c) 2010 iX Systems, Inc.
 * Copyright (c) 2010 Panasas, Inc.
 * Copyright (c) 2013, 2014 Mellanox Technologies, Ltd.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef	_LINUX_SCATTERLIST_H_
#define	_LINUX_SCATTERLIST_H_

#include <linux/page.h>
#include <linux/slab.h>

/*
 * SG table design.
 *
 * If an entry has the SG_CHAIN flag set, its sg field contains a pointer
 * to the first entry of the next scatterlist chunk; otherwise the next
 * entry is simply at sg + 1.
 *
 * If an entry has the SG_END flag set, it is the last element in the list.
 *
 * See sg_next().
 */

struct scatterlist {
	union {
		struct page		*page;
		struct scatterlist	*sg;
	} sl_un;
	dma_addr_t	address;
	unsigned long	offset;
	uint32_t	length;
	uint32_t	flags;
};

struct sg_table {
	struct scatterlist *sgl;        /* the list */
	unsigned int nents;             /* number of mapped entries */
	unsigned int orig_nents;        /* original size of list */
};

/*
 * Maximum number of entries that will be allocated in one piece; if a
 * list larger than this is required, then chaining will be utilized.
 */
#define SG_MAX_SINGLE_ALLOC             (PAGE_SIZE / sizeof(struct scatterlist))
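
/*
 * Illustrative sizing, not part of the original header: on a typical LP64
 * platform with 4 KB pages, sizeof(struct scatterlist) is 32 bytes (an
 * 8-byte union, an 8-byte dma_addr_t, an 8-byte offset and two 4-byte
 * fields), so SG_MAX_SINGLE_ALLOC works out to 4096 / 32 = 128 entries per
 * allocation.  Longer lists are built by chaining such chunks together.
 */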

#define	sg_dma_address(sg)	(sg)->address
#define	sg_dma_len(sg)		(sg)->length
#define	sg_page(sg)		(sg)->sl_un.page
#define	sg_scatternext(sg)	(sg)->sl_un.sg

#define	SG_END		0x01
#define	SG_CHAIN	0x02

static inline void
sg_set_page(struct scatterlist *sg, struct page *page, unsigned int len,
    unsigned int offset)
{
	sg_page(sg) = page;
	sg_dma_len(sg) = len;
	sg->offset = offset;
	if (offset > PAGE_SIZE)
		panic("sg_set_page: Invalid offset %d\n", offset);
}

static inline void
sg_set_buf(struct scatterlist *sg, const void *buf, unsigned int buflen)
{
	/* Use the offset of "buf" within its backing page. */
	sg_set_page(sg, virt_to_page(buf), buflen,
	    ((uintptr_t)buf) & (PAGE_SIZE - 1));
}

static inline void
sg_init_table(struct scatterlist *sg, unsigned int nents)
{
	bzero(sg, sizeof(*sg) * nents);
	sg[nents - 1].flags = SG_END;
}
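
/*
 * Illustrative usage sketch, not part of the original header.  The buffer
 * name and length below are hypothetical; any kernel virtual buffer works.
 *
 *	struct scatterlist sg[1];
 *
 *	sg_init_table(sg, 1);
 *	sg_set_buf(&sg[0], data_buf, data_len);
 */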

static inline struct scatterlist *
sg_next(struct scatterlist *sg)
{
	if (sg->flags & SG_END)
		return (NULL);
	sg++;
	if (sg->flags & SG_CHAIN)
		sg = sg_scatternext(sg);
	return (sg);
}
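
/*
 * Illustrative walk, not part of the original header: visit every entry of
 * a possibly chained list, stopping at the entry marked SG_END.  "sgl" and
 * "consume" are placeholders for the caller's list head and handler.
 *
 *	struct scatterlist *s;
 *
 *	for (s = sgl; s != NULL; s = sg_next(s))
 *		consume(sg_page(s), s->offset, sg_dma_len(s));
 */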

static inline vm_paddr_t
sg_phys(struct scatterlist *sg)
{
	return (sg_page(sg)->phys_addr + sg->offset);
}

/**
 * sg_chain - Chain two sglists together
 * @prv:        First scatterlist
 * @prv_nents:  Number of entries in prv
 * @sgl:        Second scatterlist
 *
 * Description:
 *   Links @prv@ and @sgl@ together, to form a longer scatterlist.
 *
 **/
static inline void
sg_chain(struct scatterlist *prv, unsigned int prv_nents,
    struct scatterlist *sgl)
{
	struct scatterlist *sg = &prv[prv_nents - 1];

	/*
	 * offset and length are unused for a chain entry.  Clear them.
	 */
	sg->offset = 0;
	sg->length = 0;

	/*
	 * Indicate a link pointer, and set the link to the second list.
	 */
	sg->flags = SG_CHAIN;
	sg->sl_un.sg = sgl;
}
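
/*
 * Illustrative chaining sketch, not part of the original header: join two
 * fixed-size arrays into one logical list.  Note that the last entry of the
 * first array is consumed by the link and no longer describes data.
 *
 *	struct scatterlist first[8], second[4];
 *
 *	sg_init_table(first, 8);
 *	sg_init_table(second, 4);
 *	sg_chain(first, 8, second);
 *
 * After the call, first[7] is the link entry pointing at second[0], leaving
 * 7 + 4 = 11 entries available for data.
 */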

/**
 * sg_mark_end - Mark the end of the scatterlist
 * @sg:          SG entry
 *
 * Description:
 *   Marks the passed in sg entry as the termination point for the sg
 *   table. A call to sg_next() on this entry will return NULL.
 *
 **/
static inline void
sg_mark_end(struct scatterlist *sg)
{
	sg->flags = SG_END;
}
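
/*
 * Illustrative sketch, not part of the original header: shorten a list that
 * was initialized for more entries than were actually filled in.  "n" is a
 * placeholder for the number of entries in use (1 <= n <= 16).
 *
 *	struct scatterlist sg[16];
 *
 *	sg_init_table(sg, 16);
 *	... fill in the first n entries ...
 *	sg_mark_end(&sg[n - 1]);
 */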

/**
 * __sg_free_table - Free a previously mapped sg table
 * @table:      The sg table header to use
 * @max_ents:   The maximum number of entries per single scatterlist
 *
 *  Description:
 *    Free an sg table previously allocated and set up with
 *    __sg_alloc_table().  The @max_ents value must be identical to
 *    that previously used with __sg_alloc_table().
 *
 **/
static inline void
__sg_free_table(struct sg_table *table, unsigned int max_ents)
{
	struct scatterlist *sgl, *next;

	if (unlikely(!table->sgl))
		return;

	sgl = table->sgl;
	while (table->orig_nents) {
		unsigned int alloc_size = table->orig_nents;
		unsigned int sg_size;

		/*
		 * If we have more than max_ents segments left,
		 * then assign 'next' to the sg table after the current one.
		 * sg_size is then one less than alloc size, since the last
		 * element is the chain pointer.
		 */
		if (alloc_size > max_ents) {
			next = sgl[max_ents - 1].sl_un.sg;
			alloc_size = max_ents;
			sg_size = alloc_size - 1;
		} else {
			sg_size = alloc_size;
			next = NULL;
		}

		table->orig_nents -= sg_size;
		kfree(sgl);
		sgl = next;
	}

	table->sgl = NULL;
}

/**
 * sg_free_table - Free a previously allocated sg table
 * @table:      The mapped sg table header
 *
 **/
static inline void
sg_free_table(struct sg_table *table)
{
	__sg_free_table(table, SG_MAX_SINGLE_ALLOC);
}

/**
 * __sg_alloc_table - Allocate and initialize an sg table with given allocator
 * @table:      The sg table header to use
 * @nents:      Number of entries in sg list
 * @max_ents:   The maximum number of entries the allocator returns per call
 * @gfp_mask:   GFP allocation mask
 *
 * Description:
 *   This function returns a @table @nents long. The allocator is
 *   defined to return scatterlist chunks of maximum size @max_ents.
 *   Thus if @nents is bigger than @max_ents, the scatterlists will be
 *   chained in units of @max_ents.
 *
 * Notes:
 *   If this function returns non-0 (e.g., failure), the caller must call
 *   __sg_free_table() to clean up any leftover allocations.
 *
 **/
static inline int
__sg_alloc_table(struct sg_table *table, unsigned int nents,
    unsigned int max_ents, gfp_t gfp_mask)
{
	struct scatterlist *sg, *prv;
	unsigned int left;

	memset(table, 0, sizeof(*table));

	if (nents == 0)
		return (-EINVAL);
	left = nents;
	prv = NULL;
	do {
		unsigned int sg_size, alloc_size = left;

		if (alloc_size > max_ents) {
			alloc_size = max_ents;
			sg_size = alloc_size - 1;
		} else
			sg_size = alloc_size;

		left -= sg_size;

		sg = kmalloc(alloc_size * sizeof(struct scatterlist), gfp_mask);
		if (unlikely(!sg)) {
			/*
			 * Adjust entry count to reflect that the last
			 * entry of the previous table won't be used for
			 * linkage.  Without this, __sg_free_table() may
			 * get confused.
			 */
			if (prv)
				table->nents = ++table->orig_nents;

			return (-ENOMEM);
		}

		sg_init_table(sg, alloc_size);
		table->nents = table->orig_nents += sg_size;

		/*
		 * If this is the first mapping, assign the sg table header.
		 * If this is not the first mapping, chain previous part.
		 */
		if (prv)
			sg_chain(prv, max_ents, sg);
		else
			table->sgl = sg;

		/*
		 * If no more entries after this one, mark the end.
		 */
		if (!left)
			sg_mark_end(&sg[sg_size - 1]);

		prv = sg;
	} while (left);

	return (0);
}
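
/*
 * Worked example, illustrative and not part of the original header: with
 * nents = 200 and max_ents = 128, the first pass allocates a 128-entry
 * chunk in which 127 entries hold data and the last one becomes the link,
 * and the second pass allocates a 73-entry chunk for the remaining
 * entries, the last of which is marked with SG_END.  orig_nents ends up
 * at 127 + 73 = 200.
 */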

/**
 * sg_alloc_table - Allocate and initialize an sg table
 * @table:      The sg table header to use
 * @nents:      Number of entries in sg list
 * @gfp_mask:   GFP allocation mask
 *
 *  Description:
 *    Allocate and initialize an sg table. If @nents@ is larger than
 *    SG_MAX_SINGLE_ALLOC a chained sg table will be set up.
 *
 **/
static inline int
sg_alloc_table(struct sg_table *table, unsigned int nents, gfp_t gfp_mask)
{
	int ret;

	ret = __sg_alloc_table(table, nents, SG_MAX_SINGLE_ALLOC,
	    gfp_mask);
	if (unlikely(ret))
		__sg_free_table(table, SG_MAX_SINGLE_ALLOC);

	return (ret);
}

#define	for_each_sg(sglist, sg, sgmax, _itr)				\
	for (_itr = 0, sg = (sglist); _itr < (sgmax); _itr++, sg = sg_next(sg))
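
/*
 * Illustrative end-to-end usage, not part of the original header.  The
 * buffer array "bufs" and its per-element length "len" are hypothetical,
 * and error handling is reduced to the essentials.
 *
 *	struct sg_table table;
 *	struct scatterlist *sg;
 *	int i;
 *
 *	if (sg_alloc_table(&table, 4, GFP_KERNEL))
 *		return (-ENOMEM);
 *	for_each_sg(table.sgl, sg, 4, i)
 *		sg_set_buf(sg, bufs[i], len);
 *	... use the table, e.g. sg_dma_address()/sg_dma_len() per entry ...
 *	sg_free_table(&table);
 */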

#endif	/* _LINUX_SCATTERLIST_H_ */