scatterlist.h revision 289624
1/* $FreeBSD: head/sys/ofed/include/linux/scatterlist.h 289624 2015-10-20 11:40:04Z hselasky $ */
2/*-
3 * Copyright (c) 2010 Isilon Systems, Inc.
4 * Copyright (c) 2010 iX Systems, Inc.
5 * Copyright (c) 2010 Panasas, Inc.
6 * Copyright (c) 2013, 2014 Mellanox Technologies, Ltd.
7 * Copyright (c) 2015 Matthew Dillon <dillon@backplane.com>
8 * All rights reserved.
9 *
10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions
12 * are met:
13 * 1. Redistributions of source code must retain the above copyright
14 *    notice unmodified, this list of conditions, and the following
15 *    disclaimer.
16 * 2. Redistributions in binary form must reproduce the above copyright
17 *    notice, this list of conditions and the following disclaimer in the
18 *    documentation and/or other materials provided with the distribution.
19 *
20 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
21 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
22 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
23 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
24 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
25 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
26 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
27 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
28 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
29 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
30 */
31
32#ifndef	_LINUX_SCATTERLIST_H_
33#define	_LINUX_SCATTERLIST_H_
34
35#include <linux/page.h>
36#include <linux/slab.h>
37
/*
 * SG table design.
 *
 * If the SG_CHAIN flag (bit 1) is set, then the sg field contains a
 * pointer to the next sg table list.  Otherwise the next entry is at
 * sg + 1.
 *
 * If the SG_END flag (bit 0) is set, then this sg entry is the last
 * element in a list.
 *
 * See sg_next().
 *
 */
51
/*
 * One scatter/gather entry.  The union holds either the backing page
 * (normal entry) or a pointer to the first entry of the next chunk
 * (chain entry, marked with SG_CHAIN in flags).
 */
struct scatterlist {
	union {
		struct page		*page;	/* backing page (normal entry) */
		struct scatterlist	*sg;	/* next chunk (chain entry) */
	} sl_un;
	dma_addr_t	address;	/* bus/DMA address; see sg_dma_address() */
	unsigned long	offset;		/* byte offset into the page */
	uint32_t	length;		/* byte length of this segment */
	uint32_t	flags;		/* SG_END and/or SG_CHAIN */
};
62
/*
 * Header for a (possibly chained) scatterlist allocated with
 * sg_alloc_table()/__sg_alloc_table().
 */
struct sg_table {
	struct scatterlist *sgl;        /* the list */
	unsigned int nents;             /* number of mapped entries */
	unsigned int orig_nents;        /* original size of list */
};
68
/*
 * Page-granular iterator state; see _sg_iter_init()/_sg_iter_next()
 * and for_each_sg_page().
 */
struct sg_page_iter {
	struct scatterlist	*sg;		/* current sg entry (NULL when done) */
	unsigned int		sg_pgoffset;	/* page index */
	unsigned int		maxents;	/* sg entries remaining to visit */
};
74
/*
 * Maximum number of entries that will be allocated in one piece, if
 * a list larger than this is required then chaining will be utilized.
 */
#define SG_MAX_SINGLE_ALLOC             (PAGE_SIZE / sizeof(struct scatterlist))

/* Field accessors; address/length are filled in by the DMA mapping code. */
#define	sg_dma_address(sg)	(sg)->address
#define	sg_dma_len(sg)		(sg)->length
#define	sg_page(sg)		(sg)->sl_un.page
#define	sg_scatternext(sg)	(sg)->sl_un.sg

/* Values for struct scatterlist flags. */
#define	SG_END		0x01	/* last entry of the list */
#define	SG_CHAIN	0x02	/* entry is a link to the next chunk */
88
89static inline void
90sg_set_page(struct scatterlist *sg, struct page *page, unsigned int len,
91    unsigned int offset)
92{
93	sg_page(sg) = page;
94	sg_dma_len(sg) = len;
95	sg->offset = offset;
96	if (offset > PAGE_SIZE)
97		panic("sg_set_page: Invalid offset %d\n", offset);
98}
99
100static inline void
101sg_set_buf(struct scatterlist *sg, const void *buf, unsigned int buflen)
102{
103	sg_set_page(sg, virt_to_page(buf), buflen,
104	    ((uintptr_t)buf) & (PAGE_SIZE - 1));
105}
106
107static inline void
108sg_init_table(struct scatterlist *sg, unsigned int nents)
109{
110	bzero(sg, sizeof(*sg) * nents);
111	sg[nents - 1].flags = SG_END;
112}
113
114static inline struct scatterlist *
115sg_next(struct scatterlist *sg)
116{
117	if (sg->flags & SG_END)
118		return (NULL);
119	sg++;
120	if (sg->flags & SG_CHAIN)
121		sg = sg_scatternext(sg);
122	return (sg);
123}
124
125static inline vm_paddr_t
126sg_phys(struct scatterlist *sg)
127{
128	return sg_page(sg)->phys_addr + sg->offset;
129}
130
131/**
132 * sg_chain - Chain two sglists together
133 * @prv:        First scatterlist
134 * @prv_nents:  Number of entries in prv
135 * @sgl:        Second scatterlist
136 *
137 * Description:
138 *   Links @prv@ and @sgl@ together, to form a longer scatterlist.
139 *
140 **/
141static inline void
142sg_chain(struct scatterlist *prv, unsigned int prv_nents,
143					struct scatterlist *sgl)
144{
145/*
146 * offset and length are unused for chain entry.  Clear them.
147 */
148	struct scatterlist *sg = &prv[prv_nents - 1];
149
150	sg->offset = 0;
151	sg->length = 0;
152
153	/*
154	 * Indicate a link pointer, and set the link to the second list.
155	 */
156	sg->flags = SG_CHAIN;
157	sg->sl_un.sg = sgl;
158}
159
160/**
161 * sg_mark_end - Mark the end of the scatterlist
162 * @sg:          SG entryScatterlist
163 *
164 * Description:
165 *   Marks the passed in sg entry as the termination point for the sg
166 *   table. A call to sg_next() on this entry will return NULL.
167 *
168 **/
169static inline void sg_mark_end(struct scatterlist *sg)
170{
171        sg->flags = SG_END;
172}
173
174/**
175 * __sg_free_table - Free a previously mapped sg table
176 * @table:      The sg table header to use
177 * @max_ents:   The maximum number of entries per single scatterlist
178 *
179 *  Description:
180 *    Free an sg table previously allocated and setup with
181 *    __sg_alloc_table().  The @max_ents value must be identical to
182 *    that previously used with __sg_alloc_table().
183 *
184 **/
185static inline void
186__sg_free_table(struct sg_table *table, unsigned int max_ents)
187{
188	struct scatterlist *sgl, *next;
189
190	if (unlikely(!table->sgl))
191		return;
192
193	sgl = table->sgl;
194	while (table->orig_nents) {
195		unsigned int alloc_size = table->orig_nents;
196		unsigned int sg_size;
197
198		/*
199		 * If we have more than max_ents segments left,
200		 * then assign 'next' to the sg table after the current one.
201		 * sg_size is then one less than alloc size, since the last
202		 * element is the chain pointer.
203		 */
204		if (alloc_size > max_ents) {
205			next = sgl[max_ents - 1].sl_un.sg;
206			alloc_size = max_ents;
207			sg_size = alloc_size - 1;
208		} else {
209			sg_size = alloc_size;
210			next = NULL;
211		}
212
213		table->orig_nents -= sg_size;
214		kfree(sgl);
215		sgl = next;
216	}
217
218	table->sgl = NULL;
219}
220
221/**
222 * sg_free_table - Free a previously allocated sg table
223 * @table:      The mapped sg table header
224 *
225 **/
226static inline void
227sg_free_table(struct sg_table *table)
228{
229	__sg_free_table(table, SG_MAX_SINGLE_ALLOC);
230}
231
/**
 * __sg_alloc_table - Allocate and initialize an sg table with given allocator
 * @table:      The sg table header to use
 * @nents:      Number of entries in sg list
 * @max_ents:   The maximum number of entries the allocator returns per call
 * @gfp_mask:   GFP allocation mask
 *
 * Description:
 *   This function returns a @table @nents long. The allocator is
 *   defined to return scatterlist chunks of maximum size @max_ents.
 *   Thus if @nents is bigger than @max_ents, the scatterlists will be
 *   chained in units of @max_ents (each full chunk sacrifices its last
 *   entry as the chain pointer, hence holds max_ents - 1 real entries).
 *
 * Notes:
 *   If this function returns non-0 (eg failure), the caller must call
 *   __sg_free_table() to cleanup any leftover allocations.
 *
 * Returns 0 on success, -EINVAL for @nents == 0, -ENOMEM when a chunk
 * allocation fails.
 **/
static inline int
__sg_alloc_table(struct sg_table *table, unsigned int nents,
		unsigned int max_ents, gfp_t gfp_mask)
{
	struct scatterlist *sg, *prv;
	unsigned int left;

	memset(table, 0, sizeof(*table));

	if (nents == 0)
		return -EINVAL;
	left = nents;
	prv = NULL;	/* previously allocated chunk, to be chained to */
	do {
		unsigned int sg_size, alloc_size = left;

		/*
		 * A full chunk reserves its last slot for the chain
		 * pointer, so only sg_size = alloc_size - 1 entries
		 * carry data; the final chunk uses every slot.
		 */
		if (alloc_size > max_ents) {
			alloc_size = max_ents;
			sg_size = alloc_size - 1;
		} else
			sg_size = alloc_size;

		left -= sg_size;

		sg = kmalloc(alloc_size * sizeof(struct scatterlist), gfp_mask);
		if (unlikely(!sg)) {
		/*
		 * Adjust entry count to reflect that the last
		 * entry of the previous table won't be used for
		 * linkage.  Without this, sg_kfree() may get
		 * confused.
		 */
			if (prv)
				table->nents = ++table->orig_nents;

			return -ENOMEM;
		}

		/* Zero the chunk and mark its last slot with SG_END. */
		sg_init_table(sg, alloc_size);
		table->nents = table->orig_nents += sg_size;

		/*
		 * If this is the first mapping, assign the sg table header.
		 * If this is not the first mapping, chain previous part.
		 */
		if (prv)
			sg_chain(prv, max_ents, sg);
		else
			table->sgl = sg;

		/*
		 * If no more entries after this one, mark the end
		 */
		if (!left)
			sg_mark_end(&sg[sg_size - 1]);

		prv = sg;
	} while (left);

	return 0;
}
311
312/**
313 * sg_alloc_table - Allocate and initialize an sg table
314 * @table:      The sg table header to use
315 * @nents:      Number of entries in sg list
316 * @gfp_mask:   GFP allocation mask
317 *
318 *  Description:
319 *    Allocate and initialize an sg table. If @nents@ is larger than
320 *    SG_MAX_SINGLE_ALLOC a chained sg table will be setup.
321 *
322 **/
323
324static inline int
325sg_alloc_table(struct sg_table *table, unsigned int nents, gfp_t gfp_mask)
326{
327	int ret;
328
329	ret = __sg_alloc_table(table, nents, SG_MAX_SINGLE_ALLOC,
330		gfp_mask);
331	if (unlikely(ret))
332		__sg_free_table(table, SG_MAX_SINGLE_ALLOC);
333
334	return ret;
335}
336
337/*
338 * Iterate pages in sg list.
339 */
340static inline void
341_sg_iter_next(struct sg_page_iter *iter)
342{
343	struct scatterlist *sg;
344	unsigned int pgcount;
345
346	sg = iter->sg;
347	pgcount = (sg->offset + sg->length + PAGE_SIZE - 1) >> PAGE_SHIFT;
348
349	++iter->sg_pgoffset;
350	while (iter->sg_pgoffset >= pgcount) {
351		iter->sg_pgoffset -= pgcount;
352		sg = sg_next(sg);
353		--iter->maxents;
354		if (sg == NULL || iter->maxents == 0)
355			break;
356		pgcount = (sg->offset + sg->length + PAGE_SIZE - 1) >> PAGE_SHIFT;
357	}
358	iter->sg = sg;
359}
360
361/*
362 * NOTE: pgoffset is really a page index, not a byte offset.
363 */
364static inline void
365_sg_iter_init(struct scatterlist *sgl, struct sg_page_iter *iter,
366	      unsigned int nents, unsigned long pgoffset)
367{
368	if (nents) {
369		/*
370		 * Nominal case.  Note subtract 1 from starting page index
371		 * for initial _sg_iter_next() call.
372		 */
373		iter->sg = sgl;
374		iter->sg_pgoffset = pgoffset - 1;
375		iter->maxents = nents;
376		_sg_iter_next(iter);
377	} else {
378		/*
379		 * Degenerate case
380		 */
381		iter->sg = NULL;
382		iter->sg_pgoffset = 0;
383		iter->maxents = 0;
384	}
385}
386
387static inline dma_addr_t
388sg_page_iter_dma_address(struct sg_page_iter *spi)
389{
390	return spi->sg->address + (spi->sg_pgoffset << PAGE_SHIFT);
391}
392
/*
 * Iterate over the individual pages covered by the first @nents entries
 * of an sg list; @pgoffset is a starting page index (see _sg_iter_init()).
 */
#define	for_each_sg_page(sgl, iter, nents, pgoffset)			\
	for (_sg_iter_init(sgl, iter, nents, pgoffset);			\
	     (iter)->sg; _sg_iter_next(iter))

/*
 * Iterate over the entries of an sg list; assumes at least @sgmax
 * entries are reachable via sg_next().
 */
#define	for_each_sg(sglist, sg, sgmax, _itr)				\
	for (_itr = 0, sg = (sglist); _itr < (sgmax); _itr++, sg = sg_next(sg))
399
400#endif	/* _LINUX_SCATTERLIST_H_ */
401