/*
 * This file and its contents are supplied under the terms of the
 * Common Development and Distribution License ("CDDL"), version 1.0.
 * You may only use this file in accordance with the terms of version
 * 1.0 of the CDDL.
 *
 * A full copy of the text of the CDDL should have accompanied this
 * source.  A copy of the CDDL is also available via the Internet at
 * http://www.illumos.org/license/CDDL.
 */

/*
 * Copyright (c) 2014 by Chunwei Chen. All rights reserved.
 * Copyright (c) 2016 by Delphix. All rights reserved.
 */

/*
 * ARC buffer data (ABD).
 *
 * ABDs are an abstract data structure for the ARC which can use two
 * different ways of storing the underlying data:
 *
 * (a) Linear buffer. In this case, all the data in the ABD is stored in one
 *     contiguous buffer in memory (from a zio_[data_]buf_* kmem cache).
 *
 *         +-------------------+
 *         | ABD (linear)      |
 *         |   abd_flags = ... |
 *         |   abd_size = ...  |     +--------------------------------+
 *         |   abd_buf ------------->| raw buffer of size abd_size    |
 *         +-------------------+     +--------------------------------+
 *              no abd_chunks
 *
 * (b) Scattered buffer. In this case, the data in the ABD is split into
 *     equal-sized chunks (from the abd_chunk_cache kmem_cache), with pointers
 *     to the chunks recorded in an array at the end of the ABD structure.
 *
 *         +-------------------+
 *         | ABD (scattered)   |
 *         |   abd_flags = ... |
 *         |   abd_size = ...  |
 *         |   abd_offset = 0  |                           +-----------+
 *         |   abd_chunks[0] ----------------------------->| chunk 0   |
 *         |   abd_chunks[1] ---------------------+        +-----------+
 *         |   ...             |                  |        +-----------+
 *         |   abd_chunks[N-1] ---------+         +------->| chunk 1   |
 *         +-------------------+        |                  +-----------+
 *                                      |                      ...
 *                                      |                  +-----------+
 *                                      +----------------->| chunk N-1 |
 *                                                         +-----------+
 *
 * Using a large proportion of scattered ABDs decreases ARC fragmentation: when
 * we are at the limit of allocatable space, equal-size chunks let us quickly
 * reclaim enough space for a new large allocation (assuming it is also
 * scattered).
 *
 * In addition to directly allocating a linear or scattered ABD, it is also
 * possible to create an ABD by requesting the "sub-ABD" starting at an offset
 * within an existing ABD. In linear buffers this is simple (set abd_buf of
 * the new ABD to the starting point within the original raw buffer), but
 * scattered ABDs are a little more complex. The new ABD makes a copy of the
 * relevant abd_chunks pointers (but not the underlying data). However, to
 * provide arbitrary rather than only chunk-aligned starting offsets, it also
 * tracks an abd_offset field which represents the starting point of the data
 * within the first chunk in abd_chunks. For both linear and scattered ABDs,
 * creating an offset ABD marks the original ABD as the offset's parent, and the
 * original ABD's abd_children refcount is incremented. This data allows us to
 * ensure the root ABD isn't deleted before its children.
 *
 * Most consumers should never need to know what type of ABD they're using --
 * the ABD public API ensures that it's possible to transparently switch from
 * using a linear ABD to a scattered one when doing so would be beneficial.
 *
 * If you need to use the data within an ABD directly and you know it's linear
 * (because you allocated it), you can use abd_to_buf() to access the underlying
 * raw buffer. Otherwise, use one of the abd_borrow_buf* functions, which will
 * allocate a raw buffer if necessary. When you're done with a borrowed raw
 * buffer, use the matching abd_return_buf* function to give it back.
 *
 * There are a variety of ABD APIs that implement basic buffer operations:
 * compare, copy, read, write, and fill with zeroes. If you need a custom
 * function which progressively accesses the whole ABD, use the abd_iterate_*
 * functions.
 */
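
/*
 * For example, a consumer that doesn't care about the underlying layout
 * might exercise the public API as follows (an illustrative sketch only,
 * not part of this file's interface; "src" and "size" are assumed to be
 * supplied by the caller):
 *
 *	abd_t *abd = abd_alloc(size, B_FALSE);
 *	abd_copy_from_buf(abd, src, size);
 *	ASSERT0(abd_cmp_buf(abd, src, size));
 *	abd_free(abd);
 */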

#include <sys/abd.h>
#include <sys/param.h>
#include <sys/zio.h>
#include <sys/zfs_context.h>
#include <sys/zfs_znode.h>

typedef struct abd_stats {
	kstat_named_t abdstat_struct_size;
	kstat_named_t abdstat_scatter_cnt;
	kstat_named_t abdstat_scatter_data_size;
	kstat_named_t abdstat_scatter_chunk_waste;
	kstat_named_t abdstat_linear_cnt;
	kstat_named_t abdstat_linear_data_size;
} abd_stats_t;

static abd_stats_t abd_stats = {
	/* Amount of memory occupied by all of the abd_t struct allocations */
	{ "struct_size",			KSTAT_DATA_UINT64 },
	/*
	 * The number of scatter ABDs which are currently allocated, excluding
	 * ABDs which don't own their data (for instance the ones which were
	 * allocated through abd_get_offset()).
	 */
	{ "scatter_cnt",			KSTAT_DATA_UINT64 },
	/* Amount of data stored in all scatter ABDs tracked by scatter_cnt */
	{ "scatter_data_size",			KSTAT_DATA_UINT64 },
	/*
	 * The amount of space wasted at the end of the last chunk across all
	 * scatter ABDs tracked by scatter_cnt.
	 */
	{ "scatter_chunk_waste",		KSTAT_DATA_UINT64 },
	/*
	 * The number of linear ABDs which are currently allocated, excluding
	 * ABDs which don't own their data (for instance the ones which were
	 * allocated through abd_get_offset() and abd_get_from_buf()). If an
	 * ABD takes ownership of its buf then it will become tracked.
	 */
	{ "linear_cnt",				KSTAT_DATA_UINT64 },
	/* Amount of data stored in all linear ABDs tracked by linear_cnt */
	{ "linear_data_size",			KSTAT_DATA_UINT64 },
};

#define	ABDSTAT(stat)		(abd_stats.stat.value.ui64)
#define	ABDSTAT_INCR(stat, val) \
	atomic_add_64(&abd_stats.stat.value.ui64, (val))
#define	ABDSTAT_BUMP(stat)	ABDSTAT_INCR(stat, 1)
#define	ABDSTAT_BUMPDOWN(stat)	ABDSTAT_INCR(stat, -1)

/*
 * It is possible to make all future ABDs be linear by setting this to B_FALSE.
 * Otherwise, ABDs are allocated scattered by default unless the caller uses
 * abd_alloc_linear().
 */
boolean_t zfs_abd_scatter_enabled = B_TRUE;

/*
 * The size of the chunks ABD allocates. Because the sizes allocated from the
 * kmem_cache can't change, this tunable can only be modified at boot. Changing
 * it at runtime would cause ABD iteration to work incorrectly for ABDs which
 * were allocated with the old size, so a safeguard has been put in place which
 * will cause the machine to panic if you change it and try to access the data
 * within a scattered ABD.
 */
size_t zfs_abd_chunk_size = 4096;

#if defined(__FreeBSD__) && defined(_KERNEL)
SYSCTL_DECL(_vfs_zfs);

SYSCTL_INT(_vfs_zfs, OID_AUTO, abd_scatter_enabled, CTLFLAG_RWTUN,
    &zfs_abd_scatter_enabled, 0, "Enable scattered ARC data buffers");
SYSCTL_ULONG(_vfs_zfs, OID_AUTO, abd_chunk_size, CTLFLAG_RDTUN,
    &zfs_abd_chunk_size, 0, "The size of the chunks ABD allocates");
#endif

#ifdef _KERNEL
extern vmem_t *zio_alloc_arena;
#endif

kmem_cache_t *abd_chunk_cache;
static kstat_t *abd_ksp;

extern inline boolean_t abd_is_linear(abd_t *abd);
extern inline void abd_copy(abd_t *dabd, abd_t *sabd, size_t size);
extern inline void abd_copy_from_buf(abd_t *abd, const void *buf, size_t size);
extern inline void abd_copy_to_buf(void *buf, abd_t *abd, size_t size);
extern inline int abd_cmp_buf(abd_t *abd, const void *buf, size_t size);
extern inline void abd_zero(abd_t *abd, size_t size);

static void *
abd_alloc_chunk(void)
{
	void *c = kmem_cache_alloc(abd_chunk_cache, KM_PUSHPAGE);
	ASSERT3P(c, !=, NULL);
	return (c);
}

static void
abd_free_chunk(void *c)
{
	kmem_cache_free(abd_chunk_cache, c);
}

void
abd_init(void)
{
#ifdef illumos
	vmem_t *data_alloc_arena = NULL;

#ifdef _KERNEL
	data_alloc_arena = zio_alloc_arena;
#endif

	/*
	 * Since ABD chunks do not appear in crash dumps, we pass KMC_NOTOUCH
	 * so that no allocator metadata is stored with the buffers.
	 */
	abd_chunk_cache = kmem_cache_create("abd_chunk", zfs_abd_chunk_size, 0,
	    NULL, NULL, NULL, NULL, data_alloc_arena, KMC_NOTOUCH);
#else
	abd_chunk_cache = kmem_cache_create("abd_chunk", zfs_abd_chunk_size, 0,
	    NULL, NULL, NULL, NULL, 0, KMC_NOTOUCH | KMC_NODEBUG);
#endif
	abd_ksp = kstat_create("zfs", 0, "abdstats", "misc", KSTAT_TYPE_NAMED,
	    sizeof (abd_stats) / sizeof (kstat_named_t), KSTAT_FLAG_VIRTUAL);
	if (abd_ksp != NULL) {
		abd_ksp->ks_data = &abd_stats;
		kstat_install(abd_ksp);
	}
}

void
abd_fini(void)
{
	if (abd_ksp != NULL) {
		kstat_delete(abd_ksp);
		abd_ksp = NULL;
	}

	kmem_cache_destroy(abd_chunk_cache);
	abd_chunk_cache = NULL;
}

static inline size_t
abd_chunkcnt_for_bytes(size_t size)
{
	return (P2ROUNDUP(size, zfs_abd_chunk_size) / zfs_abd_chunk_size);
}
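
/*
 * For example, with the default zfs_abd_chunk_size of 4096, a 6000-byte ABD
 * needs P2ROUNDUP(6000, 4096) / 4096 = 8192 / 4096 = 2 chunks.
 */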

static inline size_t
abd_scatter_chunkcnt(abd_t *abd)
{
	ASSERT(!abd_is_linear(abd));
	return (abd_chunkcnt_for_bytes(
	    abd->abd_u.abd_scatter.abd_offset + abd->abd_size));
}

static inline void
abd_verify(abd_t *abd)
{
	ASSERT3U(abd->abd_size, >, 0);
	ASSERT3U(abd->abd_size, <=, SPA_MAXBLOCKSIZE);
	ASSERT3U(abd->abd_flags, ==, abd->abd_flags & (ABD_FLAG_LINEAR |
	    ABD_FLAG_OWNER | ABD_FLAG_META));
	IMPLY(abd->abd_parent != NULL, !(abd->abd_flags & ABD_FLAG_OWNER));
	IMPLY(abd->abd_flags & ABD_FLAG_META, abd->abd_flags & ABD_FLAG_OWNER);
	if (abd_is_linear(abd)) {
		ASSERT3P(abd->abd_u.abd_linear.abd_buf, !=, NULL);
	} else {
		ASSERT3U(abd->abd_u.abd_scatter.abd_offset, <,
		    zfs_abd_chunk_size);
		size_t n = abd_scatter_chunkcnt(abd);
		for (int i = 0; i < n; i++) {
			ASSERT3P(
			    abd->abd_u.abd_scatter.abd_chunks[i], !=, NULL);
		}
	}
}

static inline abd_t *
abd_alloc_struct(size_t chunkcnt)
{
	size_t size = offsetof(abd_t, abd_u.abd_scatter.abd_chunks[chunkcnt]);
	abd_t *abd = kmem_alloc(size, KM_PUSHPAGE);
	ASSERT3P(abd, !=, NULL);
	ABDSTAT_INCR(abdstat_struct_size, size);

	return (abd);
}

static inline void
abd_free_struct(abd_t *abd)
{
	size_t chunkcnt = abd_is_linear(abd) ? 0 : abd_scatter_chunkcnt(abd);
	int size = offsetof(abd_t, abd_u.abd_scatter.abd_chunks[chunkcnt]);
	kmem_free(abd, size);
	ABDSTAT_INCR(abdstat_struct_size, -size);
}

/*
 * Allocate an ABD, along with its own underlying data buffers. Use this if you
 * don't care whether the ABD is linear or not.
 */
abd_t *
abd_alloc(size_t size, boolean_t is_metadata)
{
	if (!zfs_abd_scatter_enabled)
		return (abd_alloc_linear(size, is_metadata));

	VERIFY3U(size, <=, SPA_MAXBLOCKSIZE);

	size_t n = abd_chunkcnt_for_bytes(size);
	abd_t *abd = abd_alloc_struct(n);

	abd->abd_flags = ABD_FLAG_OWNER;
	if (is_metadata) {
		abd->abd_flags |= ABD_FLAG_META;
	}
	abd->abd_size = size;
	abd->abd_parent = NULL;
	refcount_create(&abd->abd_children);

	abd->abd_u.abd_scatter.abd_offset = 0;
	abd->abd_u.abd_scatter.abd_chunk_size = zfs_abd_chunk_size;

	for (int i = 0; i < n; i++) {
		void *c = abd_alloc_chunk();
		ASSERT3P(c, !=, NULL);
		abd->abd_u.abd_scatter.abd_chunks[i] = c;
	}

	ABDSTAT_BUMP(abdstat_scatter_cnt);
	ABDSTAT_INCR(abdstat_scatter_data_size, size);
	ABDSTAT_INCR(abdstat_scatter_chunk_waste,
	    n * zfs_abd_chunk_size - size);

	return (abd);
}
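
/*
 * For example, abd_alloc(6000, B_FALSE) with the default 4096-byte chunks
 * allocates two chunks (8192 bytes), so abdstat_scatter_data_size grows by
 * 6000 and abdstat_scatter_chunk_waste by 8192 - 6000 = 2192.
 */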

static void
abd_free_scatter(abd_t *abd)
{
	size_t n = abd_scatter_chunkcnt(abd);
	for (int i = 0; i < n; i++) {
		abd_free_chunk(abd->abd_u.abd_scatter.abd_chunks[i]);
	}

	refcount_destroy(&abd->abd_children);
	ABDSTAT_BUMPDOWN(abdstat_scatter_cnt);
	ABDSTAT_INCR(abdstat_scatter_data_size, -(int)abd->abd_size);
	ABDSTAT_INCR(abdstat_scatter_chunk_waste,
	    abd->abd_size - n * zfs_abd_chunk_size);

	abd_free_struct(abd);
}

/*
 * Allocate an ABD that must be linear, along with its own underlying data
 * buffer. Only use this when it would be very annoying to write your ABD
 * consumer with a scattered ABD.
 */
abd_t *
abd_alloc_linear(size_t size, boolean_t is_metadata)
{
	abd_t *abd = abd_alloc_struct(0);

	VERIFY3U(size, <=, SPA_MAXBLOCKSIZE);

	abd->abd_flags = ABD_FLAG_LINEAR | ABD_FLAG_OWNER;
	if (is_metadata) {
		abd->abd_flags |= ABD_FLAG_META;
	}
	abd->abd_size = size;
	abd->abd_parent = NULL;
	refcount_create(&abd->abd_children);

	if (is_metadata) {
		abd->abd_u.abd_linear.abd_buf = zio_buf_alloc(size);
	} else {
		abd->abd_u.abd_linear.abd_buf = zio_data_buf_alloc(size);
	}

	ABDSTAT_BUMP(abdstat_linear_cnt);
	ABDSTAT_INCR(abdstat_linear_data_size, size);

	return (abd);
}

static void
abd_free_linear(abd_t *abd)
{
	if (abd->abd_flags & ABD_FLAG_META) {
		zio_buf_free(abd->abd_u.abd_linear.abd_buf, abd->abd_size);
	} else {
		zio_data_buf_free(abd->abd_u.abd_linear.abd_buf, abd->abd_size);
	}

	refcount_destroy(&abd->abd_children);
	ABDSTAT_BUMPDOWN(abdstat_linear_cnt);
	ABDSTAT_INCR(abdstat_linear_data_size, -(int)abd->abd_size);

	abd_free_struct(abd);
}

/*
 * Free an ABD. Only use this on ABDs allocated with abd_alloc() or
 * abd_alloc_linear().
 */
void
abd_free(abd_t *abd)
{
	abd_verify(abd);
	ASSERT3P(abd->abd_parent, ==, NULL);
	ASSERT(abd->abd_flags & ABD_FLAG_OWNER);
	if (abd_is_linear(abd))
		abd_free_linear(abd);
	else
		abd_free_scatter(abd);
}

/*
 * Allocate an ABD of the same format (same metadata flag, same scattered or
 * linear layout) as another ABD.
 */
abd_t *
abd_alloc_sametype(abd_t *sabd, size_t size)
{
	boolean_t is_metadata = (sabd->abd_flags & ABD_FLAG_META) != 0;
	if (abd_is_linear(sabd)) {
		return (abd_alloc_linear(size, is_metadata));
	} else {
		return (abd_alloc(size, is_metadata));
	}
}

/*
 * If we're going to use this ABD for doing I/O using the block layer, the
 * consumer of the ABD data doesn't care if it's scattered or not, and we don't
 * plan to store this ABD in memory for a long period of time, then we should
 * allocate the ABD type that requires the least data copying to do the I/O.
 *
 * Currently this is linear ABDs; however, if ldi_strategy() can ever issue
 * I/Os using a scatter/gather list, we should switch to that and replace this
 * call with vanilla abd_alloc().
 */
abd_t *
abd_alloc_for_io(size_t size, boolean_t is_metadata)
{
	return (abd_alloc_linear(size, is_metadata));
}

/*
 * Allocate a new ABD to point to offset off of sabd. It shares the underlying
 * buffer data with sabd. Use abd_put() to free. sabd must not be freed while
 * any derived ABDs exist.
 */
abd_t *
abd_get_offset(abd_t *sabd, size_t off)
{
	abd_t *abd;

	abd_verify(sabd);
	ASSERT3U(off, <=, sabd->abd_size);

	if (abd_is_linear(sabd)) {
		abd = abd_alloc_struct(0);

		/*
		 * Even if this buf is filesystem metadata, we only track that
		 * if we own the underlying data buffer, which is not true in
		 * this case. Therefore, we don't ever use ABD_FLAG_META here.
		 */
		abd->abd_flags = ABD_FLAG_LINEAR;

		abd->abd_u.abd_linear.abd_buf =
		    (char *)sabd->abd_u.abd_linear.abd_buf + off;
	} else {
		size_t new_offset = sabd->abd_u.abd_scatter.abd_offset + off;
		size_t chunkcnt = abd_scatter_chunkcnt(sabd) -
		    (new_offset / zfs_abd_chunk_size);

		abd = abd_alloc_struct(chunkcnt);

		/*
		 * Even if this buf is filesystem metadata, we only track that
		 * if we own the underlying data buffer, which is not true in
		 * this case. Therefore, we don't ever use ABD_FLAG_META here.
		 */
		abd->abd_flags = 0;

		abd->abd_u.abd_scatter.abd_offset =
		    new_offset % zfs_abd_chunk_size;
		abd->abd_u.abd_scatter.abd_chunk_size = zfs_abd_chunk_size;

		/* Copy the scatterlist starting at the correct offset */
		(void) memcpy(&abd->abd_u.abd_scatter.abd_chunks,
		    &sabd->abd_u.abd_scatter.abd_chunks[new_offset /
		    zfs_abd_chunk_size],
		    chunkcnt * sizeof (void *));
	}

	abd->abd_size = sabd->abd_size - off;
	abd->abd_parent = sabd;
	refcount_create(&abd->abd_children);
	(void) refcount_add_many(&sabd->abd_children, abd->abd_size, abd);

	return (abd);
}
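
/*
 * For example, to operate on the second half of an existing ABD without
 * copying any data (an illustrative sketch; "sabd" is assumed to be a valid
 * ABD supplied by the caller):
 *
 *	abd_t *half = abd_get_offset(sabd, sabd->abd_size / 2);
 *	... use the derived ABD ...
 *	abd_put(half);
 */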

/*
 * Allocate a linear ABD structure for buf. You must free this with abd_put()
 * since the resulting ABD doesn't own its own buffer.
 */
abd_t *
abd_get_from_buf(void *buf, size_t size)
{
	abd_t *abd = abd_alloc_struct(0);

	VERIFY3U(size, <=, SPA_MAXBLOCKSIZE);

	/*
	 * Even if this buf is filesystem metadata, we only track that if we
	 * own the underlying data buffer, which is not true in this case.
	 * Therefore, we don't ever use ABD_FLAG_META here.
	 */
	abd->abd_flags = ABD_FLAG_LINEAR;
	abd->abd_size = size;
	abd->abd_parent = NULL;
	refcount_create(&abd->abd_children);

	abd->abd_u.abd_linear.abd_buf = buf;

	return (abd);
}

/*
 * Free an ABD allocated from abd_get_offset() or abd_get_from_buf(). Will not
 * free the underlying scatterlist or buffer.
 */
void
abd_put(abd_t *abd)
{
	abd_verify(abd);
	ASSERT(!(abd->abd_flags & ABD_FLAG_OWNER));

	if (abd->abd_parent != NULL) {
		(void) refcount_remove_many(&abd->abd_parent->abd_children,
		    abd->abd_size, abd);
	}

	refcount_destroy(&abd->abd_children);
	abd_free_struct(abd);
}

/*
 * Get the raw buffer associated with a linear ABD.
 */
void *
abd_to_buf(abd_t *abd)
{
	ASSERT(abd_is_linear(abd));
	abd_verify(abd);
	return (abd->abd_u.abd_linear.abd_buf);
}

/*
 * Borrow a raw buffer from an ABD without copying the contents of the ABD
 * into the buffer. If the ABD is scattered, this will allocate a raw buffer
 * whose contents are undefined. To copy over the existing data in the ABD, use
 * abd_borrow_buf_copy() instead.
 */
void *
abd_borrow_buf(abd_t *abd, size_t n)
{
	void *buf;
	abd_verify(abd);
	ASSERT3U(abd->abd_size, >=, n);
	if (abd_is_linear(abd)) {
		buf = abd_to_buf(abd);
	} else {
		buf = zio_buf_alloc(n);
	}
	(void) refcount_add_many(&abd->abd_children, n, buf);

	return (buf);
}

void *
abd_borrow_buf_copy(abd_t *abd, size_t n)
{
	void *buf = abd_borrow_buf(abd, n);
	if (!abd_is_linear(abd)) {
		abd_copy_to_buf(buf, abd, n);
	}
	return (buf);
}

/*
 * Return a borrowed raw buffer to an ABD. If the ABD is scattered, this will
 * not change the contents of the ABD and will ASSERT that you didn't modify
 * the buffer since it was borrowed. If you want any changes you made to buf to
 * be copied back to abd, use abd_return_buf_copy() instead.
 */
void
abd_return_buf(abd_t *abd, void *buf, size_t n)
{
	abd_verify(abd);
	ASSERT3U(abd->abd_size, >=, n);
	if (abd_is_linear(abd)) {
		ASSERT3P(buf, ==, abd_to_buf(abd));
	} else {
		ASSERT0(abd_cmp_buf(abd, buf, n));
		zio_buf_free(buf, n);
	}
	(void) refcount_remove_many(&abd->abd_children, n, buf);
}

void
abd_return_buf_copy(abd_t *abd, void *buf, size_t n)
{
	if (!abd_is_linear(abd)) {
		abd_copy_from_buf(abd, buf, n);
	}
	abd_return_buf(abd, buf, n);
}
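
/*
 * A typical borrow/return sequence for read-modify-write access to an ABD of
 * unknown layout looks like this (an illustrative sketch only):
 *
 *	void *buf = abd_borrow_buf_copy(abd, abd->abd_size);
 *	... read or modify buf ...
 *	abd_return_buf_copy(abd, buf, abd->abd_size);
 *
 * If buf is only read, abd_return_buf() avoids the copy back into the ABD.
 */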

/*
 * Give this ABD ownership of the buffer that it's storing. Can only be used on
 * linear ABDs which were allocated via abd_get_from_buf(), or ones allocated
 * with abd_alloc_linear() which subsequently released ownership of their buf
 * with abd_release_ownership_of_buf().
 */
void
abd_take_ownership_of_buf(abd_t *abd, boolean_t is_metadata)
{
	ASSERT(abd_is_linear(abd));
	ASSERT(!(abd->abd_flags & ABD_FLAG_OWNER));
	abd_verify(abd);

	abd->abd_flags |= ABD_FLAG_OWNER;
	if (is_metadata) {
		abd->abd_flags |= ABD_FLAG_META;
	}

	ABDSTAT_BUMP(abdstat_linear_cnt);
	ABDSTAT_INCR(abdstat_linear_data_size, abd->abd_size);
}

void
abd_release_ownership_of_buf(abd_t *abd)
{
	ASSERT(abd_is_linear(abd));
	ASSERT(abd->abd_flags & ABD_FLAG_OWNER);
	abd_verify(abd);

	abd->abd_flags &= ~ABD_FLAG_OWNER;
	/* Disable this flag since we no longer own the data buffer */
	abd->abd_flags &= ~ABD_FLAG_META;

	ABDSTAT_BUMPDOWN(abdstat_linear_cnt);
	ABDSTAT_INCR(abdstat_linear_data_size, -(int)abd->abd_size);
}

struct abd_iter {
	abd_t		*iter_abd;	/* ABD being iterated through */
	size_t		iter_pos;	/* position (relative to abd_offset) */
	void		*iter_mapaddr;	/* addr corresponding to iter_pos */
	size_t		iter_mapsize;	/* length of data valid at mapaddr */
};

static inline size_t
abd_iter_scatter_chunk_offset(struct abd_iter *aiter)
{
	ASSERT(!abd_is_linear(aiter->iter_abd));
	return ((aiter->iter_abd->abd_u.abd_scatter.abd_offset +
	    aiter->iter_pos) % zfs_abd_chunk_size);
}

static inline size_t
abd_iter_scatter_chunk_index(struct abd_iter *aiter)
{
	ASSERT(!abd_is_linear(aiter->iter_abd));
	return ((aiter->iter_abd->abd_u.abd_scatter.abd_offset +
	    aiter->iter_pos) / zfs_abd_chunk_size);
}

/*
 * Initialize the abd_iter.
 */
static void
abd_iter_init(struct abd_iter *aiter, abd_t *abd)
{
	abd_verify(abd);
	aiter->iter_abd = abd;
	aiter->iter_pos = 0;
	aiter->iter_mapaddr = NULL;
	aiter->iter_mapsize = 0;
}

/*
 * Advance the iterator by a certain amount. Cannot be called while a chunk is
 * in use. This can safely be called when the iterator is already exhausted, in
 * which case it does nothing.
 */
static void
abd_iter_advance(struct abd_iter *aiter, size_t amount)
{
	ASSERT3P(aiter->iter_mapaddr, ==, NULL);
	ASSERT0(aiter->iter_mapsize);

	/* There's nothing left to advance to, so do nothing */
	if (aiter->iter_pos == aiter->iter_abd->abd_size)
		return;

	aiter->iter_pos += amount;
}

/*
 * Map the current chunk into aiter. This can safely be called when the
 * iterator is already exhausted, in which case it does nothing.
 */
static void
abd_iter_map(struct abd_iter *aiter)
{
	void *paddr;
	size_t offset = 0;

	ASSERT3P(aiter->iter_mapaddr, ==, NULL);
	ASSERT0(aiter->iter_mapsize);

	/* Panic if someone has changed zfs_abd_chunk_size */
	IMPLY(!abd_is_linear(aiter->iter_abd), zfs_abd_chunk_size ==
	    aiter->iter_abd->abd_u.abd_scatter.abd_chunk_size);

	/* There's nothing left to iterate over, so do nothing */
	if (aiter->iter_pos == aiter->iter_abd->abd_size)
		return;

	if (abd_is_linear(aiter->iter_abd)) {
		offset = aiter->iter_pos;
		aiter->iter_mapsize = aiter->iter_abd->abd_size - offset;
		paddr = aiter->iter_abd->abd_u.abd_linear.abd_buf;
	} else {
		size_t index = abd_iter_scatter_chunk_index(aiter);
		offset = abd_iter_scatter_chunk_offset(aiter);
		aiter->iter_mapsize = zfs_abd_chunk_size - offset;
		paddr = aiter->iter_abd->abd_u.abd_scatter.abd_chunks[index];
	}
	aiter->iter_mapaddr = (char *)paddr + offset;
}

/*
 * Unmap the current chunk from aiter. This can safely be called when the
 * iterator is already exhausted, in which case it does nothing.
 */
static void
abd_iter_unmap(struct abd_iter *aiter)
{
	/* There's nothing left to unmap, so do nothing */
	if (aiter->iter_pos == aiter->iter_abd->abd_size)
		return;

	ASSERT3P(aiter->iter_mapaddr, !=, NULL);
	ASSERT3U(aiter->iter_mapsize, >, 0);

	aiter->iter_mapaddr = NULL;
	aiter->iter_mapsize = 0;
}

int
abd_iterate_func(abd_t *abd, size_t off, size_t size,
    abd_iter_func_t *func, void *private)
{
	int ret = 0;
	struct abd_iter aiter;

	abd_verify(abd);
	ASSERT3U(off + size, <=, abd->abd_size);

	abd_iter_init(&aiter, abd);
	abd_iter_advance(&aiter, off);

	while (size > 0) {
		abd_iter_map(&aiter);

		size_t len = MIN(aiter.iter_mapsize, size);
		ASSERT3U(len, >, 0);

		ret = func(aiter.iter_mapaddr, len, private);

		abd_iter_unmap(&aiter);

		if (ret != 0)
			break;

		size -= len;
		abd_iter_advance(&aiter, len);
	}

	return (ret);
}
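
/*
 * For example, a caller-defined callback to sum every byte of an ABD might
 * look like this (an illustrative sketch; byte_sum_cb is a hypothetical name,
 * not part of this file):
 *
 *	static int
 *	byte_sum_cb(void *buf, size_t size, void *private)
 *	{
 *		uint64_t *sum = private;
 *		uint8_t *p = buf;
 *		for (size_t i = 0; i < size; i++)
 *			*sum += p[i];
 *		return (0);
 *	}
 *
 *	uint64_t sum = 0;
 *	(void) abd_iterate_func(abd, 0, abd->abd_size, byte_sum_cb, &sum);
 */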

struct buf_arg {
	void *arg_buf;
};

static int
abd_copy_to_buf_off_cb(void *buf, size_t size, void *private)
{
	struct buf_arg *ba_ptr = private;

	(void) memcpy(ba_ptr->arg_buf, buf, size);
	ba_ptr->arg_buf = (char *)ba_ptr->arg_buf + size;

	return (0);
}

/*
 * Copy abd to buf. (off is the offset in abd.)
 */
void
abd_copy_to_buf_off(void *buf, abd_t *abd, size_t off, size_t size)
{
	struct buf_arg ba_ptr = { buf };

	(void) abd_iterate_func(abd, off, size, abd_copy_to_buf_off_cb,
	    &ba_ptr);
}

static int
abd_cmp_buf_off_cb(void *buf, size_t size, void *private)
{
	int ret;
	struct buf_arg *ba_ptr = private;

	ret = memcmp(buf, ba_ptr->arg_buf, size);
	ba_ptr->arg_buf = (char *)ba_ptr->arg_buf + size;

	return (ret);
}

/*
 * Compare the contents of abd to buf. (off is the offset in abd.)
 */
int
abd_cmp_buf_off(abd_t *abd, const void *buf, size_t off, size_t size)
{
	struct buf_arg ba_ptr = { (void *) buf };

	return (abd_iterate_func(abd, off, size, abd_cmp_buf_off_cb, &ba_ptr));
}

static int
abd_copy_from_buf_off_cb(void *buf, size_t size, void *private)
{
	struct buf_arg *ba_ptr = private;

	(void) memcpy(buf, ba_ptr->arg_buf, size);
	ba_ptr->arg_buf = (char *)ba_ptr->arg_buf + size;

	return (0);
}

/*
 * Copy from buf to abd. (off is the offset in abd.)
 */
void
abd_copy_from_buf_off(abd_t *abd, const void *buf, size_t off, size_t size)
{
	struct buf_arg ba_ptr = { (void *) buf };

	(void) abd_iterate_func(abd, off, size, abd_copy_from_buf_off_cb,
	    &ba_ptr);
}

/*ARGSUSED*/
static int
abd_zero_off_cb(void *buf, size_t size, void *private)
{
	(void) memset(buf, 0, size);
	return (0);
}

/*
 * Zero out the abd from a particular offset to the end.
 */
void
abd_zero_off(abd_t *abd, size_t off, size_t size)
{
	(void) abd_iterate_func(abd, off, size, abd_zero_off_cb, NULL);
}

/*
 * Iterate over two ABDs and call func incrementally on the two ABDs' data in
 * equal-sized chunks (passed to func as raw buffers). func could be called many
 * times during this iteration.
 */
int
abd_iterate_func2(abd_t *dabd, abd_t *sabd, size_t doff, size_t soff,
    size_t size, abd_iter_func2_t *func, void *private)
{
	int ret = 0;
	struct abd_iter daiter, saiter;

	abd_verify(dabd);
	abd_verify(sabd);

	ASSERT3U(doff + size, <=, dabd->abd_size);
	ASSERT3U(soff + size, <=, sabd->abd_size);

	abd_iter_init(&daiter, dabd);
	abd_iter_init(&saiter, sabd);
	abd_iter_advance(&daiter, doff);
	abd_iter_advance(&saiter, soff);

	while (size > 0) {
		abd_iter_map(&daiter);
		abd_iter_map(&saiter);

		size_t dlen = MIN(daiter.iter_mapsize, size);
		size_t slen = MIN(saiter.iter_mapsize, size);
		size_t len = MIN(dlen, slen);
		ASSERT(dlen > 0 || slen > 0);

		ret = func(daiter.iter_mapaddr, saiter.iter_mapaddr, len,
		    private);

		abd_iter_unmap(&saiter);
		abd_iter_unmap(&daiter);

		if (ret != 0)
			break;

		size -= len;
		abd_iter_advance(&daiter, len);
		abd_iter_advance(&saiter, len);
	}

	return (ret);
}

/*ARGSUSED*/
static int
abd_copy_off_cb(void *dbuf, void *sbuf, size_t size, void *private)
{
	(void) memcpy(dbuf, sbuf, size);
	return (0);
}

/*
 * Copy from sabd to dabd starting from soff and doff.
 */
void
abd_copy_off(abd_t *dabd, abd_t *sabd, size_t doff, size_t soff, size_t size)
{
	(void) abd_iterate_func2(dabd, sabd, doff, soff, size,
	    abd_copy_off_cb, NULL);
}
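
/*
 * The abd_copy() convenience wrapper declared above covers the common case of
 * this with doff = soff = 0.
 */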

/*ARGSUSED*/
static int
abd_cmp_cb(void *bufa, void *bufb, size_t size, void *private)
{
	return (memcmp(bufa, bufb, size));
}

/*
 * Compares the first size bytes of two ABDs.
 */
int
abd_cmp(abd_t *dabd, abd_t *sabd, size_t size)
{
	return (abd_iterate_func2(dabd, sabd, 0, 0, size, abd_cmp_cb, NULL));
}