dmu_send.c revision 284757
1/*
2 * CDDL HEADER START
3 *
4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
7 *
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
12 *
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 *
19 * CDDL HEADER END
20 */
21/*
22 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
23 * Copyright 2011 Nexenta Systems, Inc. All rights reserved.
24 * Copyright (c) 2011, 2014 by Delphix. All rights reserved.
25 * Copyright (c) 2014, Joyent, Inc. All rights reserved.
26 * Copyright (c) 2012, Martin Matuska <mm@FreeBSD.org>. All rights reserved.
27 * Copyright 2014 HybridCluster. All rights reserved.
28 */
29
30#include <sys/dmu.h>
31#include <sys/dmu_impl.h>
32#include <sys/dmu_tx.h>
33#include <sys/dbuf.h>
34#include <sys/dnode.h>
35#include <sys/zfs_context.h>
36#include <sys/dmu_objset.h>
37#include <sys/dmu_traverse.h>
38#include <sys/dsl_dataset.h>
39#include <sys/dsl_dir.h>
40#include <sys/dsl_prop.h>
41#include <sys/dsl_pool.h>
42#include <sys/dsl_synctask.h>
43#include <sys/zfs_ioctl.h>
44#include <sys/zap.h>
45#include <sys/zio_checksum.h>
46#include <sys/zfs_znode.h>
47#include <zfs_fletcher.h>
48#include <sys/avl.h>
49#include <sys/ddt.h>
50#include <sys/zfs_onexit.h>
51#include <sys/dmu_send.h>
52#include <sys/dsl_destroy.h>
53#include <sys/blkptr.h>
54#include <sys/dsl_bookmark.h>
55#include <sys/zfeature.h>
56
57#ifdef __FreeBSD__
58#undef dump_write
59#define dump_write dmu_dump_write
60#endif
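/*
 * The rename above keeps the static dump_write() in this file from clashing
 * with the dump_write() symbol the FreeBSD kernel already provides as part
 * of its crash-dump support.
 */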
61
62/* Set this tunable to TRUE to replace corrupt data with 0x2f5baddb10c */
63int zfs_send_corrupt_data = B_FALSE;
64
65static char *dmu_recv_tag = "dmu_recv_tag";
66static const char *recv_clone_name = "%recv";
67
68static int
69dump_bytes(dmu_sendarg_t *dsp, void *buf, int len)
70{
71	dsl_dataset_t *ds = dsp->dsa_os->os_dsl_dataset;
72	struct uio auio;
73	struct iovec aiov;
74	ASSERT0(len % 8);
75
76	fletcher_4_incremental_native(buf, len, &dsp->dsa_zc);
77	aiov.iov_base = buf;
78	aiov.iov_len = len;
79	auio.uio_iov = &aiov;
80	auio.uio_iovcnt = 1;
81	auio.uio_resid = len;
82	auio.uio_segflg = UIO_SYSSPACE;
83	auio.uio_rw = UIO_WRITE;
84	auio.uio_offset = (off_t)-1;
85	auio.uio_td = dsp->dsa_td;
86#ifdef _KERNEL
87	if (dsp->dsa_fp->f_type == DTYPE_VNODE)
88		bwillwrite();
89	dsp->dsa_err = fo_write(dsp->dsa_fp, &auio, dsp->dsa_td->td_ucred, 0,
90	    dsp->dsa_td);
91#else
92	fprintf(stderr, "%s: returning EOPNOTSUPP\n", __func__);
93	dsp->dsa_err = EOPNOTSUPP;
94#endif
95	mutex_enter(&ds->ds_sendstream_lock);
96	*dsp->dsa_off += len;
97	mutex_exit(&ds->ds_sendstream_lock);
98
99	return (dsp->dsa_err);
100}
101
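/*
 * dump_free() and dump_freeobjects() below do not emit their records
 * immediately.  They stage a single record in dsp->dsa_drr, note its type in
 * dsp->dsa_pending_op, and extend it when the next call describes an
 * adjacent range; any record of a different type flushes the pending one
 * first.  A rough sketch of the effect (illustrative only):
 *
 *	dump_free(dsp, obj, 0, 8192);		stages DRR_FREE for [0, 8192)
 *	dump_free(dsp, obj, 8192, 8192);	grows it to [0, 16384)
 *	dump_write(dsp, ...);			flushes the DRR_FREE, then
 *						emits the DRR_WRITE
 */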
102static int
103dump_free(dmu_sendarg_t *dsp, uint64_t object, uint64_t offset,
104    uint64_t length)
105{
106	struct drr_free *drrf = &(dsp->dsa_drr->drr_u.drr_free);
107
108	/*
109	 * When we receive a free record, dbuf_free_range() assumes
110	 * that the receiving system doesn't have any dbufs in the range
111	 * being freed.  This is always true because there is a one-record
112	 * constraint: we only send one WRITE record for any given
113	 * object+offset.  We know that the one-record constraint is
114	 * true because we always send data in increasing order by
115	 * object,offset.
116	 *
117	 * If the increasing-order constraint ever changes, we should find
118	 * another way to assert that the one-record constraint is still
119	 * satisfied.
120	 */
121	ASSERT(object > dsp->dsa_last_data_object ||
122	    (object == dsp->dsa_last_data_object &&
123	    offset > dsp->dsa_last_data_offset));
124
125	/*
126	 * If we are doing a non-incremental send, then there can't
127	 * be any data in the dataset we're receiving into.  Therefore
128	 * a free record would simply be a no-op.  Save space by not
129	 * sending it to begin with.
130	 */
131	if (!dsp->dsa_incremental)
132		return (0);
133
134	if (length != -1ULL && offset + length < offset)
135		length = -1ULL;
136
137	/*
138	 * If there is a pending op, but it's not PENDING_FREE, push it out,
139	 * since free block aggregation can only be done for blocks of the
140	 * same type (i.e., DRR_FREE records can only be aggregated with
141	 * other DRR_FREE records; DRR_FREEOBJECTS records can only be
142	 * aggregated with other DRR_FREEOBJECTS records).
143	 */
144	if (dsp->dsa_pending_op != PENDING_NONE &&
145	    dsp->dsa_pending_op != PENDING_FREE) {
146		if (dump_bytes(dsp, dsp->dsa_drr,
147		    sizeof (dmu_replay_record_t)) != 0)
148			return (SET_ERROR(EINTR));
149		dsp->dsa_pending_op = PENDING_NONE;
150	}
151
152	if (dsp->dsa_pending_op == PENDING_FREE) {
153		/*
154		 * There should never be a PENDING_FREE if length is -1
155		 * (because dump_dnode is the only place where this
156		 * function is called with a -1, and only after flushing
157		 * any pending record).
158		 */
159		ASSERT(length != -1ULL);
160		/*
161		 * Check to see whether this free block can be aggregated
162		 * with the pending one.
163		 */
164		if (drrf->drr_object == object && drrf->drr_offset +
165		    drrf->drr_length == offset) {
166			drrf->drr_length += length;
167			return (0);
168		} else {
169			/* not a continuation.  Push out pending record */
170			if (dump_bytes(dsp, dsp->dsa_drr,
171			    sizeof (dmu_replay_record_t)) != 0)
172				return (SET_ERROR(EINTR));
173			dsp->dsa_pending_op = PENDING_NONE;
174		}
175	}
176	/* create a FREE record and make it pending */
177	bzero(dsp->dsa_drr, sizeof (dmu_replay_record_t));
178	dsp->dsa_drr->drr_type = DRR_FREE;
179	drrf->drr_object = object;
180	drrf->drr_offset = offset;
181	drrf->drr_length = length;
182	drrf->drr_toguid = dsp->dsa_toguid;
183	if (length == -1ULL) {
184		if (dump_bytes(dsp, dsp->dsa_drr,
185		    sizeof (dmu_replay_record_t)) != 0)
186			return (SET_ERROR(EINTR));
187	} else {
188		dsp->dsa_pending_op = PENDING_FREE;
189	}
190
191	return (0);
192}
193
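/*
 * Each data block becomes a fixed-size dmu_replay_record_t header of type
 * DRR_WRITE immediately followed by blksz bytes of payload; both parts pass
 * through dump_bytes() and so are folded into the stream's running
 * fletcher-4 checksum.  When the block pointer carries a dedup-capable
 * checksum it is copied into drr_key, which lets the userland dedup code
 * recognize later duplicates and replace them with DRR_WRITE_BYREF records.
 */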
194static int
195dump_write(dmu_sendarg_t *dsp, dmu_object_type_t type,
196    uint64_t object, uint64_t offset, int blksz, const blkptr_t *bp, void *data)
197{
198	struct drr_write *drrw = &(dsp->dsa_drr->drr_u.drr_write);
199
200	/*
201	 * We send data in increasing object, offset order.
202	 * See comment in dump_free() for details.
203	 */
204	ASSERT(object > dsp->dsa_last_data_object ||
205	    (object == dsp->dsa_last_data_object &&
206	    offset > dsp->dsa_last_data_offset));
207	dsp->dsa_last_data_object = object;
208	dsp->dsa_last_data_offset = offset + blksz - 1;
209
210	/*
211	 * If there is any kind of pending aggregation (currently either
212	 * a grouping of free objects or free blocks), push it out to
213	 * the stream, since aggregation can't be done across operations
214	 * of different types.
215	 */
216	if (dsp->dsa_pending_op != PENDING_NONE) {
217		if (dump_bytes(dsp, dsp->dsa_drr,
218		    sizeof (dmu_replay_record_t)) != 0)
219			return (SET_ERROR(EINTR));
220		dsp->dsa_pending_op = PENDING_NONE;
221	}
222	/* write a DATA record */
223	bzero(dsp->dsa_drr, sizeof (dmu_replay_record_t));
224	dsp->dsa_drr->drr_type = DRR_WRITE;
225	drrw->drr_object = object;
226	drrw->drr_type = type;
227	drrw->drr_offset = offset;
228	drrw->drr_length = blksz;
229	drrw->drr_toguid = dsp->dsa_toguid;
230	if (bp == NULL || BP_IS_EMBEDDED(bp)) {
231		/*
232		 * There's no pre-computed checksum for partial-block
233		 * writes or embedded BP's, so (like
234		 * fletcher4-checksummed blocks) userland will have to
235		 * compute a dedup-capable checksum itself.
236		 */
237		drrw->drr_checksumtype = ZIO_CHECKSUM_OFF;
238	} else {
239		drrw->drr_checksumtype = BP_GET_CHECKSUM(bp);
240		if (zio_checksum_table[drrw->drr_checksumtype].ci_dedup)
241			drrw->drr_checksumflags |= DRR_CHECKSUM_DEDUP;
242		DDK_SET_LSIZE(&drrw->drr_key, BP_GET_LSIZE(bp));
243		DDK_SET_PSIZE(&drrw->drr_key, BP_GET_PSIZE(bp));
244		DDK_SET_COMPRESS(&drrw->drr_key, BP_GET_COMPRESS(bp));
245		drrw->drr_key.ddk_cksum = bp->blk_cksum;
246	}
247
248	if (dump_bytes(dsp, dsp->dsa_drr, sizeof (dmu_replay_record_t)) != 0)
249		return (SET_ERROR(EINTR));
250	if (dump_bytes(dsp, data, blksz) != 0)
251		return (SET_ERROR(EINTR));
252	return (0);
253}
254
255static int
256dump_write_embedded(dmu_sendarg_t *dsp, uint64_t object, uint64_t offset,
257    int blksz, const blkptr_t *bp)
258{
259	char buf[BPE_PAYLOAD_SIZE];
260	struct drr_write_embedded *drrw =
261	    &(dsp->dsa_drr->drr_u.drr_write_embedded);
262
263	if (dsp->dsa_pending_op != PENDING_NONE) {
264		if (dump_bytes(dsp, dsp->dsa_drr,
265		    sizeof (dmu_replay_record_t)) != 0)
266			return (EINTR);
267		dsp->dsa_pending_op = PENDING_NONE;
268	}
269
270	ASSERT(BP_IS_EMBEDDED(bp));
271
272	bzero(dsp->dsa_drr, sizeof (dmu_replay_record_t));
273	dsp->dsa_drr->drr_type = DRR_WRITE_EMBEDDED;
274	drrw->drr_object = object;
275	drrw->drr_offset = offset;
276	drrw->drr_length = blksz;
277	drrw->drr_toguid = dsp->dsa_toguid;
278	drrw->drr_compression = BP_GET_COMPRESS(bp);
279	drrw->drr_etype = BPE_GET_ETYPE(bp);
280	drrw->drr_lsize = BPE_GET_LSIZE(bp);
281	drrw->drr_psize = BPE_GET_PSIZE(bp);
282
283	decode_embedded_bp_compressed(bp, buf);
284
285	if (dump_bytes(dsp, dsp->dsa_drr, sizeof (dmu_replay_record_t)) != 0)
286		return (EINTR);
287	if (dump_bytes(dsp, buf, P2ROUNDUP(drrw->drr_psize, 8)) != 0)
288		return (EINTR);
289	return (0);
290}
291
292static int
293dump_spill(dmu_sendarg_t *dsp, uint64_t object, int blksz, void *data)
294{
295	struct drr_spill *drrs = &(dsp->dsa_drr->drr_u.drr_spill);
296
297	if (dsp->dsa_pending_op != PENDING_NONE) {
298		if (dump_bytes(dsp, dsp->dsa_drr,
299		    sizeof (dmu_replay_record_t)) != 0)
300			return (SET_ERROR(EINTR));
301		dsp->dsa_pending_op = PENDING_NONE;
302	}
303
304	/* write a SPILL record */
305	bzero(dsp->dsa_drr, sizeof (dmu_replay_record_t));
306	dsp->dsa_drr->drr_type = DRR_SPILL;
307	drrs->drr_object = object;
308	drrs->drr_length = blksz;
309	drrs->drr_toguid = dsp->dsa_toguid;
310
311	if (dump_bytes(dsp, dsp->dsa_drr, sizeof (dmu_replay_record_t)))
312		return (SET_ERROR(EINTR));
313	if (dump_bytes(dsp, data, blksz))
314		return (SET_ERROR(EINTR));
315	return (0);
316}
317
318static int
319dump_freeobjects(dmu_sendarg_t *dsp, uint64_t firstobj, uint64_t numobjs)
320{
321	struct drr_freeobjects *drrfo = &(dsp->dsa_drr->drr_u.drr_freeobjects);
322
323	/* See comment in dump_free(). */
324	if (!dsp->dsa_incremental)
325		return (0);
326
327	/*
328	 * If there is a pending op, but it's not PENDING_FREEOBJECTS,
329	 * push it out, since free block aggregation can only be done for
330	 * blocks of the same type (i.e., DRR_FREE records can only be
331	 * aggregated with other DRR_FREE records; DRR_FREEOBJECTS records
332	 * can only be aggregated with other DRR_FREEOBJECTS records).
333	 */
334	if (dsp->dsa_pending_op != PENDING_NONE &&
335	    dsp->dsa_pending_op != PENDING_FREEOBJECTS) {
336		if (dump_bytes(dsp, dsp->dsa_drr,
337		    sizeof (dmu_replay_record_t)) != 0)
338			return (SET_ERROR(EINTR));
339		dsp->dsa_pending_op = PENDING_NONE;
340	}
341	if (dsp->dsa_pending_op == PENDING_FREEOBJECTS) {
342		/*
343		 * See whether this free object array can be aggregated
344		 * with the pending one.
345		 */
346		if (drrfo->drr_firstobj + drrfo->drr_numobjs == firstobj) {
347			drrfo->drr_numobjs += numobjs;
348			return (0);
349		} else {
350			/* can't be aggregated.  Push out pending record */
351			if (dump_bytes(dsp, dsp->dsa_drr,
352			    sizeof (dmu_replay_record_t)) != 0)
353				return (SET_ERROR(EINTR));
354			dsp->dsa_pending_op = PENDING_NONE;
355		}
356	}
357
358	/* write a FREEOBJECTS record */
359	bzero(dsp->dsa_drr, sizeof (dmu_replay_record_t));
360	dsp->dsa_drr->drr_type = DRR_FREEOBJECTS;
361	drrfo->drr_firstobj = firstobj;
362	drrfo->drr_numobjs = numobjs;
363	drrfo->drr_toguid = dsp->dsa_toguid;
364
365	dsp->dsa_pending_op = PENDING_FREEOBJECTS;
366
367	return (0);
368}
369
370static int
371dump_dnode(dmu_sendarg_t *dsp, uint64_t object, dnode_phys_t *dnp)
372{
373	struct drr_object *drro = &(dsp->dsa_drr->drr_u.drr_object);
374
375	if (dnp == NULL || dnp->dn_type == DMU_OT_NONE)
376		return (dump_freeobjects(dsp, object, 1));
377
378	if (dsp->dsa_pending_op != PENDING_NONE) {
379		if (dump_bytes(dsp, dsp->dsa_drr,
380		    sizeof (dmu_replay_record_t)) != 0)
381			return (SET_ERROR(EINTR));
382		dsp->dsa_pending_op = PENDING_NONE;
383	}
384
385	/* write an OBJECT record */
386	bzero(dsp->dsa_drr, sizeof (dmu_replay_record_t));
387	dsp->dsa_drr->drr_type = DRR_OBJECT;
388	drro->drr_object = object;
389	drro->drr_type = dnp->dn_type;
390	drro->drr_bonustype = dnp->dn_bonustype;
391	drro->drr_blksz = dnp->dn_datablkszsec << SPA_MINBLOCKSHIFT;
392	drro->drr_bonuslen = dnp->dn_bonuslen;
393	drro->drr_checksumtype = dnp->dn_checksum;
394	drro->drr_compress = dnp->dn_compress;
395	drro->drr_toguid = dsp->dsa_toguid;
396
397	if (!(dsp->dsa_featureflags & DMU_BACKUP_FEATURE_LARGE_BLOCKS) &&
398	    drro->drr_blksz > SPA_OLD_MAXBLOCKSIZE)
399		drro->drr_blksz = SPA_OLD_MAXBLOCKSIZE;
400
401	if (dump_bytes(dsp, dsp->dsa_drr, sizeof (dmu_replay_record_t)) != 0)
402		return (SET_ERROR(EINTR));
403
404	if (dump_bytes(dsp, DN_BONUS(dnp), P2ROUNDUP(dnp->dn_bonuslen, 8)) != 0)
405		return (SET_ERROR(EINTR));
406
407	/* Free anything past the end of the file. */
408	if (dump_free(dsp, object, (dnp->dn_maxblkid + 1) *
409	    (dnp->dn_datablkszsec << SPA_MINBLOCKSHIFT), -1ULL) != 0)
410		return (SET_ERROR(EINTR));
411	if (dsp->dsa_err != 0)
412		return (SET_ERROR(EINTR));
413	return (0);
414}
415
416static boolean_t
417backup_do_embed(dmu_sendarg_t *dsp, const blkptr_t *bp)
418{
419	if (!BP_IS_EMBEDDED(bp))
420		return (B_FALSE);
421
422	/*
423	 * Compression function must be legacy, or explicitly enabled.
424	 */
425	if ((BP_GET_COMPRESS(bp) >= ZIO_COMPRESS_LEGACY_FUNCTIONS &&
426	    !(dsp->dsa_featureflags & DMU_BACKUP_FEATURE_EMBED_DATA_LZ4)))
427		return (B_FALSE);
428
429	/*
430	 * Embed type must be explicitly enabled.
431	 */
432	switch (BPE_GET_ETYPE(bp)) {
433	case BP_EMBEDDED_TYPE_DATA:
434		if (dsp->dsa_featureflags & DMU_BACKUP_FEATURE_EMBED_DATA)
435			return (B_TRUE);
436		break;
437	default:
438		return (B_FALSE);
439	}
440	return (B_FALSE);
441}
442
443#define	BP_SPAN(dnp, level) \
444	(((uint64_t)dnp->dn_datablkszsec) << (SPA_MINBLOCKSHIFT + \
445	(level) * (dnp->dn_indblkshift - SPA_BLKPTRSHIFT)))
446
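/*
 * BP_SPAN(dnp, level) is the number of bytes of the object covered by one
 * block pointer at the given indirection level; backup_cb() below uses it to
 * turn holes into free records.  For example, with 128K data blocks
 * (dn_datablkszsec == 256) and 128K indirect blocks (dn_indblkshift == 17,
 * i.e. 1 << (17 - SPA_BLKPTRSHIFT) == 1024 block pointers per indirect
 * block):
 *
 *	level 0: 256 << 9                  = 128K
 *	level 1: 256 << (9 + 1 * (17 - 7)) = 128M
 *	level 2: 256 << (9 + 2 * (17 - 7)) = 128G
 */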
447/* ARGSUSED */
448static int
449backup_cb(spa_t *spa, zilog_t *zilog, const blkptr_t *bp,
450    const zbookmark_phys_t *zb, const dnode_phys_t *dnp, void *arg)
451{
452	dmu_sendarg_t *dsp = arg;
453	dmu_object_type_t type = bp ? BP_GET_TYPE(bp) : DMU_OT_NONE;
454	int err = 0;
455
456	if (issig(JUSTLOOKING) && issig(FORREAL))
457		return (SET_ERROR(EINTR));
458
459	if (zb->zb_object != DMU_META_DNODE_OBJECT &&
460	    DMU_OBJECT_IS_SPECIAL(zb->zb_object)) {
461		return (0);
462	} else if (zb->zb_level == ZB_ZIL_LEVEL) {
463		/*
464		 * If we are sending a non-snapshot (which is allowed on
465		 * read-only pools), it may have a ZIL, which must be ignored.
466		 */
467		return (0);
468	} else if (BP_IS_HOLE(bp) &&
469	    zb->zb_object == DMU_META_DNODE_OBJECT) {
470		uint64_t span = BP_SPAN(dnp, zb->zb_level);
471		uint64_t dnobj = (zb->zb_blkid * span) >> DNODE_SHIFT;
472		err = dump_freeobjects(dsp, dnobj, span >> DNODE_SHIFT);
473	} else if (BP_IS_HOLE(bp)) {
474		uint64_t span = BP_SPAN(dnp, zb->zb_level);
475		err = dump_free(dsp, zb->zb_object, zb->zb_blkid * span, span);
476	} else if (zb->zb_level > 0 || type == DMU_OT_OBJSET) {
477		return (0);
478	} else if (type == DMU_OT_DNODE) {
479		dnode_phys_t *blk;
480		int i;
481		int blksz = BP_GET_LSIZE(bp);
482		arc_flags_t aflags = ARC_FLAG_WAIT;
483		arc_buf_t *abuf;
484
485		if (arc_read(NULL, spa, bp, arc_getbuf_func, &abuf,
486		    ZIO_PRIORITY_ASYNC_READ, ZIO_FLAG_CANFAIL,
487		    &aflags, zb) != 0)
488			return (SET_ERROR(EIO));
489
490		blk = abuf->b_data;
491		for (i = 0; i < blksz >> DNODE_SHIFT; i++) {
492			uint64_t dnobj = (zb->zb_blkid <<
493			    (DNODE_BLOCK_SHIFT - DNODE_SHIFT)) + i;
494			err = dump_dnode(dsp, dnobj, blk+i);
495			if (err != 0)
496				break;
497		}
498		(void) arc_buf_remove_ref(abuf, &abuf);
499	} else if (type == DMU_OT_SA) {
500		arc_flags_t aflags = ARC_FLAG_WAIT;
501		arc_buf_t *abuf;
502		int blksz = BP_GET_LSIZE(bp);
503
504		if (arc_read(NULL, spa, bp, arc_getbuf_func, &abuf,
505		    ZIO_PRIORITY_ASYNC_READ, ZIO_FLAG_CANFAIL,
506		    &aflags, zb) != 0)
507			return (SET_ERROR(EIO));
508
509		err = dump_spill(dsp, zb->zb_object, blksz, abuf->b_data);
510		(void) arc_buf_remove_ref(abuf, &abuf);
511	} else if (backup_do_embed(dsp, bp)) {
512		/* it's an embedded level-0 block of a regular object */
513		int blksz = dnp->dn_datablkszsec << SPA_MINBLOCKSHIFT;
514		err = dump_write_embedded(dsp, zb->zb_object,
515		    zb->zb_blkid * blksz, blksz, bp);
516	} else { /* it's a level-0 block of a regular object */
517		arc_flags_t aflags = ARC_FLAG_WAIT;
518		arc_buf_t *abuf;
519		int blksz = BP_GET_LSIZE(bp);
520		uint64_t offset;
521
522		ASSERT3U(blksz, ==, dnp->dn_datablkszsec << SPA_MINBLOCKSHIFT);
523		ASSERT0(zb->zb_level);
524		if (arc_read(NULL, spa, bp, arc_getbuf_func, &abuf,
525		    ZIO_PRIORITY_ASYNC_READ, ZIO_FLAG_CANFAIL,
526		    &aflags, zb) != 0) {
527			if (zfs_send_corrupt_data) {
528				/* Send a block filled with 0x"zfs badd bloc" */
529				abuf = arc_buf_alloc(spa, blksz, &abuf,
530				    ARC_BUFC_DATA);
531				uint64_t *ptr;
532				for (ptr = abuf->b_data;
533				    (char *)ptr < (char *)abuf->b_data + blksz;
534				    ptr++)
535					*ptr = 0x2f5baddb10c;
536			} else {
537				return (SET_ERROR(EIO));
538			}
539		}
540
541		offset = zb->zb_blkid * blksz;
542
543		if (!(dsp->dsa_featureflags &
544		    DMU_BACKUP_FEATURE_LARGE_BLOCKS) &&
545		    blksz > SPA_OLD_MAXBLOCKSIZE) {
546			char *buf = abuf->b_data;
547			while (blksz > 0 && err == 0) {
548				int n = MIN(blksz, SPA_OLD_MAXBLOCKSIZE);
549				err = dump_write(dsp, type, zb->zb_object,
550				    offset, n, NULL, buf);
551				offset += n;
552				buf += n;
553				blksz -= n;
554			}
555		} else {
556			err = dump_write(dsp, type, zb->zb_object,
557			    offset, blksz, bp, abuf->b_data);
558		}
559		(void) arc_buf_remove_ref(abuf, &abuf);
560	}
561
562	ASSERT(err == 0 || err == EINTR);
563	return (err);
564}
565
566/*
567 * Releases dp using the specified tag.
568 */
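/*
 * The send stream produced here is framed roughly as follows (an
 * illustrative sketch; every record is a fixed-size dmu_replay_record_t,
 * optionally followed by payload such as bonus buffers or block data):
 *
 *	DRR_BEGIN	(magic, version/feature flags, to/from GUIDs, names)
 *	DRR_OBJECT, DRR_FREEOBJECTS, DRR_WRITE, DRR_WRITE_EMBEDDED,
 *	DRR_FREE, DRR_SPILL, ...	(emitted by backup_cb)
 *	DRR_END		(running fletcher-4 checksum of everything above)
 */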
569static int
570dmu_send_impl(void *tag, dsl_pool_t *dp, dsl_dataset_t *ds,
571    zfs_bookmark_phys_t *fromzb, boolean_t is_clone, boolean_t embedok,
572#ifdef illumos
573    boolean_t large_block_ok, int outfd, vnode_t *vp, offset_t *off)
574#else
575    boolean_t large_block_ok, int outfd, struct file *fp, offset_t *off)
576#endif
577{
578	objset_t *os;
579	dmu_replay_record_t *drr;
580	dmu_sendarg_t *dsp;
581	int err;
582	uint64_t fromtxg = 0;
583	uint64_t featureflags = 0;
584
585	err = dmu_objset_from_ds(ds, &os);
586	if (err != 0) {
587		dsl_pool_rele(dp, tag);
588		return (err);
589	}
590
591	drr = kmem_zalloc(sizeof (dmu_replay_record_t), KM_SLEEP);
592	drr->drr_type = DRR_BEGIN;
593	drr->drr_u.drr_begin.drr_magic = DMU_BACKUP_MAGIC;
594	DMU_SET_STREAM_HDRTYPE(drr->drr_u.drr_begin.drr_versioninfo,
595	    DMU_SUBSTREAM);
596
597#ifdef _KERNEL
598	if (dmu_objset_type(os) == DMU_OST_ZFS) {
599		uint64_t version;
600		if (zfs_get_zplprop(os, ZFS_PROP_VERSION, &version) != 0) {
601			kmem_free(drr, sizeof (dmu_replay_record_t));
602			dsl_pool_rele(dp, tag);
603			return (SET_ERROR(EINVAL));
604		}
605		if (version >= ZPL_VERSION_SA) {
606			featureflags |= DMU_BACKUP_FEATURE_SA_SPILL;
607		}
608	}
609#endif
610
611	if (large_block_ok && ds->ds_large_blocks)
612		featureflags |= DMU_BACKUP_FEATURE_LARGE_BLOCKS;
613	if (embedok &&
614	    spa_feature_is_active(dp->dp_spa, SPA_FEATURE_EMBEDDED_DATA)) {
615		featureflags |= DMU_BACKUP_FEATURE_EMBED_DATA;
616		if (spa_feature_is_active(dp->dp_spa, SPA_FEATURE_LZ4_COMPRESS))
617			featureflags |= DMU_BACKUP_FEATURE_EMBED_DATA_LZ4;
618	} else {
619		embedok = B_FALSE;
620	}
621
622	DMU_SET_FEATUREFLAGS(drr->drr_u.drr_begin.drr_versioninfo,
623	    featureflags);
624
625	drr->drr_u.drr_begin.drr_creation_time =
626	    dsl_dataset_phys(ds)->ds_creation_time;
627	drr->drr_u.drr_begin.drr_type = dmu_objset_type(os);
628	if (is_clone)
629		drr->drr_u.drr_begin.drr_flags |= DRR_FLAG_CLONE;
630	drr->drr_u.drr_begin.drr_toguid = dsl_dataset_phys(ds)->ds_guid;
631	if (dsl_dataset_phys(ds)->ds_flags & DS_FLAG_CI_DATASET)
632		drr->drr_u.drr_begin.drr_flags |= DRR_FLAG_CI_DATA;
633
634	if (fromzb != NULL) {
635		drr->drr_u.drr_begin.drr_fromguid = fromzb->zbm_guid;
636		fromtxg = fromzb->zbm_creation_txg;
637	}
638	dsl_dataset_name(ds, drr->drr_u.drr_begin.drr_toname);
639	if (!dsl_dataset_is_snapshot(ds)) {
640		(void) strlcat(drr->drr_u.drr_begin.drr_toname, "@--head--",
641		    sizeof (drr->drr_u.drr_begin.drr_toname));
642	}
643
644	dsp = kmem_zalloc(sizeof (dmu_sendarg_t), KM_SLEEP);
645
646	dsp->dsa_drr = drr;
647	dsp->dsa_outfd = outfd;
648	dsp->dsa_proc = curproc;
649	dsp->dsa_td = curthread;
650	dsp->dsa_fp = fp;
651	dsp->dsa_os = os;
652	dsp->dsa_off = off;
653	dsp->dsa_toguid = dsl_dataset_phys(ds)->ds_guid;
654	ZIO_SET_CHECKSUM(&dsp->dsa_zc, 0, 0, 0, 0);
655	dsp->dsa_pending_op = PENDING_NONE;
656	dsp->dsa_incremental = (fromzb != NULL);
657	dsp->dsa_featureflags = featureflags;
658
659	mutex_enter(&ds->ds_sendstream_lock);
660	list_insert_head(&ds->ds_sendstreams, dsp);
661	mutex_exit(&ds->ds_sendstream_lock);
662
663	dsl_dataset_long_hold(ds, FTAG);
664	dsl_pool_rele(dp, tag);
665
666	if (dump_bytes(dsp, drr, sizeof (dmu_replay_record_t)) != 0) {
667		err = dsp->dsa_err;
668		goto out;
669	}
670
671	err = traverse_dataset(ds, fromtxg, TRAVERSE_PRE | TRAVERSE_PREFETCH,
672	    backup_cb, dsp);
673
674	if (dsp->dsa_pending_op != PENDING_NONE)
675		if (dump_bytes(dsp, drr, sizeof (dmu_replay_record_t)) != 0)
676			err = SET_ERROR(EINTR);
677
678	if (err != 0) {
679		if (err == EINTR && dsp->dsa_err != 0)
680			err = dsp->dsa_err;
681		goto out;
682	}
683
684	bzero(drr, sizeof (dmu_replay_record_t));
685	drr->drr_type = DRR_END;
686	drr->drr_u.drr_end.drr_checksum = dsp->dsa_zc;
687	drr->drr_u.drr_end.drr_toguid = dsp->dsa_toguid;
688
689	if (dump_bytes(dsp, drr, sizeof (dmu_replay_record_t)) != 0) {
690		err = dsp->dsa_err;
691		goto out;
692	}
693
694out:
695	mutex_enter(&ds->ds_sendstream_lock);
696	list_remove(&ds->ds_sendstreams, dsp);
697	mutex_exit(&ds->ds_sendstream_lock);
698
699	kmem_free(drr, sizeof (dmu_replay_record_t));
700	kmem_free(dsp, sizeof (dmu_sendarg_t));
701
702	dsl_dataset_long_rele(ds, FTAG);
703
704	return (err);
705}
706
707int
708dmu_send_obj(const char *pool, uint64_t tosnap, uint64_t fromsnap,
709    boolean_t embedok, boolean_t large_block_ok,
710#ifdef illumos
711    int outfd, vnode_t *vp, offset_t *off)
712#else
713    int outfd, struct file *fp, offset_t *off)
714#endif
715{
716	dsl_pool_t *dp;
717	dsl_dataset_t *ds;
718	dsl_dataset_t *fromds = NULL;
719	int err;
720
721	err = dsl_pool_hold(pool, FTAG, &dp);
722	if (err != 0)
723		return (err);
724
725	err = dsl_dataset_hold_obj(dp, tosnap, FTAG, &ds);
726	if (err != 0) {
727		dsl_pool_rele(dp, FTAG);
728		return (err);
729	}
730
731	if (fromsnap != 0) {
732		zfs_bookmark_phys_t zb;
733		boolean_t is_clone;
734
735		err = dsl_dataset_hold_obj(dp, fromsnap, FTAG, &fromds);
736		if (err != 0) {
737			dsl_dataset_rele(ds, FTAG);
738			dsl_pool_rele(dp, FTAG);
739			return (err);
740		}
741		if (!dsl_dataset_is_before(ds, fromds, 0))
742			err = SET_ERROR(EXDEV);
743		zb.zbm_creation_time =
744		    dsl_dataset_phys(fromds)->ds_creation_time;
745		zb.zbm_creation_txg = dsl_dataset_phys(fromds)->ds_creation_txg;
746		zb.zbm_guid = dsl_dataset_phys(fromds)->ds_guid;
747		is_clone = (fromds->ds_dir != ds->ds_dir);
748		dsl_dataset_rele(fromds, FTAG);
749		err = dmu_send_impl(FTAG, dp, ds, &zb, is_clone,
750		    embedok, large_block_ok, outfd, fp, off);
751	} else {
752		err = dmu_send_impl(FTAG, dp, ds, NULL, B_FALSE,
753		    embedok, large_block_ok, outfd, fp, off);
754	}
755	dsl_dataset_rele(ds, FTAG);
756	return (err);
757}
758
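/*
 * dmu_send_obj() above identifies the datasets by object number; dmu_send()
 * below resolves them by name, which also allows a bookmark ("#") as the
 * incremental source and a non-snapshot head as the target (in that case
 * the dataset is owned for the duration of the send so it cannot change).
 */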
759int
760dmu_send(const char *tosnap, const char *fromsnap,
761    boolean_t embedok, boolean_t large_block_ok,
762#ifdef illumos
763    int outfd, vnode_t *vp, offset_t *off)
764#else
765    int outfd, struct file *fp, offset_t *off)
766#endif
767{
768	dsl_pool_t *dp;
769	dsl_dataset_t *ds;
770	int err;
771	boolean_t owned = B_FALSE;
772
773	if (fromsnap != NULL && strpbrk(fromsnap, "@#") == NULL)
774		return (SET_ERROR(EINVAL));
775
776	err = dsl_pool_hold(tosnap, FTAG, &dp);
777	if (err != 0)
778		return (err);
779
780	if (strchr(tosnap, '@') == NULL && spa_writeable(dp->dp_spa)) {
781		/*
782		 * We are sending a filesystem or volume.  Ensure
783		 * that it doesn't change by owning the dataset.
784		 */
785		err = dsl_dataset_own(dp, tosnap, FTAG, &ds);
786		owned = B_TRUE;
787	} else {
788		err = dsl_dataset_hold(dp, tosnap, FTAG, &ds);
789	}
790	if (err != 0) {
791		dsl_pool_rele(dp, FTAG);
792		return (err);
793	}
794
795	if (fromsnap != NULL) {
796		zfs_bookmark_phys_t zb;
797		boolean_t is_clone = B_FALSE;
798		int fsnamelen = strchr(tosnap, '@') - tosnap;
799
800		/*
801		 * If the fromsnap is in a different filesystem, then
802		 * mark the send stream as a clone.
803		 */
804		if (strncmp(tosnap, fromsnap, fsnamelen) != 0 ||
805		    (fromsnap[fsnamelen] != '@' &&
806		    fromsnap[fsnamelen] != '#')) {
807			is_clone = B_TRUE;
808		}
809
810		if (strchr(fromsnap, '@')) {
811			dsl_dataset_t *fromds;
812			err = dsl_dataset_hold(dp, fromsnap, FTAG, &fromds);
813			if (err == 0) {
814				if (!dsl_dataset_is_before(ds, fromds, 0))
815					err = SET_ERROR(EXDEV);
816				zb.zbm_creation_time =
817				    dsl_dataset_phys(fromds)->ds_creation_time;
818				zb.zbm_creation_txg =
819				    dsl_dataset_phys(fromds)->ds_creation_txg;
820				zb.zbm_guid = dsl_dataset_phys(fromds)->ds_guid;
821				is_clone = (ds->ds_dir != fromds->ds_dir);
822				dsl_dataset_rele(fromds, FTAG);
823			}
824		} else {
825			err = dsl_bookmark_lookup(dp, fromsnap, ds, &zb);
826		}
827		if (err != 0) {
828			dsl_dataset_rele(ds, FTAG);
829			dsl_pool_rele(dp, FTAG);
830			return (err);
831		}
832		err = dmu_send_impl(FTAG, dp, ds, &zb, is_clone,
833		    embedok, large_block_ok, outfd, fp, off);
834	} else {
835		err = dmu_send_impl(FTAG, dp, ds, NULL, B_FALSE,
836		    embedok, large_block_ok, outfd, fp, off);
837	}
838	if (owned)
839		dsl_dataset_disown(ds, FTAG);
840	else
841		dsl_dataset_rele(ds, FTAG);
842	return (err);
843}
844
845int
846dmu_send_estimate(dsl_dataset_t *ds, dsl_dataset_t *fromds, uint64_t *sizep)
847{
848	dsl_pool_t *dp = ds->ds_dir->dd_pool;
849	int err;
850	uint64_t size;
851
852	ASSERT(dsl_pool_config_held(dp));
853
854	/* tosnap must be a snapshot */
855	if (!dsl_dataset_is_snapshot(ds))
856		return (SET_ERROR(EINVAL));
857
858	/* fromsnap, if provided, must be a snapshot */
859	if (fromds != NULL && !dsl_dataset_is_snapshot(fromds))
860		return (SET_ERROR(EINVAL));
861
862	/*
863	 * fromsnap must be an earlier snapshot from the same fs as tosnap,
864	 * or the origin's fs.
865	 */
866	if (fromds != NULL && !dsl_dataset_is_before(ds, fromds, 0))
867		return (SET_ERROR(EXDEV));
868
869	/* Get uncompressed size estimate of changed data. */
870	if (fromds == NULL) {
871		size = dsl_dataset_phys(ds)->ds_uncompressed_bytes;
872	} else {
873		uint64_t used, comp;
874		err = dsl_dataset_space_written(fromds, ds,
875		    &used, &comp, &size);
876		if (err != 0)
877			return (err);
878	}
879
880	/*
881	 * Assume that space (both on-disk and in-stream) is dominated by
882	 * data.  We will adjust for indirect blocks and the copies property,
883	 * but ignore per-object space used (e.g., dnodes and DRR_OBJECT records).
884	 */
885
886	/*
887	 * Subtract out approximate space used by indirect blocks.
888	 * Assume most space is used by data blocks (non-indirect, non-dnode).
889	 * Assume all blocks are recordsize.  Assume ditto blocks and
890	 * internal fragmentation cancel out compression.
891	 *
892	 * Therefore, space used by indirect blocks is sizeof(blkptr_t) per
893	 * block, which we observe in practice.
894	 */
895	uint64_t recordsize;
896	err = dsl_prop_get_int_ds(ds, "recordsize", &recordsize);
897	if (err != 0)
898		return (err);
899	size -= size / recordsize * sizeof (blkptr_t);
900
901	/* Add in the space for the record associated with each block. */
902	size += size / recordsize * sizeof (dmu_replay_record_t);
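	/*
	 * Illustrative numbers, assuming recordsize is 128K: for 1 GiB of
	 * changed data there are 8192 blocks, so roughly 8192 *
	 * sizeof (blkptr_t) = 1 MiB is subtracted for indirect blocks and
	 * 8192 * sizeof (dmu_replay_record_t) is added back for the
	 * per-block DRR_WRITE headers.
	 */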
903
904	*sizep = size;
905
906	return (0);
907}
908
909typedef struct dmu_recv_begin_arg {
910	const char *drba_origin;
911	dmu_recv_cookie_t *drba_cookie;
912	cred_t *drba_cred;
913	uint64_t drba_snapobj;
914} dmu_recv_begin_arg_t;
915
916static int
917recv_begin_check_existing_impl(dmu_recv_begin_arg_t *drba, dsl_dataset_t *ds,
918    uint64_t fromguid)
919{
920	uint64_t val;
921	int error;
922	dsl_pool_t *dp = ds->ds_dir->dd_pool;
923
924	/* temporary clone name must not exist */
925	error = zap_lookup(dp->dp_meta_objset,
926	    dsl_dir_phys(ds->ds_dir)->dd_child_dir_zapobj, recv_clone_name,
927	    8, 1, &val);
928	if (error != ENOENT)
929		return (error == 0 ? EBUSY : error);
930
931	/* new snapshot name must not exist */
932	error = zap_lookup(dp->dp_meta_objset,
933	    dsl_dataset_phys(ds)->ds_snapnames_zapobj,
934	    drba->drba_cookie->drc_tosnap, 8, 1, &val);
935	if (error != ENOENT)
936		return (error == 0 ? EEXIST : error);
937
938	/*
939	 * Check snapshot limit before receiving. We'll recheck at the
940	 * end, but might as well abort before receiving if we're already over
941	 * the limit.
942	 *
943	 * Note that we do not check the file system limit with
944	 * dsl_dir_fscount_check because the temporary %clones don't count
945	 * against that limit.
946	 */
947	error = dsl_fs_ss_limit_check(ds->ds_dir, 1, ZFS_PROP_SNAPSHOT_LIMIT,
948	    NULL, drba->drba_cred);
949	if (error != 0)
950		return (error);
951
952	if (fromguid != 0) {
953		dsl_dataset_t *snap;
954		uint64_t obj = dsl_dataset_phys(ds)->ds_prev_snap_obj;
955
956		/* Find snapshot in this dir that matches fromguid. */
957		while (obj != 0) {
958			error = dsl_dataset_hold_obj(dp, obj, FTAG,
959			    &snap);
960			if (error != 0)
961				return (SET_ERROR(ENODEV));
962			if (snap->ds_dir != ds->ds_dir) {
963				dsl_dataset_rele(snap, FTAG);
964				return (SET_ERROR(ENODEV));
965			}
966			if (dsl_dataset_phys(snap)->ds_guid == fromguid)
967				break;
968			obj = dsl_dataset_phys(snap)->ds_prev_snap_obj;
969			dsl_dataset_rele(snap, FTAG);
970		}
971		if (obj == 0)
972			return (SET_ERROR(ENODEV));
973
974		if (drba->drba_cookie->drc_force) {
975			drba->drba_snapobj = obj;
976		} else {
977			/*
978			 * If we are not forcing, there must be no
979			 * changes since fromsnap.
980			 */
981			if (dsl_dataset_modified_since_snap(ds, snap)) {
982				dsl_dataset_rele(snap, FTAG);
983				return (SET_ERROR(ETXTBSY));
984			}
985			drba->drba_snapobj = ds->ds_prev->ds_object;
986		}
987
988		dsl_dataset_rele(snap, FTAG);
989	} else {
990		/* if full, then must be forced */
991		if (!drba->drba_cookie->drc_force)
992			return (SET_ERROR(EEXIST));
993		/* start from $ORIGIN@$ORIGIN, if supported */
994		drba->drba_snapobj = dp->dp_origin_snap != NULL ?
995		    dp->dp_origin_snap->ds_object : 0;
996	}
997
998	return (0);
999
1000}
1001
1002static int
1003dmu_recv_begin_check(void *arg, dmu_tx_t *tx)
1004{
1005	dmu_recv_begin_arg_t *drba = arg;
1006	dsl_pool_t *dp = dmu_tx_pool(tx);
1007	struct drr_begin *drrb = drba->drba_cookie->drc_drrb;
1008	uint64_t fromguid = drrb->drr_fromguid;
1009	int flags = drrb->drr_flags;
1010	int error;
1011	uint64_t featureflags = DMU_GET_FEATUREFLAGS(drrb->drr_versioninfo);
1012	dsl_dataset_t *ds;
1013	const char *tofs = drba->drba_cookie->drc_tofs;
1014
1015	/* already checked */
1016	ASSERT3U(drrb->drr_magic, ==, DMU_BACKUP_MAGIC);
1017
1018	if (DMU_GET_STREAM_HDRTYPE(drrb->drr_versioninfo) ==
1019	    DMU_COMPOUNDSTREAM ||
1020	    drrb->drr_type >= DMU_OST_NUMTYPES ||
1021	    ((flags & DRR_FLAG_CLONE) && drba->drba_origin == NULL))
1022		return (SET_ERROR(EINVAL));
1023
1024	/* Verify pool version supports SA if SA_SPILL feature set */
1025	if ((featureflags & DMU_BACKUP_FEATURE_SA_SPILL) &&
1026	    spa_version(dp->dp_spa) < SPA_VERSION_SA)
1027		return (SET_ERROR(ENOTSUP));
1028
1029	/*
1030	 * The receiving code doesn't know how to translate a WRITE_EMBEDDED
1031	 * record to a plain WRITE record, so the pool must have the
1032	 * EMBEDDED_DATA feature enabled if the stream has WRITE_EMBEDDED
1033	 * records.  Same with WRITE_EMBEDDED records that use LZ4 compression.
1034	 */
1035	if ((featureflags & DMU_BACKUP_FEATURE_EMBED_DATA) &&
1036	    !spa_feature_is_enabled(dp->dp_spa, SPA_FEATURE_EMBEDDED_DATA))
1037		return (SET_ERROR(ENOTSUP));
1038	if ((featureflags & DMU_BACKUP_FEATURE_EMBED_DATA_LZ4) &&
1039	    !spa_feature_is_enabled(dp->dp_spa, SPA_FEATURE_LZ4_COMPRESS))
1040		return (SET_ERROR(ENOTSUP));
1041
1042	/*
1043	 * The receiving code doesn't know how to translate large blocks
1044	 * to smaller ones, so the pool must have the LARGE_BLOCKS
1045	 * feature enabled if the stream has LARGE_BLOCKS.
1046	 */
1047	if ((featureflags & DMU_BACKUP_FEATURE_LARGE_BLOCKS) &&
1048	    !spa_feature_is_enabled(dp->dp_spa, SPA_FEATURE_LARGE_BLOCKS))
1049		return (SET_ERROR(ENOTSUP));
1050
1051	error = dsl_dataset_hold(dp, tofs, FTAG, &ds);
1052	if (error == 0) {
1053		/* target fs already exists; recv into temp clone */
1054
1055		/* Can't recv a clone into an existing fs */
1056		if (flags & DRR_FLAG_CLONE) {
1057			dsl_dataset_rele(ds, FTAG);
1058			return (SET_ERROR(EINVAL));
1059		}
1060
1061		error = recv_begin_check_existing_impl(drba, ds, fromguid);
1062		dsl_dataset_rele(ds, FTAG);
1063	} else if (error == ENOENT) {
1064		/* target fs does not exist; must be a full backup or clone */
1065		char buf[MAXNAMELEN];
1066
1067		/*
1068		 * If it's a non-clone incremental, we are missing the
1069		 * target fs, so fail the recv.
1070		 */
1071		if (fromguid != 0 && !(flags & DRR_FLAG_CLONE))
1072			return (SET_ERROR(ENOENT));
1073
1074		/* Open the parent of tofs */
1075		ASSERT3U(strlen(tofs), <, MAXNAMELEN);
1076		(void) strlcpy(buf, tofs, strrchr(tofs, '/') - tofs + 1);
1077		error = dsl_dataset_hold(dp, buf, FTAG, &ds);
1078		if (error != 0)
1079			return (error);
1080
1081		/*
1082		 * Check filesystem and snapshot limits before receiving. We'll
1083		 * recheck snapshot limits again at the end (we create the
1084		 * filesystems and increment those counts during begin_sync).
1085		 */
1086		error = dsl_fs_ss_limit_check(ds->ds_dir, 1,
1087		    ZFS_PROP_FILESYSTEM_LIMIT, NULL, drba->drba_cred);
1088		if (error != 0) {
1089			dsl_dataset_rele(ds, FTAG);
1090			return (error);
1091		}
1092
1093		error = dsl_fs_ss_limit_check(ds->ds_dir, 1,
1094		    ZFS_PROP_SNAPSHOT_LIMIT, NULL, drba->drba_cred);
1095		if (error != 0) {
1096			dsl_dataset_rele(ds, FTAG);
1097			return (error);
1098		}
1099
1100		if (drba->drba_origin != NULL) {
1101			dsl_dataset_t *origin;
1102			error = dsl_dataset_hold(dp, drba->drba_origin,
1103			    FTAG, &origin);
1104			if (error != 0) {
1105				dsl_dataset_rele(ds, FTAG);
1106				return (error);
1107			}
1108			if (!dsl_dataset_is_snapshot(origin)) {
1109				dsl_dataset_rele(origin, FTAG);
1110				dsl_dataset_rele(ds, FTAG);
1111				return (SET_ERROR(EINVAL));
1112			}
1113			if (dsl_dataset_phys(origin)->ds_guid != fromguid) {
1114				dsl_dataset_rele(origin, FTAG);
1115				dsl_dataset_rele(ds, FTAG);
1116				return (SET_ERROR(ENODEV));
1117			}
1118			dsl_dataset_rele(origin, FTAG);
1119		}
1120		dsl_dataset_rele(ds, FTAG);
1121		error = 0;
1122	}
1123	return (error);
1124}
1125
1126static void
1127dmu_recv_begin_sync(void *arg, dmu_tx_t *tx)
1128{
1129	dmu_recv_begin_arg_t *drba = arg;
1130	dsl_pool_t *dp = dmu_tx_pool(tx);
1131	struct drr_begin *drrb = drba->drba_cookie->drc_drrb;
1132	const char *tofs = drba->drba_cookie->drc_tofs;
1133	dsl_dataset_t *ds, *newds;
1134	uint64_t dsobj;
1135	int error;
1136	uint64_t crflags;
1137
1138	crflags = (drrb->drr_flags & DRR_FLAG_CI_DATA) ?
1139	    DS_FLAG_CI_DATASET : 0;
1140
1141	error = dsl_dataset_hold(dp, tofs, FTAG, &ds);
1142	if (error == 0) {
1143		/* create temporary clone */
1144		dsl_dataset_t *snap = NULL;
1145		if (drba->drba_snapobj != 0) {
1146			VERIFY0(dsl_dataset_hold_obj(dp,
1147			    drba->drba_snapobj, FTAG, &snap));
1148		}
1149		dsobj = dsl_dataset_create_sync(ds->ds_dir, recv_clone_name,
1150		    snap, crflags, drba->drba_cred, tx);
1151		if (drba->drba_snapobj != 0)
1152			dsl_dataset_rele(snap, FTAG);
1153		dsl_dataset_rele(ds, FTAG);
1154	} else {
1155		dsl_dir_t *dd;
1156		const char *tail;
1157		dsl_dataset_t *origin = NULL;
1158
1159		VERIFY0(dsl_dir_hold(dp, tofs, FTAG, &dd, &tail));
1160
1161		if (drba->drba_origin != NULL) {
1162			VERIFY0(dsl_dataset_hold(dp, drba->drba_origin,
1163			    FTAG, &origin));
1164		}
1165
1166		/* Create new dataset. */
1167		dsobj = dsl_dataset_create_sync(dd,
1168		    strrchr(tofs, '/') + 1,
1169		    origin, crflags, drba->drba_cred, tx);
1170		if (origin != NULL)
1171			dsl_dataset_rele(origin, FTAG);
1172		dsl_dir_rele(dd, FTAG);
1173		drba->drba_cookie->drc_newfs = B_TRUE;
1174	}
1175	VERIFY0(dsl_dataset_own_obj(dp, dsobj, dmu_recv_tag, &newds));
1176
1177	if ((DMU_GET_FEATUREFLAGS(drrb->drr_versioninfo) &
1178	    DMU_BACKUP_FEATURE_LARGE_BLOCKS) &&
1179	    !newds->ds_large_blocks) {
1180		dsl_dataset_activate_large_blocks_sync_impl(dsobj, tx);
1181		newds->ds_large_blocks = B_TRUE;
1182	}
1183
1184	dmu_buf_will_dirty(newds->ds_dbuf, tx);
1185	dsl_dataset_phys(newds)->ds_flags |= DS_FLAG_INCONSISTENT;
1186
1187	/*
1188	 * If we actually created a non-clone, we need to create the
1189	 * objset in our new dataset.
1190	 */
1191	if (BP_IS_HOLE(dsl_dataset_get_blkptr(newds))) {
1192		(void) dmu_objset_create_impl(dp->dp_spa,
1193		    newds, dsl_dataset_get_blkptr(newds), drrb->drr_type, tx);
1194	}
1195
1196	drba->drba_cookie->drc_ds = newds;
1197
1198	spa_history_log_internal_ds(newds, "receive", tx, "");
1199}
1200
1201/*
1202 * NB: callers *MUST* call dmu_recv_stream() if dmu_recv_begin()
1203 * succeeds; otherwise we will leak the holds on the datasets.
1204 */
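/*
 * dmu_recv_begin() runs the check/sync pair above as a dsl_sync_task: the
 * check half validates the stream header against the destination pool from
 * open context, and the sync half creates the dataset (a temporary "%recv"
 * clone when the target already exists, otherwise a new filesystem or
 * clone), owns it via dmu_recv_tag and marks it DS_FLAG_INCONSISTENT until
 * the receive completes.
 */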
1205int
1206dmu_recv_begin(char *tofs, char *tosnap, struct drr_begin *drrb,
1207    boolean_t force, char *origin, dmu_recv_cookie_t *drc)
1208{
1209	dmu_recv_begin_arg_t drba = { 0 };
1210	dmu_replay_record_t *drr;
1211
1212	bzero(drc, sizeof (dmu_recv_cookie_t));
1213	drc->drc_drrb = drrb;
1214	drc->drc_tosnap = tosnap;
1215	drc->drc_tofs = tofs;
1216	drc->drc_force = force;
1217	drc->drc_cred = CRED();
1218
1219	if (drrb->drr_magic == BSWAP_64(DMU_BACKUP_MAGIC))
1220		drc->drc_byteswap = B_TRUE;
1221	else if (drrb->drr_magic != DMU_BACKUP_MAGIC)
1222		return (SET_ERROR(EINVAL));
1223
1224	drr = kmem_zalloc(sizeof (dmu_replay_record_t), KM_SLEEP);
1225	drr->drr_type = DRR_BEGIN;
1226	drr->drr_u.drr_begin = *drc->drc_drrb;
1227	if (drc->drc_byteswap) {
1228		fletcher_4_incremental_byteswap(drr,
1229		    sizeof (dmu_replay_record_t), &drc->drc_cksum);
1230	} else {
1231		fletcher_4_incremental_native(drr,
1232		    sizeof (dmu_replay_record_t), &drc->drc_cksum);
1233	}
1234	kmem_free(drr, sizeof (dmu_replay_record_t));
1235
1236	if (drc->drc_byteswap) {
1237		drrb->drr_magic = BSWAP_64(drrb->drr_magic);
1238		drrb->drr_versioninfo = BSWAP_64(drrb->drr_versioninfo);
1239		drrb->drr_creation_time = BSWAP_64(drrb->drr_creation_time);
1240		drrb->drr_type = BSWAP_32(drrb->drr_type);
1241		drrb->drr_toguid = BSWAP_64(drrb->drr_toguid);
1242		drrb->drr_fromguid = BSWAP_64(drrb->drr_fromguid);
1243	}
1244
1245	drba.drba_origin = origin;
1246	drba.drba_cookie = drc;
1247	drba.drba_cred = CRED();
1248
1249	return (dsl_sync_task(tofs, dmu_recv_begin_check, dmu_recv_begin_sync,
1250	    &drba, 5, ZFS_SPACE_CHECK_NORMAL));
1251}
1252
1253struct restorearg {
1254	int err;
1255	boolean_t byteswap;
1256	kthread_t *td;
1257	struct file *fp;
1258	char *buf;
1259	uint64_t voff;
1260	int bufsize; /* amount of memory allocated for buf */
1261	zio_cksum_t cksum;
1262	avl_tree_t *guid_to_ds_map;
1263};
1264
1265typedef struct guid_map_entry {
1266	uint64_t	guid;
1267	dsl_dataset_t	*gme_ds;
1268	avl_node_t	avlnode;
1269} guid_map_entry_t;
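/*
 * Dedup'ed streams refer back to previously-sent data with DRR_WRITE_BYREF
 * records keyed by (dataset GUID, object, offset).  The guid_to_ds_map AVL
 * tree of guid_map_entry_t nodes lets restore_write_byref() translate a
 * referenced GUID back to the already-received dataset holding that data.
 */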
1270
1271static int
1272guid_compare(const void *arg1, const void *arg2)
1273{
1274	const guid_map_entry_t *gmep1 = arg1;
1275	const guid_map_entry_t *gmep2 = arg2;
1276
1277	if (gmep1->guid < gmep2->guid)
1278		return (-1);
1279	else if (gmep1->guid > gmep2->guid)
1280		return (1);
1281	return (0);
1282}
1283
1284static void
1285free_guid_map_onexit(void *arg)
1286{
1287	avl_tree_t *ca = arg;
1288	void *cookie = NULL;
1289	guid_map_entry_t *gmep;
1290
1291	while ((gmep = avl_destroy_nodes(ca, &cookie)) != NULL) {
1292		dsl_dataset_long_rele(gmep->gme_ds, gmep);
1293		dsl_dataset_rele(gmep->gme_ds, gmep);
1294		kmem_free(gmep, sizeof (guid_map_entry_t));
1295	}
1296	avl_destroy(ca);
1297	kmem_free(ca, sizeof (avl_tree_t));
1298}
1299
1300static int
1301restore_bytes(struct restorearg *ra, void *buf, int len, off_t off, ssize_t *resid)
1302{
1303	struct uio auio;
1304	struct iovec aiov;
1305	int error;
1306
1307	aiov.iov_base = buf;
1308	aiov.iov_len = len;
1309	auio.uio_iov = &aiov;
1310	auio.uio_iovcnt = 1;
1311	auio.uio_resid = len;
1312	auio.uio_segflg = UIO_SYSSPACE;
1313	auio.uio_rw = UIO_READ;
1314	auio.uio_offset = off;
1315	auio.uio_td = ra->td;
1316#ifdef _KERNEL
1317	error = fo_read(ra->fp, &auio, ra->td->td_ucred, FOF_OFFSET, ra->td);
1318#else
1319	fprintf(stderr, "%s: returning EOPNOTSUPP\n", __func__);
1320	error = EOPNOTSUPP;
1321#endif
1322	*resid = auio.uio_resid;
1323	return (error);
1324}
1325
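/*
 * restore_read() pulls exactly len bytes (len must be a multiple of 8) from
 * the stream into buf, or into ra->buf when buf is NULL, folding them into
 * the running receive checksum.  It returns the buffer on success; on a
 * short read or I/O error it sets ra->err and returns NULL.
 */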
1326static void *
1327restore_read(struct restorearg *ra, int len, char *buf)
1328{
1329	int done = 0;
1330
1331	if (buf == NULL)
1332		buf = ra->buf;
1333
1334	/* some things will require 8-byte alignment, so everything must */
1335	ASSERT0(len % 8);
1336	ASSERT3U(len, <=, ra->bufsize);
1337
1338	while (done < len) {
1339		ssize_t resid;
1340
1341		ra->err = restore_bytes(ra, buf + done,
1342		    len - done, ra->voff, &resid);
1343
1344		if (resid == len - done)
1345			ra->err = SET_ERROR(EINVAL);
1346		ra->voff += len - done - resid;
1347		done = len - resid;
1348		if (ra->err != 0)
1349			return (NULL);
1350	}
1351
1352	ASSERT3U(done, ==, len);
1353	if (ra->byteswap)
1354		fletcher_4_incremental_byteswap(buf, len, &ra->cksum);
1355	else
1356		fletcher_4_incremental_native(buf, len, &ra->cksum);
1357	return (buf);
1358}
1359
1360static void
1361backup_byteswap(dmu_replay_record_t *drr)
1362{
1363#define	DO64(X) (drr->drr_u.X = BSWAP_64(drr->drr_u.X))
1364#define	DO32(X) (drr->drr_u.X = BSWAP_32(drr->drr_u.X))
1365	drr->drr_type = BSWAP_32(drr->drr_type);
1366	drr->drr_payloadlen = BSWAP_32(drr->drr_payloadlen);
1367	switch (drr->drr_type) {
1368	case DRR_BEGIN:
1369		DO64(drr_begin.drr_magic);
1370		DO64(drr_begin.drr_versioninfo);
1371		DO64(drr_begin.drr_creation_time);
1372		DO32(drr_begin.drr_type);
1373		DO32(drr_begin.drr_flags);
1374		DO64(drr_begin.drr_toguid);
1375		DO64(drr_begin.drr_fromguid);
1376		break;
1377	case DRR_OBJECT:
1378		DO64(drr_object.drr_object);
1379		DO32(drr_object.drr_type);
1380		DO32(drr_object.drr_bonustype);
1381		DO32(drr_object.drr_blksz);
1382		DO32(drr_object.drr_bonuslen);
1383		DO64(drr_object.drr_toguid);
1384		break;
1385	case DRR_FREEOBJECTS:
1386		DO64(drr_freeobjects.drr_firstobj);
1387		DO64(drr_freeobjects.drr_numobjs);
1388		DO64(drr_freeobjects.drr_toguid);
1389		break;
1390	case DRR_WRITE:
1391		DO64(drr_write.drr_object);
1392		DO32(drr_write.drr_type);
1393		DO64(drr_write.drr_offset);
1394		DO64(drr_write.drr_length);
1395		DO64(drr_write.drr_toguid);
1396		DO64(drr_write.drr_key.ddk_cksum.zc_word[0]);
1397		DO64(drr_write.drr_key.ddk_cksum.zc_word[1]);
1398		DO64(drr_write.drr_key.ddk_cksum.zc_word[2]);
1399		DO64(drr_write.drr_key.ddk_cksum.zc_word[3]);
1400		DO64(drr_write.drr_key.ddk_prop);
1401		break;
1402	case DRR_WRITE_BYREF:
1403		DO64(drr_write_byref.drr_object);
1404		DO64(drr_write_byref.drr_offset);
1405		DO64(drr_write_byref.drr_length);
1406		DO64(drr_write_byref.drr_toguid);
1407		DO64(drr_write_byref.drr_refguid);
1408		DO64(drr_write_byref.drr_refobject);
1409		DO64(drr_write_byref.drr_refoffset);
1410		DO64(drr_write_byref.drr_key.ddk_cksum.zc_word[0]);
1411		DO64(drr_write_byref.drr_key.ddk_cksum.zc_word[1]);
1412		DO64(drr_write_byref.drr_key.ddk_cksum.zc_word[2]);
1413		DO64(drr_write_byref.drr_key.ddk_cksum.zc_word[3]);
1414		DO64(drr_write_byref.drr_key.ddk_prop);
1415		break;
1416	case DRR_WRITE_EMBEDDED:
1417		DO64(drr_write_embedded.drr_object);
1418		DO64(drr_write_embedded.drr_offset);
1419		DO64(drr_write_embedded.drr_length);
1420		DO64(drr_write_embedded.drr_toguid);
1421		DO32(drr_write_embedded.drr_lsize);
1422		DO32(drr_write_embedded.drr_psize);
1423		break;
1424	case DRR_FREE:
1425		DO64(drr_free.drr_object);
1426		DO64(drr_free.drr_offset);
1427		DO64(drr_free.drr_length);
1428		DO64(drr_free.drr_toguid);
1429		break;
1430	case DRR_SPILL:
1431		DO64(drr_spill.drr_object);
1432		DO64(drr_spill.drr_length);
1433		DO64(drr_spill.drr_toguid);
1434		break;
1435	case DRR_END:
1436		DO64(drr_end.drr_checksum.zc_word[0]);
1437		DO64(drr_end.drr_checksum.zc_word[1]);
1438		DO64(drr_end.drr_checksum.zc_word[2]);
1439		DO64(drr_end.drr_checksum.zc_word[3]);
1440		DO64(drr_end.drr_toguid);
1441		break;
1442	}
1443#undef DO64
1444#undef DO32
1445}
1446
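/*
 * A dnode's block pointers and bonus buffer share the tail of the 512-byte
 * dnode_phys_t, so the sender's block-pointer count can be deduced from the
 * bonus length: with DN_MAX_BONUSLEN == 320, a 320-byte bonus leaves room
 * for one blkptr_t while a 64-byte bonus leaves 1 + (256 >> SPA_BLKPTRSHIFT)
 * == 3.  An SA bonus is special: it is allowed to grow over the second and
 * third block pointers (spilling into a separate block if needed), so it
 * always implies a single block pointer.
 */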
1447static inline uint8_t
1448deduce_nblkptr(dmu_object_type_t bonus_type, uint64_t bonus_size)
1449{
1450	if (bonus_type == DMU_OT_SA) {
1451		return (1);
1452	} else {
1453		return (1 +
1454		    ((DN_MAX_BONUSLEN - bonus_size) >> SPA_BLKPTRSHIFT));
1455	}
1456}
1457
1458static int
1459restore_object(struct restorearg *ra, objset_t *os, struct drr_object *drro)
1460{
1461	dmu_object_info_t doi;
1462	dmu_tx_t *tx;
1463	void *data = NULL;
1464	uint64_t object;
1465	int err;
1466
1467	if (drro->drr_type == DMU_OT_NONE ||
1468	    !DMU_OT_IS_VALID(drro->drr_type) ||
1469	    !DMU_OT_IS_VALID(drro->drr_bonustype) ||
1470	    drro->drr_checksumtype >= ZIO_CHECKSUM_FUNCTIONS ||
1471	    drro->drr_compress >= ZIO_COMPRESS_FUNCTIONS ||
1472	    P2PHASE(drro->drr_blksz, SPA_MINBLOCKSIZE) ||
1473	    drro->drr_blksz < SPA_MINBLOCKSIZE ||
1474	    drro->drr_blksz > spa_maxblocksize(dmu_objset_spa(os)) ||
1475	    drro->drr_bonuslen > DN_MAX_BONUSLEN) {
1476		return (SET_ERROR(EINVAL));
1477	}
1478
1479	err = dmu_object_info(os, drro->drr_object, &doi);
1480
1481	if (err != 0 && err != ENOENT)
1482		return (SET_ERROR(EINVAL));
1483	object = err == 0 ? drro->drr_object : DMU_NEW_OBJECT;
1484
1485	if (drro->drr_bonuslen) {
1486		data = restore_read(ra, P2ROUNDUP(drro->drr_bonuslen, 8), NULL);
1487		if (ra->err != 0)
1488			return (ra->err);
1489	}
1490
1491	/*
1492	 * If we are losing blkptrs or changing the block size, this must
1493	 * be a new file instance.  We must clear out the previous file
1494	 * contents before we can change this type of metadata in the dnode.
1495	 */
1496	if (err == 0) {
1497		int nblkptr;
1498
1499		nblkptr = deduce_nblkptr(drro->drr_bonustype,
1500		    drro->drr_bonuslen);
1501
1502		if (drro->drr_blksz != doi.doi_data_block_size ||
1503		    nblkptr < doi.doi_nblkptr) {
1504			err = dmu_free_long_range(os, drro->drr_object,
1505			    0, DMU_OBJECT_END);
1506			if (err != 0)
1507				return (SET_ERROR(EINVAL));
1508		}
1509	}
1510
1511	tx = dmu_tx_create(os);
1512	dmu_tx_hold_bonus(tx, object);
1513	err = dmu_tx_assign(tx, TXG_WAIT);
1514	if (err != 0) {
1515		dmu_tx_abort(tx);
1516		return (err);
1517	}
1518
1519	if (object == DMU_NEW_OBJECT) {
1520		/* currently free, want to be allocated */
1521		err = dmu_object_claim(os, drro->drr_object,
1522		    drro->drr_type, drro->drr_blksz,
1523		    drro->drr_bonustype, drro->drr_bonuslen, tx);
1524	} else if (drro->drr_type != doi.doi_type ||
1525	    drro->drr_blksz != doi.doi_data_block_size ||
1526	    drro->drr_bonustype != doi.doi_bonus_type ||
1527	    drro->drr_bonuslen != doi.doi_bonus_size) {
1528		/* currently allocated, but with different properties */
1529		err = dmu_object_reclaim(os, drro->drr_object,
1530		    drro->drr_type, drro->drr_blksz,
1531		    drro->drr_bonustype, drro->drr_bonuslen, tx);
1532	}
1533	if (err != 0) {
1534		dmu_tx_commit(tx);
1535		return (SET_ERROR(EINVAL));
1536	}
1537
1538	dmu_object_set_checksum(os, drro->drr_object, drro->drr_checksumtype,
1539	    tx);
1540	dmu_object_set_compress(os, drro->drr_object, drro->drr_compress, tx);
1541
1542	if (data != NULL) {
1543		dmu_buf_t *db;
1544
1545		VERIFY(0 == dmu_bonus_hold(os, drro->drr_object, FTAG, &db));
1546		dmu_buf_will_dirty(db, tx);
1547
1548		ASSERT3U(db->db_size, >=, drro->drr_bonuslen);
1549		bcopy(data, db->db_data, drro->drr_bonuslen);
1550		if (ra->byteswap) {
1551			dmu_object_byteswap_t byteswap =
1552			    DMU_OT_BYTESWAP(drro->drr_bonustype);
1553			dmu_ot_byteswap[byteswap].ob_func(db->db_data,
1554			    drro->drr_bonuslen);
1555		}
1556		dmu_buf_rele(db, FTAG);
1557	}
1558	dmu_tx_commit(tx);
1559	return (0);
1560}
1561
1562/* ARGSUSED */
1563static int
1564restore_freeobjects(struct restorearg *ra, objset_t *os,
1565    struct drr_freeobjects *drrfo)
1566{
1567	uint64_t obj;
1568
1569	if (drrfo->drr_firstobj + drrfo->drr_numobjs < drrfo->drr_firstobj)
1570		return (SET_ERROR(EINVAL));
1571
1572	for (obj = drrfo->drr_firstobj;
1573	    obj < drrfo->drr_firstobj + drrfo->drr_numobjs;
1574	    (void) dmu_object_next(os, &obj, FALSE, 0)) {
1575		int err;
1576
1577		if (dmu_object_info(os, obj, NULL) != 0)
1578			continue;
1579
1580		err = dmu_free_long_object(os, obj);
1581		if (err != 0)
1582			return (err);
1583	}
1584	return (0);
1585}
1586
1587static int
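/*
 * restore_write() reads the payload straight into a loaned ARC buffer
 * (dmu_request_arcbuf()) and then hands that buffer to the dbuf layer with
 * dmu_assign_arcbuf(), so in the common case the receive path avoids an
 * extra copy of every data block.
 */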
1588restore_write(struct restorearg *ra, objset_t *os,
1589    struct drr_write *drrw)
1590{
1591	dmu_tx_t *tx;
1592	void *data;
1593	int err;
1594
1595	if (drrw->drr_offset + drrw->drr_length < drrw->drr_offset ||
1596	    !DMU_OT_IS_VALID(drrw->drr_type))
1597		return (SET_ERROR(EINVAL));
1598
1599	if (dmu_object_info(os, drrw->drr_object, NULL) != 0)
1600		return (SET_ERROR(EINVAL));
1601
1602	dmu_buf_t *bonus;
1603	if (dmu_bonus_hold(os, drrw->drr_object, FTAG, &bonus) != 0)
1604		return (SET_ERROR(EINVAL));
1605
1606	arc_buf_t *abuf = dmu_request_arcbuf(bonus, drrw->drr_length);
1607
1608	data = restore_read(ra, drrw->drr_length, abuf->b_data);
1609	if (data == NULL) {
1610		dmu_return_arcbuf(abuf);
1611		dmu_buf_rele(bonus, FTAG);
1612		return (ra->err);
1613	}
1614
1615	tx = dmu_tx_create(os);
1616
1617	dmu_tx_hold_write(tx, drrw->drr_object,
1618	    drrw->drr_offset, drrw->drr_length);
1619	err = dmu_tx_assign(tx, TXG_WAIT);
1620	if (err != 0) {
1621		dmu_return_arcbuf(abuf);
1622		dmu_buf_rele(bonus, FTAG);
1623		dmu_tx_abort(tx);
1624		return (err);
1625	}
1626	if (ra->byteswap) {
1627		dmu_object_byteswap_t byteswap =
1628		    DMU_OT_BYTESWAP(drrw->drr_type);
1629		dmu_ot_byteswap[byteswap].ob_func(data, drrw->drr_length);
1630	}
1631	dmu_assign_arcbuf(bonus, drrw->drr_offset, abuf, tx);
1632	dmu_tx_commit(tx);
1633	dmu_buf_rele(bonus, FTAG);
1634	return (0);
1635}
1636
1637/*
1638 * Handle a DRR_WRITE_BYREF record.  This record is used in dedup'ed
1639 * streams to refer to a copy of the data that is already on the
1640 * system because it came in earlier in the stream.  This function
1641 * finds the earlier copy of the data, and uses that copy instead of
1642 * data from the stream to fulfill this write.
1643 */
1644static int
1645restore_write_byref(struct restorearg *ra, objset_t *os,
1646    struct drr_write_byref *drrwbr)
1647{
1648	dmu_tx_t *tx;
1649	int err;
1650	guid_map_entry_t gmesrch;
1651	guid_map_entry_t *gmep;
1652	avl_index_t where;
1653	objset_t *ref_os = NULL;
1654	dmu_buf_t *dbp;
1655
1656	if (drrwbr->drr_offset + drrwbr->drr_length < drrwbr->drr_offset)
1657		return (SET_ERROR(EINVAL));
1658
1659	/*
1660	 * If the GUID of the referenced dataset is different from the
1661	 * GUID of the target dataset, find the referenced dataset.
1662	 */
1663	if (drrwbr->drr_toguid != drrwbr->drr_refguid) {
1664		gmesrch.guid = drrwbr->drr_refguid;
1665		if ((gmep = avl_find(ra->guid_to_ds_map, &gmesrch,
1666		    &where)) == NULL) {
1667			return (SET_ERROR(EINVAL));
1668		}
1669		if (dmu_objset_from_ds(gmep->gme_ds, &ref_os))
1670			return (SET_ERROR(EINVAL));
1671	} else {
1672		ref_os = os;
1673	}
1674
1675	err = dmu_buf_hold(ref_os, drrwbr->drr_refobject,
1676	    drrwbr->drr_refoffset, FTAG, &dbp, DMU_READ_PREFETCH);
1677	if (err != 0)
1678		return (err);
1679
1680	tx = dmu_tx_create(os);
1681
1682	dmu_tx_hold_write(tx, drrwbr->drr_object,
1683	    drrwbr->drr_offset, drrwbr->drr_length);
1684	err = dmu_tx_assign(tx, TXG_WAIT);
1685	if (err != 0) {
1686		dmu_tx_abort(tx);
1687		return (err);
1688	}
1689	dmu_write(os, drrwbr->drr_object,
1690	    drrwbr->drr_offset, drrwbr->drr_length, dbp->db_data, tx);
1691	dmu_buf_rele(dbp, FTAG);
1692	dmu_tx_commit(tx);
1693	return (0);
1694}
1695
1696static int
1697restore_write_embedded(struct restorearg *ra, objset_t *os,
1698    struct drr_write_embedded *drrwnp)
1699{
1700	dmu_tx_t *tx;
1701	int err;
1702	void *data;
1703
1704	if (drrwnp->drr_offset + drrwnp->drr_length < drrwnp->drr_offset)
1705		return (EINVAL);
1706
1707	if (drrwnp->drr_psize > BPE_PAYLOAD_SIZE)
1708		return (EINVAL);
1709
1710	if (drrwnp->drr_etype >= NUM_BP_EMBEDDED_TYPES)
1711		return (EINVAL);
1712	if (drrwnp->drr_compression >= ZIO_COMPRESS_FUNCTIONS)
1713		return (EINVAL);
1714
1715	data = restore_read(ra, P2ROUNDUP(drrwnp->drr_psize, 8), NULL);
1716	if (data == NULL)
1717		return (ra->err);
1718
1719	tx = dmu_tx_create(os);
1720
1721	dmu_tx_hold_write(tx, drrwnp->drr_object,
1722	    drrwnp->drr_offset, drrwnp->drr_length);
1723	err = dmu_tx_assign(tx, TXG_WAIT);
1724	if (err != 0) {
1725		dmu_tx_abort(tx);
1726		return (err);
1727	}
1728
1729	dmu_write_embedded(os, drrwnp->drr_object,
1730	    drrwnp->drr_offset, data, drrwnp->drr_etype,
1731	    drrwnp->drr_compression, drrwnp->drr_lsize, drrwnp->drr_psize,
1732	    ra->byteswap ^ ZFS_HOST_BYTEORDER, tx);
1733
1734	dmu_tx_commit(tx);
1735	return (0);
1736}
1737
1738static int
1739restore_spill(struct restorearg *ra, objset_t *os, struct drr_spill *drrs)
1740{
1741	dmu_tx_t *tx;
1742	void *data;
1743	dmu_buf_t *db, *db_spill;
1744	int err;
1745
1746	if (drrs->drr_length < SPA_MINBLOCKSIZE ||
1747	    drrs->drr_length > spa_maxblocksize(dmu_objset_spa(os)))
1748		return (SET_ERROR(EINVAL));
1749
1750	data = restore_read(ra, drrs->drr_length, NULL);
1751	if (data == NULL)
1752		return (ra->err);
1753
1754	if (dmu_object_info(os, drrs->drr_object, NULL) != 0)
1755		return (SET_ERROR(EINVAL));
1756
1757	VERIFY(0 == dmu_bonus_hold(os, drrs->drr_object, FTAG, &db));
1758	if ((err = dmu_spill_hold_by_bonus(db, FTAG, &db_spill)) != 0) {
1759		dmu_buf_rele(db, FTAG);
1760		return (err);
1761	}
1762
1763	tx = dmu_tx_create(os);
1764
1765	dmu_tx_hold_spill(tx, db->db_object);
1766
1767	err = dmu_tx_assign(tx, TXG_WAIT);
1768	if (err != 0) {
1769		dmu_buf_rele(db, FTAG);
1770		dmu_buf_rele(db_spill, FTAG);
1771		dmu_tx_abort(tx);
1772		return (err);
1773	}
1774	dmu_buf_will_dirty(db_spill, tx);
1775
1776	if (db_spill->db_size < drrs->drr_length)
1777		VERIFY(0 == dbuf_spill_set_blksz(db_spill,
1778		    drrs->drr_length, tx));
1779	bcopy(data, db_spill->db_data, drrs->drr_length);
1780
1781	dmu_buf_rele(db, FTAG);
1782	dmu_buf_rele(db_spill, FTAG);
1783
1784	dmu_tx_commit(tx);
1785	return (0);
1786}
1787
1788/* ARGSUSED */
1789static int
1790restore_free(struct restorearg *ra, objset_t *os,
1791    struct drr_free *drrf)
1792{
1793	int err;
1794
1795	if (drrf->drr_length != -1ULL &&
1796	    drrf->drr_offset + drrf->drr_length < drrf->drr_offset)
1797		return (SET_ERROR(EINVAL));
1798
1799	if (dmu_object_info(os, drrf->drr_object, NULL) != 0)
1800		return (SET_ERROR(EINVAL));
1801
1802	err = dmu_free_long_range(os, drrf->drr_object,
1803	    drrf->drr_offset, drrf->drr_length);
1804	return (err);
1805}
1806
1807/* used to destroy the drc_ds on error */
1808static void
1809dmu_recv_cleanup_ds(dmu_recv_cookie_t *drc)
1810{
1811	char name[MAXNAMELEN];
1812	dsl_dataset_name(drc->drc_ds, name);
1813	dsl_dataset_disown(drc->drc_ds, dmu_recv_tag);
1814	(void) dsl_destroy_head(name);
1815}
1816
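/*
 * Apply a replay stream to the dataset held in the receive cookie: read
 * records from fp and dispatch them to the restore_*() handlers until a
 * DRR_END record (or an error) terminates the loop.
 */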
1817/*
1818 * NB: callers *must* call dmu_recv_end() if this succeeds.
1819 */
1820int
1821dmu_recv_stream(dmu_recv_cookie_t *drc, struct file *fp, offset_t *voffp,
1822    int cleanup_fd, uint64_t *action_handlep)
1823{
1824	struct restorearg ra = { 0 };
1825	dmu_replay_record_t *drr;
1826	objset_t *os;
1827	zio_cksum_t pcksum;
1828	int featureflags;
1829
1830	ra.byteswap = drc->drc_byteswap;
1831	ra.cksum = drc->drc_cksum;
1832	ra.td = curthread;
1833	ra.fp = fp;
1834	ra.voff = *voffp;
1835	ra.bufsize = SPA_MAXBLOCKSIZE;
1836	ra.buf = kmem_alloc(ra.bufsize, KM_SLEEP);
1837
1838	/* these were verified in dmu_recv_begin */
1839	ASSERT3U(DMU_GET_STREAM_HDRTYPE(drc->drc_drrb->drr_versioninfo), ==,
1840	    DMU_SUBSTREAM);
1841	ASSERT3U(drc->drc_drrb->drr_type, <, DMU_OST_NUMTYPES);
1842
1843	/*
1844	 * Open the objset we are modifying.
1845	 */
1846	VERIFY0(dmu_objset_from_ds(drc->drc_ds, &os));
1847
1848	ASSERT(dsl_dataset_phys(drc->drc_ds)->ds_flags & DS_FLAG_INCONSISTENT);
1849
1850	featureflags = DMU_GET_FEATUREFLAGS(drc->drc_drrb->drr_versioninfo);
1851
1852	/* if this stream is dedup'ed, set up the avl tree for guid mapping */
1853	if (featureflags & DMU_BACKUP_FEATURE_DEDUP) {
1854		minor_t minor;
1855
1856		if (cleanup_fd == -1) {
1857			ra.err = SET_ERROR(EBADF);
1858			goto out;
1859		}
1860		ra.err = zfs_onexit_fd_hold(cleanup_fd, &minor);
1861		if (ra.err != 0) {
1862			cleanup_fd = -1;
1863			goto out;
1864		}
1865
1866		if (*action_handlep == 0) {
1867			ra.guid_to_ds_map =
1868			    kmem_alloc(sizeof (avl_tree_t), KM_SLEEP);
1869			avl_create(ra.guid_to_ds_map, guid_compare,
1870			    sizeof (guid_map_entry_t),
1871			    offsetof(guid_map_entry_t, avlnode));
1872			ra.err = zfs_onexit_add_cb(minor,
1873			    free_guid_map_onexit, ra.guid_to_ds_map,
1874			    action_handlep);
1875			if (ra.err != 0)
1876				goto out;
1877		} else {
1878			ra.err = zfs_onexit_cb_data(minor, *action_handlep,
1879			    (void **)&ra.guid_to_ds_map);
1880			if (ra.err != 0)
1881				goto out;
1882		}
1883
1884		drc->drc_guid_to_ds_map = ra.guid_to_ds_map;
1885	}
1886
1887	/*
1888	 * Read records and process them.
1889	 */
1890	pcksum = ra.cksum;
1891	while (ra.err == 0 &&
1892	    NULL != (drr = restore_read(&ra, sizeof (*drr), NULL))) {
1893		if (issig(JUSTLOOKING) && issig(FORREAL)) {
1894			ra.err = SET_ERROR(EINTR);
1895			goto out;
1896		}
1897
1898		if (ra.byteswap)
1899			backup_byteswap(drr);
1900
1901		switch (drr->drr_type) {
1902		case DRR_OBJECT:
1903		{
1904			/*
1905			 * We need to make a copy of the record header,
1906			 * because restore_{object,write} may need to
1907			 * restore_read(), which will invalidate drr.
1908			 */
1909			struct drr_object drro = drr->drr_u.drr_object;
1910			ra.err = restore_object(&ra, os, &drro);
1911			break;
1912		}
1913		case DRR_FREEOBJECTS:
1914		{
1915			struct drr_freeobjects drrfo =
1916			    drr->drr_u.drr_freeobjects;
1917			ra.err = restore_freeobjects(&ra, os, &drrfo);
1918			break;
1919		}
1920		case DRR_WRITE:
1921		{
1922			struct drr_write drrw = drr->drr_u.drr_write;
1923			ra.err = restore_write(&ra, os, &drrw);
1924			break;
1925		}
1926		case DRR_WRITE_BYREF:
1927		{
1928			struct drr_write_byref drrwbr =
1929			    drr->drr_u.drr_write_byref;
1930			ra.err = restore_write_byref(&ra, os, &drrwbr);
1931			break;
1932		}
1933		case DRR_WRITE_EMBEDDED:
1934		{
1935			struct drr_write_embedded drrwe =
1936			    drr->drr_u.drr_write_embedded;
1937			ra.err = restore_write_embedded(&ra, os, &drrwe);
1938			break;
1939		}
1940		case DRR_FREE:
1941		{
1942			struct drr_free drrf = drr->drr_u.drr_free;
1943			ra.err = restore_free(&ra, os, &drrf);
1944			break;
1945		}
1946		case DRR_END:
1947		{
1948			struct drr_end drre = drr->drr_u.drr_end;
1949			/*
1950			 * We compare against the *previous* checksum
1951			 * value, because the stored checksum is of
1952			 * everything before the DRR_END record.
1953			 */
1954			if (!ZIO_CHECKSUM_EQUAL(drre.drr_checksum, pcksum))
1955				ra.err = SET_ERROR(ECKSUM);
1956			goto out;
1957		}
1958		case DRR_SPILL:
1959		{
1960			struct drr_spill drrs = drr->drr_u.drr_spill;
1961			ra.err = restore_spill(&ra, os, &drrs);
1962			break;
1963		}
1964		default:
1965			ra.err = SET_ERROR(EINVAL);
1966			goto out;
1967		}
1968		pcksum = ra.cksum;
1969	}
1970	ASSERT(ra.err != 0);
1971
1972out:
1973	if ((featureflags & DMU_BACKUP_FEATURE_DEDUP) && (cleanup_fd != -1))
1974		zfs_onexit_fd_rele(cleanup_fd);
1975
1976	if (ra.err != 0) {
1977		/*
1978		 * Destroy what we created, so we don't leave it in the
1979		 * inconsistent restoring state.
1980		 */
1981		dmu_recv_cleanup_ds(drc);
1982	}
1983
1984	kmem_free(ra.buf, ra.bufsize);
1985	*voffp = ra.voff;
1986	return (ra.err);
1987}
1988
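/*
 * Check half of the dmu_recv_end() sync task.  For a receive into an
 * existing filesystem this verifies that any snapshots newer than the
 * incremental source can be destroyed (when forced), that the clone swap
 * with the origin head is legal, and that the new snapshot name is valid;
 * for a new filesystem it only checks the snapshot.
 */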
1989static int
1990dmu_recv_end_check(void *arg, dmu_tx_t *tx)
1991{
1992	dmu_recv_cookie_t *drc = arg;
1993	dsl_pool_t *dp = dmu_tx_pool(tx);
1994	int error;
1995
1996	ASSERT3P(drc->drc_ds->ds_owner, ==, dmu_recv_tag);
1997
1998	if (!drc->drc_newfs) {
1999		dsl_dataset_t *origin_head;
2000
2001		error = dsl_dataset_hold(dp, drc->drc_tofs, FTAG, &origin_head);
2002		if (error != 0)
2003			return (error);
2004		if (drc->drc_force) {
2005			/*
2006			 * We will destroy any snapshots in tofs (i.e. before
2007			 * origin_head) that are after the origin (which is
2008			 * the snap before drc_ds, because drc_ds cannot
2009			 * have any snaps of its own).
2010			 */
2011			uint64_t obj;
2012
2013			obj = dsl_dataset_phys(origin_head)->ds_prev_snap_obj;
2014			while (obj !=
2015			    dsl_dataset_phys(drc->drc_ds)->ds_prev_snap_obj) {
2016				dsl_dataset_t *snap;
2017				error = dsl_dataset_hold_obj(dp, obj, FTAG,
2018				    &snap);
2019				if (error != 0)
2020					break;
2021				if (snap->ds_dir != origin_head->ds_dir)
2022					error = SET_ERROR(EINVAL);
2023				if (error == 0)  {
2024					error = dsl_destroy_snapshot_check_impl(
2025					    snap, B_FALSE);
2026				}
2027				obj = dsl_dataset_phys(snap)->ds_prev_snap_obj;
2028				dsl_dataset_rele(snap, FTAG);
2029				if (error != 0)
2030					break;
2031			}
2032			if (error != 0) {
2033				dsl_dataset_rele(origin_head, FTAG);
2034				return (error);
2035			}
2036		}
2037		error = dsl_dataset_clone_swap_check_impl(drc->drc_ds,
2038		    origin_head, drc->drc_force, drc->drc_owner, tx);
2039		if (error != 0) {
2040			dsl_dataset_rele(origin_head, FTAG);
2041			return (error);
2042		}
2043		error = dsl_dataset_snapshot_check_impl(origin_head,
2044		    drc->drc_tosnap, tx, B_TRUE, 1, drc->drc_cred);
2045		dsl_dataset_rele(origin_head, FTAG);
2046		if (error != 0)
2047			return (error);
2048
2049		error = dsl_destroy_head_check_impl(drc->drc_ds, 1);
2050	} else {
2051		error = dsl_dataset_snapshot_check_impl(drc->drc_ds,
2052		    drc->drc_tosnap, tx, B_TRUE, 1, drc->drc_cred);
2053	}
2054	return (error);
2055}
2056
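/*
 * Sync half of the dmu_recv_end() sync task.  For an existing filesystem:
 * destroy obsolete snapshots (if forced), swap the received clone with the
 * origin head, snapshot it, fix up the new snapshot's creation time and
 * guid, clear DS_FLAG_INCONSISTENT, and destroy the temporary clone head.
 * For a new filesystem: just snapshot it and clear the inconsistent flag.
 * In both cases the hold from dmu_recv_begin() is released.
 */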
2057static void
2058dmu_recv_end_sync(void *arg, dmu_tx_t *tx)
2059{
2060	dmu_recv_cookie_t *drc = arg;
2061	dsl_pool_t *dp = dmu_tx_pool(tx);
2062
2063	spa_history_log_internal_ds(drc->drc_ds, "finish receiving",
2064	    tx, "snap=%s", drc->drc_tosnap);
2065
2066	if (!drc->drc_newfs) {
2067		dsl_dataset_t *origin_head;
2068
2069		VERIFY0(dsl_dataset_hold(dp, drc->drc_tofs, FTAG,
2070		    &origin_head));
2071
2072		if (drc->drc_force) {
2073			/*
2074			 * Destroy any snapshots of drc_tofs (origin_head)
2075			 * after the origin (the snap before drc_ds).
2076			 */
2077			uint64_t obj;
2078
2079			obj = dsl_dataset_phys(origin_head)->ds_prev_snap_obj;
2080			while (obj !=
2081			    dsl_dataset_phys(drc->drc_ds)->ds_prev_snap_obj) {
2082				dsl_dataset_t *snap;
2083				VERIFY0(dsl_dataset_hold_obj(dp, obj, FTAG,
2084				    &snap));
2085				ASSERT3P(snap->ds_dir, ==, origin_head->ds_dir);
2086				obj = dsl_dataset_phys(snap)->ds_prev_snap_obj;
2087				dsl_destroy_snapshot_sync_impl(snap,
2088				    B_FALSE, tx);
2089				dsl_dataset_rele(snap, FTAG);
2090			}
2091		}
2092		VERIFY3P(drc->drc_ds->ds_prev, ==,
2093		    origin_head->ds_prev);
2094
2095		dsl_dataset_clone_swap_sync_impl(drc->drc_ds,
2096		    origin_head, tx);
2097		dsl_dataset_snapshot_sync_impl(origin_head,
2098		    drc->drc_tosnap, tx);
2099
2100		/* set snapshot's creation time and guid */
2101		dmu_buf_will_dirty(origin_head->ds_prev->ds_dbuf, tx);
2102		dsl_dataset_phys(origin_head->ds_prev)->ds_creation_time =
2103		    drc->drc_drrb->drr_creation_time;
2104		dsl_dataset_phys(origin_head->ds_prev)->ds_guid =
2105		    drc->drc_drrb->drr_toguid;
2106		dsl_dataset_phys(origin_head->ds_prev)->ds_flags &=
2107		    ~DS_FLAG_INCONSISTENT;
2108
2109		dmu_buf_will_dirty(origin_head->ds_dbuf, tx);
2110		dsl_dataset_phys(origin_head)->ds_flags &=
2111		    ~DS_FLAG_INCONSISTENT;
2112
2113		dsl_dataset_rele(origin_head, FTAG);
2114		dsl_destroy_head_sync_impl(drc->drc_ds, tx);
2115
2116		if (drc->drc_owner != NULL)
2117			VERIFY3P(origin_head->ds_owner, ==, drc->drc_owner);
2118	} else {
2119		dsl_dataset_t *ds = drc->drc_ds;
2120
2121		dsl_dataset_snapshot_sync_impl(ds, drc->drc_tosnap, tx);
2122
2123		/* set snapshot's creation time and guid */
2124		dmu_buf_will_dirty(ds->ds_prev->ds_dbuf, tx);
2125		dsl_dataset_phys(ds->ds_prev)->ds_creation_time =
2126		    drc->drc_drrb->drr_creation_time;
2127		dsl_dataset_phys(ds->ds_prev)->ds_guid =
2128		    drc->drc_drrb->drr_toguid;
2129		dsl_dataset_phys(ds->ds_prev)->ds_flags &=
2130		    ~DS_FLAG_INCONSISTENT;
2131
2132		dmu_buf_will_dirty(ds->ds_dbuf, tx);
2133		dsl_dataset_phys(ds)->ds_flags &= ~DS_FLAG_INCONSISTENT;
2134	}
2135	drc->drc_newsnapobj = dsl_dataset_phys(drc->drc_ds)->ds_prev_snap_obj;
2136	/*
2137	 * Release the hold from dmu_recv_begin.  This must be done before
2138	 * we return to open context, so that when we free the dataset's dnode,
2139	 * we can evict its bonus buffer.
2140	 */
2141	dsl_dataset_disown(drc->drc_ds, dmu_recv_tag);
2142	drc->drc_ds = NULL;
2143}
2144
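/*
 * Add the snapshot identified by snapobj to the dedup guid->dataset map,
 * taking a long hold on it so it remains available for later
 * DRR_WRITE_BYREF lookups.
 */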
2145static int
2146add_ds_to_guidmap(const char *name, avl_tree_t *guid_map, uint64_t snapobj)
2147{
2148	dsl_pool_t *dp;
2149	dsl_dataset_t *snapds;
2150	guid_map_entry_t *gmep;
2151	int err;
2152
2153	ASSERT(guid_map != NULL);
2154
2155	err = dsl_pool_hold(name, FTAG, &dp);
2156	if (err != 0)
2157		return (err);
2158	gmep = kmem_alloc(sizeof (*gmep), KM_SLEEP);
2159	err = dsl_dataset_hold_obj(dp, snapobj, gmep, &snapds);
2160	if (err == 0) {
2161		gmep->guid = dsl_dataset_phys(snapds)->ds_guid;
2162		gmep->gme_ds = snapds;
2163		avl_add(guid_map, gmep);
2164		dsl_dataset_long_hold(snapds, gmep);
2165	} else
2166		kmem_free(gmep, sizeof (*gmep));
2167
2168	dsl_pool_rele(dp, FTAG);
2169	return (err);
2170}
2171
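/* Space-check estimate (in blocks) passed to dsl_sync_task() below. */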
2172static int dmu_recv_end_modified_blocks = 3;
2173
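/*
 * Finish a receive into an existing filesystem: unmount the origin if
 * necessary, then run the dmu_recv_end check/sync task.  On failure the
 * partially received dataset is cleaned up.
 */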
2174static int
2175dmu_recv_existing_end(dmu_recv_cookie_t *drc)
2176{
2177	int error;
2178	char name[MAXNAMELEN];
2179
2180#ifdef _KERNEL
2181	/*
2182	 * We will be destroying the ds; make sure its origin is unmounted if
2183	 * necessary.
2184	 */
2185	dsl_dataset_name(drc->drc_ds, name);
2186	zfs_destroy_unmount_origin(name);
2187#endif
2188
2189	error = dsl_sync_task(drc->drc_tofs,
2190	    dmu_recv_end_check, dmu_recv_end_sync, drc,
2191	    dmu_recv_end_modified_blocks, ZFS_SPACE_CHECK_NORMAL);
2192
2193	if (error != 0)
2194		dmu_recv_cleanup_ds(drc);
2195	return (error);
2196}
2197
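/*
 * Finish a receive that created a new filesystem.  On success, record the
 * new snapshot in the dedup guid map (if this was a dedup'ed stream); on
 * failure, clean up the partially received dataset.
 */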
2198static int
2199dmu_recv_new_end(dmu_recv_cookie_t *drc)
2200{
2201	int error;
2202
2203	error = dsl_sync_task(drc->drc_tofs,
2204	    dmu_recv_end_check, dmu_recv_end_sync, drc,
2205	    dmu_recv_end_modified_blocks, ZFS_SPACE_CHECK_NORMAL);
2206
2207	if (error != 0) {
2208		dmu_recv_cleanup_ds(drc);
2209	} else if (drc->drc_guid_to_ds_map != NULL) {
2210		(void) add_ds_to_guidmap(drc->drc_tofs,
2211		    drc->drc_guid_to_ds_map,
2212		    drc->drc_newsnapobj);
2213	}
2214	return (error);
2215}
2216
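/*
 * Complete the receive started by dmu_recv_begin()/dmu_recv_stream(),
 * dispatching to the new-filesystem or existing-filesystem variant.
 */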
2217int
2218dmu_recv_end(dmu_recv_cookie_t *drc, void *owner)
2219{
2220	drc->drc_owner = owner;
2221
2222	if (drc->drc_newfs)
2223		return (dmu_recv_new_end(drc));
2224	else
2225		return (dmu_recv_existing_end(drc));
2226}
2227
2228/*
2229 * Return TRUE if this objset is currently being received into.
2230 */
2231boolean_t
2232dmu_objset_is_receiving(objset_t *os)
2233{
2234	return (os->os_dsl_dataset != NULL &&
2235	    os->os_dsl_dataset->ds_owner == dmu_recv_tag);
2236}
2237