dmu_send.c (r249196 → r249356)
1/*
2 * CDDL HEADER START
3 *
4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
7 *
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
12 *
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 *
19 * CDDL HEADER END
20 */
21/*
22 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
23 * Copyright 2011 Nexenta Systems, Inc. All rights reserved.
24 * Copyright (c) 2013 by Delphix. All rights reserved.
25 * Copyright (c) 2012, Joyent, Inc. All rights reserved.
26 * Copyright (c) 2012, Martin Matuska <mm@FreeBSD.org>. All rights reserved.
27 */
28
29#include <sys/dmu.h>
30#include <sys/dmu_impl.h>
31#include <sys/dmu_tx.h>
32#include <sys/dbuf.h>
33#include <sys/dnode.h>
34#include <sys/zfs_context.h>
35#include <sys/dmu_objset.h>
36#include <sys/dmu_traverse.h>
37#include <sys/dsl_dataset.h>
38#include <sys/dsl_dir.h>
39#include <sys/dsl_prop.h>
40#include <sys/dsl_pool.h>
41#include <sys/dsl_synctask.h>
42#include <sys/zfs_ioctl.h>
43#include <sys/zap.h>
44#include <sys/zio_checksum.h>
45#include <sys/zfs_znode.h>
46#include <zfs_fletcher.h>
47#include <sys/avl.h>
48#include <sys/ddt.h>
49#include <sys/zfs_onexit.h>
50#include <sys/dmu_send.h>
51#include <sys/dsl_destroy.h>
52
53/* Set this tunable to TRUE to replace corrupt data with 0x2f5baddb10c */
54int zfs_send_corrupt_data = B_FALSE;
55
56static char *dmu_recv_tag = "dmu_recv_tag";
57static const char *recv_clone_name = "%recv";
58
59static int
60dump_bytes(dmu_sendarg_t *dsp, void *buf, int len)
61{
62 dsl_dataset_t *ds = dsp->dsa_os->os_dsl_dataset;
63 struct uio auio;
64 struct iovec aiov;
65 ASSERT0(len % 8);
66
67 fletcher_4_incremental_native(buf, len, &dsp->dsa_zc);
68 aiov.iov_base = buf;
69 aiov.iov_len = len;
70 auio.uio_iov = &aiov;
71 auio.uio_iovcnt = 1;
72 auio.uio_resid = len;
73 auio.uio_segflg = UIO_SYSSPACE;
74 auio.uio_rw = UIO_WRITE;
75 auio.uio_offset = (off_t)-1;
76 auio.uio_td = dsp->dsa_td;
77#ifdef _KERNEL
78 if (dsp->dsa_fp->f_type == DTYPE_VNODE)
79 bwillwrite();
80 dsp->dsa_err = fo_write(dsp->dsa_fp, &auio, dsp->dsa_td->td_ucred, 0,
81 dsp->dsa_td);
82#else
83 fprintf(stderr, "%s: returning EOPNOTSUPP\n", __func__);
84 dsp->dsa_err = EOPNOTSUPP;
85#endif
86 mutex_enter(&ds->ds_sendstream_lock);
87 *dsp->dsa_off += len;
88 mutex_exit(&ds->ds_sendstream_lock);
89
90 return (dsp->dsa_err);
91}
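/*
 * Note: every byte of the stream passes through
 * fletcher_4_incremental_native() above, so dsp->dsa_zc accumulates a
 * running checksum of the entire stream.  dmu_send_impl() emits that
 * sum in the DRR_END record, and dmu_recv_stream() verifies it against
 * its own running checksum on the receive side.
 */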
92
93static int
94dump_free(dmu_sendarg_t *dsp, uint64_t object, uint64_t offset,
95 uint64_t length)
96{
97 struct drr_free *drrf = &(dsp->dsa_drr->drr_u.drr_free);
98
99 if (length != -1ULL && offset + length < offset)
100 length = -1ULL;
101
102 /*
103 * If there is a pending op, but it's not PENDING_FREE, push it out,
104 * since free block aggregation can only be done for blocks of the
 105	 * same type (i.e., DRR_FREE records can only be aggregated with
 106	 * other DRR_FREE records; DRR_FREEOBJECTS records can only be
 107	 * aggregated with other DRR_FREEOBJECTS records).
108 */
109 if (dsp->dsa_pending_op != PENDING_NONE &&
110 dsp->dsa_pending_op != PENDING_FREE) {
111 if (dump_bytes(dsp, dsp->dsa_drr,
112 sizeof (dmu_replay_record_t)) != 0)
113 return (SET_ERROR(EINTR));
114 dsp->dsa_pending_op = PENDING_NONE;
115 }
116
117 if (dsp->dsa_pending_op == PENDING_FREE) {
118 /*
119 * There should never be a PENDING_FREE if length is -1
120 * (because dump_dnode is the only place where this
121 * function is called with a -1, and only after flushing
122 * any pending record).
123 */
124 ASSERT(length != -1ULL);
125 /*
126 * Check to see whether this free block can be aggregated
 127	 * with the pending one.
128 */
129 if (drrf->drr_object == object && drrf->drr_offset +
130 drrf->drr_length == offset) {
131 drrf->drr_length += length;
132 return (0);
133 } else {
134 /* not a continuation. Push out pending record */
135 if (dump_bytes(dsp, dsp->dsa_drr,
136 sizeof (dmu_replay_record_t)) != 0)
137 return (SET_ERROR(EINTR));
138 dsp->dsa_pending_op = PENDING_NONE;
139 }
140 }
141 /* create a FREE record and make it pending */
142 bzero(dsp->dsa_drr, sizeof (dmu_replay_record_t));
143 dsp->dsa_drr->drr_type = DRR_FREE;
144 drrf->drr_object = object;
145 drrf->drr_offset = offset;
146 drrf->drr_length = length;
147 drrf->drr_toguid = dsp->dsa_toguid;
148 if (length == -1ULL) {
149 if (dump_bytes(dsp, dsp->dsa_drr,
150 sizeof (dmu_replay_record_t)) != 0)
151 return (SET_ERROR(EINTR));
152 } else {
153 dsp->dsa_pending_op = PENDING_FREE;
154 }
155
156 return (0);
157}
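/*
 * Illustration (hypothetical call sequence): three physically adjacent
 * frees of the same object, e.g.
 *
 *	dump_free(dsp, 5, 0, 4096);
 *	dump_free(dsp, 5, 4096, 4096);
 *	dump_free(dsp, 5, 8192, 4096);
 *
 * leave a single pending DRR_FREE record covering offset 0, length
 * 12288.  The record is only written out when a non-contiguous free, a
 * record of a different type, or the end of the stream forces a flush.
 */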
158
159static int
160dump_data(dmu_sendarg_t *dsp, dmu_object_type_t type,
161 uint64_t object, uint64_t offset, int blksz, const blkptr_t *bp, void *data)
162{
163 struct drr_write *drrw = &(dsp->dsa_drr->drr_u.drr_write);
 164
166 /*
167 * If there is any kind of pending aggregation (currently either
168 * a grouping of free objects or free blocks), push it out to
169 * the stream, since aggregation can't be done across operations
170 * of different types.
171 */
172 if (dsp->dsa_pending_op != PENDING_NONE) {
173 if (dump_bytes(dsp, dsp->dsa_drr,
174 sizeof (dmu_replay_record_t)) != 0)
175 return (SET_ERROR(EINTR));
176 dsp->dsa_pending_op = PENDING_NONE;
177 }
178 /* write a DATA record */
179 bzero(dsp->dsa_drr, sizeof (dmu_replay_record_t));
180 dsp->dsa_drr->drr_type = DRR_WRITE;
181 drrw->drr_object = object;
182 drrw->drr_type = type;
183 drrw->drr_offset = offset;
184 drrw->drr_length = blksz;
185 drrw->drr_toguid = dsp->dsa_toguid;
186 drrw->drr_checksumtype = BP_GET_CHECKSUM(bp);
187 if (zio_checksum_table[drrw->drr_checksumtype].ci_dedup)
188 drrw->drr_checksumflags |= DRR_CHECKSUM_DEDUP;
189 DDK_SET_LSIZE(&drrw->drr_key, BP_GET_LSIZE(bp));
190 DDK_SET_PSIZE(&drrw->drr_key, BP_GET_PSIZE(bp));
191 DDK_SET_COMPRESS(&drrw->drr_key, BP_GET_COMPRESS(bp));
192 drrw->drr_key.ddk_cksum = bp->blk_cksum;
193
194 if (dump_bytes(dsp, dsp->dsa_drr, sizeof (dmu_replay_record_t)) != 0)
195 return (SET_ERROR(EINTR));
196 if (dump_bytes(dsp, data, blksz) != 0)
197 return (SET_ERROR(EINTR));
198 return (0);
199}
200
201static int
202dump_spill(dmu_sendarg_t *dsp, uint64_t object, int blksz, void *data)
203{
204 struct drr_spill *drrs = &(dsp->dsa_drr->drr_u.drr_spill);
205
206 if (dsp->dsa_pending_op != PENDING_NONE) {
207 if (dump_bytes(dsp, dsp->dsa_drr,
208 sizeof (dmu_replay_record_t)) != 0)
209 return (SET_ERROR(EINTR));
210 dsp->dsa_pending_op = PENDING_NONE;
211 }
212
213 /* write a SPILL record */
214 bzero(dsp->dsa_drr, sizeof (dmu_replay_record_t));
215 dsp->dsa_drr->drr_type = DRR_SPILL;
216 drrs->drr_object = object;
217 drrs->drr_length = blksz;
218 drrs->drr_toguid = dsp->dsa_toguid;
219
220 if (dump_bytes(dsp, dsp->dsa_drr, sizeof (dmu_replay_record_t)))
221 return (SET_ERROR(EINTR));
222 if (dump_bytes(dsp, data, blksz))
223 return (SET_ERROR(EINTR));
224 return (0);
225}
226
227static int
228dump_freeobjects(dmu_sendarg_t *dsp, uint64_t firstobj, uint64_t numobjs)
229{
230 struct drr_freeobjects *drrfo = &(dsp->dsa_drr->drr_u.drr_freeobjects);
231
232 /*
233 * If there is a pending op, but it's not PENDING_FREEOBJECTS,
234 * push it out, since free block aggregation can only be done for
 235	 * blocks of the same type (i.e., DRR_FREE records can only be
 236	 * aggregated with other DRR_FREE records; DRR_FREEOBJECTS records
 237	 * can only be aggregated with other DRR_FREEOBJECTS records).
238 */
239 if (dsp->dsa_pending_op != PENDING_NONE &&
240 dsp->dsa_pending_op != PENDING_FREEOBJECTS) {
241 if (dump_bytes(dsp, dsp->dsa_drr,
242 sizeof (dmu_replay_record_t)) != 0)
243 return (SET_ERROR(EINTR));
244 dsp->dsa_pending_op = PENDING_NONE;
245 }
246 if (dsp->dsa_pending_op == PENDING_FREEOBJECTS) {
247 /*
248 * See whether this free object array can be aggregated
 249	 * with the pending one
250 */
251 if (drrfo->drr_firstobj + drrfo->drr_numobjs == firstobj) {
252 drrfo->drr_numobjs += numobjs;
253 return (0);
254 } else {
255 /* can't be aggregated. Push out pending record */
256 if (dump_bytes(dsp, dsp->dsa_drr,
257 sizeof (dmu_replay_record_t)) != 0)
258 return (SET_ERROR(EINTR));
259 dsp->dsa_pending_op = PENDING_NONE;
260 }
261 }
262
263 /* write a FREEOBJECTS record */
264 bzero(dsp->dsa_drr, sizeof (dmu_replay_record_t));
265 dsp->dsa_drr->drr_type = DRR_FREEOBJECTS;
266 drrfo->drr_firstobj = firstobj;
267 drrfo->drr_numobjs = numobjs;
268 drrfo->drr_toguid = dsp->dsa_toguid;
269
270 dsp->dsa_pending_op = PENDING_FREEOBJECTS;
271
272 return (0);
273}
274
275static int
276dump_dnode(dmu_sendarg_t *dsp, uint64_t object, dnode_phys_t *dnp)
277{
278 struct drr_object *drro = &(dsp->dsa_drr->drr_u.drr_object);
279
280 if (dnp == NULL || dnp->dn_type == DMU_OT_NONE)
281 return (dump_freeobjects(dsp, object, 1));
282
283 if (dsp->dsa_pending_op != PENDING_NONE) {
284 if (dump_bytes(dsp, dsp->dsa_drr,
285 sizeof (dmu_replay_record_t)) != 0)
286 return (SET_ERROR(EINTR));
287 dsp->dsa_pending_op = PENDING_NONE;
288 }
289
290 /* write an OBJECT record */
291 bzero(dsp->dsa_drr, sizeof (dmu_replay_record_t));
292 dsp->dsa_drr->drr_type = DRR_OBJECT;
293 drro->drr_object = object;
294 drro->drr_type = dnp->dn_type;
295 drro->drr_bonustype = dnp->dn_bonustype;
296 drro->drr_blksz = dnp->dn_datablkszsec << SPA_MINBLOCKSHIFT;
297 drro->drr_bonuslen = dnp->dn_bonuslen;
298 drro->drr_checksumtype = dnp->dn_checksum;
299 drro->drr_compress = dnp->dn_compress;
300 drro->drr_toguid = dsp->dsa_toguid;
301
302 if (dump_bytes(dsp, dsp->dsa_drr, sizeof (dmu_replay_record_t)) != 0)
303 return (SET_ERROR(EINTR));
304
305 if (dump_bytes(dsp, DN_BONUS(dnp), P2ROUNDUP(dnp->dn_bonuslen, 8)) != 0)
306 return (SET_ERROR(EINTR));
307
308 /* free anything past the end of the file */
309 if (dump_free(dsp, object, (dnp->dn_maxblkid + 1) *
310 (dnp->dn_datablkszsec << SPA_MINBLOCKSHIFT), -1ULL))
311 return (SET_ERROR(EINTR));
312 if (dsp->dsa_err != 0)
313 return (SET_ERROR(EINTR));
314 return (0);
315}
316
317#define BP_SPAN(dnp, level) \
318 (((uint64_t)dnp->dn_datablkszsec) << (SPA_MINBLOCKSHIFT + \
319 (level) * (dnp->dn_indblkshift - SPA_BLKPTRSHIFT)))
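/*
 * Worked example (assuming 128K data blocks and 16K indirect blocks):
 * dn_datablkszsec = 256 and dn_indblkshift = 14, so each level adds
 * 2^(14 - SPA_BLKPTRSHIFT) = 128 block pointers of fan-out.  Thus
 * BP_SPAN(dnp, 0) = 256 << 9 = 128K and BP_SPAN(dnp, 1) = 128K * 128 =
 * 16M: the range of the object covered by one block at that level.
 */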
320
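/*
 * Callback for traverse_dataset().  It dispatches on what the bookmark
 * refers to: a hole in the meta-dnode becomes a DRR_FREEOBJECTS record,
 * any other hole becomes a DRR_FREE, a meta-dnode block is decoded into
 * one DRR_OBJECT per dnode, a spill (DMU_OT_SA) block becomes a
 * DRR_SPILL, and a level-0 data block becomes a DRR_WRITE.  Indirect
 * blocks and the objset block itself produce no records.
 */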
321/* ARGSUSED */
322static int
323backup_cb(spa_t *spa, zilog_t *zilog, const blkptr_t *bp,
324 const zbookmark_t *zb, const dnode_phys_t *dnp, void *arg)
325{
326 dmu_sendarg_t *dsp = arg;
327 dmu_object_type_t type = bp ? BP_GET_TYPE(bp) : DMU_OT_NONE;
328 int err = 0;
329
330 if (issig(JUSTLOOKING) && issig(FORREAL))
331 return (SET_ERROR(EINTR));
332
333 if (zb->zb_object != DMU_META_DNODE_OBJECT &&
334 DMU_OBJECT_IS_SPECIAL(zb->zb_object)) {
335 return (0);
336 } else if (bp == NULL && zb->zb_object == DMU_META_DNODE_OBJECT) {
337 uint64_t span = BP_SPAN(dnp, zb->zb_level);
338 uint64_t dnobj = (zb->zb_blkid * span) >> DNODE_SHIFT;
339 err = dump_freeobjects(dsp, dnobj, span >> DNODE_SHIFT);
340 } else if (bp == NULL) {
341 uint64_t span = BP_SPAN(dnp, zb->zb_level);
342 err = dump_free(dsp, zb->zb_object, zb->zb_blkid * span, span);
343 } else if (zb->zb_level > 0 || type == DMU_OT_OBJSET) {
344 return (0);
345 } else if (type == DMU_OT_DNODE) {
346 dnode_phys_t *blk;
347 int i;
348 int blksz = BP_GET_LSIZE(bp);
349 uint32_t aflags = ARC_WAIT;
350 arc_buf_t *abuf;
351
352 if (arc_read(NULL, spa, bp, arc_getbuf_func, &abuf,
353 ZIO_PRIORITY_ASYNC_READ, ZIO_FLAG_CANFAIL,
354 &aflags, zb) != 0)
355 return (SET_ERROR(EIO));
356
357 blk = abuf->b_data;
358 for (i = 0; i < blksz >> DNODE_SHIFT; i++) {
359 uint64_t dnobj = (zb->zb_blkid <<
360 (DNODE_BLOCK_SHIFT - DNODE_SHIFT)) + i;
361 err = dump_dnode(dsp, dnobj, blk+i);
362 if (err != 0)
363 break;
364 }
365 (void) arc_buf_remove_ref(abuf, &abuf);
366 } else if (type == DMU_OT_SA) {
367 uint32_t aflags = ARC_WAIT;
368 arc_buf_t *abuf;
369 int blksz = BP_GET_LSIZE(bp);
370
371 if (arc_read(NULL, spa, bp, arc_getbuf_func, &abuf,
372 ZIO_PRIORITY_ASYNC_READ, ZIO_FLAG_CANFAIL,
373 &aflags, zb) != 0)
374 return (SET_ERROR(EIO));
375
376 err = dump_spill(dsp, zb->zb_object, blksz, abuf->b_data);
377 (void) arc_buf_remove_ref(abuf, &abuf);
378 } else { /* it's a level-0 block of a regular object */
379 uint32_t aflags = ARC_WAIT;
380 arc_buf_t *abuf;
381 int blksz = BP_GET_LSIZE(bp);
382
383 if (arc_read(NULL, spa, bp, arc_getbuf_func, &abuf,
384 ZIO_PRIORITY_ASYNC_READ, ZIO_FLAG_CANFAIL,
385 &aflags, zb) != 0) {
386 if (zfs_send_corrupt_data) {
387 /* Send a block filled with 0x"zfs badd bloc" */
388 abuf = arc_buf_alloc(spa, blksz, &abuf,
389 ARC_BUFC_DATA);
390 uint64_t *ptr;
391 for (ptr = abuf->b_data;
392 (char *)ptr < (char *)abuf->b_data + blksz;
393 ptr++)
394 *ptr = 0x2f5baddb10c;
395 } else {
396 return (SET_ERROR(EIO));
397 }
398 }
399
400 err = dump_data(dsp, type, zb->zb_object, zb->zb_blkid * blksz,
401 blksz, bp, abuf->b_data);
402 (void) arc_buf_remove_ref(abuf, &abuf);
403 }
404
405 ASSERT(err == 0 || err == EINTR);
406 return (err);
407}
408
409/*
410 * Releases dp, ds, and fromds, using the specified tag.
411 */
412static int
413dmu_send_impl(void *tag, dsl_pool_t *dp, dsl_dataset_t *ds,
414#ifdef illumos
415 dsl_dataset_t *fromds, int outfd, vnode_t *vp, offset_t *off)
416#else
417 dsl_dataset_t *fromds, int outfd, struct file *fp, offset_t *off)
418#endif
419{
420 objset_t *os;
421 dmu_replay_record_t *drr;
422 dmu_sendarg_t *dsp;
423 int err;
424 uint64_t fromtxg = 0;
425
426 if (fromds != NULL && !dsl_dataset_is_before(ds, fromds)) {
427 dsl_dataset_rele(fromds, tag);
428 dsl_dataset_rele(ds, tag);
429 dsl_pool_rele(dp, tag);
430 return (SET_ERROR(EXDEV));
431 }
432
433 err = dmu_objset_from_ds(ds, &os);
434 if (err != 0) {
435 if (fromds != NULL)
436 dsl_dataset_rele(fromds, tag);
437 dsl_dataset_rele(ds, tag);
438 dsl_pool_rele(dp, tag);
439 return (err);
440 }
441
442 drr = kmem_zalloc(sizeof (dmu_replay_record_t), KM_SLEEP);
443 drr->drr_type = DRR_BEGIN;
444 drr->drr_u.drr_begin.drr_magic = DMU_BACKUP_MAGIC;
445 DMU_SET_STREAM_HDRTYPE(drr->drr_u.drr_begin.drr_versioninfo,
446 DMU_SUBSTREAM);
447
448#ifdef _KERNEL
449 if (dmu_objset_type(os) == DMU_OST_ZFS) {
450 uint64_t version;
451 if (zfs_get_zplprop(os, ZFS_PROP_VERSION, &version) != 0) {
452 kmem_free(drr, sizeof (dmu_replay_record_t));
453 if (fromds != NULL)
454 dsl_dataset_rele(fromds, tag);
455 dsl_dataset_rele(ds, tag);
456 dsl_pool_rele(dp, tag);
457 return (SET_ERROR(EINVAL));
458 }
459 if (version >= ZPL_VERSION_SA) {
460 DMU_SET_FEATUREFLAGS(
461 drr->drr_u.drr_begin.drr_versioninfo,
462 DMU_BACKUP_FEATURE_SA_SPILL);
463 }
464 }
465#endif
466
467 drr->drr_u.drr_begin.drr_creation_time =
468 ds->ds_phys->ds_creation_time;
469 drr->drr_u.drr_begin.drr_type = dmu_objset_type(os);
470 if (fromds != NULL && ds->ds_dir != fromds->ds_dir)
471 drr->drr_u.drr_begin.drr_flags |= DRR_FLAG_CLONE;
472 drr->drr_u.drr_begin.drr_toguid = ds->ds_phys->ds_guid;
473 if (ds->ds_phys->ds_flags & DS_FLAG_CI_DATASET)
474 drr->drr_u.drr_begin.drr_flags |= DRR_FLAG_CI_DATA;
475
476 if (fromds != NULL)
477 drr->drr_u.drr_begin.drr_fromguid = fromds->ds_phys->ds_guid;
478 dsl_dataset_name(ds, drr->drr_u.drr_begin.drr_toname);
479
480 if (fromds != NULL) {
481 fromtxg = fromds->ds_phys->ds_creation_txg;
482 dsl_dataset_rele(fromds, tag);
483 fromds = NULL;
484 }
485
486 dsp = kmem_zalloc(sizeof (dmu_sendarg_t), KM_SLEEP);
487
488 dsp->dsa_drr = drr;
489 dsp->dsa_outfd = outfd;
490 dsp->dsa_proc = curproc;
491 dsp->dsa_td = curthread;
492 dsp->dsa_fp = fp;
493 dsp->dsa_os = os;
494 dsp->dsa_off = off;
495 dsp->dsa_toguid = ds->ds_phys->ds_guid;
496 ZIO_SET_CHECKSUM(&dsp->dsa_zc, 0, 0, 0, 0);
497 dsp->dsa_pending_op = PENDING_NONE;
498
499 mutex_enter(&ds->ds_sendstream_lock);
500 list_insert_head(&ds->ds_sendstreams, dsp);
501 mutex_exit(&ds->ds_sendstream_lock);
502
503 dsl_dataset_long_hold(ds, FTAG);
504 dsl_pool_rele(dp, tag);
505
506 if (dump_bytes(dsp, drr, sizeof (dmu_replay_record_t)) != 0) {
507 err = dsp->dsa_err;
508 goto out;
509 }
510
511 err = traverse_dataset(ds, fromtxg, TRAVERSE_PRE | TRAVERSE_PREFETCH,
512 backup_cb, dsp);
513
514 if (dsp->dsa_pending_op != PENDING_NONE)
515 if (dump_bytes(dsp, drr, sizeof (dmu_replay_record_t)) != 0)
516 err = SET_ERROR(EINTR);
517
518 if (err != 0) {
519 if (err == EINTR && dsp->dsa_err != 0)
520 err = dsp->dsa_err;
521 goto out;
522 }
523
524 bzero(drr, sizeof (dmu_replay_record_t));
525 drr->drr_type = DRR_END;
526 drr->drr_u.drr_end.drr_checksum = dsp->dsa_zc;
527 drr->drr_u.drr_end.drr_toguid = dsp->dsa_toguid;
528
529 if (dump_bytes(dsp, drr, sizeof (dmu_replay_record_t)) != 0) {
530 err = dsp->dsa_err;
531 goto out;
532 }
533
534out:
535 mutex_enter(&ds->ds_sendstream_lock);
536 list_remove(&ds->ds_sendstreams, dsp);
537 mutex_exit(&ds->ds_sendstream_lock);
538
539 kmem_free(drr, sizeof (dmu_replay_record_t));
540 kmem_free(dsp, sizeof (dmu_sendarg_t));
541
542 dsl_dataset_long_rele(ds, FTAG);
543 dsl_dataset_rele(ds, tag);
544
545 return (err);
546}
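/*
 * The resulting stream layout is therefore:
 *
 *	DRR_BEGIN
 *	(DRR_OBJECT | DRR_WRITE | DRR_SPILL | DRR_FREE |
 *	    DRR_FREEOBJECTS)*		<- emitted by backup_cb()
 *	DRR_END				<- carries the fletcher-4 checksum
 *
 * which is the same sequence that dmu_recv_stream() consumes below.
 */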
547
548int
549dmu_send_obj(const char *pool, uint64_t tosnap, uint64_t fromsnap,
550#ifdef illumos
551 int outfd, vnode_t *vp, offset_t *off)
552#else
553 int outfd, struct file *fp, offset_t *off)
554#endif
555{
556 dsl_pool_t *dp;
557 dsl_dataset_t *ds;
558 dsl_dataset_t *fromds = NULL;
559 int err;
560
561 err = dsl_pool_hold(pool, FTAG, &dp);
562 if (err != 0)
563 return (err);
564
565 err = dsl_dataset_hold_obj(dp, tosnap, FTAG, &ds);
566 if (err != 0) {
567 dsl_pool_rele(dp, FTAG);
568 return (err);
569 }
570
571 if (fromsnap != 0) {
572 err = dsl_dataset_hold_obj(dp, fromsnap, FTAG, &fromds);
573 if (err != 0) {
574 dsl_dataset_rele(ds, FTAG);
575 dsl_pool_rele(dp, FTAG);
576 return (err);
577 }
578 }
579
580 return (dmu_send_impl(FTAG, dp, ds, fromds, outfd, fp, off));
581}
582
583int
584dmu_send(const char *tosnap, const char *fromsnap,
585#ifdef illumos
586 int outfd, vnode_t *vp, offset_t *off)
587#else
588 int outfd, struct file *fp, offset_t *off)
589#endif
590{
591 dsl_pool_t *dp;
592 dsl_dataset_t *ds;
593 dsl_dataset_t *fromds = NULL;
594 int err;
595
596 if (strchr(tosnap, '@') == NULL)
597 return (SET_ERROR(EINVAL));
598 if (fromsnap != NULL && strchr(fromsnap, '@') == NULL)
599 return (SET_ERROR(EINVAL));
600
601 err = dsl_pool_hold(tosnap, FTAG, &dp);
602 if (err != 0)
603 return (err);
604
605 err = dsl_dataset_hold(dp, tosnap, FTAG, &ds);
606 if (err != 0) {
607 dsl_pool_rele(dp, FTAG);
608 return (err);
609 }
610
611 if (fromsnap != NULL) {
612 err = dsl_dataset_hold(dp, fromsnap, FTAG, &fromds);
613 if (err != 0) {
614 dsl_dataset_rele(ds, FTAG);
615 dsl_pool_rele(dp, FTAG);
616 return (err);
617 }
618 }
619 return (dmu_send_impl(FTAG, dp, ds, fromds, outfd, fp, off));
620}
621
622int
623dmu_send_estimate(dsl_dataset_t *ds, dsl_dataset_t *fromds, uint64_t *sizep)
624{
625 dsl_pool_t *dp = ds->ds_dir->dd_pool;
626 int err;
627 uint64_t size;
628
629 ASSERT(dsl_pool_config_held(dp));
630
631 /* tosnap must be a snapshot */
632 if (!dsl_dataset_is_snapshot(ds))
633 return (SET_ERROR(EINVAL));
634
635 /*
636 * fromsnap must be an earlier snapshot from the same fs as tosnap,
637 * or the origin's fs.
638 */
639 if (fromds != NULL && !dsl_dataset_is_before(ds, fromds))
640 return (SET_ERROR(EXDEV));
641
642 /* Get uncompressed size estimate of changed data. */
643 if (fromds == NULL) {
644 size = ds->ds_phys->ds_uncompressed_bytes;
645 } else {
646 uint64_t used, comp;
647 err = dsl_dataset_space_written(fromds, ds,
648 &used, &comp, &size);
649 if (err != 0)
650 return (err);
651 }
652
653 /*
654 * Assume that space (both on-disk and in-stream) is dominated by
655 * data. We will adjust for indirect blocks and the copies property,
656 * but ignore per-object space used (eg, dnodes and DRR_OBJECT records).
657 */
658
659 /*
660 * Subtract out approximate space used by indirect blocks.
661 * Assume most space is used by data blocks (non-indirect, non-dnode).
 662	 * Assume all blocks are recordsize. Assume ditto blocks and
 663	 * internal fragmentation cancel out compression.
664 *
665 * Therefore, space used by indirect blocks is sizeof(blkptr_t) per
666 * block, which we observe in practice.
667 */
668 uint64_t recordsize;
669 err = dsl_prop_get_int_ds(ds, "recordsize", &recordsize);
670 if (err != 0)
671 return (err);
672 size -= size / recordsize * sizeof (blkptr_t);
673
674 /* Add in the space for the record associated with each block. */
675 size += size / recordsize * sizeof (dmu_replay_record_t);
676
677 *sizep = size;
678
679 return (0);
680}
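/*
 * Rough example of the adjustment above (assumed figures): 1 GiB of
 * changed data at the default 128K recordsize is 8192 blocks, so
 * 8192 * sizeof (blkptr_t) = 1 MiB is subtracted for indirect blocks
 * and 8192 * sizeof (dmu_replay_record_t) is added back for the
 * per-block DRR_WRITE headers -- each a correction of well under 1%.
 */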
681
682typedef struct dmu_recv_begin_arg {
683 const char *drba_origin;
684 dmu_recv_cookie_t *drba_cookie;
685 cred_t *drba_cred;
686} dmu_recv_begin_arg_t;
687
688static int
689recv_begin_check_existing_impl(dmu_recv_begin_arg_t *drba, dsl_dataset_t *ds,
690 uint64_t fromguid)
691{
692 uint64_t val;
693 int error;
694 dsl_pool_t *dp = ds->ds_dir->dd_pool;
695
696 /* must not have any changes since most recent snapshot */
697 if (!drba->drba_cookie->drc_force &&
698 dsl_dataset_modified_since_lastsnap(ds))
699 return (SET_ERROR(ETXTBSY));
700
701 /* temporary clone name must not exist */
702 error = zap_lookup(dp->dp_meta_objset,
703 ds->ds_dir->dd_phys->dd_child_dir_zapobj, recv_clone_name,
704 8, 1, &val);
705 if (error != ENOENT)
706 return (error == 0 ? EBUSY : error);
707
708 /* new snapshot name must not exist */
709 error = zap_lookup(dp->dp_meta_objset,
710 ds->ds_phys->ds_snapnames_zapobj, drba->drba_cookie->drc_tosnap,
711 8, 1, &val);
712 if (error != ENOENT)
713 return (error == 0 ? EEXIST : error);
714
715 if (fromguid != 0) {
716 /* if incremental, most recent snapshot must match fromguid */
717 if (ds->ds_prev == NULL)
718 return (SET_ERROR(ENODEV));
719
720 /*
721 * most recent snapshot must match fromguid, or there are no
722 * changes since the fromguid one
723 */
724 if (ds->ds_prev->ds_phys->ds_guid != fromguid) {
725 uint64_t birth = ds->ds_prev->ds_phys->ds_bp.blk_birth;
726 uint64_t obj = ds->ds_prev->ds_phys->ds_prev_snap_obj;
727 while (obj != 0) {
728 dsl_dataset_t *snap;
729 error = dsl_dataset_hold_obj(dp, obj, FTAG,
730 &snap);
731 if (error != 0)
732 return (SET_ERROR(ENODEV));
733 if (snap->ds_phys->ds_creation_txg < birth) {
734 dsl_dataset_rele(snap, FTAG);
735 return (SET_ERROR(ENODEV));
736 }
737 if (snap->ds_phys->ds_guid == fromguid) {
738 dsl_dataset_rele(snap, FTAG);
739 break; /* it's ok */
740 }
741 obj = snap->ds_phys->ds_prev_snap_obj;
742 dsl_dataset_rele(snap, FTAG);
743 }
744 if (obj == 0)
745 return (SET_ERROR(ENODEV));
746 }
747 } else {
748 /* if full, most recent snapshot must be $ORIGIN */
749 if (ds->ds_phys->ds_prev_snap_txg >= TXG_INITIAL)
750 return (SET_ERROR(ENODEV));
751 }
752
753 return (0);
755}
756
757static int
758dmu_recv_begin_check(void *arg, dmu_tx_t *tx)
759{
760 dmu_recv_begin_arg_t *drba = arg;
761 dsl_pool_t *dp = dmu_tx_pool(tx);
762 struct drr_begin *drrb = drba->drba_cookie->drc_drrb;
763 uint64_t fromguid = drrb->drr_fromguid;
764 int flags = drrb->drr_flags;
765 int error;
766 dsl_dataset_t *ds;
767 const char *tofs = drba->drba_cookie->drc_tofs;
768
769 /* already checked */
770 ASSERT3U(drrb->drr_magic, ==, DMU_BACKUP_MAGIC);
771
772 if (DMU_GET_STREAM_HDRTYPE(drrb->drr_versioninfo) ==
773 DMU_COMPOUNDSTREAM ||
774 drrb->drr_type >= DMU_OST_NUMTYPES ||
775 ((flags & DRR_FLAG_CLONE) && drba->drba_origin == NULL))
776 return (SET_ERROR(EINVAL));
777
778 /* Verify pool version supports SA if SA_SPILL feature set */
779 if ((DMU_GET_FEATUREFLAGS(drrb->drr_versioninfo) &
780 DMU_BACKUP_FEATURE_SA_SPILL) &&
781 spa_version(dp->dp_spa) < SPA_VERSION_SA) {
782 return (SET_ERROR(ENOTSUP));
783 }
784
785 error = dsl_dataset_hold(dp, tofs, FTAG, &ds);
786 if (error == 0) {
787 /* target fs already exists; recv into temp clone */
788
789 /* Can't recv a clone into an existing fs */
790 if (flags & DRR_FLAG_CLONE) {
791 dsl_dataset_rele(ds, FTAG);
792 return (SET_ERROR(EINVAL));
793 }
794
795 error = recv_begin_check_existing_impl(drba, ds, fromguid);
796 dsl_dataset_rele(ds, FTAG);
797 } else if (error == ENOENT) {
798 /* target fs does not exist; must be a full backup or clone */
799 char buf[MAXNAMELEN];
800
801 /*
802 * If it's a non-clone incremental, we are missing the
803 * target fs, so fail the recv.
804 */
805 if (fromguid != 0 && !(flags & DRR_FLAG_CLONE))
806 return (SET_ERROR(ENOENT));
807
808 /* Open the parent of tofs */
809 ASSERT3U(strlen(tofs), <, MAXNAMELEN);
810 (void) strlcpy(buf, tofs, strrchr(tofs, '/') - tofs + 1);
811 error = dsl_dataset_hold(dp, buf, FTAG, &ds);
812 if (error != 0)
813 return (error);
814
815 if (drba->drba_origin != NULL) {
816 dsl_dataset_t *origin;
817 error = dsl_dataset_hold(dp, drba->drba_origin,
818 FTAG, &origin);
819 if (error != 0) {
820 dsl_dataset_rele(ds, FTAG);
821 return (error);
822 }
823 if (!dsl_dataset_is_snapshot(origin)) {
824 dsl_dataset_rele(origin, FTAG);
825 dsl_dataset_rele(ds, FTAG);
826 return (SET_ERROR(EINVAL));
827 }
828 if (origin->ds_phys->ds_guid != fromguid) {
829 dsl_dataset_rele(origin, FTAG);
830 dsl_dataset_rele(ds, FTAG);
831 return (SET_ERROR(ENODEV));
832 }
833 dsl_dataset_rele(origin, FTAG);
834 }
835 dsl_dataset_rele(ds, FTAG);
836 error = 0;
837 }
838 return (error);
839}
840
841static void
842dmu_recv_begin_sync(void *arg, dmu_tx_t *tx)
843{
844 dmu_recv_begin_arg_t *drba = arg;
845 dsl_pool_t *dp = dmu_tx_pool(tx);
846 struct drr_begin *drrb = drba->drba_cookie->drc_drrb;
847 const char *tofs = drba->drba_cookie->drc_tofs;
848 dsl_dataset_t *ds, *newds;
849 uint64_t dsobj;
850 int error;
851 uint64_t crflags;
852
853 crflags = (drrb->drr_flags & DRR_FLAG_CI_DATA) ?
854 DS_FLAG_CI_DATASET : 0;
855
856 error = dsl_dataset_hold(dp, tofs, FTAG, &ds);
857 if (error == 0) {
858 /* create temporary clone */
859 dsobj = dsl_dataset_create_sync(ds->ds_dir, recv_clone_name,
860 ds->ds_prev, crflags, drba->drba_cred, tx);
861 dsl_dataset_rele(ds, FTAG);
862 } else {
863 dsl_dir_t *dd;
864 const char *tail;
865 dsl_dataset_t *origin = NULL;
866
867 VERIFY0(dsl_dir_hold(dp, tofs, FTAG, &dd, &tail));
868
869 if (drba->drba_origin != NULL) {
870 VERIFY0(dsl_dataset_hold(dp, drba->drba_origin,
871 FTAG, &origin));
872 }
873
874 /* Create new dataset. */
875 dsobj = dsl_dataset_create_sync(dd,
876 strrchr(tofs, '/') + 1,
877 origin, crflags, drba->drba_cred, tx);
878 if (origin != NULL)
879 dsl_dataset_rele(origin, FTAG);
880 dsl_dir_rele(dd, FTAG);
881 drba->drba_cookie->drc_newfs = B_TRUE;
882 }
883 VERIFY0(dsl_dataset_own_obj(dp, dsobj, dmu_recv_tag, &newds));
884
885 dmu_buf_will_dirty(newds->ds_dbuf, tx);
886 newds->ds_phys->ds_flags |= DS_FLAG_INCONSISTENT;
887
888 /*
889 * If we actually created a non-clone, we need to create the
890 * objset in our new dataset.
891 */
892 if (BP_IS_HOLE(dsl_dataset_get_blkptr(newds))) {
893 (void) dmu_objset_create_impl(dp->dp_spa,
894 newds, dsl_dataset_get_blkptr(newds), drrb->drr_type, tx);
895 }
896
897 drba->drba_cookie->drc_ds = newds;
898
899 spa_history_log_internal_ds(newds, "receive", tx, "");
900}
901
902/*
903 * NB: callers *MUST* call dmu_recv_stream() if dmu_recv_begin()
904 * succeeds; otherwise we will leak the holds on the datasets.
905 */
906int
907dmu_recv_begin(char *tofs, char *tosnap, struct drr_begin *drrb,
908 boolean_t force, char *origin, dmu_recv_cookie_t *drc)
909{
910 dmu_recv_begin_arg_t drba = { 0 };
911 dmu_replay_record_t *drr;
912
913 bzero(drc, sizeof (dmu_recv_cookie_t));
914 drc->drc_drrb = drrb;
915 drc->drc_tosnap = tosnap;
916 drc->drc_tofs = tofs;
917 drc->drc_force = force;
918
919 if (drrb->drr_magic == BSWAP_64(DMU_BACKUP_MAGIC))
920 drc->drc_byteswap = B_TRUE;
921 else if (drrb->drr_magic != DMU_BACKUP_MAGIC)
922 return (SET_ERROR(EINVAL));
923
924 drr = kmem_zalloc(sizeof (dmu_replay_record_t), KM_SLEEP);
925 drr->drr_type = DRR_BEGIN;
926 drr->drr_u.drr_begin = *drc->drc_drrb;
927 if (drc->drc_byteswap) {
928 fletcher_4_incremental_byteswap(drr,
929 sizeof (dmu_replay_record_t), &drc->drc_cksum);
930 } else {
931 fletcher_4_incremental_native(drr,
932 sizeof (dmu_replay_record_t), &drc->drc_cksum);
933 }
934 kmem_free(drr, sizeof (dmu_replay_record_t));
935
936 if (drc->drc_byteswap) {
937 drrb->drr_magic = BSWAP_64(drrb->drr_magic);
938 drrb->drr_versioninfo = BSWAP_64(drrb->drr_versioninfo);
939 drrb->drr_creation_time = BSWAP_64(drrb->drr_creation_time);
940 drrb->drr_type = BSWAP_32(drrb->drr_type);
941 drrb->drr_toguid = BSWAP_64(drrb->drr_toguid);
942 drrb->drr_fromguid = BSWAP_64(drrb->drr_fromguid);
943 }
944
945 drba.drba_origin = origin;
946 drba.drba_cookie = drc;
947 drba.drba_cred = CRED();
948
949 return (dsl_sync_task(tofs, dmu_recv_begin_check, dmu_recv_begin_sync,
950 &drba, 5));
951}
952
953struct restorearg {
954 int err;
955 boolean_t byteswap;
956 kthread_t *td;
957 struct file *fp;
958 char *buf;
959 uint64_t voff;
960 int bufsize; /* amount of memory allocated for buf */
961 zio_cksum_t cksum;
962 avl_tree_t *guid_to_ds_map;
963};
964
965typedef struct guid_map_entry {
966 uint64_t guid;
967 dsl_dataset_t *gme_ds;
968 avl_node_t avlnode;
969} guid_map_entry_t;
970
971static int
972guid_compare(const void *arg1, const void *arg2)
973{
974 const guid_map_entry_t *gmep1 = arg1;
975 const guid_map_entry_t *gmep2 = arg2;
976
977 if (gmep1->guid < gmep2->guid)
978 return (-1);
979 else if (gmep1->guid > gmep2->guid)
980 return (1);
981 return (0);
982}
983
984static void
985free_guid_map_onexit(void *arg)
986{
987 avl_tree_t *ca = arg;
988 void *cookie = NULL;
989 guid_map_entry_t *gmep;
990
991 while ((gmep = avl_destroy_nodes(ca, &cookie)) != NULL) {
992 dsl_dataset_long_rele(gmep->gme_ds, gmep);
993 dsl_dataset_rele(gmep->gme_ds, gmep);
994 kmem_free(gmep, sizeof (guid_map_entry_t));
995 }
996 avl_destroy(ca);
997 kmem_free(ca, sizeof (avl_tree_t));
998}
999
1000static int
1001restore_bytes(struct restorearg *ra, void *buf, int len, off_t off, ssize_t *resid)
1002{
1003 struct uio auio;
1004 struct iovec aiov;
1005 int error;
1006
1007 aiov.iov_base = buf;
1008 aiov.iov_len = len;
1009 auio.uio_iov = &aiov;
1010 auio.uio_iovcnt = 1;
1011 auio.uio_resid = len;
1012 auio.uio_segflg = UIO_SYSSPACE;
1013 auio.uio_rw = UIO_READ;
1014 auio.uio_offset = off;
1015 auio.uio_td = ra->td;
1016#ifdef _KERNEL
1017 error = fo_read(ra->fp, &auio, ra->td->td_ucred, FOF_OFFSET, ra->td);
1018#else
1019 fprintf(stderr, "%s: returning EOPNOTSUPP\n", __func__);
1020 error = EOPNOTSUPP;
1021#endif
1022 *resid = auio.uio_resid;
1023 return (error);
1024}
1025
1026static void *
1027restore_read(struct restorearg *ra, int len)
1028{
1029 void *rv;
1030 int done = 0;
1031
1032 /* some things will require 8-byte alignment, so everything must */
1033 ASSERT0(len % 8);
1034
1035 while (done < len) {
1036 ssize_t resid;
1037
1038 ra->err = restore_bytes(ra, (caddr_t)ra->buf + done,
1039 len - done, ra->voff, &resid);
1040
1041 if (resid == len - done)
1042 ra->err = SET_ERROR(EINVAL);
1043 ra->voff += len - done - resid;
1044 done = len - resid;
1045 if (ra->err != 0)
1046 return (NULL);
1047 }
1048
1049 ASSERT3U(done, ==, len);
1050 rv = ra->buf;
1051 if (ra->byteswap)
1052 fletcher_4_incremental_byteswap(rv, len, &ra->cksum);
1053 else
1054 fletcher_4_incremental_native(rv, len, &ra->cksum);
1055 return (rv);
1056}
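/*
 * The 8-byte alignment assertion in restore_read() holds because the
 * sender rounds variable-length payloads up itself -- e.g. dump_dnode()
 * emits P2ROUNDUP(dn_bonuslen, 8) bytes of bonus -- and every fixed
 * record is a whole dmu_replay_record_t.
 */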
1057
1058static void
1059backup_byteswap(dmu_replay_record_t *drr)
1060{
1061#define DO64(X) (drr->drr_u.X = BSWAP_64(drr->drr_u.X))
1062#define DO32(X) (drr->drr_u.X = BSWAP_32(drr->drr_u.X))
1063 drr->drr_type = BSWAP_32(drr->drr_type);
1064 drr->drr_payloadlen = BSWAP_32(drr->drr_payloadlen);
1065 switch (drr->drr_type) {
1066 case DRR_BEGIN:
1067 DO64(drr_begin.drr_magic);
1068 DO64(drr_begin.drr_versioninfo);
1069 DO64(drr_begin.drr_creation_time);
1070 DO32(drr_begin.drr_type);
1071 DO32(drr_begin.drr_flags);
1072 DO64(drr_begin.drr_toguid);
1073 DO64(drr_begin.drr_fromguid);
1074 break;
1075 case DRR_OBJECT:
1076 DO64(drr_object.drr_object);
1077 /* DO64(drr_object.drr_allocation_txg); */
1078 DO32(drr_object.drr_type);
1079 DO32(drr_object.drr_bonustype);
1080 DO32(drr_object.drr_blksz);
1081 DO32(drr_object.drr_bonuslen);
1082 DO64(drr_object.drr_toguid);
1083 break;
1084 case DRR_FREEOBJECTS:
1085 DO64(drr_freeobjects.drr_firstobj);
1086 DO64(drr_freeobjects.drr_numobjs);
1087 DO64(drr_freeobjects.drr_toguid);
1088 break;
1089 case DRR_WRITE:
1090 DO64(drr_write.drr_object);
1091 DO32(drr_write.drr_type);
1092 DO64(drr_write.drr_offset);
1093 DO64(drr_write.drr_length);
1094 DO64(drr_write.drr_toguid);
1095 DO64(drr_write.drr_key.ddk_cksum.zc_word[0]);
1096 DO64(drr_write.drr_key.ddk_cksum.zc_word[1]);
1097 DO64(drr_write.drr_key.ddk_cksum.zc_word[2]);
1098 DO64(drr_write.drr_key.ddk_cksum.zc_word[3]);
1099 DO64(drr_write.drr_key.ddk_prop);
1100 break;
1101 case DRR_WRITE_BYREF:
1102 DO64(drr_write_byref.drr_object);
1103 DO64(drr_write_byref.drr_offset);
1104 DO64(drr_write_byref.drr_length);
1105 DO64(drr_write_byref.drr_toguid);
1106 DO64(drr_write_byref.drr_refguid);
1107 DO64(drr_write_byref.drr_refobject);
1108 DO64(drr_write_byref.drr_refoffset);
1109 DO64(drr_write_byref.drr_key.ddk_cksum.zc_word[0]);
1110 DO64(drr_write_byref.drr_key.ddk_cksum.zc_word[1]);
1111 DO64(drr_write_byref.drr_key.ddk_cksum.zc_word[2]);
1112 DO64(drr_write_byref.drr_key.ddk_cksum.zc_word[3]);
1113 DO64(drr_write_byref.drr_key.ddk_prop);
1114 break;
1115 case DRR_FREE:
1116 DO64(drr_free.drr_object);
1117 DO64(drr_free.drr_offset);
1118 DO64(drr_free.drr_length);
1119 DO64(drr_free.drr_toguid);
1120 break;
1121 case DRR_SPILL:
1122 DO64(drr_spill.drr_object);
1123 DO64(drr_spill.drr_length);
1124 DO64(drr_spill.drr_toguid);
1125 break;
1126 case DRR_END:
1127 DO64(drr_end.drr_checksum.zc_word[0]);
1128 DO64(drr_end.drr_checksum.zc_word[1]);
1129 DO64(drr_end.drr_checksum.zc_word[2]);
1130 DO64(drr_end.drr_checksum.zc_word[3]);
1131 DO64(drr_end.drr_toguid);
1132 break;
1133 }
1134#undef DO64
1135#undef DO32
1136}
1137
1138static int
1139restore_object(struct restorearg *ra, objset_t *os, struct drr_object *drro)
1140{
1141 int err;
1142 dmu_tx_t *tx;
1143 void *data = NULL;
1144
1145 if (drro->drr_type == DMU_OT_NONE ||
1146 !DMU_OT_IS_VALID(drro->drr_type) ||
1147 !DMU_OT_IS_VALID(drro->drr_bonustype) ||
1148 drro->drr_checksumtype >= ZIO_CHECKSUM_FUNCTIONS ||
1149 drro->drr_compress >= ZIO_COMPRESS_FUNCTIONS ||
1150 P2PHASE(drro->drr_blksz, SPA_MINBLOCKSIZE) ||
1151 drro->drr_blksz < SPA_MINBLOCKSIZE ||
1152 drro->drr_blksz > SPA_MAXBLOCKSIZE ||
1153 drro->drr_bonuslen > DN_MAX_BONUSLEN) {
1154 return (SET_ERROR(EINVAL));
1155 }
1156
1157 err = dmu_object_info(os, drro->drr_object, NULL);
1158
1159 if (err != 0 && err != ENOENT)
1160 return (SET_ERROR(EINVAL));
1161
1162 if (drro->drr_bonuslen) {
1163 data = restore_read(ra, P2ROUNDUP(drro->drr_bonuslen, 8));
1164 if (ra->err != 0)
1165 return (ra->err);
1166 }
1167
1168 if (err == ENOENT) {
1169 /* currently free, want to be allocated */
1170 tx = dmu_tx_create(os);
1171 dmu_tx_hold_bonus(tx, DMU_NEW_OBJECT);
1172 err = dmu_tx_assign(tx, TXG_WAIT);
1173 if (err != 0) {
1174 dmu_tx_abort(tx);
1175 return (err);
1176 }
1177 err = dmu_object_claim(os, drro->drr_object,
1178 drro->drr_type, drro->drr_blksz,
1179 drro->drr_bonustype, drro->drr_bonuslen, tx);
1180 dmu_tx_commit(tx);
1181 } else {
1182 /* currently allocated, want to be allocated */
1183 err = dmu_object_reclaim(os, drro->drr_object,
1184 drro->drr_type, drro->drr_blksz,
1185 drro->drr_bonustype, drro->drr_bonuslen);
1186 }
1187 if (err != 0) {
1188 return (SET_ERROR(EINVAL));
1189 }
1190
1191 tx = dmu_tx_create(os);
1192 dmu_tx_hold_bonus(tx, drro->drr_object);
1193 err = dmu_tx_assign(tx, TXG_WAIT);
1194 if (err != 0) {
1195 dmu_tx_abort(tx);
1196 return (err);
1197 }
1198
1199 dmu_object_set_checksum(os, drro->drr_object, drro->drr_checksumtype,
1200 tx);
1201 dmu_object_set_compress(os, drro->drr_object, drro->drr_compress, tx);
1202
1203 if (data != NULL) {
1204 dmu_buf_t *db;
1205
1206 VERIFY(0 == dmu_bonus_hold(os, drro->drr_object, FTAG, &db));
1207 dmu_buf_will_dirty(db, tx);
1208
1209 ASSERT3U(db->db_size, >=, drro->drr_bonuslen);
1210 bcopy(data, db->db_data, drro->drr_bonuslen);
1211 if (ra->byteswap) {
1212 dmu_object_byteswap_t byteswap =
1213 DMU_OT_BYTESWAP(drro->drr_bonustype);
1214 dmu_ot_byteswap[byteswap].ob_func(db->db_data,
1215 drro->drr_bonuslen);
1216 }
1217 dmu_buf_rele(db, FTAG);
1218 }
1219 dmu_tx_commit(tx);
1220 return (0);
1221}
1222
1223/* ARGSUSED */
1224static int
1225restore_freeobjects(struct restorearg *ra, objset_t *os,
1226 struct drr_freeobjects *drrfo)
1227{
1228 uint64_t obj;
1229
1230 if (drrfo->drr_firstobj + drrfo->drr_numobjs < drrfo->drr_firstobj)
1231 return (SET_ERROR(EINVAL));
1232
1233 for (obj = drrfo->drr_firstobj;
1234 obj < drrfo->drr_firstobj + drrfo->drr_numobjs;
1235 (void) dmu_object_next(os, &obj, FALSE, 0)) {
1236 int err;
1237
1238 if (dmu_object_info(os, obj, NULL) != 0)
1239 continue;
1240
1241 err = dmu_free_object(os, obj);
1242 if (err != 0)
1243 return (err);
1244 }
1245 return (0);
1246}
1247
1248static int
1249restore_write(struct restorearg *ra, objset_t *os,
1250 struct drr_write *drrw)
1251{
1252 dmu_tx_t *tx;
1253 void *data;
1254 int err;
1255
1256 if (drrw->drr_offset + drrw->drr_length < drrw->drr_offset ||
1257 !DMU_OT_IS_VALID(drrw->drr_type))
1258 return (SET_ERROR(EINVAL));
1259
1260 data = restore_read(ra, drrw->drr_length);
1261 if (data == NULL)
1262 return (ra->err);
1263
1264 if (dmu_object_info(os, drrw->drr_object, NULL) != 0)
1265 return (SET_ERROR(EINVAL));
1266
1267 tx = dmu_tx_create(os);
1268
1269 dmu_tx_hold_write(tx, drrw->drr_object,
1270 drrw->drr_offset, drrw->drr_length);
1271 err = dmu_tx_assign(tx, TXG_WAIT);
1272 if (err != 0) {
1273 dmu_tx_abort(tx);
1274 return (err);
1275 }
1276 if (ra->byteswap) {
1277 dmu_object_byteswap_t byteswap =
1278 DMU_OT_BYTESWAP(drrw->drr_type);
1279 dmu_ot_byteswap[byteswap].ob_func(data, drrw->drr_length);
1280 }
1281 dmu_write(os, drrw->drr_object,
1282 drrw->drr_offset, drrw->drr_length, data, tx);
1283 dmu_tx_commit(tx);
1284 return (0);
1285}
1286
1287/*
1288 * Handle a DRR_WRITE_BYREF record. This record is used in dedup'ed
1289 * streams to refer to a copy of the data that is already on the
1290 * system because it came in earlier in the stream. This function
1291 * finds the earlier copy of the data, and uses that copy instead of
1292 * data from the stream to fulfill this write.
1293 */
1294static int
1295restore_write_byref(struct restorearg *ra, objset_t *os,
1296 struct drr_write_byref *drrwbr)
1297{
1298 dmu_tx_t *tx;
1299 int err;
1300 guid_map_entry_t gmesrch;
1301 guid_map_entry_t *gmep;
1302 avl_index_t where;
1303 objset_t *ref_os = NULL;
1304 dmu_buf_t *dbp;
1305
1306 if (drrwbr->drr_offset + drrwbr->drr_length < drrwbr->drr_offset)
1307 return (SET_ERROR(EINVAL));
1308
1309 /*
1310 * If the GUID of the referenced dataset is different from the
1311 * GUID of the target dataset, find the referenced dataset.
1312 */
1313 if (drrwbr->drr_toguid != drrwbr->drr_refguid) {
1314 gmesrch.guid = drrwbr->drr_refguid;
1315 if ((gmep = avl_find(ra->guid_to_ds_map, &gmesrch,
1316 &where)) == NULL) {
1317 return (SET_ERROR(EINVAL));
1318 }
1319 if (dmu_objset_from_ds(gmep->gme_ds, &ref_os))
1320 return (SET_ERROR(EINVAL));
1321 } else {
1322 ref_os = os;
1323 }
1324
 1325	if ((err = dmu_buf_hold(ref_os, drrwbr->drr_refobject,
 1326	    drrwbr->drr_refoffset, FTAG, &dbp, DMU_READ_PREFETCH)) != 0)
 1327		return (err);
1328
1329 tx = dmu_tx_create(os);
1330
1331 dmu_tx_hold_write(tx, drrwbr->drr_object,
1332 drrwbr->drr_offset, drrwbr->drr_length);
1333 err = dmu_tx_assign(tx, TXG_WAIT);
1334 if (err != 0) {
1335 dmu_tx_abort(tx);
1336 return (err);
1337 }
1338 dmu_write(os, drrwbr->drr_object,
1339 drrwbr->drr_offset, drrwbr->drr_length, dbp->db_data, tx);
1340 dmu_buf_rele(dbp, FTAG);
1341 dmu_tx_commit(tx);
1342 return (0);
1343}
1344
1345static int
1346restore_spill(struct restorearg *ra, objset_t *os, struct drr_spill *drrs)
1347{
1348 dmu_tx_t *tx;
1349 void *data;
1350 dmu_buf_t *db, *db_spill;
1351 int err;
1352
1353 if (drrs->drr_length < SPA_MINBLOCKSIZE ||
1354 drrs->drr_length > SPA_MAXBLOCKSIZE)
1355 return (SET_ERROR(EINVAL));
1356
1357 data = restore_read(ra, drrs->drr_length);
1358 if (data == NULL)
1359 return (ra->err);
1360
1361 if (dmu_object_info(os, drrs->drr_object, NULL) != 0)
1362 return (SET_ERROR(EINVAL));
1363
1364 VERIFY(0 == dmu_bonus_hold(os, drrs->drr_object, FTAG, &db));
1365 if ((err = dmu_spill_hold_by_bonus(db, FTAG, &db_spill)) != 0) {
1366 dmu_buf_rele(db, FTAG);
1367 return (err);
1368 }
1369
1370 tx = dmu_tx_create(os);
1371
1372 dmu_tx_hold_spill(tx, db->db_object);
1373
1374 err = dmu_tx_assign(tx, TXG_WAIT);
1375 if (err != 0) {
1376 dmu_buf_rele(db, FTAG);
1377 dmu_buf_rele(db_spill, FTAG);
1378 dmu_tx_abort(tx);
1379 return (err);
1380 }
1381 dmu_buf_will_dirty(db_spill, tx);
1382
1383 if (db_spill->db_size < drrs->drr_length)
1384 VERIFY(0 == dbuf_spill_set_blksz(db_spill,
1385 drrs->drr_length, tx));
1386 bcopy(data, db_spill->db_data, drrs->drr_length);
1387
1388 dmu_buf_rele(db, FTAG);
1389 dmu_buf_rele(db_spill, FTAG);
1390
1391 dmu_tx_commit(tx);
1392 return (0);
1393}
1394
1395/* ARGSUSED */
1396static int
1397restore_free(struct restorearg *ra, objset_t *os,
1398 struct drr_free *drrf)
1399{
1400 int err;
1401
1402 if (drrf->drr_length != -1ULL &&
1403 drrf->drr_offset + drrf->drr_length < drrf->drr_offset)
1404 return (SET_ERROR(EINVAL));
1405
1406 if (dmu_object_info(os, drrf->drr_object, NULL) != 0)
1407 return (SET_ERROR(EINVAL));
1408
1409 err = dmu_free_long_range(os, drrf->drr_object,
1410 drrf->drr_offset, drrf->drr_length);
1411 return (err);
1412}
1413
1414/* used to destroy the drc_ds on error */
1415static void
1416dmu_recv_cleanup_ds(dmu_recv_cookie_t *drc)
1417{
1418 char name[MAXNAMELEN];
1419 dsl_dataset_name(drc->drc_ds, name);
1420 dsl_dataset_disown(drc->drc_ds, dmu_recv_tag);
1421 (void) dsl_destroy_head(name);
1422}
1423
1424/*
1425 * NB: callers *must* call dmu_recv_end() if this succeeds.
1426 */
1427int
1428dmu_recv_stream(dmu_recv_cookie_t *drc, struct file *fp, offset_t *voffp,
1429 int cleanup_fd, uint64_t *action_handlep)
1430{
1431 struct restorearg ra = { 0 };
1432 dmu_replay_record_t *drr;
1433 objset_t *os;
1434 zio_cksum_t pcksum;
1435 int featureflags;
1436
1437 ra.byteswap = drc->drc_byteswap;
1438 ra.cksum = drc->drc_cksum;
1439 ra.td = curthread;
1440 ra.fp = fp;
1441 ra.voff = *voffp;
1442 ra.bufsize = 1<<20;
1443 ra.buf = kmem_alloc(ra.bufsize, KM_SLEEP);
1444
1445 /* these were verified in dmu_recv_begin */
1446 ASSERT3U(DMU_GET_STREAM_HDRTYPE(drc->drc_drrb->drr_versioninfo), ==,
1447 DMU_SUBSTREAM);
1448 ASSERT3U(drc->drc_drrb->drr_type, <, DMU_OST_NUMTYPES);
1449
1450 /*
1451 * Open the objset we are modifying.
1452 */
1453 VERIFY0(dmu_objset_from_ds(drc->drc_ds, &os));
1454
1455 ASSERT(drc->drc_ds->ds_phys->ds_flags & DS_FLAG_INCONSISTENT);
1456
1457 featureflags = DMU_GET_FEATUREFLAGS(drc->drc_drrb->drr_versioninfo);
1458
1459 /* if this stream is dedup'ed, set up the avl tree for guid mapping */
1460 if (featureflags & DMU_BACKUP_FEATURE_DEDUP) {
1461 minor_t minor;
1462
1463 if (cleanup_fd == -1) {
1464 ra.err = SET_ERROR(EBADF);
1465 goto out;
1466 }
1467 ra.err = zfs_onexit_fd_hold(cleanup_fd, &minor);
1468 if (ra.err != 0) {
1469 cleanup_fd = -1;
1470 goto out;
1471 }
1472
1473 if (*action_handlep == 0) {
1474 ra.guid_to_ds_map =
1475 kmem_alloc(sizeof (avl_tree_t), KM_SLEEP);
1476 avl_create(ra.guid_to_ds_map, guid_compare,
1477 sizeof (guid_map_entry_t),
1478 offsetof(guid_map_entry_t, avlnode));
1479 ra.err = zfs_onexit_add_cb(minor,
1480 free_guid_map_onexit, ra.guid_to_ds_map,
1481 action_handlep);
1482 if (ra.err != 0)
1483 goto out;
1484 } else {
1485 ra.err = zfs_onexit_cb_data(minor, *action_handlep,
1486 (void **)&ra.guid_to_ds_map);
1487 if (ra.err != 0)
1488 goto out;
1489 }
1490
1491 drc->drc_guid_to_ds_map = ra.guid_to_ds_map;
1492 }
1493
1494 /*
1495 * Read records and process them.
1496 */
1497 pcksum = ra.cksum;
1498 while (ra.err == 0 &&
1499 NULL != (drr = restore_read(&ra, sizeof (*drr)))) {
1500 if (issig(JUSTLOOKING) && issig(FORREAL)) {
1501 ra.err = SET_ERROR(EINTR);
1502 goto out;
1503 }
1504
1505 if (ra.byteswap)
1506 backup_byteswap(drr);
1507
1508 switch (drr->drr_type) {
1509 case DRR_OBJECT:
1510 {
1511 /*
1512 * We need to make a copy of the record header,
1513 * because restore_{object,write} may need to
1514 * restore_read(), which will invalidate drr.
1515 */
1516 struct drr_object drro = drr->drr_u.drr_object;
1517 ra.err = restore_object(&ra, os, &drro);
1518 break;
1519 }
1520 case DRR_FREEOBJECTS:
1521 {
1522 struct drr_freeobjects drrfo =
1523 drr->drr_u.drr_freeobjects;
1524 ra.err = restore_freeobjects(&ra, os, &drrfo);
1525 break;
1526 }
1527 case DRR_WRITE:
1528 {
1529 struct drr_write drrw = drr->drr_u.drr_write;
1530 ra.err = restore_write(&ra, os, &drrw);
1531 break;
1532 }
1533 case DRR_WRITE_BYREF:
1534 {
1535 struct drr_write_byref drrwbr =
1536 drr->drr_u.drr_write_byref;
1537 ra.err = restore_write_byref(&ra, os, &drrwbr);
1538 break;
1539 }
1540 case DRR_FREE:
1541 {
1542 struct drr_free drrf = drr->drr_u.drr_free;
1543 ra.err = restore_free(&ra, os, &drrf);
1544 break;
1545 }
1546 case DRR_END:
1547 {
1548 struct drr_end drre = drr->drr_u.drr_end;
1549 /*
1550 * We compare against the *previous* checksum
1551 * value, because the stored checksum is of
1552 * everything before the DRR_END record.
1553 */
1554 if (!ZIO_CHECKSUM_EQUAL(drre.drr_checksum, pcksum))
1555 ra.err = SET_ERROR(ECKSUM);
1556 goto out;
1557 }
1558 case DRR_SPILL:
1559 {
1560 struct drr_spill drrs = drr->drr_u.drr_spill;
1561 ra.err = restore_spill(&ra, os, &drrs);
1562 break;
1563 }
1564 default:
1565 ra.err = SET_ERROR(EINVAL);
1566 goto out;
1567 }
1568 pcksum = ra.cksum;
1569 }
	/* a well-formed stream exits via the DRR_END case above */
 1570	ASSERT(ra.err != 0);
1571
1572out:
1573 if ((featureflags & DMU_BACKUP_FEATURE_DEDUP) && (cleanup_fd != -1))
1574 zfs_onexit_fd_rele(cleanup_fd);
1575
1576 if (ra.err != 0) {
1577 /*
1578 * destroy what we created, so we don't leave it in the
1579 * inconsistent restoring state.
1580 */
1581 dmu_recv_cleanup_ds(drc);
1582 }
1583
1584 kmem_free(ra.buf, ra.bufsize);
1585 *voffp = ra.voff;
1586 return (ra.err);
1587}
1588
1589static int
1590dmu_recv_end_check(void *arg, dmu_tx_t *tx)
1591{
1592 dmu_recv_cookie_t *drc = arg;
1593 dsl_pool_t *dp = dmu_tx_pool(tx);
1594 int error;
1595
1596 ASSERT3P(drc->drc_ds->ds_owner, ==, dmu_recv_tag);
1597
1598 if (!drc->drc_newfs) {
1599 dsl_dataset_t *origin_head;
1600
1601 error = dsl_dataset_hold(dp, drc->drc_tofs, FTAG, &origin_head);
1602 if (error != 0)
1603 return (error);
1604 error = dsl_dataset_clone_swap_check_impl(drc->drc_ds,
1605 origin_head, drc->drc_force);
1606 if (error != 0) {
1607 dsl_dataset_rele(origin_head, FTAG);
1608 return (error);
1609 }
1610 error = dsl_dataset_snapshot_check_impl(origin_head,
1611 drc->drc_tosnap, tx);
1612 dsl_dataset_rele(origin_head, FTAG);
1613 if (error != 0)
1614 return (error);
1615
1616 error = dsl_destroy_head_check_impl(drc->drc_ds, 1);
1617 } else {
1618 error = dsl_dataset_snapshot_check_impl(drc->drc_ds,
1619 drc->drc_tosnap, tx);
1620 }
1621 return (error);
1622}
1623
1624static void
1625dmu_recv_end_sync(void *arg, dmu_tx_t *tx)
1626{
1627 dmu_recv_cookie_t *drc = arg;
1628 dsl_pool_t *dp = dmu_tx_pool(tx);
1629
1630 spa_history_log_internal_ds(drc->drc_ds, "finish receiving",
1631 tx, "snap=%s", drc->drc_tosnap);
1632
1633 if (!drc->drc_newfs) {
1634 dsl_dataset_t *origin_head;
1635
1636 VERIFY0(dsl_dataset_hold(dp, drc->drc_tofs, FTAG,
1637 &origin_head));
1638 dsl_dataset_clone_swap_sync_impl(drc->drc_ds,
1639 origin_head, tx);
1640 dsl_dataset_snapshot_sync_impl(origin_head,
1641 drc->drc_tosnap, tx);
1642
1643 /* set snapshot's creation time and guid */
1644 dmu_buf_will_dirty(origin_head->ds_prev->ds_dbuf, tx);
1645 origin_head->ds_prev->ds_phys->ds_creation_time =
1646 drc->drc_drrb->drr_creation_time;
1647 origin_head->ds_prev->ds_phys->ds_guid =
1648 drc->drc_drrb->drr_toguid;
1649 origin_head->ds_prev->ds_phys->ds_flags &=
1650 ~DS_FLAG_INCONSISTENT;
1651
1652 dmu_buf_will_dirty(origin_head->ds_dbuf, tx);
1653 origin_head->ds_phys->ds_flags &= ~DS_FLAG_INCONSISTENT;
1654
1655 dsl_dataset_rele(origin_head, FTAG);
1656 dsl_destroy_head_sync_impl(drc->drc_ds, tx);
1657 } else {
1658 dsl_dataset_t *ds = drc->drc_ds;
1659
1660 dsl_dataset_snapshot_sync_impl(ds, drc->drc_tosnap, tx);
1661
1662 /* set snapshot's creation time and guid */
1663 dmu_buf_will_dirty(ds->ds_prev->ds_dbuf, tx);
1664 ds->ds_prev->ds_phys->ds_creation_time =
1665 drc->drc_drrb->drr_creation_time;
1666 ds->ds_prev->ds_phys->ds_guid = drc->drc_drrb->drr_toguid;
1667 ds->ds_prev->ds_phys->ds_flags &= ~DS_FLAG_INCONSISTENT;
1668
1669 dmu_buf_will_dirty(ds->ds_dbuf, tx);
1670 ds->ds_phys->ds_flags &= ~DS_FLAG_INCONSISTENT;
1671 }
1672 drc->drc_newsnapobj = drc->drc_ds->ds_phys->ds_prev_snap_obj;
1673 /*
1674 * Release the hold from dmu_recv_begin. This must be done before
1675 * we return to open context, so that when we free the dataset's dnode,
1676 * we can evict its bonus buffer.
1677 */
1678 dsl_dataset_disown(drc->drc_ds, dmu_recv_tag);
1679 drc->drc_ds = NULL;
1680}
1681
1682static int
1683add_ds_to_guidmap(const char *name, avl_tree_t *guid_map, uint64_t snapobj)
1684{
1685 dsl_pool_t *dp;
1686 dsl_dataset_t *snapds;
1687 guid_map_entry_t *gmep;
1688 int err;
1689
1690 ASSERT(guid_map != NULL);
1691
1692 err = dsl_pool_hold(name, FTAG, &dp);
1693 if (err != 0)
 1694		return (err);
 1695	gmep = kmem_alloc(sizeof (*gmep), KM_SLEEP);
 1696	err = dsl_dataset_hold_obj(dp, snapobj, gmep, &snapds);
 1697	if (err == 0) {
 1698		gmep->guid = snapds->ds_phys->ds_guid;
 1699		gmep->gme_ds = snapds;
 1700		avl_add(guid_map, gmep);
 1701		dsl_dataset_long_hold(snapds, gmep);
 1702	} else {
 1703		kmem_free(gmep, sizeof (guid_map_entry_t));
 1704	}
 1705
 1706	dsl_pool_rele(dp, FTAG);
 1707	return (err);
 1708}
1/*
2 * CDDL HEADER START
3 *
4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
7 *
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
12 *
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 *
19 * CDDL HEADER END
20 */
21/*
22 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
23 * Copyright 2011 Nexenta Systems, Inc. All rights reserved.
24 * Copyright (c) 2013 by Delphix. All rights reserved.
25 * Copyright (c) 2012, Joyent, Inc. All rights reserved.
26 * Copyright (c) 2012, Martin Matuska <mm@FreeBSD.org>. All rights reserved.
27 */
28
29#include <sys/dmu.h>
30#include <sys/dmu_impl.h>
31#include <sys/dmu_tx.h>
32#include <sys/dbuf.h>
33#include <sys/dnode.h>
34#include <sys/zfs_context.h>
35#include <sys/dmu_objset.h>
36#include <sys/dmu_traverse.h>
37#include <sys/dsl_dataset.h>
38#include <sys/dsl_dir.h>
39#include <sys/dsl_prop.h>
40#include <sys/dsl_pool.h>
41#include <sys/dsl_synctask.h>
42#include <sys/zfs_ioctl.h>
43#include <sys/zap.h>
44#include <sys/zio_checksum.h>
45#include <sys/zfs_znode.h>
46#include <zfs_fletcher.h>
47#include <sys/avl.h>
48#include <sys/ddt.h>
49#include <sys/zfs_onexit.h>
50#include <sys/dmu_send.h>
51#include <sys/dsl_destroy.h>
52
53/* Set this tunable to TRUE to replace corrupt data with 0x2f5baddb10c */
54int zfs_send_corrupt_data = B_FALSE;
55
56static char *dmu_recv_tag = "dmu_recv_tag";
57static const char *recv_clone_name = "%recv";
58
59static int
60dump_bytes(dmu_sendarg_t *dsp, void *buf, int len)
61{
62 dsl_dataset_t *ds = dsp->dsa_os->os_dsl_dataset;
63 struct uio auio;
64 struct iovec aiov;
65 ASSERT0(len % 8);
66
67 fletcher_4_incremental_native(buf, len, &dsp->dsa_zc);
68 aiov.iov_base = buf;
69 aiov.iov_len = len;
70 auio.uio_iov = &aiov;
71 auio.uio_iovcnt = 1;
72 auio.uio_resid = len;
73 auio.uio_segflg = UIO_SYSSPACE;
74 auio.uio_rw = UIO_WRITE;
75 auio.uio_offset = (off_t)-1;
76 auio.uio_td = dsp->dsa_td;
77#ifdef _KERNEL
78 if (dsp->dsa_fp->f_type == DTYPE_VNODE)
79 bwillwrite();
80 dsp->dsa_err = fo_write(dsp->dsa_fp, &auio, dsp->dsa_td->td_ucred, 0,
81 dsp->dsa_td);
82#else
83 fprintf(stderr, "%s: returning EOPNOTSUPP\n", __func__);
84 dsp->dsa_err = EOPNOTSUPP;
85#endif
86 mutex_enter(&ds->ds_sendstream_lock);
87 *dsp->dsa_off += len;
88 mutex_exit(&ds->ds_sendstream_lock);
89
90 return (dsp->dsa_err);
91}
92
93static int
94dump_free(dmu_sendarg_t *dsp, uint64_t object, uint64_t offset,
95 uint64_t length)
96{
97 struct drr_free *drrf = &(dsp->dsa_drr->drr_u.drr_free);
98
99 if (length != -1ULL && offset + length < offset)
100 length = -1ULL;
101
102 /*
103 * If there is a pending op, but it's not PENDING_FREE, push it out,
104 * since free block aggregation can only be done for blocks of the
105 * same type (i.e., DRR_FREE records can only be aggregated with
106 * other DRR_FREE records. DRR_FREEOBJECTS records can only be
107 * aggregated with other DRR_FREEOBJECTS records.
108 */
109 if (dsp->dsa_pending_op != PENDING_NONE &&
110 dsp->dsa_pending_op != PENDING_FREE) {
111 if (dump_bytes(dsp, dsp->dsa_drr,
112 sizeof (dmu_replay_record_t)) != 0)
113 return (SET_ERROR(EINTR));
114 dsp->dsa_pending_op = PENDING_NONE;
115 }
116
117 if (dsp->dsa_pending_op == PENDING_FREE) {
118 /*
119 * There should never be a PENDING_FREE if length is -1
120 * (because dump_dnode is the only place where this
121 * function is called with a -1, and only after flushing
122 * any pending record).
123 */
124 ASSERT(length != -1ULL);
125 /*
126 * Check to see whether this free block can be aggregated
127 * with pending one.
128 */
129 if (drrf->drr_object == object && drrf->drr_offset +
130 drrf->drr_length == offset) {
131 drrf->drr_length += length;
132 return (0);
133 } else {
134 /* not a continuation. Push out pending record */
135 if (dump_bytes(dsp, dsp->dsa_drr,
136 sizeof (dmu_replay_record_t)) != 0)
137 return (SET_ERROR(EINTR));
138 dsp->dsa_pending_op = PENDING_NONE;
139 }
140 }
141 /* create a FREE record and make it pending */
142 bzero(dsp->dsa_drr, sizeof (dmu_replay_record_t));
143 dsp->dsa_drr->drr_type = DRR_FREE;
144 drrf->drr_object = object;
145 drrf->drr_offset = offset;
146 drrf->drr_length = length;
147 drrf->drr_toguid = dsp->dsa_toguid;
148 if (length == -1ULL) {
149 if (dump_bytes(dsp, dsp->dsa_drr,
150 sizeof (dmu_replay_record_t)) != 0)
151 return (SET_ERROR(EINTR));
152 } else {
153 dsp->dsa_pending_op = PENDING_FREE;
154 }
155
156 return (0);
157}
158
159static int
160dump_data(dmu_sendarg_t *dsp, dmu_object_type_t type,
161 uint64_t object, uint64_t offset, int blksz, const blkptr_t *bp, void *data)
162{
163 struct drr_write *drrw = &(dsp->dsa_drr->drr_u.drr_write);
164
165
166 /*
167 * If there is any kind of pending aggregation (currently either
168 * a grouping of free objects or free blocks), push it out to
169 * the stream, since aggregation can't be done across operations
170 * of different types.
171 */
172 if (dsp->dsa_pending_op != PENDING_NONE) {
173 if (dump_bytes(dsp, dsp->dsa_drr,
174 sizeof (dmu_replay_record_t)) != 0)
175 return (SET_ERROR(EINTR));
176 dsp->dsa_pending_op = PENDING_NONE;
177 }
178 /* write a DATA record */
179 bzero(dsp->dsa_drr, sizeof (dmu_replay_record_t));
180 dsp->dsa_drr->drr_type = DRR_WRITE;
181 drrw->drr_object = object;
182 drrw->drr_type = type;
183 drrw->drr_offset = offset;
184 drrw->drr_length = blksz;
185 drrw->drr_toguid = dsp->dsa_toguid;
186 drrw->drr_checksumtype = BP_GET_CHECKSUM(bp);
187 if (zio_checksum_table[drrw->drr_checksumtype].ci_dedup)
188 drrw->drr_checksumflags |= DRR_CHECKSUM_DEDUP;
189 DDK_SET_LSIZE(&drrw->drr_key, BP_GET_LSIZE(bp));
190 DDK_SET_PSIZE(&drrw->drr_key, BP_GET_PSIZE(bp));
191 DDK_SET_COMPRESS(&drrw->drr_key, BP_GET_COMPRESS(bp));
192 drrw->drr_key.ddk_cksum = bp->blk_cksum;
193
194 if (dump_bytes(dsp, dsp->dsa_drr, sizeof (dmu_replay_record_t)) != 0)
195 return (SET_ERROR(EINTR));
196 if (dump_bytes(dsp, data, blksz) != 0)
197 return (SET_ERROR(EINTR));
198 return (0);
199}
200
201static int
202dump_spill(dmu_sendarg_t *dsp, uint64_t object, int blksz, void *data)
203{
204 struct drr_spill *drrs = &(dsp->dsa_drr->drr_u.drr_spill);
205
206 if (dsp->dsa_pending_op != PENDING_NONE) {
207 if (dump_bytes(dsp, dsp->dsa_drr,
208 sizeof (dmu_replay_record_t)) != 0)
209 return (SET_ERROR(EINTR));
210 dsp->dsa_pending_op = PENDING_NONE;
211 }
212
213 /* write a SPILL record */
214 bzero(dsp->dsa_drr, sizeof (dmu_replay_record_t));
215 dsp->dsa_drr->drr_type = DRR_SPILL;
216 drrs->drr_object = object;
217 drrs->drr_length = blksz;
218 drrs->drr_toguid = dsp->dsa_toguid;
219
220 if (dump_bytes(dsp, dsp->dsa_drr, sizeof (dmu_replay_record_t)))
221 return (SET_ERROR(EINTR));
222 if (dump_bytes(dsp, data, blksz))
223 return (SET_ERROR(EINTR));
224 return (0);
225}
226
227static int
228dump_freeobjects(dmu_sendarg_t *dsp, uint64_t firstobj, uint64_t numobjs)
229{
230 struct drr_freeobjects *drrfo = &(dsp->dsa_drr->drr_u.drr_freeobjects);
231
232 /*
233 * If there is a pending op, but it's not PENDING_FREEOBJECTS,
234 * push it out, since free block aggregation can only be done for
235 * blocks of the same type (i.e., DRR_FREE records can only be
236 * aggregated with other DRR_FREE records. DRR_FREEOBJECTS records
237 * can only be aggregated with other DRR_FREEOBJECTS records).
238 */
239 if (dsp->dsa_pending_op != PENDING_NONE &&
240 dsp->dsa_pending_op != PENDING_FREEOBJECTS) {
241 if (dump_bytes(dsp, dsp->dsa_drr,
242 sizeof (dmu_replay_record_t)) != 0)
243 return (SET_ERROR(EINTR));
244 dsp->dsa_pending_op = PENDING_NONE;
245 }
246 if (dsp->dsa_pending_op == PENDING_FREEOBJECTS) {
247 /*
248 * See whether this free object array can be aggregated
249 * with the pending one.
250 */
251 if (drrfo->drr_firstobj + drrfo->drr_numobjs == firstobj) {
252 drrfo->drr_numobjs += numobjs;
253 return (0);
254 } else {
255 /* can't be aggregated. Push out pending record */
256 if (dump_bytes(dsp, dsp->dsa_drr,
257 sizeof (dmu_replay_record_t)) != 0)
258 return (SET_ERROR(EINTR));
259 dsp->dsa_pending_op = PENDING_NONE;
260 }
261 }
262
263 /* write a FREEOBJECTS record */
264 bzero(dsp->dsa_drr, sizeof (dmu_replay_record_t));
265 dsp->dsa_drr->drr_type = DRR_FREEOBJECTS;
266 drrfo->drr_firstobj = firstobj;
267 drrfo->drr_numobjs = numobjs;
268 drrfo->drr_toguid = dsp->dsa_toguid;
269
270 dsp->dsa_pending_op = PENDING_FREEOBJECTS;
271
272 return (0);
273}
274
275static int
276dump_dnode(dmu_sendarg_t *dsp, uint64_t object, dnode_phys_t *dnp)
277{
278 struct drr_object *drro = &(dsp->dsa_drr->drr_u.drr_object);
279
280 if (dnp == NULL || dnp->dn_type == DMU_OT_NONE)
281 return (dump_freeobjects(dsp, object, 1));
282
283 if (dsp->dsa_pending_op != PENDING_NONE) {
284 if (dump_bytes(dsp, dsp->dsa_drr,
285 sizeof (dmu_replay_record_t)) != 0)
286 return (SET_ERROR(EINTR));
287 dsp->dsa_pending_op = PENDING_NONE;
288 }
289
290 /* write an OBJECT record */
291 bzero(dsp->dsa_drr, sizeof (dmu_replay_record_t));
292 dsp->dsa_drr->drr_type = DRR_OBJECT;
293 drro->drr_object = object;
294 drro->drr_type = dnp->dn_type;
295 drro->drr_bonustype = dnp->dn_bonustype;
296 drro->drr_blksz = dnp->dn_datablkszsec << SPA_MINBLOCKSHIFT;
297 drro->drr_bonuslen = dnp->dn_bonuslen;
298 drro->drr_checksumtype = dnp->dn_checksum;
299 drro->drr_compress = dnp->dn_compress;
300 drro->drr_toguid = dsp->dsa_toguid;
301
302 if (dump_bytes(dsp, dsp->dsa_drr, sizeof (dmu_replay_record_t)) != 0)
303 return (SET_ERROR(EINTR));
304
305 if (dump_bytes(dsp, DN_BONUS(dnp), P2ROUNDUP(dnp->dn_bonuslen, 8)) != 0)
306 return (SET_ERROR(EINTR));
307
308 /* free anything past the end of the file */
309 if (dump_free(dsp, object, (dnp->dn_maxblkid + 1) *
310 (dnp->dn_datablkszsec << SPA_MINBLOCKSHIFT), -1ULL))
311 return (SET_ERROR(EINTR));
312 if (dsp->dsa_err != 0)
313 return (SET_ERROR(EINTR));
314 return (0);
315}
316
317#define BP_SPAN(dnp, level) \
318 (((uint64_t)dnp->dn_datablkszsec) << (SPA_MINBLOCKSHIFT + \
319 (level) * (dnp->dn_indblkshift - SPA_BLKPTRSHIFT)))
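/*
 * Worked example (editor's note): with 128K data blocks
 * (dn_datablkszsec = 256, SPA_MINBLOCKSHIFT = 9) and 16K indirect
 * blocks (dn_indblkshift = 14, SPA_BLKPTRSHIFT = 7), each indirect
 * level widens the span by a factor of 2^(14 - 7) = 128:
 *
 *	BP_SPAN(dnp, 0) = 256 << 9          = 128K
 *	BP_SPAN(dnp, 1) = 256 << (9 + 7)    = 16M
 *	BP_SPAN(dnp, 2) = 256 << (9 + 14)   = 2G
 */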
320
321/* ARGSUSED */
322static int
323backup_cb(spa_t *spa, zilog_t *zilog, const blkptr_t *bp,
324 const zbookmark_t *zb, const dnode_phys_t *dnp, void *arg)
325{
326 dmu_sendarg_t *dsp = arg;
327 dmu_object_type_t type = bp ? BP_GET_TYPE(bp) : DMU_OT_NONE;
328 int err = 0;
329
330 if (issig(JUSTLOOKING) && issig(FORREAL))
331 return (SET_ERROR(EINTR));
332
333 if (zb->zb_object != DMU_META_DNODE_OBJECT &&
334 DMU_OBJECT_IS_SPECIAL(zb->zb_object)) {
335 return (0);
336 } else if (bp == NULL && zb->zb_object == DMU_META_DNODE_OBJECT) {
337 uint64_t span = BP_SPAN(dnp, zb->zb_level);
338 uint64_t dnobj = (zb->zb_blkid * span) >> DNODE_SHIFT;
339 err = dump_freeobjects(dsp, dnobj, span >> DNODE_SHIFT);
340 } else if (bp == NULL) {
341 uint64_t span = BP_SPAN(dnp, zb->zb_level);
342 err = dump_free(dsp, zb->zb_object, zb->zb_blkid * span, span);
343 } else if (zb->zb_level > 0 || type == DMU_OT_OBJSET) {
344 return (0);
345 } else if (type == DMU_OT_DNODE) {
346 dnode_phys_t *blk;
347 int i;
348 int blksz = BP_GET_LSIZE(bp);
349 uint32_t aflags = ARC_WAIT;
350 arc_buf_t *abuf;
351
352 if (arc_read(NULL, spa, bp, arc_getbuf_func, &abuf,
353 ZIO_PRIORITY_ASYNC_READ, ZIO_FLAG_CANFAIL,
354 &aflags, zb) != 0)
355 return (SET_ERROR(EIO));
356
357 blk = abuf->b_data;
358 for (i = 0; i < blksz >> DNODE_SHIFT; i++) {
359 uint64_t dnobj = (zb->zb_blkid <<
360 (DNODE_BLOCK_SHIFT - DNODE_SHIFT)) + i;
361 err = dump_dnode(dsp, dnobj, blk+i);
362 if (err != 0)
363 break;
364 }
365 (void) arc_buf_remove_ref(abuf, &abuf);
366 } else if (type == DMU_OT_SA) {
367 uint32_t aflags = ARC_WAIT;
368 arc_buf_t *abuf;
369 int blksz = BP_GET_LSIZE(bp);
370
371 if (arc_read(NULL, spa, bp, arc_getbuf_func, &abuf,
372 ZIO_PRIORITY_ASYNC_READ, ZIO_FLAG_CANFAIL,
373 &aflags, zb) != 0)
374 return (SET_ERROR(EIO));
375
376 err = dump_spill(dsp, zb->zb_object, blksz, abuf->b_data);
377 (void) arc_buf_remove_ref(abuf, &abuf);
378 } else { /* it's a level-0 block of a regular object */
379 uint32_t aflags = ARC_WAIT;
380 arc_buf_t *abuf;
381 int blksz = BP_GET_LSIZE(bp);
382
383 if (arc_read(NULL, spa, bp, arc_getbuf_func, &abuf,
384 ZIO_PRIORITY_ASYNC_READ, ZIO_FLAG_CANFAIL,
385 &aflags, zb) != 0) {
386 if (zfs_send_corrupt_data) {
387 /* Send a block filled with 0x"zfs badd bloc" */
388 abuf = arc_buf_alloc(spa, blksz, &abuf,
389 ARC_BUFC_DATA);
390 uint64_t *ptr;
391 for (ptr = abuf->b_data;
392 (char *)ptr < (char *)abuf->b_data + blksz;
393 ptr++)
394 *ptr = 0x2f5baddb10c;
395 } else {
396 return (SET_ERROR(EIO));
397 }
398 }
399
400 err = dump_data(dsp, type, zb->zb_object, zb->zb_blkid * blksz,
401 blksz, bp, abuf->b_data);
402 (void) arc_buf_remove_ref(abuf, &abuf);
403 }
404
405 ASSERT(err == 0 || err == EINTR);
406 return (err);
407}
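/*
 * Editor's summary of the dispatch above (illustrative, not from the
 * source):
 *
 *	hole in the meta-dnode object      -> DRR_FREEOBJECTS
 *	hole in an ordinary object         -> DRR_FREE
 *	level-0 DMU_OT_DNODE block         -> one DRR_OBJECT per dnode
 *	level-0 DMU_OT_SA block            -> DRR_SPILL
 *	other level-0 data block           -> DRR_WRITE
 *	indirect blocks, objset, specials  -> no record emitted
 */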
408
409/*
410 * Releases dp, ds, and fromds, using the specified tag.
411 */
412static int
413dmu_send_impl(void *tag, dsl_pool_t *dp, dsl_dataset_t *ds,
414#ifdef illumos
415 dsl_dataset_t *fromds, int outfd, vnode_t *vp, offset_t *off)
416#else
417 dsl_dataset_t *fromds, int outfd, struct file *fp, offset_t *off)
418#endif
419{
420 objset_t *os;
421 dmu_replay_record_t *drr;
422 dmu_sendarg_t *dsp;
423 int err;
424 uint64_t fromtxg = 0;
425
426 if (fromds != NULL && !dsl_dataset_is_before(ds, fromds)) {
427 dsl_dataset_rele(fromds, tag);
428 dsl_dataset_rele(ds, tag);
429 dsl_pool_rele(dp, tag);
430 return (SET_ERROR(EXDEV));
431 }
432
433 err = dmu_objset_from_ds(ds, &os);
434 if (err != 0) {
435 if (fromds != NULL)
436 dsl_dataset_rele(fromds, tag);
437 dsl_dataset_rele(ds, tag);
438 dsl_pool_rele(dp, tag);
439 return (err);
440 }
441
442 drr = kmem_zalloc(sizeof (dmu_replay_record_t), KM_SLEEP);
443 drr->drr_type = DRR_BEGIN;
444 drr->drr_u.drr_begin.drr_magic = DMU_BACKUP_MAGIC;
445 DMU_SET_STREAM_HDRTYPE(drr->drr_u.drr_begin.drr_versioninfo,
446 DMU_SUBSTREAM);
447
448#ifdef _KERNEL
449 if (dmu_objset_type(os) == DMU_OST_ZFS) {
450 uint64_t version;
451 if (zfs_get_zplprop(os, ZFS_PROP_VERSION, &version) != 0) {
452 kmem_free(drr, sizeof (dmu_replay_record_t));
453 if (fromds != NULL)
454 dsl_dataset_rele(fromds, tag);
455 dsl_dataset_rele(ds, tag);
456 dsl_pool_rele(dp, tag);
457 return (SET_ERROR(EINVAL));
458 }
459 if (version >= ZPL_VERSION_SA) {
460 DMU_SET_FEATUREFLAGS(
461 drr->drr_u.drr_begin.drr_versioninfo,
462 DMU_BACKUP_FEATURE_SA_SPILL);
463 }
464 }
465#endif
466
467 drr->drr_u.drr_begin.drr_creation_time =
468 ds->ds_phys->ds_creation_time;
469 drr->drr_u.drr_begin.drr_type = dmu_objset_type(os);
470 if (fromds != NULL && ds->ds_dir != fromds->ds_dir)
471 drr->drr_u.drr_begin.drr_flags |= DRR_FLAG_CLONE;
472 drr->drr_u.drr_begin.drr_toguid = ds->ds_phys->ds_guid;
473 if (ds->ds_phys->ds_flags & DS_FLAG_CI_DATASET)
474 drr->drr_u.drr_begin.drr_flags |= DRR_FLAG_CI_DATA;
475
476 if (fromds != NULL)
477 drr->drr_u.drr_begin.drr_fromguid = fromds->ds_phys->ds_guid;
478 dsl_dataset_name(ds, drr->drr_u.drr_begin.drr_toname);
479
480 if (fromds != NULL) {
481 fromtxg = fromds->ds_phys->ds_creation_txg;
482 dsl_dataset_rele(fromds, tag);
483 fromds = NULL;
484 }
485
486 dsp = kmem_zalloc(sizeof (dmu_sendarg_t), KM_SLEEP);
487
488 dsp->dsa_drr = drr;
489 dsp->dsa_outfd = outfd;
490 dsp->dsa_proc = curproc;
491 dsp->dsa_td = curthread;
492 dsp->dsa_fp = fp;
493 dsp->dsa_os = os;
494 dsp->dsa_off = off;
495 dsp->dsa_toguid = ds->ds_phys->ds_guid;
496 ZIO_SET_CHECKSUM(&dsp->dsa_zc, 0, 0, 0, 0);
497 dsp->dsa_pending_op = PENDING_NONE;
498
499 mutex_enter(&ds->ds_sendstream_lock);
500 list_insert_head(&ds->ds_sendstreams, dsp);
501 mutex_exit(&ds->ds_sendstream_lock);
502
503 dsl_dataset_long_hold(ds, FTAG);
504 dsl_pool_rele(dp, tag);
505
506 if (dump_bytes(dsp, drr, sizeof (dmu_replay_record_t)) != 0) {
507 err = dsp->dsa_err;
508 goto out;
509 }
510
511 err = traverse_dataset(ds, fromtxg, TRAVERSE_PRE | TRAVERSE_PREFETCH,
512 backup_cb, dsp);
513
514 if (dsp->dsa_pending_op != PENDING_NONE)
515 if (dump_bytes(dsp, drr, sizeof (dmu_replay_record_t)) != 0)
516 err = SET_ERROR(EINTR);
517
518 if (err != 0) {
519 if (err == EINTR && dsp->dsa_err != 0)
520 err = dsp->dsa_err;
521 goto out;
522 }
523
524 bzero(drr, sizeof (dmu_replay_record_t));
525 drr->drr_type = DRR_END;
526 drr->drr_u.drr_end.drr_checksum = dsp->dsa_zc;
527 drr->drr_u.drr_end.drr_toguid = dsp->dsa_toguid;
528
529 if (dump_bytes(dsp, drr, sizeof (dmu_replay_record_t)) != 0) {
530 err = dsp->dsa_err;
531 goto out;
532 }
533
534out:
535 mutex_enter(&ds->ds_sendstream_lock);
536 list_remove(&ds->ds_sendstreams, dsp);
537 mutex_exit(&ds->ds_sendstream_lock);
538
539 kmem_free(drr, sizeof (dmu_replay_record_t));
540 kmem_free(dsp, sizeof (dmu_sendarg_t));
541
542 dsl_dataset_long_rele(ds, FTAG);
543 dsl_dataset_rele(ds, tag);
544
545 return (err);
546}
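/*
 * Editor's note: the substream written above is framed as
 *
 *	DRR_BEGIN  (magic, versioninfo, toguid/fromguid, toname)
 *	  ... DRR_OBJECT / DRR_WRITE / DRR_FREE / ... body records ...
 *	DRR_END    (fletcher-4 checksum of everything before it)
 *
 * with any still-pending aggregated free record flushed before
 * DRR_END goes out.
 */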
547
548int
549dmu_send_obj(const char *pool, uint64_t tosnap, uint64_t fromsnap,
550#ifdef illumos
551 int outfd, vnode_t *vp, offset_t *off)
552#else
553 int outfd, struct file *fp, offset_t *off)
554#endif
555{
556 dsl_pool_t *dp;
557 dsl_dataset_t *ds;
558 dsl_dataset_t *fromds = NULL;
559 int err;
560
561 err = dsl_pool_hold(pool, FTAG, &dp);
562 if (err != 0)
563 return (err);
564
565 err = dsl_dataset_hold_obj(dp, tosnap, FTAG, &ds);
566 if (err != 0) {
567 dsl_pool_rele(dp, FTAG);
568 return (err);
569 }
570
571 if (fromsnap != 0) {
572 err = dsl_dataset_hold_obj(dp, fromsnap, FTAG, &fromds);
573 if (err != 0) {
574 dsl_dataset_rele(ds, FTAG);
575 dsl_pool_rele(dp, FTAG);
576 return (err);
577 }
578 }
579
580 return (dmu_send_impl(FTAG, dp, ds, fromds, outfd, fp, off));
581}
582
583int
584dmu_send(const char *tosnap, const char *fromsnap,
585#ifdef illumos
586 int outfd, vnode_t *vp, offset_t *off)
587#else
588 int outfd, struct file *fp, offset_t *off)
589#endif
590{
591 dsl_pool_t *dp;
592 dsl_dataset_t *ds;
593 dsl_dataset_t *fromds = NULL;
594 int err;
595
596 if (strchr(tosnap, '@') == NULL)
597 return (SET_ERROR(EINVAL));
598 if (fromsnap != NULL && strchr(fromsnap, '@') == NULL)
599 return (SET_ERROR(EINVAL));
600
601 err = dsl_pool_hold(tosnap, FTAG, &dp);
602 if (err != 0)
603 return (err);
604
605 err = dsl_dataset_hold(dp, tosnap, FTAG, &ds);
606 if (err != 0) {
607 dsl_pool_rele(dp, FTAG);
608 return (err);
609 }
610
611 if (fromsnap != NULL) {
612 err = dsl_dataset_hold(dp, fromsnap, FTAG, &fromds);
613 if (err != 0) {
614 dsl_dataset_rele(ds, FTAG);
615 dsl_pool_rele(dp, FTAG);
616 return (err);
617 }
618 }
619 return (dmu_send_impl(FTAG, dp, ds, fromds, outfd, fp, off));
620}
621
622int
623dmu_send_estimate(dsl_dataset_t *ds, dsl_dataset_t *fromds, uint64_t *sizep)
624{
625 dsl_pool_t *dp = ds->ds_dir->dd_pool;
626 int err;
627 uint64_t size;
628
629 ASSERT(dsl_pool_config_held(dp));
630
631 /* tosnap must be a snapshot */
632 if (!dsl_dataset_is_snapshot(ds))
633 return (SET_ERROR(EINVAL));
634
635 /*
636 * fromsnap must be an earlier snapshot from the same fs as tosnap,
637 * or the origin's fs.
638 */
639 if (fromds != NULL && !dsl_dataset_is_before(ds, fromds))
640 return (SET_ERROR(EXDEV));
641
642 /* Get uncompressed size estimate of changed data. */
643 if (fromds == NULL) {
644 size = ds->ds_phys->ds_uncompressed_bytes;
645 } else {
646 uint64_t used, comp;
647 err = dsl_dataset_space_written(fromds, ds,
648 &used, &comp, &size);
649 if (err != 0)
650 return (err);
651 }
652
653 /*
654 * Assume that space (both on-disk and in-stream) is dominated by
655 * data. We will adjust for indirect blocks and the copies property,
656 * but ignore per-object space used (eg, dnodes and DRR_OBJECT records).
657 */
658
659 /*
660 * Subtract out approximate space used by indirect blocks.
661 * Assume most space is used by data blocks (non-indirect, non-dnode).
662 * Assume all blocks are recordsize. Assume ditto blocks and
663 * internal fragmentation cancel out compression.
664 *
665 * Therefore, space used by indirect blocks is sizeof(blkptr_t) per
666 * block, which we observe in practice.
667 */
668 uint64_t recordsize;
669 err = dsl_prop_get_int_ds(ds, "recordsize", &recordsize);
670 if (err != 0)
671 return (err);
672 size -= size / recordsize * sizeof (blkptr_t);
673
674 /* Add in the space for the record associated with each block. */
675 size += size / recordsize * sizeof (dmu_replay_record_t);
676
677 *sizep = size;
678
679 return (0);
680}
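/*
 * Worked example (editor's arithmetic, assuming a 128K recordsize, the
 * 128-byte blkptr_t, and the 312-byte dmu_replay_record_t): 1 GiB of
 * changed data is 8192 records, so the estimate subtracts
 * 8192 * 128 = 1 MiB of indirect-block overhead and adds
 * 8192 * 312 ~= 2.4 MiB of record headers -- correction terms well
 * under 1% of the data itself.
 */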
681
682typedef struct dmu_recv_begin_arg {
683 const char *drba_origin;
684 dmu_recv_cookie_t *drba_cookie;
685 cred_t *drba_cred;
686} dmu_recv_begin_arg_t;
687
688static int
689recv_begin_check_existing_impl(dmu_recv_begin_arg_t *drba, dsl_dataset_t *ds,
690 uint64_t fromguid)
691{
692 uint64_t val;
693 int error;
694 dsl_pool_t *dp = ds->ds_dir->dd_pool;
695
696 /* must not have any changes since most recent snapshot */
697 if (!drba->drba_cookie->drc_force &&
698 dsl_dataset_modified_since_lastsnap(ds))
699 return (SET_ERROR(ETXTBSY));
700
701 /* temporary clone name must not exist */
702 error = zap_lookup(dp->dp_meta_objset,
703 ds->ds_dir->dd_phys->dd_child_dir_zapobj, recv_clone_name,
704 8, 1, &val);
705 if (error != ENOENT)
706 return (error == 0 ? EBUSY : error);
707
708 /* new snapshot name must not exist */
709 error = zap_lookup(dp->dp_meta_objset,
710 ds->ds_phys->ds_snapnames_zapobj, drba->drba_cookie->drc_tosnap,
711 8, 1, &val);
712 if (error != ENOENT)
713 return (error == 0 ? EEXIST : error);
714
715 if (fromguid != 0) {
716 /* if incremental, most recent snapshot must match fromguid */
717 if (ds->ds_prev == NULL)
718 return (SET_ERROR(ENODEV));
719
720 /*
721 * most recent snapshot must match fromguid, or there are no
722 * changes since the fromguid one
723 */
724 if (ds->ds_prev->ds_phys->ds_guid != fromguid) {
725 uint64_t birth = ds->ds_prev->ds_phys->ds_bp.blk_birth;
726 uint64_t obj = ds->ds_prev->ds_phys->ds_prev_snap_obj;
727 while (obj != 0) {
728 dsl_dataset_t *snap;
729 error = dsl_dataset_hold_obj(dp, obj, FTAG,
730 &snap);
731 if (error != 0)
732 return (SET_ERROR(ENODEV));
733 if (snap->ds_phys->ds_creation_txg < birth) {
734 dsl_dataset_rele(snap, FTAG);
735 return (SET_ERROR(ENODEV));
736 }
737 if (snap->ds_phys->ds_guid == fromguid) {
738 dsl_dataset_rele(snap, FTAG);
739 break; /* it's ok */
740 }
741 obj = snap->ds_phys->ds_prev_snap_obj;
742 dsl_dataset_rele(snap, FTAG);
743 }
744 if (obj == 0)
745 return (SET_ERROR(ENODEV));
746 }
747 } else {
748 /* if full, most recent snapshot must be $ORIGIN */
749 if (ds->ds_phys->ds_prev_snap_txg >= TXG_INITIAL)
750 return (SET_ERROR(ENODEV));
751 }
752
753 return (0);
754
755}
756
757static int
758dmu_recv_begin_check(void *arg, dmu_tx_t *tx)
759{
760 dmu_recv_begin_arg_t *drba = arg;
761 dsl_pool_t *dp = dmu_tx_pool(tx);
762 struct drr_begin *drrb = drba->drba_cookie->drc_drrb;
763 uint64_t fromguid = drrb->drr_fromguid;
764 int flags = drrb->drr_flags;
765 int error;
766 dsl_dataset_t *ds;
767 const char *tofs = drba->drba_cookie->drc_tofs;
768
769 /* already checked */
770 ASSERT3U(drrb->drr_magic, ==, DMU_BACKUP_MAGIC);
771
772 if (DMU_GET_STREAM_HDRTYPE(drrb->drr_versioninfo) ==
773 DMU_COMPOUNDSTREAM ||
774 drrb->drr_type >= DMU_OST_NUMTYPES ||
775 ((flags & DRR_FLAG_CLONE) && drba->drba_origin == NULL))
776 return (SET_ERROR(EINVAL));
777
778 /* Verify pool version supports SA if SA_SPILL feature set */
779 if ((DMU_GET_FEATUREFLAGS(drrb->drr_versioninfo) &
780 DMU_BACKUP_FEATURE_SA_SPILL) &&
781 spa_version(dp->dp_spa) < SPA_VERSION_SA) {
782 return (SET_ERROR(ENOTSUP));
783 }
784
785 error = dsl_dataset_hold(dp, tofs, FTAG, &ds);
786 if (error == 0) {
787 /* target fs already exists; recv into temp clone */
788
789 /* Can't recv a clone into an existing fs */
790 if (flags & DRR_FLAG_CLONE) {
791 dsl_dataset_rele(ds, FTAG);
792 return (SET_ERROR(EINVAL));
793 }
794
795 error = recv_begin_check_existing_impl(drba, ds, fromguid);
796 dsl_dataset_rele(ds, FTAG);
797 } else if (error == ENOENT) {
798 /* target fs does not exist; must be a full backup or clone */
799 char buf[MAXNAMELEN];
800
801 /*
802 * If it's a non-clone incremental, we are missing the
803 * target fs, so fail the recv.
804 */
805 if (fromguid != 0 && !(flags & DRR_FLAG_CLONE))
806 return (SET_ERROR(ENOENT));
807
808 /* Open the parent of tofs */
809 ASSERT3U(strlen(tofs), <, MAXNAMELEN);
810 (void) strlcpy(buf, tofs, strrchr(tofs, '/') - tofs + 1);
811 error = dsl_dataset_hold(dp, buf, FTAG, &ds);
812 if (error != 0)
813 return (error);
814
815 if (drba->drba_origin != NULL) {
816 dsl_dataset_t *origin;
817 error = dsl_dataset_hold(dp, drba->drba_origin,
818 FTAG, &origin);
819 if (error != 0) {
820 dsl_dataset_rele(ds, FTAG);
821 return (error);
822 }
823 if (!dsl_dataset_is_snapshot(origin)) {
824 dsl_dataset_rele(origin, FTAG);
825 dsl_dataset_rele(ds, FTAG);
826 return (SET_ERROR(EINVAL));
827 }
828 if (origin->ds_phys->ds_guid != fromguid) {
829 dsl_dataset_rele(origin, FTAG);
830 dsl_dataset_rele(ds, FTAG);
831 return (SET_ERROR(ENODEV));
832 }
833 dsl_dataset_rele(origin, FTAG);
834 }
835 dsl_dataset_rele(ds, FTAG);
836 error = 0;
837 }
838 return (error);
839}
840
841static void
842dmu_recv_begin_sync(void *arg, dmu_tx_t *tx)
843{
844 dmu_recv_begin_arg_t *drba = arg;
845 dsl_pool_t *dp = dmu_tx_pool(tx);
846 struct drr_begin *drrb = drba->drba_cookie->drc_drrb;
847 const char *tofs = drba->drba_cookie->drc_tofs;
848 dsl_dataset_t *ds, *newds;
849 uint64_t dsobj;
850 int error;
851 uint64_t crflags;
852
853 crflags = (drrb->drr_flags & DRR_FLAG_CI_DATA) ?
854 DS_FLAG_CI_DATASET : 0;
855
856 error = dsl_dataset_hold(dp, tofs, FTAG, &ds);
857 if (error == 0) {
858 /* create temporary clone */
859 dsobj = dsl_dataset_create_sync(ds->ds_dir, recv_clone_name,
860 ds->ds_prev, crflags, drba->drba_cred, tx);
861 dsl_dataset_rele(ds, FTAG);
862 } else {
863 dsl_dir_t *dd;
864 const char *tail;
865 dsl_dataset_t *origin = NULL;
866
867 VERIFY0(dsl_dir_hold(dp, tofs, FTAG, &dd, &tail));
868
869 if (drba->drba_origin != NULL) {
870 VERIFY0(dsl_dataset_hold(dp, drba->drba_origin,
871 FTAG, &origin));
872 }
873
874 /* Create new dataset. */
875 dsobj = dsl_dataset_create_sync(dd,
876 strrchr(tofs, '/') + 1,
877 origin, crflags, drba->drba_cred, tx);
878 if (origin != NULL)
879 dsl_dataset_rele(origin, FTAG);
880 dsl_dir_rele(dd, FTAG);
881 drba->drba_cookie->drc_newfs = B_TRUE;
882 }
883 VERIFY0(dsl_dataset_own_obj(dp, dsobj, dmu_recv_tag, &newds));
884
885 dmu_buf_will_dirty(newds->ds_dbuf, tx);
886 newds->ds_phys->ds_flags |= DS_FLAG_INCONSISTENT;
887
888 /*
889 * If we actually created a non-clone, we need to create the
890 * objset in our new dataset.
891 */
892 if (BP_IS_HOLE(dsl_dataset_get_blkptr(newds))) {
893 (void) dmu_objset_create_impl(dp->dp_spa,
894 newds, dsl_dataset_get_blkptr(newds), drrb->drr_type, tx);
895 }
896
897 drba->drba_cookie->drc_ds = newds;
898
899 spa_history_log_internal_ds(newds, "receive", tx, "");
900}
901
902/*
903 * NB: callers *MUST* call dmu_recv_stream() if dmu_recv_begin()
904 * succeeds; otherwise we will leak the holds on the datasets.
905 */
906int
907dmu_recv_begin(char *tofs, char *tosnap, struct drr_begin *drrb,
908 boolean_t force, char *origin, dmu_recv_cookie_t *drc)
909{
910 dmu_recv_begin_arg_t drba = { 0 };
911 dmu_replay_record_t *drr;
912
913 bzero(drc, sizeof (dmu_recv_cookie_t));
914 drc->drc_drrb = drrb;
915 drc->drc_tosnap = tosnap;
916 drc->drc_tofs = tofs;
917 drc->drc_force = force;
918
919 if (drrb->drr_magic == BSWAP_64(DMU_BACKUP_MAGIC))
920 drc->drc_byteswap = B_TRUE;
921 else if (drrb->drr_magic != DMU_BACKUP_MAGIC)
922 return (SET_ERROR(EINVAL));
923
924 drr = kmem_zalloc(sizeof (dmu_replay_record_t), KM_SLEEP);
925 drr->drr_type = DRR_BEGIN;
926 drr->drr_u.drr_begin = *drc->drc_drrb;
927 if (drc->drc_byteswap) {
928 fletcher_4_incremental_byteswap(drr,
929 sizeof (dmu_replay_record_t), &drc->drc_cksum);
930 } else {
931 fletcher_4_incremental_native(drr,
932 sizeof (dmu_replay_record_t), &drc->drc_cksum);
933 }
934 kmem_free(drr, sizeof (dmu_replay_record_t));
935
936 if (drc->drc_byteswap) {
937 drrb->drr_magic = BSWAP_64(drrb->drr_magic);
938 drrb->drr_versioninfo = BSWAP_64(drrb->drr_versioninfo);
939 drrb->drr_creation_time = BSWAP_64(drrb->drr_creation_time);
940 drrb->drr_type = BSWAP_32(drrb->drr_type);
941 drrb->drr_toguid = BSWAP_64(drrb->drr_toguid);
942 drrb->drr_fromguid = BSWAP_64(drrb->drr_fromguid);
943 }
944
945 drba.drba_origin = origin;
946 drba.drba_cookie = drc;
947 drba.drba_cred = CRED();
948
949 return (dsl_sync_task(tofs, dmu_recv_begin_check, dmu_recv_begin_sync,
950 &drba, 5));
951}
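/*
 * Editor's note: the BEGIN record is folded into drc_cksum above,
 * using the byteswapping fletcher-4 variant for foreign-endian
 * streams, and only then is drrb itself byteswapped.
 * dmu_recv_stream() seeds ra.cksum from drc_cksum, so the running
 * checksum ends up covering the entire stream by the time DRR_END is
 * checked.
 */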
952
953struct restorearg {
954 int err;
955 boolean_t byteswap;
956 kthread_t *td;
957 struct file *fp;
958 char *buf;
959 uint64_t voff;
960 int bufsize; /* amount of memory allocated for buf */
961 zio_cksum_t cksum;
962 avl_tree_t *guid_to_ds_map;
963};
964
965typedef struct guid_map_entry {
966 uint64_t guid;
967 dsl_dataset_t *gme_ds;
968 avl_node_t avlnode;
969} guid_map_entry_t;
970
971static int
972guid_compare(const void *arg1, const void *arg2)
973{
974 const guid_map_entry_t *gmep1 = arg1;
975 const guid_map_entry_t *gmep2 = arg2;
976
977 if (gmep1->guid < gmep2->guid)
978 return (-1);
979 else if (gmep1->guid > gmep2->guid)
980 return (1);
981 return (0);
982}
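/*
 * Editor's note: guid_compare() orders the guid -> dataset AVL map
 * used by dedup'ed receives. Lookups follow the usual avl_find()
 * pattern (sketch; refguid stands in for a real search key):
 *
 *	guid_map_entry_t gmesrch, *gmep;
 *	avl_index_t where;
 *
 *	gmesrch.guid = refguid;
 *	gmep = avl_find(guid_to_ds_map, &gmesrch, &where);
 */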
983
984static void
985free_guid_map_onexit(void *arg)
986{
987 avl_tree_t *ca = arg;
988 void *cookie = NULL;
989 guid_map_entry_t *gmep;
990
991 while ((gmep = avl_destroy_nodes(ca, &cookie)) != NULL) {
992 dsl_dataset_long_rele(gmep->gme_ds, gmep);
993 dsl_dataset_rele(gmep->gme_ds, gmep);
994 kmem_free(gmep, sizeof (guid_map_entry_t));
995 }
996 avl_destroy(ca);
997 kmem_free(ca, sizeof (avl_tree_t));
998}
999
1000static int
1001restore_bytes(struct restorearg *ra, void *buf, int len, off_t off, ssize_t *resid)
1002{
1003 struct uio auio;
1004 struct iovec aiov;
1005 int error;
1006
1007 aiov.iov_base = buf;
1008 aiov.iov_len = len;
1009 auio.uio_iov = &aiov;
1010 auio.uio_iovcnt = 1;
1011 auio.uio_resid = len;
1012 auio.uio_segflg = UIO_SYSSPACE;
1013 auio.uio_rw = UIO_READ;
1014 auio.uio_offset = off;
1015 auio.uio_td = ra->td;
1016#ifdef _KERNEL
1017 error = fo_read(ra->fp, &auio, ra->td->td_ucred, FOF_OFFSET, ra->td);
1018#else
1019 fprintf(stderr, "%s: returning EOPNOTSUPP\n", __func__);
1020 error = EOPNOTSUPP;
1021#endif
1022 *resid = auio.uio_resid;
1023 return (error);
1024}
1025
1026static void *
1027restore_read(struct restorearg *ra, int len)
1028{
1029 void *rv;
1030 int done = 0;
1031
1032 /* some things will require 8-byte alignment, so everything must */
1033 ASSERT0(len % 8);
1034
1035 while (done < len) {
1036 ssize_t resid;
1037
1038 ra->err = restore_bytes(ra, (caddr_t)ra->buf + done,
1039 len - done, ra->voff, &resid);
1040
1041 if (resid == len - done)
1042 ra->err = SET_ERROR(EINVAL);
1043 ra->voff += len - done - resid;
1044 done = len - resid;
1045 if (ra->err != 0)
1046 return (NULL);
1047 }
1048
1049 ASSERT3U(done, ==, len);
1050 rv = ra->buf;
1051 if (ra->byteswap)
1052 fletcher_4_incremental_byteswap(rv, len, &ra->cksum);
1053 else
1054 fletcher_4_incremental_native(rv, len, &ra->cksum);
1055 return (rv);
1056}
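/*
 * Editor's note: every byte consumed from the stream passes through
 * fletcher_4_incremental_native()/_byteswap() here, so ra->cksum
 * always covers exactly the records read so far. This is what allows
 * the DRR_END handling in dmu_recv_stream() to compare against the
 * checksum as it stood before the DRR_END record was read.
 */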
1057
1058static void
1059backup_byteswap(dmu_replay_record_t *drr)
1060{
1061#define DO64(X) (drr->drr_u.X = BSWAP_64(drr->drr_u.X))
1062#define DO32(X) (drr->drr_u.X = BSWAP_32(drr->drr_u.X))
1063 drr->drr_type = BSWAP_32(drr->drr_type);
1064 drr->drr_payloadlen = BSWAP_32(drr->drr_payloadlen);
1065 switch (drr->drr_type) {
1066 case DRR_BEGIN:
1067 DO64(drr_begin.drr_magic);
1068 DO64(drr_begin.drr_versioninfo);
1069 DO64(drr_begin.drr_creation_time);
1070 DO32(drr_begin.drr_type);
1071 DO32(drr_begin.drr_flags);
1072 DO64(drr_begin.drr_toguid);
1073 DO64(drr_begin.drr_fromguid);
1074 break;
1075 case DRR_OBJECT:
1076 DO64(drr_object.drr_object);
1077 /* DO64(drr_object.drr_allocation_txg); */
1078 DO32(drr_object.drr_type);
1079 DO32(drr_object.drr_bonustype);
1080 DO32(drr_object.drr_blksz);
1081 DO32(drr_object.drr_bonuslen);
1082 DO64(drr_object.drr_toguid);
1083 break;
1084 case DRR_FREEOBJECTS:
1085 DO64(drr_freeobjects.drr_firstobj);
1086 DO64(drr_freeobjects.drr_numobjs);
1087 DO64(drr_freeobjects.drr_toguid);
1088 break;
1089 case DRR_WRITE:
1090 DO64(drr_write.drr_object);
1091 DO32(drr_write.drr_type);
1092 DO64(drr_write.drr_offset);
1093 DO64(drr_write.drr_length);
1094 DO64(drr_write.drr_toguid);
1095 DO64(drr_write.drr_key.ddk_cksum.zc_word[0]);
1096 DO64(drr_write.drr_key.ddk_cksum.zc_word[1]);
1097 DO64(drr_write.drr_key.ddk_cksum.zc_word[2]);
1098 DO64(drr_write.drr_key.ddk_cksum.zc_word[3]);
1099 DO64(drr_write.drr_key.ddk_prop);
1100 break;
1101 case DRR_WRITE_BYREF:
1102 DO64(drr_write_byref.drr_object);
1103 DO64(drr_write_byref.drr_offset);
1104 DO64(drr_write_byref.drr_length);
1105 DO64(drr_write_byref.drr_toguid);
1106 DO64(drr_write_byref.drr_refguid);
1107 DO64(drr_write_byref.drr_refobject);
1108 DO64(drr_write_byref.drr_refoffset);
1109 DO64(drr_write_byref.drr_key.ddk_cksum.zc_word[0]);
1110 DO64(drr_write_byref.drr_key.ddk_cksum.zc_word[1]);
1111 DO64(drr_write_byref.drr_key.ddk_cksum.zc_word[2]);
1112 DO64(drr_write_byref.drr_key.ddk_cksum.zc_word[3]);
1113 DO64(drr_write_byref.drr_key.ddk_prop);
1114 break;
1115 case DRR_FREE:
1116 DO64(drr_free.drr_object);
1117 DO64(drr_free.drr_offset);
1118 DO64(drr_free.drr_length);
1119 DO64(drr_free.drr_toguid);
1120 break;
1121 case DRR_SPILL:
1122 DO64(drr_spill.drr_object);
1123 DO64(drr_spill.drr_length);
1124 DO64(drr_spill.drr_toguid);
1125 break;
1126 case DRR_END:
1127 DO64(drr_end.drr_checksum.zc_word[0]);
1128 DO64(drr_end.drr_checksum.zc_word[1]);
1129 DO64(drr_end.drr_checksum.zc_word[2]);
1130 DO64(drr_end.drr_checksum.zc_word[3]);
1131 DO64(drr_end.drr_toguid);
1132 break;
1133 }
1134#undef DO64
1135#undef DO32
1136}
1137
1138static int
1139restore_object(struct restorearg *ra, objset_t *os, struct drr_object *drro)
1140{
1141 int err;
1142 dmu_tx_t *tx;
1143 void *data = NULL;
1144
1145 if (drro->drr_type == DMU_OT_NONE ||
1146 !DMU_OT_IS_VALID(drro->drr_type) ||
1147 !DMU_OT_IS_VALID(drro->drr_bonustype) ||
1148 drro->drr_checksumtype >= ZIO_CHECKSUM_FUNCTIONS ||
1149 drro->drr_compress >= ZIO_COMPRESS_FUNCTIONS ||
1150 P2PHASE(drro->drr_blksz, SPA_MINBLOCKSIZE) ||
1151 drro->drr_blksz < SPA_MINBLOCKSIZE ||
1152 drro->drr_blksz > SPA_MAXBLOCKSIZE ||
1153 drro->drr_bonuslen > DN_MAX_BONUSLEN) {
1154 return (SET_ERROR(EINVAL));
1155 }
1156
1157 err = dmu_object_info(os, drro->drr_object, NULL);
1158
1159 if (err != 0 && err != ENOENT)
1160 return (SET_ERROR(EINVAL));
1161
1162 if (drro->drr_bonuslen) {
1163 data = restore_read(ra, P2ROUNDUP(drro->drr_bonuslen, 8));
1164 if (ra->err != 0)
1165 return (ra->err);
1166 }
1167
1168 if (err == ENOENT) {
1169 /* currently free, want to be allocated */
1170 tx = dmu_tx_create(os);
1171 dmu_tx_hold_bonus(tx, DMU_NEW_OBJECT);
1172 err = dmu_tx_assign(tx, TXG_WAIT);
1173 if (err != 0) {
1174 dmu_tx_abort(tx);
1175 return (err);
1176 }
1177 err = dmu_object_claim(os, drro->drr_object,
1178 drro->drr_type, drro->drr_blksz,
1179 drro->drr_bonustype, drro->drr_bonuslen, tx);
1180 dmu_tx_commit(tx);
1181 } else {
1182 /* currently allocated, want to be allocated */
1183 err = dmu_object_reclaim(os, drro->drr_object,
1184 drro->drr_type, drro->drr_blksz,
1185 drro->drr_bonustype, drro->drr_bonuslen);
1186 }
1187 if (err != 0) {
1188 return (SET_ERROR(EINVAL));
1189 }
1190
1191 tx = dmu_tx_create(os);
1192 dmu_tx_hold_bonus(tx, drro->drr_object);
1193 err = dmu_tx_assign(tx, TXG_WAIT);
1194 if (err != 0) {
1195 dmu_tx_abort(tx);
1196 return (err);
1197 }
1198
1199 dmu_object_set_checksum(os, drro->drr_object, drro->drr_checksumtype,
1200 tx);
1201 dmu_object_set_compress(os, drro->drr_object, drro->drr_compress, tx);
1202
1203 if (data != NULL) {
1204 dmu_buf_t *db;
1205
1206 VERIFY(0 == dmu_bonus_hold(os, drro->drr_object, FTAG, &db));
1207 dmu_buf_will_dirty(db, tx);
1208
1209 ASSERT3U(db->db_size, >=, drro->drr_bonuslen);
1210 bcopy(data, db->db_data, drro->drr_bonuslen);
1211 if (ra->byteswap) {
1212 dmu_object_byteswap_t byteswap =
1213 DMU_OT_BYTESWAP(drro->drr_bonustype);
1214 dmu_ot_byteswap[byteswap].ob_func(db->db_data,
1215 drro->drr_bonuslen);
1216 }
1217 dmu_buf_rele(db, FTAG);
1218 }
1219 dmu_tx_commit(tx);
1220 return (0);
1221}
1222
1223/* ARGSUSED */
1224static int
1225restore_freeobjects(struct restorearg *ra, objset_t *os,
1226 struct drr_freeobjects *drrfo)
1227{
1228 uint64_t obj;
1229
1230 if (drrfo->drr_firstobj + drrfo->drr_numobjs < drrfo->drr_firstobj)
1231 return (SET_ERROR(EINVAL));
1232
1233 for (obj = drrfo->drr_firstobj;
1234 obj < drrfo->drr_firstobj + drrfo->drr_numobjs;
1235 (void) dmu_object_next(os, &obj, FALSE, 0)) {
1236 int err;
1237
1238 if (dmu_object_info(os, obj, NULL) != 0)
1239 continue;
1240
1241 err = dmu_free_object(os, obj);
1242 if (err != 0)
1243 return (err);
1244 }
1245 return (0);
1246}
1247
1248static int
1249restore_write(struct restorearg *ra, objset_t *os,
1250 struct drr_write *drrw)
1251{
1252 dmu_tx_t *tx;
1253 void *data;
1254 int err;
1255
1256 if (drrw->drr_offset + drrw->drr_length < drrw->drr_offset ||
1257 !DMU_OT_IS_VALID(drrw->drr_type))
1258 return (SET_ERROR(EINVAL));
1259
1260 data = restore_read(ra, drrw->drr_length);
1261 if (data == NULL)
1262 return (ra->err);
1263
1264 if (dmu_object_info(os, drrw->drr_object, NULL) != 0)
1265 return (SET_ERROR(EINVAL));
1266
1267 tx = dmu_tx_create(os);
1268
1269 dmu_tx_hold_write(tx, drrw->drr_object,
1270 drrw->drr_offset, drrw->drr_length);
1271 err = dmu_tx_assign(tx, TXG_WAIT);
1272 if (err != 0) {
1273 dmu_tx_abort(tx);
1274 return (err);
1275 }
1276 if (ra->byteswap) {
1277 dmu_object_byteswap_t byteswap =
1278 DMU_OT_BYTESWAP(drrw->drr_type);
1279 dmu_ot_byteswap[byteswap].ob_func(data, drrw->drr_length);
1280 }
1281 dmu_write(os, drrw->drr_object,
1282 drrw->drr_offset, drrw->drr_length, data, tx);
1283 dmu_tx_commit(tx);
1284 return (0);
1285}
1286
1287/*
1288 * Handle a DRR_WRITE_BYREF record. This record is used in dedup'ed
1289 * streams to refer to a copy of the data that is already on the
1290 * system because it came in earlier in the stream. This function
1291 * finds the earlier copy of the data, and uses that copy instead of
1292 * data from the stream to fulfill this write.
1293 */
1294static int
1295restore_write_byref(struct restorearg *ra, objset_t *os,
1296 struct drr_write_byref *drrwbr)
1297{
1298 dmu_tx_t *tx;
1299 int err;
1300 guid_map_entry_t gmesrch;
1301 guid_map_entry_t *gmep;
1302 avl_index_t where;
1303 objset_t *ref_os = NULL;
1304 dmu_buf_t *dbp;
1305
1306 if (drrwbr->drr_offset + drrwbr->drr_length < drrwbr->drr_offset)
1307 return (SET_ERROR(EINVAL));
1308
1309 /*
1310 * If the GUID of the referenced dataset is different from the
1311 * GUID of the target dataset, find the referenced dataset.
1312 */
1313 if (drrwbr->drr_toguid != drrwbr->drr_refguid) {
1314 gmesrch.guid = drrwbr->drr_refguid;
1315 if ((gmep = avl_find(ra->guid_to_ds_map, &gmesrch,
1316 &where)) == NULL) {
1317 return (SET_ERROR(EINVAL));
1318 }
1319 if (dmu_objset_from_ds(gmep->gme_ds, &ref_os))
1320 return (SET_ERROR(EINVAL));
1321 } else {
1322 ref_os = os;
1323 }
1324
1325 if (err = dmu_buf_hold(ref_os, drrwbr->drr_refobject,
1326 drrwbr->drr_refoffset, FTAG, &dbp, DMU_READ_PREFETCH))
1327 return (err);
1328
1329 tx = dmu_tx_create(os);
1330
1331 dmu_tx_hold_write(tx, drrwbr->drr_object,
1332 drrwbr->drr_offset, drrwbr->drr_length);
1333 err = dmu_tx_assign(tx, TXG_WAIT);
1334 if (err != 0) {
1335 dmu_tx_abort(tx);
1336 return (err);
1337 }
1338 dmu_write(os, drrwbr->drr_object,
1339 drrwbr->drr_offset, drrwbr->drr_length, dbp->db_data, tx);
1340 dmu_buf_rele(dbp, FTAG);
1341 dmu_tx_commit(tx);
1342 return (0);
1343}
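/*
 * Editor's note: conceptually a DRR_WRITE_BYREF replays an earlier
 * write without carrying the payload (sketch with pseudo-calls):
 *
 *	ref_os = lookup(drr_refguid)             (guid_to_ds_map above)
 *	data   = read(ref_os, drr_refobject, drr_refoffset)
 *	write(os, drr_object, drr_offset, drr_length, data)
 *
 * so deduplicated blocks travel across the stream exactly once.
 */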
1344
1345static int
1346restore_spill(struct restorearg *ra, objset_t *os, struct drr_spill *drrs)
1347{
1348 dmu_tx_t *tx;
1349 void *data;
1350 dmu_buf_t *db, *db_spill;
1351 int err;
1352
1353 if (drrs->drr_length < SPA_MINBLOCKSIZE ||
1354 drrs->drr_length > SPA_MAXBLOCKSIZE)
1355 return (SET_ERROR(EINVAL));
1356
1357 data = restore_read(ra, drrs->drr_length);
1358 if (data == NULL)
1359 return (ra->err);
1360
1361 if (dmu_object_info(os, drrs->drr_object, NULL) != 0)
1362 return (SET_ERROR(EINVAL));
1363
1364 VERIFY(0 == dmu_bonus_hold(os, drrs->drr_object, FTAG, &db));
1365 if ((err = dmu_spill_hold_by_bonus(db, FTAG, &db_spill)) != 0) {
1366 dmu_buf_rele(db, FTAG);
1367 return (err);
1368 }
1369
1370 tx = dmu_tx_create(os);
1371
1372 dmu_tx_hold_spill(tx, db->db_object);
1373
1374 err = dmu_tx_assign(tx, TXG_WAIT);
1375 if (err != 0) {
1376 dmu_buf_rele(db, FTAG);
1377 dmu_buf_rele(db_spill, FTAG);
1378 dmu_tx_abort(tx);
1379 return (err);
1380 }
1381 dmu_buf_will_dirty(db_spill, tx);
1382
1383 if (db_spill->db_size < drrs->drr_length)
1384 VERIFY(0 == dbuf_spill_set_blksz(db_spill,
1385 drrs->drr_length, tx));
1386 bcopy(data, db_spill->db_data, drrs->drr_length);
1387
1388 dmu_buf_rele(db, FTAG);
1389 dmu_buf_rele(db_spill, FTAG);
1390
1391 dmu_tx_commit(tx);
1392 return (0);
1393}
1394
1395/* ARGSUSED */
1396static int
1397restore_free(struct restorearg *ra, objset_t *os,
1398 struct drr_free *drrf)
1399{
1400 int err;
1401
1402 if (drrf->drr_length != -1ULL &&
1403 drrf->drr_offset + drrf->drr_length < drrf->drr_offset)
1404 return (SET_ERROR(EINVAL));
1405
1406 if (dmu_object_info(os, drrf->drr_object, NULL) != 0)
1407 return (SET_ERROR(EINVAL));
1408
1409 err = dmu_free_long_range(os, drrf->drr_object,
1410 drrf->drr_offset, drrf->drr_length);
1411 return (err);
1412}
1413
1414/* used to destroy the drc_ds on error */
1415static void
1416dmu_recv_cleanup_ds(dmu_recv_cookie_t *drc)
1417{
1418 char name[MAXNAMELEN];
1419 dsl_dataset_name(drc->drc_ds, name);
1420 dsl_dataset_disown(drc->drc_ds, dmu_recv_tag);
1421 (void) dsl_destroy_head(name);
1422}
1423
1424/*
1425 * NB: callers *must* call dmu_recv_end() if this succeeds.
1426 */
1427int
1428dmu_recv_stream(dmu_recv_cookie_t *drc, struct file *fp, offset_t *voffp,
1429 int cleanup_fd, uint64_t *action_handlep)
1430{
1431 struct restorearg ra = { 0 };
1432 dmu_replay_record_t *drr;
1433 objset_t *os;
1434 zio_cksum_t pcksum;
1435 int featureflags;
1436
1437 ra.byteswap = drc->drc_byteswap;
1438 ra.cksum = drc->drc_cksum;
1439 ra.td = curthread;
1440 ra.fp = fp;
1441 ra.voff = *voffp;
1442 ra.bufsize = 1<<20;
1443 ra.buf = kmem_alloc(ra.bufsize, KM_SLEEP);
1444
1445 /* these were verified in dmu_recv_begin */
1446 ASSERT3U(DMU_GET_STREAM_HDRTYPE(drc->drc_drrb->drr_versioninfo), ==,
1447 DMU_SUBSTREAM);
1448 ASSERT3U(drc->drc_drrb->drr_type, <, DMU_OST_NUMTYPES);
1449
1450 /*
1451 * Open the objset we are modifying.
1452 */
1453 VERIFY0(dmu_objset_from_ds(drc->drc_ds, &os));
1454
1455 ASSERT(drc->drc_ds->ds_phys->ds_flags & DS_FLAG_INCONSISTENT);
1456
1457 featureflags = DMU_GET_FEATUREFLAGS(drc->drc_drrb->drr_versioninfo);
1458
1459 /* if this stream is dedup'ed, set up the avl tree for guid mapping */
1460 if (featureflags & DMU_BACKUP_FEATURE_DEDUP) {
1461 minor_t minor;
1462
1463 if (cleanup_fd == -1) {
1464 ra.err = SET_ERROR(EBADF);
1465 goto out;
1466 }
1467 ra.err = zfs_onexit_fd_hold(cleanup_fd, &minor);
1468 if (ra.err != 0) {
1469 cleanup_fd = -1;
1470 goto out;
1471 }
1472
1473 if (*action_handlep == 0) {
1474 ra.guid_to_ds_map =
1475 kmem_alloc(sizeof (avl_tree_t), KM_SLEEP);
1476 avl_create(ra.guid_to_ds_map, guid_compare,
1477 sizeof (guid_map_entry_t),
1478 offsetof(guid_map_entry_t, avlnode));
1479 ra.err = zfs_onexit_add_cb(minor,
1480 free_guid_map_onexit, ra.guid_to_ds_map,
1481 action_handlep);
1482 if (ra.err != 0)
1483 goto out;
1484 } else {
1485 ra.err = zfs_onexit_cb_data(minor, *action_handlep,
1486 (void **)&ra.guid_to_ds_map);
1487 if (ra.err != 0)
1488 goto out;
1489 }
1490
1491 drc->drc_guid_to_ds_map = ra.guid_to_ds_map;
1492 }
1493
1494 /*
1495 * Read records and process them.
1496 */
1497 pcksum = ra.cksum;
1498 while (ra.err == 0 &&
1499 NULL != (drr = restore_read(&ra, sizeof (*drr)))) {
1500 if (issig(JUSTLOOKING) && issig(FORREAL)) {
1501 ra.err = SET_ERROR(EINTR);
1502 goto out;
1503 }
1504
1505 if (ra.byteswap)
1506 backup_byteswap(drr);
1507
1508 switch (drr->drr_type) {
1509 case DRR_OBJECT:
1510 {
1511 /*
1512 * We need to make a copy of the record header,
1513 * because restore_{object,write} may need to
1514 * restore_read(), which will invalidate drr.
1515 */
1516 struct drr_object drro = drr->drr_u.drr_object;
1517 ra.err = restore_object(&ra, os, &drro);
1518 break;
1519 }
1520 case DRR_FREEOBJECTS:
1521 {
1522 struct drr_freeobjects drrfo =
1523 drr->drr_u.drr_freeobjects;
1524 ra.err = restore_freeobjects(&ra, os, &drrfo);
1525 break;
1526 }
1527 case DRR_WRITE:
1528 {
1529 struct drr_write drrw = drr->drr_u.drr_write;
1530 ra.err = restore_write(&ra, os, &drrw);
1531 break;
1532 }
1533 case DRR_WRITE_BYREF:
1534 {
1535 struct drr_write_byref drrwbr =
1536 drr->drr_u.drr_write_byref;
1537 ra.err = restore_write_byref(&ra, os, &drrwbr);
1538 break;
1539 }
1540 case DRR_FREE:
1541 {
1542 struct drr_free drrf = drr->drr_u.drr_free;
1543 ra.err = restore_free(&ra, os, &drrf);
1544 break;
1545 }
1546 case DRR_END:
1547 {
1548 struct drr_end drre = drr->drr_u.drr_end;
1549 /*
1550 * We compare against the *previous* checksum
1551 * value, because the stored checksum is of
1552 * everything before the DRR_END record.
1553 */
1554 if (!ZIO_CHECKSUM_EQUAL(drre.drr_checksum, pcksum))
1555 ra.err = SET_ERROR(ECKSUM);
1556 goto out;
1557 }
1558 case DRR_SPILL:
1559 {
1560 struct drr_spill drrs = drr->drr_u.drr_spill;
1561 ra.err = restore_spill(&ra, os, &drrs);
1562 break;
1563 }
1564 default:
1565 ra.err = SET_ERROR(EINVAL);
1566 goto out;
1567 }
1568 pcksum = ra.cksum;
1569 }
1570 ASSERT(ra.err != 0);
1571
1572out:
1573 if ((featureflags & DMU_BACKUP_FEATURE_DEDUP) && (cleanup_fd != -1))
1574 zfs_onexit_fd_rele(cleanup_fd);
1575
1576 if (ra.err != 0) {
1577 /*
1578 * destroy what we created, so we don't leave it in the
1579 * inconsistent restoring state.
1580 */
1581 dmu_recv_cleanup_ds(drc);
1582 }
1583
1584 kmem_free(ra.buf, ra.bufsize);
1585 *voffp = ra.voff;
1586 return (ra.err);
1587}
1588
1589static int
1590dmu_recv_end_check(void *arg, dmu_tx_t *tx)
1591{
1592 dmu_recv_cookie_t *drc = arg;
1593 dsl_pool_t *dp = dmu_tx_pool(tx);
1594 int error;
1595
1596 ASSERT3P(drc->drc_ds->ds_owner, ==, dmu_recv_tag);
1597
1598 if (!drc->drc_newfs) {
1599 dsl_dataset_t *origin_head;
1600
1601 error = dsl_dataset_hold(dp, drc->drc_tofs, FTAG, &origin_head);
1602 if (error != 0)
1603 return (error);
1604 error = dsl_dataset_clone_swap_check_impl(drc->drc_ds,
1605 origin_head, drc->drc_force);
1606 if (error != 0) {
1607 dsl_dataset_rele(origin_head, FTAG);
1608 return (error);
1609 }
1610 error = dsl_dataset_snapshot_check_impl(origin_head,
1611 drc->drc_tosnap, tx);
1612 dsl_dataset_rele(origin_head, FTAG);
1613 if (error != 0)
1614 return (error);
1615
1616 error = dsl_destroy_head_check_impl(drc->drc_ds, 1);
1617 } else {
1618 error = dsl_dataset_snapshot_check_impl(drc->drc_ds,
1619 drc->drc_tosnap, tx);
1620 }
1621 return (error);
1622}
1623
1624static void
1625dmu_recv_end_sync(void *arg, dmu_tx_t *tx)
1626{
1627 dmu_recv_cookie_t *drc = arg;
1628 dsl_pool_t *dp = dmu_tx_pool(tx);
1629
1630 spa_history_log_internal_ds(drc->drc_ds, "finish receiving",
1631 tx, "snap=%s", drc->drc_tosnap);
1632
1633 if (!drc->drc_newfs) {
1634 dsl_dataset_t *origin_head;
1635
1636 VERIFY0(dsl_dataset_hold(dp, drc->drc_tofs, FTAG,
1637 &origin_head));
1638 dsl_dataset_clone_swap_sync_impl(drc->drc_ds,
1639 origin_head, tx);
1640 dsl_dataset_snapshot_sync_impl(origin_head,
1641 drc->drc_tosnap, tx);
1642
1643 /* set snapshot's creation time and guid */
1644 dmu_buf_will_dirty(origin_head->ds_prev->ds_dbuf, tx);
1645 origin_head->ds_prev->ds_phys->ds_creation_time =
1646 drc->drc_drrb->drr_creation_time;
1647 origin_head->ds_prev->ds_phys->ds_guid =
1648 drc->drc_drrb->drr_toguid;
1649 origin_head->ds_prev->ds_phys->ds_flags &=
1650 ~DS_FLAG_INCONSISTENT;
1651
1652 dmu_buf_will_dirty(origin_head->ds_dbuf, tx);
1653 origin_head->ds_phys->ds_flags &= ~DS_FLAG_INCONSISTENT;
1654
1655 dsl_dataset_rele(origin_head, FTAG);
1656 dsl_destroy_head_sync_impl(drc->drc_ds, tx);
1657 } else {
1658 dsl_dataset_t *ds = drc->drc_ds;
1659
1660 dsl_dataset_snapshot_sync_impl(ds, drc->drc_tosnap, tx);
1661
1662 /* set snapshot's creation time and guid */
1663 dmu_buf_will_dirty(ds->ds_prev->ds_dbuf, tx);
1664 ds->ds_prev->ds_phys->ds_creation_time =
1665 drc->drc_drrb->drr_creation_time;
1666 ds->ds_prev->ds_phys->ds_guid = drc->drc_drrb->drr_toguid;
1667 ds->ds_prev->ds_phys->ds_flags &= ~DS_FLAG_INCONSISTENT;
1668
1669 dmu_buf_will_dirty(ds->ds_dbuf, tx);
1670 ds->ds_phys->ds_flags &= ~DS_FLAG_INCONSISTENT;
1671 }
1672 drc->drc_newsnapobj = drc->drc_ds->ds_phys->ds_prev_snap_obj;
1673 /*
1674 * Release the hold from dmu_recv_begin. This must be done before
1675 * we return to open context, so that when we free the dataset's dnode,
1676 * we can evict its bonus buffer.
1677 */
1678 dsl_dataset_disown(drc->drc_ds, dmu_recv_tag);
1679 drc->drc_ds = NULL;
1680}
1681
1682static int
1683add_ds_to_guidmap(const char *name, avl_tree_t *guid_map, uint64_t snapobj)
1684{
1685 dsl_pool_t *dp;
1686 dsl_dataset_t *snapds;
1687 guid_map_entry_t *gmep;
1688 int err;
1689
1690 ASSERT(guid_map != NULL);
1691
1692 err = dsl_pool_hold(name, FTAG, &dp);
1693 if (err != 0)
1694 return (err);
1695 gmep = kmem_alloc(sizeof (*gmep), KM_SLEEP);
1696 err = dsl_dataset_hold_obj(dp, snapobj, gmep, &snapds);
1697 if (err == 0) {
1698 gmep->guid = snapds->ds_phys->ds_guid;
1699 gmep->gme_ds = snapds;
1700 avl_add(guid_map, gmep);
1701 dsl_dataset_long_hold(snapds, gmep);
1702 } else
1703 kmem_free(gmep, sizeof (*gmep));
1704
1705 dsl_pool_rele(dp, FTAG);
1706 return (err);
1707}
1708
1709static int dmu_recv_end_modified_blocks = 3;
1710
1711static int
1712dmu_recv_existing_end(dmu_recv_cookie_t *drc)
1713{
1714 int error;
1715 char name[MAXNAMELEN];
1716
1717#ifdef _KERNEL
1718 /*
1719 * We will be destroying the ds; make sure its origin is unmounted if
1720 * necessary.
1721 */
1722 dsl_dataset_name(drc->drc_ds, name);
1723 zfs_destroy_unmount_origin(name);
1724#endif
1725
1726 error = dsl_sync_task(drc->drc_tofs,
1727 dmu_recv_end_check, dmu_recv_end_sync, drc,
1728 dmu_recv_end_modified_blocks);
1729
1730 if (error != 0)
1731 dmu_recv_cleanup_ds(drc);
1732 return (error);
1733}
1734
1735static int
1736dmu_recv_new_end(dmu_recv_cookie_t *drc)
1737{
1738 int error;
1739
1740 error = dsl_sync_task(drc->drc_tofs,
1741 dmu_recv_end_check, dmu_recv_end_sync, drc,
1742 dmu_recv_end_modified_blocks);
1743
1744 if (error != 0) {
1745 dmu_recv_cleanup_ds(drc);
1746 } else if (drc->drc_guid_to_ds_map != NULL) {
1747 (void) add_ds_to_guidmap(drc->drc_tofs,
1748 drc->drc_guid_to_ds_map,
1749 drc->drc_newsnapobj);
1750 }
1751 return (error);
1752}
1753
1754int
1755dmu_recv_end(dmu_recv_cookie_t *drc)
1756{
1757 if (drc->drc_newfs)
1758 return (dmu_recv_new_end(drc));
1759 else
1760 return (dmu_recv_existing_end(drc));
1761}