/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright 2011 Nexenta Systems, Inc. All rights reserved.
 * Copyright (c) 2011, 2015 by Delphix. All rights reserved.
 * Copyright (c) 2014, Joyent, Inc. All rights reserved.
 * Copyright (c) 2012, Martin Matuska <mm@FreeBSD.org>. All rights reserved.
 * Copyright 2014 HybridCluster. All rights reserved.
 * Copyright 2016 RackTop Systems.
 * Copyright (c) 2014 Integros [integros.com]
 * Copyright (c) 2018, loli10K <ezomori.nozomu@gmail.com>. All rights reserved.
 */

#include <sys/dmu.h>
#include <sys/dmu_impl.h>
#include <sys/dmu_tx.h>
#include <sys/dbuf.h>
#include <sys/dnode.h>
#include <sys/zfs_context.h>
#include <sys/dmu_objset.h>
#include <sys/dmu_traverse.h>
#include <sys/dsl_dataset.h>
#include <sys/dsl_dir.h>
#include <sys/dsl_prop.h>
#include <sys/dsl_pool.h>
#include <sys/dsl_synctask.h>
#include <sys/zfs_ioctl.h>
#include <sys/zap.h>
#include <sys/zio_checksum.h>
#include <sys/zfs_znode.h>
#include <zfs_fletcher.h>
#include <sys/avl.h>
#include <sys/ddt.h>
#include <sys/zfs_onexit.h>
#include <sys/dmu_send.h>
#include <sys/dsl_destroy.h>
#include <sys/blkptr.h>
#include <sys/dsl_bookmark.h>
#include <sys/zfeature.h>
#include <sys/bqueue.h>
#ifdef __FreeBSD__
#include <sys/zvol.h>
#endif

#ifdef __FreeBSD__
#undef dump_write
#define	dump_write dmu_dump_write
#endif

/* Set this tunable to TRUE to replace corrupt data with 0x2f5baddb10c */
int zfs_send_corrupt_data = B_FALSE;
int zfs_send_queue_length = 16 * 1024 * 1024;
int zfs_recv_queue_length = 16 * 1024 * 1024;
/* Set this tunable to FALSE to disable setting of DRR_FLAG_FREERECORDS */
int zfs_send_set_freerecords_bit = B_TRUE;

#ifdef _KERNEL
TUNABLE_INT("vfs.zfs.send_set_freerecords_bit", &zfs_send_set_freerecords_bit);
#endif

static char *dmu_recv_tag = "dmu_recv_tag";
const char *recv_clone_name = "%recv";

/*
 * Use this to override the recordsize calculation for fast zfs send estimates.
 */
uint64_t zfs_override_estimate_recordsize = 0;

#define	BP_SPAN(datablkszsec, indblkshift, level) \
	(((uint64_t)datablkszsec) << (SPA_MINBLOCKSHIFT + \
	(level) * (indblkshift - SPA_BLKPTRSHIFT)))
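
/*
 * Worked example (illustrative only): with 128K data blocks
 * (datablkszsec = 256) and 128K indirect blocks (indblkshift = 17),
 * a level-1 block pointer spans 256 << (9 + 1 * (17 - 7)) = 128M of
 * file data, since each 128K indirect block holds 1024 128-byte
 * block pointers.
 */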

static void byteswap_record(dmu_replay_record_t *drr);

struct send_thread_arg {
	bqueue_t q;
	dsl_dataset_t *ds;	/* Dataset to traverse */
	uint64_t fromtxg;	/* Traverse from this txg */
	int flags;		/* flags to pass to traverse_dataset */
	int error_code;
	boolean_t cancel;
	zbookmark_phys_t resume;
};

struct send_block_record {
	boolean_t eos_marker;	/* Marks the end of the stream */
	blkptr_t bp;
	zbookmark_phys_t zb;
	uint8_t indblkshift;
	uint16_t datablkszsec;
	bqueue_node_t ln;
};

static int
dump_bytes(dmu_sendarg_t *dsp, void *buf, int len)
{
	dsl_dataset_t *ds = dmu_objset_ds(dsp->dsa_os);
	struct uio auio;
	struct iovec aiov;

	/*
	 * The code does not rely on this (len being a multiple of 8). We keep
	 * this assertion because of the corresponding assertion in
	 * receive_read(). Keeping this assertion ensures that we do not
	 * inadvertently break backwards compatibility (causing the assertion
	 * in receive_read() to trigger on old software).
	 *
	 * Removing the assertions could be rolled into a new feature that uses
	 * data that isn't 8-byte aligned; if the assertions were removed, a
	 * feature flag would have to be added.
	 */

	ASSERT0(len % 8);

	aiov.iov_base = buf;
	aiov.iov_len = len;
	auio.uio_iov = &aiov;
	auio.uio_iovcnt = 1;
	auio.uio_resid = len;
	auio.uio_segflg = UIO_SYSSPACE;
	auio.uio_rw = UIO_WRITE;
	auio.uio_offset = (off_t)-1;
	auio.uio_td = dsp->dsa_td;
#ifdef _KERNEL
	if (dsp->dsa_fp->f_type == DTYPE_VNODE)
		bwillwrite();
	dsp->dsa_err = fo_write(dsp->dsa_fp, &auio, dsp->dsa_td->td_ucred, 0,
	    dsp->dsa_td);
#else
	fprintf(stderr, "%s: returning EOPNOTSUPP\n", __func__);
	dsp->dsa_err = EOPNOTSUPP;
#endif
	mutex_enter(&ds->ds_sendstream_lock);
	*dsp->dsa_off += len;
	mutex_exit(&ds->ds_sendstream_lock);

	return (dsp->dsa_err);
}
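
/*
 * All stream output funnels through dump_bytes().  Note that
 * *dsp->dsa_off is advanced by the full length even when fo_write()
 * fails; callers are expected to consult dsa_err rather than the
 * offset to detect errors.
 */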

/*
 * For all record types except BEGIN, fill in the checksum (overlaid in
 * drr_u.drr_checksum.drr_checksum).  The checksum verifies everything
 * up to the start of the checksum itself.
 */
static int
dump_record(dmu_sendarg_t *dsp, void *payload, int payload_len)
{
	ASSERT3U(offsetof(dmu_replay_record_t, drr_u.drr_checksum.drr_checksum),
	    ==, sizeof (dmu_replay_record_t) - sizeof (zio_cksum_t));
	(void) fletcher_4_incremental_native(dsp->dsa_drr,
	    offsetof(dmu_replay_record_t, drr_u.drr_checksum.drr_checksum),
	    &dsp->dsa_zc);
	if (dsp->dsa_drr->drr_type == DRR_BEGIN) {
		dsp->dsa_sent_begin = B_TRUE;
	} else {
		ASSERT(ZIO_CHECKSUM_IS_ZERO(&dsp->dsa_drr->drr_u.
		    drr_checksum.drr_checksum));
		dsp->dsa_drr->drr_u.drr_checksum.drr_checksum = dsp->dsa_zc;
	}
	if (dsp->dsa_drr->drr_type == DRR_END) {
		dsp->dsa_sent_end = B_TRUE;
	}
	(void) fletcher_4_incremental_native(&dsp->dsa_drr->
	    drr_u.drr_checksum.drr_checksum,
	    sizeof (zio_cksum_t), &dsp->dsa_zc);
	if (dump_bytes(dsp, dsp->dsa_drr, sizeof (dmu_replay_record_t)) != 0)
		return (SET_ERROR(EINTR));
	if (payload_len != 0) {
		(void) fletcher_4_incremental_native(payload, payload_len,
		    &dsp->dsa_zc);
		if (dump_bytes(dsp, payload, payload_len) != 0)
			return (SET_ERROR(EINTR));
	}
	return (0);
}
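
/*
 * Stream framing produced by dump_record(), for reference:
 *
 *	+---------------------------------------------+
 *	| dmu_replay_record_t                         |
 *	|   drr_checksum (last 32 bytes): fletcher4   |
 *	|   of everything previously sent; left zero  |
 *	|   for DRR_BEGIN                             |
 *	+---------------------------------------------+
 *	| optional payload (payload_len bytes)        |
 *	+---------------------------------------------+
 *
 * The running checksum (dsa_zc) continues across records, so each
 * record's checksum covers the entire stream up to that point.
 */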

/*
 * Fill in the drr_free struct, or perform aggregation if the previous record
 * is also a free record, and the two are adjacent.
 *
 * Note that we send free records even for a full send, because we want to be
 * able to receive a full send as a clone, which requires a list of all the
 * free and freeobject records that were generated on the source.
 */
static int
dump_free(dmu_sendarg_t *dsp, uint64_t object, uint64_t offset,
    uint64_t length)
{
	struct drr_free *drrf = &(dsp->dsa_drr->drr_u.drr_free);

	/*
	 * When we receive a free record, dbuf_free_range() assumes
	 * that the receiving system doesn't have any dbufs in the range
	 * being freed.  This is always true because there is a one-record
	 * constraint: we only send one WRITE record for any given
	 * object,offset.  We know that the one-record constraint is
	 * true because we always send data in increasing order by
	 * object,offset.
	 *
	 * If the increasing-order constraint ever changes, we should find
	 * another way to assert that the one-record constraint is still
	 * satisfied.
	 */
	ASSERT(object > dsp->dsa_last_data_object ||
	    (object == dsp->dsa_last_data_object &&
	    offset > dsp->dsa_last_data_offset));

	if (length != -1ULL && offset + length < offset)
		length = -1ULL;

	/*
	 * If there is a pending op, but it's not PENDING_FREE, push it out,
	 * since free block aggregation can only be done for blocks of the
	 * same type (i.e., DRR_FREE records can only be aggregated with
	 * other DRR_FREE records; DRR_FREEOBJECTS records can only be
	 * aggregated with other DRR_FREEOBJECTS records).
	 */
	if (dsp->dsa_pending_op != PENDING_NONE &&
	    dsp->dsa_pending_op != PENDING_FREE) {
		if (dump_record(dsp, NULL, 0) != 0)
			return (SET_ERROR(EINTR));
		dsp->dsa_pending_op = PENDING_NONE;
	}

	if (dsp->dsa_pending_op == PENDING_FREE) {
		/*
		 * There should never be a PENDING_FREE if length is -1
		 * (because dump_dnode is the only place where this
		 * function is called with a -1, and only after flushing
		 * any pending record).
		 */
		ASSERT(length != -1ULL);
		/*
		 * Check to see whether this free block can be aggregated
		 * with the pending one.
		 */
		if (drrf->drr_object == object && drrf->drr_offset +
		    drrf->drr_length == offset) {
			drrf->drr_length += length;
			return (0);
		} else {
			/* not a continuation.  Push out pending record */
			if (dump_record(dsp, NULL, 0) != 0)
				return (SET_ERROR(EINTR));
			dsp->dsa_pending_op = PENDING_NONE;
		}
	}
	/* create a FREE record and make it pending */
	bzero(dsp->dsa_drr, sizeof (dmu_replay_record_t));
	dsp->dsa_drr->drr_type = DRR_FREE;
	drrf->drr_object = object;
	drrf->drr_offset = offset;
	drrf->drr_length = length;
	drrf->drr_toguid = dsp->dsa_toguid;
	if (length == -1ULL) {
		if (dump_record(dsp, NULL, 0) != 0)
			return (SET_ERROR(EINTR));
	} else {
		dsp->dsa_pending_op = PENDING_FREE;
	}

	return (0);
}
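
/*
 * Illustrative example of the aggregation above: dump_free(obj 5,
 * off 0, len 128K) followed by dump_free(obj 5, off 128K, len 128K)
 * leaves a single pending DRR_FREE covering [0, 256K).  The pending
 * record is only emitted once a non-adjacent free, a record of a
 * different type, or an end-of-stream flush arrives.
 */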

static int
dump_write(dmu_sendarg_t *dsp, dmu_object_type_t type,
    uint64_t object, uint64_t offset, int lsize, int psize, const blkptr_t *bp,
    void *data)
{
	uint64_t payload_size;
	struct drr_write *drrw = &(dsp->dsa_drr->drr_u.drr_write);

	/*
	 * We send data in increasing object, offset order.
	 * See comment in dump_free() for details.
	 */
	ASSERT(object > dsp->dsa_last_data_object ||
	    (object == dsp->dsa_last_data_object &&
	    offset > dsp->dsa_last_data_offset));
	dsp->dsa_last_data_object = object;
	dsp->dsa_last_data_offset = offset + lsize - 1;

	/*
	 * If there is any kind of pending aggregation (currently either
	 * a grouping of free objects or free blocks), push it out to
	 * the stream, since aggregation can't be done across operations
	 * of different types.
	 */
	if (dsp->dsa_pending_op != PENDING_NONE) {
		if (dump_record(dsp, NULL, 0) != 0)
			return (SET_ERROR(EINTR));
		dsp->dsa_pending_op = PENDING_NONE;
	}
	/* write a WRITE record */
	bzero(dsp->dsa_drr, sizeof (dmu_replay_record_t));
	dsp->dsa_drr->drr_type = DRR_WRITE;
	drrw->drr_object = object;
	drrw->drr_type = type;
	drrw->drr_offset = offset;
	drrw->drr_toguid = dsp->dsa_toguid;
	drrw->drr_logical_size = lsize;

	/* only set the compression fields if the buf is compressed */
	if (lsize != psize) {
		ASSERT(dsp->dsa_featureflags & DMU_BACKUP_FEATURE_COMPRESSED);
		ASSERT(!BP_IS_EMBEDDED(bp));
		ASSERT(!BP_SHOULD_BYTESWAP(bp));
		ASSERT(!DMU_OT_IS_METADATA(BP_GET_TYPE(bp)));
		ASSERT3U(BP_GET_COMPRESS(bp), !=, ZIO_COMPRESS_OFF);
		ASSERT3S(psize, >, 0);
		ASSERT3S(lsize, >=, psize);

		drrw->drr_compressiontype = BP_GET_COMPRESS(bp);
		drrw->drr_compressed_size = psize;
		payload_size = drrw->drr_compressed_size;
	} else {
		payload_size = drrw->drr_logical_size;
	}

	if (bp == NULL || BP_IS_EMBEDDED(bp)) {
		/*
		 * There's no pre-computed checksum for partial-block
		 * writes or embedded BP's, so (like
		 * fletcher4-checksummed blocks) userland will have to
		 * compute a dedup-capable checksum itself.
		 */
		drrw->drr_checksumtype = ZIO_CHECKSUM_OFF;
	} else {
		drrw->drr_checksumtype = BP_GET_CHECKSUM(bp);
		if (zio_checksum_table[drrw->drr_checksumtype].ci_flags &
		    ZCHECKSUM_FLAG_DEDUP)
			drrw->drr_checksumflags |= DRR_CHECKSUM_DEDUP;
		DDK_SET_LSIZE(&drrw->drr_key, BP_GET_LSIZE(bp));
		DDK_SET_PSIZE(&drrw->drr_key, BP_GET_PSIZE(bp));
		DDK_SET_COMPRESS(&drrw->drr_key, BP_GET_COMPRESS(bp));
		drrw->drr_key.ddk_cksum = bp->blk_cksum;
	}

	if (dump_record(dsp, data, payload_size) != 0)
		return (SET_ERROR(EINTR));
	return (0);
}
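
/*
 * For a compressed send (DMU_BACKUP_FEATURE_COMPRESSED), a 128K
 * logical block that compressed to, say, 12.5K on disk would be
 * emitted with drr_logical_size = 128K and drr_compressed_size =
 * 12.5K, and only the 12.5K compressed payload follows the record
 * (illustrative numbers); otherwise lsize == psize and the full
 * logical block is sent.
 */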

static int
dump_write_embedded(dmu_sendarg_t *dsp, uint64_t object, uint64_t offset,
    int blksz, const blkptr_t *bp)
{
	char buf[BPE_PAYLOAD_SIZE];
	struct drr_write_embedded *drrw =
	    &(dsp->dsa_drr->drr_u.drr_write_embedded);

	if (dsp->dsa_pending_op != PENDING_NONE) {
		if (dump_record(dsp, NULL, 0) != 0)
			return (EINTR);
		dsp->dsa_pending_op = PENDING_NONE;
	}

	ASSERT(BP_IS_EMBEDDED(bp));

	bzero(dsp->dsa_drr, sizeof (dmu_replay_record_t));
	dsp->dsa_drr->drr_type = DRR_WRITE_EMBEDDED;
	drrw->drr_object = object;
	drrw->drr_offset = offset;
	drrw->drr_length = blksz;
	drrw->drr_toguid = dsp->dsa_toguid;
	drrw->drr_compression = BP_GET_COMPRESS(bp);
	drrw->drr_etype = BPE_GET_ETYPE(bp);
	drrw->drr_lsize = BPE_GET_LSIZE(bp);
	drrw->drr_psize = BPE_GET_PSIZE(bp);

	decode_embedded_bp_compressed(bp, buf);

	if (dump_record(dsp, buf, P2ROUNDUP(drrw->drr_psize, 8)) != 0)
		return (EINTR);
	return (0);
}
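
/*
 * Note that the embedded payload above is padded out to 8 bytes
 * (P2ROUNDUP(drr_psize, 8)) to preserve the stream's 8-byte
 * alignment invariant asserted in dump_bytes().
 */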

static int
dump_spill(dmu_sendarg_t *dsp, uint64_t object, int blksz, void *data)
{
	struct drr_spill *drrs = &(dsp->dsa_drr->drr_u.drr_spill);

	if (dsp->dsa_pending_op != PENDING_NONE) {
		if (dump_record(dsp, NULL, 0) != 0)
			return (SET_ERROR(EINTR));
		dsp->dsa_pending_op = PENDING_NONE;
	}

	/* write a SPILL record */
	bzero(dsp->dsa_drr, sizeof (dmu_replay_record_t));
	dsp->dsa_drr->drr_type = DRR_SPILL;
	drrs->drr_object = object;
	drrs->drr_length = blksz;
	drrs->drr_toguid = dsp->dsa_toguid;

	if (dump_record(dsp, data, blksz) != 0)
		return (SET_ERROR(EINTR));
	return (0);
}

static int
dump_freeobjects(dmu_sendarg_t *dsp, uint64_t firstobj, uint64_t numobjs)
{
	struct drr_freeobjects *drrfo = &(dsp->dsa_drr->drr_u.drr_freeobjects);

	/*
	 * If there is a pending op, but it's not PENDING_FREEOBJECTS,
	 * push it out, since free block aggregation can only be done for
	 * blocks of the same type (i.e., DRR_FREE records can only be
	 * aggregated with other DRR_FREE records; DRR_FREEOBJECTS records
	 * can only be aggregated with other DRR_FREEOBJECTS records).
	 */
	if (dsp->dsa_pending_op != PENDING_NONE &&
	    dsp->dsa_pending_op != PENDING_FREEOBJECTS) {
		if (dump_record(dsp, NULL, 0) != 0)
			return (SET_ERROR(EINTR));
		dsp->dsa_pending_op = PENDING_NONE;
	}
	if (dsp->dsa_pending_op == PENDING_FREEOBJECTS) {
		/*
		 * See whether this free object array can be aggregated
		 * with the pending one.
		 */
		if (drrfo->drr_firstobj + drrfo->drr_numobjs == firstobj) {
			drrfo->drr_numobjs += numobjs;
			return (0);
		} else {
			/* can't be aggregated.  Push out pending record */
			if (dump_record(dsp, NULL, 0) != 0)
				return (SET_ERROR(EINTR));
			dsp->dsa_pending_op = PENDING_NONE;
		}
	}

	/* write a FREEOBJECTS record */
	bzero(dsp->dsa_drr, sizeof (dmu_replay_record_t));
	dsp->dsa_drr->drr_type = DRR_FREEOBJECTS;
	drrfo->drr_firstobj = firstobj;
	drrfo->drr_numobjs = numobjs;
	drrfo->drr_toguid = dsp->dsa_toguid;

	dsp->dsa_pending_op = PENDING_FREEOBJECTS;

	return (0);
}

static int
dump_dnode(dmu_sendarg_t *dsp, uint64_t object, dnode_phys_t *dnp)
{
	struct drr_object *drro = &(dsp->dsa_drr->drr_u.drr_object);

	if (object < dsp->dsa_resume_object) {
		/*
		 * Note: when resuming, we will visit all the dnodes in
		 * the block of dnodes that we are resuming from.  In
		 * this case it's unnecessary to send the dnodes prior to
		 * the one we are resuming from.  We should be at most one
		 * block's worth of dnodes behind the resume point.
		 */
		ASSERT3U(dsp->dsa_resume_object - object, <,
		    1 << (DNODE_BLOCK_SHIFT - DNODE_SHIFT));
		return (0);
	}

	if (dnp == NULL || dnp->dn_type == DMU_OT_NONE)
		return (dump_freeobjects(dsp, object, 1));

	if (dsp->dsa_pending_op != PENDING_NONE) {
		if (dump_record(dsp, NULL, 0) != 0)
			return (SET_ERROR(EINTR));
		dsp->dsa_pending_op = PENDING_NONE;
	}

	/* write an OBJECT record */
	bzero(dsp->dsa_drr, sizeof (dmu_replay_record_t));
	dsp->dsa_drr->drr_type = DRR_OBJECT;
	drro->drr_object = object;
	drro->drr_type = dnp->dn_type;
	drro->drr_bonustype = dnp->dn_bonustype;
	drro->drr_blksz = dnp->dn_datablkszsec << SPA_MINBLOCKSHIFT;
	drro->drr_bonuslen = dnp->dn_bonuslen;
	drro->drr_dn_slots = dnp->dn_extra_slots + 1;
	drro->drr_checksumtype = dnp->dn_checksum;
	drro->drr_compress = dnp->dn_compress;
	drro->drr_toguid = dsp->dsa_toguid;

	if (!(dsp->dsa_featureflags & DMU_BACKUP_FEATURE_LARGE_BLOCKS) &&
	    drro->drr_blksz > SPA_OLD_MAXBLOCKSIZE)
		drro->drr_blksz = SPA_OLD_MAXBLOCKSIZE;

	if (dump_record(dsp, DN_BONUS(dnp),
	    P2ROUNDUP(dnp->dn_bonuslen, 8)) != 0) {
		return (SET_ERROR(EINTR));
	}

	/* Free anything past the end of the file. */
	if (dump_free(dsp, object, (dnp->dn_maxblkid + 1) *
	    (dnp->dn_datablkszsec << SPA_MINBLOCKSHIFT), -1ULL) != 0)
		return (SET_ERROR(EINTR));
	if (dsp->dsa_err != 0)
		return (SET_ERROR(EINTR));
	return (0);
}
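
/*
 * drr_dn_slots above records how many 512-byte dnode slots the
 * object occupies (dn_extra_slots + 1).  Streams containing
 * multi-slot dnodes carry DMU_BACKUP_FEATURE_LARGE_DNODE and are
 * rejected by pools without the large_dnode feature (see
 * dmu_recv_begin_check()).
 */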

static boolean_t
backup_do_embed(dmu_sendarg_t *dsp, const blkptr_t *bp)
{
	if (!BP_IS_EMBEDDED(bp))
		return (B_FALSE);

	/*
	 * Compression function must be legacy, or explicitly enabled.
	 */
	if ((BP_GET_COMPRESS(bp) >= ZIO_COMPRESS_LEGACY_FUNCTIONS &&
	    !(dsp->dsa_featureflags & DMU_BACKUP_FEATURE_LZ4)))
		return (B_FALSE);

	/*
	 * Embed type must be explicitly enabled.
	 */
	switch (BPE_GET_ETYPE(bp)) {
	case BP_EMBEDDED_TYPE_DATA:
		if (dsp->dsa_featureflags & DMU_BACKUP_FEATURE_EMBED_DATA)
			return (B_TRUE);
		break;
	default:
		return (B_FALSE);
	}
	return (B_FALSE);
}

/*
 * This is the callback function to traverse_dataset that acts as the worker
 * thread for dmu_send_impl.
 */
/*ARGSUSED*/
static int
send_cb(spa_t *spa, zilog_t *zilog, const blkptr_t *bp,
    const zbookmark_phys_t *zb, const struct dnode_phys *dnp, void *arg)
{
	struct send_thread_arg *sta = arg;
	struct send_block_record *record;
	uint64_t record_size;
	int err = 0;

	ASSERT(zb->zb_object == DMU_META_DNODE_OBJECT ||
	    zb->zb_object >= sta->resume.zb_object);

	if (sta->cancel)
		return (SET_ERROR(EINTR));

	if (bp == NULL) {
		ASSERT3U(zb->zb_level, ==, ZB_DNODE_LEVEL);
		return (0);
	} else if (zb->zb_level < 0) {
		return (0);
	}

	record = kmem_zalloc(sizeof (struct send_block_record), KM_SLEEP);
	record->eos_marker = B_FALSE;
	record->bp = *bp;
	record->zb = *zb;
	record->indblkshift = dnp->dn_indblkshift;
	record->datablkszsec = dnp->dn_datablkszsec;
	record_size = dnp->dn_datablkszsec << SPA_MINBLOCKSHIFT;
	bqueue_enqueue(&sta->q, record, record_size);

	return (err);
}

/*
 * This function kicks off the traverse_dataset.  It also handles setting the
 * error code of the thread in case something goes wrong, and pushes the End
 * of Stream record when the traverse_dataset call has finished.  If there is
 * no dataset to traverse, the thread immediately pushes the End of Stream
 * marker.
 */
static void
send_traverse_thread(void *arg)
{
	struct send_thread_arg *st_arg = arg;
	int err;
	struct send_block_record *data;

	if (st_arg->ds != NULL) {
		err = traverse_dataset_resume(st_arg->ds,
		    st_arg->fromtxg, &st_arg->resume,
		    st_arg->flags, send_cb, st_arg);

		if (err != EINTR)
			st_arg->error_code = err;
	}
	data = kmem_zalloc(sizeof (*data), KM_SLEEP);
	data->eos_marker = B_TRUE;
	bqueue_enqueue(&st_arg->q, data, 1);
	thread_exit();
}

/*
 * This function actually handles figuring out what kind of record needs to be
 * dumped, reading the data (which has hopefully been prefetched), and calling
 * the appropriate helper function.
 */
static int
do_dump(dmu_sendarg_t *dsa, struct send_block_record *data)
{
	dsl_dataset_t *ds = dmu_objset_ds(dsa->dsa_os);
	const blkptr_t *bp = &data->bp;
	const zbookmark_phys_t *zb = &data->zb;
	uint8_t indblkshift = data->indblkshift;
	uint16_t dblkszsec = data->datablkszsec;
	spa_t *spa = ds->ds_dir->dd_pool->dp_spa;
	dmu_object_type_t type = bp ? BP_GET_TYPE(bp) : DMU_OT_NONE;
	int err = 0;

	ASSERT3U(zb->zb_level, >=, 0);

	ASSERT(zb->zb_object == DMU_META_DNODE_OBJECT ||
	    zb->zb_object >= dsa->dsa_resume_object);

	if (zb->zb_object != DMU_META_DNODE_OBJECT &&
	    DMU_OBJECT_IS_SPECIAL(zb->zb_object)) {
		return (0);
	} else if (BP_IS_HOLE(bp) &&
	    zb->zb_object == DMU_META_DNODE_OBJECT) {
		uint64_t span = BP_SPAN(dblkszsec, indblkshift, zb->zb_level);
		uint64_t dnobj = (zb->zb_blkid * span) >> DNODE_SHIFT;
		err = dump_freeobjects(dsa, dnobj, span >> DNODE_SHIFT);
	} else if (BP_IS_HOLE(bp)) {
		uint64_t span = BP_SPAN(dblkszsec, indblkshift, zb->zb_level);
		uint64_t offset = zb->zb_blkid * span;
		err = dump_free(dsa, zb->zb_object, offset, span);
	} else if (zb->zb_level > 0 || type == DMU_OT_OBJSET) {
		return (0);
	} else if (type == DMU_OT_DNODE) {
		int epb = BP_GET_LSIZE(bp) >> DNODE_SHIFT;
		arc_flags_t aflags = ARC_FLAG_WAIT;
		arc_buf_t *abuf;

		ASSERT0(zb->zb_level);

		if (arc_read(NULL, spa, bp, arc_getbuf_func, &abuf,
		    ZIO_PRIORITY_ASYNC_READ, ZIO_FLAG_CANFAIL,
		    &aflags, zb) != 0)
			return (SET_ERROR(EIO));

		dnode_phys_t *blk = abuf->b_data;
		uint64_t dnobj = zb->zb_blkid * epb;
		for (int i = 0; i < epb; i += blk[i].dn_extra_slots + 1) {
			err = dump_dnode(dsa, dnobj + i, blk + i);
			if (err != 0)
				break;
		}
		arc_buf_destroy(abuf, &abuf);
	} else if (type == DMU_OT_SA) {
		arc_flags_t aflags = ARC_FLAG_WAIT;
		arc_buf_t *abuf;
		int blksz = BP_GET_LSIZE(bp);

		if (arc_read(NULL, spa, bp, arc_getbuf_func, &abuf,
		    ZIO_PRIORITY_ASYNC_READ, ZIO_FLAG_CANFAIL,
		    &aflags, zb) != 0)
			return (SET_ERROR(EIO));

		err = dump_spill(dsa, zb->zb_object, blksz, abuf->b_data);
		arc_buf_destroy(abuf, &abuf);
	} else if (backup_do_embed(dsa, bp)) {
		/* it's an embedded level-0 block of a regular object */
		int blksz = dblkszsec << SPA_MINBLOCKSHIFT;
		ASSERT0(zb->zb_level);
		err = dump_write_embedded(dsa, zb->zb_object,
		    zb->zb_blkid * blksz, blksz, bp);
	} else {
		/* it's a level-0 block of a regular object */
		arc_flags_t aflags = ARC_FLAG_WAIT;
		arc_buf_t *abuf;
		int blksz = dblkszsec << SPA_MINBLOCKSHIFT;
		uint64_t offset;

		/*
		 * If we have large blocks stored on disk but the send flags
		 * don't allow us to send large blocks, we split the data from
		 * the arc buf into chunks.
		 */
		boolean_t split_large_blocks = blksz > SPA_OLD_MAXBLOCKSIZE &&
		    !(dsa->dsa_featureflags & DMU_BACKUP_FEATURE_LARGE_BLOCKS);
		/*
		 * We should only request compressed data from the ARC if all
		 * the following are true:
		 * - stream compression was requested
		 * - we aren't splitting large blocks into smaller chunks
		 * - the data won't need to be byteswapped before sending
		 * - this isn't an embedded block
		 * - this isn't metadata (if receiving on a different endian
		 *   system it can be byteswapped more easily)
		 */
		boolean_t request_compressed =
		    (dsa->dsa_featureflags & DMU_BACKUP_FEATURE_COMPRESSED) &&
		    !split_large_blocks && !BP_SHOULD_BYTESWAP(bp) &&
		    !BP_IS_EMBEDDED(bp) && !DMU_OT_IS_METADATA(BP_GET_TYPE(bp));

		ASSERT0(zb->zb_level);
		ASSERT(zb->zb_object > dsa->dsa_resume_object ||
		    (zb->zb_object == dsa->dsa_resume_object &&
		    zb->zb_blkid * blksz >= dsa->dsa_resume_offset));

		ASSERT3U(blksz, ==, BP_GET_LSIZE(bp));

		enum zio_flag zioflags = ZIO_FLAG_CANFAIL;
		if (request_compressed)
			zioflags |= ZIO_FLAG_RAW;
		if (arc_read(NULL, spa, bp, arc_getbuf_func, &abuf,
		    ZIO_PRIORITY_ASYNC_READ, zioflags, &aflags, zb) != 0) {
			if (zfs_send_corrupt_data) {
				/* Send a block filled with 0x"zfs badd bloc" */
				abuf = arc_alloc_buf(spa, &abuf, ARC_BUFC_DATA,
				    blksz);
				uint64_t *ptr;
				for (ptr = abuf->b_data;
				    (char *)ptr < (char *)abuf->b_data + blksz;
				    ptr++)
					*ptr = 0x2f5baddb10cULL;
			} else {
				return (SET_ERROR(EIO));
			}
		}

		offset = zb->zb_blkid * blksz;

		if (split_large_blocks) {
			ASSERT3U(arc_get_compression(abuf), ==,
			    ZIO_COMPRESS_OFF);
			char *buf = abuf->b_data;
			while (blksz > 0 && err == 0) {
				int n = MIN(blksz, SPA_OLD_MAXBLOCKSIZE);
				err = dump_write(dsa, type, zb->zb_object,
				    offset, n, n, NULL, buf);
				offset += n;
				buf += n;
				blksz -= n;
			}
		} else {
			err = dump_write(dsa, type, zb->zb_object, offset,
			    blksz, arc_buf_size(abuf), bp, abuf->b_data);
		}
		arc_buf_destroy(abuf, &abuf);
	}

	ASSERT(err == 0 || err == EINTR);
	return (err);
}

/*
 * Pop the new data off the queue, and free the old data.
 */
static struct send_block_record *
get_next_record(bqueue_t *bq, struct send_block_record *data)
{
	struct send_block_record *tmp = bqueue_dequeue(bq);
	kmem_free(data, sizeof (*data));
	return (tmp);
}
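
/*
 * Send pipeline overview: send_traverse_thread() walks the dataset
 * and enqueues one send_block_record per block pointer onto
 * to_arg.q (a bqueue bounded by zfs_send_queue_length), while the
 * original thread, in dmu_send_impl() below, dequeues records with
 * get_next_record() and converts each into stream records via
 * do_dump().
 */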

/*
 * Actually do the bulk of the work in a zfs send.
 *
 * Note: Releases dp using the specified tag.
 */
static int
dmu_send_impl(void *tag, dsl_pool_t *dp, dsl_dataset_t *to_ds,
    zfs_bookmark_phys_t *ancestor_zb, boolean_t is_clone,
    boolean_t embedok, boolean_t large_block_ok, boolean_t compressok,
    int outfd, uint64_t resumeobj, uint64_t resumeoff,
#ifdef illumos
    vnode_t *vp, offset_t *off)
#else
    struct file *fp, offset_t *off)
#endif
{
	objset_t *os;
	dmu_replay_record_t *drr;
	dmu_sendarg_t *dsp;
	int err;
	uint64_t fromtxg = 0;
	uint64_t featureflags = 0;
	struct send_thread_arg to_arg = { 0 };

	err = dmu_objset_from_ds(to_ds, &os);
	if (err != 0) {
		dsl_pool_rele(dp, tag);
		return (err);
	}

	drr = kmem_zalloc(sizeof (dmu_replay_record_t), KM_SLEEP);
	drr->drr_type = DRR_BEGIN;
	drr->drr_u.drr_begin.drr_magic = DMU_BACKUP_MAGIC;
	DMU_SET_STREAM_HDRTYPE(drr->drr_u.drr_begin.drr_versioninfo,
	    DMU_SUBSTREAM);

#ifdef _KERNEL
	if (dmu_objset_type(os) == DMU_OST_ZFS) {
		uint64_t version;
		if (zfs_get_zplprop(os, ZFS_PROP_VERSION, &version) != 0) {
			kmem_free(drr, sizeof (dmu_replay_record_t));
			dsl_pool_rele(dp, tag);
			return (SET_ERROR(EINVAL));
		}
		if (version >= ZPL_VERSION_SA) {
			featureflags |= DMU_BACKUP_FEATURE_SA_SPILL;
		}
	}
#endif

	if (large_block_ok && to_ds->ds_feature_inuse[SPA_FEATURE_LARGE_BLOCKS])
		featureflags |= DMU_BACKUP_FEATURE_LARGE_BLOCKS;
	if (to_ds->ds_feature_inuse[SPA_FEATURE_LARGE_DNODE])
		featureflags |= DMU_BACKUP_FEATURE_LARGE_DNODE;
	if (embedok &&
	    spa_feature_is_active(dp->dp_spa, SPA_FEATURE_EMBEDDED_DATA)) {
		featureflags |= DMU_BACKUP_FEATURE_EMBED_DATA;
		if (spa_feature_is_active(dp->dp_spa, SPA_FEATURE_LZ4_COMPRESS))
			featureflags |= DMU_BACKUP_FEATURE_LZ4;
	}
	if (compressok) {
		featureflags |= DMU_BACKUP_FEATURE_COMPRESSED;
	}
	if ((featureflags &
	    (DMU_BACKUP_FEATURE_EMBED_DATA | DMU_BACKUP_FEATURE_COMPRESSED)) !=
	    0 && spa_feature_is_active(dp->dp_spa, SPA_FEATURE_LZ4_COMPRESS)) {
		featureflags |= DMU_BACKUP_FEATURE_LZ4;
	}

	if (resumeobj != 0 || resumeoff != 0) {
		featureflags |= DMU_BACKUP_FEATURE_RESUMING;
	}

	DMU_SET_FEATUREFLAGS(drr->drr_u.drr_begin.drr_versioninfo,
	    featureflags);

	drr->drr_u.drr_begin.drr_creation_time =
	    dsl_dataset_phys(to_ds)->ds_creation_time;
	drr->drr_u.drr_begin.drr_type = dmu_objset_type(os);
	if (is_clone)
		drr->drr_u.drr_begin.drr_flags |= DRR_FLAG_CLONE;
	drr->drr_u.drr_begin.drr_toguid = dsl_dataset_phys(to_ds)->ds_guid;
	if (dsl_dataset_phys(to_ds)->ds_flags & DS_FLAG_CI_DATASET)
		drr->drr_u.drr_begin.drr_flags |= DRR_FLAG_CI_DATA;
	if (zfs_send_set_freerecords_bit)
		drr->drr_u.drr_begin.drr_flags |= DRR_FLAG_FREERECORDS;

	if (ancestor_zb != NULL) {
		drr->drr_u.drr_begin.drr_fromguid =
		    ancestor_zb->zbm_guid;
		fromtxg = ancestor_zb->zbm_creation_txg;
	}
	dsl_dataset_name(to_ds, drr->drr_u.drr_begin.drr_toname);
	if (!to_ds->ds_is_snapshot) {
		(void) strlcat(drr->drr_u.drr_begin.drr_toname, "@--head--",
		    sizeof (drr->drr_u.drr_begin.drr_toname));
	}

	dsp = kmem_zalloc(sizeof (dmu_sendarg_t), KM_SLEEP);

	dsp->dsa_drr = drr;
	dsp->dsa_outfd = outfd;
	dsp->dsa_proc = curproc;
	dsp->dsa_td = curthread;
	dsp->dsa_fp = fp;
	dsp->dsa_os = os;
	dsp->dsa_off = off;
	dsp->dsa_toguid = dsl_dataset_phys(to_ds)->ds_guid;
	dsp->dsa_pending_op = PENDING_NONE;
	dsp->dsa_featureflags = featureflags;
	dsp->dsa_resume_object = resumeobj;
	dsp->dsa_resume_offset = resumeoff;

	mutex_enter(&to_ds->ds_sendstream_lock);
	list_insert_head(&to_ds->ds_sendstreams, dsp);
	mutex_exit(&to_ds->ds_sendstream_lock);

	dsl_dataset_long_hold(to_ds, FTAG);
	dsl_pool_rele(dp, tag);

	void *payload = NULL;
	size_t payload_len = 0;
	if (resumeobj != 0 || resumeoff != 0) {
		dmu_object_info_t to_doi;
		err = dmu_object_info(os, resumeobj, &to_doi);
		if (err != 0)
			goto out;
		SET_BOOKMARK(&to_arg.resume, to_ds->ds_object, resumeobj, 0,
		    resumeoff / to_doi.doi_data_block_size);

		nvlist_t *nvl = fnvlist_alloc();
		fnvlist_add_uint64(nvl, "resume_object", resumeobj);
		fnvlist_add_uint64(nvl, "resume_offset", resumeoff);
		payload = fnvlist_pack(nvl, &payload_len);
		drr->drr_payloadlen = payload_len;
		fnvlist_free(nvl);
	}

	err = dump_record(dsp, payload, payload_len);
	fnvlist_pack_free(payload, payload_len);
	if (err != 0) {
		err = dsp->dsa_err;
		goto out;
	}

	err = bqueue_init(&to_arg.q, zfs_send_queue_length,
	    offsetof(struct send_block_record, ln));
	to_arg.error_code = 0;
	to_arg.cancel = B_FALSE;
	to_arg.ds = to_ds;
	to_arg.fromtxg = fromtxg;
	to_arg.flags = TRAVERSE_PRE | TRAVERSE_PREFETCH;
	(void) thread_create(NULL, 0, send_traverse_thread, &to_arg, 0, &p0,
	    TS_RUN, minclsyspri);

	struct send_block_record *to_data;
	to_data = bqueue_dequeue(&to_arg.q);

	while (!to_data->eos_marker && err == 0) {
		err = do_dump(dsp, to_data);
		to_data = get_next_record(&to_arg.q, to_data);
		if (issig(JUSTLOOKING) && issig(FORREAL))
			err = EINTR;
	}

	if (err != 0) {
		to_arg.cancel = B_TRUE;
		while (!to_data->eos_marker) {
			to_data = get_next_record(&to_arg.q, to_data);
		}
	}
	kmem_free(to_data, sizeof (*to_data));

	bqueue_destroy(&to_arg.q);

	if (err == 0 && to_arg.error_code != 0)
		err = to_arg.error_code;

	if (err != 0)
		goto out;

	if (dsp->dsa_pending_op != PENDING_NONE)
		if (dump_record(dsp, NULL, 0) != 0)
			err = SET_ERROR(EINTR);

	if (err != 0) {
		if (err == EINTR && dsp->dsa_err != 0)
			err = dsp->dsa_err;
		goto out;
	}

	bzero(drr, sizeof (dmu_replay_record_t));
	drr->drr_type = DRR_END;
	drr->drr_u.drr_end.drr_checksum = dsp->dsa_zc;
	drr->drr_u.drr_end.drr_toguid = dsp->dsa_toguid;

	if (dump_record(dsp, NULL, 0) != 0)
		err = dsp->dsa_err;

out:
	mutex_enter(&to_ds->ds_sendstream_lock);
	list_remove(&to_ds->ds_sendstreams, dsp);
	mutex_exit(&to_ds->ds_sendstream_lock);

	VERIFY(err != 0 || (dsp->dsa_sent_begin && dsp->dsa_sent_end));

	kmem_free(drr, sizeof (dmu_replay_record_t));
	kmem_free(dsp, sizeof (dmu_sendarg_t));

	dsl_dataset_long_rele(to_ds, FTAG);

	return (err);
}
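
/*
 * Resulting stream layout, for reference: a DRR_BEGIN record
 * (optionally carrying a resume nvlist payload with "resume_object"
 * and "resume_offset"), followed by OBJECT, FREEOBJECTS, WRITE,
 * WRITE_EMBEDDED, SPILL and FREE records in increasing
 * (object, offset) order, terminated by a DRR_END record bearing
 * the accumulated fletcher4 checksum.
 */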

int
dmu_send_obj(const char *pool, uint64_t tosnap, uint64_t fromsnap,
    boolean_t embedok, boolean_t large_block_ok, boolean_t compressok,
#ifdef illumos
    int outfd, vnode_t *vp, offset_t *off)
#else
    int outfd, struct file *fp, offset_t *off)
#endif
{
	dsl_pool_t *dp;
	dsl_dataset_t *ds;
	dsl_dataset_t *fromds = NULL;
	int err;

	err = dsl_pool_hold(pool, FTAG, &dp);
	if (err != 0)
		return (err);

	err = dsl_dataset_hold_obj(dp, tosnap, FTAG, &ds);
	if (err != 0) {
		dsl_pool_rele(dp, FTAG);
		return (err);
	}

	if (fromsnap != 0) {
		zfs_bookmark_phys_t zb;
		boolean_t is_clone;

		err = dsl_dataset_hold_obj(dp, fromsnap, FTAG, &fromds);
		if (err != 0) {
			dsl_dataset_rele(ds, FTAG);
			dsl_pool_rele(dp, FTAG);
			return (err);
		}
		if (!dsl_dataset_is_before(ds, fromds, 0))
			err = SET_ERROR(EXDEV);
		zb.zbm_creation_time =
		    dsl_dataset_phys(fromds)->ds_creation_time;
		zb.zbm_creation_txg = dsl_dataset_phys(fromds)->ds_creation_txg;
		zb.zbm_guid = dsl_dataset_phys(fromds)->ds_guid;
		is_clone = (fromds->ds_dir != ds->ds_dir);
		dsl_dataset_rele(fromds, FTAG);
		err = dmu_send_impl(FTAG, dp, ds, &zb, is_clone,
		    embedok, large_block_ok, compressok, outfd, 0, 0, fp, off);
	} else {
		err = dmu_send_impl(FTAG, dp, ds, NULL, B_FALSE,
		    embedok, large_block_ok, compressok, outfd, 0, 0, fp, off);
	}
	dsl_dataset_rele(ds, FTAG);
	return (err);
}

int
dmu_send(const char *tosnap, const char *fromsnap, boolean_t embedok,
    boolean_t large_block_ok, boolean_t compressok, int outfd,
    uint64_t resumeobj, uint64_t resumeoff,
#ifdef illumos
    vnode_t *vp, offset_t *off)
#else
    struct file *fp, offset_t *off)
#endif
{
	dsl_pool_t *dp;
	dsl_dataset_t *ds;
	int err;
	boolean_t owned = B_FALSE;

	if (fromsnap != NULL && strpbrk(fromsnap, "@#") == NULL)
		return (SET_ERROR(EINVAL));

	err = dsl_pool_hold(tosnap, FTAG, &dp);
	if (err != 0)
		return (err);

	if (strchr(tosnap, '@') == NULL && spa_writeable(dp->dp_spa)) {
		/*
		 * We are sending a filesystem or volume.  Ensure
		 * that it doesn't change by owning the dataset.
		 */
		err = dsl_dataset_own(dp, tosnap, FTAG, &ds);
		owned = B_TRUE;
	} else {
		err = dsl_dataset_hold(dp, tosnap, FTAG, &ds);
	}
	if (err != 0) {
		dsl_pool_rele(dp, FTAG);
		return (err);
	}

	if (fromsnap != NULL) {
		zfs_bookmark_phys_t zb;
		boolean_t is_clone = B_FALSE;
		int fsnamelen = strchr(tosnap, '@') - tosnap;

		/*
		 * If the fromsnap is in a different filesystem, then
		 * mark the send stream as a clone.
		 */
		if (strncmp(tosnap, fromsnap, fsnamelen) != 0 ||
		    (fromsnap[fsnamelen] != '@' &&
		    fromsnap[fsnamelen] != '#')) {
			is_clone = B_TRUE;
		}

		if (strchr(fromsnap, '@')) {
			dsl_dataset_t *fromds;
			err = dsl_dataset_hold(dp, fromsnap, FTAG, &fromds);
			if (err == 0) {
				if (!dsl_dataset_is_before(ds, fromds, 0))
					err = SET_ERROR(EXDEV);
				zb.zbm_creation_time =
				    dsl_dataset_phys(fromds)->ds_creation_time;
				zb.zbm_creation_txg =
				    dsl_dataset_phys(fromds)->ds_creation_txg;
				zb.zbm_guid = dsl_dataset_phys(fromds)->ds_guid;
				is_clone = (ds->ds_dir != fromds->ds_dir);
				dsl_dataset_rele(fromds, FTAG);
			}
		} else {
			err = dsl_bookmark_lookup(dp, fromsnap, ds, &zb);
		}
		if (err != 0) {
			dsl_dataset_rele(ds, FTAG);
			dsl_pool_rele(dp, FTAG);
			return (err);
		}
		err = dmu_send_impl(FTAG, dp, ds, &zb, is_clone,
		    embedok, large_block_ok, compressok,
		    outfd, resumeobj, resumeoff, fp, off);
	} else {
		err = dmu_send_impl(FTAG, dp, ds, NULL, B_FALSE,
		    embedok, large_block_ok, compressok,
		    outfd, resumeobj, resumeoff, fp, off);
	}
	if (owned)
		dsl_dataset_disown(ds, FTAG);
	else
		dsl_dataset_rele(ds, FTAG);
	return (err);
}
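
/*
 * Note on the two entry points: dmu_send() resolves names, accepts
 * either a snapshot ("pool/fs@snap") or a bookmark ("pool/fs#bm")
 * as fromsnap, and owns the dataset when sending a live filesystem
 * or volume, while dmu_send_obj() above takes snapshot object
 * numbers directly and only handles snapshot sends.
 */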

static int
dmu_adjust_send_estimate_for_indirects(dsl_dataset_t *ds,
    uint64_t uncompressed, uint64_t compressed, boolean_t stream_compressed,
    uint64_t *sizep)
{
	int err = 0;
	uint64_t size;
	/*
	 * Assume that space (both on-disk and in-stream) is dominated by
	 * data.  We will adjust for indirect blocks and the copies property,
	 * but ignore per-object space used (e.g., dnodes and DRR_OBJECT
	 * records).
	 */
	uint64_t recordsize;
	uint64_t record_count;
	objset_t *os;
	VERIFY0(dmu_objset_from_ds(ds, &os));

	/* Assume all (uncompressed) blocks are recordsize. */
	if (zfs_override_estimate_recordsize != 0) {
		recordsize = zfs_override_estimate_recordsize;
	} else if (os->os_phys->os_type == DMU_OST_ZVOL) {
		err = dsl_prop_get_int_ds(ds,
		    zfs_prop_to_name(ZFS_PROP_VOLBLOCKSIZE), &recordsize);
	} else {
		err = dsl_prop_get_int_ds(ds,
		    zfs_prop_to_name(ZFS_PROP_RECORDSIZE), &recordsize);
	}
	if (err != 0)
		return (err);
	record_count = uncompressed / recordsize;

	/*
	 * If we're estimating a send size for a compressed stream, use the
	 * compressed data size to estimate the stream size.  Otherwise, use
	 * the uncompressed data size.
	 */
	size = stream_compressed ? compressed : uncompressed;

	/*
	 * Subtract out approximate space used by indirect blocks.
	 * Assume most space is used by data blocks (non-indirect, non-dnode).
	 * Assume no ditto blocks or internal fragmentation.
	 *
	 * Therefore, space used by indirect blocks is sizeof(blkptr_t) per
	 * block.
	 */
	size -= record_count * sizeof (blkptr_t);

	/* Add in the space for the record associated with each block. */
	size += record_count * sizeof (dmu_replay_record_t);

	*sizep = size;

	return (0);
}
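
/*
 * Worked example (illustrative, assuming a 128K recordsize and the
 * 128-byte blkptr_t): 1G of uncompressed data yields 8192 records,
 * so the estimate subtracts 8192 * 128 bytes (1M) of indirect-block
 * overhead and adds 8192 * sizeof (dmu_replay_record_t) bytes of
 * per-record stream headers.
 */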

int
dmu_send_estimate(dsl_dataset_t *ds, dsl_dataset_t *fromds,
    boolean_t stream_compressed, uint64_t *sizep)
{
	dsl_pool_t *dp = ds->ds_dir->dd_pool;
	int err;
	uint64_t uncomp, comp;

	ASSERT(dsl_pool_config_held(dp));

	/* tosnap must be a snapshot */
	if (!ds->ds_is_snapshot)
		return (SET_ERROR(EINVAL));

	/* fromsnap, if provided, must be a snapshot */
	if (fromds != NULL && !fromds->ds_is_snapshot)
		return (SET_ERROR(EINVAL));

	/*
	 * fromsnap must be an earlier snapshot from the same fs as tosnap,
	 * or the origin's fs.
	 */
	if (fromds != NULL && !dsl_dataset_is_before(ds, fromds, 0))
		return (SET_ERROR(EXDEV));

	/* Get compressed and uncompressed size estimates of changed data. */
	if (fromds == NULL) {
		uncomp = dsl_dataset_phys(ds)->ds_uncompressed_bytes;
		comp = dsl_dataset_phys(ds)->ds_compressed_bytes;
	} else {
		uint64_t used;
		err = dsl_dataset_space_written(fromds, ds,
		    &used, &comp, &uncomp);
		if (err != 0)
			return (err);
	}

	err = dmu_adjust_send_estimate_for_indirects(ds, uncomp, comp,
	    stream_compressed, sizep);
	/*
	 * Add the size of the BEGIN and END records to the estimate.
	 */
	*sizep += 2 * sizeof (dmu_replay_record_t);
	return (err);
}

struct calculate_send_arg {
	uint64_t uncompressed;
	uint64_t compressed;
};

/*
 * Simple callback used to traverse the blocks of a snapshot and sum their
 * uncompressed and compressed sizes.
 */
/* ARGSUSED */
static int
dmu_calculate_send_traversal(spa_t *spa, zilog_t *zilog, const blkptr_t *bp,
    const zbookmark_phys_t *zb, const dnode_phys_t *dnp, void *arg)
{
	struct calculate_send_arg *space = arg;
	if (bp != NULL && !BP_IS_HOLE(bp)) {
		space->uncompressed += BP_GET_UCSIZE(bp);
		space->compressed += BP_GET_PSIZE(bp);
	}
	return (0);
}

/*
 * Given a destination snapshot and a TXG, calculate the approximate size of a
 * send stream sent from that TXG.  from_txg may be zero, indicating that the
 * whole snapshot will be sent.
 */
int
dmu_send_estimate_from_txg(dsl_dataset_t *ds, uint64_t from_txg,
    boolean_t stream_compressed, uint64_t *sizep)
{
	dsl_pool_t *dp = ds->ds_dir->dd_pool;
	int err;
	struct calculate_send_arg size = { 0 };

	ASSERT(dsl_pool_config_held(dp));

	/* tosnap must be a snapshot */
	if (!ds->ds_is_snapshot)
		return (SET_ERROR(EINVAL));

	/* verify that from_txg is before the provided snapshot was taken */
	if (from_txg >= dsl_dataset_phys(ds)->ds_creation_txg) {
		return (SET_ERROR(EXDEV));
	}

	/*
	 * traverse the blocks of the snapshot with birth times after
	 * from_txg, summing their uncompressed size
	 */
	err = traverse_dataset(ds, from_txg, TRAVERSE_POST,
	    dmu_calculate_send_traversal, &size);
	if (err)
		return (err);

	err = dmu_adjust_send_estimate_for_indirects(ds, size.uncompressed,
	    size.compressed, stream_compressed, sizep);
	return (err);
}

typedef struct dmu_recv_begin_arg {
	const char *drba_origin;
	dmu_recv_cookie_t *drba_cookie;
	cred_t *drba_cred;
	uint64_t drba_snapobj;
} dmu_recv_begin_arg_t;

static int
recv_begin_check_existing_impl(dmu_recv_begin_arg_t *drba, dsl_dataset_t *ds,
    uint64_t fromguid)
{
	uint64_t val;
	uint64_t children;
	int error;
	dsl_pool_t *dp = ds->ds_dir->dd_pool;

	/* Temporary clone name must not exist. */
	error = zap_lookup(dp->dp_meta_objset,
	    dsl_dir_phys(ds->ds_dir)->dd_child_dir_zapobj, recv_clone_name,
	    8, 1, &val);
	if (error != ENOENT)
		return (error == 0 ? SET_ERROR(EBUSY) : error);

	/* Resume state must not be set. */
	if (dsl_dataset_has_resume_receive_state(ds))
		return (SET_ERROR(EBUSY));

	/* New snapshot name must not exist. */
	error = zap_lookup(dp->dp_meta_objset,
	    dsl_dataset_phys(ds)->ds_snapnames_zapobj,
	    drba->drba_cookie->drc_tosnap, 8, 1, &val);
	if (error != ENOENT)
		return (error == 0 ? SET_ERROR(EEXIST) : error);

	/* must not have children if receiving a ZVOL */
	error = zap_count(dp->dp_meta_objset,
	    dsl_dir_phys(ds->ds_dir)->dd_child_dir_zapobj, &children);
	if (error != 0)
		return (error);
	if (drba->drba_cookie->drc_drrb->drr_type != DMU_OST_ZFS &&
	    children > 0)
		return (SET_ERROR(ZFS_ERR_WRONG_PARENT));

	/*
	 * Check snapshot limit before receiving.  We'll recheck again at the
	 * end, but might as well abort before receiving if we're already over
	 * the limit.
	 *
	 * Note that we do not check the file system limit with
	 * dsl_dir_fscount_check because the temporary %clones don't count
	 * against that limit.
	 */
	error = dsl_fs_ss_limit_check(ds->ds_dir, 1, ZFS_PROP_SNAPSHOT_LIMIT,
	    NULL, drba->drba_cred);
	if (error != 0)
		return (error);

	if (fromguid != 0) {
		dsl_dataset_t *snap;
		uint64_t obj = dsl_dataset_phys(ds)->ds_prev_snap_obj;

		/* Find snapshot in this dir that matches fromguid. */
		while (obj != 0) {
			error = dsl_dataset_hold_obj(dp, obj, FTAG,
			    &snap);
			if (error != 0)
				return (SET_ERROR(ENODEV));
			if (snap->ds_dir != ds->ds_dir) {
				dsl_dataset_rele(snap, FTAG);
				return (SET_ERROR(ENODEV));
			}
			if (dsl_dataset_phys(snap)->ds_guid == fromguid)
				break;
			obj = dsl_dataset_phys(snap)->ds_prev_snap_obj;
			dsl_dataset_rele(snap, FTAG);
		}
		if (obj == 0)
			return (SET_ERROR(ENODEV));

		if (drba->drba_cookie->drc_force) {
			drba->drba_snapobj = obj;
		} else {
			/*
			 * If we are not forcing, there must be no
			 * changes since fromsnap.
			 */
			if (dsl_dataset_modified_since_snap(ds, snap)) {
				dsl_dataset_rele(snap, FTAG);
				return (SET_ERROR(ETXTBSY));
			}
			drba->drba_snapobj = ds->ds_prev->ds_object;
		}

		dsl_dataset_rele(snap, FTAG);
	} else {
		/* if full, then must be forced */
		if (!drba->drba_cookie->drc_force)
			return (SET_ERROR(EEXIST));
		/* start from $ORIGIN@$ORIGIN, if supported */
		drba->drba_snapobj = dp->dp_origin_snap != NULL ?
		    dp->dp_origin_snap->ds_object : 0;
	}

	return (0);
}

static int
dmu_recv_begin_check(void *arg, dmu_tx_t *tx)
{
	dmu_recv_begin_arg_t *drba = arg;
	dsl_pool_t *dp = dmu_tx_pool(tx);
	struct drr_begin *drrb = drba->drba_cookie->drc_drrb;
	uint64_t fromguid = drrb->drr_fromguid;
	int flags = drrb->drr_flags;
	int error;
	uint64_t featureflags = DMU_GET_FEATUREFLAGS(drrb->drr_versioninfo);
	dsl_dataset_t *ds;
	const char *tofs = drba->drba_cookie->drc_tofs;

	/* already checked */
	ASSERT3U(drrb->drr_magic, ==, DMU_BACKUP_MAGIC);
	ASSERT(!(featureflags & DMU_BACKUP_FEATURE_RESUMING));

	if (DMU_GET_STREAM_HDRTYPE(drrb->drr_versioninfo) ==
	    DMU_COMPOUNDSTREAM ||
	    drrb->drr_type >= DMU_OST_NUMTYPES ||
	    ((flags & DRR_FLAG_CLONE) && drba->drba_origin == NULL))
		return (SET_ERROR(EINVAL));

	/* Verify pool version supports SA if SA_SPILL feature set */
	if ((featureflags & DMU_BACKUP_FEATURE_SA_SPILL) &&
	    spa_version(dp->dp_spa) < SPA_VERSION_SA)
		return (SET_ERROR(ENOTSUP));

	if (drba->drba_cookie->drc_resumable &&
	    !spa_feature_is_enabled(dp->dp_spa, SPA_FEATURE_EXTENSIBLE_DATASET))
		return (SET_ERROR(ENOTSUP));

	/*
	 * The receiving code doesn't know how to translate a WRITE_EMBEDDED
	 * record to a plain WRITE record, so the pool must have the
	 * EMBEDDED_DATA feature enabled if the stream has WRITE_EMBEDDED
	 * records.  Same with WRITE_EMBEDDED records that use LZ4 compression.
	 */
	if ((featureflags & DMU_BACKUP_FEATURE_EMBED_DATA) &&
	    !spa_feature_is_enabled(dp->dp_spa, SPA_FEATURE_EMBEDDED_DATA))
		return (SET_ERROR(ENOTSUP));
	if ((featureflags & DMU_BACKUP_FEATURE_LZ4) &&
	    !spa_feature_is_enabled(dp->dp_spa, SPA_FEATURE_LZ4_COMPRESS))
		return (SET_ERROR(ENOTSUP));

	/*
	 * The receiving code doesn't know how to translate large blocks
	 * to smaller ones, so the pool must have the LARGE_BLOCKS
	 * feature enabled if the stream has LARGE_BLOCKS.  Same with
	 * large dnodes.
	 */
	if ((featureflags & DMU_BACKUP_FEATURE_LARGE_BLOCKS) &&
	    !spa_feature_is_enabled(dp->dp_spa, SPA_FEATURE_LARGE_BLOCKS))
		return (SET_ERROR(ENOTSUP));
	if ((featureflags & DMU_BACKUP_FEATURE_LARGE_DNODE) &&
	    !spa_feature_is_enabled(dp->dp_spa, SPA_FEATURE_LARGE_DNODE))
		return (SET_ERROR(ENOTSUP));

	error = dsl_dataset_hold(dp, tofs, FTAG, &ds);
	if (error == 0) {
		/* target fs already exists; recv into temp clone */

		/* Can't recv a clone into an existing fs */
		if (flags & DRR_FLAG_CLONE || drba->drba_origin) {
			dsl_dataset_rele(ds, FTAG);
			return (SET_ERROR(EINVAL));
		}

		error = recv_begin_check_existing_impl(drba, ds, fromguid);
		dsl_dataset_rele(ds, FTAG);
	} else if (error == ENOENT) {
		/* target fs does not exist; must be a full backup or clone */
		char buf[ZFS_MAX_DATASET_NAME_LEN];
		objset_t *os;

		/*
		 * If it's a non-clone incremental, we are missing the
		 * target fs, so fail the recv.
		 */
		if (fromguid != 0 && !(flags & DRR_FLAG_CLONE ||
		    drba->drba_origin))
			return (SET_ERROR(ENOENT));

		/*
		 * If we're receiving a full send as a clone, and it doesn't
		 * contain all the necessary free records and freeobject
		 * records, reject it.
		 */
		if (fromguid == 0 && drba->drba_origin &&
		    !(flags & DRR_FLAG_FREERECORDS))
			return (SET_ERROR(EINVAL));

		/* Open the parent of tofs */
		ASSERT3U(strlen(tofs), <, sizeof (buf));
		(void) strlcpy(buf, tofs, strrchr(tofs, '/') - tofs + 1);
		error = dsl_dataset_hold(dp, buf, FTAG, &ds);
		if (error != 0)
			return (error);

		/*
		 * Check filesystem and snapshot limits before receiving.
		 * We'll recheck snapshot limits again at the end (we create
		 * the filesystems and increment those counts during
		 * begin_sync).
		 */
		error = dsl_fs_ss_limit_check(ds->ds_dir, 1,
		    ZFS_PROP_FILESYSTEM_LIMIT, NULL, drba->drba_cred);
		if (error != 0) {
			dsl_dataset_rele(ds, FTAG);
			return (error);
		}

		error = dsl_fs_ss_limit_check(ds->ds_dir, 1,
		    ZFS_PROP_SNAPSHOT_LIMIT, NULL, drba->drba_cred);
		if (error != 0) {
			dsl_dataset_rele(ds, FTAG);
			return (error);
		}

		/* can't recv below anything but filesystems (eg. no ZVOLs) */
		error = dmu_objset_from_ds(ds, &os);
		if (error != 0) {
			dsl_dataset_rele(ds, FTAG);
			return (error);
		}
		if (dmu_objset_type(os) != DMU_OST_ZFS) {
			dsl_dataset_rele(ds, FTAG);
			return (SET_ERROR(ZFS_ERR_WRONG_PARENT));
		}

		if (drba->drba_origin != NULL) {
			dsl_dataset_t *origin;
			error = dsl_dataset_hold(dp, drba->drba_origin,
			    FTAG, &origin);
			if (error != 0) {
				dsl_dataset_rele(ds, FTAG);
				return (error);
			}
			if (!origin->ds_is_snapshot) {
				dsl_dataset_rele(origin, FTAG);
				dsl_dataset_rele(ds, FTAG);
				return (SET_ERROR(EINVAL));
			}
			if (dsl_dataset_phys(origin)->ds_guid != fromguid &&
			    fromguid != 0) {
				dsl_dataset_rele(origin, FTAG);
				dsl_dataset_rele(ds, FTAG);
				return (SET_ERROR(ENODEV));
			}
			dsl_dataset_rele(origin, FTAG);
		}

		dsl_dataset_rele(ds, FTAG);
		error = 0;
	}
	return (error);
}

static void
dmu_recv_begin_sync(void *arg, dmu_tx_t *tx)
{
	dmu_recv_begin_arg_t *drba = arg;
	dsl_pool_t *dp = dmu_tx_pool(tx);
	objset_t *mos = dp->dp_meta_objset;
	struct drr_begin *drrb = drba->drba_cookie->drc_drrb;
	const char *tofs = drba->drba_cookie->drc_tofs;
	dsl_dataset_t *ds, *newds;
	uint64_t dsobj;
	int error;
	uint64_t crflags = 0;

	if (drrb->drr_flags & DRR_FLAG_CI_DATA)
		crflags |= DS_FLAG_CI_DATASET;

	error = dsl_dataset_hold(dp, tofs, FTAG, &ds);
	if (error == 0) {
		/* create temporary clone */
		dsl_dataset_t *snap = NULL;
		if (drba->drba_snapobj != 0) {
			VERIFY0(dsl_dataset_hold_obj(dp,
			    drba->drba_snapobj, FTAG, &snap));
		}
		dsobj = dsl_dataset_create_sync(ds->ds_dir, recv_clone_name,
		    snap, crflags, drba->drba_cred, tx);
		if (drba->drba_snapobj != 0)
			dsl_dataset_rele(snap, FTAG);
		dsl_dataset_rele(ds, FTAG);
	} else {
		dsl_dir_t *dd;
		const char *tail;
		dsl_dataset_t *origin = NULL;

		VERIFY0(dsl_dir_hold(dp, tofs, FTAG, &dd, &tail));

		if (drba->drba_origin != NULL) {
			VERIFY0(dsl_dataset_hold(dp, drba->drba_origin,
			    FTAG, &origin));
		}

		/* Create new dataset. */
		dsobj = dsl_dataset_create_sync(dd,
		    strrchr(tofs, '/') + 1,
		    origin, crflags, drba->drba_cred, tx);
		if (origin != NULL)
			dsl_dataset_rele(origin, FTAG);
		dsl_dir_rele(dd, FTAG);
		drba->drba_cookie->drc_newfs = B_TRUE;
	}
	VERIFY0(dsl_dataset_own_obj(dp, dsobj, dmu_recv_tag, &newds));

	if (drba->drba_cookie->drc_resumable) {
		dsl_dataset_zapify(newds, tx);
		if (drrb->drr_fromguid != 0) {
			VERIFY0(zap_add(mos, dsobj, DS_FIELD_RESUME_FROMGUID,
			    8, 1, &drrb->drr_fromguid, tx));
		}
		VERIFY0(zap_add(mos, dsobj, DS_FIELD_RESUME_TOGUID,
		    8, 1, &drrb->drr_toguid, tx));
		VERIFY0(zap_add(mos, dsobj, DS_FIELD_RESUME_TONAME,
		    1, strlen(drrb->drr_toname) + 1, drrb->drr_toname, tx));
		uint64_t one = 1;
		uint64_t zero = 0;
		VERIFY0(zap_add(mos, dsobj, DS_FIELD_RESUME_OBJECT,
		    8, 1, &one, tx));
		VERIFY0(zap_add(mos, dsobj, DS_FIELD_RESUME_OFFSET,
		    8, 1, &zero, tx));
		VERIFY0(zap_add(mos, dsobj, DS_FIELD_RESUME_BYTES,
		    8, 1, &zero, tx));
		if (DMU_GET_FEATUREFLAGS(drrb->drr_versioninfo) &
		    DMU_BACKUP_FEATURE_LARGE_BLOCKS) {
			VERIFY0(zap_add(mos, dsobj, DS_FIELD_RESUME_LARGEBLOCK,
			    8, 1, &one, tx));
		}
		if (DMU_GET_FEATUREFLAGS(drrb->drr_versioninfo) &
		    DMU_BACKUP_FEATURE_EMBED_DATA) {
			VERIFY0(zap_add(mos, dsobj, DS_FIELD_RESUME_EMBEDOK,
			    8, 1, &one, tx));
		}
		if (DMU_GET_FEATUREFLAGS(drrb->drr_versioninfo) &
		    DMU_BACKUP_FEATURE_COMPRESSED) {
			VERIFY0(zap_add(mos, dsobj, DS_FIELD_RESUME_COMPRESSOK,
			    8, 1, &one, tx));
		}
	}

	dmu_buf_will_dirty(newds->ds_dbuf, tx);
	dsl_dataset_phys(newds)->ds_flags |= DS_FLAG_INCONSISTENT;

	/*
	 * If we actually created a non-clone, we need to create the
	 * objset in our new dataset.
	 */
	rrw_enter(&newds->ds_bp_rwlock, RW_READER, FTAG);
	if (BP_IS_HOLE(dsl_dataset_get_blkptr(newds))) {
		(void) dmu_objset_create_impl(dp->dp_spa,
		    newds, dsl_dataset_get_blkptr(newds), drrb->drr_type, tx);
	}
	rrw_exit(&newds->ds_bp_rwlock, FTAG);

	drba->drba_cookie->drc_ds = newds;

	spa_history_log_internal_ds(newds, "receive", tx, "");
}
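
/*
 * For resumable receives, the ZAP entries created above
 * (DS_FIELD_RESUME_{FROMGUID,TOGUID,TONAME,OBJECT,OFFSET,BYTES} and
 * the per-feature markers) persist the receive state on disk;
 * dmu_recv_resume_begin_check() below validates a restarted stream
 * against this state.
 */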
1775 */ 1776 1777 /* check that the origin matches */ 1778 val = 0; 1779 (void) zap_lookup(dp->dp_meta_objset, ds->ds_object, 1780 DS_FIELD_RESUME_FROMGUID, sizeof (val), 1, &val); 1781 if (drrb->drr_fromguid != val) { 1782 dsl_dataset_rele(ds, FTAG); 1783 return (SET_ERROR(EINVAL)); 1784 } 1785 1786 dsl_dataset_rele(ds, FTAG); 1787 return (0); 1788} 1789 1790static void 1791dmu_recv_resume_begin_sync(void *arg, dmu_tx_t *tx) 1792{ 1793 dmu_recv_begin_arg_t *drba = arg; 1794 dsl_pool_t *dp = dmu_tx_pool(tx); 1795 const char *tofs = drba->drba_cookie->drc_tofs; 1796 dsl_dataset_t *ds; 1797 uint64_t dsobj; 1798 /* 6 extra bytes for /%recv */ 1799 char recvname[ZFS_MAX_DATASET_NAME_LEN + 6]; 1800 1801 (void) snprintf(recvname, sizeof (recvname), "%s/%s", 1802 tofs, recv_clone_name); 1803 1804 if (dsl_dataset_hold(dp, recvname, FTAG, &ds) != 0) { 1805 /* %recv does not exist; continue in tofs */ 1806 VERIFY0(dsl_dataset_hold(dp, tofs, FTAG, &ds)); 1807 drba->drba_cookie->drc_newfs = B_TRUE; 1808 } 1809 1810 /* clear the inconsistent flag so that we can own it */ 1811 ASSERT(DS_IS_INCONSISTENT(ds)); 1812 dmu_buf_will_dirty(ds->ds_dbuf, tx); 1813 dsl_dataset_phys(ds)->ds_flags &= ~DS_FLAG_INCONSISTENT; 1814 dsobj = ds->ds_object; 1815 dsl_dataset_rele(ds, FTAG); 1816 1817 VERIFY0(dsl_dataset_own_obj(dp, dsobj, dmu_recv_tag, &ds)); 1818 1819 dmu_buf_will_dirty(ds->ds_dbuf, tx); 1820 dsl_dataset_phys(ds)->ds_flags |= DS_FLAG_INCONSISTENT; 1821 1822 rrw_enter(&ds->ds_bp_rwlock, RW_READER, FTAG); 1823 ASSERT(!BP_IS_HOLE(dsl_dataset_get_blkptr(ds))); 1824 rrw_exit(&ds->ds_bp_rwlock, FTAG); 1825 1826 drba->drba_cookie->drc_ds = ds; 1827 1828 spa_history_log_internal_ds(ds, "resume receive", tx, ""); 1829} 1830 1831/* 1832 * NB: callers *MUST* call dmu_recv_stream() if dmu_recv_begin() 1833 * succeeds; otherwise we will leak the holds on the datasets. 
1834 */ 1835int 1836dmu_recv_begin(char *tofs, char *tosnap, dmu_replay_record_t *drr_begin, 1837 boolean_t force, boolean_t resumable, char *origin, dmu_recv_cookie_t *drc) 1838{ 1839 dmu_recv_begin_arg_t drba = { 0 }; 1840 1841 bzero(drc, sizeof (dmu_recv_cookie_t)); 1842 drc->drc_drr_begin = drr_begin; 1843 drc->drc_drrb = &drr_begin->drr_u.drr_begin; 1844 drc->drc_tosnap = tosnap; 1845 drc->drc_tofs = tofs; 1846 drc->drc_force = force; 1847 drc->drc_resumable = resumable; 1848 drc->drc_cred = CRED(); 1849 drc->drc_clone = (origin != NULL); 1850 1851 if (drc->drc_drrb->drr_magic == BSWAP_64(DMU_BACKUP_MAGIC)) { 1852 drc->drc_byteswap = B_TRUE; 1853 (void) fletcher_4_incremental_byteswap(drr_begin, 1854 sizeof (dmu_replay_record_t), &drc->drc_cksum); 1855 byteswap_record(drr_begin); 1856 } else if (drc->drc_drrb->drr_magic == DMU_BACKUP_MAGIC) { 1857 (void) fletcher_4_incremental_native(drr_begin, 1858 sizeof (dmu_replay_record_t), &drc->drc_cksum); 1859 } else { 1860 return (SET_ERROR(EINVAL)); 1861 } 1862 1863 drba.drba_origin = origin; 1864 drba.drba_cookie = drc; 1865 drba.drba_cred = CRED(); 1866 1867 if (DMU_GET_FEATUREFLAGS(drc->drc_drrb->drr_versioninfo) & 1868 DMU_BACKUP_FEATURE_RESUMING) { 1869 return (dsl_sync_task(tofs, 1870 dmu_recv_resume_begin_check, dmu_recv_resume_begin_sync, 1871 &drba, 5, ZFS_SPACE_CHECK_NORMAL)); 1872 } else { 1873 return (dsl_sync_task(tofs, 1874 dmu_recv_begin_check, dmu_recv_begin_sync, 1875 &drba, 5, ZFS_SPACE_CHECK_NORMAL)); 1876 } 1877} 1878 1879struct receive_record_arg { 1880 dmu_replay_record_t header; 1881 void *payload; /* Pointer to a buffer containing the payload */ 1882 /* 1883 * If the record is a write, pointer to the arc_buf_t containing the 1884 * payload. 1885 */ 1886 arc_buf_t *write_buf; 1887 int payload_size; 1888 uint64_t bytes_read; /* bytes read from stream when record created */ 1889 boolean_t eos_marker; /* Marks the end of the stream */ 1890 bqueue_node_t node; 1891}; 1892 1893struct receive_writer_arg { 1894 objset_t *os; 1895 boolean_t byteswap; 1896 bqueue_t q; 1897 1898 /* 1899 * These three args are used to signal to the main thread that we're 1900 * done. 1901 */ 1902 kmutex_t mutex; 1903 kcondvar_t cv; 1904 boolean_t done; 1905 1906 int err; 1907 /* A map from guid to dataset to help handle dedup'd streams. */ 1908 avl_tree_t *guid_to_ds_map; 1909 boolean_t resumable; 1910 uint64_t last_object; 1911 uint64_t last_offset; 1912 uint64_t max_object; /* highest object ID referenced in stream */ 1913 uint64_t bytes_read; /* bytes read when current record created */ 1914}; 1915 1916struct objlist { 1917 list_t list; /* List of struct receive_objnode. */ 1918 /* 1919 * Last object looked up. Used to assert that objects are being looked 1920 * up in ascending order. 1921 */ 1922 uint64_t last_lookup; 1923}; 1924 1925struct receive_objnode { 1926 list_node_t node; 1927 uint64_t object; 1928}; 1929 1930struct receive_arg { 1931 objset_t *os; 1932 kthread_t *td; 1933 struct file *fp; 1934 uint64_t voff; /* The current offset in the stream */ 1935 uint64_t bytes_read; 1936 /* 1937 * A record that has had its payload read in, but hasn't yet been handed 1938 * off to the worker thread. 1939 */ 1940 struct receive_record_arg *rrd; 1941 /* A record that has had its header read in, but not its payload. */ 1942 struct receive_record_arg *next_rrd; 1943 zio_cksum_t cksum; 1944 zio_cksum_t prev_cksum; 1945 int err; 1946 boolean_t byteswap; 1947 /* Sorted list of objects not to issue prefetches for. 
	 */
	struct objlist ignore_objlist;
};

typedef struct guid_map_entry {
	uint64_t guid;
	dsl_dataset_t *gme_ds;
	avl_node_t avlnode;
} guid_map_entry_t;

static int
guid_compare(const void *arg1, const void *arg2)
{
	const guid_map_entry_t *gmep1 = (const guid_map_entry_t *)arg1;
	const guid_map_entry_t *gmep2 = (const guid_map_entry_t *)arg2;

	return (AVL_CMP(gmep1->guid, gmep2->guid));
}

static void
free_guid_map_onexit(void *arg)
{
	avl_tree_t *ca = arg;
	void *cookie = NULL;
	guid_map_entry_t *gmep;

	while ((gmep = avl_destroy_nodes(ca, &cookie)) != NULL) {
		dsl_dataset_long_rele(gmep->gme_ds, gmep);
		dsl_dataset_rele(gmep->gme_ds, gmep);
		kmem_free(gmep, sizeof (guid_map_entry_t));
	}
	avl_destroy(ca);
	kmem_free(ca, sizeof (avl_tree_t));
}

static int
restore_bytes(struct receive_arg *ra, void *buf, int len, off_t off,
    ssize_t *resid)
{
	struct uio auio;
	struct iovec aiov;
	int error;

	aiov.iov_base = buf;
	aiov.iov_len = len;
	auio.uio_iov = &aiov;
	auio.uio_iovcnt = 1;
	auio.uio_resid = len;
	auio.uio_segflg = UIO_SYSSPACE;
	auio.uio_rw = UIO_READ;
	auio.uio_offset = off;
	auio.uio_td = ra->td;
#ifdef _KERNEL
	error = fo_read(ra->fp, &auio, ra->td->td_ucred, FOF_OFFSET, ra->td);
#else
	fprintf(stderr, "%s: returning EOPNOTSUPP\n", __func__);
	error = EOPNOTSUPP;
#endif
	*resid = auio.uio_resid;
	return (error);
}

static int
receive_read(struct receive_arg *ra, int len, void *buf)
{
	int done = 0;

	/*
	 * The code doesn't rely on this (lengths being multiples of 8).  See
	 * comment in dump_bytes.
	 */
	ASSERT0(len % 8);

	while (done < len) {
		ssize_t resid;

		ra->err = restore_bytes(ra, buf + done,
		    len - done, ra->voff, &resid);

		if (resid == len - done) {
			/*
			 * Note: ECKSUM indicates that the receive
			 * was interrupted and can potentially be resumed.
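			 * We get here when restore_bytes() consumed
			 * nothing (resid == len - done), i.e. the stream
			 * ended before a complete record arrived.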
2029 */ 2030 ra->err = SET_ERROR(ECKSUM); 2031 } 2032 ra->voff += len - done - resid; 2033 done = len - resid; 2034 if (ra->err != 0) 2035 return (ra->err); 2036 } 2037 2038 ra->bytes_read += len; 2039 2040 ASSERT3U(done, ==, len); 2041 return (0); 2042} 2043 2044noinline static void 2045byteswap_record(dmu_replay_record_t *drr) 2046{ 2047#define DO64(X) (drr->drr_u.X = BSWAP_64(drr->drr_u.X)) 2048#define DO32(X) (drr->drr_u.X = BSWAP_32(drr->drr_u.X)) 2049 drr->drr_type = BSWAP_32(drr->drr_type); 2050 drr->drr_payloadlen = BSWAP_32(drr->drr_payloadlen); 2051 2052 switch (drr->drr_type) { 2053 case DRR_BEGIN: 2054 DO64(drr_begin.drr_magic); 2055 DO64(drr_begin.drr_versioninfo); 2056 DO64(drr_begin.drr_creation_time); 2057 DO32(drr_begin.drr_type); 2058 DO32(drr_begin.drr_flags); 2059 DO64(drr_begin.drr_toguid); 2060 DO64(drr_begin.drr_fromguid); 2061 break; 2062 case DRR_OBJECT: 2063 DO64(drr_object.drr_object); 2064 DO32(drr_object.drr_type); 2065 DO32(drr_object.drr_bonustype); 2066 DO32(drr_object.drr_blksz); 2067 DO32(drr_object.drr_bonuslen); 2068 DO64(drr_object.drr_toguid); 2069 break; 2070 case DRR_FREEOBJECTS: 2071 DO64(drr_freeobjects.drr_firstobj); 2072 DO64(drr_freeobjects.drr_numobjs); 2073 DO64(drr_freeobjects.drr_toguid); 2074 break; 2075 case DRR_WRITE: 2076 DO64(drr_write.drr_object); 2077 DO32(drr_write.drr_type); 2078 DO64(drr_write.drr_offset); 2079 DO64(drr_write.drr_logical_size); 2080 DO64(drr_write.drr_toguid); 2081 ZIO_CHECKSUM_BSWAP(&drr->drr_u.drr_write.drr_key.ddk_cksum); 2082 DO64(drr_write.drr_key.ddk_prop); 2083 DO64(drr_write.drr_compressed_size); 2084 break; 2085 case DRR_WRITE_BYREF: 2086 DO64(drr_write_byref.drr_object); 2087 DO64(drr_write_byref.drr_offset); 2088 DO64(drr_write_byref.drr_length); 2089 DO64(drr_write_byref.drr_toguid); 2090 DO64(drr_write_byref.drr_refguid); 2091 DO64(drr_write_byref.drr_refobject); 2092 DO64(drr_write_byref.drr_refoffset); 2093 ZIO_CHECKSUM_BSWAP(&drr->drr_u.drr_write_byref. 
		    drr_key.ddk_cksum);
		DO64(drr_write_byref.drr_key.ddk_prop);
		break;
	case DRR_WRITE_EMBEDDED:
		DO64(drr_write_embedded.drr_object);
		DO64(drr_write_embedded.drr_offset);
		DO64(drr_write_embedded.drr_length);
		DO64(drr_write_embedded.drr_toguid);
		DO32(drr_write_embedded.drr_lsize);
		DO32(drr_write_embedded.drr_psize);
		break;
	case DRR_FREE:
		DO64(drr_free.drr_object);
		DO64(drr_free.drr_offset);
		DO64(drr_free.drr_length);
		DO64(drr_free.drr_toguid);
		break;
	case DRR_SPILL:
		DO64(drr_spill.drr_object);
		DO64(drr_spill.drr_length);
		DO64(drr_spill.drr_toguid);
		break;
	case DRR_END:
		DO64(drr_end.drr_toguid);
		ZIO_CHECKSUM_BSWAP(&drr->drr_u.drr_end.drr_checksum);
		break;
	}

	if (drr->drr_type != DRR_BEGIN) {
		ZIO_CHECKSUM_BSWAP(&drr->drr_u.drr_checksum.drr_checksum);
	}

#undef DO64
#undef DO32
}

static inline uint8_t
deduce_nblkptr(dmu_object_type_t bonus_type, uint64_t bonus_size)
{
	if (bonus_type == DMU_OT_SA) {
		return (1);
	} else {
		return (1 +
		    ((DN_OLD_MAX_BONUSLEN -
		    MIN(DN_OLD_MAX_BONUSLEN, bonus_size)) >> SPA_BLKPTRSHIFT));
	}
}

static void
save_resume_state(struct receive_writer_arg *rwa,
    uint64_t object, uint64_t offset, dmu_tx_t *tx)
{
	int txgoff = dmu_tx_get_txg(tx) & TXG_MASK;

	if (!rwa->resumable)
		return;

	/*
	 * We use ds_resume_bytes[] != 0 to indicate that we need to
	 * update this on disk, so it must not be 0.
	 */
	ASSERT(rwa->bytes_read != 0);

	/*
	 * We only resume from write records, which have a valid
	 * (non-meta-dnode) object number.
	 */
	ASSERT(object != 0);

	/*
	 * For resuming to work correctly, we must receive records in order,
	 * sorted by object,offset.  This is checked by the callers, but
	 * assert it here for good measure.
	 */
	ASSERT3U(object, >=,
	    rwa->os->os_dsl_dataset->ds_resume_object[txgoff]);
	ASSERT(object != rwa->os->os_dsl_dataset->ds_resume_object[txgoff] ||
	    offset >= rwa->os->os_dsl_dataset->ds_resume_offset[txgoff]);
	ASSERT3U(rwa->bytes_read, >=,
	    rwa->os->os_dsl_dataset->ds_resume_bytes[txgoff]);

	rwa->os->os_dsl_dataset->ds_resume_object[txgoff] = object;
	rwa->os->os_dsl_dataset->ds_resume_offset[txgoff] = offset;
	rwa->os->os_dsl_dataset->ds_resume_bytes[txgoff] = rwa->bytes_read;
}

noinline static int
receive_object(struct receive_writer_arg *rwa, struct drr_object *drro,
    void *data)
{
	dmu_object_info_t doi;
	dmu_tx_t *tx;
	uint64_t object;
	int err;
	uint8_t dn_slots = drro->drr_dn_slots != 0 ?
	    drro->drr_dn_slots : DNODE_MIN_SLOTS;

	if (drro->drr_type == DMU_OT_NONE ||
	    !DMU_OT_IS_VALID(drro->drr_type) ||
	    !DMU_OT_IS_VALID(drro->drr_bonustype) ||
	    drro->drr_checksumtype >= ZIO_CHECKSUM_FUNCTIONS ||
	    drro->drr_compress >= ZIO_COMPRESS_FUNCTIONS ||
	    P2PHASE(drro->drr_blksz, SPA_MINBLOCKSIZE) ||
	    drro->drr_blksz < SPA_MINBLOCKSIZE ||
	    drro->drr_blksz > spa_maxblocksize(dmu_objset_spa(rwa->os)) ||
	    drro->drr_bonuslen >
	    DN_BONUS_SIZE(spa_maxdnodesize(dmu_objset_spa(rwa->os))) ||
	    dn_slots >
	    (spa_maxdnodesize(dmu_objset_spa(rwa->os)) >> DNODE_SHIFT)) {
		return (SET_ERROR(EINVAL));
	}

	err = dmu_object_info(rwa->os, drro->drr_object, &doi);

	if (err != 0 && err != ENOENT && err != EEXIST)
		return (SET_ERROR(EINVAL));

	if (drro->drr_object > rwa->max_object)
		rwa->max_object = drro->drr_object;

	/*
	 * If we are losing blkptrs or changing the block size this must
	 * be a new file instance.  We must clear out the previous file
	 * contents before we can change this type of metadata in the dnode.
	 */
	if (err == 0) {
		int nblkptr;

		object = drro->drr_object;

		nblkptr = deduce_nblkptr(drro->drr_bonustype,
		    drro->drr_bonuslen);

		if (drro->drr_blksz != doi.doi_data_block_size ||
		    nblkptr < doi.doi_nblkptr ||
		    dn_slots != doi.doi_dnodesize >> DNODE_SHIFT) {
			err = dmu_free_long_range(rwa->os, drro->drr_object,
			    0, DMU_OBJECT_END);
			if (err != 0)
				return (SET_ERROR(EINVAL));
		}
	} else if (err == EEXIST) {
		/*
		 * The object requested is currently an interior slot of a
		 * multi-slot dnode.  This will be resolved when the next txg
		 * is synced out, since the send stream will have told us
		 * to free this slot when we freed the associated dnode
		 * earlier in the stream.
		 */
		txg_wait_synced(dmu_objset_pool(rwa->os), 0);
		object = drro->drr_object;
	} else {
		/* object is free and we are about to allocate a new one */
		object = DMU_NEW_OBJECT;
	}

	/*
	 * If this is a multi-slot dnode there is a chance that this
	 * object will expand into a slot that is already used by
	 * another object from the previous snapshot.  We must free
	 * these objects before we attempt to allocate the new dnode.
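	 * A freed dnode can't be reallocated until the freeing txg has
	 * synced, hence the txg_wait_synced() below before claiming.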
2254 */ 2255 if (dn_slots > 1) { 2256 boolean_t need_sync = B_FALSE; 2257 2258 for (uint64_t slot = drro->drr_object + 1; 2259 slot < drro->drr_object + dn_slots; 2260 slot++) { 2261 dmu_object_info_t slot_doi; 2262 2263 err = dmu_object_info(rwa->os, slot, &slot_doi); 2264 if (err == ENOENT || err == EEXIST) 2265 continue; 2266 else if (err != 0) 2267 return (err); 2268 2269 err = dmu_free_long_object(rwa->os, slot); 2270 2271 if (err != 0) 2272 return (err); 2273 2274 need_sync = B_TRUE; 2275 } 2276 2277 if (need_sync) 2278 txg_wait_synced(dmu_objset_pool(rwa->os), 0); 2279 } 2280 2281 tx = dmu_tx_create(rwa->os); 2282 dmu_tx_hold_bonus(tx, object); 2283 err = dmu_tx_assign(tx, TXG_WAIT); 2284 if (err != 0) { 2285 dmu_tx_abort(tx); 2286 return (err); 2287 } 2288 2289 if (object == DMU_NEW_OBJECT) { 2290 /* currently free, want to be allocated */ 2291 err = dmu_object_claim_dnsize(rwa->os, drro->drr_object, 2292 drro->drr_type, drro->drr_blksz, 2293 drro->drr_bonustype, drro->drr_bonuslen, 2294 dn_slots << DNODE_SHIFT, tx); 2295 } else if (drro->drr_type != doi.doi_type || 2296 drro->drr_blksz != doi.doi_data_block_size || 2297 drro->drr_bonustype != doi.doi_bonus_type || 2298 drro->drr_bonuslen != doi.doi_bonus_size || 2299 drro->drr_dn_slots != (doi.doi_dnodesize >> DNODE_SHIFT)) { 2300 /* currently allocated, but with different properties */ 2301 err = dmu_object_reclaim_dnsize(rwa->os, drro->drr_object, 2302 drro->drr_type, drro->drr_blksz, 2303 drro->drr_bonustype, drro->drr_bonuslen, 2304 drro->drr_dn_slots << DNODE_SHIFT, tx); 2305 } 2306 if (err != 0) { 2307 dmu_tx_commit(tx); 2308 return (SET_ERROR(EINVAL)); 2309 } 2310 2311 dmu_object_set_checksum(rwa->os, drro->drr_object, 2312 drro->drr_checksumtype, tx); 2313 dmu_object_set_compress(rwa->os, drro->drr_object, 2314 drro->drr_compress, tx); 2315 2316 if (data != NULL) { 2317 dmu_buf_t *db; 2318 2319 VERIFY0(dmu_bonus_hold(rwa->os, drro->drr_object, FTAG, &db)); 2320 dmu_buf_will_dirty(db, tx); 2321 2322 ASSERT3U(db->db_size, >=, drro->drr_bonuslen); 2323 bcopy(data, db->db_data, drro->drr_bonuslen); 2324 if (rwa->byteswap) { 2325 dmu_object_byteswap_t byteswap = 2326 DMU_OT_BYTESWAP(drro->drr_bonustype); 2327 dmu_ot_byteswap[byteswap].ob_func(db->db_data, 2328 drro->drr_bonuslen); 2329 } 2330 dmu_buf_rele(db, FTAG); 2331 } 2332 dmu_tx_commit(tx); 2333 2334 return (0); 2335} 2336 2337/* ARGSUSED */ 2338noinline static int 2339receive_freeobjects(struct receive_writer_arg *rwa, 2340 struct drr_freeobjects *drrfo) 2341{ 2342 uint64_t obj; 2343 int next_err = 0; 2344 2345 if (drrfo->drr_firstobj + drrfo->drr_numobjs < drrfo->drr_firstobj) 2346 return (SET_ERROR(EINVAL)); 2347 2348 for (obj = drrfo->drr_firstobj == 0 ? 
	    1 : drrfo->drr_firstobj;
	    obj < drrfo->drr_firstobj + drrfo->drr_numobjs && next_err == 0;
	    next_err = dmu_object_next(rwa->os, &obj, FALSE, 0)) {
		int err;

		err = dmu_object_info(rwa->os, obj, NULL);
		if (err == ENOENT)
			continue;
		else if (err != 0)
			return (err);

		err = dmu_free_long_object(rwa->os, obj);
		if (err != 0)
			return (err);

		if (obj > rwa->max_object)
			rwa->max_object = obj;
	}
	if (next_err != ESRCH)
		return (next_err);
	return (0);
}

noinline static int
receive_write(struct receive_writer_arg *rwa, struct drr_write *drrw,
    arc_buf_t *abuf)
{
	dmu_tx_t *tx;
	int err;

	if (drrw->drr_offset + drrw->drr_logical_size < drrw->drr_offset ||
	    !DMU_OT_IS_VALID(drrw->drr_type))
		return (SET_ERROR(EINVAL));

	/*
	 * For resuming to work, records must be in increasing order
	 * by (object, offset).
	 */
	if (drrw->drr_object < rwa->last_object ||
	    (drrw->drr_object == rwa->last_object &&
	    drrw->drr_offset < rwa->last_offset)) {
		return (SET_ERROR(EINVAL));
	}
	rwa->last_object = drrw->drr_object;
	rwa->last_offset = drrw->drr_offset;

	if (rwa->last_object > rwa->max_object)
		rwa->max_object = rwa->last_object;

	if (dmu_object_info(rwa->os, drrw->drr_object, NULL) != 0)
		return (SET_ERROR(EINVAL));

	tx = dmu_tx_create(rwa->os);
	dmu_tx_hold_write(tx, drrw->drr_object,
	    drrw->drr_offset, drrw->drr_logical_size);
	err = dmu_tx_assign(tx, TXG_WAIT);
	if (err != 0) {
		dmu_tx_abort(tx);
		return (err);
	}
	if (rwa->byteswap) {
		dmu_object_byteswap_t byteswap =
		    DMU_OT_BYTESWAP(drrw->drr_type);
		dmu_ot_byteswap[byteswap].ob_func(abuf->b_data,
		    DRR_WRITE_PAYLOAD_SIZE(drrw));
	}

	/* use the bonus buf to look up the dnode in dmu_assign_arcbuf */
	dmu_buf_t *bonus;
	if (dmu_bonus_hold(rwa->os, drrw->drr_object, FTAG, &bonus) != 0) {
		/* the tx was assigned, so it must be committed, not aborted */
		dmu_tx_commit(tx);
		return (SET_ERROR(EINVAL));
	}
	dmu_assign_arcbuf(bonus, drrw->drr_offset, abuf, tx);

	/*
	 * Note: If the receive fails, we want the resume stream to start
	 * with the same record that we last successfully received (as opposed
	 * to the next record), so that we can verify that we are
	 * resuming from the correct location.
	 */
	save_resume_state(rwa, drrw->drr_object, drrw->drr_offset, tx);
	dmu_tx_commit(tx);
	dmu_buf_rele(bonus, FTAG);

	return (0);
}

/*
 * Handle a DRR_WRITE_BYREF record.  This record is used in dedup'ed
 * streams to refer to a copy of the data that is already on the
 * system because it came in earlier in the stream.  This function
 * finds the earlier copy of the data, and uses that copy instead of
 * data from the stream to fulfill this write.
 */
static int
receive_write_byref(struct receive_writer_arg *rwa,
    struct drr_write_byref *drrwbr)
{
	dmu_tx_t *tx;
	int err;
	guid_map_entry_t gmesrch;
	guid_map_entry_t *gmep;
	avl_index_t where;
	objset_t *ref_os = NULL;
	dmu_buf_t *dbp;

	if (drrwbr->drr_offset + drrwbr->drr_length < drrwbr->drr_offset)
		return (SET_ERROR(EINVAL));

	/*
	 * If the GUID of the referenced dataset is different from the
	 * GUID of the target dataset, find the referenced dataset.
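	 * The guid_to_ds_map is filled in by add_ds_to_guidmap() as each
	 * snapshot in the dedup'd stream completes.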
2460 */ 2461 if (drrwbr->drr_toguid != drrwbr->drr_refguid) { 2462 gmesrch.guid = drrwbr->drr_refguid; 2463 if ((gmep = avl_find(rwa->guid_to_ds_map, &gmesrch, 2464 &where)) == NULL) { 2465 return (SET_ERROR(EINVAL)); 2466 } 2467 if (dmu_objset_from_ds(gmep->gme_ds, &ref_os)) 2468 return (SET_ERROR(EINVAL)); 2469 } else { 2470 ref_os = rwa->os; 2471 } 2472 2473 if (drrwbr->drr_object > rwa->max_object) 2474 rwa->max_object = drrwbr->drr_object; 2475 2476 err = dmu_buf_hold(ref_os, drrwbr->drr_refobject, 2477 drrwbr->drr_refoffset, FTAG, &dbp, DMU_READ_PREFETCH); 2478 if (err != 0) 2479 return (err); 2480 2481 tx = dmu_tx_create(rwa->os); 2482 2483 dmu_tx_hold_write(tx, drrwbr->drr_object, 2484 drrwbr->drr_offset, drrwbr->drr_length); 2485 err = dmu_tx_assign(tx, TXG_WAIT); 2486 if (err != 0) { 2487 dmu_tx_abort(tx); 2488 return (err); 2489 } 2490 dmu_write(rwa->os, drrwbr->drr_object, 2491 drrwbr->drr_offset, drrwbr->drr_length, dbp->db_data, tx); 2492 dmu_buf_rele(dbp, FTAG); 2493 2494 /* See comment in restore_write. */ 2495 save_resume_state(rwa, drrwbr->drr_object, drrwbr->drr_offset, tx); 2496 dmu_tx_commit(tx); 2497 return (0); 2498} 2499 2500static int 2501receive_write_embedded(struct receive_writer_arg *rwa, 2502 struct drr_write_embedded *drrwe, void *data) 2503{ 2504 dmu_tx_t *tx; 2505 int err; 2506 2507 if (drrwe->drr_offset + drrwe->drr_length < drrwe->drr_offset) 2508 return (EINVAL); 2509 2510 if (drrwe->drr_psize > BPE_PAYLOAD_SIZE) 2511 return (EINVAL); 2512 2513 if (drrwe->drr_etype >= NUM_BP_EMBEDDED_TYPES) 2514 return (EINVAL); 2515 if (drrwe->drr_compression >= ZIO_COMPRESS_FUNCTIONS) 2516 return (EINVAL); 2517 2518 if (drrwe->drr_object > rwa->max_object) 2519 rwa->max_object = drrwe->drr_object; 2520 2521 tx = dmu_tx_create(rwa->os); 2522 2523 dmu_tx_hold_write(tx, drrwe->drr_object, 2524 drrwe->drr_offset, drrwe->drr_length); 2525 err = dmu_tx_assign(tx, TXG_WAIT); 2526 if (err != 0) { 2527 dmu_tx_abort(tx); 2528 return (err); 2529 } 2530 2531 dmu_write_embedded(rwa->os, drrwe->drr_object, 2532 drrwe->drr_offset, data, drrwe->drr_etype, 2533 drrwe->drr_compression, drrwe->drr_lsize, drrwe->drr_psize, 2534 rwa->byteswap ^ ZFS_HOST_BYTEORDER, tx); 2535 2536 /* See comment in restore_write. 
	save_resume_state(rwa, drrwe->drr_object, drrwe->drr_offset, tx);
	dmu_tx_commit(tx);
	return (0);
}

static int
receive_spill(struct receive_writer_arg *rwa, struct drr_spill *drrs,
    void *data)
{
	dmu_tx_t *tx;
	dmu_buf_t *db, *db_spill;
	int err;

	if (drrs->drr_length < SPA_MINBLOCKSIZE ||
	    drrs->drr_length > spa_maxblocksize(dmu_objset_spa(rwa->os)))
		return (SET_ERROR(EINVAL));

	if (dmu_object_info(rwa->os, drrs->drr_object, NULL) != 0)
		return (SET_ERROR(EINVAL));

	if (drrs->drr_object > rwa->max_object)
		rwa->max_object = drrs->drr_object;

	VERIFY0(dmu_bonus_hold(rwa->os, drrs->drr_object, FTAG, &db));
	if ((err = dmu_spill_hold_by_bonus(db, FTAG, &db_spill)) != 0) {
		dmu_buf_rele(db, FTAG);
		return (err);
	}

	tx = dmu_tx_create(rwa->os);

	dmu_tx_hold_spill(tx, db->db_object);

	err = dmu_tx_assign(tx, TXG_WAIT);
	if (err != 0) {
		dmu_buf_rele(db, FTAG);
		dmu_buf_rele(db_spill, FTAG);
		dmu_tx_abort(tx);
		return (err);
	}
	dmu_buf_will_dirty(db_spill, tx);

	if (db_spill->db_size < drrs->drr_length)
		VERIFY0(dbuf_spill_set_blksz(db_spill,
		    drrs->drr_length, tx));
	bcopy(data, db_spill->db_data, drrs->drr_length);

	dmu_buf_rele(db, FTAG);
	dmu_buf_rele(db_spill, FTAG);

	dmu_tx_commit(tx);
	return (0);
}

/* ARGSUSED */
noinline static int
receive_free(struct receive_writer_arg *rwa, struct drr_free *drrf)
{
	int err;

	if (drrf->drr_length != -1ULL &&
	    drrf->drr_offset + drrf->drr_length < drrf->drr_offset)
		return (SET_ERROR(EINVAL));

	if (dmu_object_info(rwa->os, drrf->drr_object, NULL) != 0)
		return (SET_ERROR(EINVAL));

	if (drrf->drr_object > rwa->max_object)
		rwa->max_object = drrf->drr_object;

	err = dmu_free_long_range(rwa->os, drrf->drr_object,
	    drrf->drr_offset, drrf->drr_length);

	return (err);
}

/* used to destroy the drc_ds on error */
static void
dmu_recv_cleanup_ds(dmu_recv_cookie_t *drc)
{
	if (drc->drc_resumable) {
		/* wait for our resume state to be written to disk */
		txg_wait_synced(drc->drc_ds->ds_dir->dd_pool, 0);
		dsl_dataset_disown(drc->drc_ds, dmu_recv_tag);
	} else {
		char name[ZFS_MAX_DATASET_NAME_LEN];
		dsl_dataset_name(drc->drc_ds, name);
		dsl_dataset_disown(drc->drc_ds, dmu_recv_tag);
		(void) dsl_destroy_head(name);
	}
}

static void
receive_cksum(struct receive_arg *ra, int len, void *buf)
{
	if (ra->byteswap) {
		(void) fletcher_4_incremental_byteswap(buf, len, &ra->cksum);
	} else {
		(void) fletcher_4_incremental_native(buf, len, &ra->cksum);
	}
}

/*
 * Read the payload into a buffer of size len, and update the current record's
 * payload field.
 * Allocate ra->next_rrd and read the next record's header into
 * ra->next_rrd->header.
 * Verify checksum of payload and next record.
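 * Returns 0 on success, an error from receive_read(), or ECKSUM if the
 * header's embedded checksum does not match the running stream checksum.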
2645 */ 2646static int 2647receive_read_payload_and_next_header(struct receive_arg *ra, int len, void *buf) 2648{ 2649 int err; 2650 2651 if (len != 0) { 2652 ASSERT3U(len, <=, SPA_MAXBLOCKSIZE); 2653 err = receive_read(ra, len, buf); 2654 if (err != 0) 2655 return (err); 2656 receive_cksum(ra, len, buf); 2657 2658 /* note: rrd is NULL when reading the begin record's payload */ 2659 if (ra->rrd != NULL) { 2660 ra->rrd->payload = buf; 2661 ra->rrd->payload_size = len; 2662 ra->rrd->bytes_read = ra->bytes_read; 2663 } 2664 } 2665 2666 ra->prev_cksum = ra->cksum; 2667 2668 ra->next_rrd = kmem_zalloc(sizeof (*ra->next_rrd), KM_SLEEP); 2669 err = receive_read(ra, sizeof (ra->next_rrd->header), 2670 &ra->next_rrd->header); 2671 ra->next_rrd->bytes_read = ra->bytes_read; 2672 if (err != 0) { 2673 kmem_free(ra->next_rrd, sizeof (*ra->next_rrd)); 2674 ra->next_rrd = NULL; 2675 return (err); 2676 } 2677 if (ra->next_rrd->header.drr_type == DRR_BEGIN) { 2678 kmem_free(ra->next_rrd, sizeof (*ra->next_rrd)); 2679 ra->next_rrd = NULL; 2680 return (SET_ERROR(EINVAL)); 2681 } 2682 2683 /* 2684 * Note: checksum is of everything up to but not including the 2685 * checksum itself. 2686 */ 2687 ASSERT3U(offsetof(dmu_replay_record_t, drr_u.drr_checksum.drr_checksum), 2688 ==, sizeof (dmu_replay_record_t) - sizeof (zio_cksum_t)); 2689 receive_cksum(ra, 2690 offsetof(dmu_replay_record_t, drr_u.drr_checksum.drr_checksum), 2691 &ra->next_rrd->header); 2692 2693 zio_cksum_t cksum_orig = 2694 ra->next_rrd->header.drr_u.drr_checksum.drr_checksum; 2695 zio_cksum_t *cksump = 2696 &ra->next_rrd->header.drr_u.drr_checksum.drr_checksum; 2697 2698 if (ra->byteswap) 2699 byteswap_record(&ra->next_rrd->header); 2700 2701 if ((!ZIO_CHECKSUM_IS_ZERO(cksump)) && 2702 !ZIO_CHECKSUM_EQUAL(ra->cksum, *cksump)) { 2703 kmem_free(ra->next_rrd, sizeof (*ra->next_rrd)); 2704 ra->next_rrd = NULL; 2705 return (SET_ERROR(ECKSUM)); 2706 } 2707 2708 receive_cksum(ra, sizeof (cksum_orig), &cksum_orig); 2709 2710 return (0); 2711} 2712 2713static void 2714objlist_create(struct objlist *list) 2715{ 2716 list_create(&list->list, sizeof (struct receive_objnode), 2717 offsetof(struct receive_objnode, node)); 2718 list->last_lookup = 0; 2719} 2720 2721static void 2722objlist_destroy(struct objlist *list) 2723{ 2724 for (struct receive_objnode *n = list_remove_head(&list->list); 2725 n != NULL; n = list_remove_head(&list->list)) { 2726 kmem_free(n, sizeof (*n)); 2727 } 2728 list_destroy(&list->list); 2729} 2730 2731/* 2732 * This function looks through the objlist to see if the specified object number 2733 * is contained in the objlist. In the process, it will remove all object 2734 * numbers in the list that are smaller than the specified object number. Thus, 2735 * any lookup of an object number smaller than a previously looked up object 2736 * number will always return false; therefore, all lookups should be done in 2737 * ascending order. 2738 */ 2739static boolean_t 2740objlist_exists(struct objlist *list, uint64_t object) 2741{ 2742 struct receive_objnode *node = list_head(&list->list); 2743 ASSERT3U(object, >=, list->last_lookup); 2744 list->last_lookup = object; 2745 while (node != NULL && node->object < object) { 2746 VERIFY3P(node, ==, list_remove_head(&list->list)); 2747 kmem_free(node, sizeof (*node)); 2748 node = list_head(&list->list); 2749 } 2750 return (node != NULL && node->object == object); 2751} 2752 2753/* 2754 * The objlist is a list of object numbers stored in ascending order. 
 * However, the insertion of new object numbers does not seek out the correct
 * location to store a new object number; instead, it appends it to the list
 * for simplicity.  Thus, any users must take care to only insert new object
 * numbers in ascending order.
 */
static void
objlist_insert(struct objlist *list, uint64_t object)
{
	struct receive_objnode *node = kmem_zalloc(sizeof (*node), KM_SLEEP);
	node->object = object;
#ifdef ZFS_DEBUG
	struct receive_objnode *last_object = list_tail(&list->list);
	uint64_t last_objnum = (last_object != NULL ? last_object->object : 0);
	ASSERT3U(node->object, >, last_objnum);
#endif
	list_insert_tail(&list->list, node);
}

/*
 * Issue the prefetch reads for any necessary indirect blocks.
 *
 * We use the object ignore list to tell us whether or not to issue prefetches
 * for a given object.  We do this for both correctness (in case the blocksize
 * of an object has changed) and performance (if the object doesn't exist,
 * don't needlessly try to issue prefetches).  We also trim the list as we go
 * through the stream to prevent it from growing to an unbounded size.
 *
 * The object numbers within will always be in sorted order, and any write
 * records we see will also be in sorted order, but they're not sorted with
 * respect to each other (i.e. we can get several object records before
 * receiving each object's write records).  As a result, once we've reached a
 * given object number, we can safely remove any reference to lower object
 * numbers in the ignore list.  In practice, we receive up to 32 object records
 * before receiving write records, so the list can have up to 32 nodes in it.
 */
/* ARGSUSED */
static void
receive_read_prefetch(struct receive_arg *ra,
    uint64_t object, uint64_t offset, uint64_t length)
{
	if (!objlist_exists(&ra->ignore_objlist, object)) {
		dmu_prefetch(ra->os, object, 1, offset, length,
		    ZIO_PRIORITY_SYNC_READ);
	}
}

/*
 * Read records off the stream, issuing any necessary prefetches.
 */
static int
receive_read_record(struct receive_arg *ra)
{
	int err;

	switch (ra->rrd->header.drr_type) {
	case DRR_OBJECT:
	{
		struct drr_object *drro = &ra->rrd->header.drr_u.drr_object;
		uint32_t size = P2ROUNDUP(drro->drr_bonuslen, 8);
		void *buf = kmem_zalloc(size, KM_SLEEP);
		dmu_object_info_t doi;
		err = receive_read_payload_and_next_header(ra, size, buf);
		if (err != 0) {
			kmem_free(buf, size);
			return (err);
		}
		err = dmu_object_info(ra->os, drro->drr_object, &doi);
		/*
		 * See receive_read_prefetch for an explanation why we're
		 * storing this object in the ignore_obj_list.
2825 */ 2826 if (err == ENOENT || 2827 (err == 0 && doi.doi_data_block_size != drro->drr_blksz)) { 2828 objlist_insert(&ra->ignore_objlist, drro->drr_object); 2829 err = 0; 2830 } 2831 return (err); 2832 } 2833 case DRR_FREEOBJECTS: 2834 { 2835 err = receive_read_payload_and_next_header(ra, 0, NULL); 2836 return (err); 2837 } 2838 case DRR_WRITE: 2839 { 2840 struct drr_write *drrw = &ra->rrd->header.drr_u.drr_write; 2841 arc_buf_t *abuf; 2842 boolean_t is_meta = DMU_OT_IS_METADATA(drrw->drr_type); 2843 if (DRR_WRITE_COMPRESSED(drrw)) { 2844 ASSERT3U(drrw->drr_compressed_size, >, 0); 2845 ASSERT3U(drrw->drr_logical_size, >=, 2846 drrw->drr_compressed_size); 2847 ASSERT(!is_meta); 2848 abuf = arc_loan_compressed_buf( 2849 dmu_objset_spa(ra->os), 2850 drrw->drr_compressed_size, drrw->drr_logical_size, 2851 drrw->drr_compressiontype); 2852 } else { 2853 abuf = arc_loan_buf(dmu_objset_spa(ra->os), 2854 is_meta, drrw->drr_logical_size); 2855 } 2856 2857 err = receive_read_payload_and_next_header(ra, 2858 DRR_WRITE_PAYLOAD_SIZE(drrw), abuf->b_data); 2859 if (err != 0) { 2860 dmu_return_arcbuf(abuf); 2861 return (err); 2862 } 2863 ra->rrd->write_buf = abuf; 2864 receive_read_prefetch(ra, drrw->drr_object, drrw->drr_offset, 2865 drrw->drr_logical_size); 2866 return (err); 2867 } 2868 case DRR_WRITE_BYREF: 2869 { 2870 struct drr_write_byref *drrwb = 2871 &ra->rrd->header.drr_u.drr_write_byref; 2872 err = receive_read_payload_and_next_header(ra, 0, NULL); 2873 receive_read_prefetch(ra, drrwb->drr_object, drrwb->drr_offset, 2874 drrwb->drr_length); 2875 return (err); 2876 } 2877 case DRR_WRITE_EMBEDDED: 2878 { 2879 struct drr_write_embedded *drrwe = 2880 &ra->rrd->header.drr_u.drr_write_embedded; 2881 uint32_t size = P2ROUNDUP(drrwe->drr_psize, 8); 2882 void *buf = kmem_zalloc(size, KM_SLEEP); 2883 2884 err = receive_read_payload_and_next_header(ra, size, buf); 2885 if (err != 0) { 2886 kmem_free(buf, size); 2887 return (err); 2888 } 2889 2890 receive_read_prefetch(ra, drrwe->drr_object, drrwe->drr_offset, 2891 drrwe->drr_length); 2892 return (err); 2893 } 2894 case DRR_FREE: 2895 { 2896 /* 2897 * It might be beneficial to prefetch indirect blocks here, but 2898 * we don't really have the data to decide for sure. 2899 */ 2900 err = receive_read_payload_and_next_header(ra, 0, NULL); 2901 return (err); 2902 } 2903 case DRR_END: 2904 { 2905 struct drr_end *drre = &ra->rrd->header.drr_u.drr_end; 2906 if (!ZIO_CHECKSUM_EQUAL(ra->prev_cksum, drre->drr_checksum)) 2907 return (SET_ERROR(ECKSUM)); 2908 return (0); 2909 } 2910 case DRR_SPILL: 2911 { 2912 struct drr_spill *drrs = &ra->rrd->header.drr_u.drr_spill; 2913 void *buf = kmem_zalloc(drrs->drr_length, KM_SLEEP); 2914 err = receive_read_payload_and_next_header(ra, drrs->drr_length, 2915 buf); 2916 if (err != 0) 2917 kmem_free(buf, drrs->drr_length); 2918 return (err); 2919 } 2920 default: 2921 return (SET_ERROR(EINVAL)); 2922 } 2923} 2924 2925/* 2926 * Commit the records to the pool. 2927 */ 2928static int 2929receive_process_record(struct receive_writer_arg *rwa, 2930 struct receive_record_arg *rrd) 2931{ 2932 int err; 2933 2934 /* Processing in order, therefore bytes_read should be increasing. 
	 */
	ASSERT3U(rrd->bytes_read, >=, rwa->bytes_read);
	rwa->bytes_read = rrd->bytes_read;

	switch (rrd->header.drr_type) {
	case DRR_OBJECT:
	{
		struct drr_object *drro = &rrd->header.drr_u.drr_object;
		err = receive_object(rwa, drro, rrd->payload);
		kmem_free(rrd->payload, rrd->payload_size);
		rrd->payload = NULL;
		return (err);
	}
	case DRR_FREEOBJECTS:
	{
		struct drr_freeobjects *drrfo =
		    &rrd->header.drr_u.drr_freeobjects;
		return (receive_freeobjects(rwa, drrfo));
	}
	case DRR_WRITE:
	{
		struct drr_write *drrw = &rrd->header.drr_u.drr_write;
		err = receive_write(rwa, drrw, rrd->write_buf);
		/* if receive_write() is successful, it consumes the arc_buf */
		if (err != 0)
			dmu_return_arcbuf(rrd->write_buf);
		rrd->write_buf = NULL;
		rrd->payload = NULL;
		return (err);
	}
	case DRR_WRITE_BYREF:
	{
		struct drr_write_byref *drrwbr =
		    &rrd->header.drr_u.drr_write_byref;
		return (receive_write_byref(rwa, drrwbr));
	}
	case DRR_WRITE_EMBEDDED:
	{
		struct drr_write_embedded *drrwe =
		    &rrd->header.drr_u.drr_write_embedded;
		err = receive_write_embedded(rwa, drrwe, rrd->payload);
		kmem_free(rrd->payload, rrd->payload_size);
		rrd->payload = NULL;
		return (err);
	}
	case DRR_FREE:
	{
		struct drr_free *drrf = &rrd->header.drr_u.drr_free;
		return (receive_free(rwa, drrf));
	}
	case DRR_SPILL:
	{
		struct drr_spill *drrs = &rrd->header.drr_u.drr_spill;
		err = receive_spill(rwa, drrs, rrd->payload);
		kmem_free(rrd->payload, rrd->payload_size);
		rrd->payload = NULL;
		return (err);
	}
	default:
		return (SET_ERROR(EINVAL));
	}
}

/*
 * dmu_recv_stream's worker thread; pull records off the queue, and then call
 * receive_process_record.  When we're done, signal the main thread and exit.
 */
static void
receive_writer_thread(void *arg)
{
	struct receive_writer_arg *rwa = arg;
	struct receive_record_arg *rrd;
	for (rrd = bqueue_dequeue(&rwa->q); !rrd->eos_marker;
	    rrd = bqueue_dequeue(&rwa->q)) {
		/*
		 * If there's an error, the main thread will stop putting
		 * things on the queue, but we need to clear everything in
		 * it before we can exit.
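		 * Draining also returns any loaned arc bufs and frees
		 * record payloads, so nothing the reader allocated leaks.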
3012 */ 3013 if (rwa->err == 0) { 3014 rwa->err = receive_process_record(rwa, rrd); 3015 } else if (rrd->write_buf != NULL) { 3016 dmu_return_arcbuf(rrd->write_buf); 3017 rrd->write_buf = NULL; 3018 rrd->payload = NULL; 3019 } else if (rrd->payload != NULL) { 3020 kmem_free(rrd->payload, rrd->payload_size); 3021 rrd->payload = NULL; 3022 } 3023 kmem_free(rrd, sizeof (*rrd)); 3024 } 3025 kmem_free(rrd, sizeof (*rrd)); 3026 mutex_enter(&rwa->mutex); 3027 rwa->done = B_TRUE; 3028 cv_signal(&rwa->cv); 3029 mutex_exit(&rwa->mutex); 3030 thread_exit(); 3031} 3032 3033static int 3034resume_check(struct receive_arg *ra, nvlist_t *begin_nvl) 3035{ 3036 uint64_t val; 3037 objset_t *mos = dmu_objset_pool(ra->os)->dp_meta_objset; 3038 uint64_t dsobj = dmu_objset_id(ra->os); 3039 uint64_t resume_obj, resume_off; 3040 3041 if (nvlist_lookup_uint64(begin_nvl, 3042 "resume_object", &resume_obj) != 0 || 3043 nvlist_lookup_uint64(begin_nvl, 3044 "resume_offset", &resume_off) != 0) { 3045 return (SET_ERROR(EINVAL)); 3046 } 3047 VERIFY0(zap_lookup(mos, dsobj, 3048 DS_FIELD_RESUME_OBJECT, sizeof (val), 1, &val)); 3049 if (resume_obj != val) 3050 return (SET_ERROR(EINVAL)); 3051 VERIFY0(zap_lookup(mos, dsobj, 3052 DS_FIELD_RESUME_OFFSET, sizeof (val), 1, &val)); 3053 if (resume_off != val) 3054 return (SET_ERROR(EINVAL)); 3055 3056 return (0); 3057} 3058 3059/* 3060 * Read in the stream's records, one by one, and apply them to the pool. There 3061 * are two threads involved; the thread that calls this function will spin up a 3062 * worker thread, read the records off the stream one by one, and issue 3063 * prefetches for any necessary indirect blocks. It will then push the records 3064 * onto an internal blocking queue. The worker thread will pull the records off 3065 * the queue, and actually write the data into the DMU. This way, the worker 3066 * thread doesn't have to wait for reads to complete, since everything it needs 3067 * (the indirect blocks) will be prefetched. 3068 * 3069 * NB: callers *must* call dmu_recv_end() if this succeeds. 3070 */ 3071int 3072dmu_recv_stream(dmu_recv_cookie_t *drc, struct file *fp, offset_t *voffp, 3073 int cleanup_fd, uint64_t *action_handlep) 3074{ 3075 int err = 0; 3076 struct receive_arg ra = { 0 }; 3077 struct receive_writer_arg rwa = { 0 }; 3078 int featureflags; 3079 nvlist_t *begin_nvl = NULL; 3080 3081 ra.byteswap = drc->drc_byteswap; 3082 ra.cksum = drc->drc_cksum; 3083 ra.td = curthread; 3084 ra.fp = fp; 3085 ra.voff = *voffp; 3086 3087 if (dsl_dataset_is_zapified(drc->drc_ds)) { 3088 (void) zap_lookup(drc->drc_ds->ds_dir->dd_pool->dp_meta_objset, 3089 drc->drc_ds->ds_object, DS_FIELD_RESUME_BYTES, 3090 sizeof (ra.bytes_read), 1, &ra.bytes_read); 3091 } 3092 3093 objlist_create(&ra.ignore_objlist); 3094 3095 /* these were verified in dmu_recv_begin */ 3096 ASSERT3U(DMU_GET_STREAM_HDRTYPE(drc->drc_drrb->drr_versioninfo), ==, 3097 DMU_SUBSTREAM); 3098 ASSERT3U(drc->drc_drrb->drr_type, <, DMU_OST_NUMTYPES); 3099 3100 /* 3101 * Open the objset we are modifying. 
3102 */ 3103 VERIFY0(dmu_objset_from_ds(drc->drc_ds, &ra.os)); 3104 3105 ASSERT(dsl_dataset_phys(drc->drc_ds)->ds_flags & DS_FLAG_INCONSISTENT); 3106 3107 featureflags = DMU_GET_FEATUREFLAGS(drc->drc_drrb->drr_versioninfo); 3108 3109 /* if this stream is dedup'ed, set up the avl tree for guid mapping */ 3110 if (featureflags & DMU_BACKUP_FEATURE_DEDUP) { 3111 minor_t minor; 3112 3113 if (cleanup_fd == -1) { 3114 ra.err = SET_ERROR(EBADF); 3115 goto out; 3116 } 3117 ra.err = zfs_onexit_fd_hold(cleanup_fd, &minor); 3118 if (ra.err != 0) { 3119 cleanup_fd = -1; 3120 goto out; 3121 } 3122 3123 if (*action_handlep == 0) { 3124 rwa.guid_to_ds_map = 3125 kmem_alloc(sizeof (avl_tree_t), KM_SLEEP); 3126 avl_create(rwa.guid_to_ds_map, guid_compare, 3127 sizeof (guid_map_entry_t), 3128 offsetof(guid_map_entry_t, avlnode)); 3129 err = zfs_onexit_add_cb(minor, 3130 free_guid_map_onexit, rwa.guid_to_ds_map, 3131 action_handlep); 3132 if (ra.err != 0) 3133 goto out; 3134 } else { 3135 err = zfs_onexit_cb_data(minor, *action_handlep, 3136 (void **)&rwa.guid_to_ds_map); 3137 if (ra.err != 0) 3138 goto out; 3139 } 3140 3141 drc->drc_guid_to_ds_map = rwa.guid_to_ds_map; 3142 } 3143 3144 uint32_t payloadlen = drc->drc_drr_begin->drr_payloadlen; 3145 void *payload = NULL; 3146 if (payloadlen != 0) 3147 payload = kmem_alloc(payloadlen, KM_SLEEP); 3148 3149 err = receive_read_payload_and_next_header(&ra, payloadlen, payload); 3150 if (err != 0) { 3151 if (payloadlen != 0) 3152 kmem_free(payload, payloadlen); 3153 goto out; 3154 } 3155 if (payloadlen != 0) { 3156 err = nvlist_unpack(payload, payloadlen, &begin_nvl, KM_SLEEP); 3157 kmem_free(payload, payloadlen); 3158 if (err != 0) 3159 goto out; 3160 } 3161 3162 if (featureflags & DMU_BACKUP_FEATURE_RESUMING) { 3163 err = resume_check(&ra, begin_nvl); 3164 if (err != 0) 3165 goto out; 3166 } 3167 3168 (void) bqueue_init(&rwa.q, zfs_recv_queue_length, 3169 offsetof(struct receive_record_arg, node)); 3170 cv_init(&rwa.cv, NULL, CV_DEFAULT, NULL); 3171 mutex_init(&rwa.mutex, NULL, MUTEX_DEFAULT, NULL); 3172 rwa.os = ra.os; 3173 rwa.byteswap = drc->drc_byteswap; 3174 rwa.resumable = drc->drc_resumable; 3175 3176 (void) thread_create(NULL, 0, receive_writer_thread, &rwa, 0, &p0, 3177 TS_RUN, minclsyspri); 3178 /* 3179 * We're reading rwa.err without locks, which is safe since we are the 3180 * only reader, and the worker thread is the only writer. It's ok if we 3181 * miss a write for an iteration or two of the loop, since the writer 3182 * thread will keep freeing records we send it until we send it an eos 3183 * marker. 3184 * 3185 * We can leave this loop in 3 ways: First, if rwa.err is 3186 * non-zero. In that case, the writer thread will free the rrd we just 3187 * pushed. Second, if we're interrupted; in that case, either it's the 3188 * first loop and ra.rrd was never allocated, or it's later, and ra.rrd 3189 * has been handed off to the writer thread who will free it. Finally, 3190 * if receive_read_record fails or we're at the end of the stream, then 3191 * we free ra.rrd and exit. 
3192 */ 3193 while (rwa.err == 0) { 3194 if (issig(JUSTLOOKING) && issig(FORREAL)) { 3195 err = SET_ERROR(EINTR); 3196 break; 3197 } 3198 3199 ASSERT3P(ra.rrd, ==, NULL); 3200 ra.rrd = ra.next_rrd; 3201 ra.next_rrd = NULL; 3202 /* Allocates and loads header into ra.next_rrd */ 3203 err = receive_read_record(&ra); 3204 3205 if (ra.rrd->header.drr_type == DRR_END || err != 0) { 3206 kmem_free(ra.rrd, sizeof (*ra.rrd)); 3207 ra.rrd = NULL; 3208 break; 3209 } 3210 3211 bqueue_enqueue(&rwa.q, ra.rrd, 3212 sizeof (struct receive_record_arg) + ra.rrd->payload_size); 3213 ra.rrd = NULL; 3214 } 3215 if (ra.next_rrd == NULL) 3216 ra.next_rrd = kmem_zalloc(sizeof (*ra.next_rrd), KM_SLEEP); 3217 ra.next_rrd->eos_marker = B_TRUE; 3218 bqueue_enqueue(&rwa.q, ra.next_rrd, 1); 3219 3220 mutex_enter(&rwa.mutex); 3221 while (!rwa.done) { 3222 cv_wait(&rwa.cv, &rwa.mutex); 3223 } 3224 mutex_exit(&rwa.mutex); 3225 3226 /* 3227 * If we are receiving a full stream as a clone, all object IDs which 3228 * are greater than the maximum ID referenced in the stream are 3229 * by definition unused and must be freed. Note that it's possible that 3230 * we've resumed this send and the first record we received was the END 3231 * record. In that case, max_object would be 0, but we shouldn't start 3232 * freeing all objects from there; instead we should start from the 3233 * resumeobj. 3234 */ 3235 if (drc->drc_clone && drc->drc_drrb->drr_fromguid == 0) { 3236 uint64_t obj; 3237 if (nvlist_lookup_uint64(begin_nvl, "resume_object", &obj) != 0) 3238 obj = 0; 3239 if (rwa.max_object > obj) 3240 obj = rwa.max_object; 3241 obj++; 3242 int free_err = 0; 3243 int next_err = 0; 3244 3245 while (next_err == 0) { 3246 free_err = dmu_free_long_object(rwa.os, obj); 3247 if (free_err != 0 && free_err != ENOENT) 3248 break; 3249 3250 next_err = dmu_object_next(rwa.os, &obj, FALSE, 0); 3251 } 3252 3253 if (err == 0) { 3254 if (free_err != 0 && free_err != ENOENT) 3255 err = free_err; 3256 else if (next_err != ESRCH) 3257 err = next_err; 3258 } 3259 } 3260 3261 cv_destroy(&rwa.cv); 3262 mutex_destroy(&rwa.mutex); 3263 bqueue_destroy(&rwa.q); 3264 if (err == 0) 3265 err = rwa.err; 3266 3267out: 3268 nvlist_free(begin_nvl); 3269 if ((featureflags & DMU_BACKUP_FEATURE_DEDUP) && (cleanup_fd != -1)) 3270 zfs_onexit_fd_rele(cleanup_fd); 3271 3272 if (err != 0) { 3273 /* 3274 * Clean up references. If receive is not resumable, 3275 * destroy what we created, so we don't leave it in 3276 * the inconsistent state. 3277 */ 3278 dmu_recv_cleanup_ds(drc); 3279 } 3280 3281 *voffp = ra.voff; 3282 objlist_destroy(&ra.ignore_objlist); 3283 return (err); 3284} 3285 3286static int 3287dmu_recv_end_check(void *arg, dmu_tx_t *tx) 3288{ 3289 dmu_recv_cookie_t *drc = arg; 3290 dsl_pool_t *dp = dmu_tx_pool(tx); 3291 int error; 3292 3293 ASSERT3P(drc->drc_ds->ds_owner, ==, dmu_recv_tag); 3294 3295 if (!drc->drc_newfs) { 3296 dsl_dataset_t *origin_head; 3297 3298 error = dsl_dataset_hold(dp, drc->drc_tofs, FTAG, &origin_head); 3299 if (error != 0) 3300 return (error); 3301 if (drc->drc_force) { 3302 /* 3303 * We will destroy any snapshots in tofs (i.e. before 3304 * origin_head) that are after the origin (which is 3305 * the snap before drc_ds, because drc_ds can not 3306 * have any snaps of its own). 
3307 */ 3308 uint64_t obj; 3309 3310 obj = dsl_dataset_phys(origin_head)->ds_prev_snap_obj; 3311 while (obj != 3312 dsl_dataset_phys(drc->drc_ds)->ds_prev_snap_obj) { 3313 dsl_dataset_t *snap; 3314 error = dsl_dataset_hold_obj(dp, obj, FTAG, 3315 &snap); 3316 if (error != 0) 3317 break; 3318 if (snap->ds_dir != origin_head->ds_dir) 3319 error = SET_ERROR(EINVAL); 3320 if (error == 0) { 3321 error = dsl_destroy_snapshot_check_impl( 3322 snap, B_FALSE); 3323 } 3324 obj = dsl_dataset_phys(snap)->ds_prev_snap_obj; 3325 dsl_dataset_rele(snap, FTAG); 3326 if (error != 0) 3327 break; 3328 } 3329 if (error != 0) { 3330 dsl_dataset_rele(origin_head, FTAG); 3331 return (error); 3332 } 3333 } 3334 error = dsl_dataset_clone_swap_check_impl(drc->drc_ds, 3335 origin_head, drc->drc_force, drc->drc_owner, tx); 3336 if (error != 0) { 3337 dsl_dataset_rele(origin_head, FTAG); 3338 return (error); 3339 } 3340 error = dsl_dataset_snapshot_check_impl(origin_head, 3341 drc->drc_tosnap, tx, B_TRUE, 1, drc->drc_cred); 3342 dsl_dataset_rele(origin_head, FTAG); 3343 if (error != 0) 3344 return (error); 3345 3346 error = dsl_destroy_head_check_impl(drc->drc_ds, 1); 3347 } else { 3348 error = dsl_dataset_snapshot_check_impl(drc->drc_ds, 3349 drc->drc_tosnap, tx, B_TRUE, 1, drc->drc_cred); 3350 } 3351 return (error); 3352} 3353 3354static void 3355dmu_recv_end_sync(void *arg, dmu_tx_t *tx) 3356{ 3357 dmu_recv_cookie_t *drc = arg; 3358 dsl_pool_t *dp = dmu_tx_pool(tx); 3359 3360 spa_history_log_internal_ds(drc->drc_ds, "finish receiving", 3361 tx, "snap=%s", drc->drc_tosnap); 3362 3363 if (!drc->drc_newfs) { 3364 dsl_dataset_t *origin_head; 3365 3366 VERIFY0(dsl_dataset_hold(dp, drc->drc_tofs, FTAG, 3367 &origin_head)); 3368 3369 if (drc->drc_force) { 3370 /* 3371 * Destroy any snapshots of drc_tofs (origin_head) 3372 * after the origin (the snap before drc_ds). 
3373 */ 3374 uint64_t obj; 3375 3376 obj = dsl_dataset_phys(origin_head)->ds_prev_snap_obj; 3377 while (obj != 3378 dsl_dataset_phys(drc->drc_ds)->ds_prev_snap_obj) { 3379 dsl_dataset_t *snap; 3380 VERIFY0(dsl_dataset_hold_obj(dp, obj, FTAG, 3381 &snap)); 3382 ASSERT3P(snap->ds_dir, ==, origin_head->ds_dir); 3383 obj = dsl_dataset_phys(snap)->ds_prev_snap_obj; 3384 dsl_destroy_snapshot_sync_impl(snap, 3385 B_FALSE, tx); 3386 dsl_dataset_rele(snap, FTAG); 3387 } 3388 } 3389 VERIFY3P(drc->drc_ds->ds_prev, ==, 3390 origin_head->ds_prev); 3391 3392 dsl_dataset_clone_swap_sync_impl(drc->drc_ds, 3393 origin_head, tx); 3394 dsl_dataset_snapshot_sync_impl(origin_head, 3395 drc->drc_tosnap, tx); 3396 3397 /* set snapshot's creation time and guid */ 3398 dmu_buf_will_dirty(origin_head->ds_prev->ds_dbuf, tx); 3399 dsl_dataset_phys(origin_head->ds_prev)->ds_creation_time = 3400 drc->drc_drrb->drr_creation_time; 3401 dsl_dataset_phys(origin_head->ds_prev)->ds_guid = 3402 drc->drc_drrb->drr_toguid; 3403 dsl_dataset_phys(origin_head->ds_prev)->ds_flags &= 3404 ~DS_FLAG_INCONSISTENT; 3405 3406 dmu_buf_will_dirty(origin_head->ds_dbuf, tx); 3407 dsl_dataset_phys(origin_head)->ds_flags &= 3408 ~DS_FLAG_INCONSISTENT; 3409 3410 drc->drc_newsnapobj = 3411 dsl_dataset_phys(origin_head)->ds_prev_snap_obj; 3412 3413 dsl_dataset_rele(origin_head, FTAG); 3414 dsl_destroy_head_sync_impl(drc->drc_ds, tx); 3415 3416 if (drc->drc_owner != NULL) 3417 VERIFY3P(origin_head->ds_owner, ==, drc->drc_owner); 3418 } else { 3419 dsl_dataset_t *ds = drc->drc_ds; 3420 3421 dsl_dataset_snapshot_sync_impl(ds, drc->drc_tosnap, tx); 3422 3423 /* set snapshot's creation time and guid */ 3424 dmu_buf_will_dirty(ds->ds_prev->ds_dbuf, tx); 3425 dsl_dataset_phys(ds->ds_prev)->ds_creation_time = 3426 drc->drc_drrb->drr_creation_time; 3427 dsl_dataset_phys(ds->ds_prev)->ds_guid = 3428 drc->drc_drrb->drr_toguid; 3429 dsl_dataset_phys(ds->ds_prev)->ds_flags &= 3430 ~DS_FLAG_INCONSISTENT; 3431 3432 dmu_buf_will_dirty(ds->ds_dbuf, tx); 3433 dsl_dataset_phys(ds)->ds_flags &= ~DS_FLAG_INCONSISTENT; 3434 if (dsl_dataset_has_resume_receive_state(ds)) { 3435 (void) zap_remove(dp->dp_meta_objset, ds->ds_object, 3436 DS_FIELD_RESUME_FROMGUID, tx); 3437 (void) zap_remove(dp->dp_meta_objset, ds->ds_object, 3438 DS_FIELD_RESUME_OBJECT, tx); 3439 (void) zap_remove(dp->dp_meta_objset, ds->ds_object, 3440 DS_FIELD_RESUME_OFFSET, tx); 3441 (void) zap_remove(dp->dp_meta_objset, ds->ds_object, 3442 DS_FIELD_RESUME_BYTES, tx); 3443 (void) zap_remove(dp->dp_meta_objset, ds->ds_object, 3444 DS_FIELD_RESUME_TOGUID, tx); 3445 (void) zap_remove(dp->dp_meta_objset, ds->ds_object, 3446 DS_FIELD_RESUME_TONAME, tx); 3447 } 3448 drc->drc_newsnapobj = 3449 dsl_dataset_phys(drc->drc_ds)->ds_prev_snap_obj; 3450 } 3451 3452#if defined(__FreeBSD__) && defined(_KERNEL) 3453 zvol_create_minors(dp->dp_spa, drc->drc_tofs); 3454#endif 3455 3456 /* 3457 * Release the hold from dmu_recv_begin. This must be done before 3458 * we return to open context, so that when we free the dataset's dnode, 3459 * we can evict its bonus buffer. 
3460 */ 3461 dsl_dataset_disown(drc->drc_ds, dmu_recv_tag); 3462 drc->drc_ds = NULL; 3463} 3464 3465static int 3466add_ds_to_guidmap(const char *name, avl_tree_t *guid_map, uint64_t snapobj) 3467{ 3468 dsl_pool_t *dp; 3469 dsl_dataset_t *snapds; 3470 guid_map_entry_t *gmep; 3471 int err; 3472 3473 ASSERT(guid_map != NULL); 3474 3475 err = dsl_pool_hold(name, FTAG, &dp); 3476 if (err != 0) 3477 return (err); 3478 gmep = kmem_alloc(sizeof (*gmep), KM_SLEEP); 3479 err = dsl_dataset_hold_obj(dp, snapobj, gmep, &snapds); 3480 if (err == 0) { 3481 gmep->guid = dsl_dataset_phys(snapds)->ds_guid; 3482 gmep->gme_ds = snapds; 3483 avl_add(guid_map, gmep); 3484 dsl_dataset_long_hold(snapds, gmep); 3485 } else 3486 kmem_free(gmep, sizeof (*gmep)); 3487 3488 dsl_pool_rele(dp, FTAG); 3489 return (err); 3490} 3491 3492static int dmu_recv_end_modified_blocks = 3; 3493 3494static int 3495dmu_recv_existing_end(dmu_recv_cookie_t *drc) 3496{ 3497#ifdef _KERNEL 3498 /* 3499 * We will be destroying the ds; make sure its origin is unmounted if 3500 * necessary. 3501 */ 3502 char name[ZFS_MAX_DATASET_NAME_LEN]; 3503 dsl_dataset_name(drc->drc_ds, name); 3504 zfs_destroy_unmount_origin(name); 3505#endif 3506 3507 return (dsl_sync_task(drc->drc_tofs, 3508 dmu_recv_end_check, dmu_recv_end_sync, drc, 3509 dmu_recv_end_modified_blocks, ZFS_SPACE_CHECK_NORMAL)); 3510} 3511 3512static int 3513dmu_recv_new_end(dmu_recv_cookie_t *drc) 3514{ 3515 return (dsl_sync_task(drc->drc_tofs, 3516 dmu_recv_end_check, dmu_recv_end_sync, drc, 3517 dmu_recv_end_modified_blocks, ZFS_SPACE_CHECK_NORMAL)); 3518} 3519 3520int 3521dmu_recv_end(dmu_recv_cookie_t *drc, void *owner) 3522{ 3523 int error; 3524 3525 drc->drc_owner = owner; 3526 3527 if (drc->drc_newfs) 3528 error = dmu_recv_new_end(drc); 3529 else 3530 error = dmu_recv_existing_end(drc); 3531 3532 if (error != 0) { 3533 dmu_recv_cleanup_ds(drc); 3534 } else if (drc->drc_guid_to_ds_map != NULL) { 3535 (void) add_ds_to_guidmap(drc->drc_tofs, 3536 drc->drc_guid_to_ds_map, 3537 drc->drc_newsnapobj); 3538 } 3539 return (error); 3540} 3541 3542/* 3543 * Return TRUE if this objset is currently being received into. 3544 */ 3545boolean_t 3546dmu_objset_is_receiving(objset_t *os) 3547{ 3548 return (os->os_dsl_dataset != NULL && 3549 os->os_dsl_dataset->ds_owner == dmu_recv_tag); 3550} 3551