
Lines Matching defs:rwa (in /freebsd-13-stable/sys/contrib/openzfs/module/zfs/)

The matches below come from the ZFS receive path (dmu_recv.c); rwa is the struct receive_writer_arg * that carries per-stream writer state through these functions. Each entry is the original source line number followed by the matching line.

1289 save_resume_state(struct receive_writer_arg *rwa,
1294 if (!rwa->resumable)
1301 ASSERT(rwa->bytes_read != 0);
1314 ASSERT3U(object, >=, rwa->os->os_dsl_dataset->ds_resume_object[txgoff]);
1315 ASSERT(object != rwa->os->os_dsl_dataset->ds_resume_object[txgoff] ||
1316 offset >= rwa->os->os_dsl_dataset->ds_resume_offset[txgoff]);
1317 ASSERT3U(rwa->bytes_read, >=,
1318 rwa->os->os_dsl_dataset->ds_resume_bytes[txgoff]);
1320 rwa->os->os_dsl_dataset->ds_resume_object[txgoff] = object;
1321 rwa->os->os_dsl_dataset->ds_resume_offset[txgoff] = offset;
1322 rwa->os->os_dsl_dataset->ds_resume_bytes[txgoff] = rwa->bytes_read;
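The save_resume_state() matches above are the resumable-receive bookkeeping: for the txg slot the current transaction will sync in, the writer records the last object, offset, and stream byte count it has handled, asserting that the cursor never moves backwards. Below is a minimal user-space sketch of that invariant; TXG_SIZE, txgoff, and struct resume_state are illustrative stand-ins for the dataset's ds_resume_* fields, not the real API.

    #include <assert.h>
    #include <stdint.h>
    #include <stdio.h>

    #define TXG_SIZE 4      /* size of the open-txg ring (assumed) */

    struct resume_state {
            uint64_t resume_object[TXG_SIZE];
            uint64_t resume_offset[TXG_SIZE];
            uint64_t resume_bytes[TXG_SIZE];
    };

    /* Ratchet the resume cursor forward for one txg slot. */
    static void
    save_resume_state(struct resume_state *rs, int txgoff,
        uint64_t object, uint64_t offset, uint64_t bytes_read)
    {
            /* records arrive in (object, offset) order: never go backwards */
            assert(object >= rs->resume_object[txgoff]);
            assert(object != rs->resume_object[txgoff] ||
                offset >= rs->resume_offset[txgoff]);
            assert(bytes_read >= rs->resume_bytes[txgoff]);

            rs->resume_object[txgoff] = object;
            rs->resume_offset[txgoff] = offset;
            rs->resume_bytes[txgoff] = bytes_read;
    }

    int
    main(void)
    {
            struct resume_state rs = { { 0 } };

            save_resume_state(&rs, 0, 7, 131072, 262144);
            printf("resume at object %llu, offset %llu\n",
                (unsigned long long)rs.resume_object[0],
                (unsigned long long)rs.resume_offset[0]);
            return (0);
    }

Because the cursor is stored per txg and only ratchets forward, an interrupted receive can restart from the last synced (object, offset) pair instead of the start of the stream.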
1354 receive_handle_existing_object(const struct receive_writer_arg *rwa,
1371 if (rwa->raw && nblkptr != drro->drr_nblkptr)
1409 if (rwa->raw) {
1420 } else if (rwa->full) {
1459 err = receive_object_is_same_generation(rwa->os,
1504 if (rwa->raw) {
1512 err = dmu_free_long_range(rwa->os, drro->drr_object,
1529 if ((rwa->raw && drro->drr_nlevels < doi->doi_indirection) ||
1531 err = dmu_free_long_object(rwa->os, drro->drr_object);
1535 txg_wait_synced(dmu_objset_pool(rwa->os), 0);
1552 if (rwa->raw && *object_to_hold != DMU_NEW_OBJECT && !do_free_range) {
1553 err = dmu_free_long_range(rwa->os, drro->drr_object,
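receive_handle_existing_object() reconciles an object that already exists in the target dataset with the incoming DRR_OBJECT record. The matches show the shape of it: a raw (encrypted) receive ships ciphertext blocks it cannot rewrite, so an incompatible indirect-block geometry (drr_nblkptr, drr_nlevels) is either rejected or forces the object to be freed outright, with a txg_wait_synced() before it can be re-claimed; plain and full receives have more options, including the same-generation probe at line 1459. A schematic sketch of those outcomes follows; the fields are simplified and the exact error-versus-free choices are illustrative, not the function's real control flow.

    #include <errno.h>
    #include <stdbool.h>
    #include <stdio.h>

    enum reuse_action {
            REUSE_IN_PLACE, /* compatible layout: overwrite the object */
            FREE_CONTENTS,  /* dmu_free_long_range(), then reuse the dnode */
            FREE_OBJECT     /* dmu_free_long_object() + txg_wait_synced() */
    };

    static int
    reconcile_existing(bool raw, int cur_nblkptr, int new_nblkptr,
        int cur_nlevels, int new_nlevels, enum reuse_action *action)
    {
            /* a raw receive cannot change the block-pointer count */
            if (raw && new_nblkptr != cur_nblkptr)
                    return (EINVAL);

            if (raw && new_nlevels < cur_nlevels)
                    *action = FREE_OBJECT; /* shrinking indirection tree */
            else if (raw)
                    *action = FREE_CONTENTS;
            else
                    *action = REUSE_IN_PLACE;
            return (0);
    }

    int
    main(void)
    {
            enum reuse_action a;

            if (reconcile_existing(true, 3, 3, 3, 2, &a) == 0)
                    printf("action %d\n", a);   /* FREE_OBJECT */
            return (0);
    }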
1563 receive_object(struct receive_writer_arg *rwa, struct drr_object *drro,
1580 drro->drr_blksz > spa_maxblocksize(dmu_objset_spa(rwa->os)) ||
1582 DN_BONUS_SIZE(spa_maxdnodesize(dmu_objset_spa(rwa->os))) ||
1584 (spa_maxdnodesize(dmu_objset_spa(rwa->os)) >> DNODE_SHIFT)) {
1588 if (rwa->raw) {
1591 * containing this block and stored it in rwa.
1593 if (drro->drr_object < rwa->or_firstobj ||
1594 drro->drr_object >= rwa->or_firstobj + rwa->or_numslots ||
1608 (!rwa->spill && DRR_OBJECT_HAS_SPILL(drro->drr_flags))) {
1618 err = dmu_object_info(rwa->os, drro->drr_object, &doi);
1623 if (drro->drr_object > rwa->max_object)
1624 rwa->max_object = drro->drr_object;
1635 err = receive_handle_existing_object(rwa, drro, &doi, data,
1645 txg_wait_synced(dmu_objset_pool(rwa->os), 0);
1647 if (dmu_object_info(rwa->os, drro->drr_object, NULL) != ENOENT)
1671 err = dmu_object_info(rwa->os, slot, &slot_doi);
1677 err = dmu_free_long_object(rwa->os, slot);
1685 txg_wait_synced(dmu_objset_pool(rwa->os), 0);
1688 tx = dmu_tx_create(rwa->os);
1699 err = dmu_object_claim_dnsize(rwa->os, drro->drr_object,
1708 err = dmu_object_reclaim_dnsize(rwa->os, drro->drr_object,
1711 dn_slots << DNODE_SHIFT, rwa->spill ?
1713 } else if (rwa->spill && !DRR_OBJECT_HAS_SPILL(drro->drr_flags)) {
1719 err = dmu_object_rm_spill(rwa->os, drro->drr_object, tx);
1727 if (rwa->or_crypt_params_present) {
1744 uint64_t offset = rwa->or_firstobj * DNODE_MIN_SIZE;
1746 err = dmu_buf_hold_by_dnode(DMU_META_DNODE(rwa->os),
1753 dmu_buf_set_crypt_params(db, rwa->or_byteorder,
1754 rwa->or_salt, rwa->or_iv, rwa->or_mac, tx);
1758 rwa->or_crypt_params_present = B_FALSE;
1761 dmu_object_set_checksum(rwa->os, drro->drr_object,
1763 dmu_object_set_compress(rwa->os, drro->drr_object,
1767 if (rwa->raw) {
1776 VERIFY0(dmu_object_set_blocksize(rwa->os, drro->drr_object,
1778 VERIFY0(dmu_object_set_nlevels(rwa->os, drro->drr_object,
1785 VERIFY0(dmu_object_set_maxblkid(rwa->os, drro->drr_object,
1794 if (rwa->raw)
1797 VERIFY0(dnode_hold(rwa->os, drro->drr_object, FTAG, &dn));
1809 if (rwa->byteswap && !rwa->raw) {
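receive_object() begins by checking the record against pool-wide limits (lines 1580-1584): the block size may not exceed spa_maxblocksize(), the bonus length must fit in DN_BONUS_SIZE() of the maximum dnode size, and the record may not claim more dnode slots than a maximum-size dnode occupies. A compact sketch of those bounds, with the limits hard-coded as assumptions instead of queried from a pool:

    #include <errno.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Assumed stand-ins for spa_maxblocksize()/spa_maxdnodesize(). */
    #define MAX_BLKSZ (16u << 20)   /* 16 MiB large-block limit */
    #define MAX_DNSIZE 16384u
    #define DNODE_SHIFT 9           /* 512-byte dnode slots */
    #define DN_OVERHEAD 192u        /* DN_BONUS_SIZE() overhead (assumed) */

    static int
    validate_object_record(uint32_t blksz, uint32_t bonuslen, uint32_t dn_slots)
    {
            if (blksz > MAX_BLKSZ)
                    return (EINVAL);
            if (bonuslen > MAX_DNSIZE - DN_OVERHEAD)
                    return (EINVAL);
            if (dn_slots > (MAX_DNSIZE >> DNODE_SHIFT))
                    return (EINVAL);
            return (0);
    }

    int
    main(void)
    {
            printf("%d\n", validate_object_record(1u << 17, 320, 2));  /* 0 */
            printf("%d\n", validate_object_record(32u << 20, 320, 2)); /* 22 */
            return (0);
    }

For raw receives the object must also fall inside the range announced by the preceding DRR_OBJECT_RANGE record (lines 1593-1594), because encryption parameters are tracked per dnode block, and those stashed parameters are applied exactly once, when the first object of that range is created (lines 1727-1758).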
1825 receive_freeobjects(struct receive_writer_arg *rwa,
1837 next_err = dmu_object_next(rwa->os, &obj, FALSE, 0)) {
1841 err = dmu_object_info(rwa->os, obj, &doi);
1847 err = dmu_free_long_object(rwa->os, obj);
1859 * rwa->write_batch list.
1862 flush_write_batch_impl(struct receive_writer_arg *rwa)
1867 if (dnode_hold(rwa->os, rwa->last_object, FTAG, &dn) != 0)
1870 struct receive_record_arg *last_rrd = list_tail(&rwa->write_batch);
1873 struct receive_record_arg *first_rrd = list_head(&rwa->write_batch);
1876 ASSERT3U(rwa->last_object, ==, last_drrw->drr_object);
1877 ASSERT3U(rwa->last_offset, ==, last_drrw->drr_offset);
1879 dmu_tx_t *tx = dmu_tx_create(rwa->os);
1891 while ((rrd = list_head(&rwa->write_batch)) != NULL) {
1895 ASSERT3U(drrw->drr_object, ==, rwa->last_object);
1935 dmu_write_policy(rwa->os, dn, 0, 0, &zp);
1939 if (rwa->raw) {
1944 rwa->byteswap;
1963 } else if (rwa->byteswap) {
2003 save_resume_state(rwa, drrw->drr_object, drrw->drr_offset, tx);
2005 list_remove(&rwa->write_batch, rrd);
2015 flush_write_batch(struct receive_writer_arg *rwa)
2017 if (list_is_empty(&rwa->write_batch))
2019 int err = rwa->err;
2021 err = flush_write_batch_impl(rwa);
2024 while ((rrd = list_remove_head(&rwa->write_batch)) != NULL) {
2029 ASSERT(list_is_empty(&rwa->write_batch));
2034 receive_process_write_record(struct receive_writer_arg *rwa,
2050 if (drrw->drr_object < rwa->last_object ||
2051 (drrw->drr_object == rwa->last_object &&
2052 drrw->drr_offset < rwa->last_offset)) {
2056 struct receive_record_arg *first_rrd = list_head(&rwa->write_batch);
2063 err = flush_write_batch(rwa);
2068 rwa->last_object = drrw->drr_object;
2069 rwa->last_offset = drrw->drr_offset;
2071 if (rwa->last_object > rwa->max_object)
2072 rwa->max_object = rwa->last_object;
2074 list_insert_tail(&rwa->write_batch, rrd);
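flush_write_batch_impl() and receive_process_write_record() implement the batching scheme the comment fragment at line 1859 refers to: WRITE records must arrive in (object, offset) order, consecutive records for the same object accumulate on rwa->write_batch, and the whole batch is committed under a single transaction when a record for a different object shows up (or when a non-WRITE record forces a flush, line 2691). A single-threaded sketch of the pattern, with a plain linked list in place of the kernel list and printf in place of the dmu_write path:

    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>

    struct wrec {                   /* one batched WRITE record */
            uint64_t object, offset, length;
            struct wrec *next;
    };

    struct batcher {
            struct wrec *head, **tail;
            uint64_t last_object, last_offset;
    };

    /* Commit the whole batch "in one transaction". */
    static void
    flush_batch(struct batcher *b)
    {
            struct wrec *r = b->head;

            while (r != NULL) {
                    struct wrec *next = r->next;

                    printf("write obj %llu off %llu len %llu\n",
                        (unsigned long long)r->object,
                        (unsigned long long)r->offset,
                        (unsigned long long)r->length);
                    free(r);
                    r = next;
            }
            b->head = NULL;
            b->tail = &b->head;
    }

    static int
    process_write(struct batcher *b, uint64_t obj, uint64_t off, uint64_t len)
    {
            struct wrec *r;

            /* the stream must present writes in (object, offset) order */
            if (obj < b->last_object ||
                (obj == b->last_object && off < b->last_offset))
                    return (-1);    /* EINVAL in the real code */

            /* a new object closes out the current batch */
            if (b->head != NULL && obj != b->last_object)
                    flush_batch(b);

            b->last_object = obj;
            b->last_offset = off;

            r = calloc(1, sizeof (*r));
            if (r == NULL)
                    return (-1);
            r->object = obj;
            r->offset = off;
            r->length = len;
            *b->tail = r;
            b->tail = &r->next;
            return (0);
    }

    int
    main(void)
    {
            struct batcher b = { NULL, &b.head, 0, 0 };

            process_write(&b, 1, 0, 4096);
            process_write(&b, 1, 4096, 4096);
            process_write(&b, 2, 0, 4096);  /* flushes object 1's writes */
            flush_batch(&b);                /* end-of-stream flush */
            return (0);
    }

As in the original, error handling matters here: flush_write_batch() (lines 2015-2029) records the first failure in rwa->err and then drains the remaining batched records without writing them.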
2083 receive_write_embedded(struct receive_writer_arg *rwa,
2099 if (rwa->raw)
2102 if (drrwe->drr_object > rwa->max_object)
2103 rwa->max_object = drrwe->drr_object;
2105 tx = dmu_tx_create(rwa->os);
2115 dmu_write_embedded(rwa->os, drrwe->drr_object,
2118 rwa->byteswap ^ ZFS_HOST_BYTEORDER, tx);
2121 save_resume_state(rwa, drrwe->drr_object, drrwe->drr_offset, tx);
2127 receive_spill(struct receive_writer_arg *rwa, struct drr_spill *drrs,
2134 drrs->drr_length > spa_maxblocksize(dmu_objset_spa(rwa->os)))
2143 if (rwa->spill && DRR_SPILL_IS_UNMODIFIED(drrs->drr_flags)) {
2148 if (rwa->raw) {
2155 if (dmu_object_info(rwa->os, drrs->drr_object, NULL) != 0)
2158 if (drrs->drr_object > rwa->max_object)
2159 rwa->max_object = drrs->drr_object;
2161 VERIFY0(dmu_bonus_hold(rwa->os, drrs->drr_object, FTAG, &db));
2168 dmu_tx_t *tx = dmu_tx_create(rwa->os);
2192 if (rwa->raw) {
2195 rwa->byteswap;
2197 abuf = arc_loan_raw_buf(dmu_objset_spa(rwa->os),
2203 abuf = arc_loan_buf(dmu_objset_spa(rwa->os),
2206 if (rwa->byteswap) {
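receive_spill() shows the same raw-versus-plain split as the write path: the length is bounded by spa_maxblocksize(); a record flagged DRR_SPILL_IS_UNMODIFIED is skipped when the stream carries spill blocks only for completeness (line 2143); raw streams loan a raw ARC buffer tagged with the computed byteorder, while plain streams loan a normal buffer and byteswap it if the stream was swapped. A schematic selector (the arc_loan_* names refer to the real calls, but everything below is stubbed):

    #include <stdbool.h>
    #include <stdio.h>

    enum buf_kind { BUF_SKIP, BUF_RAW, BUF_PLAIN };

    /*
     * Pick how to stage a spill block.  "unmodified" mirrors
     * DRR_SPILL_IS_UNMODIFIED: the block is in the stream only for
     * completeness and the receiver already has it.
     */
    static enum buf_kind
    stage_spill(bool spill_stream, bool unmodified, bool raw)
    {
            if (spill_stream && unmodified)
                    return (BUF_SKIP);  /* nothing to write */
            if (raw)
                    return (BUF_RAW);   /* arc_loan_raw_buf() path */
            return (BUF_PLAIN);         /* arc_loan_buf(); byteswap if needed */
    }

    int
    main(void)
    {
            printf("%d\n", stage_spill(true, true, false)); /* BUF_SKIP */
            printf("%d\n", stage_spill(false, false, true)); /* BUF_RAW */
            return (0);
    }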
2227 receive_free(struct receive_writer_arg *rwa, struct drr_free *drrf)
2235 if (dmu_object_info(rwa->os, drrf->drr_object, NULL) != 0)
2238 if (drrf->drr_object > rwa->max_object)
2239 rwa->max_object = drrf->drr_object;
2241 err = dmu_free_long_range(rwa->os, drrf->drr_object,
2248 receive_object_range(struct receive_writer_arg *rwa,
2254 * the send stream is byteswapped (rwa->byteswap). Finally,
2258 boolean_t byteorder = ZFS_HOST_BYTEORDER ^ rwa->byteswap ^
2274 !rwa->raw)
2277 if (drror->drr_firstobj > rwa->max_object)
2278 rwa->max_object = drror->drr_firstobj;
2285 rwa->or_crypt_params_present = B_TRUE;
2286 rwa->or_firstobj = drror->drr_firstobj;
2287 rwa->or_numslots = drror->drr_numslots;
2288 bcopy(drror->drr_salt, rwa->or_salt, ZIO_DATA_SALT_LEN);
2289 bcopy(drror->drr_iv, rwa->or_iv, ZIO_DATA_IV_LEN);
2290 bcopy(drror->drr_mac, rwa->or_mac, ZIO_DATA_MAC_LEN);
2291 rwa->or_byteorder = byteorder;
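A DRR_OBJECT_RANGE record carries the per-dnode-block encryption parameters (salt, IV, MAC) a raw receive needs. The writer does not apply them immediately: it stashes them in the or_* fields of rwa until the first object of the range is created, and it derives the stored byteorder by XOR-ing three parities (line 2258): the host's byte order, whether the stream needed byteswapping, and the flag carried in the record. A sketch of the stash; the buffer lengths are assumed to mirror ZIO_DATA_SALT_LEN/IV_LEN/MAC_LEN.

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    /* Lengths assumed to mirror ZIO_DATA_{SALT,IV,MAC}_LEN. */
    #define SALT_LEN 8
    #define IV_LEN 12
    #define MAC_LEN 16

    struct crypt_stash {
            bool present;       /* rwa->or_crypt_params_present */
            bool byteorder;
            uint64_t firstobj, numslots;
            uint8_t salt[SALT_LEN], iv[IV_LEN], mac[MAC_LEN];
    };

    /*
     * Stash the parameters from an object-range record; they are applied
     * later, when the first object of the range is received.  host_be is
     * the host's byte order, swapped says the stream needed byteswapping,
     * rec_be is the byteorder flag carried in the record.
     */
    static void
    stash_crypt_params(struct crypt_stash *cs, uint64_t firstobj,
        uint64_t numslots, bool host_be, bool swapped, bool rec_be,
        const uint8_t *salt, const uint8_t *iv, const uint8_t *mac)
    {
            cs->present = true;
            cs->firstobj = firstobj;
            cs->numslots = numslots;
            cs->byteorder = host_be ^ swapped ^ rec_be; /* three parities */
            memcpy(cs->salt, salt, SALT_LEN);
            memcpy(cs->iv, iv, IV_LEN);
            memcpy(cs->mac, mac, MAC_LEN);
    }

    int
    main(void)
    {
            static const uint8_t salt[SALT_LEN], iv[IV_LEN], mac[MAC_LEN];
            struct crypt_stash cs = { 0 };

            stash_crypt_params(&cs, 128, 32, false, true, false,
                salt, iv, mac);
            printf("stored byteorder flag: %d\n", cs.byteorder);
            return (0);
    }

The bcopy() calls in the original (lines 2288-2290) are the historical BSD spelling of memcpy() with the source and destination arguments swapped.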
2302 receive_redact(struct receive_writer_arg *rwa, struct drr_redact *drrr)
2309 return (receive_free(rwa, &drrf));
2681 receive_process_record(struct receive_writer_arg *rwa,
2687 ASSERT3U(rrd->bytes_read, >=, rwa->bytes_read);
2688 rwa->bytes_read = rrd->bytes_read;
2691 err = flush_write_batch(rwa);
2710 err = receive_object(rwa, drro, rrd->payload);
2719 err = receive_freeobjects(rwa, drrfo);
2724 err = receive_process_write_record(rwa, rrd);
2741 err = receive_write_embedded(rwa, drrwe, rrd->payload);
2749 err = receive_free(rwa, drrf);
2755 err = receive_spill(rwa, drrs, rrd->abd);
2766 err = receive_object_range(rwa, drror);
2772 err = receive_redact(rwa, drrr);
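receive_process_record() is the writer-side dispatcher: it asserts the stream offset only moves forward, updates rwa->bytes_read, flushes any pending write batch before a non-WRITE record is processed (line 2691), and then switches on the record type. A minimal sketch of that ordering discipline:

    #include <assert.h>
    #include <stdint.h>
    #include <stdio.h>

    enum rec_type { R_OBJECT, R_FREEOBJECTS, R_WRITE, R_FREE, R_SPILL };

    struct writer_state {
            uint64_t bytes_read;
            int pending_writes; /* stand-in for the write_batch list */
    };

    static int
    process_record(struct writer_state *ws, enum rec_type t,
        uint64_t bytes_read)
    {
            /* the stream offset only moves forward */
            assert(bytes_read >= ws->bytes_read);
            ws->bytes_read = bytes_read;

            /* any non-WRITE record forces the pending batch out first */
            if (t != R_WRITE && ws->pending_writes > 0) {
                    printf("flush %d batched writes\n", ws->pending_writes);
                    ws->pending_writes = 0;
            }

            switch (t) {
            case R_WRITE:
                    ws->pending_writes++; /* batched, written later */
                    break;
            default:
                    /* each record type has its own handler in the real code */
                    break;
            }
            return (0);
    }

    int
    main(void)
    {
            struct writer_state ws = { 0, 0 };

            process_record(&ws, R_WRITE, 100);
            process_record(&ws, R_WRITE, 200);
            process_record(&ws, R_FREE, 300); /* flushes 2 writes first */
            return (0);
    }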
2792 struct receive_writer_arg *rwa = arg;
2796 for (rrd = bqueue_dequeue(&rwa->q); !rrd->eos_marker;
2797 rrd = bqueue_dequeue(&rwa->q)) {
2804 if (rwa->err == 0) {
2805 err = receive_process_record(rwa, rrd);
2820 if (rwa->err == 0)
2821 rwa->err = err;
2827 int err = flush_write_batch(rwa);
2828 if (rwa->err == 0)
2829 rwa->err = err;
2831 mutex_enter(&rwa->mutex);
2832 rwa->done = B_TRUE;
2833 cv_signal(&rwa->cv);
2834 mutex_exit(&rwa->mutex);
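receive_writer_thread() is the consumer half of a producer/consumer pair: it dequeues records from the bounded queue rwa->q until it sees the end-of-stream marker, keeps consuming (but not processing) records once rwa->err is set so the producer never blocks, performs a final flush, and then sets rwa->done under rwa->mutex and signals rwa->cv for the main thread. A condensed pthread sketch of that handshake, with the multi-slot bqueue replaced by a one-slot mailbox:

    #include <pthread.h>
    #include <stdbool.h>
    #include <stdio.h>

    struct writer_arg {
            pthread_mutex_t mu;
            pthread_cond_t cv;
            bool have_item, eos, done;
            int item, err;
    };

    static void *
    writer_thread(void *arg)
    {
            struct writer_arg *w = arg;

            pthread_mutex_lock(&w->mu);
            for (;;) {
                    while (!w->have_item)
                            pthread_cond_wait(&w->cv, &w->mu);
                    w->have_item = false;
                    if (w->eos)
                            break;          /* end-of-stream marker */
                    if (w->err == 0)
                            printf("process record %d\n", w->item);
                    /* else: keep draining so the producer never blocks */
                    pthread_cond_signal(&w->cv); /* slot is free again */
            }
            w->done = true;         /* the final flush would happen here */
            pthread_cond_signal(&w->cv);
            pthread_mutex_unlock(&w->mu);
            return (NULL);
    }

    int
    main(void)
    {
            struct writer_arg w = { PTHREAD_MUTEX_INITIALIZER,
                PTHREAD_COND_INITIALIZER, false, false, false, 0, 0 };
            pthread_t tid;

            pthread_create(&tid, NULL, writer_thread, &w);
            pthread_mutex_lock(&w.mu);
            for (int i = 0; i < 3; i++) {
                    w.item = i;
                    w.have_item = true;
                    pthread_cond_signal(&w.cv);
                    while (w.have_item)
                            pthread_cond_wait(&w.cv, &w.mu);
            }
            w.eos = true;                   /* enqueue the eos marker */
            w.have_item = true;
            pthread_cond_signal(&w.cv);
            while (!w.done)
                    pthread_cond_wait(&w.cv, &w.mu);
            pthread_mutex_exit:
            pthread_mutex_unlock(&w.mu);
            pthread_join(tid, NULL);
            return (0);
    }

Only the writer thread ever sets done, which is why the main thread's wait loop (lines 3009-3017) can simply re-check the flag after each cv_wait_sig() to guard against spurious wakeups.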
2881 struct receive_writer_arg *rwa = kmem_zalloc(sizeof (*rwa), KM_SLEEP);
2948 (void) bqueue_init(&rwa->q, zfs_recv_queue_ff,
2951 cv_init(&rwa->cv, NULL, CV_DEFAULT, NULL);
2952 mutex_init(&rwa->mutex, NULL, MUTEX_DEFAULT, NULL);
2953 rwa->os = drc->drc_os;
2954 rwa->byteswap = drc->drc_byteswap;
2955 rwa->resumable = drc->drc_resumable;
2956 rwa->raw = drc->drc_raw;
2957 rwa->spill = drc->drc_spill;
2958 rwa->full = (drc->drc_drr_begin->drr_u.drr_begin.drr_fromguid == 0);
2959 rwa->os->os_raw_receive = drc->drc_raw;
2960 list_create(&rwa->write_batch, sizeof (struct receive_record_arg),
2963 (void) thread_create(NULL, 0, receive_writer_thread, rwa, 0, curproc,
2966 * We're reading rwa->err without locks, which is safe since we are the
2972 * We can leave this loop in 3 ways: First, if rwa->err is
2980 while (rwa->err == 0) {
2998 bqueue_enqueue(&rwa->q, drc->drc_rrd,
3007 bqueue_enqueue_flush(&rwa->q, drc->drc_rrd, 1);
3009 mutex_enter(&rwa->mutex);
3010 while (!rwa->done) {
3015 (void) cv_wait_sig(&rwa->cv, &rwa->mutex);
3017 mutex_exit(&rwa->mutex);
3025 uint64_t obj = rwa->max_object + 1;
3030 free_err = dmu_free_long_object(rwa->os, obj);
3034 next_err = dmu_object_next(rwa->os, &obj, FALSE, 0);
3045 cv_destroy(&rwa->cv);
3046 mutex_destroy(&rwa->mutex);
3047 bqueue_destroy(&rwa->q);
3048 list_destroy(&rwa->write_batch);
3050 err = rwa->err;
3067 kmem_free(rwa, sizeof (*rwa));
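After the stream ends, the main thread uses rwa->max_object, the highest object number any record touched, to garbage-collect (lines 3025-3034): on an incremental receive, every allocated object beyond max_object existed in the previous snapshot but was never mentioned by this stream, so it is freed. receive_freeobjects() (lines 1825-1847) walks holes the same way with dmu_object_next(). A sketch of the sweep, with a toy allocation bitmap standing in for dmu_object_next()/dmu_free_long_object():

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    #define NOBJS 32
    static bool allocated[NOBJS];

    /* Next allocated object at or after *obj; dmu_object_next() analogue. */
    static bool
    object_next(uint64_t *obj)
    {
            for (uint64_t o = *obj; o < NOBJS; o++) {
                    if (allocated[o]) {
                            *obj = o;
                            return (true);
                    }
            }
            return (false);
    }

    /* Free everything past the last object the stream mentioned. */
    static void
    free_tail_objects(uint64_t max_object)
    {
            uint64_t obj = max_object + 1;

            while (object_next(&obj)) {
                    allocated[obj] = false; /* dmu_free_long_object() */
                    printf("freed stale object %llu\n",
                        (unsigned long long)obj);
                    obj++;
            }
    }

    int
    main(void)
    {
            allocated[3] = allocated[10] = allocated[20] = true;
            free_tail_objects(10);  /* frees only object 20 */
            return (0);
    }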