zfs_vnops.c (209097) zfs_vnops.c (209962)
1/*
2 * CDDL HEADER START
3 *
4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
7 *
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE

--- 5 unchanged lines hidden (view full) ---

14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 *
19 * CDDL HEADER END
20 */
21/*
22 * Copyright 2008 Sun Microsystems, Inc. All rights reserved.
22 * Copyright 2009 Sun Microsystems, Inc. All rights reserved.
23 * Use is subject to license terms.
24 */
25
26/* Portions Copyright 2007 Jeremy Teo */
27
28#include <sys/types.h>
29#include <sys/param.h>
30#include <sys/time.h>

--- 20 unchanged lines hidden (view full) ---

51#include <sys/spa.h>
52#include <sys/txg.h>
53#include <sys/dbuf.h>
54#include <sys/zap.h>
55#include <sys/dirent.h>
56#include <sys/policy.h>
57#include <sys/sunddi.h>
58#include <sys/filio.h>
59#include <sys/sid.h>
59#include <sys/zfs_ctldir.h>
60#include <sys/zfs_fuid.h>
61#include <sys/dnlc.h>
62#include <sys/zfs_rlock.h>
63#include <sys/extdirent.h>
64#include <sys/kidmap.h>
65#include <sys/bio.h>
66#include <sys/buf.h>

--- 26 unchanged lines hidden (view full) ---

93 * pushing cached pages (which acquires range locks) and syncing out
94 * cached atime changes. Third, zfs_zinactive() may require a new tx,
95 * which could deadlock the system if you were already holding one.
96 * If you must call VN_RELE() within a tx then use VN_RELE_ASYNC().
97 *
98 * (3) All range locks must be grabbed before calling dmu_tx_assign(),
99 * as they can span dmu_tx_assign() calls.
100 *
101 * (4) Always pass zfsvfs->z_assign as the second argument to dmu_tx_assign().
102 * In normal operation, this will be TXG_NOWAIT. During ZIL replay,
103 * it will be a specific txg. Either way, dmu_tx_assign() never blocks.
102 * (4) Always pass TXG_NOWAIT as the second argument to dmu_tx_assign().
104 * This is critical because we don't want to block while holding locks.
105 * Note, in particular, that if a lock is sometimes acquired before
106 * the tx assigns, and sometimes after (e.g. z_lock), then failing to
107 * use a non-blocking assign can deadlock the system. The scenario:
108 *
109 * Thread A has grabbed a lock before calling dmu_tx_assign().
110 * Thread B is in an already-assigned tx, and blocks for this lock.
111 * Thread A calls dmu_tx_assign(TXG_WAIT) and blocks in txg_wait_open()
112 * forever, because the previous txg can't quiesce until B's tx commits.
113 *
114 * If dmu_tx_assign() returns ERESTART and zfsvfs->z_assign is TXG_NOWAIT,
115 * then drop all locks, call dmu_tx_wait(), and try again.
116 *
117 * (5) If the operation succeeded, generate the intent log entry for it
118 * before dropping locks. This ensures that the ordering of events
119 * in the intent log matches the order in which they actually occurred.
119 * During ZIL replay the zfs_log_* functions will update the sequence
120 * number to indicate the zil transaction has replayed.
120 *
121 * (6) At the end of each vnode op, the DMU tx must always commit,
122 * regardless of whether there were any errors.
123 *
124 * (7) After dropping all locks, invoke zil_commit(zilog, seq, foid)
125 * to ensure that synchronous semantics are provided when necessary.
126 *
127 * In general, this is how things should be ordered in each vnode op:
128 *
129 * ZFS_ENTER(zfsvfs); // exit if unmounted
130 * top:
131 * zfs_dirent_lock(&dl, ...) // lock directory entry (may VN_HOLD())
132 * rw_enter(...); // grab any other locks you need
133 * tx = dmu_tx_create(...); // get DMU tx
134 * dmu_tx_hold_*(); // hold each object you might modify
135 * error = dmu_tx_assign(tx, zfsvfs->z_assign); // try to assign
136 * error = dmu_tx_assign(tx, TXG_NOWAIT); // try to assign
136 * if (error) {
137 * rw_exit(...); // drop locks
138 * zfs_dirent_unlock(dl); // unlock directory entry
139 * VN_RELE(...); // release held vnodes
140 * if (error == ERESTART && zfsvfs->z_assign == TXG_NOWAIT) {
141 * if (error == ERESTART) {
141 * dmu_tx_wait(tx);
142 * dmu_tx_abort(tx);
143 * goto top;
144 * }
145 * dmu_tx_abort(tx); // abort DMU tx
146 * ZFS_EXIT(zfsvfs); // finished in zfs
147 * return (error); // really out of space
148 * }

--- 9 unchanged lines hidden (view full) ---

158 * return (error); // done, report error
159 */
160
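Rule (2)'s escape hatch deserves a concrete shape: VN_RELE_ASYNC() hands the final vn_rele() to a taskq, so an inactive-vnode teardown can never try to open a new tx while the caller is still inside one. A minimal sketch of the idiom as it is used later in this file, in the zfs_get_data() path where the txg is stopped from syncing:

	/* Sketch: releasing a vnode while a tx is held open. */
	VN_RELE_ASYNC(ZTOV(zp),
	    dsl_pool_vnrele_taskq(dmu_objset_pool(zfsvfs->z_os)));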
161/* ARGSUSED */
162static int
163zfs_open(vnode_t **vpp, int flag, cred_t *cr, caller_context_t *ct)
164{
165 znode_t *zp = VTOZ(*vpp);
167 zfsvfs_t *zfsvfs = zp->z_zfsvfs;
166
169 ZFS_ENTER(zfsvfs);
170 ZFS_VERIFY_ZP(zp);
171
167 if ((flag & FWRITE) && (zp->z_phys->zp_flags & ZFS_APPENDONLY) &&
168 ((flag & FAPPEND) == 0)) {
174 ZFS_EXIT(zfsvfs);
169 return (EPERM);
170 }
171
172 if (!zfs_has_ctldir(zp) && zp->z_zfsvfs->z_vscan &&
173 ZTOV(zp)->v_type == VREG &&
174 !(zp->z_phys->zp_flags & ZFS_AV_QUARANTINED) &&
175 zp->z_phys->zp_size > 0)
176 if (fs_vscan(*vpp, cr, 0) != 0)
181 zp->z_phys->zp_size > 0) {
182 if (fs_vscan(*vpp, cr, 0) != 0) {
183 ZFS_EXIT(zfsvfs);
177 return (EACCES);
184 return (EACCES);
185 }
186 }
178
179 /* Keep a count of the synchronous opens in the znode */
180 if (flag & (FSYNC | FDSYNC))
181 atomic_inc_32(&zp->z_sync_cnt);
182
192 ZFS_EXIT(zfsvfs);
183 return (0);
184}
185
186/* ARGSUSED */
187static int
188zfs_close(vnode_t *vp, int flag, int count, offset_t offset, cred_t *cr,
189 caller_context_t *ct)
190{
191 znode_t *zp = VTOZ(vp);
202 zfsvfs_t *zfsvfs = zp->z_zfsvfs;
192
204 ZFS_ENTER(zfsvfs);
205 ZFS_VERIFY_ZP(zp);
206
193 /* Decrement the synchronous opens in the znode */
194 if ((flag & (FSYNC | FDSYNC)) && (count == 1))
195 atomic_dec_32(&zp->z_sync_cnt);
196
197 /*
198 * Clean up any locks held by this process on the vp.
199 */
200 cleanlocks(vp, ddi_get_pid(), 0);
201 cleanshares(vp, ddi_get_pid());
202
203 if (!zfs_has_ctldir(zp) && zp->z_zfsvfs->z_vscan &&
204 ZTOV(zp)->v_type == VREG &&
205 !(zp->z_phys->zp_flags & ZFS_AV_QUARANTINED) &&
206 zp->z_phys->zp_size > 0)
207 VERIFY(fs_vscan(vp, cr, 1) == 0);
208
223 ZFS_EXIT(zfsvfs);
209 return (0);
210}
211
212/*
213 * Lseek support for finding holes (cmd == _FIO_SEEK_HOLE) and
214 * data (cmd == _FIO_SEEK_DATA). "off" is an in/out parameter.
215 */
216static int

--- 74 unchanged lines hidden (view full) ---

291 return (error);
292 if (ddi_copyout(&off, (void *)data, sizeof (off), flag))
293 return (EFAULT);
294 return (0);
295 }
296 return (ENOTTY);
297}
298
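The "in/out parameter" convention above means a caller seeds off with the offset to search from and reads the result back out of the same variable. A hypothetical userland caller might look like this (sketch only; ENXIO is the errno this implementation hands back when nothing is found past the given offset):

	#include <sys/types.h>
	#include <sys/ioctl.h>
	#include <sys/filio.h>
	#include <errno.h>

	off_t off = 0;			/* in: start searching here */
	if (ioctl(fd, _FIO_SEEK_DATA, &off) == 0) {
		/* out: off is now the start of the next data region */
	} else if (errno == ENXIO) {
		/* no data past the original offset */
	}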
314static vm_page_t
315page_lookup(vnode_t *vp, int64_t start, int64_t off, int64_t nbytes)
316{
317 vm_object_t obj;
318 vm_page_t pp;
319
320 obj = vp->v_object;
321 VM_OBJECT_LOCK_ASSERT(obj, MA_OWNED);
322
323 for (;;) {
324 if ((pp = vm_page_lookup(obj, OFF_TO_IDX(start))) != NULL &&
325 vm_page_is_valid(pp, (vm_offset_t)off, nbytes)) {
326 if (vm_page_sleep_if_busy(pp, FALSE, "zfsmwb"))
327 continue;
328 vm_page_busy(pp);
329 vm_page_lock_queues();
330 vm_page_undirty(pp);
331 vm_page_unlock_queues();
332 } else {
333 if (__predict_false(obj->cache != NULL)) {
334 vm_page_cache_free(obj, OFF_TO_IDX(start),
335 OFF_TO_IDX(start) + 1);
336 }
337 pp = NULL;
338 }
339 break;
340 }
341 return (pp);
342}
343
344static void
345page_unlock(vm_page_t pp)
346{
347
348 vm_page_wakeup(pp);
349}
350
351static caddr_t
352zfs_map_page(vm_page_t pp, struct sf_buf **sfp)
353{
354
355 sched_pin();
356 *sfp = sf_buf_alloc(pp, SFB_CPUPRIVATE);
357 return ((caddr_t)sf_buf_kva(*sfp));
358}
359
360static void
361zfs_unmap_page(struct sf_buf *sf)
362{
363
364 sf_buf_free(sf);
365 sched_unpin();
366}
367
368
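The four helpers above factor out the page-cache plumbing that mappedwrite() and mappedread() previously open-coded: page_lookup() yields a busied, undirtied page (or NULL), while zfs_map_page()/zfs_unmap_page() bracket a CPU-private sf_buf mapping between sched_pin() and sched_unpin(). A minimal hypothetical caller, assuming pp came back non-NULL from page_lookup():

	struct sf_buf *sf;
	caddr_t va;

	va = zfs_map_page(pp, &sf);	/* sched_pin() + sf_buf_alloc() */
	bcopy(src, va + off, nbytes);	/* touch the page through the mapping */
	zfs_unmap_page(sf);		/* sf_buf_free() + sched_unpin() */
	page_unlock(pp);		/* vm_page_wakeup(): clear the busy state */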
299/*
300 * When a file is memory mapped, we must keep the IO data synchronized
301 * between the DMU cache and the memory mapped pages. What this means:
302 *
303 * On Write: If we find a memory mapped page, we write to *both*
304 * the page and the dmu buffer.
305 *
306 * NOTE: We will always "break up" the IO into PAGESIZE uiomoves when
307 * the file is memory mapped.
308 */
375 */
309static int
310mappedwrite(vnode_t *vp, int nbytes, uio_t *uio, dmu_tx_t *tx)
376
377static void
378update_pages(vnode_t *vp, int64_t start, int len, objset_t *os, uint64_t oid,
379 int segflg, dmu_tx_t *tx)
311{
312 znode_t *zp = VTOZ(vp);
313 objset_t *os = zp->z_zfsvfs->z_os;
314 vm_object_t obj;
315 vm_page_t m;
316 struct sf_buf *sf;
317 int64_t start, off;
318 int len = nbytes;
319 int error = 0;
320 uint64_t dirbytes;
383 int64_t off;
321
322 ASSERT(vp->v_mount != NULL);
323 obj = vp->v_object;
324 ASSERT(obj != NULL);
325
326 start = uio->uio_loffset;
327 off = start & PAGEOFFSET;
328 dirbytes = 0;
329 VM_OBJECT_LOCK(obj);
330 for (start &= PAGEMASK; len > 0; start += PAGESIZE) {
331 uint64_t bytes = MIN(PAGESIZE - off, len);
332 uint64_t fsize;
392 vm_page_t pp;
393 uint64_t nbytes = MIN(PAGESIZE - off, len);
333
334again:
335 if ((m = vm_page_lookup(obj, OFF_TO_IDX(start))) != NULL &&
336 vm_page_is_valid(m, (vm_offset_t)off, bytes)) {
337 uint64_t woff;
395 if ((pp = page_lookup(vp, start, off, nbytes)) != NULL) {
338 caddr_t va;
339
340 if (vm_page_sleep_if_busy(m, FALSE, "zfsmwb"))
341 goto again;
342 fsize = obj->un_pager.vnp.vnp_size;
343 vm_page_busy(m);
344 vm_page_lock_queues();
345 vm_page_undirty(m);
346 vm_page_unlock_queues();
347 VM_OBJECT_UNLOCK(obj);
348 if (dirbytes > 0) {
349 error = dmu_write_uio(os, zp->z_id, uio,
350 dirbytes, tx);
351 dirbytes = 0;
399 va = zfs_map_page(pp, &sf);
400 if (segflg == UIO_NOCOPY) {
401 (void) dmu_write(os, oid, start+off, nbytes,
402 va+off, tx);
403 } else {
404 (void) dmu_read(os, oid, start+off, nbytes,
405 va+off, DMU_READ_PREFETCH);
352 }
353 if (error == 0) {
354 sched_pin();
355 sf = sf_buf_alloc(m, SFB_CPUPRIVATE);
356 va = (caddr_t)sf_buf_kva(sf);
357 woff = uio->uio_loffset - off;
358 error = uiomove(va + off, bytes, UIO_WRITE, uio);
359 /*
360 * The uiomove() above could have been partially
361 * successful, that's why we call dmu_write()
362 * below unconditionally. The page was marked
363 * non-dirty above and we would lose the changes
364 * without doing so. If the uiomove() failed
365 * entirely, well, we just write what we got
366 * before one more time.
367 */
368 dmu_write(os, zp->z_id, woff,
369 MIN(PAGESIZE, fsize - woff), va, tx);
370 sf_buf_free(sf);
371 sched_unpin();
372 }
407 zfs_unmap_page(sf);
373 VM_OBJECT_LOCK(obj);
374 vm_page_wakeup(m);
375 } else {
376 if (__predict_false(obj->cache != NULL)) {
377 vm_page_cache_free(obj, OFF_TO_IDX(start),
378 OFF_TO_IDX(start) + 1);
379 }
380 dirbytes += bytes;
409 page_unlock(pp);
410
381 }
382 len -= bytes;
412 len -= nbytes;
383 off = 0;
384 if (error)
385 break;
386 }
387 VM_OBJECT_UNLOCK(obj);
388 if (error == 0 && dirbytes > 0)
389 error = dmu_write_uio(os, zp->z_id, uio, dirbytes, tx);
390 return (error);
391}
392
393/*
394 * When a file is memory mapped, we must keep the IO data synchronized
395 * between the DMU cache and the memory mapped pages. What this means:
396 *
397 * On Read: We "read" preferentially from memory mapped pages,
398 * else we default from the dmu buffer.

--- 65 unchanged lines hidden (view full) ---

464 dirbytes);
465 dirbytes = 0;
466 }
467 if (error == 0) {
468 sched_pin();
469 sf = sf_buf_alloc(m, SFB_CPUPRIVATE);
470 va = (caddr_t)sf_buf_kva(sf);
471 error = dmu_read(os, zp->z_id, start + off,
472 bytes, (void *)(va + off));
497 bytes, (void *)(va + off),
498 DMU_READ_PREFETCH);
473 sf_buf_free(sf);
474 sched_unpin();
475 }
476 VM_OBJECT_LOCK(obj);
477 vm_page_wakeup(m);
478 if (error == 0)
479 uio->uio_resid -= bytes;
480 } else {

--- 204 unchanged lines hidden (view full) ---

685 zfsvfs_t *zfsvfs = zp->z_zfsvfs;
686 zilog_t *zilog;
687 offset_t woff;
688 ssize_t n, nbytes;
689 rl_t *rl;
690 int max_blksz = zfsvfs->z_max_blksz;
691 uint64_t pflags;
692 int error;
719 arc_buf_t *abuf;
693
694 /*
695 * Fasttrack empty write
696 */
697 n = start_resid;
698 if (n == 0)
699 return (0);
700

--- 80 unchanged lines hidden (view full) ---

781 end_size = MAX(zp->z_phys->zp_size, woff + n);
782
783 /*
784 * Write the file in reasonable size chunks. Each chunk is written
785 * in a separate transaction; this keeps the intent log records small
786 * and allows us to do more fine-grained space accounting.
787 */
788 while (n > 0) {
720
721 /*
722 * Fasttrack empty write
723 */
724 n = start_resid;
725 if (n == 0)
726 return (0);
727

--- 80 unchanged lines hidden (view full) ---

808 end_size = MAX(zp->z_phys->zp_size, woff + n);
809
810 /*
811 * Write the file in reasonable size chunks. Each chunk is written
812 * in a separate transaction; this keeps the intent log records small
813 * and allows us to do more fine-grained space accounting.
814 */
815 while (n > 0) {
816 abuf = NULL;
817 woff = uio->uio_loffset;
818
819again:
820 if (zfs_usergroup_overquota(zfsvfs,
821 B_FALSE, zp->z_phys->zp_uid) ||
822 zfs_usergroup_overquota(zfsvfs,
823 B_TRUE, zp->z_phys->zp_gid)) {
824 if (abuf != NULL)
825 dmu_return_arcbuf(abuf);
826 error = EDQUOT;
827 break;
828 }
829
789 /*
831 * If dmu_assign_arcbuf() is expected to execute with minimum
832 * overhead loan an arc buffer and copy user data to it before
833 * we enter a txg. This avoids holding a txg forever while we
834 * pagefault on a hanging NFS server mapping.
835 */
836 if (abuf == NULL && n >= max_blksz &&
837 woff >= zp->z_phys->zp_size &&
838 P2PHASE(woff, max_blksz) == 0 &&
839 zp->z_blksz == max_blksz) {
840 size_t cbytes;
841
842 abuf = dmu_request_arcbuf(zp->z_dbuf, max_blksz);
843 ASSERT(abuf != NULL);
844 ASSERT(arc_buf_size(abuf) == max_blksz);
845 if (error = uiocopy(abuf->b_data, max_blksz,
846 UIO_WRITE, uio, &cbytes)) {
847 dmu_return_arcbuf(abuf);
848 break;
849 }
850 ASSERT(cbytes == max_blksz);
851 }
852
853 /*
790 * Start a transaction.
791 */
792 woff = uio->uio_loffset;
793 tx = dmu_tx_create(zfsvfs->z_os);
794 dmu_tx_hold_bonus(tx, zp->z_id);
795 dmu_tx_hold_write(tx, zp->z_id, woff, MIN(n, max_blksz));
796 error = dmu_tx_assign(tx, zfsvfs->z_assign);
859 error = dmu_tx_assign(tx, TXG_NOWAIT);
797 if (error) {
798 if (error == ERESTART &&
799 zfsvfs->z_assign == TXG_NOWAIT) {
861 if (error == ERESTART) {
800 dmu_tx_wait(tx);
801 dmu_tx_abort(tx);
802 continue;
864 goto again;
803 }
804 dmu_tx_abort(tx);
867 if (abuf != NULL)
868 dmu_return_arcbuf(abuf);
805 break;
806 }
807
808 /*
809 * If zfs_range_lock() over-locked we grow the blocksize
810 * and then reduce the lock range. This will only happen
811 * on the first iteration since zfs_range_reduce() will
812 * shrink down r_len to the appropriate size.

--- 15 unchanged lines hidden (view full) ---

828 * XXX - should we really limit each write to z_max_blksz?
829 * Perhaps we should use SPA_MAXBLOCKSIZE chunks?
830 */
831 nbytes = MIN(n, max_blksz - P2PHASE(woff, max_blksz));
832
833 if (woff + nbytes > zp->z_phys->zp_size)
834 vnode_pager_setsize(vp, woff + nbytes);
835
836 rw_enter(&zp->z_map_lock, RW_READER);
837
838 tx_bytes = uio->uio_resid;
839 if (vn_has_cached_data(vp)) {
840 rw_exit(&zp->z_map_lock);
841 error = mappedwrite(vp, nbytes, uio, tx);
900 if (abuf == NULL) {
901 tx_bytes = uio->uio_resid;
902 error = dmu_write_uio(zfsvfs->z_os, zp->z_id, uio,
903 nbytes, tx);
904 tx_bytes -= uio->uio_resid;
842 } else {
843 error = dmu_write_uio(zfsvfs->z_os, zp->z_id,
844 uio, nbytes, tx);
845 rw_exit(&zp->z_map_lock);
906 tx_bytes = nbytes;
907 ASSERT(tx_bytes == max_blksz);
908 dmu_assign_arcbuf(zp->z_dbuf, woff, abuf, tx);
909 ASSERT(tx_bytes <= uio->uio_resid);
910 uioskip(uio, tx_bytes);
846 }
847 tx_bytes -= uio->uio_resid;
848
913 /*
914 * XXXPJD: There are some cases (triggered by fsx) where
915 * vn_has_cached_data(vp) returns false when it should
916 * return true. This should be investigated.
917 */
918#if 0
919 if (tx_bytes && vn_has_cached_data(vp))
920#else
921 if (tx_bytes && vp->v_object != NULL)
922#endif
923 {
924 update_pages(vp, woff, tx_bytes, zfsvfs->z_os,
925 zp->z_id, uio->uio_segflg, tx);
926 }
927
928 /*
850 * If we made no progress, we're done. If we made even
851 * partial progress, update the znode and ZIL accordingly.
852 */
853 if (tx_bytes == 0) {
854 dmu_tx_commit(tx);
855 ASSERT(error != 0);
856 break;
857 }

--- 43 unchanged lines hidden (view full) ---

901 }
902
903 zfs_range_unlock(rl);
904
905 /*
906 * If we're in replay mode, or we made no progress, return error.
907 * Otherwise, it's at least a partial write, so it's successful.
908 */
909 if (zfsvfs->z_assign >= TXG_INITIAL || uio->uio_resid == start_resid) {
988 if (zfsvfs->z_replay || uio->uio_resid == start_resid) {
910 ZFS_EXIT(zfsvfs);
911 return (error);
912 }
913
914 if (ioflag & (FSYNC | FDSYNC))
915 zil_commit(zilog, zp->z_last_itx, zp->z_id);
916
917 ZFS_EXIT(zfsvfs);

--- 65 unchanged lines hidden (view full) ---

983 */
984 if (buf != NULL) { /* immediate write */
985 rl = zfs_range_lock(zp, off, dlen, RL_READER);
986 /* test for truncation needs to be done while range locked */
987 if (off >= zp->z_phys->zp_size) {
988 error = ENOENT;
989 goto out;
990 }
991 VERIFY(0 == dmu_read(os, lr->lr_foid, off, dlen, buf));
1070 VERIFY(0 == dmu_read(os, lr->lr_foid, off, dlen, buf,
1071 DMU_READ_NO_PREFETCH));
992 } else { /* indirect write */
993 uint64_t boff; /* block starting offset */
994
995 /*
996 * Have to lock the whole block to ensure when it's
997 * written out and its checksum is being calculated
998 * that no one can change the data. We need to re-check
999 * blocksize after we get the lock in case it's changed!

--- 22 unchanged lines hidden (view full) ---

1022 zgd->zgd_bp = &lr->lr_blkptr;
1023 VERIFY(0 == dmu_buf_hold(os, lr->lr_foid, boff, zgd, &db));
1024 ASSERT(boff == db->db_offset);
1025 lr->lr_blkoff = off - boff;
1026 error = dmu_sync(zio, db, &lr->lr_blkptr,
1027 lr->lr_common.lrc_txg, zfs_get_done, zgd);
1028 ASSERT((error && error != EINPROGRESS) ||
1029 lr->lr_length <= zp->z_blksz);
1030 if (error == 0)
1110 if (error == 0) {
1111 /*
1112 * dmu_sync() can compress a block of zeros to a null
1113 * blkptr but the block size still needs to be passed
1114 * through to replay.
1115 */
1116 BP_SET_LSIZE(&lr->lr_blkptr, db->db_size);
1031 zil_add_block(zfsvfs->z_log, &lr->lr_blkptr);
1118 }
1119
1032 /*
1033 * If we get EINPROGRESS, then we need to wait for a
1034 * write IO initiated by dmu_sync() to complete before
1035 * we can release this dbuf. We will finish everything
1036 * up in the zfs_get_done() callback.
1037 */
1038 if (error == EINPROGRESS)
1126 if (error == EINPROGRESS) {
1039 return (0);
1128 } else if (error == EALREADY) {
1129 lr->lr_common.lrc_txtype = TX_WRITE2;
1130 error = 0;
1131 }
1040 dmu_buf_rele(db, zgd);
1041 kmem_free(zgd, sizeof (zgd_t));
1042 }
1043out:
1044 zfs_range_unlock(rl);
1045 /*
1046 * Release the vnode asynchronously as we currently have the
1047 * txg stopped from syncing.

--- 226 unchanged lines hidden (view full) ---

1274{
1275 znode_t *zp, *dzp = VTOZ(dvp);
1276 zfsvfs_t *zfsvfs = dzp->z_zfsvfs;
1277 zilog_t *zilog;
1278 objset_t *os;
1279 zfs_dirlock_t *dl;
1280 dmu_tx_t *tx;
1281 int error;
1282 zfs_acl_t *aclp = NULL;
1283 zfs_fuid_info_t *fuidp = NULL;
1374 ksid_t *ksid;
1375 uid_t uid;
1376 gid_t gid = crgetgid(cr);
1377 zfs_acl_ids_t acl_ids;
1378 boolean_t fuid_dirtied;
1284 void *vsecp = NULL;
1285 int flag = 0;
1286
1287 /*
1288 * If we have an ephemeral id, ACL, or XVATTR then
1289 * make sure file system is at proper version
1290 */
1291
1387 ksid = crgetsid(cr, KSID_OWNER);
1388 if (ksid)
1389 uid = ksid_getid(ksid);
1390 else
1391 uid = crgetuid(cr);
1292 if (zfsvfs->z_use_fuids == B_FALSE &&
1293 (vsecp || (vap->va_mask & AT_XVATTR) ||
1294 IS_EPHEMERAL(crgetuid(cr)) || IS_EPHEMERAL(crgetgid(cr))))
1295 return (EINVAL);
1296
1297 ZFS_ENTER(zfsvfs);
1298 ZFS_VERIFY_ZP(dzp);
1299 os = zfsvfs->z_os;

--- 34 unchanged lines hidden (view full) ---

1334 zflg |= ZCILOOK;
1335
1336 error = zfs_dirent_lock(&dl, dzp, name, &zp, zflg,
1337 NULL, NULL);
1338 if (error) {
1339 if (strcmp(name, "..") == 0)
1340 error = EISDIR;
1341 ZFS_EXIT(zfsvfs);
1342 if (aclp)
1343 zfs_acl_free(aclp);
1344 return (error);
1345 }
1346 }
1347 if (vsecp && aclp == NULL) {
1348 error = zfs_vsec_2_aclp(zfsvfs, vap->va_type, vsecp, &aclp);
1349 if (error) {
1350 ZFS_EXIT(zfsvfs);
1351 if (dl)
1352 zfs_dirent_unlock(dl);
1353 return (error);
1354 }
1355 }
1356
1357 if (zp == NULL) {
1358 uint64_t txtype;
1359
1360 /*
1361 * Create a new file object and update the directory
1362 * to reference it.
1363 */
1364 if (error = zfs_zaccess(dzp, ACE_ADD_FILE, 0, B_FALSE, cr)) {

--- 5 unchanged lines hidden (view full) ---

1370 * extended attribute directories.
1371 */
1372 if ((dzp->z_phys->zp_flags & ZFS_XATTR) &&
1373 (vap->va_type != VREG)) {
1374 error = EINVAL;
1375 goto out;
1376 }
1377
1445 if (zp == NULL) {
1446 uint64_t txtype;
1447
1448 /*
1449 * Create a new file object and update the directory
1450 * to reference it.
1451 */
1452 if (error = zfs_zaccess(dzp, ACE_ADD_FILE, 0, B_FALSE, cr)) {

--- 5 unchanged lines hidden (view full) ---

1458 * extended attribute directories.
1459 */
1460 if ((dzp->z_phys->zp_flags & ZFS_XATTR) &&
1461 (vap->va_type != VREG)) {
1462 error = EINVAL;
1463 goto out;
1464 }
1465
1466
1467 if ((error = zfs_acl_ids_create(dzp, 0, vap, cr, vsecp,
1468 &acl_ids)) != 0)
1469 goto out;
1470 if (zfs_acl_ids_overquota(zfsvfs, &acl_ids)) {
1471 error = EDQUOT;
1472 goto out;
1473 }
1474
1378 tx = dmu_tx_create(os);
1379 dmu_tx_hold_bonus(tx, DMU_NEW_OBJECT);
1380 if ((aclp && aclp->z_has_fuids) || IS_EPHEMERAL(crgetuid(cr)) ||
1381 IS_EPHEMERAL(crgetgid(cr))) {
1382 if (zfsvfs->z_fuid_obj == 0) {
1383 dmu_tx_hold_bonus(tx, DMU_NEW_OBJECT);
1384 dmu_tx_hold_write(tx, DMU_NEW_OBJECT, 0,
1385 FUID_SIZE_ESTIMATE(zfsvfs));
1386 dmu_tx_hold_zap(tx, MASTER_NODE_OBJ,
1387 FALSE, NULL);
1388 } else {
1389 dmu_tx_hold_bonus(tx, zfsvfs->z_fuid_obj);
1390 dmu_tx_hold_write(tx, zfsvfs->z_fuid_obj, 0,
1391 FUID_SIZE_ESTIMATE(zfsvfs));
1392 }
1393 }
1477 fuid_dirtied = zfsvfs->z_fuid_dirty;
1478 if (fuid_dirtied)
1479 zfs_fuid_txhold(zfsvfs, tx);
1394 dmu_tx_hold_bonus(tx, dzp->z_id);
1395 dmu_tx_hold_zap(tx, dzp->z_id, TRUE, name);
1396 if ((dzp->z_phys->zp_flags & ZFS_INHERIT_ACE) || aclp) {
1482 if (acl_ids.z_aclp->z_acl_bytes > ZFS_ACE_SPACE) {
1397 dmu_tx_hold_write(tx, DMU_NEW_OBJECT,
1398 0, SPA_MAXBLOCKSIZE);
1399 }
1400 error = dmu_tx_assign(tx, zfsvfs->z_assign);
1486 error = dmu_tx_assign(tx, TXG_NOWAIT);
1401 if (error) {
1488 zfs_acl_ids_free(&acl_ids);
1402 zfs_dirent_unlock(dl);
1403 if (error == ERESTART &&
1404 zfsvfs->z_assign == TXG_NOWAIT) {
1490 if (error == ERESTART) {
1405 dmu_tx_wait(tx);
1406 dmu_tx_abort(tx);
1407 goto top;
1408 }
1409 dmu_tx_abort(tx);
1410 ZFS_EXIT(zfsvfs);
1411 if (aclp)
1412 zfs_acl_free(aclp);
1413 return (error);
1414 }
1415 zfs_mknode(dzp, vap, tx, cr, 0, &zp, 0, aclp, &fuidp);
1499 zfs_mknode(dzp, vap, tx, cr, 0, &zp, 0, &acl_ids);
1500
1501 if (fuid_dirtied)
1502 zfs_fuid_sync(zfsvfs, tx);
1503
1416 (void) zfs_link_create(dl, zp, tx, ZNEW);
1505
1417 txtype = zfs_log_create_txtype(Z_FILE, vsecp, vap);
1418 if (flag & FIGNORECASE)
1419 txtype |= TX_CI;
1420 zfs_log_create(zilog, tx, txtype, dzp, zp, name,
1421 vsecp, fuidp, vap);
1422 if (fuidp)
1423 zfs_fuid_info_free(fuidp);
1510 vsecp, acl_ids.z_fuidp, vap);
1511 zfs_acl_ids_free(&acl_ids);
1424 dmu_tx_commit(tx);
1425 } else {
1426 int aflags = (flag & FAPPEND) ? V_APPEND : 0;
1427
1428 /*
1429 * A directory entry already exists for this name.
1430 */
1431 /*

--- 53 unchanged lines hidden (view full) ---

1485 svp = specvp(*vpp, (*vpp)->v_rdev, (*vpp)->v_type, cr);
1486 VN_RELE(*vpp);
1487 if (svp == NULL) {
1488 error = ENOSYS;
1489 }
1490 *vpp = svp;
1491 }
1492 }
1512 dmu_tx_commit(tx);
1513 } else {
1514 int aflags = (flag & FAPPEND) ? V_APPEND : 0;
1515
1516 /*
1517 * A directory entry already exists for this name.
1518 */
1519 /*

--- 53 unchanged lines hidden (view full) ---

1573 svp = specvp(*vpp, (*vpp)->v_rdev, (*vpp)->v_type, cr);
1574 VN_RELE(*vpp);
1575 if (svp == NULL) {
1576 error = ENOSYS;
1577 }
1578 *vpp = svp;
1579 }
1580 }
1493 if (aclp)
1494 zfs_acl_free(aclp);
1495
1496 ZFS_EXIT(zfsvfs);
1497 return (error);
1498}
1499
1500/*
1501 * Remove an entry from a directory.
1502 *

--- 102 unchanged lines hidden (view full) ---

1605 /* are there any additional acls */
1606 if ((acl_obj = zp->z_phys->zp_acl.z_acl_extern_obj) != 0 &&
1607 may_delete_now)
1608 dmu_tx_hold_free(tx, acl_obj, 0, DMU_OBJECT_END);
1609
1610 /* charge as an update -- would be nice not to charge at all */
1611 dmu_tx_hold_zap(tx, zfsvfs->z_unlinkedobj, FALSE, NULL);
1612
1581
1582 ZFS_EXIT(zfsvfs);
1583 return (error);
1584}
1585
1586/*
1587 * Remove an entry from a directory.
1588 *

--- 102 unchanged lines hidden (view full) ---

1691 /* are there any additional acls */
1692 if ((acl_obj = zp->z_phys->zp_acl.z_acl_extern_obj) != 0 &&
1693 may_delete_now)
1694 dmu_tx_hold_free(tx, acl_obj, 0, DMU_OBJECT_END);
1695
1696 /* charge as an update -- would be nice not to charge at all */
1697 dmu_tx_hold_zap(tx, zfsvfs->z_unlinkedobj, FALSE, NULL);
1698
1613 error = dmu_tx_assign(tx, zfsvfs->z_assign);
1699 error = dmu_tx_assign(tx, TXG_NOWAIT);
1614 if (error) {
1615 zfs_dirent_unlock(dl);
1616 VN_RELE(vp);
1617 if (error == ERESTART && zfsvfs->z_assign == TXG_NOWAIT) {
1703 if (error == ERESTART) {
1618 dmu_tx_wait(tx);
1619 dmu_tx_abort(tx);
1620 goto top;
1621 }
1622 if (realnmp)
1623 pn_free(realnmp);
1624 dmu_tx_abort(tx);
1625 ZFS_EXIT(zfsvfs);

--- 93 unchanged lines hidden (view full) ---

1719{
1720 znode_t *zp, *dzp = VTOZ(dvp);
1721 zfsvfs_t *zfsvfs = dzp->z_zfsvfs;
1722 zilog_t *zilog;
1723 zfs_dirlock_t *dl;
1724 uint64_t txtype;
1725 dmu_tx_t *tx;
1726 int error;
1727 zfs_acl_t *aclp = NULL;
1728 zfs_fuid_info_t *fuidp = NULL;
1729 int zf = ZNEW;
1814 ksid_t *ksid;
1815 uid_t uid;
1816 gid_t gid = crgetgid(cr);
1817 zfs_acl_ids_t acl_ids;
1818 boolean_t fuid_dirtied;
1730
1731 ASSERT(vap->va_type == VDIR);
1732
1733 /*
1734 * If we have an ephemeral id, ACL, or XVATTR then
1735 * make sure file system is at proper version
1736 */
1737
1827 ksid = crgetsid(cr, KSID_OWNER);
1828 if (ksid)
1829 uid = ksid_getid(ksid);
1830 else
1831 uid = crgetuid(cr);
1738 if (zfsvfs->z_use_fuids == B_FALSE &&
1739 (vsecp || (vap->va_mask & AT_XVATTR) || IS_EPHEMERAL(crgetuid(cr))||
1740 IS_EPHEMERAL(crgetgid(cr))))
1741 return (EINVAL);
1742
1743 ZFS_ENTER(zfsvfs);
1744 ZFS_VERIFY_ZP(dzp);
1745 zilog = zfsvfs->z_log;

--- 31 unchanged lines hidden (view full) ---

1777 }
1778
1779 if (error = zfs_zaccess(dzp, ACE_ADD_SUBDIRECTORY, 0, B_FALSE, cr)) {
1780 zfs_dirent_unlock(dl);
1781 ZFS_EXIT(zfsvfs);
1782 return (error);
1783 }
1784
1785 if (vsecp && aclp == NULL) {
1786 error = zfs_vsec_2_aclp(zfsvfs, vap->va_type, vsecp, &aclp);
1787 if (error) {
1788 zfs_dirent_unlock(dl);
1789 ZFS_EXIT(zfsvfs);
1790 return (error);
1791 }
1879 if ((error = zfs_acl_ids_create(dzp, 0, vap, cr, vsecp,
1880 &acl_ids)) != 0) {
1881 zfs_dirent_unlock(dl);
1882 ZFS_EXIT(zfsvfs);
1883 return (error);
1792 }
1885 if (zfs_acl_ids_overquota(zfsvfs, &acl_ids)) {
1886 zfs_dirent_unlock(dl);
1887 ZFS_EXIT(zfsvfs);
1888 return (EDQUOT);
1889 }
1890
1793 /*
1794 * Add a new entry to the directory.
1795 */
1796 tx = dmu_tx_create(zfsvfs->z_os);
1797 dmu_tx_hold_zap(tx, dzp->z_id, TRUE, dirname);
1798 dmu_tx_hold_zap(tx, DMU_NEW_OBJECT, FALSE, NULL);
1799 if ((aclp && aclp->z_has_fuids) || IS_EPHEMERAL(crgetuid(cr)) ||
1800 IS_EPHEMERAL(crgetgid(cr))) {
1801 if (zfsvfs->z_fuid_obj == 0) {
1802 dmu_tx_hold_bonus(tx, DMU_NEW_OBJECT);
1803 dmu_tx_hold_write(tx, DMU_NEW_OBJECT, 0,
1804 FUID_SIZE_ESTIMATE(zfsvfs));
1805 dmu_tx_hold_zap(tx, MASTER_NODE_OBJ, FALSE, NULL);
1806 } else {
1807 dmu_tx_hold_bonus(tx, zfsvfs->z_fuid_obj);
1808 dmu_tx_hold_write(tx, zfsvfs->z_fuid_obj, 0,
1809 FUID_SIZE_ESTIMATE(zfsvfs));
1810 }
1811 }
1812 if ((dzp->z_phys->zp_flags & ZFS_INHERIT_ACE) || aclp)
1897 fuid_dirtied = zfsvfs->z_fuid_dirty;
1898 if (fuid_dirtied)
1899 zfs_fuid_txhold(zfsvfs, tx);
1900 if (acl_ids.z_aclp->z_acl_bytes > ZFS_ACE_SPACE)
1813 dmu_tx_hold_write(tx, DMU_NEW_OBJECT,
1814 0, SPA_MAXBLOCKSIZE);
1815 error = dmu_tx_assign(tx, zfsvfs->z_assign);
1903 error = dmu_tx_assign(tx, TXG_NOWAIT);
1816 if (error) {
1905 zfs_acl_ids_free(&acl_ids);
1817 zfs_dirent_unlock(dl);
1818 if (error == ERESTART && zfsvfs->z_assign == TXG_NOWAIT) {
1907 if (error == ERESTART) {
1819 dmu_tx_wait(tx);
1820 dmu_tx_abort(tx);
1821 goto top;
1822 }
1823 dmu_tx_abort(tx);
1824 ZFS_EXIT(zfsvfs);
1825 if (aclp)
1826 zfs_acl_free(aclp);
1827 return (error);
1828 }
1829
1830 /*
1831 * Create new node.
1832 */
1833 zfs_mknode(dzp, vap, tx, cr, 0, &zp, 0, aclp, &fuidp);
1920 zfs_mknode(dzp, vap, tx, cr, 0, &zp, 0, &acl_ids);
1834
1835 if (aclp)
1836 zfs_acl_free(aclp);
1837
1922 if (fuid_dirtied)
1923 zfs_fuid_sync(zfsvfs, tx);
1838 /*
1839 * Now put new name in parent dir.
1840 */
1841 (void) zfs_link_create(dl, zp, tx, ZNEW);
1842
1843 *vpp = ZTOV(zp);
1844
1845 txtype = zfs_log_create_txtype(Z_DIR, vsecp, vap);
1846 if (flags & FIGNORECASE)
1847 txtype |= TX_CI;
1848 zfs_log_create(zilog, tx, txtype, dzp, zp, dirname, vsecp, fuidp, vap);
1934 zfs_log_create(zilog, tx, txtype, dzp, zp, dirname, vsecp,
1935 acl_ids.z_fuidp, vap);
1849
1850 if (fuidp)
1851 zfs_fuid_info_free(fuidp);
1937 zfs_acl_ids_free(&acl_ids);
1852 dmu_tx_commit(tx);
1853
1854 zfs_dirent_unlock(dl);
1855
1856 ZFS_EXIT(zfsvfs);
1857 return (0);
1858}
1859

--- 77 unchanged lines hidden (view full) ---

1937 * with the treewalk and directory rename code.
1938 */
1939 rw_enter(&zp->z_parent_lock, RW_WRITER);
1940
1941 tx = dmu_tx_create(zfsvfs->z_os);
1942 dmu_tx_hold_zap(tx, dzp->z_id, FALSE, name);
1943 dmu_tx_hold_bonus(tx, zp->z_id);
1944 dmu_tx_hold_zap(tx, zfsvfs->z_unlinkedobj, FALSE, NULL);
1945 error = dmu_tx_assign(tx, zfsvfs->z_assign);
2031 error = dmu_tx_assign(tx, TXG_NOWAIT);
1946 if (error) {
1947 rw_exit(&zp->z_parent_lock);
1948 rw_exit(&zp->z_name_lock);
1949 zfs_dirent_unlock(dl);
1950 VN_RELE(vp);
1951 if (error == ERESTART && zfsvfs->z_assign == TXG_NOWAIT) {
2037 if (error == ERESTART) {
1952 dmu_tx_wait(tx);
1953 dmu_tx_abort(tx);
1954 goto top;
1955 }
1956 dmu_tx_abort(tx);
1957 ZFS_EXIT(zfsvfs);
1958 return (error);
1959 }

--- 569 unchanged lines hidden (view full) ---

2529 caller_context_t *ct)
2530{
2531 znode_t *zp = VTOZ(vp);
2532 znode_phys_t *pzp;
2533 zfsvfs_t *zfsvfs = zp->z_zfsvfs;
2534 zilog_t *zilog;
2535 dmu_tx_t *tx;
2536 vattr_t oldva;
2623 xvattr_t tmpxvattr;
2537 uint_t mask = vap->va_mask;
2538 uint_t saved_mask;
2539 uint64_t saved_mode;
2540 int trim_mask = 0;
2541 uint64_t new_mode;
2629 uint64_t new_uid, new_gid;
2542 znode_t *attrzp;
2543 int need_policy = FALSE;
2544 int err;
2545 zfs_fuid_info_t *fuidp = NULL;
2546 xvattr_t *xvap = (xvattr_t *)vap; /* vap may be an xvattr_t * */
2547 xoptattr_t *xoap;
2548 zfs_acl_t *aclp = NULL;
2549 boolean_t skipaclchk = (flags & ATTR_NOACLCHECK) ? B_TRUE : B_FALSE;
2638 boolean_t fuid_dirtied = B_FALSE;
2550
2551 if (mask == 0)
2552 return (0);
2553
2554 if (mask & AT_NOSET)
2555 return (EINVAL);
2556
2557 ZFS_ENTER(zfsvfs);

--- 26 unchanged lines hidden (view full) ---

2584 }
2585
2586 /*
2587 * If this is an xvattr_t, then get a pointer to the structure of
2588 * optional attributes. If this is NULL, then we have a vattr_t.
2589 */
2590 xoap = xva_getxoptattr(xvap);
2591
2681 xva_init(&tmpxvattr);
2682
2592 /*
2593 * Immutable files can only alter immutable bit and atime
2594 */
2595 if ((pzp->zp_flags & ZFS_IMMUTABLE) &&
2596 ((mask & (AT_SIZE|AT_UID|AT_GID|AT_MTIME|AT_MODE)) ||
2597 ((mask & AT_XVATTR) && XVA_ISSET_REQ(xvap, XAT_CREATETIME)))) {
2598 ZFS_EXIT(zfsvfs);
2599 return (EPERM);

--- 106 unchanged lines hidden (view full) ---

2706 need_policy = TRUE;
2707 }
2708 }
2709
2710 mutex_enter(&zp->z_lock);
2711 oldva.va_mode = pzp->zp_mode;
2712 zfs_fuid_map_ids(zp, cr, &oldva.va_uid, &oldva.va_gid);
2713 if (mask & AT_XVATTR) {
2714 if ((need_policy == FALSE) &&
2715 (XVA_ISSET_REQ(xvap, XAT_APPENDONLY) &&
2716 xoap->xoa_appendonly !=
2717 ((pzp->zp_flags & ZFS_APPENDONLY) != 0)) ||
2718 (XVA_ISSET_REQ(xvap, XAT_NOUNLINK) &&
2719 xoap->xoa_nounlink !=
2720 ((pzp->zp_flags & ZFS_NOUNLINK) != 0)) ||
2721 (XVA_ISSET_REQ(xvap, XAT_IMMUTABLE) &&
2722 xoap->xoa_immutable !=
2723 ((pzp->zp_flags & ZFS_IMMUTABLE) != 0)) ||
2724 (XVA_ISSET_REQ(xvap, XAT_NODUMP) &&
2725 xoap->xoa_nodump !=
2726 ((pzp->zp_flags & ZFS_NODUMP) != 0)) ||
2727 (XVA_ISSET_REQ(xvap, XAT_AV_MODIFIED) &&
2728 xoap->xoa_av_modified !=
2729 ((pzp->zp_flags & ZFS_AV_MODIFIED) != 0)) ||
2730 ((XVA_ISSET_REQ(xvap, XAT_AV_QUARANTINED) &&
2731 ((vp->v_type != VREG && xoap->xoa_av_quarantined) ||
2732 xoap->xoa_av_quarantined !=
2733 ((pzp->zp_flags & ZFS_AV_QUARANTINED) != 0)))) ||
2734 (XVA_ISSET_REQ(xvap, XAT_AV_SCANSTAMP)) ||
2735 (XVA_ISSET_REQ(xvap, XAT_OPAQUE))) {
2805 /*
2806 * Update xvattr mask to include only those attributes
2807 * that are actually changing.
2808 *
2809 * the bits will be restored prior to actually setting
2810 * the attributes so the caller thinks they were set.
2811 */
2812 if (XVA_ISSET_REQ(xvap, XAT_APPENDONLY)) {
2813 if (xoap->xoa_appendonly !=
2814 ((pzp->zp_flags & ZFS_APPENDONLY) != 0)) {
2815 need_policy = TRUE;
2816 } else {
2817 XVA_CLR_REQ(xvap, XAT_APPENDONLY);
2818 XVA_SET_REQ(&tmpxvattr, XAT_APPENDONLY);
2819 }
2820 }
2821
2822 if (XVA_ISSET_REQ(xvap, XAT_NOUNLINK)) {
2823 if (xoap->xoa_nounlink !=
2824 ((pzp->zp_flags & ZFS_NOUNLINK) != 0)) {
2825 need_policy = TRUE;
2826 } else {
2827 XVA_CLR_REQ(xvap, XAT_NOUNLINK);
2828 XVA_SET_REQ(&tmpxvattr, XAT_NOUNLINK);
2829 }
2830 }
2831
2832 if (XVA_ISSET_REQ(xvap, XAT_IMMUTABLE)) {
2833 if (xoap->xoa_immutable !=
2834 ((pzp->zp_flags & ZFS_IMMUTABLE) != 0)) {
2835 need_policy = TRUE;
2836 } else {
2837 XVA_CLR_REQ(xvap, XAT_IMMUTABLE);
2838 XVA_SET_REQ(&tmpxvattr, XAT_IMMUTABLE);
2839 }
2840 }
2841
2842 if (XVA_ISSET_REQ(xvap, XAT_NODUMP)) {
2843 if (xoap->xoa_nodump !=
2844 ((pzp->zp_flags & ZFS_NODUMP) != 0)) {
2845 need_policy = TRUE;
2846 } else {
2847 XVA_CLR_REQ(xvap, XAT_NODUMP);
2848 XVA_SET_REQ(&tmpxvattr, XAT_NODUMP);
2849 }
2850 }
2851
2852 if (XVA_ISSET_REQ(xvap, XAT_AV_MODIFIED)) {
2853 if (xoap->xoa_av_modified !=
2854 ((pzp->zp_flags & ZFS_AV_MODIFIED) != 0)) {
2855 need_policy = TRUE;
2856 } else {
2857 XVA_CLR_REQ(xvap, XAT_AV_MODIFIED);
2858 XVA_SET_REQ(&tmpxvattr, XAT_AV_MODIFIED);
2859 }
2860 }
2861
2862 if (XVA_ISSET_REQ(xvap, XAT_AV_QUARANTINED)) {
2863 if ((vp->v_type != VREG &&
2864 xoap->xoa_av_quarantined) ||
2865 xoap->xoa_av_quarantined !=
2866 ((pzp->zp_flags & ZFS_AV_QUARANTINED) != 0)) {
2867 need_policy = TRUE;
2868 } else {
2869 XVA_CLR_REQ(xvap, XAT_AV_QUARANTINED);
2870 XVA_SET_REQ(&tmpxvattr, XAT_AV_QUARANTINED);
2871 }
2872 }
2873
2874 if (need_policy == FALSE &&
2875 (XVA_ISSET_REQ(xvap, XAT_AV_SCANSTAMP) ||
2876 XVA_ISSET_REQ(xvap, XAT_OPAQUE))) {
2736 need_policy = TRUE;
2737 }
2738 }
2739
2740 mutex_exit(&zp->z_lock);
2741
2742 if (mask & AT_MODE) {
2743 if (zfs_zaccess(zp, ACE_WRITE_ACL, 0, skipaclchk, cr) == 0) {

--- 51 unchanged lines hidden (view full) ---

2795 /*
2796 * secpolicy_vnode_setattr, or take ownership may have
2797 * changed va_mask
2798 */
2799 mask = vap->va_mask;
2800
2801 tx = dmu_tx_create(zfsvfs->z_os);
2802 dmu_tx_hold_bonus(tx, zp->z_id);
2803 if (((mask & AT_UID) && IS_EPHEMERAL(vap->va_uid)) ||
2804 ((mask & AT_GID) && IS_EPHEMERAL(vap->va_gid))) {
2805 if (zfsvfs->z_fuid_obj == 0) {
2806 dmu_tx_hold_bonus(tx, DMU_NEW_OBJECT);
2807 dmu_tx_hold_write(tx, DMU_NEW_OBJECT, 0,
2808 FUID_SIZE_ESTIMATE(zfsvfs));
2809 dmu_tx_hold_zap(tx, MASTER_NODE_OBJ, FALSE, NULL);
2810 } else {
2811 dmu_tx_hold_bonus(tx, zfsvfs->z_fuid_obj);
2812 dmu_tx_hold_write(tx, zfsvfs->z_fuid_obj, 0,
2813 FUID_SIZE_ESTIMATE(zfsvfs));
2814 }
2815 }
2816
2817 if (mask & AT_MODE) {
2818 uint64_t pmode = pzp->zp_mode;
2819
2820 new_mode = (pmode & S_IFMT) | (vap->va_mode & ~S_IFMT);
2821
2822 if (err = zfs_acl_chmod_setattr(zp, &aclp, new_mode)) {
2823 dmu_tx_abort(tx);
2824 ZFS_EXIT(zfsvfs);
2825 return (err);
2826 }
2950 if (err = zfs_acl_chmod_setattr(zp, &aclp, new_mode))
2951 goto out;
2827 if (pzp->zp_acl.z_acl_extern_obj) {
2828 /* Are we upgrading ACL from old V0 format to new V1 */
2829 if (zfsvfs->z_version <= ZPL_VERSION_FUID &&
2830 pzp->zp_acl.z_acl_version ==
2831 ZFS_ACL_VERSION_INITIAL) {
2832 dmu_tx_hold_free(tx,
2833 pzp->zp_acl.z_acl_extern_obj, 0,
2834 DMU_OBJECT_END);

--- 5 unchanged lines hidden (view full) ---

2840 aclp->z_acl_bytes);
2841 }
2842 } else if (aclp->z_acl_bytes > ZFS_ACE_SPACE) {
2843 dmu_tx_hold_write(tx, DMU_NEW_OBJECT,
2844 0, aclp->z_acl_bytes);
2845 }
2846 }
2847
2848 if ((mask & (AT_UID | AT_GID)) && pzp->zp_xattr != 0) {
2849 err = zfs_zget(zp->z_zfsvfs, pzp->zp_xattr, &attrzp);
2850 if (err) {
2851 dmu_tx_abort(tx);
2852 ZFS_EXIT(zfsvfs);
2853 if (aclp)
2854 zfs_acl_free(aclp);
2855 return (err);
2973 if (mask & (AT_UID | AT_GID)) {
2974 if (pzp->zp_xattr) {
2975 err = zfs_zget(zp->z_zfsvfs, pzp->zp_xattr, &attrzp);
2976 if (err)
2977 goto out;
2978 dmu_tx_hold_bonus(tx, attrzp->z_id);
2856 }
2857 dmu_tx_hold_bonus(tx, attrzp->z_id);
2858 }
2980 if (mask & AT_UID) {
2981 new_uid = zfs_fuid_create(zfsvfs,
2982 (uint64_t)vap->va_uid, cr, ZFS_OWNER, &fuidp);
2983 if (new_uid != pzp->zp_uid &&
2984 zfs_usergroup_overquota(zfsvfs, B_FALSE, new_uid)) {
2985 err = EDQUOT;
2986 goto out;
2987 }
2988 }
2859
2860 err = dmu_tx_assign(tx, zfsvfs->z_assign);
2861 if (err) {
2862 if (attrzp)
2863 VN_RELE(ZTOV(attrzp));
2864
2865 if (aclp) {
2866 zfs_acl_free(aclp);
2867 aclp = NULL;
2990 if (mask & AT_GID) {
2991 new_gid = zfs_fuid_create(zfsvfs, (uint64_t)vap->va_gid,
2992 cr, ZFS_GROUP, &fuidp);
2993 if (new_gid != pzp->zp_gid &&
2994 zfs_usergroup_overquota(zfsvfs, B_TRUE, new_gid)) {
2995 err = EDQUOT;
2996 goto out;
2997 }
2868 }
2999 fuid_dirtied = zfsvfs->z_fuid_dirty;
3000 if (fuid_dirtied) {
3001 if (zfsvfs->z_fuid_obj == 0) {
3002 dmu_tx_hold_bonus(tx, DMU_NEW_OBJECT);
3003 dmu_tx_hold_write(tx, DMU_NEW_OBJECT, 0,
3004 FUID_SIZE_ESTIMATE(zfsvfs));
3005 dmu_tx_hold_zap(tx, MASTER_NODE_OBJ,
3006 FALSE, NULL);
3007 } else {
3008 dmu_tx_hold_bonus(tx, zfsvfs->z_fuid_obj);
3009 dmu_tx_hold_write(tx, zfsvfs->z_fuid_obj, 0,
3010 FUID_SIZE_ESTIMATE(zfsvfs));
3011 }
3012 }
3013 }
2869
2870 if (err == ERESTART && zfsvfs->z_assign == TXG_NOWAIT) {
3015 err = dmu_tx_assign(tx, TXG_NOWAIT);
3016 if (err) {
3017 if (err == ERESTART)
2871 dmu_tx_wait(tx);
2872 dmu_tx_abort(tx);
2873 goto top;
2874 }
2875 dmu_tx_abort(tx);
2876 ZFS_EXIT(zfsvfs);
2877 return (err);
3019 goto out;
2878 }
2879
2880 dmu_buf_will_dirty(zp->z_dbuf, tx);
2881
2882 /*
2883 * Set each attribute requested.
2884 * We group settings according to the locks they need to acquire.
2885 *
2886 * Note: you cannot set ctime directly, although it will be
2887 * updated as a side-effect of calling this function.
2888 */
2889
2890 mutex_enter(&zp->z_lock);
2891
2892 if (mask & AT_MODE) {
2893 mutex_enter(&zp->z_acl_lock);
2894 zp->z_phys->zp_mode = new_mode;
2895 err = zfs_aclset_common(zp, aclp, cr, &fuidp, tx);
3037 err = zfs_aclset_common(zp, aclp, cr, tx);
2896 ASSERT3U(err, ==, 0);
2897 mutex_exit(&zp->z_acl_lock);
2898 }
2899
2900 if (attrzp)
2901 mutex_enter(&attrzp->z_lock);
2902
2903 if (mask & AT_UID) {
3038 ASSERT3U(err, ==, 0);
3039 mutex_exit(&zp->z_acl_lock);
3040 }
3041
3042 if (attrzp)
3043 mutex_enter(&attrzp->z_lock);
3044
3045 if (mask & AT_UID) {
2904 pzp->zp_uid = zfs_fuid_create(zfsvfs,
2905 vap->va_uid, cr, ZFS_OWNER, tx, &fuidp);
2906 if (attrzp) {
2907 attrzp->z_phys->zp_uid = zfs_fuid_create(zfsvfs,
2908 vap->va_uid, cr, ZFS_OWNER, tx, &fuidp);
2909 }
3046 pzp->zp_uid = new_uid;
3047 if (attrzp)
3048 attrzp->z_phys->zp_uid = new_uid;
2910 }
2911
2912 if (mask & AT_GID) {
3049 }
3050
3051 if (mask & AT_GID) {
2913 pzp->zp_gid = zfs_fuid_create(zfsvfs, vap->va_gid,
2914 cr, ZFS_GROUP, tx, &fuidp);
3052 pzp->zp_gid = new_gid;
2915 if (attrzp)
3053 if (attrzp)
2916 attrzp->z_phys->zp_gid = zfs_fuid_create(zfsvfs,
2917 vap->va_gid, cr, ZFS_GROUP, tx, &fuidp);
3054 attrzp->z_phys->zp_gid = new_gid;
2918 }
2919
3055 }
3056
2920 if (aclp)
2921 zfs_acl_free(aclp);
2922
2923 if (attrzp)
2924 mutex_exit(&attrzp->z_lock);
2925
2926 if (mask & AT_ATIME)
2927 ZFS_TIME_ENCODE(&vap->va_atime, pzp->zp_atime);
2928
2929 if (mask & AT_MTIME)
2930 ZFS_TIME_ENCODE(&vap->va_mtime, pzp->zp_mtime);

--- 4 unchanged lines hidden ---

2935 else if (mask != 0)
2936 zfs_time_stamper_locked(zp, STATE_CHANGED, tx);
2937 /*
2938 * Do this after setting timestamps to prevent the timestamp
2939 * update from toggling the bit.
2940 */
2941
2942 if (xoap && (mask & AT_XVATTR)) {
3057 if (attrzp)
3058 mutex_exit(&attrzp->z_lock);
3059
3060 if (mask & AT_ATIME)
3061 ZFS_TIME_ENCODE(&vap->va_atime, pzp->zp_atime);
3062
3063 if (mask & AT_MTIME)
3064 ZFS_TIME_ENCODE(&vap->va_mtime, pzp->zp_mtime);

--- 4 unchanged lines hidden ---

3069 else if (mask != 0)
3070 zfs_time_stamper_locked(zp, STATE_CHANGED, tx);
3071 /*
3072 * Do this after setting timestamps to prevent the timestamp
3073 * update from toggling the bit.
3074 */
3075
3076 if (xoap && (mask & AT_XVATTR)) {
3077
3078 /*
3079 * Restore the masks that were trimmed off above so that
3080 * the return masks can be set for the caller.
3081 */
3082
3083 if (XVA_ISSET_REQ(&tmpxvattr, XAT_APPENDONLY)) {
3084 XVA_SET_REQ(xvap, XAT_APPENDONLY);
3085 }
3086 if (XVA_ISSET_REQ(&tmpxvattr, XAT_NOUNLINK)) {
3087 XVA_SET_REQ(xvap, XAT_NOUNLINK);
3088 }
3089 if (XVA_ISSET_REQ(&tmpxvattr, XAT_IMMUTABLE)) {
3090 XVA_SET_REQ(xvap, XAT_IMMUTABLE);
3091 }
3092 if (XVA_ISSET_REQ(&tmpxvattr, XAT_NODUMP)) {
3093 XVA_SET_REQ(xvap, XAT_NODUMP);
3094 }
3095 if (XVA_ISSET_REQ(&tmpxvattr, XAT_AV_MODIFIED)) {
3096 XVA_SET_REQ(xvap, XAT_AV_MODIFIED);
3097 }
3098 if (XVA_ISSET_REQ(&tmpxvattr, XAT_AV_QUARANTINED)) {
3099 XVA_SET_REQ(xvap, XAT_AV_QUARANTINED);
3100 }
3101
2943 if (XVA_ISSET_REQ(xvap, XAT_AV_SCANSTAMP)) {
2944 size_t len;
2945 dmu_object_info_t doi;
2946
2947 ASSERT(vp->v_type == VREG);
2948
2949 /* Grow the bonus buffer if necessary. */
2950 dmu_object_info_from_db(zp->z_dbuf, &doi);
2951 len = sizeof (xoap->xoa_av_scanstamp) +
2952 sizeof (znode_phys_t);
2953 if (len > doi.doi_bonus_size)
2954 VERIFY(dmu_set_bonus(zp->z_dbuf, len, tx) == 0);
2955 }
2956 zfs_xvattr_set(zp, xvap);
2957 }
2958
3102 if (XVA_ISSET_REQ(xvap, XAT_AV_SCANSTAMP)) {
3103 size_t len;
3104 dmu_object_info_t doi;
3105
3106 ASSERT(vp->v_type == VREG);
3107
3108 /* Grow the bonus buffer if necessary. */
3109 dmu_object_info_from_db(zp->z_dbuf, &doi);
3110 len = sizeof (xoap->xoa_av_scanstamp) +
3111 sizeof (znode_phys_t);
3112 if (len > doi.doi_bonus_size)
3113 VERIFY(dmu_set_bonus(zp->z_dbuf, len, tx) == 0);
3114 }
3115 zfs_xvattr_set(zp, xvap);
3116 }
3117
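Editor's note: both versions grow the znode's bonus buffer on demand before stashing the AV scanstamp. The needed length is the fixed znode_phys_t plus the scanstamp, and dmu_set_bonus() is only called when that exceeds the current bonus size. A compile-and-run sketch of the check; 264 and 32 are illustrative sizes, not the real structure sizes.

#include <stdio.h>
#include <stddef.h>

#define ZNODE_PHYS_SIZE 264   /* assumed sizeof (znode_phys_t) */
#define SCANSTAMP_SIZE  32    /* assumed sizeof (xoa_av_scanstamp) */

int
main(void)
{
	size_t bonus_size = ZNODE_PHYS_SIZE;   /* cf. doi.doi_bonus_size */
	size_t len = ZNODE_PHYS_SIZE + SCANSTAMP_SIZE;

	/* Grow only when the scanstamp does not fit yet. */
	if (len > bonus_size)
		(void) printf("grow bonus buffer to %zu bytes\n", len);
	else
		(void) printf("bonus buffer already large enough\n");
	return (0);
}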
3118 if (fuid_dirtied)
3119 zfs_fuid_sync(zfsvfs, tx);
3120
2959 if (mask != 0)
2960 zfs_log_setattr(zilog, tx, TX_SETATTR, zp, vap, mask, fuidp);
2961
3121 if (mask != 0)
3122 zfs_log_setattr(zilog, tx, TX_SETATTR, zp, vap, mask, fuidp);
3123
2962 if (fuidp)
2963 zfs_fuid_info_free(fuidp);
2964 mutex_exit(&zp->z_lock);
2965
3124 mutex_exit(&zp->z_lock);
3125
3126 out:
2966 if (attrzp)
2967 VN_RELE(ZTOV(attrzp));
2968
3127 if (attrzp)
3128 VN_RELE(ZTOV(attrzp));
3129
2969 dmu_tx_commit(tx);
3130 if (aclp) {
3131 zfs_acl_free(aclp);
3132 aclp = NULL;
3133 }
2970
3134
3135 if (fuidp) {
3136 zfs_fuid_info_free(fuidp);
3137 fuidp = NULL;
3138 }
3139
3140 if (err)
3141 dmu_tx_abort(tx);
3142 else
3143 dmu_tx_commit(tx);
3144
3145 if (err == ERESTART)
3146 goto top;
3147
2971 ZFS_EXIT(zfsvfs);
2972 return (err);
2973 }
2974
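Editor's note on the restructuring above: the rewrite converts zfs_setattr() from scattered early returns, each of which repeated VN_RELE(), zfs_acl_free() and dmu_tx_abort(), into a single out: label that frees the ACL and FUID state exactly once, aborts or commits depending on err, and only then decides whether to retry on ERESTART. The shape of that single-exit pattern, as a self-contained sketch with an injected error.

#include <stdio.h>

#define ERESTART_SKETCH 91   /* stand-in for ERESTART */

static int
do_op(int injected_err)
{
	int err = 0;

	/* ... acquire holds, build the tx ... */
	if (injected_err != 0) {
		err = injected_err;
		goto out;               /* no early return: clean up below */
	}
	/* ... apply the changes ... */
out:
	if (err != 0)
		(void) printf("abort tx, free acl/fuid state (err %d)\n", err);
	else
		(void) printf("commit tx\n");
	return (err);
}

int
main(void)
{
	(void) do_op(0);
	(void) do_op(ERESTART_SKETCH);   /* caller would then retry */
	return (0);
}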
2975 typedef struct zfs_zlock {
2976 krwlock_t *zl_rwlock; /* lock we acquired */
2977 znode_t *zl_znode; /* znode we held */
2978 struct zfs_zlock *zl_next; /* next in list */

--- 345 unchanged lines hidden ---

3324 dmu_tx_hold_bonus(tx, sdzp->z_id); /* nlink changes */
3325 dmu_tx_hold_zap(tx, sdzp->z_id, FALSE, snm);
3326 dmu_tx_hold_zap(tx, tdzp->z_id, TRUE, tnm);
3327 if (sdzp != tdzp)
3328 dmu_tx_hold_bonus(tx, tdzp->z_id); /* nlink changes */
3329 if (tzp)
3330 dmu_tx_hold_bonus(tx, tzp->z_id); /* parent changes */
3331 dmu_tx_hold_zap(tx, zfsvfs->z_unlinkedobj, FALSE, NULL);
3148 ZFS_EXIT(zfsvfs);
3149 return (err);
3150 }
3151
3152 typedef struct zfs_zlock {
3153 krwlock_t *zl_rwlock; /* lock we acquired */
3154 znode_t *zl_znode; /* znode we held */
3155 struct zfs_zlock *zl_next; /* next in list */

--- 345 unchanged lines hidden ---

3501 dmu_tx_hold_bonus(tx, sdzp->z_id); /* nlink changes */
3502 dmu_tx_hold_zap(tx, sdzp->z_id, FALSE, snm);
3503 dmu_tx_hold_zap(tx, tdzp->z_id, TRUE, tnm);
3504 if (sdzp != tdzp)
3505 dmu_tx_hold_bonus(tx, tdzp->z_id); /* nlink changes */
3506 if (tzp)
3507 dmu_tx_hold_bonus(tx, tzp->z_id); /* parent changes */
3508 dmu_tx_hold_zap(tx, zfsvfs->z_unlinkedobj, FALSE, NULL);
3332 error = dmu_tx_assign(tx, zfsvfs->z_assign);
3509 error = dmu_tx_assign(tx, TXG_NOWAIT);
3333 if (error) {
3334 if (zl != NULL)
3335 zfs_rename_unlock(&zl);
3336 zfs_dirent_unlock(sdl);
3337 zfs_dirent_unlock(tdl);
3338
3339 if (sdzp == tdzp)
3340 rw_exit(&sdzp->z_name_lock);
3341
3342 VN_RELE(ZTOV(szp));
3343 if (tzp)
3344 VN_RELE(ZTOV(tzp));
3510 if (error) {
3511 if (zl != NULL)
3512 zfs_rename_unlock(&zl);
3513 zfs_dirent_unlock(sdl);
3514 zfs_dirent_unlock(tdl);
3515
3516 if (sdzp == tdzp)
3517 rw_exit(&sdzp->z_name_lock);
3518
3519 VN_RELE(ZTOV(szp));
3520 if (tzp)
3521 VN_RELE(ZTOV(tzp));
3345 if (error == ERESTART && zfsvfs->z_assign == TXG_NOWAIT) {
3522 if (error == ERESTART) {
3346 dmu_tx_wait(tx);
3347 dmu_tx_abort(tx);
3348 goto top;
3349 }
3350 dmu_tx_abort(tx);
3351 ZFS_EXIT(zfsvfs);
3352 return (error);
3353 }

--- 69 unchanged lines hidden ---

3423 znode_t *zp, *dzp = VTOZ(dvp);
3424 zfs_dirlock_t *dl;
3425 dmu_tx_t *tx;
3426 zfsvfs_t *zfsvfs = dzp->z_zfsvfs;
3427 zilog_t *zilog;
3428 int len = strlen(link);
3429 int error;
3430 int zflg = ZNEW;
3523 dmu_tx_wait(tx);
3524 dmu_tx_abort(tx);
3525 goto top;
3526 }
3527 dmu_tx_abort(tx);
3528 ZFS_EXIT(zfsvfs);
3529 return (error);
3530 }
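Editor's note: the rename path keeps the same discipline. Every failure of dmu_tx_assign() first releases the rename lock chain, both dirent locks, and the vnode holds, and only then sleeps in dmu_tx_wait() before retrying, since sleeping while still holding the directory locks could stall other operations for a whole transaction group. A toy loop showing the drop-everything-then-retry ordering; lock_all() and unlock_all() are invented placeholders.

#include <stdio.h>

static int locks_held;

static void lock_all(void)   { locks_held = 4; }  /* zl, sdl, tdl, holds */
static void unlock_all(void) { locks_held = 0; }

int
main(void)
{
	int tries = 0;

top:
	lock_all();
	if (tries++ < 1) {       /* pretend dmu_tx_assign() said ERESTART */
		unlock_all();    /* drop everything before blocking */
		/* dmu_tx_wait(tx); dmu_tx_abort(tx); */
		goto top;
	}
	(void) printf("committed with %d lock(s), %d tries\n",
	    locks_held, tries);
	unlock_all();
	return (0);
}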

--- 69 unchanged lines hidden ---

3600 znode_t *zp, *dzp = VTOZ(dvp);
3601 zfs_dirlock_t *dl;
3602 dmu_tx_t *tx;
3603 zfsvfs_t *zfsvfs = dzp->z_zfsvfs;
3604 zilog_t *zilog;
3605 int len = strlen(link);
3606 int error;
3607 int zflg = ZNEW;
3431 zfs_fuid_info_t *fuidp = NULL;
3608 zfs_acl_ids_t acl_ids;
3609 boolean_t fuid_dirtied;
3432 int flags = 0;
3433
3434 ASSERT(vap->va_type == VLNK);
3435
3436 ZFS_ENTER(zfsvfs);
3437 ZFS_VERIFY_ZP(dzp);
3438 zilog = zfsvfs->z_log;
3439

--- 19 unchanged lines hidden ---

3459 * Attempt to lock directory; fail if entry already exists.
3460 */
3461 error = zfs_dirent_lock(&dl, dzp, name, &zp, zflg, NULL, NULL);
3462 if (error) {
3463 ZFS_EXIT(zfsvfs);
3464 return (error);
3465 }
3466
3610 int flags = 0;
3611
3612 ASSERT(vap->va_type == VLNK);
3613
3614 ZFS_ENTER(zfsvfs);
3615 ZFS_VERIFY_ZP(dzp);
3616 zilog = zfsvfs->z_log;
3617

--- 19 unchanged lines hidden ---

3637 * Attempt to lock directory; fail if entry already exists.
3638 */
3639 error = zfs_dirent_lock(&dl, dzp, name, &zp, zflg, NULL, NULL);
3640 if (error) {
3641 ZFS_EXIT(zfsvfs);
3642 return (error);
3643 }
3644
3645 VERIFY(0 == zfs_acl_ids_create(dzp, 0, vap, cr, NULL, &acl_ids));
3646 if (zfs_acl_ids_overquota(zfsvfs, &acl_ids)) {
3647 zfs_acl_ids_free(&acl_ids);
3648 zfs_dirent_unlock(dl);
3649 ZFS_EXIT(zfsvfs);
3650 return (EDQUOT);
3651 }
3467 tx = dmu_tx_create(zfsvfs->z_os);
3652 tx = dmu_tx_create(zfsvfs->z_os);
3653 fuid_dirtied = zfsvfs->z_fuid_dirty;
3468 dmu_tx_hold_write(tx, DMU_NEW_OBJECT, 0, MAX(1, len));
3469 dmu_tx_hold_bonus(tx, dzp->z_id);
3470 dmu_tx_hold_zap(tx, dzp->z_id, TRUE, name);
3654 dmu_tx_hold_write(tx, DMU_NEW_OBJECT, 0, MAX(1, len));
3655 dmu_tx_hold_bonus(tx, dzp->z_id);
3656 dmu_tx_hold_zap(tx, dzp->z_id, TRUE, name);
3471 if (dzp->z_phys->zp_flags & ZFS_INHERIT_ACE)
3657 if (acl_ids.z_aclp->z_acl_bytes > ZFS_ACE_SPACE)
3472 dmu_tx_hold_write(tx, DMU_NEW_OBJECT, 0, SPA_MAXBLOCKSIZE);
3658 dmu_tx_hold_write(tx, DMU_NEW_OBJECT, 0, SPA_MAXBLOCKSIZE);
3473 if (IS_EPHEMERAL(crgetuid(cr)) || IS_EPHEMERAL(crgetgid(cr))) {
3474 if (zfsvfs->z_fuid_obj == 0) {
3475 dmu_tx_hold_bonus(tx, DMU_NEW_OBJECT);
3476 dmu_tx_hold_write(tx, DMU_NEW_OBJECT, 0,
3477 FUID_SIZE_ESTIMATE(zfsvfs));
3478 dmu_tx_hold_zap(tx, MASTER_NODE_OBJ, FALSE, NULL);
3479 } else {
3480 dmu_tx_hold_bonus(tx, zfsvfs->z_fuid_obj);
3481 dmu_tx_hold_write(tx, zfsvfs->z_fuid_obj, 0,
3482 FUID_SIZE_ESTIMATE(zfsvfs));
3483 }
3484 }
3485 error = dmu_tx_assign(tx, zfsvfs->z_assign);
3659 if (fuid_dirtied)
3660 zfs_fuid_txhold(zfsvfs, tx);
3661 error = dmu_tx_assign(tx, TXG_NOWAIT);
3486 if (error) {
3662 if (error) {
3663 zfs_acl_ids_free(&acl_ids);
3487 zfs_dirent_unlock(dl);
3664 zfs_dirent_unlock(dl);
3488 if (error == ERESTART && zfsvfs->z_assign == TXG_NOWAIT) {
3665 if (error == ERESTART) {
3489 dmu_tx_wait(tx);
3490 dmu_tx_abort(tx);
3491 goto top;
3492 }
3493 dmu_tx_abort(tx);
3494 ZFS_EXIT(zfsvfs);
3495 return (error);
3496 }
3497
3498 dmu_buf_will_dirty(dzp->z_dbuf, tx);
3499
3500 /*
3501 * Create a new object for the symlink.
3502 * Put the link content into the bonus buffer if it will fit;
3503 * otherwise, store it just like any other file data.
3504 */
3505 if (sizeof (znode_phys_t) + len <= dmu_bonus_max()) {
3666 dmu_tx_wait(tx);
3667 dmu_tx_abort(tx);
3668 goto top;
3669 }
3670 dmu_tx_abort(tx);
3671 ZFS_EXIT(zfsvfs);
3672 return (error);
3673 }
3674
3675 dmu_buf_will_dirty(dzp->z_dbuf, tx);
3676
3677 /*
3678 * Create a new object for the symlink.
3679 * Put the link content into the bonus buffer if it will fit;
3680 * otherwise, store it just like any other file data.
3681 */
3682 if (sizeof (znode_phys_t) + len <= dmu_bonus_max()) {
3506 zfs_mknode(dzp, vap, tx, cr, 0, &zp, len, NULL, &fuidp);
3683 zfs_mknode(dzp, vap, tx, cr, 0, &zp, len, &acl_ids);
3507 if (len != 0)
3508 bcopy(link, zp->z_phys + 1, len);
3509 } else {
3510 dmu_buf_t *dbp;
3511
3684 if (len != 0)
3685 bcopy(link, zp->z_phys + 1, len);
3686 } else {
3687 dmu_buf_t *dbp;
3688
3512 zfs_mknode(dzp, vap, tx, cr, 0, &zp, 0, NULL, &fuidp);
3689 zfs_mknode(dzp, vap, tx, cr, 0, &zp, 0, &acl_ids);
3690
3691 if (fuid_dirtied)
3692 zfs_fuid_sync(zfsvfs, tx);
3513 /*
3514 * Nothing can access the znode yet, so no locking is needed
3515 * for growing the znode's blocksize.
3516 */
3517 zfs_grow_blocksize(zp, len, tx);
3518
3519 VERIFY(0 == dmu_buf_hold(zfsvfs->z_os,
3520 zp->z_id, 0, FTAG, &dbp));

--- 4 unchanged lines hidden ---

3525 dmu_buf_rele(dbp, FTAG);
3526 }
3527 zp->z_phys->zp_size = len;
3528
3529 /*
3530 * Insert the new object into the directory.
3531 */
3532 (void) zfs_link_create(dl, zp, tx, ZNEW);
3693 /*
3694 * Nothing can access the znode yet, so no locking is needed
3695 * for growing the znode's blocksize.
3696 */
3697 zfs_grow_blocksize(zp, len, tx);
3698
3699 VERIFY(0 == dmu_buf_hold(zfsvfs->z_os,
3700 zp->z_id, 0, FTAG, &dbp));

--- 4 unchanged lines hidden ---

3705 dmu_buf_rele(dbp, FTAG);
3706 }
3707 zp->z_phys->zp_size = len;
3708
3709 /*
3710 * Insert the new object into the directory.
3711 */
3712 (void) zfs_link_create(dl, zp, tx, ZNEW);
3533 out:
3534 if (error == 0) {
3535 uint64_t txtype = TX_SYMLINK;
3536 if (flags & FIGNORECASE)
3537 txtype |= TX_CI;
3538 zfs_log_symlink(zilog, tx, txtype, dzp, zp, name, link);
3539 *vpp = ZTOV(zp);
3540 }
3713 if (error == 0) {
3714 uint64_t txtype = TX_SYMLINK;
3715 if (flags & FIGNORECASE)
3716 txtype |= TX_CI;
3717 zfs_log_symlink(zilog, tx, txtype, dzp, zp, name, link);
3718 *vpp = ZTOV(zp);
3719 }
3541 if (fuidp)
3542 zfs_fuid_info_free(fuidp);
3543
3720
3721 zfs_acl_ids_free(&acl_ids);
3722
3544 dmu_tx_commit(tx);
3545
3546 zfs_dirent_unlock(dl);
3547
3548 ZFS_EXIT(zfsvfs);
3549 return (error);
3550 }
3551

--- 144 unchanged lines hidden ---

3696 if (error) {
3697 ZFS_EXIT(zfsvfs);
3698 return (error);
3699 }
3700
3701 tx = dmu_tx_create(zfsvfs->z_os);
3702 dmu_tx_hold_bonus(tx, szp->z_id);
3703 dmu_tx_hold_zap(tx, dzp->z_id, TRUE, name);
3723 dmu_tx_commit(tx);
3724
3725 zfs_dirent_unlock(dl);
3726
3727 ZFS_EXIT(zfsvfs);
3728 return (error);
3729 }
3730
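Editor's note: both versions of zfs_symlink() place short targets in the znode's bonus buffer and long ones in a regular data block; the cutoff is simply whether znode_phys_t plus the target still fits under dmu_bonus_max(). A sketch of the placement test; 264 and 320 are made-up sizes for illustration only.

#include <stdio.h>
#include <string.h>

#define ZNODE_PHYS_SIZE 264   /* assumed sizeof (znode_phys_t) */
#define BONUS_MAX       320   /* assumed dmu_bonus_max() result */

int
main(void)
{
	const char *link = "../some/target";
	size_t len = strlen(link);

	if (ZNODE_PHYS_SIZE + len <= BONUS_MAX)
		(void) printf("%zu-byte target stored inline in bonus\n", len);
	else
		(void) printf("%zu-byte target stored in a data block\n", len);
	return (0);
}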

--- 144 unchanged lines hidden ---

3875 if (error) {
3876 ZFS_EXIT(zfsvfs);
3877 return (error);
3878 }
3879
3880 tx = dmu_tx_create(zfsvfs->z_os);
3881 dmu_tx_hold_bonus(tx, szp->z_id);
3882 dmu_tx_hold_zap(tx, dzp->z_id, TRUE, name);
3704 error = dmu_tx_assign(tx, zfsvfs->z_assign);
3883 error = dmu_tx_assign(tx, TXG_NOWAIT);
3705 if (error) {
3706 zfs_dirent_unlock(dl);
3884 if (error) {
3885 zfs_dirent_unlock(dl);
3707 if (error == ERESTART && zfsvfs->z_assign == TXG_NOWAIT) {
3886 if (error == ERESTART) {
3708 dmu_tx_wait(tx);
3709 dmu_tx_abort(tx);
3710 goto top;
3711 }
3712 dmu_tx_abort(tx);
3713 ZFS_EXIT(zfsvfs);
3714 return (error);
3715 }

--- 1273 unchanged lines hidden ---

4989 } */ *ap;
4990 {
4991
4992 return (EOPNOTSUPP);
4993 }
4994
4995 struct vop_vector zfs_vnodeops;
4996 struct vop_vector zfs_fifoops;
3887 dmu_tx_wait(tx);
3888 dmu_tx_abort(tx);
3889 goto top;
3890 }
3891 dmu_tx_abort(tx);
3892 ZFS_EXIT(zfsvfs);
3893 return (error);
3894 }

--- 1273 unchanged lines hidden ---

5168 } */ *ap;
5169 {
5170
5171 return (EOPNOTSUPP);
5172 }
5173
5174 struct vop_vector zfs_vnodeops;
5175 struct vop_vector zfs_fifoops;
5176 struct vop_vector zfs_shareops;
4997
4998 struct vop_vector zfs_vnodeops = {
4999 .vop_default = &default_vnodeops,
5000 .vop_inactive = zfs_freebsd_inactive,
5001 .vop_reclaim = zfs_freebsd_reclaim,
5002 .vop_access = zfs_freebsd_access,
5003 #ifdef FREEBSD_NAMECACHE
5004 .vop_lookup = vfs_cache_lookup,

--- 42 unchanged lines hidden ---

5047 .vop_setattr = zfs_freebsd_setattr,
5048 .vop_write = VOP_PANIC,
5049 .vop_pathconf = zfs_freebsd_fifo_pathconf,
5050 .vop_fid = zfs_freebsd_fid,
5051 .vop_getacl = zfs_freebsd_getacl,
5052 .vop_setacl = zfs_freebsd_setacl,
5053 .vop_aclcheck = zfs_freebsd_aclcheck,
5054 };
5177
5178 struct vop_vector zfs_vnodeops = {
5179 .vop_default = &default_vnodeops,
5180 .vop_inactive = zfs_freebsd_inactive,
5181 .vop_reclaim = zfs_freebsd_reclaim,
5182 .vop_access = zfs_freebsd_access,
5183 #ifdef FREEBSD_NAMECACHE
5184 .vop_lookup = vfs_cache_lookup,

--- 42 unchanged lines hidden ---

5227 .vop_setattr = zfs_freebsd_setattr,
5228 .vop_write = VOP_PANIC,
5229 .vop_pathconf = zfs_freebsd_fifo_pathconf,
5230 .vop_fid = zfs_freebsd_fid,
5231 .vop_getacl = zfs_freebsd_getacl,
5232 .vop_setacl = zfs_freebsd_setacl,
5233 .vop_aclcheck = zfs_freebsd_aclcheck,
5234 };
5235
5236 /*
5237 * Vnode operations template for the special hidden share files.
5238 */
5239 struct vop_vector zfs_shareops = {
5240 .vop_default = &default_vnodeops,
5241 .vop_access = zfs_freebsd_access,
5242 .vop_inactive = zfs_freebsd_inactive,
5243 .vop_reclaim = zfs_freebsd_reclaim,
5244 .vop_fid = zfs_freebsd_fid,
5245 .vop_pathconf = zfs_freebsd_pathconf,
5246 };
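Editor's note: the new zfs_shareops table only pins down six operations; everything else falls back to .vop_default, which is how FreeBSD vop_vector dispatch fills unlisted slots. A minimal model of that NULL-slot fallback; the struct and dispatch helpers are invented for illustration and are not the kernel's VOP machinery.

#include <stdio.h>

struct vops {
	int (*access)(void);
	int (*read)(void);
};

static int deny(void) { return (13); }   /* EACCES-like */
static int ok(void)   { return (0); }

static const struct vops defvops  = { ok, ok };
static const struct vops shareops = { .access = deny };  /* .read unset */

/* A NULL slot falls through to the default table, the way an entry
 * missing from a vop_vector resolves via .vop_default. */
static int
vop_access(const struct vops *v)
{
	return ((v->access != NULL ? v->access : defvops.access)());
}

static int
vop_read(const struct vops *v)
{
	return ((v->read != NULL ? v->read : defvops.read)());
}

int
main(void)
{
	(void) printf("share: access=%d read=%d\n",
	    vop_access(&shareops), vop_read(&shareops));
	return (0);
}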