1/*
2 * CDDL HEADER START
3 *
4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
7 *
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
12 *
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 *
19 * CDDL HEADER END
20 */
21
22/*
23 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
24 * Copyright (c) 2012, 2018 by Delphix. All rights reserved.
25 * Copyright (c) 2015 by Chunwei Chen. All rights reserved.
26 * Copyright 2017 Nexenta Systems, Inc.
27 */
28
29/* Portions Copyright 2007 Jeremy Teo */
30/* Portions Copyright 2010 Robert Milkowski */
31
32
33#include <sys/types.h>
34#include <sys/param.h>
35#include <sys/time.h>
36#include <sys/sysmacros.h>
37#include <sys/vfs.h>
38#include <sys/file.h>
39#include <sys/stat.h>
40#include <sys/kmem.h>
41#include <sys/taskq.h>
42#include <sys/uio.h>
43#include <sys/vmsystm.h>
44#include <sys/atomic.h>
45#include <sys/pathname.h>
46#include <sys/cmn_err.h>
47#include <sys/errno.h>
48#include <sys/zfs_dir.h>
49#include <sys/zfs_acl.h>
50#include <sys/zfs_ioctl.h>
51#include <sys/fs/zfs.h>
52#include <sys/dmu.h>
53#include <sys/dmu_objset.h>
54#include <sys/spa.h>
55#include <sys/txg.h>
56#include <sys/dbuf.h>
57#include <sys/zap.h>
58#include <sys/sa.h>
59#include <sys/policy.h>
60#include <sys/sunddi.h>
61#include <sys/sid.h>
62#include <sys/zfs_ctldir.h>
63#include <sys/zfs_fuid.h>
64#include <sys/zfs_quota.h>
65#include <sys/zfs_sa.h>
66#include <sys/zfs_vnops.h>
67#include <sys/zfs_rlock.h>
68#include <sys/cred.h>
69#include <sys/zpl.h>
70#include <sys/zil.h>
71#include <sys/sa_impl.h>
72
73/*
74 * Programming rules.
75 *
76 * Each vnode op performs some logical unit of work.  To do this, the ZPL must
77 * properly lock its in-core state, create a DMU transaction, do the work,
78 * record this work in the intent log (ZIL), commit the DMU transaction,
79 * and wait for the intent log to commit if it is a synchronous operation.
80 * Moreover, the vnode ops must work in both normal and log replay context.
81 * The ordering of events is important to avoid deadlocks and references
82 * to freed memory.  The example below illustrates the following Big Rules:
83 *
84 *  (1) A check must be made in each zfs thread for a mounted file system.
85 *	This is done avoiding races using ZFS_ENTER(zfsvfs).
86 *      A ZFS_EXIT(zfsvfs) is needed before all returns.  Any znodes
87 *      must be checked with ZFS_VERIFY_ZP(zp).  Both of these macros
88 *      can return EIO from the calling function.
89 *
90 *  (2) zrele() should always be the last thing except for zil_commit() (if
91 *	necessary) and ZFS_EXIT(). This is for 3 reasons: First, if it's the
92 *	last reference, the vnode/znode can be freed, so the zp may point to
93 *	freed memory.  Second, the last reference will call zfs_zinactive(),
94 *	which may induce a lot of work -- pushing cached pages (which acquires
95 *	range locks) and syncing out cached atime changes.  Third,
96 *	zfs_zinactive() may require a new tx, which could deadlock the system
97 *	if you were already holding one. This deadlock occurs because the tx
98 *	currently being operated on prevents a txg from syncing, which
99 *	prevents the new tx from progressing, resulting in a deadlock.  If you
100 *	must call zrele() within a tx, use zfs_zrele_async(). Note that iput()
101 *	is a synonym for zrele().
102 *
103 *  (3)	All range locks must be grabbed before calling dmu_tx_assign(),
104 *	as they can span dmu_tx_assign() calls.
105 *
106 *  (4) If ZPL locks are held, pass TXG_NOWAIT as the second argument to
107 *      dmu_tx_assign().  This is critical because we don't want to block
108 *      while holding locks.
109 *
110 *	If no ZPL locks are held (aside from ZFS_ENTER()), use TXG_WAIT.  This
111 *	reduces lock contention and CPU usage when we must wait (note that if
112 *	throughput is constrained by the storage, nearly every transaction
113 *	must wait).
114 *
115 *      Note, in particular, that if a lock is sometimes acquired before
116 *      the tx assigns, and sometimes after (e.g. z_lock), then failing
117 *      to use a non-blocking assign can deadlock the system.  The scenario:
118 *
119 *	Thread A has grabbed a lock before calling dmu_tx_assign().
120 *	Thread B is in an already-assigned tx, and blocks for this lock.
121 *	Thread A calls dmu_tx_assign(TXG_WAIT) and blocks in txg_wait_open()
122 *	forever, because the previous txg can't quiesce until B's tx commits.
123 *
124 *	If dmu_tx_assign() returns ERESTART and zfsvfs->z_assign is TXG_NOWAIT,
125 *	then drop all locks, call dmu_tx_wait(), and try again.  On subsequent
126 *	calls to dmu_tx_assign(), pass TXG_NOTHROTTLE in addition to TXG_NOWAIT,
127 *	to indicate that this operation has already called dmu_tx_wait().
128 *	This will ensure that we don't retry forever, waiting a short bit
129 *	each time.
130 *
131 *  (5)	If the operation succeeded, generate the intent log entry for it
132 *	before dropping locks.  This ensures that the ordering of events
133 *	in the intent log matches the order in which they actually occurred.
134 *	During ZIL replay the zfs_log_* functions will update the sequence
135 *	number to indicate the zil transaction has replayed.
136 *
137 *  (6)	At the end of each vnode op, the DMU tx must always commit,
138 *	regardless of whether there were any errors.
139 *
140 *  (7)	After dropping all locks, invoke zil_commit(zilog, foid)
141 *	to ensure that synchronous semantics are provided when necessary.
142 *
143 * In general, this is how things should be ordered in each vnode op:
144 *
145 *	ZFS_ENTER(zfsvfs);		// exit if unmounted
146 * top:
147 *	zfs_dirent_lock(&dl, ...)	// lock directory entry (may igrab())
148 *	rw_enter(...);			// grab any other locks you need
149 *	tx = dmu_tx_create(...);	// get DMU tx
150 *	dmu_tx_hold_*();		// hold each object you might modify
151 *	error = dmu_tx_assign(tx, (waited ? TXG_NOTHROTTLE : 0) | TXG_NOWAIT);
152 *	if (error) {
153 *		rw_exit(...);		// drop locks
154 *		zfs_dirent_unlock(dl);	// unlock directory entry
155 *		zrele(...);		// release held znodes
156 *		if (error == ERESTART) {
157 *			waited = B_TRUE;
158 *			dmu_tx_wait(tx);
159 *			dmu_tx_abort(tx);
160 *			goto top;
161 *		}
162 *		dmu_tx_abort(tx);	// abort DMU tx
163 *		ZFS_EXIT(zfsvfs);	// finished in zfs
164 *		return (error);		// really out of space
165 *	}
166 *	error = do_real_work();		// do whatever this VOP does
167 *	if (error == 0)
168 *		zfs_log_*(...);		// on success, make ZIL entry
169 *	dmu_tx_commit(tx);		// commit DMU tx -- error or not
170 *	rw_exit(...);			// drop locks
171 *	zfs_dirent_unlock(dl);		// unlock directory entry
172 *	zrele(...);			// release held znodes
173 *	zil_commit(zilog, foid);	// synchronous when necessary
174 *	ZFS_EXIT(zfsvfs);		// finished in zfs
175 *	return (error);			// done, report error
176 */
177
178/*
179 * Virus scanning is unsupported.  It would be possible to add a hook
180 * here to performance the required virus scan.  This could be done
181 * entirely in the kernel or potentially as an update to invoke a
182 * scanning utility.
183 */
184static int
185zfs_vscan(struct inode *ip, cred_t *cr, int async)
186{
187	return (0);
188}
189
190/* ARGSUSED */
191int
192zfs_open(struct inode *ip, int mode, int flag, cred_t *cr)
193{
194	znode_t	*zp = ITOZ(ip);
195	zfsvfs_t *zfsvfs = ITOZSB(ip);
196
197	ZFS_ENTER(zfsvfs);
198	ZFS_VERIFY_ZP(zp);
199
200	/* Honor ZFS_APPENDONLY file attribute */
201	if ((mode & FMODE_WRITE) && (zp->z_pflags & ZFS_APPENDONLY) &&
202	    ((flag & O_APPEND) == 0)) {
203		ZFS_EXIT(zfsvfs);
204		return (SET_ERROR(EPERM));
205	}
206
207	/* Virus scan eligible files on open */
208	if (!zfs_has_ctldir(zp) && zfsvfs->z_vscan && S_ISREG(ip->i_mode) &&
209	    !(zp->z_pflags & ZFS_AV_QUARANTINED) && zp->z_size > 0) {
210		if (zfs_vscan(ip, cr, 0) != 0) {
211			ZFS_EXIT(zfsvfs);
212			return (SET_ERROR(EACCES));
213		}
214	}
215
216	/* Keep a count of the synchronous opens in the znode */
217	if (flag & O_SYNC)
218		atomic_inc_32(&zp->z_sync_cnt);
219
220	ZFS_EXIT(zfsvfs);
221	return (0);
222}
223
224/* ARGSUSED */
225int
226zfs_close(struct inode *ip, int flag, cred_t *cr)
227{
228	znode_t	*zp = ITOZ(ip);
229	zfsvfs_t *zfsvfs = ITOZSB(ip);
230
231	ZFS_ENTER(zfsvfs);
232	ZFS_VERIFY_ZP(zp);
233
234	/* Decrement the synchronous opens in the znode */
235	if (flag & O_SYNC)
236		atomic_dec_32(&zp->z_sync_cnt);
237
238	if (!zfs_has_ctldir(zp) && zfsvfs->z_vscan && S_ISREG(ip->i_mode) &&
239	    !(zp->z_pflags & ZFS_AV_QUARANTINED) && zp->z_size > 0)
240		VERIFY(zfs_vscan(ip, cr, 1) == 0);
241
242	ZFS_EXIT(zfsvfs);
243	return (0);
244}
245
246#if defined(_KERNEL)
247/*
248 * When a file is memory mapped, we must keep the IO data synchronized
249 * between the DMU cache and the memory mapped pages.  What this means:
250 *
251 * On Write:	If we find a memory mapped page, we write to *both*
252 *		the page and the dmu buffer.
253 */
254void
255update_pages(znode_t *zp, int64_t start, int len, objset_t *os)
256{
257	struct inode *ip = ZTOI(zp);
258	struct address_space *mp = ip->i_mapping;
259	struct page *pp;
260	uint64_t nbytes;
261	int64_t	off;
262	void *pb;
263
264	off = start & (PAGE_SIZE-1);
265	for (start &= PAGE_MASK; len > 0; start += PAGE_SIZE) {
266		nbytes = MIN(PAGE_SIZE - off, len);
267
268		pp = find_lock_page(mp, start >> PAGE_SHIFT);
269		if (pp) {
270			if (mapping_writably_mapped(mp))
271				flush_dcache_page(pp);
272
273			pb = kmap(pp);
274			(void) dmu_read(os, zp->z_id, start + off, nbytes,
275			    pb + off, DMU_READ_PREFETCH);
276			kunmap(pp);
277
278			if (mapping_writably_mapped(mp))
279				flush_dcache_page(pp);
280
281			mark_page_accessed(pp);
282			SetPageUptodate(pp);
283			ClearPageError(pp);
284			unlock_page(pp);
285			put_page(pp);
286		}
287
288		len -= nbytes;
289		off = 0;
290	}
291}
292
293/*
294 * When a file is memory mapped, we must keep the IO data synchronized
295 * between the DMU cache and the memory mapped pages.  What this means:
296 *
297 * On Read:	We "read" preferentially from memory mapped pages,
298 *		else we default from the dmu buffer.
299 *
300 * NOTE: We will always "break up" the IO into PAGESIZE uiomoves when
301 *	 the file is memory mapped.
302 */
303int
304mappedread(znode_t *zp, int nbytes, zfs_uio_t *uio)
305{
306	struct inode *ip = ZTOI(zp);
307	struct address_space *mp = ip->i_mapping;
308	struct page *pp;
309	int64_t	start, off;
310	uint64_t bytes;
311	int len = nbytes;
312	int error = 0;
313	void *pb;
314
315	start = uio->uio_loffset;
316	off = start & (PAGE_SIZE-1);
317	for (start &= PAGE_MASK; len > 0; start += PAGE_SIZE) {
318		bytes = MIN(PAGE_SIZE - off, len);
319
320		pp = find_lock_page(mp, start >> PAGE_SHIFT);
321		if (pp) {
322			ASSERT(PageUptodate(pp));
323			unlock_page(pp);
324
325			pb = kmap(pp);
326			error = zfs_uiomove(pb + off, bytes, UIO_READ, uio);
327			kunmap(pp);
328
329			if (mapping_writably_mapped(mp))
330				flush_dcache_page(pp);
331
332			mark_page_accessed(pp);
333			put_page(pp);
334		} else {
335			error = dmu_read_uio_dbuf(sa_get_db(zp->z_sa_hdl),
336			    uio, bytes);
337		}
338
339		len -= bytes;
340		off = 0;
341		if (error)
342			break;
343	}
344	return (error);
345}
346#endif /* _KERNEL */
347
348unsigned long zfs_delete_blocks = DMU_MAX_DELETEBLKCNT;
349
350/*
351 * Write the bytes to a file.
352 *
353 *	IN:	zp	- znode of file to be written to
354 *		data	- bytes to write
355 *		len	- number of bytes to write
356 *		pos	- offset to start writing at
357 *
358 *	OUT:	resid	- remaining bytes to write
359 *
360 *	RETURN:	0 if success
361 *		positive error code if failure.  EIO is	returned
362 *		for a short write when residp isn't provided.
363 *
364 * Timestamps:
365 *	zp - ctime|mtime updated if byte count > 0
366 */
367int
368zfs_write_simple(znode_t *zp, const void *data, size_t len,
369    loff_t pos, size_t *residp)
370{
371	fstrans_cookie_t cookie;
372	int error;
373
374	struct iovec iov;
375	iov.iov_base = (void *)data;
376	iov.iov_len = len;
377
378	zfs_uio_t uio;
379	zfs_uio_iovec_init(&uio, &iov, 1, pos, UIO_SYSSPACE, len, 0);
380
381	cookie = spl_fstrans_mark();
382	error = zfs_write(zp, &uio, 0, kcred);
383	spl_fstrans_unmark(cookie);
384
385	if (error == 0) {
386		if (residp != NULL)
387			*residp = zfs_uio_resid(&uio);
388		else if (zfs_uio_resid(&uio) != 0)
389			error = SET_ERROR(EIO);
390	}
391
392	return (error);
393}
394
395void
396zfs_zrele_async(znode_t *zp)
397{
398	struct inode *ip = ZTOI(zp);
399	objset_t *os = ITOZSB(ip)->z_os;
400
401	ASSERT(atomic_read(&ip->i_count) > 0);
402	ASSERT(os != NULL);
403
404	/*
405	 * If decrementing the count would put us at 0, we can't do it inline
406	 * here, because that would be synchronous. Instead, dispatch an iput
407	 * to run later.
408	 *
409	 * For more information on the dangers of a synchronous iput, see the
410	 * header comment of this file.
411	 */
412	if (!atomic_add_unless(&ip->i_count, -1, 1)) {
413		VERIFY(taskq_dispatch(dsl_pool_zrele_taskq(dmu_objset_pool(os)),
414		    (task_func_t *)iput, ip, TQ_SLEEP) != TASKQID_INVALID);
415	}
416}
417
418
419/*
420 * Lookup an entry in a directory, or an extended attribute directory.
421 * If it exists, return a held inode reference for it.
422 *
423 *	IN:	zdp	- znode of directory to search.
424 *		nm	- name of entry to lookup.
425 *		flags	- LOOKUP_XATTR set if looking for an attribute.
426 *		cr	- credentials of caller.
427 *		direntflags - directory lookup flags
428 *		realpnp - returned pathname.
429 *
430 *	OUT:	zpp	- znode of located entry, NULL if not found.
431 *
432 *	RETURN:	0 on success, error code on failure.
433 *
434 * Timestamps:
435 *	NA
436 */
437/* ARGSUSED */
438int
439zfs_lookup(znode_t *zdp, char *nm, znode_t **zpp, int flags, cred_t *cr,
440    int *direntflags, pathname_t *realpnp)
441{
442	zfsvfs_t *zfsvfs = ZTOZSB(zdp);
443	int error = 0;
444
445	/*
446	 * Fast path lookup, however we must skip DNLC lookup
447	 * for case folding or normalizing lookups because the
448	 * DNLC code only stores the passed in name.  This means
449	 * creating 'a' and removing 'A' on a case insensitive
450	 * file system would work, but DNLC still thinks 'a'
451	 * exists and won't let you create it again on the next
452	 * pass through fast path.
453	 */
454	if (!(flags & (LOOKUP_XATTR | FIGNORECASE))) {
455
456		if (!S_ISDIR(ZTOI(zdp)->i_mode)) {
457			return (SET_ERROR(ENOTDIR));
458		} else if (zdp->z_sa_hdl == NULL) {
459			return (SET_ERROR(EIO));
460		}
461
462		if (nm[0] == 0 || (nm[0] == '.' && nm[1] == '\0')) {
463			error = zfs_fastaccesschk_execute(zdp, cr);
464			if (!error) {
465				*zpp = zdp;
466				zhold(*zpp);
467				return (0);
468			}
469			return (error);
470		}
471	}
472
473	ZFS_ENTER(zfsvfs);
474	ZFS_VERIFY_ZP(zdp);
475
476	*zpp = NULL;
477
478	if (flags & LOOKUP_XATTR) {
479		/*
480		 * We don't allow recursive attributes..
481		 * Maybe someday we will.
482		 */
483		if (zdp->z_pflags & ZFS_XATTR) {
484			ZFS_EXIT(zfsvfs);
485			return (SET_ERROR(EINVAL));
486		}
487
488		if ((error = zfs_get_xattrdir(zdp, zpp, cr, flags))) {
489			ZFS_EXIT(zfsvfs);
490			return (error);
491		}
492
493		/*
494		 * Do we have permission to get into attribute directory?
495		 */
496
497		if ((error = zfs_zaccess(*zpp, ACE_EXECUTE, 0,
498		    B_FALSE, cr))) {
499			zrele(*zpp);
500			*zpp = NULL;
501		}
502
503		ZFS_EXIT(zfsvfs);
504		return (error);
505	}
506
507	if (!S_ISDIR(ZTOI(zdp)->i_mode)) {
508		ZFS_EXIT(zfsvfs);
509		return (SET_ERROR(ENOTDIR));
510	}
511
512	/*
513	 * Check accessibility of directory.
514	 */
515
516	if ((error = zfs_zaccess(zdp, ACE_EXECUTE, 0, B_FALSE, cr))) {
517		ZFS_EXIT(zfsvfs);
518		return (error);
519	}
520
521	if (zfsvfs->z_utf8 && u8_validate(nm, strlen(nm),
522	    NULL, U8_VALIDATE_ENTIRE, &error) < 0) {
523		ZFS_EXIT(zfsvfs);
524		return (SET_ERROR(EILSEQ));
525	}
526
527	error = zfs_dirlook(zdp, nm, zpp, flags, direntflags, realpnp);
528	if ((error == 0) && (*zpp))
529		zfs_znode_update_vfs(*zpp);
530
531	ZFS_EXIT(zfsvfs);
532	return (error);
533}
534
535/*
536 * Attempt to create a new entry in a directory.  If the entry
537 * already exists, truncate the file if permissible, else return
538 * an error.  Return the ip of the created or trunc'd file.
539 *
540 *	IN:	dzp	- znode of directory to put new file entry in.
541 *		name	- name of new file entry.
542 *		vap	- attributes of new file.
543 *		excl	- flag indicating exclusive or non-exclusive mode.
544 *		mode	- mode to open file with.
545 *		cr	- credentials of caller.
546 *		flag	- file flag.
547 *		vsecp	- ACL to be set
548 *
549 *	OUT:	zpp	- znode of created or trunc'd entry.
550 *
551 *	RETURN:	0 on success, error code on failure.
552 *
553 * Timestamps:
554 *	dzp - ctime|mtime updated if new entry created
555 *	 zp - ctime|mtime always, atime if new
556 */
557
558/* ARGSUSED */
559int
560zfs_create(znode_t *dzp, char *name, vattr_t *vap, int excl,
561    int mode, znode_t **zpp, cred_t *cr, int flag, vsecattr_t *vsecp)
562{
563	znode_t		*zp;
564	zfsvfs_t	*zfsvfs = ZTOZSB(dzp);
565	zilog_t		*zilog;
566	objset_t	*os;
567	zfs_dirlock_t	*dl;
568	dmu_tx_t	*tx;
569	int		error;
570	uid_t		uid;
571	gid_t		gid;
572	zfs_acl_ids_t   acl_ids;
573	boolean_t	fuid_dirtied;
574	boolean_t	have_acl = B_FALSE;
575	boolean_t	waited = B_FALSE;
576
577	/*
578	 * If we have an ephemeral id, ACL, or XVATTR then
579	 * make sure file system is at proper version
580	 */
581
582	gid = crgetgid(cr);
583	uid = crgetuid(cr);
584
585	if (zfsvfs->z_use_fuids == B_FALSE &&
586	    (vsecp || IS_EPHEMERAL(uid) || IS_EPHEMERAL(gid)))
587		return (SET_ERROR(EINVAL));
588
589	if (name == NULL)
590		return (SET_ERROR(EINVAL));
591
592	ZFS_ENTER(zfsvfs);
593	ZFS_VERIFY_ZP(dzp);
594	os = zfsvfs->z_os;
595	zilog = zfsvfs->z_log;
596
597	if (zfsvfs->z_utf8 && u8_validate(name, strlen(name),
598	    NULL, U8_VALIDATE_ENTIRE, &error) < 0) {
599		ZFS_EXIT(zfsvfs);
600		return (SET_ERROR(EILSEQ));
601	}
602
603	if (vap->va_mask & ATTR_XVATTR) {
604		if ((error = secpolicy_xvattr((xvattr_t *)vap,
605		    crgetuid(cr), cr, vap->va_mode)) != 0) {
606			ZFS_EXIT(zfsvfs);
607			return (error);
608		}
609	}
610
611top:
612	*zpp = NULL;
613	if (*name == '\0') {
614		/*
615		 * Null component name refers to the directory itself.
616		 */
617		zhold(dzp);
618		zp = dzp;
619		dl = NULL;
620		error = 0;
621	} else {
622		/* possible igrab(zp) */
623		int zflg = 0;
624
625		if (flag & FIGNORECASE)
626			zflg |= ZCILOOK;
627
628		error = zfs_dirent_lock(&dl, dzp, name, &zp, zflg,
629		    NULL, NULL);
630		if (error) {
631			if (have_acl)
632				zfs_acl_ids_free(&acl_ids);
633			if (strcmp(name, "..") == 0)
634				error = SET_ERROR(EISDIR);
635			ZFS_EXIT(zfsvfs);
636			return (error);
637		}
638	}
639
640	if (zp == NULL) {
641		uint64_t txtype;
642		uint64_t projid = ZFS_DEFAULT_PROJID;
643
644		/*
645		 * Create a new file object and update the directory
646		 * to reference it.
647		 */
648		if ((error = zfs_zaccess(dzp, ACE_ADD_FILE, 0, B_FALSE, cr))) {
649			if (have_acl)
650				zfs_acl_ids_free(&acl_ids);
651			goto out;
652		}
653
654		/*
655		 * We only support the creation of regular files in
656		 * extended attribute directories.
657		 */
658
659		if ((dzp->z_pflags & ZFS_XATTR) && !S_ISREG(vap->va_mode)) {
660			if (have_acl)
661				zfs_acl_ids_free(&acl_ids);
662			error = SET_ERROR(EINVAL);
663			goto out;
664		}
665
666		if (!have_acl && (error = zfs_acl_ids_create(dzp, 0, vap,
667		    cr, vsecp, &acl_ids)) != 0)
668			goto out;
669		have_acl = B_TRUE;
670
671		if (S_ISREG(vap->va_mode) || S_ISDIR(vap->va_mode))
672			projid = zfs_inherit_projid(dzp);
673		if (zfs_acl_ids_overquota(zfsvfs, &acl_ids, projid)) {
674			zfs_acl_ids_free(&acl_ids);
675			error = SET_ERROR(EDQUOT);
676			goto out;
677		}
678
679		tx = dmu_tx_create(os);
680
681		dmu_tx_hold_sa_create(tx, acl_ids.z_aclp->z_acl_bytes +
682		    ZFS_SA_BASE_ATTR_SIZE);
683
684		fuid_dirtied = zfsvfs->z_fuid_dirty;
685		if (fuid_dirtied)
686			zfs_fuid_txhold(zfsvfs, tx);
687		dmu_tx_hold_zap(tx, dzp->z_id, TRUE, name);
688		dmu_tx_hold_sa(tx, dzp->z_sa_hdl, B_FALSE);
689		if (!zfsvfs->z_use_sa &&
690		    acl_ids.z_aclp->z_acl_bytes > ZFS_ACE_SPACE) {
691			dmu_tx_hold_write(tx, DMU_NEW_OBJECT,
692			    0, acl_ids.z_aclp->z_acl_bytes);
693		}
694
695		error = dmu_tx_assign(tx,
696		    (waited ? TXG_NOTHROTTLE : 0) | TXG_NOWAIT);
697		if (error) {
698			zfs_dirent_unlock(dl);
699			if (error == ERESTART) {
700				waited = B_TRUE;
701				dmu_tx_wait(tx);
702				dmu_tx_abort(tx);
703				goto top;
704			}
705			zfs_acl_ids_free(&acl_ids);
706			dmu_tx_abort(tx);
707			ZFS_EXIT(zfsvfs);
708			return (error);
709		}
710		zfs_mknode(dzp, vap, tx, cr, 0, &zp, &acl_ids);
711
712		error = zfs_link_create(dl, zp, tx, ZNEW);
713		if (error != 0) {
714			/*
715			 * Since, we failed to add the directory entry for it,
716			 * delete the newly created dnode.
717			 */
718			zfs_znode_delete(zp, tx);
719			remove_inode_hash(ZTOI(zp));
720			zfs_acl_ids_free(&acl_ids);
721			dmu_tx_commit(tx);
722			goto out;
723		}
724
725		if (fuid_dirtied)
726			zfs_fuid_sync(zfsvfs, tx);
727
728		txtype = zfs_log_create_txtype(Z_FILE, vsecp, vap);
729		if (flag & FIGNORECASE)
730			txtype |= TX_CI;
731		zfs_log_create(zilog, tx, txtype, dzp, zp, name,
732		    vsecp, acl_ids.z_fuidp, vap);
733		zfs_acl_ids_free(&acl_ids);
734		dmu_tx_commit(tx);
735	} else {
736		int aflags = (flag & O_APPEND) ? V_APPEND : 0;
737
738		if (have_acl)
739			zfs_acl_ids_free(&acl_ids);
740		have_acl = B_FALSE;
741
742		/*
743		 * A directory entry already exists for this name.
744		 */
745		/*
746		 * Can't truncate an existing file if in exclusive mode.
747		 */
748		if (excl) {
749			error = SET_ERROR(EEXIST);
750			goto out;
751		}
752		/*
753		 * Can't open a directory for writing.
754		 */
755		if (S_ISDIR(ZTOI(zp)->i_mode)) {
756			error = SET_ERROR(EISDIR);
757			goto out;
758		}
759		/*
760		 * Verify requested access to file.
761		 */
762		if (mode && (error = zfs_zaccess_rwx(zp, mode, aflags, cr))) {
763			goto out;
764		}
765
766		mutex_enter(&dzp->z_lock);
767		dzp->z_seq++;
768		mutex_exit(&dzp->z_lock);
769
770		/*
771		 * Truncate regular files if requested.
772		 */
773		if (S_ISREG(ZTOI(zp)->i_mode) &&
774		    (vap->va_mask & ATTR_SIZE) && (vap->va_size == 0)) {
775			/* we can't hold any locks when calling zfs_freesp() */
776			if (dl) {
777				zfs_dirent_unlock(dl);
778				dl = NULL;
779			}
780			error = zfs_freesp(zp, 0, 0, mode, TRUE);
781		}
782	}
783out:
784
785	if (dl)
786		zfs_dirent_unlock(dl);
787
788	if (error) {
789		if (zp)
790			zrele(zp);
791	} else {
792		zfs_znode_update_vfs(dzp);
793		zfs_znode_update_vfs(zp);
794		*zpp = zp;
795	}
796
797	if (zfsvfs->z_os->os_sync == ZFS_SYNC_ALWAYS)
798		zil_commit(zilog, 0);
799
800	ZFS_EXIT(zfsvfs);
801	return (error);
802}
803
804/* ARGSUSED */
805int
806zfs_tmpfile(struct inode *dip, vattr_t *vap, int excl,
807    int mode, struct inode **ipp, cred_t *cr, int flag, vsecattr_t *vsecp)
808{
809	znode_t		*zp = NULL, *dzp = ITOZ(dip);
810	zfsvfs_t	*zfsvfs = ITOZSB(dip);
811	objset_t	*os;
812	dmu_tx_t	*tx;
813	int		error;
814	uid_t		uid;
815	gid_t		gid;
816	zfs_acl_ids_t   acl_ids;
817	uint64_t	projid = ZFS_DEFAULT_PROJID;
818	boolean_t	fuid_dirtied;
819	boolean_t	have_acl = B_FALSE;
820	boolean_t	waited = B_FALSE;
821
822	/*
823	 * If we have an ephemeral id, ACL, or XVATTR then
824	 * make sure file system is at proper version
825	 */
826
827	gid = crgetgid(cr);
828	uid = crgetuid(cr);
829
830	if (zfsvfs->z_use_fuids == B_FALSE &&
831	    (vsecp || IS_EPHEMERAL(uid) || IS_EPHEMERAL(gid)))
832		return (SET_ERROR(EINVAL));
833
834	ZFS_ENTER(zfsvfs);
835	ZFS_VERIFY_ZP(dzp);
836	os = zfsvfs->z_os;
837
838	if (vap->va_mask & ATTR_XVATTR) {
839		if ((error = secpolicy_xvattr((xvattr_t *)vap,
840		    crgetuid(cr), cr, vap->va_mode)) != 0) {
841			ZFS_EXIT(zfsvfs);
842			return (error);
843		}
844	}
845
846top:
847	*ipp = NULL;
848
849	/*
850	 * Create a new file object and update the directory
851	 * to reference it.
852	 */
853	if ((error = zfs_zaccess(dzp, ACE_ADD_FILE, 0, B_FALSE, cr))) {
854		if (have_acl)
855			zfs_acl_ids_free(&acl_ids);
856		goto out;
857	}
858
859	if (!have_acl && (error = zfs_acl_ids_create(dzp, 0, vap,
860	    cr, vsecp, &acl_ids)) != 0)
861		goto out;
862	have_acl = B_TRUE;
863
864	if (S_ISREG(vap->va_mode) || S_ISDIR(vap->va_mode))
865		projid = zfs_inherit_projid(dzp);
866	if (zfs_acl_ids_overquota(zfsvfs, &acl_ids, projid)) {
867		zfs_acl_ids_free(&acl_ids);
868		error = SET_ERROR(EDQUOT);
869		goto out;
870	}
871
872	tx = dmu_tx_create(os);
873
874	dmu_tx_hold_sa_create(tx, acl_ids.z_aclp->z_acl_bytes +
875	    ZFS_SA_BASE_ATTR_SIZE);
876	dmu_tx_hold_zap(tx, zfsvfs->z_unlinkedobj, FALSE, NULL);
877
878	fuid_dirtied = zfsvfs->z_fuid_dirty;
879	if (fuid_dirtied)
880		zfs_fuid_txhold(zfsvfs, tx);
881	if (!zfsvfs->z_use_sa &&
882	    acl_ids.z_aclp->z_acl_bytes > ZFS_ACE_SPACE) {
883		dmu_tx_hold_write(tx, DMU_NEW_OBJECT,
884		    0, acl_ids.z_aclp->z_acl_bytes);
885	}
886	error = dmu_tx_assign(tx, (waited ? TXG_NOTHROTTLE : 0) | TXG_NOWAIT);
887	if (error) {
888		if (error == ERESTART) {
889			waited = B_TRUE;
890			dmu_tx_wait(tx);
891			dmu_tx_abort(tx);
892			goto top;
893		}
894		zfs_acl_ids_free(&acl_ids);
895		dmu_tx_abort(tx);
896		ZFS_EXIT(zfsvfs);
897		return (error);
898	}
899	zfs_mknode(dzp, vap, tx, cr, IS_TMPFILE, &zp, &acl_ids);
900
901	if (fuid_dirtied)
902		zfs_fuid_sync(zfsvfs, tx);
903
904	/* Add to unlinked set */
905	zp->z_unlinked = B_TRUE;
906	zfs_unlinked_add(zp, tx);
907	zfs_acl_ids_free(&acl_ids);
908	dmu_tx_commit(tx);
909out:
910
911	if (error) {
912		if (zp)
913			zrele(zp);
914	} else {
915		zfs_znode_update_vfs(dzp);
916		zfs_znode_update_vfs(zp);
917		*ipp = ZTOI(zp);
918	}
919
920	ZFS_EXIT(zfsvfs);
921	return (error);
922}
923
924/*
925 * Remove an entry from a directory.
926 *
927 *	IN:	dzp	- znode of directory to remove entry from.
928 *		name	- name of entry to remove.
929 *		cr	- credentials of caller.
930 *		flags	- case flags.
931 *
932 *	RETURN:	0 if success
933 *		error code if failure
934 *
935 * Timestamps:
936 *	dzp - ctime|mtime
937 *	 ip - ctime (if nlink > 0)
938 */
939
940uint64_t null_xattr = 0;
941
942/*ARGSUSED*/
943int
944zfs_remove(znode_t *dzp, char *name, cred_t *cr, int flags)
945{
946	znode_t		*zp;
947	znode_t		*xzp;
948	zfsvfs_t	*zfsvfs = ZTOZSB(dzp);
949	zilog_t		*zilog;
950	uint64_t	acl_obj, xattr_obj;
951	uint64_t	xattr_obj_unlinked = 0;
952	uint64_t	obj = 0;
953	uint64_t	links;
954	zfs_dirlock_t	*dl;
955	dmu_tx_t	*tx;
956	boolean_t	may_delete_now, delete_now = FALSE;
957	boolean_t	unlinked, toobig = FALSE;
958	uint64_t	txtype;
959	pathname_t	*realnmp = NULL;
960	pathname_t	realnm;
961	int		error;
962	int		zflg = ZEXISTS;
963	boolean_t	waited = B_FALSE;
964
965	if (name == NULL)
966		return (SET_ERROR(EINVAL));
967
968	ZFS_ENTER(zfsvfs);
969	ZFS_VERIFY_ZP(dzp);
970	zilog = zfsvfs->z_log;
971
972	if (flags & FIGNORECASE) {
973		zflg |= ZCILOOK;
974		pn_alloc(&realnm);
975		realnmp = &realnm;
976	}
977
978top:
979	xattr_obj = 0;
980	xzp = NULL;
981	/*
982	 * Attempt to lock directory; fail if entry doesn't exist.
983	 */
984	if ((error = zfs_dirent_lock(&dl, dzp, name, &zp, zflg,
985	    NULL, realnmp))) {
986		if (realnmp)
987			pn_free(realnmp);
988		ZFS_EXIT(zfsvfs);
989		return (error);
990	}
991
992	if ((error = zfs_zaccess_delete(dzp, zp, cr))) {
993		goto out;
994	}
995
996	/*
997	 * Need to use rmdir for removing directories.
998	 */
999	if (S_ISDIR(ZTOI(zp)->i_mode)) {
1000		error = SET_ERROR(EPERM);
1001		goto out;
1002	}
1003
1004	mutex_enter(&zp->z_lock);
1005	may_delete_now = atomic_read(&ZTOI(zp)->i_count) == 1 &&
1006	    !(zp->z_is_mapped);
1007	mutex_exit(&zp->z_lock);
1008
1009	/*
1010	 * We may delete the znode now, or we may put it in the unlinked set;
1011	 * it depends on whether we're the last link, and on whether there are
1012	 * other holds on the inode.  So we dmu_tx_hold() the right things to
1013	 * allow for either case.
1014	 */
1015	obj = zp->z_id;
1016	tx = dmu_tx_create(zfsvfs->z_os);
1017	dmu_tx_hold_zap(tx, dzp->z_id, FALSE, name);
1018	dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_FALSE);
1019	zfs_sa_upgrade_txholds(tx, zp);
1020	zfs_sa_upgrade_txholds(tx, dzp);
1021	if (may_delete_now) {
1022		toobig = zp->z_size > zp->z_blksz * zfs_delete_blocks;
1023		/* if the file is too big, only hold_free a token amount */
1024		dmu_tx_hold_free(tx, zp->z_id, 0,
1025		    (toobig ? DMU_MAX_ACCESS : DMU_OBJECT_END));
1026	}
1027
1028	/* are there any extended attributes? */
1029	error = sa_lookup(zp->z_sa_hdl, SA_ZPL_XATTR(zfsvfs),
1030	    &xattr_obj, sizeof (xattr_obj));
1031	if (error == 0 && xattr_obj) {
1032		error = zfs_zget(zfsvfs, xattr_obj, &xzp);
1033		ASSERT0(error);
1034		dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_TRUE);
1035		dmu_tx_hold_sa(tx, xzp->z_sa_hdl, B_FALSE);
1036	}
1037
1038	mutex_enter(&zp->z_lock);
1039	if ((acl_obj = zfs_external_acl(zp)) != 0 && may_delete_now)
1040		dmu_tx_hold_free(tx, acl_obj, 0, DMU_OBJECT_END);
1041	mutex_exit(&zp->z_lock);
1042
1043	/* charge as an update -- would be nice not to charge at all */
1044	dmu_tx_hold_zap(tx, zfsvfs->z_unlinkedobj, FALSE, NULL);
1045
1046	/*
1047	 * Mark this transaction as typically resulting in a net free of space
1048	 */
1049	dmu_tx_mark_netfree(tx);
1050
1051	error = dmu_tx_assign(tx, (waited ? TXG_NOTHROTTLE : 0) | TXG_NOWAIT);
1052	if (error) {
1053		zfs_dirent_unlock(dl);
1054		if (error == ERESTART) {
1055			waited = B_TRUE;
1056			dmu_tx_wait(tx);
1057			dmu_tx_abort(tx);
1058			zrele(zp);
1059			if (xzp)
1060				zrele(xzp);
1061			goto top;
1062		}
1063		if (realnmp)
1064			pn_free(realnmp);
1065		dmu_tx_abort(tx);
1066		zrele(zp);
1067		if (xzp)
1068			zrele(xzp);
1069		ZFS_EXIT(zfsvfs);
1070		return (error);
1071	}
1072
1073	/*
1074	 * Remove the directory entry.
1075	 */
1076	error = zfs_link_destroy(dl, zp, tx, zflg, &unlinked);
1077
1078	if (error) {
1079		dmu_tx_commit(tx);
1080		goto out;
1081	}
1082
1083	if (unlinked) {
1084		/*
1085		 * Hold z_lock so that we can make sure that the ACL obj
1086		 * hasn't changed.  Could have been deleted due to
1087		 * zfs_sa_upgrade().
1088		 */
1089		mutex_enter(&zp->z_lock);
1090		(void) sa_lookup(zp->z_sa_hdl, SA_ZPL_XATTR(zfsvfs),
1091		    &xattr_obj_unlinked, sizeof (xattr_obj_unlinked));
1092		delete_now = may_delete_now && !toobig &&
1093		    atomic_read(&ZTOI(zp)->i_count) == 1 &&
1094		    !(zp->z_is_mapped) && xattr_obj == xattr_obj_unlinked &&
1095		    zfs_external_acl(zp) == acl_obj;
1096	}
1097
1098	if (delete_now) {
1099		if (xattr_obj_unlinked) {
1100			ASSERT3U(ZTOI(xzp)->i_nlink, ==, 2);
1101			mutex_enter(&xzp->z_lock);
1102			xzp->z_unlinked = B_TRUE;
1103			clear_nlink(ZTOI(xzp));
1104			links = 0;
1105			error = sa_update(xzp->z_sa_hdl, SA_ZPL_LINKS(zfsvfs),
1106			    &links, sizeof (links), tx);
1107			ASSERT3U(error,  ==,  0);
1108			mutex_exit(&xzp->z_lock);
1109			zfs_unlinked_add(xzp, tx);
1110
1111			if (zp->z_is_sa)
1112				error = sa_remove(zp->z_sa_hdl,
1113				    SA_ZPL_XATTR(zfsvfs), tx);
1114			else
1115				error = sa_update(zp->z_sa_hdl,
1116				    SA_ZPL_XATTR(zfsvfs), &null_xattr,
1117				    sizeof (uint64_t), tx);
1118			ASSERT0(error);
1119		}
1120		/*
1121		 * Add to the unlinked set because a new reference could be
1122		 * taken concurrently resulting in a deferred destruction.
1123		 */
1124		zfs_unlinked_add(zp, tx);
1125		mutex_exit(&zp->z_lock);
1126	} else if (unlinked) {
1127		mutex_exit(&zp->z_lock);
1128		zfs_unlinked_add(zp, tx);
1129	}
1130
1131	txtype = TX_REMOVE;
1132	if (flags & FIGNORECASE)
1133		txtype |= TX_CI;
1134	zfs_log_remove(zilog, tx, txtype, dzp, name, obj, unlinked);
1135
1136	dmu_tx_commit(tx);
1137out:
1138	if (realnmp)
1139		pn_free(realnmp);
1140
1141	zfs_dirent_unlock(dl);
1142	zfs_znode_update_vfs(dzp);
1143	zfs_znode_update_vfs(zp);
1144
1145	if (delete_now)
1146		zrele(zp);
1147	else
1148		zfs_zrele_async(zp);
1149
1150	if (xzp) {
1151		zfs_znode_update_vfs(xzp);
1152		zfs_zrele_async(xzp);
1153	}
1154
1155	if (zfsvfs->z_os->os_sync == ZFS_SYNC_ALWAYS)
1156		zil_commit(zilog, 0);
1157
1158	ZFS_EXIT(zfsvfs);
1159	return (error);
1160}
1161
1162/*
1163 * Create a new directory and insert it into dzp using the name
1164 * provided.  Return a pointer to the inserted directory.
1165 *
1166 *	IN:	dzp	- znode of directory to add subdir to.
1167 *		dirname	- name of new directory.
1168 *		vap	- attributes of new directory.
1169 *		cr	- credentials of caller.
1170 *		flags	- case flags.
1171 *		vsecp	- ACL to be set
1172 *
1173 *	OUT:	zpp	- znode of created directory.
1174 *
1175 *	RETURN:	0 if success
1176 *		error code if failure
1177 *
1178 * Timestamps:
1179 *	dzp - ctime|mtime updated
1180 *	zpp - ctime|mtime|atime updated
1181 */
1182/*ARGSUSED*/
1183int
1184zfs_mkdir(znode_t *dzp, char *dirname, vattr_t *vap, znode_t **zpp,
1185    cred_t *cr, int flags, vsecattr_t *vsecp)
1186{
1187	znode_t		*zp;
1188	zfsvfs_t	*zfsvfs = ZTOZSB(dzp);
1189	zilog_t		*zilog;
1190	zfs_dirlock_t	*dl;
1191	uint64_t	txtype;
1192	dmu_tx_t	*tx;
1193	int		error;
1194	int		zf = ZNEW;
1195	uid_t		uid;
1196	gid_t		gid = crgetgid(cr);
1197	zfs_acl_ids_t   acl_ids;
1198	boolean_t	fuid_dirtied;
1199	boolean_t	waited = B_FALSE;
1200
1201	ASSERT(S_ISDIR(vap->va_mode));
1202
1203	/*
1204	 * If we have an ephemeral id, ACL, or XVATTR then
1205	 * make sure file system is at proper version
1206	 */
1207
1208	uid = crgetuid(cr);
1209	if (zfsvfs->z_use_fuids == B_FALSE &&
1210	    (vsecp || IS_EPHEMERAL(uid) || IS_EPHEMERAL(gid)))
1211		return (SET_ERROR(EINVAL));
1212
1213	if (dirname == NULL)
1214		return (SET_ERROR(EINVAL));
1215
1216	ZFS_ENTER(zfsvfs);
1217	ZFS_VERIFY_ZP(dzp);
1218	zilog = zfsvfs->z_log;
1219
1220	if (dzp->z_pflags & ZFS_XATTR) {
1221		ZFS_EXIT(zfsvfs);
1222		return (SET_ERROR(EINVAL));
1223	}
1224
1225	if (zfsvfs->z_utf8 && u8_validate(dirname,
1226	    strlen(dirname), NULL, U8_VALIDATE_ENTIRE, &error) < 0) {
1227		ZFS_EXIT(zfsvfs);
1228		return (SET_ERROR(EILSEQ));
1229	}
1230	if (flags & FIGNORECASE)
1231		zf |= ZCILOOK;
1232
1233	if (vap->va_mask & ATTR_XVATTR) {
1234		if ((error = secpolicy_xvattr((xvattr_t *)vap,
1235		    crgetuid(cr), cr, vap->va_mode)) != 0) {
1236			ZFS_EXIT(zfsvfs);
1237			return (error);
1238		}
1239	}
1240
1241	if ((error = zfs_acl_ids_create(dzp, 0, vap, cr,
1242	    vsecp, &acl_ids)) != 0) {
1243		ZFS_EXIT(zfsvfs);
1244		return (error);
1245	}
1246	/*
1247	 * First make sure the new directory doesn't exist.
1248	 *
1249	 * Existence is checked first to make sure we don't return
1250	 * EACCES instead of EEXIST which can cause some applications
1251	 * to fail.
1252	 */
1253top:
1254	*zpp = NULL;
1255
1256	if ((error = zfs_dirent_lock(&dl, dzp, dirname, &zp, zf,
1257	    NULL, NULL))) {
1258		zfs_acl_ids_free(&acl_ids);
1259		ZFS_EXIT(zfsvfs);
1260		return (error);
1261	}
1262
1263	if ((error = zfs_zaccess(dzp, ACE_ADD_SUBDIRECTORY, 0, B_FALSE, cr))) {
1264		zfs_acl_ids_free(&acl_ids);
1265		zfs_dirent_unlock(dl);
1266		ZFS_EXIT(zfsvfs);
1267		return (error);
1268	}
1269
1270	if (zfs_acl_ids_overquota(zfsvfs, &acl_ids, zfs_inherit_projid(dzp))) {
1271		zfs_acl_ids_free(&acl_ids);
1272		zfs_dirent_unlock(dl);
1273		ZFS_EXIT(zfsvfs);
1274		return (SET_ERROR(EDQUOT));
1275	}
1276
1277	/*
1278	 * Add a new entry to the directory.
1279	 */
1280	tx = dmu_tx_create(zfsvfs->z_os);
1281	dmu_tx_hold_zap(tx, dzp->z_id, TRUE, dirname);
1282	dmu_tx_hold_zap(tx, DMU_NEW_OBJECT, FALSE, NULL);
1283	fuid_dirtied = zfsvfs->z_fuid_dirty;
1284	if (fuid_dirtied)
1285		zfs_fuid_txhold(zfsvfs, tx);
1286	if (!zfsvfs->z_use_sa && acl_ids.z_aclp->z_acl_bytes > ZFS_ACE_SPACE) {
1287		dmu_tx_hold_write(tx, DMU_NEW_OBJECT, 0,
1288		    acl_ids.z_aclp->z_acl_bytes);
1289	}
1290
1291	dmu_tx_hold_sa_create(tx, acl_ids.z_aclp->z_acl_bytes +
1292	    ZFS_SA_BASE_ATTR_SIZE);
1293
1294	error = dmu_tx_assign(tx, (waited ? TXG_NOTHROTTLE : 0) | TXG_NOWAIT);
1295	if (error) {
1296		zfs_dirent_unlock(dl);
1297		if (error == ERESTART) {
1298			waited = B_TRUE;
1299			dmu_tx_wait(tx);
1300			dmu_tx_abort(tx);
1301			goto top;
1302		}
1303		zfs_acl_ids_free(&acl_ids);
1304		dmu_tx_abort(tx);
1305		ZFS_EXIT(zfsvfs);
1306		return (error);
1307	}
1308
1309	/*
1310	 * Create new node.
1311	 */
1312	zfs_mknode(dzp, vap, tx, cr, 0, &zp, &acl_ids);
1313
1314	/*
1315	 * Now put new name in parent dir.
1316	 */
1317	error = zfs_link_create(dl, zp, tx, ZNEW);
1318	if (error != 0) {
1319		zfs_znode_delete(zp, tx);
1320		remove_inode_hash(ZTOI(zp));
1321		goto out;
1322	}
1323
1324	if (fuid_dirtied)
1325		zfs_fuid_sync(zfsvfs, tx);
1326
1327	*zpp = zp;
1328
1329	txtype = zfs_log_create_txtype(Z_DIR, vsecp, vap);
1330	if (flags & FIGNORECASE)
1331		txtype |= TX_CI;
1332	zfs_log_create(zilog, tx, txtype, dzp, zp, dirname, vsecp,
1333	    acl_ids.z_fuidp, vap);
1334
1335out:
1336	zfs_acl_ids_free(&acl_ids);
1337
1338	dmu_tx_commit(tx);
1339
1340	zfs_dirent_unlock(dl);
1341
1342	if (zfsvfs->z_os->os_sync == ZFS_SYNC_ALWAYS)
1343		zil_commit(zilog, 0);
1344
1345	if (error != 0) {
1346		zrele(zp);
1347	} else {
1348		zfs_znode_update_vfs(dzp);
1349		zfs_znode_update_vfs(zp);
1350	}
1351	ZFS_EXIT(zfsvfs);
1352	return (error);
1353}
1354
1355/*
1356 * Remove a directory subdir entry.  If the current working
1357 * directory is the same as the subdir to be removed, the
1358 * remove will fail.
1359 *
1360 *	IN:	dzp	- znode of directory to remove from.
1361 *		name	- name of directory to be removed.
1362 *		cwd	- inode of current working directory.
1363 *		cr	- credentials of caller.
1364 *		flags	- case flags
1365 *
1366 *	RETURN:	0 on success, error code on failure.
1367 *
1368 * Timestamps:
1369 *	dzp - ctime|mtime updated
1370 */
1371/*ARGSUSED*/
1372int
1373zfs_rmdir(znode_t *dzp, char *name, znode_t *cwd, cred_t *cr,
1374    int flags)
1375{
1376	znode_t		*zp;
1377	zfsvfs_t	*zfsvfs = ZTOZSB(dzp);
1378	zilog_t		*zilog;
1379	zfs_dirlock_t	*dl;
1380	dmu_tx_t	*tx;
1381	int		error;
1382	int		zflg = ZEXISTS;
1383	boolean_t	waited = B_FALSE;
1384
1385	if (name == NULL)
1386		return (SET_ERROR(EINVAL));
1387
1388	ZFS_ENTER(zfsvfs);
1389	ZFS_VERIFY_ZP(dzp);
1390	zilog = zfsvfs->z_log;
1391
1392	if (flags & FIGNORECASE)
1393		zflg |= ZCILOOK;
1394top:
1395	zp = NULL;
1396
1397	/*
1398	 * Attempt to lock directory; fail if entry doesn't exist.
1399	 */
1400	if ((error = zfs_dirent_lock(&dl, dzp, name, &zp, zflg,
1401	    NULL, NULL))) {
1402		ZFS_EXIT(zfsvfs);
1403		return (error);
1404	}
1405
1406	if ((error = zfs_zaccess_delete(dzp, zp, cr))) {
1407		goto out;
1408	}
1409
1410	if (!S_ISDIR(ZTOI(zp)->i_mode)) {
1411		error = SET_ERROR(ENOTDIR);
1412		goto out;
1413	}
1414
1415	if (zp == cwd) {
1416		error = SET_ERROR(EINVAL);
1417		goto out;
1418	}
1419
1420	/*
1421	 * Grab a lock on the directory to make sure that no one is
1422	 * trying to add (or lookup) entries while we are removing it.
1423	 */
1424	rw_enter(&zp->z_name_lock, RW_WRITER);
1425
1426	/*
1427	 * Grab a lock on the parent pointer to make sure we play well
1428	 * with the treewalk and directory rename code.
1429	 */
1430	rw_enter(&zp->z_parent_lock, RW_WRITER);
1431
1432	tx = dmu_tx_create(zfsvfs->z_os);
1433	dmu_tx_hold_zap(tx, dzp->z_id, FALSE, name);
1434	dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_FALSE);
1435	dmu_tx_hold_zap(tx, zfsvfs->z_unlinkedobj, FALSE, NULL);
1436	zfs_sa_upgrade_txholds(tx, zp);
1437	zfs_sa_upgrade_txholds(tx, dzp);
1438	dmu_tx_mark_netfree(tx);
1439	error = dmu_tx_assign(tx, (waited ? TXG_NOTHROTTLE : 0) | TXG_NOWAIT);
1440	if (error) {
1441		rw_exit(&zp->z_parent_lock);
1442		rw_exit(&zp->z_name_lock);
1443		zfs_dirent_unlock(dl);
1444		if (error == ERESTART) {
1445			waited = B_TRUE;
1446			dmu_tx_wait(tx);
1447			dmu_tx_abort(tx);
1448			zrele(zp);
1449			goto top;
1450		}
1451		dmu_tx_abort(tx);
1452		zrele(zp);
1453		ZFS_EXIT(zfsvfs);
1454		return (error);
1455	}
1456
1457	error = zfs_link_destroy(dl, zp, tx, zflg, NULL);
1458
1459	if (error == 0) {
1460		uint64_t txtype = TX_RMDIR;
1461		if (flags & FIGNORECASE)
1462			txtype |= TX_CI;
1463		zfs_log_remove(zilog, tx, txtype, dzp, name, ZFS_NO_OBJECT,
1464		    B_FALSE);
1465	}
1466
1467	dmu_tx_commit(tx);
1468
1469	rw_exit(&zp->z_parent_lock);
1470	rw_exit(&zp->z_name_lock);
1471out:
1472	zfs_dirent_unlock(dl);
1473
1474	zfs_znode_update_vfs(dzp);
1475	zfs_znode_update_vfs(zp);
1476	zrele(zp);
1477
1478	if (zfsvfs->z_os->os_sync == ZFS_SYNC_ALWAYS)
1479		zil_commit(zilog, 0);
1480
1481	ZFS_EXIT(zfsvfs);
1482	return (error);
1483}
1484
1485/*
1486 * Read directory entries from the given directory cursor position and emit
1487 * name and position for each entry.
1488 *
1489 *	IN:	ip	- inode of directory to read.
1490 *		ctx	- directory entry context.
1491 *		cr	- credentials of caller.
1492 *
1493 *	RETURN:	0 if success
1494 *		error code if failure
1495 *
1496 * Timestamps:
1497 *	ip - atime updated
1498 *
1499 * Note that the low 4 bits of the cookie returned by zap is always zero.
1500 * This allows us to use the low range for "special" directory entries:
1501 * We use 0 for '.', and 1 for '..'.  If this is the root of the filesystem,
1502 * we use the offset 2 for the '.zfs' directory.
1503 */
1504/* ARGSUSED */
1505int
1506zfs_readdir(struct inode *ip, zpl_dir_context_t *ctx, cred_t *cr)
1507{
1508	znode_t		*zp = ITOZ(ip);
1509	zfsvfs_t	*zfsvfs = ITOZSB(ip);
1510	objset_t	*os;
1511	zap_cursor_t	zc;
1512	zap_attribute_t	zap;
1513	int		error;
1514	uint8_t		prefetch;
1515	uint8_t		type;
1516	int		done = 0;
1517	uint64_t	parent;
1518	uint64_t	offset; /* must be unsigned; checks for < 1 */
1519
1520	ZFS_ENTER(zfsvfs);
1521	ZFS_VERIFY_ZP(zp);
1522
1523	if ((error = sa_lookup(zp->z_sa_hdl, SA_ZPL_PARENT(zfsvfs),
1524	    &parent, sizeof (parent))) != 0)
1525		goto out;
1526
1527	/*
1528	 * Quit if directory has been removed (posix)
1529	 */
1530	if (zp->z_unlinked)
1531		goto out;
1532
1533	error = 0;
1534	os = zfsvfs->z_os;
1535	offset = ctx->pos;
1536	prefetch = zp->z_zn_prefetch;
1537
1538	/*
1539	 * Initialize the iterator cursor.
1540	 */
1541	if (offset <= 3) {
1542		/*
1543		 * Start iteration from the beginning of the directory.
1544		 */
1545		zap_cursor_init(&zc, os, zp->z_id);
1546	} else {
1547		/*
1548		 * The offset is a serialized cursor.
1549		 */
1550		zap_cursor_init_serialized(&zc, os, zp->z_id, offset);
1551	}
1552
1553	/*
1554	 * Transform to file-system independent format
1555	 */
1556	while (!done) {
1557		uint64_t objnum;
1558		/*
1559		 * Special case `.', `..', and `.zfs'.
1560		 */
1561		if (offset == 0) {
1562			(void) strcpy(zap.za_name, ".");
1563			zap.za_normalization_conflict = 0;
1564			objnum = zp->z_id;
1565			type = DT_DIR;
1566		} else if (offset == 1) {
1567			(void) strcpy(zap.za_name, "..");
1568			zap.za_normalization_conflict = 0;
1569			objnum = parent;
1570			type = DT_DIR;
1571		} else if (offset == 2 && zfs_show_ctldir(zp)) {
1572			(void) strcpy(zap.za_name, ZFS_CTLDIR_NAME);
1573			zap.za_normalization_conflict = 0;
1574			objnum = ZFSCTL_INO_ROOT;
1575			type = DT_DIR;
1576		} else {
1577			/*
1578			 * Grab next entry.
1579			 */
1580			if ((error = zap_cursor_retrieve(&zc, &zap))) {
1581				if (error == ENOENT)
1582					break;
1583				else
1584					goto update;
1585			}
1586
1587			/*
1588			 * Allow multiple entries provided the first entry is
1589			 * the object id.  Non-zpl consumers may safely make
1590			 * use of the additional space.
1591			 *
1592			 * XXX: This should be a feature flag for compatibility
1593			 */
1594			if (zap.za_integer_length != 8 ||
1595			    zap.za_num_integers == 0) {
1596				cmn_err(CE_WARN, "zap_readdir: bad directory "
1597				    "entry, obj = %lld, offset = %lld, "
1598				    "length = %d, num = %lld\n",
1599				    (u_longlong_t)zp->z_id,
1600				    (u_longlong_t)offset,
1601				    zap.za_integer_length,
1602				    (u_longlong_t)zap.za_num_integers);
1603				error = SET_ERROR(ENXIO);
1604				goto update;
1605			}
1606
1607			objnum = ZFS_DIRENT_OBJ(zap.za_first_integer);
1608			type = ZFS_DIRENT_TYPE(zap.za_first_integer);
1609		}
1610
1611		done = !zpl_dir_emit(ctx, zap.za_name, strlen(zap.za_name),
1612		    objnum, type);
1613		if (done)
1614			break;
1615
1616		/* Prefetch znode */
1617		if (prefetch) {
1618			dmu_prefetch(os, objnum, 0, 0, 0,
1619			    ZIO_PRIORITY_SYNC_READ);
1620		}
1621
1622		/*
1623		 * Move to the next entry, fill in the previous offset.
1624		 */
1625		if (offset > 2 || (offset == 2 && !zfs_show_ctldir(zp))) {
1626			zap_cursor_advance(&zc);
1627			offset = zap_cursor_serialize(&zc);
1628		} else {
1629			offset += 1;
1630		}
1631		ctx->pos = offset;
1632	}
1633	zp->z_zn_prefetch = B_FALSE; /* a lookup will re-enable pre-fetching */
1634
1635update:
1636	zap_cursor_fini(&zc);
1637	if (error == ENOENT)
1638		error = 0;
1639out:
1640	ZFS_EXIT(zfsvfs);
1641
1642	return (error);
1643}
1644
1645/*
1646 * Get the basic file attributes and place them in the provided kstat
1647 * structure.  The inode is assumed to be the authoritative source
1648 * for most of the attributes.  However, the znode currently has the
1649 * authoritative atime, blksize, and block count.
1650 *
1651 *	IN:	ip	- inode of file.
1652 *
1653 *	OUT:	sp	- kstat values.
1654 *
1655 *	RETURN:	0 (always succeeds)
1656 */
1657/* ARGSUSED */
1658int
1659zfs_getattr_fast(struct user_namespace *user_ns, struct inode *ip,
1660    struct kstat *sp)
1661{
1662	znode_t *zp = ITOZ(ip);
1663	zfsvfs_t *zfsvfs = ITOZSB(ip);
1664	uint32_t blksize;
1665	u_longlong_t nblocks;
1666
1667	ZFS_ENTER(zfsvfs);
1668	ZFS_VERIFY_ZP(zp);
1669
1670	mutex_enter(&zp->z_lock);
1671
1672	zpl_generic_fillattr(user_ns, ip, sp);
1673	/*
1674	 * +1 link count for root inode with visible '.zfs' directory.
1675	 */
1676	if ((zp->z_id == zfsvfs->z_root) && zfs_show_ctldir(zp))
1677		if (sp->nlink < ZFS_LINK_MAX)
1678			sp->nlink++;
1679
1680	sa_object_size(zp->z_sa_hdl, &blksize, &nblocks);
1681	sp->blksize = blksize;
1682	sp->blocks = nblocks;
1683
1684	if (unlikely(zp->z_blksz == 0)) {
1685		/*
1686		 * Block size hasn't been set; suggest maximal I/O transfers.
1687		 */
1688		sp->blksize = zfsvfs->z_max_blksz;
1689	}
1690
1691	mutex_exit(&zp->z_lock);
1692
1693	/*
1694	 * Required to prevent NFS client from detecting different inode
1695	 * numbers of snapshot root dentry before and after snapshot mount.
1696	 */
1697	if (zfsvfs->z_issnap) {
1698		if (ip->i_sb->s_root->d_inode == ip)
1699			sp->ino = ZFSCTL_INO_SNAPDIRS -
1700			    dmu_objset_id(zfsvfs->z_os);
1701	}
1702
1703	ZFS_EXIT(zfsvfs);
1704
1705	return (0);
1706}
1707
1708/*
1709 * For the operation of changing file's user/group/project, we need to
1710 * handle not only the main object that is assigned to the file directly,
1711 * but also the ones that are used by the file via hidden xattr directory.
1712 *
1713 * Because the xattr directory may contains many EA entries, as to it may
1714 * be impossible to change all of them via the transaction of changing the
1715 * main object's user/group/project attributes. Then we have to change them
1716 * via other multiple independent transactions one by one. It may be not good
1717 * solution, but we have no better idea yet.
1718 */
1719static int
1720zfs_setattr_dir(znode_t *dzp)
1721{
1722	struct inode	*dxip = ZTOI(dzp);
1723	struct inode	*xip = NULL;
1724	zfsvfs_t	*zfsvfs = ZTOZSB(dzp);
1725	objset_t	*os = zfsvfs->z_os;
1726	zap_cursor_t	zc;
1727	zap_attribute_t	zap;
1728	zfs_dirlock_t	*dl;
1729	znode_t		*zp = NULL;
1730	dmu_tx_t	*tx = NULL;
1731	uint64_t	uid, gid;
1732	sa_bulk_attr_t	bulk[4];
1733	int		count;
1734	int		err;
1735
1736	zap_cursor_init(&zc, os, dzp->z_id);
1737	while ((err = zap_cursor_retrieve(&zc, &zap)) == 0) {
1738		count = 0;
1739		if (zap.za_integer_length != 8 || zap.za_num_integers != 1) {
1740			err = ENXIO;
1741			break;
1742		}
1743
1744		err = zfs_dirent_lock(&dl, dzp, (char *)zap.za_name, &zp,
1745		    ZEXISTS, NULL, NULL);
1746		if (err == ENOENT)
1747			goto next;
1748		if (err)
1749			break;
1750
1751		xip = ZTOI(zp);
1752		if (KUID_TO_SUID(xip->i_uid) == KUID_TO_SUID(dxip->i_uid) &&
1753		    KGID_TO_SGID(xip->i_gid) == KGID_TO_SGID(dxip->i_gid) &&
1754		    zp->z_projid == dzp->z_projid)
1755			goto next;
1756
1757		tx = dmu_tx_create(os);
1758		if (!(zp->z_pflags & ZFS_PROJID))
1759			dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_TRUE);
1760		else
1761			dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_FALSE);
1762
1763		err = dmu_tx_assign(tx, TXG_WAIT);
1764		if (err)
1765			break;
1766
1767		mutex_enter(&dzp->z_lock);
1768
1769		if (KUID_TO_SUID(xip->i_uid) != KUID_TO_SUID(dxip->i_uid)) {
1770			xip->i_uid = dxip->i_uid;
1771			uid = zfs_uid_read(dxip);
1772			SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_UID(zfsvfs), NULL,
1773			    &uid, sizeof (uid));
1774		}
1775
1776		if (KGID_TO_SGID(xip->i_gid) != KGID_TO_SGID(dxip->i_gid)) {
1777			xip->i_gid = dxip->i_gid;
1778			gid = zfs_gid_read(dxip);
1779			SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_GID(zfsvfs), NULL,
1780			    &gid, sizeof (gid));
1781		}
1782
1783		if (zp->z_projid != dzp->z_projid) {
1784			if (!(zp->z_pflags & ZFS_PROJID)) {
1785				zp->z_pflags |= ZFS_PROJID;
1786				SA_ADD_BULK_ATTR(bulk, count,
1787				    SA_ZPL_FLAGS(zfsvfs), NULL, &zp->z_pflags,
1788				    sizeof (zp->z_pflags));
1789			}
1790
1791			zp->z_projid = dzp->z_projid;
1792			SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_PROJID(zfsvfs),
1793			    NULL, &zp->z_projid, sizeof (zp->z_projid));
1794		}
1795
1796		mutex_exit(&dzp->z_lock);
1797
1798		if (likely(count > 0)) {
1799			err = sa_bulk_update(zp->z_sa_hdl, bulk, count, tx);
1800			dmu_tx_commit(tx);
1801		} else {
1802			dmu_tx_abort(tx);
1803		}
1804		tx = NULL;
1805		if (err != 0 && err != ENOENT)
1806			break;
1807
1808next:
1809		if (zp) {
1810			zrele(zp);
1811			zp = NULL;
1812			zfs_dirent_unlock(dl);
1813		}
1814		zap_cursor_advance(&zc);
1815	}
1816
1817	if (tx)
1818		dmu_tx_abort(tx);
1819	if (zp) {
1820		zrele(zp);
1821		zfs_dirent_unlock(dl);
1822	}
1823	zap_cursor_fini(&zc);
1824
1825	return (err == ENOENT ? 0 : err);
1826}
1827
1828/*
1829 * Set the file attributes to the values contained in the
1830 * vattr structure.
1831 *
1832 *	IN:	zp	- znode of file to be modified.
1833 *		vap	- new attribute values.
1834 *			  If ATTR_XVATTR set, then optional attrs are being set
1835 *		flags	- ATTR_UTIME set if non-default time values provided.
1836 *			- ATTR_NOACLCHECK (CIFS context only).
1837 *		cr	- credentials of caller.
1838 *
1839 *	RETURN:	0 if success
1840 *		error code if failure
1841 *
1842 * Timestamps:
1843 *	ip - ctime updated, mtime updated if size changed.
1844 */
1845/* ARGSUSED */
1846int
1847zfs_setattr(znode_t *zp, vattr_t *vap, int flags, cred_t *cr)
1848{
1849	struct inode	*ip;
1850	zfsvfs_t	*zfsvfs = ZTOZSB(zp);
1851	objset_t	*os = zfsvfs->z_os;
1852	zilog_t		*zilog;
1853	dmu_tx_t	*tx;
1854	vattr_t		oldva;
1855	xvattr_t	*tmpxvattr;
1856	uint_t		mask = vap->va_mask;
1857	uint_t		saved_mask = 0;
1858	int		trim_mask = 0;
1859	uint64_t	new_mode;
1860	uint64_t	new_kuid = 0, new_kgid = 0, new_uid, new_gid;
1861	uint64_t	xattr_obj;
1862	uint64_t	mtime[2], ctime[2], atime[2];
1863	uint64_t	projid = ZFS_INVALID_PROJID;
1864	znode_t		*attrzp;
1865	int		need_policy = FALSE;
1866	int		err, err2 = 0;
1867	zfs_fuid_info_t *fuidp = NULL;
1868	xvattr_t *xvap = (xvattr_t *)vap;	/* vap may be an xvattr_t * */
1869	xoptattr_t	*xoap;
1870	zfs_acl_t	*aclp;
1871	boolean_t skipaclchk = (flags & ATTR_NOACLCHECK) ? B_TRUE : B_FALSE;
1872	boolean_t	fuid_dirtied = B_FALSE;
1873	boolean_t	handle_eadir = B_FALSE;
1874	sa_bulk_attr_t	*bulk, *xattr_bulk;
1875	int		count = 0, xattr_count = 0, bulks = 8;
1876
1877	if (mask == 0)
1878		return (0);
1879
1880	ZFS_ENTER(zfsvfs);
1881	ZFS_VERIFY_ZP(zp);
1882	ip = ZTOI(zp);
1883
1884	/*
1885	 * If this is a xvattr_t, then get a pointer to the structure of
1886	 * optional attributes.  If this is NULL, then we have a vattr_t.
1887	 */
1888	xoap = xva_getxoptattr(xvap);
1889	if (xoap != NULL && (mask & ATTR_XVATTR)) {
1890		if (XVA_ISSET_REQ(xvap, XAT_PROJID)) {
1891			if (!dmu_objset_projectquota_enabled(os) ||
1892			    (!S_ISREG(ip->i_mode) && !S_ISDIR(ip->i_mode))) {
1893				ZFS_EXIT(zfsvfs);
1894				return (SET_ERROR(ENOTSUP));
1895			}
1896
1897			projid = xoap->xoa_projid;
1898			if (unlikely(projid == ZFS_INVALID_PROJID)) {
1899				ZFS_EXIT(zfsvfs);
1900				return (SET_ERROR(EINVAL));
1901			}
1902
1903			if (projid == zp->z_projid && zp->z_pflags & ZFS_PROJID)
1904				projid = ZFS_INVALID_PROJID;
1905			else
1906				need_policy = TRUE;
1907		}
1908
1909		if (XVA_ISSET_REQ(xvap, XAT_PROJINHERIT) &&
1910		    (xoap->xoa_projinherit !=
1911		    ((zp->z_pflags & ZFS_PROJINHERIT) != 0)) &&
1912		    (!dmu_objset_projectquota_enabled(os) ||
1913		    (!S_ISREG(ip->i_mode) && !S_ISDIR(ip->i_mode)))) {
1914			ZFS_EXIT(zfsvfs);
1915			return (SET_ERROR(ENOTSUP));
1916		}
1917	}
1918
1919	zilog = zfsvfs->z_log;
1920
1921	/*
1922	 * Make sure that if we have ephemeral uid/gid or xvattr specified
1923	 * that file system is at proper version level
1924	 */
1925
1926	if (zfsvfs->z_use_fuids == B_FALSE &&
1927	    (((mask & ATTR_UID) && IS_EPHEMERAL(vap->va_uid)) ||
1928	    ((mask & ATTR_GID) && IS_EPHEMERAL(vap->va_gid)) ||
1929	    (mask & ATTR_XVATTR))) {
1930		ZFS_EXIT(zfsvfs);
1931		return (SET_ERROR(EINVAL));
1932	}
1933
1934	if (mask & ATTR_SIZE && S_ISDIR(ip->i_mode)) {
1935		ZFS_EXIT(zfsvfs);
1936		return (SET_ERROR(EISDIR));
1937	}
1938
1939	if (mask & ATTR_SIZE && !S_ISREG(ip->i_mode) && !S_ISFIFO(ip->i_mode)) {
1940		ZFS_EXIT(zfsvfs);
1941		return (SET_ERROR(EINVAL));
1942	}
1943
1944	tmpxvattr = kmem_alloc(sizeof (xvattr_t), KM_SLEEP);
1945	xva_init(tmpxvattr);
1946
1947	bulk = kmem_alloc(sizeof (sa_bulk_attr_t) * bulks, KM_SLEEP);
1948	xattr_bulk = kmem_alloc(sizeof (sa_bulk_attr_t) * bulks, KM_SLEEP);
1949
1950	/*
1951	 * Immutable files can only alter immutable bit and atime
1952	 */
1953	if ((zp->z_pflags & ZFS_IMMUTABLE) &&
1954	    ((mask & (ATTR_SIZE|ATTR_UID|ATTR_GID|ATTR_MTIME|ATTR_MODE)) ||
1955	    ((mask & ATTR_XVATTR) && XVA_ISSET_REQ(xvap, XAT_CREATETIME)))) {
1956		err = SET_ERROR(EPERM);
1957		goto out3;
1958	}
1959
1960	if ((mask & ATTR_SIZE) && (zp->z_pflags & ZFS_READONLY)) {
1961		err = SET_ERROR(EPERM);
1962		goto out3;
1963	}
1964
1965	/*
1966	 * Verify timestamps doesn't overflow 32 bits.
1967	 * ZFS can handle large timestamps, but 32bit syscalls can't
1968	 * handle times greater than 2039.  This check should be removed
1969	 * once large timestamps are fully supported.
1970	 */
1971	if (mask & (ATTR_ATIME | ATTR_MTIME)) {
1972		if (((mask & ATTR_ATIME) &&
1973		    TIMESPEC_OVERFLOW(&vap->va_atime)) ||
1974		    ((mask & ATTR_MTIME) &&
1975		    TIMESPEC_OVERFLOW(&vap->va_mtime))) {
1976			err = SET_ERROR(EOVERFLOW);
1977			goto out3;
1978		}
1979	}
1980
1981top:
1982	attrzp = NULL;
1983	aclp = NULL;
1984
1985	/* Can this be moved to before the top label? */
1986	if (zfs_is_readonly(zfsvfs)) {
1987		err = SET_ERROR(EROFS);
1988		goto out3;
1989	}
1990
1991	/*
1992	 * First validate permissions
1993	 */
1994
1995	if (mask & ATTR_SIZE) {
1996		err = zfs_zaccess(zp, ACE_WRITE_DATA, 0, skipaclchk, cr);
1997		if (err)
1998			goto out3;
1999
2000		/*
2001		 * XXX - Note, we are not providing any open
2002		 * mode flags here (like FNDELAY), so we may
2003		 * block if there are locks present... this
2004		 * should be addressed in openat().
2005		 */
2006		/* XXX - would it be OK to generate a log record here? */
2007		err = zfs_freesp(zp, vap->va_size, 0, 0, FALSE);
2008		if (err)
2009			goto out3;
2010	}
2011
2012	if (mask & (ATTR_ATIME|ATTR_MTIME) ||
2013	    ((mask & ATTR_XVATTR) && (XVA_ISSET_REQ(xvap, XAT_HIDDEN) ||
2014	    XVA_ISSET_REQ(xvap, XAT_READONLY) ||
2015	    XVA_ISSET_REQ(xvap, XAT_ARCHIVE) ||
2016	    XVA_ISSET_REQ(xvap, XAT_OFFLINE) ||
2017	    XVA_ISSET_REQ(xvap, XAT_SPARSE) ||
2018	    XVA_ISSET_REQ(xvap, XAT_CREATETIME) ||
2019	    XVA_ISSET_REQ(xvap, XAT_SYSTEM)))) {
2020		need_policy = zfs_zaccess(zp, ACE_WRITE_ATTRIBUTES, 0,
2021		    skipaclchk, cr);
2022	}
2023
2024	if (mask & (ATTR_UID|ATTR_GID)) {
2025		int	idmask = (mask & (ATTR_UID|ATTR_GID));
2026		int	take_owner;
2027		int	take_group;
2028
2029		/*
2030		 * NOTE: even if a new mode is being set,
2031		 * we may clear S_ISUID/S_ISGID bits.
2032		 */
2033
2034		if (!(mask & ATTR_MODE))
2035			vap->va_mode = zp->z_mode;
2036
2037		/*
2038		 * Take ownership or chgrp to group we are a member of
2039		 */
2040
2041		take_owner = (mask & ATTR_UID) && (vap->va_uid == crgetuid(cr));
2042		take_group = (mask & ATTR_GID) &&
2043		    zfs_groupmember(zfsvfs, vap->va_gid, cr);
2044
2045		/*
2046		 * If both ATTR_UID and ATTR_GID are set then take_owner and
2047		 * take_group must both be set in order to allow taking
2048		 * ownership.
2049		 *
2050		 * Otherwise, send the check through secpolicy_vnode_setattr()
2051		 *
2052		 */
2053
2054		if (((idmask == (ATTR_UID|ATTR_GID)) &&
2055		    take_owner && take_group) ||
2056		    ((idmask == ATTR_UID) && take_owner) ||
2057		    ((idmask == ATTR_GID) && take_group)) {
2058			if (zfs_zaccess(zp, ACE_WRITE_OWNER, 0,
2059			    skipaclchk, cr) == 0) {
2060				/*
2061				 * Remove setuid/setgid for non-privileged users
2062				 */
2063				(void) secpolicy_setid_clear(vap, cr);
2064				trim_mask = (mask & (ATTR_UID|ATTR_GID));
2065			} else {
2066				need_policy =  TRUE;
2067			}
2068		} else {
2069			need_policy =  TRUE;
2070		}
2071	}
2072
2073	mutex_enter(&zp->z_lock);
2074	oldva.va_mode = zp->z_mode;
2075	zfs_fuid_map_ids(zp, cr, &oldva.va_uid, &oldva.va_gid);
2076	if (mask & ATTR_XVATTR) {
2077		/*
2078		 * Update xvattr mask to include only those attributes
2079		 * that are actually changing.
2080		 *
2081		 * the bits will be restored prior to actually setting
2082		 * the attributes so the caller thinks they were set.
2083		 */
2084		if (XVA_ISSET_REQ(xvap, XAT_APPENDONLY)) {
2085			if (xoap->xoa_appendonly !=
2086			    ((zp->z_pflags & ZFS_APPENDONLY) != 0)) {
2087				need_policy = TRUE;
2088			} else {
2089				XVA_CLR_REQ(xvap, XAT_APPENDONLY);
2090				XVA_SET_REQ(tmpxvattr, XAT_APPENDONLY);
2091			}
2092		}
2093
2094		if (XVA_ISSET_REQ(xvap, XAT_PROJINHERIT)) {
2095			if (xoap->xoa_projinherit !=
2096			    ((zp->z_pflags & ZFS_PROJINHERIT) != 0)) {
2097				need_policy = TRUE;
2098			} else {
2099				XVA_CLR_REQ(xvap, XAT_PROJINHERIT);
2100				XVA_SET_REQ(tmpxvattr, XAT_PROJINHERIT);
2101			}
2102		}
2103
2104		if (XVA_ISSET_REQ(xvap, XAT_NOUNLINK)) {
2105			if (xoap->xoa_nounlink !=
2106			    ((zp->z_pflags & ZFS_NOUNLINK) != 0)) {
2107				need_policy = TRUE;
2108			} else {
2109				XVA_CLR_REQ(xvap, XAT_NOUNLINK);
2110				XVA_SET_REQ(tmpxvattr, XAT_NOUNLINK);
2111			}
2112		}
2113
2114		if (XVA_ISSET_REQ(xvap, XAT_IMMUTABLE)) {
2115			if (xoap->xoa_immutable !=
2116			    ((zp->z_pflags & ZFS_IMMUTABLE) != 0)) {
2117				need_policy = TRUE;
2118			} else {
2119				XVA_CLR_REQ(xvap, XAT_IMMUTABLE);
2120				XVA_SET_REQ(tmpxvattr, XAT_IMMUTABLE);
2121			}
2122		}
2123
2124		if (XVA_ISSET_REQ(xvap, XAT_NODUMP)) {
2125			if (xoap->xoa_nodump !=
2126			    ((zp->z_pflags & ZFS_NODUMP) != 0)) {
2127				need_policy = TRUE;
2128			} else {
2129				XVA_CLR_REQ(xvap, XAT_NODUMP);
2130				XVA_SET_REQ(tmpxvattr, XAT_NODUMP);
2131			}
2132		}
2133
2134		if (XVA_ISSET_REQ(xvap, XAT_AV_MODIFIED)) {
2135			if (xoap->xoa_av_modified !=
2136			    ((zp->z_pflags & ZFS_AV_MODIFIED) != 0)) {
2137				need_policy = TRUE;
2138			} else {
2139				XVA_CLR_REQ(xvap, XAT_AV_MODIFIED);
2140				XVA_SET_REQ(tmpxvattr, XAT_AV_MODIFIED);
2141			}
2142		}
2143
2144		if (XVA_ISSET_REQ(xvap, XAT_AV_QUARANTINED)) {
2145			if ((!S_ISREG(ip->i_mode) &&
2146			    xoap->xoa_av_quarantined) ||
2147			    xoap->xoa_av_quarantined !=
2148			    ((zp->z_pflags & ZFS_AV_QUARANTINED) != 0)) {
2149				need_policy = TRUE;
2150			} else {
2151				XVA_CLR_REQ(xvap, XAT_AV_QUARANTINED);
2152				XVA_SET_REQ(tmpxvattr, XAT_AV_QUARANTINED);
2153			}
2154		}
2155
2156		if (XVA_ISSET_REQ(xvap, XAT_REPARSE)) {
2157			mutex_exit(&zp->z_lock);
2158			err = SET_ERROR(EPERM);
2159			goto out3;
2160		}
2161
2162		if (need_policy == FALSE &&
2163		    (XVA_ISSET_REQ(xvap, XAT_AV_SCANSTAMP) ||
2164		    XVA_ISSET_REQ(xvap, XAT_OPAQUE))) {
2165			need_policy = TRUE;
2166		}
2167	}
2168
2169	mutex_exit(&zp->z_lock);
2170
2171	if (mask & ATTR_MODE) {
2172		if (zfs_zaccess(zp, ACE_WRITE_ACL, 0, skipaclchk, cr) == 0) {
2173			err = secpolicy_setid_setsticky_clear(ip, vap,
2174			    &oldva, cr);
2175			if (err)
2176				goto out3;
2177
2178			trim_mask |= ATTR_MODE;
2179		} else {
2180			need_policy = TRUE;
2181		}
2182	}
2183
2184	if (need_policy) {
2185		/*
2186		 * If trim_mask is set then take ownership
2187		 * has been granted or write_acl is present and user
2188		 * has the ability to modify mode.  In that case remove
2189		 * UID|GID and or MODE from mask so that
2190		 * secpolicy_vnode_setattr() doesn't revoke it.
2191		 */
2192
2193		if (trim_mask) {
2194			saved_mask = vap->va_mask;
2195			vap->va_mask &= ~trim_mask;
2196		}
2197		err = secpolicy_vnode_setattr(cr, ip, vap, &oldva, flags,
2198		    (int (*)(void *, int, cred_t *))zfs_zaccess_unix, zp);
2199		if (err)
2200			goto out3;
2201
2202		if (trim_mask)
2203			vap->va_mask |= saved_mask;
2204	}
2205
2206	/*
2207	 * secpolicy_vnode_setattr, or take ownership may have
2208	 * changed va_mask
2209	 */
2210	mask = vap->va_mask;
2211
2212	if ((mask & (ATTR_UID | ATTR_GID)) || projid != ZFS_INVALID_PROJID) {
2213		handle_eadir = B_TRUE;
2214		err = sa_lookup(zp->z_sa_hdl, SA_ZPL_XATTR(zfsvfs),
2215		    &xattr_obj, sizeof (xattr_obj));
2216
2217		if (err == 0 && xattr_obj) {
2218			err = zfs_zget(ZTOZSB(zp), xattr_obj, &attrzp);
2219			if (err)
2220				goto out2;
2221		}
2222		if (mask & ATTR_UID) {
2223			new_kuid = zfs_fuid_create(zfsvfs,
2224			    (uint64_t)vap->va_uid, cr, ZFS_OWNER, &fuidp);
2225			if (new_kuid != KUID_TO_SUID(ZTOI(zp)->i_uid) &&
2226			    zfs_id_overquota(zfsvfs, DMU_USERUSED_OBJECT,
2227			    new_kuid)) {
2228				if (attrzp)
2229					zrele(attrzp);
2230				err = SET_ERROR(EDQUOT);
2231				goto out2;
2232			}
2233		}
2234
2235		if (mask & ATTR_GID) {
2236			new_kgid = zfs_fuid_create(zfsvfs,
2237			    (uint64_t)vap->va_gid, cr, ZFS_GROUP, &fuidp);
2238			if (new_kgid != KGID_TO_SGID(ZTOI(zp)->i_gid) &&
2239			    zfs_id_overquota(zfsvfs, DMU_GROUPUSED_OBJECT,
2240			    new_kgid)) {
2241				if (attrzp)
2242					zrele(attrzp);
2243				err = SET_ERROR(EDQUOT);
2244				goto out2;
2245			}
2246		}
2247
2248		if (projid != ZFS_INVALID_PROJID &&
2249		    zfs_id_overquota(zfsvfs, DMU_PROJECTUSED_OBJECT, projid)) {
2250			if (attrzp)
2251				zrele(attrzp);
2252			err = EDQUOT;
2253			goto out2;
2254		}
2255	}
2256	tx = dmu_tx_create(os);
2257
2258	if (mask & ATTR_MODE) {
2259		uint64_t pmode = zp->z_mode;
2260		uint64_t acl_obj;
2261		new_mode = (pmode & S_IFMT) | (vap->va_mode & ~S_IFMT);
2262
2263		if (ZTOZSB(zp)->z_acl_mode == ZFS_ACL_RESTRICTED &&
2264		    !(zp->z_pflags & ZFS_ACL_TRIVIAL)) {
2265			err = EPERM;
2266			goto out;
2267		}
2268
2269		if ((err = zfs_acl_chmod_setattr(zp, &aclp, new_mode)))
2270			goto out;
2271
2272		mutex_enter(&zp->z_lock);
2273		if (!zp->z_is_sa && ((acl_obj = zfs_external_acl(zp)) != 0)) {
2274			/*
2275			 * Are we upgrading ACL from old V0 format
2276			 * to V1 format?
2277			 */
2278			if (zfsvfs->z_version >= ZPL_VERSION_FUID &&
2279			    zfs_znode_acl_version(zp) ==
2280			    ZFS_ACL_VERSION_INITIAL) {
2281				dmu_tx_hold_free(tx, acl_obj, 0,
2282				    DMU_OBJECT_END);
2283				dmu_tx_hold_write(tx, DMU_NEW_OBJECT,
2284				    0, aclp->z_acl_bytes);
2285			} else {
2286				dmu_tx_hold_write(tx, acl_obj, 0,
2287				    aclp->z_acl_bytes);
2288			}
2289		} else if (!zp->z_is_sa && aclp->z_acl_bytes > ZFS_ACE_SPACE) {
2290			dmu_tx_hold_write(tx, DMU_NEW_OBJECT,
2291			    0, aclp->z_acl_bytes);
2292		}
2293		mutex_exit(&zp->z_lock);
2294		dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_TRUE);
2295	} else {
2296		if (((mask & ATTR_XVATTR) &&
2297		    XVA_ISSET_REQ(xvap, XAT_AV_SCANSTAMP)) ||
2298		    (projid != ZFS_INVALID_PROJID &&
2299		    !(zp->z_pflags & ZFS_PROJID)))
2300			dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_TRUE);
2301		else
2302			dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_FALSE);
2303	}
2304
2305	if (attrzp) {
2306		dmu_tx_hold_sa(tx, attrzp->z_sa_hdl, B_FALSE);
2307	}
2308
2309	fuid_dirtied = zfsvfs->z_fuid_dirty;
2310	if (fuid_dirtied)
2311		zfs_fuid_txhold(zfsvfs, tx);
2312
2313	zfs_sa_upgrade_txholds(tx, zp);
2314
2315	err = dmu_tx_assign(tx, TXG_WAIT);
2316	if (err)
2317		goto out;
2318
2319	count = 0;
2320	/*
2321	 * Set each attribute requested.
2322	 * We group settings according to the locks they need to acquire.
2323	 *
2324	 * Note: you cannot set ctime directly, although it will be
2325	 * updated as a side-effect of calling this function.
2326	 */
2327
2328	if (projid != ZFS_INVALID_PROJID && !(zp->z_pflags & ZFS_PROJID)) {
2329		/*
2330		 * For the existed object that is upgraded from old system,
2331		 * its on-disk layout has no slot for the project ID attribute.
2332		 * But quota accounting logic needs to access related slots by
2333		 * offset directly. So we need to adjust old objects' layout
2334		 * to make the project ID to some unified and fixed offset.
2335		 */
2336		if (attrzp)
2337			err = sa_add_projid(attrzp->z_sa_hdl, tx, projid);
2338		if (err == 0)
2339			err = sa_add_projid(zp->z_sa_hdl, tx, projid);
2340
2341		if (unlikely(err == EEXIST))
2342			err = 0;
2343		else if (err != 0)
2344			goto out;
2345		else
2346			projid = ZFS_INVALID_PROJID;
2347	}
2348
2349	if (mask & (ATTR_UID|ATTR_GID|ATTR_MODE))
2350		mutex_enter(&zp->z_acl_lock);
2351	mutex_enter(&zp->z_lock);
2352
2353	SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_FLAGS(zfsvfs), NULL,
2354	    &zp->z_pflags, sizeof (zp->z_pflags));
2355
2356	if (attrzp) {
2357		if (mask & (ATTR_UID|ATTR_GID|ATTR_MODE))
2358			mutex_enter(&attrzp->z_acl_lock);
2359		mutex_enter(&attrzp->z_lock);
2360		SA_ADD_BULK_ATTR(xattr_bulk, xattr_count,
2361		    SA_ZPL_FLAGS(zfsvfs), NULL, &attrzp->z_pflags,
2362		    sizeof (attrzp->z_pflags));
2363		if (projid != ZFS_INVALID_PROJID) {
2364			attrzp->z_projid = projid;
2365			SA_ADD_BULK_ATTR(xattr_bulk, xattr_count,
2366			    SA_ZPL_PROJID(zfsvfs), NULL, &attrzp->z_projid,
2367			    sizeof (attrzp->z_projid));
2368		}
2369	}
2370
2371	if (mask & (ATTR_UID|ATTR_GID)) {
2372
2373		if (mask & ATTR_UID) {
2374			ZTOI(zp)->i_uid = SUID_TO_KUID(new_kuid);
2375			new_uid = zfs_uid_read(ZTOI(zp));
2376			SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_UID(zfsvfs), NULL,
2377			    &new_uid, sizeof (new_uid));
2378			if (attrzp) {
2379				SA_ADD_BULK_ATTR(xattr_bulk, xattr_count,
2380				    SA_ZPL_UID(zfsvfs), NULL, &new_uid,
2381				    sizeof (new_uid));
2382				ZTOI(attrzp)->i_uid = SUID_TO_KUID(new_uid);
2383			}
2384		}
2385
2386		if (mask & ATTR_GID) {
2387			ZTOI(zp)->i_gid = SGID_TO_KGID(new_kgid);
2388			new_gid = zfs_gid_read(ZTOI(zp));
2389			SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_GID(zfsvfs),
2390			    NULL, &new_gid, sizeof (new_gid));
2391			if (attrzp) {
2392				SA_ADD_BULK_ATTR(xattr_bulk, xattr_count,
2393				    SA_ZPL_GID(zfsvfs), NULL, &new_gid,
2394				    sizeof (new_gid));
2395				ZTOI(attrzp)->i_gid = SGID_TO_KGID(new_kgid);
2396			}
2397		}
2398		if (!(mask & ATTR_MODE)) {
2399			SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_MODE(zfsvfs),
2400			    NULL, &new_mode, sizeof (new_mode));
2401			new_mode = zp->z_mode;
2402		}
2403		err = zfs_acl_chown_setattr(zp);
2404		ASSERT(err == 0);
2405		if (attrzp) {
2406			err = zfs_acl_chown_setattr(attrzp);
2407			ASSERT(err == 0);
2408		}
2409	}
2410
2411	if (mask & ATTR_MODE) {
2412		SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_MODE(zfsvfs), NULL,
2413		    &new_mode, sizeof (new_mode));
2414		zp->z_mode = ZTOI(zp)->i_mode = new_mode;
2415		ASSERT3P(aclp, !=, NULL);
2416		err = zfs_aclset_common(zp, aclp, cr, tx);
2417		ASSERT0(err);
2418		if (zp->z_acl_cached)
2419			zfs_acl_free(zp->z_acl_cached);
2420		zp->z_acl_cached = aclp;
2421		aclp = NULL;
2422	}
2423
2424	if ((mask & ATTR_ATIME) || zp->z_atime_dirty) {
2425		zp->z_atime_dirty = B_FALSE;
2426		ZFS_TIME_ENCODE(&ip->i_atime, atime);
2427		SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_ATIME(zfsvfs), NULL,
2428		    &atime, sizeof (atime));
2429	}
2430
2431	if (mask & (ATTR_MTIME | ATTR_SIZE)) {
2432		ZFS_TIME_ENCODE(&vap->va_mtime, mtime);
2433		ZTOI(zp)->i_mtime = zpl_inode_timestamp_truncate(
2434		    vap->va_mtime, ZTOI(zp));
2435
2436		SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_MTIME(zfsvfs), NULL,
2437		    mtime, sizeof (mtime));
2438	}
2439
2440	if (mask & (ATTR_CTIME | ATTR_SIZE)) {
2441		ZFS_TIME_ENCODE(&vap->va_ctime, ctime);
2442		ZTOI(zp)->i_ctime = zpl_inode_timestamp_truncate(vap->va_ctime,
2443		    ZTOI(zp));
2444		SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_CTIME(zfsvfs), NULL,
2445		    ctime, sizeof (ctime));
2446	}
2447
2448	if (projid != ZFS_INVALID_PROJID) {
2449		zp->z_projid = projid;
2450		SA_ADD_BULK_ATTR(bulk, count,
2451		    SA_ZPL_PROJID(zfsvfs), NULL, &zp->z_projid,
2452		    sizeof (zp->z_projid));
2453	}
2454
2455	if (attrzp && mask) {
2456		SA_ADD_BULK_ATTR(xattr_bulk, xattr_count,
2457		    SA_ZPL_CTIME(zfsvfs), NULL, &ctime,
2458		    sizeof (ctime));
2459	}
2460
2461	/*
2462	 * Do this after setting timestamps to prevent timestamp
2463	 * update from toggling bit
2464	 */
2465
2466	if (xoap && (mask & ATTR_XVATTR)) {
2467
2468		/*
2469		 * restore trimmed off masks
2470		 * so that return masks can be set for caller.
2471		 */
2472
2473		if (XVA_ISSET_REQ(tmpxvattr, XAT_APPENDONLY)) {
2474			XVA_SET_REQ(xvap, XAT_APPENDONLY);
2475		}
2476		if (XVA_ISSET_REQ(tmpxvattr, XAT_NOUNLINK)) {
2477			XVA_SET_REQ(xvap, XAT_NOUNLINK);
2478		}
2479		if (XVA_ISSET_REQ(tmpxvattr, XAT_IMMUTABLE)) {
2480			XVA_SET_REQ(xvap, XAT_IMMUTABLE);
2481		}
2482		if (XVA_ISSET_REQ(tmpxvattr, XAT_NODUMP)) {
2483			XVA_SET_REQ(xvap, XAT_NODUMP);
2484		}
2485		if (XVA_ISSET_REQ(tmpxvattr, XAT_AV_MODIFIED)) {
2486			XVA_SET_REQ(xvap, XAT_AV_MODIFIED);
2487		}
2488		if (XVA_ISSET_REQ(tmpxvattr, XAT_AV_QUARANTINED)) {
2489			XVA_SET_REQ(xvap, XAT_AV_QUARANTINED);
2490		}
2491		if (XVA_ISSET_REQ(tmpxvattr, XAT_PROJINHERIT)) {
2492			XVA_SET_REQ(xvap, XAT_PROJINHERIT);
2493		}
2494
2495		if (XVA_ISSET_REQ(xvap, XAT_AV_SCANSTAMP))
2496			ASSERT(S_ISREG(ip->i_mode));
2497
2498		zfs_xvattr_set(zp, xvap, tx);
2499	}
2500
2501	if (fuid_dirtied)
2502		zfs_fuid_sync(zfsvfs, tx);
2503
2504	if (mask != 0)
2505		zfs_log_setattr(zilog, tx, TX_SETATTR, zp, vap, mask, fuidp);
2506
2507	mutex_exit(&zp->z_lock);
2508	if (mask & (ATTR_UID|ATTR_GID|ATTR_MODE))
2509		mutex_exit(&zp->z_acl_lock);
2510
2511	if (attrzp) {
2512		if (mask & (ATTR_UID|ATTR_GID|ATTR_MODE))
2513			mutex_exit(&attrzp->z_acl_lock);
2514		mutex_exit(&attrzp->z_lock);
2515	}
2516out:
2517	if (err == 0 && xattr_count > 0) {
2518		err2 = sa_bulk_update(attrzp->z_sa_hdl, xattr_bulk,
2519		    xattr_count, tx);
2520		ASSERT(err2 == 0);
2521	}
2522
2523	if (aclp)
2524		zfs_acl_free(aclp);
2525
2526	if (fuidp) {
2527		zfs_fuid_info_free(fuidp);
2528		fuidp = NULL;
2529	}
2530
2531	if (err) {
2532		dmu_tx_abort(tx);
2533		if (attrzp)
2534			zrele(attrzp);
2535		if (err == ERESTART)
2536			goto top;
2537	} else {
2538		if (count > 0)
2539			err2 = sa_bulk_update(zp->z_sa_hdl, bulk, count, tx);
2540		dmu_tx_commit(tx);
2541		if (attrzp) {
2542			if (err2 == 0 && handle_eadir)
2543				err2 = zfs_setattr_dir(attrzp);
2544			zrele(attrzp);
2545		}
2546		zfs_znode_update_vfs(zp);
2547	}
2548
2549out2:
2550	if (os->os_sync == ZFS_SYNC_ALWAYS)
2551		zil_commit(zilog, 0);
2552
2553out3:
2554	kmem_free(xattr_bulk, sizeof (sa_bulk_attr_t) * bulks);
2555	kmem_free(bulk, sizeof (sa_bulk_attr_t) * bulks);
2556	kmem_free(tmpxvattr, sizeof (xvattr_t));
2557	ZFS_EXIT(zfsvfs);
2558	return (err);
2559}
2560
2561typedef struct zfs_zlock {
2562	krwlock_t	*zl_rwlock;	/* lock we acquired */
2563	znode_t		*zl_znode;	/* znode we held */
2564	struct zfs_zlock *zl_next;	/* next in list */
2565} zfs_zlock_t;
2566
2567/*
2568 * Drop locks and release vnodes that were held by zfs_rename_lock().
2569 */
2570static void
2571zfs_rename_unlock(zfs_zlock_t **zlpp)
2572{
2573	zfs_zlock_t *zl;
2574
2575	while ((zl = *zlpp) != NULL) {
2576		if (zl->zl_znode != NULL)
2577			zfs_zrele_async(zl->zl_znode);
2578		rw_exit(zl->zl_rwlock);
2579		*zlpp = zl->zl_next;
2580		kmem_free(zl, sizeof (*zl));
2581	}
2582}
2583
2584/*
2585 * Search back through the directory tree, using the ".." entries.
2586 * Lock each directory in the chain to prevent concurrent renames.
2587 * Fail any attempt to move a directory into one of its own descendants.
2588 * XXX - z_parent_lock can overlap with map or grow locks
2589 */
2590static int
2591zfs_rename_lock(znode_t *szp, znode_t *tdzp, znode_t *sdzp, zfs_zlock_t **zlpp)
2592{
2593	zfs_zlock_t	*zl;
2594	znode_t		*zp = tdzp;
2595	uint64_t	rootid = ZTOZSB(zp)->z_root;
2596	uint64_t	oidp = zp->z_id;
2597	krwlock_t	*rwlp = &szp->z_parent_lock;
2598	krw_t		rw = RW_WRITER;
2599
2600	/*
2601	 * First pass write-locks szp and compares to zp->z_id.
2602	 * Later passes read-lock zp and compare to zp->z_parent.
2603	 */
2604	do {
2605		if (!rw_tryenter(rwlp, rw)) {
2606			/*
2607			 * Another thread is renaming in this path.
2608			 * Note that if we are a WRITER, we don't have any
2609			 * parent_locks held yet.
2610			 */
2611			if (rw == RW_READER && zp->z_id > szp->z_id) {
2612				/*
2613				 * Drop our locks and restart
2614				 */
2615				zfs_rename_unlock(&zl);
2616				*zlpp = NULL;
2617				zp = tdzp;
2618				oidp = zp->z_id;
2619				rwlp = &szp->z_parent_lock;
2620				rw = RW_WRITER;
2621				continue;
2622			} else {
2623				/*
2624				 * Wait for other thread to drop its locks
2625				 */
2626				rw_enter(rwlp, rw);
2627			}
2628		}
2629
2630		zl = kmem_alloc(sizeof (*zl), KM_SLEEP);
2631		zl->zl_rwlock = rwlp;
2632		zl->zl_znode = NULL;
2633		zl->zl_next = *zlpp;
2634		*zlpp = zl;
2635
2636		if (oidp == szp->z_id)		/* We're a descendant of szp */
2637			return (SET_ERROR(EINVAL));
2638
2639		if (oidp == rootid)		/* We've hit the top */
2640			return (0);
2641
2642		if (rw == RW_READER) {		/* i.e. not the first pass */
2643			int error = zfs_zget(ZTOZSB(zp), oidp, &zp);
2644			if (error)
2645				return (error);
2646			zl->zl_znode = zp;
2647		}
2648		(void) sa_lookup(zp->z_sa_hdl, SA_ZPL_PARENT(ZTOZSB(zp)),
2649		    &oidp, sizeof (oidp));
2650		rwlp = &zp->z_parent_lock;
2651		rw = RW_READER;
2652
2653	} while (zp->z_id != sdzp->z_id);
2654
2655	return (0);
2656}
2657
2658/*
2659 * Move an entry from the provided source directory to the target
2660 * directory.  Change the entry name as indicated.
2661 *
2662 *	IN:	sdzp	- Source directory containing the "old entry".
2663 *		snm	- Old entry name.
2664 *		tdzp	- Target directory to contain the "new entry".
2665 *		tnm	- New entry name.
2666 *		cr	- credentials of caller.
2667 *		flags	- case flags
2668 *
2669 *	RETURN:	0 on success, error code on failure.
2670 *
2671 * Timestamps:
2672 *	sdzp,tdzp - ctime|mtime updated
2673 */
2674/*ARGSUSED*/
2675int
2676zfs_rename(znode_t *sdzp, char *snm, znode_t *tdzp, char *tnm,
2677    cred_t *cr, int flags)
2678{
2679	znode_t		*szp, *tzp;
2680	zfsvfs_t	*zfsvfs = ZTOZSB(sdzp);
2681	zilog_t		*zilog;
2682	zfs_dirlock_t	*sdl, *tdl;
2683	dmu_tx_t	*tx;
2684	zfs_zlock_t	*zl;
2685	int		cmp, serr, terr;
2686	int		error = 0;
2687	int		zflg = 0;
2688	boolean_t	waited = B_FALSE;
2689
2690	if (snm == NULL || tnm == NULL)
2691		return (SET_ERROR(EINVAL));
2692
2693	ZFS_ENTER(zfsvfs);
2694	ZFS_VERIFY_ZP(sdzp);
2695	zilog = zfsvfs->z_log;
2696
2697	ZFS_VERIFY_ZP(tdzp);
2698
2699	/*
2700	 * We check i_sb because snapshots and the ctldir must have different
2701	 * super blocks.
2702	 */
2703	if (ZTOI(tdzp)->i_sb != ZTOI(sdzp)->i_sb ||
2704	    zfsctl_is_node(ZTOI(tdzp))) {
2705		ZFS_EXIT(zfsvfs);
2706		return (SET_ERROR(EXDEV));
2707	}
2708
2709	if (zfsvfs->z_utf8 && u8_validate(tnm,
2710	    strlen(tnm), NULL, U8_VALIDATE_ENTIRE, &error) < 0) {
2711		ZFS_EXIT(zfsvfs);
2712		return (SET_ERROR(EILSEQ));
2713	}
2714
2715	if (flags & FIGNORECASE)
2716		zflg |= ZCILOOK;
2717
2718top:
2719	szp = NULL;
2720	tzp = NULL;
2721	zl = NULL;
2722
2723	/*
2724	 * This is to prevent the creation of links into attribute space
2725	 * by renaming a linked file into/outof an attribute directory.
2726	 * See the comment in zfs_link() for why this is considered bad.
2727	 */
2728	if ((tdzp->z_pflags & ZFS_XATTR) != (sdzp->z_pflags & ZFS_XATTR)) {
2729		ZFS_EXIT(zfsvfs);
2730		return (SET_ERROR(EINVAL));
2731	}
2732
2733	/*
2734	 * Lock source and target directory entries.  To prevent deadlock,
2735	 * a lock ordering must be defined.  We lock the directory with
2736	 * the smallest object id first, or if it's a tie, the one with
2737	 * the lexically first name.
2738	 */
2739	if (sdzp->z_id < tdzp->z_id) {
2740		cmp = -1;
2741	} else if (sdzp->z_id > tdzp->z_id) {
2742		cmp = 1;
2743	} else {
2744		/*
2745		 * First compare the two name arguments without
2746		 * considering any case folding.
2747		 */
2748		int nofold = (zfsvfs->z_norm & ~U8_TEXTPREP_TOUPPER);
2749
2750		cmp = u8_strcmp(snm, tnm, 0, nofold, U8_UNICODE_LATEST, &error);
2751		ASSERT(error == 0 || !zfsvfs->z_utf8);
2752		if (cmp == 0) {
2753			/*
2754			 * POSIX: "If the old argument and the new argument
2755			 * both refer to links to the same existing file,
2756			 * the rename() function shall return successfully
2757			 * and perform no other action."
2758			 */
2759			ZFS_EXIT(zfsvfs);
2760			return (0);
2761		}
2762		/*
2763		 * If the file system is case-folding, then we may
2764		 * have some more checking to do.  A case-folding file
2765		 * system is either supporting mixed case sensitivity
2766		 * access or is completely case-insensitive.  Note
2767		 * that the file system is always case preserving.
2768		 *
2769		 * In mixed sensitivity mode case sensitive behavior
2770		 * is the default.  FIGNORECASE must be used to
2771		 * explicitly request case insensitive behavior.
2772		 *
2773		 * If the source and target names provided differ only
2774		 * by case (e.g., a request to rename 'tim' to 'Tim'),
2775		 * we will treat this as a special case in the
2776		 * case-insensitive mode: as long as the source name
2777		 * is an exact match, we will allow this to proceed as
2778		 * a name-change request.
2779		 */
2780		if ((zfsvfs->z_case == ZFS_CASE_INSENSITIVE ||
2781		    (zfsvfs->z_case == ZFS_CASE_MIXED &&
2782		    flags & FIGNORECASE)) &&
2783		    u8_strcmp(snm, tnm, 0, zfsvfs->z_norm, U8_UNICODE_LATEST,
2784		    &error) == 0) {
2785			/*
2786			 * case preserving rename request, require exact
2787			 * name matches
2788			 */
2789			zflg |= ZCIEXACT;
2790			zflg &= ~ZCILOOK;
2791		}
2792	}
2793
2794	/*
2795	 * If the source and destination directories are the same, we should
2796	 * grab the z_name_lock of that directory only once.
2797	 */
2798	if (sdzp == tdzp) {
2799		zflg |= ZHAVELOCK;
2800		rw_enter(&sdzp->z_name_lock, RW_READER);
2801	}
2802
2803	if (cmp < 0) {
2804		serr = zfs_dirent_lock(&sdl, sdzp, snm, &szp,
2805		    ZEXISTS | zflg, NULL, NULL);
2806		terr = zfs_dirent_lock(&tdl,
2807		    tdzp, tnm, &tzp, ZRENAMING | zflg, NULL, NULL);
2808	} else {
2809		terr = zfs_dirent_lock(&tdl,
2810		    tdzp, tnm, &tzp, zflg, NULL, NULL);
2811		serr = zfs_dirent_lock(&sdl,
2812		    sdzp, snm, &szp, ZEXISTS | ZRENAMING | zflg,
2813		    NULL, NULL);
2814	}
2815
2816	if (serr) {
2817		/*
2818		 * Source entry invalid or not there.
2819		 */
2820		if (!terr) {
2821			zfs_dirent_unlock(tdl);
2822			if (tzp)
2823				zrele(tzp);
2824		}
2825
2826		if (sdzp == tdzp)
2827			rw_exit(&sdzp->z_name_lock);
2828
2829		if (strcmp(snm, "..") == 0)
2830			serr = EINVAL;
2831		ZFS_EXIT(zfsvfs);
2832		return (serr);
2833	}
2834	if (terr) {
2835		zfs_dirent_unlock(sdl);
2836		zrele(szp);
2837
2838		if (sdzp == tdzp)
2839			rw_exit(&sdzp->z_name_lock);
2840
2841		if (strcmp(tnm, "..") == 0)
2842			terr = EINVAL;
2843		ZFS_EXIT(zfsvfs);
2844		return (terr);
2845	}
2846
2847	/*
2848	 * If we are using project inheritance, means if the directory has
2849	 * ZFS_PROJINHERIT set, then its descendant directories will inherit
2850	 * not only the project ID, but also the ZFS_PROJINHERIT flag. Under
2851	 * such case, we only allow renames into our tree when the project
2852	 * IDs are the same.
2853	 */
2854	if (tdzp->z_pflags & ZFS_PROJINHERIT &&
2855	    tdzp->z_projid != szp->z_projid) {
2856		error = SET_ERROR(EXDEV);
2857		goto out;
2858	}
2859
2860	/*
2861	 * Must have write access at the source to remove the old entry
2862	 * and write access at the target to create the new entry.
2863	 * Note that if target and source are the same, this can be
2864	 * done in a single check.
2865	 */
2866
2867	if ((error = zfs_zaccess_rename(sdzp, szp, tdzp, tzp, cr)))
2868		goto out;
2869
2870	if (S_ISDIR(ZTOI(szp)->i_mode)) {
2871		/*
2872		 * Check to make sure rename is valid.
2873		 * Can't do a move like this: /usr/a/b to /usr/a/b/c/d
2874		 */
2875		if ((error = zfs_rename_lock(szp, tdzp, sdzp, &zl)))
2876			goto out;
2877	}
2878
2879	/*
2880	 * Does target exist?
2881	 */
2882	if (tzp) {
2883		/*
2884		 * Source and target must be the same type.
2885		 */
2886		if (S_ISDIR(ZTOI(szp)->i_mode)) {
2887			if (!S_ISDIR(ZTOI(tzp)->i_mode)) {
2888				error = SET_ERROR(ENOTDIR);
2889				goto out;
2890			}
2891		} else {
2892			if (S_ISDIR(ZTOI(tzp)->i_mode)) {
2893				error = SET_ERROR(EISDIR);
2894				goto out;
2895			}
2896		}
2897		/*
2898		 * POSIX dictates that when the source and target
2899		 * entries refer to the same file object, rename
2900		 * must do nothing and exit without error.
2901		 */
2902		if (szp->z_id == tzp->z_id) {
2903			error = 0;
2904			goto out;
2905		}
2906	}
2907
2908	tx = dmu_tx_create(zfsvfs->z_os);
2909	dmu_tx_hold_sa(tx, szp->z_sa_hdl, B_FALSE);
2910	dmu_tx_hold_sa(tx, sdzp->z_sa_hdl, B_FALSE);
2911	dmu_tx_hold_zap(tx, sdzp->z_id, FALSE, snm);
2912	dmu_tx_hold_zap(tx, tdzp->z_id, TRUE, tnm);
2913	if (sdzp != tdzp) {
2914		dmu_tx_hold_sa(tx, tdzp->z_sa_hdl, B_FALSE);
2915		zfs_sa_upgrade_txholds(tx, tdzp);
2916	}
2917	if (tzp) {
2918		dmu_tx_hold_sa(tx, tzp->z_sa_hdl, B_FALSE);
2919		zfs_sa_upgrade_txholds(tx, tzp);
2920	}
2921
2922	zfs_sa_upgrade_txholds(tx, szp);
2923	dmu_tx_hold_zap(tx, zfsvfs->z_unlinkedobj, FALSE, NULL);
2924	error = dmu_tx_assign(tx, (waited ? TXG_NOTHROTTLE : 0) | TXG_NOWAIT);
2925	if (error) {
2926		if (zl != NULL)
2927			zfs_rename_unlock(&zl);
2928		zfs_dirent_unlock(sdl);
2929		zfs_dirent_unlock(tdl);
2930
2931		if (sdzp == tdzp)
2932			rw_exit(&sdzp->z_name_lock);
2933
2934		if (error == ERESTART) {
2935			waited = B_TRUE;
2936			dmu_tx_wait(tx);
2937			dmu_tx_abort(tx);
2938			zrele(szp);
2939			if (tzp)
2940				zrele(tzp);
2941			goto top;
2942		}
2943		dmu_tx_abort(tx);
2944		zrele(szp);
2945		if (tzp)
2946			zrele(tzp);
2947		ZFS_EXIT(zfsvfs);
2948		return (error);
2949	}
2950
2951	if (tzp)	/* Attempt to remove the existing target */
2952		error = zfs_link_destroy(tdl, tzp, tx, zflg, NULL);
2953
2954	if (error == 0) {
2955		error = zfs_link_create(tdl, szp, tx, ZRENAMING);
2956		if (error == 0) {
2957			szp->z_pflags |= ZFS_AV_MODIFIED;
2958			if (tdzp->z_pflags & ZFS_PROJINHERIT)
2959				szp->z_pflags |= ZFS_PROJINHERIT;
2960
2961			error = sa_update(szp->z_sa_hdl, SA_ZPL_FLAGS(zfsvfs),
2962			    (void *)&szp->z_pflags, sizeof (uint64_t), tx);
2963			ASSERT0(error);
2964
2965			error = zfs_link_destroy(sdl, szp, tx, ZRENAMING, NULL);
2966			if (error == 0) {
2967				zfs_log_rename(zilog, tx, TX_RENAME |
2968				    (flags & FIGNORECASE ? TX_CI : 0), sdzp,
2969				    sdl->dl_name, tdzp, tdl->dl_name, szp);
2970			} else {
2971				/*
2972				 * At this point, we have successfully created
2973				 * the target name, but have failed to remove
2974				 * the source name.  Since the create was done
2975				 * with the ZRENAMING flag, there are
2976				 * complications; for one, the link count is
2977				 * wrong.  The easiest way to deal with this
2978				 * is to remove the newly created target, and
2979				 * return the original error.  This must
2980				 * succeed; fortunately, it is very unlikely to
2981				 * fail, since we just created it.
2982				 */
2983				VERIFY3U(zfs_link_destroy(tdl, szp, tx,
2984				    ZRENAMING, NULL), ==, 0);
2985			}
2986		} else {
2987			/*
2988			 * If we had removed the existing target, subsequent
2989			 * call to zfs_link_create() to add back the same entry
2990			 * but, the new dnode (szp) should not fail.
2991			 */
2992			ASSERT(tzp == NULL);
2993		}
2994	}
2995
2996	dmu_tx_commit(tx);
2997out:
2998	if (zl != NULL)
2999		zfs_rename_unlock(&zl);
3000
3001	zfs_dirent_unlock(sdl);
3002	zfs_dirent_unlock(tdl);
3003
3004	zfs_znode_update_vfs(sdzp);
3005	if (sdzp == tdzp)
3006		rw_exit(&sdzp->z_name_lock);
3007
3008	if (sdzp != tdzp)
3009		zfs_znode_update_vfs(tdzp);
3010
3011	zfs_znode_update_vfs(szp);
3012	zrele(szp);
3013	if (tzp) {
3014		zfs_znode_update_vfs(tzp);
3015		zrele(tzp);
3016	}
3017
3018	if (zfsvfs->z_os->os_sync == ZFS_SYNC_ALWAYS)
3019		zil_commit(zilog, 0);
3020
3021	ZFS_EXIT(zfsvfs);
3022	return (error);
3023}
3024
3025/*
3026 * Insert the indicated symbolic reference entry into the directory.
3027 *
3028 *	IN:	dzp	- Directory to contain new symbolic link.
3029 *		name	- Name of directory entry in dip.
3030 *		vap	- Attributes of new entry.
3031 *		link	- Name for new symlink entry.
3032 *		cr	- credentials of caller.
3033 *		flags	- case flags
3034 *
3035 *	OUT:	zpp	- Znode for new symbolic link.
3036 *
3037 *	RETURN:	0 on success, error code on failure.
3038 *
3039 * Timestamps:
3040 *	dip - ctime|mtime updated
3041 */
3042/*ARGSUSED*/
3043int
3044zfs_symlink(znode_t *dzp, char *name, vattr_t *vap, char *link,
3045    znode_t **zpp, cred_t *cr, int flags)
3046{
3047	znode_t		*zp;
3048	zfs_dirlock_t	*dl;
3049	dmu_tx_t	*tx;
3050	zfsvfs_t	*zfsvfs = ZTOZSB(dzp);
3051	zilog_t		*zilog;
3052	uint64_t	len = strlen(link);
3053	int		error;
3054	int		zflg = ZNEW;
3055	zfs_acl_ids_t	acl_ids;
3056	boolean_t	fuid_dirtied;
3057	uint64_t	txtype = TX_SYMLINK;
3058	boolean_t	waited = B_FALSE;
3059
3060	ASSERT(S_ISLNK(vap->va_mode));
3061
3062	if (name == NULL)
3063		return (SET_ERROR(EINVAL));
3064
3065	ZFS_ENTER(zfsvfs);
3066	ZFS_VERIFY_ZP(dzp);
3067	zilog = zfsvfs->z_log;
3068
3069	if (zfsvfs->z_utf8 && u8_validate(name, strlen(name),
3070	    NULL, U8_VALIDATE_ENTIRE, &error) < 0) {
3071		ZFS_EXIT(zfsvfs);
3072		return (SET_ERROR(EILSEQ));
3073	}
3074	if (flags & FIGNORECASE)
3075		zflg |= ZCILOOK;
3076
3077	if (len > MAXPATHLEN) {
3078		ZFS_EXIT(zfsvfs);
3079		return (SET_ERROR(ENAMETOOLONG));
3080	}
3081
3082	if ((error = zfs_acl_ids_create(dzp, 0,
3083	    vap, cr, NULL, &acl_ids)) != 0) {
3084		ZFS_EXIT(zfsvfs);
3085		return (error);
3086	}
3087top:
3088	*zpp = NULL;
3089
3090	/*
3091	 * Attempt to lock directory; fail if entry already exists.
3092	 */
3093	error = zfs_dirent_lock(&dl, dzp, name, &zp, zflg, NULL, NULL);
3094	if (error) {
3095		zfs_acl_ids_free(&acl_ids);
3096		ZFS_EXIT(zfsvfs);
3097		return (error);
3098	}
3099
3100	if ((error = zfs_zaccess(dzp, ACE_ADD_FILE, 0, B_FALSE, cr))) {
3101		zfs_acl_ids_free(&acl_ids);
3102		zfs_dirent_unlock(dl);
3103		ZFS_EXIT(zfsvfs);
3104		return (error);
3105	}
3106
3107	if (zfs_acl_ids_overquota(zfsvfs, &acl_ids, ZFS_DEFAULT_PROJID)) {
3108		zfs_acl_ids_free(&acl_ids);
3109		zfs_dirent_unlock(dl);
3110		ZFS_EXIT(zfsvfs);
3111		return (SET_ERROR(EDQUOT));
3112	}
3113	tx = dmu_tx_create(zfsvfs->z_os);
3114	fuid_dirtied = zfsvfs->z_fuid_dirty;
3115	dmu_tx_hold_write(tx, DMU_NEW_OBJECT, 0, MAX(1, len));
3116	dmu_tx_hold_zap(tx, dzp->z_id, TRUE, name);
3117	dmu_tx_hold_sa_create(tx, acl_ids.z_aclp->z_acl_bytes +
3118	    ZFS_SA_BASE_ATTR_SIZE + len);
3119	dmu_tx_hold_sa(tx, dzp->z_sa_hdl, B_FALSE);
3120	if (!zfsvfs->z_use_sa && acl_ids.z_aclp->z_acl_bytes > ZFS_ACE_SPACE) {
3121		dmu_tx_hold_write(tx, DMU_NEW_OBJECT, 0,
3122		    acl_ids.z_aclp->z_acl_bytes);
3123	}
3124	if (fuid_dirtied)
3125		zfs_fuid_txhold(zfsvfs, tx);
3126	error = dmu_tx_assign(tx, (waited ? TXG_NOTHROTTLE : 0) | TXG_NOWAIT);
3127	if (error) {
3128		zfs_dirent_unlock(dl);
3129		if (error == ERESTART) {
3130			waited = B_TRUE;
3131			dmu_tx_wait(tx);
3132			dmu_tx_abort(tx);
3133			goto top;
3134		}
3135		zfs_acl_ids_free(&acl_ids);
3136		dmu_tx_abort(tx);
3137		ZFS_EXIT(zfsvfs);
3138		return (error);
3139	}
3140
3141	/*
3142	 * Create a new object for the symlink.
3143	 * for version 4 ZPL datasets the symlink will be an SA attribute
3144	 */
3145	zfs_mknode(dzp, vap, tx, cr, 0, &zp, &acl_ids);
3146
3147	if (fuid_dirtied)
3148		zfs_fuid_sync(zfsvfs, tx);
3149
3150	mutex_enter(&zp->z_lock);
3151	if (zp->z_is_sa)
3152		error = sa_update(zp->z_sa_hdl, SA_ZPL_SYMLINK(zfsvfs),
3153		    link, len, tx);
3154	else
3155		zfs_sa_symlink(zp, link, len, tx);
3156	mutex_exit(&zp->z_lock);
3157
3158	zp->z_size = len;
3159	(void) sa_update(zp->z_sa_hdl, SA_ZPL_SIZE(zfsvfs),
3160	    &zp->z_size, sizeof (zp->z_size), tx);
3161	/*
3162	 * Insert the new object into the directory.
3163	 */
3164	error = zfs_link_create(dl, zp, tx, ZNEW);
3165	if (error != 0) {
3166		zfs_znode_delete(zp, tx);
3167		remove_inode_hash(ZTOI(zp));
3168	} else {
3169		if (flags & FIGNORECASE)
3170			txtype |= TX_CI;
3171		zfs_log_symlink(zilog, tx, txtype, dzp, zp, name, link);
3172
3173		zfs_znode_update_vfs(dzp);
3174		zfs_znode_update_vfs(zp);
3175	}
3176
3177	zfs_acl_ids_free(&acl_ids);
3178
3179	dmu_tx_commit(tx);
3180
3181	zfs_dirent_unlock(dl);
3182
3183	if (error == 0) {
3184		*zpp = zp;
3185
3186		if (zfsvfs->z_os->os_sync == ZFS_SYNC_ALWAYS)
3187			zil_commit(zilog, 0);
3188	} else {
3189		zrele(zp);
3190	}
3191
3192	ZFS_EXIT(zfsvfs);
3193	return (error);
3194}
3195
3196/*
3197 * Return, in the buffer contained in the provided uio structure,
3198 * the symbolic path referred to by ip.
3199 *
3200 *	IN:	ip	- inode of symbolic link
3201 *		uio	- structure to contain the link path.
3202 *		cr	- credentials of caller.
3203 *
3204 *	RETURN:	0 if success
3205 *		error code if failure
3206 *
3207 * Timestamps:
3208 *	ip - atime updated
3209 */
3210/* ARGSUSED */
3211int
3212zfs_readlink(struct inode *ip, zfs_uio_t *uio, cred_t *cr)
3213{
3214	znode_t		*zp = ITOZ(ip);
3215	zfsvfs_t	*zfsvfs = ITOZSB(ip);
3216	int		error;
3217
3218	ZFS_ENTER(zfsvfs);
3219	ZFS_VERIFY_ZP(zp);
3220
3221	mutex_enter(&zp->z_lock);
3222	if (zp->z_is_sa)
3223		error = sa_lookup_uio(zp->z_sa_hdl,
3224		    SA_ZPL_SYMLINK(zfsvfs), uio);
3225	else
3226		error = zfs_sa_readlink(zp, uio);
3227	mutex_exit(&zp->z_lock);
3228
3229	ZFS_EXIT(zfsvfs);
3230	return (error);
3231}
3232
3233/*
3234 * Insert a new entry into directory tdzp referencing szp.
3235 *
3236 *	IN:	tdzp	- Directory to contain new entry.
3237 *		szp	- znode of new entry.
3238 *		name	- name of new entry.
3239 *		cr	- credentials of caller.
3240 *		flags	- case flags.
3241 *
3242 *	RETURN:	0 if success
3243 *		error code if failure
3244 *
3245 * Timestamps:
3246 *	tdzp - ctime|mtime updated
3247 *	 szp - ctime updated
3248 */
3249/* ARGSUSED */
3250int
3251zfs_link(znode_t *tdzp, znode_t *szp, char *name, cred_t *cr,
3252    int flags)
3253{
3254	struct inode *sip = ZTOI(szp);
3255	znode_t		*tzp;
3256	zfsvfs_t	*zfsvfs = ZTOZSB(tdzp);
3257	zilog_t		*zilog;
3258	zfs_dirlock_t	*dl;
3259	dmu_tx_t	*tx;
3260	int		error;
3261	int		zf = ZNEW;
3262	uint64_t	parent;
3263	uid_t		owner;
3264	boolean_t	waited = B_FALSE;
3265	boolean_t	is_tmpfile = 0;
3266	uint64_t	txg;
3267#ifdef HAVE_TMPFILE
3268	is_tmpfile = (sip->i_nlink == 0 && (sip->i_state & I_LINKABLE));
3269#endif
3270	ASSERT(S_ISDIR(ZTOI(tdzp)->i_mode));
3271
3272	if (name == NULL)
3273		return (SET_ERROR(EINVAL));
3274
3275	ZFS_ENTER(zfsvfs);
3276	ZFS_VERIFY_ZP(tdzp);
3277	zilog = zfsvfs->z_log;
3278
3279	/*
3280	 * POSIX dictates that we return EPERM here.
3281	 * Better choices include ENOTSUP or EISDIR.
3282	 */
3283	if (S_ISDIR(sip->i_mode)) {
3284		ZFS_EXIT(zfsvfs);
3285		return (SET_ERROR(EPERM));
3286	}
3287
3288	ZFS_VERIFY_ZP(szp);
3289
3290	/*
3291	 * If we are using project inheritance, means if the directory has
3292	 * ZFS_PROJINHERIT set, then its descendant directories will inherit
3293	 * not only the project ID, but also the ZFS_PROJINHERIT flag. Under
3294	 * such case, we only allow hard link creation in our tree when the
3295	 * project IDs are the same.
3296	 */
3297	if (tdzp->z_pflags & ZFS_PROJINHERIT &&
3298	    tdzp->z_projid != szp->z_projid) {
3299		ZFS_EXIT(zfsvfs);
3300		return (SET_ERROR(EXDEV));
3301	}
3302
3303	/*
3304	 * We check i_sb because snapshots and the ctldir must have different
3305	 * super blocks.
3306	 */
3307	if (sip->i_sb != ZTOI(tdzp)->i_sb || zfsctl_is_node(sip)) {
3308		ZFS_EXIT(zfsvfs);
3309		return (SET_ERROR(EXDEV));
3310	}
3311
3312	/* Prevent links to .zfs/shares files */
3313
3314	if ((error = sa_lookup(szp->z_sa_hdl, SA_ZPL_PARENT(zfsvfs),
3315	    &parent, sizeof (uint64_t))) != 0) {
3316		ZFS_EXIT(zfsvfs);
3317		return (error);
3318	}
3319	if (parent == zfsvfs->z_shares_dir) {
3320		ZFS_EXIT(zfsvfs);
3321		return (SET_ERROR(EPERM));
3322	}
3323
3324	if (zfsvfs->z_utf8 && u8_validate(name,
3325	    strlen(name), NULL, U8_VALIDATE_ENTIRE, &error) < 0) {
3326		ZFS_EXIT(zfsvfs);
3327		return (SET_ERROR(EILSEQ));
3328	}
3329	if (flags & FIGNORECASE)
3330		zf |= ZCILOOK;
3331
3332	/*
3333	 * We do not support links between attributes and non-attributes
3334	 * because of the potential security risk of creating links
3335	 * into "normal" file space in order to circumvent restrictions
3336	 * imposed in attribute space.
3337	 */
3338	if ((szp->z_pflags & ZFS_XATTR) != (tdzp->z_pflags & ZFS_XATTR)) {
3339		ZFS_EXIT(zfsvfs);
3340		return (SET_ERROR(EINVAL));
3341	}
3342
3343	owner = zfs_fuid_map_id(zfsvfs, KUID_TO_SUID(sip->i_uid),
3344	    cr, ZFS_OWNER);
3345	if (owner != crgetuid(cr) && secpolicy_basic_link(cr) != 0) {
3346		ZFS_EXIT(zfsvfs);
3347		return (SET_ERROR(EPERM));
3348	}
3349
3350	if ((error = zfs_zaccess(tdzp, ACE_ADD_FILE, 0, B_FALSE, cr))) {
3351		ZFS_EXIT(zfsvfs);
3352		return (error);
3353	}
3354
3355top:
3356	/*
3357	 * Attempt to lock directory; fail if entry already exists.
3358	 */
3359	error = zfs_dirent_lock(&dl, tdzp, name, &tzp, zf, NULL, NULL);
3360	if (error) {
3361		ZFS_EXIT(zfsvfs);
3362		return (error);
3363	}
3364
3365	tx = dmu_tx_create(zfsvfs->z_os);
3366	dmu_tx_hold_sa(tx, szp->z_sa_hdl, B_FALSE);
3367	dmu_tx_hold_zap(tx, tdzp->z_id, TRUE, name);
3368	if (is_tmpfile)
3369		dmu_tx_hold_zap(tx, zfsvfs->z_unlinkedobj, FALSE, NULL);
3370
3371	zfs_sa_upgrade_txholds(tx, szp);
3372	zfs_sa_upgrade_txholds(tx, tdzp);
3373	error = dmu_tx_assign(tx, (waited ? TXG_NOTHROTTLE : 0) | TXG_NOWAIT);
3374	if (error) {
3375		zfs_dirent_unlock(dl);
3376		if (error == ERESTART) {
3377			waited = B_TRUE;
3378			dmu_tx_wait(tx);
3379			dmu_tx_abort(tx);
3380			goto top;
3381		}
3382		dmu_tx_abort(tx);
3383		ZFS_EXIT(zfsvfs);
3384		return (error);
3385	}
3386	/* unmark z_unlinked so zfs_link_create will not reject */
3387	if (is_tmpfile)
3388		szp->z_unlinked = B_FALSE;
3389	error = zfs_link_create(dl, szp, tx, 0);
3390
3391	if (error == 0) {
3392		uint64_t txtype = TX_LINK;
3393		/*
3394		 * tmpfile is created to be in z_unlinkedobj, so remove it.
3395		 * Also, we don't log in ZIL, because all previous file
3396		 * operation on the tmpfile are ignored by ZIL. Instead we
3397		 * always wait for txg to sync to make sure all previous
3398		 * operation are sync safe.
3399		 */
3400		if (is_tmpfile) {
3401			VERIFY(zap_remove_int(zfsvfs->z_os,
3402			    zfsvfs->z_unlinkedobj, szp->z_id, tx) == 0);
3403		} else {
3404			if (flags & FIGNORECASE)
3405				txtype |= TX_CI;
3406			zfs_log_link(zilog, tx, txtype, tdzp, szp, name);
3407		}
3408	} else if (is_tmpfile) {
3409		/* restore z_unlinked since when linking failed */
3410		szp->z_unlinked = B_TRUE;
3411	}
3412	txg = dmu_tx_get_txg(tx);
3413	dmu_tx_commit(tx);
3414
3415	zfs_dirent_unlock(dl);
3416
3417	if (!is_tmpfile && zfsvfs->z_os->os_sync == ZFS_SYNC_ALWAYS)
3418		zil_commit(zilog, 0);
3419
3420	if (is_tmpfile && zfsvfs->z_os->os_sync != ZFS_SYNC_DISABLED)
3421		txg_wait_synced(dmu_objset_pool(zfsvfs->z_os), txg);
3422
3423	zfs_znode_update_vfs(tdzp);
3424	zfs_znode_update_vfs(szp);
3425	ZFS_EXIT(zfsvfs);
3426	return (error);
3427}
3428
3429static void
3430zfs_putpage_commit_cb(void *arg)
3431{
3432	struct page *pp = arg;
3433
3434	ClearPageError(pp);
3435	end_page_writeback(pp);
3436}
3437
3438/*
3439 * Push a page out to disk, once the page is on stable storage the
3440 * registered commit callback will be run as notification of completion.
3441 *
3442 *	IN:	ip	- page mapped for inode.
3443 *		pp	- page to push (page is locked)
3444 *		wbc	- writeback control data
3445 *
3446 *	RETURN:	0 if success
3447 *		error code if failure
3448 *
3449 * Timestamps:
3450 *	ip - ctime|mtime updated
3451 */
3452/* ARGSUSED */
3453int
3454zfs_putpage(struct inode *ip, struct page *pp, struct writeback_control *wbc)
3455{
3456	znode_t		*zp = ITOZ(ip);
3457	zfsvfs_t	*zfsvfs = ITOZSB(ip);
3458	loff_t		offset;
3459	loff_t		pgoff;
3460	unsigned int	pglen;
3461	dmu_tx_t	*tx;
3462	caddr_t		va;
3463	int		err = 0;
3464	uint64_t	mtime[2], ctime[2];
3465	sa_bulk_attr_t	bulk[3];
3466	int		cnt = 0;
3467	struct address_space *mapping;
3468
3469	ZFS_ENTER(zfsvfs);
3470	ZFS_VERIFY_ZP(zp);
3471
3472	ASSERT(PageLocked(pp));
3473
3474	pgoff = page_offset(pp);	/* Page byte-offset in file */
3475	offset = i_size_read(ip);	/* File length in bytes */
3476	pglen = MIN(PAGE_SIZE,		/* Page length in bytes */
3477	    P2ROUNDUP(offset, PAGE_SIZE)-pgoff);
3478
3479	/* Page is beyond end of file */
3480	if (pgoff >= offset) {
3481		unlock_page(pp);
3482		ZFS_EXIT(zfsvfs);
3483		return (0);
3484	}
3485
3486	/* Truncate page length to end of file */
3487	if (pgoff + pglen > offset)
3488		pglen = offset - pgoff;
3489
3490#if 0
3491	/*
3492	 * FIXME: Allow mmap writes past its quota.  The correct fix
3493	 * is to register a page_mkwrite() handler to count the page
3494	 * against its quota when it is about to be dirtied.
3495	 */
3496	if (zfs_id_overblockquota(zfsvfs, DMU_USERUSED_OBJECT,
3497	    KUID_TO_SUID(ip->i_uid)) ||
3498	    zfs_id_overblockquota(zfsvfs, DMU_GROUPUSED_OBJECT,
3499	    KGID_TO_SGID(ip->i_gid)) ||
3500	    (zp->z_projid != ZFS_DEFAULT_PROJID &&
3501	    zfs_id_overblockquota(zfsvfs, DMU_PROJECTUSED_OBJECT,
3502	    zp->z_projid))) {
3503		err = EDQUOT;
3504	}
3505#endif
3506
3507	/*
3508	 * The ordering here is critical and must adhere to the following
3509	 * rules in order to avoid deadlocking in either zfs_read() or
3510	 * zfs_free_range() due to a lock inversion.
3511	 *
3512	 * 1) The page must be unlocked prior to acquiring the range lock.
3513	 *    This is critical because zfs_read() calls find_lock_page()
3514	 *    which may block on the page lock while holding the range lock.
3515	 *
3516	 * 2) Before setting or clearing write back on a page the range lock
3517	 *    must be held in order to prevent a lock inversion with the
3518	 *    zfs_free_range() function.
3519	 *
3520	 * This presents a problem because upon entering this function the
3521	 * page lock is already held.  To safely acquire the range lock the
3522	 * page lock must be dropped.  This creates a window where another
3523	 * process could truncate, invalidate, dirty, or write out the page.
3524	 *
3525	 * Therefore, after successfully reacquiring the range and page locks
3526	 * the current page state is checked.  In the common case everything
3527	 * will be as is expected and it can be written out.  However, if
3528	 * the page state has changed it must be handled accordingly.
3529	 */
3530	mapping = pp->mapping;
3531	redirty_page_for_writepage(wbc, pp);
3532	unlock_page(pp);
3533
3534	zfs_locked_range_t *lr = zfs_rangelock_enter(&zp->z_rangelock,
3535	    pgoff, pglen, RL_WRITER);
3536	lock_page(pp);
3537
3538	/* Page mapping changed or it was no longer dirty, we're done */
3539	if (unlikely((mapping != pp->mapping) || !PageDirty(pp))) {
3540		unlock_page(pp);
3541		zfs_rangelock_exit(lr);
3542		ZFS_EXIT(zfsvfs);
3543		return (0);
3544	}
3545
3546	/* Another process started write block if required */
3547	if (PageWriteback(pp)) {
3548		unlock_page(pp);
3549		zfs_rangelock_exit(lr);
3550
3551		if (wbc->sync_mode != WB_SYNC_NONE) {
3552			if (PageWriteback(pp))
3553				wait_on_page_bit(pp, PG_writeback);
3554		}
3555
3556		ZFS_EXIT(zfsvfs);
3557		return (0);
3558	}
3559
3560	/* Clear the dirty flag the required locks are held */
3561	if (!clear_page_dirty_for_io(pp)) {
3562		unlock_page(pp);
3563		zfs_rangelock_exit(lr);
3564		ZFS_EXIT(zfsvfs);
3565		return (0);
3566	}
3567
3568	/*
3569	 * Counterpart for redirty_page_for_writepage() above.  This page
3570	 * was in fact not skipped and should not be counted as if it were.
3571	 */
3572	wbc->pages_skipped--;
3573	set_page_writeback(pp);
3574	unlock_page(pp);
3575
3576	tx = dmu_tx_create(zfsvfs->z_os);
3577	dmu_tx_hold_write(tx, zp->z_id, pgoff, pglen);
3578	dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_FALSE);
3579	zfs_sa_upgrade_txholds(tx, zp);
3580
3581	err = dmu_tx_assign(tx, TXG_NOWAIT);
3582	if (err != 0) {
3583		if (err == ERESTART)
3584			dmu_tx_wait(tx);
3585
3586		dmu_tx_abort(tx);
3587		__set_page_dirty_nobuffers(pp);
3588		ClearPageError(pp);
3589		end_page_writeback(pp);
3590		zfs_rangelock_exit(lr);
3591		ZFS_EXIT(zfsvfs);
3592		return (err);
3593	}
3594
3595	va = kmap(pp);
3596	ASSERT3U(pglen, <=, PAGE_SIZE);
3597	dmu_write(zfsvfs->z_os, zp->z_id, pgoff, pglen, va, tx);
3598	kunmap(pp);
3599
3600	SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_MTIME(zfsvfs), NULL, &mtime, 16);
3601	SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_CTIME(zfsvfs), NULL, &ctime, 16);
3602	SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_FLAGS(zfsvfs), NULL,
3603	    &zp->z_pflags, 8);
3604
3605	/* Preserve the mtime and ctime provided by the inode */
3606	ZFS_TIME_ENCODE(&ip->i_mtime, mtime);
3607	ZFS_TIME_ENCODE(&ip->i_ctime, ctime);
3608	zp->z_atime_dirty = B_FALSE;
3609	zp->z_seq++;
3610
3611	err = sa_bulk_update(zp->z_sa_hdl, bulk, cnt, tx);
3612
3613	zfs_log_write(zfsvfs->z_log, tx, TX_WRITE, zp, pgoff, pglen, 0,
3614	    zfs_putpage_commit_cb, pp);
3615	dmu_tx_commit(tx);
3616
3617	zfs_rangelock_exit(lr);
3618
3619	if (wbc->sync_mode != WB_SYNC_NONE) {
3620		/*
3621		 * Note that this is rarely called under writepages(), because
3622		 * writepages() normally handles the entire commit for
3623		 * performance reasons.
3624		 */
3625		zil_commit(zfsvfs->z_log, zp->z_id);
3626	}
3627
3628	ZFS_EXIT(zfsvfs);
3629	return (err);
3630}
3631
3632/*
3633 * Update the system attributes when the inode has been dirtied.  For the
3634 * moment we only update the mode, atime, mtime, and ctime.
3635 */
3636int
3637zfs_dirty_inode(struct inode *ip, int flags)
3638{
3639	znode_t		*zp = ITOZ(ip);
3640	zfsvfs_t	*zfsvfs = ITOZSB(ip);
3641	dmu_tx_t	*tx;
3642	uint64_t	mode, atime[2], mtime[2], ctime[2];
3643	sa_bulk_attr_t	bulk[4];
3644	int		error = 0;
3645	int		cnt = 0;
3646
3647	if (zfs_is_readonly(zfsvfs) || dmu_objset_is_snapshot(zfsvfs->z_os))
3648		return (0);
3649
3650	ZFS_ENTER(zfsvfs);
3651	ZFS_VERIFY_ZP(zp);
3652
3653#ifdef I_DIRTY_TIME
3654	/*
3655	 * This is the lazytime semantic introduced in Linux 4.0
3656	 * This flag will only be called from update_time when lazytime is set.
3657	 * (Note, I_DIRTY_SYNC will also set if not lazytime)
3658	 * Fortunately mtime and ctime are managed within ZFS itself, so we
3659	 * only need to dirty atime.
3660	 */
3661	if (flags == I_DIRTY_TIME) {
3662		zp->z_atime_dirty = B_TRUE;
3663		goto out;
3664	}
3665#endif
3666
3667	tx = dmu_tx_create(zfsvfs->z_os);
3668
3669	dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_FALSE);
3670	zfs_sa_upgrade_txholds(tx, zp);
3671
3672	error = dmu_tx_assign(tx, TXG_WAIT);
3673	if (error) {
3674		dmu_tx_abort(tx);
3675		goto out;
3676	}
3677
3678	mutex_enter(&zp->z_lock);
3679	zp->z_atime_dirty = B_FALSE;
3680
3681	SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_MODE(zfsvfs), NULL, &mode, 8);
3682	SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_ATIME(zfsvfs), NULL, &atime, 16);
3683	SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_MTIME(zfsvfs), NULL, &mtime, 16);
3684	SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_CTIME(zfsvfs), NULL, &ctime, 16);
3685
3686	/* Preserve the mode, mtime and ctime provided by the inode */
3687	ZFS_TIME_ENCODE(&ip->i_atime, atime);
3688	ZFS_TIME_ENCODE(&ip->i_mtime, mtime);
3689	ZFS_TIME_ENCODE(&ip->i_ctime, ctime);
3690	mode = ip->i_mode;
3691
3692	zp->z_mode = mode;
3693
3694	error = sa_bulk_update(zp->z_sa_hdl, bulk, cnt, tx);
3695	mutex_exit(&zp->z_lock);
3696
3697	dmu_tx_commit(tx);
3698out:
3699	ZFS_EXIT(zfsvfs);
3700	return (error);
3701}
3702
3703/*ARGSUSED*/
3704void
3705zfs_inactive(struct inode *ip)
3706{
3707	znode_t	*zp = ITOZ(ip);
3708	zfsvfs_t *zfsvfs = ITOZSB(ip);
3709	uint64_t atime[2];
3710	int error;
3711	int need_unlock = 0;
3712
3713	/* Only read lock if we haven't already write locked, e.g. rollback */
3714	if (!RW_WRITE_HELD(&zfsvfs->z_teardown_inactive_lock)) {
3715		need_unlock = 1;
3716		rw_enter(&zfsvfs->z_teardown_inactive_lock, RW_READER);
3717	}
3718	if (zp->z_sa_hdl == NULL) {
3719		if (need_unlock)
3720			rw_exit(&zfsvfs->z_teardown_inactive_lock);
3721		return;
3722	}
3723
3724	if (zp->z_atime_dirty && zp->z_unlinked == B_FALSE) {
3725		dmu_tx_t *tx = dmu_tx_create(zfsvfs->z_os);
3726
3727		dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_FALSE);
3728		zfs_sa_upgrade_txholds(tx, zp);
3729		error = dmu_tx_assign(tx, TXG_WAIT);
3730		if (error) {
3731			dmu_tx_abort(tx);
3732		} else {
3733			ZFS_TIME_ENCODE(&ip->i_atime, atime);
3734			mutex_enter(&zp->z_lock);
3735			(void) sa_update(zp->z_sa_hdl, SA_ZPL_ATIME(zfsvfs),
3736			    (void *)&atime, sizeof (atime), tx);
3737			zp->z_atime_dirty = B_FALSE;
3738			mutex_exit(&zp->z_lock);
3739			dmu_tx_commit(tx);
3740		}
3741	}
3742
3743	zfs_zinactive(zp);
3744	if (need_unlock)
3745		rw_exit(&zfsvfs->z_teardown_inactive_lock);
3746}
3747
3748/*
3749 * Fill pages with data from the disk.
3750 */
3751static int
3752zfs_fillpage(struct inode *ip, struct page *pl[], int nr_pages)
3753{
3754	znode_t *zp = ITOZ(ip);
3755	zfsvfs_t *zfsvfs = ITOZSB(ip);
3756	objset_t *os;
3757	struct page *cur_pp;
3758	u_offset_t io_off, total;
3759	size_t io_len;
3760	loff_t i_size;
3761	unsigned page_idx;
3762	int err;
3763
3764	os = zfsvfs->z_os;
3765	io_len = nr_pages << PAGE_SHIFT;
3766	i_size = i_size_read(ip);
3767	io_off = page_offset(pl[0]);
3768
3769	if (io_off + io_len > i_size)
3770		io_len = i_size - io_off;
3771
3772	/*
3773	 * Iterate over list of pages and read each page individually.
3774	 */
3775	page_idx = 0;
3776	for (total = io_off + io_len; io_off < total; io_off += PAGESIZE) {
3777		caddr_t va;
3778
3779		cur_pp = pl[page_idx++];
3780		va = kmap(cur_pp);
3781		err = dmu_read(os, zp->z_id, io_off, PAGESIZE, va,
3782		    DMU_READ_PREFETCH);
3783		kunmap(cur_pp);
3784		if (err) {
3785			/* convert checksum errors into IO errors */
3786			if (err == ECKSUM)
3787				err = SET_ERROR(EIO);
3788			return (err);
3789		}
3790	}
3791
3792	return (0);
3793}
3794
3795/*
3796 * Uses zfs_fillpage to read data from the file and fill the pages.
3797 *
3798 *	IN:	ip	 - inode of file to get data from.
3799 *		pl	 - list of pages to read
3800 *		nr_pages - number of pages to read
3801 *
3802 *	RETURN:	0 on success, error code on failure.
3803 *
3804 * Timestamps:
3805 *	vp - atime updated
3806 */
3807/* ARGSUSED */
3808int
3809zfs_getpage(struct inode *ip, struct page *pl[], int nr_pages)
3810{
3811	znode_t	 *zp  = ITOZ(ip);
3812	zfsvfs_t *zfsvfs = ITOZSB(ip);
3813	int	 err;
3814
3815	if (pl == NULL)
3816		return (0);
3817
3818	ZFS_ENTER(zfsvfs);
3819	ZFS_VERIFY_ZP(zp);
3820
3821	err = zfs_fillpage(ip, pl, nr_pages);
3822
3823	ZFS_EXIT(zfsvfs);
3824	return (err);
3825}
3826
3827/*
3828 * Check ZFS specific permissions to memory map a section of a file.
3829 *
3830 *	IN:	ip	- inode of the file to mmap
3831 *		off	- file offset
3832 *		addrp	- start address in memory region
3833 *		len	- length of memory region
3834 *		vm_flags- address flags
3835 *
3836 *	RETURN:	0 if success
3837 *		error code if failure
3838 */
3839/*ARGSUSED*/
3840int
3841zfs_map(struct inode *ip, offset_t off, caddr_t *addrp, size_t len,
3842    unsigned long vm_flags)
3843{
3844	znode_t  *zp = ITOZ(ip);
3845	zfsvfs_t *zfsvfs = ITOZSB(ip);
3846
3847	ZFS_ENTER(zfsvfs);
3848	ZFS_VERIFY_ZP(zp);
3849
3850	if ((vm_flags & VM_WRITE) && (zp->z_pflags &
3851	    (ZFS_IMMUTABLE | ZFS_READONLY | ZFS_APPENDONLY))) {
3852		ZFS_EXIT(zfsvfs);
3853		return (SET_ERROR(EPERM));
3854	}
3855
3856	if ((vm_flags & (VM_READ | VM_EXEC)) &&
3857	    (zp->z_pflags & ZFS_AV_QUARANTINED)) {
3858		ZFS_EXIT(zfsvfs);
3859		return (SET_ERROR(EACCES));
3860	}
3861
3862	if (off < 0 || len > MAXOFFSET_T - off) {
3863		ZFS_EXIT(zfsvfs);
3864		return (SET_ERROR(ENXIO));
3865	}
3866
3867	ZFS_EXIT(zfsvfs);
3868	return (0);
3869}
3870
3871/*
3872 * Free or allocate space in a file.  Currently, this function only
3873 * supports the `F_FREESP' command.  However, this command is somewhat
3874 * misnamed, as its functionality includes the ability to allocate as
3875 * well as free space.
3876 *
3877 *	IN:	zp	- znode of file to free data in.
3878 *		cmd	- action to take (only F_FREESP supported).
3879 *		bfp	- section of file to free/alloc.
3880 *		flag	- current file open mode flags.
3881 *		offset	- current file offset.
3882 *		cr	- credentials of caller.
3883 *
3884 *	RETURN:	0 on success, error code on failure.
3885 *
3886 * Timestamps:
3887 *	zp - ctime|mtime updated
3888 */
3889/* ARGSUSED */
3890int
3891zfs_space(znode_t *zp, int cmd, flock64_t *bfp, int flag,
3892    offset_t offset, cred_t *cr)
3893{
3894	zfsvfs_t	*zfsvfs = ZTOZSB(zp);
3895	uint64_t	off, len;
3896	int		error;
3897
3898	ZFS_ENTER(zfsvfs);
3899	ZFS_VERIFY_ZP(zp);
3900
3901	if (cmd != F_FREESP) {
3902		ZFS_EXIT(zfsvfs);
3903		return (SET_ERROR(EINVAL));
3904	}
3905
3906	/*
3907	 * Callers might not be able to detect properly that we are read-only,
3908	 * so check it explicitly here.
3909	 */
3910	if (zfs_is_readonly(zfsvfs)) {
3911		ZFS_EXIT(zfsvfs);
3912		return (SET_ERROR(EROFS));
3913	}
3914
3915	if (bfp->l_len < 0) {
3916		ZFS_EXIT(zfsvfs);
3917		return (SET_ERROR(EINVAL));
3918	}
3919
3920	/*
3921	 * Permissions aren't checked on Solaris because on this OS
3922	 * zfs_space() can only be called with an opened file handle.
3923	 * On Linux we can get here through truncate_range() which
3924	 * operates directly on inodes, so we need to check access rights.
3925	 */
3926	if ((error = zfs_zaccess(zp, ACE_WRITE_DATA, 0, B_FALSE, cr))) {
3927		ZFS_EXIT(zfsvfs);
3928		return (error);
3929	}
3930
3931	off = bfp->l_start;
3932	len = bfp->l_len; /* 0 means from off to end of file */
3933
3934	error = zfs_freesp(zp, off, len, flag, TRUE);
3935
3936	ZFS_EXIT(zfsvfs);
3937	return (error);
3938}
3939
3940/*ARGSUSED*/
3941int
3942zfs_fid(struct inode *ip, fid_t *fidp)
3943{
3944	znode_t		*zp = ITOZ(ip);
3945	zfsvfs_t	*zfsvfs = ITOZSB(ip);
3946	uint32_t	gen;
3947	uint64_t	gen64;
3948	uint64_t	object = zp->z_id;
3949	zfid_short_t	*zfid;
3950	int		size, i, error;
3951
3952	ZFS_ENTER(zfsvfs);
3953
3954	if (fidp->fid_len < SHORT_FID_LEN) {
3955		fidp->fid_len = SHORT_FID_LEN;
3956		ZFS_EXIT(zfsvfs);
3957		return (SET_ERROR(ENOSPC));
3958	}
3959
3960	ZFS_VERIFY_ZP(zp);
3961
3962	if ((error = sa_lookup(zp->z_sa_hdl, SA_ZPL_GEN(zfsvfs),
3963	    &gen64, sizeof (uint64_t))) != 0) {
3964		ZFS_EXIT(zfsvfs);
3965		return (error);
3966	}
3967
3968	gen = (uint32_t)gen64;
3969
3970	size = SHORT_FID_LEN;
3971
3972	zfid = (zfid_short_t *)fidp;
3973
3974	zfid->zf_len = size;
3975
3976	for (i = 0; i < sizeof (zfid->zf_object); i++)
3977		zfid->zf_object[i] = (uint8_t)(object >> (8 * i));
3978
3979	/* Must have a non-zero generation number to distinguish from .zfs */
3980	if (gen == 0)
3981		gen = 1;
3982	for (i = 0; i < sizeof (zfid->zf_gen); i++)
3983		zfid->zf_gen[i] = (uint8_t)(gen >> (8 * i));
3984
3985	ZFS_EXIT(zfsvfs);
3986	return (0);
3987}
3988
3989#if defined(_KERNEL)
3990EXPORT_SYMBOL(zfs_open);
3991EXPORT_SYMBOL(zfs_close);
3992EXPORT_SYMBOL(zfs_lookup);
3993EXPORT_SYMBOL(zfs_create);
3994EXPORT_SYMBOL(zfs_tmpfile);
3995EXPORT_SYMBOL(zfs_remove);
3996EXPORT_SYMBOL(zfs_mkdir);
3997EXPORT_SYMBOL(zfs_rmdir);
3998EXPORT_SYMBOL(zfs_readdir);
3999EXPORT_SYMBOL(zfs_getattr_fast);
4000EXPORT_SYMBOL(zfs_setattr);
4001EXPORT_SYMBOL(zfs_rename);
4002EXPORT_SYMBOL(zfs_symlink);
4003EXPORT_SYMBOL(zfs_readlink);
4004EXPORT_SYMBOL(zfs_link);
4005EXPORT_SYMBOL(zfs_inactive);
4006EXPORT_SYMBOL(zfs_space);
4007EXPORT_SYMBOL(zfs_fid);
4008EXPORT_SYMBOL(zfs_getpage);
4009EXPORT_SYMBOL(zfs_putpage);
4010EXPORT_SYMBOL(zfs_dirty_inode);
4011EXPORT_SYMBOL(zfs_map);
4012
4013/* BEGIN CSTYLED */
4014module_param(zfs_delete_blocks, ulong, 0644);
4015MODULE_PARM_DESC(zfs_delete_blocks, "Delete files larger than N blocks async");
4016/* END CSTYLED */
4017
4018#endif
4019