1/*
2 * Copyright (c) 2000-2001 Boris Popov
3 * All rights reserved.
4 *
5 * Portions Copyright (C) 2001 - 2013 Apple Inc. All rights reserved.
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
10 * 1. Redistributions of source code must retain the above copyright
11 *    notice, this list of conditions and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright
13 *    notice, this list of conditions and the following disclaimer in the
14 *    documentation and/or other materials provided with the distribution.
15 * 3. All advertising materials mentioning features or use of this software
16 *    must display the following acknowledgement:
17 *    This product includes software developed by Boris Popov.
18 * 4. Neither the name of the author nor the names of any co-contributors
19 *    may be used to endorse or promote products derived from this software
20 *    without specific prior written permission.
21 *
22 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
23 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
24 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
25 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
26 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
27 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
28 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
29 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
30 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
31 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
32 * SUCH DAMAGE.
33 *
34 */
35#include <sys/param.h>
36#include <sys/systm.h>
37#include <sys/kernel.h>
38#include <sys/time.h>
39#include <sys/proc.h>
40#include <sys/mount.h>
41#include <sys/vnode.h>
42#include <sys/malloc.h>
43#include <sys/sysctl.h>
44#include <sys/queue.h>
45#include <sys/fcntl.h>
46#include <sys/stat.h>
47#include <libkern/OSAtomic.h>
48
49#include <libkern/crypto/md5.h>
50
51#include <sys/kauth.h>
52#include <sys/paths.h>
53
54#include <sys/smb_apple.h>
55#include <sys/smb_byte_order.h>
56#include <sys/mchain.h>
57#include <sys/msfscc.h>
58#include <netsmb/smb.h>
59#include <netsmb/smb_2.h>
60#include <netsmb/smb_rq.h>
61#include <netsmb/smb_rq_2.h>
62#include <netsmb/smb_conn.h>
63#include <netsmb/smb_conn_2.h>
64#include <netsmb/smb_subr.h>
65
66#include <smbfs/smbfs.h>
67#include <smbfs/smbfs_node.h>
68#include <smbfs/smbfs_subr.h>
69#include <smbfs/smbfs_subr_2.h>
70#include <Triggers/triggers.h>
71#include <smbclient/smbclient_internal.h>
72
73#define	SMBFS_NOHASH(smp, hval)	(&(smp)->sm_hash[(hval) & (smp)->sm_hashlen])
74#define	smbfs_hash_lock(smp)	(lck_mtx_lock((smp)->sm_hashlock))
75#define	smbfs_hash_unlock(smp)	(lck_mtx_unlock((smp)->sm_hashlock))
76
77extern vnop_t **smbfs_vnodeop_p;
78
79MALLOC_DEFINE(M_SMBNODE, "SMBFS node", "SMBFS vnode private part");
80MALLOC_DEFINE(M_SMBNODENAME, "SMBFS nname", "SMBFS node name");
81
82#define	FNV_32_PRIME ((uint32_t) 0x01000193UL)
83#define	FNV1_32_INIT ((uint32_t) 33554467UL)
84
85#define isdigit(d) ((d) >= '0' && (d) <= '9')
86
87/*
88 * smbfs_build_path
89 *
90 * Build a path that starts from the root node and includes this node. May
91 * want to remove the SMBFS_MAXPATHCOMP limit in the future. That would require
92 * two passes through the loop.
93 */
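/*
 * Hedged usage sketch (the prefix string below is illustrative): the caller
 * passes in a NUL-terminated buffer that already holds a prefix and this
 * routine appends "/<component>" for each ancestor down to np, e.g.
 *
 *     char path[MAXPATHLEN] = "smb://user@server/share";
 *     error = smbfs_build_path(path, np, MAXPATHLEN);
 *     // on success, path might now read "smb://user@server/share/dir1/file"
 */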
94static int
95smbfs_build_path(char *path, struct smbnode *np, size_t maxlen)
96{
97    struct smbmount *smp = np->n_mount;
98	struct smbnode  *npstack[SMBFS_MAXPATHCOMP];
99	struct smbnode  **npp = &npstack[0];
100	int i, error = 0;
101    int lock_count = 0;
102	struct smbnode  *lock_stack[SMBFS_MAXPATHCOMP];
103	struct smbnode  **locked_npp = &lock_stack[0];
104
105    /*
106     * We hold sm_reclaim_lock to protect np->n_parent fields from a
107     * race with smbfs_vnop_reclaim()/smbfs_ClearChildren() since we are
108     * walking all the parents up to the root vnode. Always lock
109     * sm_reclaim_lock first and then individual n_parent_rwlock next.
110     * See <rdar://problem/15707521>.
111     */
112	lck_mtx_lock(&smp->sm_reclaim_lock);
113
114    lck_rw_lock_shared(&np->n_parent_rwlock);
115    *locked_npp++ = np;     /* Save node to be unlocked later */
116    lock_count += 1;
117
118    i = 0;
119    while (np->n_parent) {
120		if (i++ == SMBFS_MAXPATHCOMP) {
121			error = ENAMETOOLONG;
122            goto done;
123        }
124		*npp++ = np;
125
126        np = np->n_parent;
127
128        lck_rw_lock_shared(&np->n_parent_rwlock);
129        *locked_npp++ = np;     /* Save node to be unlocked later */
130        lock_count += 1;
131	}
132
133	while (i-- && !error) {
134		np = *--npp;
135		if (strlcat(path, "/", MAXPATHLEN) >= maxlen) {
136			error = ENAMETOOLONG;
137		}
138        else {
139            lck_rw_lock_shared(&np->n_name_rwlock);
140			if (strlcat(path, (char *)np->n_name, maxlen) >= maxlen) {
141				error = ENAMETOOLONG;
142			}
143            lck_rw_unlock_shared(&np->n_name_rwlock);
144		}
145	}
146
147done:
148    /* Unlock all the nodes */
149    for (i = 0; i < lock_count; i++) {
150        lck_rw_unlock_shared(&lock_stack[i]->n_parent_rwlock);
151    }
152
153    lck_mtx_unlock(&smp->sm_reclaim_lock);
154	return error;
155}
156
157static void *
158smbfs_trigger_get_mount_args(vnode_t vp, __unused vfs_context_t ctx,
159							 int *errp)
160{
161	struct mount_url_callargs *argsp;
162	int	error = 0;
163	int	length;
164	char	*url, *mountOnPath;
165	struct smbmount *smp = VTOSMB(vp)->n_mount;
166
167	/*
168	 * Allocate the args structure
169	 */
170	SMB_MALLOC(argsp, struct mount_url_callargs *, sizeof (*argsp), M_SMBFSDATA, M_WAITOK);
171
172	/*
173	 * Get the UID for which the mount should be done; it's the
174	 * UID for which the mount containing the trigger was done,
175	 * which might not be the UID for the process that triggered
176	 * the mount.
177	 */
178	argsp->muc_uid = smp->sm_args.uid;
179
	/*
	 * Create the URL
	 * 1. smb:
	 * 2. vnode's mount point from name
	 * 3. path from the root to this vnode.
	 * 4. URL must be less than MAXPATHLEN
	 *
	 * What should the max length be for the URL? Should it be MAXPATHLEN
	 * plus the scheme?
	 */
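	/*
	 * Hedged example of what gets built below (server and path names are
	 * illustrative): f_mntfromname for an smbfs mount typically looks like
	 * "//user@server/share", so after the strlcpy/strlcat the url reads
	 * "smb://user@server/share", and smbfs_build_path() then appends the
	 * path from the root to this vnode, e.g. "smb://user@server/share/dir/file".
	 */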
190	SMB_MALLOC(url, char *, MAXPATHLEN, M_SMBFSDATA, M_WAITOK | M_ZERO);
191	strlcpy(url, "smb:", MAXPATHLEN);
192	if (strlcat(url, vfs_statfs(vnode_mount(vp))->f_mntfromname, MAXPATHLEN) >= MAXPATHLEN) {
193		error = ENAMETOOLONG;
194	} else {
195		error = smbfs_build_path(url, VTOSMB(vp), MAXPATHLEN);
196	}
197	if (error) {
198		SMBERROR_LOCK(VTOSMB(vp), "%s: URL FAILED url = %s\n", VTOSMB(vp)->n_name, url);
199
200		SMB_FREE(url, M_SMBFSDATA);
201		SMB_FREE(argsp, M_SMBFSDATA);
202		*errp = error;
203		return (NULL);
204	}
205
206	/* Create the mount on path */
207	SMB_MALLOC(mountOnPath, char *, MAXPATHLEN, M_SMBFSDATA, M_WAITOK | M_ZERO);
208	length = MAXPATHLEN;
209	/* This can fail sometimes, should we even bother with it? */
210	error = vn_getpath(vp, mountOnPath, &length);
211	if (error) {
212		SMBERROR_LOCK(VTOSMB(vp), "%s: vn_getpath FAILED, using smbfs_build_path!\n", VTOSMB(vp)->n_name);
213
214		if (strlcpy(mountOnPath, vfs_statfs(vnode_mount(vp))->f_mntonname, MAXPATHLEN) >= MAXPATHLEN) {
215			error = ENAMETOOLONG;
216		} else {
217			error = smbfs_build_path(mountOnPath, VTOSMB(vp), MAXPATHLEN);
218		}
219	}
220	if (error) {
221		SMBERROR_LOCK(VTOSMB(vp), "%s: Mount on name FAILED url = %s\n", VTOSMB(vp)->n_name, url);
222
223		SMB_FREE(mountOnPath, M_SMBFSDATA);
224		SMB_FREE(url, M_SMBFSDATA);
225		SMB_FREE(argsp, M_SMBFSDATA);
226		*errp = error;
227		return (NULL);
228	}
229
230    SMBWARNING_LOCK(VTOSMB(vp), "%s: Triggering with URL = %s mountOnPath = %s\n",
231                    VTOSMB(vp)->n_name, url, mountOnPath);
232
233	argsp->muc_url = url;
234	argsp->muc_mountpoint = mountOnPath;
235	argsp->muc_opts = (smp->sm_args.altflags & SMBFS_MNT_SOFT) ? (char *)"soft" : (char *)"";
236	*errp = 0;
237	return (argsp);
238}
239
240static void
241smbfs_trigger_rel_mount_args(void *data)
242{
243	struct mount_url_callargs *argsp = data;
244
245	SMB_FREE(argsp->muc_url, M_SMBFSDATA);
246	SMB_FREE(argsp->muc_mountpoint, M_SMBFSDATA);
247	SMB_FREE(argsp, M_SMBFSDATA);
248}
249
/*
 * See if this is one of those faked-up symbolic links. This is Conrad and Steve
 * French's method for storing and reading symlinks on Windows servers.
 *
 * The calling routine must hold a reference on the share
 *
 */
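/*
 * Hedged sketch of the on-disk layout this parser expects, expressed with the
 * constants used below (offsets only; the concrete values are not restated
 * here and the trailing padding is an assumption):
 *
 *     offset 0                                : magic (smb_symmagic, SMB_SYMMAGICLEN bytes)
 *     offset SMB_SYMMAGICLEN                  : target length as ASCII digits,
 *                                               newline terminated (SMB_SYMLENLEN bytes)
 *     offset SMB_SYMMAGICLEN + SMB_SYMLENLEN  : MD5 of the target path as lowercase
 *                                               hex text (SMB_SYMMD5LEN - 1 characters)
 *     offset SMB_SYMHDRLEN                    : the target path itself (len bytes),
 *                                               padded out to a file size of SMB_SYMLEN
 */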
257static int
258smb_check_for_windows_symlink(struct smb_share *share, struct smbnode *np,
259							  int *symlen, vfs_context_t context)
260{
261    uio_t uio = NULL;
262    MD5_CTX md5;
263    char m5b[SMB_SYMMD5LEN];
264    uint32_t state[4];
265    int len = 0;
266    unsigned char *sb = NULL;
267    unsigned char *cp;
268    SMBFID fid = 0;
269    int error, cerror;
270    size_t read_size = 0; /* unused */
271
272    SMB_MALLOC(sb, void *, (size_t) np->n_size, M_TEMP, M_WAITOK);
273    if (sb == NULL) {
274        error = ENOMEM;
275        goto exit;
276    }
277
278    uio = uio_create(1, 0, UIO_SYSSPACE, UIO_READ);
279    if (uio == NULL) {
280        error = ENOMEM;
281        goto exit;
282    }
283
284    uio_addiov(uio, CAST_USER_ADDR_T(sb), np->n_size);
285
286    if (SSTOVC(share)->vc_flags & SMBV_SMB2) {
287        /* SMB 2/3 */
288        error = smbfs_smb_cmpd_create_read_close(share, np,
289                                                 NULL, 0,
290                                                 NULL, 0,
291                                                 uio, &read_size,
292                                                 NULL,
293                                                 context);
294    }
295    else {
296        /* SMB 1 */
297        error = smbfs_tmpopen(share, np, SMB2_FILE_READ_DATA, &fid, context);
298        if (error) {
299            goto exit;
300        }
301
302        error = smb_smb_read(share, fid, uio, context);
303
304        cerror = smbfs_tmpclose(share, np, fid, context);
305        if (cerror) {
306            SMBWARNING_LOCK(np, "error %d closing fid %llx file %s\n", cerror, fid, np->n_name);
307        }
308    }
309
310    if (!error && !bcmp(sb, smb_symmagic, SMB_SYMMAGICLEN)) {
311        for (cp = &sb[SMB_SYMMAGICLEN]; cp < &sb[SMB_SYMMAGICLEN+SMB_SYMLENLEN-1]; cp++) {
312            if (!isdigit(*cp))
313                break;
314            len *= 10;
315            len += *cp - '0';
316        }
317        cp++; /* skip newline */
318
319        if ((cp != &sb[SMB_SYMMAGICLEN+SMB_SYMLENLEN]) ||
320            (len > (int)(np->n_size - SMB_SYMHDRLEN))) {
321            SMBWARNING("bad symlink length\n");
322            error = ENOENT; /* Not a faked up symbolic link */
323        } else {
324            MD5Init(&md5);
325            MD5Update(&md5, &sb[SMB_SYMHDRLEN], len);
326            MD5Final((u_char *)state, &md5);
327            (void)snprintf(m5b, sizeof(m5b), "%08x%08x%08x%08x",
328                           htobel(state[0]), htobel(state[1]), htobel(state[2]),
329                           htobel(state[3]));
330            if (bcmp(cp, m5b, SMB_SYMMD5LEN-1)) {
331                SMBWARNING("bad symlink md5\n");
332                error = ENOENT; /* Not a faked up symbolic link */
333            } else {
334                *symlen = len;
335                error = 0;
336            }
337        }
338    }
339    else {
340		error = ENOENT; /* Not a faked up symbolic link */
341    }
342
343exit:
344    if (uio != NULL) {
345        uio_free(uio);
346    }
347
348    if (sb != NULL) {
349        SMB_FREE(sb, M_TEMP);
350    }
351
352    return error;
353}
354
355/*
356 * Lock a node
357 */
358int
359smbnode_lock(struct smbnode *np, enum smbfslocktype locktype)
360{
361	if (locktype == SMBFS_SHARED_LOCK)
362		lck_rw_lock_shared(&np->n_rwlock);
363	else
364		lck_rw_lock_exclusive(&np->n_rwlock);
365
366	np->n_lockState = locktype;
367
368#if 1
369	/* For Debugging... */
370	if (locktype != SMBFS_SHARED_LOCK) {
371		np->n_activation = (void *) current_thread();
372	}
373#endif
374	return (0);
375}
376
377
378/*
379 * Lock a pair of smbnodes
380 *
381 * If the two nodes are not the same then lock in the order they came in. The calling routine
382 * should always put them in parent/child order.
383 */
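/*
 * Hedged usage sketch (variable names are illustrative): callers pass the
 * nodes in parent/child order and release them with the matching unlock call:
 *
 *     error = smbnode_lockpair(dnp, np, SMBFS_EXCLUSIVE_LOCK);
 *     if (error == 0) {
 *         // ... operate on both nodes ...
 *         smbnode_unlockpair(dnp, np);
 *     }
 */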
384int
385smbnode_lockpair(struct smbnode *np1, struct smbnode *np2, enum smbfslocktype locktype)
386{
387	int error;
388
389	/*
390	 * If smbnodes match then just lock one.
391	 */
392	if (np1 == np2) {
393		return smbnode_lock(np1, locktype);
394	}
395	if ((error = smbnode_lock(np1, locktype)))
396		return (error);
397	if ((error = smbnode_lock(np2, locktype))) {
398		smbnode_unlock(np1);
399		return (error);
400	}
401	return (0);
402}
403
/*
 * Unlock an smbnode
 */
407void
408smbnode_unlock(struct smbnode *np)
409{
	/* The old code called lck_rw_done, which is not a supported KPI */
411	if (np->n_lockState == SMBFS_SHARED_LOCK) {
412		/*
413		 * Should we keep a counter and set n_lockState to zero when the
414		 * counter goes to zero? We would need to lock the counter in that
415		 * case.
416		 */
417		lck_rw_unlock_shared(&np->n_rwlock);
418	} else {
419		/* Note: SMBFS_RECLAIM_LOCK is really SMBFS_EXCLUSIVE_LOCK */
420		np->n_lockState = 0;
421		lck_rw_unlock_exclusive(&np->n_rwlock);
422	}
423}
424
/*
 * Unlock a pair of smbnodes.
 */
428void
429smbnode_unlockpair(struct smbnode *np1, struct smbnode *np2)
430{
431	smbnode_unlock(np1);
432	if (np2 != np1)
433		smbnode_unlock(np2);
434}
435
436static int
437tolower(unsigned char ch)
438{
439    if (ch >= 'A' && ch <= 'Z')
440        ch = 'a' + (ch - 'A');
441
442    return ch;
443}
444
/*
 * SMB 2/3 - if the server supports File IDs, return the ino as the hashval.
 * If there are no File IDs, create the hashval from the name. Since we
 * currently use strncasecmp to find a match and it does a tolower, we should
 * do the same when creating our hashval from the name.
 */
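/*
 * Hedged illustration of the name-hash branch below: it is the FNV-1
 * recurrence applied to the lower-cased bytes of the name,
 *
 *     v = FNV1_32_INIT;
 *     for each byte c of name:
 *         v = (v * FNV_32_PRIME) ^ tolower(c);
 *
 * and the resulting value (or the server File ID) is later folded into a
 * hash bucket by SMBFS_NOHASH(), i.e. hashval & sm_hashlen.
 */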
451uint64_t
452smbfs_hash(struct smb_share *share, uint64_t ino,
453            const char *name, size_t nmlen)
454{
455	uint64_t v;
456
457    /* if no share, just want hash from name */
458
459    if ((share) && (SSTOVC(share)->vc_misc_flags & SMBV_HAS_FILEIDS)) {
460        /* Server supports File IDs, use the inode number as hash value */
461        if (ino == 0) {
462            /* This should not happen */
463            SMBERROR("node id of 0 for %s\n", name);
464        }
465
466        v = ino;
467    }
468    else {
469        /* Server does not support File IDs, hash the name instead */
470        for (v = FNV1_32_INIT; nmlen; name++, nmlen--) {
471            v *= FNV_32_PRIME;
472            v ^= (uint64_t)tolower((unsigned char)*name);
473        }
474    }
475
476	return v;
477}
478
479void
480smb_vhashrem(struct smbnode *np)
481{
482	smbfs_hash_lock(np->n_mount);
483	if (np->n_hash.le_prev) {
484		LIST_REMOVE(np, n_hash);
485		np->n_hash.le_prev = NULL;
486	}
487	smbfs_hash_unlock(np->n_mount);
488	return;
489}
490
491void
492smb_vhashadd(struct smbnode *np, uint64_t hashval)
493{
494	struct smbnode_hashhead	*nhpp;
495
496	smbfs_hash_lock(np->n_mount);
497	nhpp = SMBFS_NOHASH(np->n_mount, hashval);
498	LIST_INSERT_HEAD(nhpp, np, n_hash);
499	smbfs_hash_unlock(np->n_mount);
500	return;
501
502}
503
504/* Returns 0 if the names match, non zero if they do not match */
505static int
506smbfs_check_name(struct smb_share *share,
507                 const char *name1,
508                 const char *name2,
509                 size_t name_len)
510{
511    int ret_val = 0;
512
513    if (SSTOVC(share)->vc_misc_flags & SMBV_OSX_SERVER) {
        /* It's OS X Server so we know for sure */
515        if (SSTOVC(share)->vc_volume_caps & kAAPL_CASE_SENSITIVE) {
516            /* Case Sensitive */
517            ret_val = bcmp(name1, name2, name_len);
518            return (ret_val);
519        }
520    }
521
522    /* Not case sensitive */
523    ret_val = strncasecmp(name1, name2, name_len);
524
525    return (ret_val);
526}
527
528static vnode_t
529smb_hashget(struct smbmount *smp, struct smbnode *dnp, uint64_t hashval,
530			const char *name, size_t nmlen, size_t maxfilenamelen,
531			uint32_t node_flag, const char *sname)
532{
533	vnode_t	vp;
534	struct smbnode_hashhead	*nhpp;
535	struct smbnode *np;
536	uint32_t vid;
537	size_t snmlen = (sname) ? strnlen(sname, maxfilenamelen+1) : 0;
538    struct smb_vc *vcp = NULL;
539
540    if (smp->sm_share == NULL) {
541        SMBERROR("smp->sm_share is NULL? \n");
542        return (NULL);
543    }
544
545    vcp = SSTOVC(smp->sm_share);
546
547loop:
548	smbfs_hash_lock(smp);
549	nhpp = SMBFS_NOHASH(smp, hashval);
550	LIST_FOREACH(np, nhpp, n_hash) {
		/*
		 * If we are only looking for a stream node then skip any other nodes.
		 * If we are looking for a directory or data node then skip any stream nodes.
		 */
555		if ((np->n_flag & N_ISSTREAM) != node_flag)
556			continue;
557
558        if (vcp->vc_misc_flags & SMBV_HAS_FILEIDS) {
559            /*
560             * Server supports File IDs - ID uniquely identifies the item
561             */
562            if (np->n_ino != hashval) {
563                continue;
564            }
565        }
566        else {
            /*
             * Server does not support File IDs
             * We currently assume the remote file system is case insensitive, since
             * we have no way of telling using the protocol. Someday I would like to
             * detect whether the server is case sensitive. If the server is case
             * sensitive then we should use bcmp; if case insensitive, use strncasecmp.
             * NOTE: The strncasecmp routine really only does a tolower, which is not
             * what we really want, but it is the best we can do at this time.
             */
576
577            lck_rw_lock_shared(&np->n_parent_rwlock);
578            lck_rw_lock_shared(&np->n_name_rwlock);
579            if ((np->n_parent != dnp) || (np->n_nmlen != nmlen) ||
580                (smbfs_check_name(smp->sm_share, name, np->n_name, nmlen) != 0)) {
581                lck_rw_unlock_shared(&np->n_name_rwlock);
582                lck_rw_unlock_shared(&np->n_parent_rwlock);
583                continue;
584            }
585            lck_rw_unlock_shared(&np->n_name_rwlock);
586            lck_rw_unlock_shared(&np->n_parent_rwlock);
587        }
588
589        if ((np->n_flag & NDELETEONCLOSE) ||
590            (np->n_flag & NMARKEDFORDLETE)) {
591            /* Skip nodes that are not in the name space anymore */
592            continue;
593        }
594
595		/* If this is a stream make sure its the correct stream */
596		if (np->n_flag & N_ISSTREAM) {
597			DBG_ASSERT(sname);	/* Better be looking for a stream at this point */
598
599            lck_rw_lock_shared(&np->n_name_rwlock);
600            if ((np->n_snmlen != snmlen) ||
601				(bcmp(sname, np->n_sname, snmlen) != 0)) {
				SMBERROR("We only support one stream and we found %s looking for %s\n",
603						 np->n_sname, sname);
604                lck_rw_unlock_shared(&np->n_name_rwlock);
605				continue;
606			}
607            lck_rw_unlock_shared(&np->n_name_rwlock);
608		}
609
610		if (ISSET(np->n_flag, NALLOC)) {
611			SET(np->n_flag, NWALLOC);
612			(void)msleep((caddr_t)np, smp->sm_hashlock, PINOD|PDROP, "smb_ngetalloc", 0);
613			goto loop;
614		}
615
616		if (ISSET(np->n_flag, NTRANSIT)) {
617			SET(np->n_flag, NWTRANSIT);
618			(void)msleep((caddr_t)np, smp->sm_hashlock, PINOD|PDROP, "smb_ngettransit", 0);
619			goto loop;
620		}
621
622		vp = SMBTOV(np);
623		vid = vnode_vid(vp);
624
625		smbfs_hash_unlock(smp);
626
627		if (vnode_getwithvid(vp, vid)) {
628			return (NULL);
629        }
630
631		/* Always return the node locked */
632        if ((smbnode_lock(np, SMBFS_EXCLUSIVE_LOCK)) != 0) {
633            vnode_put(vp);
634            return (NULL);
635        }
636
637		np->n_lastvop = smb_hashget;
638		return (vp);
639	}
640
641	smbfs_hash_unlock(smp);
642	return (NULL);
643}
644
/*
 * We need to test to see if the vtype changed on the node. We currently only support
 * three types of vnodes (VDIR, VLNK, and VREG). If the network transaction came
 * from Unix extensions, Darwin or a create then we can just test to make sure the vtype
 * is the same. Otherwise we cannot tell the difference between a symbolic link and
 * a regular file at this point. So we just make sure it didn't change from a file
 * to a directory or vice versa. Also make sure it didn't change from a reparse point
 * to a non-reparse point or vice versa.
 */
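/*
 * Hedged example of how a caller reacts to a vtype change (this mirrors the
 * code in smbfs_nget below): if a node cached as VREG is now reported by the
 * server as VDIR, this routine returns TRUE and the caller tosses the stale
 * node and builds a new one with the correct type:
 *
 *     if (fap && node_vtype_changed(*vpp, vnode_vtype(*vpp), fap)) {
 *         cache_purge(*vpp);           // drop the stale name cache entry
 *         smb_vhashrem(VTOSMB(*vpp));  // remove it from our hash table
 *         // ... then fall through and create a node with the new vtype ...
 *     }
 */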
654static int
655node_vtype_changed(vnode_t vp, enum vtype node_vtype, struct smbfattr *fap)
656{
	int rt_value = FALSE;	/* Always default to no change */
658
659	/* Root node can never change, bad things will happen */
660	if (vnode_isvroot(vp))
661		return FALSE;
662
663	if (vnode_isnamedstream(vp))	/* Streams have no type so ignore them */
664		return FALSE;
665
666
667	/*
668	 * The vtype is valid, use it to make the decision, Unix extensions, Darwin
669	 * or a create.
670	 */
671	if (fap->fa_valid_mask & FA_VTYPE_VALID) {
672		if ((VTOSMB(vp)->n_flag & NWINDOWSYMLNK) && (fap->fa_vtype == VREG)) {
			/*
			 * This is a Windows fake symlink, so the node type will come in as
			 * a regular file. Never let it change unless the node type comes
			 * in as something other than a regular file.
			 */
678			rt_value = FALSE;
679		} else {
680			rt_value = (fap->fa_vtype != node_vtype);
681		}
682		goto done;
683	}
684
	/* Once a directory, always a directory */
686	if (((node_vtype == VDIR) && !(VTOSMB(vp)->n_dosattr & SMB_EFA_DIRECTORY)) ||
687		((node_vtype != VDIR) && (VTOSMB(vp)->n_dosattr & SMB_EFA_DIRECTORY))) {
688		rt_value = TRUE;
689		goto done;
690	}
691
	/* Once a reparse point, always a reparse point */
693	if ((VTOSMB(vp)->n_dosattr &  SMB_EFA_REPARSE_POINT) != (fap->fa_attr & SMB_EFA_REPARSE_POINT)) {
694		rt_value = TRUE;
695		goto done;
696	}
697done:
698	if (rt_value) {
699		SMBWARNING_LOCK(VTOSMB(vp), "%s had node type and attr of %d 0x%x now its %d 0x%x\n",
700                        VTOSMB(vp)->n_name, node_vtype, VTOSMB(vp)->n_dosattr,
701                        fap->fa_vtype, fap->fa_attr);
702	}
703	return rt_value;
704}
705
/*
 * smbfs_nget
 *
 * When calling this routine remember that if you get a vpp back and no error,
 * then the smbnode is locked and you will need to unlock it.
 *
 * The calling routine must hold a reference on the share
 *
 */
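/*
 * Hedged caller sketch (the flag value and names are illustrative): on
 * success the vnode comes back locked and holding an iocount, so a typical
 * caller does something like
 *
 *     error = smbfs_nget(share, mp, dvp, name, nmlen, &fattr, &vp,
 *                        MAKEENTRY, 0, context);
 *     if (error == 0) {
 *         // ... use vp ...
 *         smbnode_unlock(VTOSMB(vp));  // returned locked by this routine
 *         vnode_put(vp);               // drop the iocount
 *     }
 */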
715int
716smbfs_nget(struct smb_share *share, struct mount *mp,
717           vnode_t dvp, const char *name, size_t nmlen,
718           struct smbfattr *fap, vnode_t *vpp,
719           uint32_t cnflags, uint32_t flags,
720           vfs_context_t context)
721{
722	struct smbmount *smp = VFSTOSMBFS(mp);
723	struct smbnode *np, *dnp;
724	vnode_t vp;
725	int error = 0;
726	uint64_t hashval;
727	struct vnode_fsparam vfsp;
728	int locked = 0;
729	struct componentname cnp;
730	trigger_info_t *ti;
731
732    /*
733     * Be careful as
734     * (1) dvp can be NULL
735     * (2) name can be NULL
736     * (3) fap can be NULL
737     */
738
739	*vpp = NULL;
740
741	if (vfs_isforce(mp)) {
742		return ENXIO;
743    }
744
745	if (!(flags & SMBFS_NGET_LOOKUP_ONLY)) {
746        /* dvp is only required if we are possibly creating the vnode */
747        if (smp->sm_rvp != NULL && dvp == NULL) {
748            return EINVAL;
749        }
750    }
751
752	if (nmlen == 2 && bcmp(name, "..", 2) == 0) {
753		SMBDEBUG("do not call me with dotdot!\n");
754		return EINVAL;
755	} else if (nmlen == 1 && name[0] == '.') {
756		SMBDEBUG("do not call me with dot!\n");
757		return (EINVAL);
758	}
759
760	dnp = dvp ? VTOSMB(dvp) : NULL;
761	if (dnp == NULL && dvp != NULL) {
762		SMBDEBUG("dead parent vnode\n");
763		return (EINVAL);
764	}
765
	/*
	 * If we are going to add it to the name cache, then make sure it's the name
	 * on the server that gets used
	 */
770	bzero(&cnp, sizeof(cnp));
771	cnp.cn_nameptr = (char *)name;
772	cnp.cn_namelen = (int)nmlen;
773	cnp.cn_flags = cnflags;
774
775	SMB_MALLOC(np, struct smbnode *, sizeof *np, M_SMBNODE, M_WAITOK | M_ZERO);
776
777    hashval = smbfs_hash(share, (fap ? fap->fa_ino: 0), name, nmlen);
778
779	if ((*vpp = smb_hashget(smp, dnp, hashval, name, nmlen,
780							share->ss_maxfilenamelen, 0, NULL)) != NULL) {
781        /* Found a pre existing vnode */
782		DBG_ASSERT(!vnode_isnamedstream(*vpp));
783
        /*
         * Must be a v_get and we have a blank fap except for the fa_ino, so don't
         * try to update the meta data cache for this vnode
         */
788        if (flags & SMBFS_NGET_NO_CACHE_UPDATE) {
            /* not going to create a vnode so don't need np */
790			SMB_FREE(np, M_SMBNODE);
791			return (0);
792        }
793
794		if (fap && node_vtype_changed(*vpp, vnode_vtype(*vpp), fap)) {
795			/*
796			 * The node we found has the wrong vtype. We need to remove this one
797			 * and create the new entry. Purge the old node from the name cache,
798			 * remove it from our hash table, and clear its cache timer.
799			 */
800			cache_purge(*vpp);
801			smb_vhashrem(VTOSMB(*vpp));
802			VTOSMB(*vpp)->attribute_cache_timer = 0;
803			VTOSMB(*vpp)->n_symlink_cache_timer = 0;
804			smbnode_unlock(VTOSMB(*vpp));	/* Release the smbnode lock */
805			vnode_put(*vpp);
806			/* Now fall through and create the node with the correct vtype */
807			*vpp = NULL;
808		}
809        else {
            /* not going to create a vnode so don't need np */
811			SMB_FREE(np, M_SMBNODE);
812
813			/* update the attr_cache info, this is never a stream node */
814			if (fap) {
815				smbfs_attr_cacheenter(share, *vpp, fap, FALSE, context);
816            }
817
818			if (dvp && (cnp.cn_flags & MAKEENTRY)) {
819				cache_enter(dvp, *vpp, &cnp);
820            }
821
822			return (0);
823		}
824	}
825
826	/*
827	 * If SMBFS_NGET_LOOKUP_ONLY set, then it is an explicit lookup
828	 * for an existing vnode. Return if the vnode does not already exist.
829	 */
830	if (flags & SMBFS_NGET_LOOKUP_ONLY) {
831		SMB_FREE(np, M_SMBNODE);
832		return (ENOENT);
833	}
834
835    if (fap == NULL) {
836        /* This should never happen */
837        SMBERROR("fap is NULL! \n");
838		SMB_FREE(np, M_SMBNODE);
839		return (ENOENT);
840    }
841
842    lck_rw_init(&np->n_rwlock, smbfs_rwlock_group, smbfs_lock_attr);
843	lck_rw_init(&np->n_name_rwlock, smbfs_rwlock_group, smbfs_lock_attr);
844	lck_rw_init(&np->n_parent_rwlock, smbfs_rwlock_group, smbfs_lock_attr);
845	(void) smbnode_lock(np, SMBFS_EXCLUSIVE_LOCK);
846	/* if we error out, don't forget to unlock this */
847	locked = 1;
848	np->n_lastvop = smbfs_nget;
849
	/*
	 * The node_vtype_changed routine looks at the attributes field to
	 * determine if the node has changed from being a reparse point. So before
	 * entering smbfs_attr_cacheenter we need to make sure that the attributes
	 * field has been set when the node is created.
	 *
	 * We only set the ReparseTag here; once a tag is set it's always set. We
	 * use node_vtype_changed to test if a reparse point has been removed.
	 */
859	np->n_reparse_tag = fap->fa_reparse_tag;
860	np->n_dosattr = fap->fa_attr;
861	np->n_vnode = NULL;	/* redundant, but emphatic! */
862	np->n_mount = smp;
863	np->n_size = fap->fa_size;
864	np->n_data_alloc = fap->fa_data_alloc;
865	np->n_ino = fap->fa_ino;
866
867    lck_rw_lock_exclusive(&np->n_name_rwlock);
868	np->n_name = smb_strndup(name, nmlen);
869    lck_rw_unlock_exclusive(&np->n_name_rwlock);
870
871	np->n_nmlen = nmlen;
872	/* Default to what we can do and Windows support */
873	np->n_flags_mask = EXT_REQUIRED_BY_MAC;
874
875    /*
876     * n_uid and n_gid are set to KAUTH_UID_NONE/KAUTH_GID_NONE as the
877     * default.
878     *
879     * If ACLs are retrieved for this node, then we will replace n_uid/n_gid
880     * with a uid/gid that was mapped from the SID.
881     *
882     * When asked for the uid/gid, if they are default values, we return
883     * uid/gid of the mounting user. If they are not set to default values,
884     * then ACLs must have been retrieved and the uid/gid set, so we return
885     * what ever value is set in n_uid/n_gid.
886     */
887	np->n_uid = KAUTH_UID_NONE;
888	np->n_gid = KAUTH_GID_NONE;
889
    /*
     * n_nfs_uid/n_nfs_gid are the uid/gid from ACLs and from the NFS ACE.
     * We don't really do much with it because OS X <-> Windows, we can't really
     * trust its value. OS X <-> OS X we could trust its value.
     */
895	np->n_nfs_uid = KAUTH_UID_NONE;
896	np->n_nfs_gid = KAUTH_GID_NONE;
897	SET(np->n_flag, NALLOC);
898	smb_vhashadd(np, hashval);
899	if (dvp) {
900        lck_rw_lock_exclusive(&np->n_parent_rwlock);
901		np->n_parent = dnp;
902        lck_rw_unlock_exclusive(&np->n_parent_rwlock);
903
904		if (!vnode_isvroot(dvp)) {
905			/* Make sure we can get the vnode, we could have an unmount about to happen */
906			if (vnode_get(dvp) == 0) {
907				if (vnode_ref(dvp) == 0) {
908                    /* If we can get a refcnt then mark the child */
909					np->n_flag |= NREFPARENT;
910                    vnode_put(dvp);
911
912                    /* Increment parent node's child refcnt */
913                    OSIncrementAtomic(&dnp->n_child_refcnt);
914                } else {
915                    vnode_put(dvp);
916                    error = EINVAL;
917                    goto errout;
918                }
919			} else {
920                error = EINVAL;
921                goto errout;
922            }
923		}
924	}
925
926	vfsp.vnfs_mp = mp;
927	vfsp.vnfs_vtype = fap->fa_vtype;
928	vfsp.vnfs_str = "smbfs";
929	vfsp.vnfs_dvp = dvp;
930	vfsp.vnfs_fsnode = np;
931	/* This will make sure we always have  a vp->v_name */
932	vfsp.vnfs_cnp = &cnp;
933	vfsp.vnfs_vops = smbfs_vnodeop_p;
934	vfsp.vnfs_rdev = 0;	/* no VBLK or VCHR support */
935	vfsp.vnfs_flags = (dvp && (cnp.cn_flags & MAKEENTRY)) ? 0 : VNFS_NOCACHE;
936	vfsp.vnfs_markroot = (np->n_ino == smp->sm_root_ino);
937	vfsp.vnfs_marksystem = 0;
938
	/*
	 * We are now safe to do lookups with the node. We need to be careful with
	 * the n_vnode field and we should always check to make sure it's not null
	 * before accessing that field. The current code always makes that check.
	 *
	 * So if this is the root vnode then we need to make sure we can access it
	 * across the network without any errors. We keep a reference on the root vnode
	 * so this only happens once at mount time.
	 *
	 * If this is a regular file then we need to see if it's one of our special
	 * Windows symlink files.
	 */
951	if ((vfsp.vnfs_vtype == VDIR) && (dvp == NULL) && (smp->sm_rvp == NULL) &&
952		(np->n_ino == smp->sm_root_ino)) {
953        /* Lookup the root vnode */
954		error = smbfs_lookup(share, np, NULL, NULL, fap, context);
955		if (error) {
956			goto errout;
957        }
958
959        /* Update the root vnode hash value */
960        smb_vhashrem(np);
961
962        if (!(SSTOVC(share)->vc_misc_flags & SMBV_HAS_FILEIDS)) {
963            /*
964             * Server does not support File IDs, so set root vnode File ID to
965             * be SMBFS_ROOT_INO */
966            fap->fa_ino = SMBFS_ROOT_INO;
967        }
968
969        hashval = smbfs_hash(share, fap->fa_ino, name, nmlen);
970
971        /* Update the root vnode File ID */
972        smp->sm_root_ino = np->n_ino = fap->fa_ino;
973
974        smb_vhashadd(np, hashval);
975	} else if ((vfsp.vnfs_vtype == VREG) && (np->n_size == SMB_SYMLEN)) {
976		int symlen = 0;
977		DBG_ASSERT(dvp);
978		if (smb_check_for_windows_symlink(share, np, &symlen, context) == 0) {
979			vfsp.vnfs_vtype = VLNK;
980			fap->fa_valid_mask |= FA_VTYPE_VALID;
981			fap->fa_vtype = VLNK;
982			np->n_size = symlen;
983			np->n_flag |= NWINDOWSYMLNK;
984		}
985	}
986	vfsp.vnfs_filesize = np->n_size;
987
988	if ((np->n_dosattr & SMB_EFA_REPARSE_POINT) &&
989		(np->n_reparse_tag != IO_REPARSE_TAG_DFS) &&
990		(np->n_reparse_tag != IO_REPARSE_TAG_SYMLINK))  {
991        SMBWARNING_LOCK(np, "%s - unknown reparse point tag 0x%x\n", np->n_name, np->n_reparse_tag);
992	}
993
994	if ((np->n_dosattr & SMB_EFA_REPARSE_POINT) &&
995		(np->n_reparse_tag == IO_REPARSE_TAG_DFS)) {
996		struct vnode_trigger_param vtp;
997
998		bcopy(&vfsp, &vtp.vnt_params, sizeof(vfsp));
999		ti = trigger_new(&vtp, smbfs_trigger_get_mount_args, smbfs_trigger_rel_mount_args);
1000		error = vnode_create(VNCREATE_TRIGGER, (uint32_t)VNCREATE_TRIGGER_SIZE, &vtp, &vp);
1001		if (error)
1002			trigger_free(ti);
1003	} else {
1004		error = vnode_create(VNCREATE_FLAVOR, (uint32_t)VCREATESIZE, &vfsp, &vp);
1005	}
1006
1007	if (error)
1008		goto errout;
1009	vnode_settag(vp, VT_CIFS);
1010	np->n_vnode = vp;
1011	/*
1012	 * We now know what type of node we have so set the mode bit here. We never
1013	 * want to change this for the life of this node. If the type changes on
1014	 * the server then we will blow away this node and create a new one.
1015	 */
1016	switch (vnode_vtype(vp)) {
1017	    case VREG:
1018			np->n_mode |= S_IFREG;
1019			break;
1020	    case VLNK:
1021			np->n_mode |= S_IFLNK;
1022			break;
1023	    case VDIR:
1024			np->n_mode |= S_IFDIR;
1025			break;
1026	    default:
1027			SMBERROR("vnode_vtype %d\n", vnode_vtype(vp));
1028			np->n_mode |= S_IFREG;	/* Can't happen, but just to be safe */
1029	}
1030
1031	/* Initialize the lock used for the open state, open deny list and resource size/timer */
1032	if (!vnode_isdir(vp)) {
1033		lck_mtx_init(&np->f_openStateLock, smbfs_mutex_group, smbfs_lock_attr);
1034		lck_mtx_init(&np->f_clusterWriteLock, smbfs_mutex_group, smbfs_lock_attr);
1035		lck_mtx_init(&np->rfrkMetaLock, smbfs_mutex_group, smbfs_lock_attr);
1036		lck_mtx_init(&np->f_openDenyListLock, smbfs_mutex_group, smbfs_lock_attr);
1037	}
1038
1039	lck_mtx_init(&np->f_ACLCacheLock, smbfs_mutex_group, smbfs_lock_attr);
1040	/* update the attr_cache info, this is never a stream node */
1041	smbfs_attr_cacheenter(share, vp, fap, FALSE, context);
1042
1043	*vpp = vp;
1044	CLR(np->n_flag, NALLOC);
1045        if (ISSET(np->n_flag, NWALLOC))
1046                wakeup(np);
1047	return 0;
1048
1049errout:
1050	if (np->n_flag & NREFPARENT) {
1051		if (vnode_get(dvp) == 0) {
1052			vnode_rele(dvp);
1053			vnode_put(dvp);
1054		}
1055		np->n_flag &= ~NREFPARENT;
1056
1057        /* Remove the child refcnt from the parent we just added above */
1058        OSDecrementAtomic(&dnp->n_child_refcnt);
1059	}
1060
1061	smb_vhashrem(np);
1062
1063	if (locked == 1)
1064		smbnode_unlock(np);	/* Release the smbnode lock */
1065
1066	if (ISSET(np->n_flag, NWALLOC))
1067		wakeup(np);
1068
1069    lck_rw_lock_exclusive(&np->n_name_rwlock);
1070    if (np->n_name != NULL) {
1071        SMB_FREE(np->n_name, M_SMBNODENAME);
        np->n_name = NULL; /* Catch anyone still referring to np->n_name */
1073    }
1074    lck_rw_unlock_exclusive(&np->n_name_rwlock);
1075
1076	lck_rw_destroy(&np->n_rwlock, smbfs_rwlock_group);
1077	lck_rw_destroy(&np->n_name_rwlock, smbfs_rwlock_group);
1078	lck_rw_destroy(&np->n_parent_rwlock, smbfs_rwlock_group);
1079
1080	SMB_FREE(np, M_SMBNODE);
1081
1082	return error;
1083}
1084
/*
 * smbfs_find_vgetstrm
 *
 * When calling this routine remember that if you get a vpp back and no error,
 * then the smbnode is locked and you will need to unlock it.
 */
1091vnode_t
1092smbfs_find_vgetstrm(struct smbmount *smp, struct smbnode *np, const char *sname,
1093					size_t maxfilenamelen)
1094{
1095	uint64_t hashval;
1096    vnode_t ret_vnode = NULL;
1097
1098    lck_rw_lock_shared(&np->n_name_rwlock);
1099
1100    hashval = smbfs_hash(smp->sm_share, np->n_ino, np->n_name, np->n_nmlen);
1101	ret_vnode = smb_hashget(smp, np, hashval, np->n_name, np->n_nmlen, maxfilenamelen,
1102					   N_ISSTREAM, sname);
1103
1104    lck_rw_unlock_shared(&np->n_name_rwlock);
1105
1106	return(ret_vnode);
1107}
1108
/*
 * smbfs_vgetstrm
 *
 * When calling this routine remember that if you get a vpp back and no error,
 * then the smbnode is locked and you will need to unlock it.
 *
 * The calling routine must hold a reference on the share
 *
 */
1118int
1119smbfs_vgetstrm(struct smb_share *share, struct smbmount *smp, vnode_t vp,
1120			   vnode_t *svpp, struct smbfattr *fap, const char *sname)
1121{
1122	struct smbnode *np, *snp;
1123	int error = 0;
1124	uint64_t hashval;
1125	struct vnode_fsparam vfsp;
1126	int locked = 0;
1127	struct componentname cnp;
1128	size_t maxfilenamelen = share->ss_maxfilenamelen;
1129    char *tmp_namep = NULL;
1130
1131	/* Better have a root vnode at this point */
1132	DBG_ASSERT(smp->sm_rvp);
1133	/* Better have a parent vnode at this point */
1134	DBG_ASSERT(vp);
1135	/* Parent vnode better not be a directory */
1136	DBG_ASSERT((!vnode_isdir(vp)));
1137	/* Parent vnode better not be a stream */
1138	DBG_ASSERT(!vnode_isnamedstream(vp));
1139	np = VTOSMB(vp);
1140	*svpp = NULL;
1141
1142	if (vfs_isforce(smp->sm_mp))
1143		return ENXIO;
	/* Make sure we have the correct name; always return the xattr name */
1145	bzero(&cnp, sizeof(cnp));
1146	cnp.cn_nameiop = LOOKUP;
1147	cnp.cn_flags = ISLASTCN;
1148	cnp.cn_pnlen = MAXPATHLEN;
1149	SMB_MALLOC (cnp.cn_pnbuf, caddr_t, MAXPATHLEN, M_TEMP, M_WAITOK);
1150	if (bcmp(sname, SFM_RESOURCEFORK_NAME, sizeof(SFM_RESOURCEFORK_NAME)) == 0) {
1151		cnp.cn_nameptr = cnp.cn_pnbuf;
1152        lck_rw_lock_shared(&np->n_name_rwlock);
1153		cnp.cn_namelen = snprintf(cnp.cn_nameptr, MAXPATHLEN, "%s%s", np->n_name,
1154								  _PATH_RSRCFORKSPEC);
1155        lck_rw_unlock_shared(&np->n_name_rwlock);
1156	}
1157    else {
1158		cnp.cn_nameptr = cnp.cn_pnbuf;
1159        lck_rw_lock_shared(&np->n_name_rwlock);
1160		cnp.cn_namelen = snprintf(cnp.cn_nameptr, MAXPATHLEN, "%s%s%s", np->n_name,
1161								  _PATH_FORKSPECIFIER, sname);
1162        lck_rw_unlock_shared(&np->n_name_rwlock);
1163		SMBWARNING("Creating non resource fork named stream: %s\n", cnp.cn_nameptr);
1164	}
1165
1166	SMB_MALLOC(snp, struct smbnode *, sizeof *snp, M_SMBNODE, M_WAITOK);
1167
1168    lck_rw_lock_shared(&np->n_name_rwlock);
1169    hashval = smbfs_hash(share, fap->fa_ino, np->n_name, np->n_nmlen);
1170	if ((*svpp = smb_hashget(smp, np, hashval, np->n_name, np->n_nmlen,
1171							 maxfilenamelen, N_ISSTREAM, sname)) != NULL) {
1172        lck_rw_unlock_shared(&np->n_name_rwlock);
1173		SMB_FREE(snp, M_SMBNODE);
		/*
		 * If this is the resource stream then the parent's resource fork size
		 * has already been updated. The calling routine already updated it.
		 * Remember that the parent is currently locked. smbfs_attr_cacheenter
		 * can lock the parent if we tell it to update the parent, so never tell
		 * it to update the parent in this routine.
		 */
1181		smbfs_attr_cacheenter(share, *svpp, fap, FALSE, NULL);
1182		goto done;
1183	}
1184    lck_rw_unlock_shared(&np->n_name_rwlock);
1185
1186	bzero(snp, sizeof(*snp));
1187	lck_rw_init(&snp->n_rwlock, smbfs_rwlock_group, smbfs_lock_attr);
1188	lck_rw_init(&snp->n_name_rwlock, smbfs_rwlock_group, smbfs_lock_attr);
1189	lck_rw_init(&snp->n_parent_rwlock, smbfs_rwlock_group, smbfs_lock_attr);
1190	(void) smbnode_lock(snp, SMBFS_EXCLUSIVE_LOCK);
1191	locked = 1;
1192	snp->n_lastvop = smbfs_vgetstrm;
1193
1194	snp->n_mount = smp;
1195	snp->n_size =  fap->fa_size;
1196	snp->n_data_alloc = fap->fa_data_alloc;
1197	snp->n_ino = np->n_ino;
1198
1199    lck_rw_lock_shared(&np->n_name_rwlock);
1200    tmp_namep = smb_strndup(np->n_name, np->n_nmlen);
1201    lck_rw_unlock_shared(&np->n_name_rwlock);
1202
1203    lck_rw_lock_exclusive(&snp->n_name_rwlock);
1204	snp->n_name = tmp_namep;
1205    lck_rw_unlock_exclusive(&snp->n_name_rwlock);
1206
1207	snp->n_nmlen = np->n_nmlen;
1208	snp->n_flags_mask = np->n_flags_mask;
1209	snp->n_uid = np->n_uid;
1210	snp->n_gid = np->n_gid;
1211	snp->n_nfs_uid = np->n_nfs_uid;
	snp->n_nfs_gid = np->n_nfs_gid;
1213
1214    lck_rw_lock_exclusive(&snp->n_parent_rwlock);
1215	snp->n_parent = np;
1216    lck_rw_unlock_exclusive(&snp->n_parent_rwlock);
1217
1218	/* Only a stream node can have a stream name */
1219	snp->n_snmlen = strnlen(sname, maxfilenamelen+1);
1220    lck_rw_lock_exclusive(&snp->n_name_rwlock);
1221	snp->n_sname = smb_strndup(sname, snp->n_snmlen);
1222    lck_rw_unlock_exclusive(&snp->n_name_rwlock);
1223
1224	SET(snp->n_flag, N_ISSTREAM);
1225	/* Special case that I would like to remove some day */
1226	if (bcmp(sname, SFM_RESOURCEFORK_NAME, sizeof(SFM_RESOURCEFORK_NAME)) == 0)
1227		SET(snp->n_flag, N_ISRSRCFRK);
1228	SET(snp->n_flag, NALLOC);
1229	smb_vhashadd(snp, hashval);
1230
1231#ifdef _NOT_YET_
1232    /* Note: Temporarily commenting this out, see <rdar://problem/10695860> */
1233
1234    /* Make sure we can get the parent vnode, we could have an unmount about to happen */
1235    if (!vnode_isvroot(vp)) {
1236        if (vnode_get(vp) == 0) {
1237            if (vnode_ref(vp) == 0) {
1238                /* If we can get a refcnt then mark the child */
1239                snp->n_flag |= NREFPARENT;
1240                vnode_put(vp);
1241
1242                /* Increment parent node's child refcnt */
1243                OSIncrementAtomic(&np->n_child_refcnt);
1244            } else {
1245                vnode_put(vp);
1246                error = EINVAL;
1247                goto errout;
1248            }
1249        } else {
1250            error = EINVAL;
1251            goto errout;
1252        }
1253    }
1254#endif
1255	vfsp.vnfs_mp = smp->sm_mp;
1256	vfsp.vnfs_vtype = VREG;
1257	vfsp.vnfs_str = "smbfs";
1258	vfsp.vnfs_dvp = NULL;
1259	vfsp.vnfs_fsnode = snp;
1260	/* This will make sure we always have  a vp->v_name */
1261	vfsp.vnfs_cnp = &cnp;
1262	vfsp.vnfs_vops = smbfs_vnodeop_p;
1263	vfsp.vnfs_rdev = 0;	/* no VBLK or VCHR support */
1264	vfsp.vnfs_flags = VNFS_NOCACHE;
1265	vfsp.vnfs_markroot = 0;
1266	vfsp.vnfs_marksystem = 0;
1267	vfsp.vnfs_filesize = fap->fa_size;
1268
1269	error = vnode_create(VNCREATE_FLAVOR, (uint32_t)VCREATESIZE, &vfsp, svpp);
1270	if (error)
1271		goto errout;
1272	vnode_settag(*svpp, VT_CIFS);
1273	snp->n_vnode = *svpp;
	/*
	 * We now know what type of node we have so set the mode bit here. We never
	 * want to change this for the life of this node. If the type changes on
	 * the server then we will blow away this node and create a new one.
	 *
	 * Streams are always regular files and have the parent node's access.
	 *
	 */
1282	snp->n_mode = S_IFREG | (np->n_mode & ACCESSPERMS);
1283
1284	lck_mtx_init(&snp->f_openStateLock, smbfs_mutex_group, smbfs_lock_attr);
1285	lck_mtx_init(&snp->f_clusterWriteLock, smbfs_mutex_group, smbfs_lock_attr);
1286	lck_mtx_init(&snp->f_openDenyListLock, smbfs_mutex_group, smbfs_lock_attr);
	/*
	 * If this is the resource stream then the parent's resource fork size has
	 * already been updated. The calling routine already updated it. Remember that
	 * the parent is currently locked. smbfs_attr_cacheenter can lock the parent
	 * if we tell it to update the parent, so never tell it to update the parent
	 * in this routine.
	 */
1294	smbfs_attr_cacheenter(share, *svpp, fap, FALSE, NULL);
1295
1296	CLR(snp->n_flag, NALLOC);
1297	if (ISSET(snp->n_flag, NWALLOC))
1298		wakeup(snp);
1299	goto done;
1300
1301errout:
1302#ifdef _NOT_YET_
1303    /* Note: Temporarily commenting this out, see <rdar://problem/10695860> */
1304	if (snp->n_flag & NREFPARENT) {
1305		if (vnode_get(vp) == 0) {
1306			vnode_rele(vp);
1307			vnode_put(vp);
1308		}
1309		snp->n_flag &= ~NREFPARENT;
1310
1311        /* Remove the child refcnt from the parent we just added above */
1312        OSDecrementAtomic(&np->n_child_refcnt);
1313	}
1314#endif
1315
1316	smb_vhashrem(snp);
1317
1318	if (locked == 1)
1319		smbnode_unlock(snp);	/* Release the smbnode lock */
1320
1321	if (ISSET(snp->n_flag, NWALLOC))
1322		wakeup(snp);
1323
1324    lck_rw_lock_exclusive(&snp->n_name_rwlock);
1325    if (snp->n_name != NULL) {
1326        SMB_FREE(snp->n_name, M_SMBNODENAME);
        snp->n_name = NULL; /* Catch anyone still referring to snp->n_name */
1328    }
1329
1330    if (snp->n_sname != NULL) {
1331        SMB_FREE(snp->n_sname, M_SMBNODENAME);
        snp->n_sname = NULL; /* Catch anyone still referring to snp->n_sname */
1333    }
1334    lck_rw_unlock_exclusive(&snp->n_name_rwlock);
1335
1336    lck_rw_destroy(&snp->n_rwlock, smbfs_rwlock_group);
1337	lck_rw_destroy(&snp->n_name_rwlock, smbfs_rwlock_group);
1338	lck_rw_destroy(&snp->n_parent_rwlock, smbfs_rwlock_group);
1339
1340	SMB_FREE(snp, M_SMBNODE);
1341
1342done:
1343	SMB_FREE(cnp.cn_pnbuf, M_TEMP);
1344	return error;
1345}
1346
/*
 * Update the node's resource fork size if needed.
 * NOTE: Remember the parent can lock the child while holding its lock, but the
 * child cannot lock the parent unless the child is not holding its lock. So
 * this routine is safe, because the parent is locking the child.
 *
 * The calling routine must hold a reference on the share
 *
 */
1356int
1357smb_get_rsrcfrk_size(struct smb_share *share, vnode_t vp, vfs_context_t context)
1358{
1359	struct smbnode *np = VTOSMB(vp);
1360	uint64_t strmsize = 0;
1361	uint64_t strmsize_alloc = 0;
1362	time_t attrtimeo;
1363	struct timespec ts;
1364	int error = 0;
1365	time_t rfrk_cache_timer;
1366	struct timespec reqtime;
1367    uint32_t stream_flags = 0;
1368	int use_cached_data = 0;
1369
1370    /* If we are in reconnect, use cached data if we have it */
1371    if (np->rfrk_cache_timer != 0) {
1372        use_cached_data = (share->ss_flags & SMBS_RECONNECTING);
1373    }
1374
1375	nanouptime(&reqtime);
1376
1377    /* Check to see if the cache has timed out */
1378    SMB_CACHE_TIME(ts, np, attrtimeo);
1379
1380	lck_mtx_lock(&np->rfrkMetaLock);
1381	rfrk_cache_timer = ts.tv_sec - np->rfrk_cache_timer;
1382	lck_mtx_unlock(&np->rfrkMetaLock);
1383
1384	if ((rfrk_cache_timer > attrtimeo) && !use_cached_data) {
1385        /* Cache has expired go get the resource fork size. */
1386		error = smbfs_smb_qstreaminfo(share, np, VREG,
1387                                      NULL, 0,
1388                                      SFM_RESOURCEFORK_NAME,
1389                                      NULL, NULL,
1390                                      &strmsize, &strmsize_alloc,
1391                                      &stream_flags, NULL,
1392                                      context);
1393
1394        if ((error == ETIMEDOUT) && (np->rfrk_cache_timer != 0)) {
1395            /* Just return the cached data */
1396            error = 0;
1397            goto done;
1398        }
1399
		/*
		 * We got the resource stream size from the server; now update the resource
		 * stream if we have one. Search our hash table and see if we have a stream.
		 * If we find one then smbfs_find_vgetstrm will return it with a vnode_get
		 * and an smb node lock on it.
		 */
1406		if (error == 0) {
1407			struct smbmount *smp = VTOSMBFS(vp);
1408			vnode_t svpp = smbfs_find_vgetstrm(smp, np, SFM_RESOURCEFORK_NAME,
1409											   share->ss_maxfilenamelen);
1410
1411			if (svpp) {
1412				if (smbfs_update_size(VTOSMB(svpp), &reqtime, strmsize) == TRUE) {
1413					/* Remember the only attribute for a stream is its size */
1414					nanouptime(&ts);
1415					VTOSMB(svpp)->attribute_cache_timer = ts.tv_sec;
1416				}
1417				smbnode_unlock(VTOSMB(svpp));
1418				vnode_put(svpp);
1419			}
1420		}
1421        else {
			/*
			 * Remember that smbfs_smb_qstreaminfo will update the resource fork's
			 * cache and size if it finds the resource fork. We are handling the
			 * negative cache timer here. If we get an error then there is no
			 * resource fork, so update the cache.
			 */
1428			lck_mtx_lock(&np->rfrkMetaLock);
1429			np->rfrk_size = 0;
1430			np->rfrk_alloc_size = 0;
1431			nanouptime(&ts);
1432			np->rfrk_cache_timer = ts.tv_sec;
1433			lck_mtx_unlock(&np->rfrkMetaLock);
1434		}
1435	}
1436
1437done:
1438	return(error);
1439}
1440
/*
 * Anytime the stream is updated we need to update the parent's meta data. In
 * the resource fork case this means updating the resource size and the resource
 * size cache timer. For other streams it just means clearing the meta data cache
 * timer. We can update the parent's resource stream size and resource cache timer
 * here because we don't need the parent locked in this case. We use a different
 * lock when updating the parent's resource size and resource cache timer. Since we
 * cannot lock the parent node here, just return the parent vnode so the calling
 * process can handle clearing the meta data cache timer.
 *
 * NOTE:	smbfs_vnop_pageout calls this routine without the node locked. It is
 *			not setting the size so this should be safe. If anyone edits this
 *			routine they need to keep in mind that it can be entered without a lock.
 */
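/*
 * Hedged usage note: vnode_getparent() returns the parent with an iocount, so
 * whoever calls this routine is expected to vnode_put() the returned parent
 * (when it is non-NULL) after clearing the parent's meta data cache timer:
 *
 *     parent_vp = smb_update_rsrc_and_getparent(vp, TRUE);
 *     if (parent_vp) {
 *         VTOSMB(parent_vp)->attribute_cache_timer = 0;  // illustrative
 *         vnode_put(parent_vp);
 *     }
 */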
1455vnode_t
1456smb_update_rsrc_and_getparent(vnode_t vp, int setsize)
1457{
1458	struct smbnode *np = VTOSMB(vp);
1459	vnode_t parent_vp = vnode_getparent(vp);
1460	struct timespec ts;
1461
	/* If this is a resource then update the parent's resource size and cache timer */
1463	if ((parent_vp) && (np->n_flag & N_ISRSRCFRK)) {
1464		lck_mtx_lock(&VTOSMB(parent_vp)->rfrkMetaLock);
1465
1466		/* They want us to update the size */
1467		if (setsize) {
1468			VTOSMB(parent_vp)->rfrk_size = np->n_size;
1469            /* assume alloc size same as new size */
1470			VTOSMB(parent_vp)->rfrk_alloc_size = np->n_size;
1471			nanouptime(&ts);
1472			VTOSMB(parent_vp)->rfrk_cache_timer = ts.tv_sec;
1473		} else if (VTOSMB(parent_vp)->rfrk_size != np->n_size) {
1474			/* Something changed just reset the cache timer */
1475			VTOSMB(parent_vp)->rfrk_cache_timer = 0;
1476		}
1477		lck_mtx_unlock(&VTOSMB(parent_vp)->rfrkMetaLock);
1478	}
1479	return(parent_vp);
1480}
1481
1482static int
1483smb_gid_match(struct smbmount *smp, u_int64_t node_gid)
1484{
1485    u_int32_t ii;
1486
1487	if (node_gid == smp->ntwrk_gid)
1488		return TRUE;
1489
1490	for (ii=0; ii < smp->ntwrk_cnt_gid; ii++)
1491		if (node_gid == smp->ntwrk_gids[ii])
1492			return TRUE;
1493	return FALSE;
1494}
1495
/*
 * Check to see if the user has the requested access privileges on the node.
 * Someday we may have a call to check the access across the network, but for
 * now all we can do is check the posix mode bits.
 *
 * NOTE: rq_mode should be one of the S_IRWXO modes.
 */
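/*
 * Hedged example: rq_mode is expressed in "other" (S_IRWXO) terms and the
 * routine shifts it into the owner or group position as needed, so
 *
 *     smb_check_posix_access(context, np, S_IROTH)
 *
 * returns TRUE when the calling user may read the node: it checks
 * S_IROTH << 6 (S_IRUSR) for the owner, S_IROTH << 3 (S_IRGRP) for a group
 * member, and S_IROTH otherwise.
 */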
1503int
1504smb_check_posix_access(vfs_context_t context, struct smbnode * np,
1505					   mode_t rq_mode)
1506{
1507	kauth_cred_t cred = vfs_context_ucred(context);
1508	uid_t	user = kauth_cred_getuid (cred);
1509	int		inGroup = 0;
1510
1511	kauth_cred_ismember_gid(cred, np->n_gid, &inGroup);
1512	if (user == np->n_uid) {
1513		if (np->n_mode & (rq_mode << 6))
1514			return TRUE;
1515	} else if (inGroup) {
1516		if (np->n_mode & (rq_mode << 3))
1517			return TRUE;
1518	} else {
1519		if (np->n_mode & rq_mode)
1520			return TRUE;
1521	}
1522	return FALSE;
1523}
1524
1525void smb_get_uid_gid_mode(struct smb_share *share, struct smbmount *smp,
1526                          struct smbfattr *fap, uint32_t flags,
1527                          uid_t *uid, gid_t *gid, mode_t *mode)
1528{
1529    uint16_t temp_mode = 0;
1530
1531	if (fap->fa_unix) {
1532        /* Only SMB 1 supports Unix Extensions */
1533		if ((fap->fa_valid_mask & FA_UNIX_MODES_VALID) != FA_UNIX_MODES_VALID) {
			/*
			 * The call made to get this information did not contain the uid,
			 * gid or posix modes. So just keep using the ones we have, unless
			 * we have uninitialized values; in that case use the default values.
			 */
1539			if (*uid == KAUTH_UID_NONE) {
1540				*uid = smp->sm_args.uid;
1541				if (flags & SMBFS_GET_UGM_IS_DIR) {
1542					*mode |= smp->sm_args.dir_mode;
1543				} else {
1544					*mode |= smp->sm_args.file_mode;
1545				}
1546			}
1547
1548			if (*gid == KAUTH_GID_NONE) {
1549				*gid = smp->sm_args.gid;
1550			}
1551		} else if (smp->sm_args.altflags & SMBFS_MNT_TIME_MACHINE) {
1552			/* Remove any existing modes. */
1553			*mode &= ~ACCESSPERMS;
1554
1555			/* Just return what was passed into us */
1556			*uid = smp->sm_args.uid;
1557			*gid = smp->sm_args.gid;
1558			*mode |= (mode_t)(fap->fa_permissions & ACCESSPERMS);
1559		} else if (share->ss_attributes & FILE_PERSISTENT_ACLS) {
1560			/* Remove any existing modes. */
1561			*mode &= ~ACCESSPERMS;
1562
			/*
			 * The server supports the uid, gid and posix modes, so use the
			 * ones returned in the lookup call. If mapping, then use the
			 * mounting user's.
			 */
1568			if ((smp->sm_flags & MNT_MAPS_NETWORK_LOCAL_USER) &&
1569				(smp->ntwrk_uid == fap->fa_uid)) {
1570				*uid = smp->sm_args.uid;
1571				*gid = smp->sm_args.gid;
1572			}
1573            else {
1574				*uid = (uid_t)fap->fa_uid;
1575				*gid = (gid_t)fap->fa_gid;
1576			}
1577			*mode |= (mode_t)(fap->fa_permissions & ACCESSPERMS);
1578		} else if ((fap->fa_permissions & ACCESSPERMS) &&
1579				   (smp->sm_args.uid == (uid_t)smp->ntwrk_uid) &&
1580				   (smp->sm_args.gid == (gid_t)smp->ntwrk_gid)) {
1581			/* Remove any existing modes. */
1582			*mode &= ~ACCESSPERMS;
1583
1584			/*
1585			 * The server gave us POSIX modes and the local user matches the network
1586			 * user, so assume they are in the same directory name space.
1587			 */
1588			*uid = (uid_t)fap->fa_uid;
1589			*gid = (gid_t)fap->fa_gid;
1590			*mode |= (mode_t)(fap->fa_permissions & ACCESSPERMS);
1591		}
1592        else {
1593			int uid_match = (fap->fa_uid == smp->ntwrk_uid);
1594			int gid_match = smb_gid_match(smp, fap->fa_gid);
1595
1596			/* Remove any existing modes. */
1597			*mode &= ~ACCESSPERMS;
1598
1599			*uid = smp->sm_args.uid;
1600			*gid = smp->sm_args.gid;
1601
			/*
			 * We have no idea, so let the server handle any access issues. This
			 * is safe because we only allow root and the user that mounted the
			 * volume to have access to this mount point.
			 */
1607			if ((fap->fa_permissions & ACCESSPERMS) == 0)
1608				fap->fa_permissions = ACCESSPERMS;
1609			if (!uid_match && !gid_match) {
1610				/* Use other perms */
1611				*mode |= (mode_t)(fap->fa_permissions & S_IRWXO);
1612
1613				/* use other for group */
1614				*mode |= (mode_t)((fap->fa_permissions & S_IRWXO) << 3);
1615
1616				/* use other for owner */
1617				*mode |= (mode_t)((fap->fa_permissions & S_IRWXO) << 6);
1618			} else if (!uid_match && gid_match) {
1619				/* Use group and other perms  */
1620				*mode |= (mode_t)(fap->fa_permissions & (S_IRWXG | S_IRWXO));
1621
1622				/* use group for owner */
1623				*mode |= (mode_t)((fap->fa_permissions & S_IRWXG) <<  3);
1624			} else if (uid_match && !gid_match) {
1625				/* Use owner and other perms */
1626				*mode |= (mode_t)(fap->fa_permissions & (S_IRWXU | S_IRWXO));
1627
1628				/* use other for group */
1629				*mode |= (mode_t)((fap->fa_permissions & S_IRWXO) << 3);
1630			} else {
1631				/* Use owner, group and other perms */
1632				*mode |= (mode_t)(fap->fa_permissions & ACCESSPERMS);
1633			}
1634		}
1635	}
1636    else {
1637        /*
1638         * See comments in smbfs_nget about n_uid and n_gid and
1639         * KAUTH_UID_NONE/KAUTH_GID_NONE default values.
1640         */
1641        if ((*uid == KAUTH_UID_NONE) || (*gid == KAUTH_GID_NONE)) {
1642            /*
1643             * Either ACLs are off or no ACL retrieved for this item.
1644             * Return the mounting user uid/gid
1645             */
1646            *uid = smp->sm_args.uid;
1647            *gid = smp->sm_args.gid;
1648        }
1649        else {
1650            /*
1651             * uid/gid must have been set by a previous Get ACL, so just return
1652             * their current value.
1653             */
1654        }
1655
1656        /* Figure out the mode */
1657        if (fap->fa_valid_mask & FA_UNIX_MODES_VALID) {
1658            /*
1659             * Server gave us Posix modes via AAPL ReadDirAttr extension
1660             */
1661
1662            /* Remove any existing modes. */
1663            *mode &= ~ACCESSPERMS;
1664
1665            temp_mode = fap->fa_permissions;
1666            *mode |= (temp_mode & ACCESSPERMS); /* only take access perms */
1667        }
1668        else {
1669            if (flags & SMBFS_GET_UGM_REMOVE_POSIX_MODES) {
1670                /* Remove any existing modes. */
1671                *mode &= ~ACCESSPERMS;
1672                /*
1673                 * The system just can't handle posix modes of zero. We now support
1674                 * maximal access, so just dummy up the posix modes so copies work
1675                 * when all you have is inherited ACLs.
1676                 */
1677                if (flags & SMBFS_GET_UGM_IS_DIR) {
1678                    *mode |= smp->sm_args.dir_mode;
1679                }
1680                else {
1681                    /* symlink or regular file */
1682                    *mode |= smp->sm_args.file_mode;
1683                }
1684            }
1685        }
1686	}
1687}
1688
1689/*
1690 * Check to see if the immutable bit should be set on this node.
1691 *
1692 * SMB_EFA_RDONLY ~ UF_IMMUTABLE
1693 *
1694 * We treat the SMB_EFA_RDONLY as the immutable flag. This allows
1695 * us to support the finder lock bit and makes us follow the
1696 * MSDOS code model. See msdosfs project.
1697 *
 * NOTE: The read-only flag does not exactly follow the lock/immutable bit.
1699 *
1700 * See Radar 5582956 for more details.
1701 *
 * When dealing with Windows servers the read-only bit for folders does not
 * mean the same thing as it does for files. Doing that translation was
 * confusing customers and really didn't work the way Mac users would expect.
1705 */
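/*
 * A sketch of the resulting behavior, derived from the checks below: on an
 * SMB 2/3 share a plain file with SMB_EFA_RDONLY set is reported immutable,
 * while a directory with the same bit is only reported immutable when the
 * server is a UNIX server. Over SMB 1 the directory case additionally
 * requires UNIX Info2 support or a Darwin server.
 */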
1706Boolean
1707node_isimmutable(struct smb_share *share, vnode_t vp, struct smbfattr *fap)
1708{
1709	Boolean unix_info2 = ((UNIX_CAPS(share) & UNIX_QFILEINFO_UNIX_INFO2_CAP)) ? TRUE : FALSE;
1710	Boolean darwin = (SSTOVC(share)->vc_flags & SMBV_DARWIN) ? TRUE : FALSE;
1711    uint32_t is_dir = 0;
1712    uint32_t is_read_only = 0;
1713
1714    if (vp != NULL) {
1715        if (vnode_isdir(vp)) {
1716            is_dir = 1;
1717        }
1718
1719        if (VTOSMB(vp)->n_dosattr & SMB_EFA_RDONLY) {
1720            is_read_only = 1;
1721        }
1722    }
1723    else {
1724        if (fap != NULL) {
1725            /* smbfs_vnop_readdirattr or smbfs_vnop_getattrlistbulk */
1726            if (fap->fa_vtype == VDIR) {
1727                is_dir = 1;
1728            }
1729
1730            if (fap->fa_attr & SMB_EFA_RDONLY) {
1731                is_read_only = 1;
1732            }
1733        }
1734        else {
1735            /* this should be impossible */
1736            SMBERROR("vp and fap are NULL \n");
1737        }
1738    }
1739
1740    if (SSTOVC(share)->vc_flags & SMBV_SMB2) {
1741        if ((UNIX_SERVER(SSTOVC(share)) || !is_dir) && is_read_only) {
1742            return TRUE;
1743        }
1744    }
1745	else {
1746        if ((unix_info2 || darwin || !is_dir) && is_read_only) {
1747            return TRUE;
1748        }
1749    }
1750	return FALSE;
1751}
1752
1753/*
1754 * routines to maintain vnode attributes cache
1755 *
1756 * The calling routine must hold a reference on the share
1757 *
1758 */
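/*
 * Typical usage pattern (a sketch only; the real call sites live elsewhere in
 * smbfs): after a wire operation returns fresh metadata in a smbfattr, the
 * caller records it here, and later attribute requests are served from the
 * cache until the timer expires:
 *
 *	smbfs_attr_cacheenter(share, vp, &fattr, FALSE, context);
 *	...
 *	if (smbfs_attr_cachelookup(share, vp, &va, context, FALSE) == ENOENT) {
 *		// cache expired - go back to the wire, then cacheenter again
 *	}
 */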
1759void
1760smbfs_attr_cacheenter(struct smb_share *share, vnode_t vp, struct smbfattr *fap,
1761					  int UpdateResourceParent, vfs_context_t context)
1762{
1763	struct smbmount *smp = VTOSMBFS(vp);
1764	struct smbnode *np = VTOSMB(vp);
1765	enum vtype node_vtype;
1766	struct timespec ts;
1767	uint32_t monitorHint = 0;
1768    uint32_t flags = 0;
1769
1770	node_vtype = vnode_vtype(vp);
1771
1772	if ((node_vtype == VDIR) && np->d_needsUpdate) {
1773		monitorHint |= VNODE_EVENT_ATTRIB | VNODE_EVENT_WRITE;
1774		np->d_needsUpdate = FALSE;
1775	}
1776
1777	/*
	 * The vtype of the node has changed, so remove it from the name cache and
	 * our hash table. We set the cache timer to zero; this will cause the
	 * cache lookup routine to return ENOENT.
1781	 */
1782	if (node_vtype_changed(vp, node_vtype, fap)) {
1783		np->attribute_cache_timer = 0;
1784		np->n_symlink_cache_timer = 0;
1785		cache_purge(vp);
1786		smb_vhashrem(np);
1787		monitorHint |= VNODE_EVENT_RENAME | VNODE_EVENT_ATTRIB;
1788		goto vnode_notify_needed;
1789	}
1790
1791	/* No need to update the cache after close, we just got updated */
1792	np->n_flag &= ~NATTRCHANGED;
1793	if (node_vtype == VREG) {
1794		if (smbfs_update_size(np, &fap->fa_reqtime, fap->fa_size) == FALSE) {
1795			/* We lost the race, assume we have the correct size */
1796			fap->fa_size = np->n_size;
1797			/* Force a lookup on close, make sure we have the correct size on close */
1798			np->n_flag |= NATTRCHANGED;
1799 		} else if (np->n_size != fap->fa_size) {
			/* We won the race and the size changed, notify them about the change */
1801			monitorHint |= VNODE_EVENT_EXTEND | VNODE_EVENT_ATTRIB;
1802		}
1803	} else if (node_vtype == VDIR) {
1804		np->n_size = 16384; 	/* should be a better way ... */
1805		/* See if we need to clear the negative name cache */
1806		if ((np->n_flag & NNEGNCENTRIES) &&
1807			((share->ss_fstype == SMB_FS_FAT) ||
1808			 (timespeccmp(&fap->fa_mtime, &np->n_mtime, >)))) {
1809			np->n_flag &= ~NNEGNCENTRIES;
1810			cache_purge_negatives(vp);
1811
1812            VTOSMB(vp)->d_changecnt++;
1813		}
1814		/*
1815		 * Don't allow mtime to go backwards.
1816		 * Yes this has its flaws.  Better ideas are welcome!
1817		 */
1818		if (timespeccmp(&fap->fa_mtime, &np->n_mtime, <))
1819			fap->fa_mtime = np->n_mtime;
1820	} else if (node_vtype != VLNK) {
1821		return;
1822	}
	/* The server told us the allocation size; return what they told us */
1824	np->n_data_alloc = fap->fa_data_alloc;
1825
1826	if (fap->fa_unix) {
1827		np->n_flags_mask = fap->fa_flags_mask;
1828		np->n_nlinks = fap->fa_nlinks;
1829    }
1830
1831    /* Calculate the uid, gid, and mode */
1832    if (vnode_isdir(np->n_vnode)) {
1833        flags |= SMBFS_GET_UGM_IS_DIR;
1834    }
1835
1836    /*
1837     * Unix mode can come from an ACL ACE (which sets NHAS_POSIXMODES)
1838     * or from SMB 2 when the FA_UNIX_MODES_VALID is set. Only dummy up
     * fake modes if we don't have the unix modes already.
1840     */
1841    if (!(np->n_flag & NHAS_POSIXMODES) &&
1842        !(fap->fa_valid_mask & FA_UNIX_MODES_VALID)) {
1843        flags |= SMBFS_GET_UGM_REMOVE_POSIX_MODES;
1844    }
1845
1846    smb_get_uid_gid_mode(share, smp,
1847                         fap, flags,
1848                         &np->n_uid, &np->n_gid, &np->n_mode);
1849
1850    if (fap->fa_valid_mask & FA_UNIX_MODES_VALID) {
1851        np->n_flag |= NHAS_POSIXMODES;
1852    }
1853
1854    if ((monitorHint & VNODE_EVENT_ATTRIB) == 0) {
		/* Post an attribute change hint if either the create or modify time changed */
		if (!timespeccmp(&np->n_crtime, &fap->fa_crtime, ==) ||
			!timespeccmp(&np->n_mtime, &fap->fa_mtime, ==))
1857			monitorHint |= VNODE_EVENT_ATTRIB;
1858	}
1859
1860	/*
	 * We always set the fstatus if it's valid.
1862	 * Never reset the fstatus if the following are true:
1863	 * 1. The modify time on the item hasn't changed.
1864	 * 2. We have already discovered that this item has no streams.
1865	 * 3. The fap information didn't come from an open call.
1866	 *
1867	 * NOTE: This needs to be done before we update the modify time.
1868	 */
1869	if (fap->fa_valid_mask & FA_FSTATUS_VALID) {
1870		/* This is a valid field use it */
1871		np->n_fstatus = fap->fa_fstatus;
1872	} else if (timespeccmp(&np->n_chtime, &fap->fa_chtime, !=)) {
1873		/*
		 * Something changed; clear the fstatus field since we can't trust it.
1875		 * NOTE: The above check needs to be done before we update the change time.
1876		 */
1877		np->n_fstatus = 0;
1878	}
1879
    /* Update max access if it's valid */
1881	if (fap->fa_valid_mask & FA_MAX_ACCESS_VALID) {
1882        np->maxAccessRights = fap->fa_max_access;
1883        np->maxAccessRightChTime = fap->fa_chtime;
1884    }
1885
1886    /*
	 * Not sure if this is still a problem. In the old days the Finder did
	 * not like it when the create time of the root or a directory was after
	 * the modify time. This can and will happen on FAT file systems. For
	 * now let's leave it alone and see what happens.
1891	 */
1892	np->n_crtime = fap->fa_crtime;
1893	np->n_chtime = fap->fa_chtime;
1894	np->n_atime = fap->fa_atime;
1895	np->n_mtime = fap->fa_mtime;
1896	/*
1897	 * This routine can be called by a Query Info, FindFirst or the NTCreateAndX
1898	 * routine. If the server doesn't support the UNIX extensions then the fa_unix
1899	 * field won't be set and fa_attr will contain the dos attributes. We map the
1900	 * hidden, read only and the archive bits to the hidden, immutable and
1901	 * "not backed up" bits in the BSD flags.
1902	 *
1903	 * In the NTCreateAndX routine we check to see if the server supports the
1904	 * UNIX extensions and we convert the fa_attr and fa_flags_mask to the correct
1905	 * values depending on the bits supported by the server. This allows us to
1906	 * always trust the values in the NTCreateAndX fap.
1907	 *
	 * Linux servers do not support the UNIX Info2 BSD flags. This is a problem:
	 * we still want to use the UNIX extensions, but we can't trust all the bits
1910	 * in the fa_attr field when they come from the Query Info or FindFirst
1911	 * routine. So in this case ignore the hidden, read only and the archive bits
1912	 * in the fa_attr and just keep using the ones we have on the node. This means
1913	 * for Linux servers we only trust the bits that come from the NTCreateAndX or
	 * the bits we have set ourselves. Remember we look up access with the NTCreateAndX
1915	 * so we have the latest info in that case.
1916	 */
1917	if (fap->fa_unix && ((fap->fa_flags_mask & EXT_REQUIRED_BY_MAC) != EXT_REQUIRED_BY_MAC)) {
1918		fap->fa_attr &= ~(SMB_EFA_RDONLY | SMB_EFA_HIDDEN | SMB_EFA_ARCHIVE);
1919		np->n_dosattr &= (SMB_EFA_RDONLY | SMB_EFA_HIDDEN | SMB_EFA_ARCHIVE);
1920		np->n_dosattr |= fap->fa_attr;
1921	} else {
1922		np->n_dosattr = fap->fa_attr;
1923	}
1924
1925	nanouptime(&ts);
1926	np->attribute_cache_timer = ts.tv_sec;
1927	/*
1928	 * UpdateResourceParent says it is ok to update the parent if this is a
	 * resource stream. So if this is a stream and it's the resource stream then
	 * update the parent's resource fork size and cache timer. If we can't get the
	 * parent then just get out; when the timer goes off the parent will just have
1932	 * to make the wire call.
1933	 */
1934	if (UpdateResourceParent && (vnode_isnamedstream(vp)) &&
1935		(np->n_flag & N_ISRSRCFRK)) {
1936		vnode_t parent_vp = smb_update_rsrc_and_getparent(vp, (fap->fa_size) ? TRUE : FALSE);
1937		/* We no longer need the parent so release it. */
1938		if (parent_vp)
1939			vnode_put(parent_vp);
1940	}
1941
1942vnode_notify_needed:
1943	if ((monitorHint != 0) && (vnode_ismonitored(vp)) && context) {
1944		struct vnode_attr vattr;
1945
1946		vfs_get_notify_attributes(&vattr);
1947		smbfs_attr_cachelookup(share, vp, &vattr, context, TRUE);
1948		vnode_notify(vp, monitorHint, &vattr);
1949	}
1950}
1951
1952/*
1953 * The calling routine must hold a reference on the share
1954 */
1955int
1956smbfs_attr_cachelookup(struct smb_share *share, vnode_t vp, struct vnode_attr *va,
1957					   vfs_context_t context, int useCacheDataOnly)
1958{
1959	struct smbnode *np = VTOSMB(vp);
1960	struct smbmount *smp = VTOSMBFS(vp);
1961	time_t attrtimeo;
1962	struct timespec ts;
1963
1964	SMB_CACHE_TIME(ts, np, attrtimeo);
1965
1966	if (useCacheDataOnly) {
1967		/* Use the current cache data only */
1968	} else if (np->n_flag & NMARKEDFORDLETE) {
1969		/*
1970		 * The file is marked for deletion on close. We can no longer
1971		 * gain access using the path. All access must be done using
1972		 * the fid. So just pretend that the cache will never expire
1973		 * for this item.
1974		 *
1975		 * NOTE: Since it is marked for deletion no one else can access
1976		 *       it, so the cache data should stay good through the life
1977		 *       of the open file.
1978		 */
1979	}
1980	else if ((ts.tv_sec - np->attribute_cache_timer) > attrtimeo)
1981		return (ENOENT);
1982
1983	if (!va)
1984		return (0);
1985
1986	VATTR_RETURN(va, va_rdev, 0);
1987	if ((UNIX_CAPS(share) & UNIX_QFILEINFO_UNIX_INFO2_CAP))
1988		VATTR_RETURN(va, va_nlink, np->n_nlinks);
1989	else
1990		VATTR_RETURN(va, va_nlink, 1);
1991
1992	/*
1993	 * Looks like we need to handle total size in the streams case. The VFS layer
	 * always fills this in with the data fork size. Still not sure of this, but
	 * for now let's go ahead and handle it if asked.
1996	 */
1997	if ((share->ss_attributes & FILE_NAMED_STREAMS) &&
1998		(VATTR_IS_ACTIVE(va, va_total_size))) {
1999		if (vnode_isdir(vp)) {
2000			VATTR_RETURN(va, va_total_size, np->n_size);
2001			lck_mtx_lock(&smp->sm_statfslock);
2002			if (smp->sm_statfsbuf.f_bsize)	/* Just to be safe */
2003				VATTR_RETURN(va, va_total_alloc, roundup(va->va_total_size,
2004														 smp->sm_statfsbuf.f_bsize));
2005			lck_mtx_unlock(&smp->sm_statfslock);
2006		}
2007		else if (!vnode_isnamedstream(vp)) {
2008			if (!useCacheDataOnly) {
2009				(void)smb_get_rsrcfrk_size(share, vp, context);
2010			}
2011			lck_mtx_lock(&np->rfrkMetaLock);
2012			VATTR_RETURN(va, va_total_size, np->n_size + np->rfrk_size);
2013			lck_mtx_unlock(&np->rfrkMetaLock);
2014			lck_mtx_lock(&smp->sm_statfslock);
2015			if (smp->sm_statfsbuf.f_bsize)	/* Just to be safe */
2016				VATTR_RETURN(va, va_total_alloc, roundup(va->va_total_size,
2017														 smp->sm_statfsbuf.f_bsize));
2018			lck_mtx_unlock(&smp->sm_statfslock);
2019		}
2020	}
2021
2022	VATTR_RETURN(va, va_data_size, np->n_size);
2023	VATTR_RETURN(va, va_data_alloc, np->n_data_alloc);
2024	VATTR_RETURN(va, va_iosize, smp->sm_statfsbuf.f_bsize);
2025
2026	if (VATTR_IS_ACTIVE(va, va_mode))
2027		VATTR_RETURN(va, va_mode, np->n_mode);
2028
2029	if (VATTR_IS_ACTIVE(va, va_uid) || VATTR_IS_ACTIVE(va, va_gid)) {
2030		/*
2031		 * The volume was mounted as guest, so we already set the mount point to
2032		 * ignore ownership. Now always return an owner of 99 and group of 99.
2033		 */
2034		if (SMBV_HAS_GUEST_ACCESS(SSTOVC(share))) {
2035			VATTR_RETURN(va, va_uid, UNKNOWNUID);
2036			VATTR_RETURN(va, va_gid, UNKNOWNGID);
2037		} else {
2038			/*
2039			 * For servers that support the UNIX extensions we know the uid/gid.
			 * For servers that don't support ACLs the node uid/gid will be
			 * set to the mounted user's uid/gid. For all other servers we need
			 * to get the ACL and translate the SID to a uid or gid. The uid/gid
			 * really is for display purposes only and means nothing to us. We will
			 * set the node's ids if we get a request for the ACL, but otherwise
2045			 * we leave them unset for performance reasons.
2046			 */
2047			if (np->n_uid == KAUTH_UID_NONE)
2048				VATTR_RETURN(va, va_uid, smp->sm_args.uid);
2049			else
2050				VATTR_RETURN(va, va_uid, np->n_uid);
2051			if (np->n_gid == KAUTH_GID_NONE)
2052				VATTR_RETURN(va, va_gid, smp->sm_args.gid);
2053			else
2054				VATTR_RETURN(va, va_gid, np->n_gid);
2055		}
2056	}
2057	if (VATTR_IS_ACTIVE(va, va_flags)) {
2058		va->va_flags = 0;
2059		/*
		 * Remember that SMB_EFA_ARCHIVE means the item needs to be
		 * archived and SF_ARCHIVED means the item has been archived.
2062		 *
2063		 * NOTE: Windows does not set ATTR_ARCHIVE bit for directories.
2064		 */
2065		if (!vnode_isdir(vp) && !(np->n_dosattr & SMB_EFA_ARCHIVE))
2066			va->va_flags |= SF_ARCHIVED;
		/* The server has it marked as read-only, so set the immutable bit. */
2068		if (node_isimmutable(share, vp, NULL)) {
2069			va->va_flags |= UF_IMMUTABLE;
2070		}
2071		/*
		 * The server has it marked as hidden, so set the new UF_HIDDEN bit. Never
2073		 * mark the root volume as hidden, unless they have the MNT_DONTBROWSE
2074		 * set. Assume they know what they are doing if the MNT_DONTBROWSE is set.
2075		 */
2076		if ((np->n_dosattr & SMB_EFA_HIDDEN) &&
2077			(!vnode_isvroot(vp) || (vfs_flags(smp->sm_mp) & MNT_DONTBROWSE))) {
2078				va->va_flags |= UF_HIDDEN;
2079		}
2080		VATTR_SET_SUPPORTED(va, va_flags);
2081	}
2082
2083	/* va_acl are done in smbfs_getattr */
2084
2085	VATTR_RETURN(va, va_create_time, np->n_crtime);
2086	VATTR_RETURN(va, va_modify_time, np->n_mtime);
2087	/* FAT only supports the date not the time! */
2088	VATTR_RETURN(va, va_access_time, np->n_atime);
2089	/*
2090	 * FAT does not support change time, so just return the modify time.
2091	 * Copied from the msdos code. SMB has no backup time so skip the
2092	 * va_backup_time.
2093	 */
2094	if (share->ss_fstype == SMB_FS_FAT)
2095		np->n_chtime.tv_sec = np->n_mtime.tv_sec;
2096	VATTR_RETURN(va, va_change_time, np->n_chtime);
2097
2098	/*
2099	 * Exporting file IDs from HFS Plus:
2100	 *
2101	 * For "normal" files the c_fileid is the same value as the
2102	 * c_cnid.  But for hard link files, they are different - the
2103	 * c_cnid belongs to the active directory entry (ie the link)
2104	 * and the c_fileid is for the actual inode (ie the data file).
2105	 *
2106	 * The stat call (getattr) uses va_fileid and the Carbon APIs,
2107	 * which are hardlink-ignorant, will ask for va_linkid.
2108	 */
2109    lck_rw_lock_shared(&np->n_name_rwlock);
2110    VATTR_RETURN(va, va_fileid, smb2fs_smb_file_id_get(smp, np->n_ino,
2111                                                       np->n_name));
2112    VATTR_RETURN(va, va_linkid, smb2fs_smb_file_id_get(smp, np->n_ino,
2113                                                       np->n_name));
2114    lck_rw_unlock_shared(&np->n_name_rwlock);
2115
2116    lck_rw_lock_shared(&np->n_parent_rwlock);
2117    if (np->n_parent != NULL) {
2118        lck_rw_lock_shared(&np->n_parent->n_name_rwlock);
2119        VATTR_RETURN(va, va_parentid, smb2fs_smb_file_id_get(smp,
2120                                                             np->n_parent->n_ino,
2121                                                             np->n_parent->n_name));
2122        lck_rw_unlock_shared(&np->n_parent->n_name_rwlock);
2123    }
2124    else {
2125        /*
2126         * This would require a lot more work so let the VFS layer handle it.
2127         * VATTR_RETURN(va, va_parentid, np->n_parentid);
2128         */
2129    }
2130    lck_rw_unlock_shared(&np->n_parent_rwlock);
2131
2132	VATTR_RETURN(va, va_fsid, vfs_statfs(vnode_mount(vp))->f_fsid.val[0]);
2133	VATTR_RETURN(va, va_filerev, 0);
2134	VATTR_RETURN(va, va_gen, 0);
2135
2136	/*
2137	 * We currently have no way to know the va_encoding. The VFS layer fills it
2138	 * in with kTextEncodingMacUnicode = 0x7E. Lets leave it to the VFS layer
2139	 * to handle for now.
2140	 * VATTR_RETURN(va, va_encoding, 0x7E);
2141	 */
2142
2143	/*
2144	 * If this is the root, let VFS find out the mount name, which may be
2145	 * different from the real name
2146	 */
2147	if (VATTR_IS_ACTIVE(va, va_name) && !vnode_isvroot(vp)) {
2148        lck_rw_lock_shared(&np->n_name_rwlock);
2149		strlcpy ((char*) va->va_name, (char*)np->n_name, MAXPATHLEN);
2150        lck_rw_unlock_shared(&np->n_name_rwlock);
2151		VATTR_SET_SUPPORTED(va, va_name);
2152	}
2153	/* va_uuuid is done in smbfs_getattr */
2154	/* va_guuid is done in smbfs_getattr */
	/* We have no way to get va_nchildren. Let the VFS layer handle it. */
2156	return (0);
2157}
2158
2159/*
2160 * FAT file systems don't exhibit POSIX behaviour with regard to
2161 * updating the directory mtime when the directory's contents
2162 * change.
2163 *
2164 * We force the issue here by updating our cached copy of the mtime
2165 * whenever we perform such an action ourselves, and then mark the
2166 * cache invalid.  Subsequently when the invalidated cache entry is
2167 * updated, we disallow an update that would move the mtime backwards.
2168 *
2169 * This preserves correct or near-correct behaviour with a
2170 * compliant server, and gives near-correct behaviour with
2171 * a non-compliant server in the most common case (we are the
2172 * only client changing the directory).
2173 *
2174 * There are also complications if a server's time is ahead
2175 * of our own.  We must 'touch' a directory when it is first
2176 * created, to ensure that the timestamp starts out sane,
2177 * however it may have a timestamp well ahead of the 'touch'
2178 * point which will be returned and cached the first time the
2179 * directory's attributes are fetched.  Subsequently, the
2180 * directory's mtime will not appear to us to change at all
2181 * until our local time catches up to the server.
2182 *
2183 * Thus, any time a directory is 'touched', the saved timestamp
2184 * must advance at least far enough forwards to be visible to
2185 * the stat(2) interface.
2186 *
2187 * We only do this for FAT file system, all others should be handling
2188 * the modify time correctly.
2189 */
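/*
 * Worked example (illustrative numbers only): if the cached directory mtime
 * is 10:00:00 and we just created a file in it while our local clock reads
 * 09:59:30 (server clock ahead of ours), the saved mtime is bumped to
 * 10:00:01 so stat(2) callers still see a change. If instead the local clock
 * already reads 10:00:05, that later value is used.
 */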
2190void
2191smbfs_attr_touchdir(struct smbnode *dnp, int fatShare)
2192{
2193	if (fatShare) {
2194		struct timespec ts, ta;
2195
2196		/*
2197		 * Creep the saved time forwards far enough that
2198		 * layers above the kernel will notice.
2199		 */
2200		ta.tv_sec = 1;
2201		ta.tv_nsec = 0;
2202		timespecadd(&dnp->n_mtime, &ta);
2203		/*
2204		 * If the current time is later than the updated
2205		 * saved time, apply it instead.
2206		 */
2207		nanotime(&ts);	/* Need current date/time, so use nanotime */
2208		if (timespeccmp(&dnp->n_mtime, &ts, <))
2209			dnp->n_mtime = ts;
2210	}
2211	/*
2212	 * Invalidate the cache, so that we go to the wire
2213	 * to check that the server doesn't have a better
2214	 * timestamp next time we care.
2215	 */
2216	dnp->attribute_cache_timer = 0;
2217}
2218
2219int
2220smbfsIsCacheable(vnode_t vp)
2221{
2222	/* Has to be a file, so dirs and symlinks are not cacheable */
2223	if (!vnode_isreg(vp)) {
2224		return FALSE;
2225	}
2226	if (vnode_isnocache(vp)) {
2227		return FALSE;
2228	} else {
2229		return TRUE;
2230	}
2231}
2232
2233void
2234smbfs_setsize(vnode_t vp, off_t size)
2235{
2236	struct smbnode *np = VTOSMB(vp);
2237
2238	/*
2239	 * n_size is used by smbfs_vnop_pageout so it must be
2240	 * changed before we call setsize
2241	 */
2242	np->n_size = size;
2243	ubc_setsize(vp, size);
2244	/*
2245	 * this lets us avoid a race with readdir which resulted in
2246	 * a stale n_size, which in the worst case yielded data corruption.
2247	 */
2248	nanouptime(&np->n_sizetime);
2249	/* Resetting the size, blow away statfs cache */
2250	VTOSMBFS(vp)->sm_statfstime = 0;
2251}
2252
2253/*
 * If the file size hasn't changed then there is really nothing to do here; get out, but
2255 * let the calling routine know that they can update their cache timer. If we have
2256 * updated the size internally, while waiting on the response from the server,
2257 * then don't update the size and tell the calling routine not to update its
2258 * cache timers. Otherwise update our internal size and the ubc size. Also tell
2259 * the calling routine to update any cache timers.
2260 */
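/*
 * For example (hypothetical values): a FindFirst reply reports a size of
 * 4096, but a local write already extended the file to 8192 after the
 * request was sent. Because reqtime predates n_sizetime, this routine
 * returns FALSE and the caller leaves its cache timers alone rather than
 * caching the stale 4096.
 */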
2261int
2262smbfs_update_size(struct smbnode *np, struct timespec *reqtime, u_quad_t new_size)
2263{
2264	if (np->n_size == new_size)
2265		return TRUE; /* Nothing to update here */
2266
2267	/* Only update the size if we don't have a set eof pending */
2268	if (np->n_flag & NNEEDS_EOF_SET) {
2269        SMB_LOG_IO_LOCK(np, "%s: Waiting on pending seteof, old eof = %lld  new eof = %lld\n",
2270                        np->n_name, np->n_size, new_size);
2271		return FALSE;
2272	}
2273
2274	if (np->waitOnClusterWrite) {
2275		SMB_LOG_IO_LOCK(np, "%s: Waiting on cluster write to complete, old eof = %lld  new eof = %lld\n",
2276                        np->n_name, np->n_size, new_size);
2277		return FALSE;
2278	}
2279
2280	if (timespeccmp(reqtime, &np->n_sizetime, <=)) {
2281		SMB_LOG_IO_LOCK(np, "%s: We set the eof after this lookup, old eof = %lld  new eof = %lld\n",
2282                        np->n_name, np->n_size, new_size);
2283		return FALSE; /* we lost the race, tell the calling routine */
2284	}
2285
2286	/*
2287	 * The file size on the server is different from our copy. So can we trust
2288	 * any of our data? Should we push, invalidate the whole file?
2289	 *
2290	 * The old code would only invalidate the region that the file had grown. Now
2291	 * since we call ubc_setsize in smbfs_setsize that should handle any truncate
	 * issue. Not sure why you would invalidate a region you don't even have in the cache.
2293	 */
2294	ubc_msync (np->n_vnode, 0, ubc_getsize(np->n_vnode), NULL, UBC_PUSHDIRTY | UBC_SYNC);
2295
2296	SMBDEBUG_LOCK(np, "%s: smbfs_setsize, old eof = %lld  new eof = %lld time %ld:%ld  %ld:%ld\n",
2297                  np->n_name, np->n_size, new_size,
2298                  np->n_sizetime.tv_sec, np->n_sizetime.tv_nsec,
2299                  reqtime->tv_sec, reqtime->tv_nsec);
2300
2301	smbfs_setsize(np->n_vnode, new_size);
2302	return TRUE;
2303}
2304
2305int
2306smbfs_update_name_par(struct smb_share *share, vnode_t dvp, vnode_t vp,
2307                      struct timespec *reqtime,
2308                      const char *new_name, size_t name_len)
2309{
2310    char *new_name2, *old_name;
2311    struct componentname cnp;
2312    struct smbnode *np, *fdnp = NULL, *tdnp = NULL;
2313    vnode_t fdvp = NULL;
2314    uint32_t orig_flag = 0;
2315    int update_flags = 0;
2316    int exclusive_lock = 0;
2317
2318    if ((vp == NULL) ||
2319        (dvp == NULL) ||
2320        (share == NULL) ||
2321        (reqtime == NULL) ||
2322        (new_name == NULL)) {
2323        /* Nothing to update */
2324        //SMBDEBUG("missing info \n");
2325        return TRUE;
2326    }
2327
2328    np = VTOSMB(vp);
2329
2330    /*
2331     * Did the parent change?
2332     *
2333     * fdnp = np->n_parent
2334     * fdvp = np->n_parent->n_vnode (not locked)
2335     *
2336     * tdnp = VTOSMB(dvp)
2337     * tdvp = dvp (locked)
2338     *
2339     * fnp = np (vp is locked)
2340     */
2341    lck_rw_lock_shared(&np->n_parent_rwlock);
2342
2343    if (np->n_parent != NULL) {
2344        fdnp = np->n_parent;
2345        if (fdnp->n_vnode != NULL) {
2346            fdvp = fdnp->n_vnode;
2347        }
2348    }
2349
2350    /* Already checked earlier for dvp == null */
2351    tdnp = VTOSMB(dvp);
2352
2353    if ((fdnp != NULL) &&
2354        (fdvp != NULL) &&
2355        (tdnp != NULL) &&
2356        (fdnp != tdnp)) {
2357        /*
2358         * Parent changed, so need exclusive lock. Try to upgrade lock.
2359         * If exclusive lock upgrade fails we lose the lock and
2360         * have to take the exclusive lock on our own.
2361         */
2362        if (lck_rw_lock_shared_to_exclusive(&np->n_parent_rwlock) == FALSE) {
2363            lck_rw_lock_exclusive(&np->n_parent_rwlock);
2364
2365            /*
             * It's remotely possible n_parent changed while we were getting the
2367             * exclusive lock, so reset fdnp and fdvp
2368             */
2369            fdnp = NULL;
2370            fdvp = NULL;
2371
2372            if (np->n_parent != NULL) {
2373                fdnp = np->n_parent;
2374                if (fdnp->n_vnode != NULL) {
2375                    fdvp = fdnp->n_vnode;
2376                }
2377            }
2378
2379            /* Make sure fdnp and fdvp are still ok */
2380            if ((fdnp == NULL) || (fdvp == NULL)) {
2381                /*
2382                 * The parent disappeared. This should not happen.
2383                 * Just leave the vnode unchanged.
2384                 */
2385                SMBERROR_LOCK(np, "Parent lost during update for <%s> \n", np->n_name);
2386                exclusive_lock = 1;
2387                goto error;
2388            }
2389        }
2390        exclusive_lock = 1;
2391
2392        orig_flag = np->n_flag;
2393
2394        /* Take a ref count on the new parent */
2395        if (!vnode_isvroot(dvp)) {
2396            if (vnode_ref(dvp) == 0) {
2397                np->n_flag |= NREFPARENT;
2398
2399                /* Increment new parent node's child refcnt */
2400                OSIncrementAtomic(&tdnp->n_child_refcnt);
2401            }
2402            else {
2403                /* Failed to take ref, so clear flag */
2404                np->n_flag &= ~NREFPARENT;
2405            }
2406        }
2407        else {
2408            /* Do not need to ref cnt if parent is root vnode */
2409            np->n_flag &= ~NREFPARENT;
2410        }
2411
2412        /*
2413         * Remove the ref count off the old parent if there was one and
2414         * if the old parent was not root vnode
2415         */
2416        if ((!vnode_isvroot(fdvp)) && (orig_flag & NREFPARENT)) {
2417            if (vnode_get(fdvp) == 0) {
2418                vnode_rele(fdvp);
2419                vnode_put(fdvp);
2420
2421                /* Remove the child refcnt from old parent */
2422                OSDecrementAtomic(&fdnp->n_child_refcnt);
2423            }
2424        }
2425
2426        /* Set the new parent */
2427        np->n_parent = VTOSMB(dvp);
2428
2429        /* Mark that we need to update the vnodes parent */
2430        update_flags |= VNODE_UPDATE_PARENT;
2431    }
2432
2433error:
2434    if (exclusive_lock == 0) {
2435        /* Most of the time we should end up with just a shared lock */
2436        lck_rw_unlock_shared(&np->n_parent_rwlock);
2437    }
2438    else {
2439        /* Parent must have changed */
2440        lck_rw_unlock_exclusive(&np->n_parent_rwlock);
2441    }
2442
2443    /*
2444     * Did the name change?
2445     */
2446    lck_rw_lock_shared(&np->n_name_rwlock);
2447    if ((np->n_nmlen == name_len) &&
2448        (bcmp(np->n_name, new_name, np->n_nmlen) == 0)) {
2449        /* Name did not change, so nothing to update */
2450
2451        /* Update parent if needed */
2452        if (update_flags != 0) {
2453            vnode_update_identity(vp, dvp, np->n_name, (int) np->n_nmlen, 0,
2454                                  update_flags);
2455        }
2456
2457        lck_rw_unlock_shared(&np->n_name_rwlock);
2458        return TRUE;
2459    }
2460    lck_rw_unlock_shared(&np->n_name_rwlock);
2461
2462    /*
2463     * n_rename_time is used to handle the case where an Enumerate req is sent,
2464     * then a Rename request/reply happens, then the Enumerate reply is
     * processed which has the previous name. We don't want to update the name
2466     * with a stale name from an Enumerate that happened before the Rename.
2467     */
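    /*
     * Example timeline (illustrative): an Enumerate request is sent at t=1
     * while the file still has its old name, a rename completes at t=2 and
     * sets n_rename_time, then the Enumerate reply arrives at t=3 carrying
     * the old name. Since reqtime (t=1) is not later than n_rename_time
     * (t=2), the stale name is ignored below.
     */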
2468	if (timespeccmp(reqtime, &np->n_rename_time, <=)) {
2469        /* we lost the race, tell the calling routine */
2470
2471        /* Update parent if needed */
2472        if (update_flags != 0) {
2473            lck_rw_lock_shared(&np->n_name_rwlock);
2474            vnode_update_identity(vp, dvp, np->n_name, (int) np->n_nmlen, 0,
2475                                  update_flags);
2476            lck_rw_unlock_shared(&np->n_name_rwlock);
2477        }
2478
2479        return FALSE;
2480	}
2481
2482    /* Set the new name */
2483    new_name2 = smb_strndup(new_name, name_len);
2484    if (new_name2) {
2485        /* save the old name */
2486        lck_rw_lock_exclusive(&np->n_name_rwlock);
2487        old_name = np->n_name;
2488
2489        /* put in the new name */
2490        np->n_name = new_name2;
2491        np->n_nmlen = name_len;
2492
2493        /* Now its safe to free the old name */
2494        SMB_FREE(old_name, M_SMBNODENAME);
2495
2496        /* Update the VFS name cache */
2497        bzero(&cnp, sizeof(cnp));
2498        cnp.cn_nameptr = (char *)np->n_name;
2499        cnp.cn_namelen = (int) np->n_nmlen;
2500        cnp.cn_flags = MAKEENTRY;
2501
2502        /* Remove old entry, wrong case */
2503        cache_purge(vp);
2504
2505        /* Add new entry, correct case */
2506        cache_enter(dvp, vp, &cnp);
2507        lck_rw_unlock_exclusive(&np->n_name_rwlock);
2508
2509        update_flags |= VNODE_UPDATE_NAME;
2510    }
2511
2512    /* Update parent and/or name if needed */
2513    if (update_flags != 0) {
2514        lck_rw_lock_shared(&np->n_name_rwlock);
2515        vnode_update_identity(vp, dvp, np->n_name, (int) np->n_nmlen, 0,
2516                              update_flags);
2517        lck_rw_unlock_shared(&np->n_name_rwlock);
2518    }
2519
2520	return TRUE;
2521}
2522
2523/*
2524 * FindByteRangeLockEntry
2525 *
2526 * Return Values
2527 *
 *	TRUE	- We have this range locked already
2529 *	FALSE	- We don't have this range locked
2530 */
2531int
2532FindByteRangeLockEntry(struct fileRefEntry *fndEntry, int64_t offset,
2533					int64_t length, uint32_t lck_pid)
2534{
2535	struct ByteRangeLockEntry *curr = fndEntry->lockList;
2536
2537	while (curr) {
2538		if ((curr->offset == offset) && (curr->length == length) &&
2539			(curr->lck_pid == lck_pid))
2540			return TRUE;
2541		curr = curr->next;
2542	}
2543	return FALSE;
2544}
2545
2546/*
2547 * AddRemoveByteRangeLockEntry
2548 *
 * Add a new ByteRangeLockEntry to fndEntry's lock list, or remove an existing one (unLock != 0).
2550 *
2551 * Return Values
2552 *	none
2553 */
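/*
 * A usage sketch (hypothetical offset/length, lck_pid from the caller):
 *
 *	// take a byte range lock covering bytes 0-99 on this open
 *	AddRemoveByteRangeLockEntry(fndEntry, 0, 100, 0, lck_pid);
 *	// later, FindByteRangeLockEntry(fndEntry, 0, 100, lck_pid) returns TRUE
 *	// release it again; a non-zero unLock removes the matching entry
 *	AddRemoveByteRangeLockEntry(fndEntry, 0, 100, 1, lck_pid);
 */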
2554void
2555AddRemoveByteRangeLockEntry(struct fileRefEntry *fndEntry, int64_t offset,
2556						 int64_t length, int8_t unLock, uint32_t lck_pid)
2557{
2558	struct ByteRangeLockEntry *curr = NULL;
2559	struct ByteRangeLockEntry *prev = NULL;
2560	struct ByteRangeLockEntry *new = NULL;
2561	int32_t foundIt = 0;
2562
2563	if (unLock == 0) {	/* Locking, so add a new ByteRangeLockEntry */
2564		SMB_MALLOC (new, struct ByteRangeLockEntry *, sizeof (struct ByteRangeLockEntry),
2565				M_TEMP, M_WAITOK);
2566		new->offset = offset;
2567		new->length = length;
2568		new->lck_pid = lck_pid;
2569		new->next = NULL;
2570
2571		curr = fndEntry->lockList;
2572		if (curr == NULL) {
2573			/* first entry is empty so use it */
2574			fndEntry->lockList = new;
2575		} else { /* find the last entry and add the new entry to the end of list */
2576			while (curr->next != NULL)
2577				curr = curr->next;
2578			curr->next = new;
2579		}
2580	} else {	/* Unlocking, so remove a ByteRangeLockEntry */
2581		curr = fndEntry->lockList;
2582		if (curr == NULL) {
2583		    SMBWARNING("AddRemoveByteRangeLockEntry:  no entries found\n");
2584		    return;
2585		}
2586
2587		if ((curr->offset == offset) && (curr->length == length)) {
2588			/* first entry is it, so remove it from the head */
2589			fndEntry->lockList = curr->next;
2590			SMB_FREE(curr, M_TEMP);
2591		} else {
2592			/* Not the first entry, so search the rest of them */
2593			prev = curr;
2594			curr = curr->next;
2595			while (curr != NULL) {
2596				if ((curr->offset == offset) && (curr->length == length)) {
2597					foundIt = 1;
2598					/* found it so remove it */
2599					prev->next = curr->next;
2600					SMB_FREE(curr, M_TEMP);
2601					break;
2602				}
2603				prev = curr;
2604				curr = curr->next;
2605			}
2606
2607			if (foundIt == 0) {
2608				SMBWARNING ("offset 0x%llx/0x%llx not found in fndEntry %p\n",
2609							offset, length, (void *)fndEntry);
2610			}
2611		}
2612	}
2613}
2614
2615/*
2616 * AddFileRef
2617 *
2618 * Create a new open deny file list entry.
2619 *
2620 * Return Values
 *	If fndEntry is not NULL then the new entry is returned through it.
2622 */
2623void
2624AddFileRef(vnode_t vp, struct proc *p, uint16_t accessMode, uint32_t rights,
2625           SMBFID fid, struct smb2_durable_handle dur_handle, struct fileRefEntry **fndEntry)
2626{
2627    struct smbnode	*np = VTOSMB(vp);
2628    struct fileRefEntry *entry = NULL;
2629    struct fileRefEntry *current = NULL;
2630
    /* Create a new fileRefEntry and insert it into the node's open deny list */
2632    SMB_MALLOC(entry, struct fileRefEntry *, sizeof (struct fileRefEntry),
2633               M_TEMP, M_WAITOK);
2634    entry->refcnt = 0;
2635    entry->mmapped = FALSE;
2636    entry->proc = p;
2637    entry->p_pid = proc_pid(p);
2638    entry->accessMode = accessMode;
2639    entry->rights = rights;
2640    entry->fid = fid;
2641    entry->lockList = NULL;
2642    entry->dur_handle = dur_handle;
2643    entry->next = NULL;
2644
2645    if (fndEntry) {
2646        *fndEntry = entry;
2647    }
2648
2649    lck_mtx_lock(&np->f_openDenyListLock);
2650
2651    if (np->f_openDenyList == NULL) {
2652        /* No other entries, so we are the first */
2653        np->f_openDenyList = entry;
2654    }
2655    else {
2656        /* look for last entry in the list */
2657        current = np->f_openDenyList;
2658        while (current->next != NULL) {
2659            current = current->next;
2660        }
2661
2662        /* put it at the end of the list */
2663        current->next = entry;
2664    }
2665
2666    lck_mtx_unlock(&np->f_openDenyListLock);
2667}
2668
2669/*
2670 * FindFileEntryByFID
2671 *
 * Find an entry in the open deny file list. Use the fid to locate the
2673 * entry.
2674 *
2675 * Return Values
2676 *	-1	No matching entry found
2677 *	0	Found a match
2678 */
2679int32_t
2680FindFileEntryByFID(vnode_t vp, SMBFID fid, struct fileRefEntry **fndEntry)
2681{
2682	struct fileRefEntry *entry = NULL;
2683	struct smbnode *np;
2684
2685#ifdef SMB_DEBUG
2686	if (fndEntry)
2687		DBG_ASSERT(*fndEntry == NULL);
2688#endif // SMB_DEBUG
2689
2690	/* If we have no vnode then we are done. */
2691	if (!vp) {
2692		return (-1);
2693    }
2694
2695	np = VTOSMB(vp);
2696	lck_mtx_lock(&np->f_openDenyListLock);
2697	/* Now search the list until we find a match */
2698	for (entry = np->f_openDenyList; entry; entry = entry->next) {
2699		if (entry->fid == fid) {
2700			if (fndEntry) {
2701				*fndEntry = entry;
2702            }
2703			lck_mtx_unlock(&np->f_openDenyListLock);
2704			return(0);
2705		}
2706	}
2707	lck_mtx_unlock(&np->f_openDenyListLock);
2708	return(-1);	/* No match found */
2709}
2710
2711/*
2712 * FindFileEntryByLeaseKey
2713 *
 * Find an entry in the open deny file list. Use the lease key to locate the
2715 * entry.
2716 *
2717 * Return Values
2718 *	-1	No matching entry found
2719 *	0	Found a match
2720 */
2721int32_t
2722FindFileEntryByLeaseKey(vnode_t vp, uint64_t lease_key_hi, uint64_t lease_key_low, struct fileRefEntry **fndEntry)
2723{
2724    struct fileRefEntry *entry = NULL;
2725    int32_t foundIt = FALSE;
2726    struct smbnode *np;
2727
2728    /* If we have no vnode then we are done. */
2729    if (!vp) {
2730        SMBDEBUG("no vnode? \n");
2731		return (foundIt);
2732    }
2733
2734    np = VTOSMB(vp);
2735    lck_mtx_lock(&np->f_openDenyListLock);
2736
2737    for (entry = np->f_openDenyList; entry; entry = entry->next) {
2738        if ((entry->dur_handle.lease_key_hi == lease_key_hi) &&
2739            (entry->dur_handle.lease_key_low == lease_key_low)) {
2740            if (fndEntry) {
2741                *fndEntry = entry;
2742            }
2743			foundIt = TRUE;
2744			break;
2745        }
2746    }
2747
2748    lck_mtx_unlock(&np->f_openDenyListLock);
2749	return (foundIt);
2750}
2751
2752/*
2753 * FindMappedFileRef
2754 *
 * Search the open deny file list looking for a mmapped entry. If they
 * requested the entry, return it; if they requested the fid, return it also.
2757 *
2758 * Return Values
2759 *	FALSE	No matching entry found
2760 *	TRUE	Found a match
2761 */
2762int32_t
2763FindMappedFileRef(vnode_t vp, struct fileRefEntry **fndEntry, SMBFID *fid)
2764{
2765	struct fileRefEntry *entry = NULL;
2766	int32_t foundIt = FALSE;
2767	struct smbnode	*np;
2768
2769	/* If we have no vnode then we are done. */
2770	if (!vp) {
2771		return (foundIt);
2772    }
2773
2774	np = VTOSMB(vp);
2775	lck_mtx_lock(&np->f_openDenyListLock);
2776	for (entry = np->f_openDenyList; entry; entry = entry->next) {
2777		if (entry->mmapped) {
2778			if (fid) {
2779			    *fid = entry->fid;
2780            }
2781			if (fndEntry) {
2782			    *fndEntry = entry;
2783            }
2784			foundIt = TRUE;
2785			break;
2786		}
2787	}
2788	lck_mtx_unlock(&np->f_openDenyListLock);
2789	return (foundIt);
2790}
2791
2792/*
2793 * FindFileRef
2794 *
 * Find an entry in the open deny file list. Use accessMode and flags to
2796 * locate the entry.
2797 *
2798 * Return Values
2799 *	-1	No matching entry found
2800 *	0	Found a match
2801 *			if fndEntry is not NULL it will point to that entry.
2802 *			fid now holds file reference id for that entry.
2803 */
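/*
 * For example (a sketch, not a real call site): before granting a new open
 * with deny modes, the caller can preflight it against the existing opens:
 *
 *	SMBFID fid = 0;
 *	if (FindFileRef(vp, p, kAccessWrite | kDenyWrite, kPreflightOpen,
 *	                0, 0, NULL, &fid) == 0) {
 *		// a conflicting open-deny entry exists; the new open should fail
 *	}
 */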
2804int32_t
2805FindFileRef(vnode_t vp, proc_t p, uint16_t accessMode, int32_t flags,
2806            int64_t offset, int64_t length, struct fileRefEntry **fndEntry,
2807            SMBFID *fid)
2808{
2809	struct fileRefEntry *entry = NULL;
2810	struct fileRefEntry *tempEntry = NULL;
2811	struct ByteRangeLockEntry *currBRL = NULL;
2812	int32_t foundIt = 0;
2813	struct smbnode	*np;
2814
2815#ifdef SMB_DEBUG
2816	if (fndEntry)
2817		DBG_ASSERT(*fndEntry == NULL);
2818#endif // SMB_DEBUG
2819	/* If we have no vnode then we are done. */
2820	if (!vp) {
2821		return (-1);
2822    }
2823
2824	np = VTOSMB(vp);
2825	lck_mtx_lock(&np->f_openDenyListLock);
2826	for (entry = np->f_openDenyList; entry; entry = entry->next) {
2827		/*
2828		 * Remember that p can be NULL, but in that case this is coming from the
2829		 * kernel and is not associated with a particular proc. In fact it just
2830		 * may be the pager itself trying to free up space and there is no proc.
2831		 * I need to find any proc that already has the fork open for read or
		 * write so that the read/write will work. So if there is no proc then just search
2833		 * the whole list and match on the first pid that matches the requested
2834		 * access.
2835		 */
2836		if ((p) && (entry->p_pid != proc_pid(p))) {
2837            SMBERROR("pid not matching \n");
2838			continue;
2839        }
2840
2841		switch (flags) {
2842            case kPreflightOpen:
2843                /*
2844                 * Look for denials locally first
2845                 * 1) If there is an existing exclusive open, then no other opens
2846                 * are allowed.
2847                 * 2) If there is an existing shared open (denyWrite), then no
2848                 * other opens for write are allowed
2849                 * 3) If there is an existing open with Write, then no other
2850                 * opens for denyWrite are allowed
2851                 */
2852                if ((entry->accessMode & kDenyRead) &&
2853                    (entry->accessMode & kDenyWrite)) {
2854                    foundIt = 1;
2855                }
2856
2857                if ((entry->accessMode & kDenyWrite) &&
2858                    (accessMode & kAccessWrite)) {
2859                    foundIt = 1;
2860                }
2861
2862                if ((entry->accessMode & kAccessWrite) &&
2863                    (accessMode & kDenyWrite)) {
2864                    foundIt = 1;
2865                }
2866               break;
2867            case kAnyMatch:
2868                /*
2869                 * if any fork will do, make sure at least have accessMode
2870                 * set. This is for the old ByteRangeLocks and other misc
2871                 * functions looking for a file ref
2872                 */
2873                if (entry->accessMode & accessMode) {
2874                    foundIt = 1;
2875                }
2876                break;
2877            case kCheckDenyOrLocks:
2878                /*
                 * This was originally written for Classic support, but after looking
                 * at it some more we decided it could happen in Carbon.
                 *
                 * Where I have the same PID on two different files, some BRLs taken,
                 * and a read/write occurring, I have to determine which file the
                 * read/write will succeed on due to any possible byte range locks
2885                 * already taken out.  Note that Classic keeps track of BRLs itself
2886                 * and will not block any read/writes that would fail due to a BRL.
2887                 * I just have to find the correct fork so that the read/write will
2888                 * succeed. Example:  open1 rw/DW, open2 r, lock1 0-5, read1 0-5
2889                 * should occur on fork1 and not fork2
2890                */
2891                /* make sure we have correct access */
2892                if (entry->accessMode & accessMode) {
2893                    /*
2894                     * save this entry in case we find no entry with a matching BRL.
2895                     * saves me from having to search all over again for an OpenDeny match
2896                     */
2897                    if (tempEntry == NULL) {
2898                        tempEntry = entry;
2899                    }
2900
2901                    /* check the BRLs to see if the offset/length fall inside one of them */
2902                    currBRL = entry->lockList;
2903                    while (currBRL != NULL) {
2904                        /* is start of read/write inside of the BRL? */
2905                        if ( (offset >= currBRL->offset) &&
2906                            (offset <= (currBRL->offset + currBRL->length)) ) {
2907                            foundIt = 1;
2908                            break;
2909                        }
2910                        /* is end of read/write inside of the BRL? */
2911                        if ( ((offset + length) >= currBRL->offset) &&
2912                            ((offset + length) <= (currBRL->offset + currBRL->length)) ) {
2913                            foundIt = 1;
2914                            break;
2915                        }
2916                        currBRL = currBRL->next;
2917                    }
2918                }
2919                break;
2920
2921            case kExactMatch:
2922            default:
2923                /*
2924                 * If we want an exact match, then check access mode too
2925                 * This is for ByteRangeLocks and closing files
2926                 */
2927                if (accessMode == entry->accessMode) {
2928                    foundIt = 1;
2929                }
2930                else {
2931                    /*
2932                     * In OS 9.x, if you opened a file for read only and it
2933                     * failed, and there was a file opened already for
2934                     * read/write, then open worked. Weird. For X, if first
2935                     * open was r/w/dR/dW, r/w/dW, r/dR/dW, or r/dW, then a
2936                     * second open from same pid asking for r/dR/dW or r/dW
2937                     * will be allowed.
2938                     *
2939                     * See Radar 5050120, 11024374 for an example of this
2940                     * happening.
2941                     *
2942                     * For kExactMatch, we know for sure either dW or dR/dW was
2943                     * done.
2944                     */
2945                    if ((accessMode & kDenyWrite) &&
2946                        (accessMode & kAccessRead) &&
2947                        !(accessMode & kAccessWrite)) {
2948                        /* We are looking for a match for r/dR/dW or r/dW */
2949                        if (entry->accessMode & kAccessRead) {
2950                            /*
2951                             * existing entry has read, so has to have been
2952                             * r/w/dR/dW, r/w/dW, r/dR/dW, or r/dW and thus
2953                             * this is a match
2954                             */
2955                            foundIt = 1;
2956                        }
2957                    }
2958                }
2959                break;
2960		}
2961
2962		if (foundIt == 1) {
2963			*fid = entry->fid;
2964			if (fndEntry) {
2965                *fndEntry = entry;
2966            }
2967			break;
2968		}
2969	}
2970	lck_mtx_unlock(&np->f_openDenyListLock);
2971
2972	/* Will only happen after we add byte range locking support */
2973	if (foundIt == 0) {
2974		if ( (flags == kCheckDenyOrLocks) && (tempEntry != NULL) ) {
2975			/*
2976             * Did not find any BRL that matched, see if there was a match
2977             * with an OpenDeny
2978             */
2979			*fid = tempEntry->fid;
2980			if (fndEntry) {
                *fndEntry = tempEntry;
2982            }
2983			return (0);
2984		}
2985		return (-1);    /* fork not found */
2986	}
2987	else
2988		return (0);
2989}
2990
2991/*
2992 * RemoveFileRef
2993 *
2994 * Remove the entry that was passed in from the list and free it. If no entry is
2995 * passed in then remove all entries.
2996 *
2997 * Return Values
2998 *	none
2999 */
3000void
3001RemoveFileRef(vnode_t vp, struct fileRefEntry *inEntry)
3002{
3003	struct smbnode	*np = VTOSMB(vp);
3004	struct fileRefEntry *curr = NULL;
3005	struct fileRefEntry *prev = NULL;
3006	struct fileRefEntry *entry = NULL;
3007	struct ByteRangeLockEntry *currBRL = NULL;
3008	struct ByteRangeLockEntry *nextBRL = NULL;
3009	int32_t foundIt = 0;
3010
3011	lck_mtx_lock(&np->f_openDenyListLock);
3012	if (inEntry == NULL) {	/* Means remove all */
3013		entry = np->f_openDenyList;
3014		while (entry != NULL) {
3015			/* wipe out the ByteRangeLockEntries first */
3016			currBRL = entry->lockList;
3017			while (currBRL != NULL) {
3018				nextBRL = currBRL->next; /* save next in list */
3019				SMB_FREE (currBRL, M_TEMP);	 /* free current entry */
3020				currBRL = nextBRL;	 /* and on to the next */
3021			}
3022			entry->lockList = NULL;
3023			/* now wipe out the file refs */
3024			curr = entry;
3025			entry = entry->next;
3026			DBG_ASSERT(curr->refcnt == 0);
3027			SMB_FREE(curr, M_TEMP);
3028		}
3029		np->f_openDenyList = NULL;
3030		goto out;
3031	}
3032	DBG_ASSERT(inEntry->refcnt == 0);
3033
3034	/* wipe out the ByteRangeLockEntries first */
3035	currBRL = inEntry->lockList;
3036	while (currBRL != NULL) {
3037		nextBRL = currBRL->next;	/* save next in list */
3038		SMB_FREE(currBRL, M_TEMP);		/* free current entry */
3039		currBRL = nextBRL;		/* and on to the next */
3040	}
3041	inEntry->lockList = NULL;
3042
3043	/* Remove the fileRefEntry */
3044	curr = np->f_openDenyList;
3045	if (curr == NULL)
3046		goto out;
3047	/*
3048	 * if its the first entry in the list, then just set the first
3049	 * entry to be entry->next
3050	 */
3051	if (inEntry == curr) {
3052		np->f_openDenyList = inEntry->next;
3053		foundIt = 1;
3054		SMB_FREE(curr, M_TEMP);
3055		curr = NULL;
3056	} else {
3057		// its not the first, so search the rest
3058		prev = np->f_openDenyList;
3059		curr = prev->next;
3060		while (curr != NULL) {
3061			if (inEntry == curr) {
3062				prev->next = curr->next;
3063				foundIt = 1;
3064				SMB_FREE(curr, M_TEMP);
3065				curr = NULL;
3066				break;
3067			}
3068			prev = curr;
3069			curr = curr->next;
3070		}
3071	}
3072	if (foundIt == 0)
3073		SMBWARNING ("inEntry %p not found in vp %p\n", (void *)inEntry, (void *)vp);
3074out:
3075	lck_mtx_unlock(&np->f_openDenyListLock);
3076}
3077
3078static void
3079smb1fs_reconnect(struct smbmount *smp)
3080{
3081    struct smbnode *np;
3082    uint32_t ii;
3083
3084    /* Get the hash lock */
3085    smbfs_hash_lock(smp);
3086
3087    /* We have a hash table for each mount point */
3088    for (ii = 0; ii < (smp->sm_hashlen + 1); ii++) {
3089        if ((&smp->sm_hash[ii])->lh_first == NULL)
3090            continue;
3091
3092        for (np = (&smp->sm_hash[ii])->lh_first; np; np = np->n_hash.le_next) {
3093            if (ISSET(np->n_flag, NALLOC))
3094                continue;
3095
3096            if (ISSET(np->n_flag, NTRANSIT))
3097                continue;
3098
3099            /*
3100             * Someone is monitoring this item and we reconnected. Force a
3101             * notify update.
3102             */
3103            if (np->n_vnode && (vnode_ismonitored(np->n_vnode))) {
3104                SMBDEBUG_LOCK(np, "%s needs to be updated.\n", np->n_name);
3105
3106                /* Do we need to reopen this item */
3107                if ((np->n_dosattr & SMB_EFA_DIRECTORY) && (np->d_fid != 0)) {
3108                    np->d_needReopen = TRUE;
3109                }
3110
3111                /* Force a network lookup */
3112                np->attribute_cache_timer = 0;
3113                np->n_symlink_cache_timer = 0;
3114                np->d_needsUpdate = TRUE;
3115            }
3116
3117            /* Nothing else to do with directories at this point */
3118            if (np->n_dosattr & SMB_EFA_DIRECTORY) {
3119                continue;
3120            }
3121
3122            /* We only care about open files */
3123            if (np->f_refcnt == 0) {
3124                continue;
3125            }
3126
3127            /*
             * We have an open file; mark it to be reopened.
3129             *
3130             * 1. Plain old POSIX open with no locks. Only revoke if reopen fails.
3131             * 2. POSIX open with a flock. Revoke if reopen fails. Otherwise
3132             *	  reestablish the lock. If the lock fails then mark it to be revoked.
3133             * 3. POSIX open with POSIX locks. (We do not support posix locks)
             * 4. Shared or Exclusive OpenDeny. We now always revoke.
             * 5. Carbon Mandatory Locks. We now always revoke.
3136             */
3137            lck_mtx_lock(&np->f_openStateLock);
3138
3139            /* Once it has been revoked it stays revoked */
3140            if (!(np->f_openState & kNeedRevoke))	{
3141                if (np->f_openDenyList) {
3142                    /*
3143                     * We always revoke opens that have mandatory
3144                     * locks or deny modes
3145                     */
3146                    np->f_openState |= kNeedRevoke;
3147                }
3148                else {
3149                    /* Reopen lazily later */
3150                    np->f_openState |= kNeedReopen;
3151                }
3152            }
3153
3154            lck_mtx_unlock(&np->f_openStateLock);
3155        }
3156    }
3157
3158    smbfs_hash_unlock(smp);
3159}
3160
3161static void
3162smb2fs_reconnect(struct smbmount *smp)
3163{
3164    struct smbnode *np;
3165    uint32_t ii;
3166    struct smbfattr *fap = NULL;
3167    struct smb_vc *vcp;
3168	struct fileRefEntry *current = NULL;
3169    int error;
3170    SMB2FID temp_fid;
3171    uint32_t need_reopen = 0, done;
3172
3173    vcp = SSTOVC(smp->sm_share);
3174
3175    SMB_MALLOC(fap,
3176               struct smbfattr *,
3177               sizeof(struct smbfattr),
3178               M_SMBTEMP,
3179               M_WAITOK | M_ZERO);
3180
3181    /* Attempt to resend AAPL create context */
3182    if ((smp->sm_rvp != NULL) &&
3183        (vcp->vc_misc_flags & SMBV_OSX_SERVER)) {
3184        SMBDEBUG("Clearing OS X server flags\n");
3185        vcp->vc_misc_flags &= ~(SMBV_OSX_SERVER | SMBV_OTHER_SERVER);
3186
3187        if (smp->sm_args.altflags & SMBFS_MNT_AAPL_OFF) {
3188            /* Turn off AAPL */
3189            SMBWARNING("AAPL has been turned off for %s volume\n",
3190                       (smp->sm_args.volume_name) ? smp->sm_args.volume_name : "");
3191            vcp->vc_misc_flags |= SMBV_OTHER_SERVER;
3192        }
3193        else {
3194            /*
3195             * Use iod_context so we can tell this is from reconnect
3196             * Share was locked from smb_iod_reconnect, so have to unlock it
3197             * otherwise we can deadlock in iod code when the share lock is
3198             * attempted to be locked again.
3199             */
3200            lck_mtx_unlock(&smp->sm_share->ss_shlock);
3201
3202            /* Send a Create/Close */
3203            smb2fs_smb_cmpd_create(smp->sm_share, VTOSMB(smp->sm_rvp),
3204                                   NULL, 0,
3205                                   NULL, 0,
3206                                   SMB2_FILE_READ_ATTRIBUTES | SMB2_SYNCHRONIZE, VDIR,
3207                                   NTCREATEX_SHARE_ACCESS_ALL, FILE_OPEN,
3208                                   SMB2_CREATE_AAPL_QUERY, NULL,
3209                                   NULL, fap,
3210                                   NULL, vcp->vc_iod->iod_context);
3211
3212            lck_mtx_lock(&smp->sm_share->ss_shlock);
3213        }
3214    }
3215
3216    /*
     * <13934847> We cannot hold the hash lock while we reopen files as
     * we end up deadlocked. First go through the entire list holding the
     * hash lock and just mark the vnodes that need to be reopened with the
3220     * kNeedReopen flag.
3221     */

    /* Get the hash lock */
    smbfs_hash_lock(smp);

    /* We have a hash table for each mount point */
    for (ii = 0; ii < (smp->sm_hashlen + 1); ii++) {
        if ((&smp->sm_hash[ii])->lh_first == NULL)
            continue;

        for (np = (&smp->sm_hash[ii])->lh_first; np; np = np->n_hash.le_next) {
            if (ISSET(np->n_flag, NALLOC))
                continue;

            if (ISSET(np->n_flag, NTRANSIT))
                continue;

            /*
             * Someone is monitoring this item and we reconnected. Force a
             * notify update.
             */
            if (np->n_vnode && (vnode_ismonitored(np->n_vnode))) {
                SMBDEBUG_LOCK(np, "%s needs to be updated.\n", np->n_name);

                /* Do we need to reopen this item */
                if ((np->n_dosattr & SMB_EFA_DIRECTORY) && (np->d_fid != 0)) {
                    np->d_needReopen = TRUE;

                    /* Remove the open fid from the fid table */
                    smb_fid_get_kernel_fid(smp->sm_share, np->d_fid,
                                           1, &temp_fid);
                }

                /* Force a network lookup */
                np->attribute_cache_timer = 0;
                np->n_symlink_cache_timer = 0;
                np->d_needsUpdate = TRUE;
            }

            if (np->n_dosattr & SMB_EFA_DIRECTORY) {
                if (np->d_fctx != NULL) {
                    /* Enumeration open dir is now closed, lazily reopen it */
                    np->d_fctx->f_need_close = FALSE;

                    /* Remove the open fid from the fid table */
                    smb_fid_get_kernel_fid(smp->sm_share,
                                           np->d_fctx->f_create_fid,
                                           1, &temp_fid);
                }

                /* Nothing else to do with directories at this point */
                continue;
            }

            /*
             * Only files from here on
             */

            if (np->f_refcnt == 0) {
                /* No open files, so done with this file */
                continue;
            }

            /* Once it has been revoked it stays revoked */
            lck_mtx_lock(&np->f_openStateLock);
            if (np->f_openState & kNeedRevoke) {
                lck_mtx_unlock(&np->f_openStateLock);
                continue;
            }
            else {
                /* Will try to reopen the files */
                np->f_openState |= kNeedReopen;

                /* Mark that at least one file needs to be reopened */
                need_reopen = 1;
            }
            lck_mtx_unlock(&np->f_openStateLock);
        } /* for np loop */
    } /* for ii loop */

    /* Free the hash lock */
    smbfs_hash_unlock(smp);

    if (need_reopen == 0) {
        /* No files need to be reopened, so leave */
        goto exit;
    }

    /*
     * <13934847> We cannot hold the hash lock while we reopen files or we
     * end up deadlocked. Now go through the list again holding the hash
     * lock; if a vnode needs to be reopened, drop the hash lock, clear
     * kNeedReopen, attempt to reopen the vnode, then start at the beginning
     * of the loop again until there are no more vnodes that need to be
     * reopened.
     */
    done = 0;

    while (done == 0) {
        /* Assume there are no files to be reopened */
        done = 1;

        /* Get the hash lock */
        smbfs_hash_lock(smp);

        /* We have a hash table for each mount point */
        for (ii = 0; ii < (smp->sm_hashlen + 1); ii++) {
            if ((&smp->sm_hash[ii])->lh_first == NULL)
                continue;

            for (np = (&smp->sm_hash[ii])->lh_first; np; np = np->n_hash.le_next) {
                if (ISSET(np->n_flag, NALLOC))
                    continue;

                if (ISSET(np->n_flag, NTRANSIT))
                    continue;

                if (np->n_dosattr & SMB_EFA_DIRECTORY) {
                    continue;
                }

                /* Once it has been revoked it stays revoked */
                lck_mtx_lock(&np->f_openStateLock);
                if (np->f_openState & kNeedReopen) {
                    /*
                     * Need to reopen this file. Clear kNeedReopen state, this
                     * way we know if a reconnect happened during reopen. Set
                     * kInReopen so smbfs_attr_cacheenter() will not be called.
                     */
                    np->f_openState &= ~kNeedReopen;
                    np->f_openState |= kInReopen;
                    lck_mtx_unlock(&np->f_openStateLock);
                }
                else {
                    /* This file does not need to be reopened */
                    lck_mtx_unlock(&np->f_openStateLock);
                    continue;
                }

                /*
                 * Free the hash lock. This is why we have to redo the entire
                 * while loop, as the hash table may change once it is dropped.
                 */
                done = 0;
                smbfs_hash_unlock(smp);

                /*
                 * For all network calls, use iod_context so we can tell the
                 * request is from reconnect and thus it won't get blocked
                 * waiting for credits.
                 *
                 * The share was locked by smb_iod_reconnect, so we have to
                 * unlock it; otherwise we can deadlock in the iod code when
                 * it tries to take the share lock again.
                 */

                /*
                 * Always check f_openDenyList as well, in case we need to
                 * remove all of those fids too.
                 */

                error = 0;

                /*
                 * Reopen any fids on the f_openDenyList.
                 *
                 * We should lock f_openDenyListLock but we cannot because
                 * we would deadlock.
                 */
                if (np->f_openDenyList) {
                    current = np->f_openDenyList;

                    do {
                        /*
                         * Any previous error causes us to skip reopening the
                         * rest of the fids and just close them instead.
                         */
                        if (error == 0) {
                            if (current->dur_handle.flags & SMB2_DURABLE_HANDLE_GRANTED) {
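                                /*
                                 * A durable handle was granted on the original
                                 * open. Convert it into a durable handle
                                 * reconnect request and reopen the file using
                                 * the saved fid on the iod context.
                                 */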
                                current->dur_handle.flags |= SMB2_DURABLE_HANDLE_RECONNECT;
                                current->dur_handle.flags &= ~(SMB2_DURABLE_HANDLE_GRANTED |
                                                               SMB2_LEASE_GRANTED);
                                current->dur_handle.fid = current->fid;

                                lck_mtx_unlock(&smp->sm_share->ss_shlock);
                                error = smbfs_smb_ntcreatex(smp->sm_share, np,
                                                            0, 0, VREG,
                                                            &current->fid, NULL, 0,
                                                            0, FALSE, fap,
                                                            FALSE, &current->dur_handle, vcp->vc_iod->iod_context);
                                lck_mtx_lock(&smp->sm_share->ss_shlock);
                                if (error) {
                                    SMBERROR_LOCK(np, "Warning: Could not reopen %s \n", np->n_name);
                                }
                            }
                            else {
                                /*
                                 * Failed to get a durable handle when this file
                                 * was opened, so we cannot reopen this file
                                 */
                                SMBERROR_LOCK(np, "Missing durable handle %s \n", np->n_name);
                                error = EBADF;
                            }
                        }

                        if (error) {
                            /* Remove the open fid from the fid table */
                            smb_fid_get_kernel_fid(smp->sm_share, current->fid,
                                                   1, &temp_fid);
                        }

                        /* On to next fid */
                        current = current->next;
                    } while (current != NULL);

                    lck_mtx_lock(&np->f_openStateLock);

                    if (error) {
                        /* Mark the file as revoked */
                        np->f_openState |= kNeedRevoke;
                    } else if (np->f_fid == 0) {
                        /* No shared forks to open, we can clear kInReopen now */
                        np->f_openState &= ~kInReopen;
                    }

                    lck_mtx_unlock(&np->f_openStateLock);
                }

                /*
                 * Reopen the shared fork if one is present. Do this AFTER the
                 * f_openDenyList opens so we don't break any handle leases.
                 */
                if (np->f_fid != 0) {
                    /* Only reopen if no error from open deny list opens */
                    if (error == 0) {
                        lck_mtx_unlock(&smp->sm_share->ss_shlock);
                        error = smbfs_smb_reopen_file(smp->sm_share, np,
                                                      vcp->vc_iod->iod_context);
                        /*
                         * smbfs_smb_reopen_file() sets the correct f_openState
                         * for us
                         */
                        lck_mtx_lock(&smp->sm_share->ss_shlock);
                    }

                    if (error) {
                        /*
                         * On failure, file is marked for revoke so we are done
                         * Remove the open fid from the fid table
                         */
                        smb_fid_get_kernel_fid(smp->sm_share, np->f_fid,
                                               1, &temp_fid);
                    }
                }

                /*
                 * Paranoid check - it's possible that we get reconnected while
                 * we are trying to reopen, which would reset kInReopen and
                 * could keep us looping forever. For now, we only try once to
                 * reopen a file and that's it. May have to rethink this if it
                 * becomes a problem.
                 */
                lck_mtx_lock(&np->f_openStateLock);

                if (np->f_openState & kNeedReopen) {
                    SMBERROR_LOCK(np, "Only one attempt to reopen %s \n", np->n_name);
                    np->f_openState &= ~kNeedReopen;

                    /* Mark the file as revoked */
                    np->f_openState |= kNeedRevoke;
                }

                lck_mtx_unlock(&np->f_openStateLock);

                /*
                 * Since we dropped the hash lock, we have to start the while
                 * loop again and search the entire hash table from the
                 * beginning.
                 */
                goto loop_again; /* skip out of np and ii loops */

            } /* for np loop */
        } /* for ii loop */

loop_again:

        if (done == 1) {
            /* If we get here, then we must not have found any files to reopen */
            smbfs_hash_unlock(smp);
        }
    }

exit:
    if (fap) {
        SMB_FREE(fap, M_SMBTEMP);
    }
}

/*
 * The share needs to be locked before calling this routine!
 *
 * Walk the hash table (remember we have one per mount point) looking for open
 * files and hand off to the protocol-specific reconnect handler. SMB 2/3
 * attempts to reopen files; SMB 1 only marks them to be reopened lazily or
 * revoked.
 */
void
smbfs_reconnect(struct smbmount *smp)
{
    struct smb_vc *vcp;

    KASSERT(smp != NULL, ("smp is null"));

    vcp = SSTOVC(smp->sm_share);
    KASSERT(vcp != NULL, ("vcp is null"));

    if (vcp->vc_flags & SMBV_SMB2) {
        smb2fs_reconnect(smp);
    }
    else {
        smb1fs_reconnect(smp);
    }
}

/*
 * The share needs to be locked before calling this routine!
 *
 * Search the hash table looking for any files open for write or any files
 * that have dirty blocks in UBC. If any are found, return EBUSY, else
 * return 0.
 */
int32_t
smbfs_IObusy(struct smbmount *smp)
{
    struct smbnode *np;
    uint32_t ii;

    /* lock hash table before we walk it */
    smbfs_hash_lock(smp);

    /* We have a hash table for each mount point */
    for (ii = 0; ii < (smp->sm_hashlen + 1); ii++) {
        if ((&smp->sm_hash[ii])->lh_first == NULL)
            continue;

        for (np = (&smp->sm_hash[ii])->lh_first; np; np = np->n_hash.le_next) {
            if (ISSET(np->n_flag, NALLOC))
                continue;

            if (ISSET(np->n_flag, NTRANSIT))
                continue;

            /* Nothing else to do with directories at this point */
            if (np->n_dosattr & SMB_EFA_DIRECTORY) {
                continue;
            }
            /* We only care about open files */
            if (np->f_refcnt == 0) {
                continue;
            }

            if ((np->f_openTotalWCnt > 0) || (vnode_hasdirtyblks(SMBTOV(np)))) {
                /* Found one busy file so return EBUSY */
                smbfs_hash_unlock(smp);
                return EBUSY;
            }
        }
    }

    smbfs_hash_unlock(smp);

    /* No files open for write and no files with dirty UBC data */
    return 0;
}

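/*
 * smbfs_ClearChildren
 *
 * Walk the hash table and, for every smbnode whose parent is the given node,
 * clear its parent pointer and NREFPARENT flag. Called while reclaiming the
 * parent so that children never reference a parent that is going away.
 */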
void
smbfs_ClearChildren(struct smbmount *smp, struct smbnode *parent)
{
    struct smbnode *np;
    uint32_t ii;

    /* lock hash table before we walk it */
    smbfs_hash_lock(smp);

    /* We have a hash table for each mount point */
    for (ii = 0; ii < (smp->sm_hashlen + 1); ii++) {
        if ((&smp->sm_hash[ii])->lh_first == NULL)
            continue;

        for (np = (&smp->sm_hash[ii])->lh_first; np; np = np->n_hash.le_next) {
            lck_rw_lock_exclusive(&np->n_parent_rwlock);

            if (np->n_parent == parent) {
                if (ISSET(np->n_flag, NALLOC)) {
                    /*
                     * Now if (np->n_parent == parent) : OOPS
                     *
                     * Parent is in reclaim and child is in alloc.
                     * Most likely this is a forced unmount, but we should
                     * never have gotten here, i.e. SMB should never create a
                     * new child smbnode while its parent is in reclaim. In
                     * fact, this can be verified by the fact that every
                     * function (vfs ops) calling smbfs_nget() and
                     * smbfs_vgetstrm() takes an exclusive lock on the parent.
                     * So while in NALLOC, the parent can't proceed in
                     * smbfs_vnop_reclaim() since it would wait on this lock
                     * at the very beginning. Looking at the code, it makes no
                     * sense that we could ever hit this situation.
                     * Fixed in <rdar://problem/12442700>.
                     */
                    SMBERROR("%s : Allocating child smbnode when parent "
                             "is in reclaim\n", __FUNCTION__);
                }

                if (ISSET(np->n_flag, NTRANSIT)) {
                    /*
                     * Now if (np->n_parent == parent) : OOPS
                     *
                     * Parent is in reclaim and the child is in reclaim too.
                     * Most likely this is a forced unmount, but we should
                     * never have gotten here, i.e. SMB should never reclaim
                     * a child smbnode while the parent is still in reclaim.
                     * Looking at the code in smbfs_vnop_reclaim(), the parent
                     * can't acquire sm_reclaim_lock and call
                     * smbfs_ClearChildren() if the child is already in
                     * NTRANSIT, since the child holds sm_reclaim_lock.
                     * So even in the case of a forced unmount, EITHER the
                     * parent can be here and the child is yet to enter
                     * NTRANSIT, OR the child holds this lock and is in
                     * NTRANSIT.
                     * Fixed in <rdar://problem/12442700>.
                     */
                    SMBERROR("%s : Child smbnode is in reclaim when parent "
                             "is still in reclaim\n", __FUNCTION__);
                }

                /* Clear the parent reference for this child */
                np->n_flag &= ~NREFPARENT;
                np->n_parent = NULL;
            }

            lck_rw_unlock_exclusive(&np->n_parent_rwlock);
        }
    }

    smbfs_hash_unlock(smp);
}

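/*
 * smbfs_handle_lease_break
 *
 * Called when the server sends a lease break. Recover the tree id and hash
 * value from the lease key, find the matching vnode in the hash table without
 * taking the node lock (see below for why), then update the lease state on
 * the file ref entry that owns this lease key.
 */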
int
smbfs_handle_lease_break(struct smbmount *smp, uint64_t lease_key_hi,
                         uint64_t lease_key_low, uint32_t new_lease_state)
{
    int error = 0;
    uint32_t tree_id = 0;
    uint64_t hash_val = 0;
    vnode_t vp = NULL;
    struct fileRefEntry *entry;
    struct smbnode_hashhead *nhpp;
    struct smbnode *np;
    uint32_t vid;

    /* Get hash value from lease key */
    smb2_smb_dur_handle_parse_lease_key(lease_key_hi, lease_key_low,
                                        &tree_id, &hash_val);

    /*
     * The server must support File IDs since we have no name/name_len to use.
     * Find the vnode using the hash value, but SKIP locking it!
     * A deadlock happens when file A is open with a durable handle and then
     * another process opens file A after taking the node lock. That open
     * request goes to the server, which generates a lease break on file A.
     * If you then try to take the node lock while processing the lease break,
     * you end up deadlocked.
     */
loop:
    smbfs_hash_lock(smp);

    nhpp = SMBFS_NOHASH(smp, hash_val);
    LIST_FOREACH(np, nhpp, n_hash) {
        if (np->n_ino != hash_val) {
            continue;
        }

        if (ISSET(np->n_flag, NALLOC)) {
            SET(np->n_flag, NWALLOC);
            (void) msleep((caddr_t)np, smp->sm_hashlock, PINOD | PDROP, "smb_ngetalloc", 0);
            goto loop;
        }

        if (ISSET(np->n_flag, NTRANSIT)) {
            SET(np->n_flag, NWTRANSIT);
            (void) msleep((caddr_t)np, smp->sm_hashlock, PINOD | PDROP, "smb_ngettransit", 0);
            goto loop;
        }

        /*
         * Found a match, get the vnode
         */
        vp = SMBTOV(np);
        vid = vnode_vid(vp);

        if (vnode_getwithvid(vp, vid)) {
            /* Failed to get vnode */
            continue;
        }

        /* See if this vnode has the file ref entry that matches lease key */
        if (FindFileEntryByLeaseKey(vp, lease_key_hi, lease_key_low, &entry) == TRUE) {
            /*
             * At this time we do nothing with the lease; it's just used for
             * getting durable handles. Later, when we actually use leases for
             * local caching, the lease break handling code should be moved to
             * the change notify thread instead of running on the iod thread.
             */
            entry->dur_handle.lease_state = new_lease_state;
            error = 0;
            vnode_put(vp);
            break;
        }
        else {
            SMBERROR("No fileRefEntry found for lease break \n");
            vnode_put(vp);
            continue;
        }
    }

    smbfs_hash_unlock(smp);

    return (error);
}
