1/*
2 * Copyright (c) 2006 - 2012 Apple Inc. All rights reserved.
3 *
4 * @APPLE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. Please obtain a copy of the License at
10 * http://www.opensource.apple.com/apsl/ and read it before using this
11 * file.
12 *
13 * The Original Code and all software distributed under the License are
14 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
15 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
16 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
18 * Please see the License for the specific language governing rights and
19 * limitations under the License.
20 *
21 * @APPLE_LICENSE_HEADER_END@
22 */
23
24#include <sys/param.h>
25#include <sys/kauth.h>
26#include <libkern/OSAtomic.h>
27
28#include <sys/smb_apple.h>
29
30#include <netsmb/smb.h>
31#include <netsmb/smb_2.h>
32#include <netsmb/smb_rq.h>
33#include <netsmb/smb_rq_2.h>
34#include <netsmb/smb_conn.h>
35#include <netsmb/smb_conn_2.h>
36#include <smbfs/smbfs.h>
37#include <smbfs/smbfs_node.h>
38#include <smbfs/smbfs_subr.h>
39#include <smbfs/smbfs_subr_2.h>
40#include "smbfs_notify_change.h"
41
42extern lck_attr_t *smbfs_lock_attr;
43extern lck_grp_t *smbfs_mutex_group;
44
45#define NOTIFY_CHANGE_SLEEP_TIMO	15
46#define NOTIFY_THROTTLE_SLEEP_TIMO	5
47#define SMBFS_MAX_RCVD_NOTIFY		4
48#define SMBFS_MAX_RCVD_NOTIFY_TIME	1
49
50
51/* For now just notify on these item, may want to watch on more in the future */
/*
 * Parenthesize the whole expansion so the OR'd filter set cannot be broken
 * apart by operator precedence at a use site.
 */
#define SMBFS_NOTIFY_CHANGE_FILTERS	(FILE_NOTIFY_CHANGE_FILE_NAME | \
									FILE_NOTIFY_CHANGE_DIR_NAME | \
									FILE_NOTIFY_CHANGE_ATTRIBUTES | \
									FILE_NOTIFY_CHANGE_CREATION | \
									FILE_NOTIFY_CHANGE_SECURITY | \
									FILE_NOTIFY_CHANGE_STREAM_SIZE | \
									FILE_NOTIFY_CHANGE_STREAM_WRITE)
59
60/* For server message notify, we set everything, which is one of the
61 * ways the server can tell it's a server message notify, and not
62 * a normal notify change type.
63 */
/*
 * Parenthesize the whole expansion so the OR'd filter set cannot be broken
 * apart by operator precedence at a use site.
 */
#define SMBFS_SVRMSG_NOTIFY_FILTERS	(FILE_NOTIFY_CHANGE_FILE_NAME | \
                                    FILE_NOTIFY_CHANGE_DIR_NAME | \
                                    FILE_NOTIFY_CHANGE_ATTRIBUTES | \
                                    FILE_NOTIFY_CHANGE_SIZE	| \
                                    FILE_NOTIFY_CHANGE_LAST_WRITE | \
                                    FILE_NOTIFY_CHANGE_LAST_ACCESS |\
                                    FILE_NOTIFY_CHANGE_CREATION | \
                                    FILE_NOTIFY_CHANGE_EA | \
                                    FILE_NOTIFY_CHANGE_SECURITY | \
                                    FILE_NOTIFY_CHANGE_STREAM_NAME | \
                                    FILE_NOTIFY_CHANGE_STREAM_SIZE | \
                                    FILE_NOTIFY_CHANGE_STREAM_WRITE)
76
77/*
78 * notify_wakeup
79 *
80 * Wake up the thread and tell it there is work to be done.
81 *
82 */
83static void
84notify_wakeup(struct smbfs_notify_change * notify)
85{
86	notify->haveMoreWork = TRUE;		/* we have work to do */
87	wakeup(&(notify)->notify_state);
88}
89
90/*
91 * notify_callback_completion
92 */
93static void
94notify_callback_completion(void *call_back_args)
95{
96	struct watch_item *watchItem = (struct watch_item *)call_back_args;
97
98	lck_mtx_lock(&watchItem->watch_statelock);
99	if ((watchItem->state != kCancelNotify) &&
100		(watchItem->state != kWaitingForRemoval)) {
101		watchItem->state = kReceivedNotify;
102	}
103	lck_mtx_unlock(&watchItem->watch_statelock);
104	notify_wakeup(watchItem->notify);
105}
106
107/*
108 * reset_notify_change
109 *
110 * Remove  the request from the network queue. Now cleanup and remove any
111 * allocated memory.
112 */
static void
reset_notify_change(struct watch_item *watchItem, int RemoveRQ)
{
	/* For SMB 1 the request lives inside the nt transaction (ntp->nt_rq) */
	struct smb_ntrq *ntp = watchItem->ntp;
	struct smb_rq *	rqp = (watchItem->ntp) ? watchItem->ntp->nt_rq : NULL;

    if (watchItem->flags & SMBV_SMB2) {
        /* Using SMB 2/3: the request is tracked directly on the watch item */
        rqp = watchItem->rqp;
    }

    if (rqp) {
		if (RemoveRQ) {
            /* Needs to be removed from the iod queue before it can be freed */
            smb_iod_removerq(rqp);
            if (ntp) {
                /* Detach the rqp from the transaction so smb_nt_done won't touch it */
                watchItem->ntp->nt_rq = NULL;
            }
        }
		smb_rq_done(rqp);
	}
	if (ntp)
		smb_nt_done(ntp);

    watchItem->ntp = NULL;

    if (watchItem->flags & SMBV_SMB2) {
        /* The request was released above; clear our stale reference */
        watchItem->rqp = NULL;
    }
}
143
144/*
145 * smbfs_notified_vnode
146 *
147 * See if we can update the node and notify the monitor.
148 */
static void
smbfs_notified_vnode(struct smbnode *np, int throttleBack, uint32_t events,
					 vfs_context_t context)
{
	struct smb_share *share = NULL;
	struct vnode_attr vattr;
	vnode_t		vp;

	/* No open directory fid, or we couldn't get the shared lock: nothing to do */
	if ((np->d_fid == 0) || (smbnode_lock(np, SMBFS_SHARED_LOCK) != 0)) {
		return; /* Nothing to do here */
    }

    SMB_LOG_KTRACE(SMB_DBG_SMBFS_NOTIFY | DBG_FUNC_START,
                   throttleBack, events, np->d_fid, 0, 0);

    if (!throttleBack) {
        /*
         * Always reset the cache timer and force a lookup except for ETIMEDOUT
         * where we want to return cached meta data if possible. When we stop
         * throttling, we will do an update at that time.
         */
        np->attribute_cache_timer = 0;
        np->n_symlink_cache_timer = 0;
    }

	/*
	 * The fid changed while we were blocked just unlock and get out. If we are
	 * throttling back then skip this notify.
	 */
	if ((np->d_fid == 0) || throttleBack) {
		goto done;
    }

	np->n_lastvop = smbfs_notified_vnode;
	vp = np->n_vnode;

    /* If they have a nofication with a smbnode, then we must have a vnode */
    if (vnode_get(vp)) {
        /* The vnode could be going away, skip out nothing to do here */
		goto done;
    }
    /* Should never happen but lets test and make sure */
     if (VTOSMB(vp) != np) {
         SMBWARNING_LOCK(np, "%s vnode_fsnode(vp) and np don't match!\n", np->n_name);
         vnode_put(vp);
         goto done;
    }

	/* Refresh the attributes we hand to vnode_notify, then drop the share ref */
	share = smb_get_share_with_reference(VTOSMBFS(vp));
	vfs_get_notify_attributes(&vattr);
	smbfs_attr_cachelookup(share, vp, &vattr, context, TRUE);
	smb_share_rele(share, context);

	/* Deliver the events to anyone monitoring this vnode */
	vnode_notify(vp, events, &vattr);
	vnode_put(vp);
	events = 0;

done:
	if (events == 0)	/* We already process the event */
		np->d_needsUpdate = FALSE;
	else		/* Still need to process the event */
		np->d_needsUpdate = TRUE;
	smbnode_unlock(np);

    SMB_LOG_KTRACE(SMB_DBG_SMBFS_NOTIFY | DBG_FUNC_END, 0, 0, 0, 0, 0);
}
215
216/*
217 * process_notify_change
218 *
219 */
static uint32_t
process_notify_change(struct smb_ntrq *ntp)
{
	uint32_t events = 0;
	struct mdchain *mdp;
	uint32_t nextoffset = 0, action;
	int error = 0;
	size_t rparam_len = 0;

	/* Fix up the reply data chain (length only logged in debug builds) */
	mdp = &ntp->nt_rdata;
	if (mdp->md_top) {
#ifdef SMB_DEBUG
		size_t rdata_len = m_fixhdr(mdp->md_top);
		SMBDEBUG("rdata_len = %d \n", (int)rdata_len);
#else // SMB_DEBUG
		m_fixhdr(mdp->md_top);
#endif // SMB_DEBUG
		md_initm(mdp, mdp->md_top);
	}
	/* The FILE_NOTIFY_INFORMATION entries come back in the parameter area */
	mdp = &ntp->nt_rparam;
	if (mdp->md_top) {
		rparam_len = m_fixhdr(mdp->md_top);
		md_initm(mdp, mdp->md_top);
		SMBDEBUG("rrparam_len = %d\n", (int)rparam_len);
	}
	/*
	 * Remember the md_get_ routines protect us from buffer overruns. Note that
	 * the server doesn't have to return any data, so no next offset field is
	 * not an error.
	 */
	if (rparam_len && (md_get_uint32le(mdp, &nextoffset) == 0))
		do {
			/* since we already moved pass next offset don't count it */
			if (nextoffset >= sizeof(uint32_t))
				nextoffset -= (uint32_t)sizeof(uint32_t);

			error = md_get_uint32le(mdp, &action);
			if (error)
				break;

			/* since we already moved pass action don't count it */
			if (nextoffset >= sizeof(uint32_t))
				nextoffset -= (uint32_t)sizeof(uint32_t);

			/* Skip the FileName portion of this entry and read the next offset */
			if (nextoffset) {
				error = md_get_mem(mdp, NULL, nextoffset, MB_MSYSTEM);
				if (!error)
					error = md_get_uint32le(mdp, &nextoffset);
				if (error)
					break;
			}

			/* Translate each server FILE_ACTION_* into vnode event bits */
			SMBDEBUG("action = 0x%x \n", action);
			switch (action) {
				case FILE_ACTION_ADDED:
					events |= VNODE_EVENT_FILE_CREATED | VNODE_EVENT_DIR_CREATED;
					break;
				case FILE_ACTION_REMOVED:
					events |= VNODE_EVENT_FILE_REMOVED | VNODE_EVENT_DIR_REMOVED;
					break;
				case FILE_ACTION_MODIFIED:
					events |= VNODE_EVENT_ATTRIB;
					break;
				case FILE_ACTION_RENAMED_OLD_NAME:
				case FILE_ACTION_RENAMED_NEW_NAME:
					events |= VNODE_EVENT_RENAME;
					break;
				case FILE_ACTION_ADDED_STREAM:
				case FILE_ACTION_REMOVED_STREAM:
				case FILE_ACTION_MODIFIED_STREAM:
					/* Should we try to clear all named stream cache? */
					events |= VNODE_EVENT_ATTRIB;
					break;
				default:
					error = ENOTSUP;
					break;
			}
		} while (nextoffset);

	/* On a parse error or an empty reply, fall back to a generic event set */
	if (error || (events == 0))
		events = VNODE_EVENT_ATTRIB | VNODE_EVENT_WRITE;
	if (error) {
		SMBWARNING("error = %d\n", error);
	}
	return events;
}
306
307/*
308 * Proces a change notify message from the server
309 */
static int
rcvd_notify_change(struct watch_item *watchItem, vfs_context_t context)
{
	struct smbnode *np = watchItem->np;
	struct smb_ntrq *ntp = watchItem->ntp;
	struct smb_rq *	rqp = (watchItem->ntp) ? watchItem->ntp->nt_rq : NULL;
	int error = 0;
	/* Default to a generic "something changed" event set */
	uint32_t events = VNODE_EVENT_ATTRIB | VNODE_EVENT_WRITE;

    if (watchItem->flags & SMBV_SMB2) {
        /* Using SMB 2/3 */
        rqp = watchItem->rqp;

        if (rqp) {
            error = smb2_smb_parse_change_notify(rqp, &events);
        }
    }
    else {
        if (rqp) {
            /*
             * NOTE: smb_nt_reply calls smb_rq_reply which will remove the rqp from
             * the main threads queue. So when we are done here call reset_notify_change
             * but tell it not to remove the request from the queue.
             */
            error = smb_nt_reply(ntp);
            if (!error)
                events = process_notify_change(ntp);
        }
    }

    if (error == ECANCELED) {
        /*
         * Either we close the file descriptor or we canceled the
         * operation. Nothing else to do here just get out.
         */
        SMBDEBUG_LOCK(np, "Notification for %s was canceled.\n", np->n_name);
        goto done;
    }

    if (error != ETIMEDOUT) {
        /*
         * Always reset the cache timer and force a lookup except for ETIMEDOUT
         * where we want to return cached meta data if possible
         */
        np->attribute_cache_timer = 0;
        np->n_symlink_cache_timer = 0;
    }

	if (error == ENOTSUP) {
		/* This server doesn't support notifications */
		SMBWARNING("Server doesn't support notifications, polling\n");
		/*
		 * NOTE(review): this early return skips the reset_notify_change()
		 * call at "done" — presumably the request was already torn down on
		 * this path; verify there is no ntp/rqp leak here.
		 */
		return error;

	} else if ((error == ETIMEDOUT) || (error == ENOTCONN)) {
		SMBDEBUG_LOCK(np, "Processing notify for %s error = %d\n", np->n_name, error);
		/* Transient or connection errors: back off before sending again */
		watchItem->throttleBack = TRUE;
	} else if (error)  {
		SMBWARNING_LOCK(np, "We got an unexpected error: %d for %s\n", error, np->n_name);
		watchItem->throttleBack = TRUE;
	} else {
		struct timespec ts;

		/*
		 * Throttle a node that fires more than SMBFS_MAX_RCVD_NOTIFY times
		 * within SMBFS_MAX_RCVD_NOTIFY_TIME seconds.
		 */
		nanouptime(&ts);
		if (timespeccmp(&ts, &watchItem->last_notify_time, >)) {
			watchItem->rcvd_notify_count = 0;
			ts.tv_sec += SMBFS_MAX_RCVD_NOTIFY_TIME;
			watchItem->last_notify_time = ts;
		} else {
			watchItem->rcvd_notify_count++;
			if (watchItem->rcvd_notify_count > SMBFS_MAX_RCVD_NOTIFY)
				watchItem->throttleBack = TRUE;
		}
	}

	/* Notify them that something changed */
	smbfs_notified_vnode(np, watchItem->throttleBack, events, context);

done:
	reset_notify_change(watchItem, FALSE);
	return 0;
}
391
392/*
393 * Process a svrmsg notify message from the server
394 */
395static int
396rcvd_svrmsg_notify(struct smbmount	*smp, struct watch_item *watchItem)
397{
398	struct smb_rq *	rqp;
399    uint32_t action, delay;
400	int error = 0;
401
402    /* svrmsg notify always uses SMB 2/3 */
403    rqp = watchItem->rqp;
404
405    if (rqp == NULL) {
406        /* Not good, log an error and punt */
407        SMBDEBUG("Received svrmsg, but no rqp\n");
408        error = EINVAL;
409        goto done;
410    }
411
412    error = smb2_smb_parse_svrmsg_notify(rqp, &action, &delay);
413
414    if (error) {
415        SMBDEBUG("parse svrmsg error: %d\n", error);
416        goto done;
417    }
418
419    /* Here is where we make the call to the Kernel Event Agent and
420     * let it know what's going on with the server.
421     *
422     * Note: SVRMSG_GOING_DOWN and SVRMSG_SHUTDOWN_CANCELLED are mutually exclusive.
423     *       Only one can be set at any given time.
424     */
425    lck_mtx_lock(&smp->sm_svrmsg_lock);
426    if (action == SVRMSG_SHUTDOWN_START) {
427        /* Clear any pending SVRMSG_RCVD_SHUTDOWN_CANCEL status */
428        smp->sm_svrmsg_pending &= SVRMSG_RCVD_SHUTDOWN_CANCEL;
429
430        /* Set SVRMSG_RCVD_GOING_DOWN & delay */
431        smp->sm_svrmsg_pending |= SVRMSG_RCVD_GOING_DOWN;
432        smp->sm_svrmsg_shutdown_delay = delay;
433
434    } else if (action == SVRMSG_SHUTDOWN_CANCELLED) {
435        /* Clear any pending SVRMSG_RCVD_GOING_DOWN status */
436        smp->sm_svrmsg_pending &= ~SVRMSG_RCVD_GOING_DOWN;
437
438        /* Set SVRMSG_RCVD_SHUTDOWN_CANCEL */
439        smp->sm_svrmsg_pending |= SVRMSG_RCVD_SHUTDOWN_CANCEL;
440    }
441    lck_mtx_unlock(&smp->sm_svrmsg_lock);
442    vfs_event_signal(NULL, VQ_SERVEREVENT, 0);
443
444done:
445	reset_notify_change(watchItem, FALSE);
446	return error;
447}
448
449/*
450 * Send a change notify message to the server
451 */
static int
send_notify_change(struct watch_item *watchItem, vfs_context_t context)
{
	struct smbnode *np = watchItem->np;
	struct smb_share *share;
	struct smb_ntrq *ntp;
	struct mbchain *mbp;
	int error;
	uint32_t CompletionFilters;
    uint16_t smb1_fid;


	share = smb_get_share_with_reference(np->n_mount);
	if (share->ss_flags & SMBS_RECONNECTING) {
		/* While we are in reconnect stop sending */
		error = EAGAIN;
		goto done;
	}

	/* Need to wait for it to be reopened */
	if (np->d_needReopen) {
		error = EBADF;
		goto done;
	}

	/* Someone close don't send any more notifies  */
	if (np->d_fid == 0) {
		error = EBADF;
		goto done;
	}

	if (watchItem->throttleBack) {
		uint32_t events = VNODE_EVENT_ATTRIB | VNODE_EVENT_WRITE;
		/* Reset throttle state info */
		watchItem->throttleBack = FALSE;
		watchItem->rcvd_notify_count = 0;
		/*
		 * Something could have happen while we were throttle so just say
		 * something changed
		 */
		smbfs_notified_vnode(np, watchItem->throttleBack, events, context);
		nanouptime(&watchItem->last_notify_time);
		watchItem->last_notify_time.tv_sec += SMBFS_MAX_RCVD_NOTIFY_TIME;
	}

	SMBDEBUG_LOCK(np, "Sending notify for %s with fid = 0x%llx\n", np->n_name, np->d_fid);

	/* Items we want to be notified about. */
	CompletionFilters = SMBFS_NOTIFY_CHANGE_FILTERS;

    /*
    * Let SMB 2/3 handle this
    */
    if (SSTOVC(share)->vc_flags & SMBV_SMB2) {
        /* Set max response size to 64K which should be plenty */
        watchItem->flags |= SMBV_SMB2;
        error = smb2fs_smb_change_notify(share, 64 * 1024,
                                         CompletionFilters,
                                         notify_callback_completion, watchItem,
                                         context);
        if (error) {
            SMBWARNING("smb2fs_smb_change_notify return %d\n", error);
            reset_notify_change(watchItem, TRUE);
        }
		goto done;
    }

    /* SMB 1: build an NT_TRANSACT_NOTIFY_CHANGE request by hand */
    error = smb_nt_alloc(SSTOCP(share), NT_TRANSACT_NOTIFY_CHANGE, context, &ntp);
	if (error) {
		goto done;	/* Something bad happen, try agian later */
	}
	watchItem->ntp = ntp;
	mbp = &ntp->nt_tsetup;
	mb_init(mbp);

	mb_put_uint32le(mbp, CompletionFilters);	/* Completion Filter */
    /* SMB 1 fids are 16 bits; truncate the 64-bit fid for the wire */
    smb1_fid = (uint16_t) np->d_fid;
	mb_put_uint16le(mbp, smb1_fid);
	/*
	 * Decide that watch tree should be set per item instead of per mount. So
	 * if we have to poll then watch tree will be true for the parent node or
	 * root node. This will allow us to handle the case where we have too many
	 * notifications.
	 *
	 * NOTE: Still concerned about the traffic setting this can cause. Seems
	 *       finder calls monitor begin on every directory they have open and
	 *       viewable by the user. Also they never call monitor end, so these
	 *       notifications hang around until the node goes inactive. So this
	 *       means if a root is being monitored and some subdirector is being
	 *       monitored, then we will get double response for everything in the
	 *       subdirectory. This is exactly whay I have observed with the latest
	 *		 finder.
	 */
	/* Watch for thing below this item */
	mb_put_uint16le(mbp, watchItem->watchTree);

	/* Amount of param data they can return, make sure it fits in one message */
	ntp->nt_maxpcount = SSTOVC(share)->vc_txmax -
					(SMB_HDRLEN+SMB_COM_NT_TRANS_LEN+SMB_MAX_SETUPCOUNT_LEN+1);
	ntp->nt_maxdcount = 0;
	/* Queue the request; notify_callback_completion fires when the reply lands */
	error = smb_nt_async_request(ntp, notify_callback_completion, watchItem);
	if (error) {
		SMBWARNING("smb_nt_async_request return %d\n", error);
		reset_notify_change(watchItem, TRUE);
	}
done:
	smb_share_rele(share, context);
	return error;
}
561
562static int
563send_svrmsg_notify(struct smbmount *smp,
564                   struct watch_item *svrItem,
565                   vfs_context_t context)
566{
567	struct smb_share *share;
568	int error;
569	uint32_t CompletionFilters;
570
571	share = smb_get_share_with_reference(smp);
572	if (share->ss_flags & SMBS_RECONNECTING) {
573		/* While we are in reconnect stop sending */
574		error = EAGAIN;
575		goto done;
576	}
577
578	/* Items we want to be notified about. */
579	CompletionFilters = SMBFS_SVRMSG_NOTIFY_FILTERS;
580
581    /* Set max response size to 64K which should be plenty */
582    svrItem->flags |= SMBV_SMB2;
583    error = smb2fs_smb_change_notify(share, 64 * 1024,
584                                    CompletionFilters,
585                                    notify_callback_completion, svrItem,
586                                    context);
587    if (error) {
588        SMBWARNING("smb2fs_smb_change_notify returns %d\n", error);
589        reset_notify_change(svrItem, TRUE);
590    }
591
592done:
593	smb_share_rele(share, context);
594	return error;
595}
596
597static int
598VolumeMaxNotification(struct smbmount *smp, vfs_context_t context)
599{
600	struct smb_share   *share;
601	int32_t				vc_volume_cnt;
602	int					maxWorkingCnt;
603
604	share = smb_get_share_with_reference(smp);
605	vc_volume_cnt = OSAddAtomic(0, &SSTOVC(share)->vc_volume_cnt);
606
607	/*
608	 * Did this share just get replaced for Dfs failover, try again
609	 */
610	if (vc_volume_cnt == 0) {
611		smb_share_rele(share, context);
612		share = smb_get_share_with_reference(smp);
613		vc_volume_cnt = OSAddAtomic(0, &SSTOVC(share)->vc_volume_cnt);
614	}
615
616	/* Just to be safe never let vc_volume_cnt be zero! */
617	if (!vc_volume_cnt) {
618		vc_volume_cnt = 1;
619	}
620
621    if (SSTOVC(share)->vc_flags & SMBV_SMB2) {
622        /* SMB 2/3 relies on crediting */
623        maxWorkingCnt = (SSTOVC(share)->vc_credits_max / 2) / vc_volume_cnt;
624    }
625    else {
626        /* SMB 1 relies on maxmux */
627        maxWorkingCnt = (SSTOVC(share)->vc_maxmux / 2) / vc_volume_cnt;
628    }
629
630	smb_share_rele(share, context);
631
632	return maxWorkingCnt;
633}
634
635/*
636 * process_svrmsg_items
637 *
638 * Process server message notifications.
639 *
640 */
static void
process_svrmsg_items(struct smbfs_notify_change *notify, vfs_context_t context)
{
	struct smbmount	*smp = notify->smp;
    struct watch_item *svrItem;
    int error;

    svrItem = notify->svrmsg_item;
    if (svrItem == NULL) {
        /* extremely unlikely, but just to be sure */
        return;
    }

    /* Simple state machine: receive a reply, then re-arm by sending again */
    switch (svrItem->state) {
        case kReceivedNotify:
        {
            error = rcvd_svrmsg_notify(smp, svrItem);
            if (error == ENOTSUP) {
                /* Notify not supported, turn off svrmsg notify */

                /* This will effectively disable server messages */
                lck_mtx_lock(&svrItem->watch_statelock);
                SMBERROR("svrmsg notify not supported\n");
                svrItem->state = kWaitingForRemoval;
                lck_mtx_unlock(&svrItem->watch_statelock);
                break;
            } else if (error) {
                lck_mtx_lock(&svrItem->watch_statelock);
                svrItem->rcvd_notify_count++;
                if (svrItem->rcvd_notify_count > SMBFS_MAX_RCVD_NOTIFY) {
                    /* too many errors, turn off svrmsg notify */
                    SMBERROR("disabling svrmsg notify, error: %d\n", error);
                    svrItem->state = kWaitingForRemoval;
                } else {
                    svrItem->state = kSendNotify;
                }
                lck_mtx_unlock(&svrItem->watch_statelock);
                break;
            }

            /* Successful receive: reset the error counter and re-arm */
            lck_mtx_lock(&svrItem->watch_statelock);
            SMBDEBUG("Receive success, sending next svrmsg notify\n");
            svrItem->state = kSendNotify;
            svrItem->rcvd_notify_count = 0;
            lck_mtx_unlock(&svrItem->watch_statelock);

            /* fall through to send another svrmsg notify */
        }

        case kSendNotify:
        {
            error = send_svrmsg_notify(smp, svrItem, context);
            if (error == EAGAIN) {
                /* Must be in reconnect, try to send later */
                break;
            }
            if (!error) {
                /* Request is in flight; wait for the completion callback */
                lck_mtx_lock(&svrItem->watch_statelock);
                svrItem->state = kWaitingOnNotify;
                lck_mtx_unlock(&svrItem->watch_statelock);
            }

            break;
        }

        case kCancelNotify:
            /* Tear down the in-flight request, then park for removal */
            reset_notify_change(svrItem, TRUE);

            lck_mtx_lock(&svrItem->watch_statelock);
            svrItem->state = kWaitingForRemoval;
            lck_mtx_unlock(&svrItem->watch_statelock);
            /* Wake anyone sleeping on this item waiting for the removal */
            wakeup(svrItem);
            break;

        default:
            SMBDEBUG("State %u ignored\n", svrItem->state);
            break;
    }
}
720
721/*
722 * process_notify_items
723 *
724 * Process all watch items on the notify change list.
725 *
726 */
static void
process_notify_items(struct smbfs_notify_change *notify, vfs_context_t context)
{
	struct smbmount	*smp = notify->smp;
	int maxWorkingCnt = VolumeMaxNotification(smp, context);
	struct watch_item *watchItem, *next;
	int	 updatePollingNodes = FALSE;
	int moveToPollCnt = 0, moveFromPollCnt = 0;
	int workingCnt;

	lck_mtx_lock(&notify->watch_list_lock);
	/* How many outstanding notification do we have */
	workingCnt = notify->watchCnt - notify->watchPollCnt;
	/* Calculate how many need to be move to the polling state */
	if (workingCnt > maxWorkingCnt) {
		moveToPollCnt = workingCnt - maxWorkingCnt;
		SMBDEBUG("moveToPollCnt = %d \n", moveToPollCnt);
	}
	else if (notify->watchPollCnt) {
		/* Calculate how many we can move out of the polling state */
		moveFromPollCnt = maxWorkingCnt - workingCnt;
		if (notify->watchPollCnt < moveFromPollCnt) {
			moveFromPollCnt = notify->watchPollCnt;
			SMBDEBUG("moveFromPollCnt = %d\n", moveFromPollCnt);
		}
	}

    /* Process svrmsg notify messages */
    if (notify->pollOnly != TRUE && (notify->svrmsg_item != NULL)) {
        /* Server message notifications handled separately */
        process_svrmsg_items(notify, context);
    }

	/* Walk every watch item, advancing its per-item state machine */
	STAILQ_FOREACH_SAFE(watchItem, &notify->watch_list, entries, next) {
		switch (watchItem->state) {
			case kCancelNotify:
                if (notify->pollOnly == TRUE) {
                    /* request already removed from the iod queue */
                    reset_notify_change(watchItem, FALSE);
                } else {
                    reset_notify_change(watchItem, TRUE);
                }

				lck_mtx_lock(&watchItem->watch_statelock);
				/* Wait for the user process to dequeue and free the item */
				watchItem->state = kWaitingForRemoval;
				lck_mtx_unlock(&watchItem->watch_statelock);
				wakeup(watchItem);
				break;
			case kReceivedNotify:
				/*
				 * Root is always the first item in the list, so we can set the
				 * flag here and know that all the polling nodes will get updated.
				 */
				if (watchItem->isRoot) {
					updatePollingNodes = TRUE;
					if (moveToPollCnt || (notify->watchPollCnt > moveFromPollCnt)) {
						/* We are polling so turn on watch tree */
						SMBDEBUG("watchTree = TRUE\n");
						watchItem->watchTree = TRUE;
					} else {
						SMBDEBUG("watchTree = FALSE\n");
						watchItem->watchTree = FALSE;
					}
				}
				if (rcvd_notify_change(watchItem, context) == ENOTSUP) {
					/* Server can't do change notify; fall back to polling everywhere */
					notify->pollOnly = TRUE;
					watchItem->state = kUsePollingToNotify;
					break;
				} else {
					watchItem->state = kSendNotify;
					if (watchItem->throttleBack) {
						SMBDEBUG_LOCK(watchItem->np, "Throttling back %s\n", watchItem->np->n_name);
						notify->sleeptimespec.tv_sec = NOTIFY_THROTTLE_SLEEP_TIMO;
						break;	/* Pull back sending notification, until next time */
					}
				}
				/* Otherwise fall through, so we can send a new request */
			case kSendNotify:
			{
				int sendError;
				sendError = send_notify_change(watchItem, context);
				if (sendError == EAGAIN) {
					/* Must be in reconnect, try to send agian later */
					break;
				}
				if (!sendError) {
					watchItem->state = kWaitingOnNotify;
					break;
				}
				/* Send failed: demote non-root items to polling if we're over budget */
				if (!watchItem->isRoot && moveToPollCnt) {
					watchItem->state = kUsePollingToNotify;
					moveToPollCnt--;
					notify->watchPollCnt++;
					SMBDEBUG_LOCK(watchItem->np, "Moving %s to poll state\n", watchItem->np->n_name);
				} else {
					/* If an error then keep trying */
					watchItem->state = kSendNotify;
				}
				break;
			}
			case kUsePollingToNotify:
				/* We can move some back to notify and turn off polling */
				if ((!notify->pollOnly) &&
                    moveFromPollCnt &&
                    (watchItem->np->d_fid != 0) &&
                    (!watchItem->np->d_needReopen)) {
					watchItem->state = kSendNotify;
					moveFromPollCnt--;
					notify->watchPollCnt--;
					notify->haveMoreWork = TRUE; /* Force us to resend these items */
					SMBDEBUG_LOCK(watchItem->np, "Moving %s from polling to send state\n", watchItem->np->n_name);
				} else if (updatePollingNodes) {
					/* Root saw a change: give every polled node a generic update */
					uint32_t events = VNODE_EVENT_ATTRIB | VNODE_EVENT_WRITE;
					smbfs_notified_vnode(watchItem->np, FALSE, events, context);
                    SMBDEBUG_LOCK(watchItem->np, "Updating %s using polling\n", watchItem->np->n_name);
				}
				break;
			case kWaitingOnNotify:
				/* Nothing to do here but wait */
				break;
			case kWaitingForRemoval:
				/* Just waiting for it to get removed */
				break;
		}
	}
	lck_mtx_unlock(&notify->watch_list_lock);
	/*
	 * Keep track of how many are we over the limit So we can kick them off
	 * in smbfs_restart_change_notify. We need this to keep one volume from
	 * hogging all the kqueue events. So if its zero that means the
	 * smbfs_restart_change_notify code is done so we can now add the new
	 * value if we have one.
	 */
	if (OSAddAtomic(0, &smp->tooManyNotifies) == 0)
		OSAddAtomic(moveToPollCnt, &smp->tooManyNotifies);
}
864
865/*
866 * notify_main
867 *
868 * Notify thread main routine.
869 */
870static void
871notify_main(void *arg)
872{
873	struct smbfs_notify_change	*notify = arg;
874	vfs_context_t		context;
875
876	context = vfs_context_create((vfs_context_t)0);
877
878	notify->sleeptimespec.tv_nsec = 0;
879
880	lck_mtx_lock(&notify->notify_statelock);
881	notify->notify_state = kNotifyThreadRunning;
882	lck_mtx_unlock(&notify->notify_statelock);
883
884	while (notify->notify_state == kNotifyThreadRunning) {
885		notify->sleeptimespec.tv_sec = NOTIFY_CHANGE_SLEEP_TIMO;
886		notify->haveMoreWork = FALSE;
887		process_notify_items(notify, context);
888		if (!notify->haveMoreWork)
889			msleep(&notify->notify_state, 0, PWAIT, "notify change idle",
890				   &notify->sleeptimespec);
891	}
892	/* Shouldn't have anything in the queue at this point */
893	DBG_ASSERT(STAILQ_EMPTY(&notify->watch_list))
894
895	lck_mtx_lock(&notify->notify_statelock);
896	notify->notify_state = kNotifyThreadStop;
897	lck_mtx_unlock(&notify->notify_statelock);
898	vfs_context_rele(context);
899	wakeup(notify);
900}
901
902/*
903 * smbfs_notify_change_create_thread
904 *
905 * Create and start the thread used do handle notify change request
906 */
907void
908smbfs_notify_change_create_thread(struct smbmount *smp)
909{
910	struct smbfs_notify_change	*notify;
911	kern_return_t	result;
912	thread_t		thread;
913
914	SMB_MALLOC(notify, struct smbfs_notify_change *, sizeof(*notify), M_TEMP,
915		   M_WAITOK | M_ZERO);
916	smp->notify_thread = notify;
917
918	notify->smp = smp;
919	lck_mtx_init(&notify->notify_statelock, smbfs_mutex_group, smbfs_lock_attr);
920	lck_mtx_init(&notify->watch_list_lock, smbfs_mutex_group, smbfs_lock_attr);
921	STAILQ_INIT(&notify->watch_list);
922
923	notify->notify_state = kNotifyThreadStarting;
924
925	result = kernel_thread_start((thread_continue_t)notify_main, notify, &thread);
926	if (result != KERN_SUCCESS) {
927		SMBERROR("can't start notify change thread: result = %d\n", result);
928		smp->notify_thread = NULL;
929		SMB_FREE(notify, M_SMBIOD);
930		return;
931	}
932	thread_deallocate(thread);
933	return;
934}
935
936/*
937 * smbfs_notify_change_destroy_thread
938 *
939 * Stop the thread used to handle notify change request and remove any memory
940 * used by the thread.
941 *
942 * NOTE: All watch items should have already been remove from the threads list.
943 */
void
smbfs_notify_change_destroy_thread(struct smbmount *smp)
{
	struct smbfs_notify_change	*notify = smp->notify_thread;

	if (smp->notify_thread == NULL)
		return;
	/* Detach the thread from the mount before asking it to stop */
	smp->notify_thread = NULL;
	/*
	 * NOTE(review): notify_state is written here without holding
	 * notify_statelock, while notify_main takes the lock for its writes —
	 * presumably benign for this handshake, but worth confirming.
	 */
	notify->notify_state = kNotifyThreadStopping;
	wakeup(&notify->notify_state);

	/* Wait for notify_main to acknowledge with kNotifyThreadStop */
	for (;;) {
		lck_mtx_lock(&notify->notify_statelock);
		if (notify->notify_state == kNotifyThreadStop) {
			lck_mtx_unlock(&notify->notify_statelock);
			if (STAILQ_EMPTY(&notify->watch_list)) {
				SMBDEBUG("Watch thread going away\n");
			} else {
				SMBERROR("Watch thread going away with watch items, very bad?\n");
			}
			break;
		}
		/* PDROP releases notify_statelock while we sleep */
		msleep(notify, &notify->notify_statelock, PWAIT | PDROP, "notify change exit", 0);
	}
	/* Thread is gone; safe to reclaim its locks and memory */
	lck_mtx_destroy(&notify->notify_statelock, smbfs_mutex_group);
	lck_mtx_destroy(&notify->watch_list_lock, smbfs_mutex_group);
	SMB_FREE(notify, M_TEMP);
}
972
973/*
974 * enqueue_notify_change_request
975 *
976 * Allocate an item and place it on the list.
977 */
978static void
979enqueue_notify_change_request(struct smbfs_notify_change *notify,
980							  struct smbnode *np)
981{
982	struct watch_item *watchItem;
983
984	SMB_MALLOC(watchItem, struct watch_item *, sizeof(*watchItem), M_TEMP, M_WAITOK | M_ZERO);
985	lck_mtx_init(&watchItem->watch_statelock, smbfs_mutex_group, smbfs_lock_attr);
986	watchItem->isRoot = vnode_isvroot(np->n_vnode);
987	watchItem->np = np;
988	if (notify->pollOnly) {
989		watchItem->state = kUsePollingToNotify;
990	} else {
991		watchItem->state = kSendNotify;
992	}
993	watchItem->notify = notify;
994	nanouptime(&watchItem->last_notify_time);
995	watchItem->last_notify_time.tv_sec += SMBFS_MAX_RCVD_NOTIFY_TIME;
996	lck_mtx_lock(&notify->watch_list_lock);
997	notify->watchCnt++;
998
999    SMBDEBUG_LOCK(np, "Enqueue %s count = %d poll count = %d\n", np->n_name,
1000                  notify->watchCnt, notify->watchPollCnt);
1001
1002	/* Always make sure the root vnode is the first item in the list */
1003	if (watchItem->isRoot) {
1004		STAILQ_INSERT_HEAD(&notify->watch_list, watchItem, entries);
1005	} else {
1006		STAILQ_INSERT_TAIL(&notify->watch_list, watchItem, entries);
1007	}
1008	lck_mtx_unlock(&notify->watch_list_lock);
1009	notify_wakeup(notify);
1010}
1011
1012/*
1013 * enqueue_notify_svrmsg_request
1014 *
1015 * Allocate an item for server messages, and place it
1016 * in the notify struct.
1017 */
1018static void
1019enqueue_notify_svrmsg_request(struct smbfs_notify_change *notify)
1020{
1021	struct watch_item *watchItem;
1022
1023    if (notify->pollOnly) {
1024        SMBERROR("Server doesn't support notify, not enabling svrmsg notify\n");
1025        return;
1026    }
1027
1028	SMB_MALLOC(watchItem, struct watch_item *, sizeof(*watchItem), M_TEMP, M_WAITOK | M_ZERO);
1029	lck_mtx_init(&watchItem->watch_statelock, smbfs_mutex_group, smbfs_lock_attr);
1030
1031    watchItem->isServerMsg = TRUE;
1032    watchItem->state = kSendNotify;
1033
1034	watchItem->notify = notify;
1035	nanouptime(&watchItem->last_notify_time);
1036	lck_mtx_lock(&notify->watch_list_lock);
1037
1038    notify->svrmsg_item = watchItem;
1039	lck_mtx_unlock(&notify->watch_list_lock);
1040	notify_wakeup(notify);
1041}
1042
1043/*
1044 * dequeue_notify_change_request
1045 *
1046 * Search the list, if we find a match set the state to cancel. Now wait for the
1047 * watch thread to say its ok to remove the item.
1048 */
static void
dequeue_notify_change_request(struct smbfs_notify_change *notify,
							  struct smbnode *np)
{
	struct watch_item *watchItem, *next;

	lck_mtx_lock(&notify->watch_list_lock);
	STAILQ_FOREACH_SAFE(watchItem, &notify->watch_list, entries, next) {
		if (watchItem->np == np) {
			notify->watchCnt--;
			lck_mtx_lock(&watchItem->watch_statelock);
			/* Item was in polling mode, so drop the poll count too */
			if (watchItem->state == kUsePollingToNotify)
				notify->watchPollCnt--;

            SMBDEBUG_LOCK(np, "Dequeue %s count = %d poll count = %d\n", np->n_name,
                          notify->watchCnt, notify->watchPollCnt);

			/* Ask the watch thread to cancel this item... */
			watchItem->state = kCancelNotify;
			lck_mtx_unlock(&watchItem->watch_statelock);
			notify_wakeup(notify);
			/*
			 * ...and wait for it to signal that the item is safe to
			 * remove. msleep drops watch_list_lock while asleep and
			 * reacquires it before returning (no PDROP here).
			 */
			msleep(watchItem, &notify->watch_list_lock, PWAIT,
				   "notify watchItem cancel", NULL);
			STAILQ_REMOVE(&notify->watch_list, watchItem, watch_item, entries);
			SMB_FREE(watchItem, M_TEMP);
			watchItem = NULL;
			break;
		}
	}
	lck_mtx_unlock(&notify->watch_list_lock);
}
1079
1080/*
1081 * dequeue_notify_svrmsg_request
1082 *
1083 * Set the svrmsg_item state to cancel, then wait for the
1084 * watch thread to say its ok to remove the item.
1085 */
static void
dequeue_notify_svrmsg_request(struct smbfs_notify_change *notify)
{
	struct watch_item *watchItem = notify->svrmsg_item;

	/* svrmsg notify was never enabled; nothing to cancel */
    if (watchItem == NULL) {
        return;
    }

    lck_mtx_lock(&notify->watch_list_lock);

	/* Ask the watch thread to cancel the server-message item */
    lck_mtx_lock(&watchItem->watch_statelock);
    watchItem->state = kCancelNotify;
    lck_mtx_unlock(&watchItem->watch_statelock);

    notify_wakeup(notify);
	/* Wait for the watch thread; msleep drops and retakes watch_list_lock */
    msleep(watchItem, &notify->watch_list_lock, PWAIT,
           "svrmsg watchItem cancel", NULL);

	/* The watch thread should have parked the item in kWaitingForRemoval */
    if (watchItem->state != kWaitingForRemoval) {
        SMBERROR("svrmsgItem->state: %d, expected kWaitingForRemoval\n", watchItem->state);
    }

	/*
	 * NOTE(review): clearing svrmsg_item under the item's watch_statelock
	 * (rather than only watch_list_lock) looks odd — presumably readers of
	 * svrmsg_item check it under watch_statelock; confirm before changing.
	 */
    lck_mtx_lock(&watchItem->watch_statelock);
    notify->svrmsg_item = NULL;
    lck_mtx_unlock(&watchItem->watch_statelock);

    SMB_FREE(watchItem, M_TEMP);

	lck_mtx_unlock(&notify->watch_list_lock);
}
1117
1118/*
1119 * smbfs_start_change_notify
1120 *
1121 * Start the change notify process. Called from the smbfs_vnop_monitor routine.
1122 *
1123 * The calling routine must hold a reference on the share
1124 *
1125 */
1126int
1127smbfs_start_change_notify(struct smb_share *share, struct smbnode *np,
1128						  vfs_context_t context, int *releaseLock)
1129{
1130	struct smbmount *smp = np->n_mount;
1131	int error;
1132
1133	if (smp->notify_thread == NULL) {
1134		/* This server doesn't support notify change so turn on polling */
1135		np->n_flag |= N_POLLNOTIFY;
1136		SMBDEBUG_LOCK(np, "Monitoring %s with polling\n", np->n_name);
1137	} else {
1138		if (np->d_kqrefcnt) {
1139			np->d_kqrefcnt++;	/* Already processing this node, we are done */
1140			return 0;
1141		}
1142		np->d_kqrefcnt++;
1143		/* Setting SMB2_SYNCHRONIZE because XP does. */
1144		error = smbfs_tmpopen(share, np, SMB2_FILE_READ_DATA | SMB2_SYNCHRONIZE,
1145                              &np->d_fid, context);
1146		if (error)	{
1147			/* Open failed so turn on polling */
1148			np->n_flag |= N_POLLNOTIFY;
1149			SMBDEBUG_LOCK(np, "Monitoring %s failed to open. %d\n", np->n_name, error);
1150		} else {
1151			SMBDEBUG_LOCK(np, "Monitoring %s\n", np->n_name);
1152
1153			/*
1154			 * We no longer need the node lock. So unlock the node so we have no
1155			 * lock contention with the notify list lock.
1156			 *
1157			 * Make sure we tell the calling routine that we have released the
1158			 * node lock.
1159			 */
1160			*releaseLock = FALSE;
1161			smbnode_unlock(np);
1162			enqueue_notify_change_request(smp->notify_thread, np);
1163		}
1164	}
1165	return 0;
1166}
1167
1168/*
1169 * smbfs_start_svrmsg_notify
1170 *
1171 * Start the change notify process. Called from the smbfs mount routine.
1172 *
1173 * The calling routine must hold a reference on the share
1174 *
1175 */
1176int
1177smbfs_start_svrmsg_notify(struct smbmount *smp)
1178{
1179	int error = 0;
1180
1181	if (smp->notify_thread == NULL) {
1182		/* This server doesn't support notify change, so forget srvmsg
1183         * notifications
1184         */
1185		SMBDEBUG("Server doesn't support notify\n");
1186        error = ENOTSUP;
1187	} else {
1188			SMBDEBUG("Monitoring server messages\n");
1189			enqueue_notify_svrmsg_request(smp->notify_thread);
1190	}
1191	return error;
1192}
1193
1194/*
1195 * smbfs_stop_change_notify
1196 *
1197 * Called from  smbfs_vnop_monitor or smb_vnop_inactive routine. If this is the
1198 * last close then close the directory and set the fid to zero. This will stop
1199 * the watch event from doing any further work. Now dequeue the watch item.
1200 *
1201 * The calling routine must hold a reference on the share
1202 *
1203 */
1204int
1205smbfs_stop_change_notify(struct smb_share *share, struct smbnode *np,
1206						 int forceClose, vfs_context_t context, int *releaseLock)
1207{
1208	struct smbmount *smp = np->n_mount;
1209	SMBFID	fid;
1210
1211	if (forceClose)
1212		np->d_kqrefcnt = 0;
1213	else
1214		np->d_kqrefcnt--;
1215
1216	/* Still have users monitoring just get out */
1217	if (np->d_kqrefcnt > 0)
1218		return 0;
1219
1220	DBG_ASSERT(np->d_kqrefcnt == 0)
1221	/* If polling was turned on, turn it off */
1222	np->n_flag &= ~N_POLLNOTIFY;
1223	fid = np->d_fid;
1224	/* Stop all notify network traffic */
1225	np->d_fid = 0;
1226	/* If marked for reopen, turn it off */
1227	np->d_needReopen = FALSE;
1228	np->d_kqrefcnt = 0;
1229	/* If we have it open then close it */
1230	if (fid != 0) {
1231		(void)smbfs_tmpclose(share, np, fid, context);
1232    }
1233
1234	SMBDEBUG_LOCK(np, "We are no longer monitoring  %s\n", np->n_name);
1235
1236	if (smp->notify_thread) {
1237		/*
1238		 * We no longer need the node lock. So unlock the node so we have no
1239		 * lock contention with the notify list lock.
1240		 *
1241		 * Make sure we tell the calling routine that we have released the
1242		 * node lock.
1243		 */
1244		*releaseLock = FALSE;
1245		smbnode_unlock(np);
1246		dequeue_notify_change_request(smp->notify_thread, np);
1247	}
1248	return 0;
1249}
1250
1251int
1252smbfs_stop_svrmsg_notify(struct smbmount *smp)
1253{
1254	SMBDEBUG("We are no longer monitoring svrmsg notify replies\n");
1255
1256	if (smp->notify_thread) {
1257		dequeue_notify_svrmsg_request(smp->notify_thread);
1258	}
1259	return 0;
1260}
1261
1262/*
1263 * smbfs_restart_change_notify
1264 *
1265 * Reopen the directory and wake up the notify queue.
1266 *
1267 * The calling routine must hold a reference on the share
1268 *
1269 */
void
smbfs_restart_change_notify(struct smb_share *share, struct smbnode *np,
							vfs_context_t context)
{
	struct smbmount *smp = np->n_mount;
	int error;

	/* This server doesn't support notify change so we are done just return */
	if (smp->notify_thread == NULL) {
		np->d_needReopen = FALSE;
		return;
	}
	if (!np->d_needReopen) {
		SMBFID	fid = np->d_fid;

		/*
		 * OSAddAtomic(0, ...) is an atomic read of tooManyNotifies.
		 * The root is never demoted to polling.
		 */
		if ((vnode_isvroot(np->n_vnode)) ||
			(OSAddAtomic(0, &smp->tooManyNotifies) == 0)) {
			/* Nothing to do here, just get out */
			return;
		}

		/* We sent something see how long we have been waiting */
		SMBDEBUG_LOCK(np, "Need to close '%s' so we can force it to use polling\n",
                      np->n_name);

		np->d_needReopen = TRUE;
		np->d_fid = 0;
		/*
		 * Closing it here will cause the server to send a cancel error, which
		 * will cause the notification thread to place this item in the poll
		 * state.
		 */
		(void)smbfs_tmpclose(share, np, fid, context);
		/* One fewer outstanding notify; decrement the throttle counter */
		OSAddAtomic(-1, &smp->tooManyNotifies);
		return;	/* Nothing left to do here, just get out */
	}

    SMBDEBUG_LOCK(np, "%s is being reopened for monitoring\n", np->n_name);

    /*
	 * We set the capabilities VOL_CAP_INT_REMOTE_EVENT for all supported
	 * servers. So if they call us without checking the
	 * capabilities then they get what they get.
	 *
	 * Setting SMB2_SYNCHRONIZE because XP does.
	 *
	 * Now reopen the directory.
	 */
	error = smbfs_tmpopen(share, np, SMB2_FILE_READ_DATA | SMB2_SYNCHRONIZE,
						  &np->d_fid, context);
	if (error) {
		SMBWARNING_LOCK(np, "Attempting to reopen %s failed %d\n", np->n_name, error);
		return;
	}

	/* Reopen succeeded; clear the flag and kick the notify thread */
	np->d_needReopen = FALSE;
	notify_wakeup(smp->notify_thread);
}
1328