/*
 * Copyright (c) 2006 - 2012 Apple Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this
 * file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */

#include <sys/param.h>
#include <sys/kauth.h>
#include <libkern/OSAtomic.h>

#include <sys/smb_apple.h>

#include <netsmb/smb.h>
#include <netsmb/smb_2.h>
#include <netsmb/smb_rq.h>
#include <netsmb/smb_rq_2.h>
#include <netsmb/smb_conn.h>
#include <netsmb/smb_conn_2.h>
#include <smbfs/smbfs.h>
#include <smbfs/smbfs_node.h>
#include <smbfs/smbfs_subr.h>
#include <smbfs/smbfs_subr_2.h>
#include "smbfs_notify_change.h"

extern lck_attr_t *smbfs_lock_attr;
extern lck_grp_t *smbfs_mutex_group;

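/*
 * Tuning constants for the notify change thread:
 * NOTIFY_CHANGE_SLEEP_TIMO - seconds the notify thread sleeps when it has no
 *	more work to do.
 * NOTIFY_THROTTLE_SLEEP_TIMO - shorter sleep used while a watch item is being
 *	throttled back.
 * SMBFS_MAX_RCVD_NOTIFY and SMBFS_MAX_RCVD_NOTIFY_TIME - if more than
 *	SMBFS_MAX_RCVD_NOTIFY replies arrive inside a SMBFS_MAX_RCVD_NOTIFY_TIME
 *	second window, we start throttling that item.
 */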
#define NOTIFY_CHANGE_SLEEP_TIMO	15
#define NOTIFY_THROTTLE_SLEEP_TIMO	5
#define SMBFS_MAX_RCVD_NOTIFY		4
#define SMBFS_MAX_RCVD_NOTIFY_TIME	1


/* For now just notify on these items, we may want to watch more in the future */
#define SMBFS_NOTIFY_CHANGE_FILTERS	(FILE_NOTIFY_CHANGE_FILE_NAME | \
									 FILE_NOTIFY_CHANGE_DIR_NAME | \
									 FILE_NOTIFY_CHANGE_ATTRIBUTES | \
									 FILE_NOTIFY_CHANGE_CREATION | \
									 FILE_NOTIFY_CHANGE_SECURITY | \
									 FILE_NOTIFY_CHANGE_STREAM_SIZE | \
									 FILE_NOTIFY_CHANGE_STREAM_WRITE)

/* For server message notify, we set everything, which is one of the
 * ways the server can tell it's a server message notify, and not
 * a normal notify change type.
 */
#define SMBFS_SVRMSG_NOTIFY_FILTERS	(FILE_NOTIFY_CHANGE_FILE_NAME | \
                                     FILE_NOTIFY_CHANGE_DIR_NAME | \
                                     FILE_NOTIFY_CHANGE_ATTRIBUTES | \
                                     FILE_NOTIFY_CHANGE_SIZE | \
                                     FILE_NOTIFY_CHANGE_LAST_WRITE | \
                                     FILE_NOTIFY_CHANGE_LAST_ACCESS | \
                                     FILE_NOTIFY_CHANGE_CREATION | \
                                     FILE_NOTIFY_CHANGE_EA | \
                                     FILE_NOTIFY_CHANGE_SECURITY | \
                                     FILE_NOTIFY_CHANGE_STREAM_NAME | \
                                     FILE_NOTIFY_CHANGE_STREAM_SIZE | \
                                     FILE_NOTIFY_CHANGE_STREAM_WRITE)

/*
 * notify_wakeup
 *
 * Wake up the thread and tell it there is work to be done.
 *
 */
static void
notify_wakeup(struct smbfs_notify_change * notify)
{
	notify->haveMoreWork = TRUE;		/* we have work to do */
	wakeup(&(notify)->notify_state);
}

/*
 * notify_callback_completion
 */
static void
notify_callback_completion(void *call_back_args)
{
	struct watch_item *watchItem = (struct watch_item *)call_back_args;

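	/*
	 * Completion callback for the async change notify request. Don't override
	 * a cancel or removal state; otherwise note that a reply arrived and wake
	 * the notify thread so it can process it.
	 */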
	lck_mtx_lock(&watchItem->watch_statelock);
	if ((watchItem->state != kCancelNotify) &&
		(watchItem->state != kWaitingForRemoval)) {
		watchItem->state = kReceivedNotify;
	}
	lck_mtx_unlock(&watchItem->watch_statelock);
	notify_wakeup(watchItem->notify);
}

/*
 * reset_notify_change
 *
 * Remove the request from the network queue, then clean up and release any
 * allocated memory.
 */
static void
reset_notify_change(struct watch_item *watchItem, int RemoveRQ)
{
	struct smb_ntrq *ntp = watchItem->ntp;
	struct smb_rq *	rqp = (watchItem->ntp) ? watchItem->ntp->nt_rq : NULL;

    if (watchItem->flags & SMBV_SMB2) {
        /* Using SMB 2.x */
        rqp = watchItem->rqp;
    }

    if (rqp) {
		if (RemoveRQ) {
            /* Needs to be removed from the queue */
            smb_iod_removerq(rqp);
            if (ntp) {
                watchItem->ntp->nt_rq = NULL;
            }
        }
		smb_rq_done(rqp);
	}
	if (ntp)
		smb_nt_done(ntp);

    watchItem->ntp = NULL;

    if (watchItem->flags & SMBV_SMB2) {
        watchItem->rqp = NULL;
    }
}

/*
 * smbfs_notified_vnode
 *
 * See if we can update the node and notify the monitor.
 */
static void
smbfs_notified_vnode(struct smbnode *np, int throttleBack, uint32_t events,
					 vfs_context_t context)
{
	struct smb_share *share = NULL;
	struct vnode_attr vattr;
	vnode_t		vp;

	if ((np->d_fid == 0) || (smbnode_lock(np, SMBFS_SHARED_LOCK) != 0)) {
		return; /* Nothing to do here */
    }

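	/* Invalidate the cached attributes and symlink info so the next lookup goes to the server */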
	np->attribute_cache_timer = 0;
	np->n_symlink_cache_timer = 0;
	/*
	 * The fid changed while we were blocked; just unlock and get out. If we
	 * are throttling back then skip this notify.
	 */
	if ((np->d_fid == 0) || throttleBack) {
		goto done;
    }

	np->n_lastvop = smbfs_notified_vnode;
	vp = np->n_vnode;

    /* If they have a notification with a smbnode, then we must have a vnode */
    if (vnode_get(vp)) {
        /* The vnode could be going away, skip out, nothing to do here */
		goto done;
    }
    /* Should never happen, but let's test and make sure */
    if (VTOSMB(vp) != np) {
        SMBWARNING("%s vnode_fsnode(vp) and np don't match!\n", np->n_name);
        vnode_put(vp);
        goto done;
    }

	share = smb_get_share_with_reference(VTOSMBFS(vp));
	vfs_get_notify_attributes(&vattr);
	smbfs_attr_cachelookup(share, vp, &vattr, context, TRUE);
	smb_share_rele(share, context);

	vnode_notify(vp, events, &vattr);
	vnode_put(vp);
	events = 0;

done:
	if (events == 0)	/* We already processed the event */
		np->d_needsUpdate = FALSE;
	else		/* Still need to process the event */
		np->d_needsUpdate = TRUE;
	smbnode_unlock(np);
}

/*
 * process_notify_change
 *
 */
static uint32_t
process_notify_change(struct smb_ntrq *ntp)
{
	uint32_t events = 0;
	struct mdchain *mdp;
	uint32_t nextoffset = 0, action;
	int error = 0;
	size_t rparam_len = 0;

	mdp = &ntp->nt_rdata;
	if (mdp->md_top) {
#ifdef SMB_DEBUG
		size_t rdata_len = m_fixhdr(mdp->md_top);
		SMBDEBUG("rdata_len = %d \n", (int)rdata_len);
#else // SMB_DEBUG
		m_fixhdr(mdp->md_top);
#endif // SMB_DEBUG
		md_initm(mdp, mdp->md_top);
	}
	mdp = &ntp->nt_rparam;
	if (mdp->md_top) {
		rparam_len = m_fixhdr(mdp->md_top);
		md_initm(mdp, mdp->md_top);
		SMBDEBUG("rparam_len = %d\n", (int)rparam_len);
	}
	/*
	 * Remember the md_get_ routines protect us from buffer overruns. Note that
	 * the server doesn't have to return any data, so a missing next offset
	 * field is not an error.
	 */
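	/*
	 * The reply is a list of FILE_NOTIFY_INFORMATION entries. Each entry
	 * starts with a 4-byte next entry offset and a 4-byte action, followed by
	 * the file name, which we skip since we only map actions to vnode events.
	 */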
	if (rparam_len && (md_get_uint32le(mdp, &nextoffset) == 0))
		do {
			/* since we already moved past the next offset field don't count it */
			if (nextoffset >= sizeof(uint32_t))
				nextoffset -= (uint32_t)sizeof(uint32_t);

			error = md_get_uint32le(mdp, &action);
			if (error)
				break;

			/* since we already moved past the action field don't count it */
			if (nextoffset >= sizeof(uint32_t))
				nextoffset -= (uint32_t)sizeof(uint32_t);

			if (nextoffset) {
				error = md_get_mem(mdp, NULL, nextoffset, MB_MSYSTEM);
				if (!error)
					error = md_get_uint32le(mdp, &nextoffset);
				if (error)
					break;
			}

			SMBDEBUG("action = 0x%x \n", action);
			switch (action) {
				case FILE_ACTION_ADDED:
					events |= VNODE_EVENT_FILE_CREATED | VNODE_EVENT_DIR_CREATED;
					break;
				case FILE_ACTION_REMOVED:
					events |= VNODE_EVENT_FILE_REMOVED | VNODE_EVENT_DIR_REMOVED;
					break;
				case FILE_ACTION_MODIFIED:
					events |= VNODE_EVENT_ATTRIB;
					break;
				case FILE_ACTION_RENAMED_OLD_NAME:
				case FILE_ACTION_RENAMED_NEW_NAME:
					events |= VNODE_EVENT_RENAME;
					break;
				case FILE_ACTION_ADDED_STREAM:
				case FILE_ACTION_REMOVED_STREAM:
				case FILE_ACTION_MODIFIED_STREAM:
					/* Should we try to clear all named stream caches? */
					events |= VNODE_EVENT_ATTRIB;
					break;
				default:
					error = ENOTSUP;
					break;
			}
		} while (nextoffset);

	if (error || (events == 0))
		events = VNODE_EVENT_ATTRIB | VNODE_EVENT_WRITE;
	if (error) {
		SMBWARNING("error = %d\n", error);
	}
	return events;
}

/*
 * Process a change notify message from the server
 */
static int
rcvd_notify_change(struct watch_item *watchItem, vfs_context_t context)
{
	struct smbnode *np = watchItem->np;
	struct smb_ntrq *ntp = watchItem->ntp;
	struct smb_rq *	rqp = (watchItem->ntp) ? watchItem->ntp->nt_rq : NULL;
	int error = 0;
	uint32_t events = VNODE_EVENT_ATTRIB | VNODE_EVENT_WRITE;

    if (watchItem->flags & SMBV_SMB2) {
        /* Using SMB 2.x */
        rqp = watchItem->rqp;

        if (rqp) {
            error = smb2_smb_parse_change_notify(rqp, &events);
        }
    }
    else {
        if (rqp) {
            /*
             * NOTE: smb_nt_reply calls smb_rq_reply which will remove the rqp from
             * the main thread's queue. So when we are done here call reset_notify_change
             * but tell it not to remove the request from the queue.
             */
            error = smb_nt_reply(ntp);
            if (!error)
                events = process_notify_change(ntp);
        }
    }

    if (error == ECANCELED) {
        /*
         * Either we closed the file descriptor or we canceled the
         * operation. Nothing else to do here, just get out.
         */
        SMBDEBUG("Notification for %s was canceled.\n", np->n_name);
        goto done;
    }

	/* Always reset the cache timer and force a lookup */
	np->attribute_cache_timer = 0;
	np->n_symlink_cache_timer = 0;
	if (error == ENOTSUP) {
		/* This server doesn't support notifications */
		SMBWARNING("Server doesn't support notifications, polling\n");
		return error;

	} else if ((error == ETIMEDOUT) || (error == ENOTCONN)) {
		SMBDEBUG("Processing notify for %s error = %d\n", np->n_name, error);
		watchItem->throttleBack = TRUE;
	} else if (error)  {
		SMBWARNING("We got an unexpected error: %d for %s\n", error, np->n_name);
		watchItem->throttleBack = TRUE;
	} else {
		struct timespec ts;

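		/*
		 * Throttle heuristic: if more than SMBFS_MAX_RCVD_NOTIFY replies
		 * arrive inside the current SMBFS_MAX_RCVD_NOTIFY_TIME second window,
		 * back off before sending the next request.
		 */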
		nanouptime(&ts);
		if (timespeccmp(&ts, &watchItem->last_notify_time, >)) {
			watchItem->rcvd_notify_count = 0;
			ts.tv_sec += SMBFS_MAX_RCVD_NOTIFY_TIME;
			watchItem->last_notify_time = ts;
		} else {
			watchItem->rcvd_notify_count++;
			if (watchItem->rcvd_notify_count > SMBFS_MAX_RCVD_NOTIFY)
				watchItem->throttleBack = TRUE;
		}
	}
	/* Notify them that something changed */
	smbfs_notified_vnode(np, watchItem->throttleBack, events, context);

done:
	reset_notify_change(watchItem, FALSE);
	return 0;
}

/*
 * Process a svrmsg notify message from the server
 */
static int
rcvd_svrmsg_notify(struct smbmount *smp, struct watch_item *watchItem)
{
	struct smb_rq *	rqp;
    uint32_t action, delay;
	int error = 0;

    /* svrmsg notify always uses SMB 2.x */
    rqp = watchItem->rqp;

    if (rqp == NULL) {
        /* Not good, log an error and punt */
        SMBDEBUG("Received svrmsg, but no rqp\n");
        error = EINVAL;
        goto done;
    }

    error = smb2_smb_parse_svrmsg_notify(rqp, &action, &delay);

    if (error) {
        SMBDEBUG("parse svrmsg error: %d\n", error);
        goto done;
    }

    /* Here is where we make the call to the Kernel Event Agent and
     * let it know what's going on with the server.
     *
     * Note: SVRMSG_GOING_DOWN and SVRMSG_SHUTDOWN_CANCELLED are mutually exclusive.
     *       Only one can be set at any given time.
     */
    lck_mtx_lock(&smp->sm_svrmsg_lock);
    if (action == SVRMSG_SHUTDOWN_START) {
        /* Clear any pending SVRMSG_RCVD_SHUTDOWN_CANCEL status */
        smp->sm_svrmsg_pending &= ~SVRMSG_RCVD_SHUTDOWN_CANCEL;

        /* Set SVRMSG_RCVD_GOING_DOWN & delay */
        smp->sm_svrmsg_pending |= SVRMSG_RCVD_GOING_DOWN;
        smp->sm_svrmsg_shutdown_delay = delay;

    } else if (action == SVRMSG_SHUTDOWN_CANCELLED) {
        /* Clear any pending SVRMSG_RCVD_GOING_DOWN status */
        smp->sm_svrmsg_pending &= ~SVRMSG_RCVD_GOING_DOWN;

        /* Set SVRMSG_RCVD_SHUTDOWN_CANCEL */
        smp->sm_svrmsg_pending |= SVRMSG_RCVD_SHUTDOWN_CANCEL;
    }
    lck_mtx_unlock(&smp->sm_svrmsg_lock);
    vfs_event_signal(NULL, VQ_SERVEREVENT, 0);

done:
	reset_notify_change(watchItem, FALSE);
	return error;
}

/*
 * Send a change notify message to the server
 */
static int
send_notify_change(struct watch_item *watchItem, vfs_context_t context)
{
	struct smbnode *np = watchItem->np;
	struct smb_share *share;
	struct smb_ntrq *ntp;
	struct mbchain *mbp;
	int error;
	uint32_t CompletionFilters;
    uint16_t smb1_fid;


	share = smb_get_share_with_reference(np->n_mount);
	if (share->ss_flags & SMBS_RECONNECTING) {
		/* While we are in reconnect stop sending */
		error = EAGAIN;
		goto done;
	}

	/* Need to wait for it to be reopened */
	if (np->d_needReopen) {
		error = EBADF;
		goto done;
	}

	/* Someone closed the directory, don't send any more notifies */
	if (np->d_fid == 0) {
		error = EBADF;
		goto done;
	}

	if (watchItem->throttleBack) {
		uint32_t events = VNODE_EVENT_ATTRIB | VNODE_EVENT_WRITE;
		/* Reset throttle state info */
		watchItem->throttleBack = FALSE;
		watchItem->rcvd_notify_count = 0;
		/*
		 * Something could have happened while we were throttled, so just say
		 * something changed.
		 */
		smbfs_notified_vnode(np, watchItem->throttleBack, events, context);
		nanouptime(&watchItem->last_notify_time);
		watchItem->last_notify_time.tv_sec += SMBFS_MAX_RCVD_NOTIFY_TIME;
	}

	SMBDEBUG("Sending notify for %s with fid = 0x%llx\n", np->n_name, np->d_fid);

	/* Items we want to be notified about. */
	CompletionFilters = SMBFS_NOTIFY_CHANGE_FILTERS;

    /*
     * Let SMB 2.x handle this
     */
    if (SSTOVC(share)->vc_flags & SMBV_SMB2) {
        /* Set max response size to 64K which should be plenty */
        watchItem->flags |= SMBV_SMB2;
        error = smb2fs_smb_change_notify(share, 64 * 1024,
                                         CompletionFilters,
                                         notify_callback_completion, watchItem,
                                         context);
        if (error) {
            SMBWARNING("smb2fs_smb_change_notify returned %d\n", error);
            reset_notify_change(watchItem, TRUE);
        }
		goto done;
    }

    error = smb_nt_alloc(SSTOCP(share), NT_TRANSACT_NOTIFY_CHANGE, context, &ntp);
	if (error) {
		goto done;	/* Something bad happened, try again later */
	}
	watchItem->ntp = ntp;
	mbp = &ntp->nt_tsetup;
	mb_init(mbp);

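	/*
	 * NT_TRANSACT_NOTIFY_CHANGE setup words: the completion filter, the
	 * SMB 1.x file id of the directory, and the watch tree flag.
	 */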
	mb_put_uint32le(mbp, CompletionFilters);	/* Completion Filter */
    smb1_fid = (uint16_t) np->d_fid;
	mb_put_uint16le(mbp, smb1_fid);
	/*
	 * We decided that watch tree should be set per item instead of per mount.
	 * So if we have to poll, then watch tree will be true for the parent node
	 * or root node. This will allow us to handle the case where we have too
	 * many notifications.
	 *
	 * NOTE: Still concerned about the traffic setting this can cause. It seems
	 *       the Finder calls monitor begin on every directory it has open and
	 *       viewable by the user. Also it never calls monitor end, so these
	 *       notifications hang around until the node goes inactive. So this
	 *       means if a root is being monitored and some subdirectory is being
	 *       monitored, then we will get a double response for everything in
	 *       the subdirectory. This is exactly what I have observed with the
	 *       latest Finder.
	 */
	/* Watch for things below this item */
	mb_put_uint16le(mbp, watchItem->watchTree);

	/* Amount of param data they can return, make sure it fits in one message */
	ntp->nt_maxpcount = SSTOVC(share)->vc_txmax -
					(SMB_HDRLEN+SMB_COM_NT_TRANS_LEN+SMB_MAX_SETUPCOUNT_LEN+1);
	ntp->nt_maxdcount = 0;
	error = smb_nt_async_request(ntp, notify_callback_completion, watchItem);
	if (error) {
		SMBWARNING("smb_nt_async_request returned %d\n", error);
		reset_notify_change(watchItem, TRUE);
	}
done:
	smb_share_rele(share, context);
	return error;
}

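/*
 * send_svrmsg_notify
 *
 * Send the SMB 2.x change notify request used to receive server messages.
 * It uses the full filter set, which lets the server recognize it as a
 * server message notify rather than a normal change notify.
 */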
static int
send_svrmsg_notify(struct smbmount *smp,
                   struct watch_item *svrItem,
                   vfs_context_t context)
{
	struct smb_share *share;
	int error;
	uint32_t CompletionFilters;

	share = smb_get_share_with_reference(smp);
	if (share->ss_flags & SMBS_RECONNECTING) {
		/* While we are in reconnect stop sending */
		error = EAGAIN;
		goto done;
	}

	/* Items we want to be notified about. */
	CompletionFilters = SMBFS_SVRMSG_NOTIFY_FILTERS;

    /* Set max response size to 64K which should be plenty */
    svrItem->flags |= SMBV_SMB2;
    error = smb2fs_smb_change_notify(share, 64 * 1024,
                                     CompletionFilters,
                                     notify_callback_completion, svrItem,
                                     context);
    if (error) {
        SMBWARNING("smb2fs_smb_change_notify returns %d\n", error);
        reset_notify_change(svrItem, TRUE);
    }

done:
	smb_share_rele(share, context);
	return error;
}

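/*
 * VolumeMaxNotification
 *
 * Work out how many outstanding change notify requests this mount may keep
 * in flight: half the session's credits (SMB 2.x) or half of maxmux
 * (SMB 1.x), divided by the number of volumes sharing the session.
 */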
static int
VolumeMaxNotification(struct smbmount *smp, vfs_context_t context)
{
	struct smb_share   *share;
	int32_t				vc_volume_cnt;
	int					maxWorkingCnt;

	share = smb_get_share_with_reference(smp);
	vc_volume_cnt = OSAddAtomic(0, &SSTOVC(share)->vc_volume_cnt);

	/*
	 * This share may have just been replaced by a Dfs failover, so try again.
	 */
	if (vc_volume_cnt == 0) {
		smb_share_rele(share, context);
		share = smb_get_share_with_reference(smp);
		vc_volume_cnt = OSAddAtomic(0, &SSTOVC(share)->vc_volume_cnt);
	}

	/* Just to be safe never let vc_volume_cnt be zero! */
	if (!vc_volume_cnt) {
		vc_volume_cnt = 1;
	}

    if (SSTOVC(share)->vc_flags & SMBV_SMB2) {
        /* SMB 2.x relies on crediting */
        maxWorkingCnt = (SSTOVC(share)->vc_credits_max / 2) / vc_volume_cnt;
    }
    else {
        /* SMB 1.x relies on maxmux */
        maxWorkingCnt = (SSTOVC(share)->vc_maxmux / 2) / vc_volume_cnt;
    }

	smb_share_rele(share, context);

	return maxWorkingCnt;
}

/*
 * process_svrmsg_items
 *
 * Process server message notifications.
 *
 */
static void
process_svrmsg_items(struct smbfs_notify_change *notify, vfs_context_t context)
{
	struct smbmount	*smp = notify->smp;
    struct watch_item *svrItem;
    int error;

    svrItem = notify->svrmsg_item;
    if (svrItem == NULL) {
        /* extremely unlikely, but just to be sure */
        return;
    }

    switch (svrItem->state) {
        case kReceivedNotify:
        {
            error = rcvd_svrmsg_notify(smp, svrItem);
            if (error == ENOTSUP) {
                /* Notify not supported, turn off svrmsg notify */

                /* This will effectively disable server messages */
                lck_mtx_lock(&svrItem->watch_statelock);
                SMBERROR("svrmsg notify not supported\n");
                svrItem->state = kWaitingForRemoval;
                lck_mtx_unlock(&svrItem->watch_statelock);
                break;
            } else if (error) {
                lck_mtx_lock(&svrItem->watch_statelock);
                svrItem->rcvd_notify_count++;
                if (svrItem->rcvd_notify_count > SMBFS_MAX_RCVD_NOTIFY) {
                    /* too many errors, turn off svrmsg notify */
                    SMBERROR("disabling svrmsg notify, error: %d\n", error);
                    svrItem->state = kWaitingForRemoval;
                } else {
                    svrItem->state = kSendNotify;
                }
                lck_mtx_unlock(&svrItem->watch_statelock);
                break;
            }

            lck_mtx_lock(&svrItem->watch_statelock);
            SMBDEBUG("Receive succeeded, sending next svrmsg notify\n");
            svrItem->state = kSendNotify;
            svrItem->rcvd_notify_count = 0;
            lck_mtx_unlock(&svrItem->watch_statelock);

            /* fall through to send another svrmsg notify */
        }

        case kSendNotify:
        {
            error = send_svrmsg_notify(smp, svrItem, context);
            if (error == EAGAIN) {
                /* Must be in reconnect, try to send later */
                break;
            }
            if (!error) {
                lck_mtx_lock(&svrItem->watch_statelock);
                svrItem->state = kWaitingOnNotify;
                lck_mtx_unlock(&svrItem->watch_statelock);
            }

            break;
        }

        case kCancelNotify:
            reset_notify_change(svrItem, TRUE);

            lck_mtx_lock(&svrItem->watch_statelock);
            svrItem->state = kWaitingForRemoval;
            lck_mtx_unlock(&svrItem->watch_statelock);
            wakeup(svrItem);
            break;

        default:
            SMBDEBUG("State %u ignored\n", svrItem->state);
            break;
    }
}

/*
 * process_notify_items
 *
 * Process all watch items on the notify change list.
 *
 */
static void
process_notify_items(struct smbfs_notify_change *notify, vfs_context_t context)
{
	struct smbmount	*smp = notify->smp;
	int maxWorkingCnt = VolumeMaxNotification(smp, context);
	struct watch_item *watchItem, *next;
	int	 updatePollingNodes = FALSE;
	int moveToPollCnt = 0, moveFromPollCnt = 0;
	int workingCnt;

	lck_mtx_lock(&notify->watch_list_lock);
	/* How many outstanding notifications do we have? */
	workingCnt = notify->watchCnt - notify->watchPollCnt;
	/* Calculate how many need to be moved to the polling state */
	if (workingCnt > maxWorkingCnt) {
		moveToPollCnt = workingCnt - maxWorkingCnt;
		SMBDEBUG("moveToPollCnt = %d \n", moveToPollCnt);
	}
	else if (notify->watchPollCnt) {
		/* Calculate how many we can move out of the polling state */
		moveFromPollCnt = maxWorkingCnt - workingCnt;
		if (notify->watchPollCnt < moveFromPollCnt) {
			moveFromPollCnt = notify->watchPollCnt;
			SMBDEBUG("moveFromPollCnt = %d\n", moveFromPollCnt);
		}
	}

    /* Process svrmsg notify messages */
    if (notify->pollOnly != TRUE && (notify->svrmsg_item != NULL)) {
        /* Server message notifications are handled separately */
        process_svrmsg_items(notify, context);
    }

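	/*
	 * Walk the watch list and drive each item's state machine:
	 * kSendNotify -> kWaitingOnNotify -> kReceivedNotify -> kSendNotify,
	 * kUsePollingToNotify for items we poll instead, and
	 * kCancelNotify -> kWaitingForRemoval when an item is being torn down.
	 */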
	STAILQ_FOREACH_SAFE(watchItem, &notify->watch_list, entries, next) {
		switch (watchItem->state) {
			case kCancelNotify:
                if (notify->pollOnly == TRUE) {
                    /* request already removed from the iod queue */
                    reset_notify_change(watchItem, FALSE);
                } else {
                    reset_notify_change(watchItem, TRUE);
                }

				lck_mtx_lock(&watchItem->watch_statelock);
				/* Wait for the user process to dequeue and free the item */
				watchItem->state = kWaitingForRemoval;
				lck_mtx_unlock(&watchItem->watch_statelock);
				wakeup(watchItem);
				break;
			case kReceivedNotify:
				/*
				 * Root is always the first item in the list, so we can set the
				 * flag here and know that all the polling nodes will get updated.
				 */
				if (watchItem->isRoot) {
					updatePollingNodes = TRUE;
					if (moveToPollCnt || (notify->watchPollCnt > moveFromPollCnt)) {
						/* We are polling so turn on watch tree */
						SMBDEBUG("watchTree = TRUE\n");
						watchItem->watchTree = TRUE;
					} else {
						SMBDEBUG("watchTree = FALSE\n");
						watchItem->watchTree = FALSE;
					}
				}
				if (rcvd_notify_change(watchItem, context) == ENOTSUP) {
					notify->pollOnly = TRUE;
					watchItem->state = kUsePollingToNotify;
					break;
				} else {
					watchItem->state = kSendNotify;
					if (watchItem->throttleBack) {
						SMBDEBUG("Throttling back %s\n", watchItem->np->n_name);
						notify->sleeptimespec.tv_sec = NOTIFY_THROTTLE_SLEEP_TIMO;
						break;	/* Hold back sending notifications until next time */
					}
				}
				/* Otherwise fall through, so we can send a new request */
			case kSendNotify:
			{
				int sendError;
				sendError = send_notify_change(watchItem, context);
				if (sendError == EAGAIN) {
					/* Must be in reconnect, try to send again later */
					break;
				}
				if (!sendError) {
					watchItem->state = kWaitingOnNotify;
					break;
				}
				if (!watchItem->isRoot && moveToPollCnt) {
					watchItem->state = kUsePollingToNotify;
					moveToPollCnt--;
					notify->watchPollCnt++;
					SMBDEBUG("Moving %s to poll state\n", watchItem->np->n_name);
				} else {
					/* If we got an error then keep trying */
					watchItem->state = kSendNotify;
				}
				break;
			}
			case kUsePollingToNotify:
				/* We can move some back to notify and turn off polling */
				if ((!notify->pollOnly) &&
                    moveFromPollCnt &&
                    (watchItem->np->d_fid != 0) &&
                    (!watchItem->np->d_needReopen)) {
					watchItem->state = kSendNotify;
					moveFromPollCnt--;
					notify->watchPollCnt--;
					notify->haveMoreWork = TRUE; /* Force us to resend these items */
					SMBDEBUG("Moving %s from polling to send state\n", watchItem->np->n_name);
				} else if (updatePollingNodes) {
					uint32_t events = VNODE_EVENT_ATTRIB | VNODE_EVENT_WRITE;
					smbfs_notified_vnode(watchItem->np, FALSE, events, context);
					SMBDEBUG("Updating %s using polling\n", watchItem->np->n_name);
				}
				break;
			case kWaitingOnNotify:
				/* Nothing to do here but wait */
				break;
			case kWaitingForRemoval:
				/* Just waiting for it to get removed */
				break;
		}
	}
	lck_mtx_unlock(&notify->watch_list_lock);
	/*
	 * Keep track of how many items we are over the limit, so we can kick them
	 * off in smbfs_restart_change_notify. We need this to keep one volume from
	 * hogging all the kqueue events. If it's zero, that means the
	 * smbfs_restart_change_notify code is done, so we can now add the new
	 * value if we have one.
	 */
	if (OSAddAtomic(0, &smp->tooManyNotifies) == 0)
		OSAddAtomic(moveToPollCnt, &smp->tooManyNotifies);
}

/*
 * notify_main
 *
 * Notify thread main routine.
 */
static void
notify_main(void *arg)
{
	struct smbfs_notify_change	*notify = arg;
	vfs_context_t		context;

	context = vfs_context_create((vfs_context_t)0);

	notify->sleeptimespec.tv_nsec = 0;

	lck_mtx_lock(&notify->notify_statelock);
	notify->notify_state = kNotifyThreadRunning;
	lck_mtx_unlock(&notify->notify_statelock);

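	/*
	 * Run until told to stop. Each pass processes the watch list, then sleeps
	 * until either more work is flagged via notify_wakeup() or the timeout
	 * expires.
	 */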
	while (notify->notify_state == kNotifyThreadRunning) {
		notify->sleeptimespec.tv_sec = NOTIFY_CHANGE_SLEEP_TIMO;
		notify->haveMoreWork = FALSE;
		process_notify_items(notify, context);
		if (!notify->haveMoreWork)
			msleep(&notify->notify_state, 0, PWAIT, "notify change idle",
				   &notify->sleeptimespec);
	}
	/* Shouldn't have anything in the queue at this point */
	DBG_ASSERT(STAILQ_EMPTY(&notify->watch_list))

	lck_mtx_lock(&notify->notify_statelock);
	notify->notify_state = kNotifyThreadStop;
	lck_mtx_unlock(&notify->notify_statelock);
	vfs_context_rele(context);
	wakeup(notify);
}

/*
 * smbfs_notify_change_create_thread
 *
 * Create and start the thread used to handle notify change requests.
 */
void
smbfs_notify_change_create_thread(struct smbmount *smp)
{
	struct smbfs_notify_change	*notify;
	kern_return_t	result;
	thread_t		thread;

	SMB_MALLOC(notify, struct smbfs_notify_change *, sizeof(*notify), M_TEMP,
		   M_WAITOK | M_ZERO);
	smp->notify_thread = notify;

	notify->smp = smp;
	lck_mtx_init(&notify->notify_statelock, smbfs_mutex_group, smbfs_lock_attr);
	lck_mtx_init(&notify->watch_list_lock, smbfs_mutex_group, smbfs_lock_attr);
	STAILQ_INIT(&notify->watch_list);

	notify->notify_state = kNotifyThreadStarting;

	result = kernel_thread_start((thread_continue_t)notify_main, notify, &thread);
	if (result != KERN_SUCCESS) {
		SMBERROR("can't start notify change thread: result = %d\n", result);
		smp->notify_thread = NULL;
		SMB_FREE(notify, M_TEMP);
		return;
	}
	thread_deallocate(thread);
	return;
}

/*
 * smbfs_notify_change_destroy_thread
 *
 * Stop the thread used to handle notify change requests and free any memory
 * used by the thread.
 *
 * NOTE: All watch items should have already been removed from the thread's list.
 */
void
smbfs_notify_change_destroy_thread(struct smbmount *smp)
{
	struct smbfs_notify_change	*notify = smp->notify_thread;

	if (smp->notify_thread == NULL)
		return;
	smp->notify_thread = NULL;
	notify->notify_state = kNotifyThreadStopping;
	wakeup(&notify->notify_state);

	for (;;) {
		lck_mtx_lock(&notify->notify_statelock);
		if (notify->notify_state == kNotifyThreadStop) {
			lck_mtx_unlock(&notify->notify_statelock);
			if (STAILQ_EMPTY(&notify->watch_list)) {
				SMBDEBUG("Watch thread going away\n");
			} else {
				SMBERROR("Watch thread going away with watch items, very bad?\n");
			}
			break;
		}
		msleep(notify, &notify->notify_statelock, PWAIT | PDROP, "notify change exit", 0);
	}
	lck_mtx_destroy(&notify->notify_statelock, smbfs_mutex_group);
	lck_mtx_destroy(&notify->watch_list_lock, smbfs_mutex_group);
	SMB_FREE(notify, M_TEMP);
}

/*
 * enqueue_notify_change_request
 *
 * Allocate an item and place it on the list.
 */
static void
enqueue_notify_change_request(struct smbfs_notify_change *notify,
							  struct smbnode *np)
{
	struct watch_item *watchItem;

	SMB_MALLOC(watchItem, struct watch_item *, sizeof(*watchItem), M_TEMP, M_WAITOK | M_ZERO);
	lck_mtx_init(&watchItem->watch_statelock, smbfs_mutex_group, smbfs_lock_attr);
	watchItem->isRoot = vnode_isvroot(np->n_vnode);
	watchItem->np = np;
	if (notify->pollOnly) {
		watchItem->state = kUsePollingToNotify;
	} else {
		watchItem->state = kSendNotify;
	}
	watchItem->notify = notify;
	nanouptime(&watchItem->last_notify_time);
	watchItem->last_notify_time.tv_sec += SMBFS_MAX_RCVD_NOTIFY_TIME;
	lck_mtx_lock(&notify->watch_list_lock);
	notify->watchCnt++;
	SMBDEBUG("Enqueue %s count = %d poll count = %d\n", np->n_name,
			 notify->watchCnt, notify->watchPollCnt);
	/* Always make sure the root vnode is the first item in the list */
	if (watchItem->isRoot) {
		STAILQ_INSERT_HEAD(&notify->watch_list, watchItem, entries);
	} else {
		STAILQ_INSERT_TAIL(&notify->watch_list, watchItem, entries);
	}
	lck_mtx_unlock(&notify->watch_list_lock);
	notify_wakeup(notify);
}

/*
 * enqueue_notify_svrmsg_request
 *
 * Allocate an item for server messages, and place it
 * in the notify struct.
 */
static void
enqueue_notify_svrmsg_request(struct smbfs_notify_change *notify)
{
	struct watch_item *watchItem;

    if (notify->pollOnly) {
        SMBERROR("Server doesn't support notify, not enabling svrmsg notify\n");
        return;
    }

	SMB_MALLOC(watchItem, struct watch_item *, sizeof(*watchItem), M_TEMP, M_WAITOK | M_ZERO);
	lck_mtx_init(&watchItem->watch_statelock, smbfs_mutex_group, smbfs_lock_attr);

    watchItem->isServerMsg = TRUE;
    watchItem->state = kSendNotify;

	watchItem->notify = notify;
	nanouptime(&watchItem->last_notify_time);
	lck_mtx_lock(&notify->watch_list_lock);

    notify->svrmsg_item = watchItem;
	lck_mtx_unlock(&notify->watch_list_lock);
	notify_wakeup(notify);
}

/*
 * dequeue_notify_change_request
 *
 * Search the list, and if we find a match set the state to cancel. Then wait
 * for the watch thread to say it's OK to remove the item.
 */
static void
dequeue_notify_change_request(struct smbfs_notify_change *notify,
							  struct smbnode *np)
{
	struct watch_item *watchItem, *next;

	lck_mtx_lock(&notify->watch_list_lock);
	STAILQ_FOREACH_SAFE(watchItem, &notify->watch_list, entries, next) {
		if (watchItem->np == np) {
			notify->watchCnt--;
			lck_mtx_lock(&watchItem->watch_statelock);
			if (watchItem->state == kUsePollingToNotify)
				notify->watchPollCnt--;
			SMBDEBUG("Dequeue %s count = %d poll count = %d\n", np->n_name,
					 notify->watchCnt, notify->watchPollCnt);
			watchItem->state = kCancelNotify;
			lck_mtx_unlock(&watchItem->watch_statelock);
			notify_wakeup(notify);
			msleep(watchItem, &notify->watch_list_lock, PWAIT,
				   "notify watchItem cancel", NULL);
			STAILQ_REMOVE(&notify->watch_list, watchItem, watch_item, entries);
			SMB_FREE(watchItem, M_TEMP);
			watchItem = NULL;
			break;
		}
	}
	lck_mtx_unlock(&notify->watch_list_lock);
}

/*
 * dequeue_notify_svrmsg_request
 *
 * Set the svrmsg_item state to cancel, then wait for the
 * watch thread to say it's OK to remove the item.
 */
static void
dequeue_notify_svrmsg_request(struct smbfs_notify_change *notify)
{
	struct watch_item *watchItem = notify->svrmsg_item;

    if (watchItem == NULL) {
        return;
    }

    lck_mtx_lock(&notify->watch_list_lock);

    lck_mtx_lock(&watchItem->watch_statelock);
    watchItem->state = kCancelNotify;
    lck_mtx_unlock(&watchItem->watch_statelock);

    notify_wakeup(notify);
    msleep(watchItem, &notify->watch_list_lock, PWAIT,
           "svrmsg watchItem cancel", NULL);

    if (watchItem->state != kWaitingForRemoval) {
        SMBERROR("svrmsgItem->state: %d, expected kWaitingForRemoval\n", watchItem->state);
    }

    lck_mtx_lock(&watchItem->watch_statelock);
    notify->svrmsg_item = NULL;
    lck_mtx_unlock(&watchItem->watch_statelock);

    SMB_FREE(watchItem, M_TEMP);

	lck_mtx_unlock(&notify->watch_list_lock);
}

/*
 * smbfs_start_change_notify
 *
 * Start the change notify process. Called from the smbfs_vnop_monitor routine.
 *
 * The calling routine must hold a reference on the share
 *
 */
int
smbfs_start_change_notify(struct smb_share *share, struct smbnode *np,
						  vfs_context_t context, int *releaseLock)
{
	struct smbmount *smp = np->n_mount;
	int error;

	if (smp->notify_thread == NULL) {
		/* This server doesn't support notify change so turn on polling */
		np->n_flag |= N_POLLNOTIFY;
		SMBDEBUG("Monitoring %s with polling\n", np->n_name);
	} else {
		if (np->d_kqrefcnt) {
			np->d_kqrefcnt++;	/* Already processing this node, we are done */
			return 0;
		}
		np->d_kqrefcnt++;
		/* Setting SMB2_SYNCHRONIZE because XP does. */
		error = smbfs_tmpopen(share, np, SMB2_FILE_READ_DATA | SMB2_SYNCHRONIZE,
                              &np->d_fid, context);
		if (error)	{
			/* Open failed so turn on polling */
			np->n_flag |= N_POLLNOTIFY;
			SMBDEBUG("Monitoring %s failed to open. %d\n", np->n_name, error);
		} else {
			SMBDEBUG("Monitoring %s\n", np->n_name);
			/*
			 * We no longer need the node lock. So unlock the node so we have no
			 * lock contention with the notify list lock.
			 *
			 * Make sure we tell the calling routine that we have released the
			 * node lock.
			 */
			*releaseLock = FALSE;
			smbnode_unlock(np);
			enqueue_notify_change_request(smp->notify_thread, np);
		}
	}
	return 0;
}

/*
 * smbfs_start_svrmsg_notify
 *
 * Start the change notify process. Called from the smbfs mount routine.
 *
 * The calling routine must hold a reference on the share
 *
 */
int
smbfs_start_svrmsg_notify(struct smbmount *smp)
{
	int error = 0;

	if (smp->notify_thread == NULL) {
		/* This server doesn't support notify change, so forget svrmsg
		 * notifications
		 */
		SMBDEBUG("Server doesn't support notify\n");
		error = ENOTSUP;
	} else {
		SMBDEBUG("Monitoring server messages\n");
		enqueue_notify_svrmsg_request(smp->notify_thread);
	}
	return error;
}

/*
 * smbfs_stop_change_notify
 *
 * Called from the smbfs_vnop_monitor or smb_vnop_inactive routine. If this is
 * the last close then close the directory and set the fid to zero. This will
 * stop the watch event from doing any further work. Now dequeue the watch item.
 *
 * The calling routine must hold a reference on the share
 *
 */
int
smbfs_stop_change_notify(struct smb_share *share, struct smbnode *np,
						 int forceClose, vfs_context_t context, int *releaseLock)
{
	struct smbmount *smp = np->n_mount;
	SMBFID	fid;

	if (forceClose)
		np->d_kqrefcnt = 0;
	else
		np->d_kqrefcnt--;

	/* Still have users monitoring, just get out */
	if (np->d_kqrefcnt > 0)
		return 0;

	DBG_ASSERT(np->d_kqrefcnt == 0)
	/* If polling was turned on, turn it off */
	np->n_flag &= ~N_POLLNOTIFY;
	fid = np->d_fid;
	/* Stop all notify network traffic */
	np->d_fid = 0;
	/* If marked for reopen, turn it off */
	np->d_needReopen = FALSE;
	np->d_kqrefcnt = 0;
	/* If we have it open then close it */
	if (fid != 0) {
		(void)smbfs_tmpclose(share, np, fid, context);
    }
	SMBDEBUG("We are no longer monitoring %s\n", np->n_name);
	if (smp->notify_thread) {
		/*
		 * We no longer need the node lock. So unlock the node so we have no
		 * lock contention with the notify list lock.
		 *
		 * Make sure we tell the calling routine that we have released the
		 * node lock.
		 */
		*releaseLock = FALSE;
		smbnode_unlock(np);
		dequeue_notify_change_request(smp->notify_thread, np);
	}
	return 0;
}

int
smbfs_stop_svrmsg_notify(struct smbmount *smp)
{
	SMBDEBUG("We are no longer monitoring svrmsg notify replies\n");

	if (smp->notify_thread) {
		dequeue_notify_svrmsg_request(smp->notify_thread);
	}
	return 0;
}

/*
 * smbfs_restart_change_notify
 *
 * Reopen the directory and wake up the notify queue.
 *
 * The calling routine must hold a reference on the share
 *
 */
void
smbfs_restart_change_notify(struct smb_share *share, struct smbnode *np,
							vfs_context_t context)
{
	struct smbmount *smp = np->n_mount;
	int error;

	/* This server doesn't support notify change, so we are done, just return */
	if (smp->notify_thread == NULL) {
		np->d_needReopen = FALSE;
		return;
	}
	if (!np->d_needReopen) {
		SMBFID	fid = np->d_fid;

		if ((vnode_isvroot(np->n_vnode)) ||
			(OSAddAtomic(0, &smp->tooManyNotifies) == 0)) {
			/* Nothing to do here, just get out */
			return;
		}

		/* We have too many notifications outstanding, so force this node to use polling */
		SMBDEBUG("Need to close '%s' so we can force it to use polling\n",
				 np->n_name);
		np->d_needReopen = TRUE;
		np->d_fid = 0;
		/*
		 * Closing it here will cause the server to send a cancel error, which
		 * will cause the notification thread to place this item in the poll
		 * state.
		 */
		(void)smbfs_tmpclose(share, np, fid, context);
		OSAddAtomic(-1, &smp->tooManyNotifies);
		return;	/* Nothing left to do here, just get out */
	}
	SMBDEBUG("%s is being reopened for monitoring\n", np->n_name);
	/*
	 * We set the capability VOL_CAP_INT_REMOTE_EVENT for all supported
	 * servers. So if they call us without checking the
	 * capabilities, then they get what they get.
	 *
	 * Setting SMB2_SYNCHRONIZE because XP does.
	 *
	 * Now reopen the directory.
	 */
	error = smbfs_tmpopen(share, np, SMB2_FILE_READ_DATA | SMB2_SYNCHRONIZE,
						  &np->d_fid, context);
	if (error) {
		SMBWARNING("Attempting to reopen %s failed %d\n", np->n_name, error);
		return;
	}

	np->d_needReopen = FALSE;
	notify_wakeup(smp->notify_thread);
}