1/*
2 * Copyright (c) 2004-2008 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28#include <stdarg.h>
29#include <sys/param.h>
30#include <sys/systm.h>
31#include <sys/event.h>         // for kqueue related stuff
32#include <sys/fsevents.h>
33
34#if CONFIG_FSE
35#include <sys/namei.h>
36#include <sys/filedesc.h>
37#include <sys/kernel.h>
38#include <sys/file_internal.h>
39#include <sys/stat.h>
40#include <sys/vnode_internal.h>
41#include <sys/mount_internal.h>
42#include <sys/proc_internal.h>
43#include <sys/kauth.h>
44#include <sys/uio.h>
45#include <sys/malloc.h>
46#include <sys/dirent.h>
47#include <sys/attr.h>
48#include <sys/sysctl.h>
49#include <sys/ubc.h>
50#include <machine/cons.h>
51#include <miscfs/specfs/specdev.h>
52#include <miscfs/devfs/devfs.h>
53#include <sys/filio.h>
54#include <kern/locks.h>
55#include <libkern/OSAtomic.h>
56#include <kern/zalloc.h>
57#include <mach/mach_time.h>
58#include <kern/thread_call.h>
59#include <kern/clock.h>
60
61#include <security/audit/audit.h>
62#include <bsm/audit_kevents.h>
63
64#include <pexpert/pexpert.h>
65
66typedef struct kfs_event {
67    LIST_ENTRY(kfs_event) kevent_list;
68    int16_t        type;           // type code of this event
69    u_int16_t      flags,          // per-event flags
70                   len;            // the length of the path in "str"
71    int32_t        refcount;       // number of clients referencing this
72    pid_t          pid;            // pid of the process that did the op
73
74    uint64_t       abstime;        // when this event happened (mach_absolute_time())
75    ino64_t        ino;
76    dev_t          dev;
77    int32_t        mode;
78    uid_t          uid;
79    gid_t          gid;
80
81    const char    *str;
82
83    struct kfs_event *dest;    // if this is a two-file op
84} kfs_event;
85
86// flags for the flags field
87#define KFSE_COMBINED_EVENTS          0x0001
88#define KFSE_CONTAINS_DROPPED_EVENTS  0x0002
89#define KFSE_RECYCLED_EVENT           0x0004
90#define KFSE_BEING_CREATED            0x0008
91
92LIST_HEAD(kfse_list, kfs_event) kfse_list_head = LIST_HEAD_INITIALIZER(x);
93int num_events_outstanding = 0;
94int num_pending_rename = 0;
95
96
97struct fsevent_handle;
98
99typedef struct fs_event_watcher {
100    int8_t      *event_list;             // the events we're interested in
101    int32_t      num_events;
102    dev_t       *devices_not_to_watch;   // report events from devices not in this list
103    uint32_t     num_devices;
104    int32_t      flags;
105    kfs_event  **event_queue;
106    int32_t      eventq_size;            // number of event pointers in queue
107    int32_t      num_readers;
108    int32_t      rd;                     // read index into the event_queue
109    int32_t      wr;                     // write index into the event_queue
110    int32_t      blockers;
111    int32_t      my_id;
112    uint32_t     num_dropped;
113    uint64_t     max_event_id;
114    struct fsevent_handle *fseh;
115    pid_t        pid;
116    char         proc_name[(2 * MAXCOMLEN) + 1];
117} fs_event_watcher;
118
119// fs_event_watcher flags
120#define WATCHER_DROPPED_EVENTS         0x0001
121#define WATCHER_CLOSING                0x0002
122#define WATCHER_WANTS_COMPACT_EVENTS   0x0004
123#define WATCHER_WANTS_EXTENDED_INFO    0x0008
124#define WATCHER_APPLE_SYSTEM_SERVICE   0x0010   // fseventsd, coreservicesd, mds
125
126#define MAX_WATCHERS  8
127static fs_event_watcher *watcher_table[MAX_WATCHERS];
128
129#define DEFAULT_MAX_KFS_EVENTS   4096
130static int max_kfs_events = DEFAULT_MAX_KFS_EVENTS;
131
132// we allocate kfs_event structures out of this zone
133static zone_t     event_zone;
134static int        fs_event_init = 0;
135
136//
137// this array records whether anyone is interested in a
138// particular type of event.  if no one is, we bail out
139// early from the event delivery
140//
141static int16_t     fs_event_type_watchers[FSE_MAX_EVENTS];
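//
// a minimal usage sketch (hypothetical caller, assuming it already holds
// a vnode): need_fsevent() below consults this array so a caller can skip
// building an event that nobody will receive, e.g.
//
//     if (need_fsevent(FSE_CONTENT_MODIFIED, vp)) {
//         add_fsevent(FSE_CONTENT_MODIFIED, ctx, FSE_ARG_VNODE, vp, FSE_ARG_DONE);
//     }
//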
142
143static int  watcher_add_event(fs_event_watcher *watcher, kfs_event *kfse);
144static void fsevents_wakeup(fs_event_watcher *watcher);
145
146//
147// Locks
148//
149static lck_grp_attr_t *  fsevent_group_attr;
150static lck_attr_t *      fsevent_lock_attr;
151static lck_grp_t *       fsevent_mutex_group;
152
153static lck_grp_t *       fsevent_rw_group;
154
155static lck_rw_t  event_handling_lock; // handles locking for event manipulation and recycling
156static lck_mtx_t watch_table_lock;
157static lck_mtx_t event_buf_lock;
158static lck_mtx_t event_writer_lock;
159
160
161/* Explicitly declare qsort so compiler doesn't complain */
162__private_extern__ void qsort(
163    void * array,
164    size_t nmembers,
165    size_t member_size,
166    int (*)(const void *, const void *));
167
168static int
is_ignored_directory(const char *path)
{
171    if (!path) {
172      return 0;
173    }
174
175#define IS_TLD(x) strnstr((char *) path, x, MAXPATHLEN)
176    if (IS_TLD("/.Spotlight-V100/") ||
177        IS_TLD("/.MobileBackups/") ||
178        IS_TLD("/Backups.backupdb/")) {
179        return 1;
180    }
181#undef IS_TLD
182
183    return 0;
184}
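//
// example (hypothetical path): for a watcher that is not an Apple system
// service, an event whose path contains "/Backups.backupdb/" -- say a
// Time Machine item under "/Volumes/TM/Backups.backupdb/..." -- gets
// skipped in fmod_watch() below.
//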
185
186static void
187fsevents_internal_init(void)
188{
189    int i;
190
191    if (fs_event_init++ != 0) {
192	return;
193    }
194
195    for(i=0; i < FSE_MAX_EVENTS; i++) {
196	fs_event_type_watchers[i] = 0;
197    }
198
199    memset(watcher_table, 0, sizeof(watcher_table));
200
201    fsevent_lock_attr    = lck_attr_alloc_init();
202    fsevent_group_attr   = lck_grp_attr_alloc_init();
203    fsevent_mutex_group  = lck_grp_alloc_init("fsevent-mutex", fsevent_group_attr);
204    fsevent_rw_group     = lck_grp_alloc_init("fsevent-rw", fsevent_group_attr);
205
206    lck_mtx_init(&watch_table_lock, fsevent_mutex_group, fsevent_lock_attr);
207    lck_mtx_init(&event_buf_lock, fsevent_mutex_group, fsevent_lock_attr);
208    lck_mtx_init(&event_writer_lock, fsevent_mutex_group, fsevent_lock_attr);
209
210    lck_rw_init(&event_handling_lock, fsevent_rw_group, fsevent_lock_attr);
211
212    PE_get_default("kern.maxkfsevents", &max_kfs_events, sizeof(max_kfs_events));
213
214    event_zone = zinit(sizeof(kfs_event),
215	               max_kfs_events * sizeof(kfs_event),
216	               max_kfs_events * sizeof(kfs_event),
217	               "fs-event-buf");
218    if (event_zone == NULL) {
219	printf("fsevents: failed to initialize the event zone.\n");
220    }
221
222    // mark the zone as exhaustible so that it will not
223    // ever grow beyond what we initially filled it with
224    zone_change(event_zone, Z_EXHAUST, TRUE);
225    zone_change(event_zone, Z_COLLECT, FALSE);
226    zone_change(event_zone, Z_CALLERACCT, FALSE);
227
228    if (zfill(event_zone, max_kfs_events) < max_kfs_events) {
229	printf("fsevents: failed to pre-fill the event zone.\n");
230    }
231
232}
233
234static void
235lock_watch_table(void)
236{
237    lck_mtx_lock(&watch_table_lock);
238}
239
240static void
241unlock_watch_table(void)
242{
243    lck_mtx_unlock(&watch_table_lock);
244}
245
246static void
247lock_fs_event_list(void)
248{
249    lck_mtx_lock(&event_buf_lock);
250}
251
252static void
253unlock_fs_event_list(void)
254{
255    lck_mtx_unlock(&event_buf_lock);
256}
257
258// forward prototype
259static void release_event_ref(kfs_event *kfse);
260
261static int
262watcher_cares_about_dev(fs_event_watcher *watcher, dev_t dev)
263{
264    unsigned int i;
265
266    // if devices_not_to_watch is NULL then we care about all
267    // events from all devices
268    if (watcher->devices_not_to_watch == NULL) {
269	return 1;
270    }
271
272    for(i=0; i < watcher->num_devices; i++) {
273	if (dev == watcher->devices_not_to_watch[i]) {
274	    // found a match! that means we do not
275	    // want events from this device.
276	    return 0;
277	}
278    }
279
280    // if we're here it's not in the devices_not_to_watch[]
281    // list so that means we do care about it
282    return 1;
283}
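//
// note: devices_not_to_watch is an exclusion list.  it is installed by the
// OLD/NEW_FSEVENTS_DEVICE_FILTER ioctls (see fseventsf_ioctl() below), so a
// NULL list means "deliver events from every device".
//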
284
285
286int
287need_fsevent(int type, vnode_t vp)
288{
289    if (type >= 0 && type < FSE_MAX_EVENTS && fs_event_type_watchers[type] == 0)
290	return (0);
291
292    // events in /dev aren't really interesting...
293    if (vp->v_tag == VT_DEVFS) {
294	return (0);
295    }
296
297    return 1;
298}
299
300
301#define is_throw_away(x)  ((x) == FSE_STAT_CHANGED || (x) == FSE_CONTENT_MODIFIED)
302
303
304// Ways that an event can be reused:
305//
306// "combined" events mean that there were two events for
307// the same vnode or path and we're combining both events
308// into a single event.  The primary event gets a bit that
309// marks it as having been combined.  The secondary event
310// is essentially dropped and the kfse structure reused.
311//
312// "collapsed" means that multiple events below a given
313// directory are collapsed into a single event.  in this
314// case, the directory that we collapse into and all of
315// its children must be re-scanned.
316//
317// "recycled" means that we're completely blowing away
318// the event since there are other events that have info
319// about the same vnode or path (and one of those other
320// events will be marked as combined or collapsed as
321// appropriate).
322//
323#define KFSE_COMBINED   0x0001
324#define KFSE_COLLAPSED  0x0002
325#define KFSE_RECYCLED   0x0004
326
327int num_dropped         = 0;
328int num_parent_switch   = 0;
329int num_recycled_rename = 0;
330
331static struct timeval last_print;
332
333//
334// These variables are used to track coalescing multiple identical
335// events for the same vnode/pathname.  If we get the same event
336// type and same vnode/pathname as the previous event, we just drop
337// the event since it's superfluous.  This improves some micro-
338// benchmarks considerably and actually has a real-world impact on
339// tests like a Finder copy where multiple stat-changed events can
340// get coalesced.
341//
342static int     last_event_type=-1;
343static void   *last_ptr=NULL;
344static char    last_str[MAXPATHLEN];
345static int     last_nlen=0;
346static int     last_vid=-1;
347static uint64_t last_coalesced_time=0;
348static void   *last_event_ptr=NULL;
349int            last_coalesced = 0;
350static mach_timebase_info_data_t    sTimebaseInfo = { 0, 0 };
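//
// concretely (see the check in add_fsevent() below): if the new event has
// the same type as the previous one, arrives less than one second later
// (elapsed < 1000000000 ns), and refers to the same vnode (same vid and
// pointer) or the same path string, it is dropped and last_coalesced is
// bumped instead of allocating a new kfs_event.
//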
351
352
353int
354add_fsevent(int type, vfs_context_t ctx, ...)
355{
356    struct proc	     *p = vfs_context_proc(ctx);
357    int               i, arg_type, ret;
358    kfs_event        *kfse, *kfse_dest=NULL, *cur;
359    fs_event_watcher *watcher;
360    va_list           ap;
361    int 	      error = 0, did_alloc=0;
362    dev_t             dev = 0;
363    uint64_t          now, elapsed;
364    char             *pathbuff=NULL;
365    int               pathbuff_len;
366
367
368
369    va_start(ap, ctx);
370
371    // ignore bogus event types..
    if (type < 0 || type >= FSE_MAX_EVENTS) {
	va_end(ap);
	return EINVAL;
    }
375
376    // if no one cares about this type of event, bail out
377    if (fs_event_type_watchers[type] == 0) {
378	va_end(ap);
379
380	return 0;
381    }
382
383    now = mach_absolute_time();
384
385    // find a free event and snag it for our use
386    // NOTE: do not do anything that would block until
387    //       the lock is dropped.
388    lock_fs_event_list();
389
390    //
391    // check if this event is identical to the previous one...
392    // (as long as it's not an event type that can never be the
393    // same as a previous event)
394    //
395    if (type != FSE_CREATE_FILE && type != FSE_DELETE && type != FSE_RENAME && type != FSE_EXCHANGE && type != FSE_CHOWN && type != FSE_DOCID_CHANGED && type != FSE_DOCID_CREATED) {
396	void *ptr=NULL;
397	int   vid=0, was_str=0, nlen=0;
398
399	for(arg_type=va_arg(ap, int32_t); arg_type != FSE_ARG_DONE; arg_type=va_arg(ap, int32_t)) {
400	    switch(arg_type) {
401		case FSE_ARG_VNODE: {
402		    ptr = va_arg(ap, void *);
403		    vid = vnode_vid((struct vnode *)ptr);
404		    last_str[0] = '\0';
405		    break;
406		}
407		case FSE_ARG_STRING: {
408		    nlen = va_arg(ap, int32_t);
409		    ptr = va_arg(ap, void *);
410		    was_str = 1;
411		    break;
412		}
413	    }
414	    if (ptr != NULL) {
415		break;
416	    }
417	}
418
419	if ( sTimebaseInfo.denom == 0 ) {
420	    (void) clock_timebase_info(&sTimebaseInfo);
421	}
422
423	elapsed = (now - last_coalesced_time);
424	if (sTimebaseInfo.denom != sTimebaseInfo.numer) {
425	    if (sTimebaseInfo.denom == 1) {
426		elapsed *= sTimebaseInfo.numer;
427	    } else {
428		// this could overflow... the worst that will happen is that we'll
429		// send (or not send) an extra event so I'm not going to worry about
430		// doing the math right like dtrace_abs_to_nano() does.
431		elapsed = (elapsed * sTimebaseInfo.numer) / (uint64_t)sTimebaseInfo.denom;
432	    }
433	}
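	// i.e. elapsed is converted from mach_absolute_time() units to
	// (roughly) nanoseconds: elapsed_ns = elapsed_abs * numer / denom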
434
	if (type == last_event_type
	    && (elapsed < 1000000000)
	    && ((vid && vid == last_vid && last_ptr == ptr)
		|| (last_str[0] && last_nlen == nlen && ptr && strcmp(last_str, ptr) == 0))) {
442
443	    last_coalesced++;
444	    unlock_fs_event_list();
445	    va_end(ap);
446
447	    return 0;
448	} else {
449	    last_ptr = ptr;
450	    if (was_str) {
451		strlcpy(last_str, ptr, sizeof(last_str));
452	    }
453	    last_nlen = nlen;
454	    last_vid = vid;
455	    last_event_type = type;
456	    last_coalesced_time = now;
457	}
458    }
    va_end(ap);
    va_start(ap, ctx);
460
461
462    kfse = zalloc_noblock(event_zone);
463    if (kfse && (type == FSE_RENAME || type == FSE_EXCHANGE)) {
464	kfse_dest = zalloc_noblock(event_zone);
465	if (kfse_dest == NULL) {
466	    did_alloc = 1;
467	    zfree(event_zone, kfse);
468	    kfse = NULL;
469	}
470    }
471
472
473    if (kfse == NULL) {        // yikes! no free events
474	    unlock_fs_event_list();
475	    lock_watch_table();
476
477	    for(i=0; i < MAX_WATCHERS; i++) {
478		watcher = watcher_table[i];
479		if (watcher == NULL) {
480		    continue;
481		}
482
483		watcher->flags |= WATCHER_DROPPED_EVENTS;
484		fsevents_wakeup(watcher);
485	    }
486	    unlock_watch_table();
487
488	    {
489		struct timeval current_tv;
490
491		num_dropped++;
492
		// only print a message at most once every 10 seconds
494		microuptime(&current_tv);
495		if ((current_tv.tv_sec - last_print.tv_sec) > 10) {
496		    int ii;
497		    void *junkptr=zalloc_noblock(event_zone), *listhead=kfse_list_head.lh_first;
498
499		    printf("add_fsevent: event queue is full! dropping events (num dropped events: %d; num events outstanding: %d).\n", num_dropped, num_events_outstanding);
500		    printf("add_fsevent: kfse_list head %p ; num_pending_rename %d\n", listhead, num_pending_rename);
501		    printf("add_fsevent: zalloc sez: %p\n", junkptr);
502		    printf("add_fsevent: event_zone info: %d 0x%x\n", ((int *)event_zone)[0], ((int *)event_zone)[1]);
503		    lock_watch_table();
504		    for(ii=0; ii < MAX_WATCHERS; ii++) {
505			if (watcher_table[ii] == NULL) {
506			    continue;
507			}
508
509			printf("add_fsevent: watcher %s %p: rd %4d wr %4d q_size %4d flags 0x%x\n",
510			       watcher_table[ii]->proc_name,
511			       watcher_table[ii],
512			       watcher_table[ii]->rd, watcher_table[ii]->wr,
513			       watcher_table[ii]->eventq_size, watcher_table[ii]->flags);
514		    }
515		    unlock_watch_table();
516
517		    last_print = current_tv;
518		    if (junkptr) {
519			zfree(event_zone, junkptr);
520		    }
521		}
522	    }
523
524	    if (pathbuff) {
525		release_pathbuff(pathbuff);
526		pathbuff = NULL;
527	    }
528	    return ENOSPC;
529	}
530
531    memset(kfse, 0, sizeof(kfs_event));
532    kfse->refcount = 1;
533    OSBitOrAtomic16(KFSE_BEING_CREATED, &kfse->flags);
534
535    last_event_ptr = kfse;
536    kfse->type     = type;
537    kfse->abstime  = now;
538    kfse->pid      = p->p_pid;
539    if (type == FSE_RENAME || type == FSE_EXCHANGE) {
540	memset(kfse_dest, 0, sizeof(kfs_event));
541	kfse_dest->refcount = 1;
542	OSBitOrAtomic16(KFSE_BEING_CREATED, &kfse_dest->flags);
543	kfse_dest->type     = type;
544	kfse_dest->pid      = p->p_pid;
545	kfse_dest->abstime  = now;
546
547	kfse->dest = kfse_dest;
548    }
549
550    num_events_outstanding++;
551    if (kfse->type == FSE_RENAME) {
552	num_pending_rename++;
553    }
554    LIST_INSERT_HEAD(&kfse_list_head, kfse, kevent_list);
555
556    if (kfse->refcount < 1) {
	panic("add_fsevent: line %d: kfse refcount %d but should be at least 1\n", __LINE__, kfse->refcount);
558    }
559
560    unlock_fs_event_list();  // at this point it's safe to unlock
561
562    //
563    // now process the arguments passed in and copy them into
564    // the kfse
565    //
566
567    cur = kfse;
568
569    if (type == FSE_DOCID_CREATED || type == FSE_DOCID_CHANGED) {
570	    uint64_t val;
571
572	    //
573	    // These events are special and not like the other events.  They only
574	    // have a dev_t, src inode #, dest inode #, and a doc-id.  We use the
575	    // fields that we can in the kfse but have to overlay the dest inode
576	    // number and the doc-id on the other fields.
577	    //
578
579	    // First the dev_t
580	    arg_type = va_arg(ap, int32_t);
581	    if (arg_type == FSE_ARG_DEV) {
582		    cur->dev = (dev_t)(va_arg(ap, dev_t));
583	    } else {
584		    cur->dev = (dev_t)0xbadc0de1;
585	    }
586
587	    // next the source inode #
588	    arg_type = va_arg(ap, int32_t);
589	    if (arg_type == FSE_ARG_INO) {
590		    cur->ino = (ino64_t)(va_arg(ap, ino64_t));
591	    } else {
592		    cur->ino = 0xbadc0de2;
593	    }
594
595	    // now the dest inode #
596	    arg_type = va_arg(ap, int32_t);
597	    if (arg_type == FSE_ARG_INO) {
598		    val = (ino64_t)(va_arg(ap, ino64_t));
599	    } else {
600		    val = 0xbadc0de2;
601	    }
602	    // overlay the dest inode number on the str/dest pointer fields
603	    memcpy(&cur->str, &val, sizeof(ino64_t));
604
605
606	    // and last the document-id
607	    arg_type = va_arg(ap, int32_t);
608	    if (arg_type == FSE_ARG_INT32) {
609		    val = (uint64_t)va_arg(ap, uint32_t);
610	    } else if (arg_type == FSE_ARG_INT64) {
611		    val = (uint64_t)va_arg(ap, uint64_t);
612	    } else {
613		    val = 0xbadc0de3;
614	    }
615
616	    // the docid is 64-bit and overlays the uid/gid fields
617	    memcpy(&cur->uid, &val, sizeof(uint64_t));
618
619	    goto done_with_args;
620    }
621
    for(arg_type=va_arg(ap, int32_t); arg_type != FSE_ARG_DONE; arg_type=va_arg(ap, int32_t)) {

	switch(arg_type) {
625	    case FSE_ARG_VNODE: {
626		// this expands out into multiple arguments to the client
627		struct vnode *vp;
628		struct vnode_attr va;
629
630		if (kfse->str != NULL) {
631		    cur = kfse_dest;
632		}
633
634		vp = va_arg(ap, struct vnode *);
635		if (vp == NULL) {
636		    panic("add_fsevent: you can't pass me a NULL vnode ptr (type %d)!\n",
637			  cur->type);
638		}
639
640		VATTR_INIT(&va);
641		VATTR_WANTED(&va, va_fsid);
642		VATTR_WANTED(&va, va_fileid);
643		VATTR_WANTED(&va, va_mode);
644		VATTR_WANTED(&va, va_uid);
645		VATTR_WANTED(&va, va_gid);
646		if ((ret = vnode_getattr(vp, &va, vfs_context_kernel())) != 0) {
647		    // printf("add_fsevent: failed to getattr on vp %p (%d)\n", cur->fref.vp, ret);
648		    cur->str = NULL;
649		    error = EINVAL;
650		    goto clean_up;
651		}
652
653		cur->dev  = dev = (dev_t)va.va_fsid;
654		cur->ino  = (ino64_t)va.va_fileid;
655		cur->mode = (int32_t)vnode_vttoif(vnode_vtype(vp)) | va.va_mode;
656		cur->uid  = va.va_uid;
657		cur->gid  = va.va_gid;
658
659		// if we haven't gotten the path yet, get it.
660		if (pathbuff == NULL) {
661		    pathbuff = get_pathbuff();
662		    pathbuff_len = MAXPATHLEN;
663
664		    pathbuff[0] = '\0';
665		    if ((ret = vn_getpath(vp, pathbuff, &pathbuff_len)) != 0 || pathbuff[0] == '\0') {
666
667			cur->flags |= KFSE_CONTAINS_DROPPED_EVENTS;
668
669			do {
670				if (vp->v_parent != NULL) {
671					vp = vp->v_parent;
672				} else if (vp->v_mount) {
673					strlcpy(pathbuff, vp->v_mount->mnt_vfsstat.f_mntonname, MAXPATHLEN);
674					break;
675				} else {
676					vp = NULL;
677				}
678
679				if (vp == NULL) {
680					break;
681				}
682
683				pathbuff_len = MAXPATHLEN;
684				ret = vn_getpath(vp, pathbuff, &pathbuff_len);
685			} while (ret == ENOSPC);
686
687			if (ret != 0 || vp == NULL) {
688				error = ENOENT;
689				goto clean_up;
690			}
691		    }
692		}
693
694		// store the path by adding it to the global string table
695		cur->len = pathbuff_len;
696		cur->str = vfs_addname(pathbuff, pathbuff_len, 0, 0);
697		if (cur->str == NULL || cur->str[0] == '\0') {
698		    panic("add_fsevent: was not able to add path %s to event %p.\n", pathbuff, cur);
699		}
700
701		release_pathbuff(pathbuff);
702		pathbuff = NULL;
703
704		break;
705	    }
706
707	    case FSE_ARG_FINFO: {
708		fse_info *fse;
709
710		fse = va_arg(ap, fse_info *);
711
712		cur->dev  = dev = (dev_t)fse->dev;
713		cur->ino  = (ino64_t)fse->ino;
714		cur->mode = (int32_t)fse->mode;
715		cur->uid  = (uid_t)fse->uid;
716		cur->gid  = (uid_t)fse->gid;
717		// if it's a hard-link and this is the last link, flag it
718		if ((fse->mode & FSE_MODE_HLINK) && fse->nlink == 0) {
719		    cur->mode |= FSE_MODE_LAST_HLINK;
720		}
721		if (cur->mode & FSE_TRUNCATED_PATH) {
722			cur->flags |= KFSE_CONTAINS_DROPPED_EVENTS;
723			cur->mode &= ~FSE_TRUNCATED_PATH;
724		}
725		break;
726	    }
727
728	    case FSE_ARG_STRING:
729		if (kfse->str != NULL) {
730		    cur = kfse_dest;
731		}
732
733		cur->len = (int16_t)(va_arg(ap, int32_t) & 0x7fff);
734		if (cur->len >= 1) {
735		    cur->str = vfs_addname(va_arg(ap, char *), cur->len, 0, 0);
736		} else {
737		    printf("add_fsevent: funny looking string length: %d\n", (int)cur->len);
738		    cur->len = 2;
739		    cur->str = vfs_addname("/", cur->len, 0, 0);
740		}
741		if (cur->str[0] == 0) {
742		    printf("add_fsevent: bogus looking string (len %d)\n", cur->len);
743		}
744		break;
745
746	    case FSE_ARG_INT32: {
747		    uint32_t ival = (uint32_t)va_arg(ap, int32_t);
748		    kfse->uid = (ino64_t)ival;
749		break;
750	    }
751
752	    default:
753		printf("add_fsevent: unknown type %d\n", arg_type);
754		// just skip one 32-bit word and hope we sync up...
755		(void)va_arg(ap, int32_t);
	}
    }
757
758done_with_args:
759    va_end(ap);
760
761    OSBitAndAtomic16(~KFSE_BEING_CREATED, &kfse->flags);
762    if (kfse_dest) {
763	OSBitAndAtomic16(~KFSE_BEING_CREATED, &kfse_dest->flags);
764    }
765
766    //
    // now we have to go and let everyone that is
    // interested in this type of event know about it
769    //
770    lock_watch_table();
771
772    for(i=0; i < MAX_WATCHERS; i++) {
773	watcher = watcher_table[i];
774	if (watcher == NULL) {
775	    continue;
776	}
777
778	if (   watcher->event_list[type] == FSE_REPORT
779	    && watcher_cares_about_dev(watcher, dev)) {
780
781	    if (watcher_add_event(watcher, kfse) != 0) {
782		watcher->num_dropped++;
783		continue;
784	    }
785	}
786
787	// if (kfse->refcount < 1) {
	//    panic("add_fsevent: line %d: kfse refcount %d but should be at least 1\n", __LINE__, kfse->refcount);
789	// }
790    }
791
792    unlock_watch_table();
793
794  clean_up:
795
796    if (pathbuff) {
797	release_pathbuff(pathbuff);
798	pathbuff = NULL;
799    }
800
801    release_event_ref(kfse);
802
803    return error;
804}
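//
// for reference, a sketch of how a two-file event might be posted
// (hypothetical variables; it simply mirrors the FSE_ARG_STRING /
// FSE_ARG_FINFO handling above -- each string argument is a length
// followed by the path, and the list ends with FSE_ARG_DONE):
//
//     add_fsevent(FSE_RENAME, ctx,
//                 FSE_ARG_STRING, from_len, from_path, FSE_ARG_FINFO, &from_finfo,
//                 FSE_ARG_STRING, to_len,   to_path,   FSE_ARG_FINFO, &to_finfo,
//                 FSE_ARG_DONE);
//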
805
806
807static void
808release_event_ref(kfs_event *kfse)
809{
810    int old_refcount;
811    kfs_event copy, dest_copy;
812
813
814    old_refcount = OSAddAtomic(-1, &kfse->refcount);
815    if (old_refcount > 1) {
816	return;
817    }
818
819    lock_fs_event_list();
820    if (last_event_ptr == kfse) {
821	    last_event_ptr = NULL;
822	    last_event_type = -1;
823	    last_coalesced_time = 0;
824    }
825
826    if (kfse->refcount < 0) {
827	panic("release_event_ref: bogus kfse refcount %d\n", kfse->refcount);
828    }
829
830    if (kfse->refcount > 0 || kfse->type == FSE_INVALID) {
831	// This is very subtle.  Either of these conditions can
832	// be true if an event got recycled while we were waiting
833	// on the fs_event_list lock or the event got recycled,
834	// delivered, _and_ free'd by someone else while we were
835	// waiting on the fs event list lock.  In either case
836	// we need to just unlock the list and return without
837	// doing anything because if the refcount is > 0 then
838	// someone else will take care of free'ing it and when
839	// the kfse->type is invalid then someone else already
840	// has handled free'ing the event (while we were blocked
841	// on the event list lock).
842	//
843	unlock_fs_event_list();
844	return;
845    }
846
847    //
848    // make a copy of this so we can free things without
849    // holding the fs_event_buf lock
850    //
851    copy = *kfse;
852    if (kfse->dest && OSAddAtomic(-1, &kfse->dest->refcount) == 1) {
853	dest_copy = *kfse->dest;
854    } else {
855	dest_copy.str  = NULL;
856	dest_copy.len  = 0;
857	dest_copy.type = FSE_INVALID;
858    }
859
860    kfse->pid = kfse->type;             // save this off for debugging...
861    kfse->uid = (uid_t)(long)kfse->str;       // save this off for debugging...
862    kfse->gid = (gid_t)(long)current_thread();
863
864    kfse->str = (char *)0xdeadbeef;             // XXXdbg - catch any cheaters...
865
866    if (dest_copy.type != FSE_INVALID) {
867	kfse->dest->str = (char *)0xbadc0de;   // XXXdbg - catch any cheaters...
868	kfse->dest->type = FSE_INVALID;
869
870	if (kfse->dest->kevent_list.le_prev != NULL) {
871	    num_events_outstanding--;
872	    LIST_REMOVE(kfse->dest, kevent_list);
873	    memset(&kfse->dest->kevent_list, 0xa5, sizeof(kfse->dest->kevent_list));
874	}
875
876	zfree(event_zone, kfse->dest);
877    }
878
879    // mark this fsevent as invalid
880    {
881	int otype;
882
883	otype = kfse->type;
884    kfse->type = FSE_INVALID;
885
886    if (kfse->kevent_list.le_prev != NULL) {
887	num_events_outstanding--;
888	if (otype == FSE_RENAME) {
889	    num_pending_rename--;
890	}
891	LIST_REMOVE(kfse, kevent_list);
892	memset(&kfse->kevent_list, 0, sizeof(kfse->kevent_list));
893    }
894    }
895
896    zfree(event_zone, kfse);
897
898    unlock_fs_event_list();
899
900    // if we have a pointer in the union
901    if (copy.str && copy.type != FSE_DOCID_CHANGED) {
902	if (copy.len == 0) {    // and it's not a string
903	    panic("%s:%d: no more fref.vp!\n", __FILE__, __LINE__);
904	    // vnode_rele_ext(copy.fref.vp, O_EVTONLY, 0);
905	} else {                // else it's a string
906	    vfs_removename(copy.str);
907	}
908    }
909
910    if (dest_copy.type != FSE_INVALID && dest_copy.str) {
911	if (dest_copy.len == 0) {
912	    panic("%s:%d: no more fref.vp!\n", __FILE__, __LINE__);
913	    // vnode_rele_ext(dest_copy.fref.vp, O_EVTONLY, 0);
914	} else {
915	    vfs_removename(dest_copy.str);
916	}
917    }
918}
919
920static int
921add_watcher(int8_t *event_list, int32_t num_events, int32_t eventq_size, fs_event_watcher **watcher_out, void *fseh)
922{
923    int               i;
924    fs_event_watcher *watcher;
925
926    if (eventq_size <= 0 || eventq_size > 100*max_kfs_events) {
927	eventq_size = max_kfs_events;
928    }
929
930    // Note: the event_queue follows the fs_event_watcher struct
931    //       in memory so we only have to do one allocation
932    MALLOC(watcher,
933	   fs_event_watcher *,
934	   sizeof(fs_event_watcher) + eventq_size * sizeof(kfs_event *),
935	   M_TEMP, M_WAITOK);
936    if (watcher == NULL) {
937	return ENOMEM;
938    }
939
940    watcher->event_list   = event_list;
941    watcher->num_events   = num_events;
942    watcher->devices_not_to_watch = NULL;
943    watcher->num_devices  = 0;
944    watcher->flags        = 0;
945    watcher->event_queue  = (kfs_event **)&watcher[1];
946    watcher->eventq_size  = eventq_size;
947    watcher->rd           = 0;
948    watcher->wr           = 0;
949    watcher->blockers     = 0;
950    watcher->num_readers  = 0;
951    watcher->max_event_id = 0;
952    watcher->fseh         = fseh;
953    watcher->pid          = proc_selfpid();
954    proc_selfname(watcher->proc_name, sizeof(watcher->proc_name));
955
956    watcher->num_dropped  = 0;      // XXXdbg - debugging
957
958    if (!strncmp(watcher->proc_name, "fseventsd", sizeof(watcher->proc_name)) ||
959	!strncmp(watcher->proc_name, "coreservicesd", sizeof(watcher->proc_name)) ||
960	!strncmp(watcher->proc_name, "mds", sizeof(watcher->proc_name))) {
961	watcher->flags |= WATCHER_APPLE_SYSTEM_SERVICE;
962    } else {
963      printf("fsevents: watcher %s (pid: %d) - Using /dev/fsevents directly is unsupported.  Migrate to FSEventsFramework\n",
964	     watcher->proc_name, watcher->pid);
965    }
966
967    lock_watch_table();
968
969    // find a slot for the new watcher
970    for(i=0; i < MAX_WATCHERS; i++) {
971	if (watcher_table[i] == NULL) {
972	    watcher->my_id   = i;
973	    watcher_table[i] = watcher;
974	    break;
975	}
976    }
977
978    if (i >= MAX_WATCHERS) {
979	printf("fsevents: too many watchers!\n");
980	unlock_watch_table();
981	FREE(watcher, M_TEMP);
982	return ENOSPC;
983    }
984
985    // now update the global list of who's interested in
986    // events of a particular type...
987    for(i=0; i < num_events; i++) {
988	if (event_list[i] != FSE_IGNORE && i < FSE_MAX_EVENTS) {
989	    fs_event_type_watchers[i]++;
990	}
991    }
992
993    unlock_watch_table();
994
995    *watcher_out = watcher;
996
997    return 0;
998}
999
1000
1001
1002static void
1003remove_watcher(fs_event_watcher *target)
1004{
1005    int i, j, counter=0;
1006    fs_event_watcher *watcher;
1007    kfs_event *kfse;
1008
1009    lock_watch_table();
1010
1011    for(j=0; j < MAX_WATCHERS; j++) {
1012	watcher = watcher_table[j];
1013	if (watcher != target) {
1014	    continue;
1015	}
1016
1017	watcher_table[j] = NULL;
1018
1019	for(i=0; i < watcher->num_events; i++) {
1020	    if (watcher->event_list[i] != FSE_IGNORE && i < FSE_MAX_EVENTS) {
1021		fs_event_type_watchers[i]--;
1022	    }
1023	}
1024
1025	if (watcher->flags & WATCHER_CLOSING) {
1026	    unlock_watch_table();
1027	    return;
1028	}
1029
1030	// printf("fsevents: removing watcher %p (rd %d wr %d num_readers %d flags 0x%x)\n", watcher, watcher->rd, watcher->wr, watcher->num_readers, watcher->flags);
1031	watcher->flags |= WATCHER_CLOSING;
1032	OSAddAtomic(1, &watcher->num_readers);
1033
1034	unlock_watch_table();
1035
1036	while (watcher->num_readers > 1 && counter++ < 5000) {
1037	    lock_watch_table();
1038	    fsevents_wakeup(watcher);      // in case they're asleep
1039	    unlock_watch_table();
1040
1041	    tsleep(watcher, PRIBIO, "fsevents-close", 1);
1042	}
1043	if (counter++ >= 5000) {
1044	    // printf("fsevents: close: still have readers! (%d)\n", watcher->num_readers);
1045	    panic("fsevents: close: still have readers! (%d)\n", watcher->num_readers);
1046	}
1047
1048	// drain the event_queue
1049
1050	lck_rw_lock_exclusive(&event_handling_lock);
1051	while(watcher->rd != watcher->wr) {
1052	    kfse = watcher->event_queue[watcher->rd];
1053	    watcher->event_queue[watcher->rd] = NULL;
1054	    watcher->rd = (watcher->rd+1) % watcher->eventq_size;
1055	    OSSynchronizeIO();
1056	    if (kfse != NULL && kfse->type != FSE_INVALID && kfse->refcount >= 1) {
1057		release_event_ref(kfse);
1058	    }
1059	}
1060	lck_rw_unlock_exclusive(&event_handling_lock);
1061
1062	if (watcher->event_list) {
1063	    FREE(watcher->event_list, M_TEMP);
1064	    watcher->event_list = NULL;
1065	}
1066	if (watcher->devices_not_to_watch) {
1067	    FREE(watcher->devices_not_to_watch, M_TEMP);
1068	    watcher->devices_not_to_watch = NULL;
1069	}
1070	FREE(watcher, M_TEMP);
1071
1072	return;
1073    }
1074
1075    unlock_watch_table();
1076}
1077
1078
1079#define EVENT_DELAY_IN_MS   10
1080static thread_call_t event_delivery_timer = NULL;
1081static int timer_set = 0;
1082
1083
1084static void
1085delayed_event_delivery(__unused void *param0, __unused void *param1)
1086{
1087    int i;
1088
1089    lock_watch_table();
1090
1091    for(i=0; i < MAX_WATCHERS; i++) {
1092	if (watcher_table[i] != NULL && watcher_table[i]->rd != watcher_table[i]->wr) {
1093	    fsevents_wakeup(watcher_table[i]);
1094	}
1095    }
1096
1097    timer_set = 0;
1098
1099    unlock_watch_table();
1100}
1101
1102
1103//
1104// The watch table must be locked before calling this function.
1105//
1106static void
1107schedule_event_wakeup(void)
1108{
1109    uint64_t deadline;
1110
1111    if (event_delivery_timer == NULL) {
1112	event_delivery_timer = thread_call_allocate((thread_call_func_t)delayed_event_delivery, NULL);
1113    }
1114
1115    clock_interval_to_deadline(EVENT_DELAY_IN_MS, 1000 * 1000, &deadline);
1116
1117    thread_call_enter_delayed(event_delivery_timer, deadline);
1118    timer_set = 1;
1119}
1120
1121
1122
1123#define MAX_NUM_PENDING  16
1124
1125//
1126// NOTE: the watch table must be locked before calling
1127//       this routine.
1128//
1129static int
1130watcher_add_event(fs_event_watcher *watcher, kfs_event *kfse)
1131{
1132    if (kfse->abstime > watcher->max_event_id) {
1133	watcher->max_event_id = kfse->abstime;
1134    }
1135
1136    if (((watcher->wr + 1) % watcher->eventq_size) == watcher->rd) {
1137	watcher->flags |= WATCHER_DROPPED_EVENTS;
1138	fsevents_wakeup(watcher);
1139	return ENOSPC;
1140    }
1141
1142    OSAddAtomic(1, &kfse->refcount);
1143    watcher->event_queue[watcher->wr] = kfse;
1144    OSSynchronizeIO();
1145    watcher->wr = (watcher->wr + 1) % watcher->eventq_size;
1146
1147    //
1148    // wake up the watcher if there are more than MAX_NUM_PENDING events.
1149    // otherwise schedule a timer (if one isn't already set) which will
1150    // send any pending events if no more are received in the next
    // EVENT_DELAY_IN_MS milliseconds.
1152    //
1153    int32_t num_pending = 0;
1154    if (watcher->rd < watcher->wr) {
1155      num_pending = watcher->wr - watcher->rd;
1156    }
1157
1158    if (watcher->rd > watcher->wr) {
1159      num_pending = watcher->wr + watcher->eventq_size - watcher->rd;
1160    }
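    // equivalently: num_pending = (wr - rd + eventq_size) % eventq_size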
1161
1162    if (num_pending > (watcher->eventq_size*3/4) && !(watcher->flags & WATCHER_APPLE_SYSTEM_SERVICE)) {
1163      /* Non-Apple Service is falling behind, start dropping events for this process */
1164      lck_rw_lock_exclusive(&event_handling_lock);
1165      while (watcher->rd != watcher->wr) {
1166	kfse = watcher->event_queue[watcher->rd];
1167	watcher->event_queue[watcher->rd] = NULL;
1168	watcher->rd = (watcher->rd+1) % watcher->eventq_size;
1169	OSSynchronizeIO();
1170	if (kfse != NULL && kfse->type != FSE_INVALID && kfse->refcount >= 1) {
1171	  release_event_ref(kfse);
1172	}
1173      }
1174      watcher->flags |= WATCHER_DROPPED_EVENTS;
1175      lck_rw_unlock_exclusive(&event_handling_lock);
1176
1177      printf("fsevents: watcher falling behind: %s (pid: %d) rd: %4d wr: %4d q_size: %4d flags: 0x%x\n",
1178	     watcher->proc_name, watcher->pid, watcher->rd, watcher->wr,
1179	     watcher->eventq_size, watcher->flags);
1180
1181      fsevents_wakeup(watcher);
1182    } else if (num_pending > MAX_NUM_PENDING) {
1183      fsevents_wakeup(watcher);
1184    } else if (timer_set == 0) {
1185      schedule_event_wakeup();
1186    }
1187
1188    return 0;
1189}
1190
1191static int
1192fill_buff(uint16_t type, int32_t size, const void *data,
1193          char *buff, int32_t *_buff_idx, int32_t buff_sz,
1194          struct uio *uio)
1195{
1196    int32_t amt, error = 0, buff_idx = *_buff_idx;
1197    uint16_t tmp;
1198
1199    //
1200    // the +1 on the size is to guarantee that the main data
1201    // copy loop will always copy at least 1 byte
1202    //
1203    if ((buff_sz - buff_idx) <= (int)(2*sizeof(uint16_t) + 1)) {
1204	if (buff_idx > uio_resid(uio)) {
1205	    error = ENOSPC;
1206	    goto get_out;
1207	}
1208
1209	error = uiomove(buff, buff_idx, uio);
1210	if (error) {
1211	    goto get_out;
1212	}
1213	buff_idx = 0;
1214    }
1215
1216    // copy out the header (type & size)
1217    memcpy(&buff[buff_idx], &type, sizeof(uint16_t));
1218    buff_idx += sizeof(uint16_t);
1219
1220    tmp = size & 0xffff;
1221    memcpy(&buff[buff_idx], &tmp, sizeof(uint16_t));
1222    buff_idx += sizeof(uint16_t);
1223
1224    // now copy the body of the data, flushing along the way
1225    // if the buffer fills up.
1226    //
1227    while(size > 0) {
1228	amt = (size < (buff_sz - buff_idx)) ? size : (buff_sz - buff_idx);
1229	memcpy(&buff[buff_idx], data, amt);
1230
1231	size -= amt;
1232	buff_idx += amt;
1233	data = (const char *)data + amt;
1234	if (size > (buff_sz - buff_idx)) {
1235	    if (buff_idx > uio_resid(uio)) {
1236		error = ENOSPC;
1237		goto get_out;
1238	    }
1239	    error = uiomove(buff, buff_idx, uio);
1240	    if (error) {
1241		goto get_out;
1242	    }
1243	    buff_idx = 0;
1244	}
1245
1246	if (amt == 0) {   // just in case...
1247	    break;
1248	}
1249    }
1250
1251  get_out:
1252    *_buff_idx = buff_idx;
1253
1254    return error;
1255}
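//
// layout note (as implemented by fill_buff() above and copy_out_kfse()
// below): each argument is written to the user's buffer as a 2-byte
// argument type, a 2-byte length, and then `length` bytes of data.  a
// complete event is an int32 event type, an int32 pid, a sequence of such
// argument records, and a bare FSE_ARG_DONE type to terminate it.
//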
1256
1257
1258static int copy_out_kfse(fs_event_watcher *watcher, kfs_event *kfse, struct uio *uio)  __attribute__((noinline));
1259
1260static int
1261copy_out_kfse(fs_event_watcher *watcher, kfs_event *kfse, struct uio *uio)
1262{
1263    int      error;
1264    uint16_t tmp16;
1265    int32_t  type;
1266    kfs_event *cur;
1267    char     evbuff[512];
1268    int      evbuff_idx = 0;
1269
1270    if (kfse->type == FSE_INVALID) {
1271	panic("fsevents: copy_out_kfse: asked to copy out an invalid event (kfse %p, refcount %d fref ptr %p)\n", kfse, kfse->refcount, kfse->str);
1272    }
1273
1274    if (kfse->flags & KFSE_BEING_CREATED) {
1275	return 0;
1276    }
1277
1278    if (kfse->type == FSE_RENAME && kfse->dest == NULL) {
1279	//
1280	// This can happen if an event gets recycled but we had a
1281	// pointer to it in our event queue.  The event is the
1282	// destination of a rename which we'll process separately
1283	// (that is, another kfse points to this one so it's ok
1284	// to skip this guy because we'll process it when we process
1285	// the other one)
1286	error = 0;
1287	goto get_out;
1288    }
1289
1290    if (watcher->flags & WATCHER_WANTS_EXTENDED_INFO) {
1291
1292	type = (kfse->type & 0xfff);
1293
1294	if (kfse->flags & KFSE_CONTAINS_DROPPED_EVENTS) {
1295	    type |= (FSE_CONTAINS_DROPPED_EVENTS << FSE_FLAG_SHIFT);
1296	} else if (kfse->flags & KFSE_COMBINED_EVENTS) {
1297	    type |= (FSE_COMBINED_EVENTS << FSE_FLAG_SHIFT);
1298	}
1299
1300    } else {
1301	type = (int32_t)kfse->type;
1302    }
1303
1304    // copy out the type of the event
1305    memcpy(evbuff, &type, sizeof(int32_t));
1306    evbuff_idx += sizeof(int32_t);
1307
1308    // copy out the pid of the person that generated the event
1309    memcpy(&evbuff[evbuff_idx], &kfse->pid, sizeof(pid_t));
1310    evbuff_idx += sizeof(pid_t);
1311
1312    cur = kfse;
1313
1314  copy_again:
1315
1316    if (kfse->type == FSE_DOCID_CHANGED || kfse->type == FSE_DOCID_CREATED) {
1317	dev_t    dev  = cur->dev;
1318	ino_t    ino  = cur->ino;
1319	uint64_t ival;
1320
1321	error = fill_buff(FSE_ARG_DEV, sizeof(dev_t), &dev, evbuff, &evbuff_idx, sizeof(evbuff), uio);
1322	if (error != 0) {
1323	    goto get_out;
1324	}
1325
1326	error = fill_buff(FSE_ARG_INO, sizeof(ino_t), &ino, evbuff, &evbuff_idx, sizeof(evbuff), uio);
1327	if (error != 0) {
1328	    goto get_out;
1329	}
1330
1331	memcpy(&ino, &cur->str, sizeof(ino_t));
1332	error = fill_buff(FSE_ARG_INO, sizeof(ino_t), &ino, evbuff, &evbuff_idx, sizeof(evbuff), uio);
1333	if (error != 0) {
1334	    goto get_out;
1335	}
1336
1337	memcpy(&ival, &cur->uid, sizeof(uint64_t));   // the docid gets stuffed into the ino field
1338	error = fill_buff(FSE_ARG_INT64, sizeof(uint64_t), &ival, evbuff, &evbuff_idx, sizeof(evbuff), uio);
1339	if (error != 0) {
1340	    goto get_out;
1341	}
1342
1343	goto done;
1344    }
1345
1346    if (cur->str == NULL || cur->str[0] == '\0') {
1347	printf("copy_out_kfse:2: empty/short path (%s)\n", cur->str);
1348	error = fill_buff(FSE_ARG_STRING, 2, "/", evbuff, &evbuff_idx, sizeof(evbuff), uio);
1349    } else {
1350	error = fill_buff(FSE_ARG_STRING, cur->len, cur->str, evbuff, &evbuff_idx, sizeof(evbuff), uio);
1351    }
1352    if (error != 0) {
1353	goto get_out;
1354    }
1355
1356    if (cur->dev == 0 && cur->ino == 0) {
	// this happens for a rename event when the destination
	// of the rename did not previously exist.  it thus has
	// no other file info, so skip copying out the fields
	// below since they aren't initialized
1361	goto done;
1362    }
1363
1364
1365    if (watcher->flags & WATCHER_WANTS_COMPACT_EVENTS) {
1366	int32_t finfo_size;
1367
1368	finfo_size = sizeof(dev_t) + sizeof(ino64_t) + sizeof(int32_t) + sizeof(uid_t) + sizeof(gid_t);
1369	error = fill_buff(FSE_ARG_FINFO, finfo_size, &cur->ino, evbuff, &evbuff_idx, sizeof(evbuff), uio);
1370	if (error != 0) {
1371	    goto get_out;
1372	}
1373    } else {
1374	ino_t ino;
1375
1376	error = fill_buff(FSE_ARG_DEV, sizeof(dev_t), &cur->dev, evbuff, &evbuff_idx, sizeof(evbuff), uio);
1377	if (error != 0) {
1378	    goto get_out;
1379	}
1380
1381	ino = (ino_t)cur->ino;
1382	error = fill_buff(FSE_ARG_INO, sizeof(ino_t), &ino, evbuff, &evbuff_idx, sizeof(evbuff), uio);
1383	if (error != 0) {
1384	    goto get_out;
1385	}
1386
1387	error = fill_buff(FSE_ARG_MODE, sizeof(int32_t), &cur->mode, evbuff, &evbuff_idx, sizeof(evbuff), uio);
1388	if (error != 0) {
1389	    goto get_out;
1390	}
1391
1392	error = fill_buff(FSE_ARG_UID, sizeof(uid_t), &cur->uid, evbuff, &evbuff_idx, sizeof(evbuff), uio);
1393	if (error != 0) {
1394	    goto get_out;
1395	}
1396
1397	error = fill_buff(FSE_ARG_GID, sizeof(gid_t), &cur->gid, evbuff, &evbuff_idx, sizeof(evbuff), uio);
1398	if (error != 0) {
1399	    goto get_out;
1400	}
1401    }
1402
1403
1404    if (cur->dest) {
1405	cur = cur->dest;
1406	goto copy_again;
1407    }
1408
1409  done:
1410    // very last thing: the time stamp
1411    error = fill_buff(FSE_ARG_INT64, sizeof(uint64_t), &cur->abstime, evbuff, &evbuff_idx, sizeof(evbuff), uio);
1412    if (error != 0) {
1413	goto get_out;
1414    }
1415
1416    // check if the FSE_ARG_DONE will fit
1417    if (sizeof(uint16_t) > sizeof(evbuff) - evbuff_idx) {
1418	if (evbuff_idx > uio_resid(uio)) {
1419	    error = ENOSPC;
1420	    goto get_out;
1421	}
1422	error = uiomove(evbuff, evbuff_idx, uio);
1423	if (error) {
1424	    goto get_out;
1425	}
1426	evbuff_idx = 0;
1427    }
1428
1429    tmp16 = FSE_ARG_DONE;
1430    memcpy(&evbuff[evbuff_idx], &tmp16, sizeof(uint16_t));
1431    evbuff_idx += sizeof(uint16_t);
1432
1433    // flush any remaining data in the buffer (and hopefully
1434    // in most cases this is the only uiomove we'll do)
1435    if (evbuff_idx > uio_resid(uio)) {
1436	error = ENOSPC;
1437    } else {
1438	error = uiomove(evbuff, evbuff_idx, uio);
1439    }
1440
1441  get_out:
1442
1443    return error;
1444}
1445
1446
1447
1448static int
1449fmod_watch(fs_event_watcher *watcher, struct uio *uio)
1450{
1451    int               error=0;
1452    user_ssize_t      last_full_event_resid;
1453    kfs_event        *kfse;
1454    uint16_t          tmp16;
1455    int               skipped;
1456
1457    last_full_event_resid = uio_resid(uio);
1458
1459    // need at least 2048 bytes of space (maxpathlen + 1 event buf)
1460    if  (uio_resid(uio) < 2048 || watcher == NULL) {
1461	return EINVAL;
1462    }
1463
1464    if (watcher->flags & WATCHER_CLOSING) {
1465	return 0;
1466    }
1467
1468    if (OSAddAtomic(1, &watcher->num_readers) != 0) {
1469	// don't allow multiple threads to read from the fd at the same time
1470	OSAddAtomic(-1, &watcher->num_readers);
1471	return EAGAIN;
1472    }
1473
1474 restart_watch:
1475    if (watcher->rd == watcher->wr) {
1476	if (watcher->flags & WATCHER_CLOSING) {
1477	    OSAddAtomic(-1, &watcher->num_readers);
1478	    return 0;
1479	}
1480	OSAddAtomic(1, &watcher->blockers);
1481
1482	// there's nothing to do, go to sleep
1483	error = tsleep((caddr_t)watcher, PUSER|PCATCH, "fsevents_empty", 0);
1484
1485	OSAddAtomic(-1, &watcher->blockers);
1486
1487	if (error != 0 || (watcher->flags & WATCHER_CLOSING)) {
1488	    OSAddAtomic(-1, &watcher->num_readers);
1489	    return error;
1490	}
1491    }
1492
1493    // if we dropped events, return that as an event first
1494    if (watcher->flags & WATCHER_DROPPED_EVENTS) {
1495	int32_t val = FSE_EVENTS_DROPPED;
1496
1497	error = uiomove((caddr_t)&val, sizeof(int32_t), uio);
1498	if (error == 0) {
1499	    val = 0;             // a fake pid
1500	    error = uiomove((caddr_t)&val, sizeof(int32_t), uio);
1501
1502	    tmp16 = FSE_ARG_DONE;  // makes it a consistent msg
1503	    error = uiomove((caddr_t)&tmp16, sizeof(int16_t), uio);
1504
1505	    last_full_event_resid = uio_resid(uio);
1506	}
1507
1508	if (error) {
1509	    OSAddAtomic(-1, &watcher->num_readers);
1510	    return error;
1511	}
1512
1513	watcher->flags &= ~WATCHER_DROPPED_EVENTS;
1514    }
1515
1516    skipped = 0;
1517
1518    lck_rw_lock_shared(&event_handling_lock);
1519    while (uio_resid(uio) > 0 && watcher->rd != watcher->wr) {
1520	if (watcher->flags & WATCHER_CLOSING) {
1521	    break;
1522	}
1523
1524	//
1525	// check if the event is something of interest to us
1526	// (since it may have been recycled/reused and changed
1527	// its type or which device it is for)
1528	//
1529	kfse = watcher->event_queue[watcher->rd];
1530	if (!kfse || kfse->type == FSE_INVALID || kfse->refcount < 1) {
1531	  break;
1532	}
1533
1534	if (watcher->event_list[kfse->type] == FSE_REPORT && watcher_cares_about_dev(watcher, kfse->dev)) {
1535
1536	  if (!(watcher->flags & WATCHER_APPLE_SYSTEM_SERVICE) && kfse->type != FSE_DOCID_CHANGED && is_ignored_directory(kfse->str)) {
1537	    // If this is not an Apple System Service, skip specified directories
1538	    // radar://12034844
1539	    error = 0;
1540	    skipped = 1;
1541	  } else {
1542
1543	    skipped = 0;
1544	    if (last_event_ptr == kfse) {
1545		last_event_ptr = NULL;
1546		last_event_type = -1;
1547		last_coalesced_time = 0;
1548	    }
1549	    error = copy_out_kfse(watcher, kfse, uio);
1550	    if (error != 0) {
1551		// if an event won't fit or encountered an error while
1552		// we were copying it out, then backup to the last full
1553		// event and just bail out.  if the error was ENOENT
1554		// then we can continue regular processing, otherwise
1555		// we should unlock things and return.
1556		uio_setresid(uio, last_full_event_resid);
1557		if (error != ENOENT) {
1558		    lck_rw_unlock_shared(&event_handling_lock);
1559		    error = 0;
1560		    goto get_out;
1561		}
1562	    }
1563
1564	    last_full_event_resid = uio_resid(uio);
1565	  }
1566	}
1567
1568	watcher->event_queue[watcher->rd] = NULL;
1569	watcher->rd = (watcher->rd + 1) % watcher->eventq_size;
1570	OSSynchronizeIO();
1571	release_event_ref(kfse);
1572    }
1573    lck_rw_unlock_shared(&event_handling_lock);
1574
1575    if (skipped && error == 0) {
1576      goto restart_watch;
1577    }
1578
1579  get_out:
1580    OSAddAtomic(-1, &watcher->num_readers);
1581
1582    return error;
1583}
1584
1585
1586// release any references we might have on vnodes which are
1587// the mount point passed to us (so that it can be cleanly
1588// unmounted).
1589//
1590// since we don't want to lose the events we'll convert the
1591// vnode refs to full paths.
1592//
1593void
1594fsevent_unmount(__unused struct mount *mp)
1595{
1596    // we no longer maintain pointers to vnodes so
1597    // there is nothing to do...
1598}
1599
1600
1601//
1602// /dev/fsevents device code
1603//
1604static int fsevents_installed = 0;
1605
1606typedef struct fsevent_handle {
1607    UInt32            flags;
1608    SInt32            active;
1609    fs_event_watcher *watcher;
1610    struct klist      knotes;
1611    struct selinfo    si;
1612} fsevent_handle;
1613
1614#define FSEH_CLOSING   0x0001
1615
1616static int
1617fseventsf_read(struct fileproc *fp, struct uio *uio,
1618	       __unused int flags, __unused vfs_context_t ctx)
1619{
1620    fsevent_handle *fseh = (struct fsevent_handle *)fp->f_fglob->fg_data;
1621    int error;
1622
1623    error = fmod_watch(fseh->watcher, uio);
1624
1625    return error;
1626}
1627
1628
1629static int
1630fseventsf_write(__unused struct fileproc *fp, __unused struct uio *uio,
1631		__unused int flags, __unused vfs_context_t ctx)
1632{
1633    return EIO;
1634}
1635
1636#pragma pack(push, 4)
1637typedef struct ext_fsevent_dev_filter_args {
1638    uint32_t    num_devices;
1639    user_addr_t devices;
1640} ext_fsevent_dev_filter_args;
1641#pragma pack(pop)
1642
1643#define NEW_FSEVENTS_DEVICE_FILTER      _IOW('s', 100, ext_fsevent_dev_filter_args)
1644
1645typedef struct old_fsevent_dev_filter_args {
1646    uint32_t  num_devices;
1647    int32_t   devices;
1648} old_fsevent_dev_filter_args;
1649
1650#define	OLD_FSEVENTS_DEVICE_FILTER	_IOW('s', 100, old_fsevent_dev_filter_args)
1651
1652#if __LP64__
1653/* need this in spite of the padding due to alignment of devices */
1654typedef struct fsevent_dev_filter_args32 {
1655    uint32_t  num_devices;
1656    uint32_t  devices;
1657    int32_t   pad1;
1658} fsevent_dev_filter_args32;
1659#endif
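/*
 * userspace sketch (names assumed to come from <sys/fsevents.h>; not
 * verified here): a client holding an fsevents fd could exclude devices
 * with something like:
 *
 *     fsevent_dev_filter_args args;
 *     args.num_devices = ndevs;        // at most 256, see below
 *     args.devices     = devs;         // array of dev_t to EXCLUDE
 *     ioctl(fsevents_fd, FSEVENTS_DEVICE_FILTER, &args);
 */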
1660
1661static int
1662fseventsf_ioctl(struct fileproc *fp, u_long cmd, caddr_t data, vfs_context_t ctx)
1663{
1664    fsevent_handle *fseh = (struct fsevent_handle *)fp->f_fglob->fg_data;
1665    int ret = 0;
1666    ext_fsevent_dev_filter_args *devfilt_args, _devfilt_args;
1667
1668    if (proc_is64bit(vfs_context_proc(ctx))) {
1669	devfilt_args = (ext_fsevent_dev_filter_args *)data;
1670    }
1671    else if (cmd == OLD_FSEVENTS_DEVICE_FILTER) {
1672	old_fsevent_dev_filter_args *udev_filt_args = (old_fsevent_dev_filter_args *)data;
1673
1674	devfilt_args = &_devfilt_args;
1675	memset(devfilt_args, 0, sizeof(ext_fsevent_dev_filter_args));
1676
1677	devfilt_args->num_devices = udev_filt_args->num_devices;
1678	devfilt_args->devices     = CAST_USER_ADDR_T(udev_filt_args->devices);
1679    }
1680    else {
1681#if __LP64__
1682	fsevent_dev_filter_args32 *udev_filt_args = (fsevent_dev_filter_args32 *)data;
1683#else
1684	fsevent_dev_filter_args *udev_filt_args = (fsevent_dev_filter_args *)data;
1685#endif
1686
1687	devfilt_args = &_devfilt_args;
1688	memset(devfilt_args, 0, sizeof(ext_fsevent_dev_filter_args));
1689
1690	devfilt_args->num_devices = udev_filt_args->num_devices;
1691	devfilt_args->devices     = CAST_USER_ADDR_T(udev_filt_args->devices);
1692    }
1693
1694    OSAddAtomic(1, &fseh->active);
1695    if (fseh->flags & FSEH_CLOSING) {
1696	OSAddAtomic(-1, &fseh->active);
1697	return 0;
1698    }
1699
1700    switch (cmd) {
1701	case FIONBIO:
1702	case FIOASYNC:
1703	    break;
1704
1705	case FSEVENTS_WANT_COMPACT_EVENTS: {
1706	    fseh->watcher->flags |= WATCHER_WANTS_COMPACT_EVENTS;
1707	    break;
1708	}
1709
1710	case FSEVENTS_WANT_EXTENDED_INFO: {
1711	    fseh->watcher->flags |= WATCHER_WANTS_EXTENDED_INFO;
1712	    break;
1713	}
1714
1715	case FSEVENTS_GET_CURRENT_ID: {
1716		*(uint64_t *)data = fseh->watcher->max_event_id;
1717		ret = 0;
1718		break;
1719	}
1720
1721	case OLD_FSEVENTS_DEVICE_FILTER:
1722	case NEW_FSEVENTS_DEVICE_FILTER: {
1723	    int new_num_devices;
1724	    dev_t *devices_not_to_watch, *tmp=NULL;
1725
1726	    if (devfilt_args->num_devices > 256) {
1727		ret = EINVAL;
1728		break;
1729	    }
1730
1731	    new_num_devices = devfilt_args->num_devices;
1732	    if (new_num_devices == 0) {
1733		tmp = fseh->watcher->devices_not_to_watch;
1734
1735		lock_watch_table();
1736		fseh->watcher->devices_not_to_watch = NULL;
1737		fseh->watcher->num_devices = new_num_devices;
1738		unlock_watch_table();
1739
1740		if (tmp) {
1741		    FREE(tmp, M_TEMP);
1742		}
1743		break;
1744	    }
1745
1746	    MALLOC(devices_not_to_watch, dev_t *,
1747		   new_num_devices * sizeof(dev_t),
1748		   M_TEMP, M_WAITOK);
1749	    if (devices_not_to_watch == NULL) {
1750		ret = ENOMEM;
1751		break;
1752	    }
1753
1754	    ret = copyin(devfilt_args->devices,
1755			 (void *)devices_not_to_watch,
1756			 new_num_devices * sizeof(dev_t));
1757	    if (ret) {
1758		FREE(devices_not_to_watch, M_TEMP);
1759		break;
1760	    }
1761
1762	    lock_watch_table();
1763	    fseh->watcher->num_devices = new_num_devices;
1764	    tmp = fseh->watcher->devices_not_to_watch;
1765	    fseh->watcher->devices_not_to_watch = devices_not_to_watch;
1766	    unlock_watch_table();
1767
1768	    if (tmp) {
1769		FREE(tmp, M_TEMP);
1770	    }
1771
1772	    break;
1773	}
1774
1775	default:
1776	    ret = EINVAL;
1777	    break;
1778    }
1779
1780    OSAddAtomic(-1, &fseh->active);
1781    return (ret);
1782}
1783
1784
1785static int
fseventsf_select(struct fileproc *fp, int which, void *wql, vfs_context_t ctx)
1787{
1788    fsevent_handle *fseh = (struct fsevent_handle *)fp->f_fglob->fg_data;
1789    int ready = 0;
1790
1791    if ((which != FREAD) || (fseh->watcher->flags & WATCHER_CLOSING)) {
1792	return 0;
1793    }
1794
1795
1796    // if there's nothing in the queue, we're not ready
1797    if (fseh->watcher->rd != fseh->watcher->wr) {
1798	ready = 1;
1799    }
1800
1801    if (!ready) {
1802	selrecord(vfs_context_proc(ctx), &fseh->si, wql);
1803    }
1804
1805    return ready;
1806}
1807
1808
1809#if NOTUSED
1810static int
1811fseventsf_stat(__unused struct fileproc *fp, __unused struct stat *sb, __unused vfs_context_t ctx)
1812{
1813    return ENOTSUP;
1814}
1815#endif
1816
1817static int
1818fseventsf_close(struct fileglob *fg, __unused vfs_context_t ctx)
1819{
1820    fsevent_handle *fseh = (struct fsevent_handle *)fg->fg_data;
1821    fs_event_watcher *watcher;
1822
1823    OSBitOrAtomic(FSEH_CLOSING, &fseh->flags);
1824    while (OSAddAtomic(0, &fseh->active) > 0) {
1825	tsleep((caddr_t)fseh->watcher, PRIBIO, "fsevents-close", 1);
1826    }
1827
1828    watcher = fseh->watcher;
1829    fg->fg_data = NULL;
1830    fseh->watcher = NULL;
1831
1832    remove_watcher(watcher);
1833    FREE(fseh, M_TEMP);
1834
1835    return 0;
1836}
1837
1838static void
1839filt_fsevent_detach(struct knote *kn)
1840{
1841	fsevent_handle *fseh = (struct fsevent_handle *)kn->kn_hook;
1842
1843	lock_watch_table();
1844
1845	KNOTE_DETACH(&fseh->knotes, kn);
1846
1847	unlock_watch_table();
1848}
1849
1850/*
1851 * Determine whether this knote should be active
1852 *
1853 * This is kind of subtle.
 * 	--First, notice if the vnode has been revoked: if so, override hint
1855 * 	--EVFILT_READ knotes are checked no matter what the hint is
1856 * 	--Other knotes activate based on hint.
1857 * 	--If hint is revoke, set special flags and activate
1858 */
1859static int
1860filt_fsevent(struct knote *kn, long hint)
1861{
1862	fsevent_handle *fseh = (struct fsevent_handle *)kn->kn_hook;
1863	int activate = 0;
1864	int32_t rd, wr, amt;
1865
1866	if (NOTE_REVOKE == hint) {
1867		kn->kn_flags |= (EV_EOF | EV_ONESHOT);
1868		activate = 1;
1869	}
1870
1871	rd = fseh->watcher->rd;
1872	wr = fseh->watcher->wr;
1873	if (rd <= wr) {
1874	    amt = wr - rd;
1875	} else {
1876	    amt = fseh->watcher->eventq_size - (rd - wr);
1877	}
1878
1879	switch(kn->kn_filter) {
1880		case EVFILT_READ:
1881			kn->kn_data = amt;
1882
1883			if (kn->kn_data != 0) {
1884				activate = 1;
1885			}
1886			break;
1887		case EVFILT_VNODE:
1888			/* Check events this note matches against the hint */
1889			if (kn->kn_sfflags & hint) {
1890				kn->kn_fflags |= hint; /* Set which event occurred */
1891			}
1892			if (kn->kn_fflags != 0) {
1893				activate = 1;
1894			}
1895			break;
1896		default: {
1897			// nothing to do...
1898			break;
1899		}
1900	}
1901
1902	return (activate);
1903}
1904
1905
1906	struct filterops fsevent_filtops = {
1907	.f_isfd = 1,
1908	.f_attach = NULL,
1909	.f_detach = filt_fsevent_detach,
1910	.f_event = filt_fsevent
1911};
1912
1913static int
1914fseventsf_kqfilter(__unused struct fileproc *fp, __unused struct knote *kn, __unused vfs_context_t ctx)
1915{
1916    fsevent_handle *fseh = (struct fsevent_handle *)fp->f_fglob->fg_data;
1917
1918    kn->kn_hook = (void*)fseh;
1919    kn->kn_hookid = 1;
1920    kn->kn_fop = &fsevent_filtops;
1921
1922    lock_watch_table();
1923
1924    KNOTE_ATTACH(&fseh->knotes, kn);
1925
1926    unlock_watch_table();
1927    return 0;
1928}
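/*
 * Illustrative sketch (user space, assumption): a cloned watcher fd can be
 * monitored with kqueue; for EVFILT_READ, filt_fsevent() above reports the
 * number of queued events in kn_data.  "watcher_fd" is a placeholder name.
 *
 *   struct kevent kev;
 *   int kq = kqueue();
 *
 *   EV_SET(&kev, watcher_fd, EVFILT_READ, EV_ADD, 0, 0, NULL);
 *   if (kq >= 0 && kevent(kq, &kev, 1, NULL, 0, NULL) == 0) {
 *       // this kevent() call blocks until at least one fsevent is queued
 *       kevent(kq, NULL, 0, &kev, 1, NULL);
 *   }
 */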
1929
1930
1931static int
1932fseventsf_drain(struct fileproc *fp, __unused vfs_context_t ctx)
1933{
1934    int counter = 0;
1935    fsevent_handle *fseh = (struct fsevent_handle *)fp->f_fglob->fg_data;
1936
1937    fseh->watcher->flags |= WATCHER_CLOSING;
1938
1939    // if there are people still waiting, sleep for 10ms to
1940    // let them clean up and get out of there.  however we
1941    // also don't want to get stuck forever so if they don't
1942    // exit after 5 seconds we're tearing things down anyway.
1943    while(fseh->watcher->blockers && counter++ < 500) {
1944        // issue wakeup in case anyone is blocked waiting for an event
1945        // do this each time we wake up in case the blocker missed
1946        // the wakeup due to the unprotected test of WATCHER_CLOSING
1947        // and decision to tsleep in fmod_watch... this bit of
1948        // latency is a decent tradeoff against not having to
1949        // take and drop a lock in fmod_watch
1950	lock_watch_table();
1951	fsevents_wakeup(fseh->watcher);
1952	unlock_watch_table();
1953
1954	tsleep((caddr_t)fseh->watcher, PRIBIO, "watcher-close", 1);
1955    }
1956
1957    return 0;
1958}
1959
1960
1961static int
1962fseventsopen(__unused dev_t dev, __unused int flag, __unused int mode, __unused struct proc *p)
1963{
1964    if (!kauth_cred_issuser(kauth_cred_get())) {
1965	return EPERM;
1966    }
1967
1968    return 0;
1969}
1970
1971static int
1972fseventsclose(__unused dev_t dev, __unused int flag, __unused int mode, __unused struct proc *p)
1973{
1974    return 0;
1975}
1976
1977static int
1978fseventsread(__unused dev_t dev, __unused struct uio *uio, __unused int ioflag)
1979{
1980    return EIO;
1981}
1982
1983
1984static int
1985parse_buffer_and_add_events(const char *buffer, int bufsize, vfs_context_t ctx, long *remainder)
1986{
1987    const fse_info *finfo, *dest_finfo;
1988    const char *path, *ptr, *dest_path, *event_start=buffer;
1989    int path_len, type, dest_path_len, err = 0;
1990
1991
1992    ptr = buffer;
1993    while ((ptr+sizeof(int)+sizeof(fse_info)+1) < buffer+bufsize) {
1994	type = *(const int *)ptr;
1995	if (type < 0 || type >= FSE_MAX_EVENTS) {
1996	    err = EINVAL;
1997	    break;
1998	}
1999
2000	ptr += sizeof(int);
2001
2002	finfo = (const fse_info *)ptr;
2003	ptr += sizeof(fse_info);
2004
2005	path = ptr;
2006	while(ptr < buffer+bufsize && *ptr != '\0') {
2007	    ptr++;
2008	}
2009
2010	if (ptr >= buffer+bufsize) {
2011	    break;
2012	}
2013
2014	ptr++;   // advance over the trailing '\0'
2015
2016	path_len = ptr - path;
2017
2018	if (type != FSE_RENAME && type != FSE_EXCHANGE) {
2019	    event_start = ptr;   // record where the next event starts
2020
2021	    err = add_fsevent(type, ctx, FSE_ARG_STRING, path_len, path, FSE_ARG_FINFO, finfo, FSE_ARG_DONE);
2022	    if (err) {
2023		break;
2024	    }
2025	    continue;
2026	}
2027
2028	//
2029	// if we're here we have to slurp up the destination finfo
2030	// and path so that we can pass them to the add_fsevent()
2031	// call.  basically it's a copy of the above code.
2032	//
2033	dest_finfo = (const fse_info *)ptr;
2034	ptr += sizeof(fse_info);
2035
2036	dest_path = ptr;
2037	while(ptr < buffer+bufsize && *ptr != '\0') {
2038	    ptr++;
2039	}
2040
2041	if (ptr >= buffer+bufsize) {
2042	    break;
2043	}
2044
2045	ptr++;               // advance over the trailing '\0'
2046	event_start = ptr;   // record where the next event starts
2047
2048	dest_path_len = ptr - dest_path;
2049	//
2050	// If the destination inode number is non-zero, generate the event
2051	// (rename or exchange) with both source and destination FSE_ARG_FINFO.
2052	// Otherwise generate it with only one FSE_ARG_FINFO. If you need to
2053	// inject an exchange with an inode of zero, just make that inode (and
2054	// its path) come in as the first one, not the second.
2055	//
2056	if (dest_finfo->ino) {
2057	        err = add_fsevent(type, ctx,
2058		                  FSE_ARG_STRING, path_len,      path,      FSE_ARG_FINFO, finfo,
2059		                  FSE_ARG_STRING, dest_path_len, dest_path, FSE_ARG_FINFO, dest_finfo,
2060		                  FSE_ARG_DONE);
2061	} else {
2062		err = add_fsevent(type, ctx,
2063		                  FSE_ARG_STRING, path_len,      path,      FSE_ARG_FINFO, finfo,
2064		                  FSE_ARG_STRING, dest_path_len, dest_path,
2065		                  FSE_ARG_DONE);
2066	}
2067
2068	if (err) {
2069	    break;
2070	}
2071
2072    }
2073
2074    // if the last event wasn't complete, the remainder is the number of
2075    // bytes from the start of that incomplete event to the end of the
2076    // buffer, so the caller can carry them over and re-process them.
2077    *remainder = (long)((buffer+bufsize) - event_start);
2078
2079    return err;
2080}
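/*
 * Illustrative sketch (user space, assumption): how a writer might pack a
 * single non-rename event into the format parsed above -- a 32-bit type,
 * an fse_info, then a NUL-terminated path.  FSE_RENAME/FSE_EXCHANGE events
 * append a second fse_info and path for the destination.  The helper name
 * pack_one_event() is hypothetical.
 *
 *   static size_t
 *   pack_one_event(char *buf, size_t buflen, int type,
 *                  const fse_info *finfo, const char *path)
 *   {
 *       size_t path_len = strlen(path) + 1;     // include the trailing '\0'
 *       size_t need = sizeof(int) + sizeof(fse_info) + path_len;
 *
 *       if (need > buflen) {
 *           return 0;                            // caller must flush first
 *       }
 *       memcpy(buf, &type, sizeof(int));
 *       memcpy(buf + sizeof(int), finfo, sizeof(fse_info));
 *       memcpy(buf + sizeof(int) + sizeof(fse_info), path, path_len);
 *       return need;
 *   }
 */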
2081
2082
2083	//
2084	// Note: this buffer size can never be less than
2085	//       2*MAXPATHLEN + 2*sizeof(fse_info) + sizeof(int)
2086	//       because that is the max size of a single event.
2087	//       I made it 4k to be a "nice" size.  Making it
2088	//       smaller is not a good idea.
2089	//
2090#define WRITE_BUFFER_SIZE  4096
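/*
 * Minimal sketch (assumption, not in the original): the constraint in the
 * note above could be enforced at compile time with the classic
 * negative-array-size trick, since this code predates _Static_assert.
 *
 *   typedef char write_buffer_size_check[
 *       (WRITE_BUFFER_SIZE >= 2*MAXPATHLEN + 2*sizeof(fse_info) + sizeof(int))
 *       ? 1 : -1];
 */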
2091char *write_buffer=NULL;
2092
2093static int
2094fseventswrite(__unused dev_t dev, struct uio *uio, __unused int ioflag)
2095{
2096    int error=0, count;
2097    vfs_context_t ctx = vfs_context_current();
2098    long offset=0, remainder;
2099
2100    lck_mtx_lock(&event_writer_lock);
2101
2102    if (write_buffer == NULL) {
2103	if (kmem_alloc(kernel_map, (vm_offset_t *)&write_buffer, WRITE_BUFFER_SIZE)) {
2104	    lck_mtx_unlock(&event_writer_lock);
2105	    return ENOMEM;
2106	}
2107    }
2108
2109    //
2110    // this loop copies in and processes the events written.
2111    // it takes care to copy in reasonable size chunks and
2112    // process them.  if there is an event that spans a chunk
2113    // boundary we're careful to copy those bytes down to the
2114    // beginning of the buffer and read the next chunk in just
2115    // after it.
2116    //
2117    while(uio_resid(uio)) {
2118	if (uio_resid(uio) > (WRITE_BUFFER_SIZE-offset)) {
2119	    count = WRITE_BUFFER_SIZE - offset;
2120	} else {
2121	    count = uio_resid(uio);
2122	}
2123
2124	error = uiomove(write_buffer+offset, count, uio);
2125	if (error) {
2126	    break;
2127	}
2128
2129	// printf("fsevents: write: copied in %d bytes (offset: %ld)\n", count, offset);
2130	error = parse_buffer_and_add_events(write_buffer, offset+count, ctx, &remainder);
2131	if (error) {
2132	    break;
2133	}
2134
2135	//
2136	// if there's any remainder, copy it down to the beginning
2137	// of the buffer so that it will get processed the next time
2138	// through the loop.  note that the remainder always starts
2139	// at an event boundary.
2140	//
2141	if (remainder != 0) {
2142	    // printf("fsevents: write: an event spanned a %d byte boundary.  remainder: %ld\n",
2143	    //	WRITE_BUFFER_SIZE, remainder);
2144	    memmove(write_buffer, (write_buffer+count+offset) - remainder, remainder);
2145	    offset = remainder;
2146	} else {
2147	    offset = 0;
2148	}
2149    }
2150
2151    lck_mtx_unlock(&event_writer_lock);
2152
2153    return error;
2154}
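/*
 * Usage sketch (user space, assumption): events packed in the format
 * consumed by parse_buffer_and_add_events() are simply written to the
 * fsevents device; the loop above handles events that straddle the 4k
 * chunk boundary, so the writer does not need to align anything.
 *
 *   int dev_fd = open("/dev/fsevents", O_RDWR);
 *   if (dev_fd >= 0 && write(dev_fd, buf, used) < 0) {
 *       perror("fsevents write");   // buf/used come from a packer like the sketch above
 *   }
 */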
2155
2156
2157static const struct fileops fsevents_fops = {
2158    DTYPE_FSEVENTS,
2159    fseventsf_read,
2160    fseventsf_write,
2161    fseventsf_ioctl,
2162    fseventsf_select,
2163    fseventsf_close,
2164    fseventsf_kqfilter,
2165    fseventsf_drain
2166};
2167
2168typedef struct ext_fsevent_clone_args {
2169    user_addr_t  event_list;
2170    int32_t      num_events;
2171    int32_t      event_queue_depth;
2172    user_addr_t  fd;
2173} ext_fsevent_clone_args;
2174
2175typedef struct old_fsevent_clone_args {
2176    uint32_t  event_list;
2177    int32_t  num_events;
2178    int32_t  event_queue_depth;
2179    uint32_t  fd;
2180} old_fsevent_clone_args;
2181
2182#define	OLD_FSEVENTS_CLONE	_IOW('s', 1, old_fsevent_clone_args)
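/*
 * Illustrative sketch (user space, assumption): a watcher is created by
 * opening /dev/fsevents and issuing FSEVENTS_CLONE, which hands back a new
 * fd whose fileops are the fsevents_fops table above.  One FSE_REPORT or
 * FSE_IGNORE byte is supplied per event type; names like "watcher_fd" are
 * placeholders.
 *
 *   int8_t              event_list[FSE_MAX_EVENTS];
 *   int32_t             watcher_fd = -1;
 *   fsevent_clone_args  clone_args;
 *
 *   memset(event_list, FSE_REPORT, sizeof(event_list));
 *   clone_args.event_list        = event_list;
 *   clone_args.num_events        = FSE_MAX_EVENTS;
 *   clone_args.event_queue_depth = 1024;
 *   clone_args.fd                = &watcher_fd;
 *
 *   int dev_fd = open("/dev/fsevents", O_RDONLY);
 *   if (dev_fd >= 0 && ioctl(dev_fd, FSEVENTS_CLONE, &clone_args) == 0) {
 *       // read() on watcher_fd now returns fsevent records
 *   }
 */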
2183
2184static int
2185fseventsioctl(__unused dev_t dev, u_long cmd, caddr_t data, __unused int flag, struct proc *p)
2186{
2187    struct fileproc *f;
2188    int fd, error;
2189    fsevent_handle *fseh = NULL;
2190    ext_fsevent_clone_args *fse_clone_args, _fse_clone;
2191    int8_t *event_list;
2192    int is64bit = proc_is64bit(p);
2193
2194    switch (cmd) {
2195	case OLD_FSEVENTS_CLONE: {
2196	    old_fsevent_clone_args *old_args = (old_fsevent_clone_args *)data;
2197
2198	    fse_clone_args = &_fse_clone;
2199	    memset(fse_clone_args, 0, sizeof(ext_fsevent_clone_args));
2200
2201	    fse_clone_args->event_list        = CAST_USER_ADDR_T(old_args->event_list);
2202	    fse_clone_args->num_events        = old_args->num_events;
2203	    fse_clone_args->event_queue_depth = old_args->event_queue_depth;
2204	    fse_clone_args->fd                = CAST_USER_ADDR_T(old_args->fd);
2205	    goto handle_clone;
2206	}
2207
2208	case FSEVENTS_CLONE:
2209	    if (is64bit) {
2210		fse_clone_args = (ext_fsevent_clone_args *)data;
2211	    } else {
2212		fsevent_clone_args *ufse_clone = (fsevent_clone_args *)data;
2213
2214		fse_clone_args = &_fse_clone;
2215		memset(fse_clone_args, 0, sizeof(ext_fsevent_clone_args));
2216
2217		fse_clone_args->event_list        = CAST_USER_ADDR_T(ufse_clone->event_list);
2218		fse_clone_args->num_events        = ufse_clone->num_events;
2219		fse_clone_args->event_queue_depth = ufse_clone->event_queue_depth;
2220		fse_clone_args->fd                = CAST_USER_ADDR_T(ufse_clone->fd);
2221	    }
2222
2223	handle_clone:
2224	    if (fse_clone_args->num_events < 0 || fse_clone_args->num_events > 4096) {
2225		return EINVAL;
2226	    }
2227
2228	    MALLOC(fseh, fsevent_handle *, sizeof(fsevent_handle),
2229		   M_TEMP, M_WAITOK);
2230	    if (fseh == NULL) {
2231		return ENOMEM;
2232	    }
2233	    memset(fseh, 0, sizeof(fsevent_handle));
2234
2235	    klist_init(&fseh->knotes);
2236
2237	    MALLOC(event_list, int8_t *,
2238		   fse_clone_args->num_events * sizeof(int8_t),
2239		   M_TEMP, M_WAITOK);
2240	    if (event_list == NULL) {
2241		FREE(fseh, M_TEMP);
2242		return ENOMEM;
2243	    }
2244
2245	    error = copyin(fse_clone_args->event_list,
2246			   (void *)event_list,
2247			   fse_clone_args->num_events * sizeof(int8_t));
2248	    if (error) {
2249		FREE(event_list, M_TEMP);
2250		FREE(fseh, M_TEMP);
2251		return error;
2252	    }
2253
2254	    error = add_watcher(event_list,
2255				fse_clone_args->num_events,
2256				fse_clone_args->event_queue_depth,
2257			        &fseh->watcher,
2258			        fseh);
2259	    if (error) {
2260		FREE(event_list, M_TEMP);
2261		FREE(fseh, M_TEMP);
2262		return error;
2263	    }
2264
2265	    fseh->watcher->fseh = fseh;
2266
2267	    error = falloc(p, &f, &fd, vfs_context_current());
2268	    if (error) {
2269		remove_watcher(fseh->watcher);
2270		FREE(event_list, M_TEMP);
2271		FREE(fseh, M_TEMP);
2272		return (error);
2273	    }
2274	    proc_fdlock(p);
2275	    f->f_fglob->fg_flag = FREAD | FWRITE;
2276	    f->f_fglob->fg_ops = &fsevents_fops;
2277	    f->f_fglob->fg_data = (caddr_t) fseh;
2278	    proc_fdunlock(p);
2279	    error = copyout((void *)&fd, fse_clone_args->fd, sizeof(int32_t));
2280	    if (error != 0) {
2281		fp_free(p, fd, f);
2282	    } else {
2283		proc_fdlock(p);
2284		procfdtbl_releasefd(p, fd, NULL);
2285		fp_drop(p, fd, f, 1);
2286		proc_fdunlock(p);
2287	    }
2288	    break;
2289
2290	default:
2291	    error = EINVAL;
2292	    break;
2293    }
2294
2295    return error;
2296}
2297
2298static void
2299fsevents_wakeup(fs_event_watcher *watcher)
2300{
2301    selwakeup(&watcher->fseh->si);
2302    KNOTE(&watcher->fseh->knotes, NOTE_WRITE|NOTE_NONE);
2303    wakeup((caddr_t)watcher);
2304}
2305
2306
2307/*
2308 * A struct describing which functions will get invoked for certain
2309 * actions.
2310 */
2311static struct cdevsw fsevents_cdevsw =
2312{
2313    fseventsopen,		/* open */
2314    fseventsclose,		/* close */
2315    fseventsread,		/* read */
2316    fseventswrite,		/* write */
2317    fseventsioctl,		/* ioctl */
2318    (stop_fcn_t *)&nulldev,	/* stop */
2319    (reset_fcn_t *)&nulldev,	/* reset */
2320    NULL,			/* tty's */
2321    eno_select,			/* select */
2322    eno_mmap,			/* mmap */
2323    eno_strat,			/* strategy */
2324    eno_getc,			/* getc */
2325    eno_putc,			/* putc */
2326    0				/* type */
2327};
2328
2329
2330/*
2331 * Called to initialize our device,
2332 * and to register ourselves with devfs
2333 */
2334
2335void
2336fsevents_init(void)
2337{
2338    int ret;
2339
2340    if (fsevents_installed) {
2341	return;
2342    }
2343
2344    fsevents_installed = 1;
2345
2346    ret = cdevsw_add(-1, &fsevents_cdevsw);
2347    if (ret < 0) {
2348	fsevents_installed = 0;
2349	return;
2350    }
2351
2352    devfs_make_node(makedev (ret, 0), DEVFS_CHAR,
2353		    UID_ROOT, GID_WHEEL, 0644, "fsevents", 0);
2354
2355    fsevents_internal_init();
2356}
2357
2358
2359char *
2360get_pathbuff(void)
2361{
2362    char *path;
2363
2364    MALLOC_ZONE(path, char *, MAXPATHLEN, M_NAMEI, M_WAITOK);
2365    return path;
2366}
2367
2368void
2369release_pathbuff(char *path)
2370{
2371
2372    if (path == NULL) {
2373	return;
2374    }
2375    FREE_ZONE(path, MAXPATHLEN, M_NAMEI);
2376}
2377
2378int
2379get_fse_info(struct vnode *vp, fse_info *fse, __unused vfs_context_t ctx)
2380{
2381    struct vnode_attr va;
2382
2383    VATTR_INIT(&va);
2384    VATTR_WANTED(&va, va_fsid);
2385    VATTR_WANTED(&va, va_fileid);
2386    VATTR_WANTED(&va, va_mode);
2387    VATTR_WANTED(&va, va_uid);
2388    VATTR_WANTED(&va, va_gid);
2389    if (vp->v_flag & VISHARDLINK) {
2390	if (vp->v_type == VDIR) {
2391	    VATTR_WANTED(&va, va_dirlinkcount);
2392	} else {
2393	    VATTR_WANTED(&va, va_nlink);
2394	}
2395    }
2396
2397    if (vnode_getattr(vp, &va, vfs_context_kernel()) != 0) {
2398	memset(fse, 0, sizeof(fse_info));
2399	return -1;
2400    }
2401
2402    return vnode_get_fse_info_from_vap(vp, fse, &va);
2403}
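/*
 * Illustrative sketch (assumption, modeled on the caller pattern used
 * elsewhere in this file): a filesystem posts an event by filling an
 * fse_info with get_fse_info() and handing it to add_fsevent() along with
 * the vnode's path.
 *
 *   char     pathbuf[MAXPATHLEN];
 *   int      len = sizeof(pathbuf);
 *   fse_info finfo;
 *
 *   if (need_fsevent(FSE_CONTENT_MODIFIED, vp) &&
 *       get_fse_info(vp, &finfo, ctx) == 0 &&
 *       vn_getpath(vp, pathbuf, &len) == 0) {
 *       add_fsevent(FSE_CONTENT_MODIFIED, ctx,
 *                   FSE_ARG_STRING, len, pathbuf,
 *                   FSE_ARG_FINFO, &finfo,
 *                   FSE_ARG_DONE);
 *   }
 */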
2404
2405int
2406vnode_get_fse_info_from_vap(vnode_t vp, fse_info *fse, struct vnode_attr *vap)
2407{
2408    fse->ino  = (ino64_t)vap->va_fileid;
2409    fse->dev  = (dev_t)vap->va_fsid;
2410    fse->mode = (int32_t)vnode_vttoif(vnode_vtype(vp)) | vap->va_mode;
2411    fse->uid  = (uid_t)vap->va_uid;
2412    fse->gid  = (gid_t)vap->va_gid;
2413    if (vp->v_flag & VISHARDLINK) {
2414	fse->mode |= FSE_MODE_HLINK;
2415	if (vp->v_type == VDIR) {
2416	    fse->nlink = (uint64_t)vap->va_dirlinkcount;
2417	} else {
2418	    fse->nlink = (uint64_t)vap->va_nlink;
2419	}
2420    }
2421
2422    return 0;
2423}
2424
2425void
2426create_fsevent_from_kevent(vnode_t vp, uint32_t kevents, struct vnode_attr *vap)
2427{
2428    int fsevent_type=FSE_CONTENT_MODIFIED, len;   // the default is the most pessimistic
2429    char pathbuf[MAXPATHLEN];
2430    fse_info fse;
2431
2432
2433    if (kevents & VNODE_EVENT_DELETE) {
2434        fsevent_type = FSE_DELETE;
2435    } else if (kevents & (VNODE_EVENT_EXTEND|VNODE_EVENT_WRITE)) {
2436	fsevent_type = FSE_CONTENT_MODIFIED;
2437    } else if (kevents & VNODE_EVENT_LINK) {
2438	fsevent_type = FSE_CREATE_FILE;
2439    } else if (kevents & VNODE_EVENT_RENAME) {
2440	fsevent_type = FSE_CREATE_FILE;    // XXXdbg - should use FSE_RENAME but we don't have the destination info
2441    } else if (kevents & (VNODE_EVENT_FILE_CREATED|VNODE_EVENT_FILE_REMOVED|VNODE_EVENT_DIR_CREATED|VNODE_EVENT_DIR_REMOVED)) {
2442	fsevent_type = FSE_STAT_CHANGED;  // XXXdbg - because vp is a dir and the thing created/removed lived inside it
2443    } else {   // a catch all for VNODE_EVENT_PERMS, VNODE_EVENT_ATTRIB and anything else
2444        fsevent_type = FSE_STAT_CHANGED;
2445    }
2446
2447    // printf("convert_kevent: kevents 0x%x fsevent type 0x%x (for %s)\n", kevents, fsevent_type, vp->v_name ? vp->v_name : "(no-name)");
2448
2449    fse.dev = vap->va_fsid;
2450    fse.ino = vap->va_fileid;
2451    fse.mode = vnode_vttoif(vnode_vtype(vp)) | (uint32_t)vap->va_mode;
2452    if (vp->v_flag & VISHARDLINK) {
2453	fse.mode |= FSE_MODE_HLINK;
2454	if (vp->v_type == VDIR) {
2455	    fse.nlink = vap->va_dirlinkcount;
2456	} else {
2457	    fse.nlink = vap->va_nlink;
2458	}
2459    }
2460
2461    if (vp->v_type == VDIR) {
2462	fse.mode |= FSE_REMOTE_DIR_EVENT;
2463    }
2464
2465
2466    fse.uid = vap->va_uid;
2467    fse.gid = vap->va_gid;
2468
2469    len = sizeof(pathbuf);
2470    if (vn_getpath(vp, pathbuf, &len) == 0) {
2471	add_fsevent(fsevent_type, vfs_context_current(), FSE_ARG_STRING, len, pathbuf, FSE_ARG_FINFO, &fse, FSE_ARG_DONE);
2472    }
2473    return;
2474}
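/*
 * Illustrative sketch (assumption): create_fsevent_from_kevent() serves as
 * the CONFIG_FSE back end for vnode event notifications, so a VFS-level
 * caller would typically reach it via vnode_notify() with a filled-in
 * vnode_attr carrying at least the fields used above.
 *
 *   struct vnode_attr va;
 *
 *   VATTR_INIT(&va);
 *   VATTR_WANTED(&va, va_fsid);
 *   VATTR_WANTED(&va, va_fileid);
 *   VATTR_WANTED(&va, va_mode);
 *   VATTR_WANTED(&va, va_uid);
 *   VATTR_WANTED(&va, va_gid);
 *   if (vnode_getattr(vp, &va, ctx) == 0) {
 *       vnode_notify(vp, VNODE_EVENT_WRITE, &va);
 *   }
 */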
2475
2476#else /* CONFIG_FSE */
2477/*
2478 * The get_pathbuff and release_pathbuff routines are used in places not
2479 * related to fsevents, and it's a handy abstraction, so define trivial
2480 * versions that don't cache a pool of buffers.  This way, we don't have
2481 * to conditionalize the callers, and they still get the advantage of the
2482 * pool of buffers if CONFIG_FSE is turned on.
2483 */
2484char *
2485get_pathbuff(void)
2486{
2487	char *path;
2488	MALLOC_ZONE(path, char *, MAXPATHLEN, M_NAMEI, M_WAITOK);
2489	return path;
2490}
2491
2492void
2493release_pathbuff(char *path)
2494{
2495	FREE_ZONE(path, MAXPATHLEN, M_NAMEI);
2496}
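/*
 * Usage sketch (assumption): with or without CONFIG_FSE, callers pair the
 * two routines around a temporary MAXPATHLEN buffer:
 *
 *   char *path = get_pathbuff();
 *   int   len  = MAXPATHLEN;
 *
 *   if (vn_getpath(vp, path, &len) == 0) {
 *       // ... use path (len includes the trailing '\0') ...
 *   }
 *   release_pathbuff(path);
 */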
2497#endif /* CONFIG_FSE */
2498