/* Copyright Joyent, Inc. and other Node contributors. All rights reserved.
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "uv.h"
#include "uv/tree.h"
#include "internal.h"

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <assert.h>
#include <errno.h>

#include <sys/inotify.h>
#include <sys/types.h>
#include <unistd.h>

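/* Each active inotify watch descriptor gets one watcher_list: it stores
 * the watched path and a queue of the uv_fs_event_t handles interested
 * in that path. The lists live in a red-black tree keyed on the watch
 * descriptor.
 */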
struct watcher_list {
  RB_ENTRY(watcher_list) entry;
  QUEUE watchers;
  int iterating;
  char* path;
  int wd;
};

struct watcher_root {
  struct watcher_list* rbh_root;
};
#define CAST(p) ((struct watcher_root*)(p))


static int compare_watchers(const struct watcher_list* a,
                            const struct watcher_list* b) {
  if (a->wd < b->wd) return -1;
  if (a->wd > b->wd) return 1;
  return 0;
}


RB_GENERATE_STATIC(watcher_root, watcher_list, entry, compare_watchers)


static void uv__inotify_read(uv_loop_t* loop,
                             uv__io_t* w,
                             unsigned int revents);

static void maybe_free_watcher_list(struct watcher_list* w,
                                    uv_loop_t* loop);

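/* Lazily create the loop's inotify fd on first use and register it with
 * the event loop for read readiness.
 */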
static int init_inotify(uv_loop_t* loop) {
  int fd;

  if (loop->inotify_fd != -1)
    return 0;

  fd = inotify_init1(IN_NONBLOCK | IN_CLOEXEC);
  if (fd < 0)
    return UV__ERR(errno);

  loop->inotify_fd = fd;
  uv__io_init(&loop->inotify_read_watcher, uv__inotify_read, loop->inotify_fd);
  uv__io_start(loop, &loop->inotify_read_watcher, POLLIN);

  return 0;
}


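/* Called when the loop is re-initialized after a fork(). The watchers
 * saved in old_watchers are stopped and restarted so that they register
 * with the fresh inotify fd.
 */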
int uv__inotify_fork(uv_loop_t* loop, void* old_watchers) {
  /* Open the inotify fd and re-arm all the inotify watchers. */
  int err;
  struct watcher_list* tmp_watcher_list_iter;
  struct watcher_list* watcher_list;
  struct watcher_list tmp_watcher_list;
  QUEUE queue;
  QUEUE* q;
  uv_fs_event_t* handle;
  char* tmp_path;

  if (old_watchers != NULL) {
    /* We must restore the old watcher list so that we can close the
     * handles that are still attached to it.
     */
    loop->inotify_watchers = old_watchers;

    QUEUE_INIT(&tmp_watcher_list.watchers);
    /* The queue is shared with uv_fs_event_start() and uv_fs_event_stop(),
     * which makes QUEUE_FOREACH unsafe here. Use the QUEUE_MOVE trick to
     * iterate safely, and don't free the watcher list until iteration is
     * done. Cf. uv__inotify_read.
     */
    RB_FOREACH_SAFE(watcher_list, watcher_root,
                    CAST(&old_watchers), tmp_watcher_list_iter) {
      watcher_list->iterating = 1;
      QUEUE_MOVE(&watcher_list->watchers, &queue);
      while (!QUEUE_EMPTY(&queue)) {
        q = QUEUE_HEAD(&queue);
        handle = QUEUE_DATA(q, uv_fs_event_t, watchers);
        /* It's critical to keep a copy of path here, because it
         * will be set to NULL by stop() and then deallocated by
         * maybe_free_watcher_list.
         */
        tmp_path = uv__strdup(handle->path);
        assert(tmp_path != NULL);
        QUEUE_REMOVE(q);
        QUEUE_INSERT_TAIL(&watcher_list->watchers, q);
        uv_fs_event_stop(handle);

        QUEUE_INSERT_TAIL(&tmp_watcher_list.watchers, &handle->watchers);
        handle->path = tmp_path;
      }
      watcher_list->iterating = 0;
      maybe_free_watcher_list(watcher_list, loop);
    }

    QUEUE_MOVE(&tmp_watcher_list.watchers, &queue);
    while (!QUEUE_EMPTY(&queue)) {
      q = QUEUE_HEAD(&queue);
      QUEUE_REMOVE(q);
      handle = QUEUE_DATA(q, uv_fs_event_t, watchers);
      tmp_path = handle->path;
      handle->path = NULL;
      err = uv_fs_event_start(handle, handle->cb, tmp_path, 0);
      uv__free(tmp_path);
      if (err)
        return err;
    }
  }

  return 0;
}


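/* Look up the watcher_list for a watch descriptor; returns NULL when the
 * descriptor is unknown, e.g. for a stale event whose watch was removed.
 */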
static struct watcher_list* find_watcher(uv_loop_t* loop, int wd) {
  struct watcher_list w;
  w.wd = wd;
  return RB_FIND(watcher_root, CAST(&loop->inotify_watchers), &w);
}

static void maybe_free_watcher_list(struct watcher_list* w, uv_loop_t* loop) {
  /* If watcher_list->watchers is being iterated over, we can't free it. */
  if ((!w->iterating) && QUEUE_EMPTY(&w->watchers)) {
    /* No watchers left for this path. Clean up. */
    RB_REMOVE(watcher_root, CAST(&loop->inotify_watchers), w);
    inotify_rm_watch(loop->inotify_fd, w->wd);
    uv__free(w);
  }
}

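/* Drain the inotify fd and dispatch each event to the handles watching
 * the corresponding path. Invoked by the event loop when the fd becomes
 * readable.
 */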
static void uv__inotify_read(uv_loop_t* loop,
                             uv__io_t* dummy,
                             unsigned int events) {
  const struct inotify_event* e;
  struct watcher_list* w;
  uv_fs_event_t* h;
  QUEUE queue;
  QUEUE* q;
  const char* path;
  ssize_t size;
  const char* p;
  /* Needs to be large enough for sizeof(inotify_event) + strlen(path). */
  char buf[4096];

  for (;;) {
    do
      size = read(loop->inotify_fd, buf, sizeof(buf));
    while (size == -1 && errno == EINTR);

    if (size == -1) {
      assert(errno == EAGAIN || errno == EWOULDBLOCK);
      break;
    }

    /* On Linux < 2.6.21, a zero-length read means the buffer was too
     * small to hold the next event.
     */
    assert(size > 0);

    /* Now we have one or more inotify_event structs. */
    for (p = buf; p < buf + size; p += sizeof(*e) + e->len) {
      e = (const struct inotify_event*) p;

      events = 0;
      if (e->mask & (IN_ATTRIB|IN_MODIFY))
        events |= UV_CHANGE;
      if (e->mask & ~(IN_ATTRIB|IN_MODIFY))
        events |= UV_RENAME;

      w = find_watcher(loop, e->wd);
      if (w == NULL)
        continue; /* Stale event, no watchers left. */

      /* inotify does not return the filename when monitoring a single file
       * for modifications. Repurpose the filename for API compatibility.
       * I'm not convinced this is a good thing; maybe it should go.
       */
      path = e->len ? (const char*) (e + 1) : uv__basename_r(w->path);

      /* We're about to iterate over the queue and call the user's
       * callbacks. What can go wrong? A callback could call
       * uv_fs_event_stop() and the queue could change under our feet.
       * So we use the QUEUE_MOVE() trick to iterate safely, and we don't
       * free the watcher_list until we're done iterating.
       *
       * First, tell uv_fs_event_stop() (which could be called from a
       * user's callback) not to free the watcher_list.
       */
      w->iterating = 1;
      QUEUE_MOVE(&w->watchers, &queue);
      while (!QUEUE_EMPTY(&queue)) {
        q = QUEUE_HEAD(&queue);
        h = QUEUE_DATA(q, uv_fs_event_t, watchers);

        QUEUE_REMOVE(q);
        QUEUE_INSERT_TAIL(&w->watchers, q);

        h->cb(h, path, events, 0);
      }
      /* Done iterating; time to (maybe) free an empty watcher_list. */
      w->iterating = 0;
      maybe_free_watcher_list(w, loop);
    }
  }
}


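/* Only initializes the handle; no inotify resources are allocated until
 * uv_fs_event_start() is called.
 */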
int uv_fs_event_init(uv_loop_t* loop, uv_fs_event_t* handle) {
  uv__handle_init(loop, (uv_handle_t*)handle, UV_FS_EVENT);
  return 0;
}


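/* Start watching `path` and deliver events through `cb`. A minimal usage
 * sketch, assuming an initialized loop (`loop`, `on_change` and the
 * watched path below are hypothetical names):
 *
 *   static void on_change(uv_fs_event_t* h, const char* fn, int ev, int st) {
 *     if (ev & UV_RENAME) printf("renamed: %s\n", fn ? fn : "?");
 *     if (ev & UV_CHANGE) printf("changed: %s\n", fn ? fn : "?");
 *   }
 *
 *   uv_fs_event_t handle;
 *   uv_fs_event_init(loop, &handle);
 *   uv_fs_event_start(&handle, on_change, "/tmp/example", 0);
 *   uv_run(loop, UV_RUN_DEFAULT);
 */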
int uv_fs_event_start(uv_fs_event_t* handle,
                      uv_fs_event_cb cb,
                      const char* path,
                      unsigned int flags) {
  struct watcher_list* w;
  size_t len;
  int events;
  int err;
  int wd;

  if (uv__is_active(handle))
    return UV_EINVAL;

  err = init_inotify(handle->loop);
  if (err)
    return err;

  events = IN_ATTRIB
         | IN_CREATE
         | IN_MODIFY
         | IN_DELETE
         | IN_DELETE_SELF
         | IN_MOVE_SELF
         | IN_MOVED_FROM
         | IN_MOVED_TO;

  wd = inotify_add_watch(handle->loop->inotify_fd, path, events);
  if (wd == -1)
    return UV__ERR(errno);

  w = find_watcher(handle->loop, wd);
  if (w)
    goto no_insert;

  len = strlen(path) + 1;
  w = uv__malloc(sizeof(*w) + len);
  if (w == NULL)
    return UV_ENOMEM;

  w->wd = wd;
  w->path = memcpy(w + 1, path, len);
  QUEUE_INIT(&w->watchers);
  w->iterating = 0;
  RB_INSERT(watcher_root, CAST(&handle->loop->inotify_watchers), w);

no_insert:
  uv__handle_start(handle);
  QUEUE_INSERT_TAIL(&w->watchers, &handle->watchers);
  handle->path = w->path;
  handle->cb = cb;
  handle->wd = wd;

  return 0;
}


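/* Stop the handle and detach it from its watcher_list; the underlying
 * inotify watch is removed once no handles for that path remain.
 */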
int uv_fs_event_stop(uv_fs_event_t* handle) {
  struct watcher_list* w;

  if (!uv__is_active(handle))
    return 0;

  w = find_watcher(handle->loop, handle->wd);
  assert(w != NULL);

  handle->wd = -1;
  handle->path = NULL;
  uv__handle_stop(handle);
  QUEUE_REMOVE(&handle->watchers);

  maybe_free_watcher_list(w, handle->loop);

  return 0;
}


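/* Closing the handle is the same as stopping it; there is nothing else
 * to release here.
 */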
void uv__fs_event_close(uv_fs_event_t* handle) {
  uv_fs_event_stop(handle);
}