#include <linux/fanotify.h>
#include <linux/fcntl.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/anon_inodes.h>
#include <linux/fsnotify_backend.h>
#include <linux/init.h>
#include <linux/mount.h>
#include <linux/namei.h>
#include <linux/poll.h>
#include <linux/security.h>
#include <linux/syscalls.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/uaccess.h>

#include <asm/ioctls.h>

extern const struct fsnotify_ops fanotify_fsnotify_ops;

static struct kmem_cache *fanotify_mark_cache __read_mostly;
static struct kmem_cache *fanotify_response_event_cache __read_mostly;

struct fanotify_response_event {
	struct list_head list;
	__s32 fd;
	struct fsnotify_event *event;
};

/*
 * Get an fsnotify notification event if one exists and is small
 * enough to fit in "count". Return an error pointer if the count
 * is not large enough.
 *
 * Called with the group->notification_mutex held.
 */
static struct fsnotify_event *get_one_event(struct fsnotify_group *group,
					    size_t count)
{
	BUG_ON(!mutex_is_locked(&group->notification_mutex));

	pr_debug("%s: group=%p count=%zd\n", __func__, group, count);

	if (fsnotify_notify_queue_is_empty(group))
		return NULL;

	if (FAN_EVENT_METADATA_LEN > count)
		return ERR_PTR(-EINVAL);

	/* held the notification_mutex the whole time, so this is the
	 * same event we peeked above */
	return fsnotify_remove_notify_event(group);
}

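/*
 * Open a new file for the path carried by @event and reserve a file
 * descriptor for it in the current task's file table.  The open uses
 * the f_flags the listener passed to fanotify_init() plus
 * FMODE_NONOTIFY, so the listener's own accesses do not generate
 * further events.  Returns the installed fd or a negative errno.
 */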
static int create_fd(struct fsnotify_group *group, struct fsnotify_event *event)
{
	int client_fd;
	struct dentry *dentry;
	struct vfsmount *mnt;
	struct file *new_file;

	pr_debug("%s: group=%p event=%p\n", __func__, group, event);

	client_fd = get_unused_fd();
	if (client_fd < 0)
		return client_fd;

	if (event->data_type != FSNOTIFY_EVENT_PATH) {
		WARN_ON(1);
		put_unused_fd(client_fd);
		return -EINVAL;
	}

	/*
	 * we need a new file handle for the userspace program so it can
	 * read even if the file was originally opened O_WRONLY.
	 */
	dentry = dget(event->path.dentry);
	mnt = mntget(event->path.mnt);
	/* it's possible this event was an overflow event.  in that case
	 * dentry and mnt are NULL; that's fine, just don't call
	 * dentry_open() */
	if (dentry && mnt)
		new_file = dentry_open(dentry, mnt,
				       group->fanotify_data.f_flags | FMODE_NONOTIFY,
				       current_cred());
	else
		new_file = ERR_PTR(-EOVERFLOW);
	if (IS_ERR(new_file)) {
		/*
		 * we still send an event even if we can't open the file.  this
		 * can happen when say tasks are gone and we try to open their
		 * /proc files or we try to open a WRONLY file like in sysfs.
		 * we just send the errno to userspace since there isn't much
		 * else we can do.
		 */
		put_unused_fd(client_fd);
		client_fd = PTR_ERR(new_file);
	} else {
		fd_install(client_fd, new_file);
	}

	return client_fd;
}

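/*
 * Translate an in-kernel fsnotify event into the fixed-size
 * fanotify_event_metadata record handed to userspace.  Returns the
 * newly created fd on success (also stored in metadata->fd) or a
 * negative errno from create_fd().
 */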
static ssize_t fill_event_metadata(struct fsnotify_group *group,
				   struct fanotify_event_metadata *metadata,
				   struct fsnotify_event *event)
{
	pr_debug("%s: group=%p metadata=%p event=%p\n", __func__,
		 group, metadata, event);

	metadata->event_len = FAN_EVENT_METADATA_LEN;
	metadata->vers = FANOTIFY_METADATA_VERSION;
	metadata->mask = event->mask & FAN_ALL_OUTGOING_EVENTS;
	metadata->pid = pid_vnr(event->tgid);
	metadata->fd = create_fd(group, event);

	return metadata->fd;
}

#ifdef CONFIG_FANOTIFY_ACCESS_PERMISSIONS
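/*
 * Find and unlink the response event queued for @fd on the group's
 * access_list, if any.  Used both when userspace answers a permission
 * event and when a pending answer must be thrown away.
 */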
static struct fanotify_response_event *dequeue_re(struct fsnotify_group *group,
						  __s32 fd)
{
	struct fanotify_response_event *re, *return_re = NULL;

	mutex_lock(&group->fanotify_data.access_mutex);
	list_for_each_entry(re, &group->fanotify_data.access_list, list) {
		if (re->fd != fd)
			continue;

		list_del_init(&re->list);
		return_re = re;
		break;
	}
	mutex_unlock(&group->fanotify_data.access_mutex);

	pr_debug("%s: found return_re=%p\n", __func__, return_re);

	return return_re;
}

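/*
 * Handle a struct fanotify_response written by the listener: validate
 * it, match it to a queued permission event by fd, record FAN_ALLOW or
 * FAN_DENY in the event, and wake up whoever is waiting on the verdict.
 */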
static int process_access_response(struct fsnotify_group *group,
				   struct fanotify_response *response_struct)
{
	struct fanotify_response_event *re;
	__s32 fd = response_struct->fd;
	__u32 response = response_struct->response;

	pr_debug("%s: group=%p fd=%d response=%d\n", __func__, group,
		 fd, response);
	/*
	 * make sure the response is valid, if invalid we do nothing and either
	 * userspace can send a valid response or we will clean it up after the
	 * timeout
	 */
	switch (response) {
	case FAN_ALLOW:
	case FAN_DENY:
		break;
	default:
		return -EINVAL;
	}

	if (fd < 0)
		return -EINVAL;

	re = dequeue_re(group, fd);
	if (!re)
		return -ENOENT;

	re->event->response = response;

	wake_up(&group->fanotify_data.access_waitq);

	kmem_cache_free(fanotify_response_event_cache, re);

	return 0;
}

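/*
 * For permission events, queue a response tracking entry before the
 * metadata is copied to userspace, so a later fanotify_write() can
 * find it.  If the group is already bypassing permission checks (it is
 * being torn down), auto-allow instead of queueing.
 */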
static int prepare_for_access_response(struct fsnotify_group *group,
				       struct fsnotify_event *event,
				       __s32 fd)
{
	struct fanotify_response_event *re;

	if (!(event->mask & FAN_ALL_PERM_EVENTS))
		return 0;

	re = kmem_cache_alloc(fanotify_response_event_cache, GFP_KERNEL);
	if (!re)
		return -ENOMEM;

	re->event = event;
	re->fd = fd;

	mutex_lock(&group->fanotify_data.access_mutex);

	if (group->fanotify_data.bypass_perm) {
		mutex_unlock(&group->fanotify_data.access_mutex);
		kmem_cache_free(fanotify_response_event_cache, re);
		event->response = FAN_ALLOW;
		return 0;
	}

	list_add_tail(&re->list, &group->fanotify_data.access_list);
	mutex_unlock(&group->fanotify_data.access_mutex);

	return 0;
}

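/*
 * Undo prepare_for_access_response() when copying the metadata to
 * userspace failed: the listener never saw the event, so drop the
 * queued response entry again.
 */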
static void remove_access_response(struct fsnotify_group *group,
				   struct fsnotify_event *event,
				   __s32 fd)
{
	struct fanotify_response_event *re;

	if (!(event->mask & FAN_ALL_PERM_EVENTS))
		return;

	re = dequeue_re(group, fd);
	if (!re)
		return;

	BUG_ON(re->event != event);

	kmem_cache_free(fanotify_response_event_cache, re);

	return;
}
#else
static int prepare_for_access_response(struct fsnotify_group *group,
				       struct fsnotify_event *event,
				       __s32 fd)
{
	return 0;
}

static void remove_access_response(struct fsnotify_group *group,
				   struct fsnotify_event *event,
				   __s32 fd)
{
	return;
}
#endif

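/*
 * Convert one event to its userspace representation and copy it into
 * the listener's buffer.  On failure the freshly created fd is closed
 * and any queued permission-response entry is removed again.
 */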
static ssize_t copy_event_to_user(struct fsnotify_group *group,
				  struct fsnotify_event *event,
				  char __user *buf)
{
	struct fanotify_event_metadata fanotify_event_metadata;
	int fd, ret;

	pr_debug("%s: group=%p event=%p\n", __func__, group, event);

	fd = fill_event_metadata(group, &fanotify_event_metadata, event);
	if (fd < 0)
		return fd;

	ret = prepare_for_access_response(group, event, fd);
	if (ret)
		goto out_close_fd;

	ret = -EFAULT;
	if (copy_to_user(buf, &fanotify_event_metadata, FAN_EVENT_METADATA_LEN))
		goto out_kill_access_response;

	return FAN_EVENT_METADATA_LEN;

out_kill_access_response:
	remove_access_response(group, event, fd);
out_close_fd:
	sys_close(fd);
	return ret;
}

/* fanotify userspace file descriptor functions */
static unsigned int fanotify_poll(struct file *file, poll_table *wait)
{
	struct fsnotify_group *group = file->private_data;
	int ret = 0;

	poll_wait(file, &group->notification_waitq, wait);
	mutex_lock(&group->notification_mutex);
	if (!fsnotify_notify_queue_is_empty(group))
		ret = POLLIN | POLLRDNORM;
	mutex_unlock(&group->notification_mutex);

	return ret;
}

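/*
 * Drain queued events into the listener's buffer.  Blocks until at
 * least one event can be returned unless O_NONBLOCK is set; returns as
 * many whole events as fit in @count.
 */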
static ssize_t fanotify_read(struct file *file, char __user *buf,
			     size_t count, loff_t *pos)
{
	struct fsnotify_group *group;
	struct fsnotify_event *kevent;
	char __user *start;
	int ret;
	DEFINE_WAIT(wait);

	start = buf;
	group = file->private_data;

	pr_debug("%s: group=%p\n", __func__, group);

	while (1) {
		prepare_to_wait(&group->notification_waitq, &wait, TASK_INTERRUPTIBLE);

		mutex_lock(&group->notification_mutex);
		kevent = get_one_event(group, count);
		mutex_unlock(&group->notification_mutex);

		if (kevent) {
			ret = PTR_ERR(kevent);
			if (IS_ERR(kevent))
				break;
			ret = copy_event_to_user(group, kevent, buf);
			fsnotify_put_event(kevent);
			if (ret < 0)
				break;
			buf += ret;
			count -= ret;
			continue;
		}

		ret = -EAGAIN;
		if (file->f_flags & O_NONBLOCK)
			break;
		ret = -EINTR;
		if (signal_pending(current))
			break;

		if (start != buf)
			break;

		schedule();
	}

	finish_wait(&group->notification_waitq, &wait);
	if (start != buf && ret != -EFAULT)
		ret = buf - start;
	return ret;
}

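/*
 * Writing to a fanotify fd is how the listener answers permission
 * events.  Each write carries one struct fanotify_response; without
 * CONFIG_FANOTIFY_ACCESS_PERMISSIONS the write path is rejected.
 */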
static ssize_t fanotify_write(struct file *file, const char __user *buf, size_t count, loff_t *pos)
{
#ifdef CONFIG_FANOTIFY_ACCESS_PERMISSIONS
	struct fanotify_response response = { .fd = -1, .response = -1 };
	struct fsnotify_group *group;
	int ret;

	group = file->private_data;

	if (count > sizeof(response))
		count = sizeof(response);

	pr_debug("%s: group=%p count=%zu\n", __func__, group, count);

	if (copy_from_user(&response, buf, count))
		return -EFAULT;

	ret = process_access_response(group, &response);
	if (ret < 0)
		count = ret;

	return count;
#else
	return -EINVAL;
#endif
}

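/*
 * Final fput() on the fanotify fd.  Any permission events still
 * waiting for an answer are force-allowed so the blocked openers can
 * make progress, then the group reference taken in fanotify_init() is
 * dropped.
 */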
static int fanotify_release(struct inode *ignored, struct file *file)
{
	struct fsnotify_group *group = file->private_data;
	struct fanotify_response_event *re, *lre;

	pr_debug("%s: file=%p group=%p\n", __func__, file, group);

#ifdef CONFIG_FANOTIFY_ACCESS_PERMISSIONS
	mutex_lock(&group->fanotify_data.access_mutex);

	group->fanotify_data.bypass_perm = true;

	list_for_each_entry_safe(re, lre, &group->fanotify_data.access_list, list) {
		pr_debug("%s: found group=%p re=%p event=%p\n", __func__, group,
			 re, re->event);

		list_del_init(&re->list);
		re->event->response = FAN_ALLOW;

		kmem_cache_free(fanotify_response_event_cache, re);
	}
	mutex_unlock(&group->fanotify_data.access_mutex);

	wake_up(&group->fanotify_data.access_waitq);
#endif
	/* matches the fanotify_init->fsnotify_alloc_group */
	fsnotify_put_group(group);

	return 0;
}

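/*
 * FIONREAD reports how many bytes a read() would currently return,
 * i.e. the number of queued events times the fixed metadata length.
 */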
static long fanotify_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
	struct fsnotify_group *group;
	struct fsnotify_event_holder *holder;
	void __user *p;
	int ret = -ENOTTY;
	size_t send_len = 0;

	group = file->private_data;

	p = (void __user *) arg;

	switch (cmd) {
	case FIONREAD:
		mutex_lock(&group->notification_mutex);
		list_for_each_entry(holder, &group->notification_list, event_list)
			send_len += FAN_EVENT_METADATA_LEN;
		mutex_unlock(&group->notification_mutex);
		ret = put_user(send_len, (int __user *) p);
		break;
	}

	return ret;
}

static const struct file_operations fanotify_fops = {
	.poll		= fanotify_poll,
	.read		= fanotify_read,
	.write		= fanotify_write,
	.fasync		= NULL,
	.release	= fanotify_release,
	.unlocked_ioctl	= fanotify_ioctl,
	.compat_ioctl	= fanotify_ioctl,
};

static void fanotify_free_mark(struct fsnotify_mark *fsn_mark)
{
	kmem_cache_free(fanotify_mark_cache, fsn_mark);
}

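/*
 * Resolve the dfd/pathname pair from fanotify_mark() to a struct path,
 * honouring FAN_MARK_DONT_FOLLOW and FAN_MARK_ONLYDIR.  On success the
 * caller must drop the reference with path_put().
 */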
static int fanotify_find_path(int dfd, const char __user *filename,
			      struct path *path, unsigned int flags)
{
	int ret;

	pr_debug("%s: dfd=%d filename=%p flags=%x\n", __func__,
		 dfd, filename, flags);

	if (filename == NULL) {
		struct file *file;
		int fput_needed;

		ret = -EBADF;
		file = fget_light(dfd, &fput_needed);
		if (!file)
			goto out;

		ret = -ENOTDIR;
		if ((flags & FAN_MARK_ONLYDIR) &&
		    !(S_ISDIR(file->f_path.dentry->d_inode->i_mode))) {
			fput_light(file, fput_needed);
			goto out;
		}

		*path = file->f_path;
		path_get(path);
		fput_light(file, fput_needed);
	} else {
		unsigned int lookup_flags = 0;

		if (!(flags & FAN_MARK_DONT_FOLLOW))
			lookup_flags |= LOOKUP_FOLLOW;
		if (flags & FAN_MARK_ONLYDIR)
			lookup_flags |= LOOKUP_DIRECTORY;

		ret = user_path_at(dfd, filename, lookup_flags, path);
		if (ret)
			goto out;
	}

	/* you can only watch an inode if you have read permissions on it */
	ret = inode_permission(path->dentry->d_inode, MAY_READ);
	if (ret)
		path_put(path);
out:
	return ret;
}

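/*
 * Clear @mask bits from a mark's event mask (or ignored mask, with
 * FAN_MARK_IGNORED_MASK) and destroy the mark entirely once no bits
 * remain.  Returns the bits that were actually cleared.
 */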
static __u32 fanotify_mark_remove_from_mask(struct fsnotify_mark *fsn_mark,
					    __u32 mask,
					    unsigned int flags)
{
	__u32 oldmask;

	spin_lock(&fsn_mark->lock);
	if (!(flags & FAN_MARK_IGNORED_MASK)) {
		oldmask = fsn_mark->mask;
		fsnotify_set_mark_mask_locked(fsn_mark, (oldmask & ~mask));
	} else {
		oldmask = fsn_mark->ignored_mask;
		fsnotify_set_mark_ignored_mask_locked(fsn_mark, (oldmask & ~mask));
	}
	spin_unlock(&fsn_mark->lock);

	if (!(oldmask & ~mask))
		fsnotify_destroy_mark(fsn_mark);

	return mask & oldmask;
}

static int fanotify_remove_vfsmount_mark(struct fsnotify_group *group,
					 struct vfsmount *mnt, __u32 mask,
					 unsigned int flags)
{
	struct fsnotify_mark *fsn_mark = NULL;
	__u32 removed;

	fsn_mark = fsnotify_find_vfsmount_mark(group, mnt);
	if (!fsn_mark)
		return -ENOENT;

	removed = fanotify_mark_remove_from_mask(fsn_mark, mask, flags);
	fsnotify_put_mark(fsn_mark);
	if (removed & mnt->mnt_fsnotify_mask)
		fsnotify_recalc_vfsmount_mask(mnt);

	return 0;
}

static int fanotify_remove_inode_mark(struct fsnotify_group *group,
				      struct inode *inode, __u32 mask,
				      unsigned int flags)
{
	struct fsnotify_mark *fsn_mark = NULL;
	__u32 removed;

	fsn_mark = fsnotify_find_inode_mark(group, inode);
	if (!fsn_mark)
		return -ENOENT;

	removed = fanotify_mark_remove_from_mask(fsn_mark, mask, flags);
	/* matches the fsnotify_find_inode_mark() */
	fsnotify_put_mark(fsn_mark);
	if (removed & inode->i_fsnotify_mask)
		fsnotify_recalc_inode_mask(inode);

	return 0;
}

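/*
 * OR @mask bits into a mark's event mask (or ignored mask).  Returns
 * the bits that were newly set, so callers know whether the object's
 * aggregate mask must be recalculated.
 */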
static __u32 fanotify_mark_add_to_mask(struct fsnotify_mark *fsn_mark,
				       __u32 mask,
				       unsigned int flags)
{
	__u32 oldmask;

	spin_lock(&fsn_mark->lock);
	if (!(flags & FAN_MARK_IGNORED_MASK)) {
		oldmask = fsn_mark->mask;
		fsnotify_set_mark_mask_locked(fsn_mark, (oldmask | mask));
	} else {
		oldmask = fsn_mark->ignored_mask;
		fsnotify_set_mark_ignored_mask_locked(fsn_mark, (oldmask | mask));
		if (flags & FAN_MARK_IGNORED_SURV_MODIFY)
			fsn_mark->flags |= FSNOTIFY_MARK_FLAG_IGNORED_SURV_MODIFY;
	}
	spin_unlock(&fsn_mark->lock);

	return mask & ~oldmask;
}

static int fanotify_add_vfsmount_mark(struct fsnotify_group *group,
				      struct vfsmount *mnt, __u32 mask,
				      unsigned int flags)
{
	struct fsnotify_mark *fsn_mark;
	__u32 added;

	fsn_mark = fsnotify_find_vfsmount_mark(group, mnt);
	if (!fsn_mark) {
		int ret;

		fsn_mark = kmem_cache_alloc(fanotify_mark_cache, GFP_KERNEL);
		if (!fsn_mark)
			return -ENOMEM;

		fsnotify_init_mark(fsn_mark, fanotify_free_mark);
		ret = fsnotify_add_mark(fsn_mark, group, NULL, mnt, 0);
		if (ret) {
			fanotify_free_mark(fsn_mark);
			return ret;
		}
	}
	added = fanotify_mark_add_to_mask(fsn_mark, mask, flags);
	fsnotify_put_mark(fsn_mark);
	if (added & ~mnt->mnt_fsnotify_mask)
		fsnotify_recalc_vfsmount_mask(mnt);

	return 0;
}

static int fanotify_add_inode_mark(struct fsnotify_group *group,
				   struct inode *inode, __u32 mask,
				   unsigned int flags)
{
	struct fsnotify_mark *fsn_mark;
	__u32 added;

	pr_debug("%s: group=%p inode=%p\n", __func__, group, inode);

	fsn_mark = fsnotify_find_inode_mark(group, inode);
	if (!fsn_mark) {
		int ret;

		fsn_mark = kmem_cache_alloc(fanotify_mark_cache, GFP_KERNEL);
		if (!fsn_mark)
			return -ENOMEM;

		fsnotify_init_mark(fsn_mark, fanotify_free_mark);
		ret = fsnotify_add_mark(fsn_mark, group, inode, NULL, 0);
		if (ret) {
			fanotify_free_mark(fsn_mark);
			return ret;
		}
	}
	added = fanotify_mark_add_to_mask(fsn_mark, mask, flags);
	fsnotify_put_mark(fsn_mark);
	if (added & ~inode->i_fsnotify_mask)
		fsnotify_recalc_inode_mask(inode);
	return 0;
}

/* fanotify syscalls */
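/*
 * fanotify_init() creates a new notification group and returns a file
 * descriptor for it.  @flags selects group-wide behaviour (FAN_CLOEXEC,
 * FAN_NONBLOCK); @event_f_flags become the f_flags of every fd created
 * for an event.  CAP_SYS_ADMIN is required.
 */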
SYSCALL_DEFINE2(fanotify_init, unsigned int, flags, unsigned int, event_f_flags)
{
	struct fsnotify_group *group;
	int f_flags, fd;

	pr_debug("%s: flags=%d event_f_flags=%d\n",
		__func__, flags, event_f_flags);

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (flags & ~FAN_ALL_INIT_FLAGS)
		return -EINVAL;

	f_flags = O_RDWR | FMODE_NONOTIFY;
	if (flags & FAN_CLOEXEC)
		f_flags |= O_CLOEXEC;
	if (flags & FAN_NONBLOCK)
		f_flags |= O_NONBLOCK;

	/* fsnotify_alloc_group takes a ref.  Dropped in fanotify_release */
	group = fsnotify_alloc_group(&fanotify_fsnotify_ops);
	if (IS_ERR(group))
		return PTR_ERR(group);

	group->fanotify_data.f_flags = event_f_flags;
#ifdef CONFIG_FANOTIFY_ACCESS_PERMISSIONS
	mutex_init(&group->fanotify_data.access_mutex);
	init_waitqueue_head(&group->fanotify_data.access_waitq);
	INIT_LIST_HEAD(&group->fanotify_data.access_list);
#endif

	fd = anon_inode_getfd("[fanotify]", &fanotify_fops, group, f_flags);
	if (fd < 0)
		goto out_put_group;

	return fd;

out_put_group:
	fsnotify_put_group(group);
	return fd;
}

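/*
 * fanotify_mark() adds, removes or flushes marks on the inode or mount
 * (with FAN_MARK_MOUNT) named by dfd/pathname, adjusting which events
 * the group receives.
 */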
SYSCALL_DEFINE(fanotify_mark)(int fanotify_fd, unsigned int flags,
			      __u64 mask, int dfd,
			      const char __user *pathname)
{
	struct inode *inode = NULL;
	struct vfsmount *mnt = NULL;
	struct fsnotify_group *group;
	struct file *filp;
	struct path path;
	int ret, fput_needed;

	pr_debug("%s: fanotify_fd=%d flags=%x dfd=%d pathname=%p mask=%llx\n",
		 __func__, fanotify_fd, flags, dfd, pathname, mask);

	/* we only use the lower 32 bits as of right now. */
	if (mask & ((__u64)0xffffffff << 32))
		return -EINVAL;

	if (flags & ~FAN_ALL_MARK_FLAGS)
		return -EINVAL;
	switch (flags & (FAN_MARK_ADD | FAN_MARK_REMOVE | FAN_MARK_FLUSH)) {
	case FAN_MARK_ADD:
	case FAN_MARK_REMOVE:
	case FAN_MARK_FLUSH:
		break;
	default:
		return -EINVAL;
	}
#ifdef CONFIG_FANOTIFY_ACCESS_PERMISSIONS
	if (mask & ~(FAN_ALL_EVENTS | FAN_ALL_PERM_EVENTS | FAN_EVENT_ON_CHILD))
#else
	if (mask & ~(FAN_ALL_EVENTS | FAN_EVENT_ON_CHILD))
#endif
		return -EINVAL;

	filp = fget_light(fanotify_fd, &fput_needed);
	if (unlikely(!filp))
		return -EBADF;

	/* verify that this is indeed an fanotify instance */
	ret = -EINVAL;
	if (unlikely(filp->f_op != &fanotify_fops))
		goto fput_and_out;

	ret = fanotify_find_path(dfd, pathname, &path, flags);
	if (ret)
		goto fput_and_out;

	/* inode held in place by reference to path; group by fget on fd */
	if (!(flags & FAN_MARK_MOUNT))
		inode = path.dentry->d_inode;
	else
		mnt = path.mnt;
	group = filp->private_data;

	/* create/update/remove an inode or vfsmount mark */
	switch (flags & (FAN_MARK_ADD | FAN_MARK_REMOVE | FAN_MARK_FLUSH)) {
	case FAN_MARK_ADD:
		if (flags & FAN_MARK_MOUNT)
			ret = fanotify_add_vfsmount_mark(group, mnt, mask, flags);
		else
			ret = fanotify_add_inode_mark(group, inode, mask, flags);
		break;
	case FAN_MARK_REMOVE:
		if (flags & FAN_MARK_MOUNT)
			ret = fanotify_remove_vfsmount_mark(group, mnt, mask, flags);
		else
			ret = fanotify_remove_inode_mark(group, inode, mask, flags);
		break;
	case FAN_MARK_FLUSH:
		if (flags & FAN_MARK_MOUNT)
			fsnotify_clear_vfsmount_marks_by_group(group);
		else
			fsnotify_clear_inode_marks_by_group(group);
		break;
	default:
		ret = -EINVAL;
	}

	path_put(&path);
fput_and_out:
	fput_light(filp, fput_needed);
	return ret;
}

#ifdef CONFIG_HAVE_SYSCALL_WRAPPERS
asmlinkage long SyS_fanotify_mark(long fanotify_fd, long flags, __u64 mask,
				  long dfd, long pathname)
{
	return SYSC_fanotify_mark((int) fanotify_fd, (unsigned int) flags,
				  mask, (int) dfd,
				  (const char __user *) pathname);
}
SYSCALL_ALIAS(sys_fanotify_mark, SyS_fanotify_mark);
#endif
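
/*
 * Example use from userspace (a minimal sketch, not part of this file;
 * assumes the glibc wrappers fanotify_init(2)/fanotify_mark(2) and the
 * FAN_EVENT_OK/FAN_EVENT_NEXT helpers from <linux/fanotify.h> are
 * available):
 *
 *	#include <fcntl.h>
 *	#include <stdio.h>
 *	#include <sys/fanotify.h>
 *	#include <unistd.h>
 *
 *	int main(void)
 *	{
 *		char buf[4096];
 *		ssize_t len;
 *		int fd = fanotify_init(FAN_CLOEXEC, O_RDONLY);
 *
 *		if (fd < 0)
 *			return 1;
 *		if (fanotify_mark(fd, FAN_MARK_ADD | FAN_MARK_MOUNT,
 *				  FAN_OPEN | FAN_CLOSE, AT_FDCWD, "/") < 0)
 *			return 1;
 *		while ((len = read(fd, buf, sizeof(buf))) > 0) {
 *			struct fanotify_event_metadata *m = (void *)buf;
 *
 *			while (FAN_EVENT_OK(m, len)) {
 *				printf("mask=%llx pid=%d fd=%d\n",
 *				       (unsigned long long)m->mask,
 *				       (int)m->pid, m->fd);
 *				if (m->fd >= 0)
 *					close(m->fd);
 *				m = FAN_EVENT_NEXT(m, len);
 *			}
 *		}
 *		return 0;
 *	}
 */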

/*
 * fanotify_user_setup - Our initialization function.  Note that we cannot return
 * error because we have compiled-in VFS hooks.  So an (unlikely) failure here
 * must result in panic().
 */
static int __init fanotify_user_setup(void)
{
	fanotify_mark_cache = KMEM_CACHE(fsnotify_mark, SLAB_PANIC);
	fanotify_response_event_cache = KMEM_CACHE(fanotify_response_event,
						   SLAB_PANIC);

	return 0;
}
device_initcall(fanotify_user_setup);