/*
 *  Copyright (C) 2008 Red Hat, Inc., Eric Paris <eparis@redhat.com>
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2, or (at your option)
 *  any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; see the file COPYING.  If not, write to
 *  the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
 */

#include <linux/fs.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/spinlock.h>
#include <linux/writeback.h> /* for inode_lock */

#include <asm/atomic.h>

#include <linux/fsnotify_backend.h>
#include "fsnotify.h"

/*
 * Recalculate the mask of events relevant to a given inode.  The caller
 * must already hold inode->i_lock.
 */
static void fsnotify_recalc_inode_mask_locked(struct inode *inode)
{
	struct fsnotify_mark *mark;
	struct hlist_node *pos;
	__u32 new_mask = 0;

	assert_spin_locked(&inode->i_lock);

	hlist_for_each_entry(mark, pos, &inode->i_fsnotify_marks, i.i_list)
		new_mask |= mark->mask;
	inode->i_fsnotify_mask = new_mask;
}

/*
 * Recalculate inode->i_fsnotify_mask, the union of all FS_* event types
 * that any group is interested in receiving for this inode.
 */
void fsnotify_recalc_inode_mask(struct inode *inode)
{
	spin_lock(&inode->i_lock);
	fsnotify_recalc_inode_mask_locked(inode);
	spin_unlock(&inode->i_lock);

	__fsnotify_update_child_dentry_flags(inode);
}

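/*
 * Detach a mark from its inode: unhook it from inode->i_fsnotify_marks and
 * bring the inode's event mask back up to date.  The caller must hold both
 * mark->lock and the owning group's mark_lock, as asserted below.
 */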
void fsnotify_destroy_inode_mark(struct fsnotify_mark *mark)
{
	struct inode *inode = mark->i.inode;

	assert_spin_locked(&mark->lock);
	assert_spin_locked(&mark->group->mark_lock);

	spin_lock(&inode->i_lock);

	hlist_del_init_rcu(&mark->i.i_list);
	mark->i.inode = NULL;

	/*
	 * this mark is now off the inode->i_fsnotify_marks list and we
	 * hold the inode->i_lock, so this is the perfect time to update the
	 * inode->i_fsnotify_mask
	 */
	fsnotify_recalc_inode_mask_locked(inode);

	spin_unlock(&inode->i_lock);
}

/*
 * Given an inode, destroy all of the marks associated with that inode.
 */
void fsnotify_clear_marks_by_inode(struct inode *inode)
{
	struct fsnotify_mark *mark, *lmark;
	struct hlist_node *pos, *n;
	LIST_HEAD(free_list);

	spin_lock(&inode->i_lock);
	hlist_for_each_entry_safe(mark, pos, n, &inode->i_fsnotify_marks, i.i_list) {
		list_add(&mark->i.free_i_list, &free_list);
		hlist_del_init_rcu(&mark->i.i_list);
		fsnotify_get_mark(mark);
	}
	spin_unlock(&inode->i_lock);

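	/*
	 * The reference taken above keeps each mark alive now that
	 * inode->i_lock is dropped.  fsnotify_destroy_mark() must not be
	 * called under i_lock: it takes mark->lock and the group's
	 * mark_lock, which nest outside i_lock in the lock order.
	 */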
	list_for_each_entry_safe(mark, lmark, &free_list, i.free_i_list) {
		fsnotify_destroy_mark(mark);
		fsnotify_put_mark(mark);
	}
}

/*
 * Given a group, clear all of the inode marks associated with that group.
 */
void fsnotify_clear_inode_marks_by_group(struct fsnotify_group *group)
{
	fsnotify_clear_marks_by_group_flags(group, FSNOTIFY_MARK_FLAG_INODE);
}

/*
 * Given a group and inode, find the mark associated with that combination.
 * If found, take a reference to the mark and return it; otherwise return NULL.
 */
struct fsnotify_mark *fsnotify_find_inode_mark_locked(struct fsnotify_group *group,
						      struct inode *inode)
{
	struct fsnotify_mark *mark;
	struct hlist_node *pos;

	assert_spin_locked(&inode->i_lock);

	hlist_for_each_entry(mark, pos, &inode->i_fsnotify_marks, i.i_list) {
		if (mark->group == group) {
			fsnotify_get_mark(mark);
			return mark;
		}
	}
	return NULL;
}

/*
 * Given a group and inode, find the mark associated with that combination.
 * Like fsnotify_find_inode_mark_locked() above, but takes inode->i_lock
 * itself.  If found, take a reference to the mark and return it; otherwise
 * return NULL.
 */
struct fsnotify_mark *fsnotify_find_inode_mark(struct fsnotify_group *group,
					       struct inode *inode)
{
	struct fsnotify_mark *mark;

	spin_lock(&inode->i_lock);
	mark = fsnotify_find_inode_mark_locked(group, inode);
	spin_unlock(&inode->i_lock);

	return mark;
}

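/*
 * Illustrative caller pattern (a sketch, not an in-tree user): the
 * reference taken by fsnotify_find_inode_mark() must be dropped with
 * fsnotify_put_mark() once the caller is done with the mark:
 *
 *	mark = fsnotify_find_inode_mark(group, inode);
 *	if (mark) {
 *		... inspect or update the mark ...
 *		fsnotify_put_mark(mark);
 *	}
 */
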
/*
 * If we are setting a mark mask on an inode mark, we should pin the inode
 * in memory.
 */
void fsnotify_set_inode_mark_mask_locked(struct fsnotify_mark *mark,
					 __u32 mask)
{
	struct inode *inode;

	assert_spin_locked(&mark->lock);

	if (mask &&
	    mark->i.inode &&
	    !(mark->flags & FSNOTIFY_MARK_FLAG_OBJECT_PINNED)) {
		mark->flags |= FSNOTIFY_MARK_FLAG_OBJECT_PINNED;
		inode = igrab(mark->i.inode);
		/*
		 * We should not be able to get here unless the inode is
		 * already safely held in memory, but BUG in case that
		 * assumption is ever wrong.  The reference taken here is
		 * dropped again when the mark is destroyed.
		 */
		BUG_ON(!inode);
	}
}

/*
 * Attach an initialized mark to a given inode.
 * The fsnotify backend uses these marks to determine which event types
 * should be delivered to which group for which inodes.  The marks are
 * kept ordered by the owning group's address in memory.
 */
int fsnotify_add_inode_mark(struct fsnotify_mark *mark,
			    struct fsnotify_group *group, struct inode *inode,
			    int allow_dups)
{
	struct fsnotify_mark *lmark;
	struct hlist_node *node, *last = NULL;
	int ret = 0;

	mark->flags |= FSNOTIFY_MARK_FLAG_INODE;

	assert_spin_locked(&mark->lock);
	assert_spin_locked(&group->mark_lock);

	spin_lock(&inode->i_lock);

	mark->i.inode = inode;

	/* is mark the first mark? */
	if (hlist_empty(&inode->i_fsnotify_marks)) {
		hlist_add_head_rcu(&mark->i.i_list, &inode->i_fsnotify_marks);
		goto out;
	}

	/* should mark be in the middle of the current list? */
	hlist_for_each_entry(lmark, node, &inode->i_fsnotify_marks, i.i_list) {
		last = node;

		if ((lmark->group == group) && !allow_dups) {
			ret = -EEXIST;
			goto out;
		}

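		/* keep the list sorted with the highest group address first */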
		if (mark->group < lmark->group)
			continue;

		hlist_add_before_rcu(&mark->i.i_list, &lmark->i.i_list);
		goto out;
	}

	BUG_ON(last == NULL);
	/* mark should be the last entry.  last is the current last entry */
	hlist_add_after_rcu(last, &mark->i.i_list);
out:
	fsnotify_recalc_inode_mask_locked(inode);
	spin_unlock(&inode->i_lock);

	return ret;
}

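/*
 * Sketch of the locking a caller must provide, derived from the assertions
 * in fsnotify_add_inode_mark() above (the real caller lives in the generic
 * mark code):
 *
 *	spin_lock(&mark->lock);
 *	spin_lock(&group->mark_lock);
 *	ret = fsnotify_add_inode_mark(mark, group, inode, allow_dups);
 *	spin_unlock(&group->mark_lock);
 *	spin_unlock(&mark->lock);
 */
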
/**
 * fsnotify_unmount_inodes - an sb is unmounting.  handle any watched inodes.
 * @list: list of inodes being unmounted (sb->s_inodes)
 *
 * Called with inode_lock held, protecting the unmounting super block's list
 * of inodes, and with iprune_mutex held, keeping shrink_icache_memory() at bay.
 * We temporarily drop inode_lock, however, and CAN block.
 */
void fsnotify_unmount_inodes(struct list_head *list)
{
	struct inode *inode, *next_i, *need_iput = NULL;

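	/*
	 * need_iput carries an extra reference, taken on a previous
	 * iteration, that keeps the next list entry from being freed while
	 * we walk to it; the reference is dropped one iteration later.
	 */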
	list_for_each_entry_safe(inode, next_i, list, i_sb_list) {
		struct inode *need_iput_tmp;

		/*
		 * We cannot __iget() an inode in state I_FREEING,
		 * I_WILL_FREE, or I_NEW, which is fine because by that point
		 * the inode cannot have any associated watches.
		 */
		if (inode->i_state & (I_FREEING|I_WILL_FREE|I_NEW))
			continue;

		/*
		 * If i_count is zero, the inode cannot have any watches, and
		 * doing an __iget/iput with MS_ACTIVE clear would actually
		 * evict all inodes with zero i_count from the icache, which
		 * is unnecessarily violent and may in fact be illegal to do.
		 */
		if (!atomic_read(&inode->i_count))
			continue;

		need_iput_tmp = need_iput;
		need_iput = NULL;

		/* In case fsnotify_inode_delete() drops a reference. */
		if (inode != need_iput_tmp)
			__iget(inode);
		else
			need_iput_tmp = NULL;

		/* In case the dropping of a reference would nuke next_i. */
		if ((&next_i->i_sb_list != list) &&
		    atomic_read(&next_i->i_count) &&
		    !(next_i->i_state & (I_FREEING | I_WILL_FREE))) {
			__iget(next_i);
			need_iput = next_i;
		}

		/*
		 * We can safely drop inode_lock here because we hold
		 * references on both inode and next_i.  Also no new inodes
		 * will be added since the umount has begun.  Finally,
		 * iprune_mutex keeps shrink_icache_memory() away.
		 */
		spin_unlock(&inode_lock);

		if (need_iput_tmp)
			iput(need_iput_tmp);

		/* for each watch, send FS_UNMOUNT and then remove it */
		fsnotify(inode, FS_UNMOUNT, inode, FSNOTIFY_EVENT_INODE, NULL, 0);

		fsnotify_inode_delete(inode);

		iput(inode);

		spin_lock(&inode_lock);
	}
}

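/*
 * Sketch of the expected calling context, derived from the comment on
 * fsnotify_unmount_inodes() above (the real call site is in the VFS
 * unmount path):
 *
 *	mutex_lock(&iprune_mutex);
 *	spin_lock(&inode_lock);
 *	fsnotify_unmount_inodes(&sb->s_inodes);
 *	spin_unlock(&inode_lock);
 *	mutex_unlock(&iprune_mutex);
 */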