/*
 * Copyright (c) 2002, 2007 Red Hat, Inc. All rights reserved.
 *
 * This software may be freely redistributed under the terms of the
 * GNU General Public License.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 *
 * Authors: David Woodhouse <dwmw2@infradead.org>
 *          David Howells <dhowells@redhat.com>
 *
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/circ_buf.h>
#include <linux/sched.h>
#include "internal.h"

/*
 * Handle invalidation of an mmap'd file.  We invalidate all the PTEs referring
 * to the pages in this file's pagecache, forcing the kernel to go through
 * ->fault() or ->page_mkwrite() - at which point we can handle invalidation
 * more fully.
 */
void afs_invalidate_mmap_work(struct work_struct *work)
{
	struct afs_vnode *vnode = container_of(work, struct afs_vnode, cb_work);

	unmap_mapping_pages(vnode->netfs.inode.i_mapping, 0, 0, false);
}

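/*
 * Clear the callback promises of all the vnodes in a volume that have files
 * mmapped and that haven't yet seen the volume's current callback-break
 * counter, and queue work to zap their PTE mappings.
 */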
static void afs_volume_init_callback(struct afs_volume *volume)
{
	struct afs_vnode *vnode;

	down_read(&volume->open_mmaps_lock);

	list_for_each_entry(vnode, &volume->open_mmaps, cb_mmap_link) {
		if (vnode->cb_v_check != atomic_read(&volume->cb_v_break)) {
			atomic64_set(&vnode->cb_expires_at, AFS_NO_CB_PROMISE);
			queue_work(system_unbound_wq, &vnode->cb_work);
		}
	}

	up_read(&volume->open_mmaps_lock);
}

/*
 * Allow the fileserver to request callback state (re-)initialisation.
 * Unfortunately, UUIDs are not guaranteed unique.
 */
void afs_init_callback_state(struct afs_server *server)
{
	struct afs_server_entry *se;

	down_read(&server->cell->vs_lock);

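	/* Mark all of the volume promises made by this server as expired;
	 * volumes with mmapped files are handled via afs_volume_init_callback().
	 */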
	list_for_each_entry(se, &server->volumes, slink) {
		se->cb_expires_at = AFS_NO_CB_PROMISE;
		se->volume->cb_expires_at = AFS_NO_CB_PROMISE;
		trace_afs_cb_v_break(se->volume->vid, atomic_read(&se->volume->cb_v_break),
				     afs_cb_break_for_s_reinit);
		if (!list_empty(&se->volume->open_mmaps))
			afs_volume_init_callback(se->volume);
	}

	up_read(&server->cell->vs_lock);
}

/*
 * actually break a callback
 */
void __afs_break_callback(struct afs_vnode *vnode, enum afs_cb_break_reason reason)
{
	_enter("");

	clear_bit(AFS_VNODE_NEW_CONTENT, &vnode->flags);
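	/* Only count this as a break if there was an outstanding promise to
	 * revoke; otherwise just trace the attempt.
	 */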
	if (atomic64_xchg(&vnode->cb_expires_at, AFS_NO_CB_PROMISE) != AFS_NO_CB_PROMISE) {
		vnode->cb_break++;
		vnode->cb_v_check = atomic_read(&vnode->volume->cb_v_break);
		afs_clear_permits(vnode);

		if (vnode->lock_state == AFS_VNODE_LOCK_WAITING_FOR_CB)
			afs_lock_may_be_available(vnode);

		if (reason != afs_cb_break_for_deleted &&
		    vnode->status.type == AFS_FTYPE_FILE &&
		    atomic_read(&vnode->cb_nr_mmap))
			queue_work(system_unbound_wq, &vnode->cb_work);

		trace_afs_cb_break(&vnode->fid, vnode->cb_break, reason, true);
	} else {
		trace_afs_cb_break(&vnode->fid, vnode->cb_break, reason, false);
	}
}

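/*
 * Break a vnode's callback promise, taking its cb_lock to serialise the
 * change against other users of the callback state.
 */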
void afs_break_callback(struct afs_vnode *vnode, enum afs_cb_break_reason reason)
{
	write_seqlock(&vnode->cb_lock);
	__afs_break_callback(vnode, reason);
	write_sequnlock(&vnode->cb_lock);
}

/*
 * Look up a volume by volume ID under RCU conditions.
 */
static struct afs_volume *afs_lookup_volume_rcu(struct afs_cell *cell,
						afs_volid_t vid)
{
	struct afs_volume *volume = NULL;
	struct rb_node *p;
	int seq = 1;

	for (;;) {
		/* Unfortunately, rbtree walking doesn't give reliable results
		 * under just the RCU read lock, so we have to check for
		 * changes.
		 */
		seq++; /* 2 on the 1st/lockless path, otherwise odd */
		read_seqbegin_or_lock(&cell->volume_lock, &seq);

		p = rcu_dereference_raw(cell->volumes.rb_node);
		while (p) {
			volume = rb_entry(p, struct afs_volume, cell_node);

			if (volume->vid < vid)
				p = rcu_dereference_raw(p->rb_left);
			else if (volume->vid > vid)
				p = rcu_dereference_raw(p->rb_right);
			else
				break;
			volume = NULL;
		}

		if (volume && afs_try_get_volume(volume, afs_volume_trace_get_callback))
			break;
		if (!need_seqretry(&cell->volume_lock, seq))
			break;
	}

	done_seqretry(&cell->volume_lock, seq);
	return volume;
}

/*
 * Allow the fileserver to break callbacks at the volume-level.  This is
 * typically done when, for example, a R/W volume is snapshotted to a R/O
 * volume (the only way to change an R/O volume).  It may also, however, happen
 * when a volserver takes control of a volume (offlining it, moving it, etc.).
 *
 * Every file in that volume will need to be reevaluated.
 */
static void afs_break_volume_callback(struct afs_server *server,
				      struct afs_volume *volume)
	__releases(RCU)
{
	struct afs_server_list *slist = rcu_dereference(volume->servers);
	unsigned int i, cb_v_break;

	write_lock(&volume->cb_v_break_lock);

	for (i = 0; i < slist->nr_servers; i++)
		if (slist->servers[i].server == server)
			slist->servers[i].cb_expires_at = AFS_NO_CB_PROMISE;
	volume->cb_expires_at = AFS_NO_CB_PROMISE;

	cb_v_break = atomic_inc_return_release(&volume->cb_v_break);
	trace_afs_cb_v_break(volume->vid, cb_v_break, afs_cb_break_for_volume_callback);

	write_unlock(&volume->cb_v_break_lock);
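	/* Pairs with the rcu_read_lock() taken in afs_break_some_callbacks()
	 * (see the __releases annotation above).
	 */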
	rcu_read_unlock();

	if (!list_empty(&volume->open_mmaps))
		afs_volume_init_callback(volume);
}

/*
 * allow the fileserver to explicitly break one callback
 * - happens when
 *   - the backing file is changed
 *   - a lock is released
 */
static void afs_break_one_callback(struct afs_server *server,
				   struct afs_volume *volume,
				   struct afs_fid *fid)
{
	struct super_block *sb;
	struct afs_vnode *vnode;
	struct inode *inode;

	/* See if we can find a matching inode - even an I_NEW inode needs to
	 * be marked as it can have its callback broken before we finish
	 * setting up the local inode.
	 */
	sb = rcu_dereference(volume->sb);
	if (!sb)
		return;

	inode = find_inode_rcu(sb, fid->vnode, afs_ilookup5_test_by_fid, fid);
	if (inode) {
		vnode = AFS_FS_I(inode);
		afs_break_callback(vnode, afs_cb_break_for_callback);
	} else {
		trace_afs_cb_miss(fid, afs_cb_break_for_callback);
	}
}

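/*
 * Process a batch of callback-break requests, handling all the entries that
 * refer to the same volume as the first entry and compacting the remainder
 * to the front of the array for the next pass.
 */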
static void afs_break_some_callbacks(struct afs_server *server,
				     struct afs_callback_break *cbb,
				     size_t *_count)
{
	struct afs_callback_break *residue = cbb;
	struct afs_volume *volume;
	afs_volid_t vid = cbb->fid.vid;
	size_t i;

	rcu_read_lock();
	volume = afs_lookup_volume_rcu(server->cell, vid);
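	/* An FID with a zero vnode ID and uniquifier denotes a break that
	 * applies to the whole volume.
	 */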
	if (cbb->fid.vnode == 0 && cbb->fid.unique == 0) {
		afs_break_volume_callback(server, volume);
		*_count -= 1;
		if (*_count)
			memmove(cbb, cbb + 1, sizeof(*cbb) * *_count);
	} else {
		/* TODO: Find all matching volumes if we couldn't match the server and
		 * break them anyway.
		 */

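		/* Break every entry for this volume; entries for other volumes
		 * are retained at the front of the array for a later pass.
		 */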
		for (i = *_count; i > 0; cbb++, i--) {
			if (cbb->fid.vid == vid) {
				_debug("- Fid { vl=%08llx n=%llu u=%u }",
				       cbb->fid.vid,
				       cbb->fid.vnode,
				       cbb->fid.unique);
				--*_count;
				if (volume)
					afs_break_one_callback(server, volume, &cbb->fid);
			} else {
				*residue++ = *cbb;
			}
		}
		rcu_read_unlock();
	}

	afs_put_volume(volume, afs_volume_trace_put_callback);
}

/*
 * allow the fileserver to break callback promises
 */
void afs_break_callbacks(struct afs_server *server, size_t count,
			 struct afs_callback_break *callbacks)
{
	_enter("%p,%zu,", server, count);

	ASSERT(server != NULL);

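	/* Each pass consumes all the entries for one volume and moves the
	 * remainder to the front of the array.
	 */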
	while (count > 0)
		afs_break_some_callbacks(server, callbacks, &count);
}