/*
 * AFS callback-promise management: handle the fileserver breaking callback
 * promises, and give promises back to the server when vnodes are discarded.
 *
 * Copyright (c) 2002, 2007 Red Hat, Inc. All rights reserved.
 *
 * This software may be freely redistributed under the terms of the
 * GNU General Public License.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 *
 * Authors: David Woodhouse <dwmw2@infradead.org>
 *          David Howells <dhowells@redhat.com>
 *
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/circ_buf.h>
#include <linux/sched.h>
#include "internal.h"

/* Free slots remaining in a server's circular ring of deferred callback
 * breaks (cb_break[] is a power-of-two sized array, as CIRC_SPACE requires).
 */
#define afs_breakring_space(server) \
	CIRC_SPACE((server)->cb_break_head, (server)->cb_break_tail,	\
		   ARRAY_SIZE((server)->cb_break))

/* Single-threaded workqueue on which both per-vnode broken-callback work and
 * per-server deferred give-up work are queued. */
static struct workqueue_struct *afs_callback_update_worker;

/*
 * allow the fileserver to request callback state (re-)initialisation
 *
 * Drops every callback promise recorded against @server: each vnode is
 * removed from the server's cb_promises rbtree and marked unpromised.
 * Serialised against other promise manipulation by server->cb_lock.
 */
void afs_init_callback_state(struct afs_server *server)
{
	struct afs_vnode *vnode;

	_enter("{%p}", server);

	spin_lock(&server->cb_lock);

	/* kill all the promises on record from this server */
	while (!RB_EMPTY_ROOT(&server->cb_promises)) {
		vnode = rb_entry(server->cb_promises.rb_node,
				 struct afs_vnode, cb_promise);
		_debug("UNPROMISE { vid=%x:%u uq=%u}",
		       vnode->fid.vid, vnode->fid.vnode, vnode->fid.unique);
		rb_erase(&vnode->cb_promise, &server->cb_promises);
		vnode->cb_promised = false;
	}

	spin_unlock(&server->cb_lock);
	_leave("");
}

/*
 * handle the data invalidation side of a callback being broken
 *
 * Runs on afs_callback_update_worker.  Refetches the vnode's status from the
 * server and, if the data version changed (ZAP_DATA got set), discards the
 * cached data.  validate_lock ensures only one context revalidates at a time.
 */
void afs_broken_callback_work(struct work_struct *work)
{
	struct afs_vnode *vnode =
		container_of(work, struct afs_vnode, cb_broken_work);

	_enter("");

	/* nothing to revalidate if the server deleted the file */
	if (test_bit(AFS_VNODE_DELETED, &vnode->flags))
		return;

	/* we're only interested in dealing with a broken callback on *this*
	 * vnode and only if no-one else has dealt with it yet */
	if (!mutex_trylock(&vnode->validate_lock))
		return; /* someone else is dealing with it */

	if (test_bit(AFS_VNODE_CB_BROKEN, &vnode->flags)) {
		/* a directory's cached permission set may be stale now */
		if (S_ISDIR(vnode->vfs_inode.i_mode))
			afs_clear_permits(vnode);

		/* refetch status; on error just give up - a later break or
		 * lookup will retry (NOTE(review): presumably fetch_status
		 * clears CB_BROKEN on success - confirm in internal.h) */
		if (afs_vnode_fetch_status(vnode, NULL, NULL) < 0)
			goto out;

		if (test_bit(AFS_VNODE_DELETED, &vnode->flags))
			goto out;

		/* if the vnode's data version number changed then its contents
		 * are different */
		if (test_and_clear_bit(AFS_VNODE_ZAP_DATA, &vnode->flags))
			afs_zap_data(vnode);
	}

out:
	mutex_unlock(&vnode->validate_lock);

	/* avoid the potential race whereby the mutex_trylock() in this
	 * function happens again between the clear_bit() and the
	 * mutex_unlock(): if the callback got broken again in that window,
	 * requeue ourselves to deal with it */
	if (test_bit(AFS_VNODE_CB_BROKEN, &vnode->flags)) {
		_debug("requeue");
		queue_work(afs_callback_update_worker, &vnode->cb_broken_work);
	}
	_leave("");
}

/*
 * actually break a callback
 *
 * Marks the vnode's callback broken, removes its promise from the server's
 * rbtree and queues revalidation work.  Also pokes any waiters for file
 * locks, since a break may signal that a lock was released.
 * Lock order here: vnode->lock outside server->cb_lock.
 */
static void afs_break_callback(struct afs_server *server,
			       struct afs_vnode *vnode)
{
	_enter("");

	set_bit(AFS_VNODE_CB_BROKEN, &vnode->flags);

	if (vnode->cb_promised) {
		spin_lock(&vnode->lock);

		_debug("break callback");

		/* recheck cb_promised under cb_lock - it may have been
		 * withdrawn since the unlocked test above */
		spin_lock(&server->cb_lock);
		if (vnode->cb_promised) {
			rb_erase(&vnode->cb_promise, &server->cb_promises);
			vnode->cb_promised = false;
		}
		spin_unlock(&server->cb_lock);

		queue_work(afs_callback_update_worker, &vnode->cb_broken_work);
		/* waiters blocked on a lock may now be able to get it */
		if (list_empty(&vnode->granted_locks) &&
		    !list_empty(&vnode->pending_locks))
			afs_lock_may_be_available(vnode);
		spin_unlock(&vnode->lock);
	}
}

/*
 * allow the fileserver to explicitly break one callback
 * - happens when
 *   - the backing file is changed
 *   - a lock is released
 *
 * Looks the fid up in the server's fs_vnodes rbtree (ordered by vid, then
 * vnode number, then uniquifier) and breaks the matching vnode's callback.
 * An igrab() pins the inode across the break so it can't be evicted under us.
 */
static void afs_break_one_callback(struct afs_server *server,
				   struct afs_fid *fid)
{
	struct afs_vnode *vnode;
	struct rb_node *p;

	_debug("find");
	spin_lock(&server->fs_lock);
	p = server->fs_vnodes.rb_node;
	while (p) {
		vnode = rb_entry(p, struct afs_vnode, server_rb);
		if (fid->vid < vnode->fid.vid)
			p = p->rb_left;
		else if (fid->vid > vnode->fid.vid)
			p = p->rb_right;
		else if (fid->vnode < vnode->fid.vnode)
			p = p->rb_left;
		else if (fid->vnode > vnode->fid.vnode)
			p = p->rb_right;
		else if (fid->unique < vnode->fid.unique)
			p = p->rb_left;
		else if (fid->unique > vnode->fid.unique)
			p = p->rb_right;
		else
			goto found;
	}

	/* not found so we just ignore it (it may have moved to another
	 * server) */
not_available:
	_debug("not avail");
	spin_unlock(&server->fs_lock);
	_leave("");
	return;

found:
	_debug("found");
	ASSERTCMP(server, ==, vnode->server);

	/* igrab() fails if the inode is being freed; treat that the same as
	 * not finding the vnode at all */
	if (!igrab(AFS_VNODE_TO_I(vnode)))
		goto not_available;
	spin_unlock(&server->fs_lock);

	afs_break_callback(server, vnode);
	iput(&vnode->vfs_inode);
	_leave("");
}

/*
 * allow the fileserver to break callback promises
 *
 * Entry point for an incoming CB.CallBack RPC: breaks each of the @count
 * callbacks in the array in turn.  @count is bounded by AFSCBMAX.
 */
void afs_break_callbacks(struct afs_server *server, size_t count,
			 struct afs_callback callbacks[])
{
	_enter("%p,%zu,", server, count);

	ASSERT(server != NULL);
	ASSERTCMP(count, <=, AFSCBMAX);

	for (; count > 0; callbacks++, count--) {
		_debug("- Fid { vl=%08x n=%u u=%u }  CB { v=%u x=%u t=%u }",
		       callbacks->fid.vid,
		       callbacks->fid.vnode,
		       callbacks->fid.unique,
		       callbacks->version,
		       callbacks->expiry,
		       callbacks->type
		       );
		afs_break_one_callback(server, &callbacks->fid);
	}

	_leave("");
	return;
}

/*
 * record the callback for breaking
 * - the caller must hold server->cb_lock
 *
 * Copies the vnode's callback details into the server's break ring and
 * withdraws the promise.  The smp_wmb() orders the slot fill before the
 * head advance so a consumer reading head sees a complete entry.  Breaks
 * are batched: the flush is delayed (2s) until AFSCBMAX accumulate.
 * The caller is responsible for ensuring ring space (see
 * afs_give_up_callback()).
 */
static void afs_do_give_up_callback(struct afs_server *server,
				    struct afs_vnode *vnode)
{
	struct afs_callback *cb;

	_enter("%p,%p", server, vnode);

	cb = &server->cb_break[server->cb_break_head];
	cb->fid		= vnode->fid;
	cb->version	= vnode->cb_version;
	cb->expiry	= vnode->cb_expiry;
	cb->type	= vnode->cb_type;
	smp_wmb();
	server->cb_break_head =
		(server->cb_break_head + 1) &
		(ARRAY_SIZE(server->cb_break) - 1);

	/* defer the breaking of callbacks to try and collect as many as
	 * possible to ship in one operation */
	switch (atomic_inc_return(&server->cb_break_n)) {
	case 1 ... AFSCBMAX - 1:
		queue_delayed_work(afs_callback_update_worker,
				   &server->cb_break_work, HZ * 2);
		break;
	case AFSCBMAX:
		afs_flush_callback_breaks(server);
		break;
	default:
		break;
	}

	ASSERT(server->cb_promises.rb_node != NULL);
	rb_erase(&vnode->cb_promise, &server->cb_promises);
	vnode->cb_promised = false;
	_leave("");
}

/*
 * discard the callback on a deleted item
 *
 * The file is gone, so there is no point telling the server we're giving the
 * promise up - just drop it locally.  cb_promised is rechecked under cb_lock
 * because a concurrent break may have already withdrawn it.
 */
void afs_discard_callback_on_delete(struct afs_vnode *vnode)
{
	struct afs_server *server = vnode->server;

	_enter("%d", vnode->cb_promised);

	if (!vnode->cb_promised) {
		_leave(" [not promised]");
		return;
	}

	ASSERT(server != NULL);

	spin_lock(&server->cb_lock);
	if (vnode->cb_promised) {
		ASSERT(server->cb_promises.rb_node != NULL);
		rb_erase(&vnode->cb_promise, &server->cb_promises);
		vnode->cb_promised = false;
	}
	spin_unlock(&server->cb_lock);
	_leave("");
}

/*
 * give up the callback registered for a vnode on the file server when the
 * inode is being cleared
 *
 * If the break ring is full, sleeps uninterruptibly on cb_break_waitq
 * (dropping cb_lock across schedule()) until either a slot opens up or the
 * server breaks this vnode's promise for us.  NOTE(review): nothing in this
 * file wakes cb_break_waitq - presumably the ring consumer does; confirm.
 */
void afs_give_up_callback(struct afs_vnode *vnode)
{
	struct afs_server *server = vnode->server;

	DECLARE_WAITQUEUE(myself, current);

	_enter("%d", vnode->cb_promised);

	_debug("GIVE UP INODE %p", &vnode->vfs_inode);

	if (!vnode->cb_promised) {
		_leave(" [not promised]");
		return;
	}

	ASSERT(server != NULL);

	spin_lock(&server->cb_lock);
	if (vnode->cb_promised && afs_breakring_space(server) == 0) {
		add_wait_queue(&server->cb_break_waitq, &myself);
		for (;;) {
			set_current_state(TASK_UNINTERRUPTIBLE);
			/* recheck both conditions each pass: the promise may
			 * have been broken while we slept */
			if (!vnode->cb_promised ||
			    afs_breakring_space(server) != 0)
				break;
			spin_unlock(&server->cb_lock);
			schedule();
			spin_lock(&server->cb_lock);
		}
		remove_wait_queue(&server->cb_break_waitq, &myself);
		__set_current_state(TASK_RUNNING);
	}

	/* of course, it's always possible for the server to break this vnode's
	 * callback first... */
	if (vnode->cb_promised)
		afs_do_give_up_callback(server, vnode);

	spin_unlock(&server->cb_lock);
	_leave("");
}

/*
 * dispatch a deferred give up callbacks operation
 *
 * Delayed-work handler for server->cb_break_work: ships the accumulated
 * break-ring entries to the fileserver asynchronously.
 */
void afs_dispatch_give_up_callbacks(struct work_struct *work)
{
	struct afs_server *server =
		container_of(work, struct afs_server, cb_break_work.work);

	_enter("");

	/* tell the fileserver to discard the callback promises it has
	 * - in the event of ENOMEM or some other error, we just forget that we
	 *   had callbacks entirely, and the server will call us later to break
	 *   them
	 */
	afs_fs_give_up_callbacks(server, &afs_async_call);
}

/*
 * flush the outstanding callback breaks on a server
 *
 * Cancels any pending delayed dispatch and requeues it to run immediately.
 */
void afs_flush_callback_breaks(struct afs_server *server)
{
	cancel_delayed_work(&server->cb_break_work);
	queue_delayed_work(afs_callback_update_worker,
			   &server->cb_break_work, 0);
}

/*
 * initialise the callback update process
 *
 * Returns 0 on success or -ENOMEM if the workqueue cannot be created.
 */
int __init afs_callback_update_init(void)
{
	afs_callback_update_worker =
		create_singlethread_workqueue("kafs_callbackd");
	return afs_callback_update_worker ? 0 : -ENOMEM;
}

/*
 * shut down the callback update process
 *
 * Flushes and destroys the workqueue; all queued work must be safe to run
 * or complete before callers tear down the structures it touches.
 */
void afs_callback_update_kill(void)
{
	destroy_workqueue(afs_callback_update_worker);
}