Lines Matching defs:vnode

16 static void afs_next_locker(struct afs_vnode *vnode, int error);
25 static inline void afs_set_lock_state(struct afs_vnode *vnode, enum afs_lock_state state)
27 _debug("STATE %u -> %u", vnode->lock_state, state);
28 vnode->lock_state = state;
34 * if the callback is broken on this vnode, then the lock may now be available
36 void afs_lock_may_be_available(struct afs_vnode *vnode)
38 _enter("{%llx:%llu}", vnode->fid.vid, vnode->fid.vnode);
40 spin_lock(&vnode->lock);
41 if (vnode->lock_state == AFS_VNODE_LOCK_WAITING_FOR_CB)
42 afs_next_locker(vnode, 0);
43 trace_afs_flock_ev(vnode, NULL, afs_flock_callback_break, 0);
44 spin_unlock(&vnode->lock);
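
The matches above use vnode->lock_state without showing its type. All eight AFS_VNODE_LOCK_* values turn up in the matches below; a minimal sketch of the enum they belong to, assuming the authoritative definition lives in the AFS internal header (the per-state comments are inferred from the call sites in this listing):

    /* Sketch of the vnode lock state machine driven by afs_set_lock_state();
     * inferred from the states used below, not copied from the header. */
    enum afs_lock_state {
        AFS_VNODE_LOCK_NONE,           /* no lock held, nothing in flight */
        AFS_VNODE_LOCK_WAITING_FOR_CB, /* blocked; retry on callback break */
        AFS_VNODE_LOCK_SETTING,        /* SetLock RPC in flight */
        AFS_VNODE_LOCK_GRANTED,        /* the server granted us a lock */
        AFS_VNODE_LOCK_EXTENDING,      /* ExtendLock RPC in flight */
        AFS_VNODE_LOCK_NEED_UNLOCK,    /* lock manager must release the lock */
        AFS_VNODE_LOCK_UNLOCKING,      /* ReleaseLock RPC in flight */
        AFS_VNODE_LOCK_DELETED,        /* vnode deleted on the server */
    };
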
51 static void afs_schedule_lock_extension(struct afs_vnode *vnode)
56 expires_at = ktime_add_ms(vnode->locked_at, AFS_LOCKWAIT * 1000 / 2);
64 queue_delayed_work(afs_lock_manager, &vnode->lock_work, duration_j);
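
Only the endpoints of afs_schedule_lock_extension() match here: the expiry computation at line 56 and the queueing at line 64. A sketch of the pattern in between, assuming the usual ktime-to-jiffies conversion; the clamping detail is an inference, not the verbatim elided lines:

    /* Sketch: requeue the lock manager at half the server lock's lifetime
     * (AFS_LOCKWAIT seconds), clamped so a past deadline fires immediately. */
    static void afs_schedule_lock_extension(struct afs_vnode *vnode)
    {
        ktime_t expires_at = ktime_add_ms(vnode->locked_at,
                                          AFS_LOCKWAIT * 1000 / 2);
        s64 delay_ns = ktime_to_ns(ktime_sub(expires_at, ktime_get_real()));
        unsigned long duration_j = delay_ns > 0 ? nsecs_to_jiffies(delay_ns) : 0;

        queue_delayed_work(afs_lock_manager, &vnode->lock_work, duration_j);
    }
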
74 struct afs_vnode *vnode = op->file[0].vnode;
77 spin_lock(&vnode->lock);
78 trace_afs_flock_ev(vnode, NULL, afs_flock_timestamp, 0);
79 vnode->locked_at = call->issue_time;
80 afs_schedule_lock_extension(vnode);
81 spin_unlock(&vnode->lock);
88 * - the caller must hold the vnode lock
90 static void afs_grant_locks(struct afs_vnode *vnode)
93 bool exclusive = (vnode->lock_type == AFS_LOCK_WRITE);
95 list_for_each_entry_safe(p, _p, &vnode->pending_locks, fl_u.afs.link) {
99 list_move_tail(&p->fl_u.afs.link, &vnode->granted_locks);
101 trace_afs_flock_op(vnode, p, afs_flock_op_grant);
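
afs_grant_locks() runs with the vnode lock held (per the comment at line 88) and moves every waiter that the newly granted server lock can cover from pending_locks to granted_locks. The filter elided between lines 95 and 99 is inferred: a shared server lock cannot satisfy a pending write waiter. A sketch under that assumption:

    struct file_lock *p, *_p;
    bool exclusive = (vnode->lock_type == AFS_LOCK_WRITE);

    list_for_each_entry_safe(p, _p, &vnode->pending_locks, fl_u.afs.link) {
        /* Inferred filter: leave write waiters queued under a read lock. */
        if (!exclusive && p->c.flc_type == F_WRLCK)
            continue;
        list_move_tail(&p->fl_u.afs.link, &vnode->granted_locks);
        p->fl_u.afs.state = AFS_LOCK_GRANTED;  /* assumed state plumbing */
        trace_afs_flock_op(vnode, p, afs_flock_op_grant);
        locks_wake_up(p);
    }
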
111 static void afs_next_locker(struct afs_vnode *vnode, int error)
114 struct key *key = vnode->lock_key;
119 if (vnode->lock_type == AFS_LOCK_WRITE)
122 list_for_each_entry_safe(p, _p, &vnode->pending_locks, fl_u.afs.link) {
137 vnode->lock_key = NULL;
141 afs_set_lock_state(vnode, AFS_VNODE_LOCK_SETTING);
143 trace_afs_flock_op(vnode, next, afs_flock_op_wake);
146 afs_set_lock_state(vnode, AFS_VNODE_LOCK_NONE);
147 trace_afs_flock_ev(vnode, NULL, afs_flock_no_lockers, 0);
154 * Kill off all waiters in the pending lock queue due to the vnode being
157 static void afs_kill_lockers_enoent(struct afs_vnode *vnode)
161 afs_set_lock_state(vnode, AFS_VNODE_LOCK_DELETED);
163 while (!list_empty(&vnode->pending_locks)) {
164 p = list_entry(vnode->pending_locks.next,
171 key_put(vnode->lock_key);
172 vnode->lock_key = NULL;
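
Here every waiter is failed rather than retried, since the vnode no longer exists on the server. The loop body elided between lines 164 and 171 presumably unlinks each waiter, records an error, and wakes it; a sketch of that drain (the exact error value stored is an assumption):

    while (!list_empty(&vnode->pending_locks)) {
        p = list_entry(vnode->pending_locks.next,
                       struct file_lock, fl_u.afs.link);
        list_del_init(&p->fl_u.afs.link);
        p->fl_u.afs.state = -ENOENT;  /* assumed error handed to the waiter */
        locks_wake_up(p);
    }
    key_put(vnode->lock_key);
    vnode->lock_key = NULL;
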
191 static int afs_set_lock(struct afs_vnode *vnode, struct key *key,
197 vnode->volume->name,
198 vnode->fid.vid,
199 vnode->fid.vnode,
200 vnode->fid.unique,
203 op = afs_alloc_operation(key, vnode->volume);
207 afs_op_set_vnode(op, 0, vnode);
223 static int afs_extend_lock(struct afs_vnode *vnode, struct key *key)
228 vnode->volume->name,
229 vnode->fid.vid,
230 vnode->fid.vnode,
231 vnode->fid.unique,
234 op = afs_alloc_operation(key, vnode->volume);
238 afs_op_set_vnode(op, 0, vnode);
254 static int afs_release_lock(struct afs_vnode *vnode, struct key *key)
259 vnode->volume->name,
260 vnode->fid.vid,
261 vnode->fid.vnode,
262 vnode->fid.unique,
265 op = afs_alloc_operation(key, vnode->volume);
269 afs_op_set_vnode(op, 0, vnode);
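
afs_set_lock(), afs_extend_lock() and afs_release_lock() all match with the same shape: log the FID, allocate an operation against the volume, attach the vnode as file[0], and run the RPC synchronously. A consolidated sketch of that shape; the operation table name and the op->lock.type member are assumptions based on the SetLock case:

    /* Sketch of the shared RPC wrapper shape; names marked below are
     * assumed rather than taken from the matched lines. */
    static int afs_set_lock(struct afs_vnode *vnode, struct key *key,
                            afs_lock_type_t type)
    {
        struct afs_operation *op;

        op = afs_alloc_operation(key, vnode->volume);
        if (IS_ERR(op))
            return PTR_ERR(op);

        afs_op_set_vnode(op, 0, vnode);

        op->lock.type = type;                 /* assumed member */
        op->ops = &afs_set_lock_operation;    /* assumed table name */
        return afs_do_sync_operation(op);
    }
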
283 struct afs_vnode *vnode =
288 _enter("{%llx:%llu}", vnode->fid.vid, vnode->fid.vnode);
290 spin_lock(&vnode->lock);
293 _debug("wstate %u for %p", vnode->lock_state, vnode);
294 switch (vnode->lock_state) {
296 afs_set_lock_state(vnode, AFS_VNODE_LOCK_UNLOCKING);
297 trace_afs_flock_ev(vnode, NULL, afs_flock_work_unlocking, 0);
298 spin_unlock(&vnode->lock);
302 ret = afs_release_lock(vnode, vnode->lock_key);
303 if (ret < 0 && vnode->lock_state != AFS_VNODE_LOCK_DELETED) {
304 trace_afs_flock_ev(vnode, NULL, afs_flock_release_fail,
308 vnode->fid.vid, vnode->fid.vnode, ret);
311 spin_lock(&vnode->lock);
313 afs_kill_lockers_enoent(vnode);
315 afs_next_locker(vnode, 0);
316 spin_unlock(&vnode->lock);
325 ASSERT(!list_empty(&vnode->granted_locks));
327 key = key_get(vnode->lock_key);
328 afs_set_lock_state(vnode, AFS_VNODE_LOCK_EXTENDING);
329 trace_afs_flock_ev(vnode, NULL, afs_flock_work_extending, 0);
330 spin_unlock(&vnode->lock);
332 ret = afs_extend_lock(vnode, key); /* RPC */
336 trace_afs_flock_ev(vnode, NULL, afs_flock_extend_fail,
339 vnode->fid.vid, vnode->fid.vnode, ret);
342 spin_lock(&vnode->lock);
345 afs_kill_lockers_enoent(vnode);
346 spin_unlock(&vnode->lock);
350 if (vnode->lock_state != AFS_VNODE_LOCK_EXTENDING)
352 afs_set_lock_state(vnode, AFS_VNODE_LOCK_GRANTED);
355 queue_delayed_work(afs_lock_manager, &vnode->lock_work,
357 spin_unlock(&vnode->lock);
369 afs_next_locker(vnode, 0);
370 spin_unlock(&vnode->lock);
374 afs_kill_lockers_enoent(vnode);
375 spin_unlock(&vnode->lock);
380 spin_unlock(&vnode->lock);
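
The matches from lines 283-380 are afs_lock_work(), the delayed-work handler that drives the state machine: NEED_UNLOCK leads to a ReleaseLock RPC, GRANTED near expiry leads to an ExtendLock RPC, and a deletion error tears the whole queue down via afs_kill_lockers_enoent(). A condensed sketch of the dispatch; tracing, error handling and the requeue timing are deliberately elided:

    struct key *key;

    spin_lock(&vnode->lock);
    switch (vnode->lock_state) {
    case AFS_VNODE_LOCK_NEED_UNLOCK:
        /* Release the server lock on behalf of an already-closed file. */
        afs_set_lock_state(vnode, AFS_VNODE_LOCK_UNLOCKING);
        spin_unlock(&vnode->lock);
        afs_release_lock(vnode, vnode->lock_key);   /* RPC */
        spin_lock(&vnode->lock);
        afs_next_locker(vnode, 0);
        break;
    case AFS_VNODE_LOCK_GRANTED:
        /* Server locks expire, so extend ours before the deadline. */
        key = key_get(vnode->lock_key);
        afs_set_lock_state(vnode, AFS_VNODE_LOCK_EXTENDING);
        spin_unlock(&vnode->lock);
        afs_extend_lock(vnode, key);                /* RPC */
        key_put(key);
        spin_lock(&vnode->lock);
        if (vnode->lock_state == AFS_VNODE_LOCK_EXTENDING)
            afs_set_lock_state(vnode, AFS_VNODE_LOCK_GRANTED);
        break;
    default:
        break;
    }
    spin_unlock(&vnode->lock);
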
387 * pass responsibility for the unlocking of a vnode on the server to the
390 * - the caller must hold the vnode lock
392 static void afs_defer_unlock(struct afs_vnode *vnode)
394 _enter("%u", vnode->lock_state);
396 if (list_empty(&vnode->granted_locks) &&
397 (vnode->lock_state == AFS_VNODE_LOCK_GRANTED ||
398 vnode->lock_state == AFS_VNODE_LOCK_EXTENDING)) {
399 cancel_delayed_work(&vnode->lock_work);
401 afs_set_lock_state(vnode, AFS_VNODE_LOCK_NEED_UNLOCK);
402 trace_afs_flock_ev(vnode, NULL, afs_flock_defer_unlock, 0);
403 queue_delayed_work(afs_lock_manager, &vnode->lock_work, 0);
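
afs_defer_unlock() exists because the last lock may be dropped from a context that cannot block on an RPC, so the actual ReleaseLock is handed to the lock manager: flip to NEED_UNLOCK and queue the work item with no delay. The caller pattern, mirroring afs_fl_release_private() at the end of this listing (vnode lock held across the check and the defer):

    spin_lock(&vnode->lock);
    if (list_empty(&vnode->granted_locks))
        afs_defer_unlock(vnode);
    spin_unlock(&vnode->lock);
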
411 static int afs_do_setlk_check(struct afs_vnode *vnode, struct key *key,
420 ret = afs_validate(vnode, key);
427 ret = afs_check_permit(vnode, key, &access);
454 struct afs_vnode *vnode = AFS_FS_I(inode);
465 vnode->fid.vid, vnode->fid.vnode,
477 ret = afs_do_setlk_check(vnode, key, mode, type);
481 trace_afs_flock_op(vnode, fl, afs_flock_op_set_lock);
497 spin_lock(&vnode->lock);
498 list_add_tail(&fl->fl_u.afs.link, &vnode->pending_locks);
501 if (vnode->lock_state == AFS_VNODE_LOCK_DELETED)
508 _debug("try %u", vnode->lock_state);
509 if (vnode->lock_state == AFS_VNODE_LOCK_GRANTED) {
512 list_move_tail(&fl->fl_u.afs.link, &vnode->granted_locks);
517 if (vnode->lock_type == AFS_LOCK_WRITE) {
519 list_move_tail(&fl->fl_u.afs.link, &vnode->granted_locks);
525 if (vnode->lock_state == AFS_VNODE_LOCK_NONE &&
529 if (vnode->status.lock_count == -1)
532 if (vnode->status.lock_count != 0)
537 if (vnode->lock_state != AFS_VNODE_LOCK_NONE)
541 /* We don't have a lock on this vnode and we aren't currently waiting
549 trace_afs_flock_ev(vnode, fl, afs_flock_try_to_lock, 0);
550 vnode->lock_key = key_get(key);
551 vnode->lock_type = type;
552 afs_set_lock_state(vnode, AFS_VNODE_LOCK_SETTING);
553 spin_unlock(&vnode->lock);
555 ret = afs_set_lock(vnode, key, type); /* RPC */
557 spin_lock(&vnode->lock);
565 trace_afs_flock_ev(vnode, fl, afs_flock_fail_perm, ret);
567 afs_next_locker(vnode, ret);
572 trace_afs_flock_ev(vnode, fl, afs_flock_fail_other, ret);
574 afs_kill_lockers_enoent(vnode);
579 trace_afs_flock_ev(vnode, fl, afs_flock_fail_other, ret);
581 afs_next_locker(vnode, 0);
589 ASSERT(list_empty(&vnode->granted_locks));
590 ASSERTCMP(vnode->pending_locks.next, ==, &fl->fl_u.afs.link);
594 afs_set_lock_state(vnode, AFS_VNODE_LOCK_GRANTED);
595 trace_afs_flock_ev(vnode, fl, afs_flock_acquired, type);
596 afs_grant_locks(vnode);
601 spin_unlock(&vnode->lock);
608 trace_afs_flock_ev(vnode, fl, afs_flock_vfs_locking, 0);
610 trace_afs_flock_ev(vnode, fl, afs_flock_vfs_lock, ret);
618 afs_validate(vnode, key);
625 afs_next_locker(vnode, 0);
630 afs_set_lock_state(vnode, AFS_VNODE_LOCK_WAITING_FOR_CB);
631 trace_afs_flock_ev(vnode, fl, afs_flock_would_block, ret);
632 queue_delayed_work(afs_lock_manager, &vnode->lock_work, HZ * 5);
640 spin_unlock(&vnode->lock);
642 trace_afs_flock_ev(vnode, fl, afs_flock_waiting, 0);
645 trace_afs_flock_ev(vnode, fl, afs_flock_waited, ret);
648 spin_lock(&vnode->lock);
660 ASSERTCMP(vnode->lock_state, ==, AFS_VNODE_LOCK_WAITING_FOR_CB);
661 afs_set_lock_state(vnode, AFS_VNODE_LOCK_SETTING);
671 spin_unlock(&vnode->lock);
687 spin_lock(&vnode->lock);
689 afs_defer_unlock(vnode);
692 spin_unlock(&vnode->lock);
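
The long run of matches above (lines 454-692) is afs_do_setlk(): queue the VFS lock on pending_locks and, per the comment at line 541, if nothing holds or is setting the server lock, claim SETTING, drop the spinlock for the SetLock RPC, then retake it to grant or to pass the error on. A skeleton of that path under stated simplifications (the wait/would-block branches and permission checks are omitted, and the error handoff is condensed):

    spin_lock(&vnode->lock);
    list_add_tail(&fl->fl_u.afs.link, &vnode->pending_locks);

    if (vnode->lock_state == AFS_VNODE_LOCK_NONE) {
        vnode->lock_key = key_get(key);
        vnode->lock_type = type;
        afs_set_lock_state(vnode, AFS_VNODE_LOCK_SETTING);
        spin_unlock(&vnode->lock);

        ret = afs_set_lock(vnode, key, type);   /* RPC, spinlock dropped */

        spin_lock(&vnode->lock);
        if (ret == 0) {
            afs_set_lock_state(vnode, AFS_VNODE_LOCK_GRANTED);
            afs_grant_locks(vnode);             /* moves fl to granted_locks */
        } else {
            list_del_init(&fl->fl_u.afs.link);
            afs_next_locker(vnode, ret);        /* wake the next candidate */
        }
    }
    spin_unlock(&vnode->lock);
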
703 struct afs_vnode *vnode = AFS_FS_I(file_inode(file));
706 _enter("{%llx:%llu},%u", vnode->fid.vid, vnode->fid.vnode,
709 trace_afs_flock_op(vnode, fl, afs_flock_op_unlock);
715 _leave(" = %d [%u]", ret, vnode->lock_state);
724 struct afs_vnode *vnode = AFS_FS_I(file_inode(file));
730 if (vnode->lock_state == AFS_VNODE_LOCK_DELETED)
739 ret = afs_fetch_status(vnode, key, false, NULL);
743 lock_count = READ_ONCE(vnode->status.lock_count);
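
afs_do_getlk() answers F_GETLK from the fetched status rather than the server's lock table: lock_count > 0 means that many shared readers, -1 (as also tested at lines 529-532 above) means an exclusive writer, and 0 means unlocked. A sketch of the decode that would follow the READ_ONCE(); the flc_type spelling matches the field style used at lines 846 and 864:

    if (lock_count != 0) {
        fl->c.flc_type = lock_count > 0 ? F_RDLCK : F_WRLCK;
        fl->fl_start = 0;
        fl->fl_end = OFFSET_MAX;    /* whole-file lock */
    } else {
        fl->c.flc_type = F_UNLCK;
    }
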
766 struct afs_vnode *vnode = AFS_FS_I(file_inode(file));
771 vnode->fid.vid, vnode->fid.vnode, cmd,
779 trace_afs_flock_op(vnode, fl, afs_flock_op_lock);
792 trace_afs_flock_op(vnode, fl, op);
801 struct afs_vnode *vnode = AFS_FS_I(file_inode(file));
806 vnode->fid.vid, vnode->fid.vnode, cmd,
820 trace_afs_flock_op(vnode, fl, afs_flock_op_flock);
834 trace_afs_flock_op(vnode, fl, op);
840 * copy into its own list, so we need to add that copy to the vnode's lock
846 struct afs_vnode *vnode = AFS_FS_I(file_inode(fl->c.flc_file));
852 spin_lock(&vnode->lock);
853 trace_afs_flock_op(vnode, new, afs_flock_op_copy_lock);
855 spin_unlock(&vnode->lock);
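
Per the comment at line 840, the VFS makes its own copy of a lock it accepts, and that copy must appear on the vnode's lists too or a later release would miss it. A sketch of the elided body; chaining the copy next to the original keeps it on whichever list (granted or pending) the original occupies, though the exact list_add() target is an inference:

    static void afs_fl_copy_lock(struct file_lock *new, struct file_lock *fl)
    {
        struct afs_vnode *vnode = AFS_FS_I(file_inode(fl->c.flc_file));

        spin_lock(&vnode->lock);
        trace_afs_flock_op(vnode, new, afs_flock_op_copy_lock);
        /* Inferred: splice the copy in right behind the original. */
        list_add(&new->fl_u.afs.link, &fl->fl_u.afs.link);
        spin_unlock(&vnode->lock);
    }
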
859 * need to remove this lock from the vnode queue when it's removed from the
864 struct afs_vnode *vnode = AFS_FS_I(file_inode(fl->c.flc_file));
868 spin_lock(&vnode->lock);
870 trace_afs_flock_op(vnode, fl, afs_flock_op_release_lock);
872 if (list_empty(&vnode->granted_locks))
873 afs_defer_unlock(vnode);
875 _debug("state %u for %p", vnode->lock_state, vnode);
876 spin_unlock(&vnode->lock);
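
afs_fl_release_private() is the teardown mirror: unlink the lock under the vnode lock and, if granted_locks is now empty, hand the server-side unlock to the lock manager via afs_defer_unlock(). Both hooks reach the VFS through a file_lock_operations table; a sketch of that wiring (the table name is an assumption, and it would be attached to each lock when afs_do_setlk() accepts it):

    static const struct file_lock_operations afs_lock_ops = {  /* assumed name */
        .fl_copy_lock       = afs_fl_copy_lock,
        .fl_release_private = afs_fl_release_private,
    };
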