Lines Matching defs:gl (fs/gfs2/glock.c)

57 	struct gfs2_glock *gl;		/* current glock struct        */
61 typedef void (*glock_examiner) (struct gfs2_glock * gl);
63 static void do_xmote(struct gfs2_glock *gl, struct gfs2_holder *gh, unsigned int target);
65 static void handle_callback(struct gfs2_glock *gl, unsigned int state,
119 * @gl: the glock
121 static void wake_up_glock(struct gfs2_glock *gl)
123 wait_queue_head_t *wq = glock_waitqueue(&gl->gl_name);
126 __wake_up(wq, TASK_NORMAL, 1, &gl->gl_name);
131 struct gfs2_glock *gl = container_of(rcu, struct gfs2_glock, gl_rcu);
133 kfree(gl->gl_lksb.sb_lvbptr);
134 if (gl->gl_ops->go_flags & GLOF_ASPACE) {
136 container_of(gl, struct gfs2_glock_aspace, glock);
139 kmem_cache_free(gfs2_glock_cachep, gl);
144 * @gl: the glock
155 static bool glock_blocked_by_withdraw(struct gfs2_glock *gl)
157 struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
161 if (gl->gl_ops->go_flags & GLOF_NONDISK)
164 gl->gl_name.ln_number == sdp->sd_jdesc->jd_no_addr)
169 void gfs2_glock_free(struct gfs2_glock *gl)
171 struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
173 gfs2_glock_assert_withdraw(gl, atomic_read(&gl->gl_revokes) == 0);
174 rhashtable_remove_fast(&gl_hash_table, &gl->gl_node, ht_parms);
176 wake_up_glock(gl);
177 call_rcu(&gl->gl_rcu, gfs2_glock_dealloc);
184 * @gl: The glock to hold
188 struct gfs2_glock *gfs2_glock_hold(struct gfs2_glock *gl)
190 GLOCK_BUG_ON(gl, __lockref_is_dead(&gl->gl_lockref));
191 lockref_get(&gl->gl_lockref);
192 return gl;
197 * @gl: the glock
202 static int demote_ok(const struct gfs2_glock *gl)
204 const struct gfs2_glock_operations *glops = gl->gl_ops;
206 if (gl->gl_state == LM_ST_UNLOCKED)
208 if (!list_empty(&gl->gl_holders))
211 return glops->go_demote_ok(gl);
216 void gfs2_glock_add_to_lru(struct gfs2_glock *gl)
218 if (!(gl->gl_ops->go_flags & GLOF_LRU))
223 list_move_tail(&gl->gl_lru, &lru_list);
225 if (!test_bit(GLF_LRU, &gl->gl_flags)) {
226 set_bit(GLF_LRU, &gl->gl_flags);
233 static void gfs2_glock_remove_from_lru(struct gfs2_glock *gl)
235 if (!(gl->gl_ops->go_flags & GLOF_LRU))
239 if (test_bit(GLF_LRU, &gl->gl_flags)) {
240 list_del_init(&gl->gl_lru);
242 clear_bit(GLF_LRU, &gl->gl_flags);
251 static void __gfs2_glock_queue_work(struct gfs2_glock *gl, unsigned long delay) {
252 if (!queue_delayed_work(glock_workqueue, &gl->gl_work, delay)) {
259 GLOCK_BUG_ON(gl, gl->gl_lockref.count < 2);
260 gl->gl_lockref.count--;
264 static void gfs2_glock_queue_work(struct gfs2_glock *gl, unsigned long delay) {
265 spin_lock(&gl->gl_lockref.lock);
266 __gfs2_glock_queue_work(gl, delay);
267 spin_unlock(&gl->gl_lockref.lock);
270 static void __gfs2_glock_put(struct gfs2_glock *gl)
272 struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
273 struct address_space *mapping = gfs2_glock2aspace(gl);
275 lockref_mark_dead(&gl->gl_lockref);
276 spin_unlock(&gl->gl_lockref.lock);
277 gfs2_glock_remove_from_lru(gl);
278 GLOCK_BUG_ON(gl, !list_empty(&gl->gl_holders));
282 GLOCK_BUG_ON(gl, !mapping_empty(mapping));
284 trace_gfs2_glock_put(gl);
285 sdp->sd_lockstruct.ls_ops->lm_put_lock(gl);
291 void gfs2_glock_queue_put(struct gfs2_glock *gl)
293 gfs2_glock_queue_work(gl, 0);
298 * @gl: The glock to put
302 void gfs2_glock_put(struct gfs2_glock *gl)
304 if (lockref_put_or_lock(&gl->gl_lockref))
307 __gfs2_glock_put(gl);
312 * @gl: The glock
313 * @current_gh: One of the current holders of @gl
324 static inline bool may_grant(struct gfs2_glock *gl,
329 GLOCK_BUG_ON(gl, !test_bit(HIF_HOLDER, &current_gh->gh_iflags));
353 if (gl->gl_state == gh->gh_state)
357 if (gl->gl_state == LM_ST_EXCLUSIVE) {
362 return gl->gl_state != LM_ST_UNLOCKED;
380 * @gl: The glock
384 static void do_error(struct gfs2_glock *gl, const int ret)
388 list_for_each_entry_safe(gh, tmp, &gl->gl_holders, gh_list) {
405 * @gl: the glock
408 static inline struct gfs2_holder *find_first_holder(const struct gfs2_glock *gl)
412 if (!list_empty(&gl->gl_holders)) {
413 gh = list_first_entry(&gl->gl_holders, struct gfs2_holder,
429 struct gfs2_glock *gl = gh->gh_gl;
430 const struct gfs2_glock_operations *glops = gl->gl_ops;
434 if (!test_bit(GLF_INSTANTIATE_NEEDED, &gl->gl_flags))
441 if (test_and_set_bit(GLF_INSTANTIATE_IN_PROG, &gl->gl_flags)) {
442 wait_on_bit(&gl->gl_flags, GLF_INSTANTIATE_IN_PROG,
454 ret = glops->go_instantiate(gl);
456 clear_bit(GLF_INSTANTIATE_NEEDED, &gl->gl_flags);
457 clear_and_wake_up_bit(GLF_INSTANTIATE_IN_PROG, &gl->gl_flags);
469 * @gl: The glock
474 static bool do_promote(struct gfs2_glock *gl)
478 current_gh = find_first_holder(gl);
479 list_for_each_entry(gh, &gl->gl_holders, gh_list) {
482 if (!may_grant(gl, current_gh, gh)) {
489 if (list_is_first(&gh->gh_list, &gl->gl_holders))
491 do_error(gl, 0);
505 * @gl: the glock
508 static inline struct gfs2_holder *find_first_waiter(const struct gfs2_glock *gl)
512 list_for_each_entry(gh, &gl->gl_holders, gh_list) {
521 * @gl: the glock
526 static inline struct gfs2_holder *find_last_waiter(const struct gfs2_glock *gl)
530 if (list_empty(&gl->gl_holders))
532 gh = list_last_entry(&gl->gl_holders, struct gfs2_holder, gh_list);
538 * @gl: the glock
542 static void state_change(struct gfs2_glock *gl, unsigned int new_state)
546 held1 = (gl->gl_state != LM_ST_UNLOCKED);
550 GLOCK_BUG_ON(gl, __lockref_is_dead(&gl->gl_lockref));
552 gl->gl_lockref.count++;
554 gl->gl_lockref.count--;
556 if (new_state != gl->gl_target)
558 gl->gl_hold_time = max(gl->gl_hold_time - GL_GLOCK_HOLD_DECR,
560 gl->gl_state = new_state;
561 gl->gl_tchange = jiffies;
564 static void gfs2_set_demote(struct gfs2_glock *gl)
566 struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
568 set_bit(GLF_DEMOTE, &gl->gl_flags);
573 static void gfs2_demote_wake(struct gfs2_glock *gl)
575 gl->gl_demote_state = LM_ST_EXCLUSIVE;
576 clear_bit(GLF_DEMOTE, &gl->gl_flags);
578 wake_up_bit(&gl->gl_flags, GLF_DEMOTE);
583 * @gl: The glock
588 static void finish_xmote(struct gfs2_glock *gl, unsigned int ret)
590 const struct gfs2_glock_operations *glops = gl->gl_ops;
594 spin_lock(&gl->gl_lockref.lock);
595 trace_gfs2_glock_state_change(gl, state);
596 state_change(gl, state);
597 gh = find_first_waiter(gl);
600 if (test_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags) &&
601 state != LM_ST_UNLOCKED && gl->gl_demote_state == LM_ST_UNLOCKED)
602 gl->gl_target = LM_ST_UNLOCKED;
605 if (unlikely(state != gl->gl_target)) {
608 if (gh && !test_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags)) {
611 list_move_tail(&gh->gh_list, &gl->gl_holders);
612 gh = find_first_waiter(gl);
613 gl->gl_target = gh->gh_state;
614 if (do_promote(gl))
621 gl->gl_target = gl->gl_state;
622 do_error(gl, ret);
630 do_xmote(gl, gh, gl->gl_target);
635 do_xmote(gl, gh, LM_ST_UNLOCKED);
638 fs_err(gl->gl_name.ln_sbd, "wanted %u got %u\n",
639 gl->gl_target, state);
640 GLOCK_BUG_ON(gl, 1);
642 spin_unlock(&gl->gl_lockref.lock);
647 if (test_and_clear_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags))
648 gfs2_demote_wake(gl);
653 spin_unlock(&gl->gl_lockref.lock);
654 rv = glops->go_xmote_bh(gl);
655 spin_lock(&gl->gl_lockref.lock);
657 do_error(gl, rv);
661 do_promote(gl);
664 clear_bit(GLF_LOCK, &gl->gl_flags);
665 spin_unlock(&gl->gl_lockref.lock);
668 static bool is_system_glock(struct gfs2_glock *gl)
670 struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
673 if (gl == m_ip->i_gl)
680 * @gl: The lock state
686 static void do_xmote(struct gfs2_glock *gl, struct gfs2_holder *gh,
688 __releases(&gl->gl_lockref.lock)
689 __acquires(&gl->gl_lockref.lock)
691 const struct gfs2_glock_operations *glops = gl->gl_ops;
692 struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
696 if (target != LM_ST_UNLOCKED && glock_blocked_by_withdraw(gl) &&
701 GLOCK_BUG_ON(gl, gl->gl_state == target);
702 GLOCK_BUG_ON(gl, gl->gl_state == gl->gl_target);
711 &gl->gl_flags))
713 do_error(gl, 0); /* Fail queued try locks */
715 gl->gl_req = target;
716 set_bit(GLF_BLOCKING, &gl->gl_flags);
717 if ((gl->gl_req == LM_ST_UNLOCKED) ||
718 (gl->gl_state == LM_ST_EXCLUSIVE) ||
720 clear_bit(GLF_BLOCKING, &gl->gl_flags);
721 spin_unlock(&gl->gl_lockref.lock);
723 ret = glops->go_sync(gl);
731 gfs2_dump_glock(NULL, gl, true);
736 if (test_bit(GLF_INVALIDATE_IN_PROGRESS, &gl->gl_flags)) {
744 if ((atomic_read(&gl->gl_ail_count) != 0) &&
746 gfs2_glock_assert_warn(gl,
747 !atomic_read(&gl->gl_ail_count));
748 gfs2_dump_glock(NULL, gl, true);
750 glops->go_inval(gl, target == LM_ST_DEFERRED ? 0 : DIO_METADATA);
751 clear_bit(GLF_INVALIDATE_IN_PROGRESS, &gl->gl_flags);
755 gfs2_glock_hold(gl);
779 if (glock_blocked_by_withdraw(gl) &&
782 if (!is_system_glock(gl)) {
783 handle_callback(gl, LM_ST_UNLOCKED, 0, false); /* sets demote */
790 state_change(gl, LM_ST_UNLOCKED);
795 clear_bit(GLF_LOCK, &gl->gl_flags);
796 clear_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags);
797 gfs2_glock_queue_work(gl, GL_GLOCK_DFT_HOLD);
800 clear_bit(GLF_INVALIDATE_IN_PROGRESS, &gl->gl_flags);
806 ret = sdp->sd_lockstruct.ls_ops->lm_lock(gl, target, lck_flags);
807 if (ret == -EINVAL && gl->gl_target == LM_ST_UNLOCKED &&
810 finish_xmote(gl, target);
811 gfs2_glock_queue_work(gl, 0);
814 GLOCK_BUG_ON(gl, !gfs2_withdrawing_or_withdrawn(sdp));
817 finish_xmote(gl, target);
818 gfs2_glock_queue_work(gl, 0);
821 spin_lock(&gl->gl_lockref.lock);
826 * @gl: The glock in question
831 static void run_queue(struct gfs2_glock *gl, const int nonblock)
832 __releases(&gl->gl_lockref.lock)
833 __acquires(&gl->gl_lockref.lock)
837 if (test_and_set_bit(GLF_LOCK, &gl->gl_flags))
840 GLOCK_BUG_ON(gl, test_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags));
842 if (test_bit(GLF_DEMOTE, &gl->gl_flags) &&
843 gl->gl_demote_state != gl->gl_state) {
844 if (find_first_holder(gl))
848 set_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags);
849 GLOCK_BUG_ON(gl, gl->gl_demote_state == LM_ST_EXCLUSIVE);
850 gl->gl_target = gl->gl_demote_state;
852 if (test_bit(GLF_DEMOTE, &gl->gl_flags))
853 gfs2_demote_wake(gl);
854 if (do_promote(gl))
856 gh = find_first_waiter(gl);
857 gl->gl_target = gh->gh_state;
859 do_error(gl, 0); /* Fail queued try locks */
861 do_xmote(gl, gh, gl->gl_target);
865 clear_bit(GLF_LOCK, &gl->gl_flags);
867 gl->gl_lockref.count++;
868 __gfs2_glock_queue_work(gl, 0);
872 clear_bit(GLF_LOCK, &gl->gl_flags);
879 * @gl: the glock
882 void glock_set_object(struct gfs2_glock *gl, void *object)
886 spin_lock(&gl->gl_lockref.lock);
887 prev_object = gl->gl_object;
888 gl->gl_object = object;
889 spin_unlock(&gl->gl_lockref.lock);
890 if (gfs2_assert_warn(gl->gl_name.ln_sbd, prev_object == NULL)) {
892 gl->gl_name.ln_type,
893 (unsigned long long)gl->gl_name.ln_number);
894 gfs2_dump_glock(NULL, gl, true);
900 * @gl: the glock
903 void glock_clear_object(struct gfs2_glock *gl, void *object)
907 spin_lock(&gl->gl_lockref.lock);
908 prev_object = gl->gl_object;
909 gl->gl_object = NULL;
910 spin_unlock(&gl->gl_lockref.lock);
911 if (gfs2_assert_warn(gl->gl_name.ln_sbd, prev_object == object)) {
913 gl->gl_name.ln_type,
914 (unsigned long long)gl->gl_name.ln_number);
915 gfs2_dump_glock(NULL, gl, true);
919 void gfs2_inode_remember_delete(struct gfs2_glock *gl, u64 generation)
921 struct gfs2_inode_lvb *ri = (void *)gl->gl_lksb.sb_lvbptr;
929 bool gfs2_inode_already_deleted(struct gfs2_glock *gl, u64 generation)
931 struct gfs2_inode_lvb *ri = (void *)gl->gl_lksb.sb_lvbptr;
938 static void gfs2_glock_poke(struct gfs2_glock *gl)
944 __gfs2_holder_init(gl, LM_ST_SHARED, flags, &gh, _RET_IP_);
951 static bool gfs2_try_evict(struct gfs2_glock *gl)
966 spin_lock(&gl->gl_lockref.lock);
967 ip = gl->gl_object;
970 spin_unlock(&gl->gl_lockref.lock);
972 gl->gl_no_formal_ino = ip->i_no_formal_ino;
977 /* If the inode was evicted, gl->gl_object will now be NULL. */
978 spin_lock(&gl->gl_lockref.lock);
979 ip = gl->gl_object;
985 spin_unlock(&gl->gl_lockref.lock);
995 bool gfs2_queue_try_to_evict(struct gfs2_glock *gl)
997 struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
999 if (test_and_set_bit(GLF_TRY_TO_EVICT, &gl->gl_flags))
1002 &gl->gl_delete, 0);
1005 static bool gfs2_queue_verify_evict(struct gfs2_glock *gl)
1007 struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
1009 if (test_and_set_bit(GLF_VERIFY_EVICT, &gl->gl_flags))
1012 &gl->gl_delete, 5 * HZ);
1018 struct gfs2_glock *gl = container_of(dwork, struct gfs2_glock, gl_delete);
1019 struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
1021 u64 no_addr = gl->gl_name.ln_number;
1023 if (test_and_clear_bit(GLF_TRY_TO_EVICT, &gl->gl_flags)) {
1041 if (gfs2_try_evict(gl)) {
1044 if (gfs2_queue_verify_evict(gl))
1050 if (test_and_clear_bit(GLF_VERIFY_EVICT, &gl->gl_flags)) {
1051 inode = gfs2_lookup_by_inum(sdp, no_addr, gl->gl_no_formal_ino,
1056 gfs2_queue_verify_evict(gl))
1065 gfs2_glock_put(gl);
1071 struct gfs2_glock *gl = container_of(work, struct gfs2_glock, gl_work.work);
1074 if (test_and_clear_bit(GLF_REPLY_PENDING, &gl->gl_flags)) {
1075 finish_xmote(gl, gl->gl_reply);
1078 spin_lock(&gl->gl_lockref.lock);
1079 if (test_bit(GLF_PENDING_DEMOTE, &gl->gl_flags) &&
1080 gl->gl_state != LM_ST_UNLOCKED &&
1081 gl->gl_demote_state != LM_ST_EXCLUSIVE) {
1084 holdtime = gl->gl_tchange + gl->gl_hold_time;
1089 clear_bit(GLF_PENDING_DEMOTE, &gl->gl_flags);
1090 gfs2_set_demote(gl);
1093 run_queue(gl, 0);
1097 if (gl->gl_name.ln_type != LM_TYPE_INODE)
1099 __gfs2_glock_queue_work(gl, delay);
1107 gl->gl_lockref.count -= drop_refs;
1108 if (!gl->gl_lockref.count) {
1109 __gfs2_glock_put(gl);
1112 spin_unlock(&gl->gl_lockref.lock);
1120 struct gfs2_glock *gl;
1130 gl = rhashtable_lookup_get_insert_fast(&gl_hash_table,
1132 if (IS_ERR(gl))
1135 gl = rhashtable_lookup_fast(&gl_hash_table,
1138 if (gl && !lockref_get_not_dead(&gl->gl_lockref)) {
1146 return gl;
1170 struct gfs2_glock *gl, *tmp;
1174 gl = find_insert_glock(&name, NULL);
1175 if (gl) {
1176 *glp = gl;
1187 gl = &gla->glock;
1189 gl = kmem_cache_alloc(gfs2_glock_cachep, GFP_NOFS);
1190 if (!gl)
1193 memset(&gl->gl_lksb, 0, sizeof(struct dlm_lksb));
1194 gl->gl_ops = glops;
1197 gl->gl_lksb.sb_lvbptr = kzalloc(GDLM_LVB_SIZE, GFP_NOFS);
1198 if (!gl->gl_lksb.sb_lvbptr) {
1199 gfs2_glock_dealloc(&gl->gl_rcu);
1205 gl->gl_node.next = NULL;
1206 gl->gl_flags = glops->go_instantiate ? BIT(GLF_INSTANTIATE_NEEDED) : 0;
1207 gl->gl_name = name;
1208 lockdep_set_subclass(&gl->gl_lockref.lock, glops->go_subclass);
1209 gl->gl_lockref.count = 1;
1210 gl->gl_state = LM_ST_UNLOCKED;
1211 gl->gl_target = LM_ST_UNLOCKED;
1212 gl->gl_demote_state = LM_ST_EXCLUSIVE;
1213 gl->gl_dstamp = 0;
1216 gl->gl_stats = this_cpu_ptr(sdp->sd_lkstats)->lkstats[glops->go_type];
1218 gl->gl_stats.stats[GFS2_LKS_DCOUNT] = 0;
1219 gl->gl_stats.stats[GFS2_LKS_QCOUNT] = 0;
1220 gl->gl_tchange = jiffies;
1221 gl->gl_object = NULL;
1222 gl->gl_hold_time = GL_GLOCK_DFT_HOLD;
1223 INIT_DELAYED_WORK(&gl->gl_work, glock_work_func);
1224 if (gl->gl_name.ln_type == LM_TYPE_IOPEN)
1225 INIT_DELAYED_WORK(&gl->gl_delete, delete_work_func);
1227 mapping = gfs2_glock2aspace(gl);
1237 tmp = find_insert_glock(&name, gl);
1239 *glp = gl;
1249 gfs2_glock_dealloc(&gl->gl_rcu);
1259 * @gl: the glock
1266 void __gfs2_holder_init(struct gfs2_glock *gl, unsigned int state, u16 flags,
1270 gh->gh_gl = gfs2_glock_hold(gl);
1312 static void gfs2_glock_update_hold_time(struct gfs2_glock *gl,
1318 gl->gl_hold_time = min(gl->gl_hold_time + GL_GLOCK_HOLD_INCR,
1425 * @gl: the glock
1434 static void handle_callback(struct gfs2_glock *gl, unsigned int state,
1438 set_bit(GLF_PENDING_DEMOTE, &gl->gl_flags);
1440 gfs2_set_demote(gl);
1441 if (gl->gl_demote_state == LM_ST_EXCLUSIVE) {
1442 gl->gl_demote_state = state;
1443 gl->gl_demote_time = jiffies;
1444 } else if (gl->gl_demote_state != LM_ST_UNLOCKED &&
1445 gl->gl_demote_state != state) {
1446 gl->gl_demote_state = LM_ST_UNLOCKED;
1448 if (gl->gl_ops->go_callback)
1449 gl->gl_ops->go_callback(gl, remote);
1450 trace_gfs2_demote_rq(gl, remote);
1492 __releases(&gl->gl_lockref.lock)
1493 __acquires(&gl->gl_lockref.lock)
1495 struct gfs2_glock *gl = gh->gh_gl;
1496 struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
1501 GLOCK_BUG_ON(gl, gh->gh_owner_pid == NULL);
1503 GLOCK_BUG_ON(gl, true);
1506 if (test_bit(GLF_LOCK, &gl->gl_flags)) {
1509 current_gh = find_first_holder(gl);
1510 try_futile = !may_grant(gl, current_gh, gh);
1512 if (test_bit(GLF_INVALIDATE_IN_PROGRESS, &gl->gl_flags))
1516 list_for_each_entry(gh2, &gl->gl_holders, gh_list) {
1525 list_for_each_entry(gh2, &gl->gl_holders, gh_list) {
1537 gfs2_glstats_inc(gl, GFS2_LKS_QCOUNT);
1538 gfs2_sbstats_inc(gl, GFS2_LKS_QCOUNT);
1540 list_add_tail(&gh->gh_list, &gl->gl_holders);
1544 spin_unlock(&gl->gl_lockref.lock);
1546 sdp->sd_lockstruct.ls_ops->lm_cancel(gl);
1547 spin_lock(&gl->gl_lockref.lock);
1559 gfs2_dump_glock(NULL, gl, true);
1574 struct gfs2_glock *gl = gh->gh_gl;
1577 if (glock_blocked_by_withdraw(gl) && !(gh->gh_flags & LM_FLAG_NOEXP))
1584 spin_lock(&gl->gl_lockref.lock);
1585 if (find_last_waiter(gl))
1587 current_gh = find_first_holder(gl);
1588 if (!may_grant(gl, current_gh, gh))
1591 list_add_tail(&gh->gh_list, &gl->gl_holders);
1595 spin_unlock(&gl->gl_lockref.lock);
1599 if (test_bit(GLF_LRU, &gl->gl_flags))
1600 gfs2_glock_remove_from_lru(gl);
1603 spin_lock(&gl->gl_lockref.lock);
1606 test_and_clear_bit(GLF_FROZEN, &gl->gl_flags))) {
1607 set_bit(GLF_REPLY_PENDING, &gl->gl_flags);
1608 gl->gl_lockref.count++;
1609 __gfs2_glock_queue_work(gl, 0);
1611 run_queue(gl, 1);
1612 spin_unlock(&gl->gl_lockref.lock);
1633 static inline bool needs_demote(struct gfs2_glock *gl)
1635 return (test_bit(GLF_DEMOTE, &gl->gl_flags) ||
1636 test_bit(GLF_PENDING_DEMOTE, &gl->gl_flags));
1641 struct gfs2_glock *gl = gh->gh_gl;
1651 handle_callback(gl, LM_ST_UNLOCKED, 0, false);
1661 if (!needs_demote(gl)) {
1662 if (list_empty(&gl->gl_holders))
1666 if (!test_bit(GLF_LFLUSH, &gl->gl_flags) && demote_ok(gl))
1667 gfs2_glock_add_to_lru(gl);
1670 gl->gl_lockref.count++;
1671 if (test_bit(GLF_PENDING_DEMOTE, &gl->gl_flags) &&
1672 !test_bit(GLF_DEMOTE, &gl->gl_flags) &&
1673 gl->gl_name.ln_type == LM_TYPE_INODE)
1674 delay = gl->gl_hold_time;
1675 __gfs2_glock_queue_work(gl, delay);
1686 struct gfs2_glock *gl = gh->gh_gl;
1687 struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
1689 spin_lock(&gl->gl_lockref.lock);
1698 if (list_is_first(&gh->gh_list, &gl->gl_holders) &&
1700 spin_unlock(&gl->gl_lockref.lock);
1701 gl->gl_name.ln_sbd->sd_lockstruct.ls_ops->lm_cancel(gl);
1703 spin_lock(&gl->gl_lockref.lock);
1714 glock_blocked_by_withdraw(gl) &&
1717 spin_unlock(&gl->gl_lockref.lock);
1721 spin_lock(&gl->gl_lockref.lock);
1726 spin_unlock(&gl->gl_lockref.lock);
1731 struct gfs2_glock *gl = gh->gh_gl;
1734 wait_on_bit(&gl->gl_flags, GLF_DEMOTE, TASK_UNINTERRUPTIBLE);
1765 struct gfs2_glock *gl;
1768 error = gfs2_glock_get(sdp, number, glops, CREATE, &gl);
1770 error = gfs2_glock_nq_init(gl, state, flags, gh);
1771 gfs2_glock_put(gl);
1882 void gfs2_glock_cb(struct gfs2_glock *gl, unsigned int state)
1888 gfs2_glock_hold(gl);
1889 spin_lock(&gl->gl_lockref.lock);
1890 holdtime = gl->gl_tchange + gl->gl_hold_time;
1891 if (!list_empty(&gl->gl_holders) &&
1892 gl->gl_name.ln_type == LM_TYPE_INODE) {
1895 if (test_bit(GLF_REPLY_PENDING, &gl->gl_flags))
1896 delay = gl->gl_hold_time;
1898 handle_callback(gl, state, delay, true);
1899 __gfs2_glock_queue_work(gl, delay);
1900 spin_unlock(&gl->gl_lockref.lock);
1905 * @gl: The glock in question
1914 static int gfs2_should_freeze(const struct gfs2_glock *gl)
1918 if (gl->gl_reply & ~LM_OUT_ST_MASK)
1920 if (gl->gl_target == LM_ST_UNLOCKED)
1923 list_for_each_entry(gh, &gl->gl_holders, gh_list) {
1935 * @gl: Pointer to the glock
1942 void gfs2_glock_complete(struct gfs2_glock *gl, int ret)
1944 struct lm_lockstruct *ls = &gl->gl_name.ln_sbd->sd_lockstruct;
1946 spin_lock(&gl->gl_lockref.lock);
1947 gl->gl_reply = ret;
1950 if (gfs2_should_freeze(gl)) {
1951 set_bit(GLF_FROZEN, &gl->gl_flags);
1952 spin_unlock(&gl->gl_lockref.lock);
1957 gl->gl_lockref.count++;
1958 set_bit(GLF_REPLY_PENDING, &gl->gl_flags);
1959 __gfs2_glock_queue_work(gl, 0);
1960 spin_unlock(&gl->gl_lockref.lock);
1997 struct gfs2_glock *gl;
2002 gl = list_first_entry(list, struct gfs2_glock, gl_lru);
2003 list_del_init(&gl->gl_lru);
2004 clear_bit(GLF_LRU, &gl->gl_flags);
2005 if (!spin_trylock(&gl->gl_lockref.lock)) {
2007 list_add(&gl->gl_lru, &lru_list);
2008 set_bit(GLF_LRU, &gl->gl_flags);
2012 if (test_and_set_bit(GLF_LOCK, &gl->gl_flags)) {
2013 spin_unlock(&gl->gl_lockref.lock);
2016 gl->gl_lockref.count++;
2017 if (demote_ok(gl))
2018 handle_callback(gl, LM_ST_UNLOCKED, 0, false);
2019 WARN_ON(!test_and_clear_bit(GLF_LOCK, &gl->gl_flags));
2020 __gfs2_glock_queue_work(gl, 0);
2021 spin_unlock(&gl->gl_lockref.lock);
2037 struct gfs2_glock *gl, *next;
2042 list_for_each_entry_safe(gl, next, &lru_list, gl_lru) {
2046 if (!test_bit(GLF_LOCK, &gl->gl_flags)) {
2047 if (!spin_trylock(&gl->gl_lockref.lock))
2049 if (gl->gl_lockref.count <= 1 &&
2050 (gl->gl_state == LM_ST_UNLOCKED ||
2051 demote_ok(gl))) {
2052 list_move(&gl->gl_lru, &dispose);
2056 spin_unlock(&gl->gl_lockref.lock);
2094 struct gfs2_glock *gl;
2102 while ((gl = rhashtable_walk_next(&iter)) && !IS_ERR(gl)) {
2103 if (gl->gl_name.ln_sbd == sdp)
2104 examiner(gl);
2108 } while (cond_resched(), gl == ERR_PTR(-EAGAIN));
2113 void gfs2_cancel_delete_work(struct gfs2_glock *gl)
2115 clear_bit(GLF_TRY_TO_EVICT, &gl->gl_flags);
2116 clear_bit(GLF_VERIFY_EVICT, &gl->gl_flags);
2117 if (cancel_delayed_work(&gl->gl_delete))
2118 gfs2_glock_put(gl);
2121 static void flush_delete_work(struct gfs2_glock *gl)
2123 if (gl->gl_name.ln_type == LM_TYPE_IOPEN) {
2124 struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
2126 if (cancel_delayed_work(&gl->gl_delete)) {
2128 &gl->gl_delete, 0);
2141 * @gl: The glock to thaw
2145 static void thaw_glock(struct gfs2_glock *gl)
2147 if (!test_and_clear_bit(GLF_FROZEN, &gl->gl_flags))
2149 if (!lockref_get_not_dead(&gl->gl_lockref))
2151 set_bit(GLF_REPLY_PENDING, &gl->gl_flags);
2152 gfs2_glock_queue_work(gl, 0);
2157 * @gl: the glock to look at
2161 static void clear_glock(struct gfs2_glock *gl)
2163 gfs2_glock_remove_from_lru(gl);
2165 spin_lock(&gl->gl_lockref.lock);
2166 if (!__lockref_is_dead(&gl->gl_lockref)) {
2167 gl->gl_lockref.count++;
2168 if (gl->gl_state != LM_ST_UNLOCKED)
2169 handle_callback(gl, LM_ST_UNLOCKED, 0, false);
2170 __gfs2_glock_queue_work(gl, 0);
2172 spin_unlock(&gl->gl_lockref.lock);
2186 static void dump_glock(struct seq_file *seq, struct gfs2_glock *gl, bool fsid)
2188 spin_lock(&gl->gl_lockref.lock);
2189 gfs2_dump_glock(seq, gl, fsid);
2190 spin_unlock(&gl->gl_lockref.lock);
2193 static void dump_glock_func(struct gfs2_glock *gl)
2195 dump_glock(NULL, gl, true);
2198 static void withdraw_dq(struct gfs2_glock *gl)
2200 spin_lock(&gl->gl_lockref.lock);
2201 if (!__lockref_is_dead(&gl->gl_lockref) &&
2202 glock_blocked_by_withdraw(gl))
2203 do_error(gl, LM_OUT_ERROR); /* remove pending waiters */
2204 spin_unlock(&gl->gl_lockref.lock);
2307 static const char *gflags2str(char *buf, const struct gfs2_glock *gl)
2309 const unsigned long *gflags = &gl->gl_flags;
2332 if (!list_empty(&gl->gl_holders))
2336 if (gl->gl_object)
2357 * @gl: the glock
2372 void gfs2_dump_glock(struct seq_file *seq, struct gfs2_glock *gl, bool fsid)
2374 const struct gfs2_glock_operations *glops = gl->gl_ops;
2378 struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
2382 if (gl->gl_ops->go_flags & GLOF_ASPACE) {
2383 struct address_space *mapping = gfs2_glock2aspace(gl);
2390 dtime = jiffies - gl->gl_demote_time;
2392 if (!test_bit(GLF_DEMOTE, &gl->gl_flags))
2396 fs_id_buf, state2str(gl->gl_state),
2397 gl->gl_name.ln_type,
2398 (unsigned long long)gl->gl_name.ln_number,
2399 gflags2str(gflags_buf, gl),
2400 state2str(gl->gl_target),
2401 state2str(gl->gl_demote_state), dtime,
2402 atomic_read(&gl->gl_ail_count),
2403 atomic_read(&gl->gl_revokes),
2404 (int)gl->gl_lockref.count, gl->gl_hold_time, nrpages);
2406 list_for_each_entry(gh, &gl->gl_holders, gh_list)
2409 if (gl->gl_state != LM_ST_UNLOCKED && glops->go_dump)
2410 glops->go_dump(seq, gl, fs_id_buf);
2415 struct gfs2_glock *gl = iter_ptr;
2418 gl->gl_name.ln_type,
2419 (unsigned long long)gl->gl_name.ln_number,
2420 (unsigned long long)gl->gl_stats.stats[GFS2_LKS_SRTT],
2421 (unsigned long long)gl->gl_stats.stats[GFS2_LKS_SRTTVAR],
2422 (unsigned long long)gl->gl_stats.stats[GFS2_LKS_SRTTB],
2423 (unsigned long long)gl->gl_stats.stats[GFS2_LKS_SRTTVARB],
2424 (unsigned long long)gl->gl_stats.stats[GFS2_LKS_SIRT],
2425 (unsigned long long)gl->gl_stats.stats[GFS2_LKS_SIRTVAR],
2426 (unsigned long long)gl->gl_stats.stats[GFS2_LKS_DCOUNT],
2427 (unsigned long long)gl->gl_stats.stats[GFS2_LKS_QCOUNT]);
2527 struct gfs2_glock *gl = gi->gl;
2529 if (gl) {
2532 if (!lockref_put_not_zero(&gl->gl_lockref))
2533 gfs2_glock_queue_put(gl);
2536 gl = rhashtable_walk_next(&gi->hti);
2537 if (IS_ERR_OR_NULL(gl)) {
2538 if (gl == ERR_PTR(-EAGAIN)) {
2542 gl = NULL;
2545 if (gl->gl_name.ln_sbd != gi->sdp)
2548 if (!lockref_get_not_dead(&gl->gl_lockref))
2552 if (__lockref_is_dead(&gl->gl_lockref))
2557 gi->gl = gl;
2582 return gi->gl;
2593 return gi->gl;
2672 gi->gl = NULL;
2688 if (gi->gl)
2689 gfs2_glock_put(gi->gl);
2842 struct gfs2_glock *gl;
2845 gl = GFS2_I(inode)->i_iopen_gh.gh_gl;
2846 if (gl) {
2848 i->tgid, i->fd, gl->gl_name.ln_type,
2849 (unsigned long long)gl->gl_name.ln_number);
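The matches at 1765-1771 (gfs2_glock_nq_num) show how the entry points above fit together: look the glock up, queue a holder, then drop the lookup reference. The sketch below is an illustration only, not part of the listing — a hypothetical helper following that same pattern, assuming the usual fs/gfs2 local headers (shown here only as incore.h and glock.h) and the glock.h wrappers gfs2_glock_get, gfs2_glock_nq_init, gfs2_glock_put and gfs2_glock_dq_uninit.

	/*
	 * Illustrative sketch only (hypothetical helper, not from the listing):
	 * take a shared hold on the glock for lock number "number", following
	 * the gfs2_glock_nq_num() pattern matched at lines 1765-1771 above.
	 */
	#include "incore.h"	/* struct gfs2_sbd, struct gfs2_holder */
	#include "glock.h"	/* gfs2_glock_get(), gfs2_glock_nq_init(), CREATE */

	static int example_hold_shared(struct gfs2_sbd *sdp, u64 number,
				       const struct gfs2_glock_operations *glops,
				       struct gfs2_holder *gh)
	{
		struct gfs2_glock *gl;
		int error;

		/* Look up or create the glock; holds a reference on success. */
		error = gfs2_glock_get(sdp, number, glops, CREATE, &gl);
		if (error)
			return error;

		/*
		 * Queue a shared holder.  gfs2_holder_init() (line 1270 above)
		 * takes its own reference via gfs2_glock_hold(), so the lookup
		 * reference can be dropped once the holder is queued.
		 */
		error = gfs2_glock_nq_init(gl, LM_ST_SHARED, 0, gh);
		gfs2_glock_put(gl);
		return error;
	}

On success the caller later drops the hold with gfs2_glock_dq_uninit(gh); on failure gfs2_glock_nq_init() uninitializes the holder itself, so no cleanup is needed here.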