Lines Matching defs:gl (in /netgear-WNDR4500-V1.0.1.40_1.0.68/src/linux/linux-2.6/fs/gfs2/)

50 	struct gfs2_glock *gl;        /* current glock struct      */
56 typedef void (*glock_examiner) (struct gfs2_glock * gl);
59 static int dump_glock(struct glock_iter *gi, struct gfs2_glock *gl);
60 static void gfs2_glock_xmote_th(struct gfs2_glock *gl, struct gfs2_holder *gh);
61 static void gfs2_glock_drop_th(struct gfs2_glock *gl);
169 * @gl: The glock to release
175 static void glock_free(struct gfs2_glock *gl)
177 struct gfs2_sbd *sdp = gl->gl_sbd;
178 struct inode *aspace = gl->gl_aspace;
180 gfs2_lm_put_lock(sdp, gl->gl_lock);
185 kmem_cache_free(gfs2_glock_cachep, gl);
190 * @gl: The glock to hold
194 void gfs2_glock_hold(struct gfs2_glock *gl)
196 atomic_inc(&gl->gl_ref);
201 * @gl: The glock to put
205 int gfs2_glock_put(struct gfs2_glock *gl)
208 struct gfs2_sbd *sdp = gl->gl_sbd;
210 write_lock(gl_lock_addr(gl->gl_hash));
211 if (atomic_dec_and_test(&gl->gl_ref)) {
212 hlist_del(&gl->gl_list);
213 write_unlock(gl_lock_addr(gl->gl_hash));
214 BUG_ON(spin_is_locked(&gl->gl_spin));
215 gfs2_assert(sdp, gl->gl_state == LM_ST_UNLOCKED);
216 gfs2_assert(sdp, list_empty(&gl->gl_reclaim));
217 gfs2_assert(sdp, list_empty(&gl->gl_holders));
218 gfs2_assert(sdp, list_empty(&gl->gl_waiters1));
219 gfs2_assert(sdp, list_empty(&gl->gl_waiters3));
220 glock_free(gl);
224 write_unlock(gl_lock_addr(gl->gl_hash));
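The hold/put pair above is plain reference counting: gfs2_glock_hold() bumps gl_ref, while gfs2_glock_put() takes the bucket's write lock and, when the count reaches zero, unhashes the glock, asserts it is fully idle (unlocked, no holders, no waiters) and frees it. Below is a minimal userspace sketch of the same last-reference-frees-under-the-bucket-lock pattern, using C11 atomics and a pthread rwlock in place of gl_lock_addr(); the obj/obj_hold/obj_put names are inventions of the sketch, not kernel code.

#include <pthread.h>
#include <stdatomic.h>
#include <stdlib.h>

/* Simplified stand-in for struct gfs2_glock: a refcount plus a hash link. */
struct obj {
    atomic_int ref;
    struct obj *next;                    /* singly linked hash chain */
};

static pthread_rwlock_t bucket_lock = PTHREAD_RWLOCK_INITIALIZER;
static struct obj *bucket_head;

static void obj_hold(struct obj *o)
{
    atomic_fetch_add(&o->ref, 1);        /* cf. atomic_inc(&gl->gl_ref) */
}

/* Returns 1 if this call dropped the last reference and freed the object. */
static int obj_put(struct obj *o)
{
    pthread_rwlock_wrlock(&bucket_lock); /* cf. write_lock(gl_lock_addr(gl->gl_hash)) */
    if (atomic_fetch_sub(&o->ref, 1) == 1) {
        /* Last reference: unhash while still holding the bucket lock,
         * so a concurrent lookup cannot find a dying object. */
        struct obj **pp = &bucket_head;
        while (*pp && *pp != o)
            pp = &(*pp)->next;
        if (*pp)
            *pp = o->next;
        pthread_rwlock_unlock(&bucket_lock);
        free(o);
        return 1;
    }
    pthread_rwlock_unlock(&bucket_lock);
    return 0;
}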
241 struct gfs2_glock *gl;
244 hlist_for_each_entry(gl, h, &gl_hash_table[hash].hb_list, gl_list) {
245 if (!lm_name_equal(&gl->gl_name, name))
247 if (gl->gl_sbd != sdp)
250 atomic_inc(&gl->gl_ref);
252 return gl;
270 struct gfs2_glock *gl;
273 gl = search_bucket(hash, sdp, name);
276 return gl;
297 struct gfs2_glock *gl, *tmp;
302 gl = search_bucket(hash, sdp, &name);
305 if (gl || !create) {
306 *glp = gl;
310 gl = kmem_cache_alloc(gfs2_glock_cachep, GFP_KERNEL);
311 if (!gl)
314 gl->gl_flags = 0;
315 gl->gl_name = name;
316 atomic_set(&gl->gl_ref, 1);
317 gl->gl_state = LM_ST_UNLOCKED;
318 gl->gl_hash = hash;
319 gl->gl_owner_pid = 0;
320 gl->gl_ip = 0;
321 gl->gl_ops = glops;
322 gl->gl_req_gh = NULL;
323 gl->gl_req_bh = NULL;
324 gl->gl_vn = 0;
325 gl->gl_stamp = jiffies;
326 gl->gl_object = NULL;
327 gl->gl_sbd = sdp;
328 gl->gl_aspace = NULL;
329 lops_init_le(&gl->gl_le, &gfs2_glock_lops);
334 gl->gl_aspace = gfs2_aspace_get(sdp);
335 if (!gl->gl_aspace) {
341 error = gfs2_lm_get_lock(sdp, &name, &gl->gl_lock);
349 glock_free(gl);
350 gl = tmp;
352 hlist_add_head(&gl->gl_list, &gl_hash_table[hash].hb_list);
356 *glp = gl;
361 if (gl->gl_aspace)
362 gfs2_aspace_put(gl->gl_aspace);
364 kmem_cache_free(gfs2_glock_cachep, gl);
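gfs2_glock_get() above follows the classic lookup-or-create shape: search the hash bucket first, allocate and initialise a new glock outside any lock when the search misses, then re-search under the bucket's write lock before inserting; if another thread inserted the same name in the meantime, the freshly built glock is thrown away (glock_free(gl); gl = tmp) and the winner is returned. A hedged userspace sketch of that pattern follows; node/search/get_node and the single global table are invented for the illustration, and the aspace/lock-module setup and its error path are omitted.

#include <pthread.h>
#include <stdatomic.h>
#include <stdlib.h>

struct node {
    unsigned long key;
    _Atomic int ref;
    struct node *next;
};

static pthread_rwlock_t table_lock = PTHREAD_RWLOCK_INITIALIZER;
static struct node *table_head;

/* Caller must hold table_lock (either side); takes a reference on a hit. */
static struct node *search(unsigned long key)
{
    struct node *n;

    for (n = table_head; n; n = n->next)
        if (n->key == key) {
            n->ref++;                    /* cf. atomic_inc(&gl->gl_ref) */
            return n;
        }
    return NULL;
}

/* Find an existing node or create one, mirroring the shape of gfs2_glock_get(). */
static struct node *get_node(unsigned long key)
{
    struct node *n, *tmp;

    pthread_rwlock_rdlock(&table_lock);
    n = search(key);
    pthread_rwlock_unlock(&table_lock);
    if (n)
        return n;

    /* Allocate and initialise outside the lock, like kmem_cache_alloc()
     * followed by the long run of gl->gl_* assignments above. */
    n = calloc(1, sizeof(*n));
    if (!n)
        return NULL;
    n->key = key;
    n->ref = 1;

    pthread_rwlock_wrlock(&table_lock);
    tmp = search(key);
    if (tmp) {
        /* Lost the race: someone inserted the same key while we were
         * allocating.  Discard ours and use theirs
         * (cf. glock_free(gl); gl = tmp;). */
        pthread_rwlock_unlock(&table_lock);
        free(n);
        return tmp;
    }
    n->next = table_head;
    table_head = n;
    pthread_rwlock_unlock(&table_lock);
    return n;
}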
370 * @gl: the glock
377 void gfs2_holder_init(struct gfs2_glock *gl, unsigned int state, unsigned flags,
381 gh->gh_gl = gl;
388 gfs2_glock_hold(gl);
450 struct gfs2_glock *gl = gh->gh_gl;
454 set_bit(GLF_LOCK, &gl->gl_flags);
473 struct gfs2_glock *gl = gh->gh_gl;
474 struct gfs2_sbd *sdp = gl->gl_sbd;
476 if (!relaxed_state_ok(gl->gl_state, gh->gh_state, gh->gh_flags)) {
477 if (list_empty(&gl->gl_holders)) {
478 gl->gl_req_gh = gh;
479 set_bit(GLF_LOCK, &gl->gl_flags);
480 spin_unlock(&gl->gl_spin);
490 spin_lock(&gl->gl_spin);
495 if (list_empty(&gl->gl_holders)) {
497 set_bit(GLF_LOCK, &gl->gl_flags);
502 next_gh = list_entry(gl->gl_holders.next, struct gfs2_holder,
508 list_move_tail(&gh->gh_list, &gl->gl_holders);
524 static int rq_demote(struct gfs2_glock *gl)
526 if (!list_empty(&gl->gl_holders))
529 if (gl->gl_state == gl->gl_demote_state ||
530 gl->gl_state == LM_ST_UNLOCKED) {
531 clear_bit(GLF_DEMOTE, &gl->gl_flags);
534 set_bit(GLF_LOCK, &gl->gl_flags);
535 spin_unlock(&gl->gl_spin);
536 if (gl->gl_demote_state == LM_ST_UNLOCKED ||
537 gl->gl_state != LM_ST_EXCLUSIVE)
538 gfs2_glock_drop_th(gl);
540 gfs2_glock_xmote_th(gl, NULL);
541 spin_lock(&gl->gl_spin);
548 * @gl: the glock
551 static void run_queue(struct gfs2_glock *gl)
557 if (test_bit(GLF_LOCK, &gl->gl_flags))
560 if (!list_empty(&gl->gl_waiters1)) {
561 gh = list_entry(gl->gl_waiters1.next,
567 gfs2_assert_warn(gl->gl_sbd, 0);
569 } else if (test_bit(GLF_DEMOTE, &gl->gl_flags)) {
570 blocked = rq_demote(gl);
571 } else if (!list_empty(&gl->gl_waiters3)) {
572 gh = list_entry(gl->gl_waiters3.next,
578 gfs2_assert_warn(gl->gl_sbd, 0);
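Taken together, the run_queue() fragments show its dispatch order: nothing runs while GLF_LOCK is held, glmutex waiters on gl_waiters1 are served first, a pending demote (GLF_DEMOTE) comes next via rq_demote(), and ordinary lock requests on gl_waiters3 come last, with the loop stopping as soon as a request blocks. The compact sketch below restates only that ordering; fake_glock and the rq_*() helpers are toy stand-ins, not the kernel's versions, which walk real holder lists.

/* Each helper returns nonzero when the request could not complete and the
 * queue run must stop (the "blocked" convention used by run_queue()). */
struct fake_glock {
    int locked;                 /* cf. GLF_LOCK                     */
    int demote_pending;         /* cf. GLF_DEMOTE                   */
    int nr_mutex_waiters;       /* cf. list_empty(&gl->gl_waiters1) */
    int nr_holder_waiters;      /* cf. list_empty(&gl->gl_waiters3) */
};

static int rq_mutex(struct fake_glock *g)   { g->nr_mutex_waiters--;  g->locked = 1; return 1; }
static int rq_demote(struct fake_glock *g)  { g->demote_pending = 0;  return 0; }
static int rq_promote(struct fake_glock *g) { g->nr_holder_waiters--; return 0; }

static void run_queue(struct fake_glock *g)
{
    for (;;) {
        int blocked;

        if (g->locked)                  /* glock mutex already owned   */
            break;
        if (g->nr_mutex_waiters)        /* gl_waiters1 is served first */
            blocked = rq_mutex(g);
        else if (g->demote_pending)     /* then a pending demote       */
            blocked = rq_demote(g);
        else if (g->nr_holder_waiters)  /* then ordinary lock requests */
            blocked = rq_promote(g);
        else
            break;

        if (blocked)
            break;
    }
}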
590 * @gl: the glock
595 static void gfs2_glmutex_lock(struct gfs2_glock *gl)
599 gfs2_holder_init(gl, 0, 0, &gh);
604 spin_lock(&gl->gl_spin);
605 if (test_and_set_bit(GLF_LOCK, &gl->gl_flags)) {
606 list_add_tail(&gh.gh_list, &gl->gl_waiters1);
608 gl->gl_owner_pid = current->pid;
609 gl->gl_ip = (unsigned long)__builtin_return_address(0);
614 spin_unlock(&gl->gl_spin);
622 * @gl: the glock
627 static int gfs2_glmutex_trylock(struct gfs2_glock *gl)
631 spin_lock(&gl->gl_spin);
632 if (test_and_set_bit(GLF_LOCK, &gl->gl_flags)) {
635 gl->gl_owner_pid = current->pid;
636 gl->gl_ip = (unsigned long)__builtin_return_address(0);
638 spin_unlock(&gl->gl_spin);
645 * @gl: the glock
649 static void gfs2_glmutex_unlock(struct gfs2_glock *gl)
651 spin_lock(&gl->gl_spin);
652 clear_bit(GLF_LOCK, &gl->gl_flags);
653 gl->gl_owner_pid = 0;
654 gl->gl_ip = 0;
655 run_queue(gl);
656 BUG_ON(!spin_is_locked(&gl->gl_spin));
657 spin_unlock(&gl->gl_spin);
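gfs2_glmutex_lock()/trylock()/unlock() build a tiny mutex out of a single flag bit: test_and_set_bit(GLF_LOCK) under gl_spin is the acquire, the owner's pid and call site are recorded for debugging, contended lockers queue a holder on gl_waiters1 and sleep, and unlock clears the bit and calls run_queue() to pass the glock on. A userspace sketch of the trylock half of that pattern, using a C11 atomic_flag, is below; the sleep/wakeup path through gl_waiters1 is not reproduced, and bitmutex and its helpers are invented names.

#include <stdatomic.h>
#include <sys/types.h>
#include <unistd.h>

/* Illustrative bit-mutex, loosely modelled on the GLF_LOCK handling above.
 * The flag must be initialised with ATOMIC_FLAG_INIT. */
struct bitmutex {
    atomic_flag locked;          /* stands in for the GLF_LOCK bit   */
    pid_t owner_pid;             /* cf. gl->gl_owner_pid             */
    void *owner_ip;              /* cf. gl->gl_ip (caller's address) */
};

/* Non-blocking acquire, mirroring gfs2_glmutex_trylock(): a failed
 * test-and-set means somebody else already owns the mutex. */
static int bitmutex_trylock(struct bitmutex *m)
{
    if (atomic_flag_test_and_set(&m->locked))
        return 0;
    m->owner_pid = getpid();
    m->owner_ip = __builtin_return_address(0);   /* GCC/Clang builtin */
    return 1;
}

static void bitmutex_unlock(struct bitmutex *m)
{
    m->owner_pid = 0;
    m->owner_ip = 0;
    atomic_flag_clear(&m->locked);
    /* The kernel version then calls run_queue() under gl_spin to hand the
     * glock to the next waiter on gl_waiters1; that part is not shown here. */
}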
662 * @gl: the glock
669 static void handle_callback(struct gfs2_glock *gl, unsigned int state)
671 spin_lock(&gl->gl_spin);
672 if (test_and_set_bit(GLF_DEMOTE, &gl->gl_flags) == 0) {
673 gl->gl_demote_state = state;
674 gl->gl_demote_time = jiffies;
675 } else if (gl->gl_demote_state != LM_ST_UNLOCKED) {
676 gl->gl_demote_state = state;
678 spin_unlock(&gl->gl_spin);
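handle_callback() only records a demote request under gl_spin: the first caller sets GLF_DEMOTE and stamps gl_demote_state and gl_demote_time; a later caller may overwrite the requested state only while the recorded state is not LM_ST_UNLOCKED, so a pending request to drop the lock entirely is never weakened. A small illustrative restatement of that rule follows (gl_spin locking omitted, names invented).

/* Illustrative demote-request bookkeeping, modelled on handle_callback().
 * ST_UNLOCKED plays the role of LM_ST_UNLOCKED. */
enum { ST_UNLOCKED, ST_SHARED, ST_DEFERRED, ST_EXCLUSIVE };

struct demote_info {
    int pending;             /* cf. GLF_DEMOTE          */
    int target_state;        /* cf. gl->gl_demote_state */
    unsigned long stamp;     /* cf. gl->gl_demote_time  */
};

static void record_demote(struct demote_info *d, int state, unsigned long now)
{
    if (!d->pending) {
        d->pending = 1;              /* first request wins the timestamp */
        d->target_state = state;
        d->stamp = now;
    } else if (d->target_state != ST_UNLOCKED) {
        /* A later request may change the target, but a pending demote all
         * the way to "unlocked" is never weakened. */
        d->target_state = state;
    }
}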
683 * @gl: the glock
688 static void state_change(struct gfs2_glock *gl, unsigned int new_state)
692 held1 = (gl->gl_state != LM_ST_UNLOCKED);
697 gfs2_glock_hold(gl);
699 gfs2_glock_put(gl);
702 gl->gl_state = new_state;
707 * @gl: The glock in question
712 static void xmote_bh(struct gfs2_glock *gl, unsigned int ret)
714 struct gfs2_sbd *sdp = gl->gl_sbd;
715 const struct gfs2_glock_operations *glops = gl->gl_ops;
716 struct gfs2_holder *gh = gl->gl_req_gh;
717 int prev_state = gl->gl_state;
720 gfs2_assert_warn(sdp, test_bit(GLF_LOCK, &gl->gl_flags));
721 gfs2_assert_warn(sdp, list_empty(&gl->gl_holders));
724 state_change(gl, ret & LM_OUT_ST_MASK);
728 glops->go_inval(gl, DIO_METADATA);
729 } else if (gl->gl_state == LM_ST_DEFERRED) {
733 glops->go_inval(gl, 0);
739 gl->gl_stamp = jiffies;
743 clear_bit(GLF_DEMOTE, &gl->gl_flags);
745 spin_lock(&gl->gl_spin);
753 if (relaxed_state_ok(gl->gl_state, gh->gh_state, gh->gh_flags)) {
754 list_add_tail(&gh->gh_list, &gl->gl_holders);
768 spin_unlock(&gl->gl_spin);
772 glops->go_xmote_bh(gl);
775 spin_lock(&gl->gl_spin);
776 gl->gl_req_gh = NULL;
777 gl->gl_req_bh = NULL;
778 clear_bit(GLF_LOCK, &gl->gl_flags);
779 run_queue(gl);
780 spin_unlock(&gl->gl_spin);
783 gfs2_glock_put(gl);
791 * @gl: The glock in question
797 void gfs2_glock_xmote_th(struct gfs2_glock *gl, struct gfs2_holder *gh)
799 struct gfs2_sbd *sdp = gl->gl_sbd;
801 unsigned state = gh ? gh->gh_state : gl->gl_demote_state;
802 const struct gfs2_glock_operations *glops = gl->gl_ops;
809 glops->go_xmote_th(gl);
811 gfs2_assert_warn(sdp, test_bit(GLF_LOCK, &gl->gl_flags));
812 gfs2_assert_warn(sdp, list_empty(&gl->gl_holders));
814 gfs2_assert_warn(sdp, state != gl->gl_state);
816 gfs2_glock_hold(gl);
817 gl->gl_req_bh = xmote_bh;
819 lck_ret = gfs2_lm_lock(sdp, gl->gl_lock, gl->gl_state, state, lck_flags);
827 xmote_bh(gl, lck_ret);
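gfs2_glock_xmote_th() shows the asynchronous hand-off to the lock module: the glock takes an extra reference, xmote_bh is recorded in gl_req_bh as the completion handler, and gfs2_lm_lock() is called; if the lock module answers synchronously rather than deferring, the handler is simply invoked inline with the returned state (line 827). The sketch below illustrates that submit-then-maybe-complete-inline shape; RESULT_ASYNC, struct lockreq and submit_lock_request() are inventions of the sketch, and error and assertion handling are left out.

#define RESULT_ASYNC  (-1)    /* "answer will arrive later via the callback" */

struct lockreq {
    void (*done)(struct lockreq *req, int result);  /* cf. gl->gl_req_bh            */
    int refs;                                       /* extra ref held across the op */
};

/* Toy lock-manager call: returns RESULT_ASYNC or the final state. */
static int submit_lock_request(struct lockreq *req, int target_state)
{
    (void)req;
    return target_state;          /* this toy backend always answers inline */
}

static void request_state_change(struct lockreq *req, int target_state,
                                 void (*done)(struct lockreq *, int))
{
    req->refs++;                  /* cf. gfs2_glock_hold(gl)       */
    req->done = done;             /* cf. gl->gl_req_bh = xmote_bh  */

    int ret = submit_lock_request(req, target_state);
    if (ret != RESULT_ASYNC)
        req->done(req, ret);      /* synchronous answer: run the completion
                                     handler now, cf. xmote_bh(gl, lck_ret) */
    /* Otherwise the lock module invokes req->done() later from its own
     * completion context (cf. gfs2_glock_cb() calling gl->gl_req_bh()). */
}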
832 * @gl: the glock
840 static void drop_bh(struct gfs2_glock *gl, unsigned int ret)
842 struct gfs2_sbd *sdp = gl->gl_sbd;
843 const struct gfs2_glock_operations *glops = gl->gl_ops;
844 struct gfs2_holder *gh = gl->gl_req_gh;
846 gfs2_assert_warn(sdp, test_bit(GLF_LOCK, &gl->gl_flags));
847 gfs2_assert_warn(sdp, list_empty(&gl->gl_holders));
850 state_change(gl, LM_ST_UNLOCKED);
851 clear_bit(GLF_DEMOTE, &gl->gl_flags);
854 glops->go_inval(gl, DIO_METADATA);
857 spin_lock(&gl->gl_spin);
860 spin_unlock(&gl->gl_spin);
863 spin_lock(&gl->gl_spin);
864 gl->gl_req_gh = NULL;
865 gl->gl_req_bh = NULL;
866 clear_bit(GLF_LOCK, &gl->gl_flags);
867 run_queue(gl);
868 spin_unlock(&gl->gl_spin);
870 gfs2_glock_put(gl);
878 * @gl: the glock
882 static void gfs2_glock_drop_th(struct gfs2_glock *gl)
884 struct gfs2_sbd *sdp = gl->gl_sbd;
885 const struct gfs2_glock_operations *glops = gl->gl_ops;
889 glops->go_drop_th(gl);
891 gfs2_assert_warn(sdp, test_bit(GLF_LOCK, &gl->gl_flags));
892 gfs2_assert_warn(sdp, list_empty(&gl->gl_holders));
893 gfs2_assert_warn(sdp, gl->gl_state != LM_ST_UNLOCKED);
895 gfs2_glock_hold(gl);
896 gl->gl_req_bh = drop_bh;
898 ret = gfs2_lm_unlock(sdp, gl->gl_lock, gl->gl_state);
904 drop_bh(gl, ret);
918 struct gfs2_glock *gl = gh->gh_gl;
920 spin_lock(&gl->gl_spin);
922 while (gl->gl_req_gh != gh &&
925 if (gl->gl_req_bh && !(gl->gl_req_gh &&
926 (gl->gl_req_gh->gh_flags & GL_NOCANCEL))) {
927 spin_unlock(&gl->gl_spin);
928 gfs2_lm_cancel(gl->gl_sbd, gl->gl_lock);
930 spin_lock(&gl->gl_spin);
932 spin_unlock(&gl->gl_spin);
934 spin_lock(&gl->gl_spin);
938 spin_unlock(&gl->gl_spin);
950 struct gfs2_glock *gl = gh->gh_gl;
951 struct gfs2_sbd *sdp = gl->gl_sbd;
952 const struct gfs2_glock_operations *glops = gl->gl_ops;
958 spin_lock(&gl->gl_spin);
959 if (gl->gl_req_gh != gh &&
964 run_queue(gl);
965 spin_unlock(&gl->gl_spin);
968 spin_unlock(&gl->gl_spin);
979 gfs2_assert_withdraw(sdp, relaxed_state_ok(gl->gl_state, gh->gh_state,
983 gfs2_assert_warn(sdp, test_bit(GLF_LOCK, &gl->gl_flags));
988 spin_lock(&gl->gl_spin);
990 spin_unlock(&gl->gl_spin);
994 spin_lock(&gl->gl_spin);
995 gl->gl_req_gh = NULL;
996 gl->gl_req_bh = NULL;
997 clear_bit(GLF_LOCK, &gl->gl_flags);
998 run_queue(gl);
999 spin_unlock(&gl->gl_spin);
1040 struct gfs2_glock *gl = gh->gh_gl;
1047 existing = find_holder_by_owner(&gl->gl_holders, gh->gh_owner_pid);
1056 gl->gl_name.ln_type, gl->gl_state);
1060 existing = find_holder_by_owner(&gl->gl_waiters3, gh->gh_owner_pid);
1068 list_add(&gh->gh_list, &gl->gl_waiters3);
1070 list_add_tail(&gh->gh_list, &gl->gl_waiters3);
1084 struct gfs2_glock *gl = gh->gh_gl;
1085 struct gfs2_sbd *sdp = gl->gl_sbd;
1096 spin_lock(&gl->gl_spin);
1098 run_queue(gl);
1099 spin_unlock(&gl->gl_spin);
1121 struct gfs2_glock *gl = gh->gh_gl;
1124 spin_lock(&gl->gl_spin);
1130 spin_unlock(&gl->gl_spin);
1139 spin_unlock(&gl->gl_spin);
1173 struct gfs2_glock *gl = gh->gh_gl;
1174 const struct gfs2_glock_operations *glops = gl->gl_ops;
1177 handle_callback(gl, LM_ST_UNLOCKED);
1179 gfs2_glmutex_lock(gl);
1181 spin_lock(&gl->gl_spin);
1184 if (list_empty(&gl->gl_holders)) {
1185 spin_unlock(&gl->gl_spin);
1190 spin_lock(&gl->gl_spin);
1191 gl->gl_stamp = jiffies;
1194 clear_bit(GLF_LOCK, &gl->gl_flags);
1195 run_queue(gl);
1196 spin_unlock(&gl->gl_spin);
1227 struct gfs2_glock *gl;
1230 error = gfs2_glock_get(sdp, number, glops, CREATE, &gl);
1232 error = gfs2_glock_nq_init(gl, state, flags, gh);
1233 gfs2_glock_put(gl);
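The gfs2_glock_nq_num() fragment above is the acquire-by-number convenience: look the glock up (creating it if necessary), queue an initialised holder on it, and immediately drop the lookup reference, because the queued holder now pins the glock. A hedged usage fragment in the same style is shown below; it is not a standalone program, and the sdp, number, glops, state and flags values around it, as well as the surrounding error handling, are assumptions rather than lines from this file.

struct gfs2_glock *gl;
struct gfs2_holder gh;
int error;

error = gfs2_glock_get(sdp, number, glops, CREATE, &gl);
if (!error) {
        error = gfs2_glock_nq_init(gl, state, flags, &gh);
        /* The holder gh now carries its own reference on the glock, so the
         * reference returned by gfs2_glock_get() can be dropped at once. */
        gfs2_glock_put(gl);
}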
1403 * @gl: The glock in question
1407 int gfs2_lvb_hold(struct gfs2_glock *gl)
1411 gfs2_glmutex_lock(gl);
1413 if (!atomic_read(&gl->gl_lvb_count)) {
1414 error = gfs2_lm_hold_lvb(gl->gl_sbd, gl->gl_lock, &gl->gl_lvb);
1416 gfs2_glmutex_unlock(gl);
1419 gfs2_glock_hold(gl);
1421 atomic_inc(&gl->gl_lvb_count);
1423 gfs2_glmutex_unlock(gl);
1430 * @gl: The glock in question
1434 void gfs2_lvb_unhold(struct gfs2_glock *gl)
1436 gfs2_glock_hold(gl);
1437 gfs2_glmutex_lock(gl);
1439 gfs2_assert(gl->gl_sbd, atomic_read(&gl->gl_lvb_count) > 0);
1440 if (atomic_dec_and_test(&gl->gl_lvb_count)) {
1441 gfs2_lm_unhold_lvb(gl->gl_sbd, gl->gl_lock, gl->gl_lvb);
1442 gl->gl_lvb = NULL;
1443 gfs2_glock_put(gl);
1446 gfs2_glmutex_unlock(gl);
1447 gfs2_glock_put(gl);
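gfs2_lvb_hold()/gfs2_lvb_unhold() layer a second count on top of the glock reference count: the first holder acquires the lock value block from the lock module and takes a glock reference so the glock cannot disappear while the LVB is attached; the last unhold releases the LVB, clears gl_lvb and drops that reference, all under the glmutex. Below is a userspace sketch of the same lazily-acquired, counted sub-resource; here the "LVB" is just a 32-byte heap buffer, and struct thing and its helpers are invented names.

#include <pthread.h>
#include <stdlib.h>

struct thing {
    pthread_mutex_t mutex;    /* stands in for the glmutex  */
    int lvb_count;            /* cf. gl->gl_lvb_count       */
    char *lvb;                /* cf. gl->gl_lvb             */
};

static int thing_lvb_hold(struct thing *t)
{
    pthread_mutex_lock(&t->mutex);
    if (!t->lvb_count) {
        t->lvb = calloc(1, 32);            /* first holder acquires the LVB,
                                              cf. gfs2_lm_hold_lvb() */
        if (!t->lvb) {
            pthread_mutex_unlock(&t->mutex);
            return -1;
        }
        /* The kernel also takes a glock reference here so the glock cannot
         * go away while an LVB is attached (cf. gfs2_glock_hold(gl)). */
    }
    t->lvb_count++;
    pthread_mutex_unlock(&t->mutex);
    return 0;
}

static void thing_lvb_unhold(struct thing *t)
{
    pthread_mutex_lock(&t->mutex);
    if (--t->lvb_count == 0) {
        free(t->lvb);                      /* last holder releases it,
                                              cf. gfs2_lm_unhold_lvb() */
        t->lvb = NULL;
        /* ...and the kernel drops the extra glock reference taken above. */
    }
    pthread_mutex_unlock(&t->mutex);
}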
1453 struct gfs2_glock *gl;
1455 gl = gfs2_glock_find(sdp, name);
1456 if (!gl)
1459 handle_callback(gl, state);
1461 spin_lock(&gl->gl_spin);
1462 run_queue(gl);
1463 spin_unlock(&gl->gl_spin);
1465 gfs2_glock_put(gl);
1498 struct gfs2_glock *gl;
1501 gl = gfs2_glock_find(sdp, &async->lc_name);
1502 if (gfs2_assert_warn(sdp, gl))
1504 if (!gfs2_assert_warn(sdp, gl->gl_req_bh))
1505 gl->gl_req_bh(gl, async->lc_ret);
1506 gfs2_glock_put(gl);
1530 * @gl: the glock
1535 static int demote_ok(struct gfs2_glock *gl)
1537 const struct gfs2_glock_operations *glops = gl->gl_ops;
1540 if (test_bit(GLF_STICKY, &gl->gl_flags))
1543 demote = glops->go_demote_ok(gl);
1550 * @gl: the glock
1554 void gfs2_glock_schedule_for_reclaim(struct gfs2_glock *gl)
1556 struct gfs2_sbd *sdp = gl->gl_sbd;
1559 if (list_empty(&gl->gl_reclaim)) {
1560 gfs2_glock_hold(gl);
1561 list_add(&gl->gl_reclaim, &sdp->sd_reclaim_list);
1581 struct gfs2_glock *gl;
1588 gl = list_entry(sdp->sd_reclaim_list.next,
1590 list_del_init(&gl->gl_reclaim);
1596 if (gfs2_glmutex_trylock(gl)) {
1597 if (list_empty(&gl->gl_holders) &&
1598 gl->gl_state != LM_ST_UNLOCKED && demote_ok(gl))
1599 handle_callback(gl, LM_ST_UNLOCKED);
1600 gfs2_glmutex_unlock(gl);
1603 gfs2_glock_put(gl);
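gfs2_reclaim_glock() is a best-effort pass: it pops one glock from sd_reclaim_list, and only if it can take the glmutex without blocking and the glock is idle (no holders, not already unlocked) and its type agrees via demote_ok(), it queues a demote to LM_ST_UNLOCKED through handle_callback(); either way the reference taken when the glock was scheduled for reclaim is dropped. A single-threaded sketch of that shape follows; all names are invented, the demote_ok() per-type check is folded away, and the real locking and refcounting primitives are replaced by plain ints.

struct cached_item {
    struct cached_item *next_reclaim;  /* cf. the gl_reclaim list link         */
    int mutex_taken;                   /* cf. the GLF_LOCK bit                 */
    int in_use;                        /* cf. !list_empty(&gl->gl_holders)     */
    int unlocked;                      /* cf. gl->gl_state == LM_ST_UNLOCKED   */
    int demote_requested;              /* cf. GLF_DEMOTE via handle_callback() */
    int refs;
};

static int item_trylock(struct cached_item *it)
{
    if (it->mutex_taken)
        return 0;
    it->mutex_taken = 1;
    return 1;
}

static void reclaim_one(struct cached_item **reclaim_list)
{
    struct cached_item *it = *reclaim_list;

    if (!it)
        return;
    *reclaim_list = it->next_reclaim;   /* pop one entry, cf. list_del_init() */

    if (item_trylock(it)) {             /* never block the reclaim thread     */
        if (!it->in_use && !it->unlocked)
            it->demote_requested = 1;   /* cf. handle_callback(gl, LM_ST_UNLOCKED) */
        it->mutex_taken = 0;            /* cf. gfs2_glmutex_unlock(gl)        */
    }
    it->refs--;                         /* drop the ref taken when queued     */
}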
1618 struct gfs2_glock *gl, *prev = NULL;
1626 gl = list_entry(head->first, struct gfs2_glock, gl_list);
1628 if (gl->gl_sbd == sdp) {
1629 gfs2_glock_hold(gl);
1633 prev = gl;
1634 examiner(gl);
1638 if (gl->gl_list.next == NULL)
1640 gl = list_entry(gl->gl_list.next, struct gfs2_glock, gl_list);
1651 * @gl: the glock to look at
1655 static void scan_glock(struct gfs2_glock *gl)
1657 if (gl->gl_ops == &gfs2_inode_glops && gl->gl_object)
1660 if (gfs2_glmutex_trylock(gl)) {
1661 if (list_empty(&gl->gl_holders) &&
1662 gl->gl_state != LM_ST_UNLOCKED && demote_ok(gl))
1664 gfs2_glmutex_unlock(gl);
1669 gfs2_glmutex_unlock(gl);
1670 gfs2_glock_schedule_for_reclaim(gl);
1689 * @gl: the glock to look at
1693 static void clear_glock(struct gfs2_glock *gl)
1695 struct gfs2_sbd *sdp = gl->gl_sbd;
1699 if (!list_empty(&gl->gl_reclaim)) {
1700 list_del_init(&gl->gl_reclaim);
1703 released = gfs2_glock_put(gl);
1709 if (gfs2_glmutex_trylock(gl)) {
1710 if (list_empty(&gl->gl_holders) &&
1711 gl->gl_state != LM_ST_UNLOCKED)
1712 handle_callback(gl, LM_ST_UNLOCKED);
1713 gfs2_glmutex_unlock(gl);
1838 * @gl: the glock
1844 static int dump_glock(struct glock_iter *gi, struct gfs2_glock *gl)
1851 spin_lock(&gl->gl_spin);
1853 print_dbg(gi, "Glock 0x%p (%u, %llu)\n", gl, gl->gl_name.ln_type,
1854 (unsigned long long)gl->gl_name.ln_number);
1857 if (test_bit(x, &gl->gl_flags))
1860 if (!test_bit(GLF_LOCK, &gl->gl_flags))
1863 print_dbg(gi, " gl_ref = %d\n", atomic_read(&gl->gl_ref));
1864 print_dbg(gi, " gl_state = %u\n", gl->gl_state);
1865 if (gl->gl_owner_pid) {
1866 gl_owner = find_task_by_pid(gl->gl_owner_pid);
1869 gl->gl_owner_pid, gl_owner->comm);
1872 gl->gl_owner_pid);
1875 print_dbg(gi, " gl_ip = %lu\n", gl->gl_ip);
1876 print_dbg(gi, " req_gh = %s\n", (gl->gl_req_gh) ? "yes" : "no");
1877 print_dbg(gi, " req_bh = %s\n", (gl->gl_req_bh) ? "yes" : "no");
1878 print_dbg(gi, " lvb_count = %d\n", atomic_read(&gl->gl_lvb_count));
1879 print_dbg(gi, " object = %s\n", (gl->gl_object) ? "yes" : "no");
1881 (list_empty(&gl->gl_le.le_list)) ? "no" : "yes");
1883 (list_empty(&gl->gl_reclaim)) ? "no" : "yes");
1884 if (gl->gl_aspace)
1885 print_dbg(gi, " aspace = 0x%p nrpages = %lu\n", gl->gl_aspace,
1886 gl->gl_aspace->i_mapping->nrpages);
1889 print_dbg(gi, " ail = %d\n", atomic_read(&gl->gl_ail_count));
1890 if (gl->gl_req_gh) {
1891 error = dump_holder(gi, "Request", gl->gl_req_gh);
1895 list_for_each_entry(gh, &gl->gl_holders, gh_list) {
1900 list_for_each_entry(gh, &gl->gl_waiters1, gh_list) {
1905 list_for_each_entry(gh, &gl->gl_waiters3, gh_list) {
1910 if (test_bit(GLF_DEMOTE, &gl->gl_flags)) {
1912 gl->gl_demote_state,
1913 (u64)(jiffies - gl->gl_demote_time)*(1000000/HZ));
1915 if (gl->gl_ops == &gfs2_inode_glops && gl->gl_object) {
1916 if (!test_bit(GLF_LOCK, &gl->gl_flags) &&
1917 list_empty(&gl->gl_holders)) {
1918 error = dump_inode(gi, gl->gl_object);
1930 spin_unlock(&gl->gl_spin);
1945 struct gfs2_glock *gl;
1954 hlist_for_each_entry(gl, h, &gl_hash_table[x].hb_list, gl_list) {
1955 if (gl->gl_sbd != sdp)
1958 error = dump_glock(NULL, gl);
2006 gi->gl = list_entry(gi->hb_list->first,
2011 if (gi->gl->gl_list.next == NULL) {
2018 gi->gl = list_entry(gi->gl->gl_list.next,
2021 if (gi->gl)
2043 gi->gl = NULL;
2100 dump_glock(gi, gi->gl);