Lines matching defs:gh in /netgear-WNDR4500-V1.0.1.40_1.0.68/src/linux/linux-2.6/fs/gfs2/

60 static void gfs2_glock_xmote_th(struct gfs2_glock *gl, struct gfs2_holder *gh);
373 * @gh: the holder structure
378 struct gfs2_holder *gh)
380 INIT_LIST_HEAD(&gh->gh_list);
381 gh->gh_gl = gl;
382 gh->gh_ip = (unsigned long)__builtin_return_address(0);
383 gh->gh_owner_pid = current->pid;
384 gh->gh_state = state;
385 gh->gh_flags = flags;
386 gh->gh_error = 0;
387 gh->gh_iflags = 0;
395 * @gh: the holder structure
401 void gfs2_holder_reinit(unsigned int state, unsigned flags, struct gfs2_holder *gh)
403 gh->gh_state = state;
404 gh->gh_flags = flags;
405 gh->gh_iflags = 0;
406 gh->gh_ip = (unsigned long)__builtin_return_address(0);
411 * @gh: the holder structure
415 void gfs2_holder_uninit(struct gfs2_holder *gh)
417 gfs2_glock_put(gh->gh_gl);
418 gh->gh_gl = NULL;
419 gh->gh_ip = 0;
422 static void gfs2_holder_wake(struct gfs2_holder *gh)
424 clear_bit(HIF_WAIT, &gh->gh_iflags);
426 wake_up_bit(&gh->gh_iflags, HIF_WAIT);
435 static void wait_on_holder(struct gfs2_holder *gh)
438 wait_on_bit(&gh->gh_iflags, HIF_WAIT, holder_wait, TASK_UNINTERRUPTIBLE);
443 * @gh: the glock holder
448 static int rq_mutex(struct gfs2_holder *gh)
450 struct gfs2_glock *gl = gh->gh_gl;
452 list_del_init(&gh->gh_list);
453 /* gh->gh_error never examined. */
455 clear_bit(HIF_WAIT, &gh->gh_iflags);
457 wake_up_bit(&gh->gh_iflags, HIF_WAIT);
464 * @gh: the glock holder
471 static int rq_promote(struct gfs2_holder *gh)
473 struct gfs2_glock *gl = gh->gh_gl;
476 if (!relaxed_state_ok(gl->gl_state, gh->gh_state, gh->gh_flags)) {
478 gl->gl_req_gh = gh;
484 !(gh->gh_flags & LM_FLAG_PRIORITY)) {
489 gfs2_glock_xmote_th(gh->gh_gl, gh);
496 set_bit(HIF_FIRST, &gh->gh_iflags);
500 if (gh->gh_state == LM_ST_EXCLUSIVE)
508 list_move_tail(&gh->gh_list, &gl->gl_holders);
509 gh->gh_error = 0;
510 set_bit(HIF_HOLDER, &gh->gh_iflags);
512 gfs2_holder_wake(gh);
519 * @gh: the glock holder
553 struct gfs2_holder *gh;
561 gh = list_entry(gl->gl_waiters1.next,
564 if (test_bit(HIF_MUTEX, &gh->gh_iflags))
565 blocked = rq_mutex(gh);
572 gh = list_entry(gl->gl_waiters3.next,
575 if (test_bit(HIF_PROMOTE, &gh->gh_iflags))
576 blocked = rq_promote(gh);
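The run_queue fragments at 553-576 show the dispatch order for queued holders: glock-mutex requests on gl_waiters1 are serviced before promotion requests on gl_waiters3. The skeleton below is a heavily simplified reconstruction from those fragments alone (the surrounding locking, empty-queue handling and any other waiter classes are omitted); it is not the actual function from this tree.

/* Simplified sketch of the dispatch visible at lines 561-576. */
static void example_run_queue(struct gfs2_glock *gl)
{
        struct gfs2_holder *gh;
        int blocked = 0;

        for (;;) {
                if (!list_empty(&gl->gl_waiters1)) {
                        gh = list_entry(gl->gl_waiters1.next,
                                        struct gfs2_holder, gh_list);
                        if (test_bit(HIF_MUTEX, &gh->gh_iflags))
                                blocked = rq_mutex(gh);      /* lines 564-565 */
                } else if (!list_empty(&gl->gl_waiters3)) {
                        gh = list_entry(gl->gl_waiters3.next,
                                        struct gfs2_holder, gh_list);
                        if (test_bit(HIF_PROMOTE, &gh->gh_iflags))
                                blocked = rq_promote(gh);    /* lines 575-576 */
                } else {
                        break;          /* nothing queued */
                }
                if (blocked)
                        break;          /* current request cannot proceed yet */
        }
}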
597 struct gfs2_holder gh;
599 gfs2_holder_init(gl, 0, 0, &gh);
600 set_bit(HIF_MUTEX, &gh.gh_iflags);
601 if (test_and_set_bit(HIF_WAIT, &gh.gh_iflags))
606 list_add_tail(&gh.gh_list, &gl->gl_waiters1);
610 clear_bit(HIF_WAIT, &gh.gh_iflags);
612 wake_up_bit(&gh.gh_iflags, HIF_WAIT);
616 wait_on_holder(&gh);
617 gfs2_holder_uninit(&gh);
716 struct gfs2_holder *gh = gl->gl_req_gh;
738 if (!gh) {
746 list_del_init(&gh->gh_list);
747 gh->gh_error = -EIO;
750 gh->gh_error = GLR_CANCELED;
753 if (relaxed_state_ok(gl->gl_state, gh->gh_state, gh->gh_flags)) {
754 list_add_tail(&gh->gh_list, &gl->gl_holders);
755 gh->gh_error = 0;
756 set_bit(HIF_HOLDER, &gh->gh_iflags);
757 set_bit(HIF_FIRST, &gh->gh_iflags);
761 gh->gh_error = GLR_TRYFAILED;
762 if (gh->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB))
764 gh->gh_error = -EINVAL;
785 if (gh)
786 gfs2_holder_wake(gh);
797 void gfs2_glock_xmote_th(struct gfs2_glock *gl, struct gfs2_holder *gh)
800 int flags = gh ? gh->gh_flags : 0;
801 unsigned state = gh ? gh->gh_state : gl->gl_demote_state;
844 struct gfs2_holder *gh = gl->gl_req_gh;
856 if (gh) {
858 list_del_init(&gh->gh_list);
859 gh->gh_error = 0;
872 if (gh)
873 gfs2_holder_wake(gh);
911 * @gh: the LM_FLAG_PRIORITY holder waiting to acquire the lock
916 static void do_cancels(struct gfs2_holder *gh)
918 struct gfs2_glock *gl = gh->gh_gl;
922 while (gl->gl_req_gh != gh &&
923 !test_bit(HIF_HOLDER, &gh->gh_iflags) &&
924 !list_empty(&gh->gh_list)) {
943 * @gh: the glock holder
948 static int glock_wait_internal(struct gfs2_holder *gh)
950 struct gfs2_glock *gl = gh->gh_gl;
954 if (test_bit(HIF_ABORTED, &gh->gh_iflags))
957 if (gh->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB)) {
959 if (gl->gl_req_gh != gh &&
960 !test_bit(HIF_HOLDER, &gh->gh_iflags) &&
961 !list_empty(&gh->gh_list)) {
962 list_del_init(&gh->gh_list);
963 gh->gh_error = GLR_TRYFAILED;
966 return gh->gh_error;
971 if (gh->gh_flags & LM_FLAG_PRIORITY)
972 do_cancels(gh);
974 wait_on_holder(gh);
975 if (gh->gh_error)
976 return gh->gh_error;
978 gfs2_assert_withdraw(sdp, test_bit(HIF_HOLDER, &gh->gh_iflags));
979 gfs2_assert_withdraw(sdp, relaxed_state_ok(gl->gl_state, gh->gh_state,
980 gh->gh_flags));
982 if (test_bit(HIF_FIRST, &gh->gh_iflags)) {
986 gh->gh_error = glops->go_lock(gh);
987 if (gh->gh_error) {
989 list_del_init(&gh->gh_list);
1002 return gh->gh_error;
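The LM_FLAG_TRY handling in glock_wait_internal (lines 957-966) dequeues an unsatisfied request and reports GLR_TRYFAILED instead of waiting. A hedged sketch of how a caller might use that; the wrapper name is invented, the state is illustrative, and the -EAGAIN mapping is only an example.

/* Illustrative sketch: try to take the glock without waiting. */
static int example_trylock(struct gfs2_glock *gl)
{
        struct gfs2_holder gh;
        int error;

        gfs2_holder_init(gl, LM_ST_EXCLUSIVE, LM_FLAG_TRY, &gh);

        error = gfs2_glock_nq(&gh);
        if (error == GLR_TRYFAILED) {
                /* Not immediately grantable (lines 962-963); back off. */
                gfs2_holder_uninit(&gh);
                return -EAGAIN;         /* illustrative return value */
        }
        if (error) {
                gfs2_holder_uninit(&gh);
                return error;
        }

        /* ... the glock is held here ... */

        gfs2_glock_dq(&gh);
        gfs2_holder_uninit(&gh);
        return 0;
}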
1008 struct gfs2_holder *gh;
1010 list_for_each_entry(gh, head, gh_list) {
1011 if (gh->gh_owner_pid == pid)
1012 return gh;
1034 * @gh: the holder structure to add
1038 static void add_to_queue(struct gfs2_holder *gh)
1040 struct gfs2_glock *gl = gh->gh_gl;
1043 BUG_ON(!gh->gh_owner_pid);
1044 if (test_and_set_bit(HIF_WAIT, &gh->gh_iflags))
1047 existing = find_holder_by_owner(&gl->gl_holders, gh->gh_owner_pid);
1053 print_symbol(KERN_WARNING "new: %s\n", gh->gh_ip);
1054 printk(KERN_INFO "pid : %d\n", gh->gh_owner_pid);
1060 existing = find_holder_by_owner(&gl->gl_waiters3, gh->gh_owner_pid);
1063 print_symbol(KERN_WARNING "new: %s\n", gh->gh_ip);
1067 if (gh->gh_flags & LM_FLAG_PRIORITY)
1068 list_add(&gh->gh_list, &gl->gl_waiters3);
1070 list_add_tail(&gh->gh_list, &gl->gl_waiters3);
1075 * @gh: the holder structure
1077 * if (gh->gh_flags & GL_ASYNC), this never returns an error
1082 int gfs2_glock_nq(struct gfs2_holder *gh)
1084 struct gfs2_glock *gl = gh->gh_gl;
1090 set_bit(HIF_ABORTED, &gh->gh_iflags);
1094 set_bit(HIF_PROMOTE, &gh->gh_iflags);
1097 add_to_queue(gh);
1101 if (!(gh->gh_flags & GL_ASYNC)) {
1102 error = glock_wait_internal(gh);
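Together with the holder helpers earlier in the listing, the gfs2_glock_nq matches at 1082-1102 give the blocking acquire path: without GL_ASYNC the call waits internally via glock_wait_internal (line 1102). A minimal sketch of the usual enqueue/dequeue pairing, again with an invented wrapper name and an illustrative lock state.

/* Illustrative sketch: blocking acquire and release. */
static int example_exclusive(struct gfs2_glock *gl)
{
        struct gfs2_holder gh;
        int error;

        gfs2_holder_init(gl, LM_ST_EXCLUSIVE, 0, &gh);

        error = gfs2_glock_nq(&gh);     /* queues and waits (no GL_ASYNC) */
        if (error) {
                gfs2_holder_uninit(&gh);
                return error;
        }

        /* ... critical section: the glock is held ... */

        gfs2_glock_dq(&gh);             /* lines 1171-1188 */
        gfs2_holder_uninit(&gh);
        return 0;
}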
1114 * @gh: the holder
1119 int gfs2_glock_poll(struct gfs2_holder *gh)
1121 struct gfs2_glock *gl = gh->gh_gl;
1126 if (test_bit(HIF_HOLDER, &gh->gh_iflags))
1128 else if (list_empty(&gh->gh_list)) {
1129 if (gh->gh_error == GLR_CANCELED) {
1132 if (gfs2_glock_nq(gh))
1146 * @gh: the holder structure
1151 int gfs2_glock_wait(struct gfs2_holder *gh)
1155 error = glock_wait_internal(gh);
1158 gh->gh_flags &= ~GL_ASYNC;
1159 error = gfs2_glock_nq(gh);
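The gfs2_glock_poll and gfs2_glock_wait matches at 1119-1159, plus the GL_ASYNC note at line 1077, describe an asynchronous variant: the request is only queued, polled for completion, and its result collected later (with a re-queue on GLR_CANCELED at lines 1158-1159). A hedged sketch of that flow; the wrapper name is invented and the state is illustrative.

/* Illustrative sketch: asynchronous acquire with GL_ASYNC. */
static int example_async(struct gfs2_glock *gl)
{
        struct gfs2_holder gh;
        int error;

        gfs2_holder_init(gl, LM_ST_EXCLUSIVE, GL_ASYNC, &gh);

        /* Per line 1077, gfs2_glock_nq() never returns an error with
         * GL_ASYNC set; it returns as soon as the holder is queued. */
        gfs2_glock_nq(&gh);

        while (!gfs2_glock_poll(&gh)) {
                /* Request still pending; real callers do other work here. */
                cond_resched();
        }

        error = gfs2_glock_wait(&gh);   /* final status; may re-queue on cancel */
        if (error) {
                gfs2_holder_uninit(&gh);
                return error;
        }

        /* ... the glock is held here ... */

        gfs2_glock_dq(&gh);
        gfs2_holder_uninit(&gh);
        return 0;
}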
1167 * @gh: the glock holder
1171 void gfs2_glock_dq(struct gfs2_holder *gh)
1173 struct gfs2_glock *gl = gh->gh_gl;
1176 if (gh->gh_flags & GL_NOCACHE)
1182 list_del_init(&gh->gh_list);
1188 glops->go_unlock(gh);
1201 * @gh: the holder structure
1205 void gfs2_glock_dq_uninit(struct gfs2_holder *gh)
1207 gfs2_glock_dq(gh);
1208 gfs2_holder_uninit(gh);
1218 * @gh: the struct gfs2_holder
1225 unsigned int state, int flags, struct gfs2_holder *gh)
1232 error = gfs2_glock_nq_init(gl, state, flags, gh);
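The gfs2_glock_nq_init call visible at line 1232, together with gfs2_glock_dq_uninit at 1205-1208, points at the convenience wrappers that fold holder setup into the enqueue and teardown into the dequeue. A short sketch, with an invented wrapper name and illustrative state/flags.

/* Illustrative sketch: the combined init+nq / dq+uninit pair. */
static int example_nq_init(struct gfs2_glock *gl)
{
        struct gfs2_holder gh;
        int error;

        error = gfs2_glock_nq_init(gl, LM_ST_EXCLUSIVE, 0, &gh);
        if (error)
                return error;

        /* ... the glock is held here ... */

        gfs2_glock_dq_uninit(&gh);      /* gfs2_glock_dq() + gfs2_holder_uninit() */
        return 0;
}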
1775 * @gh: the glock holder
1781 struct gfs2_holder *gh)
1787 if (gh->gh_owner_pid) {
1788 print_dbg(gi, " owner = %ld ", (long)gh->gh_owner_pid);
1789 gh_owner = find_task_by_pid(gh->gh_owner_pid);
1796 print_dbg(gi, " gh_state = %u\n", gh->gh_state);
1799 if (gh->gh_flags & (1 << x))
1802 print_dbg(gi, " error = %d\n", gh->gh_error);
1805 if (test_bit(x, &gh->gh_iflags))
1808 gfs2_print_symbol(gi, " initialized at: %s\n", gh->gh_ip);
1846 struct gfs2_holder *gh;
1895 list_for_each_entry(gh, &gl->gl_holders, gh_list) {
1896 error = dump_holder(gi, "Holder", gh);
1900 list_for_each_entry(gh, &gl->gl_waiters1, gh_list) {
1901 error = dump_holder(gi, "Waiter1", gh);
1905 list_for_each_entry(gh, &gl->gl_waiters3, gh_list) {
1906 error = dump_holder(gi, "Waiter3", gh);