Lines matching refs:lockres in /netgear-WNDR4500v2-V1.0.0.60_1.0.38/src/linux/linux-2.6/fs/ocfs2/dlm/
(Each line below is prefixed with its line number in the source file; non-matching lines are elided.)

49 static inline int user_check_wait_flag(struct user_lock_res *lockres,
54 spin_lock(&lockres->l_lock);
55 ret = lockres->l_flags & flag;
56 spin_unlock(&lockres->l_lock);
61 static inline void user_wait_on_busy_lock(struct user_lock_res *lockres)
64 wait_event(lockres->l_event,
65 !user_check_wait_flag(lockres, USER_LOCK_BUSY));
68 static inline void user_wait_on_blocked_lock(struct user_lock_res *lockres)
71 wait_event(lockres->l_event,
72 !user_check_wait_flag(lockres, USER_LOCK_BLOCKED));
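
These three helpers implement the listing's basic wait pattern: the flag word is sampled under the spinlock, and callers sleep on the l_event wait queue until the BUSY or BLOCKED bit clears. Below is a minimal userspace analogue, with a pthread mutex and condition variable standing in for the spinlock and wait queue. Every name, constant, and field in it is a stand-in invented for illustration (not the kernel's definitions); the later sketches in this listing reuse these demo types.

    /* demo_userdlm.c -- illustrative analogue only, not the kernel code. */
    #include <pthread.h>

    /* Stand-in lock levels, ordered by strength as in a VMS-style DLM. */
    enum { LKM_IVMODE = -1, LKM_NLMODE = 0, LKM_PRMODE = 3, LKM_EXMODE = 5 };

    /* Stand-in flag bits; the real values live in the kernel headers. */
    #define USER_LOCK_ATTACHED    0x01
    #define USER_LOCK_BUSY        0x02
    #define USER_LOCK_BLOCKED     0x04
    #define USER_LOCK_IN_TEARDOWN 0x08
    #define USER_LOCK_QUEUED      0x10
    #define USER_LOCK_IN_CANCEL   0x20

    struct demo_lock_res {
        pthread_mutex_t l_lock;      /* stands in for spin_lock */
        pthread_cond_t  l_event;     /* stands in for the wait queue */
        int             l_flags;
        int             l_level;     /* currently granted level */
        int             l_requested; /* level last asked of the DLM */
        int             l_blocking;  /* strongest conflicting remote request */
        unsigned int    l_ro_holders;
        unsigned int    l_ex_holders;
        char            l_lvb[64];   /* lock value block payload */
    };

    /* Sample the flag word under the lock, as user_check_wait_flag() does. */
    static int demo_check_wait_flag(struct demo_lock_res *lockres, int flag)
    {
        int ret;
        pthread_mutex_lock(&lockres->l_lock);
        ret = lockres->l_flags & flag;
        pthread_mutex_unlock(&lockres->l_lock);
        return ret;
    }

    /* Equivalent of wait_event(l_event, !flag): sleep until the bit
     * clears. pthread_cond_wait() can wake spuriously, hence the loop. */
    static void demo_wait_on_flag(struct demo_lock_res *lockres, int flag)
    {
        pthread_mutex_lock(&lockres->l_lock);
        while (lockres->l_flags & flag)
            pthread_cond_wait(&lockres->l_event, &lockres->l_lock);
        pthread_mutex_unlock(&lockres->l_lock);
    }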
77 dlm_ctxt_from_user_lockres(struct user_lock_res *lockres)
81 ip = container_of(lockres,
88 user_dlm_inode_from_user_lockres(struct user_lock_res *lockres)
92 ip = container_of(lockres,
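
Lines 77-92 recover the enclosing dlmfs inode (and from it the dlm_ctxt) from an embedded lock resource via container_of(). The idiom in miniature, as a standalone program with illustrative struct names:

    /* container_of: recover the outer struct from a pointer to one of
     * its members, by subtracting the member's offset. */
    #include <stddef.h>
    #include <stdio.h>

    #define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

    struct demo_inode_private {
        int ip_cookie;                    /* placeholder field */
        struct { int dummy; } ip_lockres; /* embedded member */
    };

    int main(void)
    {
        struct demo_inode_private ip = { .ip_cookie = 42 };
        void *member = &ip.ip_lockres;    /* all a callback may receive */

        struct demo_inode_private *back =
            container_of(member, struct demo_inode_private, ip_lockres);
        printf("%d\n", back->ip_cookie);  /* prints 42 */
        return 0;
    }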
98 static inline void user_recover_from_dlm_error(struct user_lock_res *lockres)
100 spin_lock(&lockres->l_lock);
101 lockres->l_flags &= ~USER_LOCK_BUSY;
102 spin_unlock(&lockres->l_lock);
127 struct user_lock_res *lockres = opaque;
130 mlog(0, "AST fired for lockres %.*s\n", lockres->l_namelen,
131 lockres->l_name);
133 spin_lock(&lockres->l_lock);
135 lksb = &(lockres->l_lksb);
137 mlog(ML_ERROR, "lksb status value of %u on lockres %.*s\n",
138 lksb->status, lockres->l_namelen, lockres->l_name);
139 spin_unlock(&lockres->l_lock);
143 mlog_bug_on_msg(lockres->l_requested == LKM_IVMODE,
145 lockres->l_namelen, lockres->l_name, lockres->l_flags);
148 if (lockres->l_requested < lockres->l_level) {
149 if (lockres->l_requested <=
150 user_highest_compat_lock_level(lockres->l_blocking)) {
151 lockres->l_blocking = LKM_NLMODE;
152 lockres->l_flags &= ~USER_LOCK_BLOCKED;
156 lockres->l_level = lockres->l_requested;
157 lockres->l_requested = LKM_IVMODE;
158 lockres->l_flags |= USER_LOCK_ATTACHED;
159 lockres->l_flags &= ~USER_LOCK_BUSY;
161 spin_unlock(&lockres->l_lock);
163 wake_up(&lockres->l_event);
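
This AST (the DLM's grant callback) commits the result of a lock request: on a downconvert that now satisfies what a remote node was blocked on (lines 148-152), the BLOCKED state is cleared; the new level is then installed and BUSY is dropped so waiters can proceed. A hedged sketch of the same flow, reusing the demo types above; the compatibility rule is my reading of the user_highest_compat_lock_level() calls in this listing, not the verbatim kernel table:

    /* Presumed rule: a remote EX request tolerates only NL locally; a
     * remote PR request tolerates up to PR; otherwise anything goes. */
    static int demo_highest_compat_lock_level(int blocking)
    {
        if (blocking == LKM_EXMODE)
            return LKM_NLMODE;
        if (blocking == LKM_PRMODE)
            return LKM_PRMODE;
        return LKM_EXMODE;
    }

    /* Gist of the grant path (cf. lines 148-163). */
    static void demo_ast(struct demo_lock_res *lockres)
    {
        pthread_mutex_lock(&lockres->l_lock);
        if (lockres->l_requested < lockres->l_level &&
            lockres->l_requested <=
                demo_highest_compat_lock_level(lockres->l_blocking)) {
            /* The downconvert satisfied the blocker: no longer blocked. */
            lockres->l_blocking = LKM_NLMODE;
            lockres->l_flags &= ~USER_LOCK_BLOCKED;
        }
        lockres->l_level = lockres->l_requested;   /* commit the grant */
        lockres->l_requested = LKM_IVMODE;
        lockres->l_flags |= USER_LOCK_ATTACHED;
        lockres->l_flags &= ~USER_LOCK_BUSY;
        pthread_mutex_unlock(&lockres->l_lock);
        pthread_cond_broadcast(&lockres->l_event); /* wake_up(l_event) */
    }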
166 static inline void user_dlm_grab_inode_ref(struct user_lock_res *lockres)
169 inode = user_dlm_inode_from_user_lockres(lockres);
176 static void __user_dlm_queue_lockres(struct user_lock_res *lockres)
178 if (!(lockres->l_flags & USER_LOCK_QUEUED)) {
179 user_dlm_grab_inode_ref(lockres);
181 INIT_WORK(&lockres->l_work, user_dlm_unblock_lock);
183 queue_work(user_dlm_worker, &lockres->l_work);
184 lockres->l_flags |= USER_LOCK_QUEUED;
188 static void __user_dlm_cond_queue_lockres(struct user_lock_res *lockres)
192 if (!(lockres->l_flags & USER_LOCK_BLOCKED))
195 switch (lockres->l_blocking) {
197 if (!lockres->l_ex_holders && !lockres->l_ro_holders)
201 if (!lockres->l_ex_holders)
209 __user_dlm_queue_lockres(lockres);
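
The conditional variant only schedules the worker when the lock is blocked and no remaining holder conflicts with the blocked request: a pending EX must wait for every holder, a pending PR only for exclusive holders. The decision, extracted as a predicate (a sketch, reusing the stand-in LKM_* values):

    /* Should the downconvert worker run now? Mirrors the switch on
     * l_blocking at lines 195-201. */
    static int demo_should_queue(int blocking, unsigned int ex_holders,
                                 unsigned int ro_holders)
    {
        switch (blocking) {
        case LKM_EXMODE:
            return !ex_holders && !ro_holders; /* EX conflicts with all */
        case LKM_PRMODE:
            return !ex_holders;                /* PR coexists with readers */
        default:
            return 0;
        }
    }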
214 struct user_lock_res *lockres = opaque;
216 mlog(0, "Blocking AST fired for lockres %.*s. Blocking level %d\n",
217 lockres->l_namelen, lockres->l_name, level);
219 spin_lock(&lockres->l_lock);
220 lockres->l_flags |= USER_LOCK_BLOCKED;
221 if (level > lockres->l_blocking)
222 lockres->l_blocking = level;
224 __user_dlm_queue_lockres(lockres);
225 spin_unlock(&lockres->l_lock);
227 wake_up(&lockres->l_event);
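
The blocking AST is the DLM telling this node that a remote request is waiting on it. It only ever raises l_blocking (lines 221-222), so stacked BASTs keep the strongest demand, and the actual downconvert happens later in the queued worker, never in the callback itself. Condensed onto the demo types:

    /* BAST bookkeeping: remember the strongest conflicting request,
     * then let the async worker do the downconvert. */
    static void demo_bast(struct demo_lock_res *lockres, int level)
    {
        pthread_mutex_lock(&lockres->l_lock);
        lockres->l_flags |= USER_LOCK_BLOCKED;
        if (level > lockres->l_blocking)  /* BASTs may stack; keep the max */
            lockres->l_blocking = level;
        lockres->l_flags |= USER_LOCK_QUEUED; /* real code queues work here */
        pthread_mutex_unlock(&lockres->l_lock);
        pthread_cond_broadcast(&lockres->l_event);
    }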
232 struct user_lock_res *lockres = opaque;
234 mlog(0, "UNLOCK AST called on lock %.*s\n", lockres->l_namelen,
235 lockres->l_name);
240 spin_lock(&lockres->l_lock);
244 if (lockres->l_flags & USER_LOCK_IN_TEARDOWN
245 && !(lockres->l_flags & USER_LOCK_IN_CANCEL)) {
246 lockres->l_level = LKM_IVMODE;
251 BUG_ON(!(lockres->l_flags & USER_LOCK_IN_CANCEL));
252 lockres->l_flags &= ~USER_LOCK_IN_CANCEL;
255 BUG_ON(!(lockres->l_flags & USER_LOCK_IN_CANCEL));
257 lockres->l_requested = LKM_IVMODE; /* cancel an operation */
260 lockres->l_flags &= ~USER_LOCK_IN_CANCEL;
263 if (lockres->l_flags & USER_LOCK_BLOCKED)
264 __user_dlm_queue_lockres(lockres);
267 lockres->l_flags &= ~USER_LOCK_BUSY;
269 spin_unlock(&lockres->l_lock);
271 wake_up(&lockres->l_event);
274 static inline void user_dlm_drop_inode_ref(struct user_lock_res *lockres)
277 inode = user_dlm_inode_from_user_lockres(lockres);
284 struct user_lock_res *lockres =
286 struct dlm_ctxt *dlm = dlm_ctxt_from_user_lockres(lockres);
288 mlog(0, "processing lockres %.*s\n", lockres->l_namelen,
289 lockres->l_name);
291 spin_lock(&lockres->l_lock);
293 mlog_bug_on_msg(!(lockres->l_flags & USER_LOCK_QUEUED),
295 lockres->l_namelen, lockres->l_name, lockres->l_flags);
299 lockres->l_flags &= ~USER_LOCK_QUEUED;
306 if (!(lockres->l_flags & USER_LOCK_BLOCKED)) {
307 spin_unlock(&lockres->l_lock);
311 if (lockres->l_flags & USER_LOCK_IN_TEARDOWN) {
312 spin_unlock(&lockres->l_lock);
316 if (lockres->l_flags & USER_LOCK_BUSY) {
317 if (lockres->l_flags & USER_LOCK_IN_CANCEL) {
318 spin_unlock(&lockres->l_lock);
322 lockres->l_flags |= USER_LOCK_IN_CANCEL;
323 spin_unlock(&lockres->l_lock);
326 &lockres->l_lksb,
329 lockres);
331 user_log_dlm_error("dlmunlock", status, lockres);
338 if ((lockres->l_blocking == LKM_EXMODE)
339 && (lockres->l_ex_holders || lockres->l_ro_holders)) {
340 spin_unlock(&lockres->l_lock);
342 lockres->l_ro_holders, lockres->l_ex_holders);
346 if ((lockres->l_blocking == LKM_PRMODE)
347 && lockres->l_ex_holders) {
348 spin_unlock(&lockres->l_lock);
350 lockres->l_ex_holders);
355 new_level = user_highest_compat_lock_level(lockres->l_blocking);
356 lockres->l_requested = new_level;
357 lockres->l_flags |= USER_LOCK_BUSY;
359 lockres->l_level, new_level);
360 spin_unlock(&lockres->l_lock);
365 &lockres->l_lksb,
367 lockres->l_name,
368 lockres->l_namelen,
370 lockres,
373 user_log_dlm_error("dlmlock", status, lockres);
374 user_recover_from_dlm_error(lockres);
378 user_dlm_drop_inode_ref(lockres);
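
The worker at lines 284-378 walks a ladder of early-outs before touching the DLM: consume the QUEUED bit; bail if the lock is no longer blocked or is in teardown; if an operation is still in flight (BUSY), issue a cancel instead of a convert, unless a cancel is already pending; if conflicting holders remain, simply return, since the last unlock will requeue the work; only then downconvert to the highest compatible level. A condensed control-flow sketch with the DLM calls stubbed out (the stubs stand in for dlmunlock() with LKM_CANCEL and dlmlock() with LKM_CONVERT; they are not real APIs):

    static void demo_dlm_cancel(struct demo_lock_res *l)         { (void)l; }
    static void demo_dlm_convert(struct demo_lock_res *l, int m) { (void)l; (void)m; }

    static void demo_unblock(struct demo_lock_res *lockres)
    {
        int new_level;

        pthread_mutex_lock(&lockres->l_lock);
        lockres->l_flags &= ~USER_LOCK_QUEUED;    /* consume the queued bit */

        /* Early outs: nothing blocked, or teardown owns the lock now. */
        if (!(lockres->l_flags & USER_LOCK_BLOCKED) ||
            (lockres->l_flags & USER_LOCK_IN_TEARDOWN))
            goto out;

        /* A request is in flight: try to cancel it rather than convert. */
        if (lockres->l_flags & USER_LOCK_BUSY) {
            if (!(lockres->l_flags & USER_LOCK_IN_CANCEL)) {
                lockres->l_flags |= USER_LOCK_IN_CANCEL;
                pthread_mutex_unlock(&lockres->l_lock);
                demo_dlm_cancel(lockres);
                return;
            }
            goto out;                     /* a cancel is already pending */
        }

        /* Conflicting holders remain: the final unlock requeues us. */
        if (!demo_should_queue(lockres->l_blocking,
                               lockres->l_ex_holders, lockres->l_ro_holders))
            goto out;

        /* Downconvert to the highest level the blocker tolerates. */
        new_level = demo_highest_compat_lock_level(lockres->l_blocking);
        lockres->l_requested = new_level;
        lockres->l_flags |= USER_LOCK_BUSY;
        pthread_mutex_unlock(&lockres->l_lock);
        demo_dlm_convert(lockres, new_level);     /* AST will clear BUSY */
        return;
    out:
        pthread_mutex_unlock(&lockres->l_lock);
    }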
381 static inline void user_dlm_inc_holders(struct user_lock_res *lockres,
386 lockres->l_ex_holders++;
389 lockres->l_ro_holders++;
400 user_may_continue_on_blocked_lock(struct user_lock_res *lockres,
403 BUG_ON(!(lockres->l_flags & USER_LOCK_BLOCKED));
405 return wanted <= user_highest_compat_lock_level(lockres->l_blocking);
408 int user_dlm_cluster_lock(struct user_lock_res *lockres,
413 struct dlm_ctxt *dlm = dlm_ctxt_from_user_lockres(lockres);
417 mlog(ML_ERROR, "lockres %.*s: invalid request!\n",
418 lockres->l_namelen, lockres->l_name);
423 mlog(0, "lockres %.*s: asking for %s lock, passed flags = 0x%x\n",
424 lockres->l_namelen, lockres->l_name,
434 spin_lock(&lockres->l_lock);
439 if ((lockres->l_flags & USER_LOCK_BUSY) &&
440 (level > lockres->l_level)) {
443 spin_unlock(&lockres->l_lock);
445 user_wait_on_busy_lock(lockres);
449 if ((lockres->l_flags & USER_LOCK_BLOCKED) &&
450 (!user_may_continue_on_blocked_lock(lockres, level))) {
453 spin_unlock(&lockres->l_lock);
455 user_wait_on_blocked_lock(lockres);
459 if (level > lockres->l_level) {
461 if (lockres->l_level != LKM_IVMODE)
464 lockres->l_requested = level;
465 lockres->l_flags |= USER_LOCK_BUSY;
466 spin_unlock(&lockres->l_lock);
474 &lockres->l_lksb,
476 lockres->l_name,
477 lockres->l_namelen,
479 lockres,
486 user_log_dlm_error("dlmlock", status, lockres);
489 user_recover_from_dlm_error(lockres);
493 user_wait_on_busy_lock(lockres);
497 user_dlm_inc_holders(lockres, level);
498 spin_unlock(&lockres->l_lock);
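
user_dlm_cluster_lock() (lines 408-498) is the acquire path. It loops: wait out an in-flight request it cannot piggyback on, wait out a blocked state its level would violate, then either request a new or converted DLM lock and sleep until the AST clears BUSY, or, if the granted level already suffices, just bump the holder count. The retry-loop shape, condensed onto the demo types:

    /* Condensed acquire path; returns 0 on success in this sketch. */
    static int demo_cluster_lock(struct demo_lock_res *lockres, int level)
    {
    again:
        pthread_mutex_lock(&lockres->l_lock);

        /* An in-flight request at a lower level can't serve us: wait. */
        if ((lockres->l_flags & USER_LOCK_BUSY) &&
            level > lockres->l_level) {
            pthread_mutex_unlock(&lockres->l_lock);
            demo_wait_on_flag(lockres, USER_LOCK_BUSY);
            goto again;
        }
        /* Don't jump ahead of a blocked remote request unless our level
         * is compatible with what that node wants. */
        if ((lockres->l_flags & USER_LOCK_BLOCKED) &&
            level > demo_highest_compat_lock_level(lockres->l_blocking)) {
            pthread_mutex_unlock(&lockres->l_lock);
            demo_wait_on_flag(lockres, USER_LOCK_BLOCKED);
            goto again;
        }
        if (level > lockres->l_level) {
            /* Need the DLM: mark BUSY, ask, sleep until the AST fires,
             * then recheck everything from the top. */
            lockres->l_requested = level;
            lockres->l_flags |= USER_LOCK_BUSY;
            pthread_mutex_unlock(&lockres->l_lock);
            demo_dlm_convert(lockres, level);
            demo_wait_on_flag(lockres, USER_LOCK_BUSY);
            goto again;
        }
        /* Granted level suffices: just account the holder. */
        if (level == LKM_EXMODE)
            lockres->l_ex_holders++;
        else
            lockres->l_ro_holders++;
        pthread_mutex_unlock(&lockres->l_lock);
        return 0;
    }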
505 static inline void user_dlm_dec_holders(struct user_lock_res *lockres,
510 BUG_ON(!lockres->l_ex_holders);
511 lockres->l_ex_holders--;
514 BUG_ON(!lockres->l_ro_holders);
515 lockres->l_ro_holders--;
522 void user_dlm_cluster_unlock(struct user_lock_res *lockres,
527 mlog(ML_ERROR, "lockres %.*s: invalid request!\n",
528 lockres->l_namelen, lockres->l_name);
532 spin_lock(&lockres->l_lock);
533 user_dlm_dec_holders(lockres, level);
534 __user_dlm_cond_queue_lockres(lockres);
535 spin_unlock(&lockres->l_lock);
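
The release path (lines 522-535) is deliberately thin: drop the holder count under the spinlock and let the conditional queue decide whether this release is what a blocked remote request was waiting on. Condensed:

    /* Condensed release path: drop a holder, then (in the real code)
     * queue the downconvert worker if nothing conflicting remains. */
    static void demo_cluster_unlock(struct demo_lock_res *lockres, int level)
    {
        pthread_mutex_lock(&lockres->l_lock);
        if (level == LKM_EXMODE)
            lockres->l_ex_holders--;
        else
            lockres->l_ro_holders--;

        if ((lockres->l_flags & USER_LOCK_BLOCKED) &&
            demo_should_queue(lockres->l_blocking,
                              lockres->l_ex_holders, lockres->l_ro_holders))
            lockres->l_flags |= USER_LOCK_QUEUED; /* worker queued here */
        pthread_mutex_unlock(&lockres->l_lock);
    }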
542 struct user_lock_res *lockres = &DLMFS_I(inode)->ip_lockres;
543 char *lvb = lockres->l_lksb.lvb;
547 spin_lock(&lockres->l_lock);
549 BUG_ON(lockres->l_level < LKM_EXMODE);
552 spin_unlock(&lockres->l_lock);
559 struct user_lock_res *lockres = &DLMFS_I(inode)->ip_lockres;
560 char *lvb = lockres->l_lksb.lvb;
564 spin_lock(&lockres->l_lock);
566 BUG_ON(lockres->l_level < LKM_PRMODE);
569 spin_unlock(&lockres->l_lock);
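
Lines 542-569 access the lock value block (LVB), the small payload the DLM replicates alongside the lock. The BUG_ONs encode the invariant that makes the LVB trustworthy cluster-wide: writers must hold at least EX (line 549), readers at least PR (line 566). The same invariant on the demo types, with asserts in place of BUG_ON:

    #include <assert.h>
    #include <string.h>

    static void demo_write_lvb(struct demo_lock_res *lockres,
                               const char *val, unsigned int len)
    {
        assert(len <= sizeof(lockres->l_lvb));
        pthread_mutex_lock(&lockres->l_lock);
        assert(lockres->l_level >= LKM_EXMODE); /* cf. BUG_ON at line 549 */
        memcpy(lockres->l_lvb, val, len);
        pthread_mutex_unlock(&lockres->l_lock);
    }

    static void demo_read_lvb(struct demo_lock_res *lockres,
                              char *val, unsigned int len)
    {
        assert(len <= sizeof(lockres->l_lvb));
        pthread_mutex_lock(&lockres->l_lock);
        assert(lockres->l_level >= LKM_PRMODE); /* cf. BUG_ON at line 566 */
        memcpy(val, lockres->l_lvb, len);
        pthread_mutex_unlock(&lockres->l_lock);
    }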
572 void user_dlm_lock_res_init(struct user_lock_res *lockres,
575 memset(lockres, 0, sizeof(*lockres));
577 spin_lock_init(&lockres->l_lock);
578 init_waitqueue_head(&lockres->l_event);
579 lockres->l_level = LKM_IVMODE;
580 lockres->l_requested = LKM_IVMODE;
581 lockres->l_blocking = LKM_IVMODE;
586 memcpy(lockres->l_name,
589 lockres->l_namelen = dentry->d_name.len;
592 int user_dlm_destroy_lock(struct user_lock_res *lockres)
595 struct dlm_ctxt *dlm = dlm_ctxt_from_user_lockres(lockres);
597 mlog(0, "asked to destroy %.*s\n", lockres->l_namelen, lockres->l_name);
599 spin_lock(&lockres->l_lock);
600 if (lockres->l_flags & USER_LOCK_IN_TEARDOWN) {
601 spin_unlock(&lockres->l_lock);
605 lockres->l_flags |= USER_LOCK_IN_TEARDOWN;
607 while (lockres->l_flags & USER_LOCK_BUSY) {
608 spin_unlock(&lockres->l_lock);
610 user_wait_on_busy_lock(lockres);
612 spin_lock(&lockres->l_lock);
615 if (lockres->l_ro_holders || lockres->l_ex_holders) {
616 spin_unlock(&lockres->l_lock);
621 if (!(lockres->l_flags & USER_LOCK_ATTACHED)) {
622 spin_unlock(&lockres->l_lock);
626 lockres->l_flags &= ~USER_LOCK_ATTACHED;
627 lockres->l_flags |= USER_LOCK_BUSY;
628 spin_unlock(&lockres->l_lock);
631 &lockres->l_lksb,
634 lockres);
636 user_log_dlm_error("dlmunlock", status, lockres);
641 user_wait_on_busy_lock(lockres);
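
Teardown (lines 592-641) is the most delicate sequence in the listing: set IN_TEARDOWN exactly once so the unblock worker stands down, drain any in-flight operation, refuse to destroy a lock that still has holders, and only then issue the final dlmunlock(), again sleeping until the unlock AST clears BUSY. Condensed onto the demo types (demo_dlm_unlock() is a stub for that final dlmunlock() call):

    static void demo_dlm_unlock(struct demo_lock_res *l) { (void)l; }

    /* Condensed teardown; returns 0 on success, -1 if still in use
     * (the real code returns -EBUSY there). */
    static int demo_destroy_lock(struct demo_lock_res *lockres)
    {
        pthread_mutex_lock(&lockres->l_lock);
        if (lockres->l_flags & USER_LOCK_IN_TEARDOWN) {
            pthread_mutex_unlock(&lockres->l_lock);
            return 0;                     /* someone beat us to it */
        }
        lockres->l_flags |= USER_LOCK_IN_TEARDOWN;

        /* Drain any in-flight DLM operation before proceeding. */
        while (lockres->l_flags & USER_LOCK_BUSY) {
            pthread_mutex_unlock(&lockres->l_lock);
            demo_wait_on_flag(lockres, USER_LOCK_BUSY);
            pthread_mutex_lock(&lockres->l_lock);
        }
        if (lockres->l_ro_holders || lockres->l_ex_holders) {
            pthread_mutex_unlock(&lockres->l_lock);
            return -1;                    /* still held: refuse */
        }
        if (!(lockres->l_flags & USER_LOCK_ATTACHED)) {
            pthread_mutex_unlock(&lockres->l_lock);
            return 0;                     /* never reached the DLM */
        }
        lockres->l_flags &= ~USER_LOCK_ATTACHED;
        lockres->l_flags |= USER_LOCK_BUSY;   /* unlock AST clears this */
        pthread_mutex_unlock(&lockres->l_lock);

        demo_dlm_unlock(lockres);
        demo_wait_on_flag(lockres, USER_LOCK_BUSY);
        return 0;
    }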