Lines Matching refs:lock

35 			   struct dlm_lock *lock);
36 static int dlm_should_cancel_bast(struct dlm_ctxt *dlm, struct dlm_lock *lock);
39 * lock level will obsolete a pending bast.
40 * For example, if dlm_thread queued a bast for an EX lock that
42 * lock owner downconverted to NL, the bast is now obsolete.
44 * This is needed because the lock and convert paths can queue
47 static int dlm_should_cancel_bast(struct dlm_ctxt *dlm, struct dlm_lock *lock)
50 assert_spin_locked(&lock->spinlock);
52 if (lock->ml.highest_blocked == LKM_IVMODE)
54 BUG_ON(lock->ml.highest_blocked == LKM_NLMODE);
56 if (lock->bast_pending &&
57 list_empty(&lock->bast_list))
61 if (lock->ml.type == LKM_EXMODE)
64 else if (lock->ml.type == LKM_NLMODE)
67 else if (lock->ml.highest_blocked != LKM_EXMODE)
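Only the conditions of dlm_should_cancel_bast match this search; the return statements fall on elided lines. A minimal, self-contained sketch of the rule the matched lines imply — the enum stand-ins and the 0/1 results are inferred from the comments above, not read from the file:

	#include <assert.h>
	#include <stdio.h>

	enum mode { NL, PR, EX, IV };   /* stand-ins for LKM_*MODE */

	/* Should a queued bast be cancelled now that the lock sits at
	 * "type"? Conditions mirror the matched lines. */
	static int should_cancel_bast(enum mode type, enum mode highest_blocked,
	                              int bast_pending, int bast_list_empty)
	{
		if (highest_blocked == IV)
			return 0;                   /* nothing blocked: no bast queued */
		assert(highest_blocked != NL);  /* NL never blocks anyone */

		if (bast_pending && bast_list_empty)
			return 0;                   /* old bast already sent */

		if (type == EX)
			return 0;                   /* EX still blocks everything */
		if (type == NL)
			return 1;                   /* NL blocks nothing: bast obsolete */
		if (highest_blocked != EX)
			return 1;                   /* PR only blocks EX */
		return 0;
	}

	int main(void)
	{
		/* the comment's example: EX holder downconverted to NL */
		printf("%d\n", should_cancel_bast(NL, EX, 0, 0));   /* prints 1 */
		return 0;
	}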
74 void __dlm_queue_ast(struct dlm_ctxt *dlm, struct dlm_lock *lock)
79 BUG_ON(!lock);
81 res = lock->lockres;
85 if (!list_empty(&lock->ast_list)) {
86 mlog(ML_ERROR, "%s: res %.*s, lock %u:%llu, "
89 dlm_get_lock_cookie_node(be64_to_cpu(lock->ml.cookie)),
90 dlm_get_lock_cookie_seq(be64_to_cpu(lock->ml.cookie)),
91 lock->ast_pending, lock->ml.type);
94 if (lock->ast_pending)
95 mlog(0, "%s: res %.*s, lock %u:%llu, AST getting flushed\n",
97 dlm_get_lock_cookie_node(be64_to_cpu(lock->ml.cookie)),
98 dlm_get_lock_cookie_seq(be64_to_cpu(lock->ml.cookie)));
100 /* putting lock on list, add a ref */
101 dlm_lock_get(lock);
102 spin_lock(&lock->spinlock);
105 if (dlm_should_cancel_bast(dlm, lock)) {
106 mlog(0, "%s: res %.*s, lock %u:%llu, Cancelling BAST\n",
108 dlm_get_lock_cookie_node(be64_to_cpu(lock->ml.cookie)),
109 dlm_get_lock_cookie_seq(be64_to_cpu(lock->ml.cookie)));
110 lock->bast_pending = 0;
111 list_del_init(&lock->bast_list);
112 lock->ml.highest_blocked = LKM_IVMODE;
113 /* removing lock from list, remove a ref. guaranteed
116 dlm_lock_put(lock);
124 list_add_tail(&lock->ast_list, &dlm->pending_asts);
125 lock->ast_pending = 1;
126 spin_unlock(&lock->spinlock);
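The ordering in __dlm_queue_ast is the "reference follows list membership" rule: the lock is pinned before it is linked onto a pending list, and the pin belonging to a cancelled bast is dropped when that bast is unlinked. Condensed from the matched lines above (logging and elided lines dropped):

	dlm_lock_get(lock);                      /* pin: going on a list */
	spin_lock(&lock->spinlock);
	if (dlm_should_cancel_bast(dlm, lock)) {
		lock->bast_pending = 0;
		list_del_init(&lock->bast_list);
		lock->ml.highest_blocked = LKM_IVMODE;
		dlm_lock_put(lock);              /* unpin: off the bast list */
	}
	list_add_tail(&lock->ast_list, &dlm->pending_asts);
	lock->ast_pending = 1;
	spin_unlock(&lock->spinlock);

__dlm_queue_bast (lines 140-163 below) follows the same pin-then-link shape, except it asserts the bast list entry is empty rather than tolerating a re-queue.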
129 void dlm_queue_ast(struct dlm_ctxt *dlm, struct dlm_lock *lock)
132 BUG_ON(!lock);
135 __dlm_queue_ast(dlm, lock);
140 void __dlm_queue_bast(struct dlm_ctxt *dlm, struct dlm_lock *lock)
145 BUG_ON(!lock);
149 res = lock->lockres;
151 BUG_ON(!list_empty(&lock->bast_list));
152 if (lock->bast_pending)
153 mlog(0, "%s: res %.*s, lock %u:%llu, BAST getting flushed\n",
155 dlm_get_lock_cookie_node(be64_to_cpu(lock->ml.cookie)),
156 dlm_get_lock_cookie_seq(be64_to_cpu(lock->ml.cookie)));
158 /* putting lock on list, add a ref */
159 dlm_lock_get(lock);
160 spin_lock(&lock->spinlock);
161 list_add_tail(&lock->bast_list, &dlm->pending_basts);
162 lock->bast_pending = 1;
163 spin_unlock(&lock->spinlock);
167 struct dlm_lock *lock)
169 struct dlm_lockstatus *lksb = lock->lksb;
178 lock->ml.node == dlm->node_num ? "master" :
183 * place when the lock is downconverted - otherwise we risk
197 struct dlm_lock *lock)
201 mlog(0, "%s: res %.*s, lock %u:%llu, Local AST\n", dlm->name,
203 dlm_get_lock_cookie_node(be64_to_cpu(lock->ml.cookie)),
204 dlm_get_lock_cookie_seq(be64_to_cpu(lock->ml.cookie)));
206 fn = lock->ast;
207 BUG_ON(lock->ml.node != dlm->node_num);
209 dlm_update_lvb(dlm, res, lock);
210 (*fn)(lock->astdata);
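dlm_update_lvb (lines 167-183) only partially matches this search. From the matched fragments and the truncated comment: the lock value block is copied out to the lksb on a get request, while put requests are deliberately done in place at downconvert time so a racing get cannot propagate stale data. A hedged sketch of the get direction — the res->owner mastery check is an assumption; DLM_LKSB_GET_LVB and DLM_LVB_LEN appear later in this listing:

	/* Sketch only: copy the LVB out on a get. Puts are not done
	 * here - they happen in place at downconvert, otherwise a
	 * racing get could propagate old LVB data. */
	if (res->owner == dlm->node_num &&        /* assumed: master only */
	    (lksb->flags & DLM_LKSB_GET_LVB))
		memcpy(lksb->lvb, res->lvb, DLM_LVB_LEN);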
215 struct dlm_lock *lock)
221 mlog(0, "%s: res %.*s, lock %u:%llu, Remote AST\n", dlm->name,
223 dlm_get_lock_cookie_node(be64_to_cpu(lock->ml.cookie)),
224 dlm_get_lock_cookie_seq(be64_to_cpu(lock->ml.cookie)));
226 lksb = lock->lksb;
227 BUG_ON(lock->ml.node == dlm->node_num);
230 dlm_update_lvb(dlm, res, lock);
232 /* lock request came from another node
234 ret = dlm_send_proxy_ast(dlm, res, lock, lksbflags);
239 struct dlm_lock *lock, int blocked_type)
241 dlm_bastlockfunc_t *fn = lock->bast;
243 BUG_ON(lock->ml.node != dlm->node_num);
245 mlog(0, "%s: res %.*s, lock %u:%llu, Local BAST, blocked %d\n",
247 dlm_get_lock_cookie_node(be64_to_cpu(lock->ml.cookie)),
248 dlm_get_lock_cookie_seq(be64_to_cpu(lock->ml.cookie)),
251 (*fn)(lock->astdata, blocked_type);
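The three delivery helpers above share one dispatch rule, visible in their BUG_ONs: a lock owned by this node (lock->ml.node == dlm->node_num) gets its callback run in place, anything else is proxied to the owning node. Schematically — condensed from the matched lines, not a verbatim function:

	if (lock->ml.node == dlm->node_num) {
		/* local ast: fire the registered callback directly */
		dlm_update_lvb(dlm, res, lock);
		(*lock->ast)(lock->astdata);
	} else {
		/* remote ast: ship it to the node holding the lock */
		dlm_update_lvb(dlm, res, lock);
		dlm_send_proxy_ast(dlm, res, lock, lksbflags);
	}
	/* dlm_do_local_bast is the local pattern with the blocked mode:
	 * (*lock->bast)(lock->astdata, blocked_type); no LVB update */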
263 struct dlm_lock *lock = NULL;
346 lock = NULL;
347 list_for_each_entry(lock, head, list) {
348 if (lock->ml.cookie == cookie)
358 list_for_each_entry(lock, head, list) {
359 /* if lock is found but an unlock is pending, ignore the bast */
360 if (lock->ml.cookie == cookie) {
361 if (lock->unlock_pending)
367 mlog(0, "Got %sast for unknown lock! cookie=%u:%llu, name=%.*s, "
381 /* do not alter lock refcount. switching lists. */
382 list_move_tail(&lock->list, &res->granted);
383 mlog(0, "%s: res %.*s, lock %u:%llu, Granted type %d => %d\n",
387 lock->ml.type, lock->ml.convert_type);
389 if (lock->ml.convert_type != LKM_IVMODE) {
390 lock->ml.type = lock->ml.convert_type;
391 lock->ml.convert_type = LKM_IVMODE;
396 lock->lksb->status = DLM_NORMAL;
400 BUG_ON(!(lock->lksb->flags & DLM_LKSB_GET_LVB));
401 memcpy(lock->lksb->lvb, past->lvb, DLM_LVB_LEN);
407 dlm_do_local_ast(dlm, res, lock);
409 dlm_do_local_bast(dlm, res, lock, past->blocked_type);
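On the node that receives the proxy message, the handler resolves past->cookie back to a struct dlm_lock by walking the resource's queues (a bast aimed at a lock with unlock_pending is simply ignored), then performs the grant in place. Condensed from the matched lines; the not-found and error paths fall on elided lines:

	list_for_each_entry(lock, head, list)
		if (lock->ml.cookie == cookie)
			break;          /* found; real code also handles a miss */

	/* grant: same refcount, different list */
	list_move_tail(&lock->list, &res->granted);
	if (lock->ml.convert_type != LKM_IVMODE) {
		lock->ml.type = lock->ml.convert_type;  /* fold convert in */
		lock->ml.convert_type = LKM_IVMODE;
	}
	lock->lksb->status = DLM_NORMAL;
	if (lock->lksb->flags & DLM_LKSB_GET_LVB)       /* guard assumed */
		memcpy(lock->lksb->lvb, past->lvb, DLM_LVB_LEN);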
422 struct dlm_lock *lock, int msg_type,
432 res->lockname.len, res->lockname.name, lock->ml.node, msg_type,
441 past.cookie = lock->ml.cookie;
448 vec[1].iov_base = lock->lksb->lvb;
453 lock->ml.node, &status);
457 lock->ml.node);
461 "node is dead!\n", lock->ml.node);
465 "DLM_MIGRATING!\n", lock->ml.node);
469 lock->ml.node, status);
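dlm_send_proxy_ast_msg builds the wire message from the lock itself: the cookie identifies the lock at the far end, and when the lksb carries a value block it rides along as a second iovec element. A hedged reconstruction — vec[0] holding the header struct, the LVB guard, and the exact o2net_send_message_vec arguments do not match refs:lock here and are assumptions:

	past.cookie = lock->ml.cookie;            /* names the lock remotely */

	vec[0].iov_base = &past;                  /* assumed: message header */
	vec[0].iov_len  = sizeof(past);
	veclen = 1;
	if (flags & DLM_LKSB_GET_LVB) {           /* assumed guard */
		vec[1].iov_base = lock->lksb->lvb;    /* LVB piggybacks */
		vec[1].iov_len  = DLM_LVB_LEN;        /* assumed length */
		veclen = 2;
	}
	ret = o2net_send_message_vec(DLM_PROXY_AST_MSG, dlm->key, vec, veclen,
	                             lock->ml.node, &status);

The error strings matched at lines 457-469 then report the usual outcomes of that send: an unreachable node, a dead node, or a resource mid-migration (DLM_MIGRATING).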