Lines Matching defs:dlm

143 static void dlm_unregister_domain_handlers(struct dlm_ctxt *dlm);
145 void __dlm_unhash_lockres(struct dlm_ctxt *dlm, struct dlm_lock_resource *res)
150 mlog(0, "%s: Unhash res %.*s\n", dlm->name, res->lockname.len,
156 void __dlm_insert_lockres(struct dlm_ctxt *dlm, struct dlm_lock_resource *res)
160 assert_spin_locked(&dlm->spinlock);
162 bucket = dlm_lockres_hash(dlm, res->lockname.hash);
169 mlog(0, "%s: Hash res %.*s\n", dlm->name, res->lockname.len,
173 struct dlm_lock_resource * __dlm_lookup_lockres_full(struct dlm_ctxt *dlm,
183 assert_spin_locked(&dlm->spinlock);
185 bucket = dlm_lockres_hash(dlm, hash);
206 struct dlm_lock_resource * __dlm_lookup_lockres(struct dlm_ctxt *dlm,
215 assert_spin_locked(&dlm->spinlock);
217 res = __dlm_lookup_lockres_full(dlm, name, len, hash);
231 struct dlm_lock_resource * dlm_lookup_lockres(struct dlm_ctxt *dlm,
238 spin_lock(&dlm->spinlock);
239 res = __dlm_lookup_lockres(dlm, name, len, hash);
240 spin_unlock(&dlm->spinlock);
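
The three lookup paths above differ only in locking: __dlm_lookup_lockres_full() walks a hash bucket, __dlm_lookup_lockres() wraps it, and dlm_lookup_lockres() is the spinlocked wrapper at lines 238-240. A hedged caller-side sketch; dlm_lockid_hash() and dlm_lockres_put() are helpers assumed from dlmcommon.h, and the lookup is assumed to return the resource with a reference held:

    /* Look up a named lock resource under dlm->spinlock (sketch). */
    unsigned int hash = dlm_lockid_hash(name, len);
    struct dlm_lock_resource *res;

    spin_lock(&dlm->spinlock);
    res = __dlm_lookup_lockres(dlm, name, len, hash);
    spin_unlock(&dlm->spinlock);

    if (res) {
        /* ... inspect or act on res ... */
        dlm_lockres_put(res);   /* drop the reference the lookup took */
    }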
290 static void dlm_free_ctxt_mem(struct dlm_ctxt *dlm)
292 dlm_destroy_debugfs_subroot(dlm);
294 if (dlm->lockres_hash)
295 dlm_free_pagevec((void **)dlm->lockres_hash, DLM_HASH_PAGES);
297 if (dlm->master_hash)
298 dlm_free_pagevec((void **)dlm->master_hash, DLM_HASH_PAGES);
300 kfree(dlm->name);
301 kfree(dlm);
309 struct dlm_ctxt *dlm;
311 dlm = container_of(kref, struct dlm_ctxt, dlm_refs);
313 BUG_ON(dlm->num_joins);
314 BUG_ON(dlm->dlm_state == DLM_CTXT_JOINED);
317 list_del_init(&dlm->list);
321 mlog(0, "freeing memory from domain %s\n", dlm->name);
325 dlm_free_ctxt_mem(dlm);
330 void dlm_put(struct dlm_ctxt *dlm)
333 kref_put(&dlm->dlm_refs, dlm_ctxt_release);
337 static void __dlm_get(struct dlm_ctxt *dlm)
339 kref_get(&dlm->dlm_refs);
342 /* given a questionable reference to a dlm object, gets a reference if
345 struct dlm_ctxt *dlm_grab(struct dlm_ctxt *dlm)
353 if (target == dlm) {
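
dlm_grab() and dlm_put() implement the kref-based lifetime of the context: dlm_grab() only returns the context if it is still live on the global domain list, which is why the message handlers later in this listing bracket their work with dlm_grab()/dlm_put() (see lines 491-501, for example). A minimal handler skeleton following that pattern; the handler name and body are placeholders, and the signature is the one used by the registrations at lines 1725 onward:

    static int example_msg_handler(struct o2net_msg *msg, u32 len, void *data,
                                   void **ret_data)
    {
        struct dlm_ctxt *dlm = data;

        if (!dlm_grab(dlm))     /* domain is already being torn down */
            return 0;

        spin_lock(&dlm->spinlock);
        /* ... update per-domain state from the message ... */
        spin_unlock(&dlm->spinlock);

        dlm_put(dlm);           /* release the reference taken above */
        return 0;
    }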
365 int dlm_domain_fully_joined(struct dlm_ctxt *dlm)
370 ret = (dlm->dlm_state == DLM_CTXT_JOINED) ||
371 (dlm->dlm_state == DLM_CTXT_IN_SHUTDOWN);
377 static void dlm_destroy_dlm_worker(struct dlm_ctxt *dlm)
379 if (dlm->dlm_worker) {
380 destroy_workqueue(dlm->dlm_worker);
381 dlm->dlm_worker = NULL;
385 static void dlm_complete_dlm_shutdown(struct dlm_ctxt *dlm)
387 dlm_unregister_domain_handlers(dlm);
388 dlm_complete_thread(dlm);
389 dlm_complete_recovery_thread(dlm);
390 dlm_destroy_dlm_worker(dlm);
396 list_del_init(&dlm->list);
403 static int dlm_migrate_all_locks(struct dlm_ctxt *dlm)
411 mlog(0, "Migrating locks from domain %s\n", dlm->name);
414 spin_lock(&dlm->spinlock);
418 bucket = dlm_lockres_hash(dlm, i);
425 /* migrate, if necessary. this will drop the dlm
427 dropped = dlm_empty_lockres(dlm, res);
431 __dlm_lockres_calc_usage(dlm, res);
439 cond_resched_lock(&dlm->spinlock);
443 cond_resched_lock(&dlm->spinlock);
448 if (dlm->reco.state & DLM_RECO_STATE_ACTIVE) {
450 "need to be migrated after dlm recovery\n", dlm->name);
453 mlog(0, "%s: we won't do dlm recovery after migrating "
454 "all lock resources\n", dlm->name);
455 dlm->migrate_done = 1;
459 spin_unlock(&dlm->spinlock);
460 wake_up(&dlm->dlm_thread_wq);
462 /* let the dlm thread take care of purging, keep scanning until
466 dlm->name, num);
469 mlog(0, "DONE Migrating locks from domain %s\n", dlm->name);
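
dlm_migrate_all_locks() walks every lockres hash bucket under dlm->spinlock, migrating or queueing each resource for purge, and uses cond_resched_lock() so the long scan does not monopolize the CPU while holding the lock. A condensed sketch of that scan; the bucket count constant, the hlist iteration details and the redo_bucket restart are reconstructed assumptions, only the calls shown above are taken from the listing:

    int i, dropped;
    struct hlist_head *bucket;
    struct dlm_lock_resource *res;

    spin_lock(&dlm->spinlock);
    for (i = 0; i < DLM_HASH_BUCKETS; i++) {
redo_bucket:
        bucket = dlm_lockres_hash(dlm, i);
        hlist_for_each_entry(res, bucket, hash_node) {
            /* migrate if necessary; may drop and retake dlm->spinlock */
            dropped = dlm_empty_lockres(dlm, res);
            __dlm_lockres_calc_usage(dlm, res);
            if (dropped) {
                cond_resched_lock(&dlm->spinlock);
                goto redo_bucket;   /* the bucket may have changed under us */
            }
        }
        cond_resched_lock(&dlm->spinlock);  /* breathe between buckets */
    }
    spin_unlock(&dlm->spinlock);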
473 static int dlm_no_joining_node(struct dlm_ctxt *dlm)
477 spin_lock(&dlm->spinlock);
478 ret = dlm->joining_node == DLM_LOCK_RES_OWNER_UNKNOWN;
479 spin_unlock(&dlm->spinlock);
487 struct dlm_ctxt *dlm = data;
491 if (!dlm_grab(dlm))
495 mlog(0, "%s: Node %u sent a begin exit domain message\n", dlm->name, node);
497 spin_lock(&dlm->spinlock);
498 set_bit(node, dlm->exit_domain_map);
499 spin_unlock(&dlm->spinlock);
501 dlm_put(dlm);
506 static void dlm_mark_domain_leaving(struct dlm_ctxt *dlm)
508 /* Yikes, a double spinlock! I need domain_lock for the dlm
509 * state and the dlm spinlock for join state... Sorry! */
512 spin_lock(&dlm->spinlock);
514 if (dlm->joining_node != DLM_LOCK_RES_OWNER_UNKNOWN) {
516 dlm->joining_node);
517 spin_unlock(&dlm->spinlock);
520 wait_event(dlm->dlm_join_events, dlm_no_joining_node(dlm));
524 dlm->dlm_state = DLM_CTXT_LEAVING;
525 spin_unlock(&dlm->spinlock);
529 static void __dlm_print_nodes(struct dlm_ctxt *dlm)
533 assert_spin_locked(&dlm->spinlock);
536 while ((node = find_next_bit(dlm->domain_map, O2NM_MAX_NODES,
547 struct dlm_ctxt *dlm = data;
553 if (!dlm_grab(dlm))
558 spin_lock(&dlm->spinlock);
559 clear_bit(node, dlm->domain_map);
560 clear_bit(node, dlm->exit_domain_map);
561 printk(KERN_NOTICE "o2dlm: Node %u leaves domain %s ", node, dlm->name);
562 __dlm_print_nodes(dlm);
565 dlm_hb_event_notify_attached(dlm, node, 0);
567 spin_unlock(&dlm->spinlock);
569 dlm_put(dlm);
574 static int dlm_send_one_domain_exit(struct dlm_ctxt *dlm, u32 msg_type,
580 mlog(0, "%s: Sending domain exit message %u to node %u\n", dlm->name,
584 leave_msg.node_idx = dlm->node_num;
586 status = o2net_send_message(msg_type, dlm->key, &leave_msg,
591 dlm->name);
596 static void dlm_begin_exit_domain(struct dlm_ctxt *dlm)
601 if (dlm->dlm_locking_proto.pv_major == 1 &&
602 dlm->dlm_locking_proto.pv_minor < 2)
610 spin_lock(&dlm->spinlock);
612 node = find_next_bit(dlm->domain_map, O2NM_MAX_NODES, node + 1);
615 if (node == dlm->node_num)
618 spin_unlock(&dlm->spinlock);
619 dlm_send_one_domain_exit(dlm, DLM_BEGIN_EXIT_DOMAIN_MSG, node);
620 spin_lock(&dlm->spinlock);
622 spin_unlock(&dlm->spinlock);
625 static void dlm_leave_domain(struct dlm_ctxt *dlm)
630 * accept mastership of new ones. The dlm is responsible for
634 spin_lock(&dlm->spinlock);
636 clear_bit(dlm->node_num, dlm->domain_map);
637 while ((node = find_next_bit(dlm->domain_map, O2NM_MAX_NODES,
639 /* Drop the dlm spinlock. This is safe wrt the domain_map.
645 spin_unlock(&dlm->spinlock);
649 status = dlm_send_one_domain_exit(dlm, DLM_EXIT_DOMAIN_MSG,
664 spin_lock(&dlm->spinlock);
668 clear_bit(node, dlm->domain_map);
670 spin_unlock(&dlm->spinlock);
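
Both dlm_begin_exit_domain() (lines 610-622) and dlm_leave_domain() (lines 634-670) walk dlm->domain_map with find_next_bit() and drop dlm->spinlock around each send, since the send can block, retaking it before consulting the bitmap again; dlm_leave_domain() additionally clears the bit of any node it fails to reach (line 668). A condensed sketch of that loop shape:

    int node = -1;

    spin_lock(&dlm->spinlock);
    while ((node = find_next_bit(dlm->domain_map, O2NM_MAX_NODES,
                                 node + 1)) < O2NM_MAX_NODES) {
        if (node == dlm->node_num)
            continue;                       /* never message ourselves */

        spin_unlock(&dlm->spinlock);        /* sending may sleep */
        dlm_send_one_domain_exit(dlm, DLM_EXIT_DOMAIN_MSG, node);
        spin_lock(&dlm->spinlock);
    }
    spin_unlock(&dlm->spinlock);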
673 void dlm_unregister_domain(struct dlm_ctxt *dlm)
679 BUG_ON(dlm->dlm_state != DLM_CTXT_JOINED);
680 BUG_ON(!dlm->num_joins);
682 dlm->num_joins--;
683 if (!dlm->num_joins) {
690 dlm->dlm_state = DLM_CTXT_IN_SHUTDOWN;
696 mlog(0, "shutting down domain %s\n", dlm->name);
697 dlm_begin_exit_domain(dlm);
699 /* We changed dlm state, notify the thread */
700 dlm_kick_thread(dlm, NULL);
702 while (dlm_migrate_all_locks(dlm)) {
705 mlog(0, "%s: more migration to do\n", dlm->name);
709 if (!list_empty(&dlm->tracking_list)) {
712 list_for_each_entry(res, &dlm->tracking_list, tracking)
716 dlm_mark_domain_leaving(dlm);
717 dlm_leave_domain(dlm);
718 printk(KERN_NOTICE "o2dlm: Leaving domain %s\n", dlm->name);
719 dlm_force_free_mles(dlm);
720 dlm_complete_dlm_shutdown(dlm);
722 dlm_put(dlm);
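
Because only the lines containing dlm appear, the overall shape of dlm_unregister_domain() is easy to lose: when the last local user drops its join (lines 682-683), the domain is marked DLM_CTXT_IN_SHUTDOWN, peers are told the node is about to exit, all locks are migrated away in a loop, and only then is the domain marked leaving, left, and torn down. The sequence, reconstructed from the lines above (error handling, sleeps and the tracking-list debug dump are omitted):

    dlm->dlm_state = DLM_CTXT_IN_SHUTDOWN;  /* under dlm_domain_lock in the full file */
    dlm_begin_exit_domain(dlm);             /* announce the intent to exit to all peers */
    dlm_kick_thread(dlm, NULL);             /* state changed, wake the dlm thread */

    while (dlm_migrate_all_locks(dlm))
        ;                                   /* repeat until nothing is left to move */

    dlm_mark_domain_leaving(dlm);           /* waits for any in-flight joiner first */
    dlm_leave_domain(dlm);                  /* send DLM_EXIT_DOMAIN_MSG to each peer */
    dlm_force_free_mles(dlm);
    dlm_complete_dlm_shutdown(dlm);         /* unregister handlers, stop both threads */
    dlm_put(dlm);                           /* drop this user's reference on the context */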
796 struct dlm_ctxt *dlm = NULL;
821 dlm = __dlm_lookup_domain_full(query->domain, query->name_len);
822 if (!dlm)
832 if (test_bit(nodenum, dlm->domain_map)) {
844 /* Once the dlm ctxt is marked as leaving then we don't want
848 if (dlm->dlm_state != DLM_CTXT_LEAVING) {
850 spin_lock(&dlm->spinlock);
852 if (dlm->dlm_state == DLM_CTXT_NEW &&
853 dlm->joining_node == DLM_LOCK_RES_OWNER_UNKNOWN) {
858 } else if (dlm->joining_node != DLM_LOCK_RES_OWNER_UNKNOWN) {
861 } else if (dlm->reco.state & DLM_RECO_STATE_ACTIVE) {
865 } else if (test_bit(bit, dlm->recovery_map)) {
869 } else if (test_bit(bit, dlm->domain_map)) {
882 &dlm->dlm_locking_proto,
886 &dlm->fs_locking_proto,
893 __dlm_set_joining_node(dlm, query->node_idx);
897 spin_unlock(&dlm->spinlock);
913 struct dlm_ctxt *dlm = NULL;
921 dlm = __dlm_lookup_domain_full(assert->domain, assert->name_len);
922 /* XXX should we consider no dlm ctxt an error? */
923 if (dlm) {
924 spin_lock(&dlm->spinlock);
929 BUG_ON(dlm->joining_node != assert->node_idx);
931 if (dlm->reco.state & DLM_RECO_STATE_ACTIVE) {
932 mlog(0, "dlm recovery is ongoing, disallow join\n");
933 spin_unlock(&dlm->spinlock);
938 set_bit(assert->node_idx, dlm->domain_map);
939 clear_bit(assert->node_idx, dlm->exit_domain_map);
940 __dlm_set_joining_node(dlm, DLM_LOCK_RES_OWNER_UNKNOWN);
943 assert->node_idx, dlm->name);
944 __dlm_print_nodes(dlm);
947 dlm_hb_event_notify_attached(dlm, assert->node_idx, 1);
949 spin_unlock(&dlm->spinlock);
956 static int dlm_match_regions(struct dlm_ctxt *dlm,
969 qr->qr_domain, qr->qr_node, dlm->node_num);
978 qr->qr_domain, dlm->node_num, qr->qr_node);
1009 dlm->node_num, qr->qr_node);
1032 qr->qr_node, dlm->node_num);
1042 static int dlm_send_regions(struct dlm_ctxt *dlm, unsigned long *node_map)
1058 qr->qr_node = dlm->node_num;
1059 qr->qr_namelen = strlen(dlm->name);
1060 memcpy(qr->qr_domain, dlm->name, qr->qr_namelen);
1073 if (i == dlm->node_num)
1099 struct dlm_ctxt *dlm = NULL;
1116 dlm = __dlm_lookup_domain_full(qr->qr_domain, qr->qr_namelen);
1117 if (!dlm) {
1123 spin_lock(&dlm->spinlock);
1124 if (dlm->joining_node != qr->qr_node) {
1127 dlm->joining_node);
1132 if (dlm->dlm_locking_proto.pv_major == 1 &&
1133 dlm->dlm_locking_proto.pv_minor == 0) {
1135 "but active dlm protocol is %d.%d\n", qr->qr_node,
1136 qr->qr_domain, dlm->dlm_locking_proto.pv_major,
1137 dlm->dlm_locking_proto.pv_minor);
1141 status = dlm_match_regions(dlm, qr, local, sizeof(qr->qr_regions));
1144 spin_unlock(&dlm->spinlock);
1154 static int dlm_match_nodes(struct dlm_ctxt *dlm, struct dlm_query_nodeinfo *qn)
1196 qn->qn_nodenum, dlm->node_num);
1203 dlm->node_num, qn->qn_nodenum);
1214 static int dlm_send_nodeinfo(struct dlm_ctxt *dlm, unsigned long *node_map)
1243 qn->qn_nodenum = dlm->node_num;
1245 qn->qn_namelen = strlen(dlm->name);
1246 memcpy(qn->qn_domain, dlm->name, qn->qn_namelen);
1251 if (i == dlm->node_num)
1276 struct dlm_ctxt *dlm = NULL;
1285 dlm = __dlm_lookup_domain_full(qn->qn_domain, qn->qn_namelen);
1286 if (!dlm) {
1292 spin_lock(&dlm->spinlock);
1294 if (dlm->joining_node != qn->qn_nodenum) {
1297 dlm->joining_node);
1302 if (dlm->dlm_locking_proto.pv_major == 1 &&
1303 dlm->dlm_locking_proto.pv_minor == 0) {
1305 "but active dlm protocol is %d.%d\n", qn->qn_nodenum,
1306 qn->qn_domain, dlm->dlm_locking_proto.pv_major,
1307 dlm->dlm_locking_proto.pv_minor);
1311 status = dlm_match_nodes(dlm, qn);
1315 spin_unlock(&dlm->spinlock);
1325 struct dlm_ctxt *dlm = NULL;
1333 dlm = __dlm_lookup_domain_full(cancel->domain, cancel->name_len);
1335 if (dlm) {
1336 spin_lock(&dlm->spinlock);
1340 BUG_ON(dlm->joining_node != cancel->node_idx);
1341 __dlm_set_joining_node(dlm, DLM_LOCK_RES_OWNER_UNKNOWN);
1343 spin_unlock(&dlm->spinlock);
1350 static int dlm_send_one_join_cancel(struct dlm_ctxt *dlm,
1357 cancel_msg.node_idx = dlm->node_num;
1358 cancel_msg.name_len = strlen(dlm->name);
1359 memcpy(cancel_msg.domain, dlm->name, cancel_msg.name_len);
1376 static int dlm_send_join_cancels(struct dlm_ctxt *dlm,
1395 if (node == dlm->node_num)
1398 tmpstat = dlm_send_one_join_cancel(dlm, node);
1412 static int dlm_request_join(struct dlm_ctxt *dlm,
1424 join_msg.node_idx = dlm->node_num;
1425 join_msg.name_len = strlen(dlm->name);
1426 memcpy(join_msg.domain, dlm->name, join_msg.name_len);
1427 join_msg.dlm_proto = dlm->dlm_locking_proto;
1428 join_msg.fs_proto = dlm->fs_locking_proto;
1431 byte_copymap(join_msg.node_map, dlm->live_nodes_map, O2NM_MAX_NODES);
1445 his dlm isn't up, so we can consider him a 'yes' but not
1462 dlm->dlm_locking_proto.pv_major,
1463 dlm->dlm_locking_proto.pv_minor,
1464 dlm->fs_locking_proto.pv_major,
1465 dlm->fs_locking_proto.pv_minor,
1471 dlm->dlm_locking_proto.pv_minor = packet.dlm_minor;
1472 dlm->fs_locking_proto.pv_minor = packet.fs_minor;
1477 dlm->dlm_locking_proto.pv_major,
1478 dlm->dlm_locking_proto.pv_minor,
1479 dlm->fs_locking_proto.pv_major,
1480 dlm->fs_locking_proto.pv_minor);
1499 static int dlm_send_one_join_assert(struct dlm_ctxt *dlm,
1509 assert_msg.node_idx = dlm->node_num;
1510 assert_msg.name_len = strlen(dlm->name);
1511 memcpy(assert_msg.domain, dlm->name, assert_msg.name_len);
1526 static void dlm_send_join_asserts(struct dlm_ctxt *dlm,
1535 if (node == dlm->node_num)
1542 status = dlm_send_one_join_assert(dlm, node);
1544 spin_lock(&dlm->spinlock);
1545 live = test_bit(node, dlm->live_nodes_map);
1546 spin_unlock(&dlm->spinlock);
1565 static int dlm_should_restart_join(struct dlm_ctxt *dlm,
1576 spin_lock(&dlm->spinlock);
1579 ret = !bitmap_equal(ctxt->live_map, dlm->live_nodes_map,
1581 spin_unlock(&dlm->spinlock);
1589 static int dlm_try_to_join_domain(struct dlm_ctxt *dlm)
1595 mlog(0, "%p", dlm);
1607 o2hb_fill_node_map(dlm->live_nodes_map, O2NM_MAX_NODES);
1609 spin_lock(&dlm->spinlock);
1610 bitmap_copy(ctxt->live_map, dlm->live_nodes_map, O2NM_MAX_NODES);
1611 __dlm_set_joining_node(dlm, dlm->node_num);
1612 spin_unlock(&dlm->spinlock);
1617 if (node == dlm->node_num)
1620 status = dlm_request_join(dlm, node, &response);
1627 * dlm up. */
1631 if (dlm_should_restart_join(dlm, ctxt, response)) {
1643 spin_lock(&dlm->spinlock);
1644 bitmap_copy(dlm->domain_map, ctxt->yes_resp_map, O2NM_MAX_NODES);
1645 set_bit(dlm->node_num, dlm->domain_map);
1646 spin_unlock(&dlm->spinlock);
1649 if (dlm->dlm_locking_proto.pv_major > 1 ||
1650 dlm->dlm_locking_proto.pv_minor > 0) {
1651 status = dlm_send_nodeinfo(dlm, ctxt->yes_resp_map);
1656 status = dlm_send_regions(dlm, ctxt->yes_resp_map);
1663 dlm_send_join_asserts(dlm, ctxt->yes_resp_map);
1670 dlm->dlm_state = DLM_CTXT_JOINED;
1671 dlm->num_joins++;
1675 spin_lock(&dlm->spinlock);
1676 __dlm_set_joining_node(dlm, DLM_LOCK_RES_OWNER_UNKNOWN);
1678 printk(KERN_NOTICE "o2dlm: Joining domain %s ", dlm->name);
1679 __dlm_print_nodes(dlm);
1681 spin_unlock(&dlm->spinlock);
1686 tmpstat = dlm_send_join_cancels(dlm,
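
dlm_try_to_join_domain() is the sending side of the join handlers above: it snapshots the heartbeat live map, marks itself as the joining node, sends a join request to every other live node, restarts if membership changed underneath it, and only then installs the yes-responders as dlm->domain_map, exchanges node and region info (for locking protocol versions newer than 1.0), asserts the join, and moves to DLM_CTXT_JOINED. A condensed, hedged outline assembled from the lines above; declarations, error handling and the join-cancel retry path (line 1686) are elided, and ctxt stands for the scratch join context used at lines 1610 and 1644:

    o2hb_fill_node_map(dlm->live_nodes_map, O2NM_MAX_NODES);

    spin_lock(&dlm->spinlock);
    bitmap_copy(ctxt->live_map, dlm->live_nodes_map, O2NM_MAX_NODES);
    __dlm_set_joining_node(dlm, dlm->node_num);
    spin_unlock(&dlm->spinlock);

    for_each_set_bit(node, ctxt->live_map, O2NM_MAX_NODES) {
        if (node == dlm->node_num)
            continue;
        status = dlm_request_join(dlm, node, &response);
        if (status < 0 || dlm_should_restart_join(dlm, ctxt, response))
            break;      /* in the full file: cancel the joins and retry */
    }

    spin_lock(&dlm->spinlock);
    bitmap_copy(dlm->domain_map, ctxt->yes_resp_map, O2NM_MAX_NODES);
    set_bit(dlm->node_num, dlm->domain_map);
    spin_unlock(&dlm->spinlock);

    if (dlm->dlm_locking_proto.pv_major > 1 ||
        dlm->dlm_locking_proto.pv_minor > 0) {
        dlm_send_nodeinfo(dlm, ctxt->yes_resp_map);
        dlm_send_regions(dlm, ctxt->yes_resp_map);
    }

    dlm_send_join_asserts(dlm, ctxt->yes_resp_map);
    dlm->dlm_state = DLM_CTXT_JOINED;   /* taken under dlm_domain_lock in the full file */
    dlm->num_joins++;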
1699 static void dlm_unregister_domain_handlers(struct dlm_ctxt *dlm)
1701 o2hb_unregister_callback(dlm->name, &dlm->dlm_hb_up);
1702 o2hb_unregister_callback(dlm->name, &dlm->dlm_hb_down);
1703 o2net_unregister_handler_list(&dlm->dlm_domain_handlers);
1706 static int dlm_register_domain_handlers(struct dlm_ctxt *dlm)
1712 o2hb_setup_callback(&dlm->dlm_hb_down, O2HB_NODE_DOWN_CB,
1713 dlm_hb_node_down_cb, dlm, DLM_HB_NODE_DOWN_PRI);
1714 o2hb_setup_callback(&dlm->dlm_hb_up, O2HB_NODE_UP_CB,
1715 dlm_hb_node_up_cb, dlm, DLM_HB_NODE_UP_PRI);
1717 status = o2hb_register_callback(dlm->name, &dlm->dlm_hb_down);
1721 status = o2hb_register_callback(dlm->name, &dlm->dlm_hb_up);
1725 status = o2net_register_handler(DLM_MASTER_REQUEST_MSG, dlm->key,
1728 dlm, NULL, &dlm->dlm_domain_handlers);
1732 status = o2net_register_handler(DLM_ASSERT_MASTER_MSG, dlm->key,
1735 dlm, dlm_assert_master_post_handler,
1736 &dlm->dlm_domain_handlers);
1740 status = o2net_register_handler(DLM_CREATE_LOCK_MSG, dlm->key,
1743 dlm, NULL, &dlm->dlm_domain_handlers);
1747 status = o2net_register_handler(DLM_CONVERT_LOCK_MSG, dlm->key,
1750 dlm, NULL, &dlm->dlm_domain_handlers);
1754 status = o2net_register_handler(DLM_UNLOCK_LOCK_MSG, dlm->key,
1757 dlm, NULL, &dlm->dlm_domain_handlers);
1761 status = o2net_register_handler(DLM_PROXY_AST_MSG, dlm->key,
1764 dlm, NULL, &dlm->dlm_domain_handlers);
1768 status = o2net_register_handler(DLM_EXIT_DOMAIN_MSG, dlm->key,
1771 dlm, NULL, &dlm->dlm_domain_handlers);
1775 status = o2net_register_handler(DLM_DEREF_LOCKRES_MSG, dlm->key,
1778 dlm, NULL, &dlm->dlm_domain_handlers);
1782 status = o2net_register_handler(DLM_MIGRATE_REQUEST_MSG, dlm->key,
1785 dlm, NULL, &dlm->dlm_domain_handlers);
1789 status = o2net_register_handler(DLM_MIG_LOCKRES_MSG, dlm->key,
1792 dlm, NULL, &dlm->dlm_domain_handlers);
1796 status = o2net_register_handler(DLM_MASTER_REQUERY_MSG, dlm->key,
1799 dlm, NULL, &dlm->dlm_domain_handlers);
1803 status = o2net_register_handler(DLM_LOCK_REQUEST_MSG, dlm->key,
1806 dlm, NULL, &dlm->dlm_domain_handlers);
1810 status = o2net_register_handler(DLM_RECO_DATA_DONE_MSG, dlm->key,
1813 dlm, NULL, &dlm->dlm_domain_handlers);
1817 status = o2net_register_handler(DLM_BEGIN_RECO_MSG, dlm->key,
1820 dlm, NULL, &dlm->dlm_domain_handlers);
1824 status = o2net_register_handler(DLM_FINALIZE_RECO_MSG, dlm->key,
1827 dlm, NULL, &dlm->dlm_domain_handlers);
1831 status = o2net_register_handler(DLM_BEGIN_EXIT_DOMAIN_MSG, dlm->key,
1834 dlm, NULL, &dlm->dlm_domain_handlers);
1838 status = o2net_register_handler(DLM_DEREF_LOCKRES_DONE, dlm->key,
1841 dlm, NULL, &dlm->dlm_domain_handlers);
1844 dlm_unregister_domain_handlers(dlm);
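
dlm_register_domain_handlers() sets up the two heartbeat callbacks and then registers one o2net handler per DLM message type, every one keyed by dlm->key and chained onto dlm->dlm_domain_handlers; any failure falls through to a common bail path that calls dlm_unregister_domain_handlers() (line 1844), so the unregister side (lines 1701-1703) only has to drop the two heartbeat callbacks and unregister that single list. The per-handler pattern, sketched for one hypothetical message type (the message constant, payload struct and handler are placeholders):

    int status;

    status = o2net_register_handler(DLM_EXAMPLE_MSG, dlm->key,
                                    sizeof(struct dlm_example_msg),
                                    dlm_example_msg_handler,
                                    dlm, NULL, &dlm->dlm_domain_handlers);
    if (status)
        goto bail;

    /* ... each remaining message type is registered the same way ... */

bail:
    if (status)
        dlm_unregister_domain_handlers(dlm);
    return status;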
1849 static int dlm_join_domain(struct dlm_ctxt *dlm)
1856 BUG_ON(!dlm);
1858 mlog(0, "Join domain %s\n", dlm->name);
1860 status = dlm_register_domain_handlers(dlm);
1866 status = dlm_launch_thread(dlm);
1872 status = dlm_launch_recovery_thread(dlm);
1878 dlm_debug_init(dlm);
1880 snprintf(wq_name, O2NM_MAX_NAME_LEN, "dlm_wq-%s", dlm->name);
1881 dlm->dlm_worker = alloc_workqueue(wq_name, WQ_MEM_RECLAIM, 0);
1882 if (!dlm->dlm_worker) {
1889 status = dlm_try_to_join_domain(dlm);
1903 mlog(ML_NOTICE, "Timed out joining dlm domain "
1904 "%s after %u msecs\n", dlm->name,
1934 dlm_unregister_domain_handlers(dlm);
1935 dlm_complete_thread(dlm);
1936 dlm_complete_recovery_thread(dlm);
1937 dlm_destroy_dlm_worker(dlm);
1948 struct dlm_ctxt *dlm = NULL;
1950 dlm = kzalloc(sizeof(*dlm), GFP_KERNEL);
1951 if (!dlm) {
1957 dlm->name = kstrdup(domain, GFP_KERNEL);
1958 if (dlm->name == NULL) {
1964 dlm->lockres_hash = (struct hlist_head **)dlm_alloc_pagevec(DLM_HASH_PAGES);
1965 if (!dlm->lockres_hash) {
1972 INIT_HLIST_HEAD(dlm_lockres_hash(dlm, i));
1974 dlm->master_hash = (struct hlist_head **)
1976 if (!dlm->master_hash) {
1983 INIT_HLIST_HEAD(dlm_master_hash(dlm, i));
1985 dlm->key = key;
1986 dlm->node_num = o2nm_this_node();
1988 dlm_create_debugfs_subroot(dlm);
1990 spin_lock_init(&dlm->spinlock);
1991 spin_lock_init(&dlm->master_lock);
1992 spin_lock_init(&dlm->ast_lock);
1993 spin_lock_init(&dlm->track_lock);
1994 INIT_LIST_HEAD(&dlm->list);
1995 INIT_LIST_HEAD(&dlm->dirty_list);
1996 INIT_LIST_HEAD(&dlm->reco.resources);
1997 INIT_LIST_HEAD(&dlm->reco.node_data);
1998 INIT_LIST_HEAD(&dlm->purge_list);
1999 INIT_LIST_HEAD(&dlm->dlm_domain_handlers);
2000 INIT_LIST_HEAD(&dlm->tracking_list);
2001 dlm->reco.state = 0;
2003 INIT_LIST_HEAD(&dlm->pending_asts);
2004 INIT_LIST_HEAD(&dlm->pending_basts);
2006 mlog(0, "dlm->recovery_map=%p, &(dlm->recovery_map[0])=%p\n",
2007 dlm->recovery_map, &(dlm->recovery_map[0]));
2009 bitmap_zero(dlm->recovery_map, O2NM_MAX_NODES);
2010 bitmap_zero(dlm->live_nodes_map, O2NM_MAX_NODES);
2011 bitmap_zero(dlm->domain_map, O2NM_MAX_NODES);
2013 dlm->dlm_thread_task = NULL;
2014 dlm->dlm_reco_thread_task = NULL;
2015 dlm->dlm_worker = NULL;
2016 init_waitqueue_head(&dlm->dlm_thread_wq);
2017 init_waitqueue_head(&dlm->dlm_reco_thread_wq);
2018 init_waitqueue_head(&dlm->reco.event);
2019 init_waitqueue_head(&dlm->ast_wq);
2020 init_waitqueue_head(&dlm->migration_wq);
2021 INIT_LIST_HEAD(&dlm->mle_hb_events);
2023 dlm->joining_node = DLM_LOCK_RES_OWNER_UNKNOWN;
2024 init_waitqueue_head(&dlm->dlm_join_events);
2026 dlm->migrate_done = 0;
2028 dlm->reco.new_master = O2NM_INVALID_NODE_NUM;
2029 dlm->reco.dead_node = O2NM_INVALID_NODE_NUM;
2031 atomic_set(&dlm->res_tot_count, 0);
2032 atomic_set(&dlm->res_cur_count, 0);
2034 atomic_set(&dlm->mle_tot_count[i], 0);
2035 atomic_set(&dlm->mle_cur_count[i], 0);
2038 spin_lock_init(&dlm->work_lock);
2039 INIT_LIST_HEAD(&dlm->work_list);
2040 INIT_WORK(&dlm->dispatched_work, dlm_dispatch_work);
2042 kref_init(&dlm->dlm_refs);
2043 dlm->dlm_state = DLM_CTXT_NEW;
2045 INIT_LIST_HEAD(&dlm->dlm_eviction_callbacks);
2048 kref_read(&dlm->dlm_refs));
2052 if (ret < 0 && dlm) {
2053 if (dlm->master_hash)
2054 dlm_free_pagevec((void **)dlm->master_hash,
2057 if (dlm->lockres_hash)
2058 dlm_free_pagevec((void **)dlm->lockres_hash,
2061 kfree(dlm->name);
2062 kfree(dlm);
2063 dlm = NULL;
2065 return dlm;
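
dlm_alloc_ctxt() builds the entire context before it becomes visible: both hash tables are DLM_HASH_PAGES page vectors (freed in the error path at lines 2052-2063, or later by dlm_free_ctxt_mem(), lines 290-301), and kref_init() at line 2042 creates the reference that the final dlm_put() in dlm_unregister_domain() (line 722) ultimately releases through dlm_ctxt_release(). A hedged sketch of how a pagevec-backed hash is presumably indexed; the DLM_BUCKETS_PER_PAGE arithmetic is an assumption and the helper name is hypothetical:

    /* Pick the page holding bucket i, then the bucket within that page. */
    static inline struct hlist_head *lockres_bucket(struct dlm_ctxt *dlm,
                                                    unsigned int i)
    {
        return dlm->lockres_hash[(i / DLM_BUCKETS_PER_PAGE) % DLM_HASH_PAGES] +
               (i % DLM_BUCKETS_PER_PAGE);
    }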
2103 struct dlm_ctxt *dlm = NULL;
2115 dlm = NULL;
2124 dlm = __dlm_lookup_domain(domain);
2125 if (dlm) {
2126 if (dlm->dlm_state != DLM_CTXT_JOINED) {
2136 if (dlm_protocol_compare(&dlm->fs_locking_proto, fs_proto)) {
2146 __dlm_get(dlm);
2147 dlm->num_joins++;
2169 dlm = new_ctxt;
2173 list_add_tail(&dlm->list, &dlm_domains);
2180 dlm->dlm_locking_proto = dlm_protocol;
2181 dlm->fs_locking_proto = *fs_proto;
2183 ret = dlm_join_domain(dlm);
2186 dlm_put(dlm);
2191 *fs_proto = dlm->fs_locking_proto;
2199 dlm = ERR_PTR(ret);
2201 return dlm;
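
dlm_register_domain() is the entry point all of the above supports: it either finds an existing, fully joined context for the domain name (taking a reference and bumping num_joins, lines 2146-2147) or inserts a newly allocated one on dlm_domains and runs dlm_join_domain(); failures come back as an ERR_PTR (line 2199) and, on success, the negotiated filesystem locking protocol is written back through fs_proto (line 2191). A hedged caller-side sketch; the domain name, key and protocol numbers are placeholders:

    struct dlm_protocol_version fs_proto = { .pv_major = 1, .pv_minor = 0 };
    struct dlm_ctxt *dlm;
    u32 key = 0x12345678;       /* placeholder network key */

    dlm = dlm_register_domain("EXAMPLEDOMAIN", key, &fs_proto);
    if (IS_ERR(dlm))
        return PTR_ERR(dlm);
    /* fs_proto now carries the minor version negotiated with the domain */

    /* ... take and drop cluster locks against this domain ... */

    dlm_unregister_domain(dlm); /* gives back the join taken above */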
2259 * dlm completes its recovery work, otherwise it may be able to
2260 * acquire locks on resources requiring recovery. Since the dlm can
2269 void dlm_fire_domain_eviction_callbacks(struct dlm_ctxt *dlm,
2275 list_for_each_entry(cb, &dlm->dlm_eviction_callbacks, ec_item) {
2291 void dlm_register_eviction_cb(struct dlm_ctxt *dlm,
2295 list_add_tail(&cb->ec_item, &dlm->dlm_eviction_callbacks);
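
dlm_fire_domain_eviction_callbacks() walks dlm->dlm_eviction_callbacks (line 2275) to notify registered users, and dlm_register_eviction_cb() simply chains a callback onto that list (line 2295). A hedged usage sketch; dlm_setup_eviction_cb() and dlm_unregister_eviction_cb() are companion helpers assumed to exist alongside the registration shown here, and the callback signature, function and data are placeholders:

    static void my_evict_fn(int node_num, void *data)
    {
        /* node node_num was evicted from the domain; invalidate any
         * state cached against it */
    }

    static struct dlm_eviction_cb my_cb;

    static void attach_eviction_cb(struct dlm_ctxt *dlm, void *my_data)
    {
        dlm_setup_eviction_cb(&my_cb, my_evict_fn, my_data);
        dlm_register_eviction_cb(dlm, &my_cb);
    }

    /* before dlm_unregister_domain(): dlm_unregister_eviction_cb(&my_cb); */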