Lines matching defs:ls (the struct dlm_ls *ls pointer in fs/dlm/lockspace.c; each entry is prefixed with its source line number)

33 static ssize_t dlm_control_store(struct dlm_ls *ls, const char *buf, size_t len)
41 ls = dlm_find_lockspace_local(ls->ls_local_handle);
42 if (!ls)
47 dlm_ls_stop(ls);
50 dlm_ls_start(ls);
55 dlm_put_lockspace(ls);
59 static ssize_t dlm_event_store(struct dlm_ls *ls, const char *buf, size_t len)
61 int rc = kstrtoint(buf, 0, &ls->ls_uevent_result);
65 set_bit(LSFL_UEVENT_WAIT, &ls->ls_flags);
66 wake_up(&ls->ls_uevent_wait);
70 static ssize_t dlm_id_show(struct dlm_ls *ls, char *buf)
72 return snprintf(buf, PAGE_SIZE, "%u\n", ls->ls_global_id);
75 static ssize_t dlm_id_store(struct dlm_ls *ls, const char *buf, size_t len)
77 int rc = kstrtouint(buf, 0, &ls->ls_global_id);
84 static ssize_t dlm_nodir_show(struct dlm_ls *ls, char *buf)
86 return snprintf(buf, PAGE_SIZE, "%u\n", dlm_no_directory(ls));
89 static ssize_t dlm_nodir_store(struct dlm_ls *ls, const char *buf, size_t len)
97 set_bit(LSFL_NODIR, &ls->ls_flags);
101 static ssize_t dlm_recover_status_show(struct dlm_ls *ls, char *buf)
103 uint32_t status = dlm_recover_status(ls);
107 static ssize_t dlm_recover_nodeid_show(struct dlm_ls *ls, char *buf)
109 return snprintf(buf, PAGE_SIZE, "%d\n", ls->ls_recover_nodeid);
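The show/store pairs above follow one pattern: show formats a single lockspace field into the sysfs page buffer with snprintf(), and store parses the written string back into that field with kstrtoint()/kstrtouint(). A minimal userspace sketch of the same shape, with strtoul() standing in for kstrtouint() and an invented buffer size in place of PAGE_SIZE:

/* Userspace analogue of the show/store pairs at lines 70-109: "show"
 * formats a field into a caller-supplied buffer, "store" parses user
 * input back into it.  The buffer size and field names are illustrative. */
#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

#define BUF_SIZE 4096                    /* plays the PAGE_SIZE role */

static unsigned int global_id;           /* plays the ls_global_id role */

/* dlm_id_show(): format the field for a read of the sysfs file. */
static int id_show(char *buf)
{
	return snprintf(buf, BUF_SIZE, "%u\n", global_id);
}

/* dlm_id_store(): parse what was written to the sysfs file. */
static int id_store(const char *buf)
{
	char *end;
	unsigned long val;

	errno = 0;
	val = strtoul(buf, &end, 0);
	if (errno || end == buf)
		return -EINVAL;          /* kstrtouint() reports this case */
	global_id = (unsigned int)val;
	return 0;
}

int main(void)
{
	char buf[BUF_SIZE];

	if (id_store("42\n") == 0) {
		id_show(buf);
		printf("id file now reads: %s", buf);
	}
	return 0;
}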
164 struct dlm_ls *ls = container_of(kobj, struct dlm_ls, ls_kobj);
166 return a->show ? a->show(ls, buf) : 0;
172 struct dlm_ls *ls = container_of(kobj, struct dlm_ls, ls_kobj);
174 return a->store ? a->store(ls, buf, len) : len;
179 struct dlm_ls *ls = container_of(k, struct dlm_ls, ls_kobj);
180 kfree(ls);
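Lines 164, 172 and 179 recover the struct dlm_ls from its embedded kobject with container_of(), so the sysfs callbacks and the kobject release handler (which kfrees the lockspace) all start from the same pointer arithmetic. A runnable userspace sketch of that pattern, with invented stand-in types and a simplified container_of() macro:

/* Minimal sketch of the container_of() pattern used at lines 164/172/179:
 * given a pointer to an embedded member (here ls_kobj), recover the
 * enclosing structure.  The struct names below are made up; only the
 * pointer arithmetic mirrors the kernel macro. */
#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct fake_kobject { int refcount; };          /* stand-in for struct kobject */
struct fake_ls {                                /* stand-in for struct dlm_ls  */
	char name[16];
	struct fake_kobject kobj;               /* embedded, like ls_kobj      */
};

int main(void)
{
	struct fake_ls ls = { .name = "example" };
	struct fake_kobject *kobj = &ls.kobj;   /* what sysfs hands back       */

	/* Same recovery step as: container_of(kobj, struct dlm_ls, ls_kobj) */
	struct fake_ls *back = container_of(kobj, struct fake_ls, kobj);
	printf("recovered lockspace name: %s\n", back->name);
	return 0;
}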
196 static int do_uevent(struct dlm_ls *ls, int in)
199 kobject_uevent(&ls->ls_kobj, KOBJ_ONLINE);
201 kobject_uevent(&ls->ls_kobj, KOBJ_OFFLINE);
203 log_rinfo(ls, "%s the lockspace group...", in ? "joining" : "leaving");
208 wait_event(ls->ls_uevent_wait,
209 test_and_clear_bit(LSFL_UEVENT_WAIT, &ls->ls_flags));
211 log_rinfo(ls, "group event done %d", ls->ls_uevent_result);
213 return ls->ls_uevent_result;
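do_uevent() (lines 196-213) and dlm_event_store() (lines 59-66) form a handshake: the kernel emits KOBJ_ONLINE or KOBJ_OFFLINE and sleeps in wait_event() until the userland daemon writes the join/leave result, which stores ls_uevent_result, sets LSFL_UEVENT_WAIT and wakes ls_uevent_wait. A userspace analogue of that handshake, using pthreads in place of wait_event()/wake_up(); every name below is illustrative:

/* Build with: cc -pthread.  One thread announces an event and sleeps
 * until a "daemon" thread posts a result and wakes it. */
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t  wake = PTHREAD_COND_INITIALIZER;
static int uevent_done;          /* plays the LSFL_UEVENT_WAIT role      */
static int uevent_result;        /* plays the ls_uevent_result role      */

static void *daemon_thread(void *arg)
{
	(void)arg;
	pthread_mutex_lock(&lock);
	uevent_result = 0;           /* result written via the event_done file */
	uevent_done = 1;             /* set_bit(LSFL_UEVENT_WAIT, ...)         */
	pthread_cond_signal(&wake);  /* wake_up(&ls->ls_uevent_wait)           */
	pthread_mutex_unlock(&lock);
	return NULL;
}

int main(void)
{
	pthread_t tid;

	printf("joining the lockspace group...\n");
	pthread_create(&tid, NULL, daemon_thread, NULL);

	/* wait_event(ls->ls_uevent_wait, test_and_clear_bit(...)) */
	pthread_mutex_lock(&lock);
	while (!uevent_done)
		pthread_cond_wait(&wake, &lock);
	uevent_done = 0;             /* the ...and_clear_bit part              */
	pthread_mutex_unlock(&lock);

	printf("group event done %d\n", uevent_result);
	pthread_join(tid, NULL);
	return 0;
}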
218 const struct dlm_ls *ls = container_of(kobj, struct dlm_ls, ls_kobj);
220 add_uevent_var(env, "LOCKSPACE=%s", ls->ls_name);
250 struct dlm_ls *ls;
254 list_for_each_entry(ls, &lslist, ls_list) {
255 if (ls->ls_global_id == id) {
256 atomic_inc(&ls->ls_count);
260 ls = NULL;
263 return ls;
268 struct dlm_ls *ls;
271 list_for_each_entry(ls, &lslist, ls_list) {
272 if (ls->ls_local_handle == lockspace) {
273 atomic_inc(&ls->ls_count);
277 ls = NULL;
280 return ls;
285 struct dlm_ls *ls;
288 list_for_each_entry(ls, &lslist, ls_list) {
289 if (ls->ls_device.minor == minor) {
290 atomic_inc(&ls->ls_count);
294 ls = NULL;
297 return ls;
300 void dlm_put_lockspace(struct dlm_ls *ls)
302 if (atomic_dec_and_test(&ls->ls_count))
303 wake_up(&ls->ls_count_wait);
306 static void remove_lockspace(struct dlm_ls *ls)
309 wait_event(ls->ls_count_wait, atomic_read(&ls->ls_count) == 0);
312 if (atomic_read(&ls->ls_count) != 0) {
317 WARN_ON(ls->ls_create_count != 0);
318 list_del(&ls->ls_list);
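Lines 250-318 show the lifetime protocol around ls_count: each dlm_find_lockspace_*() variant searches lslist and takes a reference, dlm_put_lockspace() drops it (waking ls_count_wait on the last put), and remove_lockspace() blocks until the count drains to zero. A simplified, runnable userspace analogue, assuming a plain yield loop may stand in for wait_event() and omitting the lslist_lock that protects the kernel search:

/* Build with: cc -pthread.  All names are illustrative, not the kernel API. */
#include <pthread.h>
#include <sched.h>
#include <stdatomic.h>
#include <stddef.h>
#include <stdio.h>
#include <unistd.h>

struct lockspace {
	unsigned int global_id;
	atomic_int count;                        /* plays the ls_count role */
};

static struct lockspace table[] = {
	{ .global_id = 1 }, { .global_id = 2 },
};

/* dlm_find_lockspace_global(): search, then take a reference. */
static struct lockspace *find_lockspace(unsigned int id)
{
	for (size_t i = 0; i < sizeof(table) / sizeof(table[0]); i++)
		if (table[i].global_id == id) {
			atomic_fetch_add(&table[i].count, 1);
			return &table[i];
		}
	return NULL;
}

/* dlm_put_lockspace(): drop the reference taken by find. */
static void put_lockspace(struct lockspace *ls)
{
	atomic_fetch_sub(&ls->count, 1);
}

static void *ls_user(void *arg)
{
	struct lockspace *ls = arg;              /* reference handed over by caller */
	printf("using lockspace %u\n", ls->global_id);
	usleep(1000);                            /* pretend to do some work */
	put_lockspace(ls);
	return NULL;
}

int main(void)
{
	pthread_t tid;
	struct lockspace *ls = find_lockspace(2); /* takes the reference */

	if (!ls)
		return 1;
	pthread_create(&tid, NULL, ls_user, ls);  /* reference travels with ls */

	/* remove_lockspace(): wait until every reference is gone. */
	while (atomic_load(&ls->count) != 0)
		sched_yield();
	printf("lockspace %u has no users, safe to remove\n", ls->global_id);

	pthread_join(tid, NULL);
	return 0;
}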
339 struct dlm_ls *ls;
382 list_for_each_entry(ls, &lslist, ls_list) {
383 WARN_ON(ls->ls_create_count <= 0);
384 if (ls->ls_namelen != namelen)
386 if (memcmp(ls->ls_name, name, namelen))
392 ls->ls_create_count++;
393 *lockspace = ls;
404 ls = kzalloc(sizeof(*ls), GFP_NOFS);
405 if (!ls)
407 memcpy(ls->ls_name, name, namelen);
408 ls->ls_namelen = namelen;
409 ls->ls_lvblen = lvblen;
410 atomic_set(&ls->ls_count, 0);
411 init_waitqueue_head(&ls->ls_count_wait);
412 ls->ls_flags = 0;
413 ls->ls_scan_time = jiffies;
416 ls->ls_ops = ops;
417 ls->ls_ops_arg = ops_arg;
423 ls->ls_exflags = (flags & ~(DLM_LSFL_FS | DLM_LSFL_NEWEXCL));
425 INIT_LIST_HEAD(&ls->ls_toss);
426 INIT_LIST_HEAD(&ls->ls_keep);
427 rwlock_init(&ls->ls_rsbtbl_lock);
429 error = rhashtable_init(&ls->ls_rsbtbl, &dlm_rhash_rsb_params);
433 idr_init(&ls->ls_lkbidr);
434 rwlock_init(&ls->ls_lkbidr_lock);
436 INIT_LIST_HEAD(&ls->ls_waiters);
437 spin_lock_init(&ls->ls_waiters_lock);
438 INIT_LIST_HEAD(&ls->ls_orphans);
439 spin_lock_init(&ls->ls_orphans_lock);
441 INIT_LIST_HEAD(&ls->ls_new_rsb);
442 spin_lock_init(&ls->ls_new_rsb_spin);
444 INIT_LIST_HEAD(&ls->ls_nodes);
445 INIT_LIST_HEAD(&ls->ls_nodes_gone);
446 ls->ls_num_nodes = 0;
447 ls->ls_low_nodeid = 0;
448 ls->ls_total_weight = 0;
449 ls->ls_node_array = NULL;
451 memset(&ls->ls_local_rsb, 0, sizeof(struct dlm_rsb));
452 ls->ls_local_rsb.res_ls = ls;
454 ls->ls_debug_rsb_dentry = NULL;
455 ls->ls_debug_waiters_dentry = NULL;
457 init_waitqueue_head(&ls->ls_uevent_wait);
458 ls->ls_uevent_result = 0;
459 init_completion(&ls->ls_recovery_done);
460 ls->ls_recovery_result = -1;
462 spin_lock_init(&ls->ls_cb_lock);
463 INIT_LIST_HEAD(&ls->ls_cb_delay);
465 ls->ls_recoverd_task = NULL;
466 mutex_init(&ls->ls_recoverd_active);
467 spin_lock_init(&ls->ls_recover_lock);
468 spin_lock_init(&ls->ls_rcom_spin);
469 get_random_bytes(&ls->ls_rcom_seq, sizeof(uint64_t));
470 ls->ls_recover_status = 0;
471 ls->ls_recover_seq = get_random_u64();
472 ls->ls_recover_args = NULL;
473 init_rwsem(&ls->ls_in_recovery);
474 rwlock_init(&ls->ls_recv_active);
475 INIT_LIST_HEAD(&ls->ls_requestqueue);
476 rwlock_init(&ls->ls_requestqueue_lock);
477 spin_lock_init(&ls->ls_clear_proc_locks);
484 ls->ls_recover_buf = kmalloc(DLM_MAX_SOCKET_BUFSIZE, GFP_NOFS);
485 if (!ls->ls_recover_buf) {
490 ls->ls_slot = 0;
491 ls->ls_num_slots = 0;
492 ls->ls_slots_size = 0;
493 ls->ls_slots = NULL;
495 INIT_LIST_HEAD(&ls->ls_recover_list);
496 spin_lock_init(&ls->ls_recover_list_lock);
497 idr_init(&ls->ls_recover_idr);
498 spin_lock_init(&ls->ls_recover_idr_lock);
499 ls->ls_recover_list_count = 0;
500 ls->ls_local_handle = ls;
501 init_waitqueue_head(&ls->ls_wait_general);
502 INIT_LIST_HEAD(&ls->ls_masters_list);
503 rwlock_init(&ls->ls_masters_lock);
504 INIT_LIST_HEAD(&ls->ls_dir_dump_list);
505 rwlock_init(&ls->ls_dir_dump_lock);
507 INIT_LIST_HEAD(&ls->ls_toss_q);
508 spin_lock_init(&ls->ls_toss_q_lock);
509 timer_setup(&ls->ls_timer, dlm_rsb_toss_timer,
513 ls->ls_create_count = 1;
514 list_add(&ls->ls_list, &lslist);
518 error = dlm_callback_start(ls);
520 log_error(ls, "can't start dlm_callback %d", error);
525 init_waitqueue_head(&ls->ls_recover_lock_wait);
528 * Once started, dlm_recoverd first looks for ls in lslist, then
534 error = dlm_recoverd_start(ls);
536 log_error(ls, "can't start dlm_recoverd %d", error);
540 wait_event(ls->ls_recover_lock_wait,
541 test_bit(LSFL_RECOVER_LOCK, &ls->ls_flags));
543 /* let kobject handle freeing of ls if there's an error */
546 ls->ls_kobj.kset = dlm_kset;
547 error = kobject_init_and_add(&ls->ls_kobj, &dlm_ktype, NULL,
548 "%s", ls->ls_name);
551 kobject_uevent(&ls->ls_kobj, KOBJ_ADD);
559 error = do_uevent(ls, 1);
564 wait_for_completion(&ls->ls_recovery_done);
565 error = ls->ls_recovery_result;
569 dlm_create_debug_file(ls);
571 log_rinfo(ls, "join complete");
572 *lockspace = ls;
576 do_uevent(ls, 0);
577 dlm_clear_members(ls);
578 kfree(ls->ls_node_array);
580 dlm_recoverd_stop(ls);
582 dlm_callback_stop(ls);
585 list_del(&ls->ls_list);
587 idr_destroy(&ls->ls_recover_idr);
588 kfree(ls->ls_recover_buf);
590 idr_destroy(&ls->ls_lkbidr);
591 rhashtable_destroy(&ls->ls_rsbtbl);
594 kobject_put(&ls->ls_kobj);
596 kfree(ls);
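The tail of new_lockspace() (roughly lines 576-596) is the usual unwind-on-error shape: each setup step that can fail jumps to a label that releases only what was already initialized, in reverse order, ending with kobject_put()/kfree(). A small sketch of that goto-unwind idiom with invented resources; only the control flow mirrors the kernel function:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct lockspace {
	char *name;
	char *recover_buf;                      /* stands in for ls_recover_buf */
};

static struct lockspace *new_lockspace(const char *name)
{
	struct lockspace *ls;

	ls = calloc(1, sizeof(*ls));            /* kzalloc(..., GFP_NOFS) */
	if (!ls)
		return NULL;

	ls->name = strdup(name);
	if (!ls->name)
		goto out_lsfree;

	ls->recover_buf = malloc(4096);         /* ls_recover_buf */
	if (!ls->recover_buf)
		goto out_namefree;

	return ls;                              /* "join complete" */

 out_namefree:                                  /* unwind in reverse order of setup */
	free(ls->name);
 out_lsfree:
	free(ls);
	return NULL;
}

int main(void)
{
	struct lockspace *ls = new_lockspace("example");

	if (!ls)
		return 1;
	printf("created lockspace %s\n", ls->name);
	free(ls->recover_buf);
	free(ls->name);
	free(ls);
	return 0;
}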
677 static int lockspace_busy(struct dlm_ls *ls, int force)
681 read_lock_bh(&ls->ls_lkbidr_lock);
683 rv = idr_for_each(&ls->ls_lkbidr, lkb_idr_is_any, ls);
685 rv = idr_for_each(&ls->ls_lkbidr, lkb_idr_is_local, ls);
689 read_unlock_bh(&ls->ls_lkbidr_lock);
700 static int release_lockspace(struct dlm_ls *ls, int force)
705 busy = lockspace_busy(ls, force);
708 if (ls->ls_create_count == 1) {
712 /* remove_lockspace takes ls off lslist */
713 ls->ls_create_count = 0;
716 } else if (ls->ls_create_count > 1) {
717 rv = --ls->ls_create_count;
724 log_debug(ls, "release_lockspace no remove %d", rv);
731 dlm_device_deregister(ls);
734 do_uevent(ls, 0);
736 dlm_recoverd_stop(ls);
741 clear_bit(LSFL_RUNNING, &ls->ls_flags);
742 timer_shutdown_sync(&ls->ls_timer);
745 dlm_clear_members(ls);
749 dlm_callback_stop(ls);
751 remove_lockspace(ls);
753 dlm_delete_debug_file(ls);
755 idr_destroy(&ls->ls_recover_idr);
756 kfree(ls->ls_recover_buf);
762 idr_for_each(&ls->ls_lkbidr, lkb_idr_free, ls);
763 idr_destroy(&ls->ls_lkbidr);
768 rhashtable_free_and_destroy(&ls->ls_rsbtbl, rhash_free_rsb, NULL);
770 while (!list_empty(&ls->ls_new_rsb)) {
771 rsb = list_first_entry(&ls->ls_new_rsb, struct dlm_rsb,
781 dlm_purge_requestqueue(ls);
782 kfree(ls->ls_recover_args);
783 dlm_clear_members(ls);
784 dlm_clear_members_gone(ls);
785 kfree(ls->ls_node_array);
786 log_rinfo(ls, "release_lockspace final free");
787 kobject_put(&ls->ls_kobj);
788 /* The ls structure will be freed when the kobject is done with it */
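release_lockspace() (lines 700-788) distinguishes the creation count from the usage count: only when ls_create_count drops from 1 to 0 does the full teardown run; otherwise the count is merely decremented and the lockspace stays up. A minimal sketch of that decision, leaving out the busy/force checks and the lslist_lock that protect it in the kernel; the names are illustrative:

#include <stdio.h>

struct lockspace { int create_count; };

/* Returns 0 when the caller should do the full teardown, or the positive
 * remaining count when other creators still hold the lockspace. */
static int release_lockspace(struct lockspace *ls)
{
	if (ls->create_count == 1) {
		ls->create_count = 0;       /* last creator: proceed to teardown */
		return 0;
	}
	return --ls->create_count;          /* others remain: no teardown yet   */
}

int main(void)
{
	struct lockspace ls = { .create_count = 2 };

	printf("first release  -> %d (no remove)\n", release_lockspace(&ls));
	printf("second release -> %d (final free)\n", release_lockspace(&ls));
	return 0;
}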
810 struct dlm_ls *ls;
813 ls = dlm_find_lockspace_local(lockspace);
814 if (!ls)
816 dlm_put_lockspace(ls);
819 error = release_lockspace(ls, force);
831 struct dlm_ls *ls;
837 list_for_each_entry(ls, &lslist, ls_list) {
838 if (!test_bit(LSFL_RUNNING, &ls->ls_flags)) {
843 log_error(ls, "no userland control daemon, stopping lockspace");
844 dlm_ls_stop(ls);