Lines Matching defs:delayed_root

42 void btrfs_init_delayed_root(struct btrfs_delayed_root *delayed_root)
44 atomic_set(&delayed_root->items, 0);
45 atomic_set(&delayed_root->items_seq, 0);
46 delayed_root->nodes = 0;
47 spin_lock_init(&delayed_root->lock);
48 init_waitqueue_head(&delayed_root->wait);
49 INIT_LIST_HEAD(&delayed_root->node_list);
50 INIT_LIST_HEAD(&delayed_root->prepare_list);
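
The initializer above touches every field of the structure, so its layout can be inferred even though the listing does not show it. A sketch of what btrfs_init_delayed_root() implies (an inference from the calls above, not a quote of the header):

struct btrfs_delayed_root {
	spinlock_t lock;		/* protects node_list and prepare_list */
	struct list_head node_list;	/* delayed nodes with pending items */
	struct list_head prepare_list;	/* nodes staged for the async worker */
	atomic_t items;			/* count of queued delayed items */
	atomic_t items_seq;		/* sequence bumped as items finish */
	int nodes;			/* count of queued delayed nodes */
	wait_queue_head_t wait;		/* throttled callers sleep here */
};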
212 struct btrfs_delayed_root *delayed_root)
217 spin_lock(&delayed_root->lock);
218 if (list_empty(&delayed_root->node_list))
221 p = delayed_root->node_list.next;
225 spin_unlock(&delayed_root->lock);
233 struct btrfs_delayed_root *delayed_root;
237 delayed_root = node->root->fs_info->delayed_root;
238 spin_lock(&delayed_root->lock);
241 if (list_empty(&delayed_root->node_list))
243 p = delayed_root->node_list.next;
244 } else if (list_is_last(&node->n_list, &delayed_root->node_list))
252 spin_unlock(&delayed_root->lock);
261 struct btrfs_delayed_root *delayed_root;
266 delayed_root = delayed_node->root->fs_info->delayed_root;
270 btrfs_queue_delayed_node(delayed_root, delayed_node, mod);
272 btrfs_dequeue_delayed_node(delayed_root, delayed_node);
296 struct btrfs_delayed_root *delayed_root)
301 spin_lock(&delayed_root->lock);
302 if (list_empty(&delayed_root->prepare_list))
305 p = delayed_root->prepare_list.next;
310 spin_unlock(&delayed_root->lock);
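
The btrfs_first_delayed_node() and btrfs_first_prepared_delayed_node() fragments above (lines 212-225 and 296-310) share one pattern: take delayed_root->lock, peek at the head of the relevant list, and hand back the first queued node, if any. A minimal sketch of that pattern, using the n_list linkage visible at line 244 and leaving out the reference counting the real helpers also perform (the function name here is illustrative):

static struct btrfs_delayed_node *
peek_first_delayed_node(struct btrfs_delayed_root *delayed_root)
{
	struct btrfs_delayed_node *node = NULL;

	spin_lock(&delayed_root->lock);
	if (!list_empty(&delayed_root->node_list))
		node = list_first_entry(&delayed_root->node_list,
					struct btrfs_delayed_node, n_list);
	spin_unlock(&delayed_root->lock);

	return node;
}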
411 atomic_inc(&delayed_node->root->fs_info->delayed_root->items);
415 static void finish_one_item(struct btrfs_delayed_root *delayed_root)
417 int seq = atomic_inc_return(&delayed_root->items_seq);
420 if ((atomic_dec_return(&delayed_root->items) <
422 cond_wake_up_nomb(&delayed_root->wait);
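
finish_one_item() pairs with the atomic_inc of delayed_root->items done when an item is queued (lines 411, 1951, 1995): it bumps items_seq, drops items, and conditionally wakes throttled waiters. The condition at line 420 is cut off by the single-line match; judging from the thresholds used later in this listing, the intent is to wake waiters once the backlog falls below BTRFS_DELAYED_BACKGROUND, or periodically every BTRFS_DELAYED_BATCH completions. A sketch of that shape, where the exact expression is an assumption:

static void finish_one_item(struct btrfs_delayed_root *delayed_root)
{
	int seq = atomic_inc_return(&delayed_root->items_seq);

	/* Assumed completion of the condition truncated at line 420. */
	if (atomic_dec_return(&delayed_root->items) < BTRFS_DELAYED_BACKGROUND ||
	    seq % BTRFS_DELAYED_BATCH == 0)
		cond_wake_up_nomb(&delayed_root->wait);
}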
429 struct btrfs_delayed_root *delayed_root;
438 delayed_root = delayed_node->root->fs_info->delayed_root;
449 finish_one_item(delayed_root);
984 struct btrfs_delayed_root *delayed_root;
992 delayed_root = delayed_node->root->fs_info->delayed_root;
993 finish_one_item(delayed_root);
1001 struct btrfs_delayed_root *delayed_root;
1006 delayed_root = delayed_node->root->fs_info->delayed_root;
1007 finish_one_item(delayed_root);
1152 struct btrfs_delayed_root *delayed_root;
1169 delayed_root = fs_info->delayed_root;
1171 curr_node = btrfs_first_delayed_node(delayed_root);
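
btrfs_run_delayed_items() (lines 1152-1171) flushes the whole backlog at transaction commit time, starting from btrfs_first_delayed_node() and working along node_list. A rough outline of that loop; commit_one_node() and next_delayed_node() are hypothetical stand-ins for the internal helpers, and the real loop additionally honours a node-count limit, drops node references, and bails out on error:

static void flush_all_delayed_nodes(struct btrfs_trans_handle *trans,
				    struct btrfs_delayed_root *delayed_root)
{
	struct btrfs_delayed_node *curr_node;

	curr_node = btrfs_first_delayed_node(delayed_root);
	while (curr_node) {
		if (commit_one_node(trans, curr_node))	  /* hypothetical helper */
			break;
		curr_node = next_delayed_node(curr_node); /* hypothetical helper */
	}
}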
1322 struct btrfs_delayed_root *delayed_root;
1330 struct btrfs_delayed_root *delayed_root;
1339 delayed_root = async_work->delayed_root;
1346 if (atomic_read(&delayed_root->items) <
1350 delayed_node = btrfs_first_prepared_delayed_node(delayed_root);
1382 wake_up(&delayed_root->wait);
1387 static int btrfs_wq_run_delayed_node(struct btrfs_delayed_root *delayed_root,
1396 async_work->delayed_root = delayed_root;
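
Lines 1322-1396 show the asynchronous path: a small work item carries the delayed_root pointer (plus a batch count) into a worker, which drains prepared nodes while the backlog stays high and finally wakes delayed_root->wait. A simplified sketch of that plumbing using a plain work_struct rather than btrfs's own work helpers; the type and function names are illustrative:

struct delayed_flush_work {
	struct btrfs_delayed_root *delayed_root;
	int nr;				/* items to flush; 0 means background mode */
	struct work_struct work;
};

static void delayed_flush_worker(struct work_struct *work)
{
	struct delayed_flush_work *async_work =
		container_of(work, struct delayed_flush_work, work);
	struct btrfs_delayed_root *delayed_root = async_work->delayed_root;

	/* ... take nodes via btrfs_first_prepared_delayed_node() and flush them ... */

	wake_up(&delayed_root->wait);
	kfree(async_work);
}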
1406 WARN_ON(btrfs_first_delayed_node(fs_info->delayed_root));
1409 static int could_end_wait(struct btrfs_delayed_root *delayed_root, int seq)
1411 int val = atomic_read(&delayed_root->items_seq);
1416 if (atomic_read(&delayed_root->items) < BTRFS_DELAYED_BACKGROUND)
1424 struct btrfs_delayed_root *delayed_root = fs_info->delayed_root;
1426 if ((atomic_read(&delayed_root->items) < BTRFS_DELAYED_BACKGROUND) ||
1430 if (atomic_read(&delayed_root->items) >= BTRFS_DELAYED_WRITEBACK) {
1434 seq = atomic_read(&delayed_root->items_seq);
1436 ret = btrfs_wq_run_delayed_node(delayed_root, fs_info, 0);
1440 wait_event_interruptible(delayed_root->wait,
1441 could_end_wait(delayed_root, seq));
1445 btrfs_wq_run_delayed_node(delayed_root, fs_info, BTRFS_DELAYED_BATCH);
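
btrfs_balance_delayed_items() (lines 1424-1445) throttles item producers: below BTRFS_DELAYED_BACKGROUND it returns immediately, at or above BTRFS_DELAYED_WRITEBACK it kicks the async worker and sleeps on delayed_root->wait until could_end_wait() reports progress, and otherwise it only queues a BTRFS_DELAYED_BATCH-sized background flush. The visible parts of could_end_wait() (lines 1409-1416) compare items_seq with the caller's snapshot and check items against BTRFS_DELAYED_BACKGROUND; a sketch consistent with that, in which the exact sequence-window test is an assumption:

static int could_end_wait(struct btrfs_delayed_root *delayed_root, int seq)
{
	int val = atomic_read(&delayed_root->items_seq);

	/*
	 * Assumed window test: enough completions since seq was sampled
	 * (val < seq guards against sequence wraparound).
	 */
	if (val < seq || val >= seq + BTRFS_DELAYED_BATCH)
		return 1;

	/* Backlog already back under the background threshold. */
	if (atomic_read(&delayed_root->items) < BTRFS_DELAYED_BACKGROUND)
		return 1;

	return 0;
}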
1951 atomic_inc(&root->fs_info->delayed_root->items);
1995 atomic_inc(&fs_info->delayed_root->items);
2093 curr_node = btrfs_first_delayed_node(fs_info->delayed_root);