Path: /netgear-R7000-V1.0.7.12_1.2.5/components/opensource/linux/linux-2.6.36/fs/btrfs/

Lines Matching refs:worker

43 	/* list of worker threads from struct btrfs_workers */
70 * and so it may not make progress until after our btrfs worker threads
73 * This means we can't use btrfs_start_workers from inside a btrfs worker
75 * involves all of the worker threads.
80 * another worker.
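The comment block excerpted above (lines 70-80) states the central constraint of this file: kthread_run() can block waiting for memory, memory reclaim depends on these same btrfs workers making progress, so a worker thread must never start another worker synchronously. The file instead defers thread creation; only check_pending_worker_creates appears in the matches (line 153), so the flag name and the helper-queue hand-off in this sketch are assumptions from context, not the verbatim kernel code.

/*
 * Sketch of the deferred worker-start pattern (assumed detail: the
 * atomic_start_pending flag is not among the matches above).
 * A worker that wants another thread only sets a flag; the blocking
 * kthread_run() happens later from a helper queue.
 */
static void check_pending_worker_creates(struct btrfs_worker_thread *worker)
{
	struct btrfs_workers *workers = worker->workers;
	unsigned long flags;

	if (!workers->atomic_start_pending)	/* cheap unlocked check */
		return;

	spin_lock_irqsave(&workers->lock, flags);
	if (workers->atomic_start_pending) {
		workers->atomic_start_pending = 0;
		/* hand the actual kthread_run() off to a helper queue
		   here so this worker never blocks on allocation */
	}
	spin_unlock_irqrestore(&workers->lock, flags);
}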
116 static void check_idle_worker(struct btrfs_worker_thread *worker)
118 if (!worker->idle && atomic_read(&worker->num_pending) <
119 worker->workers->idle_thresh / 2) {
121 spin_lock_irqsave(&worker->workers->lock, flags);
122 worker->idle = 1;
124 /* the list may be empty if the worker is just starting */
125 if (!list_empty(&worker->worker_list)) {
126 list_move(&worker->worker_list,
127 &worker->workers->idle_list);
129 spin_unlock_irqrestore(&worker->workers->lock, flags);
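The matches give check_idle_worker (lines 116-129) nearly in full. A reconstruction, assuming the elided lines are only the local flags declaration and the closing braces: once a worker's backlog drops below half of idle_thresh, it marks itself idle and moves onto the pool's idle_list.

static void check_idle_worker(struct btrfs_worker_thread *worker)
{
	if (!worker->idle && atomic_read(&worker->num_pending) <
	    worker->workers->idle_thresh / 2) {
		unsigned long flags;
		spin_lock_irqsave(&worker->workers->lock, flags);
		worker->idle = 1;

		/* the list may be empty if the worker is just starting */
		if (!list_empty(&worker->worker_list)) {
			list_move(&worker->worker_list,
				  &worker->workers->idle_list);
		}
		spin_unlock_irqrestore(&worker->workers->lock, flags);
	}
}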
137 static void check_busy_worker(struct btrfs_worker_thread *worker)
139 if (worker->idle && atomic_read(&worker->num_pending) >=
140 worker->workers->idle_thresh) {
142 spin_lock_irqsave(&worker->workers->lock, flags);
143 worker->idle = 0;
145 if (!list_empty(&worker->worker_list)) {
146 list_move_tail(&worker->worker_list,
147 &worker->workers->worker_list);
149 spin_unlock_irqrestore(&worker->workers->lock, flags);
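check_busy_worker (lines 137-149) is the mirror image, reconstructed under the same assumption. Note the asymmetric thresholds: a worker goes idle below idle_thresh / 2 but only returns to the busy list at a full idle_thresh, giving hysteresis so a thread hovering near the boundary does not bounce between the two lists.

static void check_busy_worker(struct btrfs_worker_thread *worker)
{
	if (worker->idle && atomic_read(&worker->num_pending) >=
	    worker->workers->idle_thresh) {
		unsigned long flags;
		spin_lock_irqsave(&worker->workers->lock, flags);
		worker->idle = 0;

		if (!list_empty(&worker->worker_list)) {
			list_move_tail(&worker->worker_list,
				       &worker->workers->worker_list);
		}
		spin_unlock_irqrestore(&worker->workers->lock, flags);
	}
}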
153 static void check_pending_worker_creates(struct btrfs_worker_thread *worker)
155 struct btrfs_workers *workers = worker->workers;
225 static void put_worker(struct btrfs_worker_thread *worker)
227 if (atomic_dec_and_test(&worker->refs))
228 kfree(worker);
231 static int try_worker_shutdown(struct btrfs_worker_thread *worker)
235 spin_lock_irq(&worker->lock);
236 spin_lock(&worker->workers->lock);
237 if (worker->workers->num_workers > 1 &&
238 worker->idle &&
239 !worker->working &&
240 !list_empty(&worker->worker_list) &&
241 list_empty(&worker->prio_pending) &&
242 list_empty(&worker->pending) &&
243 atomic_read(&worker->num_pending) == 0) {
245 list_del_init(&worker->worker_list);
246 worker->workers->num_workers--;
248 spin_unlock(&worker->workers->lock);
249 spin_unlock_irq(&worker->lock);
252 put_worker(worker);
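put_worker (lines 225-228) is a plain refcount release: the last put frees the struct. try_worker_shutdown (lines 231-252) is nearly complete in the matches; in this reconstruction only the freeit local is assumed, implied by the split between unlinking under the locks and calling put_worker after they are dropped. The seven-way test lets a thread reap itself only when it is idle, not mid-job, still linked, and has nothing pending, while num_workers > 1 keeps the pool from shrinking to zero.

static void put_worker(struct btrfs_worker_thread *worker)
{
	if (atomic_dec_and_test(&worker->refs))
		kfree(worker);
}

static int try_worker_shutdown(struct btrfs_worker_thread *worker)
{
	int freeit = 0;		/* assumed local */

	spin_lock_irq(&worker->lock);
	spin_lock(&worker->workers->lock);
	if (worker->workers->num_workers > 1 &&
	    worker->idle &&
	    !worker->working &&
	    !list_empty(&worker->worker_list) &&
	    list_empty(&worker->prio_pending) &&
	    list_empty(&worker->pending) &&
	    atomic_read(&worker->num_pending) == 0) {
		freeit = 1;
		list_del_init(&worker->worker_list);
		worker->workers->num_workers--;
	}
	spin_unlock(&worker->workers->lock);
	spin_unlock_irq(&worker->lock);

	if (freeit)
		put_worker(worker);	/* drop the list's reference */
	return freeit;
}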
256 static struct btrfs_work *get_next_work(struct btrfs_worker_thread *worker,
267 if (!list_empty(&worker->prio_pending))
277 spin_lock_irq(&worker->lock);
278 list_splice_tail_init(&worker->prio_pending, prio_head);
279 list_splice_tail_init(&worker->pending, head);
285 spin_unlock_irq(&worker->lock);
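get_next_work (lines 256-285) is mostly elided, but the visible lines show the batching trick: instead of taking worker->lock once per item, both pending lists are spliced wholesale into caller-local list heads, and a non-empty prio_pending (line 267) forces a refill so priority work can jump ahead of already-spliced normal work. A condensed sketch of that shape; the glue between the matched lines is assumed:

static struct btrfs_work *get_next_work(struct btrfs_worker_thread *worker,
					struct list_head *prio_head,
					struct list_head *head)
{
	struct btrfs_work *work = NULL;

	/* refill both local lists in one shot under the lock */
	spin_lock_irq(&worker->lock);
	list_splice_tail_init(&worker->prio_pending, prio_head);
	list_splice_tail_init(&worker->pending, head);
	spin_unlock_irq(&worker->lock);

	/* always drain prio_head before head */
	if (!list_empty(prio_head))
		work = list_first_entry(prio_head, struct btrfs_work, list);
	else if (!list_empty(head))
		work = list_first_entry(head, struct btrfs_work, list);
	return work;
}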
302 struct btrfs_worker_thread *worker = arg;
315 work = get_next_work(worker, &prio_head, &head);
322 work->worker = worker;
326 atomic_dec(&worker->num_pending);
331 run_ordered_completions(worker->workers, work);
333 check_pending_worker_creates(worker);
337 spin_lock_irq(&worker->lock);
338 check_idle_worker(worker);
341 worker->working = 0;
342 spin_unlock_irq(&worker->lock);
345 spin_unlock_irq(&worker->lock);
353 if (!list_empty(&worker->pending) ||
354 !list_empty(&worker->prio_pending))
362 * worker->working is still 1, so nobody
367 if (!list_empty(&worker->pending) ||
368 !list_empty(&worker->prio_pending))
375 spin_lock_irq(&worker->lock);
377 if (!list_empty(&worker->pending) ||
378 !list_empty(&worker->prio_pending)) {
379 spin_unlock_irq(&worker->lock);
388 worker->working = 0;
389 spin_unlock_irq(&worker->lock);
393 if (!worker->working &&
394 try_worker_shutdown(worker)) {
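The scattered matches from worker_loop (lines 302-394) outline the thread body: drain the queues, publish ordered completions, then handle the delicate idle transition, where worker->working is cleared under worker->lock and both queues are re-checked (lines 353, 367, 377) so a wakeup racing with the check is never lost. A condensed paraphrase; everything between the matched lines is simplified:

static int worker_loop(void *arg)
{
	struct btrfs_worker_thread *worker = arg;
	struct list_head head, prio_head;
	struct btrfs_work *work;

	INIT_LIST_HEAD(&head);
	INIT_LIST_HEAD(&prio_head);

	do {
		while ((work = get_next_work(worker, &prio_head, &head))) {
			work->worker = worker;
			work->func(work);	/* run the job */
			atomic_dec(&worker->num_pending);
			run_ordered_completions(worker->workers, work);
			check_pending_worker_creates(worker);
		}

		/* queues look empty: clear worker->working under
		 * worker->lock, re-check pending/prio_pending for late
		 * arrivals, and only then schedule() away or let
		 * try_worker_shutdown() reap this thread (line 394) */
	} while (!kthread_should_stop());

	return 0;
}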
406 * this will wait for all the worker threads to shutdown
411 struct btrfs_worker_thread *worker;
418 worker = list_entry(cur, struct btrfs_worker_thread,
421 atomic_inc(&worker->refs);
423 if (!list_empty(&worker->worker_list)) {
424 list_del_init(&worker->worker_list);
425 put_worker(worker);
431 kthread_stop(worker->task);
433 put_worker(worker);
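btrfs_stop_workers (lines 406-433) shows the shutdown race guard: before dropping the pool lock to call kthread_stop(), which can sleep, the loop pins the worker with an extra reference (line 421) and unlinks it, so a concurrent try_worker_shutdown cannot free the struct out from under kthread_stop(). A condensed reconstruction; the idle_list handling and the bookkeeping between the matched lines are assumptions:

int btrfs_stop_workers(struct btrfs_workers *workers)
{
	struct btrfs_worker_thread *worker;

	spin_lock_irq(&workers->lock);
	list_splice_init(&workers->idle_list, &workers->worker_list);
	while (!list_empty(&workers->worker_list)) {
		worker = list_entry(workers->worker_list.next,
				    struct btrfs_worker_thread, worker_list);

		atomic_inc(&worker->refs);	/* pin across kthread_stop() */
		workers->num_workers--;
		list_del_init(&worker->worker_list);
		put_worker(worker);		/* drop the list's reference */

		spin_unlock_irq(&workers->lock); /* kthread_stop() may sleep */
		kthread_stop(worker->task);
		spin_lock_irq(&workers->lock);
		put_worker(worker);		/* drop our pin */
	}
	spin_unlock_irq(&workers->lock);
	return 0;
}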
462 * starts new worker threads. This does not enforce the max worker
468 struct btrfs_worker_thread *worker;
473 worker = kzalloc(sizeof(*worker), GFP_NOFS);
474 if (!worker) {
479 INIT_LIST_HEAD(&worker->pending);
480 INIT_LIST_HEAD(&worker->prio_pending);
481 INIT_LIST_HEAD(&worker->worker_list);
482 spin_lock_init(&worker->lock);
484 atomic_set(&worker->num_pending, 0);
485 atomic_set(&worker->refs, 1);
486 worker->workers = workers;
487 worker->task = kthread_run(worker_loop, worker,
490 if (IS_ERR(worker->task)) {
491 ret = PTR_ERR(worker->task);
492 kfree(worker);
496 list_add_tail(&worker->worker_list, &workers->idle_list);
497 worker->idle = 1;
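btrfs_start_workers (lines 462-497) is nearly complete for the per-worker setup: allocate with GFP_NOFS (this runs in filesystem context), initialize the three list heads, the lock and both counters, spawn the kthread, and only then publish the worker on the idle list. A reconstruction of the loop body; the thread-name format, the locking around the list insertion, and the error-path labels are assumptions:

/* per-worker body of the btrfs_start_workers() loop */
worker = kzalloc(sizeof(*worker), GFP_NOFS);
if (!worker) {
	ret = -ENOMEM;			/* assumed error path */
	goto fail;
}

INIT_LIST_HEAD(&worker->pending);
INIT_LIST_HEAD(&worker->prio_pending);
INIT_LIST_HEAD(&worker->worker_list);
spin_lock_init(&worker->lock);

atomic_set(&worker->num_pending, 0);
atomic_set(&worker->refs, 1);		/* the list's reference */
worker->workers = workers;
worker->task = kthread_run(worker_loop, worker,
			   "btrfs-%s-%d", workers->name, i); /* name assumed */
if (IS_ERR(worker->task)) {
	ret = PTR_ERR(worker->task);
	kfree(worker);			/* thread never ran; free directly */
	goto fail;
}

spin_lock_irq(&workers->lock);		/* locking assumed */
list_add_tail(&worker->worker_list, &workers->idle_list);
worker->idle = 1;			/* new threads start idle */
workers->num_workers++;
spin_unlock_irq(&workers->lock);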
518 * run through the list and find a worker thread that doesn't have a lot
524 struct btrfs_worker_thread *worker;
539 worker = list_entry(next, struct btrfs_worker_thread,
541 return worker;
550 * requests submitted at roughly the same time onto the same worker.
553 worker = list_entry(next, struct btrfs_worker_thread, worker_list);
554 worker->sequence++;
556 if (worker->sequence % workers->idle_thresh == 0)
558 return worker;
562 * selects a worker thread to take the next job. This will either find
563 * an idle worker, start a new worker up to the max count, or just return
568 struct btrfs_worker_thread *worker;
574 worker = next_worker(workers);
576 if (!worker) {
586 /* we're below the limit, start another worker */
604 worker = list_entry(fallback,
608 * this makes sure the worker doesn't exit before it is placed
611 atomic_inc(&worker->num_pending);
613 return worker;
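next_worker (lines 518-558) and find_worker (lines 562-613) split the selection policy. next_worker prefers an idle thread; failing that it reuses the head of the busy list and rotates it to the tail only every idle_thresh picks (the worker->sequence counter, lines 554-556), so requests submitted at roughly the same time batch onto the same worker. find_worker handles the miss: start a new thread while under the limit (line 586), or fall back to an existing one, bumping num_pending first so the fallback cannot exit underneath the caller (lines 608-611). A condensed sketch of next_worker; the locking contract and the gaps between matched lines are assumed:

/* caller is assumed to hold workers->lock */
static struct btrfs_worker_thread *next_worker(struct btrfs_workers *workers)
{
	struct btrfs_worker_thread *worker;
	struct list_head *next;

	/* idle threads first */
	if (!list_empty(&workers->idle_list)) {
		next = workers->idle_list.next;
		return list_entry(next, struct btrfs_worker_thread,
				  worker_list);
	}
	if (list_empty(&workers->worker_list))
		return NULL;	/* find_worker() starts one or falls back */

	/*
	 * everyone is busy: reuse the head, and rotate it to the tail
	 * only every idle_thresh picks to batch near-simultaneous
	 * submissions onto one worker
	 */
	next = workers->worker_list.next;
	worker = list_entry(next, struct btrfs_worker_thread, worker_list);
	worker->sequence++;
	if (worker->sequence % workers->idle_thresh == 0)
		list_move_tail(next, &workers->worker_list);
	return worker;
}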
623 struct btrfs_worker_thread *worker = work->worker;
630 spin_lock_irqsave(&worker->lock, flags);
632 list_add_tail(&work->list, &worker->prio_pending);
634 list_add_tail(&work->list, &worker->pending);
635 atomic_inc(&worker->num_pending);
640 if (worker->idle) {
641 spin_lock(&worker->workers->lock);
642 worker->idle = 0;
643 list_move_tail(&worker->worker_list,
644 &worker->workers->worker_list);
645 spin_unlock(&worker->workers->lock);
647 if (!worker->working) {
649 worker->working = 1;
653 wake_up_process(worker->task);
654 spin_unlock_irqrestore(&worker->lock, flags);
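btrfs_requeue_work (lines 623-654) is nearly complete in the matches: it puts a work item back onto the worker that already owns it (work->worker), re-sorting priority items into prio_pending, pulling the worker off the idle list if necessary, and waking the thread only when worker->working was not already set. A reconstruction; the WORK_HIGH_PRIO_BIT test and the wake local are assumed from context:

int btrfs_requeue_work(struct btrfs_work *work)
{
	struct btrfs_worker_thread *worker = work->worker;
	unsigned long flags;
	int wake = 0;

	spin_lock_irqsave(&worker->lock, flags);
	if (test_bit(WORK_HIGH_PRIO_BIT, &work->flags)) /* flag name assumed */
		list_add_tail(&work->list, &worker->prio_pending);
	else
		list_add_tail(&work->list, &worker->pending);
	atomic_inc(&worker->num_pending);

	/* by definition we're busy: leave the idle list */
	if (worker->idle) {
		spin_lock(&worker->workers->lock);
		worker->idle = 0;
		list_move_tail(&worker->worker_list,
			       &worker->workers->worker_list);
		spin_unlock(&worker->workers->lock);
	}
	if (!worker->working) {
		wake = 1;		/* avoid redundant wakeups */
		worker->working = 1;
	}

	if (wake)
		wake_up_process(worker->task);
	spin_unlock_irqrestore(&worker->lock, flags);
	return 0;
}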
670 struct btrfs_worker_thread *worker;
678 worker = find_worker(workers);
696 spin_lock_irqsave(&worker->lock, flags);
699 list_add_tail(&work->list, &worker->prio_pending);
701 list_add_tail(&work->list, &worker->pending);
702 check_busy_worker(worker);
708 if (!worker->working)
710 worker->working = 1;
713 wake_up_process(worker->task);
714 spin_unlock_irqrestore(&worker->lock, flags);
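btrfs_queue_worker (lines 670-714) is the public entry point tying the pieces together: pick a worker with find_worker (which already took a num_pending reference, line 611), enqueue by priority, let check_busy_worker reclassify the thread, and wake it only if it was not already marked working. A condensed reconstruction of that tail; the ordered-work branch and the priority test are assumed from context:

int btrfs_queue_worker(struct btrfs_workers *workers, struct btrfs_work *work)
{
	struct btrfs_worker_thread *worker;
	unsigned long flags;
	int wake = 0;

	worker = find_worker(workers);	/* also bumps worker->num_pending */

	spin_lock_irqsave(&worker->lock, flags);
	if (test_bit(WORK_HIGH_PRIO_BIT, &work->flags)) /* flag name assumed */
		list_add_tail(&work->list, &worker->prio_pending);
	else
		list_add_tail(&work->list, &worker->pending);
	check_busy_worker(worker);	/* may pull it off the idle list */

	/* avoid calling wake_up_process() if the thread is already awake */
	if (!worker->working)
		wake = 1;
	worker->working = 1;

	if (wake)
		wake_up_process(worker->task);
	spin_unlock_irqrestore(&worker->lock, flags);
	return 0;
}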