Lines Matching refs:wq

50 struct btrfs_fs_info * __pure btrfs_workqueue_owner(const struct btrfs_workqueue *wq)
52 return wq->fs_info;
57 return work->wq->fs_info;
60 bool btrfs_workqueue_normal_congested(const struct btrfs_workqueue *wq)
63 * We could compare wq->pending with num_online_cpus()
68 if (wq->thresh == NO_THRESHOLD)
71 return atomic_read(&wq->pending) > wq->thresh * 2;
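
The fragments above (lines 60-71) appear to come from the btrfs threshold-workqueue code: a queue with thresholding disabled never reports congestion, while a thresholded queue counts as congested once its backlog of pending work exceeds twice the threshold. A minimal user-space sketch of that predicate follows; the struct, the model_* names, and the NO_THRESHOLD value are illustrative assumptions, and only the pending/thresh comparison is taken from the listing.

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

#define NO_THRESHOLD (-1)   /* assumption: sentinel meaning "thresholding disabled" */

/* Stand-in for the two fields the congestion test reads. */
struct model_wq {
	atomic_int pending;   /* work queued but not yet executed */
	int thresh;           /* threshold, or NO_THRESHOLD */
};

/* Mirrors the check at lines 68-71: no threshold means never congested;
 * otherwise congested once the backlog exceeds twice the threshold. */
static bool model_congested(struct model_wq *wq)
{
	if (wq->thresh == NO_THRESHOLD)
		return false;
	return atomic_load(&wq->pending) > wq->thresh * 2;
}

int main(void)
{
	struct model_wq wq = { .thresh = 8 };

	atomic_store(&wq.pending, 20);
	printf("pending=20 -> congested=%d\n", model_congested(&wq));  /* prints 1 */

	atomic_store(&wq.pending, 10);
	printf("pending=10 -> congested=%d\n", model_congested(&wq));  /* prints 0 */
	return 0;
}
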
74 static void btrfs_init_workqueue(struct btrfs_workqueue *wq,
77 wq->fs_info = fs_info;
78 atomic_set(&wq->pending, 0);
79 INIT_LIST_HEAD(&wq->ordered_list);
80 spin_lock_init(&wq->list_lock);
81 spin_lock_init(&wq->thres_lock);
104 * For threshold-able wq, let its concurrency grow on demand.
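
Lines 74-81 show the per-queue state the constructor initializes (the owning fs_info, the pending counter, the ordered list and its lock, and the threshold lock), and the comment at line 104 states the policy for threshold-able queues: start with minimal concurrency and let it grow on demand. The sketch below models that state and policy in user space; the field names follow the listing, while the starting values and the no-threshold fallback are assumptions inferred from that comment and from the clamp at line 197. The ordered list and both spinlocks are omitted from this single-threaded model.

#include <stdatomic.h>
#include <stdio.h>

#define NO_THRESHOLD (-1)   /* assumption: sentinel meaning "thresholding disabled" */

/* Stand-ins for the fields referenced throughout the listing. */
struct model_wq {
	void        *fs_info;        /* stand-in for struct btrfs_fs_info * */
	atomic_int   pending;        /* queued-but-not-executed work items */
	int          thresh;         /* threshold, or NO_THRESHOLD */
	unsigned int count;          /* throttles how often max_active is recomputed */
	int          limit_active;   /* upper bound on concurrency */
	int          current_active; /* concurrency currently granted to the backing pool */
};

/* Mirrors btrfs_init_workqueue() plus the "grow on demand" policy from the
 * line-104 comment: a threshold-able queue starts at minimal concurrency
 * and relies on the exec hook to raise it under load. */
static void model_init(struct model_wq *wq, void *fs_info,
		       int limit_active, int thresh)
{
	wq->fs_info = fs_info;
	atomic_store(&wq->pending, 0);
	wq->count = 0;
	wq->limit_active = limit_active;

	if (thresh <= 0) {
		wq->thresh = NO_THRESHOLD;
		wq->current_active = limit_active;  /* no scaling: run at the limit */
	} else {
		wq->thresh = thresh;
		wq->current_active = 1;             /* grow on demand */
	}
}

int main(void)
{
	struct model_wq wq;

	model_init(&wq, NULL, 8, 32);
	printf("thresh=%d limit=%d current=%d\n",
	       wq.thresh, wq.limit_active, wq.current_active);
	return 0;
}
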
155 static inline void thresh_queue_hook(struct btrfs_workqueue *wq)
157 if (wq->thresh == NO_THRESHOLD)
159 atomic_inc(&wq->pending);
167 static inline void thresh_exec_hook(struct btrfs_workqueue *wq)
173 if (wq->thresh == NO_THRESHOLD)
176 atomic_dec(&wq->pending);
177 spin_lock(&wq->thres_lock);
179 * Use wq->count to limit the calling frequency of
182 wq->count++;
183 wq->count %= (wq->thresh / 4);
184 if (!wq->count)
186 new_current_active = wq->current_active;
192 pending = atomic_read(&wq->pending);
193 if (pending > wq->thresh)
195 if (pending < wq->thresh / 2)
197 new_current_active = clamp_val(new_current_active, 1, wq->limit_active);
198 if (new_current_active != wq->current_active) {
200 wq->current_active = new_current_active;
203 spin_unlock(&wq->thres_lock);
206 workqueue_set_max_active(wq->normal_wq, wq->current_active);
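
Lines 155-206 contain the two hooks behind the on-demand scaling: queueing a work item bumps wq->pending (unless thresholding is off), and executing one decrements it, advances a wrap-around counter, and, on rounds that are not skipped, moves current_active up when the backlog exceeds thresh and down when it falls below thresh/2, clamped to [1, limit_active]; the backing workqueue's max_active is only updated when the value actually changed. Below is a single-threaded user-space model of that logic (the thres_lock and the goto structure are omitted; all model_* names are assumptions).

#include <stdatomic.h>
#include <stdio.h>

#define NO_THRESHOLD (-1)   /* assumption: sentinel meaning "thresholding disabled" */

struct model_wq {
	atomic_int   pending;        /* queued but not yet executed */
	int          thresh;
	unsigned int count;          /* cycles through 0 .. thresh/4 - 1 */
	int          limit_active;   /* ceiling for concurrency */
	int          current_active; /* concurrency last granted to the backing pool */
};

/* Stand-in for workqueue_set_max_active() on the backing workqueue. */
static void model_set_max_active(struct model_wq *wq, int max_active)
{
	(void)wq;
	printf("max_active -> %d\n", max_active);
}

/* Mirrors thresh_queue_hook() (lines 155-159): count the item unless
 * thresholding is disabled. */
static void model_queue_hook(struct model_wq *wq)
{
	if (wq->thresh == NO_THRESHOLD)
		return;
	atomic_fetch_add(&wq->pending, 1);
}

/* Mirrors the shape of thresh_exec_hook() (lines 167-206): drop the pending
 * count, advance the wrap-around counter, and on non-skipped rounds nudge
 * current_active toward the load, clamped to [1, limit_active].  Only a
 * real change is propagated to the backing pool. */
static void model_exec_hook(struct model_wq *wq)
{
	int new_active, pending;

	if (wq->thresh == NO_THRESHOLD)
		return;

	atomic_fetch_sub(&wq->pending, 1);

	wq->count++;
	wq->count %= (wq->thresh / 4);
	if (!wq->count)             /* counter wrapped: skip this round (line 184) */
		return;

	new_active = wq->current_active;
	pending = atomic_load(&wq->pending);
	if (pending > wq->thresh)
		new_active++;
	if (pending < wq->thresh / 2)
		new_active--;
	if (new_active < 1)                  /* clamp_val(new, 1, limit_active) */
		new_active = 1;
	if (new_active > wq->limit_active)
		new_active = wq->limit_active;

	if (new_active != wq->current_active) {
		wq->current_active = new_active;
		model_set_max_active(wq, new_active);
	}
}

int main(void)
{
	struct model_wq wq = { .thresh = 8, .limit_active = 4, .current_active = 1 };

	/* Queue a burst, then drain it: current_active climbs toward the
	 * limit while the backlog is deep, then falls back as it empties. */
	for (int i = 0; i < 64; i++)
		model_queue_hook(&wq);
	for (int i = 0; i < 64; i++)
		model_exec_hook(&wq);

	printf("final current_active=%d\n", wq.current_active);
	return 0;
}
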
210 static void run_ordered_work(struct btrfs_workqueue *wq,
213 struct list_head *list = &wq->ordered_list;
215 spinlock_t *lock = &wq->list_lock;
282 trace_btrfs_all_work_done(wq->fs_info, work);
290 trace_btrfs_all_work_done(wq->fs_info, self);
298 struct btrfs_workqueue *wq = work->wq;
313 thresh_exec_hook(wq);
324 run_ordered_work(wq, work);
327 trace_btrfs_all_work_done(wq->fs_info, work);
341 void btrfs_queue_work(struct btrfs_workqueue *wq, struct btrfs_work *work)
345 work->wq = wq;
346 thresh_queue_hook(wq);
348 spin_lock_irqsave(&wq->list_lock, flags);
349 list_add_tail(&work->ordered_list, &wq->ordered_list);
350 spin_unlock_irqrestore(&wq->list_lock, flags);
353 queue_work(wq->normal_wq, &work->normal_work);
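
Lines 210-353 sketch the ordered-execution scheme: btrfs_queue_work() stamps the work with its queue, runs the threshold queue hook, appends the item to wq->ordered_list under list_lock, and hands it to the backing workqueue; when a work function finishes, run_ordered_work() walks ordered_list from the head and fires ordered callbacks only for items whose normal work has completed, so those callbacks run strictly in queueing order even though the work functions may finish in any order. A simplified single-threaded model of that idea follows (no locking, no done-bit flags; all names are assumptions).

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

/* One work item: queued once, executed once, and given exactly one ordered
 * callback after every earlier item has also been executed. */
struct model_work {
	int id;
	bool executed;            /* the normal work function has finished */
	struct model_work *next;  /* link in the queue's ordered list */
};

struct model_wq {
	struct model_work *head;  /* oldest still-pending ordered item */
	struct model_work *tail;
};

/* Mirrors the tail insert of btrfs_queue_work() at line 349. */
static void model_queue_work(struct model_wq *wq, struct model_work *w)
{
	w->next = NULL;
	if (wq->tail)
		wq->tail->next = w;
	else
		wq->head = w;
	wq->tail = w;
}

/* Mirrors the spirit of run_ordered_work(): starting at the head of the
 * ordered list, fire the ordered callback for every item whose work has
 * finished, and stop at the first one that has not. */
static void model_run_ordered(struct model_wq *wq)
{
	while (wq->head && wq->head->executed) {
		struct model_work *w = wq->head;

		printf("ordered callback for work %d\n", w->id);
		wq->head = w->next;
		if (!wq->head)
			wq->tail = NULL;
	}
}

/* Mirrors the shape of btrfs_work_helper(): run the work function, then let
 * the queue drain whatever ordered callbacks are now unblocked. */
static void model_work_helper(struct model_wq *wq, struct model_work *w)
{
	printf("executing work %d\n", w->id);
	w->executed = true;
	model_run_ordered(wq);
}

int main(void)
{
	struct model_wq wq = { 0 };
	struct model_work w[3] = { { .id = 0 }, { .id = 1 }, { .id = 2 } };

	for (int i = 0; i < 3; i++)
		model_queue_work(&wq, &w[i]);

	/* Execute out of order: the ordered callbacks still fire as 0, 1, 2. */
	model_work_helper(&wq, &w[1]);  /* nothing can be ordered yet */
	model_work_helper(&wq, &w[0]);  /* unblocks 0 and then 1 */
	model_work_helper(&wq, &w[2]);  /* unblocks 2 */
	return 0;
}
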
356 void btrfs_destroy_workqueue(struct btrfs_workqueue *wq)
358 if (!wq)
360 destroy_workqueue(wq->normal_wq);
361 trace_btrfs_workqueue_destroy(wq);
362 kfree(wq);
365 void btrfs_workqueue_set_max(struct btrfs_workqueue *wq, int limit_active)
367 if (wq)
368 wq->limit_active = limit_active;
371 void btrfs_flush_workqueue(struct btrfs_workqueue *wq)
373 flush_workqueue(wq->normal_wq);
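
The remaining lines (356-373) are the lifecycle helpers: destruction is NULL-safe and tears down the backing workqueue, btrfs_workqueue_set_max() only stores a new limit_active (the exec hook's clamp picks it up on the next rescale), and flushing forwards to flush_workqueue(). A small user-space sketch of those conventions; the model_* names are assumptions.

#include <stdio.h>
#include <stdlib.h>

struct model_wq {
	int limit_active;    /* ceiling consulted by the rescale path */
	int current_active;  /* what the backing pool is currently allowed */
};

/* Mirrors btrfs_workqueue_set_max() (lines 365-368): only the ceiling is
 * touched here; current_active catches up the next time the exec hook
 * recomputes it. */
static void model_set_max(struct model_wq *wq, int limit_active)
{
	if (wq)
		wq->limit_active = limit_active;
}

/* Mirrors btrfs_destroy_workqueue() (lines 356-362): tolerate NULL so
 * teardown paths can call it unconditionally. */
static void model_destroy(struct model_wq *wq)
{
	if (!wq)
		return;
	free(wq);
}

int main(void)
{
	struct model_wq *wq = calloc(1, sizeof(*wq));

	if (!wq)
		return 1;
	wq->limit_active = 8;
	wq->current_active = 1;

	model_set_max(wq, 4);
	printf("limit=%d current=%d\n", wq->limit_active, wq->current_active);

	model_destroy(wq);
	model_destroy(NULL);   /* safe, mirrors the !wq check at line 358 */
	return 0;
}
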