Lines matching defs:wb

85 static bool wb_io_lists_populated(struct bdi_writeback *wb)
87 if (wb_has_dirty_io(wb)) {
90 set_bit(WB_has_dirty_io, &wb->state);
91 WARN_ON_ONCE(!wb->avg_write_bandwidth);
92 atomic_long_add(wb->avg_write_bandwidth,
93 &wb->bdi->tot_write_bandwidth);
98 static void wb_io_lists_depopulated(struct bdi_writeback *wb)
100 if (wb_has_dirty_io(wb) && list_empty(&wb->b_dirty) &&
101 list_empty(&wb->b_io) && list_empty(&wb->b_more_io)) {
102 clear_bit(WB_has_dirty_io, &wb->state);
103 WARN_ON_ONCE(atomic_long_sub_return(wb->avg_write_bandwidth,
104 &wb->bdi->tot_write_bandwidth) < 0);
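Only the lines mentioning wb appear above; the elided lines are just the early-return and else arms. Filled in, the pair plausibly reads as below (a reconstruction from the visible fragments; exact details may differ across kernel versions). The invariant they maintain: bdi->tot_write_bandwidth is the sum of avg_write_bandwidth over all wbs that currently have WB_has_dirty_io set.

        static bool wb_io_lists_populated(struct bdi_writeback *wb)
        {
                if (wb_has_dirty_io(wb)) {
                        return false;                   /* already accounted for */
                } else {
                        set_bit(WB_has_dirty_io, &wb->state);
                        WARN_ON_ONCE(!wb->avg_write_bandwidth);
                        atomic_long_add(wb->avg_write_bandwidth,
                                        &wb->bdi->tot_write_bandwidth);
                        return true;
                }
        }

        static void wb_io_lists_depopulated(struct bdi_writeback *wb)
        {
                /* clear WB_has_dirty_io only once all three dirty lists are empty */
                if (wb_has_dirty_io(wb) && list_empty(&wb->b_dirty) &&
                    list_empty(&wb->b_io) && list_empty(&wb->b_more_io)) {
                        clear_bit(WB_has_dirty_io, &wb->state);
                        WARN_ON_ONCE(atomic_long_sub_return(wb->avg_write_bandwidth,
                                                &wb->bdi->tot_write_bandwidth) < 0);
                }
        }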
111 * @wb: target bdi_writeback
112 * @head: one of @wb->b_{dirty|io|more_io|dirty_time}
114 * Move @inode->i_io_list to @head of @wb and set %WB_has_dirty_io.
119 struct bdi_writeback *wb,
122 assert_spin_locked(&wb->list_lock);
129 if (head != &wb->b_dirty_time)
130 return wb_io_lists_populated(wb);
132 wb_io_lists_depopulated(wb);
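inode_io_list_move_locked() is the single helper through which inodes move between a wb's IO lists, which keeps the WB_has_dirty_io accounting above consistent. A simplified sketch consistent with the fragments (recent kernels additionally assert i_lock and warn on I_FREEING inodes):

        static bool inode_io_list_move_locked(struct inode *inode,
                                              struct bdi_writeback *wb,
                                              struct list_head *head)
        {
                assert_spin_locked(&wb->list_lock);

                list_move(&inode->i_io_list, head);

                /* dirty_time doesn't count as dirty_io until expiration */
                if (head != &wb->b_dirty_time)
                        return wb_io_lists_populated(wb);

                wb_io_lists_depopulated(wb);
                return false;
        }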
136 static void wb_wakeup(struct bdi_writeback *wb)
138 spin_lock_irq(&wb->work_lock);
139 if (test_bit(WB_registered, &wb->state))
140 mod_delayed_work(bdi_wq, &wb->dwork, 0);
141 spin_unlock_irq(&wb->work_lock);
145 * This function is used when the first inode for this wb is marked dirty. It
158 static void wb_wakeup_delayed(struct bdi_writeback *wb)
163 spin_lock_irq(&wb->work_lock);
164 if (test_bit(WB_registered, &wb->state))
165 queue_delayed_work(bdi_wq, &wb->dwork, timeout);
166 spin_unlock_irq(&wb->work_lock);
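wb_wakeup() kicks the flusher immediately; wb_wakeup_delayed() defers it by the periodic writeback interval. The elided timeout computation is essentially the following (dirty_writeback_interval is in centiseconds, hence the * 10; a sketch, not necessarily verbatim):

        static void wb_wakeup_delayed(struct bdi_writeback *wb)
        {
                unsigned long timeout;

                timeout = msecs_to_jiffies(dirty_writeback_interval * 10);
                spin_lock_irq(&wb->work_lock);
                if (test_bit(WB_registered, &wb->state))
                        queue_delayed_work(bdi_wq, &wb->dwork, timeout);
                spin_unlock_irq(&wb->work_lock);
        }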
184 static void wb_queue_work(struct bdi_writeback *wb,
187 trace_writeback_queue(wb, work);
192 spin_lock_irq(&wb->work_lock);
194 if (test_bit(WB_registered, &wb->state)) {
195 list_add_tail(&work->list, &wb->work_list);
196 mod_delayed_work(bdi_wq, &wb->dwork, 0);
200 spin_unlock_irq(&wb->work_lock);
267 struct bdi_writeback *wb = NULL;
274 wb = wb_get_create(bdi, memcg_css, GFP_ATOMIC);
278 wb = wb_get_create(bdi, memcg_css, GFP_ATOMIC);
283 if (!wb)
284 wb = &bdi->wb;
290 if (unlikely(cmpxchg(&inode->i_wb, NULL, wb)))
291 wb_put(wb);
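These fragments are the tail of __inode_attach_wb(): a memcg-specific wb is looked up or created with GFP_ATOMIC, the bdi-embedded wb serves as a fallback, and the association is published with cmpxchg() so concurrent attachers race safely. A simplified sketch; only the task-based css lookup branch is shown (the folio-based branch visible in the second wb_get_create() fragment is omitted), and helper details may differ by version:

        struct backing_dev_info *bdi = inode_to_bdi(inode);
        struct bdi_writeback *wb = NULL;

        if (inode_cgwb_enabled(inode)) {
                struct cgroup_subsys_state *memcg_css;

                /* pin the memcg css of the dirtying task */
                memcg_css = task_get_css(current, memory_cgrp_id);
                wb = wb_get_create(bdi, memcg_css, GFP_ATOMIC);
                css_put(memcg_css);
        }

        if (!wb)
                wb = &bdi->wb;          /* fall back to the bdi-embedded wb */

        /*
         * Several attachers may race; cmpxchg() picks the winner and the
         * loser drops the reference it acquired above.
         */
        if (unlikely(cmpxchg(&inode->i_wb, NULL, wb)))
                wb_put(wb);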
296 * inode_cgwb_move_to_attached - put the inode onto wb->b_attached list
298 * @wb: target bdi_writeback
300 * Remove the inode from wb's io lists and, if necessary, put it onto the b_attached
304 struct bdi_writeback *wb)
306 assert_spin_locked(&wb->list_lock);
311 if (wb != &wb->bdi->wb)
312 list_move(&inode->i_io_list, &wb->b_attached);
315 wb_io_lists_depopulated(wb);
319 * locked_inode_to_wb_and_lock_list - determine a locked inode's wb and lock it
322 * Returns @inode's wb with its list_lock held. @inode->i_lock must be
323 * held on entry and is released on return. The returned wb is guaranteed
324 * to stay @inode's associated wb until its list_lock is released.
329 __acquires(&wb->list_lock)
332 struct bdi_writeback *wb = inode_to_wb(inode);
336 * @inode->i_lock and @wb->list_lock but list_lock nests
340 wb_get(wb);
342 spin_lock(&wb->list_lock);
345 if (likely(wb == inode->i_wb)) {
346 wb_put(wb); /* @inode already has ref */
347 return wb;
350 spin_unlock(&wb->list_lock);
351 wb_put(wb);
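locked_inode_to_wb_and_lock_list() is the classic unlock/relock/recheck dance: i_wb can only change while both i_lock and the wb's list_lock are held, but list_lock nests outside i_lock, so the wb is pinned, i_lock dropped, list_lock taken, and the association rechecked; on a mismatch everything is dropped and the loop retries. Reconstructed from the fragments:

        static struct bdi_writeback *
        locked_inode_to_wb_and_lock_list(struct inode *inode)
                __releases(&inode->i_lock)
                __acquires(&wb->list_lock)
        {
                while (true) {
                        struct bdi_writeback *wb = inode_to_wb(inode);

                        /*
                         * The association is protected by both
                         * @inode->i_lock and @wb->list_lock but list_lock
                         * nests outside i_lock.  Deref i_wb again under
                         * list_lock to see if it still points to the same wb.
                         */
                        wb_get(wb);
                        spin_unlock(&inode->i_lock);
                        spin_lock(&wb->list_lock);

                        if (likely(wb == inode->i_wb)) {
                                wb_put(wb);     /* @inode already holds a ref */
                                return wb;
                        }

                        spin_unlock(&wb->list_lock);
                        wb_put(wb);
                        cpu_relax();
                        spin_lock(&inode->i_lock);
                }
        }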
358 * inode_to_wb_and_lock_list - determine an inode's wb and lock it
365 __acquires(&wb->list_lock)
480 * ensures that the new wb is visible if they see !I_WB_SWITCH.
508 * since I_WB_SWITCH assertion and all wb stat update transactions
513 * gives us exclusion against all wb related operations on @inode
577 * inode_switch_wbs - change the wb association of an inode
579 * @new_wb_id: ID of the new wb
581 * Switch @inode's wb association to the wb identified by @new_wb_id. The
604 /* find and pin the new wb */
660 * @wb: target wb
662 * Switch all inodes attached to @wb to a nearest living ancestor's wb in order
663 * to eventually release the dying @wb. Returns %true if not all inodes were
666 bool cleanup_offline_cgwb(struct bdi_writeback *wb)
680 for (memcg_css = wb->memcg_css->parent; memcg_css;
682 isw->new_wb = wb_get_create(wb->bdi, memcg_css, GFP_KERNEL);
687 isw->new_wb = &wb->bdi->wb; /* wb_get() is noop for bdi's wb */
690 spin_lock(&wb->list_lock);
699 restart = isw_prepare_wbs_switch(isw, &wb->b_attached, &nr);
701 restart = isw_prepare_wbs_switch(isw, &wb->b_dirty_time, &nr);
702 spin_unlock(&wb->list_lock);
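cleanup_offline_cgwb() migrates every inode attached to a dying wb to the nearest ancestor memcg that can provide a live wb on the same bdi, falling back to the bdi-embedded wb. The ancestor walk, reconstructed from the fragments (isw is the struct inode_switch_wbs_context being prepared; the surrounding allocation and switch scheduling are elided):

        struct cgroup_subsys_state *memcg_css;

        for (memcg_css = wb->memcg_css->parent; memcg_css;
             memcg_css = memcg_css->parent) {
                isw->new_wb = wb_get_create(wb->bdi, memcg_css, GFP_KERNEL);
                if (isw->new_wb)
                        break;
        }
        if (unlikely(!isw->new_wb))
                isw->new_wb = &wb->bdi->wb;     /* wb_get() is noop for bdi's wb */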
742 wbc->wb = inode_to_wb(inode);
745 wbc->wb_id = wbc->wb->memcg_css->id;
752 wb_get(wbc->wb);
756 * A dying wb indicates that either the blkcg associated with the
758 * case, a replacement wb should already be available and we should
759 * refresh the wb immediately. In the second case, trying to
762 if (unlikely(wb_dying(wbc->wb) && !css_is_dying(wbc->wb->memcg_css)))
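This is the tail of wbc_attach_and_unlock_inode(): if the inode's current wb is dying but its memcg is not, a switch to a fresh wb for the same memcg is kicked off right away. The elided body of the conditional is presumably the single call below (a sketch):

        if (unlikely(wb_dying(wbc->wb) && !css_is_dying(wbc->wb->memcg_css)))
                inode_switch_wbs(inode, wbc->wb_id);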
793 * current wb and the last round's winner wb (max of last round's current
794 * wb, the winner from two rounds ago, and the last round's majority
806 struct bdi_writeback *wb = wbc->wb;
812 if (!wb)
839 wb->avg_write_bandwidth);
850 * The switch verdict is reached if foreign wb's consume
867 * Switch if the current wb isn't the consistent winner.
871 * the wrong wb for an extended period of time.
885 wb_put(wbc->wb);
886 wbc->wb = NULL;
913 if (!wbc->wb || wbc->no_cgroup_owner)
944 * @wb: target bdi_writeback to split @nr_pages to
947 * Split @wb's portion of @nr_pages according to @wb's write bandwidth in
948 * relation to the total write bandwidth of all wb's w/ dirty inodes on
949 * @wb->bdi.
951 static long wb_split_bdi_pages(struct bdi_writeback *wb, long nr_pages)
953 unsigned long this_bw = wb->avg_write_bandwidth;
954 unsigned long tot_bw = atomic_long_read(&wb->bdi->tot_write_bandwidth);
960 * This may be called on clean wb's and proportional distribution
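The split itself is plain proportional arithmetic: each wb receives nr_pages scaled by its share of the bdi's total write bandwidth, with the degenerate cases (LONG_MAX request, clean wb, zero total) passed through unchanged. A sketch consistent with the fragments:

        static long wb_split_bdi_pages(struct bdi_writeback *wb, long nr_pages)
        {
                unsigned long this_bw = wb->avg_write_bandwidth;
                unsigned long tot_bw = atomic_long_read(&wb->bdi->tot_write_bandwidth);

                if (nr_pages == LONG_MAX)
                        return LONG_MAX;

                /*
                 * This may be called on clean wb's and proportional
                 * distribution may not make sense; use the original
                 * @nr_pages in those cases and err on the side of
                 * writing more.
                 */
                if (!tot_bw || this_bw >= tot_bw)
                        return nr_pages;
                else
                        return DIV_ROUND_UP_ULL((u64)nr_pages * this_bw, tot_bw);
        }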
971 * bdi_split_work_to_wbs - split a wb_writeback_work to all wb's of a bdi
974 * @skip_if_busy: skip wb's which already have writeback in progress
976 * Split and issue @base_work to all wb's (bdi_writeback's) of @bdi which
978 * distributed to the busy wbs according to each wb's proportion in the
986 struct bdi_writeback *wb = list_entry(&bdi->wb_list,
992 list_for_each_entry_continue_rcu(wb, &bdi->wb_list, bdi_node) {
1004 if (!wb_has_dirty_io(wb) &&
1006 list_empty(&wb->b_dirty_time)))
1008 if (skip_if_busy && writeback_in_progress(wb))
1011 nr_pages = wb_split_bdi_pages(wb, base_work->nr_pages);
1018 wb_queue_work(wb, work);
1023 * If wb_tryget fails, the wb has been shutdown, skip it.
1025 * Pin @wb so that it stays on @bdi->wb_list. This allows
1026 * continuing iteration from @wb after dropping and
1029 if (!wb_tryget(wb))
1039 wb_queue_work(wb, work);
1040 last_wb = wb;
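bdi_split_work_to_wbs() walks bdi->wb_list under RCU and clones base_work once per eligible wb. Condensed to its core (the on-stack fallback path taken when the clone allocation fails, visible in the wb_tryget()/last_wb fragments above, is omitted from this sketch):

        list_for_each_entry_continue_rcu(wb, &bdi->wb_list, bdi_node) {
                struct wb_writeback_work *work;

                /* SYNC_ALL also writes out I_DIRTY_TIME inodes */
                if (!wb_has_dirty_io(wb) &&
                    (base_work->sync_mode == WB_SYNC_NONE ||
                     list_empty(&wb->b_dirty_time)))
                        continue;
                if (skip_if_busy && writeback_in_progress(wb))
                        continue;

                work = kmalloc(sizeof(*work), GFP_ATOMIC);
                if (work) {
                        *work = *base_work;
                        work->nr_pages = wb_split_bdi_pages(wb,
                                                base_work->nr_pages);
                        work->auto_free = 1;
                        wb_queue_work(wb, work);
                        continue;
                }
                /* allocation failed: pin @wb and issue an on-stack work item */
        }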
1067 struct bdi_writeback *wb;
1088 * And find the associated wb. If the wb isn't there already
1091 wb = wb_get_lookup(bdi, memcg_css);
1092 if (!wb) {
1119 wb_queue_work(wb, work);
1125 wb_put(wb);
1134 * cgroup_writeback_umount - flush inode wb switches for umount
1137 * flushes in-flight inode wb switches. An inode wb switch goes through
1139 * that all previously scheduled switches are finished. As wb switches are
1141 * flushing iff wb switches are in flight.
1154 * ensure that all in-flight wb switches are in the workqueue.
1176 struct bdi_writeback *wb)
1178 assert_spin_locked(&wb->list_lock);
1184 wb_io_lists_depopulated(wb);
1190 __acquires(&wb->list_lock)
1192 struct bdi_writeback *wb = inode_to_wb(inode);
1195 spin_lock(&wb->list_lock);
1196 return wb;
1200 __acquires(&wb->list_lock)
1202 struct bdi_writeback *wb = inode_to_wb(inode);
1204 spin_lock(&wb->list_lock);
1205 return wb;
1208 static long wb_split_bdi_pages(struct bdi_writeback *wb, long nr_pages)
1219 if (!skip_if_busy || !writeback_in_progress(&bdi->wb)) {
1221 wb_queue_work(&bdi->wb, base_work);
1237 static void wb_start_writeback(struct bdi_writeback *wb, enum wb_reason reason)
1239 if (!wb_has_dirty_io(wb))
1250 if (test_bit(WB_start_all, &wb->state) ||
1251 test_and_set_bit(WB_start_all, &wb->state))
1254 wb->start_all_reason = reason;
1255 wb_wakeup(wb);
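wb_start_writeback() only records that a flush-everything request is pending; the actual work item is built later by wb_check_start_all() in the flusher. The test_bit()-before-test_and_set_bit() sequence avoids an atomic RMW on the hot path when the flag is already set. Filled in from the fragments:

        static void wb_start_writeback(struct bdi_writeback *wb, enum wb_reason reason)
        {
                if (!wb_has_dirty_io(wb))
                        return;

                /*
                 * Allow only one pending "start everything" request at a
                 * time; the cheap test_bit() avoids dirtying the cacheline
                 * when the bit is already set.
                 */
                if (test_bit(WB_start_all, &wb->state) ||
                    test_and_set_bit(WB_start_all, &wb->state))
                        return;

                wb->start_all_reason = reason;
                wb_wakeup(wb);
        }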
1260 * @wb: bdi_writeback to write from
1264 * this function returns, it is only guaranteed that for the given wb
1268 void wb_start_background_writeback(struct bdi_writeback *wb)
1274 trace_writeback_wake_background(wb);
1275 wb_wakeup(wb);
1283 struct bdi_writeback *wb;
1285 wb = inode_to_wb_and_lock_list(inode);
1290 wb_io_lists_depopulated(wb);
1293 spin_unlock(&wb->list_lock);
1342 static void redirty_tail_locked(struct inode *inode, struct bdi_writeback *wb)
1354 wb_io_lists_depopulated(wb);
1357 if (!list_empty(&wb->b_dirty)) {
1360 tail = wb_inode(wb->b_dirty.next);
1364 inode_io_list_move_locked(inode, wb, &wb->b_dirty);
1367 static void redirty_tail(struct inode *inode, struct bdi_writeback *wb)
1370 redirty_tail_locked(inode, wb);
1377 static void requeue_io(struct inode *inode, struct bdi_writeback *wb)
1379 inode_io_list_move_locked(inode, wb, &wb->b_more_io);
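redirty_tail_locked() re-queues an inode as the most recently dirtied entry of b_dirty. Because b_dirty is kept sorted by dirtied_when, the timestamp is refreshed when it would otherwise violate the ordering (a stale timestamp could stall writeback of every inode queued behind it). A sketch filling in the elided lines; requeue_io() is the one-liner shown in the fragment:

        static void redirty_tail_locked(struct inode *inode, struct bdi_writeback *wb)
        {
                assert_spin_locked(&inode->i_lock);

                inode->i_state &= ~I_SYNC_QUEUED;
                /* inodes being freed are dropped from the io lists entirely */
                if (inode->i_state & I_FREEING) {
                        list_del_init(&inode->i_io_list);
                        wb_io_lists_depopulated(wb);
                        return;
                }
                if (!list_empty(&wb->b_dirty)) {
                        struct inode *tail;

                        tail = wb_inode(wb->b_dirty.next);
                        if (time_before(inode->dirtied_when, tail->dirtied_when))
                                inode->dirtied_when = jiffies;
                }
                inode_io_list_move_locked(inode, wb, &wb->b_dirty);
        }

        static void requeue_io(struct inode *inode, struct bdi_writeback *wb)
        {
                inode_io_list_move_locked(inode, wb, &wb->b_more_io);
        }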
1473 static void queue_io(struct bdi_writeback *wb, struct wb_writeback_work *work,
1479 assert_spin_locked(&wb->list_lock);
1480 list_splice_init(&wb->b_more_io, &wb->b_io);
1481 moved = move_expired_inodes(&wb->b_dirty, &wb->b_io, dirtied_before);
1484 moved += move_expired_inodes(&wb->b_dirty_time, &wb->b_io,
1487 wb_io_lists_populated(wb);
1488 trace_writeback_queue_io(wb, work, dirtied_before, moved);
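queue_io() assembles the working set for one writeback pass: everything parked on b_more_io is spliced back first, then expired entries from b_dirty and b_dirty_time are appended. A reconstruction consistent with the fragments (field and helper names as in mainline; details may differ by version):

        static void queue_io(struct bdi_writeback *wb, struct wb_writeback_work *work,
                             unsigned long dirtied_before)
        {
                int moved;
                unsigned long time_expire_jif = dirtied_before;

                assert_spin_locked(&wb->list_lock);
                list_splice_init(&wb->b_more_io, &wb->b_io);
                moved = move_expired_inodes(&wb->b_dirty, &wb->b_io, dirtied_before);
                if (!work->for_sync)
                        time_expire_jif = jiffies - dirtytime_expire_interval * HZ;
                moved += move_expired_inodes(&wb->b_dirty_time, &wb->b_io,
                                             time_expire_jif);
                if (moved)
                        wb_io_lists_populated(wb);
                trace_writeback_queue_io(wb, work, dirtied_before, moved);
        }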
1562 static void requeue_inode(struct inode *inode, struct bdi_writeback *wb,
1586 redirty_tail_locked(inode, wb);
1588 inode_cgwb_move_to_attached(inode, wb);
1600 requeue_io(inode, wb);
1609 redirty_tail_locked(inode, wb);
1617 redirty_tail_locked(inode, wb);
1620 inode_io_list_move_locked(inode, wb, &wb->b_dirty_time);
1624 inode_cgwb_move_to_attached(inode, wb);
1737 struct bdi_writeback *wb;
1776 wb = inode_to_wb_and_lock_list(inode);
1789 inode_cgwb_move_to_attached(inode, wb);
1792 redirty_tail_locked(inode, wb);
1796 wb,
1797 &wb->b_dirty_time);
1802 spin_unlock(&wb->list_lock);
1809 static long writeback_chunk_size(struct bdi_writeback *wb,
1830 pages = min(wb->avg_write_bandwidth / 2,
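writeback_chunk_size() decides how many pages a single inode may get per pass: unbounded for integrity writeback (livelock is avoided by page tagging instead), otherwise capped at half the wb's measured bandwidth so one inode cannot monopolize the flusher. A sketch under the assumption that the elided branches match mainline; constants such as MIN_WRITEBACK_PAGES and DIRTY_SCOPE are as defined by the writeback code:

        static long writeback_chunk_size(struct bdi_writeback *wb,
                                         struct wb_writeback_work *work)
        {
                long pages;

                if (work->sync_mode == WB_SYNC_ALL || work->tagged_writepages) {
                        pages = LONG_MAX;       /* whole inode, no chunking */
                } else {
                        pages = min(wb->avg_write_bandwidth / 2,
                                    global_wb_domain.dirty_limit / DIRTY_SCOPE);
                        pages = min(pages, work->nr_pages);
                        pages = round_down(pages + MIN_WRITEBACK_PAGES,
                                           MIN_WRITEBACK_PAGES);
                }

                return pages;
        }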
1845 * NOTE! This is called with wb->list_lock held, and will
1850 struct bdi_writeback *wb,
1872 while (!list_empty(&wb->b_io)) {
1873 struct inode *inode = wb_inode(wb->b_io.prev);
1884 redirty_tail(inode, wb);
1903 redirty_tail_locked(inode, wb);
1917 requeue_io(inode, wb);
1922 spin_unlock(&wb->list_lock);
1933 spin_lock(&wb->list_lock);
1939 write_chunk = writeback_chunk_size(wb, work);
1970 * have been switched to another wb in the meantime.
1980 if (unlikely(tmp_wb != wb)) {
1982 spin_lock(&wb->list_lock);
1999 static long __writeback_inodes_wb(struct bdi_writeback *wb,
2005 while (!list_empty(&wb->b_io)) {
2006 struct inode *inode = wb_inode(wb->b_io.prev);
2015 redirty_tail(inode, wb);
2018 wrote += writeback_sb_inodes(sb, wb, work);
2033 static long writeback_inodes_wb(struct bdi_writeback *wb, long nr_pages,
2045 spin_lock(&wb->list_lock);
2046 if (list_empty(&wb->b_io))
2047 queue_io(wb, &work, jiffies);
2048 __writeback_inodes_wb(wb, &work);
2049 spin_unlock(&wb->list_lock);
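writeback_inodes_wb() wraps __writeback_inodes_wb() in an on-stack WB_SYNC_NONE work descriptor and a block plug, and reports progress as the number of pages consumed from the budget. A sketch consistent with the fragments:

        static long writeback_inodes_wb(struct bdi_writeback *wb, long nr_pages,
                                        enum wb_reason reason)
        {
                struct wb_writeback_work work = {
                        .nr_pages       = nr_pages,
                        .sync_mode      = WB_SYNC_NONE,
                        .range_cyclic   = 1,
                        .reason         = reason,
                };
                struct blk_plug plug;

                blk_start_plug(&plug);
                spin_lock(&wb->list_lock);
                if (list_empty(&wb->b_io))
                        queue_io(wb, &work, jiffies);
                __writeback_inodes_wb(wb, &work);
                spin_unlock(&wb->list_lock);
                blk_finish_plug(&plug);

                return nr_pages - work.nr_pages;
        }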
2070 static long wb_writeback(struct bdi_writeback *wb,
2095 !list_empty(&wb->work_list))
2102 if (work->for_background && !wb_over_bg_thresh(wb))
2106 spin_lock(&wb->list_lock);
2108 trace_writeback_start(wb, work);
2109 if (list_empty(&wb->b_io)) {
2123 queue_io(wb, work, dirtied_before);
2127 progress = writeback_sb_inodes(work->sb, wb, work);
2129 progress = __writeback_inodes_wb(wb, work);
2130 trace_writeback_written(wb, work);
2141 spin_unlock(&wb->list_lock);
2148 if (list_empty(&wb->b_more_io)) {
2149 spin_unlock(&wb->list_lock);
2158 trace_writeback_wait(wb, work);
2159 inode = wb_inode(wb->b_more_io.prev);
2161 spin_unlock(&wb->list_lock);
2173 static struct wb_writeback_work *get_next_work_item(struct bdi_writeback *wb)
2177 spin_lock_irq(&wb->work_lock);
2178 if (!list_empty(&wb->work_list)) {
2179 work = list_entry(wb->work_list.next,
2183 spin_unlock_irq(&wb->work_lock);
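get_next_work_item() pops the oldest queued wb_writeback_work under work_lock; the elided lines are just the list_del_init() and the return. Filled in:

        static struct wb_writeback_work *get_next_work_item(struct bdi_writeback *wb)
        {
                struct wb_writeback_work *work = NULL;

                spin_lock_irq(&wb->work_lock);
                if (!list_empty(&wb->work_list)) {
                        work = list_entry(wb->work_list.next,
                                          struct wb_writeback_work, list);
                        list_del_init(&work->list);
                }
                spin_unlock_irq(&wb->work_lock);
                return work;
        }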
2187 static long wb_check_background_flush(struct bdi_writeback *wb)
2189 if (wb_over_bg_thresh(wb)) {
2199 return wb_writeback(wb, &work);
2205 static long wb_check_old_data_flush(struct bdi_writeback *wb)
2216 expired = wb->last_old_flush +
2221 wb->last_old_flush = jiffies;
2233 return wb_writeback(wb, &work);
2239 static long wb_check_start_all(struct bdi_writeback *wb)
2243 if (!test_bit(WB_start_all, &wb->state))
2249 .nr_pages = wb_split_bdi_pages(wb, nr_pages),
2252 .reason = wb->start_all_reason,
2255 nr_pages = wb_writeback(wb, &work);
2258 clear_bit(WB_start_all, &wb->state);
2266 static long wb_do_writeback(struct bdi_writeback *wb)
2271 set_bit(WB_writeback_running, &wb->state);
2272 while ((work = get_next_work_item(wb)) != NULL) {
2273 trace_writeback_exec(wb, work);
2274 wrote += wb_writeback(wb, work);
2281 wrote += wb_check_start_all(wb);
2286 wrote += wb_check_old_data_flush(wb);
2287 wrote += wb_check_background_flush(wb);
2288 clear_bit(WB_writeback_running, &wb->state);
2299 struct bdi_writeback *wb = container_of(to_delayed_work(work),
2303 set_worker_desc("flush-%s", bdi_dev_name(wb->bdi));
2306 !test_bit(WB_registered, &wb->state))) {
2308 * The normal path. Keep writing back @wb until its
2310 * if @wb is shutting down even when we're running off the
2314 pages_written = wb_do_writeback(wb);
2316 } while (!list_empty(&wb->work_list));
2323 pages_written = writeback_inodes_wb(wb, 1024,
2328 if (!list_empty(&wb->work_list))
2329 wb_wakeup(wb);
2330 else if (wb_has_dirty_io(wb) && dirty_writeback_interval)
2331 wb_wakeup_delayed(wb);
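wb_workfn() is the flusher entry point run off bdi_wq. The fragments show its two paths: the normal loop that drains work_list via wb_do_writeback(), and the rescuer path that writes a bounded 1024 pages so the emergency worker is not hogged. A reconstruction of the skeleton (comments abbreviated; details may differ by version):

        void wb_workfn(struct work_struct *work)
        {
                struct bdi_writeback *wb = container_of(to_delayed_work(work),
                                                        struct bdi_writeback, dwork);
                long pages_written;

                set_worker_desc("flush-%s", bdi_dev_name(wb->bdi));

                if (likely(!current_is_workqueue_rescuer() ||
                           !test_bit(WB_registered, &wb->state))) {
                        /* normal path: drain the work_list completely */
                        do {
                                pages_written = wb_do_writeback(wb);
                                trace_writeback_pages_written(pages_written);
                        } while (!list_empty(&wb->work_list));
                } else {
                        /* running off the rescuer: do a bounded amount and get out */
                        pages_written = writeback_inodes_wb(wb, 1024,
                                                            WB_REASON_FORKER_THREAD);
                        trace_writeback_pages_written(pages_written);
                }

                if (!list_empty(&wb->work_list))
                        wb_wakeup(wb);
                else if (wb_has_dirty_io(wb) && dirty_writeback_interval)
                        wb_wakeup_delayed(wb);
        }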
2340 struct bdi_writeback *wb;
2345 list_for_each_entry_rcu(wb, &bdi->wb_list, bdi_node)
2346 wb_start_writeback(wb, reason);
2399 struct bdi_writeback *wb;
2401 list_for_each_entry_rcu(wb, &bdi->wb_list, bdi_node)
2402 if (!list_empty(&wb->b_dirty_time))
2403 wb_wakeup(wb);
2458 struct bdi_writeback *wb = NULL;
2520 * Grab inode's wb early because it requires dropping i_lock and we
2526 wb = locked_inode_to_wb_and_lock_list(inode);
2563 dirty_list = &wb->b_dirty;
2565 dirty_list = &wb->b_dirty_time;
2567 wakeup_bdi = inode_io_list_move_locked(inode, wb,
2570 spin_unlock(&wb->list_lock);
2581 (wb->bdi->capabilities & BDI_CAP_WRITEBACK))
2582 wb_wakeup_delayed(wb);
2587 if (wb)
2588 spin_unlock(&wb->list_lock);
2640 * Move each inode back to the wb list before we drop the lock
2649 * do not have the mapping lock. Skip it here, wb completion
2789 /* protect against inode wb switch, see inode_switch_wbs_work_fn() */