Lines Matching defs:bdi

93 				&wb->bdi->tot_write_bandwidth);
104 &wb->bdi->tot_write_bandwidth) < 0);
146 * wakes up the corresponding bdi thread which should then take care of the
149 * set up a timer which wakes the bdi thread up later.
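
The two comment lines above (146, 149) describe the delayed flusher wakeup. A minimal sketch of that mechanism, modeled on wb_wakeup_delayed() in fs/fs-writeback.c and assuming the workqueue-backed flusher (bdi_wq, wb->dwork); the work_lock discipline differs between kernel versions:

static void wb_wakeup_delayed_sketch(struct bdi_writeback *wb)
{
	unsigned long timeout;

	/* dirty_writeback_interval is in centisecs; scale to msecs */
	timeout = msecs_to_jiffies(dirty_writeback_interval * 10);

	spin_lock_irq(&wb->work_lock);
	if (test_bit(WB_registered, &wb->state))
		queue_delayed_work(bdi_wq, &wb->dwork, timeout);
	spin_unlock_irq(&wb->work_lock);
}
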
208 * Wait for one or more work items issued to @bdi with their ->done field
267 struct backing_dev_info *bdi = inode_to_bdi(inode);
275 wb = wb_get_create(bdi, memcg_css, GFP_ATOMIC);
279 wb = wb_get_create(bdi, memcg_css, GFP_ATOMIC);
285 wb = &bdi->wb;
312 if (wb != &wb->bdi->wb)
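
Lines 267-285 are the wb selection for a newly tracked inode: try the per-memcg wb for this bdi, fall back to the bdi's embedded root wb. A condensed sketch of that pattern, assuming cgroup writeback is enabled and that the caller holds a reference on @memcg_css (the helper name is made up; the real code lives in __inode_attach_wb()):

static struct bdi_writeback *pick_inode_wb(struct inode *inode,
					   struct cgroup_subsys_state *memcg_css)
{
	struct backing_dev_info *bdi = inode_to_bdi(inode);
	struct bdi_writeback *wb = NULL;

	/* GFP_ATOMIC: callers may be on the page dirtying path */
	if (memcg_css)
		wb = wb_get_create(bdi, memcg_css, GFP_ATOMIC);

	/* the embedded root wb always exists and needs no new reference */
	if (!wb)
		wb = &bdi->wb;
	return wb;
}
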
387 static void bdi_down_write_wb_switch_rwsem(struct backing_dev_info *bdi)
389 down_write(&bdi->wb_switch_rwsem);
392 static void bdi_up_write_wb_switch_rwsem(struct backing_dev_info *bdi)
394 up_write(&bdi->wb_switch_rwsem);
495 struct backing_dev_info *bdi = inode_to_bdi(isw->inodes[0]);
505 down_read(&bdi->wb_switch_rwsem);
534 up_read(&bdi->wb_switch_rwsem);
587 struct backing_dev_info *bdi = inode_to_bdi(inode);
614 isw->new_wb = wb_get_create(bdi, memcg_css, GFP_ATOMIC);
683 isw->new_wb = wb_get_create(wb->bdi, memcg_css, GFP_KERNEL);
688 isw->new_wb = &wb->bdi->wb; /* wb_get() is noop for bdi's wb */
946 * @nr_pages: number of pages to write for the whole bdi
950 * @wb->bdi.
955 unsigned long tot_bw = atomic_long_read(&wb->bdi->tot_write_bandwidth);
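
Lines 946-955 document the per-wb share computation: each wb is handed nr_pages scaled by its fraction of the bdi's total write bandwidth. Condensed from wb_split_bdi_pages(); field names are the kernel's, the body is a sketch:

static long wb_split_bdi_pages_sketch(struct bdi_writeback *wb, long nr_pages)
{
	unsigned long this_bw = wb->avg_write_bandwidth;
	unsigned long tot_bw = atomic_long_read(&wb->bdi->tot_write_bandwidth);

	if (nr_pages == LONG_MAX)
		return LONG_MAX;

	/* no bandwidth data yet, or this wb dominates: take it all */
	if (!tot_bw || this_bw >= tot_bw)
		return nr_pages;

	return DIV_ROUND_UP_ULL((u64)nr_pages * this_bw, tot_bw);
}
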
972 * bdi_split_work_to_wbs - split a wb_writeback_work to all wb's of a bdi
973 * @bdi: target backing_dev_info
977 * Split and issue @base_work to all wb's (bdi_writeback's) of @bdi which
980 * total active write bandwidth of @bdi.
982 static void bdi_split_work_to_wbs(struct backing_dev_info *bdi,
987 struct bdi_writeback *wb = list_entry(&bdi->wb_list,
993 list_for_each_entry_continue_rcu(wb, &bdi->wb_list, bdi_node) {
994 DEFINE_WB_COMPLETION(fallback_work_done, bdi);
1026 * Pin @wb so that it stays on @bdi->wb_list. This allows
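
The fragments at 982-1026 are the core of bdi_split_work_to_wbs(). A condensed sketch of the loop, assuming CONFIG_CGROUP_WRITEBACK; the real function also falls back to an on-stack work item plus completion when kmalloc() fails, and pins the current wb so the RCU walk can resume after sleeping:

static void split_work_sketch(struct backing_dev_info *bdi,
			      struct wb_writeback_work *base_work)
{
	struct bdi_writeback *wb;

	rcu_read_lock();
	list_for_each_entry_rcu(wb, &bdi->wb_list, bdi_node) {
		struct wb_writeback_work *work;

		if (!wb_has_dirty_io(wb))
			continue;

		work = kmalloc(sizeof(*work), GFP_ATOMIC);
		if (!work)
			continue;	/* real code: on-stack fallback item */

		*work = *base_work;
		work->nr_pages = wb_split_bdi_pages(wb, base_work->nr_pages);
		work->auto_free = 1;
		wb_queue_work(wb, work);
	}
	rcu_read_unlock();
}
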
1054 * cgroup_writeback_by_id - initiate cgroup writeback from bdi and memcg IDs
1055 * @bdi_id: target bdi id
1066 struct backing_dev_info *bdi;
1073 /* lookup bdi and memcg */
1074 bdi = bdi_get_by_id(bdi_id);
1075 if (!bdi)
1092 wb = wb_get_lookup(bdi, memcg_css);
1130 bdi_put(bdi);
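
Lines 1054-1130 outline cgroup_writeback_by_id(): resolve both IDs to live objects, flush only an already-existing wb, then drop every reference taken. A sketch of that sequence; work item setup and error codes are elided, and the css_from_id()/css_tryget() lookup is an assumption about how the memcg side is resolved:

static int cgroup_writeback_by_id_sketch(u64 bdi_id, int memcg_id)
{
	struct backing_dev_info *bdi;
	struct cgroup_subsys_state *memcg_css;
	struct bdi_writeback *wb;
	int ret = -ENOENT;

	/* lookup bdi and memcg */
	bdi = bdi_get_by_id(bdi_id);
	if (!bdi)
		return -ENOENT;

	rcu_read_lock();
	memcg_css = css_from_id(memcg_id, &memory_cgrp_subsys);
	if (memcg_css && !css_tryget(memcg_css))
		memcg_css = NULL;
	rcu_read_unlock();
	if (!memcg_css)
		goto out_bdi_put;

	/* flush only if the wb already exists; don't create one */
	wb = wb_get_lookup(bdi, memcg_css);
	if (wb) {
		/* ...allocate and wb_queue_work() a wb_writeback_work here... */
		wb_put(wb);
		ret = 0;
	}

	css_put(memcg_css);
out_bdi_put:
	bdi_put(bdi);
	return ret;
}
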
1173 static void bdi_down_write_wb_switch_rwsem(struct backing_dev_info *bdi) { }
1174 static void bdi_up_write_wb_switch_rwsem(struct backing_dev_info *bdi) { }
1214 static void bdi_split_work_to_wbs(struct backing_dev_info *bdi,
1220 if (!skip_if_busy || !writeback_in_progress(&bdi->wb)) {
1222 wb_queue_work(&bdi->wb, base_work);
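
With CONFIG_CGROUP_WRITEBACK disabled (fragments 1214-1222) the split degenerates to queueing on the bdi's single embedded wb. Completing the fragment as a sketch; the auto_free handling is an assumption:

static void bdi_split_work_to_wbs(struct backing_dev_info *bdi,
				  struct wb_writeback_work *base_work,
				  bool skip_if_busy)
{
	might_sleep();

	if (!skip_if_busy || !writeback_in_progress(&bdi->wb)) {
		base_work->auto_free = 0;
		wb_queue_work(&bdi->wb, base_work);
	}
}
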
1376 * requeue inode for re-scanning after bdi->b_io list is exhausted.
1401 * from permanently stopping the whole bdi writeback.
2284 * Handle writeback of dirty data for the device backed by this bdi. Also
2293 set_worker_desc("flush-%s", bdi_dev_name(wb->bdi));
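
Line 2293 sets the worker description visible in ps as "flush-<dev>". The enclosing wb_workfn() skeleton, heavily abbreviated:

void wb_workfn_sketch(struct work_struct *work)
{
	struct bdi_writeback *wb = container_of(to_delayed_work(work),
						struct bdi_writeback, dwork);

	set_worker_desc("flush-%s", bdi_dev_name(wb->bdi));

	/*
	 * ...drain wb->work_list with wb_do_writeback()-style processing,
	 * then re-arm wb->dwork if dirty inodes remain...
	 */
}
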
2325 * Start writeback of `nr_pages' pages on this bdi. If `nr_pages' is zero,
2328 static void __wakeup_flusher_threads_bdi(struct backing_dev_info *bdi,
2333 if (!bdi_has_dirty_io(bdi))
2336 list_for_each_entry_rcu(wb, &bdi->wb_list, bdi_node)
2340 void wakeup_flusher_threads_bdi(struct backing_dev_info *bdi,
2344 __wakeup_flusher_threads_bdi(bdi, reason);
2353 struct backing_dev_info *bdi;
2361 list_for_each_entry_rcu(bdi, &bdi_list, bdi_list)
2362 __wakeup_flusher_threads_bdi(bdi, reason);
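
Lines 2328-2362 show the two-level wakeup: every wb of one bdi, and every bdi in the system. A condensed sketch; the laptop-mode special case is omitted and the sketch assumes the wb_start_writeback() variant that takes only (wb, reason):

static void __wakeup_flusher_threads_bdi_sketch(struct backing_dev_info *bdi,
						enum wb_reason reason)
{
	struct bdi_writeback *wb;

	if (!bdi_has_dirty_io(bdi))
		return;

	list_for_each_entry_rcu(wb, &bdi->wb_list, bdi_node)
		wb_start_writeback(wb, reason);
}

void wakeup_flusher_threads_sketch(enum wb_reason reason)
{
	struct backing_dev_info *bdi;

	rcu_read_lock();
	list_for_each_entry_rcu(bdi, &bdi_list, bdi_list)
		__wakeup_flusher_threads_bdi_sketch(bdi, reason);
	rcu_read_unlock();
}
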
2367 * Wake up bdi's periodically to make sure dirtytime inodes get
2386 struct backing_dev_info *bdi;
2389 list_for_each_entry_rcu(bdi, &bdi_list, bdi_list) {
2392 list_for_each_entry_rcu(wb, &bdi->wb_list, bdi_node)
2566 * If this is the first dirty inode for this bdi,
2567 * we have to wake up the corresponding bdi thread
2572 (wb->bdi->capabilities & BDI_CAP_WRITEBACK))
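
Fragments 2566-2572 are the tail of __mark_inode_dirty(): when the inode just queued is the first dirty inode on its wb, and the device actually does writeback, arm the delayed wakeup sketched earlier. Condensed:

	/* inside __mark_inode_dirty(), after queueing the inode on @wb */
	if (wakeup_bdi &&
	    (wb->bdi->capabilities & BDI_CAP_WRITEBACK))
		wb_wakeup_delayed(wb);
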
2681 struct backing_dev_info *bdi = sb->s_bdi;
2682 DEFINE_WB_COMPLETION(done, bdi);
2692 if (!bdi_has_dirty_io(bdi) || bdi == &noop_backing_dev_info)
2759 struct backing_dev_info *bdi = sb->s_bdi;
2760 DEFINE_WB_COMPLETION(done, bdi);
2776 if (bdi == &noop_backing_dev_info)
2781 bdi_down_write_wb_switch_rwsem(bdi);
2782 bdi_split_work_to_wbs(bdi, &work, false);
2784 bdi_up_write_wb_switch_rwsem(bdi);
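
The last group (2681-2784) is writeback_inodes_sb()/sync_inodes_sb(): a wb_writeback_work carrying a shared completion is split across all wbs of sb->s_bdi, and the sync path additionally holds wb_switch_rwsem for writing so inodes cannot switch wbs while the completion is awaited (this is what the wait at line 208 serves). A condensed sketch of the sync path; wait_sb_inodes() and the s_umount assertion are omitted:

void sync_inodes_sb_sketch(struct super_block *sb)
{
	struct backing_dev_info *bdi = sb->s_bdi;
	DEFINE_WB_COMPLETION(done, bdi);
	struct wb_writeback_work work = {
		.sb		= sb,
		.sync_mode	= WB_SYNC_ALL,
		.nr_pages	= LONG_MAX,
		.range_cyclic	= 0,
		.done		= &done,
		.reason		= WB_REASON_SYNC,
		.for_sync	= 1,
	};

	/* nothing to do for bdi-less filesystems */
	if (bdi == &noop_backing_dev_info)
		return;

	bdi_down_write_wb_switch_rwsem(bdi);
	bdi_split_work_to_wbs(bdi, &work, false);
	wb_wait_for_completion(&done);
	bdi_up_write_wb_switch_rwsem(bdi);
}
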