Lines matching refs:wb: an identifier cross-reference for "wb", the per-bdi writeback state (struct bdi_writeback). The line numbers and enclosing functions below are consistent with fs/fs-writeback.c from roughly Linux v4.0. Each entry shows the file line number, the matched source line, the enclosing function, and, where the hit is a declaration, whether it is an argument or a local.
116 mod_delayed_work(bdi_wq, &bdi->wb.dwork, 0); in bdi_wakeup_thread()
132 mod_delayed_work(bdi_wq, &bdi->wb.dwork, 0); in bdi_queue_work()
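
Both hits above kick the per-bdi flusher: bdi_wakeup_thread() and bdi_queue_work() schedule the writeback delayed work with zero delay on the shared bdi_wq workqueue. A minimal sketch of the wakeup path, assuming this is fs/fs-writeback.c around v4.0 (the wb_lock and BDI_registered details come from that era's source, not from the matched lines):

    static void bdi_wakeup_thread(struct backing_dev_info *bdi)
    {
            spin_lock_bh(&bdi->wb_lock);
            /* Only poke the worker while the bdi is still registered. */
            if (test_bit(BDI_registered, &bdi->state))
                    mod_delayed_work(bdi_wq, &bdi->wb.dwork, 0);
            spin_unlock_bh(&bdi->wb_lock);
    }
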
207 spin_lock(&bdi->wb.list_lock); in inode_wb_list_del()
209 spin_unlock(&bdi->wb.list_lock); in inode_wb_list_del()
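
The pair of hits in inode_wb_list_del() shows the basic locking rule: an inode's i_wb_list linkage is protected by the owning bdi_writeback's list_lock. A sketch of the whole function under the same v4.0 assumption:

    void inode_wb_list_del(struct inode *inode)
    {
            struct backing_dev_info *bdi = inode_to_bdi(inode);

            spin_lock(&bdi->wb.list_lock);
            list_del_init(&inode->i_wb_list);
            spin_unlock(&bdi->wb.list_lock);
    }
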
221 static void redirty_tail(struct inode *inode, struct bdi_writeback *wb) in redirty_tail() argument
223 assert_spin_locked(&wb->list_lock); in redirty_tail()
224 if (!list_empty(&wb->b_dirty)) { in redirty_tail()
227 tail = wb_inode(wb->b_dirty.next); in redirty_tail()
231 list_move(&inode->i_wb_list, &wb->b_dirty); in redirty_tail()
237 static void requeue_io(struct inode *inode, struct bdi_writeback *wb) in requeue_io() argument
239 assert_spin_locked(&wb->list_lock); in requeue_io()
240 list_move(&inode->i_wb_list, &wb->b_more_io); in requeue_io()
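
redirty_tail() and requeue_io() are the two list-movement helpers the matches above come from. redirty_tail() puts an inode back at the tail of b_dirty, refreshing dirtied_when if it would otherwise sort before the current tail (b_dirty is kept ordered by dirty time); requeue_io() parks an inode on b_more_io for a later pass. A sketch filling in the unmatched lines, assuming the v4.0 source:

    static void redirty_tail(struct inode *inode, struct bdi_writeback *wb)
    {
            assert_spin_locked(&wb->list_lock);
            if (!list_empty(&wb->b_dirty)) {
                    struct inode *tail;

                    tail = wb_inode(wb->b_dirty.next);
                    /* Keep b_dirty ordered: don't let this inode jump ahead. */
                    if (time_before(inode->dirtied_when, tail->dirtied_when))
                            inode->dirtied_when = jiffies;
            }
            list_move(&inode->i_wb_list, &wb->b_dirty);
    }

    static void requeue_io(struct inode *inode, struct bdi_writeback *wb)
    {
            assert_spin_locked(&wb->list_lock);
            list_move(&inode->i_wb_list, &wb->b_more_io);
    }
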
340 static void queue_io(struct bdi_writeback *wb, struct wb_writeback_work *work) in queue_io() argument
344 assert_spin_locked(&wb->list_lock); in queue_io()
345 list_splice_init(&wb->b_more_io, &wb->b_io); in queue_io()
346 moved = move_expired_inodes(&wb->b_dirty, &wb->b_io, 0, work); in queue_io()
347 moved += move_expired_inodes(&wb->b_dirty_time, &wb->b_io, in queue_io()
349 trace_writeback_queue_io(wb, work, moved); in queue_io()
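
queue_io() stages work for one writeback pass: it splices everything left on b_more_io back onto b_io, then moves expired inodes from b_dirty, and from b_dirty_time, onto b_io. The second move_expired_inodes() call wraps onto a second source line, which is why its trailing arguments don't appear in the match; in the v4.0 source the elided argument is EXPIRE_DIRTY_ATIME (an assumption from that version, not visible here):

    static void queue_io(struct bdi_writeback *wb, struct wb_writeback_work *work)
    {
            int moved;

            assert_spin_locked(&wb->list_lock);
            list_splice_init(&wb->b_more_io, &wb->b_io);
            moved = move_expired_inodes(&wb->b_dirty, &wb->b_io, 0, work);
            moved += move_expired_inodes(&wb->b_dirty_time, &wb->b_io,
                                         EXPIRE_DIRTY_ATIME, work);
            trace_writeback_queue_io(wb, work, moved);
    }
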
423 static void requeue_inode(struct inode *inode, struct bdi_writeback *wb, in requeue_inode() argument
443 redirty_tail(inode, wb); in requeue_inode()
454 requeue_io(inode, wb); in requeue_inode()
463 redirty_tail(inode, wb); in requeue_inode()
471 redirty_tail(inode, wb); in requeue_inode()
474 list_move(&inode->i_wb_list, &wb->b_dirty_time); in requeue_inode()
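
The five hits in requeue_inode() are the possible dispositions of an inode once a writeback chunk finishes. A condensed sketch of the decision tree (branch order and the I_* tests are taken from the v4.0 source; treat it as approximate):

    static void requeue_inode(struct inode *inode, struct bdi_writeback *wb,
                              struct writeback_control *wbc)
    {
            if (inode->i_state & I_FREEING)
                    return;

            if (wbc->pages_skipped) {
                    /* Writeback made no progress: punt back to b_dirty. */
                    redirty_tail(inode, wb);
                    return;
            }

            if (mapping_tagged(inode->i_mapping, PAGECACHE_TAG_DIRTY)) {
                    if (wbc->nr_to_write <= 0)
                            requeue_io(inode, wb);    /* budget ran out */
                    else
                            redirty_tail(inode, wb);  /* pages redirtied meanwhile */
            } else if (inode->i_state & I_DIRTY) {
                    redirty_tail(inode, wb);          /* metadata still dirty */
            } else if (inode->i_state & I_DIRTY_TIME) {
                    list_move(&inode->i_wb_list, &wb->b_dirty_time);
            } else {
                    list_del_init(&inode->i_wb_list); /* clean: drop from lists */
            }
    }
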
573 writeback_single_inode(struct inode *inode, struct bdi_writeback *wb, in writeback_single_inode() argument
612 spin_lock(&wb->list_lock); in writeback_single_inode()
620 spin_unlock(&wb->list_lock); in writeback_single_inode()
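
writeback_single_inode() takes wb->list_lock only at the very end (the 612/620 pair above): the data writeout itself runs unlocked, and the lock is reacquired just long enough to drop a now-clean inode from the writeback lists. Roughly, the tail of the function under the same v4.0 assumption:

            spin_lock(&wb->list_lock);
            spin_lock(&inode->i_lock);
            /* If the inode came back clean, it leaves the writeback lists. */
            if (!(inode->i_state & I_DIRTY_ALL))
                    list_del_init(&inode->i_wb_list);
            spin_unlock(&wb->list_lock);
            inode_sync_complete(inode);
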
664 struct bdi_writeback *wb, in writeback_sb_inodes() argument
681 while (!list_empty(&wb->b_io)) { in writeback_sb_inodes()
682 struct inode *inode = wb_inode(wb->b_io.prev); in writeback_sb_inodes()
691 redirty_tail(inode, wb); in writeback_sb_inodes()
711 redirty_tail(inode, wb); in writeback_sb_inodes()
725 requeue_io(inode, wb); in writeback_sb_inodes()
729 spin_unlock(&wb->list_lock); in writeback_sb_inodes()
740 spin_lock(&wb->list_lock); in writeback_sb_inodes()
746 write_chunk = writeback_chunk_size(wb->bdi, work); in writeback_sb_inodes()
758 spin_lock(&wb->list_lock); in writeback_sb_inodes()
762 requeue_inode(inode, wb, &wbc); in writeback_sb_inodes()
765 cond_resched_lock(&wb->list_lock); in writeback_sb_inodes()
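
writeback_sb_inodes() is the inner per-superblock loop, and its matches show the lock choreography: inodes that can't be written now are pushed back with redirty_tail() or requeue_io(); for a real writeout, list_lock is dropped (729), the inode is written, and the lock is retaken (758) before requeue_inode() decides where it goes, with cond_resched_lock() keeping hold times bounded. A condensed skeleton (wait and accounting paths elided; assumes the v4.0 source):

    while (!list_empty(&wb->b_io)) {
            struct inode *inode = wb_inode(wb->b_io.prev);

            if (inode->i_sb != sb) {
                    /* 691: foreign inode during per-sb work, push back. */
                    redirty_tail(inode, wb);
                    continue;
            }
            spin_lock(&inode->i_lock);
            if (inode->i_state & (I_NEW | I_FREEING | I_WILL_FREE)) {
                    spin_unlock(&inode->i_lock);
                    redirty_tail(inode, wb);           /* 711 */
                    continue;
            }
            if ((inode->i_state & I_SYNC) && wbc.sync_mode != WB_SYNC_ALL) {
                    spin_unlock(&inode->i_lock);
                    requeue_io(inode, wb);             /* 725: revisit later */
                    continue;
            }
            spin_unlock(&wb->list_lock);               /* 729 */
            inode->i_state |= I_SYNC;                  /* pins the inode */
            spin_unlock(&inode->i_lock);

            write_chunk = writeback_chunk_size(wb->bdi, work);   /* 746 */
            wbc.nr_to_write = write_chunk;
            __writeback_single_inode(inode, &wbc);

            spin_lock(&wb->list_lock);                 /* 758 */
            spin_lock(&inode->i_lock);
            requeue_inode(inode, wb, &wbc);            /* 762 */
            spin_unlock(&inode->i_lock);
            cond_resched_lock(&wb->list_lock);         /* 765 */
            /* ... progress accounting and early-exit checks elided ... */
    }
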
780 static long __writeback_inodes_wb(struct bdi_writeback *wb, in __writeback_inodes_wb() argument
786 while (!list_empty(&wb->b_io)) { in __writeback_inodes_wb()
787 struct inode *inode = wb_inode(wb->b_io.prev); in __writeback_inodes_wb()
796 redirty_tail(inode, wb); in __writeback_inodes_wb()
799 wrote += writeback_sb_inodes(sb, wb, work); in __writeback_inodes_wb()
814 static long writeback_inodes_wb(struct bdi_writeback *wb, long nr_pages, in writeback_inodes_wb() argument
824 spin_lock(&wb->list_lock); in writeback_inodes_wb()
825 if (list_empty(&wb->b_io)) in writeback_inodes_wb()
826 queue_io(wb, &work); in writeback_inodes_wb()
827 __writeback_inodes_wb(wb, &work); in writeback_inodes_wb()
828 spin_unlock(&wb->list_lock); in writeback_inodes_wb()
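
writeback_inodes_wb() is the canonical caller pattern for the helpers above: take list_lock, refill b_io via queue_io() if it is empty, drain it through __writeback_inodes_wb() (which pins each inode's superblock and hands it to writeback_sb_inodes()), and report progress as pages written. A sketch under the v4.0 assumption (the wb_reason plumbing comes from that source, not from the matches):

    static long writeback_inodes_wb(struct bdi_writeback *wb, long nr_pages,
                                    enum wb_reason reason)
    {
            struct wb_writeback_work work = {
                    .nr_pages     = nr_pages,
                    .sync_mode    = WB_SYNC_NONE,
                    .range_cyclic = 1,
                    .reason       = reason,
            };

            spin_lock(&wb->list_lock);
            if (list_empty(&wb->b_io))
                    queue_io(wb, &work);
            __writeback_inodes_wb(wb, &work);
            spin_unlock(&wb->list_lock);

            /* Pages written = requested budget minus what is left over. */
            return nr_pages - work.nr_pages;
    }
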
854 static void wb_update_bandwidth(struct bdi_writeback *wb, in wb_update_bandwidth() argument
857 __bdi_update_bandwidth(wb->bdi, 0, 0, 0, 0, 0, start_time); in wb_update_bandwidth()
875 static long wb_writeback(struct bdi_writeback *wb, in wb_writeback() argument
887 spin_lock(&wb->list_lock); in wb_writeback()
902 !list_empty(&wb->bdi->work_list)) in wb_writeback()
909 if (work->for_background && !over_bground_thresh(wb->bdi)) in wb_writeback()
924 trace_writeback_start(wb->bdi, work); in wb_writeback()
925 if (list_empty(&wb->b_io)) in wb_writeback()
926 queue_io(wb, work); in wb_writeback()
928 progress = writeback_sb_inodes(work->sb, wb, work); in wb_writeback()
930 progress = __writeback_inodes_wb(wb, work); in wb_writeback()
931 trace_writeback_written(wb->bdi, work); in wb_writeback()
933 wb_update_bandwidth(wb, wb_start); in wb_writeback()
948 if (list_empty(&wb->b_more_io)) in wb_writeback()
955 if (!list_empty(&wb->b_more_io)) { in wb_writeback()
956 trace_writeback_wait(wb->bdi, work); in wb_writeback()
957 inode = wb_inode(wb->b_more_io.prev); in wb_writeback()
959 spin_unlock(&wb->list_lock); in wb_writeback()
962 spin_lock(&wb->list_lock); in wb_writeback()
965 spin_unlock(&wb->list_lock); in wb_writeback()
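
wb_writeback() is the main retry loop behind every work item, and the matches trace its shape: stop conditions (queued work takes priority at 902, background threshold cleared at 909), a queue_io() refill when b_io runs dry (925-926), one pass through writeback_sb_inodes() or __writeback_inodes_wb(), a bandwidth update, and, when no progress was made but b_more_io is not empty, a blocking wait on the inode at the tail of b_more_io. A condensed skeleton of the loop body (assuming v4.0; inode_sleep_on_writeback() and the i_lock hand-off are from that source):

    spin_lock(&wb->list_lock);
    for (;;) {
            if (work->nr_pages <= 0)
                    break;
            /* 902: queued work items preempt kupdate/background passes. */
            if ((work->for_background || work->for_kupdate) &&
                !list_empty(&wb->bdi->work_list))
                    break;
            if (work->for_background && !over_bground_thresh(wb->bdi))
                    break;                             /* 909 */

            trace_writeback_start(wb->bdi, work);      /* 924 */
            if (list_empty(&wb->b_io))
                    queue_io(wb, work);
            if (work->sb)
                    progress = writeback_sb_inodes(work->sb, wb, work);
            else
                    progress = __writeback_inodes_wb(wb, work);
            trace_writeback_written(wb->bdi, work);

            wb_update_bandwidth(wb, wb_start);         /* 933 */

            if (progress)
                    continue;
            if (list_empty(&wb->b_more_io))
                    break;                             /* 948: nothing left */
            /* Stuck: wait on the oldest inode parked on b_more_io. */
            trace_writeback_wait(wb->bdi, work);
            inode = wb_inode(wb->b_more_io.prev);
            spin_lock(&inode->i_lock);
            spin_unlock(&wb->list_lock);
            inode_sleep_on_writeback(inode);           /* drops i_lock */
            spin_lock(&wb->list_lock);
    }
    spin_unlock(&wb->list_lock);
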
999 static long wb_check_background_flush(struct bdi_writeback *wb) in wb_check_background_flush() argument
1001 if (over_bground_thresh(wb->bdi)) { in wb_check_background_flush()
1011 return wb_writeback(wb, &work); in wb_check_background_flush()
1017 static long wb_check_old_data_flush(struct bdi_writeback *wb) in wb_check_old_data_flush() argument
1028 expired = wb->last_old_flush + in wb_check_old_data_flush()
1033 wb->last_old_flush = jiffies; in wb_check_old_data_flush()
1045 return wb_writeback(wb, &work); in wb_check_old_data_flush()
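
The two check functions are what the worker falls back to when no explicit work is queued: wb_check_background_flush() starts an open-ended WB_SYNC_NONE pass when the bdi is over the background dirty threshold, and wb_check_old_data_flush() implements the periodic kupdate-style flush, rate-limited by wb->last_old_flush against dirty_writeback_interval (stored in centiseconds, hence the *10 when converting to jiffies). A sketch of the periodic check, assuming the v4.0 source (get_nr_dirty_pages() and WB_REASON_PERIODIC are from that version):

    static long wb_check_old_data_flush(struct bdi_writeback *wb)
    {
            unsigned long expired;
            long nr_pages;

            if (!dirty_writeback_interval)     /* 0 disables periodic flush */
                    return 0;

            expired = wb->last_old_flush +
                            msecs_to_jiffies(dirty_writeback_interval * 10);
            if (time_before(jiffies, expired))
                    return 0;

            wb->last_old_flush = jiffies;
            nr_pages = get_nr_dirty_pages();
            if (nr_pages) {
                    struct wb_writeback_work work = {
                            .nr_pages     = nr_pages,
                            .sync_mode    = WB_SYNC_NONE,
                            .for_kupdate  = 1,
                            .range_cyclic = 1,
                            .reason       = WB_REASON_PERIODIC,
                    };
                    return wb_writeback(wb, &work);
            }
            return 0;
    }
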
1054 static long wb_do_writeback(struct bdi_writeback *wb) in wb_do_writeback() argument
1056 struct backing_dev_info *bdi = wb->bdi; in wb_do_writeback()
1060 set_bit(BDI_writeback_running, &wb->bdi->state); in wb_do_writeback()
1065 wrote += wb_writeback(wb, work); in wb_do_writeback()
1080 wrote += wb_check_old_data_flush(wb); in wb_do_writeback()
1081 wrote += wb_check_background_flush(wb); in wb_do_writeback()
1082 clear_bit(BDI_writeback_running, &wb->bdi->state); in wb_do_writeback()
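
wb_do_writeback() ties the pieces together for one worker invocation: with BDI_writeback_running set, it drains the bdi's explicit work_list through wb_writeback(), then runs the two fallback checks. A sketch, assuming v4.0 (get_next_work_item() and the done/kfree completion handling are from that source):

    static long wb_do_writeback(struct bdi_writeback *wb)
    {
            struct backing_dev_info *bdi = wb->bdi;
            struct wb_writeback_work *work;
            long wrote = 0;

            set_bit(BDI_writeback_running, &wb->bdi->state);
            while ((work = get_next_work_item(bdi)) != NULL) {
                    wrote += wb_writeback(wb, work);
                    /* Synchronous submitters wait on work->done. */
                    if (work->done)
                            complete(work->done);
                    else
                            kfree(work);
            }

            /* No queued work: fall back to periodic and background flushing. */
            wrote += wb_check_old_data_flush(wb);
            wrote += wb_check_background_flush(wb);
            clear_bit(BDI_writeback_running, &wb->bdi->state);

            return wrote;
    }
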
1093 struct bdi_writeback *wb = container_of(to_delayed_work(work), in bdi_writeback_workfn() local
1095 struct backing_dev_info *bdi = wb->bdi; in bdi_writeback_workfn()
1110 pages_written = wb_do_writeback(wb); in bdi_writeback_workfn()
1119 pages_written = writeback_inodes_wb(&bdi->wb, 1024, in bdi_writeback_workfn()
1125 mod_delayed_work(bdi_wq, &wb->dwork, 0); in bdi_writeback_workfn()
1126 else if (wb_has_dirty_io(wb) && dirty_writeback_interval) in bdi_writeback_workfn()
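
bdi_writeback_workfn() is the workqueue entry point whose container_of() at 1093 recovers the bdi_writeback from its delayed work. The 1119 hit is the rescuer path: when the function runs on the workqueue's emergency worker it writes a bounded 1024 pages via writeback_inodes_wb() instead of looping, so it doesn't hog the rescuer. A condensed sketch under the v4.0 assumption:

    void bdi_writeback_workfn(struct work_struct *work)
    {
            struct bdi_writeback *wb = container_of(to_delayed_work(work),
                                                    struct bdi_writeback, dwork);
            struct backing_dev_info *bdi = wb->bdi;
            long pages_written;

            if (likely(!current_is_workqueue_rescuer() ||
                       !test_bit(BDI_registered, &bdi->state))) {
                    /* Normal path: keep going until the work_list drains. */
                    do {
                            pages_written = wb_do_writeback(wb);
                    } while (!list_empty(&bdi->work_list));
            } else {
                    /* On the rescuer: do a bounded amount and get out. */
                    pages_written = writeback_inodes_wb(&bdi->wb, 1024,
                                                        WB_REASON_FORKER_THREAD);
            }

            if (!list_empty(&bdi->work_list))
                    mod_delayed_work(bdi_wq, &wb->dwork, 0);   /* 1125 */
            else if (wb_has_dirty_io(wb) && dirty_writeback_interval)
                    bdi_wakeup_thread_delayed(bdi);            /* 1126 */
    }
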
1176 if (list_empty(&bdi->wb.b_dirty_time)) in wakeup_dirtytime_writeback()
1324 spin_lock(&bdi->wb.list_lock); in __mark_inode_dirty()
1335 if (!wb_has_dirty_io(&bdi->wb)) in __mark_inode_dirty()
1343 list_move(&inode->i_wb_list, &bdi->wb.b_dirty); in __mark_inode_dirty()
1346 &bdi->wb.b_dirty_time); in __mark_inode_dirty()
1347 spin_unlock(&bdi->wb.list_lock); in __mark_inode_dirty()
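
The __mark_inode_dirty() hits are the enqueue side: under bdi->wb.list_lock a newly dirtied inode is timestamped and moved onto b_dirty, or onto b_dirty_time when only its timestamps are dirty, and if the wb had no dirty IO beforehand (1335) the flusher gets a delayed wakeup. Roughly, for the v4.0 source:

            spin_lock(&bdi->wb.list_lock);
            /* First dirty inode on this bdi: arrange to wake the flusher. */
            if (!wb_has_dirty_io(&bdi->wb))
                    wakeup_bdi = true;

            inode->dirtied_when = jiffies;
            if (inode->i_state & I_DIRTY)
                    list_move(&inode->i_wb_list, &bdi->wb.b_dirty);
            else
                    list_move(&inode->i_wb_list, &bdi->wb.b_dirty_time);
            spin_unlock(&bdi->wb.list_lock);

            if (wakeup_bdi)
                    bdi_wakeup_thread_delayed(bdi);
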
1544 struct bdi_writeback *wb = &inode_to_bdi(inode)->wb; in write_inode_now() local
1556 return writeback_single_inode(inode, wb, &wbc); in write_inode_now()
1573 return writeback_single_inode(inode, &inode_to_bdi(inode)->wb, wbc); in sync_inode()
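
The last two hits are the exported single-inode entry points: write_inode_now() builds a WB_SYNC_ALL (or WB_SYNC_NONE) writeback_control covering the whole file and feeds it to writeback_single_inode() against the inode's bdi writeback state, while sync_inode() does the same with a caller-supplied wbc. A sketch of write_inode_now(), assuming the v4.0 source:

    int write_inode_now(struct inode *inode, int sync)
    {
            struct bdi_writeback *wb = &inode_to_bdi(inode)->wb;
            struct writeback_control wbc = {
                    .nr_to_write = LONG_MAX,
                    .sync_mode   = sync ? WB_SYNC_ALL : WB_SYNC_NONE,
                    .range_start = 0,
                    .range_end   = LLONG_MAX,
            };

            if (!mapping_cap_writeback_dirty(inode->i_mapping))
                    wbc.nr_to_write = 0;    /* e.g. ramfs: no data writeback */

            might_sleep();
            return writeback_single_inode(inode, wb, &wbc);
    }
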