root/fs/fs-writeback.c


DEFINITIONS

This source file includes the following definitions.
  1. wb_inode
  2. wb_io_lists_populated
  3. wb_io_lists_depopulated
  4. inode_io_list_move_locked
  5. inode_io_list_del_locked
  6. wb_wakeup
  7. finish_writeback_work
  8. wb_queue_work
  9. wb_wait_for_completion
  10. __inode_attach_wb
  11. locked_inode_to_wb_and_lock_list
  12. inode_to_wb_and_lock_list
  13. bdi_down_write_wb_switch_rwsem
  14. bdi_up_write_wb_switch_rwsem
  15. inode_switch_wbs_work_fn
  16. inode_switch_wbs_rcu_fn
  17. inode_switch_wbs
  18. wbc_attach_and_unlock_inode
  19. wbc_detach_inode
  20. wbc_account_cgroup_owner
  21. inode_congested
  22. wb_split_bdi_pages
  23. bdi_split_work_to_wbs
  24. cgroup_writeback_by_id
  25. cgroup_writeback_umount
  26. cgroup_writeback_init
  27. bdi_down_write_wb_switch_rwsem
  28. bdi_up_write_wb_switch_rwsem
  29. locked_inode_to_wb_and_lock_list
  30. inode_to_wb_and_lock_list
  31. wb_split_bdi_pages
  32. bdi_split_work_to_wbs
  33. get_nr_dirty_pages
  34. wb_start_writeback
  35. wb_start_background_writeback
  36. inode_io_list_del
  37. sb_mark_inode_writeback
  38. sb_clear_inode_writeback
  39. redirty_tail
  40. requeue_io
  41. inode_sync_complete
  42. inode_dirtied_after
  43. move_expired_inodes
  44. queue_io
  45. write_inode
  46. __inode_wait_for_writeback
  47. inode_wait_for_writeback
  48. inode_sleep_on_writeback
  49. requeue_inode
  50. __writeback_single_inode
  51. writeback_single_inode
  52. writeback_chunk_size
  53. writeback_sb_inodes
  54. __writeback_inodes_wb
  55. writeback_inodes_wb
  56. wb_writeback
  57. get_next_work_item
  58. wb_check_background_flush
  59. wb_check_old_data_flush
  60. wb_check_start_all
  61. wb_do_writeback
  62. wb_workfn
  63. __wakeup_flusher_threads_bdi
  64. wakeup_flusher_threads_bdi
  65. wakeup_flusher_threads
  66. wakeup_dirtytime_writeback
  67. start_dirtytime_writeback
  68. dirtytime_interval_handler
  69. block_dump___mark_inode_dirty
  70. __mark_inode_dirty
  71. wait_sb_inodes
  72. __writeback_inodes_sb_nr
  73. writeback_inodes_sb_nr
  74. writeback_inodes_sb
  75. try_to_writeback_inodes_sb
  76. sync_inodes_sb
  77. write_inode_now
  78. sync_inode
  79. sync_inode_metadata

   1 // SPDX-License-Identifier: GPL-2.0-only
   2 /*
   3  * fs/fs-writeback.c
   4  *
   5  * Copyright (C) 2002, Linus Torvalds.
   6  *
   7  * Contains all the functions related to writing back and waiting
   8  * upon dirty inodes against superblocks, and writing back dirty
   9  * pages against inodes.  ie: data writeback.  Writeout of the
  10  * inode itself is not handled here.
  11  *
  12  * 10Apr2002    Andrew Morton
  13  *              Split out of fs/inode.c
  14  *              Additions for address_space-based writeback
  15  */
  16 
  17 #include <linux/kernel.h>
  18 #include <linux/export.h>
  19 #include <linux/spinlock.h>
  20 #include <linux/slab.h>
  21 #include <linux/sched.h>
  22 #include <linux/fs.h>
  23 #include <linux/mm.h>
  24 #include <linux/pagemap.h>
  25 #include <linux/kthread.h>
  26 #include <linux/writeback.h>
  27 #include <linux/blkdev.h>
  28 #include <linux/backing-dev.h>
  29 #include <linux/tracepoint.h>
  30 #include <linux/device.h>
  31 #include <linux/memcontrol.h>
  32 #include "internal.h"
  33 
  34 /*
  35  * 4MB minimal write chunk size
  36  */
  37 #define MIN_WRITEBACK_PAGES     (4096UL >> (PAGE_SHIFT - 10))
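/*
 * For example: the 4096 above is in KB and ">> (PAGE_SHIFT - 10)" converts
 * KB to pages, so with 4KB pages (PAGE_SHIFT == 12) this works out to
 * 4096 >> 2 == 1024 pages, and with 64KB pages (PAGE_SHIFT == 16) to
 * 4096 >> 6 == 64 pages -- 4MB either way.
 */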
  38 
  39 /*
  40  * Passed into wb_writeback(), essentially a subset of writeback_control
  41  */
  42 struct wb_writeback_work {
  43         long nr_pages;
  44         struct super_block *sb;
  45         unsigned long *older_than_this;
  46         enum writeback_sync_modes sync_mode;
  47         unsigned int tagged_writepages:1;
  48         unsigned int for_kupdate:1;
  49         unsigned int range_cyclic:1;
  50         unsigned int for_background:1;
  51         unsigned int for_sync:1;        /* sync(2) WB_SYNC_ALL writeback */
  52         unsigned int auto_free:1;       /* free on completion */
  53         enum wb_reason reason;          /* why was writeback initiated? */
  54 
  55         struct list_head list;          /* pending work list */
  56         struct wb_completion *done;     /* set if the caller waits */
  57 };
  58 
  59 /*
  60  * If an inode is constantly having its pages dirtied, but then the
  61  * updates stop dirtytime_expire_interval seconds in the past, it's
  62  * possible for the worst case time between when an inode has its
  63  * timestamps updated and when they finally get written out to be two
  64  * dirtytime_expire_intervals.  We set the default to 12 hours (in
  65  * seconds), which means most of the time inodes will have their
  66  * timestamps written to disk after 12 hours, but in the worst case a
  67  * few inodes might not have their timestamps updated for 24 hours.
  68  */
  69 unsigned int dirtytime_expire_interval = 12 * 60 * 60;
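/*
 * A rough worst-case timeline for the above (assuming the periodic
 * dirtytime flush also runs every dirtytime_expire_interval): timestamps
 * dirtied just after a flush at time T only become "expired" at T + 12h,
 * so the pass at T + 12h can still skip them and they are not written
 * out until the pass at T + 24h.
 */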
  70 
  71 static inline struct inode *wb_inode(struct list_head *head)
  72 {
  73         return list_entry(head, struct inode, i_io_list);
  74 }
  75 
  76 /*
  77  * Include the creation of the trace points after defining the
  78  * wb_writeback_work structure and inline functions so that the definition
  79  * remains local to this file.
  80  */
  81 #define CREATE_TRACE_POINTS
  82 #include <trace/events/writeback.h>
  83 
  84 EXPORT_TRACEPOINT_SYMBOL_GPL(wbc_writepage);
  85 
  86 static bool wb_io_lists_populated(struct bdi_writeback *wb)
  87 {
  88         if (wb_has_dirty_io(wb)) {
  89                 return false;
  90         } else {
  91                 set_bit(WB_has_dirty_io, &wb->state);
  92                 WARN_ON_ONCE(!wb->avg_write_bandwidth);
  93                 atomic_long_add(wb->avg_write_bandwidth,
  94                                 &wb->bdi->tot_write_bandwidth);
  95                 return true;
  96         }
  97 }
  98 
  99 static void wb_io_lists_depopulated(struct bdi_writeback *wb)
 100 {
 101         if (wb_has_dirty_io(wb) && list_empty(&wb->b_dirty) &&
 102             list_empty(&wb->b_io) && list_empty(&wb->b_more_io)) {
 103                 clear_bit(WB_has_dirty_io, &wb->state);
 104                 WARN_ON_ONCE(atomic_long_sub_return(wb->avg_write_bandwidth,
 105                                         &wb->bdi->tot_write_bandwidth) < 0);
 106         }
 107 }
 108 
 109 /**
 110  * inode_io_list_move_locked - move an inode onto a bdi_writeback IO list
 111  * @inode: inode to be moved
 112  * @wb: target bdi_writeback
 113  * @head: one of @wb->b_{dirty|io|more_io|dirty_time}
 114  *
 115  * Move @inode->i_io_list to @head of @wb and set %WB_has_dirty_io.
 116  * Returns %true if @inode is the first occupant of the !dirty_time IO
 117  * lists; otherwise, %false.
 118  */
 119 static bool inode_io_list_move_locked(struct inode *inode,
 120                                       struct bdi_writeback *wb,
 121                                       struct list_head *head)
 122 {
 123         assert_spin_locked(&wb->list_lock);
 124 
 125         list_move(&inode->i_io_list, head);
 126 
 127         /* dirty_time doesn't count as dirty_io until expiration */
 128         if (head != &wb->b_dirty_time)
 129                 return wb_io_lists_populated(wb);
 130 
 131         wb_io_lists_depopulated(wb);
 132         return false;
 133 }
 134 
 135 /**
 136  * inode_io_list_del_locked - remove an inode from its bdi_writeback IO list
 137  * @inode: inode to be removed
 138  * @wb: bdi_writeback @inode is being removed from
 139  *
 140  * Remove @inode which may be on one of @wb->b_{dirty|io|more_io} lists and
 141  * clear %WB_has_dirty_io if all are empty afterwards.
 142  */
 143 static void inode_io_list_del_locked(struct inode *inode,
 144                                      struct bdi_writeback *wb)
 145 {
 146         assert_spin_locked(&wb->list_lock);
 147 
 148         list_del_init(&inode->i_io_list);
 149         wb_io_lists_depopulated(wb);
 150 }
 151 
 152 static void wb_wakeup(struct bdi_writeback *wb)
 153 {
 154         spin_lock_bh(&wb->work_lock);
 155         if (test_bit(WB_registered, &wb->state))
 156                 mod_delayed_work(bdi_wq, &wb->dwork, 0);
 157         spin_unlock_bh(&wb->work_lock);
 158 }
 159 
 160 static void finish_writeback_work(struct bdi_writeback *wb,
 161                                   struct wb_writeback_work *work)
 162 {
 163         struct wb_completion *done = work->done;
 164 
 165         if (work->auto_free)
 166                 kfree(work);
 167         if (done) {
 168                 wait_queue_head_t *waitq = done->waitq;
 169 
 170                 /* @done can't be accessed after the following dec */
 171                 if (atomic_dec_and_test(&done->cnt))
 172                         wake_up_all(waitq);
 173         }
 174 }
 175 
 176 static void wb_queue_work(struct bdi_writeback *wb,
 177                           struct wb_writeback_work *work)
 178 {
 179         trace_writeback_queue(wb, work);
 180 
 181         if (work->done)
 182                 atomic_inc(&work->done->cnt);
 183 
 184         spin_lock_bh(&wb->work_lock);
 185 
 186         if (test_bit(WB_registered, &wb->state)) {
 187                 list_add_tail(&work->list, &wb->work_list);
 188                 mod_delayed_work(bdi_wq, &wb->dwork, 0);
 189         } else
 190                 finish_writeback_work(wb, work);
 191 
 192         spin_unlock_bh(&wb->work_lock);
 193 }
 194 
 195 /**
 196  * wb_wait_for_completion - wait for completion of bdi_writeback_works
 197  * @done: target wb_completion
 198  *
 199  * Wait for one or more work items issued to @bdi with their ->done field
 200  * set to @done, which should have been initialized with
 201  * DEFINE_WB_COMPLETION().  This function returns after all such work items
 202  * are completed.  Work items which are waited upon aren't freed
 203  * automatically on completion.
 204  */
 205 void wb_wait_for_completion(struct wb_completion *done)
 206 {
 207         atomic_dec(&done->cnt);         /* put down the initial count */
 208         wait_event(*done->waitq, !atomic_read(&done->cnt));
 209 }
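/*
 * A minimal usage sketch of the completion machinery (the on-stack
 * fallback path in bdi_split_work_to_wbs() below is the in-tree user of
 * this pattern):
 *
 *        DEFINE_WB_COMPLETION(done, bdi);
 *        struct wb_writeback_work work = { ... };
 *
 *        work.done = &done;
 *        wb_queue_work(wb, &work);
 *        wb_wait_for_completion(&done);
 */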
 210 
 211 #ifdef CONFIG_CGROUP_WRITEBACK
 212 
 213 /*
 214  * Parameters for foreign inode detection, see wbc_detach_inode() to see
 215  * how they're used.
 216  *
 217  * These parameters are inherently heuristic as the detection target
 218  * itself is fuzzy.  All we want to do is detach an inode from its
 219  * current owner if it's being written to too heavily by other cgroups.
 220  *
 221  * The current cgroup writeback is built on the assumption that multiple
 222  * cgroups writing to the same inode concurrently is very rare and a mode
 223  * of operation which isn't well supported.  As such, the goal is to
 224  * switch reasonably quickly when a different cgroup takes over an inode
 225  * while avoiding overly aggressive flip-flops from occasional foreign writes.
 226  *
 227  * We record, very roughly, 2s worth of IO time history and if more than
 228  * half of that is foreign, trigger the switch.  The recording is quantized
 229  * to 16 slots.  To prevent tiny writes from swinging the decision too much,
 230  * writes smaller than 1/8 of avg size are ignored.
 231  */
 232 #define WB_FRN_TIME_SHIFT       13      /* 1s = 2^13, up to 8 secs w/ 16bit */
 233 #define WB_FRN_TIME_AVG_SHIFT   3       /* avg = avg * 7/8 + new * 1/8 */
 234 #define WB_FRN_TIME_CUT_DIV     8       /* ignore rounds < avg / 8 */
 235 #define WB_FRN_TIME_PERIOD      (2 * (1 << WB_FRN_TIME_SHIFT))  /* 2s */
 236 
 237 #define WB_FRN_HIST_SLOTS       16      /* inode->i_wb_frn_history is 16bit */
 238 #define WB_FRN_HIST_UNIT        (WB_FRN_TIME_PERIOD / WB_FRN_HIST_SLOTS)
 239                                         /* each slot's duration is 2s / 16 */
 240 #define WB_FRN_HIST_THR_SLOTS   (WB_FRN_HIST_SLOTS / 2)
 241                                         /* switch if more than half the slots are foreign */
 242 #define WB_FRN_HIST_MAX_SLOTS   (WB_FRN_HIST_THR_SLOTS / 2 + 1)
 243                                         /* one round can affect up to 5 slots */
 244 #define WB_FRN_MAX_IN_FLIGHT    1024    /* don't queue too many concurrently */
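/*
 * Plugging the numbers in: with WB_FRN_TIME_SHIFT == 13 one second is
 * 8192 time units, so WB_FRN_TIME_PERIOD is 16384 units (~2s), each of
 * the 16 history slots (WB_FRN_HIST_UNIT) covers 1024 units (~125ms),
 * a switch needs more than 8 of the 16 slots to be foreign, and a single
 * round can shift in at most 5 slots.
 */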
 245 
 246 static atomic_t isw_nr_in_flight = ATOMIC_INIT(0);
 247 static struct workqueue_struct *isw_wq;
 248 
 249 void __inode_attach_wb(struct inode *inode, struct page *page)
 250 {
 251         struct backing_dev_info *bdi = inode_to_bdi(inode);
 252         struct bdi_writeback *wb = NULL;
 253 
 254         if (inode_cgwb_enabled(inode)) {
 255                 struct cgroup_subsys_state *memcg_css;
 256 
 257                 if (page) {
 258                         memcg_css = mem_cgroup_css_from_page(page);
 259                         wb = wb_get_create(bdi, memcg_css, GFP_ATOMIC);
 260                 } else {
 261                         /* must pin memcg_css, see wb_get_create() */
 262                         memcg_css = task_get_css(current, memory_cgrp_id);
 263                         wb = wb_get_create(bdi, memcg_css, GFP_ATOMIC);
 264                         css_put(memcg_css);
 265                 }
 266         }
 267 
 268         if (!wb)
 269                 wb = &bdi->wb;
 270 
 271         /*
 272          * There may be multiple instances of this function racing to
 273          * update the same inode.  Use cmpxchg() to tell the winner.
 274          */
 275         if (unlikely(cmpxchg(&inode->i_wb, NULL, wb)))
 276                 wb_put(wb);
 277 }
 278 EXPORT_SYMBOL_GPL(__inode_attach_wb);
 279 
 280 /**
 281  * locked_inode_to_wb_and_lock_list - determine a locked inode's wb and lock it
 282  * @inode: inode of interest with i_lock held
 283  *
 284  * Returns @inode's wb with its list_lock held.  @inode->i_lock must be
 285  * held on entry and is released on return.  The returned wb is guaranteed
 286  * to stay @inode's associated wb until its list_lock is released.
 287  */
 288 static struct bdi_writeback *
 289 locked_inode_to_wb_and_lock_list(struct inode *inode)
 290         __releases(&inode->i_lock)
 291         __acquires(&wb->list_lock)
 292 {
 293         while (true) {
 294                 struct bdi_writeback *wb = inode_to_wb(inode);
 295 
 296                 /*
 297                  * inode_to_wb() association is protected by both
 298                  * @inode->i_lock and @wb->list_lock but list_lock nests
 299                  * outside i_lock.  Drop i_lock and verify that the
 300                  * association hasn't changed after acquiring list_lock.
 301                  */
 302                 wb_get(wb);
 303                 spin_unlock(&inode->i_lock);
 304                 spin_lock(&wb->list_lock);
 305 
 306                 /* i_wb may have changed in between, can't use inode_to_wb() */
 307                 if (likely(wb == inode->i_wb)) {
 308                         wb_put(wb);     /* @inode already has ref */
 309                         return wb;
 310                 }
 311 
 312                 spin_unlock(&wb->list_lock);
 313                 wb_put(wb);
 314                 cpu_relax();
 315                 spin_lock(&inode->i_lock);
 316         }
 317 }
 318 
 319 /**
 320  * inode_to_wb_and_lock_list - determine an inode's wb and lock it
 321  * @inode: inode of interest
 322  *
 323  * Same as locked_inode_to_wb_and_lock_list() but @inode->i_lock isn't held
 324  * on entry.
 325  */
 326 static struct bdi_writeback *inode_to_wb_and_lock_list(struct inode *inode)
 327         __acquires(&wb->list_lock)
 328 {
 329         spin_lock(&inode->i_lock);
 330         return locked_inode_to_wb_and_lock_list(inode);
 331 }
 332 
 333 struct inode_switch_wbs_context {
 334         struct inode            *inode;
 335         struct bdi_writeback    *new_wb;
 336 
 337         struct rcu_head         rcu_head;
 338         struct work_struct      work;
 339 };
 340 
 341 static void bdi_down_write_wb_switch_rwsem(struct backing_dev_info *bdi)
 342 {
 343         down_write(&bdi->wb_switch_rwsem);
 344 }
 345 
 346 static void bdi_up_write_wb_switch_rwsem(struct backing_dev_info *bdi)
 347 {
 348         up_write(&bdi->wb_switch_rwsem);
 349 }
 350 
 351 static void inode_switch_wbs_work_fn(struct work_struct *work)
 352 {
 353         struct inode_switch_wbs_context *isw =
 354                 container_of(work, struct inode_switch_wbs_context, work);
 355         struct inode *inode = isw->inode;
 356         struct backing_dev_info *bdi = inode_to_bdi(inode);
 357         struct address_space *mapping = inode->i_mapping;
 358         struct bdi_writeback *old_wb = inode->i_wb;
 359         struct bdi_writeback *new_wb = isw->new_wb;
 360         XA_STATE(xas, &mapping->i_pages, 0);
 361         struct page *page;
 362         bool switched = false;
 363 
 364         /*
 365          * If @inode switches cgwb membership while sync_inodes_sb() is
 366          * being issued, sync_inodes_sb() might miss it.  Synchronize.
 367          */
 368         down_read(&bdi->wb_switch_rwsem);
 369 
 370         /*
 371          * By the time control reaches here, RCU grace period has passed
 372          * since I_WB_SWITCH assertion and all wb stat update transactions
 373          * between unlocked_inode_to_wb_begin/end() are guaranteed to be
 374          * synchronizing against the i_pages lock.
 375          *
 376          * Grabbing old_wb->list_lock, inode->i_lock and the i_pages lock
 377          * gives us exclusion against all wb related operations on @inode
 378          * including IO list manipulations and stat updates.
 379          */
 380         if (old_wb < new_wb) {
 381                 spin_lock(&old_wb->list_lock);
 382                 spin_lock_nested(&new_wb->list_lock, SINGLE_DEPTH_NESTING);
 383         } else {
 384                 spin_lock(&new_wb->list_lock);
 385                 spin_lock_nested(&old_wb->list_lock, SINGLE_DEPTH_NESTING);
 386         }
 387         spin_lock(&inode->i_lock);
 388         xa_lock_irq(&mapping->i_pages);
 389 
 390         /*
 391          * Once I_FREEING is visible under i_lock, the eviction path owns
 392          * the inode and we shouldn't modify ->i_io_list.
 393          */
 394         if (unlikely(inode->i_state & I_FREEING))
 395                 goto skip_switch;
 396 
 397         trace_inode_switch_wbs(inode, old_wb, new_wb);
 398 
 399         /*
 400          * Count and transfer stats.  Note that PAGECACHE_TAG_DIRTY points
 401          * to possibly dirty pages while PAGECACHE_TAG_WRITEBACK points to
 402          * pages actually under writeback.
 403          */
 404         xas_for_each_marked(&xas, page, ULONG_MAX, PAGECACHE_TAG_DIRTY) {
 405                 if (PageDirty(page)) {
 406                         dec_wb_stat(old_wb, WB_RECLAIMABLE);
 407                         inc_wb_stat(new_wb, WB_RECLAIMABLE);
 408                 }
 409         }
 410 
 411         xas_set(&xas, 0);
 412         xas_for_each_marked(&xas, page, ULONG_MAX, PAGECACHE_TAG_WRITEBACK) {
 413                 WARN_ON_ONCE(!PageWriteback(page));
 414                 dec_wb_stat(old_wb, WB_WRITEBACK);
 415                 inc_wb_stat(new_wb, WB_WRITEBACK);
 416         }
 417 
 418         wb_get(new_wb);
 419 
 420         /*
 421          * Transfer to @new_wb's IO list if necessary.  The specific list
 422          * @inode was on is ignored and the inode is put on ->b_dirty which
 423          * is always correct including from ->b_dirty_time.  The transfer
 424          * preserves @inode->dirtied_when ordering.
 425          */
 426         if (!list_empty(&inode->i_io_list)) {
 427                 struct inode *pos;
 428 
 429                 inode_io_list_del_locked(inode, old_wb);
 430                 inode->i_wb = new_wb;
 431                 list_for_each_entry(pos, &new_wb->b_dirty, i_io_list)
 432                         if (time_after_eq(inode->dirtied_when,
 433                                           pos->dirtied_when))
 434                                 break;
 435                 inode_io_list_move_locked(inode, new_wb, pos->i_io_list.prev);
 436         } else {
 437                 inode->i_wb = new_wb;
 438         }
 439 
 440         /* ->i_wb_frn updates may race with wbc_detach_inode() but that doesn't matter */
 441         inode->i_wb_frn_winner = 0;
 442         inode->i_wb_frn_avg_time = 0;
 443         inode->i_wb_frn_history = 0;
 444         switched = true;
 445 skip_switch:
 446         /*
 447          * Paired with load_acquire in unlocked_inode_to_wb_begin() and
 448          * ensures that the new wb is visible if they see !I_WB_SWITCH.
 449          */
 450         smp_store_release(&inode->i_state, inode->i_state & ~I_WB_SWITCH);
 451 
 452         xa_unlock_irq(&mapping->i_pages);
 453         spin_unlock(&inode->i_lock);
 454         spin_unlock(&new_wb->list_lock);
 455         spin_unlock(&old_wb->list_lock);
 456 
 457         up_read(&bdi->wb_switch_rwsem);
 458 
 459         if (switched) {
 460                 wb_wakeup(new_wb);
 461                 wb_put(old_wb);
 462         }
 463         wb_put(new_wb);
 464 
 465         iput(inode);
 466         kfree(isw);
 467 
 468         atomic_dec(&isw_nr_in_flight);
 469 }
 470 
 471 static void inode_switch_wbs_rcu_fn(struct rcu_head *rcu_head)
 472 {
 473         struct inode_switch_wbs_context *isw = container_of(rcu_head,
 474                                 struct inode_switch_wbs_context, rcu_head);
 475 
 476         /* needs to grab bh-unsafe locks, bounce to work item */
 477         INIT_WORK(&isw->work, inode_switch_wbs_work_fn);
 478         queue_work(isw_wq, &isw->work);
 479 }
 480 
 481 /**
 482  * inode_switch_wbs - change the wb association of an inode
 483  * @inode: target inode
 484  * @new_wb_id: ID of the new wb
 485  *
 486  * Switch @inode's wb association to the wb identified by @new_wb_id.  The
 487  * switching is performed asynchronously and may fail silently.
 488  */
 489 static void inode_switch_wbs(struct inode *inode, int new_wb_id)
 490 {
 491         struct backing_dev_info *bdi = inode_to_bdi(inode);
 492         struct cgroup_subsys_state *memcg_css;
 493         struct inode_switch_wbs_context *isw;
 494 
 495         /* noop if a switch already seems to be in progress */
 496         if (inode->i_state & I_WB_SWITCH)
 497                 return;
 498 
 499         /* avoid queueing a new switch if too many are already in flight */
 500         if (atomic_read(&isw_nr_in_flight) > WB_FRN_MAX_IN_FLIGHT)
 501                 return;
 502 
 503         isw = kzalloc(sizeof(*isw), GFP_ATOMIC);
 504         if (!isw)
 505                 return;
 506 
 507         /* find and pin the new wb */
 508         rcu_read_lock();
 509         memcg_css = css_from_id(new_wb_id, &memory_cgrp_subsys);
 510         if (memcg_css)
 511                 isw->new_wb = wb_get_create(bdi, memcg_css, GFP_ATOMIC);
 512         rcu_read_unlock();
 513         if (!isw->new_wb)
 514                 goto out_free;
 515 
 516         /* while holding I_WB_SWITCH, no one else can update the association */
 517         spin_lock(&inode->i_lock);
 518         if (!(inode->i_sb->s_flags & SB_ACTIVE) ||
 519             inode->i_state & (I_WB_SWITCH | I_FREEING) ||
 520             inode_to_wb(inode) == isw->new_wb) {
 521                 spin_unlock(&inode->i_lock);
 522                 goto out_free;
 523         }
 524         inode->i_state |= I_WB_SWITCH;
 525         __iget(inode);
 526         spin_unlock(&inode->i_lock);
 527 
 528         isw->inode = inode;
 529 
 530         /*
 531          * In addition to synchronizing among switchers, I_WB_SWITCH tells
 532          * the RCU protected stat update paths to grab the i_pages
 533          * lock so that stat transfer can synchronize against them.
 534          * Let's continue after I_WB_SWITCH is guaranteed to be visible.
 535          */
 536         call_rcu(&isw->rcu_head, inode_switch_wbs_rcu_fn);
 537 
 538         atomic_inc(&isw_nr_in_flight);
 539         return;
 540 
 541 out_free:
 542         if (isw->new_wb)
 543                 wb_put(isw->new_wb);
 544         kfree(isw);
 545 }
 546 
 547 /**
 548  * wbc_attach_and_unlock_inode - associate wbc with target inode and unlock it
 549  * @wbc: writeback_control of interest
 550  * @inode: target inode
 551  *
 552  * @inode is locked and about to be written back under the control of @wbc.
 553  * Record @inode's writeback context into @wbc and unlock the i_lock.  On
 554  * writeback completion, wbc_detach_inode() should be called.  This is used
 555  * to track the cgroup writeback context.
 556  */
 557 void wbc_attach_and_unlock_inode(struct writeback_control *wbc,
 558                                  struct inode *inode)
 559 {
 560         if (!inode_cgwb_enabled(inode)) {
 561                 spin_unlock(&inode->i_lock);
 562                 return;
 563         }
 564 
 565         wbc->wb = inode_to_wb(inode);
 566         wbc->inode = inode;
 567 
 568         wbc->wb_id = wbc->wb->memcg_css->id;
 569         wbc->wb_lcand_id = inode->i_wb_frn_winner;
 570         wbc->wb_tcand_id = 0;
 571         wbc->wb_bytes = 0;
 572         wbc->wb_lcand_bytes = 0;
 573         wbc->wb_tcand_bytes = 0;
 574 
 575         wb_get(wbc->wb);
 576         spin_unlock(&inode->i_lock);
 577 
 578         /*
 579          * A dying wb indicates that either the blkcg associated with the
 580          * memcg changed or the associated memcg is dying.  In the first
 581          * case, a replacement wb should already be available and we should
 582          * refresh the wb immediately.  In the second case, trying to
 583          * refresh will keep failing.
 584          */
 585         if (unlikely(wb_dying(wbc->wb) && !css_is_dying(wbc->wb->memcg_css)))
 586                 inode_switch_wbs(inode, wbc->wb_id);
 587 }
 588 EXPORT_SYMBOL_GPL(wbc_attach_and_unlock_inode);
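/*
 * A simplified sketch of how the attach/account/detach calls fit together
 * (writeback_sb_inodes() is the in-tree caller; the per-page accounting is
 * done from the filesystems' submission paths):
 *
 *        struct writeback_control wbc = { ... };
 *
 *        spin_lock(&inode->i_lock);
 *        wbc_attach_and_unlock_inode(&wbc, inode);
 *        ... write out pages, calling wbc_account_cgroup_owner() for each ...
 *        wbc_detach_inode(&wbc);
 */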
 589 
 590 /**
 591  * wbc_detach_inode - disassociate wbc from inode and perform foreign detection
 592  * @wbc: writeback_control of the just finished writeback
 593  *
 594  * To be called after a writeback attempt of an inode finishes and undoes
 595  * wbc_attach_and_unlock_inode().  Can be called under any context.
 596  *
 597  * As concurrent write sharing of an inode is expected to be very rare and
 598  * memcg only tracks page ownership on a first-use basis, severely confining
 599  * the usefulness of such sharing, cgroup writeback tracks ownership
 600  * per-inode.  While the support for concurrent write sharing of an inode
 601  * is deemed unnecessary, an inode being written to by different cgroups at
 602  * different points in time is a lot more common, and, more importantly,
 603  * charging only by first-use can too readily lead to grossly incorrect
 604  * behaviors (a single foreign page can cause gigabytes of writeback to be
 605  * incorrectly attributed).
 606  *
 607  * To resolve this issue, cgroup writeback detects the majority dirtier of
 608  * an inode and transfers the ownership to it.  To avoid unnecessary
 609  * oscillation, the detection mechanism keeps track of history and gives
 610  * out the switch verdict only if the foreign usage pattern is stable over
 611  * a certain amount of time and/or writeback attempts.
 612  *
 613  * On each writeback attempt, @wbc tries to detect the majority writer
 614  * using Boyer-Moore majority vote algorithm.  In addition to the byte
 615  * count from the majority voting, it also counts the bytes written for the
 616  * current wb and the last round's winner wb (max of last round's current
 617  * wb, the winner from two rounds ago, and the last round's majority
 618  * candidate).  Keeping track of the historical winner helps the algorithm
 619  * to semi-reliably detect the most active writer even when it's not the
 620  * absolute majority.
 621  *
 622  * Once the winner of the round is determined, whether the winner is
 623  * foreign or not and how much IO time the round consumed is recorded in
 624  * inode->i_wb_frn_history.  If the amount of recorded foreign IO time is
 625  * over a certain threshold, the switch verdict is given.
 626  */
 627 void wbc_detach_inode(struct writeback_control *wbc)
 628 {
 629         struct bdi_writeback *wb = wbc->wb;
 630         struct inode *inode = wbc->inode;
 631         unsigned long avg_time, max_bytes, max_time;
 632         u16 history;
 633         int max_id;
 634 
 635         if (!wb)
 636                 return;
 637 
 638         history = inode->i_wb_frn_history;
 639         avg_time = inode->i_wb_frn_avg_time;
 640 
 641         /* pick the winner of this round */
 642         if (wbc->wb_bytes >= wbc->wb_lcand_bytes &&
 643             wbc->wb_bytes >= wbc->wb_tcand_bytes) {
 644                 max_id = wbc->wb_id;
 645                 max_bytes = wbc->wb_bytes;
 646         } else if (wbc->wb_lcand_bytes >= wbc->wb_tcand_bytes) {
 647                 max_id = wbc->wb_lcand_id;
 648                 max_bytes = wbc->wb_lcand_bytes;
 649         } else {
 650                 max_id = wbc->wb_tcand_id;
 651                 max_bytes = wbc->wb_tcand_bytes;
 652         }
 653 
 654         /*
 655          * Calculate the amount of IO time the winner consumed and fold it
 656          * into the running average kept per inode.  If the consumed IO
 657  * time is lower than avg / WB_FRN_TIME_CUT_DIV, ignore it for
 658          * deciding whether to switch or not.  This is to prevent one-off
 659          * small dirtiers from skewing the verdict.
 660          */
 661         max_time = DIV_ROUND_UP((max_bytes >> PAGE_SHIFT) << WB_FRN_TIME_SHIFT,
 662                                 wb->avg_write_bandwidth);
 663         if (avg_time)
 664                 avg_time += (max_time >> WB_FRN_TIME_AVG_SHIFT) -
 665                             (avg_time >> WB_FRN_TIME_AVG_SHIFT);
 666         else
 667                 avg_time = max_time;    /* immediate catch up on first run */
 668 
 669         if (max_time >= avg_time / WB_FRN_TIME_CUT_DIV) {
 670                 int slots;
 671 
 672                 /*
 673                  * The switch verdict is reached if foreign wb's consume
 674                  * more than a certain proportion of IO time in a
 675                  * WB_FRN_TIME_PERIOD.  This is loosely tracked by 16 slot
 676                  * history mask where each bit represents one sixteenth of
 677                  * the period.  Determine the number of slots to shift into
 678                  * history from @max_time.
 679                  */
 680                 slots = min(DIV_ROUND_UP(max_time, WB_FRN_HIST_UNIT),
 681                             (unsigned long)WB_FRN_HIST_MAX_SLOTS);
 682                 history <<= slots;
 683                 if (wbc->wb_id != max_id)
 684                         history |= (1U << slots) - 1;
 685 
 686                 if (history)
 687                         trace_inode_foreign_history(inode, wbc, history);
 688 
 689                 /*
 690                  * Switch if the current wb isn't the consistent winner.
 691                  * If there are multiple closely competing dirtiers, the
 692                  * inode may switch across them repeatedly over time, which
 693                  * is okay.  The main goal is avoiding keeping an inode on
 694                  * the wrong wb for an extended period of time.
 695                  */
 696                 if (hweight32(history) > WB_FRN_HIST_THR_SLOTS)
 697                         inode_switch_wbs(inode, max_id);
 698         }
 699 
 700         /*
 701          * Multiple instances of this function may race to update the
 702  * following fields but we don't mind occasional inaccuracies.
 703          */
 704         inode->i_wb_frn_winner = max_id;
 705         inode->i_wb_frn_avg_time = min(avg_time, (unsigned long)U16_MAX);
 706         inode->i_wb_frn_history = history;
 707 
 708         wb_put(wbc->wb);
 709         wbc->wb = NULL;
 710 }
 711 EXPORT_SYMBOL_GPL(wbc_detach_inode);
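/*
 * A worked round through the logic above, with illustrative numbers and
 * assuming 4KB pages and avg_write_bandwidth tracked in pages per second:
 * if the round's winner wrote max_bytes == 4MB (1024 pages) and
 * wb->avg_write_bandwidth == 8192 pages/s, then max_time is
 * DIV_ROUND_UP(1024 << 13, 8192) == 1024 time units (~125ms), i.e. exactly
 * one WB_FRN_HIST_UNIT, so one slot is shifted into the history.  If that
 * winner was foreign, the new slot bit is set; once more than 8 of the 16
 * recorded slots are foreign, inode_switch_wbs() is invoked.
 */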
 712 
 713 /**
 714  * wbc_account_cgroup_owner - account writeback to update inode cgroup ownership
 715  * @wbc: writeback_control of the writeback in progress
 716  * @page: page being written out
 717  * @bytes: number of bytes being written out
 718  *
 719  * @bytes from @page are about to be written out during the writeback
 720  * controlled by @wbc.  Keep the book for foreign inode detection.  See
 721  * wbc_detach_inode().
 722  */
 723 void wbc_account_cgroup_owner(struct writeback_control *wbc, struct page *page,
 724                               size_t bytes)
 725 {
 726         struct cgroup_subsys_state *css;
 727         int id;
 728 
 729         /*
 730          * pageout() path doesn't attach @wbc to the inode being written
 731          * out.  This is intentional as we don't want the function to block
 732          * behind a slow cgroup.  Ultimately, we want pageout() to kick off
 733          * regular writeback instead of writing things out itself.
 734          */
 735         if (!wbc->wb || wbc->no_cgroup_owner)
 736                 return;
 737 
 738         css = mem_cgroup_css_from_page(page);
 739         /* dead cgroups shouldn't contribute to inode ownership arbitration */
 740         if (!(css->flags & CSS_ONLINE))
 741                 return;
 742 
 743         id = css->id;
 744 
 745         if (id == wbc->wb_id) {
 746                 wbc->wb_bytes += bytes;
 747                 return;
 748         }
 749 
 750         if (id == wbc->wb_lcand_id)
 751                 wbc->wb_lcand_bytes += bytes;
 752 
 753         /* Boyer-Moore majority vote algorithm */
 754         if (!wbc->wb_tcand_bytes)
 755                 wbc->wb_tcand_id = id;
 756         if (id == wbc->wb_tcand_id)
 757                 wbc->wb_tcand_bytes += bytes;
 758         else
 759                 wbc->wb_tcand_bytes -= min(bytes, wbc->wb_tcand_bytes);
 760 }
 761 EXPORT_SYMBOL_GPL(wbc_account_cgroup_owner);
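/*
 * A small illustrative trace of the candidate bookkeeping above for one
 * writeback round: the attached wb belongs to cgroup W, neither B nor C is
 * the previous round's winner, and foreign pages are written in the order
 * B(32KB), C(8KB), B(16KB):
 *
 *        B 32KB: tcand_bytes was 0 -> tcand_id = B, tcand_bytes = 32KB
 *        C  8KB: id != tcand_id    -> tcand_bytes = 32KB - 8KB = 24KB
 *        B 16KB: id == tcand_id    -> tcand_bytes = 24KB + 16KB = 40KB
 *
 * The vote under-counts B (it actually wrote 48KB) but still leaves the
 * dominant foreign writer as the candidate that wbc_detach_inode() compares
 * against wb_bytes and wb_lcand_bytes.
 */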
 762 
 763 /**
 764  * inode_congested - test whether an inode is congested
 765  * @inode: inode to test for congestion (may be NULL)
 766  * @cong_bits: mask of WB_[a]sync_congested bits to test
 767  *
 768  * Tests whether @inode is congested.  @cong_bits is the mask of congestion
 769  * bits to test and the return value is the mask of set bits.
 770  *
 771  * If cgroup writeback is enabled for @inode, the congestion state is
 772  * determined by whether the cgwb (cgroup bdi_writeback) for the blkcg
 773  * associated with @inode is congested; otherwise, the root wb's congestion
 774  * state is used.
 775  *
 776  * @inode is allowed to be NULL as this function is often called on
 777  * mapping->host which is NULL for the swapper space.
 778  */
 779 int inode_congested(struct inode *inode, int cong_bits)
 780 {
 781         /*
 782          * Once set, ->i_wb never becomes NULL while the inode is alive.
 783          * Start transaction iff ->i_wb is visible.
 784          */
 785         if (inode && inode_to_wb_is_valid(inode)) {
 786                 struct bdi_writeback *wb;
 787                 struct wb_lock_cookie lock_cookie = {};
 788                 bool congested;
 789 
 790                 wb = unlocked_inode_to_wb_begin(inode, &lock_cookie);
 791                 congested = wb_congested(wb, cong_bits);
 792                 unlocked_inode_to_wb_end(inode, &lock_cookie);
 793                 return congested;
 794         }
 795 
 796         return wb_congested(&inode_to_bdi(inode)->wb, cong_bits);
 797 }
 798 EXPORT_SYMBOL_GPL(inode_congested);
 799 
 800 /**
 801  * wb_split_bdi_pages - split nr_pages to write according to bandwidth
 802  * @wb: target bdi_writeback to split @nr_pages to
 803  * @nr_pages: number of pages to write for the whole bdi
 804  *
 805  * Split @wb's portion of @nr_pages according to @wb's write bandwidth in
 806  * relation to the total write bandwidth of all wb's w/ dirty inodes on
 807  * @wb->bdi.
 808  */
 809 static long wb_split_bdi_pages(struct bdi_writeback *wb, long nr_pages)
 810 {
 811         unsigned long this_bw = wb->avg_write_bandwidth;
 812         unsigned long tot_bw = atomic_long_read(&wb->bdi->tot_write_bandwidth);
 813 
 814         if (nr_pages == LONG_MAX)
 815                 return LONG_MAX;
 816 
 817         /*
 818          * This may be called on clean wb's and proportional distribution
 819          * may not make sense, just use the original @nr_pages in those
 820          * cases.  In general, we wanna err on the side of writing more.
 821          */
 822         if (!tot_bw || this_bw >= tot_bw)
 823                 return nr_pages;
 824         else
 825                 return DIV_ROUND_UP_ULL((u64)nr_pages * this_bw, tot_bw);
 826 }
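/*
 * For example, a request for nr_pages == 1000 against a wb whose
 * avg_write_bandwidth is 100 out of a tot_write_bandwidth of 400 is split
 * to DIV_ROUND_UP(1000 * 100, 400) == 250 pages for this wb.
 */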
 827 
 828 /**
 829  * bdi_split_work_to_wbs - split a wb_writeback_work to all wb's of a bdi
 830  * @bdi: target backing_dev_info
 831  * @base_work: wb_writeback_work to issue
 832  * @skip_if_busy: skip wb's which already have writeback in progress
 833  *
 834  * Split and issue @base_work to all wb's (bdi_writeback's) of @bdi which
 835  * have dirty inodes.  If @base_work->nr_pages isn't %LONG_MAX, it's
 836  * distributed to the busy wbs according to each wb's proportion in the
 837  * total active write bandwidth of @bdi.
 838  */
 839 static void bdi_split_work_to_wbs(struct backing_dev_info *bdi,
 840                                   struct wb_writeback_work *base_work,
 841                                   bool skip_if_busy)
 842 {
 843         struct bdi_writeback *last_wb = NULL;
 844         struct bdi_writeback *wb = list_entry(&bdi->wb_list,
 845                                               struct bdi_writeback, bdi_node);
 846 
 847         might_sleep();
 848 restart:
 849         rcu_read_lock();
 850         list_for_each_entry_continue_rcu(wb, &bdi->wb_list, bdi_node) {
 851                 DEFINE_WB_COMPLETION(fallback_work_done, bdi);
 852                 struct wb_writeback_work fallback_work;
 853                 struct wb_writeback_work *work;
 854                 long nr_pages;
 855 
 856                 if (last_wb) {
 857                         wb_put(last_wb);
 858                         last_wb = NULL;
 859                 }
 860 
 861                 /* SYNC_ALL writes out I_DIRTY_TIME too */
 862                 if (!wb_has_dirty_io(wb) &&
 863                     (base_work->sync_mode == WB_SYNC_NONE ||
 864                      list_empty(&wb->b_dirty_time)))
 865                         continue;
 866                 if (skip_if_busy && writeback_in_progress(wb))
 867                         continue;
 868 
 869                 nr_pages = wb_split_bdi_pages(wb, base_work->nr_pages);
 870 
 871                 work = kmalloc(sizeof(*work), GFP_ATOMIC);
 872                 if (work) {
 873                         *work = *base_work;
 874                         work->nr_pages = nr_pages;
 875                         work->auto_free = 1;
 876                         wb_queue_work(wb, work);
 877                         continue;
 878                 }
 879 
 880                 /* alloc failed, execute synchronously using on-stack fallback */
 881                 work = &fallback_work;
 882                 *work = *base_work;
 883                 work->nr_pages = nr_pages;
 884                 work->auto_free = 0;
 885                 work->done = &fallback_work_done;
 886 
 887                 wb_queue_work(wb, work);
 888 
 889                 /*
 890                  * Pin @wb so that it stays on @bdi->wb_list.  This allows
 891                  * continuing iteration from @wb after dropping and
 892                  * regrabbing rcu read lock.
 893                  */
 894                 wb_get(wb);
 895                 last_wb = wb;
 896 
 897                 rcu_read_unlock();
 898                 wb_wait_for_completion(&fallback_work_done);
 899                 goto restart;
 900         }
 901         rcu_read_unlock();
 902 
 903         if (last_wb)
 904                 wb_put(last_wb);
 905 }
 906 
 907 /**
 908  * cgroup_writeback_by_id - initiate cgroup writeback from bdi and memcg IDs
 909  * @bdi_id: target bdi id
 910  * @memcg_id: target memcg css id
 911  * @nr: number of pages to write, 0 for best-effort dirty flushing
 912  * @reason: reason why some writeback work was initiated
 913  * @done: target wb_completion
 914  *
 915  * Initiate flush of the bdi_writeback identified by @bdi_id and @memcg_id
 916  * with the specified parameters.
 917  */
 918 int cgroup_writeback_by_id(u64 bdi_id, int memcg_id, unsigned long nr,
 919                            enum wb_reason reason, struct wb_completion *done)
 920 {
 921         struct backing_dev_info *bdi;
 922         struct cgroup_subsys_state *memcg_css;
 923         struct bdi_writeback *wb;
 924         struct wb_writeback_work *work;
 925         int ret;
 926 
 927         /* lookup bdi and memcg */
 928         bdi = bdi_get_by_id(bdi_id);
 929         if (!bdi)
 930                 return -ENOENT;
 931 
 932         rcu_read_lock();
 933         memcg_css = css_from_id(memcg_id, &memory_cgrp_subsys);
 934         if (memcg_css && !css_tryget(memcg_css))
 935                 memcg_css = NULL;
 936         rcu_read_unlock();
 937         if (!memcg_css) {
 938                 ret = -ENOENT;
 939                 goto out_bdi_put;
 940         }
 941 
 942         /*
 943          * And find the associated wb.  If the wb isn't there already,
 944          * there's nothing to flush; don't create one.
 945          */
 946         wb = wb_get_lookup(bdi, memcg_css);
 947         if (!wb) {
 948                 ret = -ENOENT;
 949                 goto out_css_put;
 950         }
 951 
 952         /*
 953          * If @nr is zero, the caller is attempting to write out most of
 954          * the currently dirty pages.  Let's take the current dirty page
 955          * count and inflate it by 25% which should be large enough to
 956          * flush out most dirty pages while avoiding getting livelocked by
 957          * concurrent dirtiers.
 958          */
 959         if (!nr) {
 960                 unsigned long filepages, headroom, dirty, writeback;
 961 
 962                 mem_cgroup_wb_stats(wb, &filepages, &headroom, &dirty,
 963                                       &writeback);
 964                 nr = dirty * 10 / 8;
 965         }
 966 
 967         /* issue the writeback work */
 968         work = kzalloc(sizeof(*work), GFP_NOWAIT | __GFP_NOWARN);
 969         if (work) {
 970                 work->nr_pages = nr;
 971                 work->sync_mode = WB_SYNC_NONE;
 972                 work->range_cyclic = 1;
 973                 work->reason = reason;
 974                 work->done = done;
 975                 work->auto_free = 1;
 976                 wb_queue_work(wb, work);
 977                 ret = 0;
 978         } else {
 979                 ret = -ENOMEM;
 980         }
 981 
 982         wb_put(wb);
 983 out_css_put:
 984         css_put(memcg_css);
 985 out_bdi_put:
 986         bdi_put(bdi);
 987         return ret;
 988 }
 989 
 990 /**
 991  * cgroup_writeback_umount - flush inode wb switches for umount
 992  *
 993  * This function is called when a super_block is about to be destroyed and
 994  * flushes in-flight inode wb switches.  An inode wb switch goes through
 995  * RCU and then workqueue, so the two need to be flushed in order to ensure
 996  * that all previously scheduled switches are finished.  As wb switches are
 997  * rare occurrences and synchronize_rcu() can take a while, perform
 998  * flushing iff wb switches are in flight.
 999  */
1000 void cgroup_writeback_umount(void)
1001 {
1002         if (atomic_read(&isw_nr_in_flight)) {
1003                 /*
1004                  * Use rcu_barrier() to wait for all pending callbacks to
1005                  * ensure that all in-flight wb switches are in the workqueue.
1006                  */
1007                 rcu_barrier();
1008                 flush_workqueue(isw_wq);
1009         }
1010 }
1011 
1012 static int __init cgroup_writeback_init(void)
1013 {
1014         isw_wq = alloc_workqueue("inode_switch_wbs", 0, 0);
1015         if (!isw_wq)
1016                 return -ENOMEM;
1017         return 0;
1018 }
1019 fs_initcall(cgroup_writeback_init);
1020 
1021 #else   /* CONFIG_CGROUP_WRITEBACK */
1022 
1023 static void bdi_down_write_wb_switch_rwsem(struct backing_dev_info *bdi) { }
1024 static void bdi_up_write_wb_switch_rwsem(struct backing_dev_info *bdi) { }
1025 
1026 static struct bdi_writeback *
1027 locked_inode_to_wb_and_lock_list(struct inode *inode)
1028         __releases(&inode->i_lock)
1029         __acquires(&wb->list_lock)
1030 {
1031         struct bdi_writeback *wb = inode_to_wb(inode);
1032 
1033         spin_unlock(&inode->i_lock);
1034         spin_lock(&wb->list_lock);
1035         return wb;
1036 }
1037 
1038 static struct bdi_writeback *inode_to_wb_and_lock_list(struct inode *inode)
1039         __acquires(&wb->list_lock)
1040 {
1041         struct bdi_writeback *wb = inode_to_wb(inode);
1042 
1043         spin_lock(&wb->list_lock);
1044         return wb;
1045 }
1046 
1047 static long wb_split_bdi_pages(struct bdi_writeback *wb, long nr_pages)
1048 {
1049         return nr_pages;
1050 }
1051 
1052 static void bdi_split_work_to_wbs(struct backing_dev_info *bdi,
1053                                   struct wb_writeback_work *base_work,
1054                                   bool skip_if_busy)
1055 {
1056         might_sleep();
1057 
1058         if (!skip_if_busy || !writeback_in_progress(&bdi->wb)) {
1059                 base_work->auto_free = 0;
1060                 wb_queue_work(&bdi->wb, base_work);
1061         }
1062 }
1063 
1064 #endif  /* CONFIG_CGROUP_WRITEBACK */
1065 
1066 /*
1067  * Add in the number of potentially dirty inodes, because each inode
1068  * write can dirty pagecache in the underlying blockdev.
1069  */
1070 static unsigned long get_nr_dirty_pages(void)
1071 {
1072         return global_node_page_state(NR_FILE_DIRTY) +
1073                 global_node_page_state(NR_UNSTABLE_NFS) +
1074                 get_nr_dirty_inodes();
1075 }
1076 
1077 static void wb_start_writeback(struct bdi_writeback *wb, enum wb_reason reason)
1078 {
1079         if (!wb_has_dirty_io(wb))
1080                 return;
1081 
1082         /*
1083          * All callers of this function want to start writeback of all
1084          * dirty pages. Places like vmscan can call this at a very
1085          * high frequency, causing pointless allocations of tons of
1086          * work items and keeping the flusher threads busy retrieving
1087          * that work. Ensure that we only allow one of them pending and
1088          * in flight at a time.
1089          */
1090         if (test_bit(WB_start_all, &wb->state) ||
1091             test_and_set_bit(WB_start_all, &wb->state))
1092                 return;
1093 
1094         wb->start_all_reason = reason;
1095         wb_wakeup(wb);
1096 }
1097 
1098 /**
1099  * wb_start_background_writeback - start background writeback
1100  * @wb: bdi_writeback to write from
1101  *
1102  * Description:
1103  *   This makes sure WB_SYNC_NONE background writeback happens. When
1104  *   this function returns, it is only guaranteed that for given wb
1105  *   some IO is happening if we are over background dirty threshold.
1106  *   Caller need not hold sb s_umount semaphore.
1107  */
1108 void wb_start_background_writeback(struct bdi_writeback *wb)
1109 {
1110         /*
1111          * We just wake up the flusher thread. It will perform background
1112          * writeback as soon as there is no other work to do.
1113          */
1114         trace_writeback_wake_background(wb);
1115         wb_wakeup(wb);
1116 }
1117 
1118 /*
1119  * Remove the inode from the writeback list it is on.
1120  */
1121 void inode_io_list_del(struct inode *inode)
1122 {
1123         struct bdi_writeback *wb;
1124 
1125         wb = inode_to_wb_and_lock_list(inode);
1126         inode_io_list_del_locked(inode, wb);
1127         spin_unlock(&wb->list_lock);
1128 }
1129 
1130 /*
1131  * mark an inode as under writeback on the sb
1132  */
1133 void sb_mark_inode_writeback(struct inode *inode)
1134 {
1135         struct super_block *sb = inode->i_sb;
1136         unsigned long flags;
1137 
1138         if (list_empty(&inode->i_wb_list)) {
1139                 spin_lock_irqsave(&sb->s_inode_wblist_lock, flags);
1140                 if (list_empty(&inode->i_wb_list)) {
1141                         list_add_tail(&inode->i_wb_list, &sb->s_inodes_wb);
1142                         trace_sb_mark_inode_writeback(inode);
1143                 }
1144                 spin_unlock_irqrestore(&sb->s_inode_wblist_lock, flags);
1145         }
1146 }
1147 
1148 /*
1149  * clear an inode as under writeback on the sb
1150  */
1151 void sb_clear_inode_writeback(struct inode *inode)
1152 {
1153         struct super_block *sb = inode->i_sb;
1154         unsigned long flags;
1155 
1156         if (!list_empty(&inode->i_wb_list)) {
1157                 spin_lock_irqsave(&sb->s_inode_wblist_lock, flags);
1158                 if (!list_empty(&inode->i_wb_list)) {
1159                         list_del_init(&inode->i_wb_list);
1160                         trace_sb_clear_inode_writeback(inode);
1161                 }
1162                 spin_unlock_irqrestore(&sb->s_inode_wblist_lock, flags);
1163         }
1164 }
1165 
1166 /*
1167  * Redirty an inode: set its when-it-was dirtied timestamp and move it to the
1168  * furthest end of its superblock's dirty-inode list.
1169  *
1170  * Before stamping the inode's ->dirtied_when, we check to see whether it is
1171  * already the most-recently-dirtied inode on the b_dirty list.  If that is
1172  * the case then the inode must have been redirtied while it was being written
1173  * out and we don't reset its dirtied_when.
1174  */
1175 static void redirty_tail(struct inode *inode, struct bdi_writeback *wb)
1176 {
1177         if (!list_empty(&wb->b_dirty)) {
1178                 struct inode *tail;
1179 
1180                 tail = wb_inode(wb->b_dirty.next);
1181                 if (time_before(inode->dirtied_when, tail->dirtied_when))
1182                         inode->dirtied_when = jiffies;
1183         }
1184         inode_io_list_move_locked(inode, wb, &wb->b_dirty);
1185 }
1186 
1187 /*
1188  * requeue inode for re-scanning after bdi->b_io list is exhausted.
1189  */
1190 static void requeue_io(struct inode *inode, struct bdi_writeback *wb)
1191 {
1192         inode_io_list_move_locked(inode, wb, &wb->b_more_io);
1193 }
1194 
1195 static void inode_sync_complete(struct inode *inode)
1196 {
1197         inode->i_state &= ~I_SYNC;
1198         /* If inode is clean and unused, put it into LRU now... */
1199         inode_add_lru(inode);
1200         /* Waiters must see I_SYNC cleared before being woken up */
1201         smp_mb();
1202         wake_up_bit(&inode->i_state, __I_SYNC);
1203 }
1204 
1205 static bool inode_dirtied_after(struct inode *inode, unsigned long t)
1206 {
1207         bool ret = time_after(inode->dirtied_when, t);
1208 #ifndef CONFIG_64BIT
1209         /*
1210          * For inodes being constantly redirtied, dirtied_when can get stuck.
1211          * It _appears_ to be in the future, but is actually in distant past.
1212          * This test is necessary to prevent such wrapped-around relative times
1213          * from permanently stopping the whole bdi writeback.
1214          */
1215         ret = ret && time_before_eq(inode->dirtied_when, jiffies);
1216 #endif
1217         return ret;
1218 }
1219 
1220 #define EXPIRE_DIRTY_ATIME 0x0001
1221 
1222 /*
1223  * Move expired (dirtied before work->older_than_this) dirty inodes from
1224  * @delaying_queue to @dispatch_queue.
1225  */
1226 static int move_expired_inodes(struct list_head *delaying_queue,
1227                                struct list_head *dispatch_queue,
1228                                int flags,
1229                                struct wb_writeback_work *work)
1230 {
1231         unsigned long *older_than_this = NULL;
1232         unsigned long expire_time;
1233         LIST_HEAD(tmp);
1234         struct list_head *pos, *node;
1235         struct super_block *sb = NULL;
1236         struct inode *inode;
1237         int do_sb_sort = 0;
1238         int moved = 0;
1239 
1240         if ((flags & EXPIRE_DIRTY_ATIME) == 0)
1241                 older_than_this = work->older_than_this;
1242         else if (!work->for_sync) {
1243                 expire_time = jiffies - (dirtytime_expire_interval * HZ);
1244                 older_than_this = &expire_time;
1245         }
1246         while (!list_empty(delaying_queue)) {
1247                 inode = wb_inode(delaying_queue->prev);
1248                 if (older_than_this &&
1249                     inode_dirtied_after(inode, *older_than_this))
1250                         break;
1251                 list_move(&inode->i_io_list, &tmp);
1252                 moved++;
1253                 if (flags & EXPIRE_DIRTY_ATIME)
1254                         set_bit(__I_DIRTY_TIME_EXPIRED, &inode->i_state);
1255                 if (sb_is_blkdev_sb(inode->i_sb))
1256                         continue;
1257                 if (sb && sb != inode->i_sb)
1258                         do_sb_sort = 1;
1259                 sb = inode->i_sb;
1260         }
1261 
1262         /* just one sb in list, splice to dispatch_queue and we're done */
1263         if (!do_sb_sort) {
1264                 list_splice(&tmp, dispatch_queue);
1265                 goto out;
1266         }
1267 
1268         /* Move inodes from one superblock together */
1269         while (!list_empty(&tmp)) {
1270                 sb = wb_inode(tmp.prev)->i_sb;
1271                 list_for_each_prev_safe(pos, node, &tmp) {
1272                         inode = wb_inode(pos);
1273                         if (inode->i_sb == sb)
1274                                 list_move(&inode->i_io_list, dispatch_queue);
1275                 }
1276         }
1277 out:
1278         return moved;
1279 }
1280 
1281 /*
1282  * Queue all expired dirty inodes for io, eldest first.
1283  * Before
1284  *         newly dirtied     b_dirty    b_io    b_more_io
1285  *         =============>    gf         edc     BA
1286  * After
1287  *         newly dirtied     b_dirty    b_io    b_more_io
1288  *         =============>    g          fBAedc
1289  *                                           |
1290  *                                           +--> dequeue for IO
1291  */
1292 static void queue_io(struct bdi_writeback *wb, struct wb_writeback_work *work)
1293 {
1294         int moved;
1295 
1296         assert_spin_locked(&wb->list_lock);
1297         list_splice_init(&wb->b_more_io, &wb->b_io);
1298         moved = move_expired_inodes(&wb->b_dirty, &wb->b_io, 0, work);
1299         moved += move_expired_inodes(&wb->b_dirty_time, &wb->b_io,
1300                                      EXPIRE_DIRTY_ATIME, work);
1301         if (moved)
1302                 wb_io_lists_populated(wb);
1303         trace_writeback_queue_io(wb, work, moved);
1304 }
1305 
1306 static int write_inode(struct inode *inode, struct writeback_control *wbc)
1307 {
1308         int ret;
1309 
1310         if (inode->i_sb->s_op->write_inode && !is_bad_inode(inode)) {
1311                 trace_writeback_write_inode_start(inode, wbc);
1312                 ret = inode->i_sb->s_op->write_inode(inode, wbc);
1313                 trace_writeback_write_inode(inode, wbc);
1314                 return ret;
1315         }
1316         return 0;
1317 }
1318 
1319 /*
1320  * Wait for writeback on an inode to complete. Called with i_lock held.
1321  * Caller must make sure inode cannot go away when we drop i_lock.
1322  */
1323 static void __inode_wait_for_writeback(struct inode *inode)
1324         __releases(inode->i_lock)
1325         __acquires(inode->i_lock)
1326 {
1327         DEFINE_WAIT_BIT(wq, &inode->i_state, __I_SYNC);
1328         wait_queue_head_t *wqh;
1329 
1330         wqh = bit_waitqueue(&inode->i_state, __I_SYNC);
1331         while (inode->i_state & I_SYNC) {
1332                 spin_unlock(&inode->i_lock);
1333                 __wait_on_bit(wqh, &wq, bit_wait,
1334                               TASK_UNINTERRUPTIBLE);
1335                 spin_lock(&inode->i_lock);
1336         }
1337 }
1338 
1339 /*
1340  * Wait for writeback on an inode to complete. Caller must have inode pinned.
1341  */
1342 void inode_wait_for_writeback(struct inode *inode)
1343 {
1344         spin_lock(&inode->i_lock);
1345         __inode_wait_for_writeback(inode);
1346         spin_unlock(&inode->i_lock);
1347 }
1348 
1349 /*
1350  * Sleep until I_SYNC is cleared. This function must be called with i_lock
1351  * held and drops it. It is intended for callers not holding any inode
1352  * reference, so once i_lock is dropped, the inode can go away.
1353  */
1354 static void inode_sleep_on_writeback(struct inode *inode)
1355         __releases(inode->i_lock)
1356 {
1357         DEFINE_WAIT(wait);
1358         wait_queue_head_t *wqh = bit_waitqueue(&inode->i_state, __I_SYNC);
1359         int sleep;
1360 
1361         prepare_to_wait(wqh, &wait, TASK_UNINTERRUPTIBLE);
1362         sleep = inode->i_state & I_SYNC;
1363         spin_unlock(&inode->i_lock);
1364         if (sleep)
1365                 schedule();
1366         finish_wait(wqh, &wait);
1367 }
1368 
1369 /*
1370  * Find proper writeback list for the inode depending on its current state and
1371  * possibly also a change of its state while we were doing writeback.  Here we
1372  * handle things such as livelock prevention or fairness of writeback among
1373  * inodes. This function can be called only by the flusher thread - no one
1374  * else processes all inodes in the writeback lists, and requeueing inodes
1375  * behind the flusher thread's back can have unexpected consequences.
1376  */
1377 static void requeue_inode(struct inode *inode, struct bdi_writeback *wb,
1378                           struct writeback_control *wbc)
1379 {
1380         if (inode->i_state & I_FREEING)
1381                 return;
1382 
1383         /*
1384          * Sync livelock prevention. Each inode is tagged and synced in one
1385          * shot. If still dirty, it will be redirty_tail()'ed below.  Update
1386          * the dirty time to prevent enqueue and sync it again.
1387          */
1388         if ((inode->i_state & I_DIRTY) &&
1389             (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages))
1390                 inode->dirtied_when = jiffies;
1391 
1392         if (wbc->pages_skipped) {
1393                 /*
1394                  * writeback is not making progress due to locked
1395                  * buffers. Skip this inode for now.
1396                  */
1397                 redirty_tail(inode, wb);
1398                 return;
1399         }
1400 
1401         if (mapping_tagged(inode->i_mapping, PAGECACHE_TAG_DIRTY)) {
1402                 /*
1403                  * We didn't write back all the pages.  nfs_writepages()
1404          * sometimes bails out without doing anything.
1405                  */
1406                 if (wbc->nr_to_write <= 0) {
1407                         /* Slice used up. Queue for next turn. */
1408                         requeue_io(inode, wb);
1409                 } else {
1410                         /*
1411                          * Writeback blocked by something other than
1412                          * congestion. Delay the inode for some time to
1413                          * avoid spinning on the CPU (100% iowait)
1414                          * retrying writeback of the dirty page/inode
1415                          * that cannot be performed immediately.
1416                          */
1417                         redirty_tail(inode, wb);
1418                 }
1419         } else if (inode->i_state & I_DIRTY) {
1420                 /*
1421                  * Filesystems can dirty the inode during writeback operations,
1422                  * such as delayed allocation during submission or metadata
1423                  * updates after data IO completion.
1424                  */
1425                 redirty_tail(inode, wb);
1426         } else if (inode->i_state & I_DIRTY_TIME) {
1427                 inode->dirtied_when = jiffies;
1428                 inode_io_list_move_locked(inode, wb, &wb->b_dirty_time);
1429         } else {
1430                 /* The inode is clean. Remove from writeback lists. */
1431                 inode_io_list_del_locked(inode, wb);
1432         }
1433 }
1434 
1435 /*
1436  * Write out an inode and its dirty pages. Do not update the writeback list
1437  * linkage. That is left to the caller. The caller is also responsible for
1438  * setting I_SYNC flag and calling inode_sync_complete() to clear it.
1439  */
1440 static int
1441 __writeback_single_inode(struct inode *inode, struct writeback_control *wbc)
1442 {
1443         struct address_space *mapping = inode->i_mapping;
1444         long nr_to_write = wbc->nr_to_write;
1445         unsigned dirty;
1446         int ret;
1447 
1448         WARN_ON(!(inode->i_state & I_SYNC));
1449 
1450         trace_writeback_single_inode_start(inode, wbc, nr_to_write);
1451 
1452         ret = do_writepages(mapping, wbc);
1453 
1454         /*
1455          * Make sure to wait on the data before writing out the metadata.
1456          * This is important for filesystems that modify metadata on data
1457          * I/O completion. We don't do it for sync(2) writeback because it has a
1458          * separate, external IO completion path and ->sync_fs for guaranteeing
1459          * inode metadata is written back correctly.
1460          */
1461         if (wbc->sync_mode == WB_SYNC_ALL && !wbc->for_sync) {
1462                 int err = filemap_fdatawait(mapping);
1463                 if (ret == 0)
1464                         ret = err;
1465         }
1466 
1467         /*
1468          * Some filesystems may redirty the inode during the writeback
1469          * due to delalloc, clear dirty metadata flags right before
1470          * write_inode()
1471          */
1472         spin_lock(&inode->i_lock);
1473 
1474         dirty = inode->i_state & I_DIRTY;
1475         if (inode->i_state & I_DIRTY_TIME) {
1476                 if ((dirty & I_DIRTY_INODE) ||
1477                     wbc->sync_mode == WB_SYNC_ALL ||
1478                     unlikely(inode->i_state & I_DIRTY_TIME_EXPIRED) ||
1479                     unlikely(time_after(jiffies,
1480                                         (inode->dirtied_time_when +
1481                                          dirtytime_expire_interval * HZ)))) {
1482                         dirty |= I_DIRTY_TIME | I_DIRTY_TIME_EXPIRED;
1483                         trace_writeback_lazytime(inode);
1484                 }
1485         } else
1486                 inode->i_state &= ~I_DIRTY_TIME_EXPIRED;
1487         inode->i_state &= ~dirty;
1488 
1489         /*
1490          * Paired with smp_mb() in __mark_inode_dirty().  This allows
1491          * __mark_inode_dirty() to test i_state without grabbing i_lock -
1492          * either they see the I_DIRTY bits cleared or we see the dirtied
1493          * inode.
1494          *
1495          * I_DIRTY_PAGES is always cleared together above even if @mapping
1496          * still has dirty pages.  The flag is reinstated after smp_mb() if
1497          * necessary.  This guarantees that either __mark_inode_dirty()
1498          * sees clear I_DIRTY_PAGES or we see PAGECACHE_TAG_DIRTY.
1499          */
1500         smp_mb();
1501 
1502         if (mapping_tagged(mapping, PAGECACHE_TAG_DIRTY))
1503                 inode->i_state |= I_DIRTY_PAGES;
1504 
1505         spin_unlock(&inode->i_lock);
1506 
1507         if (dirty & I_DIRTY_TIME)
1508                 mark_inode_dirty_sync(inode);
1509         /* Don't write the inode if only I_DIRTY_PAGES was set */
1510         if (dirty & ~I_DIRTY_PAGES) {
1511                 int err = write_inode(inode, wbc);
1512                 if (ret == 0)
1513                         ret = err;
1514         }
1515         trace_writeback_single_inode(inode, wbc, nr_to_write);
1516         return ret;
1517 }
1518 
1519 /*
1520  * Write out an inode's dirty pages. Either the caller has an active reference
1521  * on the inode or the inode has I_WILL_FREE set.
1522  *
1523  * This function is designed to be called for writing back one inode which
1524  * we get e.g. from the filesystem. The flusher thread uses __writeback_single_inode()
1525  * and does more profound writeback list handling in writeback_sb_inodes().
1526  */
1527 static int writeback_single_inode(struct inode *inode,
1528                                   struct writeback_control *wbc)
1529 {
1530         struct bdi_writeback *wb;
1531         int ret = 0;
1532 
1533         spin_lock(&inode->i_lock);
1534         if (!atomic_read(&inode->i_count))
1535                 WARN_ON(!(inode->i_state & (I_WILL_FREE|I_FREEING)));
1536         else
1537                 WARN_ON(inode->i_state & I_WILL_FREE);
1538 
1539         if (inode->i_state & I_SYNC) {
1540                 if (wbc->sync_mode != WB_SYNC_ALL)
1541                         goto out;
1542                 /*
1543                  * It's a data-integrity sync. We must wait. Since callers hold
1544                  * inode reference or inode has I_WILL_FREE set, it cannot go
1545                  * away under us.
1546                  */
1547                 __inode_wait_for_writeback(inode);
1548         }
1549         WARN_ON(inode->i_state & I_SYNC);
1550         /*
1551          * Skip inode if it is clean and we have no outstanding writeback in
1552          * WB_SYNC_ALL mode. We don't want to mess with writeback lists in this
1553          * function since the flusher thread may, for example, be doing a sync in
1554          * parallel and if we move the inode, it could get skipped. So here we
1555          * make sure the inode is on some writeback list and leave it there unless
1556          * we have completely cleaned the inode.
1557          */
1558         if (!(inode->i_state & I_DIRTY_ALL) &&
1559             (wbc->sync_mode != WB_SYNC_ALL ||
1560              !mapping_tagged(inode->i_mapping, PAGECACHE_TAG_WRITEBACK)))
1561                 goto out;
1562         inode->i_state |= I_SYNC;
1563         wbc_attach_and_unlock_inode(wbc, inode);
1564 
1565         ret = __writeback_single_inode(inode, wbc);
1566 
1567         wbc_detach_inode(wbc);
1568 
1569         wb = inode_to_wb_and_lock_list(inode);
1570         spin_lock(&inode->i_lock);
1571         /*
1572          * If inode is clean, remove it from writeback lists. Otherwise don't
1573          * touch it. See comment above for explanation.
1574          */
1575         if (!(inode->i_state & I_DIRTY_ALL))
1576                 inode_io_list_del_locked(inode, wb);
1577         spin_unlock(&wb->list_lock);
1578         inode_sync_complete(inode);
1579 out:
1580         spin_unlock(&inode->i_lock);
1581         return ret;
1582 }
1583 
1584 static long writeback_chunk_size(struct bdi_writeback *wb,
1585                                  struct wb_writeback_work *work)
1586 {
1587         long pages;
1588 
1589         /*
1590          * WB_SYNC_ALL mode does livelock avoidance by syncing dirty
1591          * inodes/pages in one big loop. Setting wbc.nr_to_write=LONG_MAX
1592          * here avoids calling into writeback_inodes_wb() more than once.
1593          *
1594          * The intended call sequence for WB_SYNC_ALL writeback is:
1595          *
1596          *      wb_writeback()
1597          *          writeback_sb_inodes()       <== called only once
1598          *              write_cache_pages()     <== called once for each inode
1599          *                   (quickly) tag currently dirty pages
1600          *                   (maybe slowly) sync all tagged pages
1601          */
1602         if (work->sync_mode == WB_SYNC_ALL || work->tagged_writepages)
1603                 pages = LONG_MAX;
1604         else {
1605                 pages = min(wb->avg_write_bandwidth / 2,
1606                             global_wb_domain.dirty_limit / DIRTY_SCOPE);
1607                 pages = min(pages, work->nr_pages);
1608                 pages = round_down(pages + MIN_WRITEBACK_PAGES,
1609                                    MIN_WRITEBACK_PAGES);
1610         }
1611 
1612         return pages;
1613 }
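/*
 * Worked example (editorial sketch with hypothetical numbers, not part of the
 * original source): with 4 KiB pages MIN_WRITEBACK_PAGES evaluates to 1024
 * pages (4 MiB).  If wb->avg_write_bandwidth / 2 comes to 12800 pages,
 * global_wb_domain.dirty_limit / DIRTY_SCOPE to 8192 pages, and
 * work->nr_pages is still large, then:
 *
 *      pages = min(12800, 8192)              = 8192
 *      pages = min(8192, work->nr_pages)     = 8192
 *      pages = round_down(8192 + 1024, 1024) = 9216
 *
 * i.e. a WB_SYNC_NONE chunk of 36 MiB.  The "+ MIN_WRITEBACK_PAGES" before
 * round_down() keeps the result a multiple of the minimum chunk and never
 * smaller than it.
 */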
1614 
1615 /*
1616  * Write a portion of b_io inodes which belong to @sb.
1617  *
1618  * Return the number of pages and/or inodes written.
1619  *
1620  * NOTE! This is called with wb->list_lock held, and will
1621  * unlock and relock that for each inode it ends up doing
1622  * IO for.
1623  */
1624 static long writeback_sb_inodes(struct super_block *sb,
1625                                 struct bdi_writeback *wb,
1626                                 struct wb_writeback_work *work)
1627 {
1628         struct writeback_control wbc = {
1629                 .sync_mode              = work->sync_mode,
1630                 .tagged_writepages      = work->tagged_writepages,
1631                 .for_kupdate            = work->for_kupdate,
1632                 .for_background         = work->for_background,
1633                 .for_sync               = work->for_sync,
1634                 .range_cyclic           = work->range_cyclic,
1635                 .range_start            = 0,
1636                 .range_end              = LLONG_MAX,
1637         };
1638         unsigned long start_time = jiffies;
1639         long write_chunk;
1640         long wrote = 0;  /* count both pages and inodes */
1641 
1642         while (!list_empty(&wb->b_io)) {
1643                 struct inode *inode = wb_inode(wb->b_io.prev);
1644                 struct bdi_writeback *tmp_wb;
1645 
1646                 if (inode->i_sb != sb) {
1647                         if (work->sb) {
1648                                 /*
1649                                  * We only want to write back data for this
1650                                  * superblock, move all inodes not belonging
1651                                  * to it back onto the dirty list.
1652                                  */
1653                                 redirty_tail(inode, wb);
1654                                 continue;
1655                         }
1656 
1657                         /*
1658                          * The inode belongs to a different superblock.
1659                          * Bounce back to the caller to unpin this and
1660                          * pin the next superblock.
1661                          */
1662                         break;
1663                 }
1664 
1665                 /*
1666                  * Don't bother with new inodes or inodes being freed; the first
1667                  * kind does not need periodic writeout yet, and for the latter
1668                  * kind, writeout is handled by the freer.
1669                  */
1670                 spin_lock(&inode->i_lock);
1671                 if (inode->i_state & (I_NEW | I_FREEING | I_WILL_FREE)) {
1672                         spin_unlock(&inode->i_lock);
1673                         redirty_tail(inode, wb);
1674                         continue;
1675                 }
1676                 if ((inode->i_state & I_SYNC) && wbc.sync_mode != WB_SYNC_ALL) {
1677                         /*
1678                          * If this inode is locked for writeback and we are not
1679                          * doing writeback-for-data-integrity, move it to
1680                          * b_more_io so that writeback can proceed with the
1681                          * other inodes on s_io.
1682                          *
1683                          * We'll have another go at writing back this inode
1684                          * when we completed a full scan of b_io.
1685                          */
1686                         spin_unlock(&inode->i_lock);
1687                         requeue_io(inode, wb);
1688                         trace_writeback_sb_inodes_requeue(inode);
1689                         continue;
1690                 }
1691                 spin_unlock(&wb->list_lock);
1692 
1693                 /*
1694                  * We already requeued the inode if it had I_SYNC set and we
1695                  * are doing WB_SYNC_NONE writeback. So this catches only the
1696                  * WB_SYNC_ALL case.
1697                  */
1698                 if (inode->i_state & I_SYNC) {
1699                         /* Wait for I_SYNC. This function drops i_lock... */
1700                         inode_sleep_on_writeback(inode);
1701                         /* Inode may be gone, start again */
1702                         spin_lock(&wb->list_lock);
1703                         continue;
1704                 }
1705                 inode->i_state |= I_SYNC;
1706                 wbc_attach_and_unlock_inode(&wbc, inode);
1707 
1708                 write_chunk = writeback_chunk_size(wb, work);
1709                 wbc.nr_to_write = write_chunk;
1710                 wbc.pages_skipped = 0;
1711 
1712                 /*
1713                  * We use I_SYNC to pin the inode in memory. While it is set
1714                  * evict_inode() will wait so the inode cannot be freed.
1715                  */
1716                 __writeback_single_inode(inode, &wbc);
1717 
1718                 wbc_detach_inode(&wbc);
1719                 work->nr_pages -= write_chunk - wbc.nr_to_write;
1720                 wrote += write_chunk - wbc.nr_to_write;
1721 
1722                 if (need_resched()) {
1723                         /*
1724                          * We're trying to balance between building up a nice
1725                          * long list of IOs to improve our merge rate, and
1726                          * getting those IOs out quickly for anyone throttling
1727                          * in balance_dirty_pages().  cond_resched() doesn't
1728                          * unplug, so get our IOs out the door before we
1729                          * give up the CPU.
1730                          */
1731                         blk_flush_plug(current);
1732                         cond_resched();
1733                 }
1734 
1735                 /*
1736                  * Requeue @inode if still dirty.  Be careful as @inode may
1737                  * have been switched to another wb in the meantime.
1738                  */
1739                 tmp_wb = inode_to_wb_and_lock_list(inode);
1740                 spin_lock(&inode->i_lock);
1741                 if (!(inode->i_state & I_DIRTY_ALL))
1742                         wrote++;
1743                 requeue_inode(inode, tmp_wb, &wbc);
1744                 inode_sync_complete(inode);
1745                 spin_unlock(&inode->i_lock);
1746 
1747                 if (unlikely(tmp_wb != wb)) {
1748                         spin_unlock(&tmp_wb->list_lock);
1749                         spin_lock(&wb->list_lock);
1750                 }
1751 
1752                 /*
1753                  * bail out to wb_writeback() often enough to check
1754                  * background threshold and other termination conditions.
1755                  */
1756                 if (wrote) {
1757                         if (time_is_before_jiffies(start_time + HZ / 10UL))
1758                                 break;
1759                         if (work->nr_pages <= 0)
1760                                 break;
1761                 }
1762         }
1763         return wrote;
1764 }
1765 
1766 static long __writeback_inodes_wb(struct bdi_writeback *wb,
1767                                   struct wb_writeback_work *work)
1768 {
1769         unsigned long start_time = jiffies;
1770         long wrote = 0;
1771 
1772         while (!list_empty(&wb->b_io)) {
1773                 struct inode *inode = wb_inode(wb->b_io.prev);
1774                 struct super_block *sb = inode->i_sb;
1775 
1776                 if (!trylock_super(sb)) {
1777                         /*
1778                          * trylock_super() may fail consistently due to
1779                          * s_umount being grabbed by someone else. Don't use
1780                          * requeue_io() to avoid busy retrying the inode/sb.
1781                          */
1782                         redirty_tail(inode, wb);
1783                         continue;
1784                 }
1785                 wrote += writeback_sb_inodes(sb, wb, work);
1786                 up_read(&sb->s_umount);
1787 
1788                 /* refer to the same tests at the end of writeback_sb_inodes */
1789                 if (wrote) {
1790                         if (time_is_before_jiffies(start_time + HZ / 10UL))
1791                                 break;
1792                         if (work->nr_pages <= 0)
1793                                 break;
1794                 }
1795         }
1796         /* Leave any unwritten inodes on b_io */
1797         return wrote;
1798 }
1799 
1800 static long writeback_inodes_wb(struct bdi_writeback *wb, long nr_pages,
1801                                 enum wb_reason reason)
1802 {
1803         struct wb_writeback_work work = {
1804                 .nr_pages       = nr_pages,
1805                 .sync_mode      = WB_SYNC_NONE,
1806                 .range_cyclic   = 1,
1807                 .reason         = reason,
1808         };
1809         struct blk_plug plug;
1810 
1811         blk_start_plug(&plug);
1812         spin_lock(&wb->list_lock);
1813         if (list_empty(&wb->b_io))
1814                 queue_io(wb, &work);
1815         __writeback_inodes_wb(wb, &work);
1816         spin_unlock(&wb->list_lock);
1817         blk_finish_plug(&plug);
1818 
1819         return nr_pages - work.nr_pages;
1820 }
1821 
1822 /*
1823  * Explicit flushing or periodic writeback of "old" data.
1824  *
1825  * Define "old": the first time one of an inode's pages is dirtied, we mark the
1826  * dirtying-time in the inode's address_space.  So this periodic writeback code
1827  * just walks the superblock inode list, writing back any inodes which are
1828  * older than a specific point in time.
1829  *
1830  * Try to run once per dirty_writeback_interval.  But if a writeback event
1831  * takes longer than a dirty_writeback_interval, then leave a
1832  * one-second gap.
1833  *
1834  * older_than_this takes precedence over nr_to_write.  So we'll only write back
1835  * all dirty pages if they are all attached to "old" mappings.
1836  */
1837 static long wb_writeback(struct bdi_writeback *wb,
1838                          struct wb_writeback_work *work)
1839 {
1840         unsigned long wb_start = jiffies;
1841         long nr_pages = work->nr_pages;
1842         unsigned long oldest_jif;
1843         struct inode *inode;
1844         long progress;
1845         struct blk_plug plug;
1846 
1847         oldest_jif = jiffies;
1848         work->older_than_this = &oldest_jif;
1849 
1850         blk_start_plug(&plug);
1851         spin_lock(&wb->list_lock);
1852         for (;;) {
1853                 /*
1854                  * Stop writeback when nr_pages has been consumed
1855                  */
1856                 if (work->nr_pages <= 0)
1857                         break;
1858 
1859                 /*
1860                  * Background writeout and kupdate-style writeback may
1861                  * run forever. Stop them if there is other work to do
1862                  * so that e.g. sync can proceed. They'll be restarted
1863                  * after the other works are all done.
1864                  */
1865                 if ((work->for_background || work->for_kupdate) &&
1866                     !list_empty(&wb->work_list))
1867                         break;
1868 
1869                 /*
1870                  * For background writeout, stop when we are below the
1871                  * background dirty threshold
1872                  */
1873                 if (work->for_background && !wb_over_bg_thresh(wb))
1874                         break;
1875 
1876                 /*
1877                  * Kupdate and background works are special and we want to
1878                  * include all inodes that need writing. Livelock avoidance is
1879                  * handled by these works yielding to any other work so we are
1880                  * safe.
1881                  */
1882                 if (work->for_kupdate) {
1883                         oldest_jif = jiffies -
1884                                 msecs_to_jiffies(dirty_expire_interval * 10);
1885                 } else if (work->for_background)
1886                         oldest_jif = jiffies;
1887 
1888                 trace_writeback_start(wb, work);
1889                 if (list_empty(&wb->b_io))
1890                         queue_io(wb, work);
1891                 if (work->sb)
1892                         progress = writeback_sb_inodes(work->sb, wb, work);
1893                 else
1894                         progress = __writeback_inodes_wb(wb, work);
1895                 trace_writeback_written(wb, work);
1896 
1897                 wb_update_bandwidth(wb, wb_start);
1898 
1899                 /*
1900                  * Did we write something? Try for more
1901                  *
1902                  * Dirty inodes are moved to b_io for writeback in batches.
1903                  * The completion of the current batch does not necessarily
1904                  * mean the overall work is done. So we keep looping as long
1905          * as we made some progress on cleaning pages or inodes.
1906                  */
1907                 if (progress)
1908                         continue;
1909                 /*
1910                  * No more inodes for IO, bail
1911                  */
1912                 if (list_empty(&wb->b_more_io))
1913                         break;
1914                 /*
1915                  * Nothing written. Wait for some inode to
1916                  * become available for writeback. Otherwise
1917                  * we'll just busyloop.
1918                  */
1919                 trace_writeback_wait(wb, work);
1920                 inode = wb_inode(wb->b_more_io.prev);
1921                 spin_lock(&inode->i_lock);
1922                 spin_unlock(&wb->list_lock);
1923                 /* This function drops i_lock... */
1924                 inode_sleep_on_writeback(inode);
1925                 spin_lock(&wb->list_lock);
1926         }
1927         spin_unlock(&wb->list_lock);
1928         blk_finish_plug(&plug);
1929 
1930         return nr_pages - work->nr_pages;
1931 }
1932 
1933 /*
1934  * Return the next wb_writeback_work struct that hasn't been processed yet.
1935  */
1936 static struct wb_writeback_work *get_next_work_item(struct bdi_writeback *wb)
1937 {
1938         struct wb_writeback_work *work = NULL;
1939 
1940         spin_lock_bh(&wb->work_lock);
1941         if (!list_empty(&wb->work_list)) {
1942                 work = list_entry(wb->work_list.next,
1943                                   struct wb_writeback_work, list);
1944                 list_del_init(&work->list);
1945         }
1946         spin_unlock_bh(&wb->work_lock);
1947         return work;
1948 }
1949 
1950 static long wb_check_background_flush(struct bdi_writeback *wb)
1951 {
1952         if (wb_over_bg_thresh(wb)) {
1953 
1954                 struct wb_writeback_work work = {
1955                         .nr_pages       = LONG_MAX,
1956                         .sync_mode      = WB_SYNC_NONE,
1957                         .for_background = 1,
1958                         .range_cyclic   = 1,
1959                         .reason         = WB_REASON_BACKGROUND,
1960                 };
1961 
1962                 return wb_writeback(wb, &work);
1963         }
1964 
1965         return 0;
1966 }
1967 
1968 static long wb_check_old_data_flush(struct bdi_writeback *wb)
1969 {
1970         unsigned long expired;
1971         long nr_pages;
1972 
1973         /*
1974          * When set to zero, disable periodic writeback
1975          */
1976         if (!dirty_writeback_interval)
1977                 return 0;
1978 
1979         expired = wb->last_old_flush +
1980                         msecs_to_jiffies(dirty_writeback_interval * 10);
1981         if (time_before(jiffies, expired))
1982                 return 0;
1983 
1984         wb->last_old_flush = jiffies;
1985         nr_pages = get_nr_dirty_pages();
1986 
1987         if (nr_pages) {
1988                 struct wb_writeback_work work = {
1989                         .nr_pages       = nr_pages,
1990                         .sync_mode      = WB_SYNC_NONE,
1991                         .for_kupdate    = 1,
1992                         .range_cyclic   = 1,
1993                         .reason         = WB_REASON_PERIODIC,
1994                 };
1995 
1996                 return wb_writeback(wb, &work);
1997         }
1998 
1999         return 0;
2000 }
2001 
2002 static long wb_check_start_all(struct bdi_writeback *wb)
2003 {
2004         long nr_pages;
2005 
2006         if (!test_bit(WB_start_all, &wb->state))
2007                 return 0;
2008 
2009         nr_pages = get_nr_dirty_pages();
2010         if (nr_pages) {
2011                 struct wb_writeback_work work = {
2012                         .nr_pages       = wb_split_bdi_pages(wb, nr_pages),
2013                         .sync_mode      = WB_SYNC_NONE,
2014                         .range_cyclic   = 1,
2015                         .reason         = wb->start_all_reason,
2016                 };
2017 
2018                 nr_pages = wb_writeback(wb, &work);
2019         }
2020 
2021         clear_bit(WB_start_all, &wb->state);
2022         return nr_pages;
2023 }
2024 
2025 
2026 /*
2027  * Retrieve work items and do the writeback they describe
2028  */
2029 static long wb_do_writeback(struct bdi_writeback *wb)
2030 {
2031         struct wb_writeback_work *work;
2032         long wrote = 0;
2033 
2034         set_bit(WB_writeback_running, &wb->state);
2035         while ((work = get_next_work_item(wb)) != NULL) {
2036                 trace_writeback_exec(wb, work);
2037                 wrote += wb_writeback(wb, work);
2038                 finish_writeback_work(wb, work);
2039         }
2040 
2041         /*
2042          * Check for a flush-everything request
2043          */
2044         wrote += wb_check_start_all(wb);
2045 
2046         /*
2047          * Check for periodic writeback, kupdated() style
2048          */
2049         wrote += wb_check_old_data_flush(wb);
2050         wrote += wb_check_background_flush(wb);
2051         clear_bit(WB_writeback_running, &wb->state);
2052 
2053         return wrote;
2054 }
2055 
2056 /*
2057  * Handle writeback of dirty data for the device backed by this bdi. Also
2058  * reschedules periodically and does kupdated style flushing.
2059  */
2060 void wb_workfn(struct work_struct *work)
2061 {
2062         struct bdi_writeback *wb = container_of(to_delayed_work(work),
2063                                                 struct bdi_writeback, dwork);
2064         long pages_written;
2065 
2066         set_worker_desc("flush-%s", bdi_dev_name(wb->bdi));
2067         current->flags |= PF_SWAPWRITE;
2068 
2069         if (likely(!current_is_workqueue_rescuer() ||
2070                    !test_bit(WB_registered, &wb->state))) {
2071                 /*
2072                  * The normal path.  Keep writing back @wb until its
2073                  * work_list is empty.  Note that this path is also taken
2074                  * if @wb is shutting down even when we're running off the
2075                  * rescuer as work_list needs to be drained.
2076                  */
2077                 do {
2078                         pages_written = wb_do_writeback(wb);
2079                         trace_writeback_pages_written(pages_written);
2080                 } while (!list_empty(&wb->work_list));
2081         } else {
2082                 /*
2083                  * bdi_wq can't get enough workers and we're running off
2084                  * the emergency worker.  Don't hog it.  Hopefully, 1024 is
2085                  * enough for efficient IO.
2086                  */
2087                 pages_written = writeback_inodes_wb(wb, 1024,
2088                                                     WB_REASON_FORKER_THREAD);
2089                 trace_writeback_pages_written(pages_written);
2090         }
2091 
2092         if (!list_empty(&wb->work_list))
2093                 wb_wakeup(wb);
2094         else if (wb_has_dirty_io(wb) && dirty_writeback_interval)
2095                 wb_wakeup_delayed(wb);
2096 
2097         current->flags &= ~PF_SWAPWRITE;
2098 }
2099 
2100 /*
2101  * Start writeback of `nr_pages' pages on this bdi. If `nr_pages' is zero,
2102  * write back the whole world.
2103  */
2104 static void __wakeup_flusher_threads_bdi(struct backing_dev_info *bdi,
2105                                          enum wb_reason reason)
2106 {
2107         struct bdi_writeback *wb;
2108 
2109         if (!bdi_has_dirty_io(bdi))
2110                 return;
2111 
2112         list_for_each_entry_rcu(wb, &bdi->wb_list, bdi_node)
2113                 wb_start_writeback(wb, reason);
2114 }
2115 
2116 void wakeup_flusher_threads_bdi(struct backing_dev_info *bdi,
2117                                 enum wb_reason reason)
2118 {
2119         rcu_read_lock();
2120         __wakeup_flusher_threads_bdi(bdi, reason);
2121         rcu_read_unlock();
2122 }
2123 
2124 /*
2125  * Wakeup the flusher threads to start writeback of all currently dirty pages
2126  */
2127 void wakeup_flusher_threads(enum wb_reason reason)
2128 {
2129         struct backing_dev_info *bdi;
2130 
2131         /*
2132          * If we are expecting writeback progress we must submit plugged IO.
2133          */
2134         if (blk_needs_flush_plug(current))
2135                 blk_schedule_flush_plug(current);
2136 
2137         rcu_read_lock();
2138         list_for_each_entry_rcu(bdi, &bdi_list, bdi_list)
2139                 __wakeup_flusher_threads_bdi(bdi, reason);
2140         rcu_read_unlock();
2141 }
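/*
 * Illustrative caller sketch (editorial, not part of this file): a subsystem
 * that wants all dirty data pushed out asynchronously only needs a reason
 * code; the example_kick_global_writeback() helper below is hypothetical.
 */
static void example_kick_global_writeback(void)
{
        /* Fire-and-forget: queues work on every bdi, does not wait for IO. */
        wakeup_flusher_threads(WB_REASON_VMSCAN);
}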
2142 
2143 /*
2144  * Wake up bdi's periodically to make sure dirtytime inodes get
2145  * written back periodically.  We deliberately do *not* check the
2146  * b_dirtytime list in wb_has_dirty_io(), since this would cause the
2147  * kernel to be constantly waking up once there are any dirtytime
2148  * inodes on the system.  So instead we define a separate delayed work
2149  * function which gets called much more rarely.  (By default, only
2150  * once every 12 hours.)
2151  *
2152  * If there is any other write activity going on in the file system,
2153  * this function won't be necessary.  But if the only thing that has
2154  * happened on the file system is a dirtytime inode caused by an atime
2155  * update, we need this infrastructure below to make sure that inode
2156  * eventually gets pushed out to disk.
2157  */
2158 static void wakeup_dirtytime_writeback(struct work_struct *w);
2159 static DECLARE_DELAYED_WORK(dirtytime_work, wakeup_dirtytime_writeback);
2160 
2161 static void wakeup_dirtytime_writeback(struct work_struct *w)
2162 {
2163         struct backing_dev_info *bdi;
2164 
2165         rcu_read_lock();
2166         list_for_each_entry_rcu(bdi, &bdi_list, bdi_list) {
2167                 struct bdi_writeback *wb;
2168 
2169                 list_for_each_entry_rcu(wb, &bdi->wb_list, bdi_node)
2170                         if (!list_empty(&wb->b_dirty_time))
2171                                 wb_wakeup(wb);
2172         }
2173         rcu_read_unlock();
2174         schedule_delayed_work(&dirtytime_work, dirtytime_expire_interval * HZ);
2175 }
2176 
2177 static int __init start_dirtytime_writeback(void)
2178 {
2179         schedule_delayed_work(&dirtytime_work, dirtytime_expire_interval * HZ);
2180         return 0;
2181 }
2182 __initcall(start_dirtytime_writeback);
2183 
2184 int dirtytime_interval_handler(struct ctl_table *table, int write,
2185                                void __user *buffer, size_t *lenp, loff_t *ppos)
2186 {
2187         int ret;
2188 
2189         ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
2190         if (ret == 0 && write)
2191                 mod_delayed_work(system_wq, &dirtytime_work, 0);
2192         return ret;
2193 }
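/*
 * Usage note (editorial, not part of this file): this handler is believed to
 * back the vm.dirtytime_expire_seconds sysctl, which stores into
 * dirtytime_expire_interval; writing the knob (for instance
 * "sysctl -w vm.dirtytime_expire_seconds=3600") immediately reschedules
 * dirtytime_work, so a shorter interval takes effect without waiting out the
 * previous (up to 12 hour) delay.
 */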
2194 
2195 static noinline void block_dump___mark_inode_dirty(struct inode *inode)
2196 {
2197         if (inode->i_ino || strcmp(inode->i_sb->s_id, "bdev")) {
2198                 struct dentry *dentry;
2199                 const char *name = "?";
2200 
2201                 dentry = d_find_alias(inode);
2202                 if (dentry) {
2203                         spin_lock(&dentry->d_lock);
2204                         name = (const char *) dentry->d_name.name;
2205                 }
2206                 printk(KERN_DEBUG
2207                        "%s(%d): dirtied inode %lu (%s) on %s\n",
2208                        current->comm, task_pid_nr(current), inode->i_ino,
2209                        name, inode->i_sb->s_id);
2210                 if (dentry) {
2211                         spin_unlock(&dentry->d_lock);
2212                         dput(dentry);
2213                 }
2214         }
2215 }
2216 
2217 /**
2218  * __mark_inode_dirty - internal function
2219  *
2220  * @inode: inode to mark
2221  * @flags: what kind of dirty (e.g. I_DIRTY_SYNC)
2222  *
2223  * Mark an inode as dirty. Callers should use mark_inode_dirty or
2224  * mark_inode_dirty_sync.
2225  *
2226  * Put the inode on the super block's dirty list.
2227  *
2228  * CAREFUL! We mark it dirty unconditionally, but move it onto the
2229  * dirty list only if it is hashed or if it refers to a blockdev.
2230  * If it was not hashed, it will never be added to the dirty list
2231  * even if it is later hashed, as it will have been marked dirty already.
2232  *
2233  * In short, make sure you hash any inodes _before_ you start marking
2234  * them dirty.
2235  *
2236  * Note that for blockdevs, inode->dirtied_when represents the dirtying time of
2237  * the block-special inode (/dev/hda1) itself.  And the ->dirtied_when field of
2238  * the kernel-internal blockdev inode represents the dirtying time of the
2239  * blockdev's pages.  This is why for I_DIRTY_PAGES we always use
2240  * page->mapping->host, so the page-dirtying time is recorded in the internal
2241  * blockdev inode.
2242  */
2243 void __mark_inode_dirty(struct inode *inode, int flags)
2244 {
2245         struct super_block *sb = inode->i_sb;
2246         int dirtytime;
2247 
2248         trace_writeback_mark_inode_dirty(inode, flags);
2249 
2250         /*
2251          * Don't do this for I_DIRTY_PAGES - that doesn't actually
2252          * dirty the inode itself
2253          */
2254         if (flags & (I_DIRTY_INODE | I_DIRTY_TIME)) {
2255                 trace_writeback_dirty_inode_start(inode, flags);
2256 
2257                 if (sb->s_op->dirty_inode)
2258                         sb->s_op->dirty_inode(inode, flags);
2259 
2260                 trace_writeback_dirty_inode(inode, flags);
2261         }
2262         if (flags & I_DIRTY_INODE)
2263                 flags &= ~I_DIRTY_TIME;
2264         dirtytime = flags & I_DIRTY_TIME;
2265 
2266         /*
2267          * Paired with smp_mb() in __writeback_single_inode() for the
2268          * following lockless i_state test.  See there for details.
2269          */
2270         smp_mb();
2271 
2272         if (((inode->i_state & flags) == flags) ||
2273             (dirtytime && (inode->i_state & I_DIRTY_INODE)))
2274                 return;
2275 
2276         if (unlikely(block_dump))
2277                 block_dump___mark_inode_dirty(inode);
2278 
2279         spin_lock(&inode->i_lock);
2280         if (dirtytime && (inode->i_state & I_DIRTY_INODE))
2281                 goto out_unlock_inode;
2282         if ((inode->i_state & flags) != flags) {
2283                 const int was_dirty = inode->i_state & I_DIRTY;
2284 
2285                 inode_attach_wb(inode, NULL);
2286 
2287                 if (flags & I_DIRTY_INODE)
2288                         inode->i_state &= ~I_DIRTY_TIME;
2289                 inode->i_state |= flags;
2290 
2291                 /*
2292                  * If the inode is being synced, just update its dirty state.
2293                  * The unlocker will place the inode on the appropriate
2294                  * superblock list, based upon its state.
2295                  */
2296                 if (inode->i_state & I_SYNC)
2297                         goto out_unlock_inode;
2298 
2299                 /*
2300                  * Only add valid (hashed) inodes to the superblock's
2301                  * dirty list.  Add blockdev inodes as well.
2302                  */
2303                 if (!S_ISBLK(inode->i_mode)) {
2304                         if (inode_unhashed(inode))
2305                                 goto out_unlock_inode;
2306                 }
2307                 if (inode->i_state & I_FREEING)
2308                         goto out_unlock_inode;
2309 
2310                 /*
2311                  * If the inode was already on b_dirty/b_io/b_more_io, don't
2312                  * reposition it (that would break b_dirty time-ordering).
2313                  */
2314                 if (!was_dirty) {
2315                         struct bdi_writeback *wb;
2316                         struct list_head *dirty_list;
2317                         bool wakeup_bdi = false;
2318 
2319                         wb = locked_inode_to_wb_and_lock_list(inode);
2320 
2321                         WARN(bdi_cap_writeback_dirty(wb->bdi) &&
2322                              !test_bit(WB_registered, &wb->state),
2323                              "bdi-%s not registered\n", wb->bdi->name);
2324 
2325                         inode->dirtied_when = jiffies;
2326                         if (dirtytime)
2327                                 inode->dirtied_time_when = jiffies;
2328 
2329                         if (inode->i_state & I_DIRTY)
2330                                 dirty_list = &wb->b_dirty;
2331                         else
2332                                 dirty_list = &wb->b_dirty_time;
2333 
2334                         wakeup_bdi = inode_io_list_move_locked(inode, wb,
2335                                                                dirty_list);
2336 
2337                         spin_unlock(&wb->list_lock);
2338                         trace_writeback_dirty_inode_enqueue(inode);
2339 
2340                         /*
2341                          * If this is the first dirty inode for this bdi,
2342                          * we have to wake-up the corresponding bdi thread
2343                          * to make sure background write-back happens
2344                          * later.
2345                          */
2346                         if (bdi_cap_writeback_dirty(wb->bdi) && wakeup_bdi)
2347                                 wb_wakeup_delayed(wb);
2348                         return;
2349                 }
2350         }
2351 out_unlock_inode:
2352         spin_unlock(&inode->i_lock);
2353 }
2354 EXPORT_SYMBOL(__mark_inode_dirty);
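/*
 * Illustrative caller sketch (editorial, not part of this file): filesystem
 * code normally reaches __mark_inode_dirty() through the mark_inode_dirty()
 * and mark_inode_dirty_sync() wrappers from <linux/fs.h>; the
 * example_touch_mtime() helper below is hypothetical.
 */
static void example_touch_mtime(struct inode *inode)
{
        inode->i_mtime = current_time(inode);
        /* Equivalent to __mark_inode_dirty(inode, I_DIRTY_SYNC). */
        mark_inode_dirty_sync(inode);
}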
2355 
2356 /*
2357  * The @s_sync_lock is used to serialise concurrent sync operations
2358  * to avoid lock contention problems with concurrent wait_sb_inodes() calls.
2359  * Concurrent callers will block on the s_sync_lock rather than doing contending
2360  * walks. The queueing maintains the behaviour required by sync(2): all IO
2361  * issued up to the time this function is entered is guaranteed to be completed
2362  * by the time we have gained the lock and waited for all IO that is in
2363  * progress, regardless of the order in which callers are granted the lock.
2364  */
2365 static void wait_sb_inodes(struct super_block *sb)
2366 {
2367         LIST_HEAD(sync_list);
2368 
2369         /*
2370          * We need to be protected against the filesystem going from
2371          * r/o to r/w or vice versa.
2372          */
2373         WARN_ON(!rwsem_is_locked(&sb->s_umount));
2374 
2375         mutex_lock(&sb->s_sync_lock);
2376 
2377         /*
2378          * Splice the writeback list onto a temporary list to avoid waiting on
2379          * inodes that have started writeback after this point.
2380          *
2381          * Use rcu_read_lock() to keep the inodes around until we have a
2382          * reference. s_inode_wblist_lock protects sb->s_inodes_wb as well as
2383          * the local list because inodes can be dropped from either by writeback
2384          * completion.
2385          */
2386         rcu_read_lock();
2387         spin_lock_irq(&sb->s_inode_wblist_lock);
2388         list_splice_init(&sb->s_inodes_wb, &sync_list);
2389 
2390         /*
2391          * Data integrity sync. Must wait for all pages under writeback, because
2392          * there may have been pages dirtied before our sync call whose writeout
2393          * started before we got to them.  In that case, the inode
2394          * may not be on the dirty list, but we still have to wait for that
2395          * writeout.
2396          */
2397         while (!list_empty(&sync_list)) {
2398                 struct inode *inode = list_first_entry(&sync_list, struct inode,
2399                                                        i_wb_list);
2400                 struct address_space *mapping = inode->i_mapping;
2401 
2402                 /*
2403                  * Move each inode back to the wb list before we drop the lock
2404                  * to preserve consistency between i_wb_list and the mapping
2405                  * writeback tag. Writeback completion is responsible for removing
2406                  * the inode from either list once the writeback tag is cleared.
2407                  */
2408                 list_move_tail(&inode->i_wb_list, &sb->s_inodes_wb);
2409 
2410                 /*
2411                  * The mapping can appear untagged while still on-list since we
2412                  * do not have the mapping lock. Skip it here, wb completion
2413                  * will remove it.
2414                  */
2415                 if (!mapping_tagged(mapping, PAGECACHE_TAG_WRITEBACK))
2416                         continue;
2417 
2418                 spin_unlock_irq(&sb->s_inode_wblist_lock);
2419 
2420                 spin_lock(&inode->i_lock);
2421                 if (inode->i_state & (I_FREEING|I_WILL_FREE|I_NEW)) {
2422                         spin_unlock(&inode->i_lock);
2423 
2424                         spin_lock_irq(&sb->s_inode_wblist_lock);
2425                         continue;
2426                 }
2427                 __iget(inode);
2428                 spin_unlock(&inode->i_lock);
2429                 rcu_read_unlock();
2430 
2431                 /*
2432                  * We keep the error status of individual mapping so that
2433                  * applications can catch the writeback error using fsync(2).
2434                  * See filemap_fdatawait_keep_errors() for details.
2435                  */
2436                 filemap_fdatawait_keep_errors(mapping);
2437 
2438                 cond_resched();
2439 
2440                 iput(inode);
2441 
2442                 rcu_read_lock();
2443                 spin_lock_irq(&sb->s_inode_wblist_lock);
2444         }
2445         spin_unlock_irq(&sb->s_inode_wblist_lock);
2446         rcu_read_unlock();
2447         mutex_unlock(&sb->s_sync_lock);
2448 }
2449 
2450 static void __writeback_inodes_sb_nr(struct super_block *sb, unsigned long nr,
2451                                      enum wb_reason reason, bool skip_if_busy)
2452 {
2453         struct backing_dev_info *bdi = sb->s_bdi;
2454         DEFINE_WB_COMPLETION(done, bdi);
2455         struct wb_writeback_work work = {
2456                 .sb                     = sb,
2457                 .sync_mode              = WB_SYNC_NONE,
2458                 .tagged_writepages      = 1,
2459                 .done                   = &done,
2460                 .nr_pages               = nr,
2461                 .reason                 = reason,
2462         };
2463 
2464         if (!bdi_has_dirty_io(bdi) || bdi == &noop_backing_dev_info)
2465                 return;
2466         WARN_ON(!rwsem_is_locked(&sb->s_umount));
2467 
2468         bdi_split_work_to_wbs(sb->s_bdi, &work, skip_if_busy);
2469         wb_wait_for_completion(&done);
2470 }
2471 
2472 /**
2473  * writeback_inodes_sb_nr -     writeback dirty inodes from given super_block
2474  * @sb: the superblock
2475  * @nr: the number of pages to write
2476  * @reason: reason why some writeback work was initiated
2477  *
2478  * Start writeback on some inodes on this super_block. No guarantees are made
2479  * on how many (if any) will be written, and this function does not wait
2480  * for IO completion of submitted IO.
2481  */
2482 void writeback_inodes_sb_nr(struct super_block *sb,
2483                             unsigned long nr,
2484                             enum wb_reason reason)
2485 {
2486         __writeback_inodes_sb_nr(sb, nr, reason, false);
2487 }
2488 EXPORT_SYMBOL(writeback_inodes_sb_nr);
2489 
2490 /**
2491  * writeback_inodes_sb  -       writeback dirty inodes from given super_block
2492  * @sb: the superblock
2493  * @reason: reason why some writeback work was initiated
2494  *
2495  * Start writeback on some inodes on this super_block. No guarantees are made
2496  * on how many (if any) will be written, and this function does not wait
2497  * for IO completion of submitted IO.
2498  */
2499 void writeback_inodes_sb(struct super_block *sb, enum wb_reason reason)
2500 {
2501         return writeback_inodes_sb_nr(sb, get_nr_dirty_pages(), reason);
2502 }
2503 EXPORT_SYMBOL(writeback_inodes_sb);
2504 
2505 /**
2506  * try_to_writeback_inodes_sb - try to start writeback if none underway
2507  * @sb: the superblock
2508  * @reason: reason why some writeback work was initiated
2509  *
2510  * Invoke __writeback_inodes_sb_nr if no writeback is currently underway.
2511  */
2512 void try_to_writeback_inodes_sb(struct super_block *sb, enum wb_reason reason)
2513 {
2514         if (!down_read_trylock(&sb->s_umount))
2515                 return;
2516 
2517         __writeback_inodes_sb_nr(sb, get_nr_dirty_pages(), reason, true);
2518         up_read(&sb->s_umount);
2519 }
2520 EXPORT_SYMBOL(try_to_writeback_inodes_sb);
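/*
 * Illustrative caller sketch (editorial, not part of this file): a filesystem
 * running low on free space can opportunistically nudge writeback for its own
 * superblock without blocking; example_nudge_writeback() is hypothetical.
 */
static void example_nudge_writeback(struct super_block *sb)
{
        /* Quietly does nothing if s_umount cannot be taken for read. */
        try_to_writeback_inodes_sb(sb, WB_REASON_FS_FREE_SPACE);
}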
2521 
2522 /**
2523  * sync_inodes_sb       -       sync sb inode pages
2524  * @sb: the superblock
2525  *
2526  * This function writes and waits on any dirty inode belonging to this
2527  * super_block.
2528  */
2529 void sync_inodes_sb(struct super_block *sb)
2530 {
2531         struct backing_dev_info *bdi = sb->s_bdi;
2532         DEFINE_WB_COMPLETION(done, bdi);
2533         struct wb_writeback_work work = {
2534                 .sb             = sb,
2535                 .sync_mode      = WB_SYNC_ALL,
2536                 .nr_pages       = LONG_MAX,
2537                 .range_cyclic   = 0,
2538                 .done           = &done,
2539                 .reason         = WB_REASON_SYNC,
2540                 .for_sync       = 1,
2541         };
2542 
2543         /*
2544          * Can't skip on !bdi_has_dirty() because we should wait for !dirty
2545          * inodes under writeback and I_DIRTY_TIME inodes ignored by
2546          * bdi_has_dirty() need to be written out too.
2547          */
2548         if (bdi == &noop_backing_dev_info)
2549                 return;
2550         WARN_ON(!rwsem_is_locked(&sb->s_umount));
2551 
2552         /* protect against inode wb switch, see inode_switch_wbs_work_fn() */
2553         bdi_down_write_wb_switch_rwsem(bdi);
2554         bdi_split_work_to_wbs(bdi, &work, false);
2555         wb_wait_for_completion(&done);
2556         bdi_up_write_wb_switch_rwsem(bdi);
2557 
2558         wait_sb_inodes(sb);
2559 }
2560 EXPORT_SYMBOL(sync_inodes_sb);
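/*
 * Illustrative caller sketch (editorial, not part of this file): sync(2)-style
 * callers hold s_umount (typically via iterate_supers()) and then do the full
 * write-and-wait pass; example_sync_one_sb() mirrors that shape and is
 * hypothetical.
 */
static void example_sync_one_sb(struct super_block *sb, void *arg)
{
        if (!sb_rdonly(sb))
                sync_inodes_sb(sb);
}
/* ... which could be driven by iterate_supers(example_sync_one_sb, NULL). */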
2561 
2562 /**
2563  * write_inode_now      -       write an inode to disk
2564  * @inode: inode to write to disk
2565  * @sync: whether the write should be synchronous or not
2566  *
2567  * This function commits an inode to disk immediately if it is dirty. This is
2568  * primarily needed by knfsd.
2569  *
2570  * The caller must either have a ref on the inode or must have set I_WILL_FREE.
2571  */
2572 int write_inode_now(struct inode *inode, int sync)
2573 {
2574         struct writeback_control wbc = {
2575                 .nr_to_write = LONG_MAX,
2576                 .sync_mode = sync ? WB_SYNC_ALL : WB_SYNC_NONE,
2577                 .range_start = 0,
2578                 .range_end = LLONG_MAX,
2579         };
2580 
2581         if (!mapping_cap_writeback_dirty(inode->i_mapping))
2582                 wbc.nr_to_write = 0;
2583 
2584         might_sleep();
2585         return writeback_single_inode(inode, &wbc);
2586 }
2587 EXPORT_SYMBOL(write_inode_now);
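/*
 * Illustrative caller sketch (editorial, not part of this file): flushing one
 * inode synchronously, e.g. before handing it out over the wire;
 * example_commit_inode() is hypothetical.
 */
static int example_commit_inode(struct inode *inode)
{
        /* sync=1 selects WB_SYNC_ALL: data is written and waited upon. */
        return write_inode_now(inode, 1);
}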
2588 
2589 /**
2590  * sync_inode - write an inode and its pages to disk.
2591  * @inode: the inode to sync
2592  * @wbc: controls the writeback mode
2593  *
2594  * sync_inode() will write an inode and its pages to disk.  It will also
2595  * correctly update the inode on its superblock's dirty inode lists and will
2596  * update inode->i_state.
2597  *
2598  * The caller must have a ref on the inode.
2599  */
2600 int sync_inode(struct inode *inode, struct writeback_control *wbc)
2601 {
2602         return writeback_single_inode(inode, wbc);
2603 }
2604 EXPORT_SYMBOL(sync_inode);
2605 
2606 /**
2607  * sync_inode_metadata - write an inode to disk
2608  * @inode: the inode to sync
2609  * @wait: wait for I/O to complete.
2610  *
2611  * Write an inode to disk and adjust its dirty state after completion.
2612  *
2613  * Note: only writes the actual inode, no associated data or other metadata.
2614  */
2615 int sync_inode_metadata(struct inode *inode, int wait)
2616 {
2617         struct writeback_control wbc = {
2618                 .sync_mode = wait ? WB_SYNC_ALL : WB_SYNC_NONE,
2619                 .nr_to_write = 0, /* metadata-only */
2620         };
2621 
2622         return sync_inode(inode, &wbc);
2623 }
2624 EXPORT_SYMBOL(sync_inode_metadata);
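/*
 * Illustrative caller sketch (editorial, not part of this file): an fsync path
 * that has already written the data pages and only needs the inode itself on
 * disk; example_fsync_metadata() is hypothetical.
 */
static int example_fsync_metadata(struct inode *inode, int datasync)
{
        /* For datasync, skip if only timestamp-style metadata is dirty. */
        if (datasync && !(inode->i_state & I_DIRTY_DATASYNC))
                return 0;
        return sync_inode_metadata(inode, 1);   /* wait=1 => WB_SYNC_ALL */
}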
