/*
 * fs/fs-writeback.c
 *
 * Copyright (C) 2002, Linus Torvalds.
 *
 * Contains all the functions related to writing back and waiting
 * upon dirty inodes against superblocks, and writing back dirty
 * pages against inodes.  ie: data writeback.  Writeout of the
 * inode itself is not handled here.
 *
 * 10Apr2002	Andrew Morton
 *		Split out of fs/inode.c
 *		Additions for address_space-based writeback
 */

#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/kthread.h>
#include <linux/writeback.h>
#include <linux/blkdev.h>
#include <linux/backing-dev.h>
#include <linux/tracepoint.h>
#include <linux/device.h>
#include "internal.h"

/*
 * 4MB minimal write chunk size
 */
#define MIN_WRITEBACK_PAGES	(4096UL >> (PAGE_CACHE_SHIFT - 10))

/*
 * Passed into wb_writeback(), essentially a subset of writeback_control
 */
struct wb_writeback_work {
	long nr_pages;
	struct super_block *sb;
	unsigned long *older_than_this;
	enum writeback_sync_modes sync_mode;
	unsigned int tagged_writepages:1;
	unsigned int for_kupdate:1;
	unsigned int range_cyclic:1;
	unsigned int for_background:1;
	unsigned int for_sync:1;	/* sync(2) WB_SYNC_ALL writeback */
	enum wb_reason reason;		/* why was writeback initiated? */

	struct list_head list;		/* pending work list */
	struct completion *done;	/* set if the caller waits */
};

/*
 * If an inode is constantly having its pages dirtied, and the updates then
 * stop dirtytime_expire_interval seconds in the past, the worst-case time
 * between when an inode has its timestamps updated and when they finally
 * get written out can be two dirtytime_expire_intervals.  We set the
 * default to 12 hours (in seconds), which means most of the time inodes
 * will have their timestamps written to disk after 12 hours, but in the
 * worst case a few inodes might not have their timestamps updated for
 * 24 hours.
 */
unsigned int dirtytime_expire_interval = 12 * 60 * 60;

/**
 * writeback_in_progress - determine whether there is writeback in progress
 * @bdi: the device's backing_dev_info structure.
 *
 * Determine whether there is writeback waiting to be handled against a
 * backing device.
 */
int writeback_in_progress(struct backing_dev_info *bdi)
{
	return test_bit(BDI_writeback_running, &bdi->state);
}
EXPORT_SYMBOL(writeback_in_progress);

struct backing_dev_info *inode_to_bdi(struct inode *inode)
{
	struct super_block *sb;

	if (!inode)
		return &noop_backing_dev_info;

	sb = inode->i_sb;
#ifdef CONFIG_BLOCK
	if (sb_is_blkdev_sb(sb))
		return blk_get_backing_dev_info(I_BDEV(inode));
#endif
	return sb->s_bdi;
}
EXPORT_SYMBOL_GPL(inode_to_bdi);

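/*
 * Inodes are linked on the bdi writeback lists via i_wb_list; map a list
 * entry back to its containing inode.
 */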
static inline struct inode *wb_inode(struct list_head *head)
{
	return list_entry(head, struct inode, i_wb_list);
}

/*
 * Include the creation of the trace points after defining the
 * wb_writeback_work structure and inline functions so that the definition
 * remains local to this file.
 */
#define CREATE_TRACE_POINTS
#include <trace/events/writeback.h>

EXPORT_TRACEPOINT_SYMBOL_GPL(wbc_writepage);

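/*
 * Kick the per-bdi flusher work to run immediately.  Only do so while the
 * bdi is still registered; otherwise the bdi may be in the middle of being
 * shut down.
 */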
static void bdi_wakeup_thread(struct backing_dev_info *bdi)
{
	spin_lock_bh(&bdi->wb_lock);
	if (test_bit(BDI_registered, &bdi->state))
		mod_delayed_work(bdi_wq, &bdi->wb.dwork, 0);
	spin_unlock_bh(&bdi->wb_lock);
}

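/*
 * Queue a work item on the bdi's work list and wake the flusher.  If the
 * bdi has already been unregistered, complete the work right away so that
 * callers waiting on work->done are not left blocked.
 */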
static void bdi_queue_work(struct backing_dev_info *bdi,
			   struct wb_writeback_work *work)
{
	trace_writeback_queue(bdi, work);

	spin_lock_bh(&bdi->wb_lock);
	if (!test_bit(BDI_registered, &bdi->state)) {
		if (work->done)
			complete(work->done);
		goto out_unlock;
	}
	list_add_tail(&work->list, &bdi->work_list);
	mod_delayed_work(bdi_wq, &bdi->wb.dwork, 0);
out_unlock:
	spin_unlock_bh(&bdi->wb_lock);
}

static void
__bdi_start_writeback(struct backing_dev_info *bdi, long nr_pages,
		      bool range_cyclic, enum wb_reason reason)
{
	struct wb_writeback_work *work;

	/*
	 * This is WB_SYNC_NONE writeback, so if allocation fails just
	 * wakeup the thread for old dirty data writeback
	 */
	work = kzalloc(sizeof(*work), GFP_ATOMIC);
	if (!work) {
		trace_writeback_nowork(bdi);
		bdi_wakeup_thread(bdi);
		return;
	}

	work->sync_mode	= WB_SYNC_NONE;
	work->nr_pages	= nr_pages;
	work->range_cyclic = range_cyclic;
	work->reason	= reason;

	bdi_queue_work(bdi, work);
}

/**
 * bdi_start_writeback - start writeback
 * @bdi: the backing device to write from
 * @nr_pages: the number of pages to write
 * @reason: reason why some writeback work was initiated
 *
 * Description:
 *   This does WB_SYNC_NONE opportunistic writeback. The IO is only
 *   started by the time this function returns; we make no guarantees on
 *   completion. The caller need not hold the sb's s_umount semaphore.
 *
 */
void bdi_start_writeback(struct backing_dev_info *bdi, long nr_pages,
			enum wb_reason reason)
{
	__bdi_start_writeback(bdi, nr_pages, true, reason);
}

/**
 * bdi_start_background_writeback - start background writeback
 * @bdi: the backing device to write from
 *
 * Description:
 *   This makes sure WB_SYNC_NONE background writeback happens. When
 *   this function returns, it is only guaranteed that for given BDI
 *   some IO is happening if we are over background dirty threshold.
 *   Caller need not hold sb s_umount semaphore.
 */
void bdi_start_background_writeback(struct backing_dev_info *bdi)
{
	/*
	 * We just wake up the flusher thread. It will perform background
	 * writeback as soon as there is no other work to do.
	 */
	trace_writeback_wake_background(bdi);
	bdi_wakeup_thread(bdi);
}

/*
 * Remove the inode from the writeback list it is on.
 */
void inode_wb_list_del(struct inode *inode)
{
	struct backing_dev_info *bdi = inode_to_bdi(inode);

	spin_lock(&bdi->wb.list_lock);
	list_del_init(&inode->i_wb_list);
	spin_unlock(&bdi->wb.list_lock);
}

/*
 * Redirty an inode: set its when-it-was dirtied timestamp and move it to the
 * furthest end of its superblock's dirty-inode list.
 *
 * Before stamping the inode's ->dirtied_when, we check to see whether it is
 * already the most-recently-dirtied inode on the b_dirty list.  If that is
 * the case then the inode must have been redirtied while it was being written
 * out and we don't reset its dirtied_when.
 */
static void redirty_tail(struct inode *inode, struct bdi_writeback *wb)
{
	assert_spin_locked(&wb->list_lock);
	if (!list_empty(&wb->b_dirty)) {
		struct inode *tail;

		tail = wb_inode(wb->b_dirty.next);
		if (time_before(inode->dirtied_when, tail->dirtied_when))
			inode->dirtied_when = jiffies;
	}
	list_move(&inode->i_wb_list, &wb->b_dirty);
}

/*
 * requeue inode for re-scanning after bdi->b_io list is exhausted.
 */
static void requeue_io(struct inode *inode, struct bdi_writeback *wb)
{
	assert_spin_locked(&wb->list_lock);
	list_move(&inode->i_wb_list, &wb->b_more_io);
}

static void inode_sync_complete(struct inode *inode)
{
	inode->i_state &= ~I_SYNC;
	/* If the inode is clean and unused, put it into the LRU now... */
	inode_add_lru(inode);
	/* Waiters must see I_SYNC cleared before being woken up */
	smp_mb();
	wake_up_bit(&inode->i_state, __I_SYNC);
}

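/*
 * Return true if @inode was dirtied after time @t, i.e. the inode has not
 * yet expired for writeback purposes.
 */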
static bool inode_dirtied_after(struct inode *inode, unsigned long t)
{
	bool ret = time_after(inode->dirtied_when, t);
#ifndef CONFIG_64BIT
	/*
	 * For inodes being constantly redirtied, dirtied_when can get stuck.
	 * It _appears_ to be in the future, but is actually in distant past.
	 * This test is necessary to prevent such wrapped-around relative times
	 * from permanently stopping the whole bdi writeback.
	 */
	ret = ret && time_before_eq(inode->dirtied_when, jiffies);
#endif
	return ret;
}

#define EXPIRE_DIRTY_ATIME 0x0001

/*
 * Move expired (dirtied before work->older_than_this) dirty inodes from
 * @delaying_queue to @dispatch_queue.
 */
static int move_expired_inodes(struct list_head *delaying_queue,
			       struct list_head *dispatch_queue,
			       int flags,
			       struct wb_writeback_work *work)
{
	unsigned long *older_than_this = NULL;
	unsigned long expire_time;
	LIST_HEAD(tmp);
	struct list_head *pos, *node;
	struct super_block *sb = NULL;
	struct inode *inode;
	int do_sb_sort = 0;
	int moved = 0;

	if ((flags & EXPIRE_DIRTY_ATIME) == 0)
		older_than_this = work->older_than_this;
	else if (!work->for_sync) {
		expire_time = jiffies - (dirtytime_expire_interval * HZ);
		older_than_this = &expire_time;
	}
	while (!list_empty(delaying_queue)) {
		inode = wb_inode(delaying_queue->prev);
		if (older_than_this &&
		    inode_dirtied_after(inode, *older_than_this))
			break;
		list_move(&inode->i_wb_list, &tmp);
		moved++;
		if (flags & EXPIRE_DIRTY_ATIME)
			set_bit(__I_DIRTY_TIME_EXPIRED, &inode->i_state);
		if (sb_is_blkdev_sb(inode->i_sb))
			continue;
		if (sb && sb != inode->i_sb)
			do_sb_sort = 1;
		sb = inode->i_sb;
	}

	/* just one sb in list, splice to dispatch_queue and we're done */
	if (!do_sb_sort) {
		list_splice(&tmp, dispatch_queue);
		goto out;
	}

	/* Move inodes from one superblock together */
	while (!list_empty(&tmp)) {
		sb = wb_inode(tmp.prev)->i_sb;
		list_for_each_prev_safe(pos, node, &tmp) {
			inode = wb_inode(pos);
			if (inode->i_sb == sb)
				list_move(&inode->i_wb_list, dispatch_queue);
		}
	}
out:
	return moved;
}

/*
 * Queue all expired dirty inodes for io, eldest first.
 * Before
 *         newly dirtied     b_dirty    b_io    b_more_io
 *         =============>    gf         edc     BA
 * After
 *         newly dirtied     b_dirty    b_io    b_more_io
 *         =============>    g          fBAedc
 *                                           |
 *                                           +--> dequeue for IO
 */
static void queue_io(struct bdi_writeback *wb, struct wb_writeback_work *work)
{
	int moved;

	assert_spin_locked(&wb->list_lock);
	list_splice_init(&wb->b_more_io, &wb->b_io);
	moved = move_expired_inodes(&wb->b_dirty, &wb->b_io, 0, work);
	moved += move_expired_inodes(&wb->b_dirty_time, &wb->b_io,
				     EXPIRE_DIRTY_ATIME, work);
	trace_writeback_queue_io(wb, work, moved);
}

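/*
 * Write back the inode itself (not its data pages) through the filesystem's
 * ->write_inode() method, wrapped in tracepoints.  Bad inodes are skipped.
 */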
static int write_inode(struct inode *inode, struct writeback_control *wbc)
{
	int ret;

	if (inode->i_sb->s_op->write_inode && !is_bad_inode(inode)) {
		trace_writeback_write_inode_start(inode, wbc);
		ret = inode->i_sb->s_op->write_inode(inode, wbc);
		trace_writeback_write_inode(inode, wbc);
		return ret;
	}
	return 0;
}

/*
 * Wait for writeback on an inode to complete. Called with i_lock held.
 * Caller must make sure inode cannot go away when we drop i_lock.
 */
static void __inode_wait_for_writeback(struct inode *inode)
	__releases(inode->i_lock)
	__acquires(inode->i_lock)
{
	DEFINE_WAIT_BIT(wq, &inode->i_state, __I_SYNC);
	wait_queue_head_t *wqh;

	wqh = bit_waitqueue(&inode->i_state, __I_SYNC);
	while (inode->i_state & I_SYNC) {
		spin_unlock(&inode->i_lock);
		__wait_on_bit(wqh, &wq, bit_wait,
			      TASK_UNINTERRUPTIBLE);
		spin_lock(&inode->i_lock);
	}
}

/*
 * Wait for writeback on an inode to complete. Caller must have inode pinned.
 */
void inode_wait_for_writeback(struct inode *inode)
{
	spin_lock(&inode->i_lock);
	__inode_wait_for_writeback(inode);
	spin_unlock(&inode->i_lock);
}

/*
 * Sleep until I_SYNC is cleared. This function must be called with i_lock
 * held and drops it. It is intended for callers that do not hold an inode
 * reference, so once i_lock is dropped the inode can go away.
 */
static void inode_sleep_on_writeback(struct inode *inode)
	__releases(inode->i_lock)
{
	DEFINE_WAIT(wait);
	wait_queue_head_t *wqh = bit_waitqueue(&inode->i_state, __I_SYNC);
	int sleep;

	prepare_to_wait(wqh, &wait, TASK_UNINTERRUPTIBLE);
	sleep = inode->i_state & I_SYNC;
	spin_unlock(&inode->i_lock);
	if (sleep)
		schedule();
	finish_wait(wqh, &wait);
}

/*
 * Find the proper writeback list for the inode depending on its current state
 * and on any changes of its state that happened while we were doing writeback.
 * Here we handle things such as livelock prevention and fairness of writeback
 * among inodes. This function can be called only by the flusher thread - no
 * one else processes all inodes on the writeback lists, and requeueing inodes
 * behind the flusher thread's back can have unexpected consequences.
 */
static void requeue_inode(struct inode *inode, struct bdi_writeback *wb,
			  struct writeback_control *wbc)
{
	if (inode->i_state & I_FREEING)
		return;

	/*
	 * Sync livelock prevention. Each inode is tagged and synced in one
	 * shot. If still dirty, it will be redirty_tail()'ed below.  Update
	 * the dirty time to prevent enqueue and sync it again.
	 */
	if ((inode->i_state & I_DIRTY) &&
	    (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages))
		inode->dirtied_when = jiffies;

	if (wbc->pages_skipped) {
		/*
		 * writeback is not making progress due to locked
		 * buffers. Skip this inode for now.
		 */
		redirty_tail(inode, wb);
		return;
	}

	if (mapping_tagged(inode->i_mapping, PAGECACHE_TAG_DIRTY)) {
		/*
		 * We didn't write back all the pages.  nfs_writepages()
		 * sometimes bales out without doing anything.
		 */
		if (wbc->nr_to_write <= 0) {
			/* Slice used up. Queue for next turn. */
			requeue_io(inode, wb);
		} else {
			/*
			 * Writeback blocked by something other than
			 * congestion. Delay the inode for some time to
			 * avoid spinning on the CPU (100% iowait)
			 * retrying writeback of the dirty page/inode
			 * that cannot be performed immediately.
			 */
			redirty_tail(inode, wb);
		}
	} else if (inode->i_state & I_DIRTY) {
		/*
		 * Filesystems can dirty the inode during writeback operations,
		 * such as delayed allocation during submission or metadata
		 * updates after data IO completion.
		 */
		redirty_tail(inode, wb);
	} else if (inode->i_state & I_DIRTY_TIME) {
		inode->dirtied_when = jiffies;
		list_move(&inode->i_wb_list, &wb->b_dirty_time);
	} else {
		/* The inode is clean. Remove from writeback lists. */
		list_del_init(&inode->i_wb_list);
	}
}

/*
 * Write out an inode and its dirty pages. Do not update the writeback list
 * linkage. That is left to the caller. The caller is also responsible for
 * setting I_SYNC flag and calling inode_sync_complete() to clear it.
 */
static int
__writeback_single_inode(struct inode *inode, struct writeback_control *wbc)
{
	struct address_space *mapping = inode->i_mapping;
	long nr_to_write = wbc->nr_to_write;
	unsigned dirty;
	int ret;

	WARN_ON(!(inode->i_state & I_SYNC));

	trace_writeback_single_inode_start(inode, wbc, nr_to_write);

	ret = do_writepages(mapping, wbc);

	/*
	 * Make sure to wait on the data before writing out the metadata.
	 * This is important for filesystems that modify metadata on data
	 * I/O completion. We don't do it for sync(2) writeback because it has a
	 * separate, external IO completion path and ->sync_fs for guaranteeing
	 * inode metadata is written back correctly.
	 */
	if (wbc->sync_mode == WB_SYNC_ALL && !wbc->for_sync) {
		int err = filemap_fdatawait(mapping);
		if (ret == 0)
			ret = err;
	}

	/*
	 * Some filesystems may redirty the inode during the writeback
	 * due to delalloc, clear dirty metadata flags right before
	 * write_inode()
	 */
	spin_lock(&inode->i_lock);

	dirty = inode->i_state & I_DIRTY;
	if (inode->i_state & I_DIRTY_TIME) {
		if ((dirty & (I_DIRTY_SYNC | I_DIRTY_DATASYNC)) ||
		    unlikely(inode->i_state & I_DIRTY_TIME_EXPIRED) ||
		    unlikely(time_after(jiffies,
					(inode->dirtied_time_when +
					 dirtytime_expire_interval * HZ)))) {
			dirty |= I_DIRTY_TIME | I_DIRTY_TIME_EXPIRED;
			trace_writeback_lazytime(inode);
		}
	} else
		inode->i_state &= ~I_DIRTY_TIME_EXPIRED;
	inode->i_state &= ~dirty;

	/*
	 * Paired with smp_mb() in __mark_inode_dirty().  This allows
	 * __mark_inode_dirty() to test i_state without grabbing i_lock -
	 * either they see the I_DIRTY bits cleared or we see the dirtied
	 * inode.
	 *
	 * I_DIRTY_PAGES is always cleared together above even if @mapping
	 * still has dirty pages.  The flag is reinstated after smp_mb() if
	 * necessary.  This guarantees that either __mark_inode_dirty()
	 * sees clear I_DIRTY_PAGES or we see PAGECACHE_TAG_DIRTY.
	 */
	smp_mb();

	if (mapping_tagged(mapping, PAGECACHE_TAG_DIRTY))
		inode->i_state |= I_DIRTY_PAGES;

	spin_unlock(&inode->i_lock);

	if (dirty & I_DIRTY_TIME)
		mark_inode_dirty_sync(inode);
	/* Don't write the inode if only I_DIRTY_PAGES was set */
	if (dirty & ~I_DIRTY_PAGES) {
		int err = write_inode(inode, wbc);
		if (ret == 0)
			ret = err;
	}
	trace_writeback_single_inode(inode, wbc, nr_to_write);
	return ret;
}

/*
 * Write out an inode's dirty pages. Either the caller has an active reference
 * on the inode or the inode has I_WILL_FREE set.
 *
 * This function is designed for writing back a single inode, e.g. when called
 * from a filesystem. The flusher thread instead uses __writeback_single_inode()
 * and does more profound writeback list handling in writeback_sb_inodes().
 */
static int
writeback_single_inode(struct inode *inode, struct bdi_writeback *wb,
		       struct writeback_control *wbc)
{
	int ret = 0;

	spin_lock(&inode->i_lock);
	if (!atomic_read(&inode->i_count))
		WARN_ON(!(inode->i_state & (I_WILL_FREE|I_FREEING)));
	else
		WARN_ON(inode->i_state & I_WILL_FREE);

	if (inode->i_state & I_SYNC) {
		if (wbc->sync_mode != WB_SYNC_ALL)
			goto out;
		/*
		 * It's a data-integrity sync. We must wait. Since callers hold
		 * inode reference or inode has I_WILL_FREE set, it cannot go
		 * away under us.
		 */
		__inode_wait_for_writeback(inode);
	}
	WARN_ON(inode->i_state & I_SYNC);
	/*
	 * Skip inode if it is clean and we have no outstanding writeback in
	 * WB_SYNC_ALL mode. We don't want to mess with writeback lists in this
	 * function since flusher thread may be doing for example sync in
	 * parallel and if we move the inode, it could get skipped. So here we
	 * make sure inode is on some writeback list and leave it there unless
	 * we have completely cleaned the inode.
	 */
	if (!(inode->i_state & I_DIRTY_ALL) &&
	    (wbc->sync_mode != WB_SYNC_ALL ||
	     !mapping_tagged(inode->i_mapping, PAGECACHE_TAG_WRITEBACK)))
		goto out;
	inode->i_state |= I_SYNC;
	spin_unlock(&inode->i_lock);

	ret = __writeback_single_inode(inode, wbc);

	spin_lock(&wb->list_lock);
	spin_lock(&inode->i_lock);
	/*
	 * If inode is clean, remove it from writeback lists. Otherwise don't
	 * touch it. See comment above for explanation.
	 */
	if (!(inode->i_state & I_DIRTY_ALL))
		list_del_init(&inode->i_wb_list);
	spin_unlock(&wb->list_lock);
	inode_sync_complete(inode);
out:
	spin_unlock(&inode->i_lock);
	return ret;
}

static long writeback_chunk_size(struct backing_dev_info *bdi,
				 struct wb_writeback_work *work)
{
	long pages;

	/*
	 * WB_SYNC_ALL mode does livelock avoidance by syncing dirty
	 * inodes/pages in one big loop. Setting wbc.nr_to_write=LONG_MAX
	 * here avoids calling into writeback_inodes_wb() more than once.
	 *
	 * The intended call sequence for WB_SYNC_ALL writeback is:
	 *
	 *      wb_writeback()
	 *          writeback_sb_inodes()       <== called only once
	 *              write_cache_pages()     <== called once for each inode
	 *                   (quickly) tag currently dirty pages
	 *                   (maybe slowly) sync all tagged pages
	 */
	if (work->sync_mode == WB_SYNC_ALL || work->tagged_writepages)
		pages = LONG_MAX;
	else {
		pages = min(bdi->avg_write_bandwidth / 2,
			    global_dirty_limit / DIRTY_SCOPE);
		pages = min(pages, work->nr_pages);
		pages = round_down(pages + MIN_WRITEBACK_PAGES,
				   MIN_WRITEBACK_PAGES);
	}

	return pages;
}

/*
 * Write a portion of b_io inodes which belong to @sb.
 *
 * Return the number of pages and/or inodes written.
 */
static long writeback_sb_inodes(struct super_block *sb,
				struct bdi_writeback *wb,
				struct wb_writeback_work *work)
{
	struct writeback_control wbc = {
		.sync_mode		= work->sync_mode,
		.tagged_writepages	= work->tagged_writepages,
		.for_kupdate		= work->for_kupdate,
		.for_background		= work->for_background,
		.for_sync		= work->for_sync,
		.range_cyclic		= work->range_cyclic,
		.range_start		= 0,
		.range_end		= LLONG_MAX,
	};
	unsigned long start_time = jiffies;
	long write_chunk;
	long wrote = 0;  /* count both pages and inodes */

	while (!list_empty(&wb->b_io)) {
		struct inode *inode = wb_inode(wb->b_io.prev);

		if (inode->i_sb != sb) {
			if (work->sb) {
				/*
				 * We only want to write back data for this
				 * superblock, move all inodes not belonging
				 * to it back onto the dirty list.
				 */
				redirty_tail(inode, wb);
				continue;
			}

			/*
			 * The inode belongs to a different superblock.
			 * Bounce back to the caller to unpin this and
			 * pin the next superblock.
			 */
			break;
		}

		/*
		 * Don't bother with new inodes or inodes being freed, first
		 * kind does not need periodic writeout yet, and for the latter
		 * kind writeout is handled by the freer.
		 */
		spin_lock(&inode->i_lock);
		if (inode->i_state & (I_NEW | I_FREEING | I_WILL_FREE)) {
			spin_unlock(&inode->i_lock);
			redirty_tail(inode, wb);
			continue;
		}
		if ((inode->i_state & I_SYNC) && wbc.sync_mode != WB_SYNC_ALL) {
			/*
			 * If this inode is locked for writeback and we are not
			 * doing writeback-for-data-integrity, move it to
			 * b_more_io so that writeback can proceed with the
			 * other inodes on s_io.
			 *
			 * We'll have another go at writing back this inode
			 * once we have completed a full scan of b_io.
			 */
			spin_unlock(&inode->i_lock);
			requeue_io(inode, wb);
			trace_writeback_sb_inodes_requeue(inode);
			continue;
		}
		spin_unlock(&wb->list_lock);

		/*
		 * We already requeued the inode if it had I_SYNC set and we
		 * are doing WB_SYNC_NONE writeback. So this catches only the
		 * WB_SYNC_ALL case.
		 */
		if (inode->i_state & I_SYNC) {
			/* Wait for I_SYNC. This function drops i_lock... */
			inode_sleep_on_writeback(inode);
			/* Inode may be gone, start again */
			spin_lock(&wb->list_lock);
			continue;
		}
		inode->i_state |= I_SYNC;
		spin_unlock(&inode->i_lock);

		write_chunk = writeback_chunk_size(wb->bdi, work);
		wbc.nr_to_write = write_chunk;
		wbc.pages_skipped = 0;

		/*
		 * We use I_SYNC to pin the inode in memory. While it is set
		 * evict_inode() will wait so the inode cannot be freed.
		 */
		__writeback_single_inode(inode, &wbc);

		work->nr_pages -= write_chunk - wbc.nr_to_write;
		wrote += write_chunk - wbc.nr_to_write;
		spin_lock(&wb->list_lock);
		spin_lock(&inode->i_lock);
		if (!(inode->i_state & I_DIRTY_ALL))
			wrote++;
		requeue_inode(inode, wb, &wbc);
		inode_sync_complete(inode);
		spin_unlock(&inode->i_lock);
		cond_resched_lock(&wb->list_lock);
		/*
		 * bail out to wb_writeback() often enough to check
		 * background threshold and other termination conditions.
		 */
		if (wrote) {
			if (time_is_before_jiffies(start_time + HZ / 10UL))
				break;
			if (work->nr_pages <= 0)
				break;
		}
	}
	return wrote;
}

static long __writeback_inodes_wb(struct bdi_writeback *wb,
				  struct wb_writeback_work *work)
{
	unsigned long start_time = jiffies;
	long wrote = 0;

	while (!list_empty(&wb->b_io)) {
		struct inode *inode = wb_inode(wb->b_io.prev);
		struct super_block *sb = inode->i_sb;

		if (!trylock_super(sb)) {
			/*
			 * trylock_super() may fail consistently due to
			 * s_umount being grabbed by someone else. Don't use
			 * requeue_io() to avoid busy retrying the inode/sb.
			 */
			redirty_tail(inode, wb);
			continue;
		}
		wrote += writeback_sb_inodes(sb, wb, work);
		up_read(&sb->s_umount);

		/* refer to the same tests at the end of writeback_sb_inodes */
		if (wrote) {
			if (time_is_before_jiffies(start_time + HZ / 10UL))
				break;
			if (work->nr_pages <= 0)
				break;
		}
	}
	/* Leave any unwritten inodes on b_io */
	return wrote;
}

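/*
 * Write back up to @nr_pages pages of dirty data on this bdi_writeback in
 * WB_SYNC_NONE mode and return the number of pages written.
 */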
static long writeback_inodes_wb(struct bdi_writeback *wb, long nr_pages,
				enum wb_reason reason)
{
	struct wb_writeback_work work = {
		.nr_pages	= nr_pages,
		.sync_mode	= WB_SYNC_NONE,
		.range_cyclic	= 1,
		.reason		= reason,
	};

	spin_lock(&wb->list_lock);
	if (list_empty(&wb->b_io))
		queue_io(wb, &work);
	__writeback_inodes_wb(wb, &work);
	spin_unlock(&wb->list_lock);

	return nr_pages - work.nr_pages;
}

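/*
 * Return true if either the global amount of dirty memory or this bdi's
 * share of reclaimable memory is above its background writeback threshold.
 */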
static bool over_bground_thresh(struct backing_dev_info *bdi)
{
	unsigned long background_thresh, dirty_thresh;

	global_dirty_limits(&background_thresh, &dirty_thresh);

	if (global_page_state(NR_FILE_DIRTY) +
	    global_page_state(NR_UNSTABLE_NFS) > background_thresh)
		return true;

	if (bdi_stat(bdi, BDI_RECLAIMABLE) >
				bdi_dirty_limit(bdi, background_thresh))
		return true;

	return false;
}

/*
 * Called under wb->list_lock. If there are multiple wb per bdi,
 * only the flusher working on the first wb should do it.
 */
static void wb_update_bandwidth(struct bdi_writeback *wb,
				unsigned long start_time)
{
	__bdi_update_bandwidth(wb->bdi, 0, 0, 0, 0, 0, start_time);
}

/*
 * Explicit flushing or periodic writeback of "old" data.
 *
 * Define "old": the first time one of an inode's pages is dirtied, we mark the
 * dirtying-time in the inode's address_space.  So this periodic writeback code
 * just walks the superblock inode list, writing back any inodes which are
 * older than a specific point in time.
 *
 * Try to run once per dirty_writeback_interval.  But if a writeback event
 * takes longer than a dirty_writeback_interval interval, then leave a
 * one-second gap.
 *
 * older_than_this takes precedence over nr_to_write.  So we'll only write back
 * all dirty pages if they are all attached to "old" mappings.
 */
static long wb_writeback(struct bdi_writeback *wb,
			 struct wb_writeback_work *work)
{
	unsigned long wb_start = jiffies;
	long nr_pages = work->nr_pages;
	unsigned long oldest_jif;
	struct inode *inode;
	long progress;

	oldest_jif = jiffies;
	work->older_than_this = &oldest_jif;

	spin_lock(&wb->list_lock);
	for (;;) {
		/*
		 * Stop writeback when nr_pages has been consumed
		 */
		if (work->nr_pages <= 0)
			break;

		/*
		 * Background writeout and kupdate-style writeback may
		 * run forever. Stop them if there is other work to do
		 * so that e.g. sync can proceed. They'll be restarted
		 * after the other works are all done.
		 */
		if ((work->for_background || work->for_kupdate) &&
		    !list_empty(&wb->bdi->work_list))
			break;

		/*
		 * For background writeout, stop when we are below the
		 * background dirty threshold
		 */
		if (work->for_background && !over_bground_thresh(wb->bdi))
			break;

		/*
		 * Kupdate and background works are special and we want to
		 * include all inodes that need writing. Livelock avoidance is
		 * handled by these works yielding to any other work so we are
		 * safe.
		 */
		if (work->for_kupdate) {
			oldest_jif = jiffies -
				msecs_to_jiffies(dirty_expire_interval * 10);
		} else if (work->for_background)
			oldest_jif = jiffies;

		trace_writeback_start(wb->bdi, work);
		if (list_empty(&wb->b_io))
			queue_io(wb, work);
		if (work->sb)
			progress = writeback_sb_inodes(work->sb, wb, work);
		else
			progress = __writeback_inodes_wb(wb, work);
		trace_writeback_written(wb->bdi, work);

		wb_update_bandwidth(wb, wb_start);

		/*
		 * Did we write something? Try for more
		 *
		 * Dirty inodes are moved to b_io for writeback in batches.
		 * The completion of the current batch does not necessarily
		 * mean the overall work is done. So we keep looping as long
		 * as we made some progress on cleaning pages or inodes.
		 */
		if (progress)
			continue;
		/*
		 * No more inodes for IO, bail
		 */
		if (list_empty(&wb->b_more_io))
			break;
		/*
		 * Nothing written. Wait for some inode to
		 * become available for writeback. Otherwise
		 * we'll just busyloop.
		 */
		if (!list_empty(&wb->b_more_io))  {
			trace_writeback_wait(wb->bdi, work);
			inode = wb_inode(wb->b_more_io.prev);
			spin_lock(&inode->i_lock);
			spin_unlock(&wb->list_lock);
			/* This function drops i_lock... */
			inode_sleep_on_writeback(inode);
			spin_lock(&wb->list_lock);
		}
	}
	spin_unlock(&wb->list_lock);

	return nr_pages - work->nr_pages;
}

/*
 * Return the next wb_writeback_work struct that hasn't been processed yet.
 */
static struct wb_writeback_work *
get_next_work_item(struct backing_dev_info *bdi)
{
	struct wb_writeback_work *work = NULL;

	spin_lock_bh(&bdi->wb_lock);
	if (!list_empty(&bdi->work_list)) {
		work = list_entry(bdi->work_list.next,
				  struct wb_writeback_work, list);
		list_del_init(&work->list);
	}
	spin_unlock_bh(&bdi->wb_lock);
	return work;
}

/*
 * Add in the number of potentially dirty inodes, because each inode
 * write can dirty pagecache in the underlying blockdev.
 */
static unsigned long get_nr_dirty_pages(void)
{
	return global_page_state(NR_FILE_DIRTY) +
		global_page_state(NR_UNSTABLE_NFS) +
		get_nr_dirty_inodes();
}

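/*
 * If we are over the background dirty threshold, run background writeback
 * until we drop back below it (or until other work is queued).
 */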
static long wb_check_background_flush(struct bdi_writeback *wb)
{
	if (over_bground_thresh(wb->bdi)) {

		struct wb_writeback_work work = {
			.nr_pages	= LONG_MAX,
			.sync_mode	= WB_SYNC_NONE,
			.for_background	= 1,
			.range_cyclic	= 1,
			.reason		= WB_REASON_BACKGROUND,
		};

		return wb_writeback(wb, &work);
	}

	return 0;
}

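/*
 * kupdate-style periodic writeback: at most once every
 * dirty_writeback_interval, flush data that has been dirty for longer
 * than dirty_expire_interval.
 */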
static long wb_check_old_data_flush(struct bdi_writeback *wb)
{
	unsigned long expired;
	long nr_pages;

	/*
	 * When set to zero, disable periodic writeback
	 */
	if (!dirty_writeback_interval)
		return 0;

	expired = wb->last_old_flush +
			msecs_to_jiffies(dirty_writeback_interval * 10);
	if (time_before(jiffies, expired))
		return 0;

	wb->last_old_flush = jiffies;
	nr_pages = get_nr_dirty_pages();

	if (nr_pages) {
		struct wb_writeback_work work = {
			.nr_pages	= nr_pages,
			.sync_mode	= WB_SYNC_NONE,
			.for_kupdate	= 1,
			.range_cyclic	= 1,
			.reason		= WB_REASON_PERIODIC,
		};

		return wb_writeback(wb, &work);
	}

	return 0;
}

/*
 * Retrieve work items and do the writeback they describe
 */
static long wb_do_writeback(struct bdi_writeback *wb)
{
	struct backing_dev_info *bdi = wb->bdi;
	struct wb_writeback_work *work;
	long wrote = 0;

	set_bit(BDI_writeback_running, &wb->bdi->state);
	while ((work = get_next_work_item(bdi)) != NULL) {

		trace_writeback_exec(bdi, work);

		wrote += wb_writeback(wb, work);

		/*
		 * Notify the caller of completion if this is a synchronous
		 * work item, otherwise just free it.
		 */
		if (work->done)
			complete(work->done);
		else
			kfree(work);
	}

	/*
	 * Check for periodic writeback, kupdated() style
	 */
	wrote += wb_check_old_data_flush(wb);
	wrote += wb_check_background_flush(wb);
	clear_bit(BDI_writeback_running, &wb->bdi->state);

	return wrote;
}

/*
 * Handle writeback of dirty data for the device backed by this bdi. Also
 * reschedules periodically and does kupdated style flushing.
 */
void bdi_writeback_workfn(struct work_struct *work)
{
	struct bdi_writeback *wb = container_of(to_delayed_work(work),
						struct bdi_writeback, dwork);
	struct backing_dev_info *bdi = wb->bdi;
	long pages_written;

	set_worker_desc("flush-%s", dev_name(bdi->dev));
	current->flags |= PF_SWAPWRITE;

	if (likely(!current_is_workqueue_rescuer() ||
		   !test_bit(BDI_registered, &bdi->state))) {
		/*
		 * The normal path.  Keep writing back @bdi until its
		 * work_list is empty.  Note that this path is also taken
		 * if @bdi is shutting down even when we're running off the
		 * rescuer as work_list needs to be drained.
		 */
		do {
			pages_written = wb_do_writeback(wb);
			trace_writeback_pages_written(pages_written);
		} while (!list_empty(&bdi->work_list));
	} else {
		/*
		 * bdi_wq can't get enough workers and we're running off
		 * the emergency worker.  Don't hog it.  Hopefully, 1024 is
		 * enough for efficient IO.
		 */
		pages_written = writeback_inodes_wb(&bdi->wb, 1024,
						    WB_REASON_FORKER_THREAD);
		trace_writeback_pages_written(pages_written);
	}

	if (!list_empty(&bdi->work_list))
		mod_delayed_work(bdi_wq, &wb->dwork, 0);
	else if (wb_has_dirty_io(wb) && dirty_writeback_interval)
		bdi_wakeup_thread_delayed(bdi);

	current->flags &= ~PF_SWAPWRITE;
}

/*
 * Start writeback of `nr_pages' pages.  If `nr_pages' is zero, write back
 * the whole world.
 */
void wakeup_flusher_threads(long nr_pages, enum wb_reason reason)
{
	struct backing_dev_info *bdi;

	if (!nr_pages)
		nr_pages = get_nr_dirty_pages();

	rcu_read_lock();
	list_for_each_entry_rcu(bdi, &bdi_list, bdi_list) {
		if (!bdi_has_dirty_io(bdi))
			continue;
		__bdi_start_writeback(bdi, nr_pages, false, reason);
	}
	rcu_read_unlock();
}

/*
 * Wake up bdi's periodically to make sure dirtytime inodes get
 * written back periodically.  We deliberately do *not* check the
 * b_dirtytime list in wb_has_dirty_io(), since this would cause the
 * kernel to be constantly waking up once there are any dirtytime
 * inodes on the system.  So instead we define a separate delayed work
 * function which gets called much more rarely.  (By default, only
 * once every 12 hours.)
 *
 * If there is any other write activity going on in the file system,
 * this function won't be necessary.  But if the only thing that has
 * happened on the file system is a dirtytime inode caused by an atime
 * update, we need this infrastructure below to make sure that inode
 * eventually gets pushed out to disk.
 */
static void wakeup_dirtytime_writeback(struct work_struct *w);
static DECLARE_DELAYED_WORK(dirtytime_work, wakeup_dirtytime_writeback);

static void wakeup_dirtytime_writeback(struct work_struct *w)
{
	struct backing_dev_info *bdi;

	rcu_read_lock();
	list_for_each_entry_rcu(bdi, &bdi_list, bdi_list) {
		if (list_empty(&bdi->wb.b_dirty_time))
			continue;
		bdi_wakeup_thread(bdi);
	}
	rcu_read_unlock();
	schedule_delayed_work(&dirtytime_work, dirtytime_expire_interval * HZ);
}

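/* Arm the dirtytime writeback timer once at boot. */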
static int __init start_dirtytime_writeback(void)
{
	schedule_delayed_work(&dirtytime_work, dirtytime_expire_interval * HZ);
	return 0;
}
__initcall(start_dirtytime_writeback);

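/*
 * sysctl handler for the dirtytime expiry interval: on a successful write
 * of a new value, kick the dirtytime work so the new interval takes effect
 * promptly.
 */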
int dirtytime_interval_handler(struct ctl_table *table, int write,
			       void __user *buffer, size_t *lenp, loff_t *ppos)
{
	int ret;

	ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
	if (ret == 0 && write)
		mod_delayed_work(system_wq, &dirtytime_work, 0);
	return ret;
}

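/*
 * With block_dump enabled, log which task dirtied which inode.  Best effort:
 * the name is taken from whatever alias dentry can be found.
 */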
static noinline void block_dump___mark_inode_dirty(struct inode *inode)
{
	if (inode->i_ino || strcmp(inode->i_sb->s_id, "bdev")) {
		struct dentry *dentry;
		const char *name = "?";

		dentry = d_find_alias(inode);
		if (dentry) {
			spin_lock(&dentry->d_lock);
			name = (const char *) dentry->d_name.name;
		}
		printk(KERN_DEBUG
		       "%s(%d): dirtied inode %lu (%s) on %s\n",
		       current->comm, task_pid_nr(current), inode->i_ino,
		       name, inode->i_sb->s_id);
		if (dentry) {
			spin_unlock(&dentry->d_lock);
			dput(dentry);
		}
	}
}

/**
 *	__mark_inode_dirty -	internal function
 *	@inode: inode to mark
 *	@flags: what kind of dirty (i.e. I_DIRTY_SYNC)
 *	Mark an inode as dirty. Callers should use mark_inode_dirty or
 *  	mark_inode_dirty_sync.
 *
 * Put the inode on the super block's dirty list.
 *
 * CAREFUL! We mark it dirty unconditionally, but move it onto the
 * dirty list only if it is hashed or if it refers to a blockdev.
 * If it was not hashed, it will never be added to the dirty list
 * even if it is later hashed, as it will have been marked dirty already.
 *
 * In short, make sure you hash any inodes _before_ you start marking
 * them dirty.
 *
 * Note that for blockdevs, inode->dirtied_when represents the dirtying time of
 * the block-special inode (/dev/hda1) itself.  And the ->dirtied_when field of
 * the kernel-internal blockdev inode represents the dirtying time of the
 * blockdev's pages.  This is why for I_DIRTY_PAGES we always use
 * page->mapping->host, so the page-dirtying time is recorded in the internal
 * blockdev inode.
 */
#define I_DIRTY_INODE (I_DIRTY_SYNC | I_DIRTY_DATASYNC)
void __mark_inode_dirty(struct inode *inode, int flags)
{
	struct super_block *sb = inode->i_sb;
	struct backing_dev_info *bdi = NULL;
	int dirtytime;

	trace_writeback_mark_inode_dirty(inode, flags);

	/*
	 * Don't do this for I_DIRTY_PAGES - that doesn't actually
	 * dirty the inode itself
	 */
	if (flags & (I_DIRTY_SYNC | I_DIRTY_DATASYNC | I_DIRTY_TIME)) {
		trace_writeback_dirty_inode_start(inode, flags);

		if (sb->s_op->dirty_inode)
			sb->s_op->dirty_inode(inode, flags);

		trace_writeback_dirty_inode(inode, flags);
	}
	if (flags & I_DIRTY_INODE)
		flags &= ~I_DIRTY_TIME;
	dirtytime = flags & I_DIRTY_TIME;

	/*
	 * Paired with smp_mb() in __writeback_single_inode() for the
	 * following lockless i_state test.  See there for details.
	 */
	smp_mb();

	if (((inode->i_state & flags) == flags) ||
	    (dirtytime && (inode->i_state & I_DIRTY_INODE)))
		return;

	if (unlikely(block_dump))
		block_dump___mark_inode_dirty(inode);

	spin_lock(&inode->i_lock);
	if (dirtytime && (inode->i_state & I_DIRTY_INODE))
		goto out_unlock_inode;
	if ((inode->i_state & flags) != flags) {
		const int was_dirty = inode->i_state & I_DIRTY;

		if (flags & I_DIRTY_INODE)
			inode->i_state &= ~I_DIRTY_TIME;
		inode->i_state |= flags;

		/*
		 * If the inode is being synced, just update its dirty state.
		 * The unlocker will place the inode on the appropriate
		 * superblock list, based upon its state.
		 */
		if (inode->i_state & I_SYNC)
			goto out_unlock_inode;

		/*
		 * Only add valid (hashed) inodes to the superblock's
		 * dirty list.  Add blockdev inodes as well.
		 */
		if (!S_ISBLK(inode->i_mode)) {
			if (inode_unhashed(inode))
				goto out_unlock_inode;
		}
		if (inode->i_state & I_FREEING)
			goto out_unlock_inode;

		/*
		 * If the inode was already on b_dirty/b_io/b_more_io, don't
		 * reposition it (that would break b_dirty time-ordering).
		 */
		if (!was_dirty) {
			bool wakeup_bdi = false;
			bdi = inode_to_bdi(inode);

			spin_unlock(&inode->i_lock);
			spin_lock(&bdi->wb.list_lock);
			if (bdi_cap_writeback_dirty(bdi)) {
				WARN(!test_bit(BDI_registered, &bdi->state),
				     "bdi-%s not registered\n", bdi->name);

				/*
				 * If this is the first dirty inode for this
				 * bdi, we have to wake-up the corresponding
				 * bdi thread to make sure background
				 * write-back happens later.
				 */
				if (!wb_has_dirty_io(&bdi->wb))
					wakeup_bdi = true;
			}

			inode->dirtied_when = jiffies;
			if (dirtytime)
				inode->dirtied_time_when = jiffies;
			if (inode->i_state & (I_DIRTY_INODE | I_DIRTY_PAGES))
				list_move(&inode->i_wb_list, &bdi->wb.b_dirty);
			else
				list_move(&inode->i_wb_list,
					  &bdi->wb.b_dirty_time);
			spin_unlock(&bdi->wb.list_lock);
			trace_writeback_dirty_inode_enqueue(inode);

			if (wakeup_bdi)
				bdi_wakeup_thread_delayed(bdi);
			return;
		}
	}
out_unlock_inode:
	spin_unlock(&inode->i_lock);

}
EXPORT_SYMBOL(__mark_inode_dirty);

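/*
 * Wait for page writeback to complete on all inodes of @sb.  Used by
 * sync_inodes_sb() after the WB_SYNC_ALL pass to provide data integrity.
 */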
static void wait_sb_inodes(struct super_block *sb)
{
	struct inode *inode, *old_inode = NULL;

	/*
	 * We need to be protected against the filesystem going from
	 * r/o to r/w or vice versa.
	 */
	WARN_ON(!rwsem_is_locked(&sb->s_umount));

	spin_lock(&inode_sb_list_lock);

	/*
	 * Data integrity sync. Must wait for all pages under writeback,
	 * because there may have been pages dirtied before our sync
	 * call, but which had writeout started before we write it out.
	 * In which case, the inode may not be on the dirty list, but
	 * we still have to wait for that writeout.
	 */
	list_for_each_entry(inode, &sb->s_inodes, i_sb_list) {
		struct address_space *mapping = inode->i_mapping;

		spin_lock(&inode->i_lock);
		if ((inode->i_state & (I_FREEING|I_WILL_FREE|I_NEW)) ||
		    (mapping->nrpages == 0)) {
			spin_unlock(&inode->i_lock);
			continue;
		}
		__iget(inode);
		spin_unlock(&inode->i_lock);
		spin_unlock(&inode_sb_list_lock);

		/*
		 * We hold a reference to 'inode' so it couldn't have been
		 * removed from s_inodes list while we dropped the
		 * inode_sb_list_lock.  We cannot iput the inode now as we can
		 * be holding the last reference and we cannot iput it under
		 * inode_sb_list_lock. So we keep the reference and iput it
		 * later.
		 */
		iput(old_inode);
		old_inode = inode;

		filemap_fdatawait(mapping);

		cond_resched();

		spin_lock(&inode_sb_list_lock);
	}
	spin_unlock(&inode_sb_list_lock);
	iput(old_inode);
}

/**
 * writeback_inodes_sb_nr -	writeback dirty inodes from given super_block
 * @sb: the superblock
 * @nr: the number of pages to write
 * @reason: reason why some writeback work was initiated
 *
 * Start writeback on some inodes on this super_block. No guarantees are made
 * on how many (if any) will be written, and this function does not wait
 * for IO completion of submitted IO.
 */
void writeback_inodes_sb_nr(struct super_block *sb,
			    unsigned long nr,
			    enum wb_reason reason)
{
	DECLARE_COMPLETION_ONSTACK(done);
	struct wb_writeback_work work = {
		.sb			= sb,
		.sync_mode		= WB_SYNC_NONE,
		.tagged_writepages	= 1,
		.done			= &done,
		.nr_pages		= nr,
		.reason			= reason,
	};

	if (sb->s_bdi == &noop_backing_dev_info)
		return;
	WARN_ON(!rwsem_is_locked(&sb->s_umount));
	bdi_queue_work(sb->s_bdi, &work);
	wait_for_completion(&done);
}
EXPORT_SYMBOL(writeback_inodes_sb_nr);

/**
 * writeback_inodes_sb	-	writeback dirty inodes from given super_block
 * @sb: the superblock
 * @reason: reason why some writeback work was initiated
 *
 * Start writeback on some inodes on this super_block. No guarantees are made
 * on how many (if any) will be written, and this function does not wait
 * for IO completion of submitted IO.
 */
void writeback_inodes_sb(struct super_block *sb, enum wb_reason reason)
{
	return writeback_inodes_sb_nr(sb, get_nr_dirty_pages(), reason);
}
EXPORT_SYMBOL(writeback_inodes_sb);

/**
 * try_to_writeback_inodes_sb_nr - try to start writeback if none underway
 * @sb: the superblock
 * @nr: the number of pages to write
 * @reason: the reason of writeback
 *
 * Invoke writeback_inodes_sb_nr if no writeback is currently underway.
 * Returns 1 if writeback was started, 0 if not.
 */
int try_to_writeback_inodes_sb_nr(struct super_block *sb,
				  unsigned long nr,
				  enum wb_reason reason)
{
	if (writeback_in_progress(sb->s_bdi))
		return 1;

	if (!down_read_trylock(&sb->s_umount))
		return 0;

	writeback_inodes_sb_nr(sb, nr, reason);
	up_read(&sb->s_umount);
	return 1;
}
EXPORT_SYMBOL(try_to_writeback_inodes_sb_nr);

/**
 * try_to_writeback_inodes_sb - try to start writeback if none underway
 * @sb: the superblock
 * @reason: reason why some writeback work was initiated
 *
 * Implemented via try_to_writeback_inodes_sb_nr().
 * Returns 1 if writeback was started, 0 if not.
 */
int try_to_writeback_inodes_sb(struct super_block *sb, enum wb_reason reason)
{
	return try_to_writeback_inodes_sb_nr(sb, get_nr_dirty_pages(), reason);
}
EXPORT_SYMBOL(try_to_writeback_inodes_sb);

/**
 * sync_inodes_sb	-	sync sb inode pages
 * @sb: the superblock
 *
 * This function writes and waits on any dirty inode belonging to this
 * super_block.
 */
void sync_inodes_sb(struct super_block *sb)
{
	DECLARE_COMPLETION_ONSTACK(done);
	struct wb_writeback_work work = {
		.sb		= sb,
		.sync_mode	= WB_SYNC_ALL,
		.nr_pages	= LONG_MAX,
		.range_cyclic	= 0,
		.done		= &done,
		.reason		= WB_REASON_SYNC,
		.for_sync	= 1,
	};

	/* Nothing to do? */
	if (sb->s_bdi == &noop_backing_dev_info)
		return;
	WARN_ON(!rwsem_is_locked(&sb->s_umount));

	bdi_queue_work(sb->s_bdi, &work);
	wait_for_completion(&done);

	wait_sb_inodes(sb);
}
EXPORT_SYMBOL(sync_inodes_sb);

/**
 * write_inode_now	-	write an inode to disk
 * @inode: inode to write to disk
 * @sync: whether the write should be synchronous or not
 *
 * This function commits an inode to disk immediately if it is dirty. This is
 * primarily needed by knfsd.
 *
 * The caller must either have a ref on the inode or must have set I_WILL_FREE.
 */
int write_inode_now(struct inode *inode, int sync)
{
	struct bdi_writeback *wb = &inode_to_bdi(inode)->wb;
	struct writeback_control wbc = {
		.nr_to_write = LONG_MAX,
		.sync_mode = sync ? WB_SYNC_ALL : WB_SYNC_NONE,
		.range_start = 0,
		.range_end = LLONG_MAX,
	};

	if (!mapping_cap_writeback_dirty(inode->i_mapping))
		wbc.nr_to_write = 0;

	might_sleep();
	return writeback_single_inode(inode, wb, &wbc);
}
EXPORT_SYMBOL(write_inode_now);

/**
 * sync_inode - write an inode and its pages to disk.
 * @inode: the inode to sync
 * @wbc: controls the writeback mode
 *
 * sync_inode() will write an inode and its pages to disk.  It will also
 * correctly update the inode on its superblock's dirty inode lists and will
 * update inode->i_state.
 *
 * The caller must have a ref on the inode.
 */
int sync_inode(struct inode *inode, struct writeback_control *wbc)
{
	return writeback_single_inode(inode, &inode_to_bdi(inode)->wb, wbc);
}
EXPORT_SYMBOL(sync_inode);

/**
 * sync_inode_metadata - write an inode to disk
 * @inode: the inode to sync
 * @wait: wait for I/O to complete.
 *
 * Write an inode to disk and adjust its dirty state after completion.
 *
 * Note: only writes the actual inode, no associated data or other metadata.
 */
int sync_inode_metadata(struct inode *inode, int wait)
{
	struct writeback_control wbc = {
		.sync_mode = wait ? WB_SYNC_ALL : WB_SYNC_NONE,
		.nr_to_write = 0, /* metadata-only */
	};

	return sync_inode(inode, &wbc);
}
EXPORT_SYMBOL(sync_inode_metadata);