/*
 *  linux/mm/swap_state.c
 *
 *  Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 *  Swap reorganised 29.12.95, Stephen Tweedie
 *
 *  Rewritten to use page cache, (C) 1998 Stephen Tweedie
 */
#include <linux/mm.h>
#include <linux/gfp.h>
#include <linux/kernel_stat.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/init.h>
#include <linux/pagemap.h>
#include <linux/backing-dev.h>
#include <linux/blkdev.h>
#include <linux/pagevec.h>
#include <linux/migrate.h>

#include <asm/pgtable.h>

/*
 * swapper_space is a fiction, retained to simplify the path through
 * vmscan's shrink_page_list.
 */
static const struct address_space_operations swap_aops = {
	.writepage	= swap_writepage,
	.set_page_dirty	= swap_set_page_dirty,
#ifdef CONFIG_MIGRATION
	.migratepage	= migrate_page,
#endif
};

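/*
 * One swapper address_space per swap file: swap_address_space() indexes
 * this array by swp_type(entry).
 */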
struct address_space swapper_spaces[MAX_SWAPFILES] = {
	[0 ... MAX_SWAPFILES - 1] = {
		.page_tree	= RADIX_TREE_INIT(GFP_ATOMIC|__GFP_NOWARN),
		.i_mmap_writable = ATOMIC_INIT(0),
		.a_ops		= &swap_aops,
	}
};

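/*
 * These counters are bumped without any locking: they are statistics
 * only, so an occasional lost increment under concurrency is tolerated.
 */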
#define INC_CACHE_INFO(x)	do { swap_cache_info.x++; } while (0)

static struct {
	unsigned long add_total;
	unsigned long del_total;
	unsigned long find_success;
	unsigned long find_total;
} swap_cache_info;

unsigned long total_swapcache_pages(void)
{
	int i;
	unsigned long ret = 0;

	for (i = 0; i < MAX_SWAPFILES; i++)
		ret += swapper_spaces[i].nrpages;
	return ret;
}

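/*
 * Seeds the readahead heuristic: swapin_nr_pages() consumes these hits
 * with atomic_xchg(), so starting at 4 gives the first faults a modest
 * readahead window rather than none.
 */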
static atomic_t swapin_readahead_hits = ATOMIC_INIT(4);

void show_swap_cache_info(void)
{
	printk("%lu pages in swap cache\n", total_swapcache_pages());
	printk("Swap cache stats: add %lu, delete %lu, find %lu/%lu\n",
		swap_cache_info.add_total, swap_cache_info.del_total,
		swap_cache_info.find_success, swap_cache_info.find_total);
	printk("Free swap  = %ldkB\n",
		get_nr_swap_pages() << (PAGE_SHIFT - 10));
	printk("Total swap = %lukB\n", total_swap_pages << (PAGE_SHIFT - 10));
}

/*
 * __add_to_swap_cache resembles add_to_page_cache_locked on swapper_space,
 * but sets SwapCache flag and private instead of mapping and index.
 */
int __add_to_swap_cache(struct page *page, swp_entry_t entry)
{
	int error;
	struct address_space *address_space;

	VM_BUG_ON_PAGE(!PageLocked(page), page);
	VM_BUG_ON_PAGE(PageSwapCache(page), page);
	VM_BUG_ON_PAGE(!PageSwapBacked(page), page);

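	/*
	 * Take the page-cache reference and set up the swap-cache state
	 * before the radix-tree insertion, so lockless lookups that find
	 * the page immediately afterwards see it fully formed.
	 */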
	page_cache_get(page);
	SetPageSwapCache(page);
	set_page_private(page, entry.val);

	address_space = swap_address_space(entry);
	spin_lock_irq(&address_space->tree_lock);
	error = radix_tree_insert(&address_space->page_tree,
					entry.val, page);
	if (likely(!error)) {
		address_space->nrpages++;
		__inc_zone_page_state(page, NR_FILE_PAGES);
		INC_CACHE_INFO(add_total);
	}
	spin_unlock_irq(&address_space->tree_lock);

	if (unlikely(error)) {
		/*
		 * Only a context which has set the SWAP_HAS_CACHE flag
		 * calls add_to_swap_cache(), so it never sees -EEXIST
		 * here: the entry cannot already be in the swap cache.
		 */
		VM_BUG_ON(error == -EEXIST);
		set_page_private(page, 0UL);
		ClearPageSwapCache(page);
		page_cache_release(page);
	}

	return error;
}

int add_to_swap_cache(struct page *page, swp_entry_t entry, gfp_t gfp_mask)
{
	int error;

	error = radix_tree_maybe_preload(gfp_mask);
	if (!error) {
		error = __add_to_swap_cache(page, entry);
		radix_tree_preload_end();
	}
	return error;
}

/*
 * This must be called only on pages that have
 * been verified to be in the swap cache, and with
 * the address_space tree_lock held by the caller.
 */
void __delete_from_swap_cache(struct page *page)
{
	swp_entry_t entry;
	struct address_space *address_space;

	VM_BUG_ON_PAGE(!PageLocked(page), page);
	VM_BUG_ON_PAGE(!PageSwapCache(page), page);
	VM_BUG_ON_PAGE(PageWriteback(page), page);

	entry.val = page_private(page);
	address_space = swap_address_space(entry);
	radix_tree_delete(&address_space->page_tree, page_private(page));
	set_page_private(page, 0);
	ClearPageSwapCache(page);
	address_space->nrpages--;
	__dec_zone_page_state(page, NR_FILE_PAGES);
	INC_CACHE_INFO(del_total);
}

/**
 * add_to_swap - allocate swap space for a page
 * @page: page we want to move to swap
 * @list: list passed to split_huge_page_to_list() if the page turns out
 *        to be transparent huge and must be split first
 *
 * Allocate swap space for the page and add the page to the
 * swap cache.  Caller needs to hold the page lock.
 *
 * Returns 1 on success, 0 on failure (no swap space left, failed huge
 * page split, or failed swap-cache insertion).
 */
int add_to_swap(struct page *page, struct list_head *list)
{
	swp_entry_t entry;
	int err;

	VM_BUG_ON_PAGE(!PageLocked(page), page);
	VM_BUG_ON_PAGE(!PageUptodate(page), page);

	entry = get_swap_page();
	if (!entry.val)
		return 0;

	if (unlikely(PageTransHuge(page)))
		if (unlikely(split_huge_page_to_list(page, list))) {
			swapcache_free(entry);
			return 0;
		}

	/*
	 * Radix-tree node allocations from PF_MEMALLOC contexts could
	 * completely exhaust the page allocator. __GFP_NOMEMALLOC
	 * stops emergency reserves from being allocated.
	 *
	 * TODO: this could cause a theoretical memory reclaim
	 * deadlock in the swap out path.
	 *
	 * Add it to the swap cache and mark it dirty.
	 */
	err = add_to_swap_cache(page, entry,
			__GFP_HIGH|__GFP_NOMEMALLOC|__GFP_NOWARN);

	if (!err) {	/* Success */
		SetPageDirty(page);
		return 1;
	} else {	/* -ENOMEM radix-tree allocation failure */
		/*
		 * add_to_swap_cache() doesn't return -EEXIST, so we can safely
		 * clear SWAP_HAS_CACHE flag.
		 */
		swapcache_free(entry);
		return 0;
	}
}

/*
 * This must be called only on pages that have
 * been verified to be in the swap cache and locked.
 * It will never put the page into the free list,
 * the caller has a reference on the page.
 */
void delete_from_swap_cache(struct page *page)
{
	swp_entry_t entry;
	struct address_space *address_space;

	entry.val = page_private(page);

	address_space = swap_address_space(entry);
	spin_lock_irq(&address_space->tree_lock);
	__delete_from_swap_cache(page);
	spin_unlock_irq(&address_space->tree_lock);

	swapcache_free(entry);
	page_cache_release(page);
}
/*
 * If we are the only user, then try to free up the swap cache.
 *
 * It's OK to check for PageSwapCache without the page lock
 * here because we are going to recheck again inside
 * try_to_free_swap() _with_ the lock.
 * 					- Marcelo
 */
static inline void free_swap_cache(struct page *page)
{
	if (PageSwapCache(page) && !page_mapped(page) && trylock_page(page)) {
		try_to_free_swap(page);
		unlock_page(page);
	}
}

/*
 * Perform a free_page(), also freeing any swap cache associated with
 * this page if it is the last user of the page.
 */
void free_page_and_swap_cache(struct page *page)
{
	free_swap_cache(page);
	page_cache_release(page);
}

/*
 * Passed an array of pages, drop them all from swapcache and then release
 * them.  They are removed from the LRU and freed if this is their last use.
 */
void free_pages_and_swap_cache(struct page **pages, int nr)
{
	struct page **pagep = pages;
	int i;

	lru_add_drain();
	for (i = 0; i < nr; i++)
		free_swap_cache(pagep[i]);
	release_pages(pagep, nr, false);
}

/*
 * Lookup a swap entry in the swap cache. A found page will be returned
 * unlocked and with its refcount incremented - we rely on the kernel
 * lock getting page table operations atomic even if we drop the page
 * lock before returning.
 */
struct page *lookup_swap_cache(swp_entry_t entry)
{
	struct page *page;

	page = find_get_page(swap_address_space(entry), entry.val);

	if (page) {
		INC_CACHE_INFO(find_success);
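		/*
		 * A hit on a page that swapin_readahead() marked feeds
		 * back into the swapin_nr_pages() window heuristic.
		 */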
		if (TestClearPageReadahead(page))
			atomic_inc(&swapin_readahead_hits);
	}

	INC_CACHE_INFO(find_total);
	return page;
}

/*
 * Locate a page of swap in physical memory, reserving swap cache space
 * and reading the disk if it is not already cached.
 * A failure return means that either the page allocation failed or that
 * the swap entry is no longer in use.
 */
struct page *read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
			struct vm_area_struct *vma, unsigned long addr)
{
	struct page *found_page, *new_page = NULL;
	int err;

	do {
		/*
		 * First check the swap cache.  Since this is normally
		 * called after lookup_swap_cache() failed, re-calling
		 * that would confuse statistics.
		 */
		found_page = find_get_page(swap_address_space(entry),
					entry.val);
		if (found_page)
			break;

		/*
		 * Get a new page to read into from swap.
		 */
		if (!new_page) {
			new_page = alloc_page_vma(gfp_mask, vma, addr);
			if (!new_page)
				break;		/* Out of memory */
		}

		/*
		 * Call radix_tree_preload() while we can still wait.
		 */
		err = radix_tree_maybe_preload(gfp_mask & GFP_KERNEL);
		if (err)
			break;

		/*
		 * Swap entry may have been freed since our caller observed it.
		 */
		err = swapcache_prepare(entry);
		if (err == -EEXIST) {
			radix_tree_preload_end();
			/*
			 * We might race against get_swap_page() and stumble
			 * across a SWAP_HAS_CACHE swap_map entry whose page
			 * has not been brought into the swapcache yet, while
			 * the other end is scheduled away waiting on discard
			 * I/O completion at scan_swap_map().
			 *
			 * In order to avoid turning this transitory state
			 * into a permanent loop around this -EEXIST case
			 * if !CONFIG_PREEMPT and the I/O completion happens
			 * to be waiting on the CPU waitqueue where we are now
			 * busy looping, we just conditionally invoke the
			 * scheduler here, if there are some more important
			 * tasks to run.
			 */
			cond_resched();
			continue;
		}
		if (err) {		/* swp entry is obsolete? */
			radix_tree_preload_end();
			break;
		}

		/*
		 * __add_to_swap_cache() below may fail (-ENOMEM) if
		 * radix-tree node allocation failed.
		 */
		__set_page_locked(new_page);
		SetPageSwapBacked(new_page);
		err = __add_to_swap_cache(new_page, entry);
		if (likely(!err)) {
			radix_tree_preload_end();
			/*
			 * Initiate read into locked page and return.
			 */
			lru_cache_add_anon(new_page);
			swap_readpage(new_page);
			return new_page;
		}
		radix_tree_preload_end();
		ClearPageSwapBacked(new_page);
		__clear_page_locked(new_page);
		/*
		 * add_to_swap_cache() doesn't return -EEXIST, so we can safely
		 * clear SWAP_HAS_CACHE flag.
		 */
		swapcache_free(entry);
	} while (err != -ENOMEM);

	if (new_page)
		page_cache_release(new_page);
	return found_page;
}

static unsigned long swapin_nr_pages(unsigned long offset)
{
	static unsigned long prev_offset;
	unsigned int pages, max_pages, last_ra;
	static atomic_t last_readahead_pages;

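	/* The vm.page-cluster sysctl bounds the readahead window. */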
	max_pages = 1 << READ_ONCE(page_cluster);
	if (max_pages <= 1)
		return 1;

	/*
	 * This heuristic has been found to work well on both sequential and
	 * random loads, swapping to hard disk or to SSD: please don't ask
	 * what the "+ 2" means, it just happens to work well, that's all.
	 */
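	/*
	 * Worked example: 5 readahead hits since the last fault give
	 * pages = 5 + 2 = 7, which the power-of-two round-up below turns
	 * into 8, subject to the max_pages clamp and the "don't shrink
	 * too fast" floor.
	 */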
	pages = atomic_xchg(&swapin_readahead_hits, 0) + 2;
	if (pages == 2) {
		/*
		 * We can have no readahead hits to judge by: but must not get
		 * stuck here forever, so check for an adjacent offset instead
		 * (and don't even bother to check whether swap type is same).
		 */
		if (offset != prev_offset + 1 && offset != prev_offset - 1)
			pages = 1;
		prev_offset = offset;
	} else {
		unsigned int roundup = 4;
		while (roundup < pages)
			roundup <<= 1;
		pages = roundup;
	}

	if (pages > max_pages)
		pages = max_pages;

	/* Don't shrink readahead too fast */
	last_ra = atomic_read(&last_readahead_pages) / 2;
	if (pages < last_ra)
		pages = last_ra;
	atomic_set(&last_readahead_pages, pages);

	return pages;
}
430
431/**
432 * swapin_readahead - swap in pages in hope we need them soon
433 * @entry: swap entry of this memory
434 * @gfp_mask: memory allocation flags
435 * @vma: user vma this address belongs to
436 * @addr: target address for mempolicy
437 *
438 * Returns the struct page for entry and addr, after queueing swapin.
439 *
440 * Primitive swap readahead code. We simply read an aligned block of
441 * (1 << page_cluster) entries in the swap area. This method is chosen
442 * because it doesn't cost us any seek time.  We also make sure to queue
443 * the 'original' request together with the readahead ones...
444 *
445 * This has been extended to use the NUMA policies from the mm triggering
446 * the readahead.
447 *
448 * Caller must hold down_read on the vma->vm_mm if vma is not NULL.
449 */
450struct page *swapin_readahead(swp_entry_t entry, gfp_t gfp_mask,
451			struct vm_area_struct *vma, unsigned long addr)
452{
453	struct page *page;
454	unsigned long entry_offset = swp_offset(entry);
455	unsigned long offset = entry_offset;
456	unsigned long start_offset, end_offset;
457	unsigned long mask;
458	struct blk_plug plug;
459
460	mask = swapin_nr_pages(offset) - 1;
461	if (!mask)
462		goto skip;
463
464	/* Read a page_cluster sized and aligned cluster around offset. */
465	start_offset = offset & ~mask;
466	end_offset = offset | mask;
467	if (!start_offset)	/* First page is swap header. */
468		start_offset++;
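	/*
	 * Example: a fault at offset 13 with an 8-page window (mask 7)
	 * reads offsets 8..15, the faulting entry included.
	 */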
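	/* Plug the block layer so the readahead bios go down as one batch. */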
	blk_start_plug(&plug);
	for (offset = start_offset; offset <= end_offset; offset++) {
		/* Ok, do the async read-ahead now */
		page = read_swap_cache_async(swp_entry(swp_type(entry), offset),
						gfp_mask, vma, addr);
		if (!page)
			continue;
		if (offset != entry_offset)
			SetPageReadahead(page);
		page_cache_release(page);
	}
	blk_finish_plug(&plug);

	lru_add_drain();	/* Push any new pages onto the LRU now */
skip:
	return read_swap_cache_async(entry, gfp_mask, vma, addr);
}