root/mm/vmscan.c


DEFINITIONS

This source file includes the following definitions (functions that appear twice have both a CONFIG_MEMCG and a !CONFIG_MEMCG variant).
  1. set_task_reclaim_state
  2. prealloc_memcg_shrinker
  3. unregister_memcg_shrinker
  4. global_reclaim
  5. sane_reclaim
  6. set_memcg_congestion
  7. memcg_congested
  8. prealloc_memcg_shrinker
  9. unregister_memcg_shrinker
  10. global_reclaim
  11. sane_reclaim
  12. set_memcg_congestion
  13. memcg_congested
  14. zone_reclaimable_pages
  15. lruvec_lru_size
  16. prealloc_shrinker
  17. free_prealloced_shrinker
  18. register_shrinker_prepared
  19. register_shrinker
  20. unregister_shrinker
  21. do_shrink_slab
  22. shrink_slab_memcg
  23. shrink_slab_memcg
  24. shrink_slab
  25. drop_slab_node
  26. drop_slab
  27. is_page_cache_freeable
  28. may_write_to_inode
  29. handle_write_error
  30. __remove_mapping
  31. remove_mapping
  32. putback_lru_page
  33. page_check_references
  34. page_check_dirty_writeback
  35. shrink_page_list
  36. reclaim_clean_pages_from_list
  37. __isolate_lru_page
  38. update_lru_sizes
  39. isolate_lru_pages
  40. isolate_lru_page
  41. too_many_isolated
  42. move_pages_to_lru
  43. current_may_throttle
  44. shrink_inactive_list
  45. shrink_active_list
  46. reclaim_pages
  47. inactive_list_is_low
  48. shrink_list
  49. get_scan_count
  50. shrink_node_memcg
  51. in_reclaim_compaction
  52. should_continue_reclaim
  53. pgdat_memcg_congested
  54. shrink_node
  55. compaction_ready
  56. shrink_zones
  57. snapshot_refaults
  58. do_try_to_free_pages
  59. allow_direct_reclaim
  60. throttle_direct_reclaim
  61. try_to_free_pages
  62. mem_cgroup_shrink_node
  63. try_to_free_mem_cgroup_pages
  64. age_active_anon
  65. pgdat_watermark_boosted
  66. pgdat_balanced
  67. clear_pgdat_congested
  68. prepare_kswapd_sleep
  69. kswapd_shrink_node
  70. balance_pgdat
  71. kswapd_classzone_idx
  72. kswapd_try_to_sleep
  73. kswapd
  74. wakeup_kswapd
  75. shrink_all_memory
  76. kswapd_cpu_online
  77. kswapd_run
  78. kswapd_stop
  79. kswapd_init
  80. node_unmapped_file_pages
  81. node_pagecache_reclaimable
  82. __node_reclaim
  83. node_reclaim
  84. page_evictable
  85. check_move_unevictable_pages

// SPDX-License-Identifier: GPL-2.0
/*
 *  linux/mm/vmscan.c
 *
 *  Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 *
 *  Swap reorganised 29.12.95, Stephen Tweedie.
 *  kswapd added: 7.1.96  sct
 *  Removed kswapd_ctl limits, and swap out as many pages as needed
 *  to bring the system back to freepages.high: 2.4.97, Rik van Riel.
 *  Zone aware kswapd started 02/00, Kanoj Sarcar (kanoj@sgi.com).
 *  Multiqueue VM started 5.8.00, Rik van Riel.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/mm.h>
#include <linux/sched/mm.h>
#include <linux/module.h>
#include <linux/gfp.h>
#include <linux/kernel_stat.h>
#include <linux/swap.h>
#include <linux/pagemap.h>
#include <linux/init.h>
#include <linux/highmem.h>
#include <linux/vmpressure.h>
#include <linux/vmstat.h>
#include <linux/file.h>
#include <linux/writeback.h>
#include <linux/blkdev.h>
#include <linux/buffer_head.h>  /* for try_to_release_page(),
                                        buffer_heads_over_limit */
#include <linux/mm_inline.h>
#include <linux/backing-dev.h>
#include <linux/rmap.h>
#include <linux/topology.h>
#include <linux/cpu.h>
#include <linux/cpuset.h>
#include <linux/compaction.h>
#include <linux/notifier.h>
#include <linux/rwsem.h>
#include <linux/delay.h>
#include <linux/kthread.h>
#include <linux/freezer.h>
#include <linux/memcontrol.h>
#include <linux/delayacct.h>
#include <linux/sysctl.h>
#include <linux/oom.h>
#include <linux/pagevec.h>
#include <linux/prefetch.h>
#include <linux/printk.h>
#include <linux/dax.h>
#include <linux/psi.h>

#include <asm/tlbflush.h>
#include <asm/div64.h>

#include <linux/swapops.h>
#include <linux/balloon_compaction.h>

#include "internal.h"

#define CREATE_TRACE_POINTS
#include <trace/events/vmscan.h>

struct scan_control {
        /* How many pages shrink_list() should reclaim */
        unsigned long nr_to_reclaim;

        /*
         * Nodemask of nodes allowed by the caller. If NULL, all nodes
         * are scanned.
         */
        nodemask_t      *nodemask;

        /*
         * The memory cgroup that hit its limit and as a result is the
         * primary target of this reclaim invocation.
         */
        struct mem_cgroup *target_mem_cgroup;

        /* Writepage batching in laptop mode; RECLAIM_WRITE */
        unsigned int may_writepage:1;

        /* Can mapped pages be reclaimed? */
        unsigned int may_unmap:1;

        /* Can pages be swapped as part of reclaim? */
        unsigned int may_swap:1;

        /*
         * Cgroups are not reclaimed below their configured memory.low,
         * unless we threaten to OOM. If any cgroups are skipped due to
         * memory.low and nothing was reclaimed, go back for memory.low.
         */
        unsigned int memcg_low_reclaim:1;
        unsigned int memcg_low_skipped:1;

        unsigned int hibernation_mode:1;

        /* One of the zones is ready for compaction */
        unsigned int compaction_ready:1;

        /* Allocation order */
        s8 order;

        /* Scan (total_size >> priority) pages at once */
        s8 priority;

        /* The highest zone to isolate pages for reclaim from */
        s8 reclaim_idx;

        /* This context's GFP mask */
        gfp_t gfp_mask;

        /* Incremented by the number of inactive pages that were scanned */
        unsigned long nr_scanned;

        /* Number of pages freed so far during a call to shrink_zones() */
        unsigned long nr_reclaimed;

        struct {
                unsigned int dirty;
                unsigned int unqueued_dirty;
                unsigned int congested;
                unsigned int writeback;
                unsigned int immediate;
                unsigned int file_taken;
                unsigned int taken;
        } nr;

        /* for recording the slab reclaimed so far */
        struct reclaim_state reclaim_state;
};

#ifdef ARCH_HAS_PREFETCH
#define prefetch_prev_lru_page(_page, _base, _field)                    \
        do {                                                            \
                if ((_page)->lru.prev != _base) {                       \
                        struct page *prev;                              \
                                                                        \
                        prev = lru_to_page(&(_page->lru));              \
                        prefetch(&prev->_field);                        \
                }                                                       \
        } while (0)
#else
#define prefetch_prev_lru_page(_page, _base, _field) do { } while (0)
#endif

#ifdef ARCH_HAS_PREFETCHW
#define prefetchw_prev_lru_page(_page, _base, _field)                   \
        do {                                                            \
                if ((_page)->lru.prev != _base) {                       \
                        struct page *prev;                              \
                                                                        \
                        prev = lru_to_page(&(_page->lru));              \
                        prefetchw(&prev->_field);                       \
                }                                                       \
        } while (0)
#else
#define prefetchw_prev_lru_page(_page, _base, _field) do { } while (0)
#endif

/*
 * From 0 .. 100.  Higher means more swappy.
 */
int vm_swappiness = 60;
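
/*
 * Note: vm_swappiness is exposed as the vm.swappiness sysctl
 * (/proc/sys/vm/swappiness).  For example, "sysctl vm.swappiness=10"
 * biases get_scan_count() toward file pages and away from anon pages.
 */
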
/*
 * The total number of pages which are beyond the high watermark within all
 * zones.
 */
unsigned long vm_total_pages;

static void set_task_reclaim_state(struct task_struct *task,
                                   struct reclaim_state *rs)
{
        /* Check for an overwrite */
        WARN_ON_ONCE(rs && task->reclaim_state);

        /* Check for the nulling of an already-nulled member */
        WARN_ON_ONCE(!rs && !task->reclaim_state);

        task->reclaim_state = rs;
}

static LIST_HEAD(shrinker_list);
static DECLARE_RWSEM(shrinker_rwsem);

#ifdef CONFIG_MEMCG
/*
 * We allow subsystems to populate their shrinker-related
 * LRU lists before register_shrinker_prepared() is called
 * for the shrinker, since we don't want to impose
 * restrictions on their internal registration order.
 * In this case shrink_slab_memcg() may find the corresponding
 * bit set in the shrinker map.
 *
 * This value is used by shrink_slab_memcg() to detect shrinkers
 * that are still registering and to skip do_shrink_slab() calls
 * for them.
 */
#define SHRINKER_REGISTERING ((struct shrinker *)~0UL)

static DEFINE_IDR(shrinker_idr);
static int shrinker_nr_max;

static int prealloc_memcg_shrinker(struct shrinker *shrinker)
{
        int id, ret = -ENOMEM;

        down_write(&shrinker_rwsem);
        /*
         * idr_alloc() may itself enter reclaim and call shrinkers;
         * this is why shrink_slab() only takes shrinker_rwsem with
         * down_read_trylock().
         */
        id = idr_alloc(&shrinker_idr, SHRINKER_REGISTERING, 0, 0, GFP_KERNEL);
        if (id < 0)
                goto unlock;

        if (id >= shrinker_nr_max) {
                if (memcg_expand_shrinker_maps(id)) {
                        idr_remove(&shrinker_idr, id);
                        goto unlock;
                }

                shrinker_nr_max = id + 1;
        }
        shrinker->id = id;
        ret = 0;
unlock:
        up_write(&shrinker_rwsem);
        return ret;
}

static void unregister_memcg_shrinker(struct shrinker *shrinker)
{
        int id = shrinker->id;

        BUG_ON(id < 0);

        down_write(&shrinker_rwsem);
        idr_remove(&shrinker_idr, id);
        up_write(&shrinker_rwsem);
}

static bool global_reclaim(struct scan_control *sc)
{
        return !sc->target_mem_cgroup;
}

/**
 * sane_reclaim - is the usual dirty throttling mechanism operational?
 * @sc: scan_control in question
 *
 * The normal page dirty throttling mechanism in balance_dirty_pages() is
 * completely broken with the legacy memcg and direct stalling in
 * shrink_page_list() is used for throttling instead, which lacks all the
 * niceties such as fairness, adaptive pausing, bandwidth proportional
 * allocation and configurability.
 *
 * This function tests whether the vmscan currently in progress can assume
 * that the normal dirty throttling mechanism is operational.
 */
static bool sane_reclaim(struct scan_control *sc)
{
        struct mem_cgroup *memcg = sc->target_mem_cgroup;

        if (!memcg)
                return true;
#ifdef CONFIG_CGROUP_WRITEBACK
        if (cgroup_subsys_on_dfl(memory_cgrp_subsys))
                return true;
#endif
        return false;
}

static void set_memcg_congestion(pg_data_t *pgdat,
                                struct mem_cgroup *memcg,
                                bool congested)
{
        struct mem_cgroup_per_node *mn;

        if (!memcg)
                return;

        mn = mem_cgroup_nodeinfo(memcg, pgdat->node_id);
        WRITE_ONCE(mn->congested, congested);
}

static bool memcg_congested(pg_data_t *pgdat,
                        struct mem_cgroup *memcg)
{
        struct mem_cgroup_per_node *mn;

        mn = mem_cgroup_nodeinfo(memcg, pgdat->node_id);
        return READ_ONCE(mn->congested);
}
#else
static int prealloc_memcg_shrinker(struct shrinker *shrinker)
{
        return 0;
}

static void unregister_memcg_shrinker(struct shrinker *shrinker)
{
}

static bool global_reclaim(struct scan_control *sc)
{
        return true;
}

static bool sane_reclaim(struct scan_control *sc)
{
        return true;
}

static inline void set_memcg_congestion(struct pglist_data *pgdat,
                                struct mem_cgroup *memcg, bool congested)
{
}

static inline bool memcg_congested(struct pglist_data *pgdat,
                        struct mem_cgroup *memcg)
{
        return false;
}
#endif

/*
 * This misses isolated pages which are not accounted for to save counters.
 * As the data only determines if reclaim or compaction continues, it is
 * not expected that isolated pages will be a dominating factor.
 */
unsigned long zone_reclaimable_pages(struct zone *zone)
{
        unsigned long nr;

        nr = zone_page_state_snapshot(zone, NR_ZONE_INACTIVE_FILE) +
                zone_page_state_snapshot(zone, NR_ZONE_ACTIVE_FILE);
        if (get_nr_swap_pages() > 0)
                nr += zone_page_state_snapshot(zone, NR_ZONE_INACTIVE_ANON) +
                        zone_page_state_snapshot(zone, NR_ZONE_ACTIVE_ANON);

        return nr;
}

/**
 * lruvec_lru_size - Returns the number of pages on the given LRU list.
 * @lruvec: lru vector
 * @lru: lru to use
 * @zone_idx: zones to consider (use MAX_NR_ZONES for the whole LRU list)
 */
unsigned long lruvec_lru_size(struct lruvec *lruvec, enum lru_list lru, int zone_idx)
{
        unsigned long lru_size = 0;
        int zid;

        if (!mem_cgroup_disabled()) {
                for (zid = 0; zid < MAX_NR_ZONES; zid++)
                        lru_size += mem_cgroup_get_zone_lru_size(lruvec, lru, zid);
        } else
                lru_size = node_page_state(lruvec_pgdat(lruvec), NR_LRU_BASE + lru);

        for (zid = zone_idx + 1; zid < MAX_NR_ZONES; zid++) {
                struct zone *zone = &lruvec_pgdat(lruvec)->node_zones[zid];
                unsigned long size;

                if (!managed_zone(zone))
                        continue;

                if (!mem_cgroup_disabled())
                        size = mem_cgroup_get_zone_lru_size(lruvec, lru, zid);
                else
                        size = zone_page_state(zone, NR_ZONE_LRU_BASE + lru);
                lru_size -= min(size, lru_size);
        }

        return lru_size;
}
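
/*
 * Worked example (illustrative, not part of the original file):
 * lruvec_lru_size(lruvec, LRU_INACTIVE_FILE, MAX_NR_ZONES) returns the
 * size of the whole inactive file list, since no zid exceeds the cutoff.
 * Passing ZONE_DMA32 instead makes the loop above subtract the
 * ZONE_NORMAL-and-higher portion, leaving only pages usable by
 * allocations constrained to ZONE_DMA32 and below.
 */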

/*
 * Add a shrinker callback to be called from the vm.
 */
int prealloc_shrinker(struct shrinker *shrinker)
{
        unsigned int size = sizeof(*shrinker->nr_deferred);

        if (shrinker->flags & SHRINKER_NUMA_AWARE)
                size *= nr_node_ids;

        shrinker->nr_deferred = kzalloc(size, GFP_KERNEL);
        if (!shrinker->nr_deferred)
                return -ENOMEM;

        if (shrinker->flags & SHRINKER_MEMCG_AWARE) {
                if (prealloc_memcg_shrinker(shrinker))
                        goto free_deferred;
        }

        return 0;

free_deferred:
        kfree(shrinker->nr_deferred);
        shrinker->nr_deferred = NULL;
        return -ENOMEM;
}

void free_prealloced_shrinker(struct shrinker *shrinker)
{
        if (!shrinker->nr_deferred)
                return;

        if (shrinker->flags & SHRINKER_MEMCG_AWARE)
                unregister_memcg_shrinker(shrinker);

        kfree(shrinker->nr_deferred);
        shrinker->nr_deferred = NULL;
}

void register_shrinker_prepared(struct shrinker *shrinker)
{
        down_write(&shrinker_rwsem);
        list_add_tail(&shrinker->list, &shrinker_list);
#ifdef CONFIG_MEMCG
        if (shrinker->flags & SHRINKER_MEMCG_AWARE)
                idr_replace(&shrinker_idr, shrinker, shrinker->id);
#endif
        up_write(&shrinker_rwsem);
}

int register_shrinker(struct shrinker *shrinker)
{
        int err = prealloc_shrinker(shrinker);

        if (err)
                return err;
        register_shrinker_prepared(shrinker);
        return 0;
}
EXPORT_SYMBOL(register_shrinker);
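
/*
 * Example (a sketch, not part of this file): minimal use of the shrinker
 * API above.  The demo_* names and the demo_cache_* helpers are
 * hypothetical; struct shrinker, struct shrink_control, SHRINK_STOP,
 * DEFAULT_SEEKS and register_shrinker()/unregister_shrinker() are the
 * real kernel interfaces.
 *
 *	static unsigned long demo_count(struct shrinker *s,
 *					struct shrink_control *sc)
 *	{
 *		return demo_cache_count();	// 0 makes do_shrink_slab() bail
 *	}
 *
 *	static unsigned long demo_scan(struct shrinker *s,
 *				       struct shrink_control *sc)
 *	{
 *		unsigned long freed = demo_cache_trim(sc->nr_to_scan);
 *
 *		return freed ? freed : SHRINK_STOP;
 *	}
 *
 *	static struct shrinker demo_shrinker = {
 *		.count_objects	= demo_count,
 *		.scan_objects	= demo_scan,
 *		.seeks		= DEFAULT_SEEKS,
 *	};
 *
 * A module would call register_shrinker(&demo_shrinker) on init and
 * unregister_shrinker(&demo_shrinker) on exit.
 */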

/*
 * Remove one
 */
void unregister_shrinker(struct shrinker *shrinker)
{
        if (!shrinker->nr_deferred)
                return;
        if (shrinker->flags & SHRINKER_MEMCG_AWARE)
                unregister_memcg_shrinker(shrinker);
        down_write(&shrinker_rwsem);
        list_del(&shrinker->list);
        up_write(&shrinker_rwsem);
        kfree(shrinker->nr_deferred);
        shrinker->nr_deferred = NULL;
}
EXPORT_SYMBOL(unregister_shrinker);

#define SHRINK_BATCH 128

static unsigned long do_shrink_slab(struct shrink_control *shrinkctl,
                                    struct shrinker *shrinker, int priority)
{
        unsigned long freed = 0;
        unsigned long long delta;
        long total_scan;
        long freeable;
        long nr;
        long new_nr;
        int nid = shrinkctl->nid;
        long batch_size = shrinker->batch ? shrinker->batch
                                          : SHRINK_BATCH;
        long scanned = 0, next_deferred;

        if (!(shrinker->flags & SHRINKER_NUMA_AWARE))
                nid = 0;

        freeable = shrinker->count_objects(shrinker, shrinkctl);
        if (freeable == 0 || freeable == SHRINK_EMPTY)
                return freeable;

        /*
         * copy the current shrinker scan count into a local variable
         * and zero it so that other concurrent shrinker invocations
         * don't also do this scanning work.
         */
        nr = atomic_long_xchg(&shrinker->nr_deferred[nid], 0);

        total_scan = nr;
        if (shrinker->seeks) {
                delta = freeable >> priority;
                delta *= 4;
                do_div(delta, shrinker->seeks);
        } else {
                /*
                 * These objects don't require any IO to create. Trim
                 * them aggressively under memory pressure to keep
                 * them from causing refetches in the IO caches.
                 */
                delta = freeable / 2;
        }

        total_scan += delta;
        if (total_scan < 0) {
                pr_err("shrink_slab: %pS negative objects to delete nr=%ld\n",
                       shrinker->scan_objects, total_scan);
                total_scan = freeable;
                next_deferred = nr;
        } else
                next_deferred = total_scan;

        /*
         * We need to avoid excessive windup on filesystem shrinkers
         * due to large numbers of GFP_NOFS allocations causing the
         * shrinkers to return -1 all the time. This results in a large
         * nr being built up so when a shrink that can do some work
         * comes along it empties the entire cache due to nr >>>
         * freeable. This is bad for sustaining a working set in
         * memory.
         *
         * Hence only allow the shrinker to scan the entire cache when
         * a large delta change is calculated directly.
         */
        if (delta < freeable / 4)
                total_scan = min(total_scan, freeable / 2);

        /*
         * Avoid risking looping forever due to too large nr value:
         * never try to free more than twice the estimated number of
         * freeable entries.
         */
        if (total_scan > freeable * 2)
                total_scan = freeable * 2;
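
        /*
         * Worked example (illustrative): with freeable = 10000,
         * priority = DEF_PRIORITY (12) and seeks = DEFAULT_SEEKS (2),
         * delta = (10000 >> 12) * 4 / 2 = 4, so each call nibbles only
         * a few objects.  At priority 0 the same cache gives
         * delta = 10000 * 4 / 2 = 20000, and the clamp above caps
         * total_scan at freeable * 2 = 20000, i.e. at most the whole
         * cache scanned twice.
         */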

        trace_mm_shrink_slab_start(shrinker, shrinkctl, nr,
                                   freeable, delta, total_scan, priority);

        /*
         * Normally, we should not scan less than batch_size objects in one
         * pass to avoid too frequent shrinker calls, but if the slab has
         * fewer than batch_size objects in total and we are really tight
         * on memory, we will try to reclaim all available objects,
         * otherwise we can end up failing allocations although there are
         * plenty of reclaimable objects spread over several slabs with
         * usage less than the batch_size.
         *
         * We detect the "tight on memory" situations by looking at the total
         * number of objects we want to scan (total_scan). If it is greater
         * than the total number of objects on slab (freeable), we must be
         * scanning at high prio and therefore should try to reclaim as much as
         * possible.
         */
        while (total_scan >= batch_size ||
               total_scan >= freeable) {
                unsigned long ret;
                unsigned long nr_to_scan = min(batch_size, total_scan);

                shrinkctl->nr_to_scan = nr_to_scan;
                shrinkctl->nr_scanned = nr_to_scan;
                ret = shrinker->scan_objects(shrinker, shrinkctl);
                if (ret == SHRINK_STOP)
                        break;
                freed += ret;

                count_vm_events(SLABS_SCANNED, shrinkctl->nr_scanned);
                total_scan -= shrinkctl->nr_scanned;
                scanned += shrinkctl->nr_scanned;

                cond_resched();
        }

        if (next_deferred >= scanned)
                next_deferred -= scanned;
        else
                next_deferred = 0;
        /*
         * move the unused scan count back into the shrinker in a
         * manner that handles concurrent updates. If we exhausted the
         * scan, there is no need to do an update.
         */
        if (next_deferred > 0)
                new_nr = atomic_long_add_return(next_deferred,
                                                &shrinker->nr_deferred[nid]);
        else
                new_nr = atomic_long_read(&shrinker->nr_deferred[nid]);

        trace_mm_shrink_slab_end(shrinker, nid, freed, nr, new_nr, total_scan);
        return freed;
}

#ifdef CONFIG_MEMCG
static unsigned long shrink_slab_memcg(gfp_t gfp_mask, int nid,
                        struct mem_cgroup *memcg, int priority)
{
        struct memcg_shrinker_map *map;
        unsigned long ret, freed = 0;
        int i;

        if (!mem_cgroup_online(memcg))
                return 0;

        if (!down_read_trylock(&shrinker_rwsem))
                return 0;

        map = rcu_dereference_protected(memcg->nodeinfo[nid]->shrinker_map,
                                        true);
        if (unlikely(!map))
                goto unlock;

        for_each_set_bit(i, map->map, shrinker_nr_max) {
                struct shrink_control sc = {
                        .gfp_mask = gfp_mask,
                        .nid = nid,
                        .memcg = memcg,
                };
                struct shrinker *shrinker;

                shrinker = idr_find(&shrinker_idr, i);
                if (unlikely(!shrinker || shrinker == SHRINKER_REGISTERING)) {
                        if (!shrinker)
                                clear_bit(i, map->map);
                        continue;
                }

                /* Call non-slab shrinkers even though kmem is disabled */
                if (!memcg_kmem_enabled() &&
                    !(shrinker->flags & SHRINKER_NONSLAB))
                        continue;

                ret = do_shrink_slab(&sc, shrinker, priority);
                if (ret == SHRINK_EMPTY) {
                        clear_bit(i, map->map);
                        /*
                         * After the shrinker reported that it had no objects to
                         * free, but before we cleared the corresponding bit in
                         * the memcg shrinker map, a new object might have been
                         * added. To make sure we have the bit set in this
                         * case, we invoke the shrinker one more time and reset
                         * the bit if it reports that it is not empty anymore.
                         * The memory barrier here pairs with the barrier in
                         * memcg_set_shrinker_bit():
                         *
                         * list_lru_add()     shrink_slab_memcg()
                         *   list_add_tail()    clear_bit()
                         *   <MB>               <MB>
                         *   set_bit()          do_shrink_slab()
                         */
                        smp_mb__after_atomic();
                        ret = do_shrink_slab(&sc, shrinker, priority);
                        if (ret == SHRINK_EMPTY)
                                ret = 0;
                        else
                                memcg_set_shrinker_bit(memcg, nid, i);
                }
                freed += ret;

                if (rwsem_is_contended(&shrinker_rwsem)) {
                        freed = freed ? : 1;
                        break;
                }
        }
unlock:
        up_read(&shrinker_rwsem);
        return freed;
}
#else /* CONFIG_MEMCG */
static unsigned long shrink_slab_memcg(gfp_t gfp_mask, int nid,
                        struct mem_cgroup *memcg, int priority)
{
        return 0;
}
#endif /* CONFIG_MEMCG */

/**
 * shrink_slab - shrink slab caches
 * @gfp_mask: allocation context
 * @nid: node whose slab caches to target
 * @memcg: memory cgroup whose slab caches to target
 * @priority: the reclaim priority
 *
 * Call the shrink functions to age shrinkable caches.
 *
 * @nid is passed along to shrinkers with SHRINKER_NUMA_AWARE set,
 * unaware shrinkers will receive a node id of 0 instead.
 *
 * @memcg specifies the memory cgroup to target. Unaware shrinkers
 * are called only if it is the root cgroup.
 *
 * @priority is sc->priority, we take the number of objects and >> by priority
 * in order to get the scan target.
 *
 * Returns the number of reclaimed slab objects.
 */
static unsigned long shrink_slab(gfp_t gfp_mask, int nid,
                                 struct mem_cgroup *memcg,
                                 int priority)
{
        unsigned long ret, freed = 0;
        struct shrinker *shrinker;

        /*
         * The root memcg might be allocated even though memcg is disabled
         * via "cgroup_disable=memory" boot parameter.  This could make
         * mem_cgroup_is_root() return false, then just run memcg slab
         * shrink, but skip global shrink.  This may result in premature
         * oom.
         */
        if (!mem_cgroup_disabled() && !mem_cgroup_is_root(memcg))
                return shrink_slab_memcg(gfp_mask, nid, memcg, priority);

        if (!down_read_trylock(&shrinker_rwsem))
                goto out;

        list_for_each_entry(shrinker, &shrinker_list, list) {
                struct shrink_control sc = {
                        .gfp_mask = gfp_mask,
                        .nid = nid,
                        .memcg = memcg,
                };

                ret = do_shrink_slab(&sc, shrinker, priority);
                if (ret == SHRINK_EMPTY)
                        ret = 0;
                freed += ret;
                /*
                 * Bail out if someone wants to register a new shrinker to
                 * prevent the registration from being stalled for long periods
                 * by parallel ongoing shrinking.
                 */
                if (rwsem_is_contended(&shrinker_rwsem)) {
                        freed = freed ? : 1;
                        break;
                }
        }

        up_read(&shrinker_rwsem);
out:
        cond_resched();
        return freed;
}

void drop_slab_node(int nid)
{
        unsigned long freed;

        do {
                struct mem_cgroup *memcg = NULL;

                freed = 0;
                memcg = mem_cgroup_iter(NULL, NULL, NULL);
                do {
                        freed += shrink_slab(GFP_KERNEL, nid, memcg, 0);
                } while ((memcg = mem_cgroup_iter(NULL, memcg, NULL)) != NULL);
        } while (freed > 10);
}

void drop_slab(void)
{
        int nid;

        for_each_online_node(nid)
                drop_slab_node(nid);
}
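
/*
 * Note: this is the path behind "echo 2 > /proc/sys/vm/drop_caches"
 * (and the slab half of "echo 3"), which reaches drop_slab() via
 * drop_caches_sysctl_handler() in fs/drop_caches.c.
 */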

static inline int is_page_cache_freeable(struct page *page)
{
        /*
         * A freeable page cache page is referenced only by the caller
         * that isolated the page, the page cache and optional buffer
         * heads at page->private.
         */
        int page_cache_pins = PageTransHuge(page) && PageSwapCache(page) ?
                HPAGE_PMD_NR : 1;
        return page_count(page) - page_has_private(page) == 1 + page_cache_pins;
}
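
/*
 * Worked example (illustrative): a base page without buffer heads that
 * was just isolated from the LRU holds one reference for the isolating
 * caller and one for the page cache, so page_count() == 2,
 * page_has_private() == 0, and 2 - 0 == 1 + 1 holds.  Any extra
 * reference (e.g. a get_user_pages() pin or a concurrent lookup) breaks
 * the equality and the page is not considered freeable.
 */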

static int may_write_to_inode(struct inode *inode, struct scan_control *sc)
{
        if (current->flags & PF_SWAPWRITE)
                return 1;
        if (!inode_write_congested(inode))
                return 1;
        if (inode_to_bdi(inode) == current->backing_dev_info)
                return 1;
        return 0;
}

/*
 * We detected a synchronous write error writing a page out.  Probably
 * -ENOSPC.  We need to propagate that into the address_space for a subsequent
 * fsync(), msync() or close().
 *
 * The tricky part is that after writepage we cannot touch the mapping: nothing
 * prevents it from being freed up.  But we have a ref on the page and once
 * that page is locked, the mapping is pinned.
 *
 * We're allowed to run sleeping lock_page() here because we know the caller has
 * __GFP_FS.
 */
static void handle_write_error(struct address_space *mapping,
                                struct page *page, int error)
{
        lock_page(page);
        if (page_mapping(page) == mapping)
                mapping_set_error(mapping, error);
        unlock_page(page);
}

/* possible outcome of pageout() */
typedef enum {
        /* failed to write page out, page is locked */
        PAGE_KEEP,
        /* move page to the active list, page is locked */
        PAGE_ACTIVATE,
        /* page has been sent to the disk successfully, page is unlocked */
        PAGE_SUCCESS,
        /* page is clean and locked */
        PAGE_CLEAN,
} pageout_t;

/*
 * pageout is called by shrink_page_list() for each dirty page.
 * Calls ->writepage().
 */
static pageout_t pageout(struct page *page, struct address_space *mapping,
                         struct scan_control *sc)
{
        /*
         * If the page is dirty, only perform writeback if that write
         * will be non-blocking, to prevent this allocation from being
         * stalled by pagecache activity.  But note that there may be
         * stalls if we need to run get_block().  We could test
         * PagePrivate for that.
         *
         * If this process is currently in __generic_file_write_iter() against
         * this page's queue, we can perform writeback even if that
         * will block.
         *
         * If the page is swapcache, write it back even if that would
         * block, for some throttling. This happens by accident, because
         * swap_backing_dev_info is bust: it doesn't reflect the
         * congestion state of the swapdevs.  Easy to fix, if needed.
         */
        if (!is_page_cache_freeable(page))
                return PAGE_KEEP;
        if (!mapping) {
                /*
                 * Some data journaling orphaned pages can have
                 * page->mapping == NULL while being dirty with clean buffers.
                 */
                if (page_has_private(page)) {
                        if (try_to_free_buffers(page)) {
                                ClearPageDirty(page);
                                pr_info("%s: orphaned page\n", __func__);
                                return PAGE_CLEAN;
                        }
                }
                return PAGE_KEEP;
        }
        if (mapping->a_ops->writepage == NULL)
                return PAGE_ACTIVATE;
        if (!may_write_to_inode(mapping->host, sc))
                return PAGE_KEEP;

        if (clear_page_dirty_for_io(page)) {
                int res;
                struct writeback_control wbc = {
                        .sync_mode = WB_SYNC_NONE,
                        .nr_to_write = SWAP_CLUSTER_MAX,
                        .range_start = 0,
                        .range_end = LLONG_MAX,
                        .for_reclaim = 1,
                };

                SetPageReclaim(page);
                res = mapping->a_ops->writepage(page, &wbc);
                if (res < 0)
                        handle_write_error(mapping, page, res);
                if (res == AOP_WRITEPAGE_ACTIVATE) {
                        ClearPageReclaim(page);
                        return PAGE_ACTIVATE;
                }

                if (!PageWriteback(page)) {
                        /* synchronous write or broken a_ops? */
                        ClearPageReclaim(page);
                }
                trace_mm_vmscan_writepage(page);
                inc_node_page_state(page, NR_VMSCAN_WRITE);
                return PAGE_SUCCESS;
        }

        return PAGE_CLEAN;
}

/*
 * Same as remove_mapping, but if the page is removed from the mapping, it
 * gets returned with a refcount of 0.
 */
static int __remove_mapping(struct address_space *mapping, struct page *page,
                            bool reclaimed)
{
        unsigned long flags;
        int refcount;

        BUG_ON(!PageLocked(page));
        BUG_ON(mapping != page_mapping(page));

        xa_lock_irqsave(&mapping->i_pages, flags);
        /*
         * The non-racy check for a busy page.
         *
         * Must be careful with the order of the tests. When someone has
         * a ref to the page, it may be possible that they dirty it then
         * drop the reference. So if PageDirty is tested before page_count
         * here, then the following race may occur:
         *
         * get_user_pages(&page);
         * [user mapping goes away]
         * write_to(page);
         *                              !PageDirty(page)    [good]
         * SetPageDirty(page);
         * put_page(page);
         *                              !page_count(page)   [good, discard it]
         *
         * [oops, our write_to data is lost]
         *
         * Reversing the order of the tests ensures such a situation cannot
         * escape unnoticed. The smp_rmb is needed to ensure the page->flags
         * load is not satisfied before that of page->_refcount.
         *
         * Note that if SetPageDirty is always performed via set_page_dirty,
         * and thus under the i_pages lock, then this ordering is not required.
         */
        refcount = 1 + compound_nr(page);
        if (!page_ref_freeze(page, refcount))
                goto cannot_free;
        /* note: atomic_cmpxchg in page_ref_freeze provides the smp_rmb */
        if (unlikely(PageDirty(page))) {
                page_ref_unfreeze(page, refcount);
                goto cannot_free;
        }

        if (PageSwapCache(page)) {
                swp_entry_t swap = { .val = page_private(page) };
                mem_cgroup_swapout(page, swap);
                __delete_from_swap_cache(page, swap);
                xa_unlock_irqrestore(&mapping->i_pages, flags);
                put_swap_page(page, swap);
        } else {
                void (*freepage)(struct page *);
                void *shadow = NULL;

                freepage = mapping->a_ops->freepage;
                /*
                 * Remember a shadow entry for reclaimed file cache in
                 * order to detect refaults, thus thrashing, later on.
                 *
                 * But don't store shadows in an address space that is
                 * already exiting.  This is not just an optimization,
                 * inode reclaim needs to empty out the radix tree or
                 * the nodes are lost.  Don't plant shadows behind its
                 * back.
                 *
                 * We also don't store shadows for DAX mappings because the
                 * only page cache pages found in these are zero pages
                 * covering holes, and because we don't want to mix DAX
                 * exceptional entries and shadow exceptional entries in the
                 * same address_space.
                 */
                if (reclaimed && page_is_file_cache(page) &&
                    !mapping_exiting(mapping) && !dax_mapping(mapping))
                        shadow = workingset_eviction(page);
                __delete_from_page_cache(page, shadow);
                xa_unlock_irqrestore(&mapping->i_pages, flags);

                if (freepage != NULL)
                        freepage(page);
        }

        return 1;

cannot_free:
        xa_unlock_irqrestore(&mapping->i_pages, flags);
        return 0;
}

/*
 * Attempt to detach a locked page from its ->mapping.  If it is dirty or if
 * someone else has a ref on the page, abort and return 0.  If it was
 * successfully detached, return 1.  Assumes the caller has a single ref on
 * this page.
 */
int remove_mapping(struct address_space *mapping, struct page *page)
{
        if (__remove_mapping(mapping, page, false)) {
                /*
                 * Unfreezing the refcount with 1 rather than 2 effectively
                 * drops the pagecache ref for us without requiring another
                 * atomic operation.
                 */
                page_ref_unfreeze(page, 1);
                return 1;
        }
        return 0;
}

/**
 * putback_lru_page - put previously isolated page onto appropriate LRU list
 * @page: page to be put back to appropriate lru list
 *
 * Add previously isolated @page to appropriate LRU list.
 * Page may still be unevictable for other reasons.
 *
 * lru_lock must not be held, interrupts must be enabled.
 */
void putback_lru_page(struct page *page)
{
        lru_cache_add(page);
        put_page(page);         /* drop ref from isolate */
}

enum page_references {
        PAGEREF_RECLAIM,
        PAGEREF_RECLAIM_CLEAN,
        PAGEREF_KEEP,
        PAGEREF_ACTIVATE,
};

static enum page_references page_check_references(struct page *page,
                                                  struct scan_control *sc)
{
        int referenced_ptes, referenced_page;
        unsigned long vm_flags;

        referenced_ptes = page_referenced(page, 1, sc->target_mem_cgroup,
                                          &vm_flags);
        referenced_page = TestClearPageReferenced(page);

        /*
         * Mlock lost the isolation race with us.  Let try_to_unmap()
         * move the page to the unevictable list.
         */
        if (vm_flags & VM_LOCKED)
                return PAGEREF_RECLAIM;

        if (referenced_ptes) {
                if (PageSwapBacked(page))
                        return PAGEREF_ACTIVATE;
                /*
                 * All mapped pages start out with page table
                 * references from the instantiating fault, so we need
                 * to look twice if a mapped file page is used more
                 * than once.
                 *
                 * Mark it and spare it for another trip around the
                 * inactive list.  Another page table reference will
                 * lead to its activation.
                 *
                 * Note: the mark is set for activated pages as well
                 * so that recently deactivated but used pages are
                 * quickly recovered.
                 */
                SetPageReferenced(page);

                if (referenced_page || referenced_ptes > 1)
                        return PAGEREF_ACTIVATE;

                /*
                 * Activate file-backed executable pages after first usage.
                 */
                if (vm_flags & VM_EXEC)
                        return PAGEREF_ACTIVATE;

                return PAGEREF_KEEP;
        }

        /* Reclaim if clean, defer dirty pages to writeback */
        if (referenced_page && !PageSwapBacked(page))
                return PAGEREF_RECLAIM_CLEAN;

        return PAGEREF_RECLAIM;
}
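
/*
 * Worked example (illustrative): a file page mapped by one process.
 * On its first pass down the inactive list, referenced_ptes == 1 and
 * the PG_referenced bit is clear, so the page is marked with
 * SetPageReferenced() and kept (PAGEREF_KEEP).  If it is used again
 * before the next pass, referenced_page (or a second pte reference)
 * turns the verdict into PAGEREF_ACTIVATE; if it is not used again,
 * the next pass sees referenced_ptes == 0 and the page falls through
 * to PAGEREF_RECLAIM_CLEAN (or PAGEREF_RECLAIM if dirty handling
 * applies).
 */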

/* Check if a page is dirty or under writeback */
static void page_check_dirty_writeback(struct page *page,
                                       bool *dirty, bool *writeback)
{
        struct address_space *mapping;

        /*
         * Anonymous pages are not handled by flushers and must be written
         * from reclaim context. Do not stall reclaim based on them.
         */
        if (!page_is_file_cache(page) ||
            (PageAnon(page) && !PageSwapBacked(page))) {
                *dirty = false;
                *writeback = false;
                return;
        }

        /* By default assume that the page flags are accurate */
        *dirty = PageDirty(page);
        *writeback = PageWriteback(page);

        /* Verify dirty/writeback state if the filesystem supports it */
        if (!page_has_private(page))
                return;

        mapping = page_mapping(page);
        if (mapping && mapping->a_ops->is_dirty_writeback)
                mapping->a_ops->is_dirty_writeback(page, dirty, writeback);
}

/*
 * shrink_page_list() returns the number of reclaimed pages
 */
static unsigned long shrink_page_list(struct list_head *page_list,
                                      struct pglist_data *pgdat,
                                      struct scan_control *sc,
                                      enum ttu_flags ttu_flags,
                                      struct reclaim_stat *stat,
                                      bool ignore_references)
{
        LIST_HEAD(ret_pages);
        LIST_HEAD(free_pages);
        unsigned nr_reclaimed = 0;
        unsigned pgactivate = 0;

        memset(stat, 0, sizeof(*stat));
        cond_resched();

        while (!list_empty(page_list)) {
                struct address_space *mapping;
                struct page *page;
                int may_enter_fs;
                enum page_references references = PAGEREF_RECLAIM;
                bool dirty, writeback;
                unsigned int nr_pages;

                cond_resched();

                page = lru_to_page(page_list);
                list_del(&page->lru);

                if (!trylock_page(page))
                        goto keep;

                VM_BUG_ON_PAGE(PageActive(page), page);

                nr_pages = compound_nr(page);

                /* Account the number of base pages even though THP */
                sc->nr_scanned += nr_pages;

                if (unlikely(!page_evictable(page)))
                        goto activate_locked;

                if (!sc->may_unmap && page_mapped(page))
                        goto keep_locked;

                may_enter_fs = (sc->gfp_mask & __GFP_FS) ||
                        (PageSwapCache(page) && (sc->gfp_mask & __GFP_IO));

                /*
                 * The number of dirty pages determines if a node is marked
                 * reclaim_congested which affects wait_iff_congested. kswapd
                 * will stall and start writing pages if the tail of the LRU
                 * is all dirty unqueued pages.
                 */
                page_check_dirty_writeback(page, &dirty, &writeback);
                if (dirty || writeback)
                        stat->nr_dirty++;

                if (dirty && !writeback)
                        stat->nr_unqueued_dirty++;

                /*
                 * Treat this page as congested if the underlying BDI is or if
                 * pages are cycling through the LRU so quickly that the
                 * pages marked for immediate reclaim are making it to the
                 * end of the LRU a second time.
                 */
                mapping = page_mapping(page);
                if (((dirty || writeback) && mapping &&
                     inode_write_congested(mapping->host)) ||
                    (writeback && PageReclaim(page)))
                        stat->nr_congested++;

                /*
                 * If a page at the tail of the LRU is under writeback, there
                 * are three cases to consider.
                 *
                 * 1) If reclaim is encountering an excessive number of pages
                 *    under writeback and this page is both under writeback and
                 *    PageReclaim then it indicates that pages are being queued
                 *    for IO but are being recycled through the LRU before the
                 *    IO can complete. Waiting on the page itself risks an
                 *    indefinite stall if it is impossible to writeback the
                 *    page due to IO error or disconnected storage so instead
                 *    note that the LRU is being scanned too quickly and the
                 *    caller can stall after page list has been processed.
                 *
                 * 2) Global or new memcg reclaim encounters a page that is
                 *    not marked for immediate reclaim, or the caller does not
                 *    have __GFP_FS (or __GFP_IO if it's simply going to swap,
                 *    not to fs). In this case mark the page for immediate
                 *    reclaim and continue scanning.
                 *
                 *    Require may_enter_fs because we would wait on fs, which
                 *    may not have submitted IO yet. And the loop driver might
                 *    enter reclaim, and deadlock if it waits on a page for
                 *    which it is needed to do the write (loop masks off
                 *    __GFP_IO|__GFP_FS for this reason); but more thought
                 *    would probably show more reasons.
                 *
                 * 3) Legacy memcg encounters a page that is already marked
                 *    PageReclaim. memcg does not have any dirty pages
                 *    throttling so we could easily OOM just because too many
                 *    pages are in writeback and there is nothing else to
                 *    reclaim. Wait for the writeback to complete.
                 *
                 * In cases 1) and 2) we activate the pages to get them out of
                 * the way while we continue scanning for clean pages on the
                 * inactive list and refilling from the active list. The
                 * observation here is that waiting for disk writes is more
                 * expensive than potentially causing reloads down the line.
                 * Since they're marked for immediate reclaim, they won't put
                 * memory pressure on the cache working set any longer than it
                 * takes to write them to disk.
                 */
                if (PageWriteback(page)) {
                        /* Case 1 above */
                        if (current_is_kswapd() &&
                            PageReclaim(page) &&
                            test_bit(PGDAT_WRITEBACK, &pgdat->flags)) {
                                stat->nr_immediate++;
                                goto activate_locked;

                        /* Case 2 above */
                        } else if (sane_reclaim(sc) ||
                            !PageReclaim(page) || !may_enter_fs) {
                                /*
                                 * This is slightly racy - end_page_writeback()
                                 * might have just cleared PageReclaim, then
                                 * setting PageReclaim here end up interpreted
                                 * as PageReadahead - but that does not matter
                                 * enough to care.  What we do want is for this
                                 * page to have PageReclaim set next time memcg
                                 * reclaim reaches the tests above, so it will
                                 * then wait_on_page_writeback() to avoid OOM;
                                 * and it's also appropriate in global reclaim.
                                 */
                                SetPageReclaim(page);
                                stat->nr_writeback++;
                                goto activate_locked;

                        /* Case 3 above */
                        } else {
                                unlock_page(page);
                                wait_on_page_writeback(page);
                                /* then go back and try same page again */
                                list_add_tail(&page->lru, page_list);
                                continue;
                        }
                }

                if (!ignore_references)
                        references = page_check_references(page, sc);

                switch (references) {
                case PAGEREF_ACTIVATE:
                        goto activate_locked;
                case PAGEREF_KEEP:
                        stat->nr_ref_keep += nr_pages;
                        goto keep_locked;
                case PAGEREF_RECLAIM:
                case PAGEREF_RECLAIM_CLEAN:
                        ; /* try to reclaim the page below */
                }

                /*
                 * Anonymous process memory has backing store?
                 * Try to allocate it some swap space here.
                 * Lazyfree page could be freed directly
                 */
                if (PageAnon(page) && PageSwapBacked(page)) {
                        if (!PageSwapCache(page)) {
                                if (!(sc->gfp_mask & __GFP_IO))
                                        goto keep_locked;
                                if (PageTransHuge(page)) {
                                        /* cannot split THP, skip it */
                                        if (!can_split_huge_page(page, NULL))
                                                goto activate_locked;
                                        /*
                                         * Split pages without a PMD map right
                                         * away. Chances are some or all of the
                                         * tail pages can be freed without IO.
                                         */
                                        if (!compound_mapcount(page) &&
                                            split_huge_page_to_list(page,
                                                                    page_list))
                                                goto activate_locked;
                                }
                                if (!add_to_swap(page)) {
                                        if (!PageTransHuge(page))
                                                goto activate_locked_split;
                                        /* Fallback to swap normal pages */
                                        if (split_huge_page_to_list(page,
                                                                    page_list))
                                                goto activate_locked;
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
                                        count_vm_event(THP_SWPOUT_FALLBACK);
#endif
                                        if (!add_to_swap(page))
                                                goto activate_locked_split;
                                }

                                may_enter_fs = 1;

                                /* Adding to swap updated mapping */
                                mapping = page_mapping(page);
                        }
                } else if (unlikely(PageTransHuge(page))) {
                        /* Split file THP */
                        if (split_huge_page_to_list(page, page_list))
                                goto keep_locked;
                }

1331                 /*
1332                  * The THP may have been split above; if so, subtract the tail
1333                  * pages and update nr_pages to avoid accounting them twice.
1334                  *
1335                  * Only tail pages that were successfully added to the swap
1336                  * cache reach here.
1337                  */
1338                 if ((nr_pages > 1) && !PageTransHuge(page)) {
1339                         sc->nr_scanned -= (nr_pages - 1);
1340                         nr_pages = 1;
1341                 }
1342 
1343                 /*
1344                  * The page is mapped into the page tables of one or more
1345                  * processes. Try to unmap it here.
1346                  */
1347                 if (page_mapped(page)) {
1348                         enum ttu_flags flags = ttu_flags | TTU_BATCH_FLUSH;
1349 
1350                         if (unlikely(PageTransHuge(page)))
1351                                 flags |= TTU_SPLIT_HUGE_PMD;
1352                         if (!try_to_unmap(page, flags)) {
1353                                 stat->nr_unmap_fail += nr_pages;
1354                                 goto activate_locked;
1355                         }
1356                 }
1357 
1358                 if (PageDirty(page)) {
1359                         /*
1360                          * Only kswapd can writeback filesystem pages
1361                          * to avoid risk of stack overflow. But avoid
1362                          * injecting inefficient single-page IO into
1363                          * flusher writeback as much as possible: only
1364                          * write pages when we've encountered many
1365                          * dirty pages, and when we've already scanned
1366                          * the rest of the LRU for clean pages and see
1367                          * the same dirty pages again (PageReclaim).
1368                          */
1369                         if (page_is_file_cache(page) &&
1370                             (!current_is_kswapd() || !PageReclaim(page) ||
1371                              !test_bit(PGDAT_DIRTY, &pgdat->flags))) {
1372                                 /*
1373                                  * Immediately reclaim when written back.
1374                  * Similar in principle to deactivate_page()
1375                                  * except we already have the page isolated
1376                                  * and know it's dirty
1377                                  */
1378                                 inc_node_page_state(page, NR_VMSCAN_IMMEDIATE);
1379                                 SetPageReclaim(page);
1380 
1381                                 goto activate_locked;
1382                         }
1383 
1384                         if (references == PAGEREF_RECLAIM_CLEAN)
1385                                 goto keep_locked;
1386                         if (!may_enter_fs)
1387                                 goto keep_locked;
1388                         if (!sc->may_writepage)
1389                                 goto keep_locked;
1390 
1391                         /*
1392                          * Page is dirty. Flush the TLB if a writable entry
1393                          * potentially exists to avoid CPU writes after IO
1394                          * starts and then write it out here.
1395                          */
1396                         try_to_unmap_flush_dirty();
1397                         switch (pageout(page, mapping, sc)) {
1398                         case PAGE_KEEP:
1399                                 goto keep_locked;
1400                         case PAGE_ACTIVATE:
1401                                 goto activate_locked;
1402                         case PAGE_SUCCESS:
1403                                 if (PageWriteback(page))
1404                                         goto keep;
1405                                 if (PageDirty(page))
1406                                         goto keep;
1407 
1408                                 /*
1409                                  * A synchronous write - probably a ramdisk.  Go
1410                                  * ahead and try to reclaim the page.
1411                                  */
1412                                 if (!trylock_page(page))
1413                                         goto keep;
1414                                 if (PageDirty(page) || PageWriteback(page))
1415                                         goto keep_locked;
1416                                 mapping = page_mapping(page);
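                                /* fall through - a synchronous write left the page clean */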
1417                         case PAGE_CLEAN:
1418                                 ; /* try to free the page below */
1419                         }
1420                 }
1421 
1422                 /*
1423                  * If the page has buffers, try to free the buffer mappings
1424                  * associated with this page. If we succeed we try to free
1425                  * the page as well.
1426                  *
1427                  * We do this even if the page is PageDirty().
1428                  * try_to_release_page() does not perform I/O, but it is
1429                  * possible for a page to have PageDirty set, but it is actually
1430                  * clean (all its buffers are clean).  This happens if the
1431                  * buffers were written out directly, with submit_bh(). ext3
1432                  * will do this, as well as the blockdev mapping.
1433                  * try_to_release_page() will discover that cleanness and will
1434                  * drop the buffers and mark the page clean - it can be freed.
1435                  *
1436                  * Rarely, pages can have buffers and no ->mapping.  These are
1437                  * the pages which were not successfully invalidated in
1438                  * truncate_complete_page().  We try to drop those buffers here
1439                  * and if that worked, and the page is no longer mapped into
1440                  * process address space (page_count == 1) it can be freed.
1441                  * Otherwise, leave the page on the LRU so it is swappable.
1442                  */
1443                 if (page_has_private(page)) {
1444                         if (!try_to_release_page(page, sc->gfp_mask))
1445                                 goto activate_locked;
1446                         if (!mapping && page_count(page) == 1) {
1447                                 unlock_page(page);
1448                                 if (put_page_testzero(page))
1449                                         goto free_it;
1450                                 else {
1451                                         /*
1452                                          * rare race with speculative reference.
1453                                          * the speculative reference will free
1454                                          * this page shortly, so we may
1455                                          * increment nr_reclaimed here (and
1456                                          * leave it off the LRU).
1457                                          */
1458                                         nr_reclaimed++;
1459                                         continue;
1460                                 }
1461                         }
1462                 }
1463 
1464                 if (PageAnon(page) && !PageSwapBacked(page)) {
1465                         /* follow __remove_mapping for reference */
1466                         if (!page_ref_freeze(page, 1))
1467                                 goto keep_locked;
1468                         if (PageDirty(page)) {
1469                                 page_ref_unfreeze(page, 1);
1470                                 goto keep_locked;
1471                         }
1472 
1473                         count_vm_event(PGLAZYFREED);
1474                         count_memcg_page_event(page, PGLAZYFREED);
1475                 } else if (!mapping || !__remove_mapping(mapping, page, true))
1476                         goto keep_locked;
1477 
1478                 unlock_page(page);
1479 free_it:
1480                 /*
1481                  * The THP may have been swapped out as a whole, so account
1482                  * all of its base pages.
1483                  */
1484                 nr_reclaimed += nr_pages;
1485 
1486                 /*
1487                  * Is there a need to periodically free the page list? It
1488                  * would appear not, as the counts should stay low.
1489                  */
1490                 if (unlikely(PageTransHuge(page)))
1491                         (*get_compound_page_dtor(page))(page);
1492                 else
1493                         list_add(&page->lru, &free_pages);
1494                 continue;
1495 
1496 activate_locked_split:
1497                 /*
1498                  * Tail pages that failed to be added to the swap cache
1499                  * reach here.  Fix up nr_scanned and nr_pages.
1500                  */
1501                 if (nr_pages > 1) {
1502                         sc->nr_scanned -= (nr_pages - 1);
1503                         nr_pages = 1;
1504                 }
1505 activate_locked:
1506                 /* Not a candidate for swapping, so reclaim swap space. */
1507                 if (PageSwapCache(page) && (mem_cgroup_swap_full(page) ||
1508                                                 PageMlocked(page)))
1509                         try_to_free_swap(page);
1510                 VM_BUG_ON_PAGE(PageActive(page), page);
1511                 if (!PageMlocked(page)) {
1512                         int type = page_is_file_cache(page);
1513                         SetPageActive(page);
1514                         stat->nr_activate[type] += nr_pages;
1515                         count_memcg_page_event(page, PGACTIVATE);
1516                 }
1517 keep_locked:
1518                 unlock_page(page);
1519 keep:
1520                 list_add(&page->lru, &ret_pages);
1521                 VM_BUG_ON_PAGE(PageLRU(page) || PageUnevictable(page), page);
1522         }
1523 
1524         pgactivate = stat->nr_activate[0] + stat->nr_activate[1];
1525 
1526         mem_cgroup_uncharge_list(&free_pages);
1527         try_to_unmap_flush();
1528         free_unref_page_list(&free_pages);
1529 
1530         list_splice(&ret_pages, page_list);
1531         count_vm_events(PGACTIVATE, pgactivate);
1532 
1533         return nr_reclaimed;
1534 }
1535 
1536 unsigned long reclaim_clean_pages_from_list(struct zone *zone,
1537                                             struct list_head *page_list)
1538 {
1539         struct scan_control sc = {
1540                 .gfp_mask = GFP_KERNEL,
1541                 .priority = DEF_PRIORITY,
1542                 .may_unmap = 1,
1543         };
1544         struct reclaim_stat dummy_stat;
1545         unsigned long ret;
1546         struct page *page, *next;
1547         LIST_HEAD(clean_pages);
1548 
1549         list_for_each_entry_safe(page, next, page_list, lru) {
1550                 if (page_is_file_cache(page) && !PageDirty(page) &&
1551                     !__PageMovable(page) && !PageUnevictable(page)) {
1552                         ClearPageActive(page);
1553                         list_move(&page->lru, &clean_pages);
1554                 }
1555         }
1556 
1557         ret = shrink_page_list(&clean_pages, zone->zone_pgdat, &sc,
1558                         TTU_IGNORE_ACCESS, &dummy_stat, true);
1559         list_splice(&clean_pages, page_list);
1560         mod_node_page_state(zone->zone_pgdat, NR_ISOLATED_FILE, -ret);
1561         return ret;
1562 }
1563 
1564 /*
1565  * Attempt to remove the specified page from its LRU.  Only take this page
1566  * if it is of the appropriate PageActive status.  Pages which are being
1567  * freed elsewhere are also ignored.
1568  *
1569  * page:        page to consider
1570  * mode:        one of the LRU isolation modes defined above
1571  *
1572  * returns 0 on success, -ve errno on failure.
1573  */
1574 int __isolate_lru_page(struct page *page, isolate_mode_t mode)
1575 {
1576         int ret = -EINVAL;
1577 
1578         /* Only take pages on the LRU. */
1579         if (!PageLRU(page))
1580                 return ret;
1581 
1582         /* Compaction should not handle unevictable pages but CMA can do so */
1583         if (PageUnevictable(page) && !(mode & ISOLATE_UNEVICTABLE))
1584                 return ret;
1585 
1586         ret = -EBUSY;
1587 
1588         /*
1589          * To minimise LRU disruption, the caller can indicate that it only
1590          * wants to isolate pages it will be able to operate on without
1591          * blocking - clean pages for the most part.
1592          *
1593          * ISOLATE_ASYNC_MIGRATE is used to indicate that the caller only
1594          * wants pages that can be migrated without blocking.
1595          */
1596         if (mode & ISOLATE_ASYNC_MIGRATE) {
1597                 /* All the caller can do on PageWriteback is block */
1598                 if (PageWriteback(page))
1599                         return ret;
1600 
1601                 if (PageDirty(page)) {
1602                         struct address_space *mapping;
1603                         bool migrate_dirty;
1604 
1605                         /*
1606                          * Only pages without mappings, or with a
1607                          * ->migratepage callback, can be migrated
1608                          * without blocking. However, we can be racing with
1609                          * truncation so it's necessary to lock the page
1610                          * to stabilise the mapping as truncation holds
1611                          * the page lock until after the page is removed
1612                          * from the page cache.
1613                          */
1614                         if (!trylock_page(page))
1615                                 return ret;
1616 
1617                         mapping = page_mapping(page);
1618                         migrate_dirty = !mapping || mapping->a_ops->migratepage;
1619                         unlock_page(page);
1620                         if (!migrate_dirty)
1621                                 return ret;
1622                 }
1623         }
1624 
1625         if ((mode & ISOLATE_UNMAPPED) && page_mapped(page))
1626                 return ret;
1627 
1628         if (likely(get_page_unless_zero(page))) {
1629                 /*
1630                  * Be careful not to clear PageLRU until after we're
1631                  * sure the page is not being freed elsewhere -- the
1632                  * page release code relies on it.
1633                  */
1634                 ClearPageLRU(page);
1635                 ret = 0;
1636         }
1637 
1638         return ret;
1639 }
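
A minimal usage sketch (not part of vmscan.c): __isolate_lru_page() expects
the caller to already hold pgdat->lru_lock, and on success the caller keeps
the page reference taken by get_page_unless_zero(). The helper name below is
hypothetical, and a real caller must also fix up the LRU size accounting, as
isolate_lru_pages() does via update_lru_sizes().

/* Hypothetical helper: pull one page off the tail of an LRU list. */
static int example_isolate_one(struct pglist_data *pgdat, struct lruvec *lruvec,
                               enum lru_list lru, struct list_head *dst)
{
        struct page *page;
        int ret = -ENOENT;

        spin_lock_irq(&pgdat->lru_lock);
        if (!list_empty(&lruvec->lists[lru])) {
                page = lru_to_page(&lruvec->lists[lru]);
                ret = __isolate_lru_page(page, 0);
                if (!ret)       /* PageLRU cleared, refcount elevated */
                        list_move(&page->lru, dst);
        }
        spin_unlock_irq(&pgdat->lru_lock);
        return ret;
}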
1640 
1641 
1642 /*
1643  * Update LRU sizes after isolating pages. The LRU size updates must
1644  * be complete before mem_cgroup_update_lru_size due to a sanity check.
1645  */
1646 static __always_inline void update_lru_sizes(struct lruvec *lruvec,
1647                         enum lru_list lru, unsigned long *nr_zone_taken)
1648 {
1649         int zid;
1650 
1651         for (zid = 0; zid < MAX_NR_ZONES; zid++) {
1652                 if (!nr_zone_taken[zid])
1653                         continue;
1654 
1655                 __update_lru_size(lruvec, lru, zid, -nr_zone_taken[zid]);
1656 #ifdef CONFIG_MEMCG
1657                 mem_cgroup_update_lru_size(lruvec, lru, zid, -nr_zone_taken[zid]);
1658 #endif
1659         }
1660 
1661 }
1662 
1663 /**
1664  * pgdat->lru_lock is heavily contended.  Some of the functions that
1665  * shrink the lists perform better by taking out a batch of pages
1666  * and working on them outside the LRU lock.
1667  *
1668  * For pagecache intensive workloads, this function is the hottest
1669  * spot in the kernel (apart from copy_*_user functions).
1670  *
1671  * Appropriate locks must be held before calling this function.
1672  *
1673  * @nr_to_scan: The number of eligible pages to look through on the list.
1674  * @lruvec:     The LRU vector to pull pages from.
1675  * @dst:        The temp list to put pages on to.
1676  * @nr_scanned: The number of pages that were scanned.
1677  * @sc:         The scan_control struct for this reclaim session
1678  * @mode:       One of the LRU isolation modes
1679  * @lru:        LRU list id for isolating
1680  *
1681  * returns how many pages were moved onto *@dst.
1682  */
1683 static unsigned long isolate_lru_pages(unsigned long nr_to_scan,
1684                 struct lruvec *lruvec, struct list_head *dst,
1685                 unsigned long *nr_scanned, struct scan_control *sc,
1686                 enum lru_list lru)
1687 {
1688         struct list_head *src = &lruvec->lists[lru];
1689         unsigned long nr_taken = 0;
1690         unsigned long nr_zone_taken[MAX_NR_ZONES] = { 0 };
1691         unsigned long nr_skipped[MAX_NR_ZONES] = { 0, };
1692         unsigned long skipped = 0;
1693         unsigned long scan, total_scan, nr_pages;
1694         LIST_HEAD(pages_skipped);
1695         isolate_mode_t mode = (sc->may_unmap ? 0 : ISOLATE_UNMAPPED);
1696 
1697         total_scan = 0;
1698         scan = 0;
1699         while (scan < nr_to_scan && !list_empty(src)) {
1700                 struct page *page;
1701 
1702                 page = lru_to_page(src);
1703                 prefetchw_prev_lru_page(page, src, flags);
1704 
1705                 VM_BUG_ON_PAGE(!PageLRU(page), page);
1706 
1707                 nr_pages = compound_nr(page);
1708                 total_scan += nr_pages;
1709 
1710                 if (page_zonenum(page) > sc->reclaim_idx) {
1711                         list_move(&page->lru, &pages_skipped);
1712                         nr_skipped[page_zonenum(page)] += nr_pages;
1713                         continue;
1714                 }
1715 
1716                 /*
1717                  * Do not count skipped pages because that makes the function
1718                  * return with no isolated pages if the LRU mostly contains
1719                  * ineligible pages.  This causes the VM to not reclaim any
1720                  * pages, triggering a premature OOM.
1721                  *
1722                  * Account all tail pages of THP.  This would not cause
1723                  * premature OOM since __isolate_lru_page() returns -EBUSY
1724                  * only when the page is being freed somewhere else.
1725                  */
1726                 scan += nr_pages;
1727                 switch (__isolate_lru_page(page, mode)) {
1728                 case 0:
1729                         nr_taken += nr_pages;
1730                         nr_zone_taken[page_zonenum(page)] += nr_pages;
1731                         list_move(&page->lru, dst);
1732                         break;
1733 
1734                 case -EBUSY:
1735                         /* else it is being freed elsewhere */
1736                         list_move(&page->lru, src);
1737                         continue;
1738 
1739                 default:
1740                         BUG();
1741                 }
1742         }
1743 
1744         /*
1745          * Splice any skipped pages to the start of the LRU list. Note that
1746          * this disrupts the LRU order when reclaiming for lower zones but
1747          * we cannot splice to the tail. If we did then the SWAP_CLUSTER_MAX
1748          * scanning would soon rescan the same pages to skip and put the
1749          * system at risk of premature OOM.
1750          */
1751         if (!list_empty(&pages_skipped)) {
1752                 int zid;
1753 
1754                 list_splice(&pages_skipped, src);
1755                 for (zid = 0; zid < MAX_NR_ZONES; zid++) {
1756                         if (!nr_skipped[zid])
1757                                 continue;
1758 
1759                         __count_zid_vm_events(PGSCAN_SKIP, zid, nr_skipped[zid]);
1760                         skipped += nr_skipped[zid];
1761                 }
1762         }
1763         *nr_scanned = total_scan;
1764         trace_mm_vmscan_lru_isolate(sc->reclaim_idx, sc->order, nr_to_scan,
1765                                     total_scan, skipped, nr_taken, mode, lru);
1766         update_lru_sizes(lruvec, lru, nr_zone_taken);
1767         return nr_taken;
1768 }
1769 
1770 /**
1771  * isolate_lru_page - tries to isolate a page from its LRU list
1772  * @page: page to isolate from its LRU list
1773  *
1774  * Isolates a @page from an LRU list, clears PageLRU and adjusts the
1775  * vmstat statistic corresponding to whatever LRU list the page was on.
1776  *
1777  * Returns 0 if the page was removed from an LRU list.
1778  * Returns -EBUSY if the page was not on an LRU list.
1779  *
1780  * The returned page will have PageLRU() cleared.  If it was found on
1781  * the active list, it will have PageActive set.  If it was found on
1782  * the unevictable list, it will have the PageUnevictable bit set. That flag
1783  * may need to be cleared by the caller before letting the page go.
1784  *
1785  * The vmstat statistic corresponding to the list on which the page was
1786  * found will be decremented.
1787  *
1788  * Restrictions:
1789  *
1790  * (1) Must be called with an elevated refcount on the page. This is a
1791  *     fundamental difference from isolate_lru_pages (which is called
1792  *     without a stable reference).
1793  * (2) the lru_lock must not be held.
1794  * (3) interrupts must be enabled.
1795  */
1796 int isolate_lru_page(struct page *page)
1797 {
1798         int ret = -EBUSY;
1799 
1800         VM_BUG_ON_PAGE(!page_count(page), page);
1801         WARN_RATELIMIT(PageTail(page), "trying to isolate tail page");
1802 
1803         if (PageLRU(page)) {
1804                 pg_data_t *pgdat = page_pgdat(page);
1805                 struct lruvec *lruvec;
1806 
1807                 spin_lock_irq(&pgdat->lru_lock);
1808                 lruvec = mem_cgroup_page_lruvec(page, pgdat);
1809                 if (PageLRU(page)) {
1810                         int lru = page_lru(page);
1811                         get_page(page);
1812                         ClearPageLRU(page);
1813                         del_page_from_lru_list(page, lruvec, lru);
1814                         ret = 0;
1815                 }
1816                 spin_unlock_irq(&pgdat->lru_lock);
1817         }
1818         return ret;
1819 }
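
A hedged usage sketch (not in the original file) honouring the three
restrictions listed above; example_steal_page() is a hypothetical name:

/* Hypothetical caller: the page already has an elevated refcount, e.g.
 * from get_user_pages(); lru_lock is not held and interrupts are enabled. */
static int example_steal_page(struct page *page)
{
        int ret;

        ret = isolate_lru_page(page);
        if (ret)
                return ret;     /* -EBUSY: the page was not on an LRU list */

        /* ... operate on the page while it is safely off the LRU ... */

        /* Put it back; this also drops the isolation reference. */
        putback_lru_page(page);
        return 0;
}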
1820 
1821 /*
1822  * A direct reclaimer may isolate SWAP_CLUSTER_MAX pages from the LRU list and
1823  * then get rescheduled. When there are massive numbers of tasks doing page
1824  * allocation, such sleeping direct reclaimers may keep piling up on each CPU;
1825  * the LRU list will shrink and be scanned faster than necessary, leading to
1826  * unnecessary swapping, thrashing and OOM.
1827  */
1828 static int too_many_isolated(struct pglist_data *pgdat, int file,
1829                 struct scan_control *sc)
1830 {
1831         unsigned long inactive, isolated;
1832 
1833         if (current_is_kswapd())
1834                 return 0;
1835 
1836         if (!sane_reclaim(sc))
1837                 return 0;
1838 
1839         if (file) {
1840                 inactive = node_page_state(pgdat, NR_INACTIVE_FILE);
1841                 isolated = node_page_state(pgdat, NR_ISOLATED_FILE);
1842         } else {
1843                 inactive = node_page_state(pgdat, NR_INACTIVE_ANON);
1844                 isolated = node_page_state(pgdat, NR_ISOLATED_ANON);
1845         }
1846 
1847         /*
1848          * GFP_NOIO/GFP_NOFS callers are allowed to isolate more pages, so
1849          * that they won't get blocked by normal direct reclaimers and end
1850          * up forming a circular deadlock.
1851          */
1852         if ((sc->gfp_mask & (__GFP_IO | __GFP_FS)) == (__GFP_IO | __GFP_FS))
1853                 inactive >>= 3;
1854 
1855         return isolated > inactive;
1856 }
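
A standalone userspace model of the throttling arithmetic above (the sample
counts are made up): callers that may enter both the IO and FS layers are
throttled once isolated pages exceed one eighth of the inactive list, while
GFP_NOIO/GFP_NOFS callers get the whole list as headroom.

#include <stdbool.h>
#include <stdio.h>

/* Mirror of the heuristic: throttle when too many pages sit isolated. */
static bool too_many_isolated_model(unsigned long inactive,
                                    unsigned long isolated, bool can_io_fs)
{
        if (can_io_fs)          /* normal direct reclaimers: stricter limit */
                inactive >>= 3;
        return isolated > inactive;
}

int main(void)
{
        /* 80000 inactive pages, 12000 already isolated */
        printf("GFP_KERNEL caller throttles: %d\n",
               too_many_isolated_model(80000, 12000, true));   /* 1: 12000 > 10000 */
        printf("GFP_NOFS caller throttles:   %d\n",
               too_many_isolated_model(80000, 12000, false));  /* 0: 12000 <= 80000 */
        return 0;
}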
1857 
1858 /*
1859  * This moves pages from @list to corresponding LRU list.
1860  *
1861  * We move them the other way if the page is referenced by one or more
1862  * processes, from rmap.
1863  *
1864  * If the pages are mostly unmapped, the processing is fast and it is
1865  * appropriate to hold pgdat->lru_lock across the whole operation.  But if
1866  * the pages are mapped, the processing is slow (page_referenced()) so we
1867  * should drop pgdat->lru_lock around each page.  It's impossible to balance
1868  * this, so instead we remove the pages from the LRU while processing them.
1869  * It is safe to rely on PG_active against the non-LRU pages in here because
1870  * nobody will play with that bit on a non-LRU page.
1871  *
1872  * The downside is that we have to touch page->_refcount against each page.
1873  * But we had to alter page->flags anyway.
1874  *
1875  * Returns the number of pages moved to the given lruvec.
1876  */
1877 
1878 static unsigned noinline_for_stack move_pages_to_lru(struct lruvec *lruvec,
1879                                                      struct list_head *list)
1880 {
1881         struct pglist_data *pgdat = lruvec_pgdat(lruvec);
1882         int nr_pages, nr_moved = 0;
1883         LIST_HEAD(pages_to_free);
1884         struct page *page;
1885         enum lru_list lru;
1886 
1887         while (!list_empty(list)) {
1888                 page = lru_to_page(list);
1889                 VM_BUG_ON_PAGE(PageLRU(page), page);
1890                 if (unlikely(!page_evictable(page))) {
1891                         list_del(&page->lru);
1892                         spin_unlock_irq(&pgdat->lru_lock);
1893                         putback_lru_page(page);
1894                         spin_lock_irq(&pgdat->lru_lock);
1895                         continue;
1896                 }
1897                 lruvec = mem_cgroup_page_lruvec(page, pgdat);
1898 
1899                 SetPageLRU(page);
1900                 lru = page_lru(page);
1901 
1902                 nr_pages = hpage_nr_pages(page);
1903                 update_lru_size(lruvec, lru, page_zonenum(page), nr_pages);
1904                 list_move(&page->lru, &lruvec->lists[lru]);
1905 
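                /*
                 * Drop the reference taken when the page was isolated. If it
                 * was the last one, the page was freed while off the LRU:
                 * undo the linkage set up above and free it below.
                 */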
1906                 if (put_page_testzero(page)) {
1907                         __ClearPageLRU(page);
1908                         __ClearPageActive(page);
1909                         del_page_from_lru_list(page, lruvec, lru);
1910 
1911                         if (unlikely(PageCompound(page))) {
1912                                 spin_unlock_irq(&pgdat->lru_lock);
1913                                 (*get_compound_page_dtor(page))(page);
1914                                 spin_lock_irq(&pgdat->lru_lock);
1915                         } else
1916                                 list_add(&page->lru, &pages_to_free);
1917                 } else {
1918                         nr_moved += nr_pages;
1919                 }
1920         }
1921 
1922         /*
1923          * To save our caller's stack, reuse the input list for the pages to free.
1924          */
1925         list_splice(&pages_to_free, list);
1926 
1927         return nr_moved;
1928 }
1929 
1930 /*
1931  * If a kernel thread (such as nfsd for loop-back mounts) services
1932  * a backing device by writing to the page cache, it sets PF_LESS_THROTTLE.
1933  * In that case we should only throttle if the backing device it is
1934  * writing to is congested.  In other cases it is safe to throttle.
1935  */
1936 static int current_may_throttle(void)
1937 {
1938         return !(current->flags & PF_LESS_THROTTLE) ||
1939                 current->backing_dev_info == NULL ||
1940                 bdi_write_congested(current->backing_dev_info);
1941 }
1942 
1943 /*
1944  * shrink_inactive_list() is a helper for shrink_node().  It returns the number
1945  * of reclaimed pages
1946  */
1947 static noinline_for_stack unsigned long
1948 shrink_inactive_list(unsigned long nr_to_scan, struct lruvec *lruvec,
1949                      struct scan_control *sc, enum lru_list lru)
1950 {
1951         LIST_HEAD(page_list);
1952         unsigned long nr_scanned;
1953         unsigned long nr_reclaimed = 0;
1954         unsigned long nr_taken;
1955         struct reclaim_stat stat;
1956         int file = is_file_lru(lru);
1957         enum vm_event_item item;
1958         struct pglist_data *pgdat = lruvec_pgdat(lruvec);
1959         struct zone_reclaim_stat *reclaim_stat = &lruvec->reclaim_stat;
1960         bool stalled = false;
1961 
1962         while (unlikely(too_many_isolated(pgdat, file, sc))) {
1963                 if (stalled)
1964                         return 0;
1965 
1966                 /* wait a bit for the reclaimer. */
1967                 msleep(100);
1968                 stalled = true;
1969 
1970                 /* We are about to die and free our memory. Return now. */
1971                 if (fatal_signal_pending(current))
1972                         return SWAP_CLUSTER_MAX;
1973         }
1974 
1975         lru_add_drain();
1976 
1977         spin_lock_irq(&pgdat->lru_lock);
1978 
1979         nr_taken = isolate_lru_pages(nr_to_scan, lruvec, &page_list,
1980                                      &nr_scanned, sc, lru);
1981 
1982         __mod_node_page_state(pgdat, NR_ISOLATED_ANON + file, nr_taken);
1983         reclaim_stat->recent_scanned[file] += nr_taken;
1984 
1985         item = current_is_kswapd() ? PGSCAN_KSWAPD : PGSCAN_DIRECT;
1986         if (global_reclaim(sc))
1987                 __count_vm_events(item, nr_scanned);
1988         __count_memcg_events(lruvec_memcg(lruvec), item, nr_scanned);
1989         spin_unlock_irq(&pgdat->lru_lock);
1990 
1991         if (nr_taken == 0)
1992                 return 0;
1993 
1994         nr_reclaimed = shrink_page_list(&page_list, pgdat, sc, 0,
1995                                 &stat, false);
1996 
1997         spin_lock_irq(&pgdat->lru_lock);
1998 
1999         item = current_is_kswapd() ? PGSTEAL_KSWAPD : PGSTEAL_DIRECT;
2000         if (global_reclaim(sc))
2001                 __count_vm_events(item, nr_reclaimed);
2002         __count_memcg_events(lruvec_memcg(lruvec), item, nr_reclaimed);
2003         reclaim_stat->recent_rotated[0] += stat.nr_activate[0];
2004         reclaim_stat->recent_rotated[1] += stat.nr_activate[1];
2005 
2006         move_pages_to_lru(lruvec, &page_list);
2007 
2008         __mod_node_page_state(pgdat, NR_ISOLATED_ANON + file, -nr_taken);
2009 
2010         spin_unlock_irq(&pgdat->lru_lock);
2011 
2012         mem_cgroup_uncharge_list(&page_list);
2013         free_unref_page_list(&page_list);
2014 
2015         /*
2016          * If dirty pages are scanned that are not queued for IO, it
2017          * implies that flushers are not doing their job. This can
2018          * happen when memory pressure pushes dirty pages to the end of
2019          * the LRU before the dirty limits are breached and the dirty
2020          * data has expired. It can also happen when the proportion of
2021          * dirty pages grows not through writes but through memory
2022          * pressure reclaiming all the clean cache. And in some cases,
2023          * the flushers simply cannot keep up with the allocation
2024          * rate. Nudge the flusher threads in case they are asleep.
2025          */
2026         if (stat.nr_unqueued_dirty == nr_taken)
2027                 wakeup_flusher_threads(WB_REASON_VMSCAN);
2028 
2029         sc->nr.dirty += stat.nr_dirty;
2030         sc->nr.congested += stat.nr_congested;
2031         sc->nr.unqueued_dirty += stat.nr_unqueued_dirty;
2032         sc->nr.writeback += stat.nr_writeback;
2033         sc->nr.immediate += stat.nr_immediate;
2034         sc->nr.taken += nr_taken;
2035         if (file)
2036                 sc->nr.file_taken += nr_taken;
2037 
2038         trace_mm_vmscan_lru_shrink_inactive(pgdat->node_id,
2039                         nr_scanned, nr_reclaimed, &stat, sc->priority, file);
2040         return nr_reclaimed;
2041 }
2042 
2043 static void shrink_active_list(unsigned long nr_to_scan,
2044                                struct lruvec *lruvec,
2045                                struct scan_control *sc,
2046                                enum lru_list lru)
2047 {
2048         unsigned long nr_taken;
2049         unsigned long nr_scanned;
2050         unsigned long vm_flags;
2051         LIST_HEAD(l_hold);      /* The pages which were snipped off */
2052         LIST_HEAD(l_active);
2053         LIST_HEAD(l_inactive);
2054         struct page *page;
2055         struct zone_reclaim_stat *reclaim_stat = &lruvec->reclaim_stat;
2056         unsigned nr_deactivate, nr_activate;
2057         unsigned nr_rotated = 0;
2058         int file = is_file_lru(lru);
2059         struct pglist_data *pgdat = lruvec_pgdat(lruvec);
2060 
2061         lru_add_drain();
2062 
2063         spin_lock_irq(&pgdat->lru_lock);
2064 
2065         nr_taken = isolate_lru_pages(nr_to_scan, lruvec, &l_hold,
2066                                      &nr_scanned, sc, lru);
2067 
2068         __mod_node_page_state(pgdat, NR_ISOLATED_ANON + file, nr_taken);
2069         reclaim_stat->recent_scanned[file] += nr_taken;
2070 
2071         __count_vm_events(PGREFILL, nr_scanned);
2072         __count_memcg_events(lruvec_memcg(lruvec), PGREFILL, nr_scanned);
2073 
2074         spin_unlock_irq(&pgdat->lru_lock);
2075 
2076         while (!list_empty(&l_hold)) {
2077                 cond_resched();
2078                 page = lru_to_page(&l_hold);
2079                 list_del(&page->lru);
2080 
2081                 if (unlikely(!page_evictable(page))) {
2082                         putback_lru_page(page);
2083                         continue;
2084                 }
2085 
2086                 if (unlikely(buffer_heads_over_limit)) {
2087                         if (page_has_private(page) && trylock_page(page)) {
2088                                 if (page_has_private(page))
2089                                         try_to_release_page(page, 0);
2090                                 unlock_page(page);
2091                         }
2092                 }
2093 
2094                 if (page_referenced(page, 0, sc->target_mem_cgroup,
2095                                     &vm_flags)) {
2096                         nr_rotated += hpage_nr_pages(page);
2097                         /*
2098                          * Identify referenced, file-backed active pages and
2099                          * give them one more trip around the active list, so
2100                          * that executable code gets a better chance to stay in
2101                          * memory under moderate memory pressure.  Anon pages
2102                          * are not likely to be evicted by use-once streaming
2103                          * IO, plus JVM can create lots of anon VM_EXEC pages,
2104                          * so we ignore them here.
2105                          */
2106                         if ((vm_flags & VM_EXEC) && page_is_file_cache(page)) {
2107                                 list_add(&page->lru, &l_active);
2108                                 continue;
2109                         }
2110                 }
2111 
2112                 ClearPageActive(page);  /* we are de-activating */
2113                 SetPageWorkingset(page);
2114                 list_add(&page->lru, &l_inactive);
2115         }
2116 
2117         /*
2118          * Move pages back to the lru list.
2119          */
2120         spin_lock_irq(&pgdat->lru_lock);
2121         /*
2122          * Count referenced pages from currently used mappings as rotated,
2123          * even though only some of them are actually re-activated.  This
2124          * helps balance scan pressure between file and anonymous pages in
2125          * get_scan_count.
2126          */
2127         reclaim_stat->recent_rotated[file] += nr_rotated;
2128 
2129         nr_activate = move_pages_to_lru(lruvec, &l_active);
2130         nr_deactivate = move_pages_to_lru(lruvec, &l_inactive);
2131         /* Keep all free pages in l_active list */
2132         list_splice(&l_inactive, &l_active);
2133 
2134         __count_vm_events(PGDEACTIVATE, nr_deactivate);
2135         __count_memcg_events(lruvec_memcg(lruvec), PGDEACTIVATE, nr_deactivate);
2136 
2137         __mod_node_page_state(pgdat, NR_ISOLATED_ANON + file, -nr_taken);
2138         spin_unlock_irq(&pgdat->lru_lock);
2139 
2140         mem_cgroup_uncharge_list(&l_active);
2141         free_unref_page_list(&l_active);
2142         trace_mm_vmscan_lru_shrink_active(pgdat->node_id, nr_taken, nr_activate,
2143                         nr_deactivate, nr_rotated, sc->priority, file);
2144 }
2145 
2146 unsigned long reclaim_pages(struct list_head *page_list)
2147 {
2148         int nid = -1;
2149         unsigned long nr_reclaimed = 0;
2150         LIST_HEAD(node_page_list);
2151         struct reclaim_stat dummy_stat;
2152         struct page *page;
2153         struct scan_control sc = {
2154                 .gfp_mask = GFP_KERNEL,
2155                 .priority = DEF_PRIORITY,
2156                 .may_writepage = 1,
2157                 .may_unmap = 1,
2158                 .may_swap = 1,
2159         };
2160 
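        /*
         * shrink_page_list() works against a single node at a time, so
         * batch consecutive same-node pages and reclaim each batch against
         * its own node whenever the node changes (and once more at the end).
         */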
2161         while (!list_empty(page_list)) {
2162                 page = lru_to_page(page_list);
2163                 if (nid == -1) {
2164                         nid = page_to_nid(page);
2165                         INIT_LIST_HEAD(&node_page_list);
2166                 }
2167 
2168                 if (nid == page_to_nid(page)) {
2169                         ClearPageActive(page);
2170                         list_move(&page->lru, &node_page_list);
2171                         continue;
2172                 }
2173 
2174                 nr_reclaimed += shrink_page_list(&node_page_list,
2175                                                 NODE_DATA(nid),
2176                                                 &sc, 0,
2177                                                 &dummy_stat, false);
2178                 while (!list_empty(&node_page_list)) {
2179                         page = lru_to_page(&node_page_list);
2180                         list_del(&page->lru);
2181                         putback_lru_page(page);
2182                 }
2183 
2184                 nid = -1;
2185         }
2186 
2187         if (!list_empty(&node_page_list)) {
2188                 nr_reclaimed += shrink_page_list(&node_page_list,
2189                                                 NODE_DATA(nid),
2190                                                 &sc, 0,
2191                                                 &dummy_stat, false);
2192                 while (!list_empty(&node_page_list)) {
2193                         page = lru_to_page(&node_page_list);
2194                         list_del(&page->lru);
2195                         putback_lru_page(page);
2196                 }
2197         }
2198 
2199         return nr_reclaimed;
2200 }
2201 
2202 /*
2203  * The inactive anon list should be small enough that the VM never has
2204  * to do too much work.
2205  *
2206  * The inactive file list should be small enough to leave most memory
2207  * to the established workingset on the scan-resistant active list,
2208  * but large enough to avoid thrashing the aggregate readahead window.
2209  *
2210  * Both inactive lists should also be large enough that each inactive
2211  * page has a chance to be referenced again before it is reclaimed.
2212  *
2213  * If that fails and refaulting is observed, the inactive list grows.
2214  *
2215  * The inactive_ratio is the target ratio of ACTIVE to INACTIVE pages
2216  * on this LRU, maintained by the pageout code. An inactive_ratio
2217  * of 3 means 3:1 or 25% of the pages are kept on the inactive list.
2218  *
2219  * total     target    max
2220  * memory    ratio     inactive
2221  * -------------------------------------
2222  *   10MB       1         5MB
2223  *  100MB       1        50MB
2224  *    1GB       3       250MB
2225  *   10GB      10       0.9GB
2226  *  100GB      31         3GB
2227  *    1TB     101        10GB
2228  *   10TB     320        32GB
2229  */
2230 static bool inactive_list_is_low(struct lruvec *lruvec, bool file,
2231                                  struct scan_control *sc, bool trace)
2232 {
2233         enum lru_list active_lru = file * LRU_FILE + LRU_ACTIVE;
2234         struct pglist_data *pgdat = lruvec_pgdat(lruvec);
2235         enum lru_list inactive_lru = file * LRU_FILE;
2236         unsigned long inactive, active;
2237         unsigned long inactive_ratio;
2238         unsigned long refaults;
2239         unsigned long gb;
2240 
2241         /*
2242          * If we don't have swap space, anonymous page deactivation
2243          * is pointless.
2244          */
2245         if (!file && !total_swap_pages)
2246                 return false;
2247 
2248         inactive = lruvec_lru_size(lruvec, inactive_lru, sc->reclaim_idx);
2249         active = lruvec_lru_size(lruvec, active_lru, sc->reclaim_idx);
2250 
2251         /*
2252          * When refaults are being observed, it means a new workingset
2253          * is being established. Disable active list protection to get
2254          * rid of the stale workingset quickly.
2255          */
2256         refaults = lruvec_page_state_local(lruvec, WORKINGSET_ACTIVATE);
2257         if (file && lruvec->refaults != refaults) {
2258                 inactive_ratio = 0;
2259         } else {
2260                 gb = (inactive + active) >> (30 - PAGE_SHIFT);
2261                 if (gb)
2262                         inactive_ratio = int_sqrt(10 * gb);
2263                 else
2264                         inactive_ratio = 1;
2265         }
2266 
2267         if (trace)
2268                 trace_mm_vmscan_inactive_list_is_low(pgdat->node_id, sc->reclaim_idx,
2269                         lruvec_lru_size(lruvec, inactive_lru, MAX_NR_ZONES), inactive,
2270                         lruvec_lru_size(lruvec, active_lru, MAX_NR_ZONES), active,
2271                         inactive_ratio, file);
2272 
2273         return inactive * inactive_ratio < active;
2274 }
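
The ratio heuristic above is easy to check in isolation. A self-contained
userspace sketch reproducing the table from the comment; int_sqrt() is
reimplemented naively here to stand in for the kernel's helper:

#include <stdio.h>

/* Naive integer square root, standing in for the kernel's int_sqrt(). */
static unsigned long int_sqrt(unsigned long x)
{
        unsigned long r = 0;

        while ((r + 1) * (r + 1) <= x)
                r++;
        return r;
}

int main(void)
{
        /* Total (inactive + active) list size in gigabytes. */
        unsigned long sizes_gb[] = { 1, 10, 100, 1024, 10240 };

        for (int i = 0; i < 5; i++) {
                unsigned long gb = sizes_gb[i];
                unsigned long ratio = gb ? int_sqrt(10 * gb) : 1;

                printf("%6luGB -> inactive_ratio %lu\n", gb, ratio);
        }
        return 0;       /* prints 3, 10, 31, 101, 320 - matching the table */
}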
2275 
2276 static unsigned long shrink_list(enum lru_list lru, unsigned long nr_to_scan,
2277                                  struct lruvec *lruvec, struct scan_control *sc)
2278 {
2279         if (is_active_lru(lru)) {
2280                 if (inactive_list_is_low(lruvec, is_file_lru(lru), sc, true))
2281                         shrink_active_list(nr_to_scan, lruvec, sc, lru);
2282                 return 0;
2283         }
2284 
2285         return shrink_inactive_list(nr_to_scan, lruvec, sc, lru);
2286 }
2287 
2288 enum scan_balance {
2289         SCAN_EQUAL,
2290         SCAN_FRACT,
2291         SCAN_ANON,
2292         SCAN_FILE,
2293 };
2294 
2295 /*
2296  * Determine how aggressively the anon and file LRU lists should be
2297  * scanned.  The relative value of each set of LRU lists is determined
2298  * by looking at the fraction of the pages scanned we did rotate back
2299  * onto the active list instead of evict.
2300  *
2301  * nr[0] = anon inactive pages to scan; nr[1] = anon active pages to scan
2302  * nr[2] = file inactive pages to scan; nr[3] = file active pages to scan
2303  */
2304 static void get_scan_count(struct lruvec *lruvec, struct mem_cgroup *memcg,
2305                            struct scan_control *sc, unsigned long *nr,
2306                            unsigned long *lru_pages)
2307 {
2308         int swappiness = mem_cgroup_swappiness(memcg);
2309         struct zone_reclaim_stat *reclaim_stat = &lruvec->reclaim_stat;
2310         u64 fraction[2];
2311         u64 denominator = 0;    /* silence gcc: only set on the SCAN_FRACT path */
2312         struct pglist_data *pgdat = lruvec_pgdat(lruvec);
2313         unsigned long anon_prio, file_prio;
2314         enum scan_balance scan_balance;
2315         unsigned long anon, file;
2316         unsigned long ap, fp;
2317         enum lru_list lru;
2318 
2319         /* If we have no swap space, do not bother scanning anon pages. */
2320         if (!sc->may_swap || mem_cgroup_get_nr_swap_pages(memcg) <= 0) {
2321                 scan_balance = SCAN_FILE;
2322                 goto out;
2323         }
2324 
2325         /*
2326          * Global reclaim will swap to prevent OOM even with no
2327          * swappiness, but memcg users want to use this knob to
2328          * disable swapping for individual groups completely when
2329          * using the memory controller's swap limit feature would be
2330          * too expensive.
2331          */
2332         if (!global_reclaim(sc) && !swappiness) {
2333                 scan_balance = SCAN_FILE;
2334                 goto out;
2335         }
2336 
2337         /*
2338          * Do not apply any pressure balancing cleverness when the
2339          * system is close to OOM, scan both anon and file equally
2340          * (unless the swappiness setting disagrees with swapping).
2341          */
2342         if (!sc->priority && swappiness) {
2343                 scan_balance = SCAN_EQUAL;
2344                 goto out;
2345         }
2346 
2347         /*
2348          * Prevent the reclaimer from falling into the cache trap: as
2349          * cache pages start out inactive, every cache fault will tip
2350          * the scan balance towards the file LRU.  And as the file LRU
2351          * shrinks, so does the window for rotation from references.
2352          * This means we have a runaway feedback loop where a tiny
2353          * thrashing file LRU becomes infinitely more attractive than
2354          * anon pages.  Try to detect this based on file LRU size.
2355          */
2356         if (global_reclaim(sc)) {
2357                 unsigned long pgdatfile;
2358                 unsigned long pgdatfree;
2359                 int z;
2360                 unsigned long total_high_wmark = 0;
2361 
2362                 pgdatfree = sum_zone_node_page_state(pgdat->node_id, NR_FREE_PAGES);
2363                 pgdatfile = node_page_state(pgdat, NR_ACTIVE_FILE) +
2364                            node_page_state(pgdat, NR_INACTIVE_FILE);
2365 
2366                 for (z = 0; z < MAX_NR_ZONES; z++) {
2367                         struct zone *zone = &pgdat->node_zones[z];
2368                         if (!managed_zone(zone))
2369                                 continue;
2370 
2371                         total_high_wmark += high_wmark_pages(zone);
2372                 }
2373 
2374                 if (unlikely(pgdatfile + pgdatfree <= total_high_wmark)) {
2375                         /*
2376                          * Force SCAN_ANON if there are enough inactive
2377                          * anonymous pages on the LRU in eligible zones.
2378                          * Otherwise, the small LRU gets thrashed.
2379                          */
2380                         if (!inactive_list_is_low(lruvec, false, sc, false) &&
2381                             lruvec_lru_size(lruvec, LRU_INACTIVE_ANON, sc->reclaim_idx)
2382                                         >> sc->priority) {
2383                                 scan_balance = SCAN_ANON;
2384                                 goto out;
2385                         }
2386                 }
2387         }
2388 
2389         /*
2390          * If there is enough inactive page cache, i.e. if the size of the
2391          * inactive list is greater than that of the active list *and* the
2392          * inactive list actually has some pages to scan on this priority, we
2393          * do not reclaim anything from the anonymous working set right now.
2394          * Without the second condition we could end up never scanning an
2395          * lruvec even if it has plenty of old anonymous pages unless the
2396          * system is under heavy pressure.
2397          */
2398         if (!inactive_list_is_low(lruvec, true, sc, false) &&
2399             lruvec_lru_size(lruvec, LRU_INACTIVE_FILE, sc->reclaim_idx) >> sc->priority) {
2400                 scan_balance = SCAN_FILE;
2401                 goto out;
2402         }
2403 
2404         scan_balance = SCAN_FRACT;
2405 
2406         /*
2407          * With swappiness at 100, anonymous and file have the same priority.
2408          * This scanning priority is essentially the inverse of IO cost.
2409          */
2410         anon_prio = swappiness;
2411         file_prio = 200 - anon_prio;
2412 
2413         /*
2414          * OK, so we have swap space and a fair amount of page cache
2415          * pages.  We use the recently rotated / recently scanned
2416          * ratios to determine how valuable each cache is.
2417          *
2418          * Because workloads change over time (and to avoid overflow)
2419          * we keep these statistics as a floating average, which ends
2420          * up weighing recent references more than old ones.
2421          *
2422          * anon in [0], file in [1]
2423          */
2424 
2425         anon  = lruvec_lru_size(lruvec, LRU_ACTIVE_ANON, MAX_NR_ZONES) +
2426                 lruvec_lru_size(lruvec, LRU_INACTIVE_ANON, MAX_NR_ZONES);
2427         file  = lruvec_lru_size(lruvec, LRU_ACTIVE_FILE, MAX_NR_ZONES) +
2428                 lruvec_lru_size(lruvec, LRU_INACTIVE_FILE, MAX_NR_ZONES);
2429 
2430         spin_lock_irq(&pgdat->lru_lock);
2431         if (unlikely(reclaim_stat->recent_scanned[0] > anon / 4)) {
2432                 reclaim_stat->recent_scanned[0] /= 2;
2433                 reclaim_stat->recent_rotated[0] /= 2;
2434         }
2435 
2436         if (unlikely(reclaim_stat->recent_scanned[1] > file / 4)) {
2437                 reclaim_stat->recent_scanned[1] /= 2;
2438                 reclaim_stat->recent_rotated[1] /= 2;
2439         }
2440 
2441         /*
2442          * The amount of pressure on anon vs file pages is inversely
2443          * proportional to the fraction of recently scanned pages on
2444          * each list that were recently referenced and in active use.
2445          */
2446         ap = anon_prio * (reclaim_stat->recent_scanned[0] + 1);
2447         ap /= reclaim_stat->recent_rotated[0] + 1;
2448 
2449         fp = file_prio * (reclaim_stat->recent_scanned[1] + 1);
2450         fp /= reclaim_stat->recent_rotated[1] + 1;
2451         spin_unlock_irq(&pgdat->lru_lock);
2452 
2453         fraction[0] = ap;
2454         fraction[1] = fp;
2455         denominator = ap + fp + 1;
2456 out:
2457         *lru_pages = 0;
2458         for_each_evictable_lru(lru) {
2459                 int file = is_file_lru(lru);
2460                 unsigned long lruvec_size;
2461                 unsigned long scan;
2462                 unsigned long protection;
2463 
2464                 lruvec_size = lruvec_lru_size(lruvec, lru, sc->reclaim_idx);
2465                 protection = mem_cgroup_protection(memcg,
2466                                                    sc->memcg_low_reclaim);
2467 
2468                 if (protection) {
2469                         /*
2470                          * Scale a cgroup's reclaim pressure by proportioning
2471                          * its current usage to its memory.low or memory.min
2472                          * setting.
2473                          *
2474                          * This is important, as otherwise scanning aggression
2475                          * becomes extremely binary -- from nothing as we
2476                          * approach the memory protection threshold, to totally
2477                          * nominal as we exceed it.  This results in requiring
2478                          * setting extremely liberal protection thresholds. It
2479                          * also means we simply get no protection at all if we
2480                          * set it too low, which is not ideal.
2481                          *
2482                          * If there is any protection in place, we reduce scan
2483                          * pressure by how much of the total memory used is
2484                          * within protection thresholds.
2485                          *
2486                          * There is one special case: in the first reclaim pass,
2487                          * we skip over all groups that are within their low
2488                          * protection. If that fails to reclaim enough pages to
2489                          * satisfy the reclaim goal, we come back and override
2490                          * the best-effort low protection. However, we still
2491                          * ideally want to honor how well-behaved groups are in
2492                          * that case instead of simply punishing them all
2493                          * equally. As such, we reclaim them based on how much
2494                          * memory they are using, reducing the scan pressure
2495                          * again by how much of the total memory used is under
2496                          * hard protection.
2497                          */
2498                         unsigned long cgroup_size = mem_cgroup_size(memcg);
2499 
2500                         /* Avoid TOCTOU with earlier protection check */
2501                         cgroup_size = max(cgroup_size, protection);
2502 
2503                         scan = lruvec_size - lruvec_size * protection /
2504                                 cgroup_size;
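                        /*
                         * Worked example with made-up numbers: if the cgroup
                         * uses 1G, of which memory.low protects 512M, then
                         * scan = lruvec_size - lruvec_size * 512M / 1G,
                         * i.e. scan pressure is halved.
                         */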
2505 
2506                         /*
2507                          * Minimally target SWAP_CLUSTER_MAX pages to keep
2508                          * reclaim moving forwards, avoiding decremeting
2509                          * sc->priority further than desirable.
2510                          */
2511                         scan = max(scan, SWAP_CLUSTER_MAX);
2512                 } else {
2513                         scan = lruvec_size;
2514                 }
2515 
2516                 scan >>= sc->priority;
2517 
2518                 /*
2519                  * If the cgroup's already been deleted, make sure to
2520                  * scrape out the remaining cache.
2521                  */
2522                 if (!scan && !mem_cgroup_online(memcg))
2523                         scan = min(lruvec_size, SWAP_CLUSTER_MAX);
2524 
2525                 switch (scan_balance) {
2526                 case SCAN_EQUAL:
2527                         /* Scan lists relative to size */
2528                         break;
2529                 case SCAN_FRACT:
2530                         /*
2531                          * Scan types proportional to swappiness and
2532                          * their relative recent reclaim efficiency.
2533                          * Make sure we don't miss the last page on
2534                          * the offlined memory cgroups because of a
2535                          * round-off error.
2536                          */
2537                         scan = mem_cgroup_online(memcg) ?
2538                                div64_u64(scan * fraction[file], denominator) :
2539                                DIV64_U64_ROUND_UP(scan * fraction[file],
2540                                                   denominator);
2541                         break;
2542                 case SCAN_FILE:
2543                 case SCAN_ANON:
2544                         /* Scan one type exclusively */
2545                         if ((scan_balance == SCAN_FILE) != file) {
2546                                 lruvec_size = 0;
2547                                 scan = 0;
2548                         }
2549                         break;
2550                 default:
2551                         /* Look ma, no brain */
2552                         BUG();
2553                 }
2554 
2555                 *lru_pages += lruvec_size;
2556                 nr[lru] = scan;
2557         }
2558 }
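
To make the SCAN_FRACT arithmetic concrete, here is a self-contained sketch
with invented statistics; it mirrors the ap/fp computation and the final
div64_u64() proportioning (plain 64-bit division stands in for the kernel
helpers, and the swappiness and counters are assumptions for illustration):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        /* Invented inputs: vm.swappiness = 60 and some recent stats. */
        uint64_t anon_prio = 60, file_prio = 200 - 60;
        uint64_t recent_scanned[2] = { 10000, 40000 };  /* [0]=anon [1]=file */
        uint64_t recent_rotated[2] = {  8000,  4000 };

        /* Pressure is inversely proportional to the rotation rate. */
        uint64_t ap = anon_prio * (recent_scanned[0] + 1) / (recent_rotated[0] + 1);
        uint64_t fp = file_prio * (recent_scanned[1] + 1) / (recent_rotated[1] + 1);
        uint64_t denominator = ap + fp + 1;

        /* Proportion a 32768-page scan target between anon and file. */
        uint64_t scan = 32768;
        printf("ap=%llu fp=%llu -> anon scan %llu, file scan %llu\n",
               (unsigned long long)ap, (unsigned long long)fp,
               (unsigned long long)(scan * ap / denominator),
               (unsigned long long)(scan * fp / denominator));
        return 0;
}

With these numbers 80% of scanned anon pages were rotated (heavily used) but
only 10% of file pages, so ap comes out small (75) and fp large (1399): most
of the scan target lands on the file LRUs, as intended.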
2559 
2560 /*
2561  * This is a basic per-node page freer.  Used by both kswapd and direct reclaim.
2562  */
2563 static void shrink_node_memcg(struct pglist_data *pgdat, struct mem_cgroup *memcg,
2564                               struct scan_control *sc, unsigned long *lru_pages)
2565 {
2566         struct lruvec *lruvec = mem_cgroup_lruvec(pgdat, memcg);
2567         unsigned long nr[NR_LRU_LISTS];
2568         unsigned long targets[NR_LRU_LISTS];
2569         unsigned long nr_to_scan;
2570         enum lru_list lru;
2571         unsigned long nr_reclaimed = 0;
2572         unsigned long nr_to_reclaim = sc->nr_to_reclaim;
2573         struct blk_plug plug;
2574         bool scan_adjusted;
2575 
2576         get_scan_count(lruvec, memcg, sc, nr, lru_pages);
2577 
2578         /* Record the original scan target for proportional adjustments later */
2579         memcpy(targets, nr, sizeof(nr));
2580 
2581         /*
2582          * Global reclaiming within direct reclaim at DEF_PRIORITY is a normal
2583          * event that can occur when there is little memory pressure, e.g.
2584          * multiple streaming readers/writers. Hence, we do not abort scanning
2585          * when the requested number of pages are reclaimed when scanning at
2586          * DEF_PRIORITY on the assumption that the fact we are direct
2587          * reclaiming implies that kswapd is not keeping up and it is best to
2588          * do a batch of work at once. For memcg reclaim one check is made to
2589          * abort proportional reclaim if either the file or anon lru has already
2590          * dropped to zero at the first pass.
2591          */
2592         scan_adjusted = (global_reclaim(sc) && !current_is_kswapd() &&
2593                          sc->priority == DEF_PRIORITY);
2594 
2595         blk_start_plug(&plug);
2596         while (nr[LRU_INACTIVE_ANON] || nr[LRU_ACTIVE_FILE] ||
2597                                         nr[LRU_INACTIVE_FILE]) {
2598                 unsigned long nr_anon, nr_file, percentage;
2599                 unsigned long nr_scanned;
2600 
2601                 for_each_evictable_lru(lru) {
2602                         if (nr[lru]) {
2603                                 nr_to_scan = min(nr[lru], SWAP_CLUSTER_MAX);
2604                                 nr[lru] -= nr_to_scan;
2605 
2606                                 nr_reclaimed += shrink_list(lru, nr_to_scan,
2607                                                             lruvec, sc);
2608                         }
2609                 }
2610 
2611                 cond_resched();
2612 
2613                 if (nr_reclaimed < nr_to_reclaim || scan_adjusted)
2614                         continue;
2615 
2616                 /*
2617                  * For kswapd and memcg, reclaim at least the number of pages
2618                  * requested. Ensure that the anon and file LRUs are scanned
2619                  * proportionally to what was requested by get_scan_count().
2620                  * We stop reclaiming one LRU and reduce the amount of
2621                  * scanning proportionally to the original scan target.
2622                  */
2623                 nr_file = nr[LRU_INACTIVE_FILE] + nr[LRU_ACTIVE_FILE];
2624                 nr_anon = nr[LRU_INACTIVE_ANON] + nr[LRU_ACTIVE_ANON];
2625 
2626                 /*
2627                  * It's just vindictive to attack the larger once the smaller
2628                  * has gone to zero.  And given the way we stop scanning the
2629                  * smaller below, this makes sure that we only make one nudge
2630                  * towards proportionality once we've got nr_to_reclaim.
2631                  */
2632                 if (!nr_file || !nr_anon)
2633                         break;
2634 
2635                 if (nr_file > nr_anon) {
2636                         unsigned long scan_target = targets[LRU_INACTIVE_ANON] +
2637                                                 targets[LRU_ACTIVE_ANON] + 1;
2638                         lru = LRU_BASE;
2639                         percentage = nr_anon * 100 / scan_target;
2640                 } else {
2641                         unsigned long scan_target = targets[LRU_INACTIVE_FILE] +
2642                                                 targets[LRU_ACTIVE_FILE] + 1;
2643                         lru = LRU_FILE;
2644                         percentage = nr_file * 100 / scan_target;
2645                 }
2646 
2647                 /* Stop scanning the smaller of the two LRUs */
2648                 nr[lru] = 0;
2649                 nr[lru + LRU_ACTIVE] = 0;
2650 
2651                 /*
2652                  * Recalculate the other LRU scan count based on its original
2653                  * scan target and the percentage of scanning already complete.
2654                  */
2655                 lru = (lru == LRU_FILE) ? LRU_BASE : LRU_FILE;
2656                 nr_scanned = targets[lru] - nr[lru];
2657                 nr[lru] = targets[lru] * (100 - percentage) / 100;
2658                 nr[lru] -= min(nr[lru], nr_scanned);
2659 
2660                 lru += LRU_ACTIVE;
2661                 nr_scanned = targets[lru] - nr[lru];
2662                 nr[lru] = targets[lru] * (100 - percentage) / 100;
2663                 nr[lru] -= min(nr[lru], nr_scanned);
2664 
2665                 scan_adjusted = true;
2666         }
2667         blk_finish_plug(&plug);
2668         sc->nr_reclaimed += nr_reclaimed;
2669 
2670         /*
2671          * Even if we did not try to evict anon pages at all, we want to
2672          * rebalance the anon lru active/inactive ratio.
2673          */
2674         if (inactive_list_is_low(lruvec, false, sc, true))
2675                 shrink_active_list(SWAP_CLUSTER_MAX, lruvec,
2676                                    sc, LRU_ACTIVE_ANON);
2677 }
2678 
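/*
 * A worked example of the proportional rebalancing above (illustration
 * only, hypothetical numbers, inactive+active collapsed into one list
 * for brevity; not kernel code). With an anon target of 1000 and a file
 * target of 4000, suppose nr_to_reclaim is met with 600 anon and 3600
 * file pages still to scan: anon is the smaller type, so it stops and
 * percentage = 600 * 100 / (1000 + 1) = 59.
 */
static unsigned long example_remaining_file_scan(void)
{
	unsigned long target = 4000, left = 3600, percentage = 59;
	unsigned long nr_scanned = target - left;		/* 400 */
	unsigned long nr = target * (100 - percentage) / 100;	/* 1640 */

	nr -= (nr < nr_scanned) ? nr : nr_scanned;		/* 1240 */
	return nr;	/* file scanning continues, keeping the 1:4 ratio */
}
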
2679 /* Use reclaim/compaction for costly allocs or under memory pressure */
2680 static bool in_reclaim_compaction(struct scan_control *sc)
2681 {
2682         if (IS_ENABLED(CONFIG_COMPACTION) && sc->order &&
2683                         (sc->order > PAGE_ALLOC_COSTLY_ORDER ||
2684                          sc->priority < DEF_PRIORITY - 2))
2685                 return true;
2686 
2687         return false;
2688 }
2689 
2690 /*
2691  * Reclaim/compaction is used for high-order allocation requests. It reclaims
2692  * order-0 pages before compacting the zone. should_continue_reclaim() returns
2693  * true if more pages should be reclaimed such that when the page allocator
2694  * calls try_to_compact_zone() it will have enough free pages to succeed.
2695  * It will give up earlier than that if there is difficulty reclaiming pages.
2696  */
2697 static inline bool should_continue_reclaim(struct pglist_data *pgdat,
2698                                         unsigned long nr_reclaimed,
2699                                         struct scan_control *sc)
2700 {
2701         unsigned long pages_for_compaction;
2702         unsigned long inactive_lru_pages;
2703         int z;
2704 
2705         /* If not in reclaim/compaction mode, stop */
2706         if (!in_reclaim_compaction(sc))
2707                 return false;
2708 
2709         /*
2710          * Stop if we failed to reclaim any pages from the last SWAP_CLUSTER_MAX
2711          * number of pages that were scanned. This will return to the caller
2712          * with the risk that reclaim/compaction and the resulting allocation
2713          * attempt fail. In the past we tried harder for __GFP_RETRY_MAYFAIL
2714          * allocations by requiring that the full LRU list had been scanned
2715          * first, assuming that a zero delta of sc->nr_scanned meant a full
2716          * LRU scan, but that approximation was wrong: there were corner
2717          * cases where a non-zero number of pages was always scanned.
2718          */
2719         if (!nr_reclaimed)
2720                 return false;
2721 
2722         /* If compaction would go ahead or the allocation would succeed, stop */
2723         for (z = 0; z <= sc->reclaim_idx; z++) {
2724                 struct zone *zone = &pgdat->node_zones[z];
2725                 if (!managed_zone(zone))
2726                         continue;
2727 
2728                 switch (compaction_suitable(zone, sc->order, 0, sc->reclaim_idx)) {
2729                 case COMPACT_SUCCESS:
2730                 case COMPACT_CONTINUE:
2731                         return false;
2732                 default:
2733                         /* check next zone */
2734                         ;
2735                 }
2736         }
2737 
2738         /*
2739          * If we have not reclaimed enough pages for compaction and the
2740          * inactive lists are large enough, continue reclaiming
2741          */
2742         pages_for_compaction = compact_gap(sc->order);
2743         inactive_lru_pages = node_page_state(pgdat, NR_INACTIVE_FILE);
2744         if (get_nr_swap_pages() > 0)
2745                 inactive_lru_pages += node_page_state(pgdat, NR_INACTIVE_ANON);
2746 
2747         return inactive_lru_pages > pages_for_compaction;
2748 }
2749 
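/*
 * The decision above, condensed into a standalone predicate (a sketch,
 * not kernel code, assuming reclaim/compaction mode is already active;
 * see in_reclaim_compaction()): keep reclaiming while progress is made,
 * no eligible zone is already compaction-ready, and the inactive lists
 * still exceed the compaction gap, which in this kernel is twice the
 * allocation size (compact_gap(order) == 2UL << order).
 */
static int example_continue_reclaim(unsigned long nr_reclaimed,
				    int zone_compaction_ready,
				    unsigned long inactive_lru_pages,
				    unsigned int order)
{
	if (!nr_reclaimed || zone_compaction_ready)
		return 0;
	return inactive_lru_pages > (2UL << order);
}
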
2750 static bool pgdat_memcg_congested(pg_data_t *pgdat, struct mem_cgroup *memcg)
2751 {
2752         return test_bit(PGDAT_CONGESTED, &pgdat->flags) ||
2753                 (memcg && memcg_congested(pgdat, memcg));
2754 }
2755 
2756 static bool shrink_node(pg_data_t *pgdat, struct scan_control *sc)
2757 {
2758         struct reclaim_state *reclaim_state = current->reclaim_state;
2759         unsigned long nr_reclaimed, nr_scanned;
2760         bool reclaimable = false;
2761 
2762         do {
2763                 struct mem_cgroup *root = sc->target_mem_cgroup;
2764                 unsigned long node_lru_pages = 0;
2765                 struct mem_cgroup *memcg;
2766 
2767                 memset(&sc->nr, 0, sizeof(sc->nr));
2768 
2769                 nr_reclaimed = sc->nr_reclaimed;
2770                 nr_scanned = sc->nr_scanned;
2771 
2772                 memcg = mem_cgroup_iter(root, NULL, NULL);
2773                 do {
2774                         unsigned long lru_pages;
2775                         unsigned long reclaimed;
2776                         unsigned long scanned;
2777 
2778                         switch (mem_cgroup_protected(root, memcg)) {
2779                         case MEMCG_PROT_MIN:
2780                                 /*
2781                                  * Hard protection.
2782                                  * If there is no reclaimable memory, OOM.
2783                                  */
2784                                 continue;
2785                         case MEMCG_PROT_LOW:
2786                                 /*
2787                                  * Soft protection.
2788                                  * Respect the protection only as long as
2789                                  * there is an unprotected supply
2790                                  * of reclaimable memory from other cgroups.
2791                                  */
2792                                 if (!sc->memcg_low_reclaim) {
2793                                         sc->memcg_low_skipped = 1;
2794                                         continue;
2795                                 }
2796                                 memcg_memory_event(memcg, MEMCG_LOW);
2797                                 break;
2798                         case MEMCG_PROT_NONE:
2799                                 /*
2800                                  * All protection thresholds breached. We may
2801                                  * still choose to vary the scan pressure
2802                                  * applied based on how much the cgroup in
2803                                  * question has exceeded its protection
2804                                  * thresholds (see get_scan_count).
2805                                  */
2806                                 break;
2807                         }
2808 
2809                         reclaimed = sc->nr_reclaimed;
2810                         scanned = sc->nr_scanned;
2811                         shrink_node_memcg(pgdat, memcg, sc, &lru_pages);
2812                         node_lru_pages += lru_pages;
2813 
2814                         shrink_slab(sc->gfp_mask, pgdat->node_id, memcg,
2815                                         sc->priority);
2816 
2817                         /* Record the group's reclaim efficiency */
2818                         vmpressure(sc->gfp_mask, memcg, false,
2819                                    sc->nr_scanned - scanned,
2820                                    sc->nr_reclaimed - reclaimed);
2821 
2822                 } while ((memcg = mem_cgroup_iter(root, memcg, NULL)));
2823 
2824                 if (reclaim_state) {
2825                         sc->nr_reclaimed += reclaim_state->reclaimed_slab;
2826                         reclaim_state->reclaimed_slab = 0;
2827                 }
2828 
2829                 /* Record the subtree's reclaim efficiency */
2830                 vmpressure(sc->gfp_mask, sc->target_mem_cgroup, true,
2831                            sc->nr_scanned - nr_scanned,
2832                            sc->nr_reclaimed - nr_reclaimed);
2833 
2834                 if (sc->nr_reclaimed - nr_reclaimed)
2835                         reclaimable = true;
2836 
2837                 if (current_is_kswapd()) {
2838                         /*
2839                          * If reclaim is isolating dirty pages under writeback,
2840                          * it implies that the long-lived page allocation rate
2841                          * is exceeding the page laundering rate. Either the
2842                          * global limits are not being effective at throttling
2843                          * processes due to the page distribution throughout
2844                          * zones or there is heavy usage of a slow backing
2845                          * device. The only option is to throttle from reclaim
2846                          * context which is not ideal as there is no guarantee
2847                          * the dirtying process is throttled in the same way
2848                          * that balance_dirty_pages() does.
2849                          *
2850                          * Once a node is flagged PGDAT_WRITEBACK, kswapd will
2851                          * count the number of pages under writeback that are
2852                          * flagged for immediate reclaim and stall if any are
2853                          * encountered in the nr_immediate check below.
2854                          */
2855                         if (sc->nr.writeback && sc->nr.writeback == sc->nr.taken)
2856                                 set_bit(PGDAT_WRITEBACK, &pgdat->flags);
2857 
2858                         /*
2859                          * Tag a node as congested if all the dirty pages
2860                          * scanned were backed by a congested BDI and
2861                          * wait_iff_congested will stall.
2862                          */
2863                         if (sc->nr.dirty && sc->nr.dirty == sc->nr.congested)
2864                                 set_bit(PGDAT_CONGESTED, &pgdat->flags);
2865 
2866                         /* Allow kswapd to start writing pages during reclaim. */
2867                         if (sc->nr.unqueued_dirty == sc->nr.file_taken)
2868                                 set_bit(PGDAT_DIRTY, &pgdat->flags);
2869 
2870                         /*
2871                          * If kswapd scans pages marked for immediate
2872                          * reclaim and under writeback (nr_immediate), it
2873                          * implies that pages are cycling through the LRU
2874                          * faster than they are written so also forcibly stall.
2875                          */
2876                         if (sc->nr.immediate)
2877                                 congestion_wait(BLK_RW_ASYNC, HZ/10);
2878                 }
2879 
2880                 /*
2881                  * Legacy memcg will stall in page writeback so avoid forcibly
2882                  * stalling in wait_iff_congested().
2883                  */
2884                 if (!global_reclaim(sc) && sane_reclaim(sc) &&
2885                     sc->nr.dirty && sc->nr.dirty == sc->nr.congested)
2886                         set_memcg_congestion(pgdat, root, true);
2887 
2888                 /*
2889          * Stall direct reclaim for IO completions if the underlying BDIs
2890          * and the node are congested. Allow kswapd to continue until it
2891                  * starts encountering unqueued dirty pages or cycling through
2892                  * the LRU too quickly.
2893                  */
2894                 if (!sc->hibernation_mode && !current_is_kswapd() &&
2895                    current_may_throttle() && pgdat_memcg_congested(pgdat, root))
2896                         wait_iff_congested(BLK_RW_ASYNC, HZ/10);
2897 
2898         } while (should_continue_reclaim(pgdat, sc->nr_reclaimed - nr_reclaimed,
2899                                          sc));
2900 
2901         /*
2902          * Kswapd gives up on balancing particular nodes after too
2903          * many failures to reclaim anything from them and goes to
2904          * sleep. On reclaim progress, reset the failure counter. A
2905          * successful direct reclaim run will revive a dormant kswapd.
2906          */
2907         if (reclaimable)
2908                 pgdat->kswapd_failures = 0;
2909 
2910         return reclaimable;
2911 }
2912 
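/*
 * The stall heuristics in shrink_node() above, summarized (a condensed
 * restatement for reference, not kernel code); each test compares
 * counters accumulated while scanning against pages taken off the LRU:
 *
 *	writeback == taken            -> PGDAT_WRITEBACK      (kswapd)
 *	dirty == congested            -> PGDAT_CONGESTED      (kswapd)
 *	unqueued_dirty == file_taken  -> PGDAT_DIRTY          (kswapd)
 *	immediate != 0                -> congestion_wait()    (kswapd)
 *	dirty == congested            -> memcg congestion     (legacy memcg)
 *	node/memcg congested          -> wait_iff_congested() (direct reclaim)
 */
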
2913 /*
2914  * Returns true if compaction should go ahead for a costly-order request, or
2915  * the allocation would already succeed without compaction. Return false if we
2916  * should reclaim first.
2917  */
2918 static inline bool compaction_ready(struct zone *zone, struct scan_control *sc)
2919 {
2920         unsigned long watermark;
2921         enum compact_result suitable;
2922 
2923         suitable = compaction_suitable(zone, sc->order, 0, sc->reclaim_idx);
2924         if (suitable == COMPACT_SUCCESS)
2925                 /* Allocation should succeed already. Don't reclaim. */
2926                 return true;
2927         if (suitable == COMPACT_SKIPPED)
2928                 /* Compaction cannot yet proceed. Do reclaim. */
2929                 return false;
2930 
2931         /*
2932          * Compaction is already possible, but it takes time to run and there
2933          * are potentially other callers using the pages just freed. So proceed
2934          * with reclaim to make a buffer of free pages available to give
2935          * compaction a reasonable chance of completing and allocating the page.
2936          * Note that we won't actually reclaim the whole buffer in one attempt
2937          * as the target watermark in should_continue_reclaim() is lower. But if
2938          * we are already above the high+gap watermark, don't reclaim at all.
2939          */
2940         watermark = high_wmark_pages(zone) + compact_gap(sc->order);
2941 
2942         return zone_watermark_ok_safe(zone, 0, watermark, sc->reclaim_idx);
2943 }
2944 
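/*
 * A small sketch of the watermark buffer above (illustrative only;
 * example_compaction_watermark() is not a kernel symbol). As noted for
 * should_continue_reclaim(), compact_gap(order) is 2UL << order in this
 * kernel, i.e. twice the allocation size.
 */
static unsigned long example_compaction_watermark(unsigned long high_wmark,
						  unsigned int order)
{
	/*
	 * E.g. an order-9 THP with a hypothetical 4096-page high
	 * watermark: 4096 + (2UL << 9) = 5120 pages must be free before
	 * reclaim backs off and leaves the rest to compaction.
	 */
	return high_wmark + (2UL << order);
}
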
2945 /*
2946  * This is the direct reclaim path, for page-allocating processes.  We only
2947  * try to reclaim pages from zones which will satisfy the caller's allocation
2948  * request.
2949  *
2950  * If a zone is deemed to be full of pinned pages then just give it a light
2951  * scan and then give up on it.
2952  */
2953 static void shrink_zones(struct zonelist *zonelist, struct scan_control *sc)
2954 {
2955         struct zoneref *z;
2956         struct zone *zone;
2957         unsigned long nr_soft_reclaimed;
2958         unsigned long nr_soft_scanned;
2959         gfp_t orig_mask;
2960         pg_data_t *last_pgdat = NULL;
2961 
2962         /*
2963          * If the number of buffer_heads in the machine exceeds the maximum
2964          * allowed level, force direct reclaim to scan the highmem zone as
2965          * highmem pages could be pinning lowmem pages storing buffer_heads
2966          */
2967         orig_mask = sc->gfp_mask;
2968         if (buffer_heads_over_limit) {
2969                 sc->gfp_mask |= __GFP_HIGHMEM;
2970                 sc->reclaim_idx = gfp_zone(sc->gfp_mask);
2971         }
2972 
2973         for_each_zone_zonelist_nodemask(zone, z, zonelist,
2974                                         sc->reclaim_idx, sc->nodemask) {
2975                 /*
2976                  * Take care that memory controller reclaim has little
2977                  * influence on the global LRU.
2978                  */
2979                 if (global_reclaim(sc)) {
2980                         if (!cpuset_zone_allowed(zone,
2981                                                  GFP_KERNEL | __GFP_HARDWALL))
2982                                 continue;
2983 
2984                         /*
2985                          * If we already have plenty of memory free for
2986                          * compaction in this zone, don't free any more.
2987                          * Even though compaction is invoked for any
2988                          * non-zero order, only frequent costly order
2989                          * reclamation is disruptive enough to become a
2990                          * noticeable problem, like transparent huge
2991                          * page allocations.
2992                          */
2993                         if (IS_ENABLED(CONFIG_COMPACTION) &&
2994                             sc->order > PAGE_ALLOC_COSTLY_ORDER &&
2995                             compaction_ready(zone, sc)) {
2996                                 sc->compaction_ready = true;
2997                                 continue;
2998                         }
2999 
3000                         /*
3001                          * Shrink each node in the zonelist once. If the
3002                          * zonelist is ordered by zone (not the default) then a
3003                          * node may be shrunk multiple times but in that case
3004                          * the user prefers lower zones being preserved.
3005                          */
3006                         if (zone->zone_pgdat == last_pgdat)
3007                                 continue;
3008 
3009                         /*
3010                          * This steals pages from memory cgroups over softlimit
3011                          * and returns the number of reclaimed pages and
3012                          * scanned pages. This works for global memory pressure
3013                          * and balancing, not for a memcg's limit.
3014                          */
3015                         nr_soft_scanned = 0;
3016                         nr_soft_reclaimed = mem_cgroup_soft_limit_reclaim(zone->zone_pgdat,
3017                                                 sc->order, sc->gfp_mask,
3018                                                 &nr_soft_scanned);
3019                         sc->nr_reclaimed += nr_soft_reclaimed;
3020                         sc->nr_scanned += nr_soft_scanned;
3021                         /* need some check to avoid more shrink_node() calls */
3022                 }
3023 
3024                 /* See comment about same check for global reclaim above */
3025                 if (zone->zone_pgdat == last_pgdat)
3026                         continue;
3027                 last_pgdat = zone->zone_pgdat;
3028                 shrink_node(zone->zone_pgdat, sc);
3029         }
3030 
3031         /*
3032          * Restore to original mask to avoid the impact on the caller if we
3033          * promoted it to __GFP_HIGHMEM.
3034          */
3035         sc->gfp_mask = orig_mask;
3036 }
3037 
3038 static void snapshot_refaults(struct mem_cgroup *root_memcg, pg_data_t *pgdat)
3039 {
3040         struct mem_cgroup *memcg;
3041 
3042         memcg = mem_cgroup_iter(root_memcg, NULL, NULL);
3043         do {
3044                 unsigned long refaults;
3045                 struct lruvec *lruvec;
3046 
3047                 lruvec = mem_cgroup_lruvec(pgdat, memcg);
3048                 refaults = lruvec_page_state_local(lruvec, WORKINGSET_ACTIVATE);
3049                 lruvec->refaults = refaults;
3050         } while ((memcg = mem_cgroup_iter(root_memcg, memcg, NULL)));
3051 }
3052 
3053 /*
3054  * This is the main entry point to direct page reclaim.
3055  *
3056  * If a full scan of the inactive list fails to free enough memory then we
3057  * are "out of memory" and something needs to be killed.
3058  *
3059  * If the caller is !__GFP_FS then the probability of a failure is reasonably
3060  * high - the zone may be full of dirty or under-writeback pages, which this
3061  * caller can't do much about.  We kick the writeback threads and take explicit
3062  * naps in the hope that some of these pages can be written.  But if the
3063  * allocating task holds filesystem locks which prevent writeout this might not
3064  * work, and the allocation attempt will fail.
3065  *
3066  * returns:     0, if no pages reclaimed
3067  *              else, the number of pages reclaimed
3068  */
3069 static unsigned long do_try_to_free_pages(struct zonelist *zonelist,
3070                                           struct scan_control *sc)
3071 {
3072         int initial_priority = sc->priority;
3073         pg_data_t *last_pgdat;
3074         struct zoneref *z;
3075         struct zone *zone;
3076 retry:
3077         delayacct_freepages_start();
3078 
3079         if (global_reclaim(sc))
3080                 __count_zid_vm_events(ALLOCSTALL, sc->reclaim_idx, 1);
3081 
3082         do {
3083                 vmpressure_prio(sc->gfp_mask, sc->target_mem_cgroup,
3084                                 sc->priority);
3085                 sc->nr_scanned = 0;
3086                 shrink_zones(zonelist, sc);
3087 
3088                 if (sc->nr_reclaimed >= sc->nr_to_reclaim)
3089                         break;
3090 
3091                 if (sc->compaction_ready)
3092                         break;
3093 
3094                 /*
3095                  * If we're having trouble reclaiming, start doing
3096                  * writepage even in laptop mode.
3097                  */
3098                 if (sc->priority < DEF_PRIORITY - 2)
3099                         sc->may_writepage = 1;
3100         } while (--sc->priority >= 0);
3101 
3102         last_pgdat = NULL;
3103         for_each_zone_zonelist_nodemask(zone, z, zonelist, sc->reclaim_idx,
3104                                         sc->nodemask) {
3105                 if (zone->zone_pgdat == last_pgdat)
3106                         continue;
3107                 last_pgdat = zone->zone_pgdat;
3108                 snapshot_refaults(sc->target_mem_cgroup, zone->zone_pgdat);
3109                 set_memcg_congestion(last_pgdat, sc->target_mem_cgroup, false);
3110         }
3111 
3112         delayacct_freepages_end();
3113 
3114         if (sc->nr_reclaimed)
3115                 return sc->nr_reclaimed;
3116 
3117         /* Aborted reclaim to try compaction? Don't OOM, then */
3118         if (sc->compaction_ready)
3119                 return 1;
3120 
3121         /* Untapped cgroup reserves?  Don't OOM, retry. */
3122         if (sc->memcg_low_skipped) {
3123                 sc->priority = initial_priority;
3124                 sc->memcg_low_reclaim = 1;
3125                 sc->memcg_low_skipped = 0;
3126                 goto retry;
3127         }
3128 
3129         return 0;
3130 }
3131 
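/*
 * Shape of the priority loop above as a standalone sketch (not kernel
 * code): each pass that fails to reclaim enough halves the divisor on
 * the scan window, from roughly 1/4096 of each LRU at DEF_PRIORITY (12)
 * up to the whole list at priority 0.
 */
static unsigned long example_scan_window(unsigned long lruvec_size,
					 int priority)
{
	return lruvec_size >> priority;	/* e.g. 1048576 >> 12 = 256 */
}
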
3132 static bool allow_direct_reclaim(pg_data_t *pgdat)
3133 {
3134         struct zone *zone;
3135         unsigned long pfmemalloc_reserve = 0;
3136         unsigned long free_pages = 0;
3137         int i;
3138         bool wmark_ok;
3139 
3140         if (pgdat->kswapd_failures >= MAX_RECLAIM_RETRIES)
3141                 return true;
3142 
3143         for (i = 0; i <= ZONE_NORMAL; i++) {
3144                 zone = &pgdat->node_zones[i];
3145                 if (!managed_zone(zone))
3146                         continue;
3147 
3148                 if (!zone_reclaimable_pages(zone))
3149                         continue;
3150 
3151                 pfmemalloc_reserve += min_wmark_pages(zone);
3152                 free_pages += zone_page_state(zone, NR_FREE_PAGES);
3153         }
3154 
3155         /* If there are no reserves (unexpected config) then do not throttle */
3156         if (!pfmemalloc_reserve)
3157                 return true;
3158 
3159         wmark_ok = free_pages > pfmemalloc_reserve / 2;
3160 
3161         /* kswapd must be awake if processes are being throttled */
3162         if (!wmark_ok && waitqueue_active(&pgdat->kswapd_wait)) {
3163                 pgdat->kswapd_classzone_idx = min(pgdat->kswapd_classzone_idx,
3164                                                 (enum zone_type)ZONE_NORMAL);
3165                 wake_up_interruptible(&pgdat->kswapd_wait);
3166         }
3167 
3168         return wmark_ok;
3169 }
3170 
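/*
 * The throttling test above in isolation (hypothetical numbers, not
 * kernel code): direct reclaim is throttled once free pages in the
 * ZONE_NORMAL-and-below zones fall to half the summed min watermarks.
 */
static int example_wmark_ok(void)
{
	unsigned long pfmemalloc_reserve = 16384; /* sum of min_wmark_pages */
	unsigned long free_pages = 6144;	  /* sum of NR_FREE_PAGES */

	return free_pages > pfmemalloc_reserve / 2; /* 6144 > 8192 is false: throttle */
}
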
3171 /*
3172  * Throttle direct reclaimers if backing storage is backed by the network
3173  * and the PFMEMALLOC reserve for the preferred node is getting dangerously
3174  * depleted. kswapd will continue to make progress and wake the processes
3175  * when the low watermark is reached.
3176  *
3177  * Returns true if a fatal signal was delivered during throttling. If this
3178  * happens, the page allocator should not consider triggering the OOM killer.
3179  */
3180 static bool throttle_direct_reclaim(gfp_t gfp_mask, struct zonelist *zonelist,
3181                                         nodemask_t *nodemask)
3182 {
3183         struct zoneref *z;
3184         struct zone *zone;
3185         pg_data_t *pgdat = NULL;
3186 
3187         /*
3188          * Kernel threads should not be throttled as they may be indirectly
3189          * responsible for cleaning pages necessary for reclaim to make forward
3190          * progress. kjournald for example may enter direct reclaim while
3191          * committing a transaction where throttling it could force other
3192          * processes to block on log_wait_commit().
3193          */
3194         if (current->flags & PF_KTHREAD)
3195                 goto out;
3196 
3197         /*
3198          * If a fatal signal is pending, this process should not throttle.
3199          * It should return quickly so it can exit and free its memory.
3200          */
3201         if (fatal_signal_pending(current))
3202                 goto out;
3203 
3204         /*
3205          * Check if the pfmemalloc reserves are ok by finding the first node
3206          * with a usable ZONE_NORMAL or lower zone. The expectation is that
3207          * GFP_KERNEL will be required for allocating network buffers when
3208          * swapping over the network so ZONE_HIGHMEM is unusable.
3209          *
3210          * Throttling is based on the first usable node and throttled processes
3211          * wait on a queue until kswapd makes progress and wakes them. There
3212          * is an affinity then between processes waking up and where reclaim
3213          * is then an affinity between processes waking up and where reclaim
3214          * progress has been made, assuming the process wakes on the same node.
3215          * for remote pfmemalloc reserves and processes on different nodes
3216          * should make reasonable progress.
3217          */
3218         for_each_zone_zonelist_nodemask(zone, z, zonelist,
3219                                         gfp_zone(gfp_mask), nodemask) {
3220                 if (zone_idx(zone) > ZONE_NORMAL)
3221                         continue;
3222 
3223                 /* Throttle based on the first usable node */
3224                 pgdat = zone->zone_pgdat;
3225                 if (allow_direct_reclaim(pgdat))
3226                         goto out;
3227                 break;
3228         }
3229 
3230         /* If no zone was usable by the allocation flags then do not throttle */
3231         if (!pgdat)
3232                 goto out;
3233 
3234         /* Account for the throttling */
3235         count_vm_event(PGSCAN_DIRECT_THROTTLE);
3236 
3237         /*
3238          * If the caller cannot enter the filesystem, it's possible that it
3239          * is due to the caller holding an FS lock or performing a journal
3240          * transaction in the case of a filesystem like ext[3|4]. In this case,
3241          * it is not safe to block on pfmemalloc_wait as kswapd could be
3242          * blocked waiting on the same lock. Instead, throttle for up to a
3243          * second before continuing.
3244          */
3245         if (!(gfp_mask & __GFP_FS)) {
3246                 wait_event_interruptible_timeout(pgdat->pfmemalloc_wait,
3247                         allow_direct_reclaim(pgdat), HZ);
3248 
3249                 goto check_pending;
3250         }
3251 
3252         /* Throttle until kswapd wakes the process */
3253         wait_event_killable(zone->zone_pgdat->pfmemalloc_wait,
3254                 allow_direct_reclaim(pgdat));
3255 
3256 check_pending:
3257         if (fatal_signal_pending(current))
3258                 return true;
3259 
3260 out:
3261         return false;
3262 }
3263 
3264 unsigned long try_to_free_pages(struct zonelist *zonelist, int order,
3265                                 gfp_t gfp_mask, nodemask_t *nodemask)
3266 {
3267         unsigned long nr_reclaimed;
3268         struct scan_control sc = {
3269                 .nr_to_reclaim = SWAP_CLUSTER_MAX,
3270                 .gfp_mask = current_gfp_context(gfp_mask),
3271                 .reclaim_idx = gfp_zone(gfp_mask),
3272                 .order = order,
3273                 .nodemask = nodemask,
3274                 .priority = DEF_PRIORITY,
3275                 .may_writepage = !laptop_mode,
3276                 .may_unmap = 1,
3277                 .may_swap = 1,
3278         };
3279 
3280         /*
3281          * scan_control uses s8 fields for order, priority, and reclaim_idx.
3282          * Confirm they are large enough for max values.
3283          */
3284         BUILD_BUG_ON(MAX_ORDER > S8_MAX);
3285         BUILD_BUG_ON(DEF_PRIORITY > S8_MAX);
3286         BUILD_BUG_ON(MAX_NR_ZONES > S8_MAX);
3287 
3288         /*
3289          * Do not enter reclaim if fatal signal was delivered while throttled.
3290          * 1 is returned so that the page allocator does not OOM kill at this
3291          * point.
3292          */
3293         if (throttle_direct_reclaim(sc.gfp_mask, zonelist, nodemask))
3294                 return 1;
3295 
3296         set_task_reclaim_state(current, &sc.reclaim_state);
3297         trace_mm_vmscan_direct_reclaim_begin(order, sc.gfp_mask);
3298 
3299         nr_reclaimed = do_try_to_free_pages(zonelist, &sc);
3300 
3301         trace_mm_vmscan_direct_reclaim_end(nr_reclaimed);
3302         set_task_reclaim_state(current, NULL);
3303 
3304         return nr_reclaimed;
3305 }
3306 
3307 #ifdef CONFIG_MEMCG
3308 
3309 /* Only used by soft limit reclaim. Do not reuse for anything else. */
3310 unsigned long mem_cgroup_shrink_node(struct mem_cgroup *memcg,
3311                                                 gfp_t gfp_mask, bool noswap,
3312                                                 pg_data_t *pgdat,
3313                                                 unsigned long *nr_scanned)
3314 {
3315         struct scan_control sc = {
3316                 .nr_to_reclaim = SWAP_CLUSTER_MAX,
3317                 .target_mem_cgroup = memcg,
3318                 .may_writepage = !laptop_mode,
3319                 .may_unmap = 1,
3320                 .reclaim_idx = MAX_NR_ZONES - 1,
3321                 .may_swap = !noswap,
3322         };
3323         unsigned long lru_pages;
3324 
3325         WARN_ON_ONCE(!current->reclaim_state);
3326 
3327         sc.gfp_mask = (gfp_mask & GFP_RECLAIM_MASK) |
3328                         (GFP_HIGHUSER_MOVABLE & ~GFP_RECLAIM_MASK);
3329 
3330         trace_mm_vmscan_memcg_softlimit_reclaim_begin(sc.order,
3331                                                       sc.gfp_mask);
3332 
3333         /*
3334          * NOTE: Although we can get the priority field, using it
3335          * here is not a good idea, since it limits the pages we can scan.
3336          * If we don't reclaim here, the shrink_node from balance_pgdat
3337          * will pick up pages from other mem cgroups as well. We hack
3338          * the priority and make it zero.
3339          */
3340         shrink_node_memcg(pgdat, memcg, &sc, &lru_pages);
3341 
3342         trace_mm_vmscan_memcg_softlimit_reclaim_end(sc.nr_reclaimed);
3343 
3344         *nr_scanned = sc.nr_scanned;
3345 
3346         return sc.nr_reclaimed;
3347 }
3348 
3349 unsigned long try_to_free_mem_cgroup_pages(struct mem_cgroup *memcg,
3350                                            unsigned long nr_pages,
3351                                            gfp_t gfp_mask,
3352                                            bool may_swap)
3353 {
3354         struct zonelist *zonelist;
3355         unsigned long nr_reclaimed;
3356         unsigned long pflags;
3357         int nid;
3358         unsigned int noreclaim_flag;
3359         struct scan_control sc = {
3360                 .nr_to_reclaim = max(nr_pages, SWAP_CLUSTER_MAX),
3361                 .gfp_mask = (current_gfp_context(gfp_mask) & GFP_RECLAIM_MASK) |
3362                                 (GFP_HIGHUSER_MOVABLE & ~GFP_RECLAIM_MASK),
3363                 .reclaim_idx = MAX_NR_ZONES - 1,
3364                 .target_mem_cgroup = memcg,
3365                 .priority = DEF_PRIORITY,
3366                 .may_writepage = !laptop_mode,
3367                 .may_unmap = 1,
3368                 .may_swap = may_swap,
3369         };
3370 
3371         set_task_reclaim_state(current, &sc.reclaim_state);
3372         /*
3373          * Unlike direct reclaim via alloc_pages(), memcg reclaim doesn't
3374          * care from where we get pages. So the node where we start the
3375          * scan does not need to be the current node.
3376          */
3377         nid = mem_cgroup_select_victim_node(memcg);
3378 
3379         zonelist = &NODE_DATA(nid)->node_zonelists[ZONELIST_FALLBACK];
3380 
3381         trace_mm_vmscan_memcg_reclaim_begin(0, sc.gfp_mask);
3382 
3383         psi_memstall_enter(&pflags);
3384         noreclaim_flag = memalloc_noreclaim_save();
3385 
3386         nr_reclaimed = do_try_to_free_pages(zonelist, &sc);
3387 
3388         memalloc_noreclaim_restore(noreclaim_flag);
3389         psi_memstall_leave(&pflags);
3390 
3391         trace_mm_vmscan_memcg_reclaim_end(nr_reclaimed);
3392         set_task_reclaim_state(current, NULL);
3393 
3394         return nr_reclaimed;
3395 }
3396 #endif
3397 
3398 static void age_active_anon(struct pglist_data *pgdat,
3399                                 struct scan_control *sc)
3400 {
3401         struct mem_cgroup *memcg;
3402 
3403         if (!total_swap_pages)
3404                 return;
3405 
3406         memcg = mem_cgroup_iter(NULL, NULL, NULL);
3407         do {
3408                 struct lruvec *lruvec = mem_cgroup_lruvec(pgdat, memcg);
3409 
3410                 if (inactive_list_is_low(lruvec, false, sc, true))
3411                         shrink_active_list(SWAP_CLUSTER_MAX, lruvec,
3412                                            sc, LRU_ACTIVE_ANON);
3413 
3414                 memcg = mem_cgroup_iter(NULL, memcg, NULL);
3415         } while (memcg);
3416 }
3417 
3418 static bool pgdat_watermark_boosted(pg_data_t *pgdat, int classzone_idx)
3419 {
3420         int i;
3421         struct zone *zone;
3422 
3423         /*
3424          * Check for watermark boosts top-down as the higher zones
3425          * are more likely to be boosted. Both watermarks and boosts
3426          * should not be checked at the same time as reclaim would
3427          * start prematurely when there is no boosting and a lower
3428          * zone is balanced.
3429          */
3430         for (i = classzone_idx; i >= 0; i--) {
3431                 zone = pgdat->node_zones + i;
3432                 if (!managed_zone(zone))
3433                         continue;
3434 
3435                 if (zone->watermark_boost)
3436                         return true;
3437         }
3438 
3439         return false;
3440 }
3441 
3442 /*
3443  * Returns true if there is an eligible zone balanced for the requested order
3444  * and classzone_idx.
3445  */
3446 static bool pgdat_balanced(pg_data_t *pgdat, int order, int classzone_idx)
3447 {
3448         int i;
3449         unsigned long mark = -1;
3450         struct zone *zone;
3451 
3452         /*
3453          * Check watermarks bottom-up as lower zones are more likely to
3454          * meet watermarks.
3455          */
3456         for (i = 0; i <= classzone_idx; i++) {
3457                 zone = pgdat->node_zones + i;
3458 
3459                 if (!managed_zone(zone))
3460                         continue;
3461 
3462                 mark = high_wmark_pages(zone);
3463                 if (zone_watermark_ok_safe(zone, order, mark, classzone_idx))
3464                         return true;
3465         }
3466 
3467         /*
3468          * If a node has no populated zone within classzone_idx, it does not
3469          * need balancing by definition. This can happen if a zone-restricted
3470          * allocation tries to wake a remote kswapd.
3471          */
3472         if (mark == -1)
3473                 return true;
3474 
3475         return false;
3476 }
3477 
3478 /* Clear pgdat state for congested, dirty or under writeback. */
3479 static void clear_pgdat_congested(pg_data_t *pgdat)
3480 {
3481         clear_bit(PGDAT_CONGESTED, &pgdat->flags);
3482         clear_bit(PGDAT_DIRTY, &pgdat->flags);
3483         clear_bit(PGDAT_WRITEBACK, &pgdat->flags);
3484 }
3485 
3486 /*
3487  * Prepare kswapd for sleeping. This verifies that there are no processes
3488  * waiting in throttle_direct_reclaim() and that watermarks have been met.
3489  *
3490  * Returns true if kswapd is ready to sleep
3491  */
3492 static bool prepare_kswapd_sleep(pg_data_t *pgdat, int order, int classzone_idx)
3493 {
3494         /*
3495          * The throttled processes are normally woken up in balance_pgdat() as
3496          * soon as allow_direct_reclaim() is true. But there is a potential
3497          * race between when kswapd checks the watermarks and a process gets
3498          * throttled. There is also a potential race if processes get
3499          * throttled, kswapd wakes, a large process exits thereby balancing the
3500          * zones, which causes kswapd to exit balance_pgdat() before reaching
3501          * the wake up checks. If kswapd is going to sleep, no process should
3502          * be sleeping on pfmemalloc_wait, so wake them now if necessary. If
3503          * the wake up is premature, processes will wake kswapd and get
3504          * throttled again. The difference from wake ups in balance_pgdat() is
3505          * that here we are under prepare_to_wait().
3506          */
3507         if (waitqueue_active(&pgdat->pfmemalloc_wait))
3508                 wake_up_all(&pgdat->pfmemalloc_wait);
3509 
3510         /* Hopeless node, leave it to direct reclaim */
3511         if (pgdat->kswapd_failures >= MAX_RECLAIM_RETRIES)
3512                 return true;
3513 
3514         if (pgdat_balanced(pgdat, order, classzone_idx)) {
3515                 clear_pgdat_congested(pgdat);
3516                 return true;
3517         }
3518 
3519         return false;
3520 }
3521 
3522 /*
3523  * kswapd shrinks a node, reclaiming pages that are at or below the highest
3524  * usable zone that is currently unbalanced.
3525  *
3526  * Returns true if kswapd scanned at least the requested number of pages to
3527  * reclaim or if the lack of progress was due to pages under writeback.
3528  * This is used to determine if the scanning priority needs to be raised.
3529  */
3530 static bool kswapd_shrink_node(pg_data_t *pgdat,
3531                                struct scan_control *sc)
3532 {
3533         struct zone *zone;
3534         int z;
3535 
3536         /* Reclaim a number of pages proportional to the number of zones */
3537         sc->nr_to_reclaim = 0;
3538         for (z = 0; z <= sc->reclaim_idx; z++) {
3539                 zone = pgdat->node_zones + z;
3540                 if (!managed_zone(zone))
3541                         continue;
3542 
3543                 sc->nr_to_reclaim += max(high_wmark_pages(zone), SWAP_CLUSTER_MAX);
3544         }
3545 
3546         /*
3547          * Historically care was taken to put equal pressure on all zones but
3548          * now pressure is applied based on node LRU order.
3549          */
3550         shrink_node(pgdat, sc);
3551 
3552         /*
3553          * Fragmentation may mean that the system cannot be rebalanced for
3554          * high-order allocations. If twice the allocation size has been
3555          * reclaimed then recheck watermarks only at order-0 to prevent
3556          * excessive reclaim. Assume that a process that requested a
3557          * high-order allocation can direct reclaim/compact.
3558          */
3559         if (sc->order && sc->nr_reclaimed >= compact_gap(sc->order))
3560                 sc->order = 0;
3561 
3562         return sc->nr_scanned >= sc->nr_to_reclaim;
3563 }
3564 
3565 /*
3566  * For kswapd, balance_pgdat() will reclaim pages across a node from zones
3567  * that are eligible for use by the caller until at least one zone is
3568  * balanced.
3569  *
3570  * Returns the order kswapd finished reclaiming at.
3571  *
3572  * kswapd scans the zones in the highmem->normal->dma direction.  It skips
3573  * zones which have free_pages > high_wmark_pages(zone), but once a zone is
3574  * found to have free_pages <= high_wmark_pages(zone), any page in that zone
3575  * or lower is eligible for reclaim until at least one usable zone is
3576  * balanced.
3577  */
3578 static int balance_pgdat(pg_data_t *pgdat, int order, int classzone_idx)
3579 {
3580         int i;
3581         unsigned long nr_soft_reclaimed;
3582         unsigned long nr_soft_scanned;
3583         unsigned long pflags;
3584         unsigned long nr_boost_reclaim;
3585         unsigned long zone_boosts[MAX_NR_ZONES] = { 0, };
3586         bool boosted;
3587         struct zone *zone;
3588         struct scan_control sc = {
3589                 .gfp_mask = GFP_KERNEL,
3590                 .order = order,
3591                 .may_unmap = 1,
3592         };
3593 
3594         set_task_reclaim_state(current, &sc.reclaim_state);
3595         psi_memstall_enter(&pflags);
3596         __fs_reclaim_acquire();
3597 
3598         count_vm_event(PAGEOUTRUN);
3599 
3600         /*
3601          * Account for the reclaim boost. Note that the zone boost is left in
3602          * place so that parallel allocations that are near the watermark will
3603          * stall or enter direct reclaim until kswapd is finished.
3604          */
3605         nr_boost_reclaim = 0;
3606         for (i = 0; i <= classzone_idx; i++) {
3607                 zone = pgdat->node_zones + i;
3608                 if (!managed_zone(zone))
3609                         continue;
3610 
3611                 nr_boost_reclaim += zone->watermark_boost;
3612                 zone_boosts[i] = zone->watermark_boost;
3613         }
3614         boosted = nr_boost_reclaim;
3615 
3616 restart:
3617         sc.priority = DEF_PRIORITY;
3618         do {
3619                 unsigned long nr_reclaimed = sc.nr_reclaimed;
3620                 bool raise_priority = true;
3621                 bool balanced;
3622                 bool ret;
3623 
3624                 sc.reclaim_idx = classzone_idx;
3625 
3626                 /*
3627                  * If the number of buffer_heads exceeds the maximum allowed
3628                  * then consider reclaiming from all zones. This has a dual
3629                  * purpose -- on 64-bit systems it is expected that
3630                  * buffer_heads are stripped during active rotation. On 32-bit
3631                  * systems, highmem pages can pin lowmem memory and shrinking
3632                  * buffers can relieve lowmem pressure. Reclaim may still not
3633                  * go ahead if all eligible zones for the original allocation
3634                  * request are balanced to avoid excessive reclaim from kswapd.
3635                  */
3636                 if (buffer_heads_over_limit) {
3637                         for (i = MAX_NR_ZONES - 1; i >= 0; i--) {
3638                                 zone = pgdat->node_zones + i;
3639                                 if (!managed_zone(zone))
3640                                         continue;
3641 
3642                                 sc.reclaim_idx = i;
3643                                 break;
3644                         }
3645                 }
3646 
3647                 /*
3648                  * If the pgdat is imbalanced then ignore boosting and preserve
3649                  * the watermarks for a later time and restart. Note that the
3650                  * zone watermarks will still be reset at the end of balancing
3651                  * on the grounds that the normal reclaim should be enough to
3652                  * re-evaluate if boosting is required when kswapd next wakes.
3653                  */
3654                 balanced = pgdat_balanced(pgdat, sc.order, classzone_idx);
3655                 if (!balanced && nr_boost_reclaim) {
3656                         nr_boost_reclaim = 0;
3657                         goto restart;
3658                 }
3659 
3660                 /*
3661                  * If boosting is not active then only reclaim if there are no
3662                  * eligible zones. Note that sc.reclaim_idx is not used as
3663                  * buffer_heads_over_limit may have adjusted it.
3664                  */
3665                 if (!nr_boost_reclaim && balanced)
3666                         goto out;
3667 
3668                 /* Limit the priority of boosting to avoid reclaim writeback */
3669                 if (nr_boost_reclaim && sc.priority == DEF_PRIORITY - 2)
3670                         raise_priority = false;
3671 
3672                 /*
3673                  * Do not writeback or swap pages for boosted reclaim. The
3674                  * intent is to relieve pressure, not issue sub-optimal IO
3675                  * from reclaim context. If no pages are reclaimed, the
3676                  * reclaim will be aborted.
3677                  */
3678                 sc.may_writepage = !laptop_mode && !nr_boost_reclaim;
3679                 sc.may_swap = !nr_boost_reclaim;
3680 
3681                 /*
3682                  * Do some background aging of the anon list, to give
3683                  * pages a chance to be referenced before reclaiming. All
3684                  * pages are rotated regardless of classzone as this is
3685                  * about consistent aging.
3686                  */
3687                 age_active_anon(pgdat, &sc);
3688 
3689                 /*
3690                  * If we're having trouble reclaiming, start doing writepage
3691                  * even in laptop mode.
3692                  */
3693                 if (sc.priority < DEF_PRIORITY - 2)
3694                         sc.may_writepage = 1;
3695 
3696                 /* Call soft limit reclaim before calling shrink_node. */
3697                 sc.nr_scanned = 0;
3698                 nr_soft_scanned = 0;
3699                 nr_soft_reclaimed = mem_cgroup_soft_limit_reclaim(pgdat, sc.order,
3700                                                 sc.gfp_mask, &nr_soft_scanned);
3701                 sc.nr_reclaimed += nr_soft_reclaimed;
3702 
3703                 /*
3704                  * There should be no need to raise the scanning priority if
3705                  * enough pages are already being scanned that the high
3706                  * watermark would be met at 100% efficiency.
3707                  */
3708                 if (kswapd_shrink_node(pgdat, &sc))
3709                         raise_priority = false;
3710 
3711                 /*
3712                  * If the low watermark is met there is no need for processes
3713                  * to be throttled on pfmemalloc_wait as they should now be
3714                  * able to safely make forward progress. Wake them.
3715                  */
3716                 if (waitqueue_active(&pgdat->pfmemalloc_wait) &&
3717                                 allow_direct_reclaim(pgdat))
3718                         wake_up_all(&pgdat->pfmemalloc_wait);
3719 
3720                 /* Check if kswapd should be suspending */
3721                 __fs_reclaim_release();
3722                 ret = try_to_freeze();
3723                 __fs_reclaim_acquire();
3724                 if (ret || kthread_should_stop())
3725                         break;
3726 
3727                 /*
3728                  * Raise priority if scanning rate is too low or there was no
3729                  * progress in reclaiming pages
3730                  */
3731                 nr_reclaimed = sc.nr_reclaimed - nr_reclaimed;
3732                 nr_boost_reclaim -= min(nr_boost_reclaim, nr_reclaimed);
3733 
3734                 /*
3735                  * If reclaim made no progress for a boost, stop reclaim as
3736                  * IO cannot be queued and it could be an infinite loop in
3737                  * extreme circumstances.
3738                  */
3739                 if (nr_boost_reclaim && !nr_reclaimed)
3740                         break;
3741 
3742                 if (raise_priority || !nr_reclaimed)
3743                         sc.priority--;
3744         } while (sc.priority >= 1);
3745 
3746         if (!sc.nr_reclaimed)
3747                 pgdat->kswapd_failures++;
3748 
3749 out:
3750         /* If reclaim was boosted, account for the reclaim done in this pass */
3751         if (boosted) {
3752                 unsigned long flags;
3753 
3754                 for (i = 0; i <= classzone_idx; i++) {
3755                         if (!zone_boosts[i])
3756                                 continue;
3757 
3758                         /* Increments are under the zone lock */
3759                         zone = pgdat->node_zones + i;
3760                         spin_lock_irqsave(&zone->lock, flags);
3761                         zone->watermark_boost -= min(zone->watermark_boost, zone_boosts[i]);
3762                         spin_unlock_irqrestore(&zone->lock, flags);
3763                 }
3764 
3765                 /*
3766                  * As there is now likely space, wake up kcompactd to defragment
3767                  * pageblocks.
3768                  */
3769                 wakeup_kcompactd(pgdat, pageblock_order, classzone_idx);
3770         }
3771 
3772         snapshot_refaults(NULL, pgdat);
3773         __fs_reclaim_release();
3774         psi_memstall_leave(&pflags);
3775         set_task_reclaim_state(current, NULL);
3776 
3777         /*
3778          * Return the order kswapd stopped reclaiming at as
3779          * prepare_kswapd_sleep() takes it into account. If another caller
3780          * entered the allocator slow path while kswapd was awake, order will
3781          * remain at the higher level.
3782          */
3783         return sc.order;
3784 }
3785 
3786 /*
3787  * The pgdat->kswapd_classzone_idx is used by the waker to pass the highest
3788  * zone index to be reclaimed by kswapd. If the value is MAX_NR_ZONES, which
3789  * is not a valid index, then either kswapd is running for the first time or
3790  * it could not sleep after the previous reclaim attempt (node is still
3791  * unbalanced). In that case return the zone index of the previous reclaim cycle.
3792  */
3793 static enum zone_type kswapd_classzone_idx(pg_data_t *pgdat,
3794                                            enum zone_type prev_classzone_idx)
3795 {
3796         if (pgdat->kswapd_classzone_idx == MAX_NR_ZONES)
3797                 return prev_classzone_idx;
3798         return pgdat->kswapd_classzone_idx;
3799 }
3800 
3801 static void kswapd_try_to_sleep(pg_data_t *pgdat, int alloc_order, int reclaim_order,
3802                                 unsigned int classzone_idx)
3803 {
3804         long remaining = 0;
3805         DEFINE_WAIT(wait);
3806 
3807         if (freezing(current) || kthread_should_stop())
3808                 return;
3809 
3810         prepare_to_wait(&pgdat->kswapd_wait, &wait, TASK_INTERRUPTIBLE);
3811 
3812         /*
3813          * Try to sleep for a short interval. Note that kcompactd will only be
3814          * woken if it is possible to sleep for a short interval. This is
3815          * deliberate on the assumption that if reclaim cannot keep an
3816          * eligible zone balanced, it's also unlikely that compaction will
3817          * succeed.
3818          */
3819         if (prepare_kswapd_sleep(pgdat, reclaim_order, classzone_idx)) {
3820                 /*
3821                  * Compaction records which pageblocks it recently failed to
3822                  * isolate pages from and skips them in future scans. When
3823                  * kswapd is about to sleep, it is reasonable to assume that
3824                  * allocations and compaction may now succeed, so reset the cache.
3825                  */
3826                 reset_isolation_suitable(pgdat);
3827 
3828                 /*
3829                  * We have freed the memory, now we should compact it to make
3830                  * allocation of the requested order possible.
3831                  */
3832                 wakeup_kcompactd(pgdat, alloc_order, classzone_idx);
3833 
3834                 remaining = schedule_timeout(HZ/10);
3835 
3836                 /*
3837                  * If woken prematurely then reset kswapd_classzone_idx and
3838                  * order. The values will either be from a wakeup request or
3839                  * the previous request that slept prematurely.
3840                  */
3841                 if (remaining) {
3842                         pgdat->kswapd_classzone_idx = kswapd_classzone_idx(pgdat, classzone_idx);
3843                         pgdat->kswapd_order = max(pgdat->kswapd_order, reclaim_order);
3844                 }
3845 
3846                 finish_wait(&pgdat->kswapd_wait, &wait);
3847                 prepare_to_wait(&pgdat->kswapd_wait, &wait, TASK_INTERRUPTIBLE);
3848         }
3849 
3850         /*
3851          * After a short sleep, check if it was a premature sleep. If not, then
3852          * go fully to sleep until explicitly woken up.
3853          */
3854         if (!remaining &&
3855             prepare_kswapd_sleep(pgdat, reclaim_order, classzone_idx)) {
3856                 trace_mm_vmscan_kswapd_sleep(pgdat->node_id);
3857 
3858                 /*
3859                  * vmstat counters are not perfectly accurate and the estimated
3860                  * value for counters such as NR_FREE_PAGES can deviate from the
3861                  * true value by nr_online_cpus * threshold. To avoid the zone
3862                  * watermarks being breached while under pressure, we reduce the
3863                  * per-cpu vmstat threshold while kswapd is awake and restore
3864                  * them before going back to sleep.
3865                  */
3866                 set_pgdat_percpu_threshold(pgdat, calculate_normal_threshold);
3867 
3868                 if (!kthread_should_stop())
3869                         schedule();
3870 
3871                 set_pgdat_percpu_threshold(pgdat, calculate_pressure_threshold);
3872         } else {
3873                 if (remaining)
3874                         count_vm_event(KSWAPD_LOW_WMARK_HIT_QUICKLY);
3875                 else
3876                         count_vm_event(KSWAPD_HIGH_WMARK_HIT_QUICKLY);
3877         }
3878         finish_wait(&pgdat->kswapd_wait, &wait);
3879 }
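     /*
      * Timing sketch (summarising the logic above, not new behaviour): the
      * short nap is HZ/10 ticks, i.e. 100ms. A premature wakeup within that
      * window is counted as KSWAPD_LOW_WMARK_HIT_QUICKLY; sleeping through
      * the nap but failing the second prepare_kswapd_sleep() check is
      * counted as KSWAPD_HIGH_WMARK_HIT_QUICKLY.
      */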
3880 
3881 /*
3882  * The background pageout daemon, started as a kernel thread
3883  * from the init process.
3884  *
3885  * This basically trickles out pages so that we have _some_
3886  * free memory available even if there is no other activity
3887  * that frees anything up. This is needed for things like routing
3888  * etc, where we otherwise might have all activity going on in
3889  * asynchronous contexts that cannot page things out.
3890  *
3891  * If there are applications that are active memory-allocators
3892  * (most normal use), this basically shouldn't matter.
3893  */
3894 static int kswapd(void *p)
3895 {
3896         unsigned int alloc_order, reclaim_order;
3897         unsigned int classzone_idx = MAX_NR_ZONES - 1;
3898         pg_data_t *pgdat = (pg_data_t*)p;
3899         struct task_struct *tsk = current;
3900         const struct cpumask *cpumask = cpumask_of_node(pgdat->node_id);
3901 
3902         if (!cpumask_empty(cpumask))
3903                 set_cpus_allowed_ptr(tsk, cpumask);
3904 
3905         /*
3906          * Tell the memory management that we're a "memory allocator",
3907          * and that if we need more memory we should get access to it
3908          * regardless (see "__alloc_pages()"). "kswapd" should
3909          * never get caught in the normal page freeing logic.
3910          *
3911          * (Kswapd normally doesn't need memory anyway, but sometimes
3912          * you need a small amount of memory in order to be able to
3913          * page out something else, and this flag essentially protects
3914          * us from recursively trying to free more memory as we're
3915          * trying to free the first piece of memory in the first place).
3916          */
3917         tsk->flags |= PF_MEMALLOC | PF_SWAPWRITE | PF_KSWAPD;
3918         set_freezable();
3919 
3920         pgdat->kswapd_order = 0;
3921         pgdat->kswapd_classzone_idx = MAX_NR_ZONES;
3922         for ( ; ; ) {
3923                 bool ret;
3924 
3925                 alloc_order = reclaim_order = pgdat->kswapd_order;
3926                 classzone_idx = kswapd_classzone_idx(pgdat, classzone_idx);
3927 
3928 kswapd_try_sleep:
3929                 kswapd_try_to_sleep(pgdat, alloc_order, reclaim_order,
3930                                         classzone_idx);
3931 
3932                 /* Read the new order and classzone_idx */
3933                 alloc_order = reclaim_order = pgdat->kswapd_order;
3934                 classzone_idx = kswapd_classzone_idx(pgdat, classzone_idx);
3935                 pgdat->kswapd_order = 0;
3936                 pgdat->kswapd_classzone_idx = MAX_NR_ZONES;
3937 
3938                 ret = try_to_freeze();
3939                 if (kthread_should_stop())
3940                         break;
3941 
3942                 /*
3943                  * We can speed up thawing tasks if we don't call balance_pgdat
3944                  * after returning from the refrigerator.
3945                  */
3946                 if (ret)
3947                         continue;
3948 
3949                 /*
3950                  * Reclaim begins at the requested order but if a high-order
3951                  * reclaim fails then kswapd falls back to reclaiming for
3952                  * order-0. If that happens, kswapd will consider sleeping
3953                  * for the order it finished reclaiming at (reclaim_order)
3954                  * but kcompactd is woken to compact for the original
3955                  * request (alloc_order).
3956                  */
3957                 trace_mm_vmscan_kswapd_wake(pgdat->node_id, classzone_idx,
3958                                                 alloc_order);
3959                 reclaim_order = balance_pgdat(pgdat, alloc_order, classzone_idx);
3960                 if (reclaim_order < alloc_order)
3961                         goto kswapd_try_sleep;
3962         }
3963 
3964         tsk->flags &= ~(PF_MEMALLOC | PF_SWAPWRITE | PF_KSWAPD);
3965 
3966         return 0;
3967 }
3968 
3969 /*
3970  * A zone is low on free memory or too fragmented for high-order memory.  If
3971  * kswapd should reclaim (direct reclaim is deferred), wake it up for the zone's
3972  * pgdat.  It will wake up kcompactd after reclaiming memory.  If kswapd reclaim
3973  * has failed or is not needed, still wake up kcompactd if only compaction is
3974  * needed.
3975  */
3976 void wakeup_kswapd(struct zone *zone, gfp_t gfp_flags, int order,
3977                    enum zone_type classzone_idx)
3978 {
3979         pg_data_t *pgdat;
3980 
3981         if (!managed_zone(zone))
3982                 return;
3983 
3984         if (!cpuset_zone_allowed(zone, gfp_flags))
3985                 return;
3986         pgdat = zone->zone_pgdat;
3987 
3988         if (pgdat->kswapd_classzone_idx == MAX_NR_ZONES)
3989                 pgdat->kswapd_classzone_idx = classzone_idx;
3990         else
3991                 pgdat->kswapd_classzone_idx = max(pgdat->kswapd_classzone_idx,
3992                                                   classzone_idx);
3993         pgdat->kswapd_order = max(pgdat->kswapd_order, order);
3994         if (!waitqueue_active(&pgdat->kswapd_wait))
3995                 return;
3996 
3997         /* Hopeless node, leave it to direct reclaim if possible */
3998         if (pgdat->kswapd_failures >= MAX_RECLAIM_RETRIES ||
3999             (pgdat_balanced(pgdat, order, classzone_idx) &&
4000              !pgdat_watermark_boosted(pgdat, classzone_idx))) {
4001                 /*
4002                  * There may be plenty of free memory available, but it's too
4003                  * fragmented for high-order allocations.  Wake up kcompactd
4004                  * and rely on compaction_suitable() to determine if it's
4005                  * needed.  If it fails, it will defer subsequent attempts to
4006                  * ratelimit its work.
4007                  */
4008                 if (!(gfp_flags & __GFP_DIRECT_RECLAIM))
4009                         wakeup_kcompactd(pgdat, order, classzone_idx);
4010                 return;
4011         }
4012 
4013         trace_mm_vmscan_wakeup_kswapd(pgdat->node_id, classzone_idx, order,
4014                                       gfp_flags);
4015         wake_up_interruptible(&pgdat->kswapd_wait);
4016 }
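     /*
      * Illustrative caller sketch, modeled on wake_all_kswapds() in
      * mm/page_alloc.c (the body there differs in detail): the allocator
      * slow path wakes kswapd once per pgdat for every eligible zone
      * before retrying the allocation.
      *
      *     static void wake_all_kswapds(unsigned int order, gfp_t gfp_mask,
      *                                  const struct alloc_context *ac)
      *     {
      *             struct zoneref *z;
      *             struct zone *zone;
      *             pg_data_t *last_pgdat = NULL;
      *
      *             for_each_zone_zonelist_nodemask(zone, z, ac->zonelist,
      *                                     ac->high_zoneidx, ac->nodemask) {
      *                     if (last_pgdat != zone->zone_pgdat)
      *                             wakeup_kswapd(zone, gfp_mask, order,
      *                                           ac->high_zoneidx);
      *                     last_pgdat = zone->zone_pgdat;
      *             }
      *     }
      */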
4017 
4018 #ifdef CONFIG_HIBERNATION
4019 /*
4020  * Try to free `nr_to_reclaim' pages of memory, system-wide, and return the
4021  * number of freed pages.
4022  *
4023  * Rather than trying to age LRUs, the aim is to preserve the overall
4024  * LRU order by reclaiming preferentially
4025  * inactive > active > active referenced > active mapped
4026  */
4027 unsigned long shrink_all_memory(unsigned long nr_to_reclaim)
4028 {
4029         struct scan_control sc = {
4030                 .nr_to_reclaim = nr_to_reclaim,
4031                 .gfp_mask = GFP_HIGHUSER_MOVABLE,
4032                 .reclaim_idx = MAX_NR_ZONES - 1,
4033                 .priority = DEF_PRIORITY,
4034                 .may_writepage = 1,
4035                 .may_unmap = 1,
4036                 .may_swap = 1,
4037                 .hibernation_mode = 1,
4038         };
4039         struct zonelist *zonelist = node_zonelist(numa_node_id(), sc.gfp_mask);
4040         unsigned long nr_reclaimed;
4041         unsigned int noreclaim_flag;
4042 
4043         fs_reclaim_acquire(sc.gfp_mask);
4044         noreclaim_flag = memalloc_noreclaim_save();
4045         set_task_reclaim_state(current, &sc.reclaim_state);
4046 
4047         nr_reclaimed = do_try_to_free_pages(zonelist, &sc);
4048 
4049         set_task_reclaim_state(current, NULL);
4050         memalloc_noreclaim_restore(noreclaim_flag);
4051         fs_reclaim_release(sc.gfp_mask);
4052 
4053         return nr_reclaimed;
4054 }
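     /*
      * Illustrative caller sketch, modeled on hibernate_preallocate_memory()
      * in kernel/power/snapshot.c (details there differ): hibernation asks
      * for the shortfall between saveable pages and the target image size:
      *
      *     pages = shrink_all_memory(saveable - size);
      */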
4055 #endif /* CONFIG_HIBERNATION */
4056 
4057 /* It's optimal to keep kswapds on the same CPUs as their memory, but
4058    not required for correctness.  So if the last CPU in a node goes
4059    away, kswapd may run anywhere; as soon as one of the node's CPUs
4060    comes back online, restore its CPU binding. */
4061 static int kswapd_cpu_online(unsigned int cpu)
4062 {
4063         int nid;
4064 
4065         for_each_node_state(nid, N_MEMORY) {
4066                 pg_data_t *pgdat = NODE_DATA(nid);
4067                 const struct cpumask *mask;
4068 
4069                 mask = cpumask_of_node(pgdat->node_id);
4070 
4071                 if (cpumask_any_and(cpu_online_mask, mask) < nr_cpu_ids)
4072                         /* One of our CPUs online: restore mask */
4073                         set_cpus_allowed_ptr(pgdat->kswapd, mask);
4074         }
4075         return 0;
4076 }
4077 
4078 /*
4079  * This kswapd start function will be called by init and node hot-add.
4080  * On node hot-add, kswapd will be moved to the proper CPUs if CPUs are hot-added.
4081  */
4082 int kswapd_run(int nid)
4083 {
4084         pg_data_t *pgdat = NODE_DATA(nid);
4085         int ret = 0;
4086 
4087         if (pgdat->kswapd)
4088                 return 0;
4089 
4090         pgdat->kswapd = kthread_run(kswapd, pgdat, "kswapd%d", nid);
4091         if (IS_ERR(pgdat->kswapd)) {
4092                 /* failure at boot is fatal */
4093                 BUG_ON(system_state < SYSTEM_RUNNING);
4094                 pr_err("Failed to start kswapd on node %d\n", nid);
4095                 ret = PTR_ERR(pgdat->kswapd);
4096                 pgdat->kswapd = NULL;
4097         }
4098         return ret;
4099 }
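     /*
      * Illustrative sketch, modeled on online_pages() in mm/memory_hotplug.c
      * (details there differ): when the first memory of a node comes online,
      * the hotplug path starts the node's per-node daemons:
      *
      *     kswapd_run(nid);
      *     kcompactd_run(nid);
      */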
4100 
4101 /*
4102  * Called by memory hotplug when all memory in a node is offlined.  Caller must
4103  * hold mem_hotplug_begin/end().
4104  */
4105 void kswapd_stop(int nid)
4106 {
4107         struct task_struct *kswapd = NODE_DATA(nid)->kswapd;
4108 
4109         if (kswapd) {
4110                 kthread_stop(kswapd);
4111                 NODE_DATA(nid)->kswapd = NULL;
4112         }
4113 }
4114 
4115 static int __init kswapd_init(void)
4116 {
4117         int nid, ret;
4118 
4119         swap_setup();
4120         for_each_node_state(nid, N_MEMORY)
4121                 kswapd_run(nid);
4122         ret = cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN,
4123                                         "mm/vmscan:online", kswapd_cpu_online,
4124                                         NULL);
4125         WARN_ON(ret < 0);
4126         return 0;
4127 }
4128 
4129 module_init(kswapd_init)
4130 
4131 #ifdef CONFIG_NUMA
4132 /*
4133  * Node reclaim mode
4134  *
4135  * If non-zero, node_reclaim() is called when the number of free pages falls
4136  * below the watermarks.
4137  */
4138 int node_reclaim_mode __read_mostly;
4139 
4140 #define RECLAIM_OFF 0
4141 #define RECLAIM_ZONE (1<<0)     /* Run shrink_inactive_list on the zone */
4142 #define RECLAIM_WRITE (1<<1)    /* Writeout pages during reclaim */
4143 #define RECLAIM_UNMAP (1<<2)    /* Unmap pages during reclaim */
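     /*
      * These bits are set from userspace through the vm.zone_reclaim_mode
      * sysctl, which still carries its pre-node-reclaim name. For example
      * (illustrative):
      *
      *     # echo 1 > /proc/sys/vm/zone_reclaim_mode    RECLAIM_ZONE
      *     # echo 5 > /proc/sys/vm/zone_reclaim_mode    RECLAIM_ZONE|RECLAIM_UNMAP
      */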
4144 
4145 /*
4146  * Priority for NODE_RECLAIM. This determines the fraction of a node's
4147  * pages considered in each reclaim pass. Priority 4 scans 1/16th of
4148  * a node.
4149  */
4150 #define NODE_RECLAIM_PRIORITY 4
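     /*
      * Worked example: get_scan_count() sizes each pass as roughly
      * lru_size >> sc->priority, so priority 4 scans lru_size >> 4,
      * i.e. 1/16th of the node's LRU pages per pass.
      */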
4151 
4152 /*
4153  * Percentage of pages in a node that must be unmapped for node_reclaim to
4154  * occur.
4155  */
4156 int sysctl_min_unmapped_ratio = 1;
4157 
4158 /*
4159  * If the number of slab pages in a node grows beyond this percentage then
4160  * slab reclaim needs to occur.
4161  */
4162 int sysctl_min_slab_ratio = 5;
4163 
4164 static inline unsigned long node_unmapped_file_pages(struct pglist_data *pgdat)
4165 {
4166         unsigned long file_mapped = node_page_state(pgdat, NR_FILE_MAPPED);
4167         unsigned long file_lru = node_page_state(pgdat, NR_INACTIVE_FILE) +
4168                 node_page_state(pgdat, NR_ACTIVE_FILE);
4169 
4170         /*
4171          * It's possible for there to be more file mapped pages than
4172          * accounted for by the pages on the file LRU lists because
4173          * tmpfs pages accounted for as ANON can also be FILE_MAPPED.
4174          */
4175         return (file_lru > file_mapped) ? (file_lru - file_mapped) : 0;
4176 }
4177 
4178 /* Work out how many page cache pages we can reclaim in this reclaim_mode */
4179 static unsigned long node_pagecache_reclaimable(struct pglist_data *pgdat)
4180 {
4181         unsigned long nr_pagecache_reclaimable;
4182         unsigned long delta = 0;
4183 
4184         /*
4185          * If RECLAIM_UNMAP is set, then all file pages are considered
4186          * potentially reclaimable. Otherwise, we have to worry about
4187          * pages like swap cache, and node_unmapped_file_pages() provides
4188          * a better estimate.
4189          */
4190         if (node_reclaim_mode & RECLAIM_UNMAP)
4191                 nr_pagecache_reclaimable = node_page_state(pgdat, NR_FILE_PAGES);
4192         else
4193                 nr_pagecache_reclaimable = node_unmapped_file_pages(pgdat);
4194 
4195         /* If we can't clean pages, remove dirty pages from consideration */
4196         if (!(node_reclaim_mode & RECLAIM_WRITE))
4197                 delta += node_page_state(pgdat, NR_FILE_DIRTY);
4198 
4199         /* Watch for any possible underflows due to delta */
4200         if (unlikely(delta > nr_pagecache_reclaimable))
4201                 delta = nr_pagecache_reclaimable;
4202 
4203         return nr_pagecache_reclaimable - delta;
4204 }
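     /*
      * Worked example (illustrative numbers): with RECLAIM_UNMAP and
      * RECLAIM_WRITE clear, a node with 1000 file LRU pages, 300 of them
      * mapped and 200 dirty, is considered to have
      * (1000 - 300) - 200 = 500 reclaimable page cache pages.
      */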
4205 
4206 /*
4207  * Try to free up some pages from this node through reclaim.
4208  */
4209 static int __node_reclaim(struct pglist_data *pgdat, gfp_t gfp_mask, unsigned int order)
4210 {
4211         /* Minimum pages needed in order to stay on node */
4212         const unsigned long nr_pages = 1 << order;
4213         struct task_struct *p = current;
4214         unsigned int noreclaim_flag;
4215         struct scan_control sc = {
4216                 .nr_to_reclaim = max(nr_pages, SWAP_CLUSTER_MAX),
4217                 .gfp_mask = current_gfp_context(gfp_mask),
4218                 .order = order,
4219                 .priority = NODE_RECLAIM_PRIORITY,
4220                 .may_writepage = !!(node_reclaim_mode & RECLAIM_WRITE),
4221                 .may_unmap = !!(node_reclaim_mode & RECLAIM_UNMAP),
4222                 .may_swap = 1,
4223                 .reclaim_idx = gfp_zone(gfp_mask),
4224         };
4225 
4226         trace_mm_vmscan_node_reclaim_begin(pgdat->node_id, order,
4227                                            sc.gfp_mask);
4228 
4229         cond_resched();
4230         fs_reclaim_acquire(sc.gfp_mask);
4231         /*
4232          * We need to be able to allocate from the reserves for RECLAIM_UNMAP
4233          * and we also need to be able to write out pages for RECLAIM_WRITE
4234          * and RECLAIM_UNMAP.
4235          */
4236         noreclaim_flag = memalloc_noreclaim_save();
4237         p->flags |= PF_SWAPWRITE;
4238         set_task_reclaim_state(p, &sc.reclaim_state);
4239 
4240         if (node_pagecache_reclaimable(pgdat) > pgdat->min_unmapped_pages) {
4241                 /*
4242                  * Free memory by calling shrink node with increasing
4243                  * priorities until we have enough memory freed.
4244                  */
4245                 do {
4246                         shrink_node(pgdat, &sc);
4247                 } while (sc.nr_reclaimed < nr_pages && --sc.priority >= 0);
4248         }
4249 
4250         set_task_reclaim_state(p, NULL);
4251         current->flags &= ~PF_SWAPWRITE;
4252         memalloc_noreclaim_restore(noreclaim_flag);
4253         fs_reclaim_release(sc.gfp_mask);
4254 
4255         trace_mm_vmscan_node_reclaim_end(sc.nr_reclaimed);
4256 
4257         return sc.nr_reclaimed >= nr_pages;
4258 }
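     /*
      * Worked example: starting at NODE_RECLAIM_PRIORITY (4), the loop above
      * scans 1/16th, then 1/8th, 1/4, 1/2 and finally all of the node's LRU
      * pages, stopping as soon as nr_pages have been reclaimed.
      */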
4259 
4260 int node_reclaim(struct pglist_data *pgdat, gfp_t gfp_mask, unsigned int order)
4261 {
4262         int ret;
4263 
4264         /*
4265          * Node reclaim reclaims unmapped file backed pages and
4266          * slab pages if we are over the defined limits.
4267          *
4268          * A small portion of unmapped file backed pages is needed for
4269          * file I/O otherwise pages read by file I/O will be immediately
4270          * thrown out if the node is overallocated. So we do not reclaim
4271          * if less than a specified percentage of the node is used by
4272          * unmapped file backed pages.
4273          */
4274         if (node_pagecache_reclaimable(pgdat) <= pgdat->min_unmapped_pages &&
4275             node_page_state(pgdat, NR_SLAB_RECLAIMABLE) <= pgdat->min_slab_pages)
4276                 return NODE_RECLAIM_FULL;
4277 
4278         /*
4279          * Do not scan if the allocation should not be delayed.
4280          */
4281         if (!gfpflags_allow_blocking(gfp_mask) || (current->flags & PF_MEMALLOC))
4282                 return NODE_RECLAIM_NOSCAN;
4283 
4284         /*
4285          * Only run node reclaim on the local node or on nodes that do not
4286          * have associated processors. This will favor the local processor
4287          * over remote processors and spread off node memory allocations
4288          * as wide as possible.
4289          */
4290         if (node_state(pgdat->node_id, N_CPU) && pgdat->node_id != numa_node_id())
4291                 return NODE_RECLAIM_NOSCAN;
4292 
4293         if (test_and_set_bit(PGDAT_RECLAIM_LOCKED, &pgdat->flags))
4294                 return NODE_RECLAIM_NOSCAN;
4295 
4296         ret = __node_reclaim(pgdat, gfp_mask, order);
4297         clear_bit(PGDAT_RECLAIM_LOCKED, &pgdat->flags);
4298 
4299         if (!ret)
4300                 count_vm_event(PGSCAN_ZONE_RECLAIM_FAILED);
4301 
4302         return ret;
4303 }
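     /*
      * Illustrative caller sketch, modeled on get_page_from_freelist() in
      * mm/page_alloc.c (the code there differs in detail): the allocator
      * tries node reclaim before falling back to a remote zone.
      *
      *     ret = node_reclaim(zone->zone_pgdat, gfp_mask, order);
      *     switch (ret) {
      *     case NODE_RECLAIM_NOSCAN:
      *     case NODE_RECLAIM_FULL:
      *             continue;
      *     default:
      *             if (zone_watermark_ok(zone, order, mark,
      *                                   ac_classzone_idx(ac), alloc_flags))
      *                     goto try_this_zone;
      *             continue;
      *     }
      */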
4304 #endif
4305 
4306 /*
4307  * page_evictable - test whether a page is evictable
4308  * @page: the page to test
4309  *
4310  * Test whether a page is evictable, i.e. whether it should be placed on the
4311  * active/inactive lists rather than on the unevictable list.
4312  *
4313  * Reasons a page might not be evictable:
4314  * (1) the page's mapping is marked unevictable
4315  * (2) the page is part of an mlocked VMA
4316  *
4317  */
4318 int page_evictable(struct page *page)
4319 {
4320         int ret;
4321 
4322         /* Prevent address_space of inode and swap cache from being freed */
4323         rcu_read_lock();
4324         ret = !mapping_unevictable(page_mapping(page)) && !PageMlocked(page);
4325         rcu_read_unlock();
4326         return ret;
4327 }
4328 
4329 /**
4330  * check_move_unevictable_pages - check pages for evictability and move to
4331  * the appropriate zone lru list
4332  * @pvec: pagevec with lru pages to check
4333  *
4334  * Checks pages for evictability and, if an evictable page is on the
4335  * unevictable lru list, moves it to the appropriate evictable lru list.
4336  * This function should only be used for lru pages.
4337  */
4338 void check_move_unevictable_pages(struct pagevec *pvec)
4339 {
4340         struct lruvec *lruvec;
4341         struct pglist_data *pgdat = NULL;
4342         int pgscanned = 0;
4343         int pgrescued = 0;
4344         int i;
4345 
4346         for (i = 0; i < pvec->nr; i++) {
4347                 struct page *page = pvec->pages[i];
4348                 struct pglist_data *pagepgdat = page_pgdat(page);
4349 
4350                 pgscanned++;
4351                 if (pagepgdat != pgdat) {
4352                         if (pgdat)
4353                                 spin_unlock_irq(&pgdat->lru_lock);
4354                         pgdat = pagepgdat;
4355                         spin_lock_irq(&pgdat->lru_lock);
4356                 }
4357                 lruvec = mem_cgroup_page_lruvec(page, pgdat);
4358 
4359                 if (!PageLRU(page) || !PageUnevictable(page))
4360                         continue;
4361 
4362                 if (page_evictable(page)) {
4363                         enum lru_list lru = page_lru_base_type(page);
4364 
4365                         VM_BUG_ON_PAGE(PageActive(page), page);
4366                         ClearPageUnevictable(page);
4367                         del_page_from_lru_list(page, lruvec, LRU_UNEVICTABLE);
4368                         add_page_to_lru_list(page, lruvec, lru);
4369                         pgrescued++;
4370                 }
4371         }
4372 
4373         if (pgdat) {
4374                 __count_vm_events(UNEVICTABLE_PGRESCUED, pgrescued);
4375                 __count_vm_events(UNEVICTABLE_PGSCANNED, pgscanned);
4376                 spin_unlock_irq(&pgdat->lru_lock);
4377         }
4378 }
4379 EXPORT_SYMBOL_GPL(check_move_unevictable_pages);
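     /*
      * Illustrative caller sketch, modeled on shmem_unlock_mapping() in
      * mm/shmem.c (details there differ): after clearing an inode's
      * unevictable state, the owner rescans its pages in pagevec-sized
      * batches:
      *
      *     pagevec_init(&pvec);
      *     while (!mapping_unevictable(mapping) &&
      *            pagevec_lookup(&pvec, mapping, &index)) {
      *             check_move_unevictable_pages(&pvec);
      *             pagevec_release(&pvec);
      *             cond_resched();
      *     }
      */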
