root/mm/page_alloc.c


DEFINITIONS

This source file includes the following definitions.
  1. early_init_on_alloc
  2. early_init_on_free
  3. get_pcppage_migratetype
  4. set_pcppage_migratetype
  5. pm_restore_gfp_mask
  6. pm_restrict_gfp_mask
  7. pm_suspended_storage
  8. kasan_free_nondeferred_pages
  9. early_page_uninitialised
  10. defer_init
  11. early_page_uninitialised
  12. defer_init
  13. get_pageblock_bitmap
  14. pfn_to_bitidx
  15. __get_pfnblock_flags_mask
  16. get_pfnblock_flags_mask
  17. get_pfnblock_migratetype
  18. set_pfnblock_flags_mask
  19. set_pageblock_migratetype
  20. page_outside_zone_boundaries
  21. page_is_consistent
  22. bad_range
  23. bad_range
  24. bad_page
  25. free_compound_page
  26. prep_compound_page
  27. early_debug_pagealloc
  28. init_debug_pagealloc
  29. debug_guardpage_minorder_setup
  30. set_page_guard
  31. clear_page_guard
  32. set_page_guard
  33. clear_page_guard
  34. set_page_order
  35. page_is_buddy
  36. task_capc
  37. compaction_capture
  38. task_capc
  39. compaction_capture
  40. __free_one_page
  41. page_expected_state
  42. free_pages_check_bad
  43. free_pages_check
  44. free_tail_pages_check
  45. kernel_init_free_pages
  46. free_pages_prepare
  47. free_pcp_prepare
  48. bulkfree_pcp_prepare
  49. free_pcp_prepare
  50. bulkfree_pcp_prepare
  51. prefetch_buddy
  52. free_pcppages_bulk
  53. free_one_page
  54. __init_single_page
  55. init_reserved_page
  56. init_reserved_page
  57. reserve_bootmem_region
  58. __free_pages_ok
  59. __free_pages_core
  60. early_pfn_to_nid
  61. early_pfn_in_nid
  62. early_pfn_in_nid
  63. memblock_free_pages
  64. __pageblock_pfn_to_page
  65. set_zone_contiguous
  66. clear_zone_contiguous
  67. deferred_free_range
  68. pgdat_init_report_one_done
  69. deferred_pfn_valid
  70. deferred_free_pages
  71. deferred_init_pages
  72. deferred_init_mem_pfn_range_in_zone
  73. deferred_init_maxorder
  74. deferred_init_memmap
  75. deferred_grow_zone
  76. _deferred_grow_zone
  77. page_alloc_init_late
  78. init_cma_reserved_pageblock
  79. expand
  80. check_new_page_bad
  81. check_new_page
  82. free_pages_prezeroed
  83. check_pcp_refill
  84. check_new_pcp
  85. check_pcp_refill
  86. check_new_pcp
  87. check_new_pages
  88. post_alloc_hook
  89. prep_new_page
  90. __rmqueue_smallest
  91. __rmqueue_cma_fallback
  92. __rmqueue_cma_fallback
  93. move_freepages
  94. move_freepages_block
  95. change_pageblock_range
  96. can_steal_fallback
  97. boost_watermark
  98. steal_suitable_fallback
  99. find_suitable_fallback
  100. reserve_highatomic_pageblock
  101. unreserve_highatomic_pageblock
  102. __rmqueue_fallback
  103. __rmqueue
  104. rmqueue_bulk
  105. drain_zone_pages
  106. drain_pages_zone
  107. drain_pages
  108. drain_local_pages
  109. drain_local_pages_wq
  110. drain_all_pages
  111. mark_free_pages
  112. free_unref_page_prepare
  113. free_unref_page_commit
  114. free_unref_page
  115. free_unref_page_list
  116. split_page
  117. __isolate_free_page
  118. zone_statistics
  119. __rmqueue_pcplist
  120. rmqueue_pcplist
  121. rmqueue
  122. setup_fail_page_alloc
  123. __should_fail_alloc_page
  124. fail_page_alloc_debugfs
  125. __should_fail_alloc_page
  126. should_fail_alloc_page
  127. __zone_watermark_ok
  128. zone_watermark_ok
  129. zone_watermark_fast
  130. zone_watermark_ok_safe
  131. zone_allows_reclaim
  132. zone_allows_reclaim
  133. alloc_flags_nofragment
  134. get_page_from_freelist
  135. warn_alloc_show_mem
  136. warn_alloc
  137. __alloc_pages_cpuset_fallback
  138. __alloc_pages_may_oom
  139. __alloc_pages_direct_compact
  140. should_compact_retry
  141. __alloc_pages_direct_compact
  142. should_compact_retry
  143. __need_fs_reclaim
  144. __fs_reclaim_acquire
  145. __fs_reclaim_release
  146. fs_reclaim_acquire
  147. fs_reclaim_release
  148. __perform_reclaim
  149. __alloc_pages_direct_reclaim
  150. wake_all_kswapds
  151. gfp_to_alloc_flags
  152. oom_reserves_allowed
  153. __gfp_pfmemalloc_flags
  154. gfp_pfmemalloc_allowed
  155. should_reclaim_retry
  156. check_retry_cpuset
  157. __alloc_pages_slowpath
  158. prepare_alloc_pages
  159. finalise_ac
  160. __alloc_pages_nodemask
  161. __get_free_pages
  162. get_zeroed_page
  163. free_the_page
  164. __free_pages
  165. free_pages
  166. __page_frag_cache_refill
  167. __page_frag_cache_drain
  168. page_frag_alloc
  169. page_frag_free
  170. make_alloc_exact
  171. alloc_pages_exact
  172. alloc_pages_exact_nid
  173. free_pages_exact
  174. nr_free_zone_pages
  175. nr_free_buffer_pages
  176. nr_free_pagecache_pages
  177. show_node
  178. si_mem_available
  179. si_meminfo
  180. si_meminfo_node
  181. show_mem_node_skip
  182. show_migration_types
  183. show_free_areas
  184. zoneref_set_zone
  185. build_zonerefs_node
  186. __parse_numa_zonelist_order
  187. setup_numa_zonelist_order
  188. numa_zonelist_order_handler
  189. find_next_best_node
  190. build_zonelists_in_node_order
  191. build_thisnode_zonelists
  192. build_zonelists
  193. local_memory_node
  194. build_zonelists
  195. __build_all_zonelists
  196. build_all_zonelists_init
  197. build_all_zonelists
  198. overlap_memmap_init
  199. memmap_init_zone
  200. memmap_init_zone_device
  201. zone_init_free_lists
  202. memmap_init
  203. zone_batchsize
  204. pageset_update
  205. pageset_set_batch
  206. pageset_init
  207. setup_pageset
  208. pageset_set_high
  209. pageset_set_high_and_batch
  210. zone_pageset_init
  211. setup_zone_pageset
  212. setup_per_cpu_pageset
  213. zone_pcp_init
  214. init_currently_empty_zone
  215. __early_pfn_to_nid
  216. free_bootmem_with_active_regions
  217. sparse_memory_present_with_active_regions
  218. get_pfn_range_for_nid
  219. find_usable_zone_for_movable
  220. adjust_zone_range_for_zone_movable
  221. zone_spanned_pages_in_node
  222. __absent_pages_in_range
  223. absent_pages_in_range
  224. zone_absent_pages_in_node
  225. zone_spanned_pages_in_node
  226. zone_absent_pages_in_node
  227. calculate_node_totalpages
  228. usemap_size
  229. setup_usemap
  230. setup_usemap
  231. set_pageblock_order
  232. set_pageblock_order
  233. calc_memmap_size
  234. pgdat_init_split_queue
  235. pgdat_init_split_queue
  236. pgdat_init_kcompactd
  237. pgdat_init_kcompactd
  238. pgdat_init_internals
  239. zone_init_internals
  240. free_area_init_core_hotplug
  241. free_area_init_core
  242. alloc_node_mem_map
  243. alloc_node_mem_map
  244. pgdat_set_deferred_range
  245. pgdat_set_deferred_range
  246. free_area_init_node
  247. zero_pfn_range
  248. zero_resv_unavail
  249. setup_nr_node_ids
  250. node_map_pfn_alignment
  251. find_min_pfn_for_node
  252. find_min_pfn_with_active_regions
  253. early_calculate_totalpages
  254. find_zone_movable_pfns_for_nodes
  255. check_for_memory
  256. free_area_init_nodes
  257. cmdline_parse_core
  258. cmdline_parse_kernelcore
  259. cmdline_parse_movablecore
  260. adjust_managed_page_count
  261. free_reserved_area
  262. free_highmem_page
  263. mem_init_print_info
  264. set_dma_reserve
  265. free_area_init
  266. page_alloc_cpu_dead
  267. set_hashdist
  268. page_alloc_init
  269. calculate_totalreserve_pages
  270. setup_per_zone_lowmem_reserve
  271. __setup_per_zone_wmarks
  272. setup_per_zone_wmarks
  273. init_per_zone_wmark_min
  274. core_initcall
  275. watermark_boost_factor_sysctl_handler
  276. watermark_scale_factor_sysctl_handler
  277. setup_min_unmapped_ratio
  278. sysctl_min_unmapped_ratio_sysctl_handler
  279. setup_min_slab_ratio
  280. sysctl_min_slab_ratio_sysctl_handler
  281. lowmem_reserve_ratio_sysctl_handler
  282. percpu_pagelist_fraction_sysctl_handler
  283. arch_reserved_kernel_pages
  284. alloc_large_system_hash
  285. has_unmovable_pages
  286. pfn_max_align_down
  287. pfn_max_align_up
  288. __alloc_contig_migrate_range
  289. alloc_contig_range
  290. free_contig_range
  291. zone_pcp_update
  292. zone_pcp_reset
  293. __offline_isolated_pages
  294. is_free_buddy_page
  295. set_hwpoison_free_buddy_page

   1 // SPDX-License-Identifier: GPL-2.0-only
   2 /*
   3  *  linux/mm/page_alloc.c
   4  *
   5  *  Manages the free list, the system allocates free pages here.
   6  *  Note that kmalloc() lives in slab.c
   7  *
   8  *  Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
   9  *  Swap reorganised 29.12.95, Stephen Tweedie
  10  *  Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999
  11  *  Reshaped it to be a zoned allocator, Ingo Molnar, Red Hat, 1999
  12  *  Discontiguous memory support, Kanoj Sarcar, SGI, Nov 1999
  13  *  Zone balancing, Kanoj Sarcar, SGI, Jan 2000
  14  *  Per cpu hot/cold page lists, bulk allocation, Martin J. Bligh, Sept 2002
  15  *          (lots of bits borrowed from Ingo Molnar & Andrew Morton)
  16  */
  17 
  18 #include <linux/stddef.h>
  19 #include <linux/mm.h>
  20 #include <linux/highmem.h>
  21 #include <linux/swap.h>
  22 #include <linux/interrupt.h>
  23 #include <linux/pagemap.h>
  24 #include <linux/jiffies.h>
  25 #include <linux/memblock.h>
  26 #include <linux/compiler.h>
  27 #include <linux/kernel.h>
  28 #include <linux/kasan.h>
  29 #include <linux/module.h>
  30 #include <linux/suspend.h>
  31 #include <linux/pagevec.h>
  32 #include <linux/blkdev.h>
  33 #include <linux/slab.h>
  34 #include <linux/ratelimit.h>
  35 #include <linux/oom.h>
  36 #include <linux/topology.h>
  37 #include <linux/sysctl.h>
  38 #include <linux/cpu.h>
  39 #include <linux/cpuset.h>
  40 #include <linux/memory_hotplug.h>
  41 #include <linux/nodemask.h>
  42 #include <linux/vmalloc.h>
  43 #include <linux/vmstat.h>
  44 #include <linux/mempolicy.h>
  45 #include <linux/memremap.h>
  46 #include <linux/stop_machine.h>
  47 #include <linux/random.h>
  48 #include <linux/sort.h>
  49 #include <linux/pfn.h>
  50 #include <linux/backing-dev.h>
  51 #include <linux/fault-inject.h>
  52 #include <linux/page-isolation.h>
  53 #include <linux/debugobjects.h>
  54 #include <linux/kmemleak.h>
  55 #include <linux/compaction.h>
  56 #include <trace/events/kmem.h>
  57 #include <trace/events/oom.h>
  58 #include <linux/prefetch.h>
  59 #include <linux/mm_inline.h>
  60 #include <linux/migrate.h>
  61 #include <linux/hugetlb.h>
  62 #include <linux/sched/rt.h>
  63 #include <linux/sched/mm.h>
  64 #include <linux/page_owner.h>
  65 #include <linux/kthread.h>
  66 #include <linux/memcontrol.h>
  67 #include <linux/ftrace.h>
  68 #include <linux/lockdep.h>
  69 #include <linux/nmi.h>
  70 #include <linux/psi.h>
  71 
  72 #include <asm/sections.h>
  73 #include <asm/tlbflush.h>
  74 #include <asm/div64.h>
  75 #include "internal.h"
  76 #include "shuffle.h"
  77 
  78 /* prevent >1 _updater_ of zone percpu pageset ->high and ->batch fields */
  79 static DEFINE_MUTEX(pcp_batch_high_lock);
  80 #define MIN_PERCPU_PAGELIST_FRACTION    (8)
  81 
  82 #ifdef CONFIG_USE_PERCPU_NUMA_NODE_ID
  83 DEFINE_PER_CPU(int, numa_node);
  84 EXPORT_PER_CPU_SYMBOL(numa_node);
  85 #endif
  86 
  87 DEFINE_STATIC_KEY_TRUE(vm_numa_stat_key);
  88 
  89 #ifdef CONFIG_HAVE_MEMORYLESS_NODES
  90 /*
  91  * N.B., Do NOT reference the '_numa_mem_' per cpu variable directly.
  92  * It will not be defined when CONFIG_HAVE_MEMORYLESS_NODES is not defined.
  93  * Use the accessor functions set_numa_mem(), numa_mem_id() and cpu_to_mem()
  94  * defined in <linux/topology.h>.
  95  */
  96 DEFINE_PER_CPU(int, _numa_mem_);                /* Kernel "local memory" node */
  97 EXPORT_PER_CPU_SYMBOL(_numa_mem_);
  98 int _node_numa_mem_[MAX_NUMNODES];
  99 #endif
 100 
 101 /* work_structs for global per-cpu drains */
 102 struct pcpu_drain {
 103         struct zone *zone;
 104         struct work_struct work;
 105 };
 106 DEFINE_MUTEX(pcpu_drain_mutex);
 107 DEFINE_PER_CPU(struct pcpu_drain, pcpu_drain);
 108 
 109 #ifdef CONFIG_GCC_PLUGIN_LATENT_ENTROPY
 110 volatile unsigned long latent_entropy __latent_entropy;
 111 EXPORT_SYMBOL(latent_entropy);
 112 #endif
 113 
 114 /*
 115  * Array of node states.
 116  */
 117 nodemask_t node_states[NR_NODE_STATES] __read_mostly = {
 118         [N_POSSIBLE] = NODE_MASK_ALL,
 119         [N_ONLINE] = { { [0] = 1UL } },
 120 #ifndef CONFIG_NUMA
 121         [N_NORMAL_MEMORY] = { { [0] = 1UL } },
 122 #ifdef CONFIG_HIGHMEM
 123         [N_HIGH_MEMORY] = { { [0] = 1UL } },
 124 #endif
 125         [N_MEMORY] = { { [0] = 1UL } },
 126         [N_CPU] = { { [0] = 1UL } },
 127 #endif  /* NUMA */
 128 };
 129 EXPORT_SYMBOL(node_states);
 130 
 131 atomic_long_t _totalram_pages __read_mostly;
 132 EXPORT_SYMBOL(_totalram_pages);
 133 unsigned long totalreserve_pages __read_mostly;
 134 unsigned long totalcma_pages __read_mostly;
 135 
 136 int percpu_pagelist_fraction;
 137 gfp_t gfp_allowed_mask __read_mostly = GFP_BOOT_MASK;
 138 #ifdef CONFIG_INIT_ON_ALLOC_DEFAULT_ON
 139 DEFINE_STATIC_KEY_TRUE(init_on_alloc);
 140 #else
 141 DEFINE_STATIC_KEY_FALSE(init_on_alloc);
 142 #endif
 143 EXPORT_SYMBOL(init_on_alloc);
 144 
 145 #ifdef CONFIG_INIT_ON_FREE_DEFAULT_ON
 146 DEFINE_STATIC_KEY_TRUE(init_on_free);
 147 #else
 148 DEFINE_STATIC_KEY_FALSE(init_on_free);
 149 #endif
 150 EXPORT_SYMBOL(init_on_free);
 151 
 152 static int __init early_init_on_alloc(char *buf)
 153 {
 154         int ret;
 155         bool bool_result;
 156 
 157         if (!buf)
 158                 return -EINVAL;
 159         ret = kstrtobool(buf, &bool_result);
 160         if (bool_result && page_poisoning_enabled())
 161                 pr_info("mem auto-init: CONFIG_PAGE_POISONING is on, will take precedence over init_on_alloc\n");
 162         if (bool_result)
 163                 static_branch_enable(&init_on_alloc);
 164         else
 165                 static_branch_disable(&init_on_alloc);
 166         return ret;
 167 }
 168 early_param("init_on_alloc", early_init_on_alloc);
 169 
 170 static int __init early_init_on_free(char *buf)
 171 {
 172         int ret;
 173         bool bool_result;
 174 
 175         if (!buf)
 176                 return -EINVAL;
 177         ret = kstrtobool(buf, &bool_result);
 178         if (bool_result && page_poisoning_enabled())
 179                 pr_info("mem auto-init: CONFIG_PAGE_POISONING is on, will take precedence over init_on_free\n");
 180         if (bool_result)
 181                 static_branch_enable(&init_on_free);
 182         else
 183                 static_branch_disable(&init_on_free);
 184         return ret;
 185 }
 186 early_param("init_on_free", early_init_on_free);
 187 
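/*
 * Usage sketch (illustrative): both knobs above are boot parameters parsed
 * with kstrtobool(), so the usual boolean spellings work on the kernel
 * command line, e.g.
 *
 *	init_on_alloc=1 init_on_free=1
 *
 * enables zeroing on both allocation and free regardless of the
 * CONFIG_INIT_ON_ALLOC_DEFAULT_ON/CONFIG_INIT_ON_FREE_DEFAULT_ON defaults.
 */
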
 188 /*
 189  * A cached value of the page's pageblock's migratetype, used when the page is
 190  * put on a pcplist. Used to avoid the pageblock migratetype lookup when
 191  * freeing from pcplists in most cases, at the cost of possibly becoming stale.
 192  * Also the migratetype set in the page does not necessarily match the pcplist
 193  * index, e.g. page might have MIGRATE_CMA set but be on a pcplist with any
 194  * other index - this ensures that it will be put on the correct CMA freelist.
 195  */
 196 static inline int get_pcppage_migratetype(struct page *page)
 197 {
 198         return page->index;
 199 }
 200 
 201 static inline void set_pcppage_migratetype(struct page *page, int migratetype)
 202 {
 203         page->index = migratetype;
 204 }
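
/*
 * A minimal sketch of the round trip (for illustration only): the cached
 * value lives in page->index while the page is outside the buddy lists and
 * is read back on the free side, e.g. free_pcppages_bulk() later in this
 * file uses it instead of re-reading the pageblock bitmap:
 *
 *	set_pcppage_migratetype(page, mt);	// cache mt in page->index
 *	...
 *	mt = get_pcppage_migratetype(page);	// no pageblock bitmap lookup on free
 */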
 205 
 206 #ifdef CONFIG_PM_SLEEP
 207 /*
 208  * The following functions are used by the suspend/hibernate code to temporarily
 209  * change gfp_allowed_mask in order to avoid using I/O during memory allocations
 210  * while devices are suspended.  To avoid races with the suspend/hibernate code,
 211  * they should always be called with system_transition_mutex held
 212  * (gfp_allowed_mask also should only be modified with system_transition_mutex
 213  * held, unless the suspend/hibernate code is guaranteed not to run in parallel
 214  * with that modification).
 215  */
 216 
 217 static gfp_t saved_gfp_mask;
 218 
 219 void pm_restore_gfp_mask(void)
 220 {
 221         WARN_ON(!mutex_is_locked(&system_transition_mutex));
 222         if (saved_gfp_mask) {
 223                 gfp_allowed_mask = saved_gfp_mask;
 224                 saved_gfp_mask = 0;
 225         }
 226 }
 227 
 228 void pm_restrict_gfp_mask(void)
 229 {
 230         WARN_ON(!mutex_is_locked(&system_transition_mutex));
 231         WARN_ON(saved_gfp_mask);
 232         saved_gfp_mask = gfp_allowed_mask;
 233         gfp_allowed_mask &= ~(__GFP_IO | __GFP_FS);
 234 }
 235 
 236 bool pm_suspended_storage(void)
 237 {
 238         if ((gfp_allowed_mask & (__GFP_IO | __GFP_FS)) == (__GFP_IO | __GFP_FS))
 239                 return false;
 240         return true;
 241 }
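
/*
 * A minimal usage sketch; example_enter_sleep() is hypothetical and only
 * illustrates the pairing required by the comment above: both helpers are
 * called under system_transition_mutex, restricting before devices suspend
 * and restoring after they resume.
 */
static int __maybe_unused example_enter_sleep(void)
{
	int error = 0;

	mutex_lock(&system_transition_mutex);
	pm_restrict_gfp_mask();		/* mask out __GFP_IO | __GFP_FS */
	/* ... suspend devices and enter the sleep state here ... */
	pm_restore_gfp_mask();		/* allow I/O and FS allocations again */
	mutex_unlock(&system_transition_mutex);

	return error;
}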
 242 #endif /* CONFIG_PM_SLEEP */
 243 
 244 #ifdef CONFIG_HUGETLB_PAGE_SIZE_VARIABLE
 245 unsigned int pageblock_order __read_mostly;
 246 #endif
 247 
 248 static void __free_pages_ok(struct page *page, unsigned int order);
 249 
 250 /*
 251  * results with 256, 32 in the lowmem_reserve sysctl:
 252  *      1G machine -> (16M dma, 800M-16M normal, 1G-800M high)
 253  *      1G machine -> (16M dma, 784M normal, 224M high)
 254  *      NORMAL allocation will leave 784M/256 of ram reserved in the ZONE_DMA
 255  *      HIGHMEM allocation will leave 224M/32 of ram reserved in ZONE_NORMAL
 256  *      HIGHMEM allocation will leave (224M+784M)/256 of ram reserved in ZONE_DMA
 257  *
 258  * TBD: should special case ZONE_DMA32 machines here - in those we normally
 259  * don't need any ZONE_NORMAL reservation
 260  */
 261 int sysctl_lowmem_reserve_ratio[MAX_NR_ZONES] = {
 262 #ifdef CONFIG_ZONE_DMA
 263         [ZONE_DMA] = 256,
 264 #endif
 265 #ifdef CONFIG_ZONE_DMA32
 266         [ZONE_DMA32] = 256,
 267 #endif
 268         [ZONE_NORMAL] = 32,
 269 #ifdef CONFIG_HIGHMEM
 270         [ZONE_HIGHMEM] = 0,
 271 #endif
 272         [ZONE_MOVABLE] = 0,
 273 };
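/*
 * Worked example of the defaults above, using the 1G split from the comment:
 * a NORMAL allocation leaves about 784M/256 ~= 3M of ZONE_DMA reserved, and
 * a HIGHMEM allocation leaves 224M/32 = 7M of ZONE_NORMAL plus
 * (224M + 784M)/256 ~= 4M of ZONE_DMA reserved.
 */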
 274 
 275 static char * const zone_names[MAX_NR_ZONES] = {
 276 #ifdef CONFIG_ZONE_DMA
 277          "DMA",
 278 #endif
 279 #ifdef CONFIG_ZONE_DMA32
 280          "DMA32",
 281 #endif
 282          "Normal",
 283 #ifdef CONFIG_HIGHMEM
 284          "HighMem",
 285 #endif
 286          "Movable",
 287 #ifdef CONFIG_ZONE_DEVICE
 288          "Device",
 289 #endif
 290 };
 291 
 292 const char * const migratetype_names[MIGRATE_TYPES] = {
 293         "Unmovable",
 294         "Movable",
 295         "Reclaimable",
 296         "HighAtomic",
 297 #ifdef CONFIG_CMA
 298         "CMA",
 299 #endif
 300 #ifdef CONFIG_MEMORY_ISOLATION
 301         "Isolate",
 302 #endif
 303 };
 304 
 305 compound_page_dtor * const compound_page_dtors[] = {
 306         NULL,
 307         free_compound_page,
 308 #ifdef CONFIG_HUGETLB_PAGE
 309         free_huge_page,
 310 #endif
 311 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
 312         free_transhuge_page,
 313 #endif
 314 };
 315 
 316 int min_free_kbytes = 1024;
 317 int user_min_free_kbytes = -1;
 318 #ifdef CONFIG_DISCONTIGMEM
 319 /*
 320  * DiscontigMem defines memory ranges as separate pg_data_t even if the ranges
 321  * are not on separate NUMA nodes. Functionally this works but with
 322  * watermark_boost_factor, it can reclaim prematurely as the ranges can be
 323  * quite small. By default, do not boost watermarks on discontigmem as in
 324  * many cases very high-order allocations like THP are likely to be
 325  * unsupported and the premature reclaim offsets the advantage of long-term
 326  * fragmentation avoidance.
 327  */
 328 int watermark_boost_factor __read_mostly;
 329 #else
 330 int watermark_boost_factor __read_mostly = 15000;
 331 #endif
 332 int watermark_scale_factor = 10;
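
/*
 * For scale (both knobs are in fractions of 10000, an assumption worth
 * checking against the sysctl documentation): the default
 * watermark_boost_factor of 15000 caps the temporary boost at 150% of a
 * zone's high watermark, and watermark_scale_factor = 10 keeps roughly 0.1%
 * of a zone's managed pages between successive watermarks.
 */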
 333 
 334 static unsigned long nr_kernel_pages __initdata;
 335 static unsigned long nr_all_pages __initdata;
 336 static unsigned long dma_reserve __initdata;
 337 
 338 #ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
 339 static unsigned long arch_zone_lowest_possible_pfn[MAX_NR_ZONES] __initdata;
 340 static unsigned long arch_zone_highest_possible_pfn[MAX_NR_ZONES] __initdata;
 341 static unsigned long required_kernelcore __initdata;
 342 static unsigned long required_kernelcore_percent __initdata;
 343 static unsigned long required_movablecore __initdata;
 344 static unsigned long required_movablecore_percent __initdata;
 345 static unsigned long zone_movable_pfn[MAX_NUMNODES] __initdata;
 346 static bool mirrored_kernelcore __meminitdata;
 347 
 348 /* movable_zone is the "real" zone pages in ZONE_MOVABLE are taken from */
 349 int movable_zone;
 350 EXPORT_SYMBOL(movable_zone);
 351 #endif /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */
 352 
 353 #if MAX_NUMNODES > 1
 354 unsigned int nr_node_ids __read_mostly = MAX_NUMNODES;
 355 unsigned int nr_online_nodes __read_mostly = 1;
 356 EXPORT_SYMBOL(nr_node_ids);
 357 EXPORT_SYMBOL(nr_online_nodes);
 358 #endif
 359 
 360 int page_group_by_mobility_disabled __read_mostly;
 361 
 362 #ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
 363 /*
 364  * During boot we initialize deferred pages on-demand, as needed, but once
 365  * page_alloc_init_late() has finished, the deferred pages are all initialized,
 366  * and we can permanently disable that path.
 367  */
 368 static DEFINE_STATIC_KEY_TRUE(deferred_pages);
 369 
 370 /*
  371  * Call kasan_free_pages() only after deferred memory initialization
  372  * has completed. Poisoning pages during deferred memory init would greatly
  373  * lengthen the process and cause problems in large memory systems, as the
  374  * deferred page initialization is done with interrupts disabled.
 375  *
 376  * Assuming that there will be no reference to those newly initialized
 377  * pages before they are ever allocated, this should have no effect on
 378  * KASAN memory tracking as the poison will be properly inserted at page
 379  * allocation time. The only corner case is when pages are allocated by
 380  * on-demand allocation and then freed again before the deferred pages
 381  * initialization is done, but this is not likely to happen.
 382  */
 383 static inline void kasan_free_nondeferred_pages(struct page *page, int order)
 384 {
 385         if (!static_branch_unlikely(&deferred_pages))
 386                 kasan_free_pages(page, order);
 387 }
 388 
 389 /* Returns true if the struct page for the pfn is uninitialised */
 390 static inline bool __meminit early_page_uninitialised(unsigned long pfn)
 391 {
 392         int nid = early_pfn_to_nid(pfn);
 393 
 394         if (node_online(nid) && pfn >= NODE_DATA(nid)->first_deferred_pfn)
 395                 return true;
 396 
 397         return false;
 398 }
 399 
 400 /*
 401  * Returns true when the remaining initialisation should be deferred until
 402  * later in the boot cycle when it can be parallelised.
 403  */
 404 static bool __meminit
 405 defer_init(int nid, unsigned long pfn, unsigned long end_pfn)
 406 {
 407         static unsigned long prev_end_pfn, nr_initialised;
 408 
 409         /*
  410          * The prev_end_pfn static contains the end of the previous zone.
  411          * No need to protect it: this is called very early in boot, before smp_init.
 412          */
 413         if (prev_end_pfn != end_pfn) {
 414                 prev_end_pfn = end_pfn;
 415                 nr_initialised = 0;
 416         }
 417 
 418         /* Always populate low zones for address-constrained allocations */
 419         if (end_pfn < pgdat_end_pfn(NODE_DATA(nid)))
 420                 return false;
 421 
 422         /*
  423          * We start with only one section of pages; more pages are added as
  424          * needed until the rest of the deferred pages are initialized.
 425          */
 426         nr_initialised++;
 427         if ((nr_initialised > PAGES_PER_SECTION) &&
 428             (pfn & (PAGES_PER_SECTION - 1)) == 0) {
 429                 NODE_DATA(nid)->first_deferred_pfn = pfn;
 430                 return true;
 431         }
 432         return false;
 433 }
 434 #else
 435 #define kasan_free_nondeferred_pages(p, o)      kasan_free_pages(p, o)
 436 
 437 static inline bool early_page_uninitialised(unsigned long pfn)
 438 {
 439         return false;
 440 }
 441 
 442 static inline bool defer_init(int nid, unsigned long pfn, unsigned long end_pfn)
 443 {
 444         return false;
 445 }
 446 #endif
 447 
 448 /* Return a pointer to the bitmap storing bits affecting a block of pages */
 449 static inline unsigned long *get_pageblock_bitmap(struct page *page,
 450                                                         unsigned long pfn)
 451 {
 452 #ifdef CONFIG_SPARSEMEM
 453         return section_to_usemap(__pfn_to_section(pfn));
 454 #else
 455         return page_zone(page)->pageblock_flags;
 456 #endif /* CONFIG_SPARSEMEM */
 457 }
 458 
 459 static inline int pfn_to_bitidx(struct page *page, unsigned long pfn)
 460 {
 461 #ifdef CONFIG_SPARSEMEM
 462         pfn &= (PAGES_PER_SECTION-1);
 463         return (pfn >> pageblock_order) * NR_PAGEBLOCK_BITS;
 464 #else
 465         pfn = pfn - round_down(page_zone(page)->zone_start_pfn, pageblock_nr_pages);
 466         return (pfn >> pageblock_order) * NR_PAGEBLOCK_BITS;
 467 #endif /* CONFIG_SPARSEMEM */
 468 }
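
/*
 * Worked example (assuming pageblock_order == 9, i.e. 2MB pageblocks with
 * 4KB pages): each pageblock owns NR_PAGEBLOCK_BITS == 4 bits of the bitmap
 * (three migratetype bits plus the compaction skip bit), so for pfn 0x4200
 * within its section:
 *
 *	bitidx      = (0x4200 >> 9) * 4 = 33 * 4 = 132
 *	word_bitidx = 132 / 64 = 2, in-word bitidx = 132 % 64 = 4
 *
 * i.e. this pageblock's flags live in the third unsigned long of the bitmap.
 */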
 469 
 470 /**
 471  * get_pfnblock_flags_mask - Return the requested group of flags for the pageblock_nr_pages block of pages
 472  * @page: The page within the block of interest
 473  * @pfn: The target page frame number
 474  * @end_bitidx: The last bit of interest to retrieve
 475  * @mask: mask of bits that the caller is interested in
 476  *
 477  * Return: pageblock_bits flags
 478  */
 479 static __always_inline unsigned long __get_pfnblock_flags_mask(struct page *page,
 480                                         unsigned long pfn,
 481                                         unsigned long end_bitidx,
 482                                         unsigned long mask)
 483 {
 484         unsigned long *bitmap;
 485         unsigned long bitidx, word_bitidx;
 486         unsigned long word;
 487 
 488         bitmap = get_pageblock_bitmap(page, pfn);
 489         bitidx = pfn_to_bitidx(page, pfn);
 490         word_bitidx = bitidx / BITS_PER_LONG;
 491         bitidx &= (BITS_PER_LONG-1);
 492 
 493         word = bitmap[word_bitidx];
 494         bitidx += end_bitidx;
 495         return (word >> (BITS_PER_LONG - bitidx - 1)) & mask;
 496 }
 497 
 498 unsigned long get_pfnblock_flags_mask(struct page *page, unsigned long pfn,
 499                                         unsigned long end_bitidx,
 500                                         unsigned long mask)
 501 {
 502         return __get_pfnblock_flags_mask(page, pfn, end_bitidx, mask);
 503 }
 504 
 505 static __always_inline int get_pfnblock_migratetype(struct page *page, unsigned long pfn)
 506 {
 507         return __get_pfnblock_flags_mask(page, pfn, PB_migrate_end, MIGRATETYPE_MASK);
 508 }
 509 
 510 /**
 511  * set_pfnblock_flags_mask - Set the requested group of flags for a pageblock_nr_pages block of pages
 512  * @page: The page within the block of interest
 513  * @flags: The flags to set
 514  * @pfn: The target page frame number
 515  * @end_bitidx: The last bit of interest
 516  * @mask: mask of bits that the caller is interested in
 517  */
 518 void set_pfnblock_flags_mask(struct page *page, unsigned long flags,
 519                                         unsigned long pfn,
 520                                         unsigned long end_bitidx,
 521                                         unsigned long mask)
 522 {
 523         unsigned long *bitmap;
 524         unsigned long bitidx, word_bitidx;
 525         unsigned long old_word, word;
 526 
 527         BUILD_BUG_ON(NR_PAGEBLOCK_BITS != 4);
 528         BUILD_BUG_ON(MIGRATE_TYPES > (1 << PB_migratetype_bits));
 529 
 530         bitmap = get_pageblock_bitmap(page, pfn);
 531         bitidx = pfn_to_bitidx(page, pfn);
 532         word_bitidx = bitidx / BITS_PER_LONG;
 533         bitidx &= (BITS_PER_LONG-1);
 534 
 535         VM_BUG_ON_PAGE(!zone_spans_pfn(page_zone(page), pfn), page);
 536 
 537         bitidx += end_bitidx;
 538         mask <<= (BITS_PER_LONG - bitidx - 1);
 539         flags <<= (BITS_PER_LONG - bitidx - 1);
 540 
 541         word = READ_ONCE(bitmap[word_bitidx]);
 542         for (;;) {
 543                 old_word = cmpxchg(&bitmap[word_bitidx], word, (word & ~mask) | flags);
 544                 if (word == old_word)
 545                         break;
 546                 word = old_word;
 547         }
 548 }
 549 
 550 void set_pageblock_migratetype(struct page *page, int migratetype)
 551 {
 552         if (unlikely(page_group_by_mobility_disabled &&
 553                      migratetype < MIGRATE_PCPTYPES))
 554                 migratetype = MIGRATE_UNMOVABLE;
 555 
 556         set_pageblock_flags_group(page, (unsigned long)migratetype,
 557                                         PB_migrate, PB_migrate_end);
 558 }
 559 
 560 #ifdef CONFIG_DEBUG_VM
 561 static int page_outside_zone_boundaries(struct zone *zone, struct page *page)
 562 {
 563         int ret = 0;
 564         unsigned seq;
 565         unsigned long pfn = page_to_pfn(page);
 566         unsigned long sp, start_pfn;
 567 
 568         do {
 569                 seq = zone_span_seqbegin(zone);
 570                 start_pfn = zone->zone_start_pfn;
 571                 sp = zone->spanned_pages;
 572                 if (!zone_spans_pfn(zone, pfn))
 573                         ret = 1;
 574         } while (zone_span_seqretry(zone, seq));
 575 
 576         if (ret)
 577                 pr_err("page 0x%lx outside node %d zone %s [ 0x%lx - 0x%lx ]\n",
 578                         pfn, zone_to_nid(zone), zone->name,
 579                         start_pfn, start_pfn + sp);
 580 
 581         return ret;
 582 }
 583 
 584 static int page_is_consistent(struct zone *zone, struct page *page)
 585 {
 586         if (!pfn_valid_within(page_to_pfn(page)))
 587                 return 0;
 588         if (zone != page_zone(page))
 589                 return 0;
 590 
 591         return 1;
 592 }
 593 /*
 594  * Temporary debugging check for pages not lying within a given zone.
 595  */
 596 static int __maybe_unused bad_range(struct zone *zone, struct page *page)
 597 {
 598         if (page_outside_zone_boundaries(zone, page))
 599                 return 1;
 600         if (!page_is_consistent(zone, page))
 601                 return 1;
 602 
 603         return 0;
 604 }
 605 #else
 606 static inline int __maybe_unused bad_range(struct zone *zone, struct page *page)
 607 {
 608         return 0;
 609 }
 610 #endif
 611 
 612 static void bad_page(struct page *page, const char *reason,
 613                 unsigned long bad_flags)
 614 {
 615         static unsigned long resume;
 616         static unsigned long nr_shown;
 617         static unsigned long nr_unshown;
 618 
 619         /*
 620          * Allow a burst of 60 reports, then keep quiet for that minute;
 621          * or allow a steady drip of one report per second.
 622          */
 623         if (nr_shown == 60) {
 624                 if (time_before(jiffies, resume)) {
 625                         nr_unshown++;
 626                         goto out;
 627                 }
 628                 if (nr_unshown) {
 629                         pr_alert(
 630                               "BUG: Bad page state: %lu messages suppressed\n",
 631                                 nr_unshown);
 632                         nr_unshown = 0;
 633                 }
 634                 nr_shown = 0;
 635         }
 636         if (nr_shown++ == 0)
 637                 resume = jiffies + 60 * HZ;
 638 
 639         pr_alert("BUG: Bad page state in process %s  pfn:%05lx\n",
 640                 current->comm, page_to_pfn(page));
 641         __dump_page(page, reason);
 642         bad_flags &= page->flags;
 643         if (bad_flags)
 644                 pr_alert("bad because of flags: %#lx(%pGp)\n",
 645                                                 bad_flags, &bad_flags);
 646         dump_page_owner(page);
 647 
 648         print_modules();
 649         dump_stack();
 650 out:
 651         /* Leave bad fields for debug, except PageBuddy could make trouble */
 652         page_mapcount_reset(page); /* remove PageBuddy */
 653         add_taint(TAINT_BAD_PAGE, LOCKDEP_NOW_UNRELIABLE);
 654 }
 655 
 656 /*
 657  * Higher-order pages are called "compound pages".  They are structured thusly:
 658  *
  659  * The first PAGE_SIZE page is called the "head page" and has PG_head set.
 660  *
 661  * The remaining PAGE_SIZE pages are called "tail pages". PageTail() is encoded
  662  * in bit 0 of page->compound_head. The rest of the bits form a pointer to the head page.
 663  *
  664  * The first tail page's ->compound_dtor holds the offset into the array of compound
 665  * page destructors. See compound_page_dtors.
 666  *
 667  * The first tail page's ->compound_order holds the order of allocation.
 668  * This usage means that zero-order pages may not be compound.
 669  */
 670 
 671 void free_compound_page(struct page *page)
 672 {
 673         mem_cgroup_uncharge(page);
 674         __free_pages_ok(page, compound_order(page));
 675 }
 676 
 677 void prep_compound_page(struct page *page, unsigned int order)
 678 {
 679         int i;
 680         int nr_pages = 1 << order;
 681 
 682         set_compound_page_dtor(page, COMPOUND_PAGE_DTOR);
 683         set_compound_order(page, order);
 684         __SetPageHead(page);
 685         for (i = 1; i < nr_pages; i++) {
 686                 struct page *p = page + i;
 687                 set_page_count(p, 0);
 688                 p->mapping = TAIL_MAPPING;
 689                 set_compound_head(p, page);
 690         }
 691         atomic_set(compound_mapcount_ptr(page), -1);
 692 }
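
/*
 * A minimal allocation sketch; example_compound_usage() is hypothetical and
 * only illustrates the structure described above: passing __GFP_COMP makes
 * the allocator call prep_compound_page(), which links every tail page back
 * to the head.
 */
static void __maybe_unused example_compound_usage(void)
{
	/* order-2 compound page: one head plus three tail pages */
	struct page *page = alloc_pages(GFP_KERNEL | __GFP_COMP, 2);

	if (!page)
		return;

	VM_BUG_ON_PAGE(!PageHead(page), page);			/* PG_head on the head */
	VM_BUG_ON_PAGE(!PageTail(page + 1), page + 1);		/* bit 0 of ->compound_head */
	VM_BUG_ON_PAGE(compound_head(page + 3) != page, page);	/* tails point at the head */
	VM_BUG_ON_PAGE(compound_order(page) != 2, page);	/* order kept in the first tail */

	__free_pages(page, 2);
}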
 693 
 694 #ifdef CONFIG_DEBUG_PAGEALLOC
 695 unsigned int _debug_guardpage_minorder;
 696 
 697 bool _debug_pagealloc_enabled_early __read_mostly
 698                         = IS_ENABLED(CONFIG_DEBUG_PAGEALLOC_ENABLE_DEFAULT);
 699 EXPORT_SYMBOL(_debug_pagealloc_enabled_early);
 700 DEFINE_STATIC_KEY_FALSE(_debug_pagealloc_enabled);
 701 EXPORT_SYMBOL(_debug_pagealloc_enabled);
 702 
 703 DEFINE_STATIC_KEY_FALSE(_debug_guardpage_enabled);
 704 
 705 static int __init early_debug_pagealloc(char *buf)
 706 {
 707         return kstrtobool(buf, &_debug_pagealloc_enabled_early);
 708 }
 709 early_param("debug_pagealloc", early_debug_pagealloc);
 710 
 711 void init_debug_pagealloc(void)
 712 {
 713         if (!debug_pagealloc_enabled())
 714                 return;
 715 
 716         static_branch_enable(&_debug_pagealloc_enabled);
 717 
 718         if (!debug_guardpage_minorder())
 719                 return;
 720 
 721         static_branch_enable(&_debug_guardpage_enabled);
 722 }
 723 
 724 static int __init debug_guardpage_minorder_setup(char *buf)
 725 {
 726         unsigned long res;
 727 
 728         if (kstrtoul(buf, 10, &res) < 0 ||  res > MAX_ORDER / 2) {
 729                 pr_err("Bad debug_guardpage_minorder value\n");
 730                 return 0;
 731         }
 732         _debug_guardpage_minorder = res;
 733         pr_info("Setting debug_guardpage_minorder to %lu\n", res);
 734         return 0;
 735 }
 736 early_param("debug_guardpage_minorder", debug_guardpage_minorder_setup);
 737 
 738 static inline bool set_page_guard(struct zone *zone, struct page *page,
 739                                 unsigned int order, int migratetype)
 740 {
 741         if (!debug_guardpage_enabled())
 742                 return false;
 743 
 744         if (order >= debug_guardpage_minorder())
 745                 return false;
 746 
 747         __SetPageGuard(page);
 748         INIT_LIST_HEAD(&page->lru);
 749         set_page_private(page, order);
 750         /* Guard pages are not available for any usage */
 751         __mod_zone_freepage_state(zone, -(1 << order), migratetype);
 752 
 753         return true;
 754 }
 755 
 756 static inline void clear_page_guard(struct zone *zone, struct page *page,
 757                                 unsigned int order, int migratetype)
 758 {
 759         if (!debug_guardpage_enabled())
 760                 return;
 761 
 762         __ClearPageGuard(page);
 763 
 764         set_page_private(page, 0);
 765         if (!is_migrate_isolate(migratetype))
 766                 __mod_zone_freepage_state(zone, (1 << order), migratetype);
 767 }
 768 #else
 769 static inline bool set_page_guard(struct zone *zone, struct page *page,
 770                         unsigned int order, int migratetype) { return false; }
 771 static inline void clear_page_guard(struct zone *zone, struct page *page,
 772                                 unsigned int order, int migratetype) {}
 773 #endif
 774 
 775 static inline void set_page_order(struct page *page, unsigned int order)
 776 {
 777         set_page_private(page, order);
 778         __SetPageBuddy(page);
 779 }
 780 
 781 /*
  782  * This function checks whether a page is free && is the buddy of the given page.
  783  * We can coalesce a page and its buddy if
 784  * (a) the buddy is not in a hole (check before calling!) &&
 785  * (b) the buddy is in the buddy system &&
 786  * (c) a page and its buddy have the same order &&
 787  * (d) a page and its buddy are in the same zone.
 788  *
 789  * For recording whether a page is in the buddy system, we set PageBuddy.
 790  * Setting, clearing, and testing PageBuddy is serialized by zone->lock.
 791  *
 792  * For recording page's order, we use page_private(page).
 793  */
 794 static inline int page_is_buddy(struct page *page, struct page *buddy,
 795                                                         unsigned int order)
 796 {
 797         if (page_is_guard(buddy) && page_order(buddy) == order) {
 798                 if (page_zone_id(page) != page_zone_id(buddy))
 799                         return 0;
 800 
 801                 VM_BUG_ON_PAGE(page_count(buddy) != 0, buddy);
 802 
 803                 return 1;
 804         }
 805 
 806         if (PageBuddy(buddy) && page_order(buddy) == order) {
 807                 /*
 808                  * zone check is done late to avoid uselessly
 809                  * calculating zone/node ids for pages that could
 810                  * never merge.
 811                  */
 812                 if (page_zone_id(page) != page_zone_id(buddy))
 813                         return 0;
 814 
 815                 VM_BUG_ON_PAGE(page_count(buddy) != 0, buddy);
 816 
 817                 return 1;
 818         }
 819         return 0;
 820 }
 821 
 822 #ifdef CONFIG_COMPACTION
 823 static inline struct capture_control *task_capc(struct zone *zone)
 824 {
 825         struct capture_control *capc = current->capture_control;
 826 
 827         return capc &&
 828                 !(current->flags & PF_KTHREAD) &&
 829                 !capc->page &&
 830                 capc->cc->zone == zone &&
 831                 capc->cc->direct_compaction ? capc : NULL;
 832 }
 833 
 834 static inline bool
 835 compaction_capture(struct capture_control *capc, struct page *page,
 836                    int order, int migratetype)
 837 {
 838         if (!capc || order != capc->cc->order)
 839                 return false;
 840 
  841         /* Do not accidentally pollute CMA or isolated regions */
 842         if (is_migrate_cma(migratetype) ||
 843             is_migrate_isolate(migratetype))
 844                 return false;
 845 
 846         /*
  847          * Do not let lower order allocations pollute a movable pageblock.
 848          * This might let an unmovable request use a reclaimable pageblock
 849          * and vice-versa but no more than normal fallback logic which can
 850          * have trouble finding a high-order free page.
 851          */
 852         if (order < pageblock_order && migratetype == MIGRATE_MOVABLE)
 853                 return false;
 854 
 855         capc->page = page;
 856         return true;
 857 }
 858 
 859 #else
 860 static inline struct capture_control *task_capc(struct zone *zone)
 861 {
 862         return NULL;
 863 }
 864 
 865 static inline bool
 866 compaction_capture(struct capture_control *capc, struct page *page,
 867                    int order, int migratetype)
 868 {
 869         return false;
 870 }
 871 #endif /* CONFIG_COMPACTION */
 872 
 873 /*
 874  * Freeing function for a buddy system allocator.
 875  *
 876  * The concept of a buddy system is to maintain direct-mapped table
 877  * (containing bit values) for memory blocks of various "orders".
 878  * The bottom level table contains the map for the smallest allocatable
 879  * units of memory (here, pages), and each level above it describes
 880  * pairs of units from the levels below, hence, "buddies".
 881  * At a high level, all that happens here is marking the table entry
 882  * at the bottom level available, and propagating the changes upward
 883  * as necessary, plus some accounting needed to play nicely with other
 884  * parts of the VM system.
  885  * At each level, we keep a list of pages, which are heads of contiguous
  886  * free runs of length (1 << order), marked with PageBuddy.
 887  * Page's order is recorded in page_private(page) field.
 888  * So when we are allocating or freeing one, we can derive the state of the
 889  * other.  That is, if we allocate a small block, and both were
 890  * free, the remainder of the region must be split into blocks.
 891  * If a block is freed, and its buddy is also free, then this
 892  * triggers coalescing into a block of larger size.
 893  *
 894  * -- nyc
 895  */
 896 
 897 static inline void __free_one_page(struct page *page,
 898                 unsigned long pfn,
 899                 struct zone *zone, unsigned int order,
 900                 int migratetype)
 901 {
 902         unsigned long combined_pfn;
 903         unsigned long uninitialized_var(buddy_pfn);
 904         struct page *buddy;
 905         unsigned int max_order;
 906         struct capture_control *capc = task_capc(zone);
 907 
 908         max_order = min_t(unsigned int, MAX_ORDER, pageblock_order + 1);
 909 
 910         VM_BUG_ON(!zone_is_initialized(zone));
 911         VM_BUG_ON_PAGE(page->flags & PAGE_FLAGS_CHECK_AT_PREP, page);
 912 
 913         VM_BUG_ON(migratetype == -1);
 914         if (likely(!is_migrate_isolate(migratetype)))
 915                 __mod_zone_freepage_state(zone, 1 << order, migratetype);
 916 
 917         VM_BUG_ON_PAGE(pfn & ((1 << order) - 1), page);
 918         VM_BUG_ON_PAGE(bad_range(zone, page), page);
 919 
 920 continue_merging:
 921         while (order < max_order - 1) {
 922                 if (compaction_capture(capc, page, order, migratetype)) {
 923                         __mod_zone_freepage_state(zone, -(1 << order),
 924                                                                 migratetype);
 925                         return;
 926                 }
 927                 buddy_pfn = __find_buddy_pfn(pfn, order);
 928                 buddy = page + (buddy_pfn - pfn);
 929 
 930                 if (!pfn_valid_within(buddy_pfn))
 931                         goto done_merging;
 932                 if (!page_is_buddy(page, buddy, order))
 933                         goto done_merging;
 934                 /*
 935                  * Our buddy is free or it is CONFIG_DEBUG_PAGEALLOC guard page,
 936                  * merge with it and move up one order.
 937                  */
 938                 if (page_is_guard(buddy))
 939                         clear_page_guard(zone, buddy, order, migratetype);
 940                 else
 941                         del_page_from_free_area(buddy, &zone->free_area[order]);
 942                 combined_pfn = buddy_pfn & pfn;
 943                 page = page + (combined_pfn - pfn);
 944                 pfn = combined_pfn;
 945                 order++;
 946         }
 947         if (max_order < MAX_ORDER) {
 948                 /* If we are here, it means order is >= pageblock_order.
 949                  * We want to prevent merge between freepages on isolate
 950                  * pageblock and normal pageblock. Without this, pageblock
 951                  * isolation could cause incorrect freepage or CMA accounting.
 952                  *
 953                  * We don't want to hit this code for the more frequent
 954                  * low-order merging.
 955                  */
 956                 if (unlikely(has_isolate_pageblock(zone))) {
 957                         int buddy_mt;
 958 
 959                         buddy_pfn = __find_buddy_pfn(pfn, order);
 960                         buddy = page + (buddy_pfn - pfn);
 961                         buddy_mt = get_pageblock_migratetype(buddy);
 962 
 963                         if (migratetype != buddy_mt
 964                                         && (is_migrate_isolate(migratetype) ||
 965                                                 is_migrate_isolate(buddy_mt)))
 966                                 goto done_merging;
 967                 }
 968                 max_order++;
 969                 goto continue_merging;
 970         }
 971 
 972 done_merging:
 973         set_page_order(page, order);
 974 
 975         /*
 976          * If this is not the largest possible page, check if the buddy
 977          * of the next-highest order is free. If it is, it's possible
  978          * that pages are being freed that will coalesce soon. If that is
  979          * happening, add the free page to the tail of the list so it is
  980          * less likely to be used soon and more likely to be merged into a
  981          * higher-order page.
 982          */
 983         if ((order < MAX_ORDER-2) && pfn_valid_within(buddy_pfn)
 984                         && !is_shuffle_order(order)) {
 985                 struct page *higher_page, *higher_buddy;
 986                 combined_pfn = buddy_pfn & pfn;
 987                 higher_page = page + (combined_pfn - pfn);
 988                 buddy_pfn = __find_buddy_pfn(combined_pfn, order + 1);
 989                 higher_buddy = higher_page + (buddy_pfn - combined_pfn);
 990                 if (pfn_valid_within(buddy_pfn) &&
 991                     page_is_buddy(higher_page, higher_buddy, order + 1)) {
 992                         add_to_free_area_tail(page, &zone->free_area[order],
 993                                               migratetype);
 994                         return;
 995                 }
 996         }
 997 
 998         if (is_shuffle_order(order))
 999                 add_to_free_area_random(page, &zone->free_area[order],
1000                                 migratetype);
1001         else
1002                 add_to_free_area(page, &zone->free_area[order], migratetype);
1003 
1004 }
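
/*
 * Worked example of the merging arithmetic above (illustrative):
 * __find_buddy_pfn(pfn, order) in mm/internal.h is pfn ^ (1 << order), and
 * the merged block starts at buddy_pfn & pfn.  Freeing an order-0 page at
 * pfn 12 while pfn 13, the order-1 block at 14 and the order-2 block at 8
 * are already free merges as follows:
 *
 *	order 0: buddy = 12 ^ 1 = 13, combined = 13 & 12 = 12
 *	order 1: buddy = 12 ^ 2 = 14, combined = 14 & 12 = 12
 *	order 2: buddy = 12 ^ 4 =  8, combined =  8 & 12 =  8
 *
 * leaving a single order-3 free block at pfn 8.
 */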
1005 
1006 /*
1007  * A bad page could be due to a number of fields. Instead of multiple branches,
1008  * try and check multiple fields with one check. The caller must do a detailed
1009  * check if necessary.
1010  */
1011 static inline bool page_expected_state(struct page *page,
1012                                         unsigned long check_flags)
1013 {
1014         if (unlikely(atomic_read(&page->_mapcount) != -1))
1015                 return false;
1016 
1017         if (unlikely((unsigned long)page->mapping |
1018                         page_ref_count(page) |
1019 #ifdef CONFIG_MEMCG
1020                         (unsigned long)page->mem_cgroup |
1021 #endif
1022                         (page->flags & check_flags)))
1023                 return false;
1024 
1025         return true;
1026 }
1027 
1028 static void free_pages_check_bad(struct page *page)
1029 {
1030         const char *bad_reason;
1031         unsigned long bad_flags;
1032 
1033         bad_reason = NULL;
1034         bad_flags = 0;
1035 
1036         if (unlikely(atomic_read(&page->_mapcount) != -1))
1037                 bad_reason = "nonzero mapcount";
1038         if (unlikely(page->mapping != NULL))
1039                 bad_reason = "non-NULL mapping";
1040         if (unlikely(page_ref_count(page) != 0))
1041                 bad_reason = "nonzero _refcount";
1042         if (unlikely(page->flags & PAGE_FLAGS_CHECK_AT_FREE)) {
1043                 bad_reason = "PAGE_FLAGS_CHECK_AT_FREE flag(s) set";
1044                 bad_flags = PAGE_FLAGS_CHECK_AT_FREE;
1045         }
1046 #ifdef CONFIG_MEMCG
1047         if (unlikely(page->mem_cgroup))
1048                 bad_reason = "page still charged to cgroup";
1049 #endif
1050         bad_page(page, bad_reason, bad_flags);
1051 }
1052 
1053 static inline int free_pages_check(struct page *page)
1054 {
1055         if (likely(page_expected_state(page, PAGE_FLAGS_CHECK_AT_FREE)))
1056                 return 0;
1057 
1058         /* Something has gone sideways, find it */
1059         free_pages_check_bad(page);
1060         return 1;
1061 }
1062 
1063 static int free_tail_pages_check(struct page *head_page, struct page *page)
1064 {
1065         int ret = 1;
1066 
1067         /*
 1068          * We rely on page->lru.next never having bit 0 set, unless the page
1069          * is PageTail(). Let's make sure that's true even for poisoned ->lru.
1070          */
1071         BUILD_BUG_ON((unsigned long)LIST_POISON1 & 1);
1072 
1073         if (!IS_ENABLED(CONFIG_DEBUG_VM)) {
1074                 ret = 0;
1075                 goto out;
1076         }
1077         switch (page - head_page) {
1078         case 1:
1079                 /* the first tail page: ->mapping may be compound_mapcount() */
1080                 if (unlikely(compound_mapcount(page))) {
1081                         bad_page(page, "nonzero compound_mapcount", 0);
1082                         goto out;
1083                 }
1084                 break;
1085         case 2:
1086                 /*
1087                  * the second tail page: ->mapping is
1088                  * deferred_list.next -- ignore value.
1089                  */
1090                 break;
1091         default:
1092                 if (page->mapping != TAIL_MAPPING) {
1093                         bad_page(page, "corrupted mapping in tail page", 0);
1094                         goto out;
1095                 }
1096                 break;
1097         }
1098         if (unlikely(!PageTail(page))) {
1099                 bad_page(page, "PageTail not set", 0);
1100                 goto out;
1101         }
1102         if (unlikely(compound_head(page) != head_page)) {
1103                 bad_page(page, "compound_head not consistent", 0);
1104                 goto out;
1105         }
1106         ret = 0;
1107 out:
1108         page->mapping = NULL;
1109         clear_compound_head(page);
1110         return ret;
1111 }
1112 
1113 static void kernel_init_free_pages(struct page *page, int numpages)
1114 {
1115         int i;
1116 
1117         for (i = 0; i < numpages; i++)
1118                 clear_highpage(page + i);
1119 }
1120 
1121 static __always_inline bool free_pages_prepare(struct page *page,
1122                                         unsigned int order, bool check_free)
1123 {
1124         int bad = 0;
1125 
1126         VM_BUG_ON_PAGE(PageTail(page), page);
1127 
1128         trace_mm_page_free(page, order);
1129 
1130         /*
1131          * Check tail pages before head page information is cleared to
1132          * avoid checking PageCompound for order-0 pages.
1133          */
1134         if (unlikely(order)) {
1135                 bool compound = PageCompound(page);
1136                 int i;
1137 
1138                 VM_BUG_ON_PAGE(compound && compound_order(page) != order, page);
1139 
1140                 if (compound)
1141                         ClearPageDoubleMap(page);
1142                 for (i = 1; i < (1 << order); i++) {
1143                         if (compound)
1144                                 bad += free_tail_pages_check(page, page + i);
1145                         if (unlikely(free_pages_check(page + i))) {
1146                                 bad++;
1147                                 continue;
1148                         }
1149                         (page + i)->flags &= ~PAGE_FLAGS_CHECK_AT_PREP;
1150                 }
1151         }
1152         if (PageMappingFlags(page))
1153                 page->mapping = NULL;
1154         if (memcg_kmem_enabled() && PageKmemcg(page))
1155                 __memcg_kmem_uncharge(page, order);
1156         if (check_free)
1157                 bad += free_pages_check(page);
1158         if (bad)
1159                 return false;
1160 
1161         page_cpupid_reset_last(page);
1162         page->flags &= ~PAGE_FLAGS_CHECK_AT_PREP;
1163         reset_page_owner(page, order);
1164 
1165         if (!PageHighMem(page)) {
1166                 debug_check_no_locks_freed(page_address(page),
1167                                            PAGE_SIZE << order);
1168                 debug_check_no_obj_freed(page_address(page),
1169                                            PAGE_SIZE << order);
1170         }
1171         if (want_init_on_free())
1172                 kernel_init_free_pages(page, 1 << order);
1173 
1174         kernel_poison_pages(page, 1 << order, 0);
1175         /*
1176          * arch_free_page() can make the page's contents inaccessible.  s390
1177          * does this.  So nothing which can access the page's contents should
1178          * happen after this.
1179          */
1180         arch_free_page(page, order);
1181 
1182         if (debug_pagealloc_enabled_static())
1183                 kernel_map_pages(page, 1 << order, 0);
1184 
1185         kasan_free_nondeferred_pages(page, order);
1186 
1187         return true;
1188 }
1189 
1190 #ifdef CONFIG_DEBUG_VM
1191 /*
1192  * With DEBUG_VM enabled, order-0 pages are checked immediately when being freed
1193  * to pcp lists. With debug_pagealloc also enabled, they are also rechecked when
1194  * moved from pcp lists to free lists.
1195  */
1196 static bool free_pcp_prepare(struct page *page)
1197 {
1198         return free_pages_prepare(page, 0, true);
1199 }
1200 
1201 static bool bulkfree_pcp_prepare(struct page *page)
1202 {
1203         if (debug_pagealloc_enabled_static())
1204                 return free_pages_check(page);
1205         else
1206                 return false;
1207 }
1208 #else
1209 /*
1210  * With DEBUG_VM disabled, order-0 pages being freed are checked only when
1211  * moving from pcp lists to free list in order to reduce overhead. With
1212  * debug_pagealloc enabled, they are checked also immediately when being freed
1213  * to the pcp lists.
1214  */
1215 static bool free_pcp_prepare(struct page *page)
1216 {
1217         if (debug_pagealloc_enabled_static())
1218                 return free_pages_prepare(page, 0, true);
1219         else
1220                 return free_pages_prepare(page, 0, false);
1221 }
1222 
1223 static bool bulkfree_pcp_prepare(struct page *page)
1224 {
1225         return free_pages_check(page);
1226 }
1227 #endif /* CONFIG_DEBUG_VM */
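
/*
 * Summary of the two variants above:
 *
 *	DEBUG_VM   debug_pagealloc   checked at pcp free   checked at pcp->buddy
 *	    y             n                  yes                    no
 *	    y             y                  yes                    yes
 *	    n             n                  no                     yes
 *	    n             y                  yes                    yes
 */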
1228 
1229 static inline void prefetch_buddy(struct page *page)
1230 {
1231         unsigned long pfn = page_to_pfn(page);
1232         unsigned long buddy_pfn = __find_buddy_pfn(pfn, 0);
1233         struct page *buddy = page + (buddy_pfn - pfn);
1234 
1235         prefetch(buddy);
1236 }
1237 
1238 /*
1239  * Frees a number of pages from the PCP lists
1240  * Assumes all pages on list are in same zone, and of same order.
1241  * count is the number of pages to free.
1242  *
1243  * If the zone was previously in an "all pages pinned" state then look to
1244  * see if this freeing clears that state.
1245  *
1246  * And clear the zone's pages_scanned counter, to hold off the "all pages are
1247  * pinned" detection logic.
1248  */
1249 static void free_pcppages_bulk(struct zone *zone, int count,
1250                                         struct per_cpu_pages *pcp)
1251 {
1252         int migratetype = 0;
1253         int batch_free = 0;
1254         int prefetch_nr = 0;
1255         bool isolated_pageblocks;
1256         struct page *page, *tmp;
1257         LIST_HEAD(head);
1258 
1259         while (count) {
1260                 struct list_head *list;
1261 
1262                 /*
1263                  * Remove pages from lists in a round-robin fashion. A
1264                  * batch_free count is maintained that is incremented when an
1265                  * empty list is encountered.  This is so more pages are freed
1266                  * off fuller lists instead of spinning excessively around empty
1267                  * lists
1268                  */
1269                 do {
1270                         batch_free++;
1271                         if (++migratetype == MIGRATE_PCPTYPES)
1272                                 migratetype = 0;
1273                         list = &pcp->lists[migratetype];
1274                 } while (list_empty(list));
1275 
1276                 /* This is the only non-empty list. Free them all. */
1277                 if (batch_free == MIGRATE_PCPTYPES)
1278                         batch_free = count;
1279 
1280                 do {
1281                         page = list_last_entry(list, struct page, lru);
1282                         /* must delete to avoid corrupting pcp list */
1283                         list_del(&page->lru);
1284                         pcp->count--;
1285 
1286                         if (bulkfree_pcp_prepare(page))
1287                                 continue;
1288 
1289                         list_add_tail(&page->lru, &head);
1290 
1291                         /*
1292                          * We are going to put the page back to the global
1293                          * pool, prefetch its buddy to speed up later access
1294                          * under zone->lock. It is believed the overhead of
1295                          * an additional test and calculating buddy_pfn here
1296                          * can be offset by reduced memory latency later. To
1297                          * avoid excessive prefetching due to large count, only
1298                          * prefetch buddy for the first pcp->batch nr of pages.
1299                          */
1300                         if (prefetch_nr++ < pcp->batch)
1301                                 prefetch_buddy(page);
1302                 } while (--count && --batch_free && !list_empty(list));
1303         }
1304 
1305         spin_lock(&zone->lock);
1306         isolated_pageblocks = has_isolate_pageblock(zone);
1307 
1308         /*
1309          * Use safe version since after __free_one_page(),
1310          * page->lru.next will not point to original list.
1311          */
1312         list_for_each_entry_safe(page, tmp, &head, lru) {
1313                 int mt = get_pcppage_migratetype(page);
1314                 /* MIGRATE_ISOLATE page should not go to pcplists */
1315                 VM_BUG_ON_PAGE(is_migrate_isolate(mt), page);
1316                 /* Pageblock could have been isolated meanwhile */
1317                 if (unlikely(isolated_pageblocks))
1318                         mt = get_pageblock_migratetype(page);
1319 
1320                 __free_one_page(page, page_to_pfn(page), zone, 0, mt);
1321                 trace_mm_page_pcpu_drain(page, 0, mt);
1322         }
1323         spin_unlock(&zone->lock);
1324 }
1325 
1326 static void free_one_page(struct zone *zone,
1327                                 struct page *page, unsigned long pfn,
1328                                 unsigned int order,
1329                                 int migratetype)
1330 {
1331         spin_lock(&zone->lock);
1332         if (unlikely(has_isolate_pageblock(zone) ||
1333                 is_migrate_isolate(migratetype))) {
1334                 migratetype = get_pfnblock_migratetype(page, pfn);
1335         }
1336         __free_one_page(page, pfn, zone, order, migratetype);
1337         spin_unlock(&zone->lock);
1338 }
1339 
1340 static void __meminit __init_single_page(struct page *page, unsigned long pfn,
1341                                 unsigned long zone, int nid)
1342 {
1343         mm_zero_struct_page(page);
1344         set_page_links(page, zone, nid, pfn);
1345         init_page_count(page);
1346         page_mapcount_reset(page);
1347         page_cpupid_reset_last(page);
1348         page_kasan_tag_reset(page);
1349 
1350         INIT_LIST_HEAD(&page->lru);
1351 #ifdef WANT_PAGE_VIRTUAL
1352         /* The shift won't overflow because ZONE_NORMAL is below 4G. */
1353         if (!is_highmem_idx(zone))
1354                 set_page_address(page, __va(pfn << PAGE_SHIFT));
1355 #endif
1356 }
1357 
1358 #ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
1359 static void __meminit init_reserved_page(unsigned long pfn)
1360 {
1361         pg_data_t *pgdat;
1362         int nid, zid;
1363 
1364         if (!early_page_uninitialised(pfn))
1365                 return;
1366 
1367         nid = early_pfn_to_nid(pfn);
1368         pgdat = NODE_DATA(nid);
1369 
1370         for (zid = 0; zid < MAX_NR_ZONES; zid++) {
1371                 struct zone *zone = &pgdat->node_zones[zid];
1372 
1373                 if (pfn >= zone->zone_start_pfn && pfn < zone_end_pfn(zone))
1374                         break;
1375         }
1376         __init_single_page(pfn_to_page(pfn), pfn, zid, nid);
1377 }
1378 #else
1379 static inline void init_reserved_page(unsigned long pfn)
1380 {
1381 }
1382 #endif /* CONFIG_DEFERRED_STRUCT_PAGE_INIT */
1383 
1384 /*
1385  * Initialised pages do not have PageReserved set. This function is
1386  * called for each range allocated by the bootmem allocator and
1387  * marks the pages PageReserved. The remaining valid pages are later
1388  * sent to the buddy page allocator.
1389  */
1390 void __meminit reserve_bootmem_region(phys_addr_t start, phys_addr_t end)
1391 {
1392         unsigned long start_pfn = PFN_DOWN(start);
1393         unsigned long end_pfn = PFN_UP(end);
1394 
1395         for (; start_pfn < end_pfn; start_pfn++) {
1396                 if (pfn_valid(start_pfn)) {
1397                         struct page *page = pfn_to_page(start_pfn);
1398 
1399                         init_reserved_page(start_pfn);
1400 
1401                         /* Avoid false-positive PageTail() */
1402                         INIT_LIST_HEAD(&page->lru);
1403 
1404                         /*
1405                          * no need for atomic set_bit because the struct
1406                          * page is not visible yet so nobody should
1407                          * access it yet.
1408                          */
1409                         __SetPageReserved(page);
1410                 }
1411         }
1412 }
1413 
1414 static void __free_pages_ok(struct page *page, unsigned int order)
1415 {
1416         unsigned long flags;
1417         int migratetype;
1418         unsigned long pfn = page_to_pfn(page);
1419 
1420         if (!free_pages_prepare(page, order, true))
1421                 return;
1422 
1423         migratetype = get_pfnblock_migratetype(page, pfn);
1424         local_irq_save(flags);
1425         __count_vm_events(PGFREE, 1 << order);
1426         free_one_page(page_zone(page), page, pfn, order, migratetype);
1427         local_irq_restore(flags);
1428 }
1429 
1430 void __free_pages_core(struct page *page, unsigned int order)
1431 {
1432         unsigned int nr_pages = 1 << order;
1433         struct page *p = page;
1434         unsigned int loop;
1435 
1436         prefetchw(p);
1437         for (loop = 0; loop < (nr_pages - 1); loop++, p++) {
1438                 prefetchw(p + 1);
1439                 __ClearPageReserved(p);
1440                 set_page_count(p, 0);
1441         }
1442         __ClearPageReserved(p);
1443         set_page_count(p, 0);
1444 
1445         atomic_long_add(nr_pages, &page_zone(page)->managed_pages);
1446         set_page_refcounted(page);
1447         __free_pages(page, order);
1448 }
1449 
1450 #if defined(CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID) || \
1451         defined(CONFIG_HAVE_MEMBLOCK_NODE_MAP)
1452 
1453 static struct mminit_pfnnid_cache early_pfnnid_cache __meminitdata;
1454 
1455 int __meminit early_pfn_to_nid(unsigned long pfn)
1456 {
1457         static DEFINE_SPINLOCK(early_pfn_lock);
1458         int nid;
1459 
1460         spin_lock(&early_pfn_lock);
1461         nid = __early_pfn_to_nid(pfn, &early_pfnnid_cache);
1462         if (nid < 0)
1463                 nid = first_online_node;
1464         spin_unlock(&early_pfn_lock);
1465 
1466         return nid;
1467 }
1468 #endif
1469 
1470 #ifdef CONFIG_NODES_SPAN_OTHER_NODES
1471 /* Only safe to use early in boot when initialisation is single-threaded */
1472 static inline bool __meminit early_pfn_in_nid(unsigned long pfn, int node)
1473 {
1474         int nid;
1475 
1476         nid = __early_pfn_to_nid(pfn, &early_pfnnid_cache);
1477         if (nid >= 0 && nid != node)
1478                 return false;
1479         return true;
1480 }
1481 
1482 #else
1483 static inline bool __meminit early_pfn_in_nid(unsigned long pfn, int node)
1484 {
1485         return true;
1486 }
1487 #endif
1488 
1489 
1490 void __init memblock_free_pages(struct page *page, unsigned long pfn,
1491                                                         unsigned int order)
1492 {
1493         if (early_page_uninitialised(pfn))
1494                 return;
1495         __free_pages_core(page, order);
1496 }
1497 
1498 /*
1499  * Check that the whole (or subset of) a pageblock given by the interval of
1500  * [start_pfn, end_pfn) is valid and within the same zone, before scanning it
1501  * with the migration or free compaction scanner. The scanners then need to
1502  * use only pfn_valid_within() check for arches that allow holes within
1503  * pageblocks.
1504  *
1505  * Return struct page pointer of start_pfn, or NULL if checks were not passed.
1506  *
1507  * It's possible on some configurations to have a setup like node0 node1 node0
1508  * i.e. it's possible that all pages within a zone's range of pages do not
1509  * belong to a single zone. We assume that a border between node0 and node1
1510  * can occur within a single pageblock, but not a node0 node1 node0
1511  * interleaving within a single pageblock. It is therefore sufficient to check
1512  * the first and last page of a pageblock and avoid checking each individual
1513  * page in a pageblock.
1514  */
1515 struct page *__pageblock_pfn_to_page(unsigned long start_pfn,
1516                                      unsigned long end_pfn, struct zone *zone)
1517 {
1518         struct page *start_page;
1519         struct page *end_page;
1520 
1521         /* end_pfn is one past the range we are checking */
1522         end_pfn--;
1523 
1524         if (!pfn_valid(start_pfn) || !pfn_valid(end_pfn))
1525                 return NULL;
1526 
1527         start_page = pfn_to_online_page(start_pfn);
1528         if (!start_page)
1529                 return NULL;
1530 
1531         if (page_zone(start_page) != zone)
1532                 return NULL;
1533 
1534         end_page = pfn_to_page(end_pfn);
1535 
1536         /* This gives a shorter code than deriving page_zone(end_page) */
1537         if (page_zone_id(start_page) != page_zone_id(end_page))
1538                 return NULL;
1539 
1540         return start_page;
1541 }
1542 
1543 void set_zone_contiguous(struct zone *zone)
1544 {
1545         unsigned long block_start_pfn = zone->zone_start_pfn;
1546         unsigned long block_end_pfn;
1547 
1548         block_end_pfn = ALIGN(block_start_pfn + 1, pageblock_nr_pages);
1549         for (; block_start_pfn < zone_end_pfn(zone);
1550                         block_start_pfn = block_end_pfn,
1551                          block_end_pfn += pageblock_nr_pages) {
1552 
1553                 block_end_pfn = min(block_end_pfn, zone_end_pfn(zone));
1554 
1555                 if (!__pageblock_pfn_to_page(block_start_pfn,
1556                                              block_end_pfn, zone))
1557                         return;
1558                 cond_resched();
1559         }
1560 
1561         /* We confirm that there is no hole */
1562         zone->contiguous = true;
1563 }
1564 
1565 void clear_zone_contiguous(struct zone *zone)
1566 {
1567         zone->contiguous = false;
1568 }
1569 
1570 #ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
1571 static void __init deferred_free_range(unsigned long pfn,
1572                                        unsigned long nr_pages)
1573 {
1574         struct page *page;
1575         unsigned long i;
1576 
1577         if (!nr_pages)
1578                 return;
1579 
1580         page = pfn_to_page(pfn);
1581 
1582         /* Free a large naturally-aligned chunk if possible */
1583         if (nr_pages == pageblock_nr_pages &&
1584             (pfn & (pageblock_nr_pages - 1)) == 0) {
1585                 set_pageblock_migratetype(page, MIGRATE_MOVABLE);
1586                 __free_pages_core(page, pageblock_order);
1587                 return;
1588         }
1589 
1590         for (i = 0; i < nr_pages; i++, page++, pfn++) {
1591                 if ((pfn & (pageblock_nr_pages - 1)) == 0)
1592                         set_pageblock_migratetype(page, MIGRATE_MOVABLE);
1593                 __free_pages_core(page, 0);
1594         }
1595 }
1596 
1597 /* Completion tracking for deferred_init_memmap() threads */
1598 static atomic_t pgdat_init_n_undone __initdata;
1599 static __initdata DECLARE_COMPLETION(pgdat_init_all_done_comp);
1600 
1601 static inline void __init pgdat_init_report_one_done(void)
1602 {
1603         if (atomic_dec_and_test(&pgdat_init_n_undone))
1604                 complete(&pgdat_init_all_done_comp);
1605 }
1606 
1607 /*
1608  * Returns true if page needs to be initialized or freed to buddy allocator.
1609  *
1610  * First we check if pfn is valid on architectures where it is possible to have
1611  * holes within pageblock_nr_pages. On systems where it is not possible, this
1612  * function is optimized out.
1613  *
1614  * Then, we check if a current large page is valid by only checking the validity
1615  * of the head pfn.
1616  */
1617 static inline bool __init deferred_pfn_valid(unsigned long pfn)
1618 {
1619         if (!pfn_valid_within(pfn))
1620                 return false;
1621         if (!(pfn & (pageblock_nr_pages - 1)) && !pfn_valid(pfn))
1622                 return false;
1623         return true;
1624 }
1625 
1626 /*
1627  * Free pages to the buddy allocator. Try to free aligned pages in
1628  * pageblock_nr_pages-sized chunks.
1629  */
1630 static void __init deferred_free_pages(unsigned long pfn,
1631                                        unsigned long end_pfn)
1632 {
1633         unsigned long nr_pgmask = pageblock_nr_pages - 1;
1634         unsigned long nr_free = 0;
1635 
1636         for (; pfn < end_pfn; pfn++) {
1637                 if (!deferred_pfn_valid(pfn)) {
1638                         deferred_free_range(pfn - nr_free, nr_free);
1639                         nr_free = 0;
1640                 } else if (!(pfn & nr_pgmask)) {
1641                         deferred_free_range(pfn - nr_free, nr_free);
1642                         nr_free = 1;
1643                         touch_nmi_watchdog();
1644                 } else {
1645                         nr_free++;
1646                 }
1647         }
1648         /* Free the last block of pages to allocator */
1649         deferred_free_range(pfn - nr_free, nr_free);
1650 }
1651 
1652 /*
1653  * Initialize struct pages.  We minimize pfn page lookups and scheduler checks
1654  * by performing them only once every pageblock_nr_pages pages.
1655  * Return number of pages initialized.
1656  */
1657 static unsigned long  __init deferred_init_pages(struct zone *zone,
1658                                                  unsigned long pfn,
1659                                                  unsigned long end_pfn)
1660 {
1661         unsigned long nr_pgmask = pageblock_nr_pages - 1;
1662         int nid = zone_to_nid(zone);
1663         unsigned long nr_pages = 0;
1664         int zid = zone_idx(zone);
1665         struct page *page = NULL;
1666 
1667         for (; pfn < end_pfn; pfn++) {
1668                 if (!deferred_pfn_valid(pfn)) {
1669                         page = NULL;
1670                         continue;
1671                 } else if (!page || !(pfn & nr_pgmask)) {
1672                         page = pfn_to_page(pfn);
1673                         touch_nmi_watchdog();
1674                 } else {
1675                         page++;
1676                 }
1677                 __init_single_page(page, pfn, zid, nid);
1678                 nr_pages++;
1679         }
1680         return nr_pages;
1681 }
1682 
1683 /*
1684  * This function is meant to pre-load the iterator for the zone init.
1685  * Specifically it walks through the ranges until we are caught up to the
1686  * first_init_pfn value and exits there. If we never encounter the value we
1687  * return false indicating there are no valid ranges left.
1688  */
1689 static bool __init
1690 deferred_init_mem_pfn_range_in_zone(u64 *i, struct zone *zone,
1691                                     unsigned long *spfn, unsigned long *epfn,
1692                                     unsigned long first_init_pfn)
1693 {
1694         u64 j;
1695 
1696         /*
1697          * Start out by walking through the ranges in this zone that have
1698          * already been initialized. We don't need to do anything with them
1699          * so we just need to flush them out of the system.
1700          */
1701         for_each_free_mem_pfn_range_in_zone(j, zone, spfn, epfn) {
1702                 if (*epfn <= first_init_pfn)
1703                         continue;
1704                 if (*spfn < first_init_pfn)
1705                         *spfn = first_init_pfn;
1706                 *i = j;
1707                 return true;
1708         }
1709 
1710         return false;
1711 }
1712 
1713 /*
1714  * Initialize and free pages. We do it in two loops: first we initialize
1715  * struct page, then free to buddy allocator, because while we are
1716  * freeing pages we can access pages that are ahead (computing buddy
1717  * page in __free_one_page()).
1718  *
1719  * To try to keep some memory in the cache, the loop is broken along
1720  * max page order boundaries. This way we will not cause
1721  * any issues with the buddy page computation.
1722  */
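/*
 * Example (illustrative, assuming the default MAX_ORDER of 11 so that
 * MAX_ORDER_NR_PAGES is 1024): a call that starts at pfn 10000 first
 * initializes struct pages up to the next 1024-aligned boundary at pfn 10240,
 * and only then frees that same range to the buddy allocator, so
 * __free_one_page() never looks ahead at an uninitialized buddy.
 */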
1723 static unsigned long __init
1724 deferred_init_maxorder(u64 *i, struct zone *zone, unsigned long *start_pfn,
1725                        unsigned long *end_pfn)
1726 {
1727         unsigned long mo_pfn = ALIGN(*start_pfn + 1, MAX_ORDER_NR_PAGES);
1728         unsigned long spfn = *start_pfn, epfn = *end_pfn;
1729         unsigned long nr_pages = 0;
1730         u64 j = *i;
1731 
1732         /* First we loop through and initialize the page values */
1733         for_each_free_mem_pfn_range_in_zone_from(j, zone, start_pfn, end_pfn) {
1734                 unsigned long t;
1735 
1736                 if (mo_pfn <= *start_pfn)
1737                         break;
1738 
1739                 t = min(mo_pfn, *end_pfn);
1740                 nr_pages += deferred_init_pages(zone, *start_pfn, t);
1741 
1742                 if (mo_pfn < *end_pfn) {
1743                         *start_pfn = mo_pfn;
1744                         break;
1745                 }
1746         }
1747 
1748         /* Reset values and now loop through freeing pages as needed */
1749         swap(j, *i);
1750 
1751         for_each_free_mem_pfn_range_in_zone_from(j, zone, &spfn, &epfn) {
1752                 unsigned long t;
1753 
1754                 if (mo_pfn <= spfn)
1755                         break;
1756 
1757                 t = min(mo_pfn, epfn);
1758                 deferred_free_pages(spfn, t);
1759 
1760                 if (mo_pfn <= epfn)
1761                         break;
1762         }
1763 
1764         return nr_pages;
1765 }
1766 
1767 /* Initialise remaining memory on a node */
1768 static int __init deferred_init_memmap(void *data)
1769 {
1770         pg_data_t *pgdat = data;
1771         const struct cpumask *cpumask = cpumask_of_node(pgdat->node_id);
1772         unsigned long spfn = 0, epfn = 0, nr_pages = 0;
1773         unsigned long first_init_pfn, flags;
1774         unsigned long start = jiffies;
1775         struct zone *zone;
1776         int zid;
1777         u64 i;
1778 
1779         /* Bind memory initialisation thread to a local node if possible */
1780         if (!cpumask_empty(cpumask))
1781                 set_cpus_allowed_ptr(current, cpumask);
1782 
1783         pgdat_resize_lock(pgdat, &flags);
1784         first_init_pfn = pgdat->first_deferred_pfn;
1785         if (first_init_pfn == ULONG_MAX) {
1786                 pgdat_resize_unlock(pgdat, &flags);
1787                 pgdat_init_report_one_done();
1788                 return 0;
1789         }
1790 
1791         /* Sanity check boundaries */
1792         BUG_ON(pgdat->first_deferred_pfn < pgdat->node_start_pfn);
1793         BUG_ON(pgdat->first_deferred_pfn > pgdat_end_pfn(pgdat));
1794         pgdat->first_deferred_pfn = ULONG_MAX;
1795 
1796         /* Only the highest zone is deferred so find it */
1797         for (zid = 0; zid < MAX_NR_ZONES; zid++) {
1798                 zone = pgdat->node_zones + zid;
1799                 if (first_init_pfn < zone_end_pfn(zone))
1800                         break;
1801         }
1802 
1803         /* If the zone is empty somebody else may have cleared out the zone */
1804         if (!deferred_init_mem_pfn_range_in_zone(&i, zone, &spfn, &epfn,
1805                                                  first_init_pfn))
1806                 goto zone_empty;
1807 
1808         /*
1809          * Initialize and free pages in MAX_ORDER sized increments so
1810          * that we can avoid introducing any issues with the buddy
1811          * allocator.
1812          */
1813         while (spfn < epfn)
1814                 nr_pages += deferred_init_maxorder(&i, zone, &spfn, &epfn);
1815 zone_empty:
1816         pgdat_resize_unlock(pgdat, &flags);
1817 
1818         /* Sanity check that the next zone really is unpopulated */
1819         WARN_ON(++zid < MAX_NR_ZONES && populated_zone(++zone));
1820 
1821         pr_info("node %d initialised, %lu pages in %ums\n",
1822                 pgdat->node_id, nr_pages, jiffies_to_msecs(jiffies - start));
1823 
1824         pgdat_init_report_one_done();
1825         return 0;
1826 }
1827 
1828 /*
1829  * If this zone has deferred pages, try to grow it by initializing enough
1830  * deferred pages to satisfy the allocation specified by order, rounded up to
1831  * the nearest PAGES_PER_SECTION boundary.  So we're adding memory in increments
1832  * of SECTION_SIZE bytes by initializing struct pages in increments of
1833  * PAGES_PER_SECTION * sizeof(struct page) bytes.
1834  *
1835  * Return true when zone was grown, otherwise return false. We return true even
1836  * when we grow less than requested, to let the caller decide if there are
1837  * enough pages to satisfy the allocation.
1838  *
1839  * Note: We use noinline because this function is needed only during boot, and
1840  * it is called from a __ref function, _deferred_grow_zone. This way we make
1841  * sure that it is not inlined into the permanent text section.
1842  */
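/*
 * Example (illustrative): on x86-64 with 128 MiB sections, PAGES_PER_SECTION
 * is 32768, so even an order-0 request rounds nr_pages_needed up to a full
 * section and the zone grows by at least that many initialized pages, unless
 * it runs out of deferred ranges first.
 */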
1843 static noinline bool __init
1844 deferred_grow_zone(struct zone *zone, unsigned int order)
1845 {
1846         unsigned long nr_pages_needed = ALIGN(1 << order, PAGES_PER_SECTION);
1847         pg_data_t *pgdat = zone->zone_pgdat;
1848         unsigned long first_deferred_pfn = pgdat->first_deferred_pfn;
1849         unsigned long spfn, epfn, flags;
1850         unsigned long nr_pages = 0;
1851         u64 i;
1852 
1853         /* Only the last zone may have deferred pages */
1854         if (zone_end_pfn(zone) != pgdat_end_pfn(pgdat))
1855                 return false;
1856 
1857         pgdat_resize_lock(pgdat, &flags);
1858 
1859         /*
1860          * If deferred pages have been initialized while we were waiting for
1861          * the lock, return true, as the zone was grown.  The caller will retry
1862          * this zone.  We won't return to this function since the caller also
1863          * has this static branch.
1864          */
1865         if (!static_branch_unlikely(&deferred_pages)) {
1866                 pgdat_resize_unlock(pgdat, &flags);
1867                 return true;
1868         }
1869 
1870         /*
1871          * If someone grew this zone while we were waiting for spinlock, return
1872          * true, as there might be enough pages already.
1873          */
1874         if (first_deferred_pfn != pgdat->first_deferred_pfn) {
1875                 pgdat_resize_unlock(pgdat, &flags);
1876                 return true;
1877         }
1878 
1879         /* If the zone is empty somebody else may have cleared out the zone */
1880         if (!deferred_init_mem_pfn_range_in_zone(&i, zone, &spfn, &epfn,
1881                                                  first_deferred_pfn)) {
1882                 pgdat->first_deferred_pfn = ULONG_MAX;
1883                 pgdat_resize_unlock(pgdat, &flags);
1884                 /* Retry only once. */
1885                 return first_deferred_pfn != ULONG_MAX;
1886         }
1887 
1888         /*
1889          * Initialize and free pages in MAX_ORDER sized increments so
1890          * that we can avoid introducing any issues with the buddy
1891          * allocator.
1892          */
1893         while (spfn < epfn) {
1894                 /* update our first deferred PFN for this section */
1895                 first_deferred_pfn = spfn;
1896 
1897                 nr_pages += deferred_init_maxorder(&i, zone, &spfn, &epfn);
1898 
1899                 /* We should only stop along section boundaries */
1900                 if ((first_deferred_pfn ^ spfn) < PAGES_PER_SECTION)
1901                         continue;
1902 
1903                 /* If our quota has been met we can stop here */
1904                 if (nr_pages >= nr_pages_needed)
1905                         break;
1906         }
1907 
1908         pgdat->first_deferred_pfn = spfn;
1909         pgdat_resize_unlock(pgdat, &flags);
1910 
1911         return nr_pages > 0;
1912 }
1913 
1914 /*
1915  * deferred_grow_zone() is __init, but it is called from
1916  * get_page_from_freelist() during early boot until deferred_pages permanently
1917  * disables this call. This is why we have a __ref wrapper to avoid a section
1918  * mismatch warning, and to ensure that the function body gets unloaded.
1919  */
1920 static bool __ref
1921 _deferred_grow_zone(struct zone *zone, unsigned int order)
1922 {
1923         return deferred_grow_zone(zone, order);
1924 }
1925 
1926 #endif /* CONFIG_DEFERRED_STRUCT_PAGE_INIT */
1927 
1928 void __init page_alloc_init_late(void)
1929 {
1930         struct zone *zone;
1931         int nid;
1932 
1933 #ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
1934 
1935         /* There will be num_node_state(N_MEMORY) threads */
1936         atomic_set(&pgdat_init_n_undone, num_node_state(N_MEMORY));
1937         for_each_node_state(nid, N_MEMORY) {
1938                 kthread_run(deferred_init_memmap, NODE_DATA(nid), "pgdatinit%d", nid);
1939         }
1940 
1941         /* Block until all are initialised */
1942         wait_for_completion(&pgdat_init_all_done_comp);
1943 
1944         /*
1945          * The number of managed pages has changed due to the initialisation
1946          * so the pcpu batch and high limits need to be updated or the limits
1947          * will be artificially small.
1948          */
1949         for_each_populated_zone(zone)
1950                 zone_pcp_update(zone);
1951 
1952         /*
1953          * We initialized the rest of the deferred pages.  Permanently disable
1954          * on-demand struct page initialization.
1955          */
1956         static_branch_disable(&deferred_pages);
1957 
1958         /* Reinit limits that are based on free pages after the kernel is up */
1959         files_maxfiles_init();
1960 #endif
1961 
1962         /* Discard memblock private memory */
1963         memblock_discard();
1964 
1965         for_each_node_state(nid, N_MEMORY)
1966                 shuffle_free_memory(NODE_DATA(nid));
1967 
1968         for_each_populated_zone(zone)
1969                 set_zone_contiguous(zone);
1970 }
1971 
1972 #ifdef CONFIG_CMA
1973 /* Free whole pageblock and set its migration type to MIGRATE_CMA. */
1974 void __init init_cma_reserved_pageblock(struct page *page)
1975 {
1976         unsigned i = pageblock_nr_pages;
1977         struct page *p = page;
1978 
1979         do {
1980                 __ClearPageReserved(p);
1981                 set_page_count(p, 0);
1982         } while (++p, --i);
1983 
1984         set_pageblock_migratetype(page, MIGRATE_CMA);
1985 
1986         if (pageblock_order >= MAX_ORDER) {
1987                 i = pageblock_nr_pages;
1988                 p = page;
1989                 do {
1990                         set_page_refcounted(p);
1991                         __free_pages(p, MAX_ORDER - 1);
1992                         p += MAX_ORDER_NR_PAGES;
1993                 } while (i -= MAX_ORDER_NR_PAGES);
1994         } else {
1995                 set_page_refcounted(page);
1996                 __free_pages(page, pageblock_order);
1997         }
1998 
1999         adjust_managed_page_count(page, pageblock_nr_pages);
2000 }
2001 #endif
2002 
2003 /*
2004  * The order of subdivision here is critical for the IO subsystem.
2005  * Please do not alter this order without good reasons and regression
2006  * testing. Specifically, as large blocks of memory are subdivided,
2007  * the order in which smaller blocks are delivered depends on the order
2008  * they're subdivided in this function. This is the primary factor
2009  * influencing the order in which pages are delivered to the IO
2010  * subsystem according to empirical testing, and this is also justified
2011  * by considering the behavior of a buddy system containing a single
2012  * large block of memory acted on by a series of small allocations.
2013  * This behavior is a critical factor in sglist merging's success.
2014  *
2015  * -- nyc
2016  */
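/*
 * Worked example (illustrative, added note): splitting an order-3 block to
 * satisfy an order-0 request, expand() puts the upper halves back on the free
 * lists as buddies of order 2, 1 and 0, and the caller keeps the remaining
 * order-0 page at the start of the block.
 */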
2017 static inline void expand(struct zone *zone, struct page *page,
2018         int low, int high, struct free_area *area,
2019         int migratetype)
2020 {
2021         unsigned long size = 1 << high;
2022 
2023         while (high > low) {
2024                 area--;
2025                 high--;
2026                 size >>= 1;
2027                 VM_BUG_ON_PAGE(bad_range(zone, &page[size]), &page[size]);
2028 
2029                 /*
2030                  * Mark as guard pages (or a guard page), so they can be
2031                  * merged back into the allocator when the buddy is freed.
2032                  * The corresponding page table entries will not be touched;
2033                  * the pages stay not-present in the virtual address space.
2034                  */
2035                 if (set_page_guard(zone, &page[size], high, migratetype))
2036                         continue;
2037 
2038                 add_to_free_area(&page[size], area, migratetype);
2039                 set_page_order(&page[size], high);
2040         }
2041 }
2042 
2043 static void check_new_page_bad(struct page *page)
2044 {
2045         const char *bad_reason = NULL;
2046         unsigned long bad_flags = 0;
2047 
2048         if (unlikely(atomic_read(&page->_mapcount) != -1))
2049                 bad_reason = "nonzero mapcount";
2050         if (unlikely(page->mapping != NULL))
2051                 bad_reason = "non-NULL mapping";
2052         if (unlikely(page_ref_count(page) != 0))
2053                 bad_reason = "nonzero _refcount";
2054         if (unlikely(page->flags & __PG_HWPOISON)) {
2055                 bad_reason = "HWPoisoned (hardware-corrupted)";
2056                 bad_flags = __PG_HWPOISON;
2057                 /* Don't complain about hwpoisoned pages */
2058                 page_mapcount_reset(page); /* remove PageBuddy */
2059                 return;
2060         }
2061         if (unlikely(page->flags & PAGE_FLAGS_CHECK_AT_PREP)) {
2062                 bad_reason = "PAGE_FLAGS_CHECK_AT_PREP flag set";
2063                 bad_flags = PAGE_FLAGS_CHECK_AT_PREP;
2064         }
2065 #ifdef CONFIG_MEMCG
2066         if (unlikely(page->mem_cgroup))
2067                 bad_reason = "page still charged to cgroup";
2068 #endif
2069         bad_page(page, bad_reason, bad_flags);
2070 }
2071 
2072 /*
2073  * This page is about to be returned from the page allocator
2074  */
2075 static inline int check_new_page(struct page *page)
2076 {
2077         if (likely(page_expected_state(page,
2078                                 PAGE_FLAGS_CHECK_AT_PREP|__PG_HWPOISON)))
2079                 return 0;
2080 
2081         check_new_page_bad(page);
2082         return 1;
2083 }
2084 
2085 static inline bool free_pages_prezeroed(void)
2086 {
2087         return (IS_ENABLED(CONFIG_PAGE_POISONING_ZERO) &&
2088                 page_poisoning_enabled()) || want_init_on_free();
2089 }
2090 
2091 #ifdef CONFIG_DEBUG_VM
2092 /*
2093  * With DEBUG_VM enabled, order-0 pages are checked for expected state when
2094  * being allocated from pcp lists. With debug_pagealloc also enabled, they are
2095  * checked as well when pcp lists are refilled from the free lists.
2096  */
2097 static inline bool check_pcp_refill(struct page *page)
2098 {
2099         if (debug_pagealloc_enabled_static())
2100                 return check_new_page(page);
2101         else
2102                 return false;
2103 }
2104 
2105 static inline bool check_new_pcp(struct page *page)
2106 {
2107         return check_new_page(page);
2108 }
2109 #else
2110 /*
2111  * With DEBUG_VM disabled, free order-0 pages are checked for expected state
2112  * when pcp lists are being refilled from the free lists. With debug_pagealloc
2113  * enabled, they are also checked when being allocated from the pcp lists.
2114  */
2115 static inline bool check_pcp_refill(struct page *page)
2116 {
2117         return check_new_page(page);
2118 }
2119 static inline bool check_new_pcp(struct page *page)
2120 {
2121         if (debug_pagealloc_enabled_static())
2122                 return check_new_page(page);
2123         else
2124                 return false;
2125 }
2126 #endif /* CONFIG_DEBUG_VM */
2127 
2128 static bool check_new_pages(struct page *page, unsigned int order)
2129 {
2130         int i;
2131         for (i = 0; i < (1 << order); i++) {
2132                 struct page *p = page + i;
2133 
2134                 if (unlikely(check_new_page(p)))
2135                         return true;
2136         }
2137 
2138         return false;
2139 }
2140 
2141 inline void post_alloc_hook(struct page *page, unsigned int order,
2142                                 gfp_t gfp_flags)
2143 {
2144         set_page_private(page, 0);
2145         set_page_refcounted(page);
2146 
2147         arch_alloc_page(page, order);
2148         if (debug_pagealloc_enabled_static())
2149                 kernel_map_pages(page, 1 << order, 1);
2150         kasan_alloc_pages(page, order);
2151         kernel_poison_pages(page, 1 << order, 1);
2152         set_page_owner(page, order, gfp_flags);
2153 }
2154 
2155 static void prep_new_page(struct page *page, unsigned int order, gfp_t gfp_flags,
2156                                                         unsigned int alloc_flags)
2157 {
2158         post_alloc_hook(page, order, gfp_flags);
2159 
2160         if (!free_pages_prezeroed() && want_init_on_alloc(gfp_flags))
2161                 kernel_init_free_pages(page, 1 << order);
2162 
2163         if (order && (gfp_flags & __GFP_COMP))
2164                 prep_compound_page(page, order);
2165 
2166         /*
2167          * page is set pfmemalloc when ALLOC_NO_WATERMARKS was necessary to
2168          * allocate the page. The expectation is that the caller is taking
2169          * steps that will free more memory. The caller should avoid the page
2170          * being used for !PFMEMALLOC purposes.
2171          */
2172         if (alloc_flags & ALLOC_NO_WATERMARKS)
2173                 set_page_pfmemalloc(page);
2174         else
2175                 clear_page_pfmemalloc(page);
2176 }
2177 
2178 /*
2179  * Go through the free lists for the given migratetype and remove
2180  * the smallest available page from the freelists
2181  */
2182 static __always_inline
2183 struct page *__rmqueue_smallest(struct zone *zone, unsigned int order,
2184                                                 int migratetype)
2185 {
2186         unsigned int current_order;
2187         struct free_area *area;
2188         struct page *page;
2189 
2190         /* Find a page of the appropriate size in the preferred list */
2191         for (current_order = order; current_order < MAX_ORDER; ++current_order) {
2192                 area = &(zone->free_area[current_order]);
2193                 page = get_page_from_free_area(area, migratetype);
2194                 if (!page)
2195                         continue;
2196                 del_page_from_free_area(page, area);
2197                 expand(zone, page, order, current_order, area, migratetype);
2198                 set_pcppage_migratetype(page, migratetype);
2199                 return page;
2200         }
2201 
2202         return NULL;
2203 }
2204 
2205 
2206 /*
2207  * This array describes the order in which free lists are fallen back to when
2208  * the free lists for the desired migratetype are depleted.
2209  */
2210 static int fallbacks[MIGRATE_TYPES][4] = {
2211         [MIGRATE_UNMOVABLE]   = { MIGRATE_RECLAIMABLE, MIGRATE_MOVABLE,   MIGRATE_TYPES },
2212         [MIGRATE_MOVABLE]     = { MIGRATE_RECLAIMABLE, MIGRATE_UNMOVABLE, MIGRATE_TYPES },
2213         [MIGRATE_RECLAIMABLE] = { MIGRATE_UNMOVABLE,   MIGRATE_MOVABLE,   MIGRATE_TYPES },
2214 #ifdef CONFIG_CMA
2215         [MIGRATE_CMA]         = { MIGRATE_TYPES }, /* Never used */
2216 #endif
2217 #ifdef CONFIG_MEMORY_ISOLATION
2218         [MIGRATE_ISOLATE]     = { MIGRATE_TYPES }, /* Never used */
2219 #endif
2220 };
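/*
 * Reading the table above (added note): an UNMOVABLE allocation whose own free
 * lists are empty tries MIGRATE_RECLAIMABLE first, then MIGRATE_MOVABLE;
 * MIGRATE_TYPES serves as the end-of-list sentinel that
 * find_suitable_fallback() stops at.
 */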
2221 
2222 #ifdef CONFIG_CMA
2223 static __always_inline struct page *__rmqueue_cma_fallback(struct zone *zone,
2224                                         unsigned int order)
2225 {
2226         return __rmqueue_smallest(zone, order, MIGRATE_CMA);
2227 }
2228 #else
2229 static inline struct page *__rmqueue_cma_fallback(struct zone *zone,
2230                                         unsigned int order) { return NULL; }
2231 #endif
2232 
2233 /*
2234  * Move the free pages in a range to the free lists of the requested type.
2235  * Note that start_page and end_page are not aligned on a pageblock
2236  * boundary. If alignment is required, use move_freepages_block()
2237  */
2238 static int move_freepages(struct zone *zone,
2239                           struct page *start_page, struct page *end_page,
2240                           int migratetype, int *num_movable)
2241 {
2242         struct page *page;
2243         unsigned int order;
2244         int pages_moved = 0;
2245 
2246         for (page = start_page; page <= end_page;) {
2247                 if (!pfn_valid_within(page_to_pfn(page))) {
2248                         page++;
2249                         continue;
2250                 }
2251 
2252                 if (!PageBuddy(page)) {
2253                         /*
2254                          * We assume that pages that could be isolated for
2255                          * migration are movable. But we don't actually try
2256                          * isolating, as that would be expensive.
2257                          */
2258                         if (num_movable &&
2259                                         (PageLRU(page) || __PageMovable(page)))
2260                                 (*num_movable)++;
2261 
2262                         page++;
2263                         continue;
2264                 }
2265 
2266                 /* Make sure we are not inadvertently changing nodes */
2267                 VM_BUG_ON_PAGE(page_to_nid(page) != zone_to_nid(zone), page);
2268                 VM_BUG_ON_PAGE(page_zone(page) != zone, page);
2269 
2270                 order = page_order(page);
2271                 move_to_free_area(page, &zone->free_area[order], migratetype);
2272                 page += 1 << order;
2273                 pages_moved += 1 << order;
2274         }
2275 
2276         return pages_moved;
2277 }
2278 
2279 int move_freepages_block(struct zone *zone, struct page *page,
2280                                 int migratetype, int *num_movable)
2281 {
2282         unsigned long start_pfn, end_pfn;
2283         struct page *start_page, *end_page;
2284 
2285         if (num_movable)
2286                 *num_movable = 0;
2287 
2288         start_pfn = page_to_pfn(page);
2289         start_pfn = start_pfn & ~(pageblock_nr_pages-1);
2290         start_page = pfn_to_page(start_pfn);
2291         end_page = start_page + pageblock_nr_pages - 1;
2292         end_pfn = start_pfn + pageblock_nr_pages - 1;
2293 
2294         /* Do not cross zone boundaries */
2295         if (!zone_spans_pfn(zone, start_pfn))
2296                 start_page = page;
2297         if (!zone_spans_pfn(zone, end_pfn))
2298                 return 0;
2299 
2300         return move_freepages(zone, start_page, end_page, migratetype,
2301                                                                 num_movable);
2302 }
2303 
2304 static void change_pageblock_range(struct page *pageblock_page,
2305                                         int start_order, int migratetype)
2306 {
2307         int nr_pageblocks = 1 << (start_order - pageblock_order);
2308 
2309         while (nr_pageblocks--) {
2310                 set_pageblock_migratetype(pageblock_page, migratetype);
2311                 pageblock_page += pageblock_nr_pages;
2312         }
2313 }
2314 
2315 /*
2316  * When we are falling back to another migratetype during allocation, try to
2317  * steal extra free pages from the same pageblocks to satisfy further
2318  * allocations, instead of polluting multiple pageblocks.
2319  *
2320  * If we are stealing a relatively large buddy page, it is likely there will
2321  * be more free pages in the pageblock, so try to steal them all. For
2322  * reclaimable and unmovable allocations, we steal regardless of page size,
2323  * as fragmentation caused by those allocations polluting movable pageblocks
2324  * is worse than movable allocations stealing from unmovable and reclaimable
2325  * pageblocks.
2326  */
2327 static bool can_steal_fallback(unsigned int order, int start_mt)
2328 {
2329         /*
2330          * This order check is intentionally kept even though the next
2331          * check uses a relaxed order. The reason is that we can actually
2332          * steal the whole pageblock if this condition is met, but the
2333          * check below doesn't guarantee it and is just a heuristic, so it
2334          * could be changed anytime.
2335          */
2336         if (order >= pageblock_order)
2337                 return true;
2338 
2339         if (order >= pageblock_order / 2 ||
2340                 start_mt == MIGRATE_RECLAIMABLE ||
2341                 start_mt == MIGRATE_UNMOVABLE ||
2342                 page_group_by_mobility_disabled)
2343                 return true;
2344 
2345         return false;
2346 }
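/*
 * Example (illustrative, assuming pageblock_order == 9): an order-4 or larger
 * fallback, or any RECLAIMABLE or UNMOVABLE request, is considered worth
 * stealing the whole pageblock for.
 */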
2347 
2348 static inline void boost_watermark(struct zone *zone)
2349 {
2350         unsigned long max_boost;
2351 
2352         if (!watermark_boost_factor)
2353                 return;
2354         /*
2355          * Don't bother in zones that are unlikely to produce results.
2356          * On small machines, including kdump capture kernels running
2357          * in a small area, boosting the watermark can cause an out of
2358          * memory situation immediately.
2359          */
2360         if ((pageblock_nr_pages * 4) > zone_managed_pages(zone))
2361                 return;
2362 
2363         max_boost = mult_frac(zone->_watermark[WMARK_HIGH],
2364                         watermark_boost_factor, 10000);
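        /*
         * Example (illustrative): with the default watermark_boost_factor of
         * 15000 and a high watermark of 4096 pages, max_boost is
         * 4096 * 15000 / 10000 = 6144 pages, i.e. 150% of the high watermark.
         */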
2365 
2366         /*
2367          * The high watermark may be uninitialised if fragmentation occurs
2368          * very early in boot so do not boost. We do not fall
2369          * through and boost by pageblock_nr_pages because failing
2370          * allocations that early means that reclaim is not going
2371          * to help, and it may even be impossible to reclaim the
2372          * boosted watermark, resulting in a hang.
2373          */
2374         if (!max_boost)
2375                 return;
2376 
2377         max_boost = max(pageblock_nr_pages, max_boost);
2378 
2379         zone->watermark_boost = min(zone->watermark_boost + pageblock_nr_pages,
2380                 max_boost);
2381 }
2382 
2383 /*
2384  * This function implements actual steal behaviour. If order is large enough,
2385  * we can steal whole pageblock. If not, we first move freepages in this
2386  * pageblock to our migratetype and determine how many already-allocated pages
2387  * are there in the pageblock with a compatible migratetype. If at least half
2388  * of pages are free or compatible, we can change migratetype of the pageblock
2389  * itself, so pages freed in the future will be put on the correct free list.
2390  */
2391 static void steal_suitable_fallback(struct zone *zone, struct page *page,
2392                 unsigned int alloc_flags, int start_type, bool whole_block)
2393 {
2394         unsigned int current_order = page_order(page);
2395         struct free_area *area;
2396         int free_pages, movable_pages, alike_pages;
2397         int old_block_type;
2398 
2399         old_block_type = get_pageblock_migratetype(page);
2400 
2401         /*
2402          * This can happen due to races and we want to prevent broken
2403          * highatomic accounting.
2404          */
2405         if (is_migrate_highatomic(old_block_type))
2406                 goto single_page;
2407 
2408         /* Take ownership for orders >= pageblock_order */
2409         if (current_order >= pageblock_order) {
2410                 change_pageblock_range(page, current_order, start_type);
2411                 goto single_page;
2412         }
2413 
2414         /*
2415          * Boost watermarks to increase reclaim pressure to reduce the
2416          * likelihood of future fallbacks. Wake kswapd now as the node
2417          * may be balanced overall and kswapd will not wake naturally.
2418          */
2419         boost_watermark(zone);
2420         if (alloc_flags & ALLOC_KSWAPD)
2421                 set_bit(ZONE_BOOSTED_WATERMARK, &zone->flags);
2422 
2423         /* We are not allowed to try stealing from the whole block */
2424         if (!whole_block)
2425                 goto single_page;
2426 
2427         free_pages = move_freepages_block(zone, page, start_type,
2428                                                 &movable_pages);
2429         /*
2430          * Determine how many pages are compatible with our allocation.
2431          * For movable allocation, it's the number of movable pages which
2432          * we just obtained. For other types it's a bit more tricky.
2433          */
2434         if (start_type == MIGRATE_MOVABLE) {
2435                 alike_pages = movable_pages;
2436         } else {
2437                 /*
2438                  * If we are falling back a RECLAIMABLE or UNMOVABLE allocation
2439                  * to MOVABLE pageblock, consider all non-movable pages as
2440                  * compatible. If it's UNMOVABLE falling back to RECLAIMABLE or
2441                  * vice versa, be conservative since we can't distinguish the
2442                  * exact migratetype of non-movable pages.
2443                  */
2444                 if (old_block_type == MIGRATE_MOVABLE)
2445                         alike_pages = pageblock_nr_pages
2446                                                 - (free_pages + movable_pages);
2447                 else
2448                         alike_pages = 0;
2449         }
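        /*
         * Example (illustrative, assuming a 512-page pageblock): stealing for
         * an UNMOVABLE allocation from a MOVABLE block with 100 free and 300
         * movable pages gives alike_pages = 512 - (100 + 300) = 112; since
         * 100 + 112 < 256, the check below leaves the block's migratetype as
         * MOVABLE.
         */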
2450 
2451         /* moving whole block can fail due to zone boundary conditions */
2452         if (!free_pages)
2453                 goto single_page;
2454 
2455         /*
2456          * If a sufficient number of pages in the block are either free or of
2457          * comparable migratability as our allocation, claim the whole block.
2458          */
2459         if (free_pages + alike_pages >= (1 << (pageblock_order-1)) ||
2460                         page_group_by_mobility_disabled)
2461                 set_pageblock_migratetype(page, start_type);
2462 
2463         return;
2464 
2465 single_page:
2466         area = &zone->free_area[current_order];
2467         move_to_free_area(page, area, start_type);
2468 }
2469 
2470 /*
2471  * Check whether there is a suitable fallback freepage with requested order.
2472  * If only_stealable is true, this function returns fallback_mt only if
2473  * we can steal other freepages all together. This would help to reduce
2474  * fragmentation due to mixed migratetype pages in one pageblock.
2475  */
2476 int find_suitable_fallback(struct free_area *area, unsigned int order,
2477                         int migratetype, bool only_stealable, bool *can_steal)
2478 {
2479         int i;
2480         int fallback_mt;
2481 
2482         if (area->nr_free == 0)
2483                 return -1;
2484 
2485         *can_steal = false;
2486         for (i = 0;; i++) {
2487                 fallback_mt = fallbacks[migratetype][i];
2488                 if (fallback_mt == MIGRATE_TYPES)
2489                         break;
2490 
2491                 if (free_area_empty(area, fallback_mt))
2492                         continue;
2493 
2494                 if (can_steal_fallback(order, migratetype))
2495                         *can_steal = true;
2496 
2497                 if (!only_stealable)
2498                         return fallback_mt;
2499 
2500                 if (*can_steal)
2501                         return fallback_mt;
2502         }
2503 
2504         return -1;
2505 }
2506 
2507 /*
2508  * Reserve a pageblock for exclusive use of high-order atomic allocations if
2509  * there are no empty page blocks that contain a page with a suitable order
2510  */
2511 static void reserve_highatomic_pageblock(struct page *page, struct zone *zone,
2512                                 unsigned int alloc_order)
2513 {
2514         int mt;
2515         unsigned long max_managed, flags;
2516 
2517         /*
2518          * Limit the number reserved to 1 pageblock or roughly 1% of a zone.
2519          * Check is race-prone but harmless.
2520          */
2521         max_managed = (zone_managed_pages(zone) / 100) + pageblock_nr_pages;
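        /*
         * Example (illustrative): a zone managing 4,000,000 pages with
         * 512-page pageblocks gets max_managed = 40,000 + 512, so the
         * highatomic reserve is capped at roughly 1% of the zone.
         */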
2522         if (zone->nr_reserved_highatomic >= max_managed)
2523                 return;
2524 
2525         spin_lock_irqsave(&zone->lock, flags);
2526 
2527         /* Recheck the nr_reserved_highatomic limit under the lock */
2528         if (zone->nr_reserved_highatomic >= max_managed)
2529                 goto out_unlock;
2530 
2531         /* Yoink! */
2532         mt = get_pageblock_migratetype(page);
2533         if (!is_migrate_highatomic(mt) && !is_migrate_isolate(mt)
2534             && !is_migrate_cma(mt)) {
2535                 zone->nr_reserved_highatomic += pageblock_nr_pages;
2536                 set_pageblock_migratetype(page, MIGRATE_HIGHATOMIC);
2537                 move_freepages_block(zone, page, MIGRATE_HIGHATOMIC, NULL);
2538         }
2539 
2540 out_unlock:
2541         spin_unlock_irqrestore(&zone->lock, flags);
2542 }
2543 
2544 /*
2545  * Used when an allocation is about to fail under memory pressure. This
2546  * potentially hurts the reliability of high-order allocations when under
2547  * intense memory pressure but failed atomic allocations should be easier
2548  * to recover from than an OOM.
2549  *
2550  * If @force is true, try to unreserve a pageblock even though highatomic
2551  * pageblock is exhausted.
2552  */
2553 static bool unreserve_highatomic_pageblock(const struct alloc_context *ac,
2554                                                 bool force)
2555 {
2556         struct zonelist *zonelist = ac->zonelist;
2557         unsigned long flags;
2558         struct zoneref *z;
2559         struct zone *zone;
2560         struct page *page;
2561         int order;
2562         bool ret;
2563 
2564         for_each_zone_zonelist_nodemask(zone, z, zonelist, ac->high_zoneidx,
2565                                                                 ac->nodemask) {
2566                 /*
2567                  * Preserve at least one pageblock unless memory pressure
2568                  * is really high.
2569                  */
2570                 if (!force && zone->nr_reserved_highatomic <=
2571                                         pageblock_nr_pages)
2572                         continue;
2573 
2574                 spin_lock_irqsave(&zone->lock, flags);
2575                 for (order = 0; order < MAX_ORDER; order++) {
2576                         struct free_area *area = &(zone->free_area[order]);
2577 
2578                         page = get_page_from_free_area(area, MIGRATE_HIGHATOMIC);
2579                         if (!page)
2580                                 continue;
2581 
2582                         /*
2583                          * In the page freeing path, the migratetype change
2584                          * is racy, so we can encounter several free pages in
2585                          * a pageblock in this loop although we changed the
2586                          * pageblock type from highatomic to ac->migratetype.
2587                          * So we should adjust the count only once.
2588                          */
2589                         if (is_migrate_highatomic_page(page)) {
2590                                 /*
2591                                  * It should never happen but changes to
2592                                  * locking could inadvertently allow a per-cpu
2593                                  * drain to add pages to MIGRATE_HIGHATOMIC
2594                                  * while unreserving so be safe and watch for
2595                                  * underflows.
2596                                  */
2597                                 zone->nr_reserved_highatomic -= min(
2598                                                 pageblock_nr_pages,
2599                                                 zone->nr_reserved_highatomic);
2600                         }
2601 
2602                         /*
2603                          * Convert to ac->migratetype and avoid the normal
2604                          * pageblock stealing heuristics. Minimally, the caller
2605                          * is doing the work and needs the pages. More
2606                          * importantly, if the block was always converted to
2607                          * MIGRATE_UNMOVABLE or another type then the number
2608                          * of pageblocks that cannot be completely freed
2609                          * may increase.
2610                          */
2611                         set_pageblock_migratetype(page, ac->migratetype);
2612                         ret = move_freepages_block(zone, page, ac->migratetype,
2613                                                                         NULL);
2614                         if (ret) {
2615                                 spin_unlock_irqrestore(&zone->lock, flags);
2616                                 return ret;
2617                         }
2618                 }
2619                 spin_unlock_irqrestore(&zone->lock, flags);
2620         }
2621 
2622         return false;
2623 }
2624 
2625 /*
2626  * Try finding a free buddy page on the fallback list and put it on the free
2627  * list of requested migratetype, possibly along with other pages from the same
2628  * block, depending on fragmentation avoidance heuristics. Returns true if
2629  * fallback was found so that __rmqueue_smallest() can grab it.
2630  *
2631  * The use of signed ints for order and current_order is a deliberate
2632  * deviation from the rest of this file, to make the for loop
2633  * condition simpler.
2634  */
2635 static __always_inline bool
2636 __rmqueue_fallback(struct zone *zone, int order, int start_migratetype,
2637                                                 unsigned int alloc_flags)
2638 {
2639         struct free_area *area;
2640         int current_order;
2641         int min_order = order;
2642         struct page *page;
2643         int fallback_mt;
2644         bool can_steal;
2645 
2646         /*
2647          * Do not steal pages from freelists belonging to other pageblocks
2648          * i.e. orders < pageblock_order. If there are no local zones free,
2649          * the zonelists will be reiterated without ALLOC_NOFRAGMENT.
2650          */
2651         if (alloc_flags & ALLOC_NOFRAGMENT)
2652                 min_order = pageblock_order;
2653 
2654         /*
2655          * Find the largest available free page in the other list. This roughly
2656          * approximates finding the pageblock with the most free pages, which
2657          * would be too costly to do exactly.
2658          */
2659         for (current_order = MAX_ORDER - 1; current_order >= min_order;
2660                                 --current_order) {
2661                 area = &(zone->free_area[current_order]);
2662                 fallback_mt = find_suitable_fallback(area, current_order,
2663                                 start_migratetype, false, &can_steal);
2664                 if (fallback_mt == -1)
2665                         continue;
2666 
2667                 /*
2668                  * If we cannot steal all free pages from the pageblock and the
2669                  * requested migratetype is movable, it's better to steal and
2670                  * split the smallest available page instead of the largest
2671                  * available one, because even if the next movable allocation
2672                  * falls back into a different pageblock than this one, it
2673                  * won't cause permanent fragmentation.
2674                  */
2675                 if (!can_steal && start_migratetype == MIGRATE_MOVABLE
2676                                         && current_order > order)
2677                         goto find_smallest;
2678 
2679                 goto do_steal;
2680         }
2681 
2682         return false;
2683 
2684 find_smallest:
2685         for (current_order = order; current_order < MAX_ORDER;
2686                                                         current_order++) {
2687                 area = &(zone->free_area[current_order]);
2688                 fallback_mt = find_suitable_fallback(area, current_order,
2689                                 start_migratetype, false, &can_steal);
2690                 if (fallback_mt != -1)
2691                         break;
2692         }
2693 
2694         /*
2695          * This should not happen - we already found a suitable fallback
2696          * when looking for the largest page.
2697          */
2698         VM_BUG_ON(current_order == MAX_ORDER);
2699 
2700 do_steal:
2701         page = get_page_from_free_area(area, fallback_mt);
2702 
2703         steal_suitable_fallback(zone, page, alloc_flags, start_migratetype,
2704                                                                 can_steal);
2705 
2706         trace_mm_page_alloc_extfrag(page, order, current_order,
2707                 start_migratetype, fallback_mt);
2708 
2709         return true;
2710 
2711 }
2712 
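/*
 * Editor's illustration (not part of the kernel source): on a common
 * x86-64 configuration with MAX_ORDER == 11 and pageblock_order == 9,
 * an order-3 MIGRATE_UNMOVABLE request that carries ALLOC_NOFRAGMENT
 * only visits the order-10 and order-9 fallback lists in the scan in
 * __rmqueue_fallback() above, because min_order is raised to
 * pageblock_order; without ALLOC_NOFRAGMENT the same request scans
 * orders 10 down to 3 and may steal from inside a foreign pageblock.
 */
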
2713 /*
2714  * Do the hard work of removing an element from the buddy allocator.
2715  * Call me with the zone->lock already held.
2716  */
2717 static __always_inline struct page *
2718 __rmqueue(struct zone *zone, unsigned int order, int migratetype,
2719                                                 unsigned int alloc_flags)
2720 {
2721         struct page *page;
2722 
2723 retry:
2724         page = __rmqueue_smallest(zone, order, migratetype);
2725         if (unlikely(!page)) {
2726                 if (migratetype == MIGRATE_MOVABLE)
2727                         page = __rmqueue_cma_fallback(zone, order);
2728 
2729                 if (!page && __rmqueue_fallback(zone, order, migratetype,
2730                                                                 alloc_flags))
2731                         goto retry;
2732         }
2733 
2734         trace_mm_page_alloc_zone_locked(page, order, migratetype);
2735         return page;
2736 }
2737 
2738 /*
2739  * Obtain a specified number of elements from the buddy allocator, all under
2740  * a single hold of the lock, for efficiency.  Add them to the supplied list.
2741  * Returns the number of new pages which were placed at *list.
2742  */
2743 static int rmqueue_bulk(struct zone *zone, unsigned int order,
2744                         unsigned long count, struct list_head *list,
2745                         int migratetype, unsigned int alloc_flags)
2746 {
2747         int i, alloced = 0;
2748 
2749         spin_lock(&zone->lock);
2750         for (i = 0; i < count; ++i) {
2751                 struct page *page = __rmqueue(zone, order, migratetype,
2752                                                                 alloc_flags);
2753                 if (unlikely(page == NULL))
2754                         break;
2755 
2756                 if (unlikely(check_pcp_refill(page)))
2757                         continue;
2758 
2759                 /*
2760                  * Split buddy pages returned by expand() are received here in
2761                  * physical page order. Each page is added to the tail of the
2762                  * caller's list, so from the caller's perspective the linked
2763                  * list is ordered by page number under some conditions. This
2764                  * helps IO devices that walk the list in order from the head,
2765                  * and thus also in physical page order, and that can merge IO
2766                  * requests when the physical pages they cover are ordered
2767                  * properly.
2768                  */
2769                 list_add_tail(&page->lru, list);
2770                 alloced++;
2771                 if (is_migrate_cma(get_pcppage_migratetype(page)))
2772                         __mod_zone_page_state(zone, NR_FREE_CMA_PAGES,
2773                                               -(1 << order));
2774         }
2775 
2776         /*
2777          * i pages were removed from the buddy list even if some leak due
2778          * to check_pcp_refill failing so adjust NR_FREE_PAGES based
2779          * on i. Do not confuse with 'alloced' which is the number of
2780          * pages added to the pcp list.
2781          */
2782         __mod_zone_page_state(zone, NR_FREE_PAGES, -(i << order));
2783         spin_unlock(&zone->lock);
2784         return alloced;
2785 }
2786 
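/*
 * Editor's illustration (not part of the kernel source): if a pcp refill
 * asks for count == 31 order-0 pages and two of them are rejected by
 * check_pcp_refill(), then i == 31 pages left the buddy lists (so
 * NR_FREE_PAGES is decreased by 31) while alloced == 29 pages were
 * actually added to the caller's pcp list; pcp->count must therefore be
 * advanced by the return value, not by the requested count.
 */
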
2787 #ifdef CONFIG_NUMA
2788 /*
2789  * Called from the vmstat counter updater to drain pagesets of this
2790  * currently executing processor on remote nodes after they have
2791  * expired.
2792  *
2793  * Note that this function must be called with the thread pinned to
2794  * a single processor.
2795  */
2796 void drain_zone_pages(struct zone *zone, struct per_cpu_pages *pcp)
2797 {
2798         unsigned long flags;
2799         int to_drain, batch;
2800 
2801         local_irq_save(flags);
2802         batch = READ_ONCE(pcp->batch);
2803         to_drain = min(pcp->count, batch);
2804         if (to_drain > 0)
2805                 free_pcppages_bulk(zone, to_drain, pcp);
2806         local_irq_restore(flags);
2807 }
2808 #endif
2809 
2810 /*
2811  * Drain pcplists of the indicated processor and zone.
2812  *
2813  * The processor must either be the current processor and the
2814  * thread pinned to the current processor or a processor that
2815  * is not online.
2816  */
2817 static void drain_pages_zone(unsigned int cpu, struct zone *zone)
2818 {
2819         unsigned long flags;
2820         struct per_cpu_pageset *pset;
2821         struct per_cpu_pages *pcp;
2822 
2823         local_irq_save(flags);
2824         pset = per_cpu_ptr(zone->pageset, cpu);
2825 
2826         pcp = &pset->pcp;
2827         if (pcp->count)
2828                 free_pcppages_bulk(zone, pcp->count, pcp);
2829         local_irq_restore(flags);
2830 }
2831 
2832 /*
2833  * Drain pcplists of all zones on the indicated processor.
2834  *
2835  * The processor must either be the current processor and the
2836  * thread pinned to the current processor or a processor that
2837  * is not online.
2838  */
2839 static void drain_pages(unsigned int cpu)
2840 {
2841         struct zone *zone;
2842 
2843         for_each_populated_zone(zone) {
2844                 drain_pages_zone(cpu, zone);
2845         }
2846 }
2847 
2848 /*
2849  * Spill all of this CPU's per-cpu pages back into the buddy allocator.
2850  *
2851  * The CPU has to be pinned. When zone parameter is non-NULL, spill just
2852  * the single zone's pages.
2853  */
2854 void drain_local_pages(struct zone *zone)
2855 {
2856         int cpu = smp_processor_id();
2857 
2858         if (zone)
2859                 drain_pages_zone(cpu, zone);
2860         else
2861                 drain_pages(cpu);
2862 }
2863 
2864 static void drain_local_pages_wq(struct work_struct *work)
2865 {
2866         struct pcpu_drain *drain;
2867 
2868         drain = container_of(work, struct pcpu_drain, work);
2869 
2870         /*
2871          * drain_all_pages doesn't use proper cpu hotplug protection, so
2872          * we can race with cpu offline when the WQ moves this work from
2873          * a cpu-pinned worker to an unbound one. Operating on a different
2874          * cpu is all right, but we also have to make sure not to migrate
2875          * to yet another cpu while draining.
2876          */
2877         preempt_disable();
2878         drain_local_pages(drain->zone);
2879         preempt_enable();
2880 }
2881 
2882 /*
2883  * Spill all the per-cpu pages from all CPUs back into the buddy allocator.
2884  *
2885  * When zone parameter is non-NULL, spill just the single zone's pages.
2886  *
2887  * Note that this can be extremely slow as the draining happens in a workqueue.
2888  */
2889 void drain_all_pages(struct zone *zone)
2890 {
2891         int cpu;
2892 
2893         /*
2894          * Allocate in the BSS so we won't require allocation in
2895          * direct reclaim path for CONFIG_CPUMASK_OFFSTACK=y
2896          */
2897         static cpumask_t cpus_with_pcps;
2898 
2899         /*
2900          * Make sure nobody triggers this path before mm_percpu_wq is fully
2901          * initialized.
2902          */
2903         if (WARN_ON_ONCE(!mm_percpu_wq))
2904                 return;
2905 
2906         /*
2907          * Do not drain if one is already in progress unless it's specific to
2908          * a zone. Such callers are primarily CMA and memory hotplug and need
2909          * the drain to be complete when the call returns.
2910          */
2911         if (unlikely(!mutex_trylock(&pcpu_drain_mutex))) {
2912                 if (!zone)
2913                         return;
2914                 mutex_lock(&pcpu_drain_mutex);
2915         }
2916 
2917         /*
2918          * We don't care about racing with CPU hotplug events,
2919          * as the offline notification will cause the notified
2920          * cpu to drain that CPU's pcps and on_each_cpu_mask
2921          * disables preemption as part of its processing.
2922          */
2923         for_each_online_cpu(cpu) {
2924                 struct per_cpu_pageset *pcp;
2925                 struct zone *z;
2926                 bool has_pcps = false;
2927 
2928                 if (zone) {
2929                         pcp = per_cpu_ptr(zone->pageset, cpu);
2930                         if (pcp->pcp.count)
2931                                 has_pcps = true;
2932                 } else {
2933                         for_each_populated_zone(z) {
2934                                 pcp = per_cpu_ptr(z->pageset, cpu);
2935                                 if (pcp->pcp.count) {
2936                                         has_pcps = true;
2937                                         break;
2938                                 }
2939                         }
2940                 }
2941 
2942                 if (has_pcps)
2943                         cpumask_set_cpu(cpu, &cpus_with_pcps);
2944                 else
2945                         cpumask_clear_cpu(cpu, &cpus_with_pcps);
2946         }
2947 
2948         for_each_cpu(cpu, &cpus_with_pcps) {
2949                 struct pcpu_drain *drain = per_cpu_ptr(&pcpu_drain, cpu);
2950 
2951                 drain->zone = zone;
2952                 INIT_WORK(&drain->work, drain_local_pages_wq);
2953                 queue_work_on(cpu, mm_percpu_wq, &drain->work);
2954         }
2955         for_each_cpu(cpu, &cpus_with_pcps)
2956                 flush_work(&per_cpu_ptr(&pcpu_drain, cpu)->work);
2957 
2958         mutex_unlock(&pcpu_drain_mutex);
2959 }
2960 
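/*
 * Editor's illustration (not part of the kernel source): a typical
 * zone-specific caller is the contiguous allocation path, which flushes
 * per-cpu pages after isolating a range so that cached free pages
 * actually reach the buddy lists, roughly:
 *
 *	lru_add_drain_all();
 *	drain_all_pages(cc.zone);
 *
 * Passing a NULL zone instead drains every populated zone, which is what
 * the direct-reclaim retry path does.
 */
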
2961 #ifdef CONFIG_HIBERNATION
2962 
2963 /*
2964  * Touch the watchdog for every WD_PAGE_COUNT pages.
2965  */
2966 #define WD_PAGE_COUNT   (128*1024)
2967 
2968 void mark_free_pages(struct zone *zone)
2969 {
2970         unsigned long pfn, max_zone_pfn, page_count = WD_PAGE_COUNT;
2971         unsigned long flags;
2972         unsigned int order, t;
2973         struct page *page;
2974 
2975         if (zone_is_empty(zone))
2976                 return;
2977 
2978         spin_lock_irqsave(&zone->lock, flags);
2979 
2980         max_zone_pfn = zone_end_pfn(zone);
2981         for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++)
2982                 if (pfn_valid(pfn)) {
2983                         page = pfn_to_page(pfn);
2984 
2985                         if (!--page_count) {
2986                                 touch_nmi_watchdog();
2987                                 page_count = WD_PAGE_COUNT;
2988                         }
2989 
2990                         if (page_zone(page) != zone)
2991                                 continue;
2992 
2993                         if (!swsusp_page_is_forbidden(page))
2994                                 swsusp_unset_page_free(page);
2995                 }
2996 
2997         for_each_migratetype_order(order, t) {
2998                 list_for_each_entry(page,
2999                                 &zone->free_area[order].free_list[t], lru) {
3000                         unsigned long i;
3001 
3002                         pfn = page_to_pfn(page);
3003                         for (i = 0; i < (1UL << order); i++) {
3004                                 if (!--page_count) {
3005                                         touch_nmi_watchdog();
3006                                         page_count = WD_PAGE_COUNT;
3007                                 }
3008                                 swsusp_set_page_free(pfn_to_page(pfn + i));
3009                         }
3010                 }
3011         }
3012         spin_unlock_irqrestore(&zone->lock, flags);
3013 }
3014 #endif /* CONFIG_HIBERNATION */
3015 
3016 static bool free_unref_page_prepare(struct page *page, unsigned long pfn)
3017 {
3018         int migratetype;
3019 
3020         if (!free_pcp_prepare(page))
3021                 return false;
3022 
3023         migratetype = get_pfnblock_migratetype(page, pfn);
3024         set_pcppage_migratetype(page, migratetype);
3025         return true;
3026 }
3027 
3028 static void free_unref_page_commit(struct page *page, unsigned long pfn)
3029 {
3030         struct zone *zone = page_zone(page);
3031         struct per_cpu_pages *pcp;
3032         int migratetype;
3033 
3034         migratetype = get_pcppage_migratetype(page);
3035         __count_vm_event(PGFREE);
3036 
3037         /*
3038          * We only track unmovable, reclaimable and movable on pcp lists.
3039          * Free ISOLATE pages back to the allocator because they are being
3040          * offlined but treat HIGHATOMIC as movable pages so we can get those
3041          * areas back if necessary. Otherwise, we may have to free
3042          * excessively into the page allocator.
3043          */
3044         if (migratetype >= MIGRATE_PCPTYPES) {
3045                 if (unlikely(is_migrate_isolate(migratetype))) {
3046                         free_one_page(zone, page, pfn, 0, migratetype);
3047                         return;
3048                 }
3049                 migratetype = MIGRATE_MOVABLE;
3050         }
3051 
3052         pcp = &this_cpu_ptr(zone->pageset)->pcp;
3053         list_add(&page->lru, &pcp->lists[migratetype]);
3054         pcp->count++;
3055         if (pcp->count >= pcp->high) {
3056                 unsigned long batch = READ_ONCE(pcp->batch);
3057                 free_pcppages_bulk(zone, batch, pcp);
3058         }
3059 }
3060 
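/*
 * Editor's illustration (not part of the kernel source): with
 * hypothetical per-cpu tuning of pcp->high == 186 and pcp->batch == 31,
 * pages accumulate on the per-cpu lists until the 186th free, at which
 * point free_pcppages_bulk() returns one batch of 31 pages to the buddy
 * lists, leaving 155 cached for later order-0 allocations.
 */
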
3061 /*
3062  * Free a 0-order page
3063  */
3064 void free_unref_page(struct page *page)
3065 {
3066         unsigned long flags;
3067         unsigned long pfn = page_to_pfn(page);
3068 
3069         if (!free_unref_page_prepare(page, pfn))
3070                 return;
3071 
3072         local_irq_save(flags);
3073         free_unref_page_commit(page, pfn);
3074         local_irq_restore(flags);
3075 }
3076 
3077 /*
3078  * Free a list of 0-order pages
3079  */
3080 void free_unref_page_list(struct list_head *list)
3081 {
3082         struct page *page, *next;
3083         unsigned long flags, pfn;
3084         int batch_count = 0;
3085 
3086         /* Prepare pages for freeing */
3087         list_for_each_entry_safe(page, next, list, lru) {
3088                 pfn = page_to_pfn(page);
3089                 if (!free_unref_page_prepare(page, pfn))
3090                         list_del(&page->lru);
3091                 set_page_private(page, pfn);
3092         }
3093 
3094         local_irq_save(flags);
3095         list_for_each_entry_safe(page, next, list, lru) {
3096                 unsigned long pfn = page_private(page);
3097 
3098                 set_page_private(page, 0);
3099                 trace_mm_page_free_batched(page);
3100                 free_unref_page_commit(page, pfn);
3101 
3102                 /*
3103                  * Guard against excessive IRQ disabled times when we get
3104                  * a large list of pages to free.
3105                  */
3106                 if (++batch_count == SWAP_CLUSTER_MAX) {
3107                         local_irq_restore(flags);
3108                         batch_count = 0;
3109                         local_irq_save(flags);
3110                 }
3111         }
3112         local_irq_restore(flags);
3113 }
3114 
3115 /*
3116  * split_page takes a non-compound higher-order page, and splits it into
3117  * n (1<<order) sub-pages: page[0..n-1]
3118  * Each sub-page must be freed individually.
3119  *
3120  * Note: this is probably too low level an operation for use in drivers.
3121  * Please consult with lkml before using this in your driver.
3122  */
3123 void split_page(struct page *page, unsigned int order)
3124 {
3125         int i;
3126 
3127         VM_BUG_ON_PAGE(PageCompound(page), page);
3128         VM_BUG_ON_PAGE(!page_count(page), page);
3129 
3130         for (i = 1; i < (1 << order); i++)
3131                 set_page_refcounted(page + i);
3132         split_page_owner(page, order);
3133 }
3134 EXPORT_SYMBOL_GPL(split_page);
3135 
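/*
 * Editor's sketch of a caller (not part of the kernel source): split an
 * order-2 allocation so the four base pages can be freed independently.
 * The page must not be compound, so __GFP_COMP is not used here.
 *
 *	struct page *page = alloc_pages(GFP_KERNEL, 2);
 *	int i;
 *
 *	if (page) {
 *		split_page(page, 2);
 *		for (i = 0; i < 4; i++)
 *			__free_page(page + i);
 *	}
 */
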
3136 int __isolate_free_page(struct page *page, unsigned int order)
3137 {
3138         struct free_area *area = &page_zone(page)->free_area[order];
3139         unsigned long watermark;
3140         struct zone *zone;
3141         int mt;
3142 
3143         BUG_ON(!PageBuddy(page));
3144 
3145         zone = page_zone(page);
3146         mt = get_pageblock_migratetype(page);
3147 
3148         if (!is_migrate_isolate(mt)) {
3149                 /*
3150                  * Obey watermarks as if the page was being allocated. We can
3151                  * emulate a high-order watermark check with a raised order-0
3152                  * watermark, because we already know our high-order page
3153                  * exists.
3154                  */
3155                 watermark = zone->_watermark[WMARK_MIN] + (1UL << order);
3156                 if (!zone_watermark_ok(zone, 0, watermark, 0, ALLOC_CMA))
3157                         return 0;
3158 
3159                 __mod_zone_freepage_state(zone, -(1UL << order), mt);
3160         }
3161 
3162         /* Remove page from free list */
3163 
3164         del_page_from_free_area(page, area);
3165 
3166         /*
3167          * Set the pageblock if the isolated page is at least half of a
3168          * pageblock
3169          */
3170         if (order >= pageblock_order - 1) {
3171                 struct page *endpage = page + (1 << order) - 1;
3172                 for (; page < endpage; page += pageblock_nr_pages) {
3173                         int mt = get_pageblock_migratetype(page);
3174                         if (!is_migrate_isolate(mt) && !is_migrate_cma(mt)
3175                             && !is_migrate_highatomic(mt))
3176                                 set_pageblock_migratetype(page,
3177                                                           MIGRATE_MOVABLE);
3178                 }
3179         }
3180 
3181 
3182         return 1UL << order;
3183 }
3184 
3185 /*
3186  * Update NUMA hit/miss statistics
3187  *
3188  * Must be called with interrupts disabled.
3189  */
3190 static inline void zone_statistics(struct zone *preferred_zone, struct zone *z)
3191 {
3192 #ifdef CONFIG_NUMA
3193         enum numa_stat_item local_stat = NUMA_LOCAL;
3194 
3195         /* skip numa counters update if numa stats is disabled */
3196         if (!static_branch_likely(&vm_numa_stat_key))
3197                 return;
3198 
3199         if (zone_to_nid(z) != numa_node_id())
3200                 local_stat = NUMA_OTHER;
3201 
3202         if (zone_to_nid(z) == zone_to_nid(preferred_zone))
3203                 __inc_numa_state(z, NUMA_HIT);
3204         else {
3205                 __inc_numa_state(z, NUMA_MISS);
3206                 __inc_numa_state(preferred_zone, NUMA_FOREIGN);
3207         }
3208         __inc_numa_state(z, local_stat);
3209 #endif
3210 }
3211 
3212 /* Remove page from the per-cpu list, caller must protect the list */
3213 static struct page *__rmqueue_pcplist(struct zone *zone, int migratetype,
3214                         unsigned int alloc_flags,
3215                         struct per_cpu_pages *pcp,
3216                         struct list_head *list)
3217 {
3218         struct page *page;
3219 
3220         do {
3221                 if (list_empty(list)) {
3222                         pcp->count += rmqueue_bulk(zone, 0,
3223                                         pcp->batch, list,
3224                                         migratetype, alloc_flags);
3225                         if (unlikely(list_empty(list)))
3226                                 return NULL;
3227                 }
3228 
3229                 page = list_first_entry(list, struct page, lru);
3230                 list_del(&page->lru);
3231                 pcp->count--;
3232         } while (check_new_pcp(page));
3233 
3234         return page;
3235 }
3236 
3237 /* Lock and remove page from the per-cpu list */
3238 static struct page *rmqueue_pcplist(struct zone *preferred_zone,
3239                         struct zone *zone, gfp_t gfp_flags,
3240                         int migratetype, unsigned int alloc_flags)
3241 {
3242         struct per_cpu_pages *pcp;
3243         struct list_head *list;
3244         struct page *page;
3245         unsigned long flags;
3246 
3247         local_irq_save(flags);
3248         pcp = &this_cpu_ptr(zone->pageset)->pcp;
3249         list = &pcp->lists[migratetype];
3250         page = __rmqueue_pcplist(zone,  migratetype, alloc_flags, pcp, list);
3251         if (page) {
3252                 __count_zid_vm_events(PGALLOC, page_zonenum(page), 1);
3253                 zone_statistics(preferred_zone, zone);
3254         }
3255         local_irq_restore(flags);
3256         return page;
3257 }
3258 
3259 /*
3260  * Allocate a page from the given zone. Use pcplists for order-0 allocations.
3261  */
3262 static inline
3263 struct page *rmqueue(struct zone *preferred_zone,
3264                         struct zone *zone, unsigned int order,
3265                         gfp_t gfp_flags, unsigned int alloc_flags,
3266                         int migratetype)
3267 {
3268         unsigned long flags;
3269         struct page *page;
3270 
3271         if (likely(order == 0)) {
3272                 page = rmqueue_pcplist(preferred_zone, zone, gfp_flags,
3273                                         migratetype, alloc_flags);
3274                 goto out;
3275         }
3276 
3277         /*
3278          * We most definitely don't want callers attempting to
3279          * allocate greater than order-1 page units with __GFP_NOFAIL.
3280          */
3281         WARN_ON_ONCE((gfp_flags & __GFP_NOFAIL) && (order > 1));
3282         spin_lock_irqsave(&zone->lock, flags);
3283 
3284         do {
3285                 page = NULL;
3286                 if (alloc_flags & ALLOC_HARDER) {
3287                         page = __rmqueue_smallest(zone, order, MIGRATE_HIGHATOMIC);
3288                         if (page)
3289                                 trace_mm_page_alloc_zone_locked(page, order, migratetype);
3290                 }
3291                 if (!page)
3292                         page = __rmqueue(zone, order, migratetype, alloc_flags);
3293         } while (page && check_new_pages(page, order));
3294         spin_unlock(&zone->lock);
3295         if (!page)
3296                 goto failed;
3297         __mod_zone_freepage_state(zone, -(1 << order),
3298                                   get_pcppage_migratetype(page));
3299 
3300         __count_zid_vm_events(PGALLOC, page_zonenum(page), 1 << order);
3301         zone_statistics(preferred_zone, zone);
3302         local_irq_restore(flags);
3303 
3304 out:
3305         /* Separate test+clear to avoid unnecessary atomics */
3306         if (test_bit(ZONE_BOOSTED_WATERMARK, &zone->flags)) {
3307                 clear_bit(ZONE_BOOSTED_WATERMARK, &zone->flags);
3308                 wakeup_kswapd(zone, 0, 0, zone_idx(zone));
3309         }
3310 
3311         VM_BUG_ON_PAGE(page && bad_range(zone, page), page);
3312         return page;
3313 
3314 failed:
3315         local_irq_restore(flags);
3316         return NULL;
3317 }
3318 
3319 #ifdef CONFIG_FAIL_PAGE_ALLOC
3320 
3321 static struct {
3322         struct fault_attr attr;
3323 
3324         bool ignore_gfp_highmem;
3325         bool ignore_gfp_reclaim;
3326         u32 min_order;
3327 } fail_page_alloc = {
3328         .attr = FAULT_ATTR_INITIALIZER,
3329         .ignore_gfp_reclaim = true,
3330         .ignore_gfp_highmem = true,
3331         .min_order = 1,
3332 };
3333 
3334 static int __init setup_fail_page_alloc(char *str)
3335 {
3336         return setup_fault_attr(&fail_page_alloc.attr, str);
3337 }
3338 __setup("fail_page_alloc=", setup_fail_page_alloc);
3339 
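/*
 * Editor's note (not part of the kernel source): the fault attributes can
 * be configured on the command line in the generic fault-injection format,
 * e.g. "fail_page_alloc=<interval>,<probability>,<space>,<times>", or at
 * run time through the debugfs directory created below, for example:
 *
 *	echo 10 > /sys/kernel/debug/fail_page_alloc/probability
 *	echo -1 > /sys/kernel/debug/fail_page_alloc/times
 *	echo 0  > /sys/kernel/debug/fail_page_alloc/min-order
 *
 * See Documentation/fault-injection/ for the full set of knobs.
 */
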
3340 static bool __should_fail_alloc_page(gfp_t gfp_mask, unsigned int order)
3341 {
3342         if (order < fail_page_alloc.min_order)
3343                 return false;
3344         if (gfp_mask & __GFP_NOFAIL)
3345                 return false;
3346         if (fail_page_alloc.ignore_gfp_highmem && (gfp_mask & __GFP_HIGHMEM))
3347                 return false;
3348         if (fail_page_alloc.ignore_gfp_reclaim &&
3349                         (gfp_mask & __GFP_DIRECT_RECLAIM))
3350                 return false;
3351 
3352         return should_fail(&fail_page_alloc.attr, 1 << order);
3353 }
3354 
3355 #ifdef CONFIG_FAULT_INJECTION_DEBUG_FS
3356 
3357 static int __init fail_page_alloc_debugfs(void)
3358 {
3359         umode_t mode = S_IFREG | 0600;
3360         struct dentry *dir;
3361 
3362         dir = fault_create_debugfs_attr("fail_page_alloc", NULL,
3363                                         &fail_page_alloc.attr);
3364 
3365         debugfs_create_bool("ignore-gfp-wait", mode, dir,
3366                             &fail_page_alloc.ignore_gfp_reclaim);
3367         debugfs_create_bool("ignore-gfp-highmem", mode, dir,
3368                             &fail_page_alloc.ignore_gfp_highmem);
3369         debugfs_create_u32("min-order", mode, dir, &fail_page_alloc.min_order);
3370 
3371         return 0;
3372 }
3373 
3374 late_initcall(fail_page_alloc_debugfs);
3375 
3376 #endif /* CONFIG_FAULT_INJECTION_DEBUG_FS */
3377 
3378 #else /* CONFIG_FAIL_PAGE_ALLOC */
3379 
3380 static inline bool __should_fail_alloc_page(gfp_t gfp_mask, unsigned int order)
3381 {
3382         return false;
3383 }
3384 
3385 #endif /* CONFIG_FAIL_PAGE_ALLOC */
3386 
3387 static noinline bool should_fail_alloc_page(gfp_t gfp_mask, unsigned int order)
3388 {
3389         return __should_fail_alloc_page(gfp_mask, order);
3390 }
3391 ALLOW_ERROR_INJECTION(should_fail_alloc_page, TRUE);
3392 
3393 /*
3394  * Return true if free base pages are above 'mark'. For high-order checks it
3395  * will return true if the order-0 watermark is met and there is at least
3396  * one free page of a suitable size. Checking now avoids taking the zone lock
3397  * to check in the allocation paths if no pages are free.
3398  */
3399 bool __zone_watermark_ok(struct zone *z, unsigned int order, unsigned long mark,
3400                          int classzone_idx, unsigned int alloc_flags,
3401                          long free_pages)
3402 {
3403         long min = mark;
3404         int o;
3405         const bool alloc_harder = (alloc_flags & (ALLOC_HARDER|ALLOC_OOM));
3406 
3407         /* free_pages may go negative - that's OK */
3408         free_pages -= (1 << order) - 1;
3409 
3410         if (alloc_flags & ALLOC_HIGH)
3411                 min -= min / 2;
3412 
3413         /*
3414          * If the caller does not have rights to ALLOC_HARDER then subtract
3415          * the high-atomic reserves. This will over-estimate the size of the
3416          * atomic reserve but it avoids a search.
3417          */
3418         if (likely(!alloc_harder)) {
3419                 free_pages -= z->nr_reserved_highatomic;
3420         } else {
3421                 /*
3422                  * OOM victims can try even harder than normal ALLOC_HARDER
3423                  * users on the grounds that it's definitely going to be in
3424                  * the exit path shortly and free memory. Any allocation it
3425                  * makes during the free path will be small and short-lived.
3426                  */
3427                 if (alloc_flags & ALLOC_OOM)
3428                         min -= min / 2;
3429                 else
3430                         min -= min / 4;
3431         }
3432 
3433 
3434 #ifdef CONFIG_CMA
3435         /* If allocation can't use CMA areas don't use free CMA pages */
3436         if (!(alloc_flags & ALLOC_CMA))
3437                 free_pages -= zone_page_state(z, NR_FREE_CMA_PAGES);
3438 #endif
3439 
3440         /*
3441          * Check watermarks for an order-0 allocation request. If these
3442          * are not met, then a high-order request also cannot go ahead
3443          * even if a suitable page happened to be free.
3444          */
3445         if (free_pages <= min + z->lowmem_reserve[classzone_idx])
3446                 return false;
3447 
3448         /* If this is an order-0 request then the watermark is fine */
3449         if (!order)
3450                 return true;
3451 
3452         /* For a high-order request, check at least one suitable page is free */
3453         for (o = order; o < MAX_ORDER; o++) {
3454                 struct free_area *area = &z->free_area[o];
3455                 int mt;
3456 
3457                 if (!area->nr_free)
3458                         continue;
3459 
3460                 for (mt = 0; mt < MIGRATE_PCPTYPES; mt++) {
3461                         if (!free_area_empty(area, mt))
3462                                 return true;
3463                 }
3464 
3465 #ifdef CONFIG_CMA
3466                 if ((alloc_flags & ALLOC_CMA) &&
3467                     !free_area_empty(area, MIGRATE_CMA)) {
3468                         return true;
3469                 }
3470 #endif
3471                 if (alloc_harder &&
3472                         !list_empty(&area->free_list[MIGRATE_HIGHATOMIC]))
3473                         return true;
3474         }
3475         return false;
3476 }
3477 
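/*
 * Editor's worked example (not part of the kernel source): for an order-2
 * request against mark == 1024 pages, ALLOC_HIGH halves min to 512, and a
 * non-OOM ALLOC_HARDER caller drops it by another quarter to 384 (an
 * ALLOC_OOM victim would get 256). free_pages is first reduced by
 * (1 << 2) - 1 = 3, by the high-atomic reserve when ALLOC_HARDER is not
 * set, and by free CMA pages when ALLOC_CMA is not set; only if the
 * remainder still exceeds min plus the classzone's lowmem_reserve does
 * the order-0 part of the check pass.
 */
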
3478 bool zone_watermark_ok(struct zone *z, unsigned int order, unsigned long mark,
3479                       int classzone_idx, unsigned int alloc_flags)
3480 {
3481         return __zone_watermark_ok(z, order, mark, classzone_idx, alloc_flags,
3482                                         zone_page_state(z, NR_FREE_PAGES));
3483 }
3484 
3485 static inline bool zone_watermark_fast(struct zone *z, unsigned int order,
3486                 unsigned long mark, int classzone_idx, unsigned int alloc_flags)
3487 {
3488         long free_pages = zone_page_state(z, NR_FREE_PAGES);
3489         long cma_pages = 0;
3490 
3491 #ifdef CONFIG_CMA
3492         /* If allocation can't use CMA areas don't use free CMA pages */
3493         if (!(alloc_flags & ALLOC_CMA))
3494                 cma_pages = zone_page_state(z, NR_FREE_CMA_PAGES);
3495 #endif
3496 
3497         /*
3498          * Fast check for order-0 only. If this fails then the reserves
3499          * need to be calculated. There is a corner case where the check
3500          * passes but only the high-order atomic reserves are free. If
3501          * the caller is !atomic then it'll uselessly search the free
3502          * list. That corner case is then slower but it is harmless.
3503          */
3504         if (!order && (free_pages - cma_pages) > mark + z->lowmem_reserve[classzone_idx])
3505                 return true;
3506 
3507         return __zone_watermark_ok(z, order, mark, classzone_idx, alloc_flags,
3508                                         free_pages);
3509 }
3510 
3511 bool zone_watermark_ok_safe(struct zone *z, unsigned int order,
3512                         unsigned long mark, int classzone_idx)
3513 {
3514         long free_pages = zone_page_state(z, NR_FREE_PAGES);
3515 
3516         if (z->percpu_drift_mark && free_pages < z->percpu_drift_mark)
3517                 free_pages = zone_page_state_snapshot(z, NR_FREE_PAGES);
3518 
3519         return __zone_watermark_ok(z, order, mark, classzone_idx, 0,
3520                                                                 free_pages);
3521 }
3522 
3523 #ifdef CONFIG_NUMA
3524 static bool zone_allows_reclaim(struct zone *local_zone, struct zone *zone)
3525 {
3526         return node_distance(zone_to_nid(local_zone), zone_to_nid(zone)) <=
3527                                 node_reclaim_distance;
3528 }
3529 #else   /* CONFIG_NUMA */
3530 static bool zone_allows_reclaim(struct zone *local_zone, struct zone *zone)
3531 {
3532         return true;
3533 }
3534 #endif  /* CONFIG_NUMA */
3535 
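/*
 * Editor's illustration (not part of the kernel source): with the default
 * node_reclaim_distance of RECLAIM_DISTANCE (30 unless an architecture
 * overrides it), a zone on a directly attached remote node reporting a
 * SLIT distance of 20 is considered close enough to reclaim from, while a
 * node two hops away reporting 40 is not, and get_page_from_freelist()
 * below will skip node_reclaim() for it.
 */
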
3536 /*
3537  * The restriction on ZONE_DMA32 as being a suitable zone to use to avoid
3538  * fragmentation is subtle. If the preferred zone was HIGHMEM then
3539  * premature use of a lower zone may cause lowmem pressure problems that
3540  * are worse than fragmentation. If the next zone is ZONE_DMA then it is
3541  * probably too small. It only makes sense to spread allocations to avoid
3542  * fragmentation between the Normal and DMA32 zones.
3543  */
3544 static inline unsigned int
3545 alloc_flags_nofragment(struct zone *zone, gfp_t gfp_mask)
3546 {
3547         unsigned int alloc_flags = 0;
3548 
3549         if (gfp_mask & __GFP_KSWAPD_RECLAIM)
3550                 alloc_flags |= ALLOC_KSWAPD;
3551 
3552 #ifdef CONFIG_ZONE_DMA32
3553         if (!zone)
3554                 return alloc_flags;
3555 
3556         if (zone_idx(zone) != ZONE_NORMAL)
3557                 return alloc_flags;
3558 
3559         /*
3560          * If ZONE_DMA32 exists, assume it is the one after ZONE_NORMAL and
3561          * the pointer is within zone->zone_pgdat->node_zones[]. Also assume
3562          * on UMA that if Normal is populated then so is DMA32.
3563          */
3564         BUILD_BUG_ON(ZONE_NORMAL - ZONE_DMA32 != 1);
3565         if (nr_online_nodes > 1 && !populated_zone(--zone))
3566                 return alloc_flags;
3567 
3568         alloc_flags |= ALLOC_NOFRAGMENT;
3569 #endif /* CONFIG_ZONE_DMA32 */
3570         return alloc_flags;
3571 }
3572 
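/*
 * Editor's illustration (not part of the kernel source): on a UMA x86-64
 * machine a GFP_KERNEL request whose preferred zone is ZONE_NORMAL gets
 * ALLOC_NOFRAGMENT here unconditionally (DMA32 is assumed to be populated
 * whenever Normal is); on a NUMA machine the flag is only set when the
 * node's DMA32 zone is actually populated. Requests preferring HIGHMEM or
 * DMA32 itself never get the flag, per the comment above.
 */
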
3573 /*
3574  * get_page_from_freelist goes through the zonelist trying to allocate
3575  * a page.
3576  */
3577 static struct page *
3578 get_page_from_freelist(gfp_t gfp_mask, unsigned int order, int alloc_flags,
3579                                                 const struct alloc_context *ac)
3580 {
3581         struct zoneref *z;
3582         struct zone *zone;
3583         struct pglist_data *last_pgdat_dirty_limit = NULL;
3584         bool no_fallback;
3585 
3586 retry:
3587         /*
3588          * Scan zonelist, looking for a zone with enough free.
3589          * See also __cpuset_node_allowed() comment in kernel/cpuset.c.
3590          */
3591         no_fallback = alloc_flags & ALLOC_NOFRAGMENT;
3592         z = ac->preferred_zoneref;
3593         for_next_zone_zonelist_nodemask(zone, z, ac->zonelist, ac->high_zoneidx,
3594                                                                 ac->nodemask) {
3595                 struct page *page;
3596                 unsigned long mark;
3597 
3598                 if (cpusets_enabled() &&
3599                         (alloc_flags & ALLOC_CPUSET) &&
3600                         !__cpuset_zone_allowed(zone, gfp_mask))
3601                                 continue;
3602                 /*
3603                  * When allocating a page cache page for writing, we
3604                  * want to get it from a node that is within its dirty
3605                  * limit, such that no single node holds more than its
3606                  * proportional share of globally allowed dirty pages.
3607                  * The dirty limits take into account the node's
3608                  * lowmem reserves and high watermark so that kswapd
3609                  * should be able to balance it without having to
3610                  * write pages from its LRU list.
3611                  *
3612                  * XXX: For now, allow allocations to potentially
3613                  * exceed the per-node dirty limit in the slowpath
3614                  * (spread_dirty_pages unset) before going into reclaim,
3615                  * which is important when on a NUMA setup the allowed
3616                  * nodes are together not big enough to reach the
3617                  * global limit.  The proper fix for these situations
3618                  * will require awareness of nodes in the
3619                  * dirty-throttling and the flusher threads.
3620                  */
3621                 if (ac->spread_dirty_pages) {
3622                         if (last_pgdat_dirty_limit == zone->zone_pgdat)
3623                                 continue;
3624 
3625                         if (!node_dirty_ok(zone->zone_pgdat)) {
3626                                 last_pgdat_dirty_limit = zone->zone_pgdat;
3627                                 continue;
3628                         }
3629                 }
3630 
3631                 if (no_fallback && nr_online_nodes > 1 &&
3632                     zone != ac->preferred_zoneref->zone) {
3633                         int local_nid;
3634 
3635                         /*
3636                          * If moving to a remote node, retry but allow
3637                          * fragmenting fallbacks. Locality is more important
3638                          * than fragmentation avoidance.
3639                          */
3640                         local_nid = zone_to_nid(ac->preferred_zoneref->zone);
3641                         if (zone_to_nid(zone) != local_nid) {
3642                                 alloc_flags &= ~ALLOC_NOFRAGMENT;
3643                                 goto retry;
3644                         }
3645                 }
3646 
3647                 mark = wmark_pages(zone, alloc_flags & ALLOC_WMARK_MASK);
3648                 if (!zone_watermark_fast(zone, order, mark,
3649                                        ac_classzone_idx(ac), alloc_flags)) {
3650                         int ret;
3651 
3652 #ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
3653                         /*
3654                          * Watermark failed for this zone, but see if we can
3655                          * grow this zone if it contains deferred pages.
3656                          */
3657                         if (static_branch_unlikely(&deferred_pages)) {
3658                                 if (_deferred_grow_zone(zone, order))
3659                                         goto try_this_zone;
3660                         }
3661 #endif
3662                         /* Checked here to keep the fast path fast */
3663                         BUILD_BUG_ON(ALLOC_NO_WATERMARKS < NR_WMARK);
3664                         if (alloc_flags & ALLOC_NO_WATERMARKS)
3665                                 goto try_this_zone;
3666 
3667                         if (node_reclaim_mode == 0 ||
3668                             !zone_allows_reclaim(ac->preferred_zoneref->zone, zone))
3669                                 continue;
3670 
3671                         ret = node_reclaim(zone->zone_pgdat, gfp_mask, order);
3672                         switch (ret) {
3673                         case NODE_RECLAIM_NOSCAN:
3674                                 /* did not scan */
3675                                 continue;
3676                         case NODE_RECLAIM_FULL:
3677                                 /* scanned but unreclaimable */
3678                                 continue;
3679                         default:
3680                                 /* did we reclaim enough */
3681                                 if (zone_watermark_ok(zone, order, mark,
3682                                                 ac_classzone_idx(ac), alloc_flags))
3683                                         goto try_this_zone;
3684 
3685                                 continue;
3686                         }
3687                 }
3688 
3689 try_this_zone:
3690                 page = rmqueue(ac->preferred_zoneref->zone, zone, order,
3691                                 gfp_mask, alloc_flags, ac->migratetype);
3692                 if (page) {
3693                         prep_new_page(page, order, gfp_mask, alloc_flags);
3694 
3695                         /*
3696                          * If this is a high-order atomic allocation then check
3697                          * if the pageblock should be reserved for the future
3698                          */
3699                         if (unlikely(order && (alloc_flags & ALLOC_HARDER)))
3700                                 reserve_highatomic_pageblock(page, zone, order);
3701 
3702                         return page;
3703                 } else {
3704 #ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
3705                         /* Try again if zone has deferred pages */
3706                         if (static_branch_unlikely(&deferred_pages)) {
3707                                 if (_deferred_grow_zone(zone, order))
3708                                         goto try_this_zone;
3709                         }
3710 #endif
3711                 }
3712         }
3713 
3714         /*
3715          * It's possible on a UMA machine to get through all zones that are
3716          * fragmented. If avoiding fragmentation, reset and try again.
3717          */
3718         if (no_fallback) {
3719                 alloc_flags &= ~ALLOC_NOFRAGMENT;
3720                 goto retry;
3721         }
3722 
3723         return NULL;
3724 }
3725 
3726 static void warn_alloc_show_mem(gfp_t gfp_mask, nodemask_t *nodemask)
3727 {
3728         unsigned int filter = SHOW_MEM_FILTER_NODES;
3729 
3730         /*
3731          * This documents exceptions given to allocations in certain
3732          * contexts that are allowed to allocate outside current's set
3733          * of allowed nodes.
3734          */
3735         if (!(gfp_mask & __GFP_NOMEMALLOC))
3736                 if (tsk_is_oom_victim(current) ||
3737                     (current->flags & (PF_MEMALLOC | PF_EXITING)))
3738                         filter &= ~SHOW_MEM_FILTER_NODES;
3739         if (in_interrupt() || !(gfp_mask & __GFP_DIRECT_RECLAIM))
3740                 filter &= ~SHOW_MEM_FILTER_NODES;
3741 
3742         show_mem(filter, nodemask);
3743 }
3744 
3745 void warn_alloc(gfp_t gfp_mask, nodemask_t *nodemask, const char *fmt, ...)
3746 {
3747         struct va_format vaf;
3748         va_list args;
3749         static DEFINE_RATELIMIT_STATE(nopage_rs, 10*HZ, 1);
3750 
3751         if ((gfp_mask & __GFP_NOWARN) || !__ratelimit(&nopage_rs))
3752                 return;
3753 
3754         va_start(args, fmt);
3755         vaf.fmt = fmt;
3756         vaf.va = &args;
3757         pr_warn("%s: %pV, mode:%#x(%pGg), nodemask=%*pbl",
3758                         current->comm, &vaf, gfp_mask, &gfp_mask,
3759                         nodemask_pr_args(nodemask));
3760         va_end(args);
3761 
3762         cpuset_print_current_mems_allowed();
3763         pr_cont("\n");
3764         dump_stack();
3765         warn_alloc_show_mem(gfp_mask, nodemask);
3766 }
3767 
3768 static inline struct page *
3769 __alloc_pages_cpuset_fallback(gfp_t gfp_mask, unsigned int order,
3770                               unsigned int alloc_flags,
3771                               const struct alloc_context *ac)
3772 {
3773         struct page *page;
3774 
3775         page = get_page_from_freelist(gfp_mask, order,
3776                         alloc_flags|ALLOC_CPUSET, ac);
3777         /*
3778          * fallback to ignore cpuset restriction if our nodes
3779          * are depleted
3780          */
3781         if (!page)
3782                 page = get_page_from_freelist(gfp_mask, order,
3783                                 alloc_flags, ac);
3784 
3785         return page;
3786 }
3787 
3788 static inline struct page *
3789 __alloc_pages_may_oom(gfp_t gfp_mask, unsigned int order,
3790         const struct alloc_context *ac, unsigned long *did_some_progress)
3791 {
3792         struct oom_control oc = {
3793                 .zonelist = ac->zonelist,
3794                 .nodemask = ac->nodemask,
3795                 .memcg = NULL,
3796                 .gfp_mask = gfp_mask,
3797                 .order = order,
3798         };
3799         struct page *page;
3800 
3801         *did_some_progress = 0;
3802 
3803         /*
3804          * Acquire the oom lock.  If that fails, somebody else is
3805          * making progress for us.
3806          */
3807         if (!mutex_trylock(&oom_lock)) {
3808                 *did_some_progress = 1;
3809                 schedule_timeout_uninterruptible(1);
3810                 return NULL;
3811         }
3812 
3813         /*
3814          * Go through the zonelist yet one more time, keeping a very high
3815          * watermark here; this is only to catch a parallel oom killing, and we
3816          * must fail if we're still under heavy pressure. But make sure that
3817          * this reclaim attempt does not depend on a __GFP_DIRECT_RECLAIM &&
3818          * !__GFP_NORETRY allocation, which would never fail while oom_lock is held.
3819          */
3820         page = get_page_from_freelist((gfp_mask | __GFP_HARDWALL) &
3821                                       ~__GFP_DIRECT_RECLAIM, order,
3822                                       ALLOC_WMARK_HIGH|ALLOC_CPUSET, ac);
3823         if (page)
3824                 goto out;
3825 
3826         /* Coredumps can quickly deplete all memory reserves */
3827         if (current->flags & PF_DUMPCORE)
3828                 goto out;
3829         /* The OOM killer will not help higher order allocs */
3830         if (order > PAGE_ALLOC_COSTLY_ORDER)
3831                 goto out;
3832         /*
3833          * We have already exhausted all our reclaim opportunities without any
3834          * success so it is time to admit defeat. We will skip the OOM killer
3835          * because it is very likely that the caller has a more reasonable
3836          * fallback than shooting a random task.
3837          */
3838         if (gfp_mask & __GFP_RETRY_MAYFAIL)
3839                 goto out;
3840         /* The OOM killer does not needlessly kill tasks for lowmem */
3841         if (ac->high_zoneidx < ZONE_NORMAL)
3842                 goto out;
3843         if (pm_suspended_storage())
3844                 goto out;
3845         /*
3846          * XXX: GFP_NOFS allocations should rather fail than rely on
3847          * other requests to make forward progress.
3848          * We are in an unfortunate situation where out_of_memory cannot
3849          * do much for this context but let's try it to at least get
3850          * access to memory reserved if the current task is killed (see
3851          * out_of_memory). Once filesystems are ready to handle allocation
3852          * failures more gracefully we should just bail out here.
3853          */
3854 
3855         /* The OOM killer may not free memory on a specific node */
3856         if (gfp_mask & __GFP_THISNODE)
3857                 goto out;
3858 
3859         /* Exhausted what can be done so it's blame time */
3860         if (out_of_memory(&oc) || WARN_ON_ONCE(gfp_mask & __GFP_NOFAIL)) {
3861                 *did_some_progress = 1;
3862 
3863                 /*
3864                  * Help non-failing allocations by giving them access to memory
3865                  * reserves
3866                  */
3867                 if (gfp_mask & __GFP_NOFAIL)
3868                         page = __alloc_pages_cpuset_fallback(gfp_mask, order,
3869                                         ALLOC_NO_WATERMARKS, ac);
3870         }
3871 out:
3872         mutex_unlock(&oom_lock);
3873         return page;
3874 }
3875 
3876 /*
3877  * Maximum number of compaction retries with progress before the OOM
3878  * killer is considered the only way to move forward.
3879  */
3880 #define MAX_COMPACT_RETRIES 16
3881 
3882 #ifdef CONFIG_COMPACTION
3883 /* Try memory compaction for high-order allocations before reclaim */
3884 static struct page *
3885 __alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order,
3886                 unsigned int alloc_flags, const struct alloc_context *ac,
3887                 enum compact_priority prio, enum compact_result *compact_result)
3888 {
3889         struct page *page = NULL;
3890         unsigned long pflags;
3891         unsigned int noreclaim_flag;
3892 
3893         if (!order)
3894                 return NULL;
3895 
3896         psi_memstall_enter(&pflags);
3897         noreclaim_flag = memalloc_noreclaim_save();
3898 
3899         *compact_result = try_to_compact_pages(gfp_mask, order, alloc_flags, ac,
3900                                                                 prio, &page);
3901 
3902         memalloc_noreclaim_restore(noreclaim_flag);
3903         psi_memstall_leave(&pflags);
3904 
3905         /*
3906          * At least in one zone compaction wasn't deferred or skipped, so let's
3907          * count a compaction stall
3908          */
3909         count_vm_event(COMPACTSTALL);
3910 
3911         /* Prep a captured page if available */
3912         if (page)
3913                 prep_new_page(page, order, gfp_mask, alloc_flags);
3914 
3915         /* Try get a page from the freelist if available */
3916         if (!page)
3917                 page = get_page_from_freelist(gfp_mask, order, alloc_flags, ac);
3918 
3919         if (page) {
3920                 struct zone *zone = page_zone(page);
3921 
3922                 zone->compact_blockskip_flush = false;
3923                 compaction_defer_reset(zone, order, true);
3924                 count_vm_event(COMPACTSUCCESS);
3925                 return page;
3926         }
3927 
3928         /*
3929          * It's bad if a compaction run occurs and fails. The most likely reason
3930          * is that pages exist, but not enough to satisfy watermarks.
3931          */
3932         count_vm_event(COMPACTFAIL);
3933 
3934         cond_resched();
3935 
3936         return NULL;
3937 }
3938 
3939 static inline bool
3940 should_compact_retry(struct alloc_context *ac, int order, int alloc_flags,
3941                      enum compact_result compact_result,
3942                      enum compact_priority *compact_priority,
3943                      int *compaction_retries)
3944 {
3945         int max_retries = MAX_COMPACT_RETRIES;
3946         int min_priority;
3947         bool ret = false;
3948         int retries = *compaction_retries;
3949         enum compact_priority priority = *compact_priority;
3950 
3951         if (!order)
3952                 return false;
3953 
3954         if (compaction_made_progress(compact_result))
3955                 (*compaction_retries)++;
3956 
3957         /*
3958          * compaction considers all the zones as desperately out of memory,
3959          * so it doesn't really make much sense to retry except when the
3960          * failure could be caused by insufficient priority.
3961          */
3962         if (compaction_failed(compact_result))
3963                 goto check_priority;
3964 
3965         /*
3966          * compaction was skipped because there are not enough order-0 pages
3967          * to work with, so we retry only if it looks like reclaim can help.
3968          */
3969         if (compaction_needs_reclaim(compact_result)) {
3970                 ret = compaction_zonelist_suitable(ac, order, alloc_flags);
3971                 goto out;
3972         }
3973 
3974         /*
3975          * Make sure the compaction wasn't deferred or didn't bail out early
3976          * due to lock contention before we declare that we should give up.
3977          * But the next retry should use a higher priority if allowed, so
3978          * we don't just keep bailing out endlessly.
3979          */
3980         if (compaction_withdrawn(compact_result)) {
3981                 goto check_priority;
3982         }
3983 
3984         /*
3985          * !costly requests are much more important than __GFP_RETRY_MAYFAIL
3986          * costly ones because they are de facto nofail and invoke OOM
3987          * killer to move on while costly can fail and users are ready
3988          * to cope with that. 1/4 retries is rather arbitrary but we
3989          * would need much more detailed feedback from compaction to
3990          * make a better decision.
3991          */
3992         if (order > PAGE_ALLOC_COSTLY_ORDER)
3993                 max_retries /= 4;
3994         if (*compaction_retries <= max_retries) {
3995                 ret = true;
3996                 goto out;
3997         }
3998 
3999         /*
4000          * Make sure there are attempts at the highest priority if we exhausted
4001          * all retries or failed at the lower priorities.
4002          */
4003 check_priority:
4004         min_priority = (order > PAGE_ALLOC_COSTLY_ORDER) ?
4005                         MIN_COMPACT_COSTLY_PRIORITY : MIN_COMPACT_PRIORITY;
4006 
4007         if (*compact_priority > min_priority) {
4008                 (*compact_priority)--;
4009                 *compaction_retries = 0;
4010                 ret = true;
4011         }
4012 out:
4013         trace_compact_retry(order, priority, compact_result, retries, max_retries, ret);
4014         return ret;
4015 }
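
/*
 * Editor's worked example (not part of the kernel source): a costly
 * request such as an order-9 THP allocation has its retry budget cut to
 * MAX_COMPACT_RETRIES / 4 == 4 attempts per compaction priority, whereas
 * an order-2 request keeps the full 16; once the budget is exhausted the
 * priority is raised (if still above the minimum) and the retry counter
 * starts again from zero.
 */
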
4016 #else
4017 static inline struct page *
4018 __alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order,
4019                 unsigned int alloc_flags, const struct alloc_context *ac,
4020                 enum compact_priority prio, enum compact_result *compact_result)
4021 {
4022         *compact_result = COMPACT_SKIPPED;
4023         return NULL;
4024 }
4025 
4026 static inline bool
4027 should_compact_retry(struct alloc_context *ac, unsigned int order, int alloc_flags,
4028                      enum compact_result compact_result,
4029                      enum compact_priority *compact_priority,
4030                      int *compaction_retries)
4031 {
4032         struct zone *zone;
4033         struct zoneref *z;
4034 
4035         if (!order || order > PAGE_ALLOC_COSTLY_ORDER)
4036                 return false;
4037 
4038         /*
4039          * There are setups with compaction disabled which would prefer to loop
4040          * inside the allocator rather than hit the oom killer prematurely.
4041          * Let's give them a good hope and keep retrying while the order-0
4042          * watermarks are OK.
4043          */
4044         for_each_zone_zonelist_nodemask(zone, z, ac->zonelist, ac->high_zoneidx,
4045                                         ac->nodemask) {
4046                 if (zone_watermark_ok(zone, 0, min_wmark_pages(zone),
4047                                         ac_classzone_idx(ac), alloc_flags))
4048                         return true;
4049         }
4050         return false;
4051 }
4052 #endif /* CONFIG_COMPACTION */
4053 
4054 #ifdef CONFIG_LOCKDEP
4055 static struct lockdep_map __fs_reclaim_map =
4056         STATIC_LOCKDEP_MAP_INIT("fs_reclaim", &__fs_reclaim_map);
4057 
4058 static bool __need_fs_reclaim(gfp_t gfp_mask)
4059 {
4060         gfp_mask = current_gfp_context(gfp_mask);
4061 
4062         /* no reclaim without waiting on it */
4063         if (!(gfp_mask & __GFP_DIRECT_RECLAIM))
4064                 return false;
4065 
4066         /* this guy won't enter reclaim */
4067         if (current->flags & PF_MEMALLOC)
4068                 return false;
4069 
4070         /* We're only interested in __GFP_FS allocations for now */
4071         if (!(gfp_mask & __GFP_FS))
4072                 return false;
4073 
4074         if (gfp_mask & __GFP_NOLOCKDEP)
4075                 return false;
4076 
4077         return true;
4078 }
4079 
4080 void __fs_reclaim_acquire(void)
4081 {
4082         lock_map_acquire(&__fs_reclaim_map);
4083 }
4084 
4085 void __fs_reclaim_release(void)
4086 {
4087         lock_map_release(&__fs_reclaim_map);
4088 }
4089 
4090 void fs_reclaim_acquire(gfp_t gfp_mask)
4091 {
4092         if (__need_fs_reclaim(gfp_mask))
4093                 __fs_reclaim_acquire();
4094 }
4095 EXPORT_SYMBOL_GPL(fs_reclaim_acquire);
4096 
4097 void fs_reclaim_release(gfp_t gfp_mask)
4098 {
4099         if (__need_fs_reclaim(gfp_mask))
4100                 __fs_reclaim_release();
4101 }
4102 EXPORT_SYMBOL_GPL(fs_reclaim_release);
4103 #endif
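
The lockdep annotation above only fires for allocations that may both sleep and recurse into filesystem reclaim. As a quick caller-side illustration (a sketch, not part of this file; memalloc_nofs_save()/memalloc_nofs_restore() are the standard scoped-gfp helpers, while buf and len are hypothetical), this is how a filesystem context keeps its allocations out of that dependency, since current_gfp_context() above strips __GFP_FS for it:

        unsigned int nofs_flags = memalloc_nofs_save();

        buf = kmalloc(len, GFP_KERNEL); /* treated as GFP_NOFS inside the allocator */

        memalloc_nofs_restore(nofs_flags);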
4104 
4105 /* Perform direct synchronous page reclaim */
4106 static int
4107 __perform_reclaim(gfp_t gfp_mask, unsigned int order,
4108                                         const struct alloc_context *ac)
4109 {
4110         int progress;
4111         unsigned int noreclaim_flag;
4112         unsigned long pflags;
4113 
4114         cond_resched();
4115 
4116         /* We now go into synchronous reclaim */
4117         cpuset_memory_pressure_bump();
4118         psi_memstall_enter(&pflags);
4119         fs_reclaim_acquire(gfp_mask);
4120         noreclaim_flag = memalloc_noreclaim_save();
4121 
4122         progress = try_to_free_pages(ac->zonelist, order, gfp_mask,
4123                                                                 ac->nodemask);
4124 
4125         memalloc_noreclaim_restore(noreclaim_flag);
4126         fs_reclaim_release(gfp_mask);
4127         psi_memstall_leave(&pflags);
4128 
4129         cond_resched();
4130 
4131         return progress;
4132 }
4133 
4134 /* The really slow allocator path where we enter direct reclaim */
4135 static inline struct page *
4136 __alloc_pages_direct_reclaim(gfp_t gfp_mask, unsigned int order,
4137                 unsigned int alloc_flags, const struct alloc_context *ac,
4138                 unsigned long *did_some_progress)
4139 {
4140         struct page *page = NULL;
4141         bool drained = false;
4142 
4143         *did_some_progress = __perform_reclaim(gfp_mask, order, ac);
4144         if (unlikely(!(*did_some_progress)))
4145                 return NULL;
4146 
4147 retry:
4148         page = get_page_from_freelist(gfp_mask, order, alloc_flags, ac);
4149 
4150         /*
4151          * If an allocation failed after direct reclaim, it could be because
4152          * pages are pinned on the per-cpu lists or in high alloc reserves.
4153          * Shrink them and try again
4154          */
4155         if (!page && !drained) {
4156                 unreserve_highatomic_pageblock(ac, false);
4157                 drain_all_pages(NULL);
4158                 drained = true;
4159                 goto retry;
4160         }
4161 
4162         return page;
4163 }
4164 
4165 static void wake_all_kswapds(unsigned int order, gfp_t gfp_mask,
4166                              const struct alloc_context *ac)
4167 {
4168         struct zoneref *z;
4169         struct zone *zone;
4170         pg_data_t *last_pgdat = NULL;
4171         enum zone_type high_zoneidx = ac->high_zoneidx;
4172 
4173         for_each_zone_zonelist_nodemask(zone, z, ac->zonelist, high_zoneidx,
4174                                         ac->nodemask) {
4175                 if (last_pgdat != zone->zone_pgdat)
4176                         wakeup_kswapd(zone, gfp_mask, order, high_zoneidx);
4177                 last_pgdat = zone->zone_pgdat;
4178         }
4179 }
4180 
4181 static inline unsigned int
4182 gfp_to_alloc_flags(gfp_t gfp_mask)
4183 {
4184         unsigned int alloc_flags = ALLOC_WMARK_MIN | ALLOC_CPUSET;
4185 
4186         /* __GFP_HIGH is assumed to be the same as ALLOC_HIGH to save a branch. */
4187         BUILD_BUG_ON(__GFP_HIGH != (__force gfp_t) ALLOC_HIGH);
4188 
4189         /*
4190          * The caller may dip into page reserves a bit more if the caller
4191          * cannot run direct reclaim, or if the caller has realtime scheduling
4192          * policy or is asking for __GFP_HIGH memory.  GFP_ATOMIC requests will
4193          * set both ALLOC_HARDER (__GFP_ATOMIC) and ALLOC_HIGH (__GFP_HIGH).
4194          */
4195         alloc_flags |= (__force int) (gfp_mask & __GFP_HIGH);
4196 
4197         if (gfp_mask & __GFP_ATOMIC) {
4198                 /*
4199                  * Not worth trying to allocate harder for __GFP_NOMEMALLOC even
4200                  * if it can't schedule.
4201                  */
4202                 if (!(gfp_mask & __GFP_NOMEMALLOC))
4203                         alloc_flags |= ALLOC_HARDER;
4204                 /*
4205                  * Ignore cpuset mems for GFP_ATOMIC rather than fail, see the
4206                  * comment for __cpuset_node_allowed().
4207                  */
4208                 alloc_flags &= ~ALLOC_CPUSET;
4209         } else if (unlikely(rt_task(current)) && !in_interrupt())
4210                 alloc_flags |= ALLOC_HARDER;
4211 
4212         if (gfp_mask & __GFP_KSWAPD_RECLAIM)
4213                 alloc_flags |= ALLOC_KSWAPD;
4214 
4215 #ifdef CONFIG_CMA
4216         if (gfpflags_to_migratetype(gfp_mask) == MIGRATE_MOVABLE)
4217                 alloc_flags |= ALLOC_CMA;
4218 #endif
4219         return alloc_flags;
4220 }
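
As a reading aid for the mapping above, a rough sketch of what two common masks end up as (derived from the code here; assumes a non-realtime task, no CMA-eligible movable request, and the usual composition of GFP_KERNEL and GFP_ATOMIC):

        gfp_to_alloc_flags(GFP_KERNEL)
                == ALLOC_WMARK_MIN | ALLOC_CPUSET | ALLOC_KSWAPD;
        gfp_to_alloc_flags(GFP_ATOMIC)  /* __GFP_HIGH|__GFP_ATOMIC|__GFP_KSWAPD_RECLAIM */
                == ALLOC_WMARK_MIN | ALLOC_HIGH | ALLOC_HARDER | ALLOC_KSWAPD;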
4221 
4222 static bool oom_reserves_allowed(struct task_struct *tsk)
4223 {
4224         if (!tsk_is_oom_victim(tsk))
4225                 return false;
4226 
4227         /*
4228          * !MMU doesn't have oom reaper so give access to memory reserves
4229          * only to the thread with TIF_MEMDIE set
4230          */
4231         if (!IS_ENABLED(CONFIG_MMU) && !test_thread_flag(TIF_MEMDIE))
4232                 return false;
4233 
4234         return true;
4235 }
4236 
4237 /*
4238  * Distinguish requests which really need access to full memory
4239  * reserves from oom victims which can live with a portion of it
4240  */
4241 static inline int __gfp_pfmemalloc_flags(gfp_t gfp_mask)
4242 {
4243         if (unlikely(gfp_mask & __GFP_NOMEMALLOC))
4244                 return 0;
4245         if (gfp_mask & __GFP_MEMALLOC)
4246                 return ALLOC_NO_WATERMARKS;
4247         if (in_serving_softirq() && (current->flags & PF_MEMALLOC))
4248                 return ALLOC_NO_WATERMARKS;
4249         if (!in_interrupt()) {
4250                 if (current->flags & PF_MEMALLOC)
4251                         return ALLOC_NO_WATERMARKS;
4252                 else if (oom_reserves_allowed(current))
4253                         return ALLOC_OOM;
4254         }
4255 
4256         return 0;
4257 }
4258 
4259 bool gfp_pfmemalloc_allowed(gfp_t gfp_mask)
4260 {
4261         return !!__gfp_pfmemalloc_flags(gfp_mask);
4262 }
4263 
4264 /*
4265  * Checks whether it makes sense to retry the reclaim to make a forward progress
4266  * for the given allocation request.
4267  *
4268  * We give up when we either have tried MAX_RECLAIM_RETRIES in a row
4269  * without success, or when we couldn't even meet the watermark if we
4270  * reclaimed all remaining pages on the LRU lists.
4271  *
4272  * Returns true if a retry is viable or false to enter the oom path.
4273  */
4274 static inline bool
4275 should_reclaim_retry(gfp_t gfp_mask, unsigned order,
4276                      struct alloc_context *ac, int alloc_flags,
4277                      bool did_some_progress, int *no_progress_loops)
4278 {
4279         struct zone *zone;
4280         struct zoneref *z;
4281         bool ret = false;
4282 
4283         /*
4284          * Costly allocations might have made progress but this doesn't mean
4285          * their order will become available due to high fragmentation, so
4286          * always increment the no-progress counter for them
4287          */
4288         if (did_some_progress && order <= PAGE_ALLOC_COSTLY_ORDER)
4289                 *no_progress_loops = 0;
4290         else
4291                 (*no_progress_loops)++;
4292 
4293         /*
4294          * Make sure we converge to OOM if we cannot make any progress
4295          * several times in a row.
4296          */
4297         if (*no_progress_loops > MAX_RECLAIM_RETRIES) {
4298                 /* Before OOM, exhaust highatomic_reserve */
4299                 return unreserve_highatomic_pageblock(ac, true);
4300         }
4301 
4302         /*
4303          * Keep reclaiming pages while there is a chance this will lead
4304          * somewhere.  If none of the target zones can satisfy our allocation
4305          * request even if all reclaimable pages are considered then we are
4306          * screwed and have to go OOM.
4307          */
4308         for_each_zone_zonelist_nodemask(zone, z, ac->zonelist, ac->high_zoneidx,
4309                                         ac->nodemask) {
4310                 unsigned long available;
4311                 unsigned long reclaimable;
4312                 unsigned long min_wmark = min_wmark_pages(zone);
4313                 bool wmark;
4314 
4315                 available = reclaimable = zone_reclaimable_pages(zone);
4316                 available += zone_page_state_snapshot(zone, NR_FREE_PAGES);
4317 
4318                 /*
4319                  * Would the allocation succeed if we reclaimed all
4320                  * reclaimable pages?
4321                  */
4322                 wmark = __zone_watermark_ok(zone, order, min_wmark,
4323                                 ac_classzone_idx(ac), alloc_flags, available);
4324                 trace_reclaim_retry_zone(z, order, reclaimable,
4325                                 available, min_wmark, *no_progress_loops, wmark);
4326                 if (wmark) {
4327                         /*
4328                          * If we didn't make any progress and have a lot of
4329                          * dirty + writeback pages then we should wait for
4330                          * an IO to complete to slow down the reclaim and
4331                          * prevent premature OOM
4332                          */
4333                         if (!did_some_progress) {
4334                                 unsigned long write_pending;
4335 
4336                                 write_pending = zone_page_state_snapshot(zone,
4337                                                         NR_ZONE_WRITE_PENDING);
4338 
4339                                 if (2 * write_pending > reclaimable) {
4340                                         congestion_wait(BLK_RW_ASYNC, HZ/10);
4341                                         return true;
4342                                 }
4343                         }
4344 
4345                         ret = true;
4346                         goto out;
4347                 }
4348         }
4349 
4350 out:
4351         /*
4352          * Memory allocation/reclaim might be called from a WQ context and the
4353          * current implementation of the WQ concurrency control doesn't
4354          * recognize that a particular WQ is congested if the worker thread is
4355          * looping without ever sleeping. Therefore we have to do a short sleep
4356          * here rather than calling cond_resched().
4357          */
4358         if (current->flags & PF_WQ_WORKER)
4359                 schedule_timeout_uninterruptible(1);
4360         else
4361                 cond_resched();
4362         return ret;
4363 }
4364 
4365 static inline bool
4366 check_retry_cpuset(int cpuset_mems_cookie, struct alloc_context *ac)
4367 {
4368         /*
4369          * It's possible that cpuset's mems_allowed and the nodemask from
4370          * mempolicy don't intersect. This should be normally dealt with by
4371          * policy_nodemask(), but it's possible to race with cpuset update in
4372          * such a way the check therein was true, and then it became false
4373          * before we got our cpuset_mems_cookie here.
4374          * This assumes that for all allocations, ac->nodemask can come only
4375          * from MPOL_BIND mempolicy (whose documented semantics is to be ignored
4376          * when it does not intersect with the cpuset restrictions) or the
4377          * caller can deal with a violated nodemask.
4378          */
4379         if (cpusets_enabled() && ac->nodemask &&
4380                         !cpuset_nodemask_valid_mems_allowed(ac->nodemask)) {
4381                 ac->nodemask = NULL;
4382                 return true;
4383         }
4384 
4385         /*
4386          * When updating a task's mems_allowed or mempolicy nodemask, it is
4387          * possible to race with parallel threads in such a way that our
4388          * allocation can fail while the mask is being updated. If we are about
4389          * to fail, check if the cpuset changed during allocation and if so,
4390          * retry.
4391          */
4392         if (read_mems_allowed_retry(cpuset_mems_cookie))
4393                 return true;
4394 
4395         return false;
4396 }
4397 
4398 static inline struct page *
4399 __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order,
4400                                                 struct alloc_context *ac)
4401 {
4402         bool can_direct_reclaim = gfp_mask & __GFP_DIRECT_RECLAIM;
4403         const bool costly_order = order > PAGE_ALLOC_COSTLY_ORDER;
4404         struct page *page = NULL;
4405         unsigned int alloc_flags;
4406         unsigned long did_some_progress;
4407         enum compact_priority compact_priority;
4408         enum compact_result compact_result;
4409         int compaction_retries;
4410         int no_progress_loops;
4411         unsigned int cpuset_mems_cookie;
4412         int reserve_flags;
4413 
4414         /*
4415          * We also sanity check to catch abuse of atomic reserves being used by
4416          * callers that are not in atomic context.
4417          */
4418         if (WARN_ON_ONCE((gfp_mask & (__GFP_ATOMIC|__GFP_DIRECT_RECLAIM)) ==
4419                                 (__GFP_ATOMIC|__GFP_DIRECT_RECLAIM)))
4420                 gfp_mask &= ~__GFP_ATOMIC;
4421 
4422 retry_cpuset:
4423         compaction_retries = 0;
4424         no_progress_loops = 0;
4425         compact_priority = DEF_COMPACT_PRIORITY;
4426         cpuset_mems_cookie = read_mems_allowed_begin();
4427 
4428         /*
4429          * The fast path uses conservative alloc_flags to succeed only until
4430          * kswapd needs to be woken up, and to avoid the cost of setting up
4431          * alloc_flags precisely. So we do that now.
4432          */
4433         alloc_flags = gfp_to_alloc_flags(gfp_mask);
4434 
4435         /*
4436          * We need to recalculate the starting point for the zonelist iterator
4437          * because we might have used different nodemask in the fast path, or
4438          * there was a cpuset modification and we are retrying - otherwise we
4439          * could end up iterating over non-eligible zones endlessly.
4440          */
4441         ac->preferred_zoneref = first_zones_zonelist(ac->zonelist,
4442                                         ac->high_zoneidx, ac->nodemask);
4443         if (!ac->preferred_zoneref->zone)
4444                 goto nopage;
4445 
4446         if (alloc_flags & ALLOC_KSWAPD)
4447                 wake_all_kswapds(order, gfp_mask, ac);
4448 
4449         /*
4450          * The adjusted alloc_flags might result in immediate success, so try
4451          * that first
4452          */
4453         page = get_page_from_freelist(gfp_mask, order, alloc_flags, ac);
4454         if (page)
4455                 goto got_pg;
4456 
4457         /*
4458          * For costly allocations, try direct compaction first, as it's likely
4459          * that we have enough base pages and don't need to reclaim. For non-
4460          * movable high-order allocations, do that as well, as compaction will
4461          * try to prevent permanent fragmentation by migrating from blocks of the
4462          * same migratetype.
4463          * Don't try this for allocations that are allowed to ignore
4464          * watermarks, as the ALLOC_NO_WATERMARKS attempt didn't yet happen.
4465          */
4466         if (can_direct_reclaim &&
4467                         (costly_order ||
4468                            (order > 0 && ac->migratetype != MIGRATE_MOVABLE))
4469                         && !gfp_pfmemalloc_allowed(gfp_mask)) {
4470                 page = __alloc_pages_direct_compact(gfp_mask, order,
4471                                                 alloc_flags, ac,
4472                                                 INIT_COMPACT_PRIORITY,
4473                                                 &compact_result);
4474                 if (page)
4475                         goto got_pg;
4476 
4477                  if (order >= pageblock_order && (gfp_mask & __GFP_IO) &&
4478                      !(gfp_mask & __GFP_RETRY_MAYFAIL)) {
4479                         /*
4480                          * If allocating entire pageblock(s) and compaction
4481                          * failed because all zones are below low watermarks
4482                          * or is prohibited because it recently failed at this
4483                          * order, fail immediately unless the allocator has
4484                          * requested compaction and reclaim retry.
4485                          *
4486                          * Reclaim is
4487                          *  - potentially very expensive because zones are far
4488                          *    below their low watermarks or this is part of very
4489                          *    bursty high order allocations,
4490                          *  - not guaranteed to help because isolate_freepages()
4491                          *    may not iterate over freed pages as part of its
4492                          *    linear scan, and
4493                          *  - unlikely to make entire pageblocks free on its
4494                          *    own.
4495                          */
4496                         if (compact_result == COMPACT_SKIPPED ||
4497                             compact_result == COMPACT_DEFERRED)
4498                                 goto nopage;
4499                 }
4500 
4501                 /*
4502                  * Checks for costly allocations with __GFP_NORETRY, which
4503                  * includes THP page fault allocations
4504                  */
4505                 if (costly_order && (gfp_mask & __GFP_NORETRY)) {
4506                         /*
4507                          * If compaction is deferred for high-order allocations,
4508                          * it is because sync compaction recently failed. If
4509                          * this is the case and the caller requested a THP
4510                          * allocation, we do not want to heavily disrupt the
4511                          * system, so we fail the allocation instead of entering
4512                          * direct reclaim.
4513                          */
4514                         if (compact_result == COMPACT_DEFERRED)
4515                                 goto nopage;
4516 
4517                         /*
4518                          * Looks like reclaim/compaction is worth trying, but
4519                          * sync compaction could be very expensive, so keep
4520                          * using async compaction.
4521                          */
4522                         compact_priority = INIT_COMPACT_PRIORITY;
4523                 }
4524         }
4525 
4526 retry:
4527         /* Ensure kswapd doesn't accidentally go to sleep as long as we loop */
4528         if (alloc_flags & ALLOC_KSWAPD)
4529                 wake_all_kswapds(order, gfp_mask, ac);
4530 
4531         reserve_flags = __gfp_pfmemalloc_flags(gfp_mask);
4532         if (reserve_flags)
4533                 alloc_flags = reserve_flags;
4534 
4535         /*
4536          * Reset the nodemask and zonelist iterators if memory policies can be
4537          * ignored. These allocations are high priority and system rather than
4538          * user oriented.
4539          */
4540         if (!(alloc_flags & ALLOC_CPUSET) || reserve_flags) {
4541                 ac->nodemask = NULL;
4542                 ac->preferred_zoneref = first_zones_zonelist(ac->zonelist,
4543                                         ac->high_zoneidx, ac->nodemask);
4544         }
4545 
4546         /* Attempt with potentially adjusted zonelist and alloc_flags */
4547         page = get_page_from_freelist(gfp_mask, order, alloc_flags, ac);
4548         if (page)
4549                 goto got_pg;
4550 
4551         /* Caller is not willing to reclaim, we can't balance anything */
4552         if (!can_direct_reclaim)
4553                 goto nopage;
4554 
4555         /* Avoid recursion of direct reclaim */
4556         if (current->flags & PF_MEMALLOC)
4557                 goto nopage;
4558 
4559         /* Try direct reclaim and then allocating */
4560         page = __alloc_pages_direct_reclaim(gfp_mask, order, alloc_flags, ac,
4561                                                         &did_some_progress);
4562         if (page)
4563                 goto got_pg;
4564 
4565         /* Try direct compaction and then allocating */
4566         page = __alloc_pages_direct_compact(gfp_mask, order, alloc_flags, ac,
4567                                         compact_priority, &compact_result);
4568         if (page)
4569                 goto got_pg;
4570 
4571         /* Do not loop if specifically requested */
4572         if (gfp_mask & __GFP_NORETRY)
4573                 goto nopage;
4574 
4575         /*
4576          * Do not retry costly high order allocations unless they are
4577          * __GFP_RETRY_MAYFAIL
4578          */
4579         if (costly_order && !(gfp_mask & __GFP_RETRY_MAYFAIL))
4580                 goto nopage;
4581 
4582         if (should_reclaim_retry(gfp_mask, order, ac, alloc_flags,
4583                                  did_some_progress > 0, &no_progress_loops))
4584                 goto retry;
4585 
4586         /*
4587          * It doesn't make any sense to retry the compaction if the order-0
4588          * reclaim is not able to make any progress because the current
4589          * implementation of compaction depends on a sufficient amount
4590          * of free memory (see __compaction_suitable)
4591          */
4592         if (did_some_progress > 0 &&
4593                         should_compact_retry(ac, order, alloc_flags,
4594                                 compact_result, &compact_priority,
4595                                 &compaction_retries))
4596                 goto retry;
4597 
4598 
4599         /* Deal with possible cpuset update races before we start OOM killing */
4600         if (check_retry_cpuset(cpuset_mems_cookie, ac))
4601                 goto retry_cpuset;
4602 
4603         /* Reclaim has failed us, start killing things */
4604         page = __alloc_pages_may_oom(gfp_mask, order, ac, &did_some_progress);
4605         if (page)
4606                 goto got_pg;
4607 
4608         /* Avoid allocations with no watermarks from looping endlessly */
4609         if (tsk_is_oom_victim(current) &&
4610             (alloc_flags == ALLOC_OOM ||
4611              (gfp_mask & __GFP_NOMEMALLOC)))
4612                 goto nopage;
4613 
4614         /* Retry as long as the OOM killer is making progress */
4615         if (did_some_progress) {
4616                 no_progress_loops = 0;
4617                 goto retry;
4618         }
4619 
4620 nopage:
4621         /* Deal with possible cpuset update races before we fail */
4622         if (check_retry_cpuset(cpuset_mems_cookie, ac))
4623                 goto retry_cpuset;
4624 
4625         /*
4626          * Make sure that __GFP_NOFAIL request doesn't leak out and make sure
4627          * we always retry
4628          */
4629         if (gfp_mask & __GFP_NOFAIL) {
4630                 /*
4631                  * All existing users of __GFP_NOFAIL are blockable, so warn
4632                  * about any new users that actually require GFP_NOWAIT
4633                  */
4634                 if (WARN_ON_ONCE(!can_direct_reclaim))
4635                         goto fail;
4636 
4637                 /*
4638                  * A PF_MEMALLOC request from this context is rather bizarre
4639                  * because we cannot reclaim anything and can only loop waiting
4640                  * for somebody to do the work for us
4641                  */
4642                 WARN_ON_ONCE(current->flags & PF_MEMALLOC);
4643 
4644                 /*
4645                  * Non-failing costly orders are a hard requirement which we
4646                  * are not well prepared for, so warn about these users
4647                  * so that we can identify them and convert them to something
4648                  * else.
4649                  */
4650                 WARN_ON_ONCE(order > PAGE_ALLOC_COSTLY_ORDER);
4651 
4652                 /*
4653                  * Help non-failing allocations by giving them access to memory
4654                  * reserves but do not use ALLOC_NO_WATERMARKS because this
4655                  * could deplete whole memory reserves which would just make
4656                  * the situation worse
4657                  */
4658                 page = __alloc_pages_cpuset_fallback(gfp_mask, order, ALLOC_HARDER, ac);
4659                 if (page)
4660                         goto got_pg;
4661 
4662                 cond_resched();
4663                 goto retry;
4664         }
4665 fail:
4666         warn_alloc(gfp_mask, ac->nodemask,
4667                         "page allocation failure: order:%u", order);
4668 got_pg:
4669         return page;
4670 }
4671 
4672 static inline bool prepare_alloc_pages(gfp_t gfp_mask, unsigned int order,
4673                 int preferred_nid, nodemask_t *nodemask,
4674                 struct alloc_context *ac, gfp_t *alloc_mask,
4675                 unsigned int *alloc_flags)
4676 {
4677         ac->high_zoneidx = gfp_zone(gfp_mask);
4678         ac->zonelist = node_zonelist(preferred_nid, gfp_mask);
4679         ac->nodemask = nodemask;
4680         ac->migratetype = gfpflags_to_migratetype(gfp_mask);
4681 
4682         if (cpusets_enabled()) {
4683                 *alloc_mask |= __GFP_HARDWALL;
4684                 if (!ac->nodemask)
4685                         ac->nodemask = &cpuset_current_mems_allowed;
4686                 else
4687                         *alloc_flags |= ALLOC_CPUSET;
4688         }
4689 
4690         fs_reclaim_acquire(gfp_mask);
4691         fs_reclaim_release(gfp_mask);
4692 
4693         might_sleep_if(gfp_mask & __GFP_DIRECT_RECLAIM);
4694 
4695         if (should_fail_alloc_page(gfp_mask, order))
4696                 return false;
4697 
4698         if (IS_ENABLED(CONFIG_CMA) && ac->migratetype == MIGRATE_MOVABLE)
4699                 *alloc_flags |= ALLOC_CMA;
4700 
4701         return true;
4702 }
4703 
4704 /* Determine whether to spread dirty pages and what the first usable zone is */
4705 static inline void finalise_ac(gfp_t gfp_mask, struct alloc_context *ac)
4706 {
4707         /* Dirty zone balancing only done in the fast path */
4708         ac->spread_dirty_pages = (gfp_mask & __GFP_WRITE);
4709 
4710         /*
4711          * The preferred zone is used for statistics but crucially it is
4712          * also used as the starting point for the zonelist iterator. It
4713          * may get reset for allocations that ignore memory policies.
4714          */
4715         ac->preferred_zoneref = first_zones_zonelist(ac->zonelist,
4716                                         ac->high_zoneidx, ac->nodemask);
4717 }
4718 
4719 /*
4720  * This is the 'heart' of the zoned buddy allocator.
4721  */
4722 struct page *
4723 __alloc_pages_nodemask(gfp_t gfp_mask, unsigned int order, int preferred_nid,
4724                                                         nodemask_t *nodemask)
4725 {
4726         struct page *page;
4727         unsigned int alloc_flags = ALLOC_WMARK_LOW;
4728         gfp_t alloc_mask; /* The gfp_t that was actually used for allocation */
4729         struct alloc_context ac = { };
4730 
4731         /*
4732          * There are several places where we assume that the order value is sane
4733          * so bail out early if the request is out of bounds.
4734          */
4735         if (unlikely(order >= MAX_ORDER)) {
4736                 WARN_ON_ONCE(!(gfp_mask & __GFP_NOWARN));
4737                 return NULL;
4738         }
4739 
4740         gfp_mask &= gfp_allowed_mask;
4741         alloc_mask = gfp_mask;
4742         if (!prepare_alloc_pages(gfp_mask, order, preferred_nid, nodemask, &ac, &alloc_mask, &alloc_flags))
4743                 return NULL;
4744 
4745         finalise_ac(gfp_mask, &ac);
4746 
4747         /*
4748          * Forbid the first pass from falling back to types that fragment
4749          * memory until all local zones are considered.
4750          */
4751         alloc_flags |= alloc_flags_nofragment(ac.preferred_zoneref->zone, gfp_mask);
4752 
4753         /* First allocation attempt */
4754         page = get_page_from_freelist(alloc_mask, order, alloc_flags, &ac);
4755         if (likely(page))
4756                 goto out;
4757 
4758         /*
4759          * Apply scoped allocation constraints. This is mainly about GFP_NOFS
4760          * resp. GFP_NOIO which has to be inherited for all allocation requests
4761          * from a particular context which has been marked by
4762          * memalloc_no{fs,io}_{save,restore}.
4763          */
4764         alloc_mask = current_gfp_context(gfp_mask);
4765         ac.spread_dirty_pages = false;
4766 
4767         /*
4768          * Restore the original nodemask if it was potentially replaced with
4769          * &cpuset_current_mems_allowed to optimize the fast-path attempt.
4770          */
4771         if (unlikely(ac.nodemask != nodemask))
4772                 ac.nodemask = nodemask;
4773 
4774         page = __alloc_pages_slowpath(alloc_mask, order, &ac);
4775 
4776 out:
4777         if (memcg_kmem_enabled() && (gfp_mask & __GFP_ACCOUNT) && page &&
4778             unlikely(__memcg_kmem_charge(page, gfp_mask, order) != 0)) {
4779                 __free_pages(page, order);
4780                 page = NULL;
4781         }
4782 
4783         trace_mm_page_alloc(page, order, alloc_mask, ac.migratetype);
4784 
4785         return page;
4786 }
4787 EXPORT_SYMBOL(__alloc_pages_nodemask);
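
For orientation, a minimal sketch of a typical caller of this entry point; alloc_pages() and __free_pages() are the standard wrappers that funnel into __alloc_pages_nodemask(), and the buffer use in between is hypothetical:

        /* allocate four physically contiguous pages (order 2) */
        struct page *page = alloc_pages(GFP_KERNEL | __GFP_NOWARN, 2);

        if (page) {
                void *buf = page_address(page); /* linear-mapped address of the block */

                memset(buf, 0, 4 * PAGE_SIZE);
                __free_pages(page, 2);
        }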
4788 
4789 /*
4790  * Common helper functions. Never use with __GFP_HIGHMEM because the returned
4791  * address cannot represent highmem pages. Use alloc_pages and then kmap if
4792  * you need to access high mem.
4793  */
4794 unsigned long __get_free_pages(gfp_t gfp_mask, unsigned int order)
4795 {
4796         struct page *page;
4797 
4798         page = alloc_pages(gfp_mask & ~__GFP_HIGHMEM, order);
4799         if (!page)
4800                 return 0;
4801         return (unsigned long) page_address(page);
4802 }
4803 EXPORT_SYMBOL(__get_free_pages);
4804 
4805 unsigned long get_zeroed_page(gfp_t gfp_mask)
4806 {
4807         return __get_free_pages(gfp_mask | __GFP_ZERO, 0);
4808 }
4809 EXPORT_SYMBOL(get_zeroed_page);
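
A short usage sketch for the two helpers above (standard API; the surrounding context is hypothetical). The return value is a kernel virtual address, so it is paired with free_pages()/free_page(), not kfree():

        unsigned long addr = __get_free_pages(GFP_KERNEL, 1);  /* two contiguous pages */
        unsigned long zeroed = get_zeroed_page(GFP_KERNEL);    /* one zero-filled page */

        if (addr)
                free_pages(addr, 1);
        if (zeroed)
                free_page(zeroed);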
4810 
4811 static inline void free_the_page(struct page *page, unsigned int order)
4812 {
4813         if (order == 0)         /* Via pcp? */
4814                 free_unref_page(page);
4815         else
4816                 __free_pages_ok(page, order);
4817 }
4818 
4819 void __free_pages(struct page *page, unsigned int order)
4820 {
4821         if (put_page_testzero(page))
4822                 free_the_page(page, order);
4823 }
4824 EXPORT_SYMBOL(__free_pages);
4825 
4826 void free_pages(unsigned long addr, unsigned int order)
4827 {
4828         if (addr != 0) {
4829                 VM_BUG_ON(!virt_addr_valid((void *)addr));
4830                 __free_pages(virt_to_page((void *)addr), order);
4831         }
4832 }
4833 
4834 EXPORT_SYMBOL(free_pages);
4835 
4836 /*
4837  * Page Fragment:
4838  *  An arbitrary-length arbitrary-offset area of memory which resides
4839  *  within a 0 or higher order page.  Multiple fragments within that page
4840  *  are individually refcounted, in the page's reference counter.
4841  *
4842  * The page_frag functions below provide a simple allocation framework for
4843  * page fragments.  This is used by the network stack and network device
4844  * drivers to provide a backing region of memory for use as either an
4845  * sk_buff->head, or to be used in the "frags" portion of skb_shared_info.
4846  */
4847 static struct page *__page_frag_cache_refill(struct page_frag_cache *nc,
4848                                              gfp_t gfp_mask)
4849 {
4850         struct page *page = NULL;
4851         gfp_t gfp = gfp_mask;
4852 
4853 #if (PAGE_SIZE < PAGE_FRAG_CACHE_MAX_SIZE)
4854         gfp_mask |= __GFP_COMP | __GFP_NOWARN | __GFP_NORETRY |
4855                     __GFP_NOMEMALLOC;
4856         page = alloc_pages_node(NUMA_NO_NODE, gfp_mask,
4857                                 PAGE_FRAG_CACHE_MAX_ORDER);
4858         nc->size = page ? PAGE_FRAG_CACHE_MAX_SIZE : PAGE_SIZE;
4859 #endif
4860         if (unlikely(!page))
4861                 page = alloc_pages_node(NUMA_NO_NODE, gfp, 0);
4862 
4863         nc->va = page ? page_address(page) : NULL;
4864 
4865         return page;
4866 }
4867 
4868 void __page_frag_cache_drain(struct page *page, unsigned int count)
4869 {
4870         VM_BUG_ON_PAGE(page_ref_count(page) == 0, page);
4871 
4872         if (page_ref_sub_and_test(page, count))
4873                 free_the_page(page, compound_order(page));
4874 }
4875 EXPORT_SYMBOL(__page_frag_cache_drain);
4876 
4877 void *page_frag_alloc(struct page_frag_cache *nc,
4878                       unsigned int fragsz, gfp_t gfp_mask)
4879 {
4880         unsigned int size = PAGE_SIZE;
4881         struct page *page;
4882         int offset;
4883 
4884         if (unlikely(!nc->va)) {
4885 refill:
4886                 page = __page_frag_cache_refill(nc, gfp_mask);
4887                 if (!page)
4888                         return NULL;
4889 
4890 #if (PAGE_SIZE < PAGE_FRAG_CACHE_MAX_SIZE)
4891                 /* if size can vary use size else just use PAGE_SIZE */
4892                 size = nc->size;
4893 #endif
4894                 /* Even if we own the page, we do not use atomic_set().
4895                  * This would break get_page_unless_zero() users.
4896                  */
4897                 page_ref_add(page, PAGE_FRAG_CACHE_MAX_SIZE);
4898 
4899                 /* reset page count bias and offset to start of new frag */
4900                 nc->pfmemalloc = page_is_pfmemalloc(page);
4901                 nc->pagecnt_bias = PAGE_FRAG_CACHE_MAX_SIZE + 1;
4902                 nc->offset = size;
4903         }
4904 
4905         offset = nc->offset - fragsz;
4906         if (unlikely(offset < 0)) {
4907                 page = virt_to_page(nc->va);
4908 
4909                 if (!page_ref_sub_and_test(page, nc->pagecnt_bias))
4910                         goto refill;
4911 
4912 #if (PAGE_SIZE < PAGE_FRAG_CACHE_MAX_SIZE)
4913                 /* if size can vary use size else just use PAGE_SIZE */
4914                 size = nc->size;
4915 #endif
4916                 /* OK, page count is 0, we can safely set it */
4917                 set_page_count(page, PAGE_FRAG_CACHE_MAX_SIZE + 1);
4918 
4919                 /* reset page count bias and offset to start of new frag */
4920                 nc->pagecnt_bias = PAGE_FRAG_CACHE_MAX_SIZE + 1;
4921                 offset = size - fragsz;
4922         }
4923 
4924         nc->pagecnt_bias--;
4925         nc->offset = offset;
4926 
4927         return nc->va + offset;
4928 }
4929 EXPORT_SYMBOL(page_frag_alloc);
4930 
4931 /*
4932  * Frees a page fragment allocated out of either a compound or order 0 page.
4933  */
4934 void page_frag_free(void *addr)
4935 {
4936         struct page *page = virt_to_head_page(addr);
4937 
4938         if (unlikely(put_page_testzero(page)))
4939                 free_the_page(page, compound_order(page));
4940 }
4941 EXPORT_SYMBOL(page_frag_free);
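
A condensed sketch of how a network driver might use the page_frag API above (the cache would normally live in per-CPU or per-ring state; rx_frag_cache and rx_frag_example are hypothetical names):

        static struct page_frag_cache rx_frag_cache;

        static void rx_frag_example(void)
        {
                void *data = page_frag_alloc(&rx_frag_cache, 256, GFP_ATOMIC);

                if (!data)
                        return;
                /* ... hand the 256-byte fragment to an skb ... */
                page_frag_free(data);   /* drops this fragment's reference */
        }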
4942 
4943 static void *make_alloc_exact(unsigned long addr, unsigned int order,
4944                 size_t size)
4945 {
4946         if (addr) {
4947                 unsigned long alloc_end = addr + (PAGE_SIZE << order);
4948                 unsigned long used = addr + PAGE_ALIGN(size);
4949 
4950                 split_page(virt_to_page((void *)addr), order);
4951                 while (used < alloc_end) {
4952                         free_page(used);
4953                         used += PAGE_SIZE;
4954                 }
4955         }
4956         return (void *)addr;
4957 }
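
A worked example of the helper above, assuming 4 KiB pages: for size = 10 KiB the caller allocates order 2 (16 KiB), split_page() turns that block into four independent order-0 pages, and the loop frees everything beyond PAGE_ALIGN(10 KiB) = 12 KiB, i.e. the last page, so exactly three pages remain behind the returned address.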
4958 
4959 /**
4960  * alloc_pages_exact - allocate an exact number of physically-contiguous pages.
4961  * @size: the number of bytes to allocate
4962  * @gfp_mask: GFP flags for the allocation, must not contain __GFP_COMP
4963  *
4964  * This function is similar to alloc_pages(), except that it allocates the
4965  * minimum number of pages to satisfy the request.  alloc_pages() can only
4966  * allocate memory in power-of-two pages.
4967  *
4968  * This function is also limited by MAX_ORDER.
4969  *
4970  * Memory allocated by this function must be released by free_pages_exact().
4971  *
4972  * Return: pointer to the allocated area or %NULL in case of error.
4973  */
4974 void *alloc_pages_exact(size_t size, gfp_t gfp_mask)
4975 {
4976         unsigned int order = get_order(size);
4977         unsigned long addr;
4978 
4979         if (WARN_ON_ONCE(gfp_mask & __GFP_COMP))
4980                 gfp_mask &= ~__GFP_COMP;
4981 
4982         addr = __get_free_pages(gfp_mask, order);
4983         return make_alloc_exact(addr, order, size);
4984 }
4985 EXPORT_SYMBOL(alloc_pages_exact);
4986 
4987 /**
4988  * alloc_pages_exact_nid - allocate an exact number of physically-contiguous
4989  *                         pages on a node.
4990  * @nid: the preferred node ID where memory should be allocated
4991  * @size: the number of bytes to allocate
4992  * @gfp_mask: GFP flags for the allocation, must not contain __GFP_COMP
4993  *
4994  * Like alloc_pages_exact(), but try to allocate on node nid first before falling
4995  * back.
4996  *
4997  * Return: pointer to the allocated area or %NULL in case of error.
4998  */
4999 void * __meminit alloc_pages_exact_nid(int nid, size_t size, gfp_t gfp_mask)
5000 {
5001         unsigned int order = get_order(size);
5002         struct page *p;
5003 
5004         if (WARN_ON_ONCE(gfp_mask & __GFP_COMP))
5005                 gfp_mask &= ~__GFP_COMP;
5006 
5007         p = alloc_pages_node(nid, gfp_mask, order);
5008         if (!p)
5009                 return NULL;
5010         return make_alloc_exact((unsigned long)page_address(p), order, size);
5011 }
5012 
5013 /**
5014  * free_pages_exact - release memory allocated via alloc_pages_exact()
5015  * @virt: the value returned by alloc_pages_exact.
5016  * @size: size of allocation, same value as passed to alloc_pages_exact().
5017  *
5018  * Release the memory allocated by a previous call to alloc_pages_exact.
5019  */
5020 void free_pages_exact(void *virt, size_t size)
5021 {
5022         unsigned long addr = (unsigned long)virt;
5023         unsigned long end = addr + PAGE_ALIGN(size);
5024 
5025         while (addr < end) {
5026                 free_page(addr);
5027                 addr += PAGE_SIZE;
5028         }
5029 }
5030 EXPORT_SYMBOL(free_pages_exact);
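
Putting the pair above together, a minimal usage sketch (standard API; the 10 KiB size is only an example):

        void *buf = alloc_pages_exact(10 * 1024, GFP_KERNEL | __GFP_ZERO);

        if (buf) {
                /* buf covers exactly three whole pages on a 4 KiB-page system */
                free_pages_exact(buf, 10 * 1024);
        }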
5031 
5032 /**
5033  * nr_free_zone_pages - count number of pages beyond high watermark
5034  * @offset: The zone index of the highest zone
5035  *
5036  * nr_free_zone_pages() counts the number of pages which are beyond the
5037  * high watermark within all zones at or below a given zone index.  For each
5038  * zone, the number of pages is calculated as:
5039  *
5040  *     nr_free_zone_pages = managed_pages - high_pages
5041  *
5042  * Return: number of pages beyond high watermark.
5043  */
5044 static unsigned long nr_free_zone_pages(int offset)
5045 {
5046         struct zoneref *z;
5047         struct zone *zone;
5048 
5049         /* Just pick one node, since fallback list is circular */
5050         unsigned long sum = 0;
5051 
5052         struct zonelist *zonelist = node_zonelist(numa_node_id(), GFP_KERNEL);
5053 
5054         for_each_zone_zonelist(zone, z, zonelist, offset) {
5055                 unsigned long size = zone_managed_pages(zone);
5056                 unsigned long high = high_wmark_pages(zone);
5057                 if (size > high)
5058                         sum += size - high;
5059         }
5060 
5061         return sum;
5062 }
5063 
5064 /**
5065  * nr_free_buffer_pages - count number of pages beyond high watermark
5066  *
5067  * nr_free_buffer_pages() counts the number of pages which are beyond the high
5068  * watermark within ZONE_DMA and ZONE_NORMAL.
5069  *
5070  * Return: number of pages beyond high watermark within ZONE_DMA and
5071  * ZONE_NORMAL.
5072  */
5073 unsigned long nr_free_buffer_pages(void)
5074 {
5075         return nr_free_zone_pages(gfp_zone(GFP_USER));
5076 }
5077 EXPORT_SYMBOL_GPL(nr_free_buffer_pages);
5078 
5079 /**
5080  * nr_free_pagecache_pages - count number of pages beyond high watermark
5081  *
5082  * nr_free_pagecache_pages() counts the number of pages which are beyond the
5083  * high watermark within all zones.
5084  *
5085  * Return: number of pages beyond high watermark within all zones.
5086  */
5087 unsigned long nr_free_pagecache_pages(void)
5088 {
5089         return nr_free_zone_pages(gfp_zone(GFP_HIGHUSER_MOVABLE));
5090 }
5091 
5092 static inline void show_node(struct zone *zone)
5093 {
5094         if (IS_ENABLED(CONFIG_NUMA))
5095                 printk("Node %d ", zone_to_nid(zone));
5096 }
5097 
5098 long si_mem_available(void)
5099 {
5100         long available;
5101         unsigned long pagecache;
5102         unsigned long wmark_low = 0;
5103         unsigned long pages[NR_LRU_LISTS];
5104         unsigned long reclaimable;
5105         struct zone *zone;
5106         int lru;
5107 
5108         for (lru = LRU_BASE; lru < NR_LRU_LISTS; lru++)
5109                 pages[lru] = global_node_page_state(NR_LRU_BASE + lru);
5110 
5111         for_each_zone(zone)
5112                 wmark_low += low_wmark_pages(zone);
5113 
5114         /*
5115          * Estimate the amount of memory available for userspace allocations,
5116          * without causing swapping.
5117          */
5118         available = global_zone_page_state(NR_FREE_PAGES) - totalreserve_pages;
5119 
5120         /*
5121          * Not all the page cache can be freed, otherwise the system will
5122          * start swapping. Assume at least half of the page cache, or the
5123          * low watermark worth of cache, needs to stay.
5124          */
5125         pagecache = pages[LRU_ACTIVE_FILE] + pages[LRU_INACTIVE_FILE];
5126         pagecache -= min(pagecache / 2, wmark_low);
5127         available += pagecache;
5128 
5129         /*
5130          * Part of the reclaimable slab and other kernel memory consists of
5131          * items that are in use, and cannot be freed. Cap this estimate at the
5132          * low watermark.
5133          */
5134         reclaimable = global_node_page_state(NR_SLAB_RECLAIMABLE) +
5135                         global_node_page_state(NR_KERNEL_MISC_RECLAIMABLE);
5136         available += reclaimable - min(reclaimable / 2, wmark_low);
5137 
5138         if (available < 0)
5139                 available = 0;
5140         return available;
5141 }
5142 EXPORT_SYMBOL_GPL(si_mem_available);
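
To make the estimate above concrete, a small worked example with illustrative numbers only: with 100,000 free pages, totalreserve_pages = 10,000, 50,000 file LRU pages, 20,000 reclaimable kernel pages and wmark_low = 8,000, the result is (100,000 - 10,000) + (50,000 - 8,000) + (20,000 - 8,000) = 144,000 pages; half of the page cache (25,000) and half of the reclaimable pages (10,000) both exceed the low watermark, so only wmark_low is held back from each.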
5143 
5144 void si_meminfo(struct sysinfo *val)
5145 {
5146         val->totalram = totalram_pages();
5147         val->sharedram = global_node_page_state(NR_SHMEM);
5148         val->freeram = global_zone_page_state(NR_FREE_PAGES);
5149         val->bufferram = nr_blockdev_pages();
5150         val->totalhigh = totalhigh_pages();
5151         val->freehigh = nr_free_highpages();
5152         val->mem_unit = PAGE_SIZE;
5153 }
5154 
5155 EXPORT_SYMBOL(si_meminfo);
5156 
5157 #ifdef CONFIG_NUMA
5158 void si_meminfo_node(struct sysinfo *val, int nid)
5159 {
5160         int zone_type;          /* needs to be signed */
5161         unsigned long managed_pages = 0;
5162         unsigned long managed_highpages = 0;
5163         unsigned long free_highpages = 0;
5164         pg_data_t *pgdat = NODE_DATA(nid);
5165 
5166         for (zone_type = 0; zone_type < MAX_NR_ZONES; zone_type++)
5167                 managed_pages += zone_managed_pages(&pgdat->node_zones[zone_type]);
5168         val->totalram = managed_pages;
5169         val->sharedram = node_page_state(pgdat, NR_SHMEM);
5170         val->freeram = sum_zone_node_page_state(nid, NR_FREE_PAGES);
5171 #ifdef CONFIG_HIGHMEM
5172         for (zone_type = 0; zone_type < MAX_NR_ZONES; zone_type++) {
5173                 struct zone *zone = &pgdat->node_zones[zone_type];
5174 
5175                 if (is_highmem(zone)) {
5176                         managed_highpages += zone_managed_pages(zone);
5177                         free_highpages += zone_page_state(zone, NR_FREE_PAGES);
5178                 }
5179         }
5180         val->totalhigh = managed_highpages;
5181         val->freehigh = free_highpages;
5182 #else
5183         val->totalhigh = managed_highpages;
5184         val->freehigh = free_highpages;
5185 #endif
5186         val->mem_unit = PAGE_SIZE;
5187 }
5188 #endif
5189 
5190 /*
5191  * Determine whether the node should be displayed or not, depending on whether
5192  * SHOW_MEM_FILTER_NODES was passed to show_free_areas().
5193  */
5194 static bool show_mem_node_skip(unsigned int flags, int nid, nodemask_t *nodemask)
5195 {
5196         if (!(flags & SHOW_MEM_FILTER_NODES))
5197                 return false;
5198 
5199         /*
5200          * no node mask - aka implicit memory numa policy. Do not bother with
5201          * the synchronization - read_mems_allowed_begin - because we do not
5202          * have to be precise here.
5203          */
5204         if (!nodemask)
5205                 nodemask = &cpuset_current_mems_allowed;
5206 
5207         return !node_isset(nid, *nodemask);
5208 }
5209 
5210 #define K(x) ((x) << (PAGE_SHIFT-10))
5211 
5212 static void show_migration_types(unsigned char type)
5213 {
5214         static const char types[MIGRATE_TYPES] = {
5215                 [MIGRATE_UNMOVABLE]     = 'U',
5216                 [MIGRATE_MOVABLE]       = 'M',
5217                 [MIGRATE_RECLAIMABLE]   = 'E',
5218                 [MIGRATE_HIGHATOMIC]    = 'H',
5219 #ifdef CONFIG_CMA
5220                 [MIGRATE_CMA]           = 'C',
5221 #endif
5222 #ifdef CONFIG_MEMORY_ISOLATION
5223                 [MIGRATE_ISOLATE]       = 'I',
5224 #endif
5225         };
5226         char tmp[MIGRATE_TYPES + 1];
5227         char *p = tmp;
5228         int i;
5229 
5230         for (i = 0; i < MIGRATE_TYPES; i++) {
5231                 if (type & (1 << i))
5232                         *p++ = types[i];
5233         }
5234 
5235         *p = '\0';
5236         printk(KERN_CONT "(%s) ", tmp);
5237 }
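
For example, an order whose free lists contain unmovable, movable and reclaimable pages has bits 0-2 set in the type bitmap and is printed as "(UME) " after the per-order count in the dump below.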
5238 
5239 /*
5240  * Show free area list (used inside shift_scroll-lock stuff)
5241  * We also calculate the percentage fragmentation. We do this by counting the
5242  * memory on each free list with the exception of the first item on the list.
5243  *
5244  * Bits in @filter:
5245  * SHOW_MEM_FILTER_NODES: suppress nodes that are not allowed by current's
5246  *   cpuset.
5247  */
5248 void show_free_areas(unsigned int filter, nodemask_t *nodemask)
5249 {
5250         unsigned long free_pcp = 0;
5251         int cpu;
5252         struct zone *zone;
5253         pg_data_t *pgdat;
5254 
5255         for_each_populated_zone(zone) {
5256                 if (show_mem_node_skip(filter, zone_to_nid(zone), nodemask))
5257                         continue;
5258 
5259                 for_each_online_cpu(cpu)
5260                         free_pcp += per_cpu_ptr(zone->pageset, cpu)->pcp.count;
5261         }
5262 
5263         printk("active_anon:%lu inactive_anon:%lu isolated_anon:%lu\n"
5264                 " active_file:%lu inactive_file:%lu isolated_file:%lu\n"
5265                 " unevictable:%lu dirty:%lu writeback:%lu unstable:%lu\n"
5266                 " slab_reclaimable:%lu slab_unreclaimable:%lu\n"
5267                 " mapped:%lu shmem:%lu pagetables:%lu bounce:%lu\n"
5268                 " free:%lu free_pcp:%lu free_cma:%lu\n",
5269                 global_node_page_state(NR_ACTIVE_ANON),
5270                 global_node_page_state(NR_INACTIVE_ANON),
5271                 global_node_page_state(NR_ISOLATED_ANON),
5272                 global_node_page_state(NR_ACTIVE_FILE),
5273                 global_node_page_state(NR_INACTIVE_FILE),
5274                 global_node_page_state(NR_ISOLATED_FILE),
5275                 global_node_page_state(NR_UNEVICTABLE),
5276                 global_node_page_state(NR_FILE_DIRTY),
5277                 global_node_page_state(NR_WRITEBACK),
5278                 global_node_page_state(NR_UNSTABLE_NFS),
5279                 global_node_page_state(NR_SLAB_RECLAIMABLE),
5280                 global_node_page_state(NR_SLAB_UNRECLAIMABLE),
5281                 global_node_page_state(NR_FILE_MAPPED),
5282                 global_node_page_state(NR_SHMEM),
5283                 global_zone_page_state(NR_PAGETABLE),
5284                 global_zone_page_state(NR_BOUNCE),
5285                 global_zone_page_state(NR_FREE_PAGES),
5286                 free_pcp,
5287                 global_zone_page_state(NR_FREE_CMA_PAGES));
5288 
5289         for_each_online_pgdat(pgdat) {
5290                 if (show_mem_node_skip(filter, pgdat->node_id, nodemask))
5291                         continue;
5292 
5293                 printk("Node %d"
5294                         " active_anon:%lukB"
5295                         " inactive_anon:%lukB"
5296                         " active_file:%lukB"
5297                         " inactive_file:%lukB"
5298                         " unevictable:%lukB"
5299                         " isolated(anon):%lukB"
5300                         " isolated(file):%lukB"
5301                         " mapped:%lukB"
5302                         " dirty:%lukB"
5303                         " writeback:%lukB"
5304                         " shmem:%lukB"
5305 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
5306                         " shmem_thp: %lukB"
5307                         " shmem_pmdmapped: %lukB"
5308                         " anon_thp: %lukB"
5309 #endif
5310                         " writeback_tmp:%lukB"
5311                         " unstable:%lukB"
5312                         " all_unreclaimable? %s"
5313                         "\n",
5314                         pgdat->node_id,
5315                         K(node_page_state(pgdat, NR_ACTIVE_ANON)),
5316                         K(node_page_state(pgdat, NR_INACTIVE_ANON)),
5317                         K(node_page_state(pgdat, NR_ACTIVE_FILE)),
5318                         K(node_page_state(pgdat, NR_INACTIVE_FILE)),
5319                         K(node_page_state(pgdat, NR_UNEVICTABLE)),
5320                         K(node_page_state(pgdat, NR_ISOLATED_ANON)),
5321                         K(node_page_state(pgdat, NR_ISOLATED_FILE)),
5322                         K(node_page_state(pgdat, NR_FILE_MAPPED)),
5323                         K(node_page_state(pgdat, NR_FILE_DIRTY)),
5324                         K(node_page_state(pgdat, NR_WRITEBACK)),
5325                         K(node_page_state(pgdat, NR_SHMEM)),
5326 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
5327                         K(node_page_state(pgdat, NR_SHMEM_THPS) * HPAGE_PMD_NR),
5328                         K(node_page_state(pgdat, NR_SHMEM_PMDMAPPED)
5329                                         * HPAGE_PMD_NR),
5330                         K(node_page_state(pgdat, NR_ANON_THPS) * HPAGE_PMD_NR),
5331 #endif
5332                         K(node_page_state(pgdat, NR_WRITEBACK_TEMP)),
5333                         K(node_page_state(pgdat, NR_UNSTABLE_NFS)),
5334                         pgdat->kswapd_failures >= MAX_RECLAIM_RETRIES ?
5335                                 "yes" : "no");
5336         }
5337 
5338         for_each_populated_zone(zone) {
5339                 int i;
5340 
5341                 if (show_mem_node_skip(filter, zone_to_nid(zone), nodemask))
5342                         continue;
5343 
5344                 free_pcp = 0;
5345                 for_each_online_cpu(cpu)
5346                         free_pcp += per_cpu_ptr(zone->pageset, cpu)->pcp.count;
5347 
5348                 show_node(zone);
5349                 printk(KERN_CONT
5350                         "%s"
5351                         " free:%lukB"
5352                         " min:%lukB"
5353                         " low:%lukB"
5354                         " high:%lukB"
5355                         " active_anon:%lukB"
5356                         " inactive_anon:%lukB"
5357                         " active_file:%lukB"
5358                         " inactive_file:%lukB"
5359                         " unevictable:%lukB"
5360                         " writepending:%lukB"
5361                         " present:%lukB"
5362                         " managed:%lukB"
5363                         " mlocked:%lukB"
5364                         " kernel_stack:%lukB"
5365                         " pagetables:%lukB"
5366                         " bounce:%lukB"
5367                         " free_pcp:%lukB"
5368                         " local_pcp:%ukB"
5369                         " free_cma:%lukB"
5370                         "\n",
5371                         zone->name,
5372                         K(zone_page_state(zone, NR_FREE_PAGES)),
5373                         K(min_wmark_pages(zone)),
5374                         K(low_wmark_pages(zone)),
5375                         K(high_wmark_pages(zone)),
5376                         K(zone_page_state(zone, NR_ZONE_ACTIVE_ANON)),
5377                         K(zone_page_state(zone, NR_ZONE_INACTIVE_ANON)),
5378                         K(zone_page_state(zone, NR_ZONE_ACTIVE_FILE)),
5379                         K(zone_page_state(zone, NR_ZONE_INACTIVE_FILE)),
5380                         K(zone_page_state(zone, NR_ZONE_UNEVICTABLE)),
5381                         K(zone_page_state(zone, NR_ZONE_WRITE_PENDING)),
5382                         K(zone->present_pages),
5383                         K(zone_managed_pages(zone)),
5384                         K(zone_page_state(zone, NR_MLOCK)),
5385                         zone_page_state(zone, NR_KERNEL_STACK_KB),
5386                         K(zone_page_state(zone, NR_PAGETABLE)),
5387                         K(zone_page_state(zone, NR_BOUNCE)),
5388                         K(free_pcp),
5389                         K(this_cpu_read(zone->pageset->pcp.count)),
5390                         K(zone_page_state(zone, NR_FREE_CMA_PAGES)));
5391                 printk("lowmem_reserve[]:");
5392                 for (i = 0; i < MAX_NR_ZONES; i++)
5393                         printk(KERN_CONT " %ld", zone->lowmem_reserve[i]);
5394                 printk(KERN_CONT "\n");
5395         }
5396 
5397         for_each_populated_zone(zone) {
5398                 unsigned int order;
5399                 unsigned long nr[MAX_ORDER], flags, total = 0;
5400                 unsigned char types[MAX_ORDER];
5401 
5402                 if (show_mem_node_skip(filter, zone_to_nid(zone), nodemask))
5403                         continue;
5404                 show_node(zone);
5405                 printk(KERN_CONT "%s: ", zone->name);
5406 
5407                 spin_lock_irqsave(&zone->lock, flags);
5408                 for (order = 0; order < MAX_ORDER; order++) {
5409                         struct free_area *area = &zone->free_area[order];
5410                         int type;
5411 
5412                         nr[order] = area->nr_free;
5413                         total += nr[order] << order;
5414 
5415                         types[order] = 0;
5416                         for (type = 0; type < MIGRATE_TYPES; type++) {
5417                                 if (!free_area_empty(area, type))
5418                                         types[order] |= 1 << type;
5419                         }
5420                 }
5421                 spin_unlock_irqrestore(&zone->lock, flags);
5422                 for (order = 0; order < MAX_ORDER; order++) {
5423                         printk(KERN_CONT "%lu*%lukB ",
5424                                nr[order], K(1UL) << order);
5425                         if (nr[order])
5426                                 show_migration_types(types[order]);
5427                 }
5428                 printk(KERN_CONT "= %lukB\n", K(total));
5429         }
5430 
5431         hugetlb_show_meminfo();
5432 
5433         printk("%ld total pagecache pages\n", global_node_page_state(NR_FILE_PAGES));
5434 
5435         show_swap_cache_info();
5436 }
5437 
5438 static void zoneref_set_zone(struct zone *zone, struct zoneref *zoneref)
5439 {
5440         zoneref->zone = zone;
5441         zoneref->zone_idx = zone_idx(zone);
5442 }
5443 
5444 /*
5445  * Builds allocation fallback zone lists.
5446  *
5447  * Add all populated zones of a node to the zonelist.
5448  */
5449 static int build_zonerefs_node(pg_data_t *pgdat, struct zoneref *zonerefs)
5450 {
5451         struct zone *zone;
5452         enum zone_type zone_type = MAX_NR_ZONES;
5453         int nr_zones = 0;
5454 
5455         do {
5456                 zone_type--;
5457                 zone = pgdat->node_zones + zone_type;
5458                 if (managed_zone(zone)) {
5459                         zoneref_set_zone(zone, &zonerefs[nr_zones++]);
5460                         check_highest_zone(zone_type);
5461                 }
5462         } while (zone_type);
5463 
5464         return nr_zones;
5465 }
5466 
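The do/while above walks the zone index from MAX_NR_ZONES - 1 down to 0, appending only zones that have managed pages, so the highest zones end up first in the zonelist. A minimal user-space sketch of the same traversal (MAX_NR_ZONES and which zones are "managed" are made-up values for illustration only):

#include <stdio.h>

#define MAX_NR_ZONES 4                          /* assumed for illustration */

int main(void)
{
        int managed[MAX_NR_ZONES] = { 1, 0, 1, 1 };     /* pretend zone 1 is empty */
        int zone_type = MAX_NR_ZONES;
        int nr_zones = 0;

        do {
                zone_type--;                    /* visits MAX_NR_ZONES-1 .. 0 */
                if (managed[zone_type])
                        printf("zoneref[%d] -> zone %d\n", nr_zones++, zone_type);
        } while (zone_type);

        return 0;                               /* prints zones 3, 2, 0 in that order */
}
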
5467 #ifdef CONFIG_NUMA
5468 
5469 static int __parse_numa_zonelist_order(char *s)
5470 {
5471         /*
5472          * We used to support different zonelist modes but they turned
5473          * out to be just not useful. Let's keep the warning in place
5474          * if somebody still uses the cmd line parameter so that we do
5475          * not fail silently.
5476          */
5477         if (!(*s == 'd' || *s == 'D' || *s == 'n' || *s == 'N')) {
5478                 pr_warn("Ignoring unsupported numa_zonelist_order value:  %s\n", s);
5479                 return -EINVAL;
5480         }
5481         return 0;
5482 }
5483 
5484 static __init int setup_numa_zonelist_order(char *s)
5485 {
5486         if (!s)
5487                 return 0;
5488 
5489         return __parse_numa_zonelist_order(s);
5490 }
5491 early_param("numa_zonelist_order", setup_numa_zonelist_order);
5492 
5493 char numa_zonelist_order[] = "Node";
5494 
5495 /*
5496  * sysctl handler for numa_zonelist_order
5497  */
5498 int numa_zonelist_order_handler(struct ctl_table *table, int write,
5499                 void __user *buffer, size_t *length,
5500                 loff_t *ppos)
5501 {
5502         char *str;
5503         int ret;
5504 
5505         if (!write)
5506                 return proc_dostring(table, write, buffer, length, ppos);
5507         str = memdup_user_nul(buffer, 16);
5508         if (IS_ERR(str))
5509                 return PTR_ERR(str);
5510 
5511         ret = __parse_numa_zonelist_order(str);
5512         kfree(str);
5513         return ret;
5514 }
5515 
5516 
5517 #define MAX_NODE_LOAD (nr_online_nodes)
5518 static int node_load[MAX_NUMNODES];
5519 
5520 /**
5521  * find_next_best_node - find the next node that should appear in a given node's fallback list
5522  * @node: node whose fallback list we're appending
5523  * @used_node_mask: nodemask_t of already used nodes
5524  *
5525  * We use a number of factors to determine which is the next node that should
5526  * appear on a given node's fallback list.  The node should not have appeared
5527  * already in @node's fallback list, and it should be the next closest node
5528  * according to the distance array (which contains arbitrary distance values
5529  * from each node to each node in the system), and should also prefer nodes
5530  * with no CPUs, since presumably they'll have very little allocation pressure
5531  * on them otherwise.
5532  *
5533  * Return: node id of the found node or %NUMA_NO_NODE if no node is found.
5534  */
5535 static int find_next_best_node(int node, nodemask_t *used_node_mask)
5536 {
5537         int n, val;
5538         int min_val = INT_MAX;
5539         int best_node = NUMA_NO_NODE;
5540         const struct cpumask *tmp = cpumask_of_node(0);
5541 
5542         /* Use the local node if we haven't already */
5543         if (!node_isset(node, *used_node_mask)) {
5544                 node_set(node, *used_node_mask);
5545                 return node;
5546         }
5547 
5548         for_each_node_state(n, N_MEMORY) {
5549 
5550                 /* Don't want a node to appear more than once */
5551                 if (node_isset(n, *used_node_mask))
5552                         continue;
5553 
5554                 /* Use the distance array to find the distance */
5555                 val = node_distance(node, n);
5556 
5557                 /* Penalize nodes under us ("prefer the next node") */
5558                 val += (n < node);
5559 
5560                 /* Give preference to headless and unused nodes */
5561                 tmp = cpumask_of_node(n);
5562                 if (!cpumask_empty(tmp))
5563                         val += PENALTY_FOR_NODE_WITH_CPUS;
5564 
5565                 /* Slight preference for less loaded node */
5566                 val *= (MAX_NODE_LOAD*MAX_NUMNODES);
5567                 val += node_load[n];
5568 
5569                 if (val < min_val) {
5570                         min_val = val;
5571                         best_node = n;
5572                 }
5573         }
5574 
5575         if (best_node >= 0)
5576                 node_set(best_node, *used_node_mask);
5577 
5578         return best_node;
5579 }
5580 
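To make the scoring above concrete: each candidate's value starts at the node distance, gains 1 if the candidate index is below the current node, gains PENALTY_FOR_NODE_WITH_CPUS if the candidate has CPUs, is then scaled so that node_load only breaks ties, and the smallest value wins. A standalone sketch with a made-up distance table and CPU placement (those, the node count, and the penalty value of 1 are all assumptions here):

#include <stdio.h>
#include <limits.h>

#define NR_NODES 4
#define PENALTY_FOR_NODE_WITH_CPUS 1    /* assumed value for illustration */

/* made-up SLIT-style distances and CPU placement */
static const int distance[NR_NODES][NR_NODES] = {
        { 10, 20, 20, 30 },
        { 20, 10, 30, 20 },
        { 20, 30, 10, 20 },
        { 30, 20, 20, 10 },
};
static const int node_has_cpus[NR_NODES] = { 1, 1, 0, 0 };
static int node_load[NR_NODES];

static int next_best_node(int node, int used[NR_NODES])
{
        int n, best = -1, min_val = INT_MAX;

        for (n = 0; n < NR_NODES; n++) {
                int val;

                if (used[n])
                        continue;
                val = distance[node][n];
                val += (n < node);                      /* prefer the next node */
                if (node_has_cpus[n])
                        val += PENALTY_FOR_NODE_WITH_CPUS;
                val *= NR_NODES * NR_NODES;             /* load is only a tie-breaker */
                val += node_load[n];
                if (val < min_val) {
                        min_val = val;
                        best = n;
                }
        }
        if (best >= 0)
                used[best] = 1;
        return best;
}

int main(void)
{
        int used[NR_NODES] = { 0 };
        int n;

        used[0] = 1;                    /* the local node 0 always goes first */
        printf("fallback order for node 0: 0");
        while ((n = next_best_node(0, used)) >= 0)
                printf(" -> %d", n);
        printf("\n");                   /* headless node 2 beats node 1 despite equal distance */
        return 0;
}
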
5581 
5582 /*
5583  * Build zonelists ordered by node and zones within node.
5584  * This results in maximum locality--normal zone overflows into local
5585  * DMA zone, if any--but risks exhausting DMA zone.
5586  */
5587 static void build_zonelists_in_node_order(pg_data_t *pgdat, int *node_order,
5588                 unsigned nr_nodes)
5589 {
5590         struct zoneref *zonerefs;
5591         int i;
5592 
5593         zonerefs = pgdat->node_zonelists[ZONELIST_FALLBACK]._zonerefs;
5594 
5595         for (i = 0; i < nr_nodes; i++) {
5596                 int nr_zones;
5597 
5598                 pg_data_t *node = NODE_DATA(node_order[i]);
5599 
5600                 nr_zones = build_zonerefs_node(node, zonerefs);
5601                 zonerefs += nr_zones;
5602         }
5603         zonerefs->zone = NULL;
5604         zonerefs->zone_idx = 0;
5605 }
5606 
5607 /*
5608  * Build gfp_thisnode zonelists
5609  */
5610 static void build_thisnode_zonelists(pg_data_t *pgdat)
5611 {
5612         struct zoneref *zonerefs;
5613         int nr_zones;
5614 
5615         zonerefs = pgdat->node_zonelists[ZONELIST_NOFALLBACK]._zonerefs;
5616         nr_zones = build_zonerefs_node(pgdat, zonerefs);
5617         zonerefs += nr_zones;
5618         zonerefs->zone = NULL;
5619         zonerefs->zone_idx = 0;
5620 }
5621 
5622 /*
5623  * Build zonelists ordered by zone and nodes within zones.
5624  * This results in conserving DMA zone[s] until all Normal memory is
5625  * exhausted, but results in overflowing to remote node while memory
5626  * may still exist in local DMA zone.
5627  */
5628 
5629 static void build_zonelists(pg_data_t *pgdat)
5630 {
5631         static int node_order[MAX_NUMNODES];
5632         int node, load, nr_nodes = 0;
5633         nodemask_t used_mask;
5634         int local_node, prev_node;
5635 
5636         /* NUMA-aware ordering of nodes */
5637         local_node = pgdat->node_id;
5638         load = nr_online_nodes;
5639         prev_node = local_node;
5640         nodes_clear(used_mask);
5641 
5642         memset(node_order, 0, sizeof(node_order));
5643         while ((node = find_next_best_node(local_node, &used_mask)) >= 0) {
5644                 /*
5645                  * We don't want to pressure a particular node, so add a
5646                  * penalty to the first node in the same distance group
5647                  * to make the ordering round-robin.
5648                  */
5649                 if (node_distance(local_node, node) !=
5650                     node_distance(local_node, prev_node))
5651                         node_load[node] = load;
5652 
5653                 node_order[nr_nodes++] = node;
5654                 prev_node = node;
5655                 load--;
5656         }
5657 
5658         build_zonelists_in_node_order(pgdat, node_order, nr_nodes);
5659         build_thisnode_zonelists(pgdat);
5660 }
5661 
5662 #ifdef CONFIG_HAVE_MEMORYLESS_NODES
5663 /*
5664  * Return node id of node used for "local" allocations.
5665  * I.e., first node id of first zone in arg node's generic zonelist.
5666  * Used for initializing percpu 'numa_mem', which is used primarily
5667  * for kernel allocations, so use GFP_KERNEL flags to locate zonelist.
5668  */
5669 int local_memory_node(int node)
5670 {
5671         struct zoneref *z;
5672 
5673         z = first_zones_zonelist(node_zonelist(node, GFP_KERNEL),
5674                                    gfp_zone(GFP_KERNEL),
5675                                    NULL);
5676         return zone_to_nid(z->zone);
5677 }
5678 #endif
5679 
5680 static void setup_min_unmapped_ratio(void);
5681 static void setup_min_slab_ratio(void);
5682 #else   /* CONFIG_NUMA */
5683 
5684 static void build_zonelists(pg_data_t *pgdat)
5685 {
5686         int node, local_node;
5687         struct zoneref *zonerefs;
5688         int nr_zones;
5689 
5690         local_node = pgdat->node_id;
5691 
5692         zonerefs = pgdat->node_zonelists[ZONELIST_FALLBACK]._zonerefs;
5693         nr_zones = build_zonerefs_node(pgdat, zonerefs);
5694         zonerefs += nr_zones;
5695 
5696         /*
5697          * Now we build the zonelist so that it contains the zones
5698          * of all the other nodes.
5699          * We don't want to pressure a particular node, so when
5700          * building the zones for node N, we make sure that the
5701          * zones coming right after the local ones are those from
5702          * node N+1 (modulo N)
5703          */
5704         for (node = local_node + 1; node < MAX_NUMNODES; node++) {
5705                 if (!node_online(node))
5706                         continue;
5707                 nr_zones = build_zonerefs_node(NODE_DATA(node), zonerefs);
5708                 zonerefs += nr_zones;
5709         }
5710         for (node = 0; node < local_node; node++) {
5711                 if (!node_online(node))
5712                         continue;
5713                 nr_zones = build_zonerefs_node(NODE_DATA(node), zonerefs);
5714                 zonerefs += nr_zones;
5715         }
5716 
5717         zonerefs->zone = NULL;
5718         zonerefs->zone_idx = 0;
5719 }
5720 
5721 #endif  /* CONFIG_NUMA */
5722 
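For the !CONFIG_NUMA build_zonelists() above, the fallback order of nodes is simply the local node first, then the remaining online nodes starting at local_node + 1 and wrapping around to 0. A trivial sketch of that ordering (four nodes and local node 2 are illustrative values):

#include <stdio.h>

#define MAX_NUMNODES 4                  /* assumed node count */

int main(void)
{
        int local_node = 2, node;

        printf("node order: %d", local_node);
        for (node = local_node + 1; node < MAX_NUMNODES; node++)
                printf(" %d", node);
        for (node = 0; node < local_node; node++)
                printf(" %d", node);
        printf("\n");                   /* prints: node order: 2 3 0 1 */
        return 0;
}
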
5723 /*
5724  * Boot pageset table. One per cpu which is going to be used for all
5725  * zones and all nodes. The parameters will be set in such a way
5726  * that an item put on a list will immediately be handed over to
5727  * the buddy list. This is safe since pageset manipulation is done
5728  * with interrupts disabled.
5729  *
5730  * The boot_pagesets must be kept even after bootup is complete for
5731  * unused processors and/or zones. They do play a role for bootstrapping
5732  * hotplugged processors.
5733  *
5734  * zoneinfo_show() and maybe other functions do
5735  * not check if the processor is online before following the pageset pointer.
5736  * Other parts of the kernel may not check if the zone is available.
5737  */
5738 static void setup_pageset(struct per_cpu_pageset *p, unsigned long batch);
5739 static DEFINE_PER_CPU(struct per_cpu_pageset, boot_pageset);
5740 static DEFINE_PER_CPU(struct per_cpu_nodestat, boot_nodestats);
5741 
5742 static void __build_all_zonelists(void *data)
5743 {
5744         int nid;
5745         int __maybe_unused cpu;
5746         pg_data_t *self = data;
5747         static DEFINE_SPINLOCK(lock);
5748 
5749         spin_lock(&lock);
5750 
5751 #ifdef CONFIG_NUMA
5752         memset(node_load, 0, sizeof(node_load));
5753 #endif
5754 
5755         /*
5756          * This node was hot-added and no memory is yet present. So just
5757          * building zonelists is fine - no need to touch other nodes.
5758          */
5759         if (self && !node_online(self->node_id)) {
5760                 build_zonelists(self);
5761         } else {
5762                 for_each_online_node(nid) {
5763                         pg_data_t *pgdat = NODE_DATA(nid);
5764 
5765                         build_zonelists(pgdat);
5766                 }
5767 
5768 #ifdef CONFIG_HAVE_MEMORYLESS_NODES
5769                 /*
5770                  * We now know the "local memory node" for each node--
5771                  * i.e., the node of the first zone in the generic zonelist.
5772                  * Set up numa_mem percpu variable for on-line cpus.  During
5773                  * boot, only the boot cpu should be on-line;  we'll init the
5774                  * secondary cpus' numa_mem as they come on-line.  During
5775                  * node/memory hotplug, we'll fixup all on-line cpus.
5776                  */
5777                 for_each_online_cpu(cpu)
5778                         set_cpu_numa_mem(cpu, local_memory_node(cpu_to_node(cpu)));
5779 #endif
5780         }
5781 
5782         spin_unlock(&lock);
5783 }
5784 
5785 static noinline void __init
5786 build_all_zonelists_init(void)
5787 {
5788         int cpu;
5789 
5790         __build_all_zonelists(NULL);
5791 
5792         /*
5793          * Initialize the boot_pagesets that are going to be used
5794          * for bootstrapping processors. The real pagesets for
5795          * each zone will be allocated later when the per cpu
5796          * allocator is available.
5797          *
5798          * boot_pagesets are also used for bootstrapping offline
5799          * cpus if the system is already booted because the pagesets
5800          * are needed to initialize allocators on a specific cpu too.
5801          * E.g. the percpu allocator needs the page allocator, which
5802          * needs the percpu allocator in order to allocate its pagesets
5803          * (a chicken-egg dilemma).
5804          */
5805         for_each_possible_cpu(cpu)
5806                 setup_pageset(&per_cpu(boot_pageset, cpu), 0);
5807 
5808         mminit_verify_zonelist();
5809         cpuset_init_current_mems_allowed();
5810 }
5811 
5812 /*
5813  * Rebuild the zonelists; the __init path is only taken while system_state == SYSTEM_BOOTING.
5814  *
5815  * __ref due to call of __init annotated helper build_all_zonelists_init
5816  * [protected by SYSTEM_BOOTING].
5817  */
5818 void __ref build_all_zonelists(pg_data_t *pgdat)
5819 {
5820         if (system_state == SYSTEM_BOOTING) {
5821                 build_all_zonelists_init();
5822         } else {
5823                 __build_all_zonelists(pgdat);
5824                 /* cpuset refresh routine should be here */
5825         }
5826         vm_total_pages = nr_free_pagecache_pages();
5827         /*
5828          * Disable grouping by mobility if the number of pages in the
5829          * system is too low to allow the mechanism to work. It would be
5830          * more accurate, but expensive to check per-zone. This check is
5831          * made on memory-hotadd so a system can start with mobility
5832          * disabled and enable it later.
5833          */
5834         if (vm_total_pages < (pageblock_nr_pages * MIGRATE_TYPES))
5835                 page_group_by_mobility_disabled = 1;
5836         else
5837                 page_group_by_mobility_disabled = 0;
5838 
5839         pr_info("Built %u zonelists, mobility grouping %s.  Total pages: %ld\n",
5840                 nr_online_nodes,
5841                 page_group_by_mobility_disabled ? "off" : "on",
5842                 vm_total_pages);
5843 #ifdef CONFIG_NUMA
5844         pr_info("Policy zone: %s\n", zone_names[policy_zone]);
5845 #endif
5846 }
5847 
5848 /* If zone is ZONE_MOVABLE but memory is mirrored, it is an overlapped init */
5849 static bool __meminit
5850 overlap_memmap_init(unsigned long zone, unsigned long *pfn)
5851 {
5852 #ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
5853         static struct memblock_region *r;
5854 
5855         if (mirrored_kernelcore && zone == ZONE_MOVABLE) {
5856                 if (!r || *pfn >= memblock_region_memory_end_pfn(r)) {
5857                         for_each_memblock(memory, r) {
5858                                 if (*pfn < memblock_region_memory_end_pfn(r))
5859                                         break;
5860                         }
5861                 }
5862                 if (*pfn >= memblock_region_memory_base_pfn(r) &&
5863                     memblock_is_mirror(r)) {
5864                         *pfn = memblock_region_memory_end_pfn(r);
5865                         return true;
5866                 }
5867         }
5868 #endif
5869         return false;
5870 }
5871 
5872 /*
5873  * Initially all pages are reserved - free ones are freed
5874  * up by memblock_free_all() once the early boot process is
5875  * done. Non-atomic initialization, single-pass.
5876  */
5877 void __meminit memmap_init_zone(unsigned long size, int nid, unsigned long zone,
5878                 unsigned long start_pfn, enum memmap_context context,
5879                 struct vmem_altmap *altmap)
5880 {
5881         unsigned long pfn, end_pfn = start_pfn + size;
5882         struct page *page;
5883 
5884         if (highest_memmap_pfn < end_pfn - 1)
5885                 highest_memmap_pfn = end_pfn - 1;
5886 
5887 #ifdef CONFIG_ZONE_DEVICE
5888         /*
5889          * Honor reservation requested by the driver for this ZONE_DEVICE
5890          * memory. We limit the total number of pages to initialize to just
5891          * those that might contain the memory mapping. We will defer the
5892          * ZONE_DEVICE page initialization until after we have released
5893          * the hotplug lock.
5894          */
5895         if (zone == ZONE_DEVICE) {
5896                 if (!altmap)
5897                         return;
5898 
5899                 if (start_pfn == altmap->base_pfn)
5900                         start_pfn += altmap->reserve;
5901                 end_pfn = altmap->base_pfn + vmem_altmap_offset(altmap);
5902         }
5903 #endif
5904 
5905         for (pfn = start_pfn; pfn < end_pfn; pfn++) {
5906                 /*
5907                  * There can be holes in boot-time mem_map[]s handed to this
5908                  * function.  They do not exist on hotplugged memory.
5909                  */
5910                 if (context == MEMMAP_EARLY) {
5911                         if (!early_pfn_valid(pfn))
5912                                 continue;
5913                         if (!early_pfn_in_nid(pfn, nid))
5914                                 continue;
5915                         if (overlap_memmap_init(zone, &pfn))
5916                                 continue;
5917                         if (defer_init(nid, pfn, end_pfn))
5918                                 break;
5919                 }
5920 
5921                 page = pfn_to_page(pfn);
5922                 __init_single_page(page, pfn, zone, nid);
5923                 if (context == MEMMAP_HOTPLUG)
5924                         __SetPageReserved(page);
5925 
5926                 /*
5927                  * Mark the block movable so that blocks are reserved for
5928                  * movable at startup. This will force kernel allocations
5929                  * to reserve their blocks rather than leaking throughout
5930                  * the address space during boot when many long-lived
5931                  * kernel allocations are made.
5932                  *
5933                  * The bitmap is created for the zone's valid pfn range, but the
5934                  * memmap can be created for invalid pages (for alignment).
5935                  * Check here so we do not call set_pageblock_migratetype()
5936                  * against a pfn outside the zone.
5937                  */
5938                 if (!(pfn & (pageblock_nr_pages - 1))) {
5939                         set_pageblock_migratetype(page, MIGRATE_MOVABLE);
5940                         cond_resched();
5941                 }
5942         }
5943 }
5944 
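The !(pfn & (pageblock_nr_pages - 1)) test above is the usual power-of-two alignment trick: it is true exactly when pfn is a multiple of pageblock_nr_pages, so set_pageblock_migratetype() runs once per pageblock rather than once per page. A tiny sketch (512 pages per pageblock, i.e. 2 MiB blocks with 4 KiB pages, is only an assumed value):

#include <stdio.h>

#define PAGEBLOCK_NR_PAGES 512UL        /* assumed: 2 MiB pageblocks, 4 KiB pages */

int main(void)
{
        unsigned long pfn;

        for (pfn = 1024; pfn < 1024 + 2 * PAGEBLOCK_NR_PAGES; pfn++) {
                if (!(pfn & (PAGEBLOCK_NR_PAGES - 1)))
                        printf("pfn %lu starts a new pageblock\n", pfn);
        }
        return 0;                       /* prints pfn 1024 and pfn 1536 */
}
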
5945 #ifdef CONFIG_ZONE_DEVICE
5946 void __ref memmap_init_zone_device(struct zone *zone,
5947                                    unsigned long start_pfn,
5948                                    unsigned long size,
5949                                    struct dev_pagemap *pgmap)
5950 {
5951         unsigned long pfn, end_pfn = start_pfn + size;
5952         struct pglist_data *pgdat = zone->zone_pgdat;
5953         struct vmem_altmap *altmap = pgmap_altmap(pgmap);
5954         unsigned long zone_idx = zone_idx(zone);
5955         unsigned long start = jiffies;
5956         int nid = pgdat->node_id;
5957 
5958         if (WARN_ON_ONCE(!pgmap || zone_idx(zone) != ZONE_DEVICE))
5959                 return;
5960 
5961         /*
5962          * The call to memmap_init_zone should have already taken care
5963          * of the pages reserved for the memmap, so we can just jump to
5964          * the end of that region and start processing the device pages.
5965          */
5966         if (altmap) {
5967                 start_pfn = altmap->base_pfn + vmem_altmap_offset(altmap);
5968                 size = end_pfn - start_pfn;
5969         }
5970 
5971         for (pfn = start_pfn; pfn < end_pfn; pfn++) {
5972                 struct page *page = pfn_to_page(pfn);
5973 
5974                 __init_single_page(page, pfn, zone_idx, nid);
5975 
5976                 /*
5977                  * Mark page reserved as it will need to wait for onlining
5978                  * phase for it to be fully associated with a zone.
5979                  *
5980                  * We can use the non-atomic __set_bit operation for setting
5981                  * the flag as we are still initializing the pages.
5982                  */
5983                 __SetPageReserved(page);
5984 
5985                 /*
5986                  * ZONE_DEVICE pages union ->lru with a ->pgmap back pointer
5987                  * and zone_device_data.  It is a bug if a ZONE_DEVICE page is
5988                  * ever freed or placed on a driver-private list.
5989                  */
5990                 page->pgmap = pgmap;
5991                 page->zone_device_data = NULL;
5992 
5993                 /*
5994                  * Mark the block movable so that blocks are reserved for
5995                  * movable at startup. This will force kernel allocations
5996                  * to reserve their blocks rather than leaking throughout
5997                  * the address space during boot when many long-lived
5998                  * kernel allocations are made.
5999                  *
6000                  * The bitmap is created for the zone's valid pfn range, but the
6001                  * memmap can be created for invalid pages (for alignment).
6002                  * Check here so we do not call set_pageblock_migratetype()
6003                  * against a pfn outside the zone.
6004                  *
6005                  * Please note that the MEMMAP_HOTPLUG path doesn't clear the
6006                  * memmap because this is done early in section_activate().
6007                  */
6008                 if (!(pfn & (pageblock_nr_pages - 1))) {
6009                         set_pageblock_migratetype(page, MIGRATE_MOVABLE);
6010                         cond_resched();
6011                 }
6012         }
6013 
6014         pr_info("%s initialised %lu pages in %ums\n", __func__,
6015                 size, jiffies_to_msecs(jiffies - start));
6016 }
6017 
6018 #endif
6019 static void __meminit zone_init_free_lists(struct zone *zone)
6020 {
6021         unsigned int order, t;
6022         for_each_migratetype_order(order, t) {
6023                 INIT_LIST_HEAD(&zone->free_area[order].free_list[t]);
6024                 zone->free_area[order].nr_free = 0;
6025         }
6026 }
6027 
6028 void __meminit __weak memmap_init(unsigned long size, int nid,
6029                                   unsigned long zone, unsigned long start_pfn)
6030 {
6031         memmap_init_zone(size, nid, zone, start_pfn, MEMMAP_EARLY, NULL);
6032 }
6033 
6034 static int zone_batchsize(struct zone *zone)
6035 {
6036 #ifdef CONFIG_MMU
6037         int batch;
6038 
6039         /*
6040          * The per-cpu-pages pools are set to around 1/1024th of the
6041          * size of the zone.
6042          */
6043         batch = zone_managed_pages(zone) / 1024;
6044         /* But no more than a meg. */
6045         if (batch * PAGE_SIZE > 1024 * 1024)
6046                 batch = (1024 * 1024) / PAGE_SIZE;
6047         batch /= 4;             /* We effectively *= 4 below */
6048         if (batch < 1)
6049                 batch = 1;
6050 
6051         /*
6052          * Clamp the batch to a 2^n - 1 value. Having a power
6053          * of 2 value was found to be more likely to have
6054          * suboptimal cache aliasing properties in some cases.
6055          *
6056          * For example if 2 tasks are alternately allocating
6057          * batches of pages, one task can end up with a lot
6058          * of pages of one half of the possible page colors
6059          * and the other with pages of the other colors.
6060          */
6061         batch = rounddown_pow_of_two(batch + batch/2) - 1;
6062 
6063         return batch;
6064 
6065 #else
6066         /* The deferral and batching of frees should be suppressed under NOMMU
6067          * conditions.
6068          *
6069          * The problem is that NOMMU needs to be able to allocate large chunks
6070          * of contiguous memory as there's no hardware page translation to
6071          * assemble apparent contiguous memory from discontiguous pages.
6072          *
6073          * Queueing large contiguous runs of pages for batching, however,
6074          * causes the pages to actually be freed in smaller chunks.  As there
6075          * can be a significant delay between the individual batches being
6076          * recycled, this leads to the once large chunks of space being
6077          * fragmented and becoming unavailable for high-order allocations.
6078          */
6079         return 0;
6080 #endif
6081 }
6082 
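Worked through for a zone with 1 GiB of managed memory and 4 KiB pages: 262144 / 1024 = 256 pages is exactly 1 MiB (so the clamp does not fire), dividing by 4 gives 64, and rounding 64 + 32 = 96 down to a power of two and subtracting one yields a batch of 63. A user-space sketch of the same arithmetic (the page size and the example zone sizes are assumptions):

#include <stdio.h>

#define PAGE_SIZE 4096UL                        /* assumed 4 KiB pages */

/* round down to the nearest power of two, n > 0 */
static unsigned long rounddown_pow_of_two(unsigned long n)
{
        unsigned long p = 1;

        while (p * 2 <= n)
                p *= 2;
        return p;
}

static unsigned long batchsize(unsigned long managed_pages)
{
        unsigned long batch = managed_pages / 1024;

        if (batch * PAGE_SIZE > 1024 * 1024)    /* but no more than a meg */
                batch = (1024 * 1024) / PAGE_SIZE;
        batch /= 4;                             /* effectively *= 4 below */
        if (batch < 1)
                batch = 1;
        return rounddown_pow_of_two(batch + batch / 2) - 1;
}

int main(void)
{
        printf("64 MiB zone -> batch %lu\n", batchsize(16384UL));    /* 3  */
        printf("1 GiB zone  -> batch %lu\n", batchsize(262144UL));   /* 63 */
        printf("16 GiB zone -> batch %lu\n", batchsize(4194304UL));  /* 63 */
        return 0;
}
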
6083 /*
6084  * pcp->high and pcp->batch values are related and dependent on one another:
6085  * ->batch must never be higher than ->high.
6086  * The following function updates them in a safe manner without read side
6087  * locking.
6088  *
6089  * Any new users of pcp->batch and pcp->high should ensure they can cope with
6090  * those fields changing asynchronously (according to the above rule).
6091  *
6092  * mutex_is_locked(&pcp_batch_high_lock) required when calling this function
6093  * outside of boot time (or some other assurance that no concurrent updaters
6094  * exist).
6095  */
6096 static void pageset_update(struct per_cpu_pages *pcp, unsigned long high,
6097                 unsigned long batch)
6098 {
6099         /* start with a fail-safe value for batch */
6100         pcp->batch = 1;
6101         smp_wmb();
6102 
6103         /* Update high, then batch, in order */
6104         pcp->high = high;
6105         smp_wmb();
6106 
6107         pcp->batch = batch;
6108 }
6109 
6110 /* a companion to pageset_set_high() */
6111 static void pageset_set_batch(struct per_cpu_pageset *p, unsigned long batch)
6112 {
6113         pageset_update(&p->pcp, 6 * batch, max(1UL, 1 * batch));
6114 }
6115 
6116 static void pageset_init(struct per_cpu_pageset *p)
6117 {
6118         struct per_cpu_pages *pcp;
6119         int migratetype;
6120 
6121         memset(p, 0, sizeof(*p));
6122 
6123         pcp = &p->pcp;
6124         for (migratetype = 0; migratetype < MIGRATE_PCPTYPES; migratetype++)
6125                 INIT_LIST_HEAD(&pcp->lists[migratetype]);
6126 }
6127 
6128 static void setup_pageset(struct per_cpu_pageset *p, unsigned long batch)
6129 {
6130         pageset_init(p);
6131         pageset_set_batch(p, batch);
6132 }
6133 
6134 /*
6135  * pageset_set_high() sets the high water mark for hot per_cpu_pagelist
6136  * to the value high for the pageset p.
6137  */
6138 static void pageset_set_high(struct per_cpu_pageset *p,
6139                                 unsigned long high)
6140 {
6141         unsigned long batch = max(1UL, high / 4);
6142         if ((high / 4) > (PAGE_SHIFT * 8))
6143                 batch = PAGE_SHIFT * 8;
6144 
6145         pageset_update(&p->pcp, high, batch);
6146 }
6147 
6148 static void pageset_set_high_and_batch(struct zone *zone,
6149                                        struct per_cpu_pageset *pcp)
6150 {
6151         if (percpu_pagelist_fraction)
6152                 pageset_set_high(pcp,
6153                         (zone_managed_pages(zone) /
6154                                 percpu_pagelist_fraction));
6155         else
6156                 pageset_set_batch(pcp, zone_batchsize(zone));
6157 }
6158 
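Tying the helpers above together: without percpu_pagelist_fraction the high mark is six times the zone batch; with a fraction set, high is managed_pages / fraction and batch is high / 4 capped at PAGE_SHIFT * 8. For a 1 GiB zone (262144 pages) and a fraction of 8 this gives high = 32768 and batch = 96 with 4 KiB pages. A sketch of just that path (all values are illustrative assumptions):

#include <stdio.h>

#define PAGE_SHIFT 12UL                 /* assumed 4 KiB pages */

int main(void)
{
        unsigned long high = 262144UL / 8;      /* managed pages / fraction */
        unsigned long batch = high / 4;

        if (batch < 1)
                batch = 1;                      /* max(1UL, high / 4) */
        if (high / 4 > PAGE_SHIFT * 8)
                batch = PAGE_SHIFT * 8;         /* cap the batch size */

        printf("high=%lu batch=%lu\n", high, batch);    /* high=32768 batch=96 */
        return 0;
}
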
6159 static void __meminit zone_pageset_init(struct zone *zone, int cpu)
6160 {
6161         struct per_cpu_pageset *pcp = per_cpu_ptr(zone->pageset, cpu);
6162 
6163         pageset_init(pcp);
6164         pageset_set_high_and_batch(zone, pcp);
6165 }
6166 
6167 void __meminit setup_zone_pageset(struct zone *zone)
6168 {
6169         int cpu;
6170         zone->pageset = alloc_percpu(struct per_cpu_pageset);
6171         for_each_possible_cpu(cpu)
6172                 zone_pageset_init(zone, cpu);
6173 }
6174 
6175 /*
6176  * Allocate per cpu pagesets and initialize them.
6177  * Before this call only boot pagesets were available.
6178  */
6179 void __init setup_per_cpu_pageset(void)
6180 {
6181         struct pglist_data *pgdat;
6182         struct zone *zone;
6183 
6184         for_each_populated_zone(zone)
6185                 setup_zone_pageset(zone);
6186 
6187         for_each_online_pgdat(pgdat)
6188                 pgdat->per_cpu_nodestats =
6189                         alloc_percpu(struct per_cpu_nodestat);
6190 }
6191 
6192 static __meminit void zone_pcp_init(struct zone *zone)
6193 {
6194         /*
6195          * per cpu subsystem is not up at this point. The following code
6196          * relies on the ability of the linker to provide the
6197          * offset of a (static) per cpu variable into the per cpu area.
6198          */
6199         zone->pageset = &boot_pageset;
6200 
6201         if (populated_zone(zone))
6202                 printk(KERN_DEBUG "  %s zone: %lu pages, LIFO batch:%u\n",
6203                         zone->name, zone->present_pages,
6204                                          zone_batchsize(zone));
6205 }
6206 
6207 void __meminit init_currently_empty_zone(struct zone *zone,
6208                                         unsigned long zone_start_pfn,
6209                                         unsigned long size)
6210 {
6211         struct pglist_data *pgdat = zone->zone_pgdat;
6212         int zone_idx = zone_idx(zone) + 1;
6213 
6214         if (zone_idx > pgdat->nr_zones)
6215                 pgdat->nr_zones = zone_idx;
6216 
6217         zone->zone_start_pfn = zone_start_pfn;
6218 
6219         mminit_dprintk(MMINIT_TRACE, "memmap_init",
6220                         "Initialising map node %d zone %lu pfns %lu -> %lu\n",
6221                         pgdat->node_id,
6222                         (unsigned long)zone_idx(zone),
6223                         zone_start_pfn, (zone_start_pfn + size));
6224 
6225         zone_init_free_lists(zone);
6226         zone->initialized = 1;
6227 }
6228 
6229 #ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
6230 #ifndef CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID
6231 
6232 /*
6233  * Required by SPARSEMEM. Given a PFN, return what node the PFN is on.
6234  */
6235 int __meminit __early_pfn_to_nid(unsigned long pfn,
6236                                         struct mminit_pfnnid_cache *state)
6237 {
6238         unsigned long start_pfn, end_pfn;
6239         int nid;
6240 
6241         if (state->last_start <= pfn && pfn < state->last_end)
6242                 return state->last_nid;
6243 
6244         nid = memblock_search_pfn_nid(pfn, &start_pfn, &end_pfn);
6245         if (nid != NUMA_NO_NODE) {
6246                 state->last_start = start_pfn;
6247                 state->last_end = end_pfn;
6248                 state->last_nid = nid;
6249         }
6250 
6251         return nid;
6252 }
6253 #endif /* CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID */
6254 
6255 /**
6256  * free_bootmem_with_active_regions - Call memblock_free_early_nid for each active range
6257  * @nid: The node to free memory on. If MAX_NUMNODES, all nodes are freed.
6258  * @max_low_pfn: The highest PFN that will be passed to memblock_free_early_nid
6259  *
6260  * If an architecture guarantees that all ranges registered contain no holes
6261  * and may be freed, this function may be used instead of calling
6262  * memblock_free_early_nid() manually.
6263  */
6264 void __init free_bootmem_with_active_regions(int nid, unsigned long max_low_pfn)
6265 {
6266         unsigned long start_pfn, end_pfn;
6267         int i, this_nid;
6268 
6269         for_each_mem_pfn_range(i, nid, &start_pfn, &end_pfn, &this_nid) {
6270                 start_pfn = min(start_pfn, max_low_pfn);
6271                 end_pfn = min(end_pfn, max_low_pfn);
6272 
6273                 if (start_pfn < end_pfn)
6274                         memblock_free_early_nid(PFN_PHYS(start_pfn),
6275                                         (end_pfn - start_pfn) << PAGE_SHIFT,
6276                                         this_nid);
6277         }
6278 }
6279 
6280 /**
6281  * sparse_memory_present_with_active_regions - Call memory_present for each active range
6282  * @nid: The node to call memory_present for. If MAX_NUMNODES, all nodes will be used.
6283  *
6284  * If an architecture guarantees that all ranges registered contain no holes and may
6285  * be freed, this function may be used instead of calling memory_present() manually.
6286  */
6287 void __init sparse_memory_present_with_active_regions(int nid)
6288 {
6289         unsigned long start_pfn, end_pfn;
6290         int i, this_nid;
6291 
6292         for_each_mem_pfn_range(i, nid, &start_pfn, &end_pfn, &this_nid)
6293                 memory_present(this_nid, start_pfn, end_pfn);
6294 }
6295 
6296 /**
6297  * get_pfn_range_for_nid - Return the start and end page frames for a node
6298  * @nid: The nid to return the range for. If MAX_NUMNODES, the min and max PFN are returned.
6299  * @start_pfn: Passed by reference. On return, it will have the node start_pfn.
6300  * @end_pfn: Passed by reference. On return, it will have the node end_pfn.
6301  *
6302  * It returns the start and end page frame of a node based on information
6303  * provided by memblock_set_node(). If called for a node
6304  * with no available memory, a warning is printed and the start and end
6305  * PFNs will be 0.
6306  */
6307 void __init get_pfn_range_for_nid(unsigned int nid,
6308                         unsigned long *start_pfn, unsigned long *end_pfn)
6309 {
6310         unsigned long this_start_pfn, this_end_pfn;
6311         int i;
6312 
6313         *start_pfn = -1UL;
6314         *end_pfn = 0;
6315 
6316         for_each_mem_pfn_range(i, nid, &this_start_pfn, &this_end_pfn, NULL) {
6317                 *start_pfn = min(*start_pfn, this_start_pfn);
6318                 *end_pfn = max(*end_pfn, this_end_pfn);
6319         }
6320 
6321         if (*start_pfn == -1UL)
6322                 *start_pfn = 0;
6323 }
6324 
6325 /*
6326  * This finds a zone that can be used for ZONE_MOVABLE pages. The
6327  * assumption is made that zones within a node are ordered in monotonically
6328  * increasing memory addresses so that the "highest" populated zone is used.
6329  */
6330 static void __init find_usable_zone_for_movable(void)
6331 {
6332         int zone_index;
6333         for (zone_index = MAX_NR_ZONES - 1; zone_index >= 0; zone_index--) {
6334                 if (zone_index == ZONE_MOVABLE)
6335                         continue;
6336 
6337                 if (arch_zone_highest_possible_pfn[zone_index] >
6338                                 arch_zone_lowest_possible_pfn[zone_index])
6339                         break;
6340         }
6341 
6342         VM_BUG_ON(zone_index == -1);
6343         movable_zone = zone_index;
6344 }
6345 
6346 /*
6347  * The zone ranges provided by the architecture do not include ZONE_MOVABLE
6348  * because it is sized independent of architecture. Unlike the other zones,
6349  * the starting point for ZONE_MOVABLE is not fixed. It may be different
6350  * in each node depending on the size of each node and how evenly kernelcore
6351  * is distributed. This helper function adjusts the zone ranges
6352  * provided by the architecture for a given node by using the end of the
6353  * highest usable zone for ZONE_MOVABLE. This preserves the assumption that
6354  * zones within a node are in order of monotonically increasing memory addresses.
6355  */
6356 static void __init adjust_zone_range_for_zone_movable(int nid,
6357                                         unsigned long zone_type,
6358                                         unsigned long node_start_pfn,
6359                                         unsigned long node_end_pfn,
6360                                         unsigned long *zone_start_pfn,
6361                                         unsigned long *zone_end_pfn)
6362 {
6363         /* Only adjust if ZONE_MOVABLE is on this node */
6364         if (zone_movable_pfn[nid]) {
6365                 /* Size ZONE_MOVABLE */
6366                 if (zone_type == ZONE_MOVABLE) {
6367                         *zone_start_pfn = zone_movable_pfn[nid];
6368                         *zone_end_pfn = min(node_end_pfn,
6369                                 arch_zone_highest_possible_pfn[movable_zone]);
6370 
6371                 /* Adjust for ZONE_MOVABLE starting within this range */
6372                 } else if (!mirrored_kernelcore &&
6373                         *zone_start_pfn < zone_movable_pfn[nid] &&
6374                         *zone_end_pfn > zone_movable_pfn[nid]) {
6375                         *zone_end_pfn = zone_movable_pfn[nid];
6376 
6377                 /* Check if this whole range is within ZONE_MOVABLE */
6378                 } else if (*zone_start_pfn >= zone_movable_pfn[nid])
6379                         *zone_start_pfn = *zone_end_pfn;
6380         }
6381 }
6382 
6383 /*
6384  * Return the number of pages a zone spans in a node, including holes
6385  * present_pages = zone_spanned_pages_in_node() - zone_absent_pages_in_node()
6386  */
6387 static unsigned long __init zone_spanned_pages_in_node(int nid,
6388                                         unsigned long zone_type,
6389                                         unsigned long node_start_pfn,
6390                                         unsigned long node_end_pfn,
6391                                         unsigned long *zone_start_pfn,
6392                                         unsigned long *zone_end_pfn,
6393                                         unsigned long *ignored)
6394 {
6395         unsigned long zone_low = arch_zone_lowest_possible_pfn[zone_type];
6396         unsigned long zone_high = arch_zone_highest_possible_pfn[zone_type];
6397         /* When hot-adding a new node from cpu_up(), the node should be empty */
6398         if (!node_start_pfn && !node_end_pfn)
6399                 return 0;
6400 
6401         /* Get the start and end of the zone */
6402         *zone_start_pfn = clamp(node_start_pfn, zone_low, zone_high);
6403         *zone_end_pfn = clamp(node_end_pfn, zone_low, zone_high);
6404         adjust_zone_range_for_zone_movable(nid, zone_type,
6405                                 node_start_pfn, node_end_pfn,
6406                                 zone_start_pfn, zone_end_pfn);
6407 
6408         /* Check that this node has pages within the zone's required range */
6409         if (*zone_end_pfn < node_start_pfn || *zone_start_pfn > node_end_pfn)
6410                 return 0;
6411 
6412         /* Move the zone boundaries inside the node if necessary */
6413         *zone_end_pfn = min(*zone_end_pfn, node_end_pfn);
6414         *zone_start_pfn = max(*zone_start_pfn, node_start_pfn);
6415 
6416         /* Return the spanned pages */
6417         return *zone_end_pfn - *zone_start_pfn;
6418 }
6419 
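A worked example of the clamping above: if a zone's architectural PFN range is [1048576, 4194304) and a node spans [2097152, 8388608), clamping the node limits into the zone gives [2097152, 4194304), so the node contributes 2097152 spanned pages to that zone and its remaining pages fall into a higher zone. A minimal sketch of the clamp (the numbers are illustrative and the ZONE_MOVABLE adjustment is omitted):

#include <stdio.h>

static unsigned long clamp_ul(unsigned long v, unsigned long lo, unsigned long hi)
{
        if (v < lo)
                return lo;
        if (v > hi)
                return hi;
        return v;
}

int main(void)
{
        unsigned long zone_low = 1048576, zone_high = 4194304;   /* zone limits */
        unsigned long node_start = 2097152, node_end = 8388608;  /* node span   */
        unsigned long zs = clamp_ul(node_start, zone_low, zone_high);
        unsigned long ze = clamp_ul(node_end, zone_low, zone_high);

        if (ze < node_start || zs > node_end)
                printf("node has no pages in this zone\n");
        else
                printf("spanned pages: %lu\n", ze - zs);         /* 2097152 */
        return 0;
}
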
6420 /*
6421  * Return the number of holes in a range on a node. If nid is MAX_NUMNODES,
6422  * then all holes in the requested range will be accounted for.
6423  */
6424 unsigned long __init __absent_pages_in_range(int nid,
6425                                 unsigned long range_start_pfn,
6426                                 unsigned long range_end_pfn)
6427 {
6428         unsigned long nr_absent = range_end_pfn - range_start_pfn;
6429         unsigned long start_pfn, end_pfn;
6430         int i;
6431 
6432         for_each_mem_pfn_range(i, nid, &start_pfn, &end_pfn, NULL) {
6433                 start_pfn = clamp(start_pfn, range_start_pfn, range_end_pfn);
6434                 end_pfn = clamp(end_pfn, range_start_pfn, range_end_pfn);
6435                 nr_absent -= end_pfn - start_pfn;
6436         }
6437         return nr_absent;
6438 }
6439 
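Holes are simply whatever part of the requested range no registered memory region covers. For the range [0, 1000) with memory at [0, 400) and [600, 900), nr_absent starts at 1000 and 700 present pages are subtracted, leaving 300. A small sketch with a hard-coded region list (purely illustrative):

#include <stdio.h>

struct range { unsigned long start, end; };

static unsigned long clamp_ul(unsigned long v, unsigned long lo, unsigned long hi)
{
        return v < lo ? lo : (v > hi ? hi : v);
}

int main(void)
{
        struct range mem[] = { { 0, 400 }, { 600, 900 } };      /* present memory */
        unsigned long range_start = 0, range_end = 1000;
        unsigned long nr_absent = range_end - range_start;
        unsigned int i;

        for (i = 0; i < sizeof(mem) / sizeof(mem[0]); i++) {
                unsigned long s = clamp_ul(mem[i].start, range_start, range_end);
                unsigned long e = clamp_ul(mem[i].end, range_start, range_end);

                nr_absent -= e - s;
        }
        printf("absent pages: %lu\n", nr_absent);               /* 300 */
        return 0;
}
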
6440 /**
6441  * absent_pages_in_range - Return number of page frames in holes within a range
6442  * @start_pfn: The start PFN to start searching for holes
6443  * @end_pfn: The end PFN to stop searching for holes
6444  *
6445  * Return: the number of page frames in memory holes within a range.
6446  */
6447 unsigned long __init absent_pages_in_range(unsigned long start_pfn,
6448                                                         unsigned long end_pfn)
6449 {
6450         return __absent_pages_in_range(MAX_NUMNODES, start_pfn, end_pfn);
6451 }
6452 
6453 /* Return the number of page frames in holes in a zone on a node */
6454 static unsigned long __init zone_absent_pages_in_node(int nid,
6455                                         unsigned long zone_type,
6456                                         unsigned long node_start_pfn,
6457                                         unsigned long node_end_pfn,
6458                                         unsigned long *ignored)
6459 {
6460         unsigned long zone_low = arch_zone_lowest_possible_pfn[zone_type];
6461         unsigned long zone_high = arch_zone_highest_possible_pfn[zone_type];
6462         unsigned long zone_start_pfn, zone_end_pfn;
6463         unsigned long nr_absent;
6464 
6465         /* When hot-adding a new node from cpu_up(), the node should be empty */
6466         if (!node_start_pfn && !node_end_pfn)
6467                 return 0;
6468 
6469         zone_start_pfn = clamp(node_start_pfn, zone_low, zone_high);
6470         zone_end_pfn = clamp(node_end_pfn, zone_low, zone_high);
6471 
6472         adjust_zone_range_for_zone_movable(nid, zone_type,
6473                         node_start_pfn, node_end_pfn,
6474                         &zone_start_pfn, &zone_end_pfn);
6475         nr_absent = __absent_pages_in_range(nid, zone_start_pfn, zone_end_pfn);
6476 
6477         /*
6478          * ZONE_MOVABLE handling.
6479          * Treat pages to be ZONE_MOVABLE in ZONE_NORMAL as absent pages
6480          * and vice versa.
6481          */
6482         if (mirrored_kernelcore && zone_movable_pfn[nid]) {
6483                 unsigned long start_pfn, end_pfn;
6484                 struct memblock_region *r;
6485 
6486                 for_each_memblock(memory, r) {
6487                         start_pfn = clamp(memblock_region_memory_base_pfn(r),
6488                                           zone_start_pfn, zone_end_pfn);
6489                         end_pfn = clamp(memblock_region_memory_end_pfn(r),
6490                                         zone_start_pfn, zone_end_pfn);
6491 
6492                         if (zone_type == ZONE_MOVABLE &&
6493                             memblock_is_mirror(r))
6494                                 nr_absent += end_pfn - start_pfn;
6495 
6496                         if (zone_type == ZONE_NORMAL &&
6497                             !memblock_is_mirror(r))
6498                                 nr_absent += end_pfn - start_pfn;
6499                 }
6500         }
6501 
6502         return nr_absent;
6503 }
6504 
6505 #else /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */
6506 static inline unsigned long __init zone_spanned_pages_in_node(int nid,
6507                                         unsigned long zone_type,
6508                                         unsigned long node_start_pfn,
6509                                         unsigned long node_end_pfn,
6510                                         unsigned long *zone_start_pfn,
6511                                         unsigned long *zone_end_pfn,
6512                                         unsigned long *zones_size)
6513 {
6514         unsigned int zone;
6515 
6516         *zone_start_pfn = node_start_pfn;
6517         for (zone = 0; zone < zone_type; zone++)
6518                 *zone_start_pfn += zones_size[zone];
6519 
6520         *zone_end_pfn = *zone_start_pfn + zones_size[zone_type];
6521 
6522         return zones_size[zone_type];
6523 }
6524 
6525 static inline unsigned long __init zone_absent_pages_in_node(int nid,
6526                                                 unsigned long zone_type,
6527                                                 unsigned long node_start_pfn,
6528                                                 unsigned long node_end_pfn,
6529                                                 unsigned long *zholes_size)
6530 {
6531         if (!zholes_size)
6532                 return 0;
6533 
6534         return zholes_size[zone_type];
6535 }
6536 
6537 #endif /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */
6538 
6539 static void __init calculate_node_totalpages(struct pglist_data *pgdat,
6540                                                 unsigned long node_start_pfn,
6541                                                 unsigned long node_end_pfn,
6542                                                 unsigned long *zones_size,
6543                                                 unsigned long *zholes_size)
6544 {
6545         unsigned long realtotalpages = 0, totalpages = 0;
6546         enum zone_type i;
6547 
6548         for (i = 0; i < MAX_NR_ZONES; i++) {
6549                 struct zone *zone = pgdat->node_zones + i;
6550                 unsigned long zone_start_pfn, zone_end_pfn;
6551                 unsigned long size, real_size;
6552 
6553                 size = zone_spanned_pages_in_node(pgdat->node_id, i,
6554                                                   node_start_pfn,
6555                                                   node_end_pfn,
6556                                                   &zone_start_pfn,
6557                                                   &zone_end_pfn,
6558                                                   zones_size);
6559                 real_size = size - zone_absent_pages_in_node(pgdat->node_id, i,
6560                                                   node_start_pfn, node_end_pfn,
6561                                                   zholes_size);
6562                 if (size)
6563                         zone->zone_start_pfn = zone_start_pfn;
6564                 else
6565                         zone->zone_start_pfn = 0;
6566                 zone->spanned_pages = size;
6567                 zone->present_pages = real_size;
6568 
6569                 totalpages += size;
6570                 realtotalpages += real_size;
6571         }
6572 
6573         pgdat->node_spanned_pages = totalpages;
6574         pgdat->node_present_pages = realtotalpages;
6575         printk(KERN_DEBUG "On node %d totalpages: %lu\n", pgdat->node_id,
6576                                                         realtotalpages);
6577 }
6578 
6579 #ifndef CONFIG_SPARSEMEM
6580 /*
6581  * Calculate the size of the zone->blockflags bitmap, rounded up to an
6582  * unsigned long. Start by making sure zonesize is a multiple of
6583  * pageblock_nr_pages by rounding up. Then use NR_PAGEBLOCK_BITS bits per
6584  * pageblock, round what is now in bits up to the nearest long, and
6585  * return the result in bytes.
6586  */
6587 static unsigned long __init usemap_size(unsigned long zone_start_pfn, unsigned long zonesize)
6588 {
6589         unsigned long usemapsize;
6590 
6591         zonesize += zone_start_pfn & (pageblock_nr_pages-1);
6592         usemapsize = roundup(zonesize, pageblock_nr_pages);
6593         usemapsize = usemapsize >> pageblock_order;
6594         usemapsize *= NR_PAGEBLOCK_BITS;
6595         usemapsize = roundup(usemapsize, 8 * sizeof(unsigned long));
6596 
6597         return usemapsize / 8;
6598 }
6599 
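Worked through for a pageblock-aligned zone of 262144 pages with 512-page (2 MiB) pageblocks and NR_PAGEBLOCK_BITS = 4: rounding up changes nothing, 262144 >> 9 = 512 pageblocks, times 4 is 2048 bits, already a multiple of 64, so the usemap is 2048 / 8 = 256 bytes. A sketch of the same arithmetic (the pageblock order and bits-per-block are typical but assumed values):

#include <stdio.h>

#define PAGEBLOCK_ORDER 9UL                     /* assumed: 2 MiB pageblocks */
#define PAGEBLOCK_NR_PAGES (1UL << PAGEBLOCK_ORDER)
#define NR_PAGEBLOCK_BITS 4UL                   /* assumed bits per pageblock */

static unsigned long roundup_ul(unsigned long x, unsigned long to)
{
        return ((x + to - 1) / to) * to;
}

int main(void)
{
        unsigned long zone_start_pfn = 0, zonesize = 262144;    /* 1 GiB zone */
        unsigned long usemapsize;

        zonesize += zone_start_pfn & (PAGEBLOCK_NR_PAGES - 1);
        usemapsize = roundup_ul(zonesize, PAGEBLOCK_NR_PAGES);
        usemapsize >>= PAGEBLOCK_ORDER;
        usemapsize *= NR_PAGEBLOCK_BITS;
        usemapsize = roundup_ul(usemapsize, 8 * sizeof(unsigned long));

        printf("usemap: %lu bytes\n", usemapsize / 8);          /* 256 */
        return 0;
}
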
6600 static void __ref setup_usemap(struct pglist_data *pgdat,
6601                                 struct zone *zone,
6602                                 unsigned long zone_start_pfn,
6603                                 unsigned long zonesize)
6604 {
6605         unsigned long usemapsize = usemap_size(zone_start_pfn, zonesize);
6606         zone->pageblock_flags = NULL;
6607         if (usemapsize) {
6608                 zone->pageblock_flags =
6609                         memblock_alloc_node(usemapsize, SMP_CACHE_BYTES,
6610                                             pgdat->node_id);
6611                 if (!zone->pageblock_flags)
6612                         panic("Failed to allocate %ld bytes for zone %s pageblock flags on node %d\n",
6613                               usemapsize, zone->name, pgdat->node_id);
6614         }
6615 }
6616 #else
6617 static inline void setup_usemap(struct pglist_data *pgdat, struct zone *zone,
6618                                 unsigned long zone_start_pfn, unsigned long zonesize) {}
6619 #endif /* CONFIG_SPARSEMEM */
6620 
6621 #ifdef CONFIG_HUGETLB_PAGE_SIZE_VARIABLE
6622 
6623 /* Initialise the number of pages represented by NR_PAGEBLOCK_BITS */
6624 void __init set_pageblock_order(void)
6625 {
6626         unsigned int order;
6627 
6628         /* Check that pageblock_nr_pages has not already been setup */
6629         if (pageblock_order)
6630                 return;
6631 
6632         if (HPAGE_SHIFT > PAGE_SHIFT)
6633                 order = HUGETLB_PAGE_ORDER;
6634         else
6635                 order = MAX_ORDER - 1;
6636 
6637         /*
6638          * Assume the largest contiguous order of interest is a huge page.
6639          * This value may be variable depending on boot parameters on IA64 and
6640          * powerpc.
6641          */
6642         pageblock_order = order;
6643 }
6644 #else /* CONFIG_HUGETLB_PAGE_SIZE_VARIABLE */
6645 
6646 /*
6647  * When CONFIG_HUGETLB_PAGE_SIZE_VARIABLE is not set, set_pageblock_order()
6648  * is unused as pageblock_order is set at compile-time. See
6649  * include/linux/pageblock-flags.h for the values of pageblock_order based on
6650  * the kernel config.
6651  */
6652 void __init set_pageblock_order(void)
6653 {
6654 }
6655 
6656 #endif /* CONFIG_HUGETLB_PAGE_SIZE_VARIABLE */
6657 
6658 static unsigned long __init calc_memmap_size(unsigned long spanned_pages,
6659                                                 unsigned long present_pages)
6660 {
6661         unsigned long pages = spanned_pages;
6662 
6663         /*
6664          * Provide a more accurate estimation if there are holes within
6665          * the zone and SPARSEMEM is in use. If there are holes within the
6666          * zone, each populated memory region may cost us one or two extra
6667          * memmap pages due to alignment because memmap pages for each
6668          * populated region may not be naturally aligned on a page boundary.
6669          * So the (present_pages >> 4) heuristic is a tradeoff for that.
6670          */
6671         if (spanned_pages > present_pages + (present_pages >> 4) &&
6672             IS_ENABLED(CONFIG_SPARSEMEM))
6673                 pages = present_pages;
6674 
6675         return PAGE_ALIGN(pages * sizeof(struct page)) >> PAGE_SHIFT;
6676 }
6677 
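To make the heuristic above concrete, assume a 64-byte struct page (the real size is config dependent) and a SPARSEMEM zone spanning 300000 PFNs of which 250000 are present: spanned exceeds present + present/16, so only the present pages are charged, giving 250000 * 64 = 16000000 bytes, i.e. 3907 pages of memmap after page-aligning. A sketch:

#include <stdio.h>

#define PAGE_SIZE 4096UL
#define STRUCT_PAGE_SIZE 64UL           /* assumed; depends on kernel config */

static unsigned long calc_memmap_pages(unsigned long spanned, unsigned long present)
{
        unsigned long pages = spanned;

        /* assume SPARSEMEM: charge only present pages when holes are large */
        if (spanned > present + (present >> 4))
                pages = present;
        return (pages * STRUCT_PAGE_SIZE + PAGE_SIZE - 1) / PAGE_SIZE;
}

int main(void)
{
        printf("memmap pages: %lu\n", calc_memmap_pages(300000, 250000)); /* 3907 */
        return 0;
}
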
6678 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
6679 static void pgdat_init_split_queue(struct pglist_data *pgdat)
6680 {
6681         struct deferred_split *ds_queue = &pgdat->deferred_split_queue;
6682 
6683         spin_lock_init(&ds_queue->split_queue_lock);
6684         INIT_LIST_HEAD(&ds_queue->split_queue);
6685         ds_queue->split_queue_len = 0;
6686 }
6687 #else
6688 static void pgdat_init_split_queue(struct pglist_data *pgdat) {}
6689 #endif
6690 
6691 #ifdef CONFIG_COMPACTION
6692 static void pgdat_init_kcompactd(struct pglist_data *pgdat)
6693 {
6694         init_waitqueue_head(&pgdat->kcompactd_wait);
6695 }
6696 #else
6697 static void pgdat_init_kcompactd(struct pglist_data *pgdat) {}
6698 #endif
6699 
6700 static void __meminit pgdat_init_internals(struct pglist_data *pgdat)
6701 {
6702         pgdat_resize_init(pgdat);
6703 
6704         pgdat_init_split_queue(pgdat);
6705         pgdat_init_kcompactd(pgdat);
6706 
6707         init_waitqueue_head(&pgdat->kswapd_wait);
6708         init_waitqueue_head(&pgdat->pfmemalloc_wait);
6709 
6710         pgdat_page_ext_init(pgdat);
6711         spin_lock_init(&pgdat->lru_lock);
6712         lruvec_init(node_lruvec(pgdat));
6713 }
6714 
6715 static void __meminit zone_init_internals(struct zone *zone, enum zone_type idx, int nid,
6716                                                         unsigned long remaining_pages)
6717 {
6718         atomic_long_set(&zone->managed_pages, remaining_pages);
6719         zone_set_nid(zone, nid);
6720         zone->name = zone_names[idx];
6721         zone->zone_pgdat = NODE_DATA(nid);
6722         spin_lock_init(&zone->lock);
6723         zone_seqlock_init(zone);
6724         zone_pcp_init(zone);
6725 }
6726 
6727 /*
6728  * Set up the zone data structures
6729  * - init pgdat internals
6730  * - init all zones belonging to this node
6731  *
6732  * NOTE: this function is only called during memory hotplug
6733  */
6734 #ifdef CONFIG_MEMORY_HOTPLUG
6735 void __ref free_area_init_core_hotplug(int nid)
6736 {
6737         enum zone_type z;
6738         pg_data_t *pgdat = NODE_DATA(nid);
6739 
6740         pgdat_init_internals(pgdat);
6741         for (z = 0; z < MAX_NR_ZONES; z++)
6742                 zone_init_internals(&pgdat->node_zones[z], z, nid, 0);
6743 }
6744 #endif
6745 
6746 /*
6747  * Set up the zone data structures:
6748  *   - mark all pages reserved
6749  *   - mark all memory queues empty
6750  *   - clear the memory bitmaps
6751  *
6752  * NOTE: pgdat should get zeroed by caller.
6753  * NOTE: this function is only called during early init.
6754  */
6755 static void __init free_area_init_core(struct pglist_data *pgdat)
6756 {
6757         enum zone_type j;
6758         int nid = pgdat->node_id;
6759 
6760         pgdat_init_internals(pgdat);
6761         pgdat->per_cpu_nodestats = &boot_nodestats;
6762 
6763         for (j = 0; j < MAX_NR_ZONES; j++) {
6764                 struct zone *zone = pgdat->node_zones + j;
6765                 unsigned long size, freesize, memmap_pages;
6766                 unsigned long zone_start_pfn = zone->zone_start_pfn;
6767 
6768                 size = zone->spanned_pages;
6769                 freesize = zone->present_pages;
6770 
6771                 /*
6772                  * Adjust freesize so that it accounts for how much memory
6773                  * is used by this zone for memmap. This affects the watermark
6774                  * and per-cpu initialisations
6775                  */
6776                 memmap_pages = calc_memmap_size(size, freesize);
6777                 if (!is_highmem_idx(j)) {
6778                         if (freesize >= memmap_pages) {
6779                                 freesize -= memmap_pages;
6780                                 if (memmap_pages)
6781                                         printk(KERN_DEBUG
6782                                                "  %s zone: %lu pages used for memmap\n",
6783                                                zone_names[j], memmap_pages);
6784                         } else
6785                                 pr_warn("  %s zone: %lu pages exceeds freesize %lu\n",
6786                                         zone_names[j], memmap_pages, freesize);
6787                 }
6788 
6789                 /* Account for reserved pages */
6790                 if (j == 0 && freesize > dma_reserve) {
6791                         freesize -= dma_reserve;
6792                         printk(KERN_DEBUG "  %s zone: %lu pages reserved\n",
6793                                         zone_names[0], dma_reserve);
6794                 }
6795 
6796                 if (!is_highmem_idx(j))
6797                         nr_kernel_pages += freesize;
6798                 /* Charge for highmem memmap if there are enough kernel pages */
6799                 else if (nr_kernel_pages > memmap_pages * 2)
6800                         nr_kernel_pages -= memmap_pages;
6801                 nr_all_pages += freesize;
6802 
6803                 /*
6804                  * Set an approximate value for lowmem here, it will be adjusted
6805                  * when the bootmem allocator frees pages into the buddy system.
6806                  * And all highmem pages will be managed by the buddy system.
6807                  */
6808                 zone_init_internals(zone, j, nid, freesize);
6809 
6810                 if (!size)
6811                         continue;
6812 
6813                 set_pageblock_order();
6814                 setup_usemap(pgdat, zone, zone_start_pfn, size);
6815                 init_currently_empty_zone(zone, zone_start_pfn, size);
6816                 memmap_init(size, nid, j, zone_start_pfn);
6817         }
6818 }
6819 
6820 #ifdef CONFIG_FLAT_NODE_MEM_MAP
6821 static void __ref alloc_node_mem_map(struct pglist_data *pgdat)
6822 {
6823         unsigned long __maybe_unused start = 0;
6824         unsigned long __maybe_unused offset = 0;
6825 
6826         /* Skip empty nodes */
6827         if (!pgdat->node_spanned_pages)
6828                 return;
6829 
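             /*
              * Round the node start down to a MAX_ORDER boundary; 'offset'
              * is how far node_start_pfn sits inside that aligned block and
              * is used below to bias node_mem_map / mem_map accordingly.
              */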
6830         start = pgdat->node_start_pfn & ~(MAX_ORDER_NR_PAGES - 1);
6831         offset = pgdat->node_start_pfn - start;
6832         /* ia64 gets its own node_mem_map, before this, without bootmem */
6833         if (!pgdat->node_mem_map) {
6834                 unsigned long size, end;
6835                 struct page *map;
6836 
6837                 /*
6838                  * The zone's endpoints aren't required to be MAX_ORDER
6839                  * aligned but the node_mem_map endpoints must be in order
6840                  * for the buddy allocator to function correctly.
6841                  */
6842                 end = pgdat_end_pfn(pgdat);
6843                 end = ALIGN(end, MAX_ORDER_NR_PAGES);
6844                 size =  (end - start) * sizeof(struct page);
6845                 map = memblock_alloc_node(size, SMP_CACHE_BYTES,
6846                                           pgdat->node_id);
6847                 if (!map)
6848                         panic("Failed to allocate %ld bytes for node %d memory map\n",
6849                               size, pgdat->node_id);
6850                 pgdat->node_mem_map = map + offset;
6851         }
6852         pr_debug("%s: node %d, pgdat %08lx, node_mem_map %08lx\n",
6853                                 __func__, pgdat->node_id, (unsigned long)pgdat,
6854                                 (unsigned long)pgdat->node_mem_map);
6855 #ifndef CONFIG_NEED_MULTIPLE_NODES
6856         /*
6857          * With no DISCONTIG, the global mem_map is just set as node 0's
6858          */
6859         if (pgdat == NODE_DATA(0)) {
6860                 mem_map = NODE_DATA(0)->node_mem_map;
6861 #if defined(CONFIG_HAVE_MEMBLOCK_NODE_MAP) || defined(CONFIG_FLATMEM)
6862                 if (page_to_pfn(mem_map) != pgdat->node_start_pfn)
6863                         mem_map -= offset;
6864 #endif /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */
6865         }
6866 #endif
6867 }
6868 #else
6869 static void __ref alloc_node_mem_map(struct pglist_data *pgdat) { }
6870 #endif /* CONFIG_FLAT_NODE_MEM_MAP */
6871 
6872 #ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
6873 static inline void pgdat_set_deferred_range(pg_data_t *pgdat)
6874 {
6875         pgdat->first_deferred_pfn = ULONG_MAX;
6876 }
6877 #else
6878 static inline void pgdat_set_deferred_range(pg_data_t *pgdat) {}
6879 #endif
6880 
6881 void __init free_area_init_node(int nid, unsigned long *zones_size,
6882                                    unsigned long node_start_pfn,
6883                                    unsigned long *zholes_size)
6884 {
6885         pg_data_t *pgdat = NODE_DATA(nid);
6886         unsigned long start_pfn = 0;
6887         unsigned long end_pfn = 0;
6888 
6889         /* pg_data_t should be reset to zero when it's allocated */
6890         WARN_ON(pgdat->nr_zones || pgdat->kswapd_classzone_idx);
6891 
6892         pgdat->node_id = nid;
6893         pgdat->node_start_pfn = node_start_pfn;
6894         pgdat->per_cpu_nodestats = NULL;
6895 #ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
6896         get_pfn_range_for_nid(nid, &start_pfn, &end_pfn);
6897         pr_info("Initmem setup node %d [mem %#018Lx-%#018Lx]\n", nid,
6898                 (u64)start_pfn << PAGE_SHIFT,
6899                 end_pfn ? ((u64)end_pfn << PAGE_SHIFT) - 1 : 0);
6900 #else
6901         start_pfn = node_start_pfn;
6902 #endif
6903         calculate_node_totalpages(pgdat, start_pfn, end_pfn,
6904                                   zones_size, zholes_size);
6905 
6906         alloc_node_mem_map(pgdat);
6907         pgdat_set_deferred_range(pgdat);
6908 
6909         free_area_init_core(pgdat);
6910 }
6911 
6912 #if !defined(CONFIG_FLAT_NODE_MEM_MAP)
6913 /*
6914  * Zero all valid struct pages in range [spfn, epfn), return number of struct
6915  * pages zeroed
6916  */
6917 static u64 zero_pfn_range(unsigned long spfn, unsigned long epfn)
6918 {
6919         unsigned long pfn;
6920         u64 pgcnt = 0;
6921 
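             /*
              * Walk the range a pfn at a time, but check pfn_valid() only at
              * pageblock granularity: if the first pfn of a pageblock is
              * invalid, skip ahead to the last pfn of that pageblock so the
              * loop increment moves straight on to the next one.
              */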
6922         for (pfn = spfn; pfn < epfn; pfn++) {
6923                 if (!pfn_valid(ALIGN_DOWN(pfn, pageblock_nr_pages))) {
6924                         pfn = ALIGN_DOWN(pfn, pageblock_nr_pages)
6925                                 + pageblock_nr_pages - 1;
6926                         continue;
6927                 }
6928                 mm_zero_struct_page(pfn_to_page(pfn));
6929                 pgcnt++;
6930         }
6931 
6932         return pgcnt;
6933 }
6934 
6935 /*
6936  * Only struct pages that are backed by physical memory are zeroed and
6937  * initialized by going through __init_single_page(). But, there are some
6938  * struct pages which are reserved in memblock allocator and their fields
6939  * may be accessed (for example, page_to_pfn() in some configurations
6940  * accesses flags). We must explicitly zero those struct pages.
6941  *
6942  * This function also addresses a similar issue where struct pages are left
6943  * uninitialized because the physical address range is not covered by
6944  * memblock.memory or memblock.reserved. That could happen when memblock
6945  * layout is manually configured via memmap=, or when the highest physical
6946  * address (max_pfn) does not end on a section boundary.
6947  */
6948 void __init zero_resv_unavail(void)
6949 {
6950         phys_addr_t start, end;
6951         u64 i, pgcnt;
6952         phys_addr_t next = 0;
6953 
6954         /*
6955          * Loop through unavailable ranges not covered by memblock.memory.
6956          */
6957         pgcnt = 0;
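             /*
              * 'next' tracks the end of the previous memory range, so each
              * gap [next, start) is a hole without backing memory whose
              * struct pages still need to be zeroed.
              */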
6958         for_each_mem_range(i, &memblock.memory, NULL,
6959                         NUMA_NO_NODE, MEMBLOCK_NONE, &start, &end, NULL) {
6960                 if (next < start)
6961                         pgcnt += zero_pfn_range(PFN_DOWN(next), PFN_UP(start));
6962                 next = end;
6963         }
6964 
6965         /*
6966          * Early sections always have a fully populated memmap for the whole
6967          * section - see pfn_valid(). If the last section has holes at the
6968          * end and that section is marked "online", the memmap will be
6969          * considered initialized. Make sure that memmap has a well defined
6970          * state.
6971          */
6972         pgcnt += zero_pfn_range(PFN_DOWN(next),
6973                                 round_up(max_pfn, PAGES_PER_SECTION));
6974 
6975         /*
6976          * Struct pages that do not have backing memory. This could be because
6977          * firmware is using some of this memory, or for some other reasons.
6978          */
6979         if (pgcnt)
6980                 pr_info("Zeroed struct page in unavailable ranges: %lld pages\n", pgcnt);
6981 }
6982 #endif /* !CONFIG_FLAT_NODE_MEM_MAP */
6983 
6984 #ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
6985 
6986 #if MAX_NUMNODES > 1
6987 /*
6988  * Figure out the number of possible node ids.
6989  */
6990 void __init setup_nr_node_ids(void)
6991 {
6992         unsigned int highest;
6993 
6994         highest = find_last_bit(node_possible_map.bits, MAX_NUMNODES);
6995         nr_node_ids = highest + 1;
6996 }
6997 #endif
6998 
6999 /**
7000  * node_map_pfn_alignment - determine the maximum internode alignment
7001  *
7002  * This function should be called after node map is populated and sorted.
7003  * It calculates the maximum power of two alignment which can distinguish
7004  * all the nodes.
7005  *
7006  * For example, if all nodes are 1GiB and aligned to 1GiB, the return value
7007  * would indicate 1GiB alignment with (1 << (30 - PAGE_SHIFT)).  If the
7008  * nodes are shifted by 256MiB, 256MiB.  Note that if only the last node is
7009  * shifted, 1GiB is enough and this function will indicate so.
7010  *
7011  * This is used to test whether pfn -> nid mapping of the chosen memory
7012  * model has fine enough granularity to avoid incorrect mapping for the
7013  * populated node map.
7014  *
7015  * Return: the determined alignment in pfn's.  0 if there is no alignment
7016  * requirement (single node).
7017  */
7018 unsigned long __init node_map_pfn_alignment(void)
7019 {
7020         unsigned long accl_mask = 0, last_end = 0;
7021         unsigned long start, end, mask;
7022         int last_nid = NUMA_NO_NODE;
7023         int i, nid;
7024 
7025         for_each_mem_pfn_range(i, MAX_NUMNODES, &start, &end, &nid) {
7026                 if (!start || last_nid < 0 || last_nid == nid) {
7027                         last_nid = nid;
7028                         last_end = end;
7029                         continue;
7030                 }
7031 
7032                 /*
7033                  * Start with a mask granular enough to pin-point to the
7034                  * start pfn and tick off bits one-by-one until it becomes
7035                  * too coarse to separate the current node from the last.
7036                  */
7037                 mask = ~((1 << __ffs(start)) - 1);
7038                 while (mask && last_end <= (start & (mask << 1)))
7039                         mask <<= 1;
7040 
7041                 /* accumulate all internode masks */
7042                 accl_mask |= mask;
7043         }
7044 
7045         /* convert mask to number of pages */
7046         return ~accl_mask + 1;
7047 }
7048 
7049 /* Find the lowest pfn for a node */
7050 static unsigned long __init find_min_pfn_for_node(int nid)
7051 {
7052         unsigned long min_pfn = ULONG_MAX;
7053         unsigned long start_pfn;
7054         int i;
7055 
7056         for_each_mem_pfn_range(i, nid, &start_pfn, NULL, NULL)
7057                 min_pfn = min(min_pfn, start_pfn);
7058 
7059         if (min_pfn == ULONG_MAX) {
7060                 pr_warn("Could not find start_pfn for node %d\n", nid);
7061                 return 0;
7062         }
7063 
7064         return min_pfn;
7065 }
7066 
7067 /**
7068  * find_min_pfn_with_active_regions - Find the minimum PFN registered
7069  *
7070  * Return: the minimum PFN based on information provided via
7071  * memblock_set_node().
7072  */
7073 unsigned long __init find_min_pfn_with_active_regions(void)
7074 {
7075         return find_min_pfn_for_node(MAX_NUMNODES);
7076 }
7077 
7078 /*
7079  * early_calculate_totalpages()
7080  * Sum pages in active regions for movable zone.
7081  * Populate N_MEMORY for calculating usable_nodes.
7082  */
7083 static unsigned long __init early_calculate_totalpages(void)
7084 {
7085         unsigned long totalpages = 0;
7086         unsigned long start_pfn, end_pfn;
7087         int i, nid;
7088 
7089         for_each_mem_pfn_range(i, MAX_NUMNODES, &start_pfn, &end_pfn, &nid) {
7090                 unsigned long pages = end_pfn - start_pfn;
7091 
7092                 totalpages += pages;
7093                 if (pages)
7094                         node_set_state(nid, N_MEMORY);
7095         }
7096         return totalpages;
7097 }
7098 
7099 /*
7100  * Find the PFN the Movable zone begins in each node. Kernel memory
7101  * is spread evenly between nodes as long as the nodes have enough
7102  * memory. When they don't, some nodes will have more kernelcore than
7103  * others
7104  */
7105 static void __init find_zone_movable_pfns_for_nodes(void)
7106 {
7107         int i, nid;
7108         unsigned long usable_startpfn;
7109         unsigned long kernelcore_node, kernelcore_remaining;
7110         /* save the state before borrowing the nodemask */
7111         nodemask_t saved_node_state = node_states[N_MEMORY];
7112         unsigned long totalpages = early_calculate_totalpages();
7113         int usable_nodes = nodes_weight(node_states[N_MEMORY]);
7114         struct memblock_region *r;
7115 
7116         /* Need to find movable_zone earlier when movable_node is specified. */
7117         find_usable_zone_for_movable();
7118 
7119         /*
7120          * If movable_node is specified, ignore kernelcore and movablecore
7121          * options.
7122          */
7123         if (movable_node_is_enabled()) {
7124                 for_each_memblock(memory, r) {
7125                         if (!memblock_is_hotpluggable(r))
7126                                 continue;
7127 
7128                         nid = r->nid;
7129 
7130                         usable_startpfn = PFN_DOWN(r->base);
7131                         zone_movable_pfn[nid] = zone_movable_pfn[nid] ?
7132                                 min(usable_startpfn, zone_movable_pfn[nid]) :
7133                                 usable_startpfn;
7134                 }
7135 
7136                 goto out2;
7137         }
7138 
7139         /*
7140          * If kernelcore=mirror is specified, ignore movablecore option
7141          */
7142         if (mirrored_kernelcore) {
7143                 bool mem_below_4gb_not_mirrored = false;
7144 
7145                 for_each_memblock(memory, r) {
7146                         if (memblock_is_mirror(r))
7147                                 continue;
7148 
7149                         nid = r->nid;
7150 
7151                         usable_startpfn = memblock_region_memory_base_pfn(r);
7152 
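                             /*
                              * pfn 0x100000 is the 4GiB boundary, assuming
                              * 4KiB pages; non-mirrored memory below it is
                              * left to the kernel rather than ZONE_MOVABLE.
                              */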
7153                         if (usable_startpfn < 0x100000) {
7154                                 mem_below_4gb_not_mirrored = true;
7155                                 continue;
7156                         }
7157 
7158                         zone_movable_pfn[nid] = zone_movable_pfn[nid] ?
7159                                 min(usable_startpfn, zone_movable_pfn[nid]) :
7160                                 usable_startpfn;
7161                 }
7162 
7163                 if (mem_below_4gb_not_mirrored)
7164                         pr_warn("This configuration results in unmirrored kernel memory.\n");
7165 
7166                 goto out2;
7167         }
7168 
7169         /*
7170          * If kernelcore=nn% or movablecore=nn% was specified, calculate the
7171          * amount of necessary memory.
7172          */
7173         if (required_kernelcore_percent)
7174                 required_kernelcore = (totalpages * 100 * required_kernelcore_percent) /
7175                                        10000UL;
7176         if (required_movablecore_percent)
7177                 required_movablecore = (totalpages * 100 * required_movablecore_percent) /
7178                                         10000UL;
7179 
7180         /*
7181          * If movablecore= was specified, calculate the corresponding
7182          * amount of kernelcore so that memory usable for any allocation
7183          * type is evenly spread. If both kernelcore and movablecore are
7184          * specified, the value of kernelcore will be used for
7185          * required_kernelcore if it is greater than what movablecore
7186          * would have allowed.
7187          */
7188         if (required_movablecore) {
7189                 unsigned long corepages;
7190 
7191                 /*
7192                  * Round-up so that ZONE_MOVABLE is at least as large as what
7193                  * was requested by the user
7194                  */
7195                 required_movablecore =
7196                         roundup(required_movablecore, MAX_ORDER_NR_PAGES);
7197                 required_movablecore = min(totalpages, required_movablecore);
7198                 corepages = totalpages - required_movablecore;
7199 
7200                 required_kernelcore = max(required_kernelcore, corepages);
7201         }
7202 
7203         /*
7204          * If kernelcore was not specified or kernelcore size is larger
7205          * than totalpages, there is no ZONE_MOVABLE.
7206          */
7207         if (!required_kernelcore || required_kernelcore >= totalpages)
7208                 goto out;
7209 
7210         /* usable_startpfn is the lowest possible pfn ZONE_MOVABLE can be at */
7211         usable_startpfn = arch_zone_lowest_possible_pfn[movable_zone];
7212 
7213 restart:
7214         /* Spread kernelcore memory as evenly as possible throughout nodes */
7215         kernelcore_node = required_kernelcore / usable_nodes;
7216         for_each_node_state(nid, N_MEMORY) {
7217                 unsigned long start_pfn, end_pfn;
7218 
7219                 /*
7220                  * Recalculate kernelcore_node if the division per node
7221                  * now exceeds what is necessary to satisfy the requested
7222                  * amount of memory for the kernel
7223                  */
7224                 if (required_kernelcore < kernelcore_node)
7225                         kernelcore_node = required_kernelcore / usable_nodes;
7226 
7227                 /*
7228                  * As the map is walked, we track how much memory is usable
7229                  * by the kernel using kernelcore_remaining. When it is
7230                  * 0, the rest of the node is usable by ZONE_MOVABLE
7231                  */
7232                 kernelcore_remaining = kernelcore_node;
7233 
7234                 /* Go through each range of PFNs within this node */
7235                 for_each_mem_pfn_range(i, nid, &start_pfn, &end_pfn, NULL) {
7236                         unsigned long size_pages;
7237 
7238                         start_pfn = max(start_pfn, zone_movable_pfn[nid]);
7239                         if (start_pfn >= end_pfn)
7240                                 continue;
7241 
7242                         /* Account for what is only usable for kernelcore */
7243                         if (start_pfn < usable_startpfn) {
7244                                 unsigned long kernel_pages;
7245                                 kernel_pages = min(end_pfn, usable_startpfn)
7246                                                                 - start_pfn;
7247 
7248                                 kernelcore_remaining -= min(kernel_pages,
7249                                                         kernelcore_remaining);
7250                                 required_kernelcore -= min(kernel_pages,
7251                                                         required_kernelcore);
7252 
7253                                 /* Continue if range is now fully accounted */
7254                                 if (end_pfn <= usable_startpfn) {
7255 
7256                                         /*
7257                                          * Push zone_movable_pfn to the end so
7258                                          * that if we have to rebalance
7259                                          * kernelcore across nodes, we will
7260                                          * not double account here
7261                                          */
7262                                         zone_movable_pfn[nid] = end_pfn;
7263                                         continue;
7264                                 }
7265                                 start_pfn = usable_startpfn;
7266                         }
7267 
7268                         /*
7269                          * The usable PFN range for ZONE_MOVABLE is from
7270                          * start_pfn->end_pfn. Calculate size_pages as the
7271                          * number of pages used as kernelcore
7272                          */
7273                         size_pages = end_pfn - start_pfn;
7274                         if (size_pages > kernelcore_remaining)
7275                                 size_pages = kernelcore_remaining;
7276                         zone_movable_pfn[nid] = start_pfn + size_pages;
7277 
7278                         /*
7279                          * Some kernelcore has been met, update counts and
7280                          * break if the kernelcore for this node has been
7281                          * satisfied
7282                          */
7283                         required_kernelcore -= min(required_kernelcore,
7284                                                                 size_pages);
7285                         kernelcore_remaining -= size_pages;
7286                         if (!kernelcore_remaining)
7287                                 break;
7288                 }
7289         }
7290 
7291         /*
7292          * If there is still required_kernelcore, we do another pass with one
7293          * less node in the count. This will push zone_movable_pfn[nid] further
7294          * along on the nodes that still have memory until kernelcore is
7295          * satisfied
7296          */
7297         usable_nodes--;
7298         if (usable_nodes && required_kernelcore > usable_nodes)
7299                 goto restart;
7300 
7301 out2:
7302         /* Align start of ZONE_MOVABLE on all nids to MAX_ORDER_NR_PAGES */
7303         for (nid = 0; nid < MAX_NUMNODES; nid++)
7304                 zone_movable_pfn[nid] =
7305                         roundup(zone_movable_pfn[nid], MAX_ORDER_NR_PAGES);
7306 
7307 out:
7308         /* restore the node_state */
7309         node_states[N_MEMORY] = saved_node_state;
7310 }
7311 
7312 /* Any regular or high memory on that node ? */
7313 static void check_for_memory(pg_data_t *pgdat, int nid)
7314 {
7315         enum zone_type zone_type;
7316 
7317         for (zone_type = 0; zone_type <= ZONE_MOVABLE - 1; zone_type++) {
7318                 struct zone *zone = &pgdat->node_zones[zone_type];
7319                 if (populated_zone(zone)) {
7320                         if (IS_ENABLED(CONFIG_HIGHMEM))
7321                                 node_set_state(nid, N_HIGH_MEMORY);
7322                         if (zone_type <= ZONE_NORMAL)
7323                                 node_set_state(nid, N_NORMAL_MEMORY);
7324                         break;
7325                 }
7326         }
7327 }
7328 
7329 /**
7330  * free_area_init_nodes - Initialise all pg_data_t and zone data
7331  * @max_zone_pfn: an array of max PFNs for each zone
7332  *
7333  * This will call free_area_init_node() for each active node in the system.
7334  * Using the page ranges provided by memblock_set_node(), the size of each
7335  * zone in each node and their holes is calculated. If the maximum PFN
7336  * between two adjacent zones match, it is assumed that the zone is empty.
7337  * For example, if arch_max_dma_pfn == arch_max_dma32_pfn, it is assumed
7338  * that arch_max_dma32_pfn has no pages. It is also assumed that a zone
7339  * starts where the previous one ended. For example, ZONE_DMA32 starts
7340  * at arch_max_dma_pfn.
7341  */
7342 void __init free_area_init_nodes(unsigned long *max_zone_pfn)
7343 {
7344         unsigned long start_pfn, end_pfn;
7345         int i, nid;
7346 
7347         /* Record where the zone boundaries are */
7348         memset(arch_zone_lowest_possible_pfn, 0,
7349                                 sizeof(arch_zone_lowest_possible_pfn));
7350         memset(arch_zone_highest_possible_pfn, 0,
7351                                 sizeof(arch_zone_highest_possible_pfn));
7352 
7353         start_pfn = find_min_pfn_with_active_regions();
7354 
7355         for (i = 0; i < MAX_NR_ZONES; i++) {
7356                 if (i == ZONE_MOVABLE)
7357                         continue;
7358 
7359                 end_pfn = max(max_zone_pfn[i], start_pfn);
7360                 arch_zone_lowest_possible_pfn[i] = start_pfn;
7361                 arch_zone_highest_possible_pfn[i] = end_pfn;
7362 
7363                 start_pfn = end_pfn;
7364         }
7365 
7366         /* Find the PFNs that ZONE_MOVABLE begins at in each node */
7367         memset(zone_movable_pfn, 0, sizeof(zone_movable_pfn));
7368         find_zone_movable_pfns_for_nodes();
7369 
7370         /* Print out the zone ranges */
7371         pr_info("Zone ranges:\n");
7372         for (i = 0; i < MAX_NR_ZONES; i++) {
7373                 if (i == ZONE_MOVABLE)
7374                         continue;
7375                 pr_info("  %-8s ", zone_names[i]);
7376                 if (arch_zone_lowest_possible_pfn[i] ==
7377                                 arch_zone_highest_possible_pfn[i])
7378                         pr_cont("empty\n");
7379                 else
7380                         pr_cont("[mem %#018Lx-%#018Lx]\n",
7381                                 (u64)arch_zone_lowest_possible_pfn[i]
7382                                         << PAGE_SHIFT,
7383                                 ((u64)arch_zone_highest_possible_pfn[i]
7384                                         << PAGE_SHIFT) - 1);
7385         }
7386 
7387         /* Print out the PFNs ZONE_MOVABLE begins at in each node */
7388         pr_info("Movable zone start for each node\n");
7389         for (i = 0; i < MAX_NUMNODES; i++) {
7390                 if (zone_movable_pfn[i])
7391                         pr_info("  Node %d: %#018Lx\n", i,
7392                                (u64)zone_movable_pfn[i] << PAGE_SHIFT);
7393         }
7394 
7395         /*
7396          * Print out the early node map, and initialize the
7397          * subsection-map relative to active online memory ranges to
7398          * enable future "sub-section" extensions of the memory map.
7399          */
7400         pr_info("Early memory node ranges\n");
7401         for_each_mem_pfn_range(i, MAX_NUMNODES, &start_pfn, &end_pfn, &nid) {
7402                 pr_info("  node %3d: [mem %#018Lx-%#018Lx]\n", nid,
7403                         (u64)start_pfn << PAGE_SHIFT,
7404                         ((u64)end_pfn << PAGE_SHIFT) - 1);
7405                 subsection_map_init(start_pfn, end_pfn - start_pfn);
7406         }
7407 
7408         /* Initialise every node */
7409         mminit_verify_pageflags_layout();
7410         setup_nr_node_ids();
7411         zero_resv_unavail();
7412         for_each_online_node(nid) {
7413                 pg_data_t *pgdat = NODE_DATA(nid);
7414                 free_area_init_node(nid, NULL,
7415                                 find_min_pfn_for_node(nid), NULL);
7416 
7417                 /* Any memory on that node */
7418                 if (pgdat->node_present_pages)
7419                         node_set_state(nid, N_MEMORY);
7420                 check_for_memory(pgdat, nid);
7421         }
7422 }
7423 
7424 static int __init cmdline_parse_core(char *p, unsigned long *core,
7425                                      unsigned long *percent)
7426 {
7427         unsigned long long coremem;
7428         char *endptr;
7429 
7430         if (!p)
7431                 return -EINVAL;
7432 
7433         /* Value may be a percentage of total memory, otherwise bytes */
7434         coremem = simple_strtoull(p, &endptr, 0);
7435         if (*endptr == '%') {
7436                 /* Paranoid check for percent values greater than 100 */
7437                 WARN_ON(coremem > 100);
7438 
7439                 *percent = coremem;
7440         } else {
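                     /* memparse() handles K/M/G/T/P/E size suffixes */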
7441                 coremem = memparse(p, &p);
7442                 /* Paranoid check that UL is enough for the coremem value */
7443                 WARN_ON((coremem >> PAGE_SHIFT) > ULONG_MAX);
7444 
7445                 *core = coremem >> PAGE_SHIFT;
7446                 *percent = 0UL;
7447         }
7448         return 0;
7449 }
7450 
7451 /*
7452  * kernelcore=size sets the amount of memory for use by allocations that
7453  * cannot be reclaimed or migrated.
7454  */
7455 static int __init cmdline_parse_kernelcore(char *p)
7456 {
7457         /* parse kernelcore=mirror */
7458         if (parse_option_str(p, "mirror")) {
7459                 mirrored_kernelcore = true;
7460                 return 0;
7461         }
7462 
7463         return cmdline_parse_core(p, &required_kernelcore,
7464                                   &required_kernelcore_percent);
7465 }
7466 
7467 /*
7468  * movablecore=size sets the amount of memory for use by allocations that
7469  * can be reclaimed or migrated.
7470  */
7471 static int __init cmdline_parse_movablecore(char *p)
7472 {
7473         return cmdline_parse_core(p, &required_movablecore,
7474                                   &required_movablecore_percent);
7475 }
7476 
7477 early_param("kernelcore", cmdline_parse_kernelcore);
7478 early_param("movablecore", cmdline_parse_movablecore);
7479 
7480 #endif /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */
7481 
7482 void adjust_managed_page_count(struct page *page, long count)
7483 {
7484         atomic_long_add(count, &page_zone(page)->managed_pages);
7485         totalram_pages_add(count);
7486 #ifdef CONFIG_HIGHMEM
7487         if (PageHighMem(page))
7488                 totalhigh_pages_add(count);
7489 #endif
7490 }
7491 EXPORT_SYMBOL(adjust_managed_page_count);
7492 
7493 unsigned long free_reserved_area(void *start, void *end, int poison, const char *s)
7494 {
7495         void *pos;
7496         unsigned long pages = 0;
7497 
7498         start = (void *)PAGE_ALIGN((unsigned long)start);
7499         end = (void *)((unsigned long)end & PAGE_MASK);
7500         for (pos = start; pos < end; pos += PAGE_SIZE, pages++) {
7501                 struct page *page = virt_to_page(pos);
7502                 void *direct_map_addr;
7503 
7504                 /*
7505                  * 'direct_map_addr' might be different from 'pos'
7506                  * because virt_to_page() on some architectures
7507                  * works with aliases.  Getting the direct map
7508                  * address ensures that we get a _writeable_
7509                  * alias for the memset().
7510                  */
7511                 direct_map_addr = page_address(page);
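                     /*
                      * A negative 'poison' value wraps above 0xFF when cast
                      * to unsigned int, so callers can pass e.g. -1 to skip
                      * the poisoning memset.
                      */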
7512                 if ((unsigned int)poison <= 0xFF)
7513                         memset(direct_map_addr, poison, PAGE_SIZE);
7514 
7515                 free_reserved_page(page);
7516         }
7517 
7518         if (pages && s)
7519                 pr_info("Freeing %s memory: %ldK\n",
7520                         s, pages << (PAGE_SHIFT - 10));
7521 
7522         return pages;
7523 }
7524 
7525 #ifdef  CONFIG_HIGHMEM
7526 void free_highmem_page(struct page *page)
7527 {
7528         __free_reserved_page(page);
7529         totalram_pages_inc();
7530         atomic_long_inc(&page_zone(page)->managed_pages);
7531         totalhigh_pages_inc();
7532 }
7533 #endif
7534 
7535 
7536 void __init mem_init_print_info(const char *str)
7537 {
7538         unsigned long physpages, codesize, datasize, rosize, bss_size;
7539         unsigned long init_code_size, init_data_size;
7540 
7541         physpages = get_num_physpages();
7542         codesize = _etext - _stext;
7543         datasize = _edata - _sdata;
7544         rosize = __end_rodata - __start_rodata;
7545         bss_size = __bss_stop - __bss_start;
7546         init_data_size = __init_end - __init_begin;
7547         init_code_size = _einittext - _sinittext;
7548 
7549         /*
7550          * Detect special cases and adjust section sizes accordingly:
7551          * 1) .init.* may be embedded into .data sections
7552          * 2) .init.text.* may be out of [__init_begin, __init_end],
7553          *    please refer to arch/tile/kernel/vmlinux.lds.S.
7554          * 3) .rodata.* may be embedded into .text or .data sections.
7555          */
7556 #define adj_init_size(start, end, size, pos, adj) \
7557         do { \
7558                 if (start <= pos && pos < end && size > adj) \
7559                         size -= adj; \
7560         } while (0)
7561 
7562         adj_init_size(__init_begin, __init_end, init_data_size,
7563                      _sinittext, init_code_size);
7564         adj_init_size(_stext, _etext, codesize, _sinittext, init_code_size);
7565         adj_init_size(_sdata, _edata, datasize, __init_begin, init_data_size);
7566         adj_init_size(_stext, _etext, codesize, __start_rodata, rosize);
7567         adj_init_size(_sdata, _edata, datasize, __start_rodata, rosize);
7568 
7569 #undef  adj_init_size
7570 
7571         pr_info("Memory: %luK/%luK available (%luK kernel code, %luK rwdata, %luK rodata, %luK init, %luK bss, %luK reserved, %luK cma-reserved"
7572 #ifdef  CONFIG_HIGHMEM
7573                 ", %luK highmem"
7574 #endif
7575                 "%s%s)\n",
7576                 nr_free_pages() << (PAGE_SHIFT - 10),
7577                 physpages << (PAGE_SHIFT - 10),
7578                 codesize >> 10, datasize >> 10, rosize >> 10,
7579                 (init_data_size + init_code_size) >> 10, bss_size >> 10,
7580                 (physpages - totalram_pages() - totalcma_pages) << (PAGE_SHIFT - 10),
7581                 totalcma_pages << (PAGE_SHIFT - 10),
7582 #ifdef  CONFIG_HIGHMEM
7583                 totalhigh_pages() << (PAGE_SHIFT - 10),
7584 #endif
7585                 str ? ", " : "", str ? str : "");
7586 }
7587 
7588 /**
7589  * set_dma_reserve - set the specified number of pages reserved in the first zone
7590  * @new_dma_reserve: The number of pages to mark reserved
7591  *
7592  * The per-cpu batchsize and zone watermarks are determined by managed_pages.
7593  * In the DMA zone, a significant percentage may be consumed by kernel image
7594  * and other unfreeable allocations which can skew the watermarks badly. This
7595  * function may optionally be used to account for unfreeable pages in the
7596  * first zone (e.g., ZONE_DMA). The effect will be lower watermarks and
7597  * smaller per-cpu batchsize.
7598  */
7599 void __init set_dma_reserve(unsigned long new_dma_reserve)
7600 {
7601         dma_reserve = new_dma_reserve;
7602 }
7603 
7604 void __init free_area_init(unsigned long *zones_size)
7605 {
7606         zero_resv_unavail();
7607         free_area_init_node(0, zones_size,
7608                         __pa(PAGE_OFFSET) >> PAGE_SHIFT, NULL);
7609 }
7610 
7611 static int page_alloc_cpu_dead(unsigned int cpu)
7612 {
7613 
7614         lru_add_drain_cpu(cpu);
7615         drain_pages(cpu);
7616 
7617         /*
7618          * Spill the event counters of the dead processor
7619          * into the current processors event counters.
7620          * This artificially elevates the count of the current
7621          * processor.
7622          */
7623         vm_events_fold_cpu(cpu);
7624 
7625         /*
7626          * Zero the differential counters of the dead processor
7627          * so that the vm statistics are consistent.
7628          *
7629          * This is only okay since the processor is dead and cannot
7630          * race with what we are doing.
7631          */
7632         cpu_vm_stats_fold(cpu);
7633         return 0;
7634 }
7635 
7636 #ifdef CONFIG_NUMA
7637 int hashdist = HASHDIST_DEFAULT;
7638 
7639 static int __init set_hashdist(char *str)
7640 {
7641         if (!str)
7642                 return 0;
7643         hashdist = simple_strtoul(str, &str, 0);
7644         return 1;
7645 }
7646 __setup("hashdist=", set_hashdist);
7647 #endif
7648 
7649 void __init page_alloc_init(void)
7650 {
7651         int ret;
7652 
7653 #ifdef CONFIG_NUMA
7654         if (num_node_state(N_MEMORY) == 1)
7655                 hashdist = 0;
7656 #endif
7657 
7658         ret = cpuhp_setup_state_nocalls(CPUHP_PAGE_ALLOC_DEAD,
7659                                         "mm/page_alloc:dead", NULL,
7660                                         page_alloc_cpu_dead);
7661         WARN_ON(ret < 0);
7662 }
7663 
7664 /*
7665  * calculate_totalreserve_pages - called when sysctl_lowmem_reserve_ratio
7666  *      or min_free_kbytes changes.
7667  */
7668 static void calculate_totalreserve_pages(void)
7669 {
7670         struct pglist_data *pgdat;
7671         unsigned long reserve_pages = 0;
7672         enum zone_type i, j;
7673 
7674         for_each_online_pgdat(pgdat) {
7675 
7676                 pgdat->totalreserve_pages = 0;
7677 
7678                 for (i = 0; i < MAX_NR_ZONES; i++) {
7679                         struct zone *zone = pgdat->node_zones + i;
7680                         long max = 0;
7681                         unsigned long managed_pages = zone_managed_pages(zone);
7682 
7683                         /* Find valid and maximum lowmem_reserve in the zone */
7684                         for (j = i; j < MAX_NR_ZONES; j++) {
7685                                 if (zone->lowmem_reserve[j] > max)
7686                                         max = zone->lowmem_reserve[j];
7687                         }
7688 
7689                         /* we treat the high watermark as reserved pages. */
7690                         max += high_wmark_pages(zone);
7691 
7692                         if (max > managed_pages)
7693                                 max = managed_pages;
7694 
7695                         pgdat->totalreserve_pages += max;
7696 
7697                         reserve_pages += max;
7698                 }
7699         }
7700         totalreserve_pages = reserve_pages;
7701 }
7702 
7703 /*
7704  * setup_per_zone_lowmem_reserve - called whenever
7705  *      sysctl_lowmem_reserve_ratio changes.  Ensures that each zone
7706  *      has a correct lowmem reserve value, so an adequate number of
7707  *      pages are left in the zone after a successful __alloc_pages().
7708  */
7709 static void setup_per_zone_lowmem_reserve(void)
7710 {
7711         struct pglist_data *pgdat;
7712         enum zone_type j, idx;
7713 
7714         for_each_online_pgdat(pgdat) {
7715                 for (j = 0; j < MAX_NR_ZONES; j++) {
7716                         struct zone *zone = pgdat->node_zones + j;
7717                         unsigned long managed_pages = zone_managed_pages(zone);
7718 
7719                         zone->lowmem_reserve[j] = 0;
7720 
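                             /*
                              * Walk downwards from zone j: when zone 'idx' is
                              * handled, 'managed_pages' holds the sum of pages
                              * in zones idx+1..j, so each lower zone reserves
                              * memory in proportion to the higher zones whose
                              * allocations may fall back onto it.
                              */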
7721                         idx = j;
7722                         while (idx) {
7723                                 struct zone *lower_zone;
7724 
7725                                 idx--;
7726                                 lower_zone = pgdat->node_zones + idx;
7727 
7728                                 if (sysctl_lowmem_reserve_ratio[idx] < 1) {
7729                                         sysctl_lowmem_reserve_ratio[idx] = 0;
7730                                         lower_zone->lowmem_reserve[j] = 0;
7731                                 } else {
7732                                         lower_zone->lowmem_reserve[j] =
7733                                                 managed_pages / sysctl_lowmem_reserve_ratio[idx];
7734                                 }
7735                                 managed_pages += zone_managed_pages(lower_zone);
7736                         }
7737                 }
7738         }
7739 
7740         /* update totalreserve_pages */
7741         calculate_totalreserve_pages();
7742 }
7743 
7744 static void __setup_per_zone_wmarks(void)
7745 {
7746         unsigned long pages_min = min_free_kbytes >> (PAGE_SHIFT - 10);
7747         unsigned long lowmem_pages = 0;
7748         struct zone *zone;
7749         unsigned long flags;
7750 
7751         /* Calculate total number of !ZONE_HIGHMEM pages */
7752         for_each_zone(zone) {
7753                 if (!is_highmem(zone))
7754                         lowmem_pages += zone_managed_pages(zone);
7755         }
7756 
7757         for_each_zone(zone) {
7758                 u64 tmp;
7759 
7760                 spin_lock_irqsave(&zone->lock, flags);
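                     /*
                      * Give each zone a share of pages_min proportional to
                      * its share of all lowmem pages:
                      * tmp = pages_min * managed_pages / lowmem_pages.
                      */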
7761                 tmp = (u64)pages_min * zone_managed_pages(zone);
7762                 do_div(tmp, lowmem_pages);
7763                 if (is_highmem(zone)) {
7764                         /*
7765                          * __GFP_HIGH and PF_MEMALLOC allocations usually don't
7766                          * need highmem pages, so cap pages_min to a small
7767                          * value here.
7768                          *
7769                          * The WMARK_HIGH-WMARK_LOW and (WMARK_LOW-WMARK_MIN)
7770                          * deltas control async page reclaim, and so should
7771                          * not be capped for highmem.
7772                          */
7773                         unsigned long min_pages;
7774 
7775                         min_pages = zone_managed_pages(zone) / 1024;
7776                         min_pages = clamp(min_pages, SWAP_CLUSTER_MAX, 128UL);
7777                         zone->_watermark[WMARK_MIN] = min_pages;
7778                 } else {
7779                         /*
7780                          * If it's a lowmem zone, reserve a number of pages
7781                          * proportionate to the zone's size.
7782                          */
7783                         zone->_watermark[WMARK_MIN] = tmp;
7784                 }
7785 
7786                 /*
7787                  * Set the kswapd watermarks distance according to the
7788                  * scale factor in proportion to available memory, but
7789                  * ensure a minimum size on small systems.
7790                  */
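                     /*
                      * e.g. with watermark_scale_factor == 10 (the default)
                      * this becomes max(tmp / 4, 0.1% of the zone's managed
                      * pages).
                      */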
7791                 tmp = max_t(u64, tmp >> 2,
7792                             mult_frac(zone_managed_pages(zone),
7793                                       watermark_scale_factor, 10000));
7794 
7795                 zone->_watermark[WMARK_LOW]  = min_wmark_pages(zone) + tmp;
7796                 zone->_watermark[WMARK_HIGH] = min_wmark_pages(zone) + tmp * 2;
7797                 zone->watermark_boost = 0;
7798 
7799                 spin_unlock_irqrestore(&zone->lock, flags);
7800         }
7801 
7802         /* update totalreserve_pages */
7803         calculate_totalreserve_pages();
7804 }
7805 
7806 /**
7807  * setup_per_zone_wmarks - called when min_free_kbytes changes
7808  * or when memory is hot-{added|removed}
7809  *
7810  * Ensures that the watermark[min,low,high] values for each zone are set
7811  * correctly with respect to min_free_kbytes.
7812  */
7813 void setup_per_zone_wmarks(void)
7814 {
7815         static DEFINE_SPINLOCK(lock);
7816 
7817         spin_lock(&lock);
7818         __setup_per_zone_wmarks();
7819         spin_unlock(&lock);
7820 }
7821 
7822 /*
7823  * Initialise min_free_kbytes.
7824  *
7825  * For small machines we want it small (128k min).  For large machines
7826  * we want it large (64MB max).  But it is not linear, because network
7827  * bandwidth does not increase linearly with machine size.  We use
7828  *
7829  *      min_free_kbytes = 4 * sqrt(lowmem_kbytes), for better accuracy:
7830  *      min_free_kbytes = sqrt(lowmem_kbytes * 16)
7831  *
7832  * which yields
7833  *
7834  * 16MB:        512k
7835  * 32MB:        724k
7836  * 64MB:        1024k
7837  * 128MB:       1448k
7838  * 256MB:       2048k
7839  * 512MB:       2896k
7840  * 1024MB:      4096k
7841  * 2048MB:      5792k
7842  * 4096MB:      8192k
7843  * 8192MB:      11584k
7844  * 16384MB:     16384k
7845  */
7846 int __meminit init_per_zone_wmark_min(void)
7847 {
7848         unsigned long lowmem_kbytes;
7849         int new_min_free_kbytes;
7850 
7851         lowmem_kbytes = nr_free_buffer_pages() * (PAGE_SIZE >> 10);
7852         new_min_free_kbytes = int_sqrt(lowmem_kbytes * 16);
7853 
7854         if (new_min_free_kbytes > user_min_free_kbytes) {
7855                 min_free_kbytes = new_min_free_kbytes;
7856                 if (min_free_kbytes < 128)
7857                         min_free_kbytes = 128;
7858                 if (min_free_kbytes > 65536)
7859                         min_free_kbytes = 65536;
7860         } else {
7861                 pr_warn("min_free_kbytes is not updated to %d because user defined value %d is preferred\n",
7862                                 new_min_free_kbytes, user_min_free_kbytes);
7863         }
7864         setup_per_zone_wmarks();
7865         refresh_zone_stat_thresholds();
7866         setup_per_zone_lowmem_reserve();
7867 
7868 #ifdef CONFIG_NUMA
7869         setup_min_unmapped_ratio();
7870         setup_min_slab_ratio();
7871 #endif
7872 
7873         return 0;
7874 }
7875 core_initcall(init_per_zone_wmark_min)
7876 
7877 /*
7878  * min_free_kbytes_sysctl_handler - just a wrapper around proc_dointvec() so
7879  *      that we can call two helper functions whenever min_free_kbytes
7880  *      changes.
7881  */
7882 int min_free_kbytes_sysctl_handler(struct ctl_table *table, int write,
7883         void __user *buffer, size_t *length, loff_t *ppos)
7884 {
7885         int rc;
7886 
7887         rc = proc_dointvec_minmax(table, write, buffer, length, ppos);
7888         if (rc)
7889                 return rc;
7890 
7891         if (write) {
7892                 user_min_free_kbytes = min_free_kbytes;
7893                 setup_per_zone_wmarks();
7894         }
7895         return 0;
7896 }
7897 
7898 int watermark_boost_factor_sysctl_handler(struct ctl_table *table, int write,
7899         void __user *buffer, size_t *length, loff_t *ppos)
7900 {
7901         int rc;
7902 
7903         rc = proc_dointvec_minmax(table, write, buffer, length, ppos);
7904         if (rc)
7905                 return rc;
7906 
7907         return 0;
7908 }
7909 
7910 int watermark_scale_factor_sysctl_handler(struct ctl_table *table, int write,
7911         void __user *buffer, size_t *length, loff_t *ppos)
7912 {
7913         int rc;
7914 
7915         rc = proc_dointvec_minmax(table, write, buffer, length, ppos);
7916         if (rc)
7917                 return rc;
7918 
7919         if (write)
7920                 setup_per_zone_wmarks();
7921 
7922         return 0;
7923 }
7924 
7925 #ifdef CONFIG_NUMA
7926 static void setup_min_unmapped_ratio(void)
7927 {
7928         pg_data_t *pgdat;
7929         struct zone *zone;
7930 
7931         for_each_online_pgdat(pgdat)
7932                 pgdat->min_unmapped_pages = 0;
7933 
7934         for_each_zone(zone)
7935                 zone->zone_pgdat->min_unmapped_pages += (zone_managed_pages(zone) *
7936                                                          sysctl_min_unmapped_ratio) / 100;
7937 }
7938 
7939 
7940 int sysctl_min_unmapped_ratio_sysctl_handler(struct ctl_table *table, int write,
7941         void __user *buffer, size_t *length, loff_t *ppos)
7942 {
7943         int rc;
7944 
7945         rc = proc_dointvec_minmax(table, write, buffer, length, ppos);
7946         if (rc)
7947                 return rc;
7948 
7949         setup_min_unmapped_ratio();
7950 
7951         return 0;
7952 }
7953 
7954 static void setup_min_slab_ratio(void)
7955 {
7956         pg_data_t *pgdat;
7957         struct zone *zone;
7958 
7959         for_each_online_pgdat(pgdat)
7960                 pgdat->min_slab_pages = 0;
7961 
7962         for_each_zone(zone)
7963                 zone->zone_pgdat->min_slab_pages += (zone_managed_pages(zone) *
7964                                                      sysctl_min_slab_ratio) / 100;
7965 }
7966 
7967 int sysctl_min_slab_ratio_sysctl_handler(struct ctl_table *table, int write,
7968         void __user *buffer, size_t *length, loff_t *ppos)
7969 {
7970         int rc;
7971 
7972         rc = proc_dointvec_minmax(table, write, buffer, length, ppos);
7973         if (rc)
7974                 return rc;
7975 
7976         setup_min_slab_ratio();
7977 
7978         return 0;
7979 }
7980 #endif
7981 
7982 /*
7983  * lowmem_reserve_ratio_sysctl_handler - just a wrapper around
7984  *      proc_dointvec() so that we can call setup_per_zone_lowmem_reserve()
7985  *      whenever sysctl_lowmem_reserve_ratio changes.
7986  *
7987  * The reserve ratio obviously has absolutely no relation with the
7988  * minimum watermarks. The lowmem reserve ratio can only make sense
7989  * minimum watermarks. The lowmem reserve ratio can only make sense
7990  * as a function of the boot-time zone sizes.
7991 int lowmem_reserve_ratio_sysctl_handler(struct ctl_table *table, int write,
7992         void __user *buffer, size_t *length, loff_t *ppos)
7993 {
7994         proc_dointvec_minmax(table, write, buffer, length, ppos);
7995         setup_per_zone_lowmem_reserve();
7996         return 0;
7997 }
7998 
7999 /*
8000  * percpu_pagelist_fraction - changes the pcp->high for each zone on each
8001  * cpu.  It is the fraction of total pages in each zone that a hot per cpu
8002  * pagelist can have before it gets flushed back to the buddy allocator.
8003  */
8004 int percpu_pagelist_fraction_sysctl_handler(struct ctl_table *table, int write,
8005         void __user *buffer, size_t *length, loff_t *ppos)
8006 {
8007         struct zone *zone;
8008         int old_percpu_pagelist_fraction;
8009         int ret;
8010 
8011         mutex_lock(&pcp_batch_high_lock);
8012         old_percpu_pagelist_fraction = percpu_pagelist_fraction;
8013 
8014         ret = proc_dointvec_minmax(table, write, buffer, length, ppos);
8015         if (!write || ret < 0)
8016                 goto out;
8017 
8018         /* Sanity checking to avoid pcp imbalance */
8019         if (percpu_pagelist_fraction &&
8020             percpu_pagelist_fraction < MIN_PERCPU_PAGELIST_FRACTION) {
8021                 percpu_pagelist_fraction = old_percpu_pagelist_fraction;
8022                 ret = -EINVAL;
8023                 goto out;
8024         }
8025 
8026         /* No change? */
8027         if (percpu_pagelist_fraction == old_percpu_pagelist_fraction)
8028                 goto out;
8029 
8030         for_each_populated_zone(zone) {
8031                 unsigned int cpu;
8032 
8033                 for_each_possible_cpu(cpu)
8034                         pageset_set_high_and_batch(zone,
8035                                         per_cpu_ptr(zone->pageset, cpu));
8036         }
8037 out:
8038         mutex_unlock(&pcp_batch_high_lock);
8039         return ret;
8040 }
8041 
8042 #ifndef __HAVE_ARCH_RESERVED_KERNEL_PAGES
8043 /*
8044  * Returns the number of pages that the arch has reserved but
8045  * that are not known to alloc_large_system_hash().
8046  */
8047 static unsigned long __init arch_reserved_kernel_pages(void)
8048 {
8049         return 0;
8050 }
8051 #endif
8052 
8053 /*
8054  * Adaptive scale is meant to reduce sizes of hash tables on large memory
8055  * machines. As memory size is increased the scale is also increased but at
8056  * slower pace.  Starting from ADAPT_SCALE_BASE (64G), every time memory
8057  * quadruples the scale is increased by one, which means the size of hash table
8058  * only doubles, instead of quadrupling as well.
8059  * Because 32-bit systems cannot have large physical memory, where this scaling
8060  * makes sense, it is disabled on such platforms.
8061  */
8062 #if __BITS_PER_LONG > 32
8063 #define ADAPT_SCALE_BASE        (64ul << 30)
8064 #define ADAPT_SCALE_SHIFT       2
8065 #define ADAPT_SCALE_NPAGES      (ADAPT_SCALE_BASE >> PAGE_SHIFT)
8066 #endif
8067 
8068 /*
8069  * allocate a large system hash table from bootmem
8070  * - it is assumed that the hash table must contain an exact power-of-2
8071  *   quantity of entries
8072  * - limit is the number of hash buckets, not the total allocation size
8073  */
8074 void *__init alloc_large_system_hash(const char *tablename,
8075                                      unsigned long bucketsize,
8076                                      unsigned long numentries,
8077                                      int scale,
8078                                      int flags,
8079                                      unsigned int *_hash_shift,
8080                                      unsigned int *_hash_mask,
8081                                      unsigned long low_limit,
8082                                      unsigned long high_limit)
8083 {
8084         unsigned long long max = high_limit;
8085         unsigned long log2qty, size;
8086         void *table = NULL;
8087         gfp_t gfp_flags;
8088         bool virt;
8089 
8090         /* allow the kernel cmdline to have a say */
8091         if (!numentries) {
8092                 /* round applicable memory size up to nearest megabyte */
8093                 numentries = nr_kernel_pages;
8094                 numentries -= arch_reserved_kernel_pages();
8095 
8096                 /* Rounding up is unnecessary when PAGE_SIZE >= 1MB */
8097                 if (PAGE_SHIFT < 20)
8098                         numentries = round_up(numentries, (1<<20)/PAGE_SIZE);
8099 
8100 #if __BITS_PER_LONG > 32
8101                 if (!high_limit) {
8102                         unsigned long adapt;
8103 
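                             /*
                              * e.g. assuming 4KiB pages, 256GiB of memory
                              * bumps 'scale' once and 1TiB twice, so the
                              * table size grows roughly with the square root
                              * of memory above ADAPT_SCALE_BASE.
                              */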
8104                         for (adapt = ADAPT_SCALE_NPAGES; adapt < numentries;
8105                              adapt <<= ADAPT_SCALE_SHIFT)
8106                                 scale++;
8107                 }
8108 #endif
8109 
8110                 /* limit to 1 bucket per 2^scale bytes of low memory */
8111                 if (scale > PAGE_SHIFT)
8112                         numentries >>= (scale - PAGE_SHIFT);
8113                 else
8114                         numentries <<= (PAGE_SHIFT - scale);
8115 
8116                 /* Make sure we've got at least a 0-order allocation. */
8117                 if (unlikely(flags & HASH_SMALL)) {
8118                         /* Makes no sense without HASH_EARLY */
8119                         WARN_ON(!(flags & HASH_EARLY));
8120                         if (!(numentries >> *_hash_shift)) {
8121                                 numentries = 1UL << *_hash_shift;
8122                                 BUG_ON(!numentries);
8123                         }
8124                 } else if (unlikely((numentries * bucketsize) < PAGE_SIZE))
8125                         numentries = PAGE_SIZE / bucketsize;
8126         }
8127         numentries = roundup_pow_of_two(numentries);
8128 
8129         /* limit allocation size to 1/16 total memory by default */
8130         if (max == 0) {
8131                 max = ((unsigned long long)nr_all_pages << PAGE_SHIFT) >> 4;
8132                 do_div(max, bucketsize);
8133         }
8134         max = min(max, 0x80000000ULL);
8135 
8136         if (numentries < low_limit)
8137                 numentries = low_limit;
8138         if (numentries > max)
8139                 numentries = max;
8140 
8141         log2qty = ilog2(numentries);
8142 
8143         gfp_flags = (flags & HASH_ZERO) ? GFP_ATOMIC | __GFP_ZERO : GFP_ATOMIC;
8144         do {
8145                 virt = false;
8146                 size = bucketsize << log2qty;
8147                 if (flags & HASH_EARLY) {
8148                         if (flags & HASH_ZERO)
8149                                 table = memblock_alloc(size, SMP_CACHE_BYTES);
8150                         else
8151                                 table = memblock_alloc_raw(size,
8152                                                            SMP_CACHE_BYTES);
8153                 } else if (get_order(size) >= MAX_ORDER || hashdist) {
8154                         table = __vmalloc(size, gfp_flags, PAGE_KERNEL);
8155                         virt = true;
8156                 } else {
8157                         /*
8158                          * If bucketsize is not a power of two, the table
8159                          * size will not be either; alloc_pages_exact()
8160                          * automatically frees the unused pages at the end.
8161                          */
8162                         table = alloc_pages_exact(size, gfp_flags);
8163                         kmemleak_alloc(table, size, 1, gfp_flags);
8164                 }
8165         } while (!table && size > PAGE_SIZE && --log2qty);
8166 
8167         if (!table)
8168                 panic("Failed to allocate %s hash table\n", tablename);
8169 
8170         pr_info("%s hash table entries: %ld (order: %d, %lu bytes, %s)\n",
8171                 tablename, 1UL << log2qty, ilog2(size) - PAGE_SHIFT, size,
8172                 virt ? "vmalloc" : "linear");
8173 
8174         if (_hash_shift)
8175                 *_hash_shift = log2qty;
8176         if (_hash_mask)
8177                 *_hash_mask = (1 << log2qty) - 1;
8178 
8179         return table;
8180 }
8181 
8182 /*
8183  * This function checks whether pageblock includes unmovable pages or not.
8184  * This function checks whether the pageblock includes unmovable pages or not.
8185  * If @count is not zero, it is okay to include fewer than @count unmovable pages.
8186  *
8187  * A PageLRU check without isolation or the lru_lock could race, so a
8188  * MIGRATE_MOVABLE block might include unmovable pages. A __PageMovable
8189  * check without lock_page may likewise miss some movable non-LRU pages in
8190  * such races. So this function cannot be expected to be exact.
8191 bool has_unmovable_pages(struct zone *zone, struct page *page, int count,
8192                          int migratetype, int flags)
8193 {
8194         unsigned long found;
8195         unsigned long iter = 0;
8196         unsigned long pfn = page_to_pfn(page);
8197         const char *reason = "unmovable page";
8198 
8199         /*
8200          * TODO: we could make this much more efficient by not checking every
8201          * page in the range if we knew that all of them are in ZONE_MOVABLE
8202          * and that the movable zone guarantees that pages are migratable, but
8203          * the latter is unfortunately not the case right now. E.g. movablecore
8204          * can still lead to having bootmem allocations in zone_movable.
8205          */
8206 
8207         if (is_migrate_cma_page(page)) {
8208                 /*
8209                  * CMA allocations (alloc_contig_range) really need to mark
8210                  * CMA pageblocks as isolated even when they are not in fact
8211                  * movable, so consider them movable here.
8212                  */
8213                 if (is_migrate_cma(migratetype))
8214                         return false;
8215 
8216                 reason = "CMA page";
8217                 goto unmovable;
8218         }
8219 
8220         for (found = 0; iter < pageblock_nr_pages; iter++) {
8221                 unsigned long check = pfn + iter;
8222 
8223                 if (!pfn_valid_within(check))
8224                         continue;
8225 
8226                 page = pfn_to_page(check);
8227 
8228                 if (PageReserved(page))
8229                         goto unmovable;
8230 
8231                 /*
8232                  * If the zone is movable and we have ruled out all reserved
8233                  * pages then it should be reasonably safe to assume the rest
8234                  * is movable.
8235                  */
8236                 if (zone_idx(zone) == ZONE_MOVABLE)
8237                         continue;
8238 
8239                 /*
8240                  * Hugepages are not in LRU lists, but they're movable.
8241                  * We need not scan over tail pages because we don't
8242                  * handle each tail page individually in migration.
8243                  */
8244                 if (PageHuge(page)) {
8245                         struct page *head = compound_head(page);
8246                         unsigned int skip_pages;
8247 
8248                         if (!hugepage_migration_supported(page_hstate(head)))
8249                                 goto unmovable;
8250 
8251                         skip_pages = compound_nr(head) - (page - head);
8252                         iter += skip_pages - 1;
8253                         continue;
8254                 }
8255 
8256                 /*
8257                  * We can't use page_count without pinning the page
8258                  * because another CPU can free the compound page.
8259                  * This check already skips compound tails of THP
8260                  * because their page->_refcount is zero at all times.
8261                  */
8262                 if (!page_ref_count(page)) {
8263                         if (PageBuddy(page))
8264                                 iter += (1 << page_order(page)) - 1;
8265                         continue;
8266                 }
8267 
8268                 /*
8269                  * The HWPoisoned page may not be in the buddy system, and
8270                  * its page_count() is not 0.
8271                  */
8272                 if ((flags & SKIP_HWPOISON) && PageHWPoison(page))
8273                         continue;
8274 
8275                 if (__PageMovable(page))
8276                         continue;
8277 
8278                 if (!PageLRU(page))
8279                         found++;
8280                 /*
8281                  * If there are RECLAIMABLE pages, we need to check
8282                  * them.  But for now, memory offline itself doesn't call
8283                  * shrink_node_slabs(), and this still needs to be fixed.
8284                  */
8285                 /*
8286                  * If the page is not RAM, page_count() should be 0.
8287                  * No further check is needed: this is a _used_ non-movable page.
8288                  *
8289                  * The problematic thing here is PG_reserved pages. PG_reserved
8290                  * is set on both memory hole pages and _used_ kernel pages
8291                  * at boot.
8292                  */
8293                 if (found > count)
8294                         goto unmovable;
8295         }
8296         return false;
8297 unmovable:
8298         WARN_ON_ONCE(zone_idx(zone) == ZONE_MOVABLE);
8299         if (flags & REPORT_FAILURE)
8300                 dump_page(pfn_to_page(pfn + iter), reason);
8301         return true;
8302 }
8303 
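/*
 * A hedged caller-side sketch: page isolation (mm/page_isolation.c) is the
 * typical user of has_unmovable_pages(), only marking a pageblock
 * MIGRATE_ISOLATE when the check finds nothing unmovable.  This is a
 * simplified, illustrative helper, not the real set_migratetype_isolate()
 * (which also moves the block's free pages to the isolate freelist).
 */
static int example_try_isolate_pageblock(struct zone *zone, struct page *page,
					 int isol_flags)
{
	unsigned long flags;
	int ret = -EBUSY;

	spin_lock_irqsave(&zone->lock, flags);
	/* count == 0: tolerate no unmovable pages in this block */
	if (!has_unmovable_pages(zone, page, 0, MIGRATE_MOVABLE, isol_flags)) {
		set_pageblock_migratetype(page, MIGRATE_ISOLATE);
		ret = 0;
	}
	spin_unlock_irqrestore(&zone->lock, flags);
	return ret;
}
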
8304 #ifdef CONFIG_CONTIG_ALLOC
8305 static unsigned long pfn_max_align_down(unsigned long pfn)
8306 {
8307         return pfn & ~(max_t(unsigned long, MAX_ORDER_NR_PAGES,
8308                              pageblock_nr_pages) - 1);
8309 }
8310 
8311 static unsigned long pfn_max_align_up(unsigned long pfn)
8312 {
8313         return ALIGN(pfn, max_t(unsigned long, MAX_ORDER_NR_PAGES,
8314                                 pageblock_nr_pages));
8315 }
8316 
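/*
 * Worked example (hedged; assumes MAX_ORDER_NR_PAGES == 1024 and
 * pageblock_nr_pages == 512 for this configuration):
 *   pfn_max_align_down(5000) == 5000 & ~1023 == 4096
 *   pfn_max_align_up(5000)   == ALIGN(5000, 1024) == 5120
 * The range is widened to the larger of the two granularities so that the
 * buddy allocator never merges across the isolation boundary.
 */
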
8317 /* [start, end) must belong to a single zone. */
8318 static int __alloc_contig_migrate_range(struct compact_control *cc,
8319                                         unsigned long start, unsigned long end)
8320 {
8321         /* This function is based on compact_zone() from compaction.c. */
8322         unsigned long nr_reclaimed;
8323         unsigned long pfn = start;
8324         unsigned int tries = 0;
8325         int ret = 0;
8326 
8327         migrate_prep();
8328 
8329         while (pfn < end || !list_empty(&cc->migratepages)) {
8330                 if (fatal_signal_pending(current)) {
8331                         ret = -EINTR;
8332                         break;
8333                 }
8334 
8335                 if (list_empty(&cc->migratepages)) {
8336                         cc->nr_migratepages = 0;
8337                         pfn = isolate_migratepages_range(cc, pfn, end);
8338                         if (!pfn) {
8339                                 ret = -EINTR;
8340                                 break;
8341                         }
8342                         tries = 0;
8343                 } else if (++tries == 5) {
8344                         ret = ret < 0 ? ret : -EBUSY;
8345                         break;
8346                 }
8347 
8348                 nr_reclaimed = reclaim_clean_pages_from_list(cc->zone,
8349                                                         &cc->migratepages);
8350                 cc->nr_migratepages -= nr_reclaimed;
8351 
8352                 ret = migrate_pages(&cc->migratepages, alloc_migrate_target,
8353                                     NULL, 0, cc->mode, MR_CONTIG_RANGE);
8354         }
8355         if (ret < 0) {
8356                 putback_movable_pages(&cc->migratepages);
8357                 return ret;
8358         }
8359         return 0;
8360 }
8361 
8362 /**
8363  * alloc_contig_range() -- tries to allocate given range of pages
8364  * @start:      start PFN to allocate
8365  * @end:        one-past-the-last PFN to allocate
8366  * @migratetype:        migratetype of the underlying pageblocks (either
8367  *                      #MIGRATE_MOVABLE or #MIGRATE_CMA).  All pageblocks
8368  *                      in range must have the same migratetype and it must
8369  *                      be either of the two.
8370  * @gfp_mask:   GFP mask to use during compaction
8371  *
8372  * The PFN range does not have to be pageblock or MAX_ORDER_NR_PAGES
8373  * aligned.  The PFN range must belong to a single zone.
8374  *
8375  * The first thing this routine does is attempt to MIGRATE_ISOLATE all
8376  * pageblocks in the range.  Once isolated, the pageblocks should not
8377  * be modified by others.
8378  *
8379  * Return: zero on success or negative error code.  On success all
8380  * pages whose PFN is in [start, end) are allocated for the caller and
8381  * need to be freed with free_contig_range().
8382  */
8383 int alloc_contig_range(unsigned long start, unsigned long end,
8384                        unsigned migratetype, gfp_t gfp_mask)
8385 {
8386         unsigned long outer_start, outer_end;
8387         unsigned int order;
8388         int ret = 0;
8389 
8390         struct compact_control cc = {
8391                 .nr_migratepages = 0,
8392                 .order = -1,
8393                 .zone = page_zone(pfn_to_page(start)),
8394                 .mode = MIGRATE_SYNC,
8395                 .ignore_skip_hint = true,
8396                 .no_set_skip_hint = true,
8397                 .gfp_mask = current_gfp_context(gfp_mask),
8398         };
8399         INIT_LIST_HEAD(&cc.migratepages);
8400 
8401         /*
8402          * What we do here is mark all pageblocks in the range as
8403          * MIGRATE_ISOLATE.  Because pageblocks and max order pages may
8404          * have different sizes, and due to the way the page allocator
8405          * works, we align the range to the bigger of the two sizes so
8406          * that the page allocator won't try to merge buddies from
8407          * different pageblocks and change MIGRATE_ISOLATE to some
8408          * other migration type.
8409          *
8410          * Once the pageblocks are marked as MIGRATE_ISOLATE, we
8411          * migrate the pages from the unaligned range (i.e. the pages
8412          * we are interested in).  This puts all the pages in the
8413          * range back into the page allocator as MIGRATE_ISOLATE.
8414          *
8415          * When this is done, we take the pages in the range from the
8416          * page allocator, removing them from the buddy system.  This way
8417          * the page allocator will never consider using them.
8418          *
8419          * This lets us mark the pageblocks back as
8420          * MIGRATE_CMA/MIGRATE_MOVABLE, so that free pages in the
8421          * aligned range but outside the unaligned, original range are
8422          * returned to the page allocator for the buddy system to use.
8423          */
8424 
8425         ret = start_isolate_page_range(pfn_max_align_down(start),
8426                                        pfn_max_align_up(end), migratetype, 0);
8427         if (ret < 0)
8428                 return ret;
8429 
8430         /*
8431          * In case of -EBUSY, we'd like to know which page causes the problem.
8432          * So, just fall through. test_pages_isolated() has a tracepoint
8433          * which will report the busy page.
8434          *
8435          * It is possible that busy pages could become available before
8436          * the call to test_pages_isolated, and the range will actually be
8437          * allocated.  So, if we fall through, be sure to clear ret so that
8438          * -EBUSY is not accidentally used or returned to the caller.
8439          */
8440         ret = __alloc_contig_migrate_range(&cc, start, end);
8441         if (ret && ret != -EBUSY)
8442                 goto done;
8443         ret = 0;
8444 
8445         /*
8446          * Pages from [start, end) are within MAX_ORDER_NR_PAGES
8447          * aligned blocks that are marked as MIGRATE_ISOLATE.  What's
8448          * more, all pages in [start, end) are free in the page allocator.
8449          * What we are going to do is allocate all pages from
8450          * [start, end) (that is, remove them from the page allocator).
8451          *
8452          * The only problem is that pages at the beginning and at the
8453          * end of the interesting range may not be aligned with pages that
8454          * the page allocator holds, i.e. they can be part of higher order
8455          * pages.  Because of this, we reserve the bigger range and
8456          * once this is done free the pages we are not interested in.
8457          *
8458          * We don't have to hold zone->lock here because the pages are
8459          * isolated and thus won't get removed from the buddy system.
8460          */
8461 
8462         lru_add_drain_all();
8463 
8464         order = 0;
8465         outer_start = start;
8466         while (!PageBuddy(pfn_to_page(outer_start))) {
8467                 if (++order >= MAX_ORDER) {
8468                         outer_start = start;
8469                         break;
8470                 }
8471                 outer_start &= ~0UL << order;
8472         }
8473 
8474         if (outer_start != start) {
8475                 order = page_order(pfn_to_page(outer_start));
8476 
8477                 /*
8478                  * The outer_start page could be a small order buddy page
8479                  * that does not include the start page. Adjust outer_start
8480                  * in this case so the failed page is reported properly
8481                  * by the tracepoint in test_pages_isolated().
8482                  */
8483                 if (outer_start + (1UL << order) <= start)
8484                         outer_start = start;
8485         }
8486 
8487         /* Make sure the range is really isolated. */
8488         if (test_pages_isolated(outer_start, end, false)) {
8489                 pr_info_ratelimited("%s: [%lx, %lx) PFNs busy\n",
8490                         __func__, outer_start, end);
8491                 ret = -EBUSY;
8492                 goto done;
8493         }
8494 
8495         /* Grab isolated pages from freelists. */
8496         outer_end = isolate_freepages_range(&cc, outer_start, end);
8497         if (!outer_end) {
8498                 ret = -EBUSY;
8499                 goto done;
8500         }
8501 
8502         /* Free head and tail (if any) */
8503         if (start != outer_start)
8504                 free_contig_range(outer_start, start - outer_start);
8505         if (end != outer_end)
8506                 free_contig_range(end, outer_end - end);
8507 
8508 done:
8509         undo_isolate_page_range(pfn_max_align_down(start),
8510                                 pfn_max_align_up(end), migratetype);
8511         return ret;
8512 }
8513 #endif /* CONFIG_CONTIG_ALLOC */
8514 
8515 void free_contig_range(unsigned long pfn, unsigned int nr_pages)
8516 {
8517         unsigned int count = 0;
8518 
8519         for (; nr_pages--; pfn++) {
8520                 struct page *page = pfn_to_page(pfn);
8521 
8522                 count += page_count(page) != 1;
8523                 __free_page(page);
8524         }
8525         WARN(count != 0, "%d pages are still in use!\n", count);
8526 }
8527 
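/*
 * A hedged usage sketch, loosely modelled on cma_alloc()/cma_release() in
 * mm/cma.c: grab nr_pages physically contiguous pages starting at base_pfn
 * and hand them back later.  Assumes CONFIG_CONTIG_ALLOC and that the
 * pageblocks were reserved as MIGRATE_CMA at boot; wrapper names are
 * illustrative.
 */
static struct page *example_grab_contig(unsigned long base_pfn,
					unsigned long nr_pages, gfp_t gfp)
{
	int ret;

	ret = alloc_contig_range(base_pfn, base_pfn + nr_pages,
				 MIGRATE_CMA, gfp);
	if (ret)		/* e.g. -EBUSY: range could not be emptied */
		return NULL;
	return pfn_to_page(base_pfn);
}

static void example_release_contig(struct page *page, unsigned int nr_pages)
{
	free_contig_range(page_to_pfn(page), nr_pages);
}
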
8528 /*
8529  * The zone indicated has a new number of managed_pages; batch sizes and percpu
8530  * page high values need to be recalculated.
8531  */
8532 void __meminit zone_pcp_update(struct zone *zone)
8533 {
8534         unsigned cpu;
8535         mutex_lock(&pcp_batch_high_lock);
8536         for_each_possible_cpu(cpu)
8537                 pageset_set_high_and_batch(zone,
8538                                 per_cpu_ptr(zone->pageset, cpu));
8539         mutex_unlock(&pcp_batch_high_lock);
8540 }
8541 
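/*
 * A hedged usage sketch: memory hotplug is the typical caller.  Once
 * onlining or offlining has changed zone->managed_pages, something like
 * the following (simplified from online_pages() in mm/memory_hotplug.c)
 * refreshes the per-cpu pageset limits:
 */
static void example_after_resize(struct zone *zone)
{
	/* zone->managed_pages has already been updated at this point */
	zone_pcp_update(zone);	/* recompute pcp ->high and ->batch per CPU */
}
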
8542 void zone_pcp_reset(struct zone *zone)
8543 {
8544         unsigned long flags;
8545         int cpu;
8546         struct per_cpu_pageset *pset;
8547 
8548         /* avoid races with drain_pages() */
8549         local_irq_save(flags);
8550         if (zone->pageset != &boot_pageset) {
8551                 for_each_online_cpu(cpu) {
8552                         pset = per_cpu_ptr(zone->pageset, cpu);
8553                         drain_zonestat(zone, pset);
8554                 }
8555                 free_percpu(zone->pageset);
8556                 zone->pageset = &boot_pageset;
8557         }
8558         local_irq_restore(flags);
8559 }
8560 
8561 #ifdef CONFIG_MEMORY_HOTREMOVE
8562 /*
8563  * All pages in the range must be in a single zone and isolated
8564  * before calling this.
8565  */
8566 unsigned long
8567 __offline_isolated_pages(unsigned long start_pfn, unsigned long end_pfn)
8568 {
8569         struct page *page;
8570         struct zone *zone;
8571         unsigned int order, i;
8572         unsigned long pfn;
8573         unsigned long flags;
8574         unsigned long offlined_pages = 0;
8575 
8576         /* find the first valid pfn */
8577         for (pfn = start_pfn; pfn < end_pfn; pfn++)
8578                 if (pfn_valid(pfn))
8579                         break;
8580         if (pfn == end_pfn)
8581                 return offlined_pages;
8582 
8583         offline_mem_sections(pfn, end_pfn);
8584         zone = page_zone(pfn_to_page(pfn));
8585         spin_lock_irqsave(&zone->lock, flags);
8586         pfn = start_pfn;
8587         while (pfn < end_pfn) {
8588                 if (!pfn_valid(pfn)) {
8589                         pfn++;
8590                         continue;
8591                 }
8592                 page = pfn_to_page(pfn);
8593                 /*
8594                  * The HWPoisoned page may not be in the buddy system, and
8595                  * its page_count() is not 0.
8596                  */
8597                 if (unlikely(!PageBuddy(page) && PageHWPoison(page))) {
8598                         pfn++;
8599                         SetPageReserved(page);
8600                         offlined_pages++;
8601                         continue;
8602                 }
8603 
8604                 BUG_ON(page_count(page));
8605                 BUG_ON(!PageBuddy(page));
8606                 order = page_order(page);
8607                 offlined_pages += 1 << order;
8608 #ifdef CONFIG_DEBUG_VM
8609                 pr_info("remove from free list %lx %d %lx\n",
8610                         pfn, 1 << order, end_pfn);
8611 #endif
8612                 del_page_from_free_area(page, &zone->free_area[order]);
8613                 for (i = 0; i < (1 << order); i++)
8614                         SetPageReserved((page+i));
8615                 pfn += (1 << order);
8616         }
8617         spin_unlock_irqrestore(&zone->lock, flags);
8618 
8619         return offlined_pages;
8620 }
8621 #endif
8622 
8623 bool is_free_buddy_page(struct page *page)
8624 {
8625         struct zone *zone = page_zone(page);
8626         unsigned long pfn = page_to_pfn(page);
8627         unsigned long flags;
8628         unsigned int order;
8629 
8630         spin_lock_irqsave(&zone->lock, flags);
8631         for (order = 0; order < MAX_ORDER; order++) {
8632                 struct page *page_head = page - (pfn & ((1 << order) - 1));
8633 
8634                 if (PageBuddy(page_head) && page_order(page_head) >= order)
8635                         break;
8636         }
8637         spin_unlock_irqrestore(&zone->lock, flags);
8638 
8639         return order < MAX_ORDER;
8640 }
8641 
8642 #ifdef CONFIG_MEMORY_FAILURE
8643 /*
8644  * Set PG_hwpoison flag if a given page is confirmed to be a free page.  This
8645  * test is performed under the zone lock to prevent a race against page
8646  * allocation.
8647  */
8648 bool set_hwpoison_free_buddy_page(struct page *page)
8649 {
8650         struct zone *zone = page_zone(page);
8651         unsigned long pfn = page_to_pfn(page);
8652         unsigned long flags;
8653         unsigned int order;
8654         bool hwpoisoned = false;
8655 
8656         spin_lock_irqsave(&zone->lock, flags);
8657         for (order = 0; order < MAX_ORDER; order++) {
8658                 struct page *page_head = page - (pfn & ((1 << order) - 1));
8659 
8660                 if (PageBuddy(page_head) && page_order(page_head) >= order) {
8661                         if (!TestSetPageHWPoison(page))
8662                                 hwpoisoned = true;
8663                         break;
8664                 }
8665         }
8666         spin_unlock_irqrestore(&zone->lock, flags);
8667 
8668         return hwpoisoned;
8669 }
8670 #endif
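
/*
 * A hedged usage sketch, loosely modelled on the soft-offline path in
 * mm/memory-failure.c: a page that is still free in the buddy allocator is
 * poisoned in place so it will never be handed out again.  Helper name is
 * illustrative.
 */
static int example_poison_free_page(struct page *page)
{
	if (!set_hwpoison_free_buddy_page(page))
		return -EBUSY;	/* raced with allocation or already poisoned */
	num_poisoned_pages_inc();
	return 0;
}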
