mm/page_isolation.c


DEFINITIONS

This source file includes the following definitions.
  1. set_migratetype_isolate
  2. unset_migratetype_isolate
  3. __first_valid_page
  4. start_isolate_page_range
  5. undo_isolate_page_range
  6. __test_page_isolated_in_pageblock
  7. test_pages_isolated
  8. alloc_migrate_target

// SPDX-License-Identifier: GPL-2.0
/*
 * linux/mm/page_isolation.c
 */

#include <linux/mm.h>
#include <linux/page-isolation.h>
#include <linux/pageblock-flags.h>
#include <linux/memory.h>
#include <linux/hugetlb.h>
#include <linux/page_owner.h>
#include <linux/migrate.h>
#include "internal.h"

#define CREATE_TRACE_POINTS
#include <trace/events/page_isolation.h>

static int set_migratetype_isolate(struct page *page, int migratetype, int isol_flags)
{
        struct zone *zone;
        unsigned long flags, pfn;
        struct memory_isolate_notify arg;
        int notifier_ret;
        int ret = -EBUSY;

        zone = page_zone(page);

        spin_lock_irqsave(&zone->lock, flags);

        /*
         * We assume the caller intended to SET the migratetype of this
         * pageblock to isolate. If it is already set, then someone else
         * must have raced with us and set it first: return -EBUSY.
         */
        if (is_migrate_isolate_page(page))
                goto out;

        pfn = page_to_pfn(page);
        arg.start_pfn = pfn;
        arg.nr_pages = pageblock_nr_pages;
        arg.pages_found = 0;

        /*
         * It may be possible to isolate a pageblock even if its
         * migratetype is not MIGRATE_MOVABLE. The memory isolation
         * notifier chain is used by balloon drivers to return the
         * number of pages in a range that are held by the balloon
         * driver in order to shrink memory. If all the pages are
         * accounted for by balloons, are free, or are on the LRU,
         * isolation can continue. Later, for example, when the memory
         * hotplug notifier runs, the pages reported as "can be
         * isolated" should be isolated (freed) by the balloon driver
         * through the memory notifier chain.
         */
        notifier_ret = memory_isolate_notify(MEM_ISOLATE_COUNT, &arg);
        notifier_ret = notifier_to_errno(notifier_ret);
        if (notifier_ret)
                goto out;
        /*
         * FIXME: memory hotplug does not currently call shrink_slab()
         * by itself, so we only check MOVABLE pages here.
         */
        if (!has_unmovable_pages(zone, page, arg.pages_found, migratetype,
                                 isol_flags))
                ret = 0;

        /*
         * Here, "immobile" means pages that are not on the LRU. If the
         * number of immobile pages is larger than the number of
         * driver-removable pages reported by the notifier, we fail.
         */

out:
        if (!ret) {
                unsigned long nr_pages;
                int mt = get_pageblock_migratetype(page);

                set_pageblock_migratetype(page, MIGRATE_ISOLATE);
                zone->nr_isolate_pageblock++;
                nr_pages = move_freepages_block(zone, page, MIGRATE_ISOLATE,
                                                NULL);

                __mod_zone_freepage_state(zone, -nr_pages, mt);
        }

        spin_unlock_irqrestore(&zone->lock, flags);
        if (!ret)
                drain_all_pages(zone);
        return ret;
}
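
A short worked illustration of the accounting in the success path above; the figures are hypothetical, not taken from this file:

/*
 * Worked example (hypothetical numbers): with pageblock_order == 9, a
 * pageblock spans 512 pages. If 300 of them are free on MIGRATE_MOVABLE
 * freelists when the block is marked MIGRATE_ISOLATE,
 * move_freepages_block() moves those 300 pages onto the MIGRATE_ISOLATE
 * freelists and returns 300, and __mod_zone_freepage_state(zone, -300,
 * MIGRATE_MOVABLE) removes them from the zone's free-page accounting so
 * watermark checks no longer treat them as allocatable. drain_all_pages()
 * then flushes the per-cpu page lists, so pages cached there are freed
 * onto the (now isolated) freelists instead of being handed out.
 */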

static void unset_migratetype_isolate(struct page *page, unsigned migratetype)
{
        struct zone *zone;
        unsigned long flags, nr_pages;
        bool isolated_page = false;
        unsigned int order;
        unsigned long pfn, buddy_pfn;
        struct page *buddy;

        zone = page_zone(page);
        spin_lock_irqsave(&zone->lock, flags);
        if (!is_migrate_isolate_page(page))
                goto out;

        /*
         * Because a free page with order >= pageblock_order on an
         * isolated pageblock is restricted from merging due to the
         * freepage counting problem, it is possible that a free buddy
         * page exists. move_freepages_block() does not handle merging,
         * so we need another approach: isolating the page and then
         * freeing it will cause the pages to be merged.
         */
        if (PageBuddy(page)) {
                order = page_order(page);
                if (order >= pageblock_order) {
                        pfn = page_to_pfn(page);
                        buddy_pfn = __find_buddy_pfn(pfn, order);
                        buddy = page + (buddy_pfn - pfn);

                        if (pfn_valid_within(buddy_pfn) &&
                            !is_migrate_isolate_page(buddy)) {
                                __isolate_free_page(page, order);
                                isolated_page = true;
                        }
                }
        }

        /*
         * If we isolated a free page of order >= pageblock_order, there
         * should be no other free pages in the range, so we can avoid
         * the costly pageblock scan for moving free pages.
         */
        if (!isolated_page) {
                nr_pages = move_freepages_block(zone, page, migratetype, NULL);
                __mod_zone_freepage_state(zone, nr_pages, migratetype);
        }
        set_pageblock_migratetype(page, migratetype);
        zone->nr_isolate_pageblock--;
out:
        spin_unlock_irqrestore(&zone->lock, flags);
        if (isolated_page) {
                post_alloc_hook(page, order, __GFP_MOVABLE);
                __free_pages(page, order);
        }
}
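
For reference, a minimal sketch of the buddy computation used above; this assumes __find_buddy_pfn() is the usual XOR of the order bit (as defined in mm/internal.h in this era of the kernel), and the example pfn values are hypothetical:

/* Sketch: a free chunk's buddy differs from it only in pfn bit `order`. */
static inline unsigned long example_find_buddy_pfn(unsigned long pfn,
                                                   unsigned int order)
{
        return pfn ^ (1UL << order);
}

/*
 * E.g. pfn 0x1000 at order 10 has buddy pfn 0x1000 ^ 0x400 == 0x1400.
 * If that buddy is not isolated, the chunk is pulled off the freelist
 * with __isolate_free_page() and re-freed once the pageblock's
 * migratetype is restored, so the buddy allocator can merge the pair.
 */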

static inline struct page *
__first_valid_page(unsigned long pfn, unsigned long nr_pages)
{
        int i;

        for (i = 0; i < nr_pages; i++) {
                struct page *page;

                page = pfn_to_online_page(pfn + i);
                if (!page)
                        continue;
                return page;
        }
        return NULL;
}

/**
 * start_isolate_page_range() - make the page allocation type of a range
 * of pages MIGRATE_ISOLATE.
 * @start_pfn:          The lower PFN of the range to be isolated.
 * @end_pfn:            The upper PFN of the range to be isolated.
 *                      start_pfn/end_pfn must be aligned to pageblock_order.
 * @migratetype:        Migrate type to set in error recovery.
 * @flags:              The following flags are allowed (they can be combined
 *                      in a bit mask):
 *                      SKIP_HWPOISON - ignore hwpoison pages
 *                      REPORT_FAILURE - report details about the failure to
 *                      isolate the range
 *
 * Making the page allocation type MIGRATE_ISOLATE means free pages in
 * the range will never be allocated. Any free pages, and any pages freed
 * in the future, will not be allocated again. If the specified range
 * includes migrate types other than MOVABLE or CMA, this will fail with
 * -EBUSY. To finally isolate all pages in the range, the caller has to
 * free all pages in the range. test_pages_isolated() can be used to
 * test this.
 *
 * There is no high-level synchronization mechanism that prevents two
 * threads from trying to isolate overlapping ranges. If this happens,
 * one thread will notice pageblocks in the overlapping range already
 * set to isolate. This happens in set_migratetype_isolate(), which then
 * returns an error. We clean up by restoring the migration type on any
 * pageblocks we may have modified and return -EBUSY to the caller. This
 * prevents two threads from simultaneously working on overlapping ranges.
 *
 * Return: the number of isolated pageblocks on success, and -EBUSY if
 * any part of the range cannot be isolated.
 */
int start_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn,
                             unsigned migratetype, int flags)
{
        unsigned long pfn;
        unsigned long undo_pfn;
        struct page *page;
        int nr_isolate_pageblock = 0;

        BUG_ON(!IS_ALIGNED(start_pfn, pageblock_nr_pages));
        BUG_ON(!IS_ALIGNED(end_pfn, pageblock_nr_pages));

        for (pfn = start_pfn;
             pfn < end_pfn;
             pfn += pageblock_nr_pages) {
                page = __first_valid_page(pfn, pageblock_nr_pages);
                if (page) {
                        if (set_migratetype_isolate(page, migratetype, flags)) {
                                undo_pfn = pfn;
                                goto undo;
                        }
                        nr_isolate_pageblock++;
                }
        }
        return nr_isolate_pageblock;
undo:
        for (pfn = start_pfn;
             pfn < undo_pfn;
             pfn += pageblock_nr_pages) {
                struct page *page = pfn_to_online_page(pfn);

                if (!page)
                        continue;
                unset_migratetype_isolate(page, migratetype);
        }

        return -EBUSY;
}
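
For context, a condensed sketch of how a caller typically drives this API, loosely modeled on alloc_contig_range() in mm/page_alloc.c; example_claim_range() and the elided migrate/claim steps are illustrative, not part of this file:

static int example_claim_range(unsigned long start_pfn, unsigned long end_pfn)
{
        int ret;

        /* 1. Stop the range from being (re)allocated. */
        ret = start_isolate_page_range(start_pfn, end_pfn,
                                       MIGRATE_MOVABLE, 0);
        if (ret < 0)
                return ret;

        /* 2. ... migrate any in-use pages out of [start_pfn, end_pfn) ... */

        /* 3. Verify that every page in the range is now free. */
        ret = test_pages_isolated(start_pfn, end_pfn, false);

        /* 4. ... on success, claim the free pages before undoing ... */

        /* 5. Restore the pageblocks' original migratetype. */
        undo_isolate_page_range(start_pfn, end_pfn, MIGRATE_MOVABLE);
        return ret;
}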

/*
 * Make isolated pages available again.
 */
void undo_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn,
                            unsigned migratetype)
{
        unsigned long pfn;
        struct page *page;

        BUG_ON(!IS_ALIGNED(start_pfn, pageblock_nr_pages));
        BUG_ON(!IS_ALIGNED(end_pfn, pageblock_nr_pages));

        for (pfn = start_pfn;
             pfn < end_pfn;
             pfn += pageblock_nr_pages) {
                page = __first_valid_page(pfn, pageblock_nr_pages);
                if (!page || !is_migrate_isolate_page(page))
                        continue;
                unset_migratetype_isolate(page, migratetype);
        }
}

/*
 * Test whether all pages in the range are free (i.e. isolated).
 * All pages in [start_pfn...end_pfn) must be in the same zone.
 * zone->lock must be held before calling this.
 *
 * Returns the last tested pfn.
 */
static unsigned long
__test_page_isolated_in_pageblock(unsigned long pfn, unsigned long end_pfn,
                                  bool skip_hwpoisoned_pages)
{
        struct page *page;

        while (pfn < end_pfn) {
                if (!pfn_valid_within(pfn)) {
                        pfn++;
                        continue;
                }
                page = pfn_to_page(pfn);
                if (PageBuddy(page))
                        /*
                         * If the page is on a free list, it has to be
                         * on the correct MIGRATE_ISOLATE freelist.
                         * There is no simple way to verify that with a
                         * VM_BUG_ON(), though.
                         */
                        pfn += 1 << page_order(page);
                else if (skip_hwpoisoned_pages && PageHWPoison(page))
                        /* A HWPoisoned page cannot also be PageBuddy */
                        pfn++;
                else
                        break;
        }

        return pfn;
}
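
A brief worked example of the scan above, with hypothetical contents:

/*
 * Worked example (hypothetical layout): a 512-page pageblock holding a
 * single free order-9 buddy chunk is skipped in one step
 * (pfn += 1 << 9 == 512), so the function returns end_pfn, i.e. fully
 * isolated. If any page were neither free nor (when
 * skip_hwpoisoned_pages is set) hwpoisoned, the loop would break at
 * that pfn, the return value would be < end_pfn, and the caller would
 * report -EBUSY.
 */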

/* Caller should ensure that the requested range is in a single zone */
int test_pages_isolated(unsigned long start_pfn, unsigned long end_pfn,
                        bool skip_hwpoisoned_pages)
{
        unsigned long pfn, flags;
        struct page *page;
        struct zone *zone;

        /*
         * Note: pageblock_nr_pages != MAX_ORDER, so chunks of free
         * pages are not necessarily aligned to pageblock_nr_pages.
         * We therefore check the migratetype of each pageblock first.
         */
        for (pfn = start_pfn; pfn < end_pfn; pfn += pageblock_nr_pages) {
                page = __first_valid_page(pfn, pageblock_nr_pages);
                if (page && !is_migrate_isolate_page(page))
                        break;
        }
        page = __first_valid_page(start_pfn, end_pfn - start_pfn);
        if ((pfn < end_pfn) || !page)
                return -EBUSY;

        /* Check that all pages are free or marked as ISOLATED */
        zone = page_zone(page);
        spin_lock_irqsave(&zone->lock, flags);
        pfn = __test_page_isolated_in_pageblock(start_pfn, end_pfn,
                                                skip_hwpoisoned_pages);
        spin_unlock_irqrestore(&zone->lock, flags);

        trace_test_pages_isolated(start_pfn, end_pfn, pfn);

        return pfn < end_pfn ? -EBUSY : 0;
}
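
Because pages in the range may still be draining or completing migration when this is first called, callers typically retry. A minimal polling sketch; the loop shape is illustrative, and real callers such as memory offlining add signal checks and back-off:

static void example_wait_until_isolated(unsigned long start_pfn,
                                        unsigned long end_pfn)
{
        /* Retry until every page in the range sits on an isolate freelist. */
        while (test_pages_isolated(start_pfn, end_pfn, true))
                cond_resched();
}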

struct page *alloc_migrate_target(struct page *page, unsigned long private)
{
        return new_page_nodemask(page, numa_node_id(), &node_states[N_MEMORY]);
}
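
alloc_migrate_target() has the new_page_t callback signature, so it can be handed to migrate_pages() to supply destination pages when emptying an isolated range; the call below is a sketch of that use (in this kernel era, __alloc_contig_migrate_range() in mm/page_alloc.c uses it in roughly this way):

/*
 * Sketch: migrate a gathered list of pages out of an isolated range,
 * allocating each destination page via alloc_migrate_target().
 *
 *     ret = migrate_pages(&pagelist, alloc_migrate_target, NULL, 0,
 *                         MIGRATE_SYNC, MR_CONTIG_RANGE);
 *
 * new_page_nodemask() prefers the current node but may fall back to
 * any node in node_states[N_MEMORY].
 */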
