mm/page_io.c


DEFINITIONS

This source file includes the following definitions:
  1. get_swap_bio
  2. end_swap_bio_write
  3. swap_slot_free_notify
  4. end_swap_bio_read
  5. generic_swapfile_activate
  6. swap_writepage
  7. swap_page_sector
  8. count_swpout_vm_event
  9. __swap_writepage
  10. swap_readpage
  11. swap_set_page_dirty

// SPDX-License-Identifier: GPL-2.0
/*
 *  linux/mm/page_io.c
 *
 *  Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 *
 *  Swap reorganised 29.12.95,
 *  Asynchronous swapping added 30.12.95. Stephen Tweedie
 *  Removed race in async swapping. 14.4.1996. Bruno Haible
 *  Add swap of shared pages through the page cache. 20.2.1998. Stephen Tweedie
 *  Always use brw_page, life becomes simpler. 12 May 1998 Eric Biederman
 */

#include <linux/mm.h>
#include <linux/kernel_stat.h>
#include <linux/gfp.h>
#include <linux/pagemap.h>
#include <linux/swap.h>
#include <linux/bio.h>
#include <linux/swapops.h>
#include <linux/buffer_head.h>
#include <linux/writeback.h>
#include <linux/frontswap.h>
#include <linux/blkdev.h>
#include <linux/uio.h>
#include <linux/sched/task.h>
#include <asm/pgtable.h>

static struct bio *get_swap_bio(gfp_t gfp_flags,
                                struct page *page, bio_end_io_t end_io)
{
        struct bio *bio;

        bio = bio_alloc(gfp_flags, 1);
        if (bio) {
                struct block_device *bdev;

                bio->bi_iter.bi_sector = map_swap_page(page, &bdev);
                bio_set_dev(bio, bdev);
                bio->bi_iter.bi_sector <<= PAGE_SHIFT - 9;
                bio->bi_end_io = end_io;

                bio_add_page(bio, page, PAGE_SIZE * hpage_nr_pages(page), 0);
        }
        return bio;
}

void end_swap_bio_write(struct bio *bio)
{
        struct page *page = bio_first_page_all(bio);

        if (bio->bi_status) {
                SetPageError(page);
                /*
                 * We failed to write the page out to swap-space.
                 * Re-dirty the page in order to avoid it being reclaimed.
                 * Also print a dire warning that things will go BAD (tm)
                 * very quickly.
                 *
                 * Also clear PG_reclaim to avoid rotate_reclaimable_page()
                 */
                set_page_dirty(page);
                pr_alert("Write-error on swap-device (%u:%u:%llu)\n",
                         MAJOR(bio_dev(bio)), MINOR(bio_dev(bio)),
                         (unsigned long long)bio->bi_iter.bi_sector);
                ClearPageReclaim(page);
        }
        end_page_writeback(page);
        bio_put(bio);
}

static void swap_slot_free_notify(struct page *page)
{
        struct swap_info_struct *sis;
        struct gendisk *disk;
        swp_entry_t entry;

        /*
         * There is no guarantee that the page is in swap cache - the software
         * suspend code (at least) uses end_swap_bio_read() against a non-
         * swapcache page.  So we must check PG_swapcache before proceeding with
         * this optimization.
         */
        if (unlikely(!PageSwapCache(page)))
                return;

        sis = page_swap_info(page);
        if (!(sis->flags & SWP_BLKDEV))
                return;

        /*
         * The swap subsystem performs lazy swap slot freeing,
         * expecting that the page will be swapped out again.
         * So we can avoid an unnecessary write if the page
         * isn't redirtied.
         * This is good for real swap storage because we can
         * reduce unnecessary I/O and enhance wear-leveling
         * if an SSD is used as the swap device.
         * But if an in-memory swap device (e.g. zram) is used,
         * this causes a duplicated copy between uncompressed
         * data in VM-owned memory and compressed data in
         * zram-owned memory.  So let's free the zram-owned memory
         * and make the VM-owned decompressed page *dirty*,
         * so the page should be swapped out somewhere again if
         * we again wish to reclaim it.
         */
        disk = sis->bdev->bd_disk;
        entry.val = page_private(page);
        if (disk->fops->swap_slot_free_notify && __swap_count(entry) == 1) {
                unsigned long offset;

                offset = swp_offset(entry);

                SetPageDirty(page);
                disk->fops->swap_slot_free_notify(sis->bdev,
                                offset);
        }
}
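
/*
 * Illustrative sketch of the driver side of the notification above: the
 * swap_slot_free_notify hook in struct block_device_operations is real
 * (zram is the in-tree user), but "myram" and its myram_free_slot()
 * helper are hypothetical names used only for this example.
 */
#if 0	/* example only */
static void myram_slot_free_notify(struct block_device *bdev,
                                   unsigned long offset)
{
        /* Drop the driver-owned (e.g. compressed) copy of this slot. */
        myram_free_slot(bdev->bd_disk->private_data, offset);
}

static const struct block_device_operations myram_fops = {
        .swap_slot_free_notify  = myram_slot_free_notify,
};
#endif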

static void end_swap_bio_read(struct bio *bio)
{
        struct page *page = bio_first_page_all(bio);
        struct task_struct *waiter = bio->bi_private;

        if (bio->bi_status) {
                SetPageError(page);
                ClearPageUptodate(page);
                pr_alert("Read-error on swap-device (%u:%u:%llu)\n",
                         MAJOR(bio_dev(bio)), MINOR(bio_dev(bio)),
                         (unsigned long long)bio->bi_iter.bi_sector);
                goto out;
        }

        SetPageUptodate(page);
        swap_slot_free_notify(page);
out:
        unlock_page(page);
        WRITE_ONCE(bio->bi_private, NULL);
        bio_put(bio);
        if (waiter) {
                blk_wake_io_task(waiter);
                put_task_struct(waiter);
        }
}

int generic_swapfile_activate(struct swap_info_struct *sis,
                                struct file *swap_file,
                                sector_t *span)
{
        struct address_space *mapping = swap_file->f_mapping;
        struct inode *inode = mapping->host;
        unsigned blocks_per_page;
        unsigned long page_no;
        unsigned blkbits;
        sector_t probe_block;
        sector_t last_block;
        sector_t lowest_block = -1;
        sector_t highest_block = 0;
        int nr_extents = 0;
        int ret;

        blkbits = inode->i_blkbits;
        blocks_per_page = PAGE_SIZE >> blkbits;

        /*
         * Map all the blocks into the extent tree.  This code doesn't try
         * to be very smart.
         */
        probe_block = 0;
        page_no = 0;
        last_block = i_size_read(inode) >> blkbits;
        while ((probe_block + blocks_per_page) <= last_block &&
                        page_no < sis->max) {
                unsigned block_in_page;
                sector_t first_block;

                cond_resched();

                first_block = bmap(inode, probe_block);
                if (first_block == 0)
                        goto bad_bmap;

                /*
                 * It must be PAGE_SIZE aligned on-disk
                 */
                if (first_block & (blocks_per_page - 1)) {
                        probe_block++;
                        goto reprobe;
                }

                for (block_in_page = 1; block_in_page < blocks_per_page;
                                        block_in_page++) {
                        sector_t block;

                        block = bmap(inode, probe_block + block_in_page);
                        if (block == 0)
                                goto bad_bmap;
                        if (block != first_block + block_in_page) {
                                /* Discontiguity */
                                probe_block++;
                                goto reprobe;
                        }
                }

                first_block >>= (PAGE_SHIFT - blkbits);
                if (page_no) {  /* exclude the header page */
                        if (first_block < lowest_block)
                                lowest_block = first_block;
                        if (first_block > highest_block)
                                highest_block = first_block;
                }

                /*
                 * We found a PAGE_SIZE-length, PAGE_SIZE-aligned run of blocks
                 */
                ret = add_swap_extent(sis, page_no, 1, first_block);
                if (ret < 0)
                        goto out;
                nr_extents += ret;
                page_no++;
                probe_block += blocks_per_page;
reprobe:
                continue;
        }
        ret = nr_extents;
        *span = 1 + highest_block - lowest_block;
        if (page_no == 0)
                page_no = 1;    /* force Empty message */
        sis->max = page_no;
        sis->pages = page_no - 1;
        sis->highest_bit = page_no - 1;
out:
        return ret;
bad_bmap:
        pr_err("swapon: swapfile has holes\n");
        ret = -EINVAL;
        goto out;
}
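
/*
 * Illustrative sketch: a filesystem whose swapfile blocks sit directly on
 * the backing device can wire the helper above into its
 * address_space_operations via the ->swap_activate() hook.  The hook and
 * its signature are real; "myfs" is a hypothetical filesystem name.
 */
#if 0	/* example only */
static int myfs_swap_activate(struct swap_info_struct *sis,
                              struct file *file, sector_t *span)
{
        return generic_swapfile_activate(sis, file, span);
}

static const struct address_space_operations myfs_aops = {
        /* ... readpage, writepage, etc. ... */
        .swap_activate  = myfs_swap_activate,
};
#endif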

/*
 * We may have stale swap cache pages in memory: notice
 * them here and get rid of the unnecessary final write.
 */
int swap_writepage(struct page *page, struct writeback_control *wbc)
{
        int ret = 0;

        if (try_to_free_swap(page)) {
                unlock_page(page);
                goto out;
        }
        if (frontswap_store(page) == 0) {
                set_page_writeback(page);
                unlock_page(page);
                end_page_writeback(page);
                goto out;
        }
        ret = __swap_writepage(page, wbc, end_swap_bio_write);
out:
        return ret;
}

static sector_t swap_page_sector(struct page *page)
{
        return (sector_t)__page_file_index(page) << (PAGE_SHIFT - 9);
}
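
/*
 * Worked example for swap_page_sector(): assuming 4 KiB pages
 * (PAGE_SHIFT == 12) and 512-byte sectors, the shift is 12 - 9 == 3,
 * so swap page index N starts at sector N << 3; e.g. page 5 of the
 * swap area begins at sector 40.  get_swap_bio() above applies the
 * same PAGE_SHIFT - 9 conversion to the value from map_swap_page().
 */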

static inline void count_swpout_vm_event(struct page *page)
{
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
        if (unlikely(PageTransHuge(page)))
                count_vm_event(THP_SWPOUT);
#endif
        count_vm_events(PSWPOUT, hpage_nr_pages(page));
}

int __swap_writepage(struct page *page, struct writeback_control *wbc,
                bio_end_io_t end_write_func)
{
        struct bio *bio;
        int ret;
        struct swap_info_struct *sis = page_swap_info(page);

        VM_BUG_ON_PAGE(!PageSwapCache(page), page);
        if (sis->flags & SWP_FS) {
                struct kiocb kiocb;
                struct file *swap_file = sis->swap_file;
                struct address_space *mapping = swap_file->f_mapping;
                struct bio_vec bv = {
                        .bv_page = page,
                        .bv_len  = PAGE_SIZE,
                        .bv_offset = 0
                };
                struct iov_iter from;

                iov_iter_bvec(&from, WRITE, &bv, 1, PAGE_SIZE);
                init_sync_kiocb(&kiocb, swap_file);
                kiocb.ki_pos = page_file_offset(page);

                set_page_writeback(page);
                unlock_page(page);
                ret = mapping->a_ops->direct_IO(&kiocb, &from);
                if (ret == PAGE_SIZE) {
                        count_vm_event(PSWPOUT);
                        ret = 0;
                } else {
                        /*
                         * In the case of swap-over-nfs, this can be a
                         * temporary failure if the system has limited
                         * memory for allocating transmit buffers.
                         * Mark the page dirty and clear PG_reclaim to
                         * avoid rotate_reclaimable_page(), and
                         * rate-limit the messages, but do not flag
                         * PageError as in the normal direct-to-bio
                         * case, since the failure may be temporary.
                         */
                        set_page_dirty(page);
                        ClearPageReclaim(page);
                        pr_err_ratelimited("Write error on dio swapfile (%llu)\n",
                                           page_file_offset(page));
                }
                end_page_writeback(page);
                return ret;
        }

        ret = bdev_write_page(sis->bdev, swap_page_sector(page), page, wbc);
        if (!ret) {
                count_swpout_vm_event(page);
                return 0;
        }

        ret = 0;
        bio = get_swap_bio(GFP_NOIO, page, end_write_func);
        if (bio == NULL) {
                set_page_dirty(page);
                unlock_page(page);
                ret = -ENOMEM;
                goto out;
        }
        bio->bi_opf = REQ_OP_WRITE | REQ_SWAP | wbc_to_write_flags(wbc);
        bio_associate_blkg_from_page(bio, page);
        count_swpout_vm_event(page);
        set_page_writeback(page);
        unlock_page(page);
        submit_bio(bio);
out:
        return ret;
}

int swap_readpage(struct page *page, bool synchronous)
{
        struct bio *bio;
        int ret = 0;
        struct swap_info_struct *sis = page_swap_info(page);
        blk_qc_t qc;
        struct gendisk *disk;

        VM_BUG_ON_PAGE(!PageSwapCache(page) && !synchronous, page);
        VM_BUG_ON_PAGE(!PageLocked(page), page);
        VM_BUG_ON_PAGE(PageUptodate(page), page);
        if (frontswap_load(page) == 0) {
                SetPageUptodate(page);
                unlock_page(page);
                goto out;
        }

        if (sis->flags & SWP_FS) {
                struct file *swap_file = sis->swap_file;
                struct address_space *mapping = swap_file->f_mapping;

                ret = mapping->a_ops->readpage(swap_file, page);
                if (!ret)
                        count_vm_event(PSWPIN);
                return ret;
        }

        ret = bdev_read_page(sis->bdev, swap_page_sector(page), page);
        if (!ret) {
                if (trylock_page(page)) {
                        swap_slot_free_notify(page);
                        unlock_page(page);
                }

                count_vm_event(PSWPIN);
                return 0;
        }

        ret = 0;
        bio = get_swap_bio(GFP_KERNEL, page, end_swap_bio_read);
        if (bio == NULL) {
                unlock_page(page);
                ret = -ENOMEM;
                goto out;
        }
        disk = bio->bi_disk;
        /*
         * Keep this task valid during swap readpage because the oom killer may
         * attempt to access it in the page fault retry time check.
         */
        bio_set_op_attrs(bio, REQ_OP_READ, 0);
        if (synchronous) {
                bio->bi_opf |= REQ_HIPRI;
                get_task_struct(current);
                bio->bi_private = current;
        }
        count_vm_event(PSWPIN);
        bio_get(bio);
        qc = submit_bio(bio);
        while (synchronous) {
                set_current_state(TASK_UNINTERRUPTIBLE);
                if (!READ_ONCE(bio->bi_private))
                        break;

                if (!blk_poll(disk->queue, qc, true))
                        io_schedule();
        }
        __set_current_state(TASK_RUNNING);
        bio_put(bio);

out:
        return ret;
}
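
/*
 * Illustrative usage sketch: callers such as the page fault path pass
 * synchronous == true for SWP_SYNCHRONOUS_IO devices so the read is
 * polled to completion rather than waiting for the completion IRQ.
 * The snippet below is a simplified, hypothetical caller.
 */
#if 0	/* example only */
        /* 'page' is locked and satisfies the VM_BUG_ONs above */
        if (sis->flags & SWP_SYNCHRONOUS_IO)
                ret = swap_readpage(page, true);        /* poll until done */
        else
                ret = swap_readpage(page, false);       /* IRQ completion */
#endif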

int swap_set_page_dirty(struct page *page)
{
        struct swap_info_struct *sis = page_swap_info(page);

        if (sis->flags & SWP_FS) {
                struct address_space *mapping = sis->swap_file->f_mapping;

                VM_BUG_ON_PAGE(!PageSwapCache(page), page);
                return mapping->a_ops->set_page_dirty(page);
        } else {
                return __set_page_dirty_no_writeback(page);
        }
}
