root/fs/ext4/readpage.c


DEFINITIONS

This source file includes the following definitions:
  1. __read_end_io
  2. decrypt_work
  3. verity_work
  4. bio_post_read_processing
  5. bio_post_read_required
  6. mpage_end_io
  7. ext4_need_verity
  8. get_bio_post_read_ctx
  9. ext4_readpage_limit
  10. ext4_mpage_readpages
  11. ext4_init_post_read_processing
  12. ext4_exit_post_read_processing

// SPDX-License-Identifier: GPL-2.0
/*
 * linux/fs/ext4/readpage.c
 *
 * Copyright (C) 2002, Linus Torvalds.
 * Copyright (C) 2015, Google, Inc.
 *
 * This was originally taken from fs/mpage.c
 *
 * The ext4_mpage_readpages() function here is intended to replace
 * mpage_readpages() in the general case, not just for encrypted files.
 * It has some limitations (see below), where it will fall back to
 * block_read_full_page(), but these limitations should only be hit
 * when page_size != block_size.
 *
 * This will allow us to attach a callback function to support ext4
 * encryption.
 *
 * If anything unusual happens, such as:
 *
 * - encountering a page which has buffers
 * - encountering a page which has a non-hole after a hole
 * - encountering a page with non-contiguous blocks
 *
 * then this code just gives up and calls the buffer_head-based read function.
 * It does handle a page which has holes at the end - that is a common case:
 * the end-of-file on blocksize < PAGE_SIZE setups.
 */

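/*
 * A condensed sketch of the fallback policy described above.  The names
 * mapped_after_hole and blocks_contiguous are illustrative stand-ins for
 * checks that are open-coded in ext4_mpage_readpages() below:
 *
 *	if (page_has_buffers(page) ||
 *	    mapped_after_hole ||
 *	    !blocks_contiguous)
 *		goto confused;		(punts to block_read_full_page())
 */
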
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/mm.h>
#include <linux/kdev_t.h>
#include <linux/gfp.h>
#include <linux/bio.h>
#include <linux/fs.h>
#include <linux/buffer_head.h>
#include <linux/blkdev.h>
#include <linux/highmem.h>
#include <linux/prefetch.h>
#include <linux/mpage.h>
#include <linux/writeback.h>
#include <linux/backing-dev.h>
#include <linux/pagevec.h>
#include <linux/cleancache.h>

#include "ext4.h"

#define NUM_PREALLOC_POST_READ_CTXS	128

static struct kmem_cache *bio_post_read_ctx_cache;
static mempool_t *bio_post_read_ctx_pool;

/* postprocessing steps for read bios */
enum bio_post_read_step {
	STEP_INITIAL = 0,
	STEP_DECRYPT,
	STEP_VERITY,
	STEP_MAX,
};

struct bio_post_read_ctx {
	struct bio *bio;
	struct work_struct work;
	unsigned int cur_step;
	unsigned int enabled_steps;
};

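/*
 * Example of the conventions used below: for a file that is both encrypted
 * and verity-protected, a freshly allocated context would carry
 *
 *	ctx->enabled_steps = (1 << STEP_DECRYPT) | (1 << STEP_VERITY);
 *	ctx->cur_step = STEP_INITIAL;
 *
 * and bio_post_read_processing() advances cur_step through the enabled
 * steps in enum order.
 */
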
static void __read_end_io(struct bio *bio)
{
	struct page *page;
	struct bio_vec *bv;
	struct bvec_iter_all iter_all;

	bio_for_each_segment_all(bv, bio, iter_all) {
		page = bv->bv_page;

		/* PG_error was set if any post_read step failed */
		if (bio->bi_status || PageError(page)) {
			ClearPageUptodate(page);
			/* will re-read again later */
			ClearPageError(page);
		} else {
			SetPageUptodate(page);
		}
		unlock_page(page);
	}
	if (bio->bi_private)
		mempool_free(bio->bi_private, bio_post_read_ctx_pool);
	bio_put(bio);
}

static void bio_post_read_processing(struct bio_post_read_ctx *ctx);

static void decrypt_work(struct work_struct *work)
{
	struct bio_post_read_ctx *ctx =
		container_of(work, struct bio_post_read_ctx, work);

	fscrypt_decrypt_bio(ctx->bio);

	bio_post_read_processing(ctx);
}

static void verity_work(struct work_struct *work)
{
	struct bio_post_read_ctx *ctx =
		container_of(work, struct bio_post_read_ctx, work);
	struct bio *bio = ctx->bio;

	/*
	 * fsverity_verify_bio() may call readpages() again, and although verity
	 * will be disabled for that, decryption may still be needed, causing
	 * another bio_post_read_ctx to be allocated.  So to guarantee that
	 * mempool_alloc() never deadlocks we must free the current ctx first.
	 * This is safe because verity is the last post-read step.
	 */
	BUILD_BUG_ON(STEP_VERITY + 1 != STEP_MAX);
	mempool_free(ctx, bio_post_read_ctx_pool);
	bio->bi_private = NULL;

	fsverity_verify_bio(bio);

	__read_end_io(bio);
}

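/*
 * The recursion that the comment in verity_work() guards against, sketched
 * as a call chain for an encrypted verity file (an illustration of the
 * mempool reasoning, not a literal backtrace):
 *
 *	verity_work()
 *	  fsverity_verify_bio()
 *	    readpages()			(reads Merkle tree pages)
 *	      get_bio_post_read_ctx()	(decryption still required)
 *	        mempool_alloc()		(must find a free ctx)
 *
 * Freeing the current ctx before calling fsverity_verify_bio() guarantees
 * that the inner allocation can always be satisfied from the mempool.
 */
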
static void bio_post_read_processing(struct bio_post_read_ctx *ctx)
{
	/*
	 * We use different work queues for decryption and for verity because
	 * verity may require reading metadata pages that need decryption, and
	 * we shouldn't recurse to the same workqueue.
	 */
	switch (++ctx->cur_step) {
	case STEP_DECRYPT:
		if (ctx->enabled_steps & (1 << STEP_DECRYPT)) {
			INIT_WORK(&ctx->work, decrypt_work);
			fscrypt_enqueue_decrypt_work(&ctx->work);
			return;
		}
		ctx->cur_step++;
		/* fall-through */
	case STEP_VERITY:
		if (ctx->enabled_steps & (1 << STEP_VERITY)) {
			INIT_WORK(&ctx->work, verity_work);
			fsverity_enqueue_verify_work(&ctx->work);
			return;
		}
		ctx->cur_step++;
		/* fall-through */
	default:
		__read_end_io(ctx->bio);
	}
}

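/*
 * Example walk of the state machine above for a bio that only needs
 * decryption (enabled_steps == 1 << STEP_DECRYPT):
 *
 *	mpage_end_io():             cur_step = STEP_INITIAL
 *	bio_post_read_processing(): ++cur_step == STEP_DECRYPT, enabled,
 *	                            so decrypt_work() is enqueued
 *	decrypt_work():             decrypts the bio, then re-enters
 *	bio_post_read_processing(): ++cur_step == STEP_VERITY, not enabled,
 *	                            falls through to __read_end_io()
 */
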
static bool bio_post_read_required(struct bio *bio)
{
	return bio->bi_private && !bio->bi_status;
}

/*
 * I/O completion handler for multipage BIOs.
 *
 * The mpage code never puts partial pages into a BIO (except for end-of-file).
 * If a page does not map to a contiguous run of blocks then it simply falls
 * back to block_read_full_page().
 *
 * Why is this?  If a page's completion depends on a number of different BIOs
 * which can complete in any order (or at the same time) then determining the
 * status of that page is hard.  See end_buffer_async_read() for the details.
 * There is no point in duplicating all that complexity.
 */
static void mpage_end_io(struct bio *bio)
{
	if (bio_post_read_required(bio)) {
		struct bio_post_read_ctx *ctx = bio->bi_private;

		ctx->cur_step = STEP_INITIAL;
		bio_post_read_processing(ctx);
		return;
	}
	__read_end_io(bio);
}

static inline bool ext4_need_verity(const struct inode *inode, pgoff_t idx)
{
	return fsverity_active(inode) &&
	       idx < DIV_ROUND_UP(inode->i_size, PAGE_SIZE);
}

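/*
 * Worked example of the bound above: with PAGE_SIZE == 4096 and
 * i_size == 5000, DIV_ROUND_UP(5000, 4096) == 2, so only page indices 0
 * and 1 are verified; a page wholly past i_size contains no data covered
 * by the Merkle tree and is skipped.
 */
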
static struct bio_post_read_ctx *get_bio_post_read_ctx(struct inode *inode,
						       struct bio *bio,
						       pgoff_t first_idx)
{
	unsigned int post_read_steps = 0;
	struct bio_post_read_ctx *ctx = NULL;

	if (IS_ENCRYPTED(inode) && S_ISREG(inode->i_mode))
		post_read_steps |= 1 << STEP_DECRYPT;

	if (ext4_need_verity(inode, first_idx))
		post_read_steps |= 1 << STEP_VERITY;

	if (post_read_steps) {
		ctx = mempool_alloc(bio_post_read_ctx_pool, GFP_NOFS);
		if (!ctx)
			return ERR_PTR(-ENOMEM);
		ctx->bio = bio;
		ctx->enabled_steps = post_read_steps;
		bio->bi_private = ctx;
	}
	return ctx;
}

static inline loff_t ext4_readpage_limit(struct inode *inode)
{
	if (IS_ENABLED(CONFIG_FS_VERITY) &&
	    (IS_VERITY(inode) || ext4_verity_in_progress(inode)))
		return inode->i_sb->s_maxbytes;

	return i_size_read(inode);
}

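/*
 * Example of why the limit above matters: ext4 stores the verity Merkle
 * tree past i_size, so while the tree is being built or consulted the EOF
 * clamp must not apply; returning s_maxbytes lets ext4_mpage_readpages()
 * read pages beyond i_size (a hedged summary; see fs/ext4/verity.c for the
 * layout details).
 */
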
int ext4_mpage_readpages(struct address_space *mapping,
			 struct list_head *pages, struct page *page,
			 unsigned nr_pages, bool is_readahead)
{
	struct bio *bio = NULL;
	sector_t last_block_in_bio = 0;

	struct inode *inode = mapping->host;
	const unsigned blkbits = inode->i_blkbits;
	const unsigned blocks_per_page = PAGE_SIZE >> blkbits;
	const unsigned blocksize = 1 << blkbits;
	sector_t block_in_file;
	sector_t last_block;
	sector_t last_block_in_file;
	sector_t blocks[MAX_BUF_PER_PAGE];
	unsigned page_block;
	struct block_device *bdev = inode->i_sb->s_bdev;
	int length;
	unsigned relative_block = 0;
	struct ext4_map_blocks map;

	map.m_pblk = 0;
	map.m_lblk = 0;
	map.m_len = 0;
	map.m_flags = 0;

	for (; nr_pages; nr_pages--) {
		int fully_mapped = 1;
		unsigned first_hole = blocks_per_page;

		if (pages) {
			page = lru_to_page(pages);

			prefetchw(&page->flags);
			list_del(&page->lru);
			if (add_to_page_cache_lru(page, mapping, page->index,
				  readahead_gfp_mask(mapping)))
				goto next_page;
		}

		if (page_has_buffers(page))
			goto confused;

		block_in_file = (sector_t)page->index << (PAGE_SHIFT - blkbits);
		last_block = block_in_file + nr_pages * blocks_per_page;
		last_block_in_file = (ext4_readpage_limit(inode) +
				      blocksize - 1) >> blkbits;
		if (last_block > last_block_in_file)
			last_block = last_block_in_file;
		page_block = 0;

		/*
		 * Map blocks using the previous result first.
		 */
		if ((map.m_flags & EXT4_MAP_MAPPED) &&
		    block_in_file > map.m_lblk &&
		    block_in_file < (map.m_lblk + map.m_len)) {
			unsigned map_offset = block_in_file - map.m_lblk;
			unsigned last = map.m_len - map_offset;

			for (relative_block = 0; ; relative_block++) {
				if (relative_block == last) {
					/* needed? */
					map.m_flags &= ~EXT4_MAP_MAPPED;
					break;
				}
				if (page_block == blocks_per_page)
					break;
				blocks[page_block] = map.m_pblk + map_offset +
					relative_block;
				page_block++;
				block_in_file++;
			}
		}

		/*
		 * Then do more ext4_map_blocks() calls until we are
		 * done with this page.
		 */
		while (page_block < blocks_per_page) {
			if (block_in_file < last_block) {
				map.m_lblk = block_in_file;
				map.m_len = last_block - block_in_file;

				if (ext4_map_blocks(NULL, inode, &map, 0) < 0) {
				set_error_page:
					SetPageError(page);
					zero_user_segment(page, 0,
							  PAGE_SIZE);
					unlock_page(page);
					goto next_page;
				}
			}
			if ((map.m_flags & EXT4_MAP_MAPPED) == 0) {
				fully_mapped = 0;
				if (first_hole == blocks_per_page)
					first_hole = page_block;
				page_block++;
				block_in_file++;
				continue;
			}
			if (first_hole != blocks_per_page)
				goto confused;		/* hole -> non-hole */

			/* Contiguous blocks? */
			if (page_block && blocks[page_block-1] != map.m_pblk-1)
				goto confused;
			for (relative_block = 0; ; relative_block++) {
				if (relative_block == map.m_len) {
					/* needed? */
					map.m_flags &= ~EXT4_MAP_MAPPED;
					break;
				} else if (page_block == blocks_per_page)
					break;
				blocks[page_block] = map.m_pblk+relative_block;
				page_block++;
				block_in_file++;
			}
		}
		if (first_hole != blocks_per_page) {
			zero_user_segment(page, first_hole << blkbits,
					  PAGE_SIZE);
			if (first_hole == 0) {
				if (ext4_need_verity(inode, page->index) &&
				    !fsverity_verify_page(page))
					goto set_error_page;
				SetPageUptodate(page);
				unlock_page(page);
				goto next_page;
			}
		} else if (fully_mapped) {
			SetPageMappedToDisk(page);
		}
		if (fully_mapped && blocks_per_page == 1 &&
		    !PageUptodate(page) && cleancache_get_page(page) == 0) {
			SetPageUptodate(page);
			goto confused;
		}

		/*
		 * This page will go to BIO.  Do we need to send this
		 * BIO off first?
		 */
		if (bio && (last_block_in_bio != blocks[0] - 1)) {
		submit_and_realloc:
			submit_bio(bio);
			bio = NULL;
		}
		if (bio == NULL) {
			struct bio_post_read_ctx *ctx;

			bio = bio_alloc(GFP_KERNEL,
				min_t(int, nr_pages, BIO_MAX_PAGES));
			if (!bio)
				goto set_error_page;
			ctx = get_bio_post_read_ctx(inode, bio, page->index);
			if (IS_ERR(ctx)) {
				bio_put(bio);
				bio = NULL;
				goto set_error_page;
			}
			bio_set_dev(bio, bdev);
			bio->bi_iter.bi_sector = blocks[0] << (blkbits - 9);
			bio->bi_end_io = mpage_end_io;
			bio->bi_private = ctx;
			bio_set_op_attrs(bio, REQ_OP_READ,
						is_readahead ? REQ_RAHEAD : 0);
		}
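		/*
		 * Worked example of the bi_sector conversion above: with
		 * 4 KiB blocks (blkbits == 12), filesystem block 100 begins
		 * at 512-byte sector 100 << (12 - 9) == 800, since sectors
		 * are 512 bytes (1 << 9).
		 */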

		length = first_hole << blkbits;
		if (bio_add_page(bio, page, length, 0) < length)
			goto submit_and_realloc;

		if (((map.m_flags & EXT4_MAP_BOUNDARY) &&
		     (relative_block == map.m_len)) ||
		    (first_hole != blocks_per_page)) {
			submit_bio(bio);
			bio = NULL;
		} else
			last_block_in_bio = blocks[blocks_per_page - 1];
		goto next_page;
	confused:
		if (bio) {
			submit_bio(bio);
			bio = NULL;
		}
		if (!PageUptodate(page))
			block_read_full_page(page, ext4_get_block);
		else
			unlock_page(page);
	next_page:
		if (pages)
			put_page(page);
	}
	BUG_ON(pages && !list_empty(pages));
	if (bio)
		submit_bio(bio);
	return 0;
}
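
/*
 * How callers are expected to wire this up (a hedged sketch; the real
 * address_space operations live in fs/ext4/inode.c and do a little more
 * work, such as checking for inline data, before getting here):
 *
 *	static int ext4_readpage(struct file *file, struct page *page)
 *	{
 *		return ext4_mpage_readpages(page->mapping, NULL, page,
 *					    1, false);
 *	}
 *
 *	static int ext4_readpages(struct file *file,
 *				  struct address_space *mapping,
 *				  struct list_head *pages, unsigned nr_pages)
 *	{
 *		return ext4_mpage_readpages(mapping, pages, NULL,
 *					    nr_pages, true);
 *	}
 */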

int __init ext4_init_post_read_processing(void)
{
	bio_post_read_ctx_cache =
		kmem_cache_create("ext4_bio_post_read_ctx",
				  sizeof(struct bio_post_read_ctx), 0, 0, NULL);
	if (!bio_post_read_ctx_cache)
		goto fail;
	bio_post_read_ctx_pool =
		mempool_create_slab_pool(NUM_PREALLOC_POST_READ_CTXS,
					 bio_post_read_ctx_cache);
	if (!bio_post_read_ctx_pool)
		goto fail_free_cache;
	return 0;

fail_free_cache:
	kmem_cache_destroy(bio_post_read_ctx_cache);
fail:
	return -ENOMEM;
}

void ext4_exit_post_read_processing(void)
{
	mempool_destroy(bio_post_read_ctx_pool);
	kmem_cache_destroy(bio_post_read_ctx_cache);
}
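
/*
 * A hedged sketch of how the init/exit pair above fits into the module
 * lifecycle (the real calls live in fs/ext4/super.c):
 *
 *	static int __init ext4_init_fs(void)
 *	{
 *		int err = ext4_init_post_read_processing();
 *
 *		if (err)
 *			return err;
 *		...
 *	}
 *
 *	static void __exit ext4_exit_fs(void)
 *	{
 *		...
 *		ext4_exit_post_read_processing();
 *	}
 */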
