root/fs/ext4/inode.c


DEFINITIONS

This source file includes the following definitions.
  1. ext4_inode_csum
  2. ext4_inode_csum_verify
  3. ext4_inode_csum_set
  4. ext4_begin_ordered_truncate
  5. ext4_inode_is_fast_symlink
  6. ext4_truncate_restart_trans
  7. ext4_evict_inode
  8. ext4_get_reserved_space
  9. ext4_da_update_reserve_space
  10. __check_block_validity
  11. ext4_issue_zeroout
  12. ext4_map_blocks_es_recheck
  13. ext4_map_blocks
  14. ext4_update_bh_state
  15. _ext4_get_block
  16. ext4_get_block
  17. ext4_get_block_unwritten
  18. ext4_get_block_trans
  19. ext4_dio_get_block
  20. ext4_dio_get_block_unwritten_async
  21. ext4_dio_get_block_unwritten_sync
  22. ext4_dio_get_block_overwrite
  23. ext4_getblk
  24. ext4_bread
  25. ext4_bread_batch
  26. ext4_walk_page_buffers
  27. do_journal_get_write_access
  28. ext4_block_write_begin
  29. ext4_write_begin
  30. write_end_fn
  31. ext4_write_end
  32. ext4_journalled_zero_new_buffers
  33. ext4_journalled_write_end
  34. ext4_da_reserve_space
  35. ext4_da_release_space
  36. mpage_release_unused_pages
  37. ext4_print_free_blocks
  38. ext4_bh_delay_or_unwritten
  39. ext4_insert_delayed_block
  40. ext4_da_map_blocks
  41. ext4_da_get_block_prep
  42. bget_one
  43. bput_one
  44. __ext4_journalled_writepage
  45. ext4_writepage
  46. mpage_submit_page
  47. mpage_add_bh_to_extent
  48. mpage_process_page_bufs
  49. mpage_map_and_submit_buffers
  50. mpage_map_one_extent
  51. mpage_map_and_submit_extent
  52. ext4_da_writepages_trans_blocks
  53. mpage_prepare_extent_to_map
  54. ext4_writepages
  55. ext4_dax_writepages
  56. ext4_nonda_switch
  57. ext4_da_write_credits
  58. ext4_da_write_begin
  59. ext4_da_should_update_i_disksize
  60. ext4_da_write_end
  61. ext4_alloc_da_blocks
  62. ext4_bmap
  63. ext4_readpage
  64. ext4_readpages
  65. ext4_invalidatepage
  66. __ext4_journalled_invalidatepage
  67. ext4_journalled_invalidatepage
  68. ext4_releasepage
  69. ext4_inode_datasync_dirty
  70. ext4_iomap_begin
  71. ext4_iomap_end
  72. ext4_end_io_dio
  73. ext4_direct_IO_write
  74. ext4_direct_IO_read
  75. ext4_direct_IO
  76. ext4_journalled_set_page_dirty
  77. ext4_set_page_dirty
  78. ext4_set_aops
  79. __ext4_block_zero_page_range
  80. ext4_block_zero_page_range
  81. ext4_block_truncate_page
  82. ext4_zero_partial_blocks
  83. ext4_can_truncate
  84. ext4_update_disksize_before_punch
  85. ext4_wait_dax_page
  86. ext4_break_layouts
  87. ext4_punch_hole
  88. ext4_inode_attach_jinode
  89. ext4_truncate
  90. __ext4_get_inode_loc
  91. ext4_get_inode_loc
  92. ext4_should_use_dax
  93. ext4_set_inode_flags
  94. ext4_inode_blocks
  95. ext4_iget_extra_inode
  96. ext4_get_projid
  97. ext4_inode_set_iversion_queried
  98. ext4_inode_peek_iversion
  99. __ext4_iget
  100. ext4_inode_blocks_set
  101. other_inode_match
  102. ext4_update_other_inodes_time
  103. ext4_do_update_inode
  104. ext4_write_inode
  105. ext4_wait_for_tail_page_commit
  106. ext4_setattr
  107. ext4_getattr
  108. ext4_file_getattr
  109. ext4_index_trans_blocks
  110. ext4_meta_trans_blocks
  111. ext4_writepage_trans_blocks
  112. ext4_chunk_trans_blocks
  113. ext4_mark_iloc_dirty
  114. ext4_reserve_inode_write
  115. __ext4_expand_extra_isize
  116. ext4_try_to_expand_extra_isize
  117. ext4_expand_extra_isize
  118. ext4_mark_inode_dirty
  119. ext4_dirty_inode
  120. ext4_change_inode_journal_flag
  121. ext4_bh_unmapped
  122. ext4_page_mkwrite
  123. ext4_filemap_fault

   1 // SPDX-License-Identifier: GPL-2.0
   2 /*
   3  *  linux/fs/ext4/inode.c
   4  *
   5  * Copyright (C) 1992, 1993, 1994, 1995
   6  * Remy Card (card@masi.ibp.fr)
   7  * Laboratoire MASI - Institut Blaise Pascal
   8  * Universite Pierre et Marie Curie (Paris VI)
   9  *
  10  *  from
  11  *
  12  *  linux/fs/minix/inode.c
  13  *
  14  *  Copyright (C) 1991, 1992  Linus Torvalds
  15  *
  16  *  64-bit file support on 64-bit platforms by Jakub Jelinek
  17  *      (jj@sunsite.ms.mff.cuni.cz)
  18  *
  19  *  Assorted race fixes, rewrite of ext4_get_block() by Al Viro, 2000
  20  */
  21 
  22 #include <linux/fs.h>
  23 #include <linux/time.h>
  24 #include <linux/highuid.h>
  25 #include <linux/pagemap.h>
  26 #include <linux/dax.h>
  27 #include <linux/quotaops.h>
  28 #include <linux/string.h>
  29 #include <linux/buffer_head.h>
  30 #include <linux/writeback.h>
  31 #include <linux/pagevec.h>
  32 #include <linux/mpage.h>
  33 #include <linux/namei.h>
  34 #include <linux/uio.h>
  35 #include <linux/bio.h>
  36 #include <linux/workqueue.h>
  37 #include <linux/kernel.h>
  38 #include <linux/printk.h>
  39 #include <linux/slab.h>
  40 #include <linux/bitops.h>
  41 #include <linux/iomap.h>
  42 #include <linux/iversion.h>
  43 
  44 #include "ext4_jbd2.h"
  45 #include "xattr.h"
  46 #include "acl.h"
  47 #include "truncate.h"
  48 
  49 #include <trace/events/ext4.h>
  50 
  51 #define MPAGE_DA_EXTENT_TAIL 0x01
  52 
  53 static __u32 ext4_inode_csum(struct inode *inode, struct ext4_inode *raw,
  54                               struct ext4_inode_info *ei)
  55 {
  56         struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
  57         __u32 csum;
  58         __u16 dummy_csum = 0;
  59         int offset = offsetof(struct ext4_inode, i_checksum_lo);
  60         unsigned int csum_size = sizeof(dummy_csum);
  61 
  62         csum = ext4_chksum(sbi, ei->i_csum_seed, (__u8 *)raw, offset);
  63         csum = ext4_chksum(sbi, csum, (__u8 *)&dummy_csum, csum_size);
  64         offset += csum_size;
  65         csum = ext4_chksum(sbi, csum, (__u8 *)raw + offset,
  66                            EXT4_GOOD_OLD_INODE_SIZE - offset);
  67 
  68         if (EXT4_INODE_SIZE(inode->i_sb) > EXT4_GOOD_OLD_INODE_SIZE) {
  69                 offset = offsetof(struct ext4_inode, i_checksum_hi);
  70                 csum = ext4_chksum(sbi, csum, (__u8 *)raw +
  71                                    EXT4_GOOD_OLD_INODE_SIZE,
  72                                    offset - EXT4_GOOD_OLD_INODE_SIZE);
  73                 if (EXT4_FITS_IN_INODE(raw, ei, i_checksum_hi)) {
  74                         csum = ext4_chksum(sbi, csum, (__u8 *)&dummy_csum,
  75                                            csum_size);
  76                         offset += csum_size;
  77                 }
  78                 csum = ext4_chksum(sbi, csum, (__u8 *)raw + offset,
  79                                    EXT4_INODE_SIZE(inode->i_sb) - offset);
  80         }
  81 
  82         return csum;
  83 }
  84 
  85 static int ext4_inode_csum_verify(struct inode *inode, struct ext4_inode *raw,
  86                                   struct ext4_inode_info *ei)
  87 {
  88         __u32 provided, calculated;
  89 
  90         if (EXT4_SB(inode->i_sb)->s_es->s_creator_os !=
  91             cpu_to_le32(EXT4_OS_LINUX) ||
  92             !ext4_has_metadata_csum(inode->i_sb))
  93                 return 1;
  94 
  95         provided = le16_to_cpu(raw->i_checksum_lo);
  96         calculated = ext4_inode_csum(inode, raw, ei);
  97         if (EXT4_INODE_SIZE(inode->i_sb) > EXT4_GOOD_OLD_INODE_SIZE &&
  98             EXT4_FITS_IN_INODE(raw, ei, i_checksum_hi))
  99                 provided |= ((__u32)le16_to_cpu(raw->i_checksum_hi)) << 16;
 100         else
 101                 calculated &= 0xFFFF;
 102 
 103         return provided == calculated;
 104 }
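     /*
      * Editor's note (illustrative, not part of the original file): the 32-bit
      * crc32c computed by ext4_inode_csum() is split across two 16-bit on-disk
      * fields.  For example, a checksum of 0xAABBCCDD is stored as
      * i_checksum_lo = 0xCCDD and, when the large inode has room for it,
      * i_checksum_hi = 0xAABB; otherwise only the low 16 bits are kept and
      * compared by ext4_inode_csum_verify().
      */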
 105 
 106 static void ext4_inode_csum_set(struct inode *inode, struct ext4_inode *raw,
 107                                 struct ext4_inode_info *ei)
 108 {
 109         __u32 csum;
 110 
 111         if (EXT4_SB(inode->i_sb)->s_es->s_creator_os !=
 112             cpu_to_le32(EXT4_OS_LINUX) ||
 113             !ext4_has_metadata_csum(inode->i_sb))
 114                 return;
 115 
 116         csum = ext4_inode_csum(inode, raw, ei);
 117         raw->i_checksum_lo = cpu_to_le16(csum & 0xFFFF);
 118         if (EXT4_INODE_SIZE(inode->i_sb) > EXT4_GOOD_OLD_INODE_SIZE &&
 119             EXT4_FITS_IN_INODE(raw, ei, i_checksum_hi))
 120                 raw->i_checksum_hi = cpu_to_le16(csum >> 16);
 121 }
 122 
 123 static inline int ext4_begin_ordered_truncate(struct inode *inode,
 124                                               loff_t new_size)
 125 {
 126         trace_ext4_begin_ordered_truncate(inode, new_size);
 127         /*
 128          * If jinode is zero, then we never opened the file for
 129          * writing, so there's no need to call
 130          * jbd2_journal_begin_ordered_truncate() since there are no
 131          * outstanding writes we need to flush.
 132          */
 133         if (!EXT4_I(inode)->jinode)
 134                 return 0;
 135         return jbd2_journal_begin_ordered_truncate(EXT4_JOURNAL(inode),
 136                                                    EXT4_I(inode)->jinode,
 137                                                    new_size);
 138 }
 139 
 140 static void ext4_invalidatepage(struct page *page, unsigned int offset,
 141                                 unsigned int length);
 142 static int __ext4_journalled_writepage(struct page *page, unsigned int len);
 143 static int ext4_bh_delay_or_unwritten(handle_t *handle, struct buffer_head *bh);
 144 static int ext4_meta_trans_blocks(struct inode *inode, int lblocks,
 145                                   int pextents);
 146 
 147 /*
 148  * Test whether an inode is a fast symlink.
 149  * A fast symlink has its symlink data stored in ext4_inode_info->i_data.
 150  */
 151 int ext4_inode_is_fast_symlink(struct inode *inode)
 152 {
 153         if (!(EXT4_I(inode)->i_flags & EXT4_EA_INODE_FL)) {
 154                 int ea_blocks = EXT4_I(inode)->i_file_acl ?
 155                                 EXT4_CLUSTER_SIZE(inode->i_sb) >> 9 : 0;
 156 
 157                 if (ext4_has_inline_data(inode))
 158                         return 0;
 159 
 160                 return (S_ISLNK(inode->i_mode) && inode->i_blocks - ea_blocks == 0);
 161         }
 162         return S_ISLNK(inode->i_mode) && inode->i_size &&
 163                (inode->i_size < EXT4_N_BLOCKS * 4);
 164 }
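     /*
      * Editor's note (illustrative, not part of the original file): the inline
      * i_data array spans EXT4_N_BLOCKS * 4 = 15 * 4 = 60 bytes, so a symlink
      * whose target is shorter than 60 bytes can live there as a fast symlink
      * instead of occupying a separate data block.
      */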
 165 
 166 /*
 167  * Restart the transaction associated with *handle.  This does a commit,
 168  * so before we call here everything must be consistently dirtied against
 169  * this transaction.
 170  */
 171 int ext4_truncate_restart_trans(handle_t *handle, struct inode *inode,
 172                                  int nblocks)
 173 {
 174         int ret;
 175 
 176         /*
 177          * Drop i_data_sem to avoid deadlock with ext4_map_blocks.  At this
 178          * moment, get_block can be called only for blocks inside i_size since
 179          * the page cache has already been dropped and writes are blocked by
 180          * i_mutex. So we can safely drop the i_data_sem here.
 181          */
 182         BUG_ON(EXT4_JOURNAL(inode) == NULL);
 183         jbd_debug(2, "restarting handle %p\n", handle);
 184         up_write(&EXT4_I(inode)->i_data_sem);
 185         ret = ext4_journal_restart(handle, nblocks);
 186         down_write(&EXT4_I(inode)->i_data_sem);
 187         ext4_discard_preallocations(inode);
 188 
 189         return ret;
 190 }
 191 
 192 /*
 193  * Called at the last iput() if i_nlink is zero.
 194  */
 195 void ext4_evict_inode(struct inode *inode)
 196 {
 197         handle_t *handle;
 198         int err;
 199         /*
 200          * Credits for final inode cleanup and freeing:
 201          * sb + inode (ext4_orphan_del()), block bitmap, group descriptor
 202          * (xattr block freeing), bitmap, group descriptor (inode freeing)
 203          */
 204         int extra_credits = 6;
 205         struct ext4_xattr_inode_array *ea_inode_array = NULL;
 206 
 207         trace_ext4_evict_inode(inode);
 208 
 209         if (inode->i_nlink) {
 210                 /*
 211                  * When journalling data, dirty buffers are tracked only in the
 212                  * journal. So although mm thinks everything is clean and
 213                  * ready for reaping the inode might still have some pages to
 214                  * write in the running transaction or waiting to be
 215                  * checkpointed. Thus calling jbd2_journal_invalidatepage()
 216                  * (via truncate_inode_pages()) to discard these buffers can
 217                  * cause data loss. Also even if we did not discard these
 218                  * buffers, we would have no way to find them after the inode
 219                  * is reaped, and thus the user could see stale data if they try to
 220                  * read them before the transaction is checkpointed. So be
 221                  * careful and force everything to disk here... We use
 222                  * ei->i_datasync_tid to store the newest transaction
 223                  * containing inode's data.
 224                  *
 225                  * Note that directories do not have this problem because they
 226                  * don't use page cache.
 227                  */
 228                 if (inode->i_ino != EXT4_JOURNAL_INO &&
 229                     ext4_should_journal_data(inode) &&
 230                     (S_ISLNK(inode->i_mode) || S_ISREG(inode->i_mode)) &&
 231                     inode->i_data.nrpages) {
 232                         journal_t *journal = EXT4_SB(inode->i_sb)->s_journal;
 233                         tid_t commit_tid = EXT4_I(inode)->i_datasync_tid;
 234 
 235                         jbd2_complete_transaction(journal, commit_tid);
 236                         filemap_write_and_wait(&inode->i_data);
 237                 }
 238                 truncate_inode_pages_final(&inode->i_data);
 239 
 240                 goto no_delete;
 241         }
 242 
 243         if (is_bad_inode(inode))
 244                 goto no_delete;
 245         dquot_initialize(inode);
 246 
 247         if (ext4_should_order_data(inode))
 248                 ext4_begin_ordered_truncate(inode, 0);
 249         truncate_inode_pages_final(&inode->i_data);
 250 
 251         /*
 252          * Protect us against freezing - iput() caller didn't have to have any
 253          * protection against it
 254          */
 255         sb_start_intwrite(inode->i_sb);
 256 
 257         if (!IS_NOQUOTA(inode))
 258                 extra_credits += EXT4_MAXQUOTAS_DEL_BLOCKS(inode->i_sb);
 259 
 260         /*
 261          * Block bitmap, group descriptor, and inode are accounted in both
 262          * ext4_blocks_for_truncate() and extra_credits. So subtract 3.
 263          */
 264         handle = ext4_journal_start(inode, EXT4_HT_TRUNCATE,
 265                          ext4_blocks_for_truncate(inode) + extra_credits - 3);
 266         if (IS_ERR(handle)) {
 267                 ext4_std_error(inode->i_sb, PTR_ERR(handle));
 268                 /*
 269                  * If we're going to skip the normal cleanup, we still need to
 270                  * make sure that the in-core orphan linked list is properly
 271                  * cleaned up.
 272                  */
 273                 ext4_orphan_del(NULL, inode);
 274                 sb_end_intwrite(inode->i_sb);
 275                 goto no_delete;
 276         }
 277 
 278         if (IS_SYNC(inode))
 279                 ext4_handle_sync(handle);
 280 
 281         /*
 282          * Set inode->i_size to 0 before calling ext4_truncate(). We need
 283          * special handling of symlinks here because i_size is used to
 284          * determine whether ext4_inode_info->i_data contains symlink data or
 285          * block mappings. Setting i_size to 0 will remove its fast symlink
 286          * status. Erase i_data so that it becomes a valid empty block map.
 287          */
 288         if (ext4_inode_is_fast_symlink(inode))
 289                 memset(EXT4_I(inode)->i_data, 0, sizeof(EXT4_I(inode)->i_data));
 290         inode->i_size = 0;
 291         err = ext4_mark_inode_dirty(handle, inode);
 292         if (err) {
 293                 ext4_warning(inode->i_sb,
 294                              "couldn't mark inode dirty (err %d)", err);
 295                 goto stop_handle;
 296         }
 297         if (inode->i_blocks) {
 298                 err = ext4_truncate(inode);
 299                 if (err) {
 300                         ext4_error(inode->i_sb,
 301                                    "couldn't truncate inode %lu (err %d)",
 302                                    inode->i_ino, err);
 303                         goto stop_handle;
 304                 }
 305         }
 306 
 307         /* Remove xattr references. */
 308         err = ext4_xattr_delete_inode(handle, inode, &ea_inode_array,
 309                                       extra_credits);
 310         if (err) {
 311                 ext4_warning(inode->i_sb, "xattr delete (err %d)", err);
 312 stop_handle:
 313                 ext4_journal_stop(handle);
 314                 ext4_orphan_del(NULL, inode);
 315                 sb_end_intwrite(inode->i_sb);
 316                 ext4_xattr_inode_array_free(ea_inode_array);
 317                 goto no_delete;
 318         }
 319 
 320         /*
 321          * Kill off the orphan record which ext4_truncate created.
 322          * AKPM: I think this can be inside the above `if'.
 323          * Note that ext4_orphan_del() has to be able to cope with the
 324          * deletion of a non-existent orphan - this is because we don't
 325          * know if ext4_truncate() actually created an orphan record.
 326          * (Well, we could do this if we need to, but heck - it works)
 327          */
 328         ext4_orphan_del(handle, inode);
 329         EXT4_I(inode)->i_dtime  = (__u32)ktime_get_real_seconds();
 330 
 331         /*
 332          * One subtle ordering requirement: if anything has gone wrong
 333          * (transaction abort, IO errors, whatever), then we can still
 334          * do these next steps (the fs will already have been marked as
 335          * having errors), but we can't free the inode if the mark_dirty
 336          * fails.
 337          */
 338         if (ext4_mark_inode_dirty(handle, inode))
 339                 /* If that failed, just do the required in-core inode clear. */
 340                 ext4_clear_inode(inode);
 341         else
 342                 ext4_free_inode(handle, inode);
 343         ext4_journal_stop(handle);
 344         sb_end_intwrite(inode->i_sb);
 345         ext4_xattr_inode_array_free(ea_inode_array);
 346         return;
 347 no_delete:
 348         ext4_clear_inode(inode);        /* We must guarantee clearing of inode... */
 349 }
 350 
 351 #ifdef CONFIG_QUOTA
 352 qsize_t *ext4_get_reserved_space(struct inode *inode)
 353 {
 354         return &EXT4_I(inode)->i_reserved_quota;
 355 }
 356 #endif
 357 
 358 /*
 359  * Called with i_data_sem down, which is important since we can call
 360  * ext4_discard_preallocations() from here.
 361  */
 362 void ext4_da_update_reserve_space(struct inode *inode,
 363                                         int used, int quota_claim)
 364 {
 365         struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
 366         struct ext4_inode_info *ei = EXT4_I(inode);
 367 
 368         spin_lock(&ei->i_block_reservation_lock);
 369         trace_ext4_da_update_reserve_space(inode, used, quota_claim);
 370         if (unlikely(used > ei->i_reserved_data_blocks)) {
 371                 ext4_warning(inode->i_sb, "%s: ino %lu, used %d "
 372                          "with only %d reserved data blocks",
 373                          __func__, inode->i_ino, used,
 374                          ei->i_reserved_data_blocks);
 375                 WARN_ON(1);
 376                 used = ei->i_reserved_data_blocks;
 377         }
 378 
 379         /* Update per-inode reservations */
 380         ei->i_reserved_data_blocks -= used;
 381         percpu_counter_sub(&sbi->s_dirtyclusters_counter, used);
 382 
 383         spin_unlock(&EXT4_I(inode)->i_block_reservation_lock);
 384 
 385         /* Update quota subsystem for data blocks */
 386         if (quota_claim)
 387                 dquot_claim_block(inode, EXT4_C2B(sbi, used));
 388         else {
 389                 /*
 390                  * We did fallocate at an offset that was already delayed
 391                  * allocated, so on delayed-allocation writeback we should
 392                  * not re-claim the quota for fallocated blocks.
 393                  */
 394                 dquot_release_reservation_block(inode, EXT4_C2B(sbi, used));
 395         }
 396 
 397         /*
 398          * If we have done all the pending block allocations and if
 399          * there aren't any writers on the inode, we can discard the
 400          * inode's preallocations.
 401          */
 402         if ((ei->i_reserved_data_blocks == 0) &&
 403             !inode_is_open_for_write(inode))
 404                 ext4_discard_preallocations(inode);
 405 }
 406 
 407 static int __check_block_validity(struct inode *inode, const char *func,
 408                                 unsigned int line,
 409                                 struct ext4_map_blocks *map)
 410 {
 411         if (ext4_has_feature_journal(inode->i_sb) &&
 412             (inode->i_ino ==
 413              le32_to_cpu(EXT4_SB(inode->i_sb)->s_es->s_journal_inum)))
 414                 return 0;
 415         if (!ext4_data_block_valid(EXT4_SB(inode->i_sb), map->m_pblk,
 416                                    map->m_len)) {
 417                 ext4_error_inode(inode, func, line, map->m_pblk,
 418                                  "lblock %lu mapped to illegal pblock %llu "
 419                                  "(length %d)", (unsigned long) map->m_lblk,
 420                                  map->m_pblk, map->m_len);
 421                 return -EFSCORRUPTED;
 422         }
 423         return 0;
 424 }
 425 
 426 int ext4_issue_zeroout(struct inode *inode, ext4_lblk_t lblk, ext4_fsblk_t pblk,
 427                        ext4_lblk_t len)
 428 {
 429         int ret;
 430 
 431         if (IS_ENCRYPTED(inode))
 432                 return fscrypt_zeroout_range(inode, lblk, pblk, len);
 433 
 434         ret = sb_issue_zeroout(inode->i_sb, pblk, len, GFP_NOFS);
 435         if (ret > 0)
 436                 ret = 0;
 437 
 438         return ret;
 439 }
 440 
 441 #define check_block_validity(inode, map)        \
 442         __check_block_validity((inode), __func__, __LINE__, (map))
 443 
 444 #ifdef ES_AGGRESSIVE_TEST
 445 static void ext4_map_blocks_es_recheck(handle_t *handle,
 446                                        struct inode *inode,
 447                                        struct ext4_map_blocks *es_map,
 448                                        struct ext4_map_blocks *map,
 449                                        int flags)
 450 {
 451         int retval;
 452 
 453         map->m_flags = 0;
 454         /*
 455          * There is a race window in which the result may not be the same,
 456          * e.g. xfstests #223 when dioread_nolock is enabled.  The reason
 457          * is that we look up a block mapping in the extent status tree
 458          * without taking i_data_sem, so by that time the unwritten extent
 459          * could already have been converted.
 460          */
 461         down_read(&EXT4_I(inode)->i_data_sem);
 462         if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) {
 463                 retval = ext4_ext_map_blocks(handle, inode, map, flags &
 464                                              EXT4_GET_BLOCKS_KEEP_SIZE);
 465         } else {
 466                 retval = ext4_ind_map_blocks(handle, inode, map, flags &
 467                                              EXT4_GET_BLOCKS_KEEP_SIZE);
 468         }
 469         up_read((&EXT4_I(inode)->i_data_sem));
 470 
 471         /*
 472          * We don't check m_len because the extent will be collapsed in the
 473          * status tree, so the m_len values might not be equal.
 474          */
 475         if (es_map->m_lblk != map->m_lblk ||
 476             es_map->m_flags != map->m_flags ||
 477             es_map->m_pblk != map->m_pblk) {
 478                 printk("ES cache assertion failed for inode: %lu "
 479                        "es_cached ex [%d/%d/%llu/%x] != "
 480                        "found ex [%d/%d/%llu/%x] retval %d flags %x\n",
 481                        inode->i_ino, es_map->m_lblk, es_map->m_len,
 482                        es_map->m_pblk, es_map->m_flags, map->m_lblk,
 483                        map->m_len, map->m_pblk, map->m_flags,
 484                        retval, flags);
 485         }
 486 }
 487 #endif /* ES_AGGRESSIVE_TEST */
 488 
 489 /*
 490  * The ext4_map_blocks() function tries to look up the requested blocks,
 491  * and returns if the blocks are already mapped.
 492  *
 493  * Otherwise it takes the write lock of i_data_sem, allocates blocks,
 494  * stores the allocated blocks in the result buffer head and marks it
 495  * mapped.
 496  *
 497  * If the file is extent based, it calls ext4_ext_map_blocks();
 498  * otherwise it calls ext4_ind_map_blocks() to handle indirect-mapping
 499  * based files.
 500  *
 501  * On success, it returns the number of blocks being mapped or allocated.  If
 502  * create==0 and the blocks are pre-allocated and unwritten, the resulting @map
 503  * is marked as unwritten. If create==1, it will mark @map as mapped.
 504  *
 505  * It returns 0 if a plain lookup failed (blocks have not been allocated); in
 506  * that case, @map is returned as unmapped but we still fill in map->m_len to
 507  * indicate the length of the hole starting at map->m_lblk.
 508  *
 509  * It returns the error in case of allocation failure.
 510  */
 511 int ext4_map_blocks(handle_t *handle, struct inode *inode,
 512                     struct ext4_map_blocks *map, int flags)
 513 {
 514         struct extent_status es;
 515         int retval;
 516         int ret = 0;
 517 #ifdef ES_AGGRESSIVE_TEST
 518         struct ext4_map_blocks orig_map;
 519 
 520         memcpy(&orig_map, map, sizeof(*map));
 521 #endif
 522 
 523         map->m_flags = 0;
 524         ext_debug("ext4_map_blocks(): inode %lu, flag %d, max_blocks %u,"
 525                   "logical block %lu\n", inode->i_ino, flags, map->m_len,
 526                   (unsigned long) map->m_lblk);
 527 
 528         /*
 529          * ext4_map_blocks returns an int, and m_len is an unsigned int
 530          */
 531         if (unlikely(map->m_len > INT_MAX))
 532                 map->m_len = INT_MAX;
 533 
 534         /* We can only handle block numbers less than EXT_MAX_BLOCKS */
 535         if (unlikely(map->m_lblk >= EXT_MAX_BLOCKS))
 536                 return -EFSCORRUPTED;
 537 
 538         /* Look up the extent status tree first */
 539         if (ext4_es_lookup_extent(inode, map->m_lblk, NULL, &es)) {
 540                 if (ext4_es_is_written(&es) || ext4_es_is_unwritten(&es)) {
 541                         map->m_pblk = ext4_es_pblock(&es) +
 542                                         map->m_lblk - es.es_lblk;
 543                         map->m_flags |= ext4_es_is_written(&es) ?
 544                                         EXT4_MAP_MAPPED : EXT4_MAP_UNWRITTEN;
 545                         retval = es.es_len - (map->m_lblk - es.es_lblk);
 546                         if (retval > map->m_len)
 547                                 retval = map->m_len;
 548                         map->m_len = retval;
 549                 } else if (ext4_es_is_delayed(&es) || ext4_es_is_hole(&es)) {
 550                         map->m_pblk = 0;
 551                         retval = es.es_len - (map->m_lblk - es.es_lblk);
 552                         if (retval > map->m_len)
 553                                 retval = map->m_len;
 554                         map->m_len = retval;
 555                         retval = 0;
 556                 } else {
 557                         BUG();
 558                 }
 559 #ifdef ES_AGGRESSIVE_TEST
 560                 ext4_map_blocks_es_recheck(handle, inode, map,
 561                                            &orig_map, flags);
 562 #endif
 563                 goto found;
 564         }
 565 
 566         /*
 567          * Try to see if we can get the block without requesting a new
 568          * file system block.
 569          */
 570         down_read(&EXT4_I(inode)->i_data_sem);
 571         if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) {
 572                 retval = ext4_ext_map_blocks(handle, inode, map, flags &
 573                                              EXT4_GET_BLOCKS_KEEP_SIZE);
 574         } else {
 575                 retval = ext4_ind_map_blocks(handle, inode, map, flags &
 576                                              EXT4_GET_BLOCKS_KEEP_SIZE);
 577         }
 578         if (retval > 0) {
 579                 unsigned int status;
 580 
 581                 if (unlikely(retval != map->m_len)) {
 582                         ext4_warning(inode->i_sb,
 583                                      "ES len assertion failed for inode "
 584                                      "%lu: retval %d != map->m_len %d",
 585                                      inode->i_ino, retval, map->m_len);
 586                         WARN_ON(1);
 587                 }
 588 
 589                 status = map->m_flags & EXT4_MAP_UNWRITTEN ?
 590                                 EXTENT_STATUS_UNWRITTEN : EXTENT_STATUS_WRITTEN;
 591                 if (!(flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE) &&
 592                     !(status & EXTENT_STATUS_WRITTEN) &&
 593                     ext4_es_scan_range(inode, &ext4_es_is_delayed, map->m_lblk,
 594                                        map->m_lblk + map->m_len - 1))
 595                         status |= EXTENT_STATUS_DELAYED;
 596                 ret = ext4_es_insert_extent(inode, map->m_lblk,
 597                                             map->m_len, map->m_pblk, status);
 598                 if (ret < 0)
 599                         retval = ret;
 600         }
 601         up_read((&EXT4_I(inode)->i_data_sem));
 602 
 603 found:
 604         if (retval > 0 && map->m_flags & EXT4_MAP_MAPPED) {
 605                 ret = check_block_validity(inode, map);
 606                 if (ret != 0)
 607                         return ret;
 608         }
 609 
 610         /* If it is only a block(s) lookup */
 611         if ((flags & EXT4_GET_BLOCKS_CREATE) == 0)
 612                 return retval;
 613 
 614         /*
 615          * Return if the blocks have already been allocated.
 616          *
 617          * Note that if blocks have been preallocated,
 618          * ext4_ext_get_block() returns with create = 0
 619          * and the buffer head unmapped.
 620          */
 621         if (retval > 0 && map->m_flags & EXT4_MAP_MAPPED)
 622                 /*
 623                  * If we need to convert the extent to unwritten,
 624                  * we continue and do the actual work in
 625                  * ext4_ext_map_blocks().
 626                  */
 627                 if (!(flags & EXT4_GET_BLOCKS_CONVERT_UNWRITTEN))
 628                         return retval;
 629 
 630         /*
 631          * Here we clear m_flags because after allocating a new extent,
 632          * it will be set again.
 633          */
 634         map->m_flags &= ~EXT4_MAP_FLAGS;
 635 
 636         /*
 637          * Allocating new blocks and/or writing to an unwritten extent
 638          * will possibly result in updating i_data, so we take
 639          * the write lock of i_data_sem and call get_block()
 640          * with the create == 1 flag.
 641          */
 642         down_write(&EXT4_I(inode)->i_data_sem);
 643 
 644         /*
 645          * We need to check the extents flag again here because a
 646          * migrate could have changed the inode type in between.
 647          */
 648         if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) {
 649                 retval = ext4_ext_map_blocks(handle, inode, map, flags);
 650         } else {
 651                 retval = ext4_ind_map_blocks(handle, inode, map, flags);
 652 
 653                 if (retval > 0 && map->m_flags & EXT4_MAP_NEW) {
 654                         /*
 655                          * We allocated new blocks, which will result in
 656                          * i_data's format changing.  Force the migrate
 657                          * to fail by clearing the migrate flags.
 658                          */
 659                         ext4_clear_inode_state(inode, EXT4_STATE_EXT_MIGRATE);
 660                 }
 661 
 662                 /*
 663                  * Update reserved data/metadata blocks after a successful
 664                  * block allocation, which had been deferred till now. We don't
 665                  * support fallocate for non-extent files, so we can update
 666                  * the reserved space here.
 667                  */
 668                 if ((retval > 0) &&
 669                         (flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE))
 670                         ext4_da_update_reserve_space(inode, retval, 1);
 671         }
 672 
 673         if (retval > 0) {
 674                 unsigned int status;
 675 
 676                 if (unlikely(retval != map->m_len)) {
 677                         ext4_warning(inode->i_sb,
 678                                      "ES len assertion failed for inode "
 679                                      "%lu: retval %d != map->m_len %d",
 680                                      inode->i_ino, retval, map->m_len);
 681                         WARN_ON(1);
 682                 }
 683 
 684                 /*
 685                  * We have to zero out blocks before inserting them into the
 686                  * extent status tree. Otherwise someone could look them up there
 687                  * and use them before they are really zeroed. We also have to
 688                  * unmap metadata before zeroing as otherwise writeback can
 689                  * overwrite zeros with stale data from the block device.
 690                  */
 691                 if (flags & EXT4_GET_BLOCKS_ZERO &&
 692                     map->m_flags & EXT4_MAP_MAPPED &&
 693                     map->m_flags & EXT4_MAP_NEW) {
 694                         ret = ext4_issue_zeroout(inode, map->m_lblk,
 695                                                  map->m_pblk, map->m_len);
 696                         if (ret) {
 697                                 retval = ret;
 698                                 goto out_sem;
 699                         }
 700                 }
 701 
 702                 /*
 703                  * If the extent has been zeroed out, we don't need to update
 704                  * the extent status tree.
 705                  */
 706                 if ((flags & EXT4_GET_BLOCKS_PRE_IO) &&
 707                     ext4_es_lookup_extent(inode, map->m_lblk, NULL, &es)) {
 708                         if (ext4_es_is_written(&es))
 709                                 goto out_sem;
 710                 }
 711                 status = map->m_flags & EXT4_MAP_UNWRITTEN ?
 712                                 EXTENT_STATUS_UNWRITTEN : EXTENT_STATUS_WRITTEN;
 713                 if (!(flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE) &&
 714                     !(status & EXTENT_STATUS_WRITTEN) &&
 715                     ext4_es_scan_range(inode, &ext4_es_is_delayed, map->m_lblk,
 716                                        map->m_lblk + map->m_len - 1))
 717                         status |= EXTENT_STATUS_DELAYED;
 718                 ret = ext4_es_insert_extent(inode, map->m_lblk, map->m_len,
 719                                             map->m_pblk, status);
 720                 if (ret < 0) {
 721                         retval = ret;
 722                         goto out_sem;
 723                 }
 724         }
 725 
 726 out_sem:
 727         up_write((&EXT4_I(inode)->i_data_sem));
 728         if (retval > 0 && map->m_flags & EXT4_MAP_MAPPED) {
 729                 ret = check_block_validity(inode, map);
 730                 if (ret != 0)
 731                         return ret;
 732 
 733                 /*
 734                  * Inodes with freshly allocated blocks where contents will be
 735                  * visible after transaction commit must be on the transaction's
 736                  * ordered data list.
 737                  */
 738                 if (map->m_flags & EXT4_MAP_NEW &&
 739                     !(map->m_flags & EXT4_MAP_UNWRITTEN) &&
 740                     !(flags & EXT4_GET_BLOCKS_ZERO) &&
 741                     !ext4_is_quota_file(inode) &&
 742                     ext4_should_order_data(inode)) {
 743                         loff_t start_byte =
 744                                 (loff_t)map->m_lblk << inode->i_blkbits;
 745                         loff_t length = (loff_t)map->m_len << inode->i_blkbits;
 746 
 747                         if (flags & EXT4_GET_BLOCKS_IO_SUBMIT)
 748                                 ret = ext4_jbd2_inode_add_wait(handle, inode,
 749                                                 start_byte, length);
 750                         else
 751                                 ret = ext4_jbd2_inode_add_write(handle, inode,
 752                                                 start_byte, length);
 753                         if (ret)
 754                                 return ret;
 755                 }
 756         }
 757         return retval;
 758 }
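     /*
      * Editor's sketch (hypothetical helper, not part of the original file):
      * a minimal lookup-only use of ext4_map_blocks().  Without the
      * EXT4_GET_BLOCKS_CREATE flag nothing is allocated, and a return value
      * of 0 means a hole of map.m_len blocks starting at map.m_lblk.
      */
     static inline int example_lookup_pblock(struct inode *inode,
                                             ext4_lblk_t lblk,
                                             ext4_fsblk_t *pblk)
     {
             struct ext4_map_blocks map = { .m_lblk = lblk, .m_len = 1 };
             int ret;

             /* handle may be NULL for a pure lookup */
             ret = ext4_map_blocks(NULL, inode, &map, 0);
             if (ret < 0)
                     return ret;             /* lookup error */
             if (ret == 0 || !(map.m_flags & EXT4_MAP_MAPPED))
                     return -ENOENT;         /* hole or unwritten extent */
             *pblk = map.m_pblk;
             return 0;
     }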
 759 
 760 /*
 761  * Update EXT4_MAP_FLAGS in bh->b_state. For buffer heads attached to pages
 762  * we have to be careful as someone else may be manipulating b_state as well.
 763  */
 764 static void ext4_update_bh_state(struct buffer_head *bh, unsigned long flags)
 765 {
 766         unsigned long old_state;
 767         unsigned long new_state;
 768 
 769         flags &= EXT4_MAP_FLAGS;
 770 
 771         /* Dummy buffer_head? Set non-atomically. */
 772         if (!bh->b_page) {
 773                 bh->b_state = (bh->b_state & ~EXT4_MAP_FLAGS) | flags;
 774                 return;
 775         }
 776         /*
 777          * Someone else may be modifying b_state. Be careful! This is ugly but
 778          * once we get rid of using bh as a container for mapping information
 779          * to pass to / from get_block functions, this can go away.
 780          */
 781         do {
 782                 old_state = READ_ONCE(bh->b_state);
 783                 new_state = (old_state & ~EXT4_MAP_FLAGS) | flags;
 784         } while (unlikely(
 785                  cmpxchg(&bh->b_state, old_state, new_state) != old_state));
 786 }
 787 
 788 static int _ext4_get_block(struct inode *inode, sector_t iblock,
 789                            struct buffer_head *bh, int flags)
 790 {
 791         struct ext4_map_blocks map;
 792         int ret = 0;
 793 
 794         if (ext4_has_inline_data(inode))
 795                 return -ERANGE;
 796 
 797         map.m_lblk = iblock;
 798         map.m_len = bh->b_size >> inode->i_blkbits;
 799 
 800         ret = ext4_map_blocks(ext4_journal_current_handle(), inode, &map,
 801                               flags);
 802         if (ret > 0) {
 803                 map_bh(bh, inode->i_sb, map.m_pblk);
 804                 ext4_update_bh_state(bh, map.m_flags);
 805                 bh->b_size = inode->i_sb->s_blocksize * map.m_len;
 806                 ret = 0;
 807         } else if (ret == 0) {
 808                 /* hole case, need to fill in bh->b_size */
 809                 bh->b_size = inode->i_sb->s_blocksize * map.m_len;
 810         }
 811         return ret;
 812 }
 813 
 814 int ext4_get_block(struct inode *inode, sector_t iblock,
 815                    struct buffer_head *bh, int create)
 816 {
 817         return _ext4_get_block(inode, iblock, bh,
 818                                create ? EXT4_GET_BLOCKS_CREATE : 0);
 819 }
 820 
 821 /*
 822  * Get block function used when preparing for a buffered write if we require
 823  * creating an unwritten extent when blocks haven't been allocated.  The extent
 824  * will be converted to written after the IO is complete.
 825  */
 826 int ext4_get_block_unwritten(struct inode *inode, sector_t iblock,
 827                              struct buffer_head *bh_result, int create)
 828 {
 829         ext4_debug("ext4_get_block_unwritten: inode %lu, create flag %d\n",
 830                    inode->i_ino, create);
 831         return _ext4_get_block(inode, iblock, bh_result,
 832                                EXT4_GET_BLOCKS_IO_CREATE_EXT);
 833 }
 834 
 835 /* Maximum number of blocks we map for direct IO at once. */
 836 #define DIO_MAX_BLOCKS 4096
 837 
 838 /*
 839  * Get blocks function for the cases that need to start a transaction -
 840  * generally different cases of direct IO and DAX IO. It also handles retries
 841  * in case of ENOSPC.
 842  */
 843 static int ext4_get_block_trans(struct inode *inode, sector_t iblock,
 844                                 struct buffer_head *bh_result, int flags)
 845 {
 846         int dio_credits;
 847         handle_t *handle;
 848         int retries = 0;
 849         int ret;
 850 
 851         /* Trim mapping request to maximum we can map at once for DIO */
 852         if (bh_result->b_size >> inode->i_blkbits > DIO_MAX_BLOCKS)
 853                 bh_result->b_size = DIO_MAX_BLOCKS << inode->i_blkbits;
 854         dio_credits = ext4_chunk_trans_blocks(inode,
 855                                       bh_result->b_size >> inode->i_blkbits);
 856 retry:
 857         handle = ext4_journal_start(inode, EXT4_HT_MAP_BLOCKS, dio_credits);
 858         if (IS_ERR(handle))
 859                 return PTR_ERR(handle);
 860 
 861         ret = _ext4_get_block(inode, iblock, bh_result, flags);
 862         ext4_journal_stop(handle);
 863 
 864         if (ret == -ENOSPC && ext4_should_retry_alloc(inode->i_sb, &retries))
 865                 goto retry;
 866         return ret;
 867 }
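     /*
      * Editor's note (illustrative, not part of the original file):
      * DIO_MAX_BLOCKS caps a single mapping request at 4096 file system
      * blocks, i.e. 16 MiB with the common 4 KiB block size, which in turn
      * bounds the transaction credits computed by ext4_chunk_trans_blocks()
      * above.
      */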
 868 
 869 /* Get block function for DIO reads and writes to inodes without extents */
 870 int ext4_dio_get_block(struct inode *inode, sector_t iblock,
 871                        struct buffer_head *bh, int create)
 872 {
 873         /* We don't expect handle for direct IO */
 874         WARN_ON_ONCE(ext4_journal_current_handle());
 875 
 876         if (!create)
 877                 return _ext4_get_block(inode, iblock, bh, 0);
 878         return ext4_get_block_trans(inode, iblock, bh, EXT4_GET_BLOCKS_CREATE);
 879 }
 880 
 881 /*
 882  * Get block function for AIO DIO writes when we create an unwritten extent if
 883  * blocks are not allocated yet. The extent will be converted to written
 884  * after IO is complete.
 885  */
 886 static int ext4_dio_get_block_unwritten_async(struct inode *inode,
 887                 sector_t iblock, struct buffer_head *bh_result, int create)
 888 {
 889         int ret;
 890 
 891         /* We don't expect handle for direct IO */
 892         WARN_ON_ONCE(ext4_journal_current_handle());
 893 
 894         ret = ext4_get_block_trans(inode, iblock, bh_result,
 895                                    EXT4_GET_BLOCKS_IO_CREATE_EXT);
 896 
 897         /*
 898          * When doing DIO using unwritten extents, we need io_end to convert
 899          * unwritten extents to written on IO completion. We allocate io_end
 900          * once we spot an unwritten extent and store it in b_private. Generic
 901          * DIO code keeps b_private set and furthermore passes the value to
 902          * our completion callback in the 'private' argument.
 903          */
 904         if (!ret && buffer_unwritten(bh_result)) {
 905                 if (!bh_result->b_private) {
 906                         ext4_io_end_t *io_end;
 907 
 908                         io_end = ext4_init_io_end(inode, GFP_KERNEL);
 909                         if (!io_end)
 910                                 return -ENOMEM;
 911                         bh_result->b_private = io_end;
 912                         ext4_set_io_unwritten_flag(inode, io_end);
 913                 }
 914                 set_buffer_defer_completion(bh_result);
 915         }
 916 
 917         return ret;
 918 }
 919 
 920 /*
 921  * Get block function for non-AIO DIO writes when we create an unwritten extent if
 922  * blocks are not allocated yet. The extent will be converted to written
 923  * after IO is complete by ext4_direct_IO_write().
 924  */
 925 static int ext4_dio_get_block_unwritten_sync(struct inode *inode,
 926                 sector_t iblock, struct buffer_head *bh_result, int create)
 927 {
 928         int ret;
 929 
 930         /* We don't expect handle for direct IO */
 931         WARN_ON_ONCE(ext4_journal_current_handle());
 932 
 933         ret = ext4_get_block_trans(inode, iblock, bh_result,
 934                                    EXT4_GET_BLOCKS_IO_CREATE_EXT);
 935 
 936         /*
 937          * Mark the inode as having pending DIO writes to unwritten extents.
 938          * ext4_direct_IO_write() checks this flag and converts extents to
 939          * written.
 940          */
 941         if (!ret && buffer_unwritten(bh_result))
 942                 ext4_set_inode_state(inode, EXT4_STATE_DIO_UNWRITTEN);
 943 
 944         return ret;
 945 }
 946 
 947 static int ext4_dio_get_block_overwrite(struct inode *inode, sector_t iblock,
 948                    struct buffer_head *bh_result, int create)
 949 {
 950         int ret;
 951 
 952         ext4_debug("ext4_dio_get_block_overwrite: inode %lu, create flag %d\n",
 953                    inode->i_ino, create);
 954         /* We don't expect handle for direct IO */
 955         WARN_ON_ONCE(ext4_journal_current_handle());
 956 
 957         ret = _ext4_get_block(inode, iblock, bh_result, 0);
 958         /*
 959          * Blocks should have been preallocated! ext4_file_write_iter() checks
 960          * that.
 961          */
 962         WARN_ON_ONCE(!buffer_mapped(bh_result) || buffer_unwritten(bh_result));
 963 
 964         return ret;
 965 }
 966 
 967 
 968 /*
 969  * `handle' can be NULL if create is zero
 970  */
 971 struct buffer_head *ext4_getblk(handle_t *handle, struct inode *inode,
 972                                 ext4_lblk_t block, int map_flags)
 973 {
 974         struct ext4_map_blocks map;
 975         struct buffer_head *bh;
 976         int create = map_flags & EXT4_GET_BLOCKS_CREATE;
 977         int err;
 978 
 979         J_ASSERT(handle != NULL || create == 0);
 980 
 981         map.m_lblk = block;
 982         map.m_len = 1;
 983         err = ext4_map_blocks(handle, inode, &map, map_flags);
 984 
 985         if (err == 0)
 986                 return create ? ERR_PTR(-ENOSPC) : NULL;
 987         if (err < 0)
 988                 return ERR_PTR(err);
 989 
 990         bh = sb_getblk(inode->i_sb, map.m_pblk);
 991         if (unlikely(!bh))
 992                 return ERR_PTR(-ENOMEM);
 993         if (map.m_flags & EXT4_MAP_NEW) {
 994                 J_ASSERT(create != 0);
 995                 J_ASSERT(handle != NULL);
 996 
 997                 /*
 998                  * Now that we do not always journal data, we should
 999                  * keep in mind whether this should always journal the
1000                  * new buffer as metadata.  For now, regular file
1001                  * writes use ext4_get_block instead, so it's not a
1002                  * problem.
1003                  */
1004                 lock_buffer(bh);
1005                 BUFFER_TRACE(bh, "call get_create_access");
1006                 err = ext4_journal_get_create_access(handle, bh);
1007                 if (unlikely(err)) {
1008                         unlock_buffer(bh);
1009                         goto errout;
1010                 }
1011                 if (!buffer_uptodate(bh)) {
1012                         memset(bh->b_data, 0, inode->i_sb->s_blocksize);
1013                         set_buffer_uptodate(bh);
1014                 }
1015                 unlock_buffer(bh);
1016                 BUFFER_TRACE(bh, "call ext4_handle_dirty_metadata");
1017                 err = ext4_handle_dirty_metadata(handle, inode, bh);
1018                 if (unlikely(err))
1019                         goto errout;
1020         } else
1021                 BUFFER_TRACE(bh, "not a new buffer");
1022         return bh;
1023 errout:
1024         brelse(bh);
1025         return ERR_PTR(err);
1026 }
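     /*
      * Editor's sketch (hypothetical helper, not part of the original file):
      * a read-only lookup with ext4_getblk().  With handle == NULL and no
      * EXT4_GET_BLOCKS_CREATE in map_flags, a hole yields a NULL return
      * rather than an ERR_PTR, so all three outcomes need checking.
      */
     static inline int example_peek_block(struct inode *inode, ext4_lblk_t blk)
     {
             struct buffer_head *bh = ext4_getblk(NULL, inode, blk, 0);

             if (IS_ERR(bh))
                     return PTR_ERR(bh);     /* lookup failed */
             if (!bh)
                     return 0;               /* hole: nothing mapped at blk */
             brelse(bh);                     /* drop the reference we took */
             return 1;                       /* block is mapped */
     }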
1027 
1028 struct buffer_head *ext4_bread(handle_t *handle, struct inode *inode,
1029                                ext4_lblk_t block, int map_flags)
1030 {
1031         struct buffer_head *bh;
1032 
1033         bh = ext4_getblk(handle, inode, block, map_flags);
1034         if (IS_ERR(bh))
1035                 return bh;
1036         if (!bh || ext4_buffer_uptodate(bh))
1037                 return bh;
1038         ll_rw_block(REQ_OP_READ, REQ_META | REQ_PRIO, 1, &bh);
1039         wait_on_buffer(bh);
1040         if (buffer_uptodate(bh))
1041                 return bh;
1042         put_bh(bh);
1043         return ERR_PTR(-EIO);
1044 }
1045 
1046 /* Read a contiguous batch of blocks. */
1047 int ext4_bread_batch(struct inode *inode, ext4_lblk_t block, int bh_count,
1048                      bool wait, struct buffer_head **bhs)
1049 {
1050         int i, err;
1051 
1052         for (i = 0; i < bh_count; i++) {
1053                 bhs[i] = ext4_getblk(NULL, inode, block + i, 0 /* map_flags */);
1054                 if (IS_ERR(bhs[i])) {
1055                         err = PTR_ERR(bhs[i]);
1056                         bh_count = i;
1057                         goto out_brelse;
1058                 }
1059         }
1060 
1061         for (i = 0; i < bh_count; i++)
1062                 /* Note that NULL bhs[i] is valid because of holes. */
1063                 if (bhs[i] && !ext4_buffer_uptodate(bhs[i]))
1064                         ll_rw_block(REQ_OP_READ, REQ_META | REQ_PRIO, 1,
1065                                     &bhs[i]);
1066 
1067         if (!wait)
1068                 return 0;
1069 
1070         for (i = 0; i < bh_count; i++)
1071                 if (bhs[i])
1072                         wait_on_buffer(bhs[i]);
1073 
1074         for (i = 0; i < bh_count; i++) {
1075                 if (bhs[i] && !buffer_uptodate(bhs[i])) {
1076                         err = -EIO;
1077                         goto out_brelse;
1078                 }
1079         }
1080         return 0;
1081 
1082 out_brelse:
1083         for (i = 0; i < bh_count; i++) {
1084                 brelse(bhs[i]);
1085                 bhs[i] = NULL;
1086         }
1087         return err;
1088 }
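     /*
      * Editor's note (illustrative, not part of the original file): a caller
      * passing, say, bh_count == 4 and wait == true gets back buffer heads
      * for blocks [block, block + 3]; entries that fall in holes are NULL,
      * and on any error every buffer head acquired so far is released and
      * set to NULL before the error code is returned.
      */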
1089 
1090 int ext4_walk_page_buffers(handle_t *handle,
1091                            struct buffer_head *head,
1092                            unsigned from,
1093                            unsigned to,
1094                            int *partial,
1095                            int (*fn)(handle_t *handle,
1096                                      struct buffer_head *bh))
1097 {
1098         struct buffer_head *bh;
1099         unsigned block_start, block_end;
1100         unsigned blocksize = head->b_size;
1101         int err, ret = 0;
1102         struct buffer_head *next;
1103 
1104         for (bh = head, block_start = 0;
1105              ret == 0 && (bh != head || !block_start);
1106              block_start = block_end, bh = next) {
1107                 next = bh->b_this_page;
1108                 block_end = block_start + blocksize;
1109                 if (block_end <= from || block_start >= to) {
1110                         if (partial && !buffer_uptodate(bh))
1111                                 *partial = 1;
1112                         continue;
1113                 }
1114                 err = (*fn)(handle, bh);
1115                 if (!ret)
1116                         ret = err;
1117         }
1118         return ret;
1119 }
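     /*
      * Editor's sketch (illustrative, not part of the original file): a
      * typical use pairs this walker with do_journal_get_write_access()
      * below to get journal write access for every buffer overlapping a
      * write, e.g.
      *
      *      ret = ext4_walk_page_buffers(handle, page_buffers(page),
      *                                   from, to, NULL,
      *                                   do_journal_get_write_access);
      *
      * where [from, to) are byte offsets within the page and the NULL
      * partial pointer means the caller does not care whether buffers
      * outside that range are up to date.
      */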
1120 
1121 /*
1122  * To preserve ordering, it is essential that the hole instantiation and
1123  * the data write be encapsulated in a single transaction.  We cannot
1124  * close off a transaction and start a new one between the ext4_get_block()
1125  * and the commit_write().  So doing the jbd2_journal_start at the start of
1126  * prepare_write() is the right place.
1127  *
1128  * Also, this function can nest inside ext4_writepage().  In that case, we
1129  * *know* that ext4_writepage() has generated enough buffer credits to do the
1130  * whole page.  So we won't block on the journal in that case, which is good,
1131  * because the caller may be PF_MEMALLOC.
1132  *
1133  * By accident, ext4 can be reentered when a transaction is open via
1134  * quota file writes.  If we were to commit the transaction while thus
1135  * reentered, there can be a deadlock - we would be holding a quota
1136  * lock, and the commit would never complete if another thread had a
1137  * transaction open and was blocking on the quota lock - a ranking
1138  * violation.
1139  *
1140  * So what we do is to rely on the fact that jbd2_journal_stop/journal_start
1141  * will _not_ run commit under these circumstances because handle->h_ref
1142  * is elevated.  We'll still have enough credits for the tiny quotafile
1143  * write.
1144  */
1145 int do_journal_get_write_access(handle_t *handle,
1146                                 struct buffer_head *bh)
1147 {
1148         int dirty = buffer_dirty(bh);
1149         int ret;
1150 
1151         if (!buffer_mapped(bh) || buffer_freed(bh))
1152                 return 0;
1153         /*
1154          * __block_write_begin() could have dirtied some buffers. Clear
1155          * the dirty bit, as jbd2_journal_get_write_access() could otherwise
1156          * complain about fs integrity issues. Setting of the dirty bit
1157          * by __block_write_begin() isn't a real problem here as we clear
1158          * the bit before releasing the page lock and thus writeback cannot
1159          * ever write the buffer.
1160          */
1161         if (dirty)
1162                 clear_buffer_dirty(bh);
1163         BUFFER_TRACE(bh, "get write access");
1164         ret = ext4_journal_get_write_access(handle, bh);
1165         if (!ret && dirty)
1166                 ret = ext4_handle_dirty_metadata(handle, NULL, bh);
1167         return ret;
1168 }
1169 
1170 #ifdef CONFIG_FS_ENCRYPTION
1171 static int ext4_block_write_begin(struct page *page, loff_t pos, unsigned len,
1172                                   get_block_t *get_block)
1173 {
1174         unsigned from = pos & (PAGE_SIZE - 1);
1175         unsigned to = from + len;
1176         struct inode *inode = page->mapping->host;
1177         unsigned block_start, block_end;
1178         sector_t block;
1179         int err = 0;
1180         unsigned blocksize = inode->i_sb->s_blocksize;
1181         unsigned bbits;
1182         struct buffer_head *bh, *head, *wait[2];
1183         int nr_wait = 0;
1184         int i;
1185 
1186         BUG_ON(!PageLocked(page));
1187         BUG_ON(from > PAGE_SIZE);
1188         BUG_ON(to > PAGE_SIZE);
1189         BUG_ON(from > to);
1190 
1191         if (!page_has_buffers(page))
1192                 create_empty_buffers(page, blocksize, 0);
1193         head = page_buffers(page);
1194         bbits = ilog2(blocksize);
1195         block = (sector_t)page->index << (PAGE_SHIFT - bbits);
1196 
1197         for (bh = head, block_start = 0; bh != head || !block_start;
1198             block++, block_start = block_end, bh = bh->b_this_page) {
1199                 block_end = block_start + blocksize;
1200                 if (block_end <= from || block_start >= to) {
1201                         if (PageUptodate(page)) {
1202                                 if (!buffer_uptodate(bh))
1203                                         set_buffer_uptodate(bh);
1204                         }
1205                         continue;
1206                 }
1207                 if (buffer_new(bh))
1208                         clear_buffer_new(bh);
1209                 if (!buffer_mapped(bh)) {
1210                         WARN_ON(bh->b_size != blocksize);
1211                         err = get_block(inode, block, bh, 1);
1212                         if (err)
1213                                 break;
1214                         if (buffer_new(bh)) {
1215                                 if (PageUptodate(page)) {
1216                                         clear_buffer_new(bh);
1217                                         set_buffer_uptodate(bh);
1218                                         mark_buffer_dirty(bh);
1219                                         continue;
1220                                 }
1221                                 if (block_end > to || block_start < from)
1222                                         zero_user_segments(page, to, block_end,
1223                                                            block_start, from);
1224                                 continue;
1225                         }
1226                 }
1227                 if (PageUptodate(page)) {
1228                         if (!buffer_uptodate(bh))
1229                                 set_buffer_uptodate(bh);
1230                         continue;
1231                 }
1232                 if (!buffer_uptodate(bh) && !buffer_delay(bh) &&
1233                     !buffer_unwritten(bh) &&
1234                     (block_start < from || block_end > to)) {
1235                         ll_rw_block(REQ_OP_READ, 0, 1, &bh);
1236                         wait[nr_wait++] = bh;
1237                 }
1238         }
1239         /*
1240          * If we issued read requests, let them complete.
1241          */
1242         for (i = 0; i < nr_wait; i++) {
1243                 wait_on_buffer(wait[i]);
1244                 if (!buffer_uptodate(wait[i]))
1245                         err = -EIO;
1246         }
1247         if (unlikely(err)) {
1248                 page_zero_new_buffers(page, from, to);
1249         } else if (IS_ENCRYPTED(inode) && S_ISREG(inode->i_mode)) {
1250                 for (i = 0; i < nr_wait; i++) {
1251                         int err2;
1252 
1253                         err2 = fscrypt_decrypt_pagecache_blocks(page, blocksize,
1254                                                                 bh_offset(wait[i]));
1255                         if (err2) {
1256                                 clear_buffer_uptodate(wait[i]);
1257                                 err = err2;
1258                         }
1259                 }
1260         }
1261 
1262         return err;
1263 }
1264 #endif
1265 
1266 static int ext4_write_begin(struct file *file, struct address_space *mapping,
1267                             loff_t pos, unsigned len, unsigned flags,
1268                             struct page **pagep, void **fsdata)
1269 {
1270         struct inode *inode = mapping->host;
1271         int ret, needed_blocks;
1272         handle_t *handle;
1273         int retries = 0;
1274         struct page *page;
1275         pgoff_t index;
1276         unsigned from, to;
1277 
1278         if (unlikely(ext4_forced_shutdown(EXT4_SB(inode->i_sb))))
1279                 return -EIO;
1280 
1281         trace_ext4_write_begin(inode, pos, len, flags);
1282         /*
1283          * Reserve one block more for addition to orphan list in case
1284          * we allocate blocks but write fails for some reason
1285          */
1286         needed_blocks = ext4_writepage_trans_blocks(inode) + 1;
1287         index = pos >> PAGE_SHIFT;
1288         from = pos & (PAGE_SIZE - 1);
1289         to = from + len;
1290 
1291         if (ext4_test_inode_state(inode, EXT4_STATE_MAY_INLINE_DATA)) {
1292                 ret = ext4_try_to_write_inline_data(mapping, inode, pos, len,
1293                                                     flags, pagep);
1294                 if (ret < 0)
1295                         return ret;
1296                 if (ret == 1)
1297                         return 0;
1298         }
1299 
1300         /*
1301          * grab_cache_page_write_begin() can take a long time if the
1302          * system is thrashing due to memory pressure, or if the page
1303          * is being written back.  So grab it first before we start
1304          * the transaction handle.  This also allows us to allocate
1305          * the page (if needed) without using GFP_NOFS.
1306          */
1307 retry_grab:
1308         page = grab_cache_page_write_begin(mapping, index, flags);
1309         if (!page)
1310                 return -ENOMEM;
1311         unlock_page(page);
1312 
1313 retry_journal:
1314         handle = ext4_journal_start(inode, EXT4_HT_WRITE_PAGE, needed_blocks);
1315         if (IS_ERR(handle)) {
1316                 put_page(page);
1317                 return PTR_ERR(handle);
1318         }
1319 
1320         lock_page(page);
1321         if (page->mapping != mapping) {
1322                 /* The page got truncated from under us */
1323                 unlock_page(page);
1324                 put_page(page);
1325                 ext4_journal_stop(handle);
1326                 goto retry_grab;
1327         }
1328         /* In case writeback began while the page was unlocked */
1329         wait_for_stable_page(page);
1330 
1331 #ifdef CONFIG_FS_ENCRYPTION
1332         if (ext4_should_dioread_nolock(inode))
1333                 ret = ext4_block_write_begin(page, pos, len,
1334                                              ext4_get_block_unwritten);
1335         else
1336                 ret = ext4_block_write_begin(page, pos, len,
1337                                              ext4_get_block);
1338 #else
1339         if (ext4_should_dioread_nolock(inode))
1340                 ret = __block_write_begin(page, pos, len,
1341                                           ext4_get_block_unwritten);
1342         else
1343                 ret = __block_write_begin(page, pos, len, ext4_get_block);
1344 #endif
1345         if (!ret && ext4_should_journal_data(inode)) {
1346                 ret = ext4_walk_page_buffers(handle, page_buffers(page),
1347                                              from, to, NULL,
1348                                              do_journal_get_write_access);
1349         }
1350 
1351         if (ret) {
1352                 bool extended = (pos + len > inode->i_size) &&
1353                                 !ext4_verity_in_progress(inode);
1354 
1355                 unlock_page(page);
1356                 /*
1357                  * __block_write_begin may have instantiated a few blocks
1358                  * outside i_size.  Trim these off again. Don't need
1359                  * i_size_read because we hold i_mutex.
1360                  *
1361                  * Add inode to orphan list in case we crash before
1362                  * truncate finishes
1363                  */
1364                 if (extended && ext4_can_truncate(inode))
1365                         ext4_orphan_add(handle, inode);
1366 
1367                 ext4_journal_stop(handle);
1368                 if (extended) {
1369                         ext4_truncate_failed_write(inode);
1370                         /*
1371                          * If truncate failed early the inode might
1372                          * still be on the orphan list; we need to
1373                          * make sure the inode is removed from the
1374                          * orphan list in that case.
1375                          */
1376                         if (inode->i_nlink)
1377                                 ext4_orphan_del(NULL, inode);
1378                 }
1379 
1380                 if (ret == -ENOSPC &&
1381                     ext4_should_retry_alloc(inode->i_sb, &retries))
1382                         goto retry_journal;
1383                 put_page(page);
1384                 return ret;
1385         }
1386         *pagep = page;
1387         return ret;
1388 }
1389 
1390 /* For write_end() in data=journal mode */
1391 static int write_end_fn(handle_t *handle, struct buffer_head *bh)
1392 {
1393         int ret;
1394         if (!buffer_mapped(bh) || buffer_freed(bh))
1395                 return 0;
1396         set_buffer_uptodate(bh);
1397         ret = ext4_handle_dirty_metadata(handle, NULL, bh);
1398         clear_buffer_meta(bh);
1399         clear_buffer_prio(bh);
1400         return ret;
1401 }
1402 
1403 /*
1404  * We need to pick up the new inode size which generic_commit_write gave us.
1405  * `file' can be NULL - e.g., when called from page_symlink().
1406  *
1407  * ext4 never places buffers on inode->i_mapping->private_list.  Metadata
1408  * buffers are managed internally.
1409  */
1410 static int ext4_write_end(struct file *file,
1411                           struct address_space *mapping,
1412                           loff_t pos, unsigned len, unsigned copied,
1413                           struct page *page, void *fsdata)
1414 {
1415         handle_t *handle = ext4_journal_current_handle();
1416         struct inode *inode = mapping->host;
1417         loff_t old_size = inode->i_size;
1418         int ret = 0, ret2;
1419         int i_size_changed = 0;
1420         int inline_data = ext4_has_inline_data(inode);
1421         bool verity = ext4_verity_in_progress(inode);
1422 
1423         trace_ext4_write_end(inode, pos, len, copied);
1424         if (inline_data) {
1425                 ret = ext4_write_inline_data_end(inode, pos, len,
1426                                                  copied, page);
1427                 if (ret < 0) {
1428                         unlock_page(page);
1429                         put_page(page);
1430                         goto errout;
1431                 }
1432                 copied = ret;
1433         } else
1434                 copied = block_write_end(file, mapping, pos,
1435                                          len, copied, page, fsdata);
1436         /*
1437          * it's important to update i_size while still holding page lock:
1438          * page writeout could otherwise come in and zero beyond i_size.
1439          *
1440          * If FS_IOC_ENABLE_VERITY is running on this inode, then Merkle tree
1441          * blocks are being written past EOF, so skip the i_size update.
1442          */
1443         if (!verity)
1444                 i_size_changed = ext4_update_inode_size(inode, pos + copied);
1445         unlock_page(page);
1446         put_page(page);
1447 
1448         if (old_size < pos && !verity)
1449                 pagecache_isize_extended(inode, old_size, pos);
1450         /*
1451          * Don't mark the inode dirty under page lock. First, it unnecessarily
1452          * makes the holding time of page lock longer. Second, it forces lock
1453          * ordering of page lock and transaction start for journaling
1454          * filesystems.
1455          */
1456         if (i_size_changed || inline_data)
1457                 ext4_mark_inode_dirty(handle, inode);
1458 
1459         if (pos + len > inode->i_size && !verity && ext4_can_truncate(inode))
1460                 /* If we have allocated more blocks than we copied,
1461                  * some blocks will lie outside inode->i_size, so
1462                  * truncate them.
1463                  */
1464                 ext4_orphan_add(handle, inode);
1465 errout:
1466         ret2 = ext4_journal_stop(handle);
1467         if (!ret)
1468                 ret = ret2;
1469 
1470         if (pos + len > inode->i_size && !verity) {
1471                 ext4_truncate_failed_write(inode);
1472                 /*
1473                  * If truncate failed early the inode might still be
1474                  * on the orphan list; we need to make sure the inode
1475                  * is removed from the orphan list in that case.
1476                  */
1477                 if (inode->i_nlink)
1478                         ext4_orphan_del(NULL, inode);
1479         }
1480 
1481         return ret ? ret : copied;
1482 }
1483 
1484 /*
1485  * This is a private version of page_zero_new_buffers() which doesn't
1486  * set the buffer to be dirty, since in data=journal mode we need
1487  * to call ext4_handle_dirty_metadata() instead.
1488  */
1489 static void ext4_journalled_zero_new_buffers(handle_t *handle,
1490                                             struct page *page,
1491                                             unsigned from, unsigned to)
1492 {
1493         unsigned int block_start = 0, block_end;
1494         struct buffer_head *head, *bh;
1495 
1496         bh = head = page_buffers(page);
1497         do {
1498                 block_end = block_start + bh->b_size;
1499                 if (buffer_new(bh)) {
1500                         if (block_end > from && block_start < to) {
1501                                 if (!PageUptodate(page)) {
1502                                         unsigned start, size;
1503 
1504                                         start = max(from, block_start);
1505                                         size = min(to, block_end) - start;
1506 
1507                                         zero_user(page, start, size);
1508                                         write_end_fn(handle, bh);
1509                                 }
1510                                 clear_buffer_new(bh);
1511                         }
1512                 }
1513                 block_start = block_end;
1514                 bh = bh->b_this_page;
1515         } while (bh != head);
1516 }
1517 
1518 static int ext4_journalled_write_end(struct file *file,
1519                                      struct address_space *mapping,
1520                                      loff_t pos, unsigned len, unsigned copied,
1521                                      struct page *page, void *fsdata)
1522 {
1523         handle_t *handle = ext4_journal_current_handle();
1524         struct inode *inode = mapping->host;
1525         loff_t old_size = inode->i_size;
1526         int ret = 0, ret2;
1527         int partial = 0;
1528         unsigned from, to;
1529         int size_changed = 0;
1530         int inline_data = ext4_has_inline_data(inode);
1531         bool verity = ext4_verity_in_progress(inode);
1532 
1533         trace_ext4_journalled_write_end(inode, pos, len, copied);
1534         from = pos & (PAGE_SIZE - 1);
1535         to = from + len;
1536 
1537         BUG_ON(!ext4_handle_valid(handle));
1538 
1539         if (inline_data) {
1540                 ret = ext4_write_inline_data_end(inode, pos, len,
1541                                                  copied, page);
1542                 if (ret < 0) {
1543                         unlock_page(page);
1544                         put_page(page);
1545                         goto errout;
1546                 }
1547                 copied = ret;
1548         } else if (unlikely(copied < len) && !PageUptodate(page)) {
1549                 copied = 0;
1550                 ext4_journalled_zero_new_buffers(handle, page, from, to);
1551         } else {
1552                 if (unlikely(copied < len))
1553                         ext4_journalled_zero_new_buffers(handle, page,
1554                                                          from + copied, to);
1555                 ret = ext4_walk_page_buffers(handle, page_buffers(page), from,
1556                                              from + copied, &partial,
1557                                              write_end_fn);
1558                 if (!partial)
1559                         SetPageUptodate(page);
1560         }
1561         if (!verity)
1562                 size_changed = ext4_update_inode_size(inode, pos + copied);
1563         ext4_set_inode_state(inode, EXT4_STATE_JDATA);
1564         EXT4_I(inode)->i_datasync_tid = handle->h_transaction->t_tid;
1565         unlock_page(page);
1566         put_page(page);
1567 
1568         if (old_size < pos && !verity)
1569                 pagecache_isize_extended(inode, old_size, pos);
1570 
1571         if (size_changed || inline_data) {
1572                 ret2 = ext4_mark_inode_dirty(handle, inode);
1573                 if (!ret)
1574                         ret = ret2;
1575         }
1576 
1577         if (pos + len > inode->i_size && !verity && ext4_can_truncate(inode))
1578                 /* If we have allocated more blocks than we copied,
1579                  * some blocks will lie outside inode->i_size, so
1580                  * truncate them.
1581                  */
1582                 ext4_orphan_add(handle, inode);
1583 
1584 errout:
1585         ret2 = ext4_journal_stop(handle);
1586         if (!ret)
1587                 ret = ret2;
1588         if (pos + len > inode->i_size && !verity) {
1589                 ext4_truncate_failed_write(inode);
1590                 /*
1591                  * If truncate failed early the inode might still be
1592                  * on the orphan list; we need to make sure the inode
1593                  * is removed from the orphan list in that case.
1594                  */
1595                 if (inode->i_nlink)
1596                         ext4_orphan_del(NULL, inode);
1597         }
1598 
1599         return ret ? ret : copied;
1600 }
1601 
1602 /*
1603  * Reserve space for a single cluster
1604  */
1605 static int ext4_da_reserve_space(struct inode *inode)
1606 {
1607         struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
1608         struct ext4_inode_info *ei = EXT4_I(inode);
1609         int ret;
1610 
1611         /*
1612          * We will charge metadata quota at writeout time; this saves
1613          * us from metadata over-estimation, though we may go over by
1614          * a small amount in the end.  Here we just reserve for data.
1615          */
1616         ret = dquot_reserve_block(inode, EXT4_C2B(sbi, 1));
1617         if (ret)
1618                 return ret;
1619 
1620         spin_lock(&ei->i_block_reservation_lock);
1621         if (ext4_claim_free_clusters(sbi, 1, 0)) {
1622                 spin_unlock(&ei->i_block_reservation_lock);
1623                 dquot_release_reservation_block(inode, EXT4_C2B(sbi, 1));
1624                 return -ENOSPC;
1625         }
1626         ei->i_reserved_data_blocks++;
1627         trace_ext4_da_reserve_space(inode);
1628         spin_unlock(&ei->i_block_reservation_lock);
1629 
1630         return 0;       /* success */
1631 }
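/*
 * Usage sketch (illustrative): the delayed-allocation write path reserves
 * one cluster per newly dirtied cluster via ext4_da_reserve_space() (see
 * ext4_insert_delayed_block() below); if the pages are invalidated before
 * any blocks are actually allocated, the reservations are handed back
 * through ext4_da_release_space().  Quota is charged in filesystem blocks,
 * hence the EXT4_C2B() conversions between clusters and blocks.
 */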
1632 
1633 void ext4_da_release_space(struct inode *inode, int to_free)
1634 {
1635         struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
1636         struct ext4_inode_info *ei = EXT4_I(inode);
1637 
1638         if (!to_free)
1639                 return;         /* Nothing to release, exit */
1640 
1641         spin_lock(&EXT4_I(inode)->i_block_reservation_lock);
1642 
1643         trace_ext4_da_release_space(inode, to_free);
1644         if (unlikely(to_free > ei->i_reserved_data_blocks)) {
1645                 /*
1646                  * If there aren't enough reserved blocks, then the
1647                  * counter is messed up somewhere.  Since this
1648                  * function is called from the invalidatepage path, it's
1649                  * harmless to return without any action.
1650                  */
1651                 ext4_warning(inode->i_sb, "ext4_da_release_space: "
1652                          "ino %lu, to_free %d with only %d reserved "
1653                          "data blocks", inode->i_ino, to_free,
1654                          ei->i_reserved_data_blocks);
1655                 WARN_ON(1);
1656                 to_free = ei->i_reserved_data_blocks;
1657         }
1658         ei->i_reserved_data_blocks -= to_free;
1659 
1660         /* update fs dirty data blocks counter */
1661         percpu_counter_sub(&sbi->s_dirtyclusters_counter, to_free);
1662 
1663         spin_unlock(&EXT4_I(inode)->i_block_reservation_lock);
1664 
1665         dquot_release_reservation_block(inode, EXT4_C2B(sbi, to_free));
1666 }
1667 
1668 /*
1669  * Delayed allocation stuff
1670  */
1671 
1672 struct mpage_da_data {
1673         struct inode *inode;
1674         struct writeback_control *wbc;
1675 
1676         pgoff_t first_page;     /* The first page to write */
1677         pgoff_t next_page;      /* Current page to examine */
1678         pgoff_t last_page;      /* Last page to examine */
1679         /*
1680          * Extent to map - this can be after first_page because that can be
1681          * fully mapped. We somewhat abuse m_flags to store whether the extent
1682          * is delalloc or unwritten.
1683          */
1684         struct ext4_map_blocks map;
1685         struct ext4_io_submit io_submit;        /* IO submission data */
1686         unsigned int do_map:1;
1687 };
1688 
1689 static void mpage_release_unused_pages(struct mpage_da_data *mpd,
1690                                        bool invalidate)
1691 {
1692         int nr_pages, i;
1693         pgoff_t index, end;
1694         struct pagevec pvec;
1695         struct inode *inode = mpd->inode;
1696         struct address_space *mapping = inode->i_mapping;
1697 
1698         /* This is necessary when next_page == 0. */
1699         if (mpd->first_page >= mpd->next_page)
1700                 return;
1701 
1702         index = mpd->first_page;
1703         end   = mpd->next_page - 1;
1704         if (invalidate) {
1705                 ext4_lblk_t start, last;
1706                 start = index << (PAGE_SHIFT - inode->i_blkbits);
1707                 last = end << (PAGE_SHIFT - inode->i_blkbits);
1708                 ext4_es_remove_extent(inode, start, last - start + 1);
1709         }
1710 
1711         pagevec_init(&pvec);
1712         while (index <= end) {
1713                 nr_pages = pagevec_lookup_range(&pvec, mapping, &index, end);
1714                 if (nr_pages == 0)
1715                         break;
1716                 for (i = 0; i < nr_pages; i++) {
1717                         struct page *page = pvec.pages[i];
1718 
1719                         BUG_ON(!PageLocked(page));
1720                         BUG_ON(PageWriteback(page));
1721                         if (invalidate) {
1722                                 if (page_mapped(page))
1723                                         clear_page_dirty_for_io(page);
1724                                 block_invalidatepage(page, 0, PAGE_SIZE);
1725                                 ClearPageUptodate(page);
1726                         }
1727                         unlock_page(page);
1728                 }
1729                 pagevec_release(&pvec);
1730         }
1731 }
1732 
1733 static void ext4_print_free_blocks(struct inode *inode)
1734 {
1735         struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
1736         struct super_block *sb = inode->i_sb;
1737         struct ext4_inode_info *ei = EXT4_I(inode);
1738 
1739         ext4_msg(sb, KERN_CRIT, "Total free blocks count %lld",
1740                EXT4_C2B(EXT4_SB(inode->i_sb),
1741                         ext4_count_free_clusters(sb)));
1742         ext4_msg(sb, KERN_CRIT, "Free/Dirty block details");
1743         ext4_msg(sb, KERN_CRIT, "free_blocks=%lld",
1744                (long long) EXT4_C2B(EXT4_SB(sb),
1745                 percpu_counter_sum(&sbi->s_freeclusters_counter)));
1746         ext4_msg(sb, KERN_CRIT, "dirty_blocks=%lld",
1747                (long long) EXT4_C2B(EXT4_SB(sb),
1748                 percpu_counter_sum(&sbi->s_dirtyclusters_counter)));
1749         ext4_msg(sb, KERN_CRIT, "Block reservation details");
1750         ext4_msg(sb, KERN_CRIT, "i_reserved_data_blocks=%u",
1751                  ei->i_reserved_data_blocks);
1752         return;
1753 }
1754 
1755 static int ext4_bh_delay_or_unwritten(handle_t *handle, struct buffer_head *bh)
1756 {
1757         return (buffer_delay(bh) || buffer_unwritten(bh)) && buffer_dirty(bh);
1758 }
1759 
1760 /*
1761  * ext4_insert_delayed_block - adds a delayed block to the extents status
1762  *                             tree, incrementing the reserved cluster/block
1763  *                             count or making a pending reservation
1764  *                             where needed
1765  *
1766  * @inode - file containing the newly added block
1767  * @lblk - logical block to be added
1768  *
1769  * Returns 0 on success, negative error code on failure.
1770  */
1771 static int ext4_insert_delayed_block(struct inode *inode, ext4_lblk_t lblk)
1772 {
1773         struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
1774         int ret;
1775         bool allocated = false;
1776 
1777         /*
1778          * If the cluster containing lblk is shared with a delayed,
1779          * written, or unwritten extent in a bigalloc file system, it's
1780          * already been accounted for and does not need to be reserved.
1781          * A pending reservation must be made for the cluster if it's
1782          * shared with a written or unwritten extent and doesn't already
1783          * have one.  Written and unwritten extents can be purged from the
1784          * extents status tree if the system is under memory pressure, so
1785          * it's necessary to examine the extent tree if a search of the
1786          * extents status tree doesn't get a match.
1787          */
1788         if (sbi->s_cluster_ratio == 1) {
1789                 ret = ext4_da_reserve_space(inode);
1790                 if (ret != 0)   /* ENOSPC */
1791                         goto errout;
1792         } else {   /* bigalloc */
1793                 if (!ext4_es_scan_clu(inode, &ext4_es_is_delonly, lblk)) {
1794                         if (!ext4_es_scan_clu(inode,
1795                                               &ext4_es_is_mapped, lblk)) {
1796                                 ret = ext4_clu_mapped(inode,
1797                                                       EXT4_B2C(sbi, lblk));
1798                                 if (ret < 0)
1799                                         goto errout;
1800                                 if (ret == 0) {
1801                                         ret = ext4_da_reserve_space(inode);
1802                                         if (ret != 0)   /* ENOSPC */
1803                                                 goto errout;
1804                                 } else {
1805                                         allocated = true;
1806                                 }
1807                         } else {
1808                                 allocated = true;
1809                         }
1810                 }
1811         }
1812 
1813         ret = ext4_es_insert_delayed_block(inode, lblk, allocated);
1814 
1815 errout:
1816         return ret;
1817 }
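/*
 * Worked example (illustrative): on a bigalloc filesystem with
 * s_cluster_ratio == 16, a delayed write to logical block 35 falls in
 * cluster EXT4_B2C(sbi, 35) == 2.  If the extents status tree already holds
 * a delayed-only block in that cluster, no new reservation is needed; if
 * the cluster is backed by a written or unwritten extent (found in the
 * status tree or via ext4_clu_mapped()), the block is inserted with
 * "allocated" set so only a pending reservation is recorded; otherwise a
 * fresh cluster reservation is taken with ext4_da_reserve_space().
 */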
1818 
1819 /*
1820  * This function grabs code from the very beginning of
1821  * ext4_map_blocks, but assumes that the caller is from delayed write
1822  * time. This function looks up the requested blocks and sets the
1823  * buffer delay bit under the protection of i_data_sem.
1824  */
1825 static int ext4_da_map_blocks(struct inode *inode, sector_t iblock,
1826                               struct ext4_map_blocks *map,
1827                               struct buffer_head *bh)
1828 {
1829         struct extent_status es;
1830         int retval;
1831         sector_t invalid_block = ~((sector_t) 0xffff);
1832 #ifdef ES_AGGRESSIVE_TEST
1833         struct ext4_map_blocks orig_map;
1834 
1835         memcpy(&orig_map, map, sizeof(*map));
1836 #endif
1837 
1838         if (invalid_block < ext4_blocks_count(EXT4_SB(inode->i_sb)->s_es))
1839                 invalid_block = ~0;
1840 
1841         map->m_flags = 0;
1842         ext_debug("ext4_da_map_blocks(): inode %lu, max_blocks %u,"
1843                   "logical block %lu\n", inode->i_ino, map->m_len,
1844                   (unsigned long) map->m_lblk);
1845 
1846         /* Lookup extent status tree firstly */
1847         if (ext4_es_lookup_extent(inode, iblock, NULL, &es)) {
1848                 if (ext4_es_is_hole(&es)) {
1849                         retval = 0;
1850                         down_read(&EXT4_I(inode)->i_data_sem);
1851                         goto add_delayed;
1852                 }
1853 
1854                 /*
1855                  * Delayed extent could be allocated by fallocate.
1856                  * So we need to check it.
1857                  */
1858                 if (ext4_es_is_delayed(&es) && !ext4_es_is_unwritten(&es)) {
1859                         map_bh(bh, inode->i_sb, invalid_block);
1860                         set_buffer_new(bh);
1861                         set_buffer_delay(bh);
1862                         return 0;
1863                 }
1864 
1865                 map->m_pblk = ext4_es_pblock(&es) + iblock - es.es_lblk;
1866                 retval = es.es_len - (iblock - es.es_lblk);
1867                 if (retval > map->m_len)
1868                         retval = map->m_len;
1869                 map->m_len = retval;
1870                 if (ext4_es_is_written(&es))
1871                         map->m_flags |= EXT4_MAP_MAPPED;
1872                 else if (ext4_es_is_unwritten(&es))
1873                         map->m_flags |= EXT4_MAP_UNWRITTEN;
1874                 else
1875                         BUG();
1876 
1877 #ifdef ES_AGGRESSIVE_TEST
1878                 ext4_map_blocks_es_recheck(NULL, inode, map, &orig_map, 0);
1879 #endif
1880                 return retval;
1881         }
1882 
1883         /*
1884          * Try to see if we can get the block without requesting a new
1885          * file system block.
1886          */
1887         down_read(&EXT4_I(inode)->i_data_sem);
1888         if (ext4_has_inline_data(inode))
1889                 retval = 0;
1890         else if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))
1891                 retval = ext4_ext_map_blocks(NULL, inode, map, 0);
1892         else
1893                 retval = ext4_ind_map_blocks(NULL, inode, map, 0);
1894 
1895 add_delayed:
1896         if (retval == 0) {
1897                 int ret;
1898 
1899                 /*
1900                  * XXX: __block_prepare_write() unmaps passed block,
1901                  * is it OK?
1902                  */
1903 
1904                 ret = ext4_insert_delayed_block(inode, map->m_lblk);
1905                 if (ret != 0) {
1906                         retval = ret;
1907                         goto out_unlock;
1908                 }
1909 
1910                 map_bh(bh, inode->i_sb, invalid_block);
1911                 set_buffer_new(bh);
1912                 set_buffer_delay(bh);
1913         } else if (retval > 0) {
1914                 int ret;
1915                 unsigned int status;
1916 
1917                 if (unlikely(retval != map->m_len)) {
1918                         ext4_warning(inode->i_sb,
1919                                      "ES len assertion failed for inode "
1920                                      "%lu: retval %d != map->m_len %d",
1921                                      inode->i_ino, retval, map->m_len);
1922                         WARN_ON(1);
1923                 }
1924 
1925                 status = map->m_flags & EXT4_MAP_UNWRITTEN ?
1926                                 EXTENT_STATUS_UNWRITTEN : EXTENT_STATUS_WRITTEN;
1927                 ret = ext4_es_insert_extent(inode, map->m_lblk, map->m_len,
1928                                             map->m_pblk, status);
1929                 if (ret != 0)
1930                         retval = ret;
1931         }
1932 
1933 out_unlock:
1934         up_read((&EXT4_I(inode)->i_data_sem));
1935 
1936         return retval;
1937 }
1938 
1939 /*
1940  * This is a special get_block_t callback which is used by
1941  * ext4_da_write_begin().  It will either return a mapped block or
1942  * reserve space for a single block.
1943  *
1944  * For a delayed buffer_head we have BH_Mapped, BH_New and BH_Delay set.
1945  * We also have b_blocknr = -1 and b_bdev initialized properly.
1946  *
1947  * For an unwritten buffer_head we have BH_Mapped, BH_New and BH_Unwritten set.
1948  * We also have b_blocknr = the physical block backing the unwritten extent
1949  * and b_bdev initialized properly.
1950  */
1951 int ext4_da_get_block_prep(struct inode *inode, sector_t iblock,
1952                            struct buffer_head *bh, int create)
1953 {
1954         struct ext4_map_blocks map;
1955         int ret = 0;
1956 
1957         BUG_ON(create == 0);
1958         BUG_ON(bh->b_size != inode->i_sb->s_blocksize);
1959 
1960         map.m_lblk = iblock;
1961         map.m_len = 1;
1962 
1963         /*
1964          * First, we need to know whether the block is already allocated;
1965          * preallocated blocks are unmapped but should be treated
1966          * the same as allocated blocks.
1967          */
1968         ret = ext4_da_map_blocks(inode, iblock, &map, bh);
1969         if (ret <= 0)
1970                 return ret;
1971 
1972         map_bh(bh, inode->i_sb, map.m_pblk);
1973         ext4_update_bh_state(bh, map.m_flags);
1974 
1975         if (buffer_unwritten(bh)) {
1976                 /* A delayed write to an unwritten bh should be marked
1977                  * new and mapped.  Mapped ensures that we don't call
1978                  * get_block multiple times when we write to the same
1979                  * offset, and new ensures that we properly zero out
1980                  * the block for a partial write.
1981                  */
1982                 set_buffer_new(bh);
1983                 set_buffer_mapped(bh);
1984         }
1985         return 0;
1986 }
1987 
1988 static int bget_one(handle_t *handle, struct buffer_head *bh)
1989 {
1990         get_bh(bh);
1991         return 0;
1992 }
1993 
1994 static int bput_one(handle_t *handle, struct buffer_head *bh)
1995 {
1996         put_bh(bh);
1997         return 0;
1998 }
1999 
2000 static int __ext4_journalled_writepage(struct page *page,
2001                                        unsigned int len)
2002 {
2003         struct address_space *mapping = page->mapping;
2004         struct inode *inode = mapping->host;
2005         struct buffer_head *page_bufs = NULL;
2006         handle_t *handle = NULL;
2007         int ret = 0, err = 0;
2008         int inline_data = ext4_has_inline_data(inode);
2009         struct buffer_head *inode_bh = NULL;
2010 
2011         ClearPageChecked(page);
2012 
2013         if (inline_data) {
2014                 BUG_ON(page->index != 0);
2015                 BUG_ON(len > ext4_get_max_inline_size(inode));
2016                 inode_bh = ext4_journalled_write_inline_data(inode, len, page);
2017                 if (inode_bh == NULL)
2018                         goto out;
2019         } else {
2020                 page_bufs = page_buffers(page);
2021                 if (!page_bufs) {
2022                         BUG();
2023                         goto out;
2024                 }
2025                 ext4_walk_page_buffers(handle, page_bufs, 0, len,
2026                                        NULL, bget_one);
2027         }
2028         /*
2029          * We need to release the page lock before we start the
2030          * journal, so grab a reference so the page won't disappear
2031          * out from under us.
2032          */
2033         get_page(page);
2034         unlock_page(page);
2035 
2036         handle = ext4_journal_start(inode, EXT4_HT_WRITE_PAGE,
2037                                     ext4_writepage_trans_blocks(inode));
2038         if (IS_ERR(handle)) {
2039                 ret = PTR_ERR(handle);
2040                 put_page(page);
2041                 goto out_no_pagelock;
2042         }
2043         BUG_ON(!ext4_handle_valid(handle));
2044 
2045         lock_page(page);
2046         put_page(page);
2047         if (page->mapping != mapping) {
2048                 /* The page got truncated from under us */
2049                 ext4_journal_stop(handle);
2050                 ret = 0;
2051                 goto out;
2052         }
2053 
2054         if (inline_data) {
2055                 ret = ext4_mark_inode_dirty(handle, inode);
2056         } else {
2057                 ret = ext4_walk_page_buffers(handle, page_bufs, 0, len, NULL,
2058                                              do_journal_get_write_access);
2059 
2060                 err = ext4_walk_page_buffers(handle, page_bufs, 0, len, NULL,
2061                                              write_end_fn);
2062         }
2063         if (ret == 0)
2064                 ret = err;
2065         EXT4_I(inode)->i_datasync_tid = handle->h_transaction->t_tid;
2066         err = ext4_journal_stop(handle);
2067         if (!ret)
2068                 ret = err;
2069 
2070         if (!ext4_has_inline_data(inode))
2071                 ext4_walk_page_buffers(NULL, page_bufs, 0, len,
2072                                        NULL, bput_one);
2073         ext4_set_inode_state(inode, EXT4_STATE_JDATA);
2074 out:
2075         unlock_page(page);
2076 out_no_pagelock:
2077         brelse(inode_bh);
2078         return ret;
2079 }
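/*
 * Note on the bget_one()/bput_one() pair used above: the extra buffer_head
 * references are taken while the page is still locked and dropped only
 * after the buffers have been journalled, so the buffers cannot be freed
 * during the window where the page lock is released to start the handle.
 */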
2080 
2081 /*
2082  * Note that we don't need to start a transaction unless we're journaling data
2083  * because we should have holes filled from ext4_page_mkwrite(). We don't even
2084  * need to add the inode to the transaction's list in ordered mode because if
2085  * we are writing back data added by write(), the inode is already there and if
2086  * we are writing back data modified via mmap(), no one guarantees in which
2087  * transaction the data will hit the disk. In case we are journaling data, we
2088  * cannot start a transaction directly because a transaction start ranks above
2089  * the page lock, so we have to do some magic.
2090  *
2091  * This function can get called via...
2092  *   - ext4_writepages after taking page lock (have journal handle)
2093  *   - journal_submit_inode_data_buffers (no journal handle)
2094  *   - shrink_page_list via the kswapd/direct reclaim (no journal handle)
2095  *   - grab_page_cache when doing write_begin (have journal handle)
2096  *
2097  * We don't do any block allocation in this function. If we have page with
2098  * multiple blocks we need to write those buffer_heads that are mapped. This
2099  * is important for mmap-based writes. So if, with a blocksize of 1K, we do
2100  * truncate(f, 1024);
2101  * a = mmap(f, 0, 4096);
2102  * a[0] = 'a';
2103  * truncate(f, 4096);
2104  * then the first buffer_head in the page is mapped via the page_mkwrite
2105  * callback but the other buffer_heads are unmapped yet dirty (dirtied via
2106  * do_wp_page). So writepage should write the first block. If we modify
2107  * the mmap area beyond 1024 we will again get a page_fault and the
2108  * page_mkwrite callback will do the block allocation and mark the
2109  * buffer_heads mapped.
2110  *
2111  * We redirty the page if it has any buffer_heads that are either delayed
2112  * or unwritten.
2113  *
2114  * We can get recursively called as shown below.
2115  *
2116  *      ext4_writepage() -> kmalloc() -> __alloc_pages() -> page_launder() ->
2117  *              ext4_writepage()
2118  *
2119  * But since we don't do any block allocation we should not deadlock.
2120  * The page also has the dirty flag cleared, so we don't get a recursive page lock.
2121  */
2122 static int ext4_writepage(struct page *page,
2123                           struct writeback_control *wbc)
2124 {
2125         int ret = 0;
2126         loff_t size;
2127         unsigned int len;
2128         struct buffer_head *page_bufs = NULL;
2129         struct inode *inode = page->mapping->host;
2130         struct ext4_io_submit io_submit;
2131         bool keep_towrite = false;
2132 
2133         if (unlikely(ext4_forced_shutdown(EXT4_SB(inode->i_sb)))) {
2134                 inode->i_mapping->a_ops->invalidatepage(page, 0, PAGE_SIZE);
2135                 unlock_page(page);
2136                 return -EIO;
2137         }
2138 
2139         trace_ext4_writepage(page);
2140         size = i_size_read(inode);
2141         if (page->index == size >> PAGE_SHIFT &&
2142             !ext4_verity_in_progress(inode))
2143                 len = size & ~PAGE_MASK;
2144         else
2145                 len = PAGE_SIZE;
2146 
2147         page_bufs = page_buffers(page);
2148         /*
2149          * We cannot do block allocation or other extent handling in this
2150          * function. If there are buffers needing that, we have to redirty
2151          * the page. But we may reach here when we do a journal commit via
2152          * journal_submit_inode_data_buffers() and in that case we must write
2153          * allocated buffers to achieve data=ordered mode guarantees.
2154          *
2155          * Also, if there is only one buffer per page (the fs block
2156          * size == the page size), if one buffer needs block
2157          * allocation or needs to modify the extent tree to clear the
2158          * unwritten flag, we know that the page can't be written at
2159          * all, so we might as well refuse the write immediately.
2160          * Unfortunately if the block size != page size, we can't as
2161          * easily detect this case using ext4_walk_page_buffers(), but
2162          * for the extremely common case, this is an optimization that
2163          * skips a useless round trip through ext4_bio_write_page().
2164          */
2165         if (ext4_walk_page_buffers(NULL, page_bufs, 0, len, NULL,
2166                                    ext4_bh_delay_or_unwritten)) {
2167                 redirty_page_for_writepage(wbc, page);
2168                 if ((current->flags & PF_MEMALLOC) ||
2169                     (inode->i_sb->s_blocksize == PAGE_SIZE)) {
2170                         /*
2171                          * For memory cleaning there's no point in writing only
2172                          * some buffers. So just bail out. Warn if we came here
2173                          * from direct reclaim.
2174                          */
2175                         WARN_ON_ONCE((current->flags & (PF_MEMALLOC|PF_KSWAPD))
2176                                                         == PF_MEMALLOC);
2177                         unlock_page(page);
2178                         return 0;
2179                 }
2180                 keep_towrite = true;
2181         }
2182 
2183         if (PageChecked(page) && ext4_should_journal_data(inode))
2184                 /*
2185                  * It's mmapped pagecache.  Add buffers and journal it.  There
2186                  * doesn't seem much point in redirtying the page here.
2187                  */
2188                 return __ext4_journalled_writepage(page, len);
2189 
2190         ext4_io_submit_init(&io_submit, wbc);
2191         io_submit.io_end = ext4_init_io_end(inode, GFP_NOFS);
2192         if (!io_submit.io_end) {
2193                 redirty_page_for_writepage(wbc, page);
2194                 unlock_page(page);
2195                 return -ENOMEM;
2196         }
2197         ret = ext4_bio_write_page(&io_submit, page, len, wbc, keep_towrite);
2198         ext4_io_submit(&io_submit);
2199         /* Drop io_end reference we got from init */
2200         ext4_put_io_end_defer(io_submit.io_end);
2201         return ret;
2202 }
2203 
2204 static int mpage_submit_page(struct mpage_da_data *mpd, struct page *page)
2205 {
2206         int len;
2207         loff_t size;
2208         int err;
2209 
2210         BUG_ON(page->index != mpd->first_page);
2211         clear_page_dirty_for_io(page);
2212         /*
2213          * We have to be very careful here!  Nothing protects writeback path
2214          * against i_size changes and the page can be writeably mapped into
2215          * page tables. So an application can be growing i_size and writing
2216          * data through mmap while writeback runs. clear_page_dirty_for_io()
2217          * write-protects our page in page tables and the page cannot get
2218          * written to again until we release page lock. So only after
2219          * clear_page_dirty_for_io() we are safe to sample i_size for
2220          * ext4_bio_write_page() to zero-out tail of the written page. We rely
2221          * on the barrier provided by TestClearPageDirty in
2222          * clear_page_dirty_for_io() to make sure i_size is really sampled only
2223          * after page tables are updated.
2224          */
2225         size = i_size_read(mpd->inode);
2226         if (page->index == size >> PAGE_SHIFT &&
2227             !ext4_verity_in_progress(mpd->inode))
2228                 len = size & ~PAGE_MASK;
2229         else
2230                 len = PAGE_SIZE;
2231         err = ext4_bio_write_page(&mpd->io_submit, page, len, mpd->wbc, false);
2232         if (!err)
2233                 mpd->wbc->nr_to_write--;
2234         mpd->first_page++;
2235 
2236         return err;
2237 }
2238 
2239 #define BH_FLAGS ((1 << BH_Unwritten) | (1 << BH_Delay))
2240 
2241 /*
2242  * mballoc gives us at most this number of blocks...
2243  * XXX: That seems to be only a limitation of ext4_mb_normalize_request().
2244  * The rest of mballoc seems to handle chunks up to full group size.
2245  */
2246 #define MAX_WRITEPAGES_EXTENT_LEN 2048
2247 
2248 /*
2249  * mpage_add_bh_to_extent - try to add bh to extent of blocks to map
2250  *
2251  * @mpd - extent of blocks
2252  * @lblk - logical number of the block in the file
2253  * @bh - buffer head we want to add to the extent
2254  *
2255  * The function is used to collect contiguous blocks in the same state. If the
2256  * buffer doesn't require mapping for writeback and we haven't started the
2257  * extent of buffers to map yet, the function returns 'true' immediately - the
2258  * caller can write the buffer right away. Otherwise the function returns true
2259  * if the block has been added to the extent, false if the block couldn't be
2260  * added.
2261  */
2262 static bool mpage_add_bh_to_extent(struct mpage_da_data *mpd, ext4_lblk_t lblk,
2263                                    struct buffer_head *bh)
2264 {
2265         struct ext4_map_blocks *map = &mpd->map;
2266 
2267         /* Buffer that doesn't need mapping for writeback? */
2268         if (!buffer_dirty(bh) || !buffer_mapped(bh) ||
2269             (!buffer_delay(bh) && !buffer_unwritten(bh))) {
2270                 /* So far no extent to map => we write the buffer right away */
2271                 if (map->m_len == 0)
2272                         return true;
2273                 return false;
2274         }
2275 
2276         /* First block in the extent? */
2277         if (map->m_len == 0) {
2278                 /* We cannot map unless handle is started... */
2279                 if (!mpd->do_map)
2280                         return false;
2281                 map->m_lblk = lblk;
2282                 map->m_len = 1;
2283                 map->m_flags = bh->b_state & BH_FLAGS;
2284                 return true;
2285         }
2286 
2287         /* Don't go larger than mballoc is willing to allocate */
2288         if (map->m_len >= MAX_WRITEPAGES_EXTENT_LEN)
2289                 return false;
2290 
2291         /* Can we merge the block to our big extent? */
2292         if (lblk == map->m_lblk + map->m_len &&
2293             (bh->b_state & BH_FLAGS) == map->m_flags) {
2294                 map->m_len++;
2295                 return true;
2296         }
2297         return false;
2298 }
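/*
 * Example (illustrative): with 1k blocks and a page whose four buffers are
 * all dirty and delayed, the first buffer starts the extent (m_lblk = lblk,
 * m_len = 1) and each following buffer grows it to m_len = 4, since the
 * logical blocks are consecutive and the BH_FLAGS state matches.  A clean,
 * already-mapped buffer seen while m_len == 0 instead returns true right
 * away so the caller can simply submit the page.
 */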
2299 
2300 /*
2301  * mpage_process_page_bufs - submit page buffers for IO or add them to extent
2302  *
2303  * @mpd - extent of blocks for mapping
2304  * @head - the first buffer in the page
2305  * @bh - buffer we should start processing from
2306  * @lblk - logical number of the block in the file corresponding to @bh
2307  *
2308  * Walk through page buffers from @bh up to @head (exclusive) and either submit
2309  * the page for IO if all buffers in this page were mapped and there's no
2310  * accumulated extent of buffers to map or add buffers in the page to the
2311  * extent of buffers to map. The function returns 1 if the caller can continue
2312  * by processing the next page, 0 if it should stop adding buffers to the
2313  * extent to map because we cannot extend it anymore. It can also return a
2314  * value < 0 in case of an error during IO submission.
2315  */
2316 static int mpage_process_page_bufs(struct mpage_da_data *mpd,
2317                                    struct buffer_head *head,
2318                                    struct buffer_head *bh,
2319                                    ext4_lblk_t lblk)
2320 {
2321         struct inode *inode = mpd->inode;
2322         int err;
2323         ext4_lblk_t blocks = (i_size_read(inode) + i_blocksize(inode) - 1)
2324                                                         >> inode->i_blkbits;
2325 
2326         if (ext4_verity_in_progress(inode))
2327                 blocks = EXT_MAX_BLOCKS;
2328 
2329         do {
2330                 BUG_ON(buffer_locked(bh));
2331 
2332                 if (lblk >= blocks || !mpage_add_bh_to_extent(mpd, lblk, bh)) {
2333                         /* Found extent to map? */
2334                         if (mpd->map.m_len)
2335                                 return 0;
2336                         /* Buffer needs mapping and handle is not started? */
2337                         if (!mpd->do_map)
2338                                 return 0;
2339                         /* Everything mapped so far and we hit EOF */
2340                         break;
2341                 }
2342         } while (lblk++, (bh = bh->b_this_page) != head);
2343         /* So far everything mapped? Submit the page for IO. */
2344         if (mpd->map.m_len == 0) {
2345                 err = mpage_submit_page(mpd, head->b_page);
2346                 if (err < 0)
2347                         return err;
2348         }
2349         return lblk < blocks;
2350 }
2351 
2352 /*
2353  * mpage_map_and_submit_buffers - update buffers corresponding to changed extent and
2354  *                     submit fully mapped pages for IO
2355  *
2356  * @mpd - description of extent to map, on return next extent to map
2357  *
2358  * Scan buffers corresponding to changed extent (we expect corresponding pages
2359  * to be already locked) and update buffer state according to new extent state.
2360  * We map delalloc buffers to their physical location, clear unwritten bits,
2361  * and mark buffers as uninit when we perform writes to unwritten extents
2362  * and do extent conversion after IO is finished. If the last page is not fully
2363  * mapped, we update @map to the next extent in the last page that needs
2364  * mapping. Otherwise we submit the page for IO.
2365  */
2366 static int mpage_map_and_submit_buffers(struct mpage_da_data *mpd)
2367 {
2368         struct pagevec pvec;
2369         int nr_pages, i;
2370         struct inode *inode = mpd->inode;
2371         struct buffer_head *head, *bh;
2372         int bpp_bits = PAGE_SHIFT - inode->i_blkbits;
2373         pgoff_t start, end;
2374         ext4_lblk_t lblk;
2375         sector_t pblock;
2376         int err;
2377 
2378         start = mpd->map.m_lblk >> bpp_bits;
2379         end = (mpd->map.m_lblk + mpd->map.m_len - 1) >> bpp_bits;
2380         lblk = start << bpp_bits;
2381         pblock = mpd->map.m_pblk;
2382 
2383         pagevec_init(&pvec);
2384         while (start <= end) {
2385                 nr_pages = pagevec_lookup_range(&pvec, inode->i_mapping,
2386                                                 &start, end);
2387                 if (nr_pages == 0)
2388                         break;
2389                 for (i = 0; i < nr_pages; i++) {
2390                         struct page *page = pvec.pages[i];
2391 
2392                         bh = head = page_buffers(page);
2393                         do {
2394                                 if (lblk < mpd->map.m_lblk)
2395                                         continue;
2396                                 if (lblk >= mpd->map.m_lblk + mpd->map.m_len) {
2397                                         /*
2398                                          * Buffer after end of mapped extent.
2399                                          * Find next buffer in the page to map.
2400                                          */
2401                                         mpd->map.m_len = 0;
2402                                         mpd->map.m_flags = 0;
2403                                         /*
2404                                          * FIXME: If dioread_nolock supports
2405                                          * blocksize < pagesize, we need to make
2406                                          * sure we add size mapped so far to
2407                                          * io_end->size as the following call
2408                                          * can submit the page for IO.
2409                                          */
2410                                         err = mpage_process_page_bufs(mpd, head,
2411                                                                       bh, lblk);
2412                                         pagevec_release(&pvec);
2413                                         if (err > 0)
2414                                                 err = 0;
2415                                         return err;
2416                                 }
2417                                 if (buffer_delay(bh)) {
2418                                         clear_buffer_delay(bh);
2419                                         bh->b_blocknr = pblock++;
2420                                 }
2421                                 clear_buffer_unwritten(bh);
2422                         } while (lblk++, (bh = bh->b_this_page) != head);
2423 
2424                         /*
2425                          * FIXME: This is going to break if dioread_nolock
2426                          * supports blocksize < pagesize as we will try to
2427                          * convert potentially unmapped parts of inode.
2428                          */
2429                         mpd->io_submit.io_end->size += PAGE_SIZE;
2430                         /* Page fully mapped - let IO run! */
2431                         err = mpage_submit_page(mpd, page);
2432                         if (err < 0) {
2433                                 pagevec_release(&pvec);
2434                                 return err;
2435                         }
2436                 }
2437                 pagevec_release(&pvec);
2438         }
2439         /* Extent fully mapped and matches with page boundary. We are done. */
2440         mpd->map.m_len = 0;
2441         mpd->map.m_flags = 0;
2442         return 0;
2443 }
2444 
2445 static int mpage_map_one_extent(handle_t *handle, struct mpage_da_data *mpd)
2446 {
2447         struct inode *inode = mpd->inode;
2448         struct ext4_map_blocks *map = &mpd->map;
2449         int get_blocks_flags;
2450         int err, dioread_nolock;
2451 
2452         trace_ext4_da_write_pages_extent(inode, map);
2453         /*
2454          * Call ext4_map_blocks() to allocate any delayed allocation blocks, or
2455          * to convert an unwritten extent to be initialized (in the case
2456          * where we have written into one or more preallocated blocks).  It is
2457          * possible that we're going to need more metadata blocks than
2458          * previously reserved. However we must not fail because we're in
2459          * previously reserved. However, we must not fail because we're in
2460          * writeback and there is nothing we can do about it, so a failure might
2461          * result in data loss.  Use reserved blocks to allocate metadata if
2462          *
2463          * We pass in the magic EXT4_GET_BLOCKS_DELALLOC_RESERVE if
2464          * the blocks in question are delalloc blocks.  This indicates
2465          * that the blocks and quotas have already been checked when
2466          * the data was copied into the page cache.
2467          */
2468         get_blocks_flags = EXT4_GET_BLOCKS_CREATE |
2469                            EXT4_GET_BLOCKS_METADATA_NOFAIL |
2470                            EXT4_GET_BLOCKS_IO_SUBMIT;
2471         dioread_nolock = ext4_should_dioread_nolock(inode);
2472         if (dioread_nolock)
2473                 get_blocks_flags |= EXT4_GET_BLOCKS_IO_CREATE_EXT;
2474         if (map->m_flags & (1 << BH_Delay))
2475                 get_blocks_flags |= EXT4_GET_BLOCKS_DELALLOC_RESERVE;
2476 
2477         err = ext4_map_blocks(handle, inode, map, get_blocks_flags);
2478         if (err < 0)
2479                 return err;
2480         if (dioread_nolock && (map->m_flags & EXT4_MAP_UNWRITTEN)) {
2481                 if (!mpd->io_submit.io_end->handle &&
2482                     ext4_handle_valid(handle)) {
2483                         mpd->io_submit.io_end->handle = handle->h_rsv_handle;
2484                         handle->h_rsv_handle = NULL;
2485                 }
2486                 ext4_set_io_unwritten_flag(inode, mpd->io_submit.io_end);
2487         }
2488 
2489         BUG_ON(map->m_len == 0);
2490         return 0;
2491 }
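/*
 * For illustration: on a dioread_nolock inode writing out delayed-allocated
 * blocks, the flags above combine to CREATE | METADATA_NOFAIL | IO_SUBMIT |
 * IO_CREATE_EXT | DELALLOC_RESERVE, i.e. allocate now, fall back to the
 * reserved metadata pool if necessary, and create the extent unwritten so
 * it is only marked written once the IO has completed.
 */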
2492 
2493 /*
2494  * mpage_map_and_submit_extent - map extent starting at mpd->lblk of length
2495  *                               mpd->len and submit pages underlying it for IO
2496  *
2497  * @handle - handle for journal operations
2498  * @mpd - extent to map
2499  * @give_up_on_write - we set this to true iff there is a fatal error and there
2500  *                     is no hope of writing the data. The caller should discard
2501  *                     dirty pages to avoid infinite loops.
2502  *
2503  * The function maps the extent starting at mpd->lblk of length mpd->len. If
2504  * it is delayed, blocks are allocated; if it is unwritten, we may need to
2505  * convert them to initialized or split the described range from a larger
2506  * unwritten extent. Note that we need not map all of the described range:
2507  * allocation can return fewer blocks, or the range may be covered by more
2508  * unwritten extents. We cannot map more because we are limited by reserved
2509  * transaction credits. On the other hand we always make sure that the last
2510  * touched page is fully mapped so that it can be written out (and thus
2511  * forward progress is guaranteed). After mapping we submit all mapped pages for IO.
2512  */
2513 static int mpage_map_and_submit_extent(handle_t *handle,
2514                                        struct mpage_da_data *mpd,
2515                                        bool *give_up_on_write)
2516 {
2517         struct inode *inode = mpd->inode;
2518         struct ext4_map_blocks *map = &mpd->map;
2519         int err;
2520         loff_t disksize;
2521         int progress = 0;
2522 
2523         mpd->io_submit.io_end->offset =
2524                                 ((loff_t)map->m_lblk) << inode->i_blkbits;
2525         do {
2526                 err = mpage_map_one_extent(handle, mpd);
2527                 if (err < 0) {
2528                         struct super_block *sb = inode->i_sb;
2529 
2530                         if (ext4_forced_shutdown(EXT4_SB(sb)) ||
2531                             EXT4_SB(sb)->s_mount_flags & EXT4_MF_FS_ABORTED)
2532                                 goto invalidate_dirty_pages;
2533                         /*
2534                          * Let the upper layers retry transient errors.
2535                          * In the case of ENOSPC, if ext4_count_free_clusters()
2536                          * is non-zero, a commit should free up blocks.
2537                          */
2538                         if ((err == -ENOMEM) ||
2539                             (err == -ENOSPC && ext4_count_free_clusters(sb))) {
2540                                 if (progress)
2541                                         goto update_disksize;
2542                                 return err;
2543                         }
2544                         ext4_msg(sb, KERN_CRIT,
2545                                  "Delayed block allocation failed for "
2546                                  "inode %lu at logical offset %llu with"
2547                                  " max blocks %u with error %d",
2548                                  inode->i_ino,
2549                                  (unsigned long long)map->m_lblk,
2550                                  (unsigned)map->m_len, -err);
2551                         ext4_msg(sb, KERN_CRIT,
2552                                  "This should not happen!! Data will "
2553                                  "be lost\n");
2554                         if (err == -ENOSPC)
2555                                 ext4_print_free_blocks(inode);
2556                 invalidate_dirty_pages:
2557                         *give_up_on_write = true;
2558                         return err;
2559                 }
2560                 progress = 1;
2561                 /*
2562                  * Update buffer state, submit mapped pages, and get us a new
2563                  * extent to map.
2564                  */
2565                 err = mpage_map_and_submit_buffers(mpd);
2566                 if (err < 0)
2567                         goto update_disksize;
2568         } while (map->m_len);
2569 
2570 update_disksize:
2571         /*
2572          * Update on-disk size after IO is submitted.  Races with
2573          * truncate are avoided by checking i_size under i_data_sem.
2574          */
2575         disksize = ((loff_t)mpd->first_page) << PAGE_SHIFT;
2576         if (disksize > READ_ONCE(EXT4_I(inode)->i_disksize)) {
2577                 int err2;
2578                 loff_t i_size;
2579 
2580                 down_write(&EXT4_I(inode)->i_data_sem);
2581                 i_size = i_size_read(inode);
2582                 if (disksize > i_size)
2583                         disksize = i_size;
2584                 if (disksize > EXT4_I(inode)->i_disksize)
2585                         EXT4_I(inode)->i_disksize = disksize;
2586                 up_write(&EXT4_I(inode)->i_data_sem);
2587                 err2 = ext4_mark_inode_dirty(handle, inode);
2588                 if (err2)
2589                         ext4_error(inode->i_sb,
2590                                    "Failed to mark inode %lu dirty",
2591                                    inode->i_ino);
2592                 if (!err)
2593                         err = err2;
2594         }
2595         return err;
2596 }
2597 
2598 /*
2599  * Calculate the total number of credits to reserve for one writepages
2600  * iteration. This is called from ext4_writepages(). We map an extent of
2601  * up to MAX_WRITEPAGES_EXTENT_LEN blocks and then we go on and finish mapping
2602  * the last partial page. So in total we can map MAX_WRITEPAGES_EXTENT_LEN +
2603  * bpp - 1 blocks in bpp different extents.
2604  */
2605 static int ext4_da_writepages_trans_blocks(struct inode *inode)
2606 {
2607         int bpp = ext4_journal_blocks_per_page(inode);
2608 
2609         return ext4_meta_trans_blocks(inode,
2610                                 MAX_WRITEPAGES_EXTENT_LEN + bpp - 1, bpp);
2611 }
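/*
 * Illustration (editorial addition, not part of the original source): a
 * hedged sketch of the arithmetic above. With 4 KiB pages and 1 KiB blocks,
 * ext4_journal_blocks_per_page() gives bpp = 4, so one writepages iteration
 * reserves credits for mapping MAX_WRITEPAGES_EXTENT_LEN + 3 blocks split
 * across at most 4 extents; with blocksize == pagesize, bpp = 1 and only
 * MAX_WRITEPAGES_EXTENT_LEN blocks are assumed. The helper below mirrors the
 * formula only; the max_extent_len parameter stands in for the kernel's
 * MAX_WRITEPAGES_EXTENT_LEN constant.
 */
#if 0	/* illustrative sketch only - not built */
static unsigned int da_writepages_extent_len_example(unsigned int page_size,
						      unsigned int block_size,
						      unsigned int max_extent_len)
{
	unsigned int bpp = page_size / block_size;	/* blocks per page */

	/* blocks one iteration may map, spread over up to bpp extents */
	return max_extent_len + bpp - 1;
}
#endif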
2612 
2613 /*
2614  * mpage_prepare_extent_to_map - find & lock contiguous range of dirty pages
2615  *                               and underlying extent to map
2616  *
2617  * @mpd - where to look for pages
2618  *
2619  * Walk dirty pages in the mapping. If they are fully mapped, submit them for
2620  * IO immediately. When we find a page which isn't mapped we start accumulating
2621  * an extent of buffers underlying these pages that needs mapping (formed by
2622  * either delayed or unwritten buffers). We also lock the pages containing
2623  * these buffers. The extent found is returned in the @mpd structure (starting
2624  * at mpd->lblk with length mpd->len blocks).
2625  *
2626  * Note that this function can attach bios to one io_end structure which are
2627  * neither logically nor physically contiguous. Although it may seem like an
2628  * unnecessary complication, it is actually inevitable in blocksize < pagesize
2629  * case as we need to track IO to all buffers underlying a page in one io_end.
2630  */
2631 static int mpage_prepare_extent_to_map(struct mpage_da_data *mpd)
2632 {
2633         struct address_space *mapping = mpd->inode->i_mapping;
2634         struct pagevec pvec;
2635         unsigned int nr_pages;
2636         long left = mpd->wbc->nr_to_write;
2637         pgoff_t index = mpd->first_page;
2638         pgoff_t end = mpd->last_page;
2639         xa_mark_t tag;
2640         int i, err = 0;
2641         int blkbits = mpd->inode->i_blkbits;
2642         ext4_lblk_t lblk;
2643         struct buffer_head *head;
2644 
2645         if (mpd->wbc->sync_mode == WB_SYNC_ALL || mpd->wbc->tagged_writepages)
2646                 tag = PAGECACHE_TAG_TOWRITE;
2647         else
2648                 tag = PAGECACHE_TAG_DIRTY;
2649 
2650         pagevec_init(&pvec);
2651         mpd->map.m_len = 0;
2652         mpd->next_page = index;
2653         while (index <= end) {
2654                 nr_pages = pagevec_lookup_range_tag(&pvec, mapping, &index, end,
2655                                 tag);
2656                 if (nr_pages == 0)
2657                         goto out;
2658 
2659                 for (i = 0; i < nr_pages; i++) {
2660                         struct page *page = pvec.pages[i];
2661 
2662                         /*
2663                          * Accumulated enough dirty pages? This doesn't apply
2664                          * to WB_SYNC_ALL mode. For integrity sync we have to
2665                          * keep going because someone may be concurrently
2666                          * dirtying pages, and we might have synced a lot of
2667                          * newly appeared dirty pages, but have not synced all
2668                          * of the old dirty pages.
2669                          */
2670                         if (mpd->wbc->sync_mode == WB_SYNC_NONE && left <= 0)
2671                                 goto out;
2672 
2673                         /* If we can't merge this page, we are done. */
2674                         if (mpd->map.m_len > 0 && mpd->next_page != page->index)
2675                                 goto out;
2676 
2677                         lock_page(page);
2678                         /*
2679                          * If the page is no longer dirty, or its mapping no
2680                          * longer corresponds to inode we are writing (which
2681                          * means it has been truncated or invalidated), or the
2682                          * page is already under writeback and we are not doing
2683                          * a data integrity writeback, skip the page
2684                          */
2685                         if (!PageDirty(page) ||
2686                             (PageWriteback(page) &&
2687                              (mpd->wbc->sync_mode == WB_SYNC_NONE)) ||
2688                             unlikely(page->mapping != mapping)) {
2689                                 unlock_page(page);
2690                                 continue;
2691                         }
2692 
2693                         wait_on_page_writeback(page);
2694                         BUG_ON(PageWriteback(page));
2695 
2696                         if (mpd->map.m_len == 0)
2697                                 mpd->first_page = page->index;
2698                         mpd->next_page = page->index + 1;
2699                         /* Add all dirty buffers to mpd */
2700                         lblk = ((ext4_lblk_t)page->index) <<
2701                                 (PAGE_SHIFT - blkbits);
2702                         head = page_buffers(page);
2703                         err = mpage_process_page_bufs(mpd, head, head, lblk);
2704                         if (err <= 0)
2705                                 goto out;
2706                         err = 0;
2707                         left--;
2708                 }
2709                 pagevec_release(&pvec);
2710                 cond_resched();
2711         }
2712         return 0;
2713 out:
2714         pagevec_release(&pvec);
2715         return err;
2716 }
2717 
2718 static int ext4_writepages(struct address_space *mapping,
2719                            struct writeback_control *wbc)
2720 {
2721         pgoff_t writeback_index = 0;
2722         long nr_to_write = wbc->nr_to_write;
2723         int range_whole = 0;
2724         int cycled = 1;
2725         handle_t *handle = NULL;
2726         struct mpage_da_data mpd;
2727         struct inode *inode = mapping->host;
2728         int needed_blocks, rsv_blocks = 0, ret = 0;
2729         struct ext4_sb_info *sbi = EXT4_SB(mapping->host->i_sb);
2730         bool done;
2731         struct blk_plug plug;
2732         bool give_up_on_write = false;
2733 
2734         if (unlikely(ext4_forced_shutdown(EXT4_SB(inode->i_sb))))
2735                 return -EIO;
2736 
2737         percpu_down_read(&sbi->s_writepages_rwsem);
2738         trace_ext4_writepages(inode, wbc);
2739 
2740         /*
2741          * No pages to write? This is mainly a kludge to avoid starting
2742          * a transaction for special inodes like the journal inode on last iput(),
2743          * because that could violate lock ordering on umount.
2744          */
2745         if (!mapping->nrpages || !mapping_tagged(mapping, PAGECACHE_TAG_DIRTY))
2746                 goto out_writepages;
2747 
2748         if (ext4_should_journal_data(inode)) {
2749                 ret = generic_writepages(mapping, wbc);
2750                 goto out_writepages;
2751         }
2752 
2753         /*
2754          * If the filesystem has aborted, it is read-only, so return
2755          * right away instead of dumping stack traces later on that
2756          * will obscure the real source of the problem.  We test
2757          * EXT4_MF_FS_ABORTED instead of sb->s_flags' SB_RDONLY because
2758          * the latter could be true if the filesystem is mounted
2759          * read-only, and in that case, ext4_writepages should
2760          * *never* be called, so if that ever happens, we would want
2761          * the stack trace.
2762          */
2763         if (unlikely(ext4_forced_shutdown(EXT4_SB(mapping->host->i_sb)) ||
2764                      sbi->s_mount_flags & EXT4_MF_FS_ABORTED)) {
2765                 ret = -EROFS;
2766                 goto out_writepages;
2767         }
2768 
2769         /*
2770          * If we have inline data and arrive here, it means that
2771          * we will soon create the block for the 1st page, so
2772          * we'd better clear the inline data here.
2773          */
2774         if (ext4_has_inline_data(inode)) {
2775                 /* Just inode will be modified... */
2776                 handle = ext4_journal_start(inode, EXT4_HT_INODE, 1);
2777                 if (IS_ERR(handle)) {
2778                         ret = PTR_ERR(handle);
2779                         goto out_writepages;
2780                 }
2781                 BUG_ON(ext4_test_inode_state(inode,
2782                                 EXT4_STATE_MAY_INLINE_DATA));
2783                 ext4_destroy_inline_data(handle, inode);
2784                 ext4_journal_stop(handle);
2785         }
2786 
2787         if (ext4_should_dioread_nolock(inode)) {
2788                 /*
2789                  * We may need to convert up to one extent per block in
2790                  * the page and we may dirty the inode.
2791                  */
2792                 rsv_blocks = 1 + ext4_chunk_trans_blocks(inode,
2793                                                 PAGE_SIZE >> inode->i_blkbits);
2794         }
2795 
2796         if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
2797                 range_whole = 1;
2798 
2799         if (wbc->range_cyclic) {
2800                 writeback_index = mapping->writeback_index;
2801                 if (writeback_index)
2802                         cycled = 0;
2803                 mpd.first_page = writeback_index;
2804                 mpd.last_page = -1;
2805         } else {
2806                 mpd.first_page = wbc->range_start >> PAGE_SHIFT;
2807                 mpd.last_page = wbc->range_end >> PAGE_SHIFT;
2808         }
2809 
2810         mpd.inode = inode;
2811         mpd.wbc = wbc;
2812         ext4_io_submit_init(&mpd.io_submit, wbc);
2813 retry:
2814         if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages)
2815                 tag_pages_for_writeback(mapping, mpd.first_page, mpd.last_page);
2816         done = false;
2817         blk_start_plug(&plug);
2818 
2819         /*
2820          * First writeback pages that don't need mapping - we can avoid
2821          * starting a transaction unnecessarily and also avoid being blocked
2822          * in the block layer on device congestion while having a transaction
2823          * started.
2824          */
2825         mpd.do_map = 0;
2826         mpd.io_submit.io_end = ext4_init_io_end(inode, GFP_KERNEL);
2827         if (!mpd.io_submit.io_end) {
2828                 ret = -ENOMEM;
2829                 goto unplug;
2830         }
2831         ret = mpage_prepare_extent_to_map(&mpd);
2832         /* Unlock pages we didn't use */
2833         mpage_release_unused_pages(&mpd, false);
2834         /* Submit prepared bio */
2835         ext4_io_submit(&mpd.io_submit);
2836         ext4_put_io_end_defer(mpd.io_submit.io_end);
2837         mpd.io_submit.io_end = NULL;
2838         if (ret < 0)
2839                 goto unplug;
2840 
2841         while (!done && mpd.first_page <= mpd.last_page) {
2842                 /* For each extent of pages we use new io_end */
2843                 mpd.io_submit.io_end = ext4_init_io_end(inode, GFP_KERNEL);
2844                 if (!mpd.io_submit.io_end) {
2845                         ret = -ENOMEM;
2846                         break;
2847                 }
2848 
2849                 /*
2850                  * We have two constraints: We find one extent to map and we
2851                  * must always write out the whole page (makes a difference when
2852                  * blocksize < pagesize) so that we don't block on IO when we
2853                  * try to write out the rest of the page. Journalled mode is
2854                  * not supported by delalloc.
2855                  */
2856                 BUG_ON(ext4_should_journal_data(inode));
2857                 needed_blocks = ext4_da_writepages_trans_blocks(inode);
2858 
2859                 /* start a new transaction */
2860                 handle = ext4_journal_start_with_reserve(inode,
2861                                 EXT4_HT_WRITE_PAGE, needed_blocks, rsv_blocks);
2862                 if (IS_ERR(handle)) {
2863                         ret = PTR_ERR(handle);
2864                         ext4_msg(inode->i_sb, KERN_CRIT, "%s: jbd2_start: "
2865                                "%ld pages, ino %lu; err %d", __func__,
2866                                 wbc->nr_to_write, inode->i_ino, ret);
2867                         /* Release allocated io_end */
2868                         ext4_put_io_end(mpd.io_submit.io_end);
2869                         mpd.io_submit.io_end = NULL;
2870                         break;
2871                 }
2872                 mpd.do_map = 1;
2873 
2874                 trace_ext4_da_write_pages(inode, mpd.first_page, mpd.wbc);
2875                 ret = mpage_prepare_extent_to_map(&mpd);
2876                 if (!ret) {
2877                         if (mpd.map.m_len)
2878                                 ret = mpage_map_and_submit_extent(handle, &mpd,
2879                                         &give_up_on_write);
2880                         else {
2881                                 /*
2882                                  * We scanned the whole range (or exhausted
2883                                  * nr_to_write), submitted what was mapped and
2884                                  * didn't find anything needing mapping. We are
2885                                  * done.
2886                                  */
2887                                 done = true;
2888                         }
2889                 }
2890                 /*
2891                  * Caution: If the handle is synchronous,
2892                  * ext4_journal_stop() can wait for transaction commit
2893                  * to finish which may depend on writeback of pages to
2894                  * complete or on page lock to be released.  In that
2895                  * case, we have to wait until after we have
2896                  * submitted all the IO, released page locks we hold,
2897                  * and dropped io_end reference (for extent conversion
2898                  * to be able to complete) before stopping the handle.
2899                  */
2900                 if (!ext4_handle_valid(handle) || handle->h_sync == 0) {
2901                         ext4_journal_stop(handle);
2902                         handle = NULL;
2903                         mpd.do_map = 0;
2904                 }
2905                 /* Unlock pages we didn't use */
2906                 mpage_release_unused_pages(&mpd, give_up_on_write);
2907                 /* Submit prepared bio */
2908                 ext4_io_submit(&mpd.io_submit);
2909 
2910                 /*
2911                  * Drop our io_end reference we got from init. We have
2912                  * to be careful and use deferred io_end finishing if
2913                  * we are still holding the transaction as we can
2914                  * release the last reference to io_end which may end
2915                  * up doing unwritten extent conversion.
2916                  */
2917                 if (handle) {
2918                         ext4_put_io_end_defer(mpd.io_submit.io_end);
2919                         ext4_journal_stop(handle);
2920                 } else
2921                         ext4_put_io_end(mpd.io_submit.io_end);
2922                 mpd.io_submit.io_end = NULL;
2923 
2924                 if (ret == -ENOSPC && sbi->s_journal) {
2925                         /*
2926                          * Commit the transaction which would
2927                          * free blocks released in the transaction
2928                          * and try again
2929                          */
2930                         jbd2_journal_force_commit_nested(sbi->s_journal);
2931                         ret = 0;
2932                         continue;
2933                 }
2934                 /* Fatal error - ENOMEM, EIO... */
2935                 if (ret)
2936                         break;
2937         }
2938 unplug:
2939         blk_finish_plug(&plug);
2940         if (!ret && !cycled && wbc->nr_to_write > 0) {
2941                 cycled = 1;
2942                 mpd.last_page = writeback_index - 1;
2943                 mpd.first_page = 0;
2944                 goto retry;
2945         }
2946 
2947         /* Update index */
2948         if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0))
2949                 /*
2950                  * Set the writeback_index so that range_cyclic
2951                  * mode will write it back later
2952                  */
2953                 mapping->writeback_index = mpd.first_page;
2954 
2955 out_writepages:
2956         trace_ext4_writepages_result(inode, wbc, ret,
2957                                      nr_to_write - wbc->nr_to_write);
2958         percpu_up_read(&sbi->s_writepages_rwsem);
2959         return ret;
2960 }
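/*
 * Illustration (editorial addition): for range_cyclic writeback, if
 * mapping->writeback_index is e.g. 100, the first pass above covers pages
 * 100..EOF (cycled == 0); when that pass finishes cleanly with write budget
 * left, the "retry" path wraps around and handles pages 0..99 before
 * mapping->writeback_index is updated for the next writeback cycle.
 */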
2961 
2962 static int ext4_dax_writepages(struct address_space *mapping,
2963                                struct writeback_control *wbc)
2964 {
2965         int ret;
2966         long nr_to_write = wbc->nr_to_write;
2967         struct inode *inode = mapping->host;
2968         struct ext4_sb_info *sbi = EXT4_SB(mapping->host->i_sb);
2969 
2970         if (unlikely(ext4_forced_shutdown(EXT4_SB(inode->i_sb))))
2971                 return -EIO;
2972 
2973         percpu_down_read(&sbi->s_writepages_rwsem);
2974         trace_ext4_writepages(inode, wbc);
2975 
2976         ret = dax_writeback_mapping_range(mapping, inode->i_sb->s_bdev, wbc);
2977         trace_ext4_writepages_result(inode, wbc, ret,
2978                                      nr_to_write - wbc->nr_to_write);
2979         percpu_up_read(&sbi->s_writepages_rwsem);
2980         return ret;
2981 }
2982 
2983 static int ext4_nonda_switch(struct super_block *sb)
2984 {
2985         s64 free_clusters, dirty_clusters;
2986         struct ext4_sb_info *sbi = EXT4_SB(sb);
2987 
2988         /*
2989          * Switch to non-delalloc mode if we are running low
2990          * on free blocks. The free block accounting via percpu
2991          * counters can get slightly wrong with percpu_counter_batch getting
2992          * accumulated on each CPU without updating the global counters.
2993          * Delalloc needs an accurate free block accounting. So switch
2994          * to non-delalloc when we are near the error range.
2995          */
2996         free_clusters =
2997                 percpu_counter_read_positive(&sbi->s_freeclusters_counter);
2998         dirty_clusters =
2999                 percpu_counter_read_positive(&sbi->s_dirtyclusters_counter);
3000         /*
3001          * Start pushing delalloc when 1/2 of free blocks are dirty.
3002          */
3003         if (dirty_clusters && (free_clusters < 2 * dirty_clusters))
3004                 try_to_writeback_inodes_sb(sb, WB_REASON_FS_FREE_SPACE);
3005 
3006         if (2 * free_clusters < 3 * dirty_clusters ||
3007             free_clusters < (dirty_clusters + EXT4_FREECLUSTERS_WATERMARK)) {
3008                 /*
3009                  * free block count is less than 150% of dirty blocks,
3010                  * or free blocks are below the watermark
3011                  */
3012                 return 1;
3013         }
3014         return 0;
3015 }
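/*
 * Illustration (editorial addition): with the thresholds above, free_clusters
 * = 100 and dirty_clusters = 80 trips the fallback because 2 * 100 < 3 * 80;
 * background writeback has already been nudged once dirty_clusters exceeded
 * free_clusters / 2. The sketch below mirrors only the comparison, with
 * 'watermark' standing in for EXT4_FREECLUSTERS_WATERMARK.
 */
#if 0	/* illustrative sketch only - not built */
static int nonda_switch_example(long long free_clusters,
				long long dirty_clusters,
				long long watermark)
{
	return 2 * free_clusters < 3 * dirty_clusters ||
	       free_clusters < dirty_clusters + watermark;
}
#endif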
3016 
3017 /* We always reserve for an inode update; the superblock could be there too */
3018 static int ext4_da_write_credits(struct inode *inode, loff_t pos, unsigned len)
3019 {
3020         if (likely(ext4_has_feature_large_file(inode->i_sb)))
3021                 return 1;
3022 
3023         if (pos + len <= 0x7fffffffULL)
3024                 return 1;
3025 
3026         /* We might need to update the superblock to set LARGE_FILE */
3027         return 2;
3028 }
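/*
 * Illustration (editorial addition): on a filesystem without the LARGE_FILE
 * feature, a delalloc write whose end offset stays at or below 0x7fffffff
 * (just under 2 GiB) reserves a single credit for the inode; a write ending
 * beyond that boundary may have to set LARGE_FILE in the superblock too, so
 * two credits are reserved.
 */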
3029 
3030 static int ext4_da_write_begin(struct file *file, struct address_space *mapping,
3031                                loff_t pos, unsigned len, unsigned flags,
3032                                struct page **pagep, void **fsdata)
3033 {
3034         int ret, retries = 0;
3035         struct page *page;
3036         pgoff_t index;
3037         struct inode *inode = mapping->host;
3038         handle_t *handle;
3039 
3040         if (unlikely(ext4_forced_shutdown(EXT4_SB(inode->i_sb))))
3041                 return -EIO;
3042 
3043         index = pos >> PAGE_SHIFT;
3044 
3045         if (ext4_nonda_switch(inode->i_sb) || S_ISLNK(inode->i_mode) ||
3046             ext4_verity_in_progress(inode)) {
3047                 *fsdata = (void *)FALL_BACK_TO_NONDELALLOC;
3048                 return ext4_write_begin(file, mapping, pos,
3049                                         len, flags, pagep, fsdata);
3050         }
3051         *fsdata = (void *)0;
3052         trace_ext4_da_write_begin(inode, pos, len, flags);
3053 
3054         if (ext4_test_inode_state(inode, EXT4_STATE_MAY_INLINE_DATA)) {
3055                 ret = ext4_da_write_inline_data_begin(mapping, inode,
3056                                                       pos, len, flags,
3057                                                       pagep, fsdata);
3058                 if (ret < 0)
3059                         return ret;
3060                 if (ret == 1)
3061                         return 0;
3062         }
3063 
3064         /*
3065          * grab_cache_page_write_begin() can take a long time if the
3066          * system is thrashing due to memory pressure, or if the page
3067          * is being written back.  So grab it first before we start
3068          * the transaction handle.  This also allows us to allocate
3069          * the page (if needed) without using GFP_NOFS.
3070          */
3071 retry_grab:
3072         page = grab_cache_page_write_begin(mapping, index, flags);
3073         if (!page)
3074                 return -ENOMEM;
3075         unlock_page(page);
3076 
3077         /*
3078          * With delayed allocation, we don't log the i_disksize update
3079          * if there is delayed block allocation. But we still need
3080          * to journal the i_disksize update if the write is to the end
3081          * of a file which has an already mapped buffer.
3082          */
3083 retry_journal:
3084         handle = ext4_journal_start(inode, EXT4_HT_WRITE_PAGE,
3085                                 ext4_da_write_credits(inode, pos, len));
3086         if (IS_ERR(handle)) {
3087                 put_page(page);
3088                 return PTR_ERR(handle);
3089         }
3090 
3091         lock_page(page);
3092         if (page->mapping != mapping) {
3093                 /* The page got truncated from under us */
3094                 unlock_page(page);
3095                 put_page(page);
3096                 ext4_journal_stop(handle);
3097                 goto retry_grab;
3098         }
3099         /* In case writeback began while the page was unlocked */
3100         wait_for_stable_page(page);
3101 
3102 #ifdef CONFIG_FS_ENCRYPTION
3103         ret = ext4_block_write_begin(page, pos, len,
3104                                      ext4_da_get_block_prep);
3105 #else
3106         ret = __block_write_begin(page, pos, len, ext4_da_get_block_prep);
3107 #endif
3108         if (ret < 0) {
3109                 unlock_page(page);
3110                 ext4_journal_stop(handle);
3111                 /*
3112                  * block_write_begin may have instantiated a few blocks
3113                  * outside i_size.  Trim these off again. Don't need
3114                  * i_size_read because we hold i_mutex.
3115                  */
3116                 if (pos + len > inode->i_size)
3117                         ext4_truncate_failed_write(inode);
3118 
3119                 if (ret == -ENOSPC &&
3120                     ext4_should_retry_alloc(inode->i_sb, &retries))
3121                         goto retry_journal;
3122 
3123                 put_page(page);
3124                 return ret;
3125         }
3126 
3127         *pagep = page;
3128         return ret;
3129 }
3130 
3131 /*
3132  * Check if we should update i_disksize
3133  * when write to the end of file but not require block allocation
3134  */
3135 static int ext4_da_should_update_i_disksize(struct page *page,
3136                                             unsigned long offset)
3137 {
3138         struct buffer_head *bh;
3139         struct inode *inode = page->mapping->host;
3140         unsigned int idx;
3141         int i;
3142 
3143         bh = page_buffers(page);
3144         idx = offset >> inode->i_blkbits;
3145 
3146         for (i = 0; i < idx; i++)
3147                 bh = bh->b_this_page;
3148 
3149         if (!buffer_mapped(bh) || (buffer_delay(bh)) || buffer_unwritten(bh))
3150                 return 0;
3151         return 1;
3152 }
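/*
 * Illustration (editorial addition): the helper above locates the buffer_head
 * covering byte 'offset' within the page by walking the circular b_this_page
 * list. With 4 KiB pages and 1 KiB blocks, offset 2500 gives
 * idx = 2500 >> 10 = 2, so the third buffer of the page is the one examined.
 */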
3153 
3154 static int ext4_da_write_end(struct file *file,
3155                              struct address_space *mapping,
3156                              loff_t pos, unsigned len, unsigned copied,
3157                              struct page *page, void *fsdata)
3158 {
3159         struct inode *inode = mapping->host;
3160         int ret = 0, ret2;
3161         handle_t *handle = ext4_journal_current_handle();
3162         loff_t new_i_size;
3163         unsigned long start, end;
3164         int write_mode = (int)(unsigned long)fsdata;
3165 
3166         if (write_mode == FALL_BACK_TO_NONDELALLOC)
3167                 return ext4_write_end(file, mapping, pos,
3168                                       len, copied, page, fsdata);
3169 
3170         trace_ext4_da_write_end(inode, pos, len, copied);
3171         start = pos & (PAGE_SIZE - 1);
3172         end = start + copied - 1;
3173 
3174         /*
3175          * generic_write_end() will run mark_inode_dirty() if i_size
3176          * changes.  So let's piggyback the i_disksize mark_inode_dirty
3177          * into that.
3178          */
3179         new_i_size = pos + copied;
3180         if (copied && new_i_size > EXT4_I(inode)->i_disksize) {
3181                 if (ext4_has_inline_data(inode) ||
3182                     ext4_da_should_update_i_disksize(page, end)) {
3183                         ext4_update_i_disksize(inode, new_i_size);
3184                         /* We need to mark the inode dirty even if
3185                          * new_i_size is less than inode->i_size
3186                          * but greater than i_disksize. (hint: delalloc)
3187                          */
3188                         ext4_mark_inode_dirty(handle, inode);
3189                 }
3190         }
3191 
3192         if (write_mode != CONVERT_INLINE_DATA &&
3193             ext4_test_inode_state(inode, EXT4_STATE_MAY_INLINE_DATA) &&
3194             ext4_has_inline_data(inode))
3195                 ret2 = ext4_da_write_inline_data_end(inode, pos, len, copied,
3196                                                      page);
3197         else
3198                 ret2 = generic_write_end(file, mapping, pos, len, copied,
3199                                                         page, fsdata);
3200 
3201         copied = ret2;
3202         if (ret2 < 0)
3203                 ret = ret2;
3204         ret2 = ext4_journal_stop(handle);
3205         if (!ret)
3206                 ret = ret2;
3207 
3208         return ret ? ret : copied;
3209 }
3210 
3211 /*
3212  * Force all delayed allocation blocks to be allocated for a given inode.
3213  */
3214 int ext4_alloc_da_blocks(struct inode *inode)
3215 {
3216         trace_ext4_alloc_da_blocks(inode);
3217 
3218         if (!EXT4_I(inode)->i_reserved_data_blocks)
3219                 return 0;
3220 
3221         /*
3222          * We do something simple for now.  The filemap_flush() will
3223          * also start triggering a write of the data blocks, which is
3224          * not strictly speaking necessary (and for users of
3225          * laptop_mode, not even desirable).  However, to do otherwise
3226          * would require replicating code paths in:
3227          *
3228          * ext4_writepages() ->
3229          *    write_cache_pages() ---> (via passed in callback function)
3230          *        __mpage_da_writepage() -->
3231          *           mpage_add_bh_to_extent()
3232          *           mpage_da_map_blocks()
3233          *
3234          * The problem is that write_cache_pages(), located in
3235          * mm/page-writeback.c, marks pages clean in preparation for
3236          * doing I/O, which is not desirable if we're not planning on
3237          * doing I/O at all.
3238          *
3239          * We could call write_cache_pages(), and then redirty all of
3240          * the pages by calling redirty_page_for_writepage() but that
3241          * would be ugly in the extreme.  So instead we would need to
3242          * replicate parts of the code in the above functions,
3243          * simplifying them because we wouldn't actually intend to
3244          * write out the pages, but rather only collect contiguous
3245          * logical block extents, call the multi-block allocator, and
3246          * then update the buffer heads with the block allocations.
3247          *
3248          * For now, though, we'll cheat by calling filemap_flush(),
3249          * which will map the blocks, and start the I/O, but not
3250          * actually wait for the I/O to complete.
3251          */
3252         return filemap_flush(inode->i_mapping);
3253 }
3254 
3255 /*
3256  * bmap() is special.  It gets used by applications such as lilo and by
3257  * the swapper to find the on-disk block of a specific piece of data.
3258  *
3259  * Naturally, this is dangerous if the block concerned is still in the
3260  * journal.  If somebody makes a swapfile on an ext4 data-journaling
3261  * filesystem and enables swap, then they may get a nasty shock when the
3262  * data getting swapped to that swapfile suddenly gets overwritten by
3263  * the original zeros written out previously to the journal and
3264  * awaiting writeback in the kernel's buffer cache.
3265  *
3266  * So, if we see any bmap calls here on a modified, data-journaled file,
3267  * take extra steps to flush any blocks which might be in the cache.
3268  */
3269 static sector_t ext4_bmap(struct address_space *mapping, sector_t block)
3270 {
3271         struct inode *inode = mapping->host;
3272         journal_t *journal;
3273         int err;
3274 
3275         /*
3276          * We can get here for an inline file via the FIBMAP ioctl
3277          */
3278         if (ext4_has_inline_data(inode))
3279                 return 0;
3280 
3281         if (mapping_tagged(mapping, PAGECACHE_TAG_DIRTY) &&
3282                         test_opt(inode->i_sb, DELALLOC)) {
3283                 /*
3284                  * With delalloc we want to sync the file
3285                  * so that we can make sure we allocate
3286                  * blocks for the file
3287                  */
3288                 filemap_write_and_wait(mapping);
3289         }
3290 
3291         if (EXT4_JOURNAL(inode) &&
3292             ext4_test_inode_state(inode, EXT4_STATE_JDATA)) {
3293                 /*
3294                  * This is a REALLY heavyweight approach, but the use of
3295                  * bmap on dirty files is expected to be extremely rare:
3296                  * only if we run lilo or swapon on a freshly made file
3297                  * do we expect this to happen.
3298                  *
3299                  * (bmap requires CAP_SYS_RAWIO so this does not
3300                  * represent an unprivileged user DOS attack --- we'd be
3301                  * in trouble if mortal users could trigger this path at
3302                  * will.)
3303                  *
3304                  * NB. EXT4_STATE_JDATA is not set on files other than
3305                  * regular files.  If somebody wants to bmap a directory
3306                  * or symlink and gets confused because the buffer
3307                  * hasn't yet been flushed to disk, they deserve
3308                  * everything they get.
3309                  */
3310 
3311                 ext4_clear_inode_state(inode, EXT4_STATE_JDATA);
3312                 journal = EXT4_JOURNAL(inode);
3313                 jbd2_journal_lock_updates(journal);
3314                 err = jbd2_journal_flush(journal);
3315                 jbd2_journal_unlock_updates(journal);
3316 
3317                 if (err)
3318                         return 0;
3319         }
3320 
3321         return generic_block_bmap(mapping, block, ext4_get_block);
3322 }
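/*
 * Illustration (editorial addition, not part of the original source): the
 * usual way ->bmap is reached from userspace is the FIBMAP ioctl (which is
 * what lilo uses). A minimal, hedged userspace sketch - it assumes the caller
 * has CAP_SYS_RAWIO and omits most error handling:
 */
#if 0	/* userspace sketch only - not built with the kernel */
#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/fs.h>		/* FIBMAP, FIGETBSZ */

int main(int argc, char **argv)
{
	int fd, blocksize = 0, block = 0;	/* logical block 0 */

	if (argc != 2 || (fd = open(argv[1], O_RDONLY)) < 0)
		return 1;
	if (ioctl(fd, FIGETBSZ, &blocksize) < 0 ||
	    ioctl(fd, FIBMAP, &block) < 0) {	/* block now holds the physical block */
		close(fd);
		return 1;
	}
	printf("block 0 of %s -> physical block %d (blocksize %d)\n",
	       argv[1], block, blocksize);
	close(fd);
	return 0;
}
#endif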
3323 
3324 static int ext4_readpage(struct file *file, struct page *page)
3325 {
3326         int ret = -EAGAIN;
3327         struct inode *inode = page->mapping->host;
3328 
3329         trace_ext4_readpage(page);
3330 
3331         if (ext4_has_inline_data(inode))
3332                 ret = ext4_readpage_inline(inode, page);
3333 
3334         if (ret == -EAGAIN)
3335                 return ext4_mpage_readpages(page->mapping, NULL, page, 1,
3336                                                 false);
3337 
3338         return ret;
3339 }
3340 
3341 static int
3342 ext4_readpages(struct file *file, struct address_space *mapping,
3343                 struct list_head *pages, unsigned nr_pages)
3344 {
3345         struct inode *inode = mapping->host;
3346 
3347         /* If the file has inline data, no need to do readpages. */
3348         if (ext4_has_inline_data(inode))
3349                 return 0;
3350 
3351         return ext4_mpage_readpages(mapping, pages, NULL, nr_pages, true);
3352 }
3353 
3354 static void ext4_invalidatepage(struct page *page, unsigned int offset,
3355                                 unsigned int length)
3356 {
3357         trace_ext4_invalidatepage(page, offset, length);
3358 
3359         /* No journalling happens on data buffers when this function is used */
3360         WARN_ON(page_has_buffers(page) && buffer_jbd(page_buffers(page)));
3361 
3362         block_invalidatepage(page, offset, length);
3363 }
3364 
3365 static int __ext4_journalled_invalidatepage(struct page *page,
3366                                             unsigned int offset,
3367                                             unsigned int length)
3368 {
3369         journal_t *journal = EXT4_JOURNAL(page->mapping->host);
3370 
3371         trace_ext4_journalled_invalidatepage(page, offset, length);
3372 
3373         /*
3374          * If it's a full truncate we just forget about the pending dirtying
3375          */
3376         if (offset == 0 && length == PAGE_SIZE)
3377                 ClearPageChecked(page);
3378 
3379         return jbd2_journal_invalidatepage(journal, page, offset, length);
3380 }
3381 
3382 /* Wrapper for aops... */
3383 static void ext4_journalled_invalidatepage(struct page *page,
3384                                            unsigned int offset,
3385                                            unsigned int length)
3386 {
3387         WARN_ON(__ext4_journalled_invalidatepage(page, offset, length) < 0);
3388 }
3389 
3390 static int ext4_releasepage(struct page *page, gfp_t wait)
3391 {
3392         journal_t *journal = EXT4_JOURNAL(page->mapping->host);
3393 
3394         trace_ext4_releasepage(page);
3395 
3396         /* Page has dirty journalled data -> cannot release */
3397         if (PageChecked(page))
3398                 return 0;
3399         if (journal)
3400                 return jbd2_journal_try_to_free_buffers(journal, page, wait);
3401         else
3402                 return try_to_free_buffers(page);
3403 }
3404 
3405 static bool ext4_inode_datasync_dirty(struct inode *inode)
3406 {
3407         journal_t *journal = EXT4_SB(inode->i_sb)->s_journal;
3408 
3409         if (journal)
3410                 return !jbd2_transaction_committed(journal,
3411                                         EXT4_I(inode)->i_datasync_tid);
3412         /* Any metadata buffers to write? */
3413         if (!list_empty(&inode->i_mapping->private_list))
3414                 return true;
3415         return inode->i_state & I_DIRTY_DATASYNC;
3416 }
3417 
3418 static int ext4_iomap_begin(struct inode *inode, loff_t offset, loff_t length,
3419                             unsigned flags, struct iomap *iomap)
3420 {
3421         struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
3422         unsigned int blkbits = inode->i_blkbits;
3423         unsigned long first_block, last_block;
3424         struct ext4_map_blocks map;
3425         bool delalloc = false;
3426         int ret;
3427 
3428         if ((offset >> blkbits) > EXT4_MAX_LOGICAL_BLOCK)
3429                 return -EINVAL;
3430         first_block = offset >> blkbits;
3431         last_block = min_t(loff_t, (offset + length - 1) >> blkbits,
3432                            EXT4_MAX_LOGICAL_BLOCK);
3433 
3434         if (flags & IOMAP_REPORT) {
3435                 if (ext4_has_inline_data(inode)) {
3436                         ret = ext4_inline_data_iomap(inode, iomap);
3437                         if (ret != -EAGAIN) {
3438                                 if (ret == 0 && offset >= iomap->length)
3439                                         ret = -ENOENT;
3440                                 return ret;
3441                         }
3442                 }
3443         } else {
3444                 if (WARN_ON_ONCE(ext4_has_inline_data(inode)))
3445                         return -ERANGE;
3446         }
3447 
3448         map.m_lblk = first_block;
3449         map.m_len = last_block - first_block + 1;
3450 
3451         if (flags & IOMAP_REPORT) {
3452                 ret = ext4_map_blocks(NULL, inode, &map, 0);
3453                 if (ret < 0)
3454                         return ret;
3455 
3456                 if (ret == 0) {
3457                         ext4_lblk_t end = map.m_lblk + map.m_len - 1;
3458                         struct extent_status es;
3459 
3460                         ext4_es_find_extent_range(inode, &ext4_es_is_delayed,
3461                                                   map.m_lblk, end, &es);
3462 
3463                         if (!es.es_len || es.es_lblk > end) {
3464                                 /* entire range is a hole */
3465                         } else if (es.es_lblk > map.m_lblk) {
3466                                 /* range starts with a hole */
3467                                 map.m_len = es.es_lblk - map.m_lblk;
3468                         } else {
3469                                 ext4_lblk_t offs = 0;
3470 
3471                                 if (es.es_lblk < map.m_lblk)
3472                                         offs = map.m_lblk - es.es_lblk;
3473                                 map.m_lblk = es.es_lblk + offs;
3474                                 map.m_len = es.es_len - offs;
3475                                 delalloc = true;
3476                         }
3477                 }
3478         } else if (flags & IOMAP_WRITE) {
3479                 int dio_credits;
3480                 handle_t *handle;
3481                 int retries = 0;
3482 
3483                 /* Trim mapping request to maximum we can map at once for DIO */
3484                 if (map.m_len > DIO_MAX_BLOCKS)
3485                         map.m_len = DIO_MAX_BLOCKS;
3486                 dio_credits = ext4_chunk_trans_blocks(inode, map.m_len);
3487 retry:
3488                 /*
3489                  * Either we allocate blocks and then we don't get an unwritten
3490                  * extent so we have reserved enough credits, or the blocks
3491                  * are already allocated and unwritten and in that case
3492                  * extent conversion fits in the credits as well.
3493                  */
3494                 handle = ext4_journal_start(inode, EXT4_HT_MAP_BLOCKS,
3495                                             dio_credits);
3496                 if (IS_ERR(handle))
3497                         return PTR_ERR(handle);
3498 
3499                 ret = ext4_map_blocks(handle, inode, &map,
3500                                       EXT4_GET_BLOCKS_CREATE_ZERO);
3501                 if (ret < 0) {
3502                         ext4_journal_stop(handle);
3503                         if (ret == -ENOSPC &&
3504                             ext4_should_retry_alloc(inode->i_sb, &retries))
3505                                 goto retry;
3506                         return ret;
3507                 }
3508 
3509                 /*
3510                  * If we added blocks beyond i_size, we need to make sure they
3511                  * will get truncated if we crash before updating i_size in
3512                  * ext4_iomap_end(). For faults we don't need to do that (and
3513                  * even cannot because for orphan list operations inode_lock is
3514                  * required) - if we happen to instantiate block beyond i_size,
3515                  * it is because we race with truncate which has already added
3516                  * the inode to the orphan list.
3517                  */
3518                 if (!(flags & IOMAP_FAULT) && first_block + map.m_len >
3519                     (i_size_read(inode) + (1 << blkbits) - 1) >> blkbits) {
3520                         int err;
3521 
3522                         err = ext4_orphan_add(handle, inode);
3523                         if (err < 0) {
3524                                 ext4_journal_stop(handle);
3525                                 return err;
3526                         }
3527                 }
3528                 ext4_journal_stop(handle);
3529         } else {
3530                 ret = ext4_map_blocks(NULL, inode, &map, 0);
3531                 if (ret < 0)
3532                         return ret;
3533         }
3534 
3535         /*
3536          * Writes that span EOF might trigger an I/O size update on completion,
3537          * so consider them to be dirty for the purposes of O_DSYNC, even if
3538          * there are no other metadata changes being made or pending here.
3539          */
3540         iomap->flags = 0;
3541         if (ext4_inode_datasync_dirty(inode) ||
3542             offset + length > i_size_read(inode))
3543                 iomap->flags |= IOMAP_F_DIRTY;
3544         iomap->bdev = inode->i_sb->s_bdev;
3545         iomap->dax_dev = sbi->s_daxdev;
3546         iomap->offset = (u64)first_block << blkbits;
3547         iomap->length = (u64)map.m_len << blkbits;
3548 
3549         if (ret == 0) {
3550                 iomap->type = delalloc ? IOMAP_DELALLOC : IOMAP_HOLE;
3551                 iomap->addr = IOMAP_NULL_ADDR;
3552         } else {
3553                 if (map.m_flags & EXT4_MAP_MAPPED) {
3554                         iomap->type = IOMAP_MAPPED;
3555                 } else if (map.m_flags & EXT4_MAP_UNWRITTEN) {
3556                         iomap->type = IOMAP_UNWRITTEN;
3557                 } else {
3558                         WARN_ON_ONCE(1);
3559                         return -EIO;
3560                 }
3561                 iomap->addr = (u64)map.m_pblk << blkbits;
3562         }
3563 
3564         if (map.m_flags & EXT4_MAP_NEW)
3565                 iomap->flags |= IOMAP_F_NEW;
3566 
3567         return 0;
3568 }
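/*
 * Illustration (editorial addition): how the byte range handed to
 * ->iomap_begin is translated to a block range and back. With blkbits = 12
 * (4 KiB blocks), offset = 6000 and length = 10000 give first_block = 1 and
 * last_block = 3, so iomap->offset is rounded down to 4096 and iomap->length
 * covers however many of those blocks the mapping ends up describing. The
 * sketch below mirrors only that arithmetic.
 */
#if 0	/* illustrative sketch only - not built */
static void iomap_range_example(unsigned long long offset,
				unsigned long long length,
				unsigned int blkbits)
{
	unsigned long long first_block = offset >> blkbits;
	unsigned long long last_block = (offset + length - 1) >> blkbits;
	unsigned long long map_len = last_block - first_block + 1;

	/* what a fully mapped extent would be reported back as */
	unsigned long long iomap_offset = first_block << blkbits;
	unsigned long long iomap_length = map_len << blkbits;

	(void)iomap_offset;
	(void)iomap_length;
}
#endif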
3569 
3570 static int ext4_iomap_end(struct inode *inode, loff_t offset, loff_t length,
3571                           ssize_t written, unsigned flags, struct iomap *iomap)
3572 {
3573         int ret = 0;
3574         handle_t *handle;
3575         int blkbits = inode->i_blkbits;
3576         bool truncate = false;
3577 
3578         if (!(flags & IOMAP_WRITE) || (flags & IOMAP_FAULT))
3579                 return 0;
3580 
3581         handle = ext4_journal_start(inode, EXT4_HT_INODE, 2);
3582         if (IS_ERR(handle)) {
3583                 ret = PTR_ERR(handle);
3584                 goto orphan_del;
3585         }
3586         if (ext4_update_inode_size(inode, offset + written))
3587                 ext4_mark_inode_dirty(handle, inode);
3588         /*
3589          * We may need to truncate allocated but not written blocks beyond EOF.
3590          */
3591         if (iomap->offset + iomap->length > 
3592             ALIGN(inode->i_size, 1 << blkbits)) {
3593                 ext4_lblk_t written_blk, end_blk;
3594 
3595                 written_blk = (offset + written) >> blkbits;
3596                 end_blk = (offset + length) >> blkbits;
3597                 if (written_blk < end_blk && ext4_can_truncate(inode))
3598                         truncate = true;
3599         }
3600         /*
3601          * Remove inode from orphan list if we were extending an inode and
3602          * everything went fine.
3603          */
3604         if (!truncate && inode->i_nlink &&
3605             !list_empty(&EXT4_I(inode)->i_orphan))
3606                 ext4_orphan_del(handle, inode);
3607         ext4_journal_stop(handle);
3608         if (truncate) {
3609                 ext4_truncate_failed_write(inode);
3610 orphan_del:
3611                 /*
3612                  * If truncate failed early the inode might still be on the
3613                  * orphan list; we need to make sure the inode is removed from
3614                  * the orphan list in that case.
3615                  */
3616                 if (inode->i_nlink)
3617                         ext4_orphan_del(NULL, inode);
3618         }
3619         return ret;
3620 }
3621 
3622 const struct iomap_ops ext4_iomap_ops = {
3623         .iomap_begin            = ext4_iomap_begin,
3624         .iomap_end              = ext4_iomap_end,
3625 };
3626 
3627 static int ext4_end_io_dio(struct kiocb *iocb, loff_t offset,
3628                             ssize_t size, void *private)
3629 {
3630         ext4_io_end_t *io_end = private;
3631 
3632         /* if not async direct IO just return */
3633         if (!io_end)
3634                 return 0;
3635 
3636         ext_debug("ext4_end_io_dio(): io_end 0x%p "
3637                   "for inode %lu, iocb 0x%p, offset %llu, size %zd\n",
3638                   io_end, io_end->inode->i_ino, iocb, offset, size);
3639 
3640         /*
3641          * Error during AIO DIO. We cannot convert unwritten extents as the
3642          * data was not written. Just clear the unwritten flag and drop io_end.
3643          */
3644         if (size <= 0) {
3645                 ext4_clear_io_unwritten_flag(io_end);
3646                 size = 0;
3647         }
3648         io_end->offset = offset;
3649         io_end->size = size;
3650         ext4_put_io_end(io_end);
3651 
3652         return 0;
3653 }
3654 
3655 /*
3656  * Handling of direct IO writes.
3657  *
3658  * For ext4 extent files, ext4 will do direct-IO writes even to holes,
3659  * preallocated extents, and writes that extend the file; there is no need
3660  * to fall back to buffered IO.
3661  *
3662  * For holes, we fallocate those blocks and mark them as unwritten.
3663  * If those blocks were preallocated, we make sure they are split, but
3664  * still keep the range to write as unwritten.
3665  *
3666  * The unwritten extents will be converted to written when the DIO completes.
3667  * For async direct IO, since the IO may still be pending when we return, we
3668  * set up an end_io callback function, which will do the conversion
3669  * when the async direct IO completes.
3670  *
3671  * If the O_DIRECT write will extend the file then add this inode to the
3672  * orphan list.  So recovery will truncate it back to the original size
3673  * if the machine crashes during the write.
3674  *
3675  */
3676 static ssize_t ext4_direct_IO_write(struct kiocb *iocb, struct iov_iter *iter)
3677 {
3678         struct file *file = iocb->ki_filp;
3679         struct inode *inode = file->f_mapping->host;
3680         struct ext4_inode_info *ei = EXT4_I(inode);
3681         ssize_t ret;
3682         loff_t offset = iocb->ki_pos;
3683         size_t count = iov_iter_count(iter);
3684         int overwrite = 0;
3685         get_block_t *get_block_func = NULL;
3686         int dio_flags = 0;
3687         loff_t final_size = offset + count;
3688         int orphan = 0;
3689         handle_t *handle;
3690 
3691         if (final_size > inode->i_size || final_size > ei->i_disksize) {
3692                 /* Credits for sb + inode write */
3693                 handle = ext4_journal_start(inode, EXT4_HT_INODE, 2);
3694                 if (IS_ERR(handle)) {
3695                         ret = PTR_ERR(handle);
3696                         goto out;
3697                 }
3698                 ret = ext4_orphan_add(handle, inode);
3699                 if (ret) {
3700                         ext4_journal_stop(handle);
3701                         goto out;
3702                 }
3703                 orphan = 1;
3704                 ext4_update_i_disksize(inode, inode->i_size);
3705                 ext4_journal_stop(handle);
3706         }
3707 
3708         BUG_ON(iocb->private == NULL);
3709 
3710         /*
3711          * Make all waiters for direct IO properly wait also for extent
3712          * conversion. This also disallows a race between truncate() and
3713          * overwrite DIO as i_dio_count needs to be incremented under i_mutex.
3714          */
3715         inode_dio_begin(inode);
3716 
3717         /* If we do an overwrite DIO, i_mutex locking can be released */
3718         overwrite = *((int *)iocb->private);
3719 
3720         if (overwrite)
3721                 inode_unlock(inode);
3722 
3723         /*
3724          * For extent mapped files we can do direct writes to holes and fallocated extents.
3725          *
3726          * Allocated blocks to fill the hole are marked as unwritten to prevent a
3727          * parallel buffered read from exposing the stale data before DIO completes
3728          * the data IO.
3729          *
3730          * As to previously fallocated extents, ext4 get_block will simply
3731          * mark the buffer mapped but still keep the extents unwritten.
3732          *
3733          * For the non-AIO case, we will convert those unwritten extents to written
3734          * after returning from blockdev_direct_IO. That way we save ourselves from
3735          * allocating an io_end structure and also the overhead of offloading
3736          * the extent conversion to a workqueue.
3737          *
3738          * For async DIO, the conversion needs to be deferred until the
3739          * IO is completed. The ext4 end_io callback function will be
3740          * called to take care of the conversion work.  Here, for the async
3741          * case, we allocate an io_end structure to hook to the iocb.
3742          */
3743         iocb->private = NULL;
3744         if (overwrite)
3745                 get_block_func = ext4_dio_get_block_overwrite;
3746         else if (!ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS) ||
3747                    round_down(offset, i_blocksize(inode)) >= inode->i_size) {
3748                 get_block_func = ext4_dio_get_block;
3749                 dio_flags = DIO_LOCKING | DIO_SKIP_HOLES;
3750         } else if (is_sync_kiocb(iocb)) {
3751                 get_block_func = ext4_dio_get_block_unwritten_sync;
3752                 dio_flags = DIO_LOCKING;
3753         } else {
3754                 get_block_func = ext4_dio_get_block_unwritten_async;
3755                 dio_flags = DIO_LOCKING;
3756         }
3757         ret = __blockdev_direct_IO(iocb, inode, inode->i_sb->s_bdev, iter,
3758                                    get_block_func, ext4_end_io_dio, NULL,
3759                                    dio_flags);
3760 
3761         if (ret > 0 && !overwrite && ext4_test_inode_state(inode,
3762                                                 EXT4_STATE_DIO_UNWRITTEN)) {
3763                 int err;
3764                 /*
3765                  * For the non-AIO case, since the IO is already
3766                  * completed, we can do the conversion right here.
3767                  */
3768                 err = ext4_convert_unwritten_extents(NULL, inode,
3769                                                      offset, ret);
3770                 if (err < 0)
3771                         ret = err;
3772                 ext4_clear_inode_state(inode, EXT4_STATE_DIO_UNWRITTEN);
3773         }
3774 
3775         inode_dio_end(inode);
3776         /* take i_mutex locking again if we did an overwrite dio */
3777         if (overwrite)
3778                 inode_lock(inode);
3779 
3780         if (ret < 0 && final_size > inode->i_size)
3781                 ext4_truncate_failed_write(inode);
3782 
3783         /* Handle extending of i_size after direct IO write */
3784         if (orphan) {
3785                 int err;
3786 
3787                 /* Credits for sb + inode write */
3788                 handle = ext4_journal_start(inode, EXT4_HT_INODE, 2);
3789                 if (IS_ERR(handle)) {
3790                         /*
3791                          * We wrote the data but cannot extend
3792                          * i_size. Bail out. In the async IO case, we
3793                          * do not return an error here because we
3794                          * have already submitted the corresponding
3795                          * bio. Returning an error here would make
3796                          * the caller think that this IO is done and
3797                          * failed, resulting in a race with the
3798                          * bio's completion handler.
3799                          */
3800                         if (!ret)
3801                                 ret = PTR_ERR(handle);
3802                         if (inode->i_nlink)
3803                                 ext4_orphan_del(NULL, inode);
3804 
3805                         goto out;
3806                 }
3807                 if (inode->i_nlink)
3808                         ext4_orphan_del(handle, inode);
3809                 if (ret > 0) {
3810                         loff_t end = offset + ret;
3811                         if (end > inode->i_size || end > ei->i_disksize) {
3812                                 ext4_update_i_disksize(inode, end);
3813                                 if (end > inode->i_size)
3814                                         i_size_write(inode, end);
3815                                 /*
3816                                  * We're going to return a positive `ret'
3817                                  * here due to non-zero-length I/O, so there's
3818                                  * no way of reporting error returns from
3819                                  * ext4_mark_inode_dirty() to userspace.  So
3820                                  * ignore it.
3821                                  */
3822                                 ext4_mark_inode_dirty(handle, inode);
3823                         }
3824                 }
3825                 err = ext4_journal_stop(handle);
3826                 if (ret == 0)
3827                         ret = err;
3828         }
3829 out:
3830         return ret;
3831 }
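
/*
 * Condensed sketch of the get_block selection made in ext4_direct_IO_write()
 * above. Purely illustrative; the helper below is hypothetical and not part
 * of ext4, the real decisions happen inline in the function:
 *
 *	static get_block_t *pick_dio_get_block(struct kiocb *iocb,
 *					       struct inode *inode,
 *					       loff_t offset, int overwrite,
 *					       int *dio_flags)
 *	{
 *		if (overwrite) {
 *			*dio_flags = 0;
 *			return ext4_dio_get_block_overwrite;
 *		}
 *		if (!ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS) ||
 *		    round_down(offset, i_blocksize(inode)) >= inode->i_size) {
 *			*dio_flags = DIO_LOCKING | DIO_SKIP_HOLES;
 *			return ext4_dio_get_block;
 *		}
 *		*dio_flags = DIO_LOCKING;
 *		return is_sync_kiocb(iocb) ? ext4_dio_get_block_unwritten_sync
 *					   : ext4_dio_get_block_unwritten_async;
 *	}
 */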
3832 
3833 static ssize_t ext4_direct_IO_read(struct kiocb *iocb, struct iov_iter *iter)
3834 {
3835         struct address_space *mapping = iocb->ki_filp->f_mapping;
3836         struct inode *inode = mapping->host;
3837         size_t count = iov_iter_count(iter);
3838         ssize_t ret;
3839 
3840         /*
3841          * Shared inode_lock is enough for us - it protects against concurrent
3842          * writes & truncates and since we take care of writing back page cache,
3843          * we are protected against page writeback as well.
3844          */
3845         if (iocb->ki_flags & IOCB_NOWAIT) {
3846                 if (!inode_trylock_shared(inode))
3847                         return -EAGAIN;
3848         } else {
3849                 inode_lock_shared(inode);
3850         }
3851 
3852         ret = filemap_write_and_wait_range(mapping, iocb->ki_pos,
3853                                            iocb->ki_pos + count - 1);
3854         if (ret)
3855                 goto out_unlock;
3856         ret = __blockdev_direct_IO(iocb, inode, inode->i_sb->s_bdev,
3857                                    iter, ext4_dio_get_block, NULL, NULL, 0);
3858 out_unlock:
3859         inode_unlock_shared(inode);
3860         return ret;
3861 }
3862 
3863 static ssize_t ext4_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
3864 {
3865         struct file *file = iocb->ki_filp;
3866         struct inode *inode = file->f_mapping->host;
3867         size_t count = iov_iter_count(iter);
3868         loff_t offset = iocb->ki_pos;
3869         ssize_t ret;
3870 
3871 #ifdef CONFIG_FS_ENCRYPTION
3872         if (IS_ENCRYPTED(inode) && S_ISREG(inode->i_mode))
3873                 return 0;
3874 #endif
3875         if (fsverity_active(inode))
3876                 return 0;
3877 
3878         /*
3879          * If we are doing data journalling we don't support O_DIRECT
3880          */
3881         if (ext4_should_journal_data(inode))
3882                 return 0;
3883 
3884         /* Let buffer I/O handle the inline data case. */
3885         if (ext4_has_inline_data(inode))
3886                 return 0;
3887 
3888         trace_ext4_direct_IO_enter(inode, offset, count, iov_iter_rw(iter));
3889         if (iov_iter_rw(iter) == READ)
3890                 ret = ext4_direct_IO_read(iocb, iter);
3891         else
3892                 ret = ext4_direct_IO_write(iocb, iter);
3893         trace_ext4_direct_IO_exit(inode, offset, count, iov_iter_rw(iter), ret);
3894         return ret;
3895 }
3896 
3897 /*
3898  * Pages can be marked dirty completely asynchronously from ext4's journalling
3899  * activity.  By filemap_sync_pte(), try_to_unmap_one(), etc.  We cannot do
3900  * much here because ->set_page_dirty is called under VFS locks.  The page is
3901  * not necessarily locked.
3902  *
3903  * We cannot just dirty the page and leave attached buffers clean, because the
3904  * buffers' dirty state is "definitive".  We cannot just set the buffers dirty
3905  * or jbddirty because all the journalling code will explode.
3906  *
3907  * So what we do is to mark the page "pending dirty" and next time writepage
3908  * is called, propagate that into the buffers appropriately.
3909  */
3910 static int ext4_journalled_set_page_dirty(struct page *page)
3911 {
3912         SetPageChecked(page);
3913         return __set_page_dirty_nobuffers(page);
3914 }
3915 
3916 static int ext4_set_page_dirty(struct page *page)
3917 {
3918         WARN_ON_ONCE(!PageLocked(page) && !PageDirty(page));
3919         WARN_ON_ONCE(!page_has_buffers(page));
3920         return __set_page_dirty_buffers(page);
3921 }
3922 
3923 static const struct address_space_operations ext4_aops = {
3924         .readpage               = ext4_readpage,
3925         .readpages              = ext4_readpages,
3926         .writepage              = ext4_writepage,
3927         .writepages             = ext4_writepages,
3928         .write_begin            = ext4_write_begin,
3929         .write_end              = ext4_write_end,
3930         .set_page_dirty         = ext4_set_page_dirty,
3931         .bmap                   = ext4_bmap,
3932         .invalidatepage         = ext4_invalidatepage,
3933         .releasepage            = ext4_releasepage,
3934         .direct_IO              = ext4_direct_IO,
3935         .migratepage            = buffer_migrate_page,
3936         .is_partially_uptodate  = block_is_partially_uptodate,
3937         .error_remove_page      = generic_error_remove_page,
3938 };
3939 
3940 static const struct address_space_operations ext4_journalled_aops = {
3941         .readpage               = ext4_readpage,
3942         .readpages              = ext4_readpages,
3943         .writepage              = ext4_writepage,
3944         .writepages             = ext4_writepages,
3945         .write_begin            = ext4_write_begin,
3946         .write_end              = ext4_journalled_write_end,
3947         .set_page_dirty         = ext4_journalled_set_page_dirty,
3948         .bmap                   = ext4_bmap,
3949         .invalidatepage         = ext4_journalled_invalidatepage,
3950         .releasepage            = ext4_releasepage,
3951         .direct_IO              = ext4_direct_IO,
3952         .is_partially_uptodate  = block_is_partially_uptodate,
3953         .error_remove_page      = generic_error_remove_page,
3954 };
3955 
3956 static const struct address_space_operations ext4_da_aops = {
3957         .readpage               = ext4_readpage,
3958         .readpages              = ext4_readpages,
3959         .writepage              = ext4_writepage,
3960         .writepages             = ext4_writepages,
3961         .write_begin            = ext4_da_write_begin,
3962         .write_end              = ext4_da_write_end,
3963         .set_page_dirty         = ext4_set_page_dirty,
3964         .bmap                   = ext4_bmap,
3965         .invalidatepage         = ext4_invalidatepage,
3966         .releasepage            = ext4_releasepage,
3967         .direct_IO              = ext4_direct_IO,
3968         .migratepage            = buffer_migrate_page,
3969         .is_partially_uptodate  = block_is_partially_uptodate,
3970         .error_remove_page      = generic_error_remove_page,
3971 };
3972 
3973 static const struct address_space_operations ext4_dax_aops = {
3974         .writepages             = ext4_dax_writepages,
3975         .direct_IO              = noop_direct_IO,
3976         .set_page_dirty         = noop_set_page_dirty,
3977         .bmap                   = ext4_bmap,
3978         .invalidatepage         = noop_invalidatepage,
3979 };
3980 
3981 void ext4_set_aops(struct inode *inode)
3982 {
3983         switch (ext4_inode_journal_mode(inode)) {
3984         case EXT4_INODE_ORDERED_DATA_MODE:
3985         case EXT4_INODE_WRITEBACK_DATA_MODE:
3986                 break;
3987         case EXT4_INODE_JOURNAL_DATA_MODE:
3988                 inode->i_mapping->a_ops = &ext4_journalled_aops;
3989                 return;
3990         default:
3991                 BUG();
3992         }
3993         if (IS_DAX(inode))
3994                 inode->i_mapping->a_ops = &ext4_dax_aops;
3995         else if (test_opt(inode->i_sb, DELALLOC))
3996                 inode->i_mapping->a_ops = &ext4_da_aops;
3997         else
3998                 inode->i_mapping->a_ops = &ext4_aops;
3999 }
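
/*
 * The address_space_operations chosen by ext4_set_aops() above, summarized
 * (illustrative restatement only):
 *
 *	journal data mode                   -> ext4_journalled_aops
 *	ordered/writeback + IS_DAX(inode)   -> ext4_dax_aops
 *	ordered/writeback + DELALLOC option -> ext4_da_aops
 *	ordered/writeback otherwise         -> ext4_aops
 */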
4000 
4001 static int __ext4_block_zero_page_range(handle_t *handle,
4002                 struct address_space *mapping, loff_t from, loff_t length)
4003 {
4004         ext4_fsblk_t index = from >> PAGE_SHIFT;
4005         unsigned offset = from & (PAGE_SIZE-1);
4006         unsigned blocksize, pos;
4007         ext4_lblk_t iblock;
4008         struct inode *inode = mapping->host;
4009         struct buffer_head *bh;
4010         struct page *page;
4011         int err = 0;
4012 
4013         page = find_or_create_page(mapping, from >> PAGE_SHIFT,
4014                                    mapping_gfp_constraint(mapping, ~__GFP_FS));
4015         if (!page)
4016                 return -ENOMEM;
4017 
4018         blocksize = inode->i_sb->s_blocksize;
4019 
4020         iblock = index << (PAGE_SHIFT - inode->i_sb->s_blocksize_bits);
4021 
4022         if (!page_has_buffers(page))
4023                 create_empty_buffers(page, blocksize, 0);
4024 
4025         /* Find the buffer that contains "offset" */
4026         bh = page_buffers(page);
4027         pos = blocksize;
4028         while (offset >= pos) {
4029                 bh = bh->b_this_page;
4030                 iblock++;
4031                 pos += blocksize;
4032         }
4033         if (buffer_freed(bh)) {
4034                 BUFFER_TRACE(bh, "freed: skip");
4035                 goto unlock;
4036         }
4037         if (!buffer_mapped(bh)) {
4038                 BUFFER_TRACE(bh, "unmapped");
4039                 ext4_get_block(inode, iblock, bh, 0);
4040                 /* unmapped? It's a hole - nothing to do */
4041                 if (!buffer_mapped(bh)) {
4042                         BUFFER_TRACE(bh, "still unmapped");
4043                         goto unlock;
4044                 }
4045         }
4046 
4047         /* Ok, it's mapped. Make sure it's up-to-date */
4048         if (PageUptodate(page))
4049                 set_buffer_uptodate(bh);
4050 
4051         if (!buffer_uptodate(bh)) {
4052                 err = -EIO;
4053                 ll_rw_block(REQ_OP_READ, 0, 1, &bh);
4054                 wait_on_buffer(bh);
4055                 /* Uhhuh. Read error. Complain and punt. */
4056                 if (!buffer_uptodate(bh))
4057                         goto unlock;
4058                 if (S_ISREG(inode->i_mode) && IS_ENCRYPTED(inode)) {
4059                         /* We expect the key to be set. */
4060                         BUG_ON(!fscrypt_has_encryption_key(inode));
4061                         WARN_ON_ONCE(fscrypt_decrypt_pagecache_blocks(
4062                                         page, blocksize, bh_offset(bh)));
4063                 }
4064         }
4065         if (ext4_should_journal_data(inode)) {
4066                 BUFFER_TRACE(bh, "get write access");
4067                 err = ext4_journal_get_write_access(handle, bh);
4068                 if (err)
4069                         goto unlock;
4070         }
4071         zero_user(page, offset, length);
4072         BUFFER_TRACE(bh, "zeroed end of block");
4073 
4074         if (ext4_should_journal_data(inode)) {
4075                 err = ext4_handle_dirty_metadata(handle, inode, bh);
4076         } else {
4077                 err = 0;
4078                 mark_buffer_dirty(bh);
4079                 if (ext4_should_order_data(inode))
4080                         err = ext4_jbd2_inode_add_write(handle, inode, from,
4081                                         length);
4082         }
4083 
4084 unlock:
4085         unlock_page(page);
4086         put_page(page);
4087         return err;
4088 }
4089 
4090 /*
4091  * ext4_block_zero_page_range() zeros out a mapping of length 'length'
4092  * starting from file offset 'from'.  The range to be zeroed must
4093  * be contained within one block.  If the specified range exceeds
4094  * the end of the block it will be shortened to the end of the
4095  * block that corresponds to 'from'.
4096  */
4097 static int ext4_block_zero_page_range(handle_t *handle,
4098                 struct address_space *mapping, loff_t from, loff_t length)
4099 {
4100         struct inode *inode = mapping->host;
4101         unsigned offset = from & (PAGE_SIZE-1);
4102         unsigned blocksize = inode->i_sb->s_blocksize;
4103         unsigned max = blocksize - (offset & (blocksize - 1));
4104 
4105         /*
4106          * correct length if it does not fall between
4107          * 'from' and the end of the block
4108          */
4109         if (length > max || length < 0)
4110                 length = max;
4111 
4112         if (IS_DAX(inode)) {
4113                 return iomap_zero_range(inode, from, length, NULL,
4114                                         &ext4_iomap_ops);
4115         }
4116         return __ext4_block_zero_page_range(handle, mapping, from, length);
4117 }
4118 
4119 /*
4120  * ext4_block_truncate_page() zeroes out a mapping from file offset `from'
4121  * up to the end of the block which corresponds to `from'.
4122  * This is required during truncate. We need to physically zero the tail end
4123  * of that block so it doesn't yield old data if the file is later grown.
4124  */
4125 static int ext4_block_truncate_page(handle_t *handle,
4126                 struct address_space *mapping, loff_t from)
4127 {
4128         unsigned offset = from & (PAGE_SIZE-1);
4129         unsigned length;
4130         unsigned blocksize;
4131         struct inode *inode = mapping->host;
4132 
4133         /* An encrypted inode with no available key (e.g. during orphan handling): skip zeroing */
4134         if (IS_ENCRYPTED(inode) && !fscrypt_has_encryption_key(inode))
4135                 return 0;
4136 
4137         blocksize = inode->i_sb->s_blocksize;
4138         length = blocksize - (offset & (blocksize - 1));
4139 
4140         return ext4_block_zero_page_range(handle, mapping, from, length);
4141 }
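
/*
 * The tail length computed by ext4_block_truncate_page() above, worked
 * through in a tiny userspace sketch (assumes 4096-byte blocks and pages;
 * the values are illustrative only):
 *
 *	#include <stdio.h>
 *
 *	int main(void)
 *	{
 *		unsigned blocksize = 4096;
 *		unsigned long long from = 10000;          // new i_size
 *		unsigned offset = from & (4096 - 1);      // 10000 % 4096 = 1808
 *		unsigned length = blocksize - (offset & (blocksize - 1));
 *
 *		// zero bytes [10000, 12288), i.e. 2288 bytes up to block end
 *		printf("offset %u length %u\n", offset, length);
 *		return 0;
 *	}
 */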
4142 
4143 int ext4_zero_partial_blocks(handle_t *handle, struct inode *inode,
4144                              loff_t lstart, loff_t length)
4145 {
4146         struct super_block *sb = inode->i_sb;
4147         struct address_space *mapping = inode->i_mapping;
4148         unsigned partial_start, partial_end;
4149         ext4_fsblk_t start, end;
4150         loff_t byte_end = (lstart + length - 1);
4151         int err = 0;
4152 
4153         partial_start = lstart & (sb->s_blocksize - 1);
4154         partial_end = byte_end & (sb->s_blocksize - 1);
4155 
4156         start = lstart >> sb->s_blocksize_bits;
4157         end = byte_end >> sb->s_blocksize_bits;
4158 
4159         /* Handle partial zero within the single block */
4160         if (start == end &&
4161             (partial_start || (partial_end != sb->s_blocksize - 1))) {
4162                 err = ext4_block_zero_page_range(handle, mapping,
4163                                                  lstart, length);
4164                 return err;
4165         }
4166         /* Handle partial zero out on the start of the range */
4167         if (partial_start) {
4168                 err = ext4_block_zero_page_range(handle, mapping,
4169                                                  lstart, sb->s_blocksize);
4170                 if (err)
4171                         return err;
4172         }
4173         /* Handle partial zero out on the end of the range */
4174         if (partial_end != sb->s_blocksize - 1)
4175                 err = ext4_block_zero_page_range(handle, mapping,
4176                                                  byte_end - partial_end,
4177                                                  partial_end + 1);
4178         return err;
4179 }
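
/*
 * Worked example of the split done by ext4_zero_partial_blocks() above,
 * as a userspace sketch (assumes 1024-byte blocks, s_blocksize_bits == 10;
 * the offsets are illustrative only):
 *
 *	#include <stdio.h>
 *
 *	int main(void)
 *	{
 *		unsigned blocksize = 1024, bits = 10;
 *		long long lstart = 1500, length = 3000;
 *		long long byte_end = lstart + length - 1;             // 4499
 *		unsigned partial_start = lstart & (blocksize - 1);    // 476
 *		unsigned partial_end = byte_end & (blocksize - 1);    // 403
 *		long long start = lstart >> bits;                     // block 1
 *		long long end = byte_end >> bits;                     // block 4
 *
 *		// start != end, partial_start != 0:   zero [1500, 2048)
 *		// partial_end != blocksize - 1:       zero [4096, 4500)
 *		printf("%u %u %lld %lld\n", partial_start, partial_end,
 *		       start, end);
 *		return 0;
 *	}
 */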
4180 
4181 int ext4_can_truncate(struct inode *inode)
4182 {
4183         if (S_ISREG(inode->i_mode))
4184                 return 1;
4185         if (S_ISDIR(inode->i_mode))
4186                 return 1;
4187         if (S_ISLNK(inode->i_mode))
4188                 return !ext4_inode_is_fast_symlink(inode);
4189         return 0;
4190 }
4191 
4192 /*
4193  * We have to make sure i_disksize gets properly updated before we truncate
4194  * page cache due to hole punching or zero range. Otherwise i_disksize update
4195  * can get lost as it may have been postponed to submission of writeback but
4196  * that will never happen after we truncate page cache.
4197  */
4198 int ext4_update_disksize_before_punch(struct inode *inode, loff_t offset,
4199                                       loff_t len)
4200 {
4201         handle_t *handle;
4202         loff_t size = i_size_read(inode);
4203 
4204         WARN_ON(!inode_is_locked(inode));
4205         if (offset > size || offset + len < size)
4206                 return 0;
4207 
4208         if (EXT4_I(inode)->i_disksize >= size)
4209                 return 0;
4210 
4211         handle = ext4_journal_start(inode, EXT4_HT_MISC, 1);
4212         if (IS_ERR(handle))
4213                 return PTR_ERR(handle);
4214         ext4_update_i_disksize(inode, size);
4215         ext4_mark_inode_dirty(handle, inode);
4216         ext4_journal_stop(handle);
4217 
4218         return 0;
4219 }
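
/*
 * The early-return conditions above, restated (illustrative only):
 *
 *	size = i_size_read(inode);
 *	// Only when the punched/zeroed range [offset, offset + len) reaches
 *	// or crosses i_size can truncating the page cache discard a page
 *	// whose writeback would have pushed i_disksize forward:
 *	if (offset > size || offset + len < size)
 *		return 0;	// range entirely above or entirely below i_size
 *	if (EXT4_I(inode)->i_disksize >= size)
 *		return 0;	// i_disksize already up to date
 *	// otherwise sync i_disksize up to i_size in a small transaction
 */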
4220 
4221 static void ext4_wait_dax_page(struct ext4_inode_info *ei)
4222 {
4223         up_write(&ei->i_mmap_sem);
4224         schedule();
4225         down_write(&ei->i_mmap_sem);
4226 }
4227 
4228 int ext4_break_layouts(struct inode *inode)
4229 {
4230         struct ext4_inode_info *ei = EXT4_I(inode);
4231         struct page *page;
4232         int error;
4233 
4234         if (WARN_ON_ONCE(!rwsem_is_locked(&ei->i_mmap_sem)))
4235                 return -EINVAL;
4236 
4237         do {
4238                 page = dax_layout_busy_page(inode->i_mapping);
4239                 if (!page)
4240                         return 0;
4241 
4242                 error = ___wait_var_event(&page->_refcount,
4243                                 atomic_read(&page->_refcount) == 1,
4244                                 TASK_INTERRUPTIBLE, 0, 0,
4245                                 ext4_wait_dax_page(ei));
4246         } while (error == 0);
4247 
4248         return error;
4249 }
4250 
4251 /*
4252  * ext4_punch_hole: punches a hole in a file by releasing the blocks
4253  * associated with the given offset and length
4254  *
4255  * @inode:  File inode
4256  * @offset: The offset where the hole will begin
4257  * @len:    The length of the hole
4258  *
4259  * Returns: 0 on success or negative on failure
4260  */
4261 
4262 int ext4_punch_hole(struct inode *inode, loff_t offset, loff_t length)
4263 {
4264         struct super_block *sb = inode->i_sb;
4265         ext4_lblk_t first_block, stop_block;
4266         struct address_space *mapping = inode->i_mapping;
4267         loff_t first_block_offset, last_block_offset;
4268         handle_t *handle;
4269         unsigned int credits;
4270         int ret = 0;
4271 
4272         if (!S_ISREG(inode->i_mode))
4273                 return -EOPNOTSUPP;
4274 
4275         trace_ext4_punch_hole(inode, offset, length, 0);
4276 
4277         ext4_clear_inode_state(inode, EXT4_STATE_MAY_INLINE_DATA);
4278         if (ext4_has_inline_data(inode)) {
4279                 down_write(&EXT4_I(inode)->i_mmap_sem);
4280                 ret = ext4_convert_inline_data(inode);
4281                 up_write(&EXT4_I(inode)->i_mmap_sem);
4282                 if (ret)
4283                         return ret;
4284         }
4285 
4286         /*
4287          * Write out all dirty pages to avoid race conditions,
4288          * then release them.
4289          */
4290         if (mapping_tagged(mapping, PAGECACHE_TAG_DIRTY)) {
4291                 ret = filemap_write_and_wait_range(mapping, offset,
4292                                                    offset + length - 1);
4293                 if (ret)
4294                         return ret;
4295         }
4296 
4297         inode_lock(inode);
4298 
4299         /* No need to punch hole beyond i_size */
4300         if (offset >= inode->i_size)
4301                 goto out_mutex;
4302 
4303         /*
4304          * If the hole extends beyond i_size, set the hole
4305          * to end after the page that contains i_size
4306          */
4307         if (offset + length > inode->i_size) {
4308                 length = inode->i_size +
4309                    PAGE_SIZE - (inode->i_size & (PAGE_SIZE - 1)) -
4310                    offset;
4311         }
4312 
4313         if (offset & (sb->s_blocksize - 1) ||
4314             (offset + length) & (sb->s_blocksize - 1)) {
4315                 /*
4316                  * Attach a jinode to the inode for jbd2 if we do any
4317                  * zeroing of partial blocks
4318                  */
4319                 ret = ext4_inode_attach_jinode(inode);
4320                 if (ret < 0)
4321                         goto out_mutex;
4322 
4323         }
4324 
4325         /* Wait for all existing dio workers; newcomers will block on i_mutex */
4326         inode_dio_wait(inode);
4327 
4328         /*
4329          * Prevent page faults from reinstantiating pages we have released from
4330          * page cache.
4331          */
4332         down_write(&EXT4_I(inode)->i_mmap_sem);
4333 
4334         ret = ext4_break_layouts(inode);
4335         if (ret)
4336                 goto out_dio;
4337 
4338         first_block_offset = round_up(offset, sb->s_blocksize);
4339         last_block_offset = round_down((offset + length), sb->s_blocksize) - 1;
4340 
4341         /* Now release the pages and zero the block aligned parts of the pages */
4342         if (last_block_offset > first_block_offset) {
4343                 ret = ext4_update_disksize_before_punch(inode, offset, length);
4344                 if (ret)
4345                         goto out_dio;
4346                 truncate_pagecache_range(inode, first_block_offset,
4347                                          last_block_offset);
4348         }
4349 
4350         if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))
4351                 credits = ext4_writepage_trans_blocks(inode);
4352         else
4353                 credits = ext4_blocks_for_truncate(inode);
4354         handle = ext4_journal_start(inode, EXT4_HT_TRUNCATE, credits);
4355         if (IS_ERR(handle)) {
4356                 ret = PTR_ERR(handle);
4357                 ext4_std_error(sb, ret);
4358                 goto out_dio;
4359         }
4360 
4361         ret = ext4_zero_partial_blocks(handle, inode, offset,
4362                                        length);
4363         if (ret)
4364                 goto out_stop;
4365 
4366         first_block = (offset + sb->s_blocksize - 1) >>
4367                 EXT4_BLOCK_SIZE_BITS(sb);
4368         stop_block = (offset + length) >> EXT4_BLOCK_SIZE_BITS(sb);
4369 
4370         /* If there are blocks to remove, do it */
4371         if (stop_block > first_block) {
4372 
4373                 down_write(&EXT4_I(inode)->i_data_sem);
4374                 ext4_discard_preallocations(inode);
4375 
4376                 ret = ext4_es_remove_extent(inode, first_block,
4377                                             stop_block - first_block);
4378                 if (ret) {
4379                         up_write(&EXT4_I(inode)->i_data_sem);
4380                         goto out_stop;
4381                 }
4382 
4383                 if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))
4384                         ret = ext4_ext_remove_space(inode, first_block,
4385                                                     stop_block - 1);
4386                 else
4387                         ret = ext4_ind_remove_space(handle, inode, first_block,
4388                                                     stop_block);
4389 
4390                 up_write(&EXT4_I(inode)->i_data_sem);
4391         }
4392         if (IS_SYNC(inode))
4393                 ext4_handle_sync(handle);
4394 
4395         inode->i_mtime = inode->i_ctime = current_time(inode);
4396         ext4_mark_inode_dirty(handle, inode);
4397         if (ret >= 0)
4398                 ext4_update_inode_fsync_trans(handle, inode, 1);
4399 out_stop:
4400         ext4_journal_stop(handle);
4401 out_dio:
4402         up_write(&EXT4_I(inode)->i_mmap_sem);
4403 out_mutex:
4404         inode_unlock(inode);
4405         return ret;
4406 }
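
/*
 * Block alignment performed by ext4_punch_hole() above, as a worked
 * userspace sketch (assumes 4096-byte blocks; offset and length are
 * illustrative only):
 *
 *	#include <stdio.h>
 *
 *	int main(void)
 *	{
 *		unsigned long long bs = 4096, offset = 5000, length = 20000;
 *		// unaligned head/tail are only zeroed, never deallocated
 *		unsigned long long first_block_offset =
 *			(offset + bs - 1) / bs * bs;                  // 8192
 *		unsigned long long last_block_offset =
 *			(offset + length) / bs * bs - 1;              // 24575
 *		unsigned long long first_block =
 *			(offset + bs - 1) / bs;                       // block 2
 *		unsigned long long stop_block = (offset + length) / bs;  // 6
 *
 *		// page cache truncated for [8192, 24575], blocks 2..5 removed
 *		printf("%llu %llu %llu %llu\n", first_block_offset,
 *		       last_block_offset, first_block, stop_block);
 *		return 0;
 *	}
 */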
4407 
4408 int ext4_inode_attach_jinode(struct inode *inode)
4409 {
4410         struct ext4_inode_info *ei = EXT4_I(inode);
4411         struct jbd2_inode *jinode;
4412 
4413         if (ei->jinode || !EXT4_SB(inode->i_sb)->s_journal)
4414                 return 0;
4415 
4416         jinode = jbd2_alloc_inode(GFP_KERNEL);
4417         spin_lock(&inode->i_lock);
4418         if (!ei->jinode) {
4419                 if (!jinode) {
4420                         spin_unlock(&inode->i_lock);
4421                         return -ENOMEM;
4422                 }
4423                 ei->jinode = jinode;
4424                 jbd2_journal_init_jbd_inode(ei->jinode, inode);
4425                 jinode = NULL;
4426         }
4427         spin_unlock(&inode->i_lock);
4428         if (unlikely(jinode != NULL))
4429                 jbd2_free_inode(jinode);
4430         return 0;
4431 }
4432 
4433 /*
4434  * ext4_truncate()
4435  *
4436  * We block out ext4_get_block() block instantiations across the entire
4437  * transaction, and VFS/VM ensures that ext4_truncate() cannot run
4438  * simultaneously on behalf of the same inode.
4439  *
4440  * As we work through the truncate and commit bits of it to the journal there
4441  * is one core, guiding principle: the file's tree must always be consistent on
4442  * disk.  We must be able to restart the truncate after a crash.
4443  *
4444  * The file's tree may be transiently inconsistent in memory (although it
4445  * probably isn't), but whenever we close off and commit a journal transaction,
4446  * the contents of (the filesystem + the journal) must be consistent and
4447  * restartable.  It's pretty simple, really: bottom up, right to left (although
4448  * left-to-right works OK too).
4449  *
4450  * Note that at recovery time, journal replay occurs *before* the restart of
4451  * truncate against the orphan inode list.
4452  *
4453  * The committed inode has the new, desired i_size (which is the same as
4454  * i_disksize in this case).  After a crash, ext4_orphan_cleanup() will see
4455  * that this inode's truncate did not complete and it will again call
4456  * ext4_truncate() to have another go.  So there will be instantiated blocks
4457  * to the right of the truncation point in a crashed ext4 filesystem.  But
4458  * that's fine - as long as they are linked from the inode, the post-crash
4459  * ext4_truncate() run will find them and release them.
4460  */
4461 int ext4_truncate(struct inode *inode)
4462 {
4463         struct ext4_inode_info *ei = EXT4_I(inode);
4464         unsigned int credits;
4465         int err = 0;
4466         handle_t *handle;
4467         struct address_space *mapping = inode->i_mapping;
4468 
4469         /*
4470          * There is a possibility that we're either freeing the inode
4471          * or it's a completely new inode. In those cases we might not
4472          * have i_mutex locked because it's not necessary.
4473          */
4474         if (!(inode->i_state & (I_NEW|I_FREEING)))
4475                 WARN_ON(!inode_is_locked(inode));
4476         trace_ext4_truncate_enter(inode);
4477 
4478         if (!ext4_can_truncate(inode))
4479                 return 0;
4480 
4481         ext4_clear_inode_flag(inode, EXT4_INODE_EOFBLOCKS);
4482 
4483         if (inode->i_size == 0 && !test_opt(inode->i_sb, NO_AUTO_DA_ALLOC))
4484                 ext4_set_inode_state(inode, EXT4_STATE_DA_ALLOC_CLOSE);
4485 
4486         if (ext4_has_inline_data(inode)) {
4487                 int has_inline = 1;
4488 
4489                 err = ext4_inline_data_truncate(inode, &has_inline);
4490                 if (err)
4491                         return err;
4492                 if (has_inline)
4493                         return 0;
4494         }
4495 
4496         /* If we zero out the tail of the page, we have to create a jinode for jbd2 */
4497         if (inode->i_size & (inode->i_sb->s_blocksize - 1)) {
4498                 if (ext4_inode_attach_jinode(inode) < 0)
4499                         return 0;
4500         }
4501 
4502         if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))
4503                 credits = ext4_writepage_trans_blocks(inode);
4504         else
4505                 credits = ext4_blocks_for_truncate(inode);
4506 
4507         handle = ext4_journal_start(inode, EXT4_HT_TRUNCATE, credits);
4508         if (IS_ERR(handle))
4509                 return PTR_ERR(handle);
4510 
4511         if (inode->i_size & (inode->i_sb->s_blocksize - 1))
4512                 ext4_block_truncate_page(handle, mapping, inode->i_size);
4513 
4514         /*
4515          * We add the inode to the orphan list, so that if this
4516          * truncate spans multiple transactions, and we crash, we will
4517          * resume the truncate when the filesystem recovers.  It also
4518          * marks the inode dirty, to catch the new size.
4519          *
4520          * Implication: the file must always be in a sane, consistent
4521          * truncatable state while each transaction commits.
4522          */
4523         err = ext4_orphan_add(handle, inode);
4524         if (err)
4525                 goto out_stop;
4526 
4527         down_write(&EXT4_I(inode)->i_data_sem);
4528 
4529         ext4_discard_preallocations(inode);
4530 
4531         if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))
4532                 err = ext4_ext_truncate(handle, inode);
4533         else
4534                 ext4_ind_truncate(handle, inode);
4535 
4536         up_write(&ei->i_data_sem);
4537         if (err)
4538                 goto out_stop;
4539 
4540         if (IS_SYNC(inode))
4541                 ext4_handle_sync(handle);
4542 
4543 out_stop:
4544         /*
4545          * If this was a simple ftruncate() and the file will remain alive,
4546          * then we need to clear up the orphan record which we created above.
4547          * However, if this was a real unlink then we were called by
4548          * ext4_evict_inode(), and we allow that function to clean up the
4549          * orphan info for us.
4550          */
4551         if (inode->i_nlink)
4552                 ext4_orphan_del(handle, inode);
4553 
4554         inode->i_mtime = inode->i_ctime = current_time(inode);
4555         ext4_mark_inode_dirty(handle, inode);
4556         ext4_journal_stop(handle);
4557 
4558         trace_ext4_truncate_exit(inode);
4559         return err;
4560 }
4561 
4562 /*
4563  * ext4_get_inode_loc returns with an extra refcount against the inode's
4564  * underlying buffer_head on success. If 'in_mem' is true, we have all
4565  * data in memory that is needed to recreate the on-disk version of this
4566  * inode.
4567  */
4568 static int __ext4_get_inode_loc(struct inode *inode,
4569                                 struct ext4_iloc *iloc, int in_mem)
4570 {
4571         struct ext4_group_desc  *gdp;
4572         struct buffer_head      *bh;
4573         struct super_block      *sb = inode->i_sb;
4574         ext4_fsblk_t            block;
4575         struct blk_plug         plug;
4576         int                     inodes_per_block, inode_offset;
4577 
4578         iloc->bh = NULL;
4579         if (inode->i_ino < EXT4_ROOT_INO ||
4580             inode->i_ino > le32_to_cpu(EXT4_SB(sb)->s_es->s_inodes_count))
4581                 return -EFSCORRUPTED;
4582 
4583         iloc->block_group = (inode->i_ino - 1) / EXT4_INODES_PER_GROUP(sb);
4584         gdp = ext4_get_group_desc(sb, iloc->block_group, NULL);
4585         if (!gdp)
4586                 return -EIO;
4587 
4588         /*
4589          * Figure out the offset within the block group inode table
4590          */
4591         inodes_per_block = EXT4_SB(sb)->s_inodes_per_block;
4592         inode_offset = ((inode->i_ino - 1) %
4593                         EXT4_INODES_PER_GROUP(sb));
4594         block = ext4_inode_table(sb, gdp) + (inode_offset / inodes_per_block);
4595         iloc->offset = (inode_offset % inodes_per_block) * EXT4_INODE_SIZE(sb);
4596 
4597         bh = sb_getblk(sb, block);
4598         if (unlikely(!bh))
4599                 return -ENOMEM;
4600         if (!buffer_uptodate(bh)) {
4601                 lock_buffer(bh);
4602 
4603                 /*
4604                  * If the buffer has the write error flag, we have failed
4605                  * to write out another inode in the same block.  In this
4606                  * case, we don't have to read the block because we may
4607                  * read the old inode data successfully.
4608                  */
4609                 if (buffer_write_io_error(bh) && !buffer_uptodate(bh))
4610                         set_buffer_uptodate(bh);
4611 
4612                 if (buffer_uptodate(bh)) {
4613                         /* someone brought it uptodate while we waited */
4614                         unlock_buffer(bh);
4615                         goto has_buffer;
4616                 }
4617 
4618                 /*
4619                  * If we have all information of the inode in memory and this
4620                  * is the only valid inode in the block, we need not read the
4621                  * block.
4622                  */
4623                 if (in_mem) {
4624                         struct buffer_head *bitmap_bh;
4625                         int i, start;
4626 
4627                         start = inode_offset & ~(inodes_per_block - 1);
4628 
4629                         /* Is the inode bitmap in cache? */
4630                         bitmap_bh = sb_getblk(sb, ext4_inode_bitmap(sb, gdp));
4631                         if (unlikely(!bitmap_bh))
4632                                 goto make_io;
4633 
4634                         /*
4635                          * If the inode bitmap isn't in cache then the
4636                          * optimisation may end up performing two reads instead
4637                          * of one, so skip it.
4638                          */
4639                         if (!buffer_uptodate(bitmap_bh)) {
4640                                 brelse(bitmap_bh);
4641                                 goto make_io;
4642                         }
4643                         for (i = start; i < start + inodes_per_block; i++) {
4644                                 if (i == inode_offset)
4645                                         continue;
4646                                 if (ext4_test_bit(i, bitmap_bh->b_data))
4647                                         break;
4648                         }
4649                         brelse(bitmap_bh);
4650                         if (i == start + inodes_per_block) {
4651                                 /* all other inodes are free, so skip I/O */
4652                                 memset(bh->b_data, 0, bh->b_size);
4653                                 set_buffer_uptodate(bh);
4654                                 unlock_buffer(bh);
4655                                 goto has_buffer;
4656                         }
4657                 }
4658 
4659 make_io:
4660                 /*
4661                  * If we need to do any I/O, try to pre-readahead extra
4662                  * blocks from the inode table.
4663                  */
4664                 blk_start_plug(&plug);
4665                 if (EXT4_SB(sb)->s_inode_readahead_blks) {
4666                         ext4_fsblk_t b, end, table;
4667                         unsigned num;
4668                         __u32 ra_blks = EXT4_SB(sb)->s_inode_readahead_blks;
4669 
4670                         table = ext4_inode_table(sb, gdp);
4671                         /* s_inode_readahead_blks is always a power of 2 */
4672                         b = block & ~((ext4_fsblk_t) ra_blks - 1);
4673                         if (table > b)
4674                                 b = table;
4675                         end = b + ra_blks;
4676                         num = EXT4_INODES_PER_GROUP(sb);
4677                         if (ext4_has_group_desc_csum(sb))
4678                                 num -= ext4_itable_unused_count(sb, gdp);
4679                         table += num / inodes_per_block;
4680                         if (end > table)
4681                                 end = table;
4682                         while (b <= end)
4683                                 sb_breadahead_unmovable(sb, b++);
4684                 }
4685 
4686                 /*
4687                  * There are other valid inodes in the buffer, this inode
4688                  * has in-inode xattrs, or we don't have this inode in memory.
4689                  * Read the block from disk.
4690                  */
4691                 trace_ext4_load_inode(inode);
4692                 get_bh(bh);
4693                 bh->b_end_io = end_buffer_read_sync;
4694                 submit_bh(REQ_OP_READ, REQ_META | REQ_PRIO, bh);
4695                 blk_finish_plug(&plug);
4696                 wait_on_buffer(bh);
4697                 if (!buffer_uptodate(bh)) {
4698                         EXT4_ERROR_INODE_BLOCK(inode, block,
4699                                                "unable to read itable block");
4700                         brelse(bh);
4701                         return -EIO;
4702                 }
4703         }
4704 has_buffer:
4705         iloc->bh = bh;
4706         return 0;
4707 }
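
/*
 * How the on-disk inode location is derived above, as a worked userspace
 * sketch (assumes 256-byte inodes, 8192 inodes per group and 4096-byte
 * blocks; the inode table start below is made up):
 *
 *	#include <stdio.h>
 *
 *	int main(void)
 *	{
 *		unsigned long ino = 20000;
 *		unsigned inodes_per_group = 8192, inode_size = 256;
 *		unsigned inodes_per_block = 4096 / inode_size;         // 16
 *		unsigned long long inode_table = 1057;  // from this group's gdp
 *
 *		unsigned long block_group = (ino - 1) / inodes_per_group;  // 2
 *		unsigned inode_offset = (ino - 1) % inodes_per_group;   // 3615
 *		unsigned long long block =
 *			inode_table + inode_offset / inodes_per_block;  // 1282
 *		unsigned offset_in_block =
 *			(inode_offset % inodes_per_block) * inode_size; // 3840
 *
 *		printf("%lu %u %llu %u\n", block_group, inode_offset,
 *		       block, offset_in_block);
 *		return 0;
 *	}
 */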
4708 
4709 int ext4_get_inode_loc(struct inode *inode, struct ext4_iloc *iloc)
4710 {
4711         /* We have all inode data except xattrs in memory here. */
4712         return __ext4_get_inode_loc(inode, iloc,
4713                 !ext4_test_inode_state(inode, EXT4_STATE_XATTR));
4714 }
4715 
4716 static bool ext4_should_use_dax(struct inode *inode)
4717 {
4718         if (!test_opt(inode->i_sb, DAX))
4719                 return false;
4720         if (!S_ISREG(inode->i_mode))
4721                 return false;
4722         if (ext4_should_journal_data(inode))
4723                 return false;
4724         if (ext4_has_inline_data(inode))
4725                 return false;
4726         if (ext4_test_inode_flag(inode, EXT4_INODE_ENCRYPT))
4727                 return false;
4728         if (ext4_test_inode_flag(inode, EXT4_INODE_VERITY))
4729                 return false;
4730         return true;
4731 }
4732 
4733 void ext4_set_inode_flags(struct inode *inode)
4734 {
4735         unsigned int flags = EXT4_I(inode)->i_flags;
4736         unsigned int new_fl = 0;
4737 
4738         if (flags & EXT4_SYNC_FL)
4739                 new_fl |= S_SYNC;
4740         if (flags & EXT4_APPEND_FL)
4741                 new_fl |= S_APPEND;
4742         if (flags & EXT4_IMMUTABLE_FL)
4743                 new_fl |= S_IMMUTABLE;
4744         if (flags & EXT4_NOATIME_FL)
4745                 new_fl |= S_NOATIME;
4746         if (flags & EXT4_DIRSYNC_FL)
4747                 new_fl |= S_DIRSYNC;
4748         if (ext4_should_use_dax(inode))
4749                 new_fl |= S_DAX;
4750         if (flags & EXT4_ENCRYPT_FL)
4751                 new_fl |= S_ENCRYPTED;
4752         if (flags & EXT4_CASEFOLD_FL)
4753                 new_fl |= S_CASEFOLD;
4754         if (flags & EXT4_VERITY_FL)
4755                 new_fl |= S_VERITY;
4756         inode_set_flags(inode, new_fl,
4757                         S_SYNC|S_APPEND|S_IMMUTABLE|S_NOATIME|S_DIRSYNC|S_DAX|
4758                         S_ENCRYPTED|S_CASEFOLD|S_VERITY);
4759 }
4760 
4761 static blkcnt_t ext4_inode_blocks(struct ext4_inode *raw_inode,
4762                                   struct ext4_inode_info *ei)
4763 {
4764         blkcnt_t i_blocks ;
4765         struct inode *inode = &(ei->vfs_inode);
4766         struct super_block *sb = inode->i_sb;
4767 
4768         if (ext4_has_feature_huge_file(sb)) {
4769                 /* we are using combined 48 bit field */
4770                 i_blocks = ((u64)le16_to_cpu(raw_inode->i_blocks_high)) << 32 |
4771                                         le32_to_cpu(raw_inode->i_blocks_lo);
4772                 if (ext4_test_inode_flag(inode, EXT4_INODE_HUGE_FILE)) {
4773                         /* i_blocks is stored in units of file system blocks */
4774                         return i_blocks  << (inode->i_blkbits - 9);
4775                 } else {
4776                         return i_blocks;
4777                 }
4778         } else {
4779                 return le32_to_cpu(raw_inode->i_blocks_lo);
4780         }
4781 }
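
/*
 * The 48-bit i_blocks decoding above, worked through as a userspace sketch
 * (values illustrative; assumes i_blkbits == 12, i.e. 4096-byte blocks):
 *
 *	#include <stdio.h>
 *
 *	int main(void)
 *	{
 *		unsigned short i_blocks_high = 0x0001;
 *		unsigned int   i_blocks_lo   = 0x00000010;
 *		unsigned long long i_blocks =
 *			((unsigned long long)i_blocks_high << 32) | i_blocks_lo;
 *		// i_blocks == 0x100000010, in 512-byte units by default
 *
 *		// if EXT4_INODE_HUGE_FILE is set the value counts fs blocks,
 *		// so scale it to 512-byte units:
 *		unsigned long long huge = i_blocks << (12 - 9);
 *
 *		printf("%llu %llu\n", i_blocks, huge);
 *		return 0;
 *	}
 */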
4782 
4783 static inline int ext4_iget_extra_inode(struct inode *inode,
4784                                          struct ext4_inode *raw_inode,
4785                                          struct ext4_inode_info *ei)
4786 {
4787         __le32 *magic = (void *)raw_inode +
4788                         EXT4_GOOD_OLD_INODE_SIZE + ei->i_extra_isize;
4789 
4790         if (EXT4_GOOD_OLD_INODE_SIZE + ei->i_extra_isize + sizeof(__le32) <=
4791             EXT4_INODE_SIZE(inode->i_sb) &&
4792             *magic == cpu_to_le32(EXT4_XATTR_MAGIC)) {
4793                 ext4_set_inode_state(inode, EXT4_STATE_XATTR);
4794                 return ext4_find_inline_data_nolock(inode);
4795         } else
4796                 EXT4_I(inode)->i_inline_off = 0;
4797         return 0;
4798 }
4799 
4800 int ext4_get_projid(struct inode *inode, kprojid_t *projid)
4801 {
4802         if (!ext4_has_feature_project(inode->i_sb))
4803                 return -EOPNOTSUPP;
4804         *projid = EXT4_I(inode)->i_projid;
4805         return 0;
4806 }
4807 
4808 /*
4809  * ext4 manages i_version itself for ea inodes: it stores the lower 32 bits of
4810  * the refcount in i_version, so use raw values if the inode has the
4811  * EXT4_EA_INODE_FL flag set.
4812  */
4813 static inline void ext4_inode_set_iversion_queried(struct inode *inode, u64 val)
4814 {
4815         if (unlikely(EXT4_I(inode)->i_flags & EXT4_EA_INODE_FL))
4816                 inode_set_iversion_raw(inode, val);
4817         else
4818                 inode_set_iversion_queried(inode, val);
4819 }
4820 static inline u64 ext4_inode_peek_iversion(const struct inode *inode)
4821 {
4822         if (unlikely(EXT4_I(inode)->i_flags & EXT4_EA_INODE_FL))
4823                 return inode_peek_iversion_raw(inode);
4824         else
4825                 return inode_peek_iversion(inode);
4826 }
4827 
4828 struct inode *__ext4_iget(struct super_block *sb, unsigned long ino,
4829                           ext4_iget_flags flags, const char *function,
4830                           unsigned int line)
4831 {
4832         struct ext4_iloc iloc;
4833         struct ext4_inode *raw_inode;
4834         struct ext4_inode_info *ei;
4835         struct inode *inode;
4836         journal_t *journal = EXT4_SB(sb)->s_journal;
4837         long ret;
4838         loff_t size;
4839         int block;
4840         uid_t i_uid;
4841         gid_t i_gid;
4842         projid_t i_projid;
4843 
4844         if ((!(flags & EXT4_IGET_SPECIAL) &&
4845              (ino < EXT4_FIRST_INO(sb) && ino != EXT4_ROOT_INO)) ||
4846             (ino < EXT4_ROOT_INO) ||
4847             (ino > le32_to_cpu(EXT4_SB(sb)->s_es->s_inodes_count))) {
4848                 if (flags & EXT4_IGET_HANDLE)
4849                         return ERR_PTR(-ESTALE);
4850                 __ext4_error(sb, function, line,
4851                              "inode #%lu: comm %s: iget: illegal inode #",
4852                              ino, current->comm);
4853                 return ERR_PTR(-EFSCORRUPTED);
4854         }
4855 
4856         inode = iget_locked(sb, ino);
4857         if (!inode)
4858                 return ERR_PTR(-ENOMEM);
4859         if (!(inode->i_state & I_NEW))
4860                 return inode;
4861 
4862         ei = EXT4_I(inode);
4863         iloc.bh = NULL;
4864 
4865         ret = __ext4_get_inode_loc(inode, &iloc, 0);
4866         if (ret < 0)
4867                 goto bad_inode;
4868         raw_inode = ext4_raw_inode(&iloc);
4869 
4870         if ((ino == EXT4_ROOT_INO) && (raw_inode->i_links_count == 0)) {
4871                 ext4_error_inode(inode, function, line, 0,
4872                                  "iget: root inode unallocated");
4873                 ret = -EFSCORRUPTED;
4874                 goto bad_inode;
4875         }
4876 
4877         if ((flags & EXT4_IGET_HANDLE) &&
4878             (raw_inode->i_links_count == 0) && (raw_inode->i_mode == 0)) {
4879                 ret = -ESTALE;
4880                 goto bad_inode;
4881         }
4882 
4883         if (EXT4_INODE_SIZE(inode->i_sb) > EXT4_GOOD_OLD_INODE_SIZE) {
4884                 ei->i_extra_isize = le16_to_cpu(raw_inode->i_extra_isize);
4885                 if (EXT4_GOOD_OLD_INODE_SIZE + ei->i_extra_isize >
4886                         EXT4_INODE_SIZE(inode->i_sb) ||
4887                     (ei->i_extra_isize & 3)) {
4888                         ext4_error_inode(inode, function, line, 0,
4889                                          "iget: bad extra_isize %u "
4890                                          "(inode size %u)",
4891                                          ei->i_extra_isize,
4892                                          EXT4_INODE_SIZE(inode->i_sb));
4893                         ret = -EFSCORRUPTED;
4894                         goto bad_inode;
4895                 }
4896         } else
4897                 ei->i_extra_isize = 0;
4898 
4899         /* Precompute checksum seed for inode metadata */
4900         if (ext4_has_metadata_csum(sb)) {
4901                 struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
4902                 __u32 csum;
4903                 __le32 inum = cpu_to_le32(inode->i_ino);
4904                 __le32 gen = raw_inode->i_generation;
4905                 csum = ext4_chksum(sbi, sbi->s_csum_seed, (__u8 *)&inum,
4906                                    sizeof(inum));
4907                 ei->i_csum_seed = ext4_chksum(sbi, csum, (__u8 *)&gen,
4908                                               sizeof(gen));
4909         }
4910 
4911         if (!ext4_inode_csum_verify(inode, raw_inode, ei)) {
4912                 ext4_error_inode(inode, function, line, 0,
4913                                  "iget: checksum invalid");
4914                 ret = -EFSBADCRC;
4915                 goto bad_inode;
4916         }
4917 
4918         inode->i_mode = le16_to_cpu(raw_inode->i_mode);
4919         i_uid = (uid_t)le16_to_cpu(raw_inode->i_uid_low);
4920         i_gid = (gid_t)le16_to_cpu(raw_inode->i_gid_low);
4921         if (ext4_has_feature_project(sb) &&
4922             EXT4_INODE_SIZE(sb) > EXT4_GOOD_OLD_INODE_SIZE &&
4923             EXT4_FITS_IN_INODE(raw_inode, ei, i_projid))
4924                 i_projid = (projid_t)le32_to_cpu(raw_inode->i_projid);
4925         else
4926                 i_projid = EXT4_DEF_PROJID;
4927 
4928         if (!(test_opt(inode->i_sb, NO_UID32))) {
4929                 i_uid |= le16_to_cpu(raw_inode->i_uid_high) << 16;
4930                 i_gid |= le16_to_cpu(raw_inode->i_gid_high) << 16;
4931         }
4932         i_uid_write(inode, i_uid);
4933         i_gid_write(inode, i_gid);
4934         ei->i_projid = make_kprojid(&init_user_ns, i_projid);
4935         set_nlink(inode, le16_to_cpu(raw_inode->i_links_count));
4936 
4937         ext4_clear_state_flags(ei);     /* Only relevant on 32-bit archs */
4938         ei->i_inline_off = 0;
4939         ei->i_dir_start_lookup = 0;
4940         ei->i_dtime = le32_to_cpu(raw_inode->i_dtime);
4941         /* We now have enough fields to check if the inode was active or not.
4942          * This is needed because nfsd might try to access dead inodes;
4943          * the test is the same one that e2fsck uses.
4944          * NeilBrown 1999oct15
4945          */
4946         if (inode->i_nlink == 0) {
4947                 if ((inode->i_mode == 0 ||
4948                      !(EXT4_SB(inode->i_sb)->s_mount_state & EXT4_ORPHAN_FS)) &&
4949                     ino != EXT4_BOOT_LOADER_INO) {
4950                         /* this inode is deleted */
4951                         ret = -ESTALE;
4952                         goto bad_inode;
4953                 }
4954                 /* The only unlinked inodes we let through here have
4955                  * valid i_mode and are being read by the orphan
4956                  * recovery code: that's fine, we're about to complete
4957                  * the process of deleting those.
4958                  * OR it is the EXT4_BOOT_LOADER_INO which is
4959                  * not initialized on a new filesystem. */
4960         }
4961         ei->i_flags = le32_to_cpu(raw_inode->i_flags);
4962         ext4_set_inode_flags(inode);
4963         inode->i_blocks = ext4_inode_blocks(raw_inode, ei);
4964         ei->i_file_acl = le32_to_cpu(raw_inode->i_file_acl_lo);
4965         if (ext4_has_feature_64bit(sb))
4966                 ei->i_file_acl |=
4967                         ((__u64)le16_to_cpu(raw_inode->i_file_acl_high)) << 32;
4968         inode->i_size = ext4_isize(sb, raw_inode);
4969         if ((size = i_size_read(inode)) < 0) {
4970                 ext4_error_inode(inode, function, line, 0,
4971                                  "iget: bad i_size value: %lld", size);
4972                 ret = -EFSCORRUPTED;
4973                 goto bad_inode;
4974         }
4975         /*
4976          * If dir_index is not enabled but there's a dir with the INDEX flag
4977          * set, we'd normally treat the htree data as empty space. But with
4978          * metadata checksumming that would corrupt the checksums, so forbid it.
4979          */
4980         if (!ext4_has_feature_dir_index(sb) && ext4_has_metadata_csum(sb) &&
4981             ext4_test_inode_flag(inode, EXT4_INODE_INDEX)) {
4982                 ext4_error_inode(inode, function, line, 0,
4983                          "iget: Dir with htree data on filesystem without dir_index feature.");
4984                 ret = -EFSCORRUPTED;
4985                 goto bad_inode;
4986         }
4987         ei->i_disksize = inode->i_size;
4988 #ifdef CONFIG_QUOTA
4989         ei->i_reserved_quota = 0;
4990 #endif
4991         inode->i_generation = le32_to_cpu(raw_inode->i_generation);
4992         ei->i_block_group = iloc.block_group;
4993         ei->i_last_alloc_group = ~0;
4994         /*
4995          * NOTE! The in-memory inode i_data array is in little-endian order
4996          * even on big-endian machines: we do NOT byteswap the block numbers!
4997          */
4998         for (block = 0; block < EXT4_N_BLOCKS; block++)
4999                 ei->i_data[block] = raw_inode->i_block[block];
5000         INIT_LIST_HEAD(&ei->i_orphan);
5001 
5002         /*
5003          * Set transaction id's of transactions that have to be committed
5004          * to finish f[data]sync. We set them to currently running transaction
5005          * as we cannot be sure that the inode or some of its metadata isn't
5006          * part of the transaction - the inode could have been reclaimed and
5007          * now it is reread from disk.
5008          */
5009         if (journal) {
5010                 transaction_t *transaction;
5011                 tid_t tid;
5012 
5013                 read_lock(&journal->j_state_lock);
5014                 if (journal->j_running_transaction)
5015                         transaction = journal->j_running_transaction;
5016                 else
5017                         transaction = journal->j_committing_transaction;
5018                 if (transaction)
5019                         tid = transaction->t_tid;
5020                 else
5021                         tid = journal->j_commit_sequence;
5022                 read_unlock(&journal->j_state_lock);
5023                 ei->i_sync_tid = tid;
5024                 ei->i_datasync_tid = tid;
5025         }
5026 
5027         if (EXT4_INODE_SIZE(inode->i_sb) > EXT4_GOOD_OLD_INODE_SIZE) {
5028                 if (ei->i_extra_isize == 0) {
5029                         /* The extra space is currently unused. Use it. */
5030                         BUILD_BUG_ON(sizeof(struct ext4_inode) & 3);
5031                         ei->i_extra_isize = sizeof(struct ext4_inode) -
5032                                             EXT4_GOOD_OLD_INODE_SIZE;
5033                 } else {
5034                         ret = ext4_iget_extra_inode(inode, raw_inode, ei);
5035                         if (ret)
5036                                 goto bad_inode;
5037                 }
5038         }
5039 
5040         EXT4_INODE_GET_XTIME(i_ctime, inode, raw_inode);
5041         EXT4_INODE_GET_XTIME(i_mtime, inode, raw_inode);
5042         EXT4_INODE_GET_XTIME(i_atime, inode, raw_inode);
5043         EXT4_EINODE_GET_XTIME(i_crtime, ei, raw_inode);
5044 
5045         if (likely(!test_opt2(inode->i_sb, HURD_COMPAT))) {
5046                 u64 ivers = le32_to_cpu(raw_inode->i_disk_version);
5047 
5048                 if (EXT4_INODE_SIZE(inode->i_sb) > EXT4_GOOD_OLD_INODE_SIZE) {
5049                         if (EXT4_FITS_IN_INODE(raw_inode, ei, i_version_hi))
5050                                 ivers |=
5051                     (__u64)(le32_to_cpu(raw_inode->i_version_hi)) << 32;
5052                 }
5053                 ext4_inode_set_iversion_queried(inode, ivers);
5054         }
5055 
5056         ret = 0;
5057         if (ei->i_file_acl &&
5058             !ext4_data_block_valid(EXT4_SB(sb), ei->i_file_acl, 1)) {
5059                 ext4_error_inode(inode, function, line, 0,
5060                                  "iget: bad extended attribute block %llu",
5061                                  ei->i_file_acl);
5062                 ret = -EFSCORRUPTED;
5063                 goto bad_inode;
5064         } else if (!ext4_has_inline_data(inode)) {
5065                 /* validate the block references in the inode */
5066                 if (S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
5067                    (S_ISLNK(inode->i_mode) &&
5068                     !ext4_inode_is_fast_symlink(inode))) {
5069                         if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))
5070                                 ret = ext4_ext_check_inode(inode);
5071                         else
5072                                 ret = ext4_ind_check_inode(inode);
5073                 }
5074         }
5075         if (ret)
5076                 goto bad_inode;
5077 
5078         if (S_ISREG(inode->i_mode)) {
5079                 inode->i_op = &ext4_file_inode_operations;
5080                 inode->i_fop = &ext4_file_operations;
5081                 ext4_set_aops(inode);
5082         } else if (S_ISDIR(inode->i_mode)) {
5083                 inode->i_op = &ext4_dir_inode_operations;
5084                 inode->i_fop = &ext4_dir_operations;
5085         } else if (S_ISLNK(inode->i_mode)) {
5086                 /* VFS does not allow setting these so must be corruption */
5087                 if (IS_APPEND(inode) || IS_IMMUTABLE(inode)) {
5088                         ext4_error_inode(inode, function, line, 0,
5089                                          "iget: immutable or append flags "
5090                                          "not allowed on symlinks");
5091                         ret = -EFSCORRUPTED;
5092                         goto bad_inode;
5093                 }
5094                 if (IS_ENCRYPTED(inode)) {
5095                         inode->i_op = &ext4_encrypted_symlink_inode_operations;
5096                         ext4_set_aops(inode);
5097                 } else if (ext4_inode_is_fast_symlink(inode)) {
5098                         inode->i_link = (char *)ei->i_data;
5099                         inode->i_op = &ext4_fast_symlink_inode_operations;
5100                         nd_terminate_link(ei->i_data, inode->i_size,
5101                                 sizeof(ei->i_data) - 1);
5102                 } else {
5103                         inode->i_op = &ext4_symlink_inode_operations;
5104                         ext4_set_aops(inode);
5105                 }
5106                 inode_nohighmem(inode);
5107         } else if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode) ||
5108               S_ISFIFO(inode->i_mode) || S_ISSOCK(inode->i_mode)) {
5109                 inode->i_op = &ext4_special_inode_operations;
5110                 if (raw_inode->i_block[0])
5111                         init_special_inode(inode, inode->i_mode,
5112                            old_decode_dev(le32_to_cpu(raw_inode->i_block[0])));
5113                 else
5114                         init_special_inode(inode, inode->i_mode,
5115                            new_decode_dev(le32_to_cpu(raw_inode->i_block[1])));
5116         } else if (ino == EXT4_BOOT_LOADER_INO) {
5117                 make_bad_inode(inode);
5118         } else {
5119                 ret = -EFSCORRUPTED;
5120                 ext4_error_inode(inode, function, line, 0,
5121                                  "iget: bogus i_mode (%o)", inode->i_mode);
5122                 goto bad_inode;
5123         }
5124         if (IS_CASEFOLDED(inode) && !ext4_has_feature_casefold(inode->i_sb))
5125                 ext4_error_inode(inode, function, line, 0,
5126                                  "casefold flag without casefold feature");
5127         brelse(iloc.bh);
5128 
5129         unlock_new_inode(inode);
5130         return inode;
5131 
5132 bad_inode:
5133         brelse(iloc.bh);
5134         iget_failed(inode);
5135         return ERR_PTR(ret);
5136 }
5137 
5138 static int ext4_inode_blocks_set(handle_t *handle,
5139                                 struct ext4_inode *raw_inode,
5140                                 struct ext4_inode_info *ei)
5141 {
5142         struct inode *inode = &(ei->vfs_inode);
5143         u64 i_blocks = READ_ONCE(inode->i_blocks);
5144         struct super_block *sb = inode->i_sb;
5145 
5146         if (i_blocks <= ~0U) {
5147                 /*
5148                  * i_blocks can be represented in a 32 bit variable
5149                  * as multiple of 512 bytes
5150                  */
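                /*
                 * i.e. this covers up to 2^32 * 512 bytes = 2 TiB of
                 * allocated blocks before the 48-bit form is needed.
                 */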
5151                 raw_inode->i_blocks_lo   = cpu_to_le32(i_blocks);
5152                 raw_inode->i_blocks_high = 0;
5153                 ext4_clear_inode_flag(inode, EXT4_INODE_HUGE_FILE);
5154                 return 0;
5155         }
5156         if (!ext4_has_feature_huge_file(sb))
5157                 return -EFBIG;
5158 
5159         if (i_blocks <= 0xffffffffffffULL) {
5160                 /*
5161                  * i_blocks can be represented in a 48 bit variable
5162                  * as multiple of 512 bytes
5163                  */
5164                 raw_inode->i_blocks_lo   = cpu_to_le32(i_blocks);
5165                 raw_inode->i_blocks_high = cpu_to_le16(i_blocks >> 32);
5166                 ext4_clear_inode_flag(inode, EXT4_INODE_HUGE_FILE);
5167         } else {
5168                 ext4_set_inode_flag(inode, EXT4_INODE_HUGE_FILE);
5169                 /* i_blocks is stored in units of the file system block size */
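                /*
                 * e.g. with 4k blocks (i_blkbits == 12) the 512-byte unit
                 * count below is shifted right by 3.
                 */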
5170                 i_blocks = i_blocks >> (inode->i_blkbits - 9);
5171                 raw_inode->i_blocks_lo   = cpu_to_le32(i_blocks);
5172                 raw_inode->i_blocks_high = cpu_to_le16(i_blocks >> 32);
5173         }
5174         return 0;
5175 }
5176 
5177 struct other_inode {
5178         unsigned long           orig_ino;
5179         struct ext4_inode       *raw_inode;
5180 };
5181 
5182 static int other_inode_match(struct inode * inode, unsigned long ino,
5183                              void *data)
5184 {
5185         struct other_inode *oi = (struct other_inode *) data;
5186 
5187         if ((inode->i_ino != ino) ||
5188             (inode->i_state & (I_FREEING | I_WILL_FREE | I_NEW |
5189                                I_DIRTY_INODE)) ||
5190             ((inode->i_state & I_DIRTY_TIME) == 0))
5191                 return 0;
5192         spin_lock(&inode->i_lock);
5193         if (((inode->i_state & (I_FREEING | I_WILL_FREE | I_NEW |
5194                                 I_DIRTY_INODE)) == 0) &&
5195             (inode->i_state & I_DIRTY_TIME)) {
5196                 struct ext4_inode_info  *ei = EXT4_I(inode);
5197 
5198                 inode->i_state &= ~(I_DIRTY_TIME | I_DIRTY_TIME_EXPIRED);
5199                 spin_unlock(&inode->i_lock);
5200 
5201                 spin_lock(&ei->i_raw_lock);
5202                 EXT4_INODE_SET_XTIME(i_ctime, inode, oi->raw_inode);
5203                 EXT4_INODE_SET_XTIME(i_mtime, inode, oi->raw_inode);
5204                 EXT4_INODE_SET_XTIME(i_atime, inode, oi->raw_inode);
5205                 ext4_inode_csum_set(inode, oi->raw_inode, ei);
5206                 spin_unlock(&ei->i_raw_lock);
5207                 trace_ext4_other_inode_update_time(inode, oi->orig_ino);
5208                 return -1;
5209         }
5210         spin_unlock(&inode->i_lock);
5211         return -1;
5212 }
5213 
5214 /*
5215  * Opportunistically update the other time fields for other inodes in
5216  * the same inode table block.
5217  */
5218 static void ext4_update_other_inodes_time(struct super_block *sb,
5219                                           unsigned long orig_ino, char *buf)
5220 {
5221         struct other_inode oi;
5222         unsigned long ino;
5223         int i, inodes_per_block = EXT4_SB(sb)->s_inodes_per_block;
5224         int inode_size = EXT4_INODE_SIZE(sb);
5225 
5226         oi.orig_ino = orig_ino;
5227         /*
5228          * Calculate the first inode in the inode table block.  Inode
5229          * numbers are one-based.  That is, the first inode in a block
5230          * (assuming 4k blocks and 256 byte inodes) is (n*16 + 1).
5231          */
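        /*
         * For example, with 16 inodes per block, orig_ino == 19 gives
         * ino = ((19 - 1) & ~15) + 1 == 17, the first inode of that block.
         */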
5232         ino = ((orig_ino - 1) & ~(inodes_per_block - 1)) + 1;
5233         for (i = 0; i < inodes_per_block; i++, ino++, buf += inode_size) {
5234                 if (ino == orig_ino)
5235                         continue;
5236                 oi.raw_inode = (struct ext4_inode *) buf;
5237                 (void) find_inode_nowait(sb, ino, other_inode_match, &oi);
5238         }
5239 }
5240 
5241 /*
5242  * Post the struct inode info into an on-disk inode location in the
5243  * buffer-cache.  This gobbles the caller's reference to the
5244  * buffer_head in the inode location struct.
5245  *
5246  * The caller must have write access to iloc->bh.
5247  */
5248 static int ext4_do_update_inode(handle_t *handle,
5249                                 struct inode *inode,
5250                                 struct ext4_iloc *iloc)
5251 {
5252         struct ext4_inode *raw_inode = ext4_raw_inode(iloc);
5253         struct ext4_inode_info *ei = EXT4_I(inode);
5254         struct buffer_head *bh = iloc->bh;
5255         struct super_block *sb = inode->i_sb;
5256         int err = 0, rc, block;
5257         int need_datasync = 0, set_large_file = 0;
5258         uid_t i_uid;
5259         gid_t i_gid;
5260         projid_t i_projid;
5261 
5262         spin_lock(&ei->i_raw_lock);
5263 
5264         /* For fields not tracked in the in-memory inode,
5265          * initialise them to zero for new inodes. */
5266         if (ext4_test_inode_state(inode, EXT4_STATE_NEW))
5267                 memset(raw_inode, 0, EXT4_SB(inode->i_sb)->s_inode_size);
5268 
5269         raw_inode->i_mode = cpu_to_le16(inode->i_mode);
5270         i_uid = i_uid_read(inode);
5271         i_gid = i_gid_read(inode);
5272         i_projid = from_kprojid(&init_user_ns, ei->i_projid);
5273         if (!(test_opt(inode->i_sb, NO_UID32))) {
5274                 raw_inode->i_uid_low = cpu_to_le16(low_16_bits(i_uid));
5275                 raw_inode->i_gid_low = cpu_to_le16(low_16_bits(i_gid));
5276 /*
5277  * Fix up interoperability with old kernels. Otherwise, old inodes get
5278  * re-used with the upper 16 bits of the uid/gid intact
5279  */
5280                 if (ei->i_dtime && list_empty(&ei->i_orphan)) {
5281                         raw_inode->i_uid_high = 0;
5282                         raw_inode->i_gid_high = 0;
5283                 } else {
5284                         raw_inode->i_uid_high =
5285                                 cpu_to_le16(high_16_bits(i_uid));
5286                         raw_inode->i_gid_high =
5287                                 cpu_to_le16(high_16_bits(i_gid));
5288                 }
5289         } else {
5290                 raw_inode->i_uid_low = cpu_to_le16(fs_high2lowuid(i_uid));
5291                 raw_inode->i_gid_low = cpu_to_le16(fs_high2lowgid(i_gid));
5292                 raw_inode->i_uid_high = 0;
5293                 raw_inode->i_gid_high = 0;
5294         }
5295         raw_inode->i_links_count = cpu_to_le16(inode->i_nlink);
5296 
5297         EXT4_INODE_SET_XTIME(i_ctime, inode, raw_inode);
5298         EXT4_INODE_SET_XTIME(i_mtime, inode, raw_inode);
5299         EXT4_INODE_SET_XTIME(i_atime, inode, raw_inode);
5300         EXT4_EINODE_SET_XTIME(i_crtime, ei, raw_inode);
5301 
5302         err = ext4_inode_blocks_set(handle, raw_inode, ei);
5303         if (err) {
5304                 spin_unlock(&ei->i_raw_lock);
5305                 goto out_brelse;
5306         }
5307         raw_inode->i_dtime = cpu_to_le32(ei->i_dtime);
5308         raw_inode->i_flags = cpu_to_le32(ei->i_flags & 0xFFFFFFFF);
5309         if (likely(!test_opt2(inode->i_sb, HURD_COMPAT)))
5310                 raw_inode->i_file_acl_high =
5311                         cpu_to_le16(ei->i_file_acl >> 32);
5312         raw_inode->i_file_acl_lo = cpu_to_le32(ei->i_file_acl);
5313         if (ei->i_disksize != ext4_isize(inode->i_sb, raw_inode)) {
5314                 ext4_isize_set(raw_inode, ei->i_disksize);
5315                 need_datasync = 1;
5316         }
5317         if (ei->i_disksize > 0x7fffffffULL) {
5318                 if (!ext4_has_feature_large_file(sb) ||
5319                                 EXT4_SB(sb)->s_es->s_rev_level ==
5320                     cpu_to_le32(EXT4_GOOD_OLD_REV))
5321                         set_large_file = 1;
5322         }
5323         raw_inode->i_generation = cpu_to_le32(inode->i_generation);
5324         if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode)) {
5325                 if (old_valid_dev(inode->i_rdev)) {
5326                         raw_inode->i_block[0] =
5327                                 cpu_to_le32(old_encode_dev(inode->i_rdev));
5328                         raw_inode->i_block[1] = 0;
5329                 } else {
5330                         raw_inode->i_block[0] = 0;
5331                         raw_inode->i_block[1] =
5332                                 cpu_to_le32(new_encode_dev(inode->i_rdev));
5333                         raw_inode->i_block[2] = 0;
5334                 }
5335         } else if (!ext4_has_inline_data(inode)) {
5336                 for (block = 0; block < EXT4_N_BLOCKS; block++)
5337                         raw_inode->i_block[block] = ei->i_data[block];
5338         }
5339 
5340         if (likely(!test_opt2(inode->i_sb, HURD_COMPAT))) {
5341                 u64 ivers = ext4_inode_peek_iversion(inode);
5342 
5343                 raw_inode->i_disk_version = cpu_to_le32(ivers);
5344                 if (ei->i_extra_isize) {
5345                         if (EXT4_FITS_IN_INODE(raw_inode, ei, i_version_hi))
5346                                 raw_inode->i_version_hi =
5347                                         cpu_to_le32(ivers >> 32);
5348                         raw_inode->i_extra_isize =
5349                                 cpu_to_le16(ei->i_extra_isize);
5350                 }
5351         }
5352 
5353         BUG_ON(!ext4_has_feature_project(inode->i_sb) &&
5354                i_projid != EXT4_DEF_PROJID);
5355 
5356         if (EXT4_INODE_SIZE(inode->i_sb) > EXT4_GOOD_OLD_INODE_SIZE &&
5357             EXT4_FITS_IN_INODE(raw_inode, ei, i_projid))
5358                 raw_inode->i_projid = cpu_to_le32(i_projid);
5359 
5360         ext4_inode_csum_set(inode, raw_inode, ei);
5361         spin_unlock(&ei->i_raw_lock);
5362         if (inode->i_sb->s_flags & SB_LAZYTIME)
5363                 ext4_update_other_inodes_time(inode->i_sb, inode->i_ino,
5364                                               bh->b_data);
5365 
5366         BUFFER_TRACE(bh, "call ext4_handle_dirty_metadata");
5367         rc = ext4_handle_dirty_metadata(handle, NULL, bh);
5368         if (!err)
5369                 err = rc;
5370         ext4_clear_inode_state(inode, EXT4_STATE_NEW);
5371         if (set_large_file) {
5372                 BUFFER_TRACE(EXT4_SB(sb)->s_sbh, "get write access");
5373                 err = ext4_journal_get_write_access(handle, EXT4_SB(sb)->s_sbh);
5374                 if (err)
5375                         goto out_brelse;
5376                 ext4_set_feature_large_file(sb);
5377                 ext4_handle_sync(handle);
5378                 err = ext4_handle_dirty_super(handle, sb);
5379         }
5380         ext4_update_inode_fsync_trans(handle, inode, need_datasync);
5381 out_brelse:
5382         brelse(bh);
5383         ext4_std_error(inode->i_sb, err);
5384         return err;
5385 }
5386 
5387 /*
5388  * ext4_write_inode()
5389  *
5390  * We are called from a few places:
5391  *
5392  * - Within generic_file_aio_write() -> generic_write_sync() for O_SYNC files.
5393  *   Here, there will be no transaction running. We wait for any running
5394  *   transaction to commit.
5395  *
5396  * - Within flush work (sys_sync(), kupdate and such).
5397  *   We wait on commit, if told to.
5398  *
5399  * - Within iput_final() -> write_inode_now()
5400  *   We wait on commit, if told to.
5401  *
5402  * In all cases it is actually safe for us to return without doing anything,
5403  * because the inode has been copied into a raw inode buffer in
5404  * ext4_mark_inode_dirty().  This is a correctness thing for WB_SYNC_ALL
5405  * writeback.
5406  *
5407  * Note that we are absolutely dependent upon all inode dirtiers doing the
5408  * right thing: they *must* call mark_inode_dirty() after dirtying info in
5409  * which we are interested.
5410  *
5411  * It would be a bug for them to not do this.  The code:
5412  *
5413  *      mark_inode_dirty(inode)
5414  *      stuff();
5415  *      inode->i_size = expr;
5416  *
5417  * is in error because write_inode() could occur while `stuff()' is running,
5418  * and the new i_size will be lost.  Plus the inode will no longer be on the
5419  * superblock's dirty inode list.
5420  */
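/*
 * Below, the journal case only waits for the transaction recorded in
 * i_sync_tid to commit; the no-journal case writes the inode's buffer
 * directly via its inode location.
 */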
5421 int ext4_write_inode(struct inode *inode, struct writeback_control *wbc)
5422 {
5423         int err;
5424 
5425         if (WARN_ON_ONCE(current->flags & PF_MEMALLOC) ||
5426             sb_rdonly(inode->i_sb))
5427                 return 0;
5428 
5429         if (unlikely(ext4_forced_shutdown(EXT4_SB(inode->i_sb))))
5430                 return -EIO;
5431 
5432         if (EXT4_SB(inode->i_sb)->s_journal) {
5433                 if (ext4_journal_current_handle()) {
5434                         jbd_debug(1, "called recursively, non-PF_MEMALLOC!\n");
5435                         dump_stack();
5436                         return -EIO;
5437                 }
5438 
5439                 /*
5440                  * No need to force transaction in WB_SYNC_NONE mode. Also
5441                  * ext4_sync_fs() will force the commit after everything is
5442                  * written.
5443                  */
5444                 if (wbc->sync_mode != WB_SYNC_ALL || wbc->for_sync)
5445                         return 0;
5446 
5447                 err = jbd2_complete_transaction(EXT4_SB(inode->i_sb)->s_journal,
5448                                                 EXT4_I(inode)->i_sync_tid);
5449         } else {
5450                 struct ext4_iloc iloc;
5451 
5452                 err = __ext4_get_inode_loc(inode, &iloc, 0);
5453                 if (err)
5454                         return err;
5455                 /*
5456                  * sync(2) will flush the whole buffer cache. No need to do
5457                  * it here separately for each inode.
5458                  */
5459                 if (wbc->sync_mode == WB_SYNC_ALL && !wbc->for_sync)
5460                         sync_dirty_buffer(iloc.bh);
5461                 if (buffer_req(iloc.bh) && !buffer_uptodate(iloc.bh)) {
5462                         EXT4_ERROR_INODE_BLOCK(inode, iloc.bh->b_blocknr,
5463                                          "IO error syncing inode");
5464                         err = -EIO;
5465                 }
5466                 brelse(iloc.bh);
5467         }
5468         return err;
5469 }
5470 
5471 /*
5472  * In data=journal mode ext4_journalled_invalidatepage() may fail to invalidate
5473  * buffers that are attached to a page straddling i_size and are undergoing
5474  * commit. In that case we have to wait for commit to finish and try again.
5475  */
5476 static void ext4_wait_for_tail_page_commit(struct inode *inode)
5477 {
5478         struct page *page;
5479         unsigned offset;
5480         journal_t *journal = EXT4_SB(inode->i_sb)->s_journal;
5481         tid_t commit_tid = 0;
5482         int ret;
5483 
5484         offset = inode->i_size & (PAGE_SIZE - 1);
5485         /*
5486          * If the page is fully truncated, we don't need to wait for any commit
5487          * (and we should not even try, as __ext4_journalled_invalidatepage() may
5488          * strip all buffers from the page but keep the page dirty, which can then
5489          * confuse e.g. concurrent ext4_writepage() seeing dirty page without
5490          * buffers). Also we don't need to wait for any commit if all buffers in
5491          * the page remain valid. This is most beneficial for the common case of
5492          * blocksize == PAGESIZE.
5493          */
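        /*
         * For example, with a 4k page and 1k blocks we only wait when
         * offset is 1..3072; offset 0 (page-aligned i_size) and offsets
         * 3073..4095 (i_size inside the page's last block) are skipped
         * by the check below.
         */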
5494         if (!offset || offset > (PAGE_SIZE - i_blocksize(inode)))
5495                 return;
5496         while (1) {
5497                 page = find_lock_page(inode->i_mapping,
5498                                       inode->i_size >> PAGE_SHIFT);
5499                 if (!page)
5500                         return;
5501                 ret = __ext4_journalled_invalidatepage(page, offset,
5502                                                 PAGE_SIZE - offset);
5503                 unlock_page(page);
5504                 put_page(page);
5505                 if (ret != -EBUSY)
5506                         return;
5507                 commit_tid = 0;
5508                 read_lock(&journal->j_state_lock);
5509                 if (journal->j_committing_transaction)
5510                         commit_tid = journal->j_committing_transaction->t_tid;
5511                 read_unlock(&journal->j_state_lock);
5512                 if (commit_tid)
5513                         jbd2_log_wait_commit(journal, commit_tid);
5514         }
5515 }
5516 
5517 /*
5518  * ext4_setattr()
5519  *
5520  * Called from notify_change.
5521  *
5522  * We want to trap VFS attempts to truncate the file as soon as
5523  * possible.  In particular, we want to make sure that when the VFS
5524  * shrinks i_size, we put the inode on the orphan list and modify
5525  * i_disksize immediately, so that during the subsequent flushing of
5526  * dirty pages and freeing of disk blocks, we can guarantee that any
5527  * commit will leave the blocks being flushed in an unused state on
5528  * disk.  (On recovery, the inode will get truncated and the blocks will
5529  * be freed, so we have a strong guarantee that no future commit will
5530  * leave these blocks visible to the user.)
5531  *
5532  * Another thing we have to assure is that if we are in ordered mode
5533  * and the inode is still attached to the committing transaction, we must
5534  * start writeout of all the dirty pages which are being truncated.
5535  * This way we are sure that all the data written in the previous
5536  * transaction are already on disk (truncate waits for pages under
5537  * writeback).
5538  *
5539  * Called with inode->i_mutex down.
5540  */
5541 int ext4_setattr(struct dentry *dentry, struct iattr *attr)
5542 {
5543         struct inode *inode = d_inode(dentry);
5544         int error, rc = 0;
5545         int orphan = 0;
5546         const unsigned int ia_valid = attr->ia_valid;
5547 
5548         if (unlikely(ext4_forced_shutdown(EXT4_SB(inode->i_sb))))
5549                 return -EIO;
5550 
5551         if (unlikely(IS_IMMUTABLE(inode)))
5552                 return -EPERM;
5553 
5554         if (unlikely(IS_APPEND(inode) &&
5555                      (ia_valid & (ATTR_MODE | ATTR_UID |
5556                                   ATTR_GID | ATTR_TIMES_SET))))
5557                 return -EPERM;
5558 
5559         error = setattr_prepare(dentry, attr);
5560         if (error)
5561                 return error;
5562 
5563         error = fscrypt_prepare_setattr(dentry, attr);
5564         if (error)
5565                 return error;
5566 
5567         error = fsverity_prepare_setattr(dentry, attr);
5568         if (error)
5569                 return error;
5570 
5571         if (is_quota_modification(inode, attr)) {
5572                 error = dquot_initialize(inode);
5573                 if (error)
5574                         return error;
5575         }
5576         if ((ia_valid & ATTR_UID && !uid_eq(attr->ia_uid, inode->i_uid)) ||
5577             (ia_valid & ATTR_GID && !gid_eq(attr->ia_gid, inode->i_gid))) {
5578                 handle_t *handle;
5579 
5580                 /* (user+group)*(old+new) structure, inode write (sb,
5581                  * inode block, ? - but truncate inode update has it) */
5582                 handle = ext4_journal_start(inode, EXT4_HT_QUOTA,
5583                         (EXT4_MAXQUOTAS_INIT_BLOCKS(inode->i_sb) +
5584                          EXT4_MAXQUOTAS_DEL_BLOCKS(inode->i_sb)) + 3);
5585                 if (IS_ERR(handle)) {
5586                         error = PTR_ERR(handle);
5587                         goto err_out;
5588                 }
5589 
5590                 /* dquot_transfer() calls back ext4_get_inode_usage() which
5591                  * counts xattr inode references.
5592                  */
5593                 down_read(&EXT4_I(inode)->xattr_sem);
5594                 error = dquot_transfer(inode, attr);
5595                 up_read(&EXT4_I(inode)->xattr_sem);
5596 
5597                 if (error) {
5598                         ext4_journal_stop(handle);
5599                         return error;
5600                 }
5601                 /* Update corresponding info in inode so that everything is in
5602                  * one transaction */
5603                 if (attr->ia_valid & ATTR_UID)
5604                         inode->i_uid = attr->ia_uid;
5605                 if (attr->ia_valid & ATTR_GID)
5606                         inode->i_gid = attr->ia_gid;
5607                 error = ext4_mark_inode_dirty(handle, inode);
5608                 ext4_journal_stop(handle);
5609         }
5610 
5611         if (attr->ia_valid & ATTR_SIZE) {
5612                 handle_t *handle;
5613                 loff_t oldsize = inode->i_size;
5614                 int shrink = (attr->ia_size < inode->i_size);
5615 
5616                 if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))) {
5617                         struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
5618 
5619                         if (attr->ia_size > sbi->s_bitmap_maxbytes)
5620                                 return -EFBIG;
5621                 }
5622                 if (!S_ISREG(inode->i_mode))
5623                         return -EINVAL;
5624 
5625                 if (IS_I_VERSION(inode) && attr->ia_size != inode->i_size)
5626                         inode_inc_iversion(inode);
5627 
5628                 if (shrink) {
5629                         if (ext4_should_order_data(inode)) {
5630                                 error = ext4_begin_ordered_truncate(inode,
5631                                                             attr->ia_size);
5632                                 if (error)
5633                                         goto err_out;
5634                         }
5635                         /*
5636                          * Blocks are going to be removed from the inode. Wait
5637                          * for dio in flight.
5638                          */
5639                         inode_dio_wait(inode);
5640                 }
5641 
5642                 down_write(&EXT4_I(inode)->i_mmap_sem);
5643 
5644                 rc = ext4_break_layouts(inode);
5645                 if (rc) {
5646                         up_write(&EXT4_I(inode)->i_mmap_sem);
5647                         return rc;
5648                 }
5649 
5650                 if (attr->ia_size != inode->i_size) {
5651                         handle = ext4_journal_start(inode, EXT4_HT_INODE, 3);
5652                         if (IS_ERR(handle)) {
5653                                 error = PTR_ERR(handle);
5654                                 goto out_mmap_sem;
5655                         }
5656                         if (ext4_handle_valid(handle) && shrink) {
5657                                 error = ext4_orphan_add(handle, inode);
5658                                 orphan = 1;
5659                         }
5660                         /*
5661                          * Update c/mtime on truncate up, ext4_truncate() will
5662                          * update c/mtime in shrink case below
5663                          */
5664                         if (!shrink) {
5665                                 inode->i_mtime = current_time(inode);
5666                                 inode->i_ctime = inode->i_mtime;
5667                         }
5668                         down_write(&EXT4_I(inode)->i_data_sem);
5669                         EXT4_I(inode)->i_disksize = attr->ia_size;
5670                         rc = ext4_mark_inode_dirty(handle, inode);
5671                         if (!error)
5672                                 error = rc;
5673                         /*
5674                          * We have to update i_size under i_data_sem together
5675                          * with i_disksize to avoid races with writeback code
5676                          * running ext4_wb_update_i_disksize().
5677                          */
5678                         if (!error)
5679                                 i_size_write(inode, attr->ia_size);
5680                         up_write(&EXT4_I(inode)->i_data_sem);
5681                         ext4_journal_stop(handle);
5682                         if (error)
5683                                 goto out_mmap_sem;
5684                         if (!shrink) {
5685                                 pagecache_isize_extended(inode, oldsize,
5686                                                          inode->i_size);
5687                         } else if (ext4_should_journal_data(inode)) {
5688                                 ext4_wait_for_tail_page_commit(inode);
5689                         }
5690                 }
5691 
5692                 /*
5693                  * Truncate pagecache after we've waited for commit
5694                  * in data=journal mode to make pages freeable.
5695                  */
5696                 truncate_pagecache(inode, inode->i_size);
5697                 /*
5698                  * Call ext4_truncate() even if i_size didn't change to
5699                  * truncate possible preallocated blocks.
5700                  */
5701                 if (attr->ia_size <= oldsize) {
5702                         rc = ext4_truncate(inode);
5703                         if (rc)
5704                                 error = rc;
5705                 }
5706 out_mmap_sem:
5707                 up_write(&EXT4_I(inode)->i_mmap_sem);
5708         }
5709 
5710         if (!error) {
5711                 setattr_copy(inode, attr);
5712                 mark_inode_dirty(inode);
5713         }
5714 
5715         /*
5716          * If the call to ext4_truncate failed to get a transaction handle at
5717          * all, we need to clean up the in-core orphan list manually.
5718          */
5719         if (orphan && inode->i_nlink)
5720                 ext4_orphan_del(NULL, inode);
5721 
5722         if (!error && (ia_valid & ATTR_MODE))
5723                 rc = posix_acl_chmod(inode, inode->i_mode);
5724 
5725 err_out:
5726         ext4_std_error(inode->i_sb, error);
5727         if (!error)
5728                 error = rc;
5729         return error;
5730 }
5731 
5732 int ext4_getattr(const struct path *path, struct kstat *stat,
5733                  u32 request_mask, unsigned int query_flags)
5734 {
5735         struct inode *inode = d_inode(path->dentry);
5736         struct ext4_inode *raw_inode;
5737         struct ext4_inode_info *ei = EXT4_I(inode);
5738         unsigned int flags;
5739 
5740         if (EXT4_FITS_IN_INODE(raw_inode, ei, i_crtime)) {
5741                 stat->result_mask |= STATX_BTIME;
5742                 stat->btime.tv_sec = ei->i_crtime.tv_sec;
5743                 stat->btime.tv_nsec = ei->i_crtime.tv_nsec;
5744         }
5745 
5746         flags = ei->i_flags & EXT4_FL_USER_VISIBLE;
5747         if (flags & EXT4_APPEND_FL)
5748                 stat->attributes |= STATX_ATTR_APPEND;
5749         if (flags & EXT4_COMPR_FL)
5750                 stat->attributes |= STATX_ATTR_COMPRESSED;
5751         if (flags & EXT4_ENCRYPT_FL)
5752                 stat->attributes |= STATX_ATTR_ENCRYPTED;
5753         if (flags & EXT4_IMMUTABLE_FL)
5754                 stat->attributes |= STATX_ATTR_IMMUTABLE;
5755         if (flags & EXT4_NODUMP_FL)
5756                 stat->attributes |= STATX_ATTR_NODUMP;
5757 
5758         stat->attributes_mask |= (STATX_ATTR_APPEND |
5759                                   STATX_ATTR_COMPRESSED |
5760                                   STATX_ATTR_ENCRYPTED |
5761                                   STATX_ATTR_IMMUTABLE |
5762                                   STATX_ATTR_NODUMP);
5763 
5764         generic_fillattr(inode, stat);
5765         return 0;
5766 }
5767 
5768 int ext4_file_getattr(const struct path *path, struct kstat *stat,
5769                       u32 request_mask, unsigned int query_flags)
5770 {
5771         struct inode *inode = d_inode(path->dentry);
5772         u64 delalloc_blocks;
5773 
5774         ext4_getattr(path, stat, request_mask, query_flags);
5775 
5776         /*
5777          * If there is inline data in the inode, the inode will normally not
5778          * have data blocks allocated (it may have an external xattr block).
5779          * Report at least one sector for such files, so tools like tar, rsync,
5780          * others don't incorrectly think the file is completely sparse.
5781          */
5782         if (unlikely(ext4_has_inline_data(inode)))
5783                 stat->blocks += (stat->size + 511) >> 9;
5784 
5785         /*
5786          * We can't update i_blocks if the block allocation is delayed
5787          * otherwise in the case of system crash before the real block
5788          * allocation is done, we will have i_blocks inconsistent with
5789          * on-disk file blocks.
5790          * We always keep i_blocks updated together with real
5791          * allocation. But in order not to confuse userspace, stat
5792          * will return the blocks that include the delayed allocation
5793          * blocks for this file.
5794          */
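        /*
         * For example, with 4k blocks and no bigalloc each reserved
         * cluster is one block, reported below as 8 512-byte sectors
         * (<< (blocksize_bits - 9), i.e. << 3).
         */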
5795         delalloc_blocks = EXT4_C2B(EXT4_SB(inode->i_sb),
5796                                    EXT4_I(inode)->i_reserved_data_blocks);
5797         stat->blocks += delalloc_blocks << (inode->i_sb->s_blocksize_bits - 9);
5798         return 0;
5799 }
5800 
5801 static int ext4_index_trans_blocks(struct inode *inode, int lblocks,
5802                                    int pextents)
5803 {
5804         if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)))
5805                 return ext4_ind_trans_blocks(inode, lblocks);
5806         return ext4_ext_index_trans_blocks(inode, pextents);
5807 }
5808 
5809 /*
5810  * Account for index blocks, block group bitmaps and block group
5811  * descriptor blocks when modifying data blocks and index blocks.
5812  * In the worst case, the index blocks spread over different block groups.
5813  *
5814  * If data blocks are discontiguous, they may spread over different
5815  * block groups too. Even if they are contiguous, with flexbg they
5816  * could still cross a block group boundary.
5817  *
5818  * Also account for superblock, inode, quota and xattr blocks.
5819  */
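/*
 * Read together, the accounting below amounts to:
 *   credits = idxblocks
 *           + min(idxblocks + pextents, ngroups)      (bitmap blocks)
 *           + min(idxblocks + pextents, s_gdb_count)  (descriptor blocks)
 *           + EXT4_META_TRANS_BLOCKS(sb)
 */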
5820 static int ext4_meta_trans_blocks(struct inode *inode, int lblocks,
5821                                   int pextents)
5822 {
5823         ext4_group_t groups, ngroups = ext4_get_groups_count(inode->i_sb);
5824         int gdpblocks;
5825         int idxblocks;
5826         int ret = 0;
5827 
5828         /*
5829          * How many index blocks do we need to touch to map @lblocks
5830          * logical blocks to @pextents physical extents?
5831          */
5832         idxblocks = ext4_index_trans_blocks(inode, lblocks, pextents);
5833 
5834         ret = idxblocks;
5835 
5836         /*
5837          * Now let's see how many group bitmap and group descriptor
5838          * blocks we need to account for.
5839          */
5840         groups = idxblocks + pextents;
5841         gdpblocks = groups;
5842         if (groups > ngroups)
5843                 groups = ngroups;
5844         if (groups > EXT4_SB(inode->i_sb)->s_gdb_count)
5845                 gdpblocks = EXT4_SB(inode->i_sb)->s_gdb_count;
5846 
5847         /* bitmaps and block group descriptor blocks */
5848         ret += groups + gdpblocks;
5849 
5850         /* Blocks for super block, inode, quota and xattr blocks */
5851         ret += EXT4_META_TRANS_BLOCKS(inode->i_sb);
5852 
5853         return ret;
5854 }
5855 
5856 /*
5857  * Calculate the total number of credits to reserve to fit
5858  * the modification of a single page into a single transaction,
5859  * which may include multiple chunks of block allocations.
5860  *
5861  * This could be called via ext4_write_begin()
5862  *
5863  * We need to consider the worst case, when we allocate
5864  * one new block per extent.
5865  */
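/*
 * For example, with 4k pages, 1k blocks and a journal present,
 * ext4_journal_blocks_per_page() is 4, so we ask for
 * ext4_meta_trans_blocks(inode, 4, 4) credits, plus 4 more data block
 * credits in data=journal mode.
 */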
5866 int ext4_writepage_trans_blocks(struct inode *inode)
5867 {
5868         int bpp = ext4_journal_blocks_per_page(inode);
5869         int ret;
5870 
5871         ret = ext4_meta_trans_blocks(inode, bpp, bpp);
5872 
5873         /* Account for data blocks for journalled mode */
5874         if (ext4_should_journal_data(inode))
5875                 ret += bpp;
5876         return ret;
5877 }
5878 
5879 /*
5880  * Calculate the journal credits for a chunk of data modification.
5881  *
5882  * This is called from DIO, fallocate or whoever calls
5883  * ext4_map_blocks() to map/allocate a chunk of contiguous disk blocks.
5884  *
5885  * journal buffers for data blocks are not included here, as DIO
5886  * and fallocate do not need to journal data buffers.
5887  */
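/*
 * The chunk is treated as a single physical extent, hence pextents == 1
 * in the call below.
 */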
5888 int ext4_chunk_trans_blocks(struct inode *inode, int nrblocks)
5889 {
5890         return ext4_meta_trans_blocks(inode, nrblocks, 1);
5891 }
5892 
5893 /*
5894  * The caller must have previously called ext4_reserve_inode_write().
5895  * Given this, we know that the caller already has write access to iloc->bh.
5896  */
5897 int ext4_mark_iloc_dirty(handle_t *handle,
5898                          struct inode *inode, struct ext4_iloc *iloc)
5899 {
5900         int err = 0;
5901 
5902         if (unlikely(ext4_forced_shutdown(EXT4_SB(inode->i_sb)))) {
5903                 put_bh(iloc->bh);
5904                 return -EIO;
5905         }
5906         if (IS_I_VERSION(inode))
5907                 inode_inc_iversion(inode);
5908 
5909         /* the do_update_inode consumes one bh->b_count */
5910         get_bh(iloc->bh);
5911 
5912         /* ext4_do_update_inode() does jbd2_journal_dirty_metadata */
5913         err = ext4_do_update_inode(handle, inode, iloc);
5914         put_bh(iloc->bh);
5915         return err;
5916 }
5917 
5918 /*
5919  * On success, we end up with an outstanding reference count against
5920  * iloc->bh.  This _must_ be cleaned up later.
5921  */
5922 
5923 int
5924 ext4_reserve_inode_write(handle_t *handle, struct inode *inode,
5925                          struct ext4_iloc *iloc)
5926 {
5927         int err;
5928 
5929         if (unlikely(ext4_forced_shutdown(EXT4_SB(inode->i_sb))))
5930                 return -EIO;
5931 
5932         err = ext4_get_inode_loc(inode, iloc);
5933         if (!err) {
5934                 BUFFER_TRACE(iloc->bh, "get_write_access");
5935                 err = ext4_journal_get_write_access(handle, iloc->bh);
5936                 if (err) {
5937                         brelse(iloc->bh);
5938                         iloc->bh = NULL;
5939                 }
5940         }
5941         ext4_std_error(inode->i_sb, err);
5942         return err;
5943 }
5944 
5945 static int __ext4_expand_extra_isize(struct inode *inode,
5946                                      unsigned int new_extra_isize,
5947                                      struct ext4_iloc *iloc,
5948                                      handle_t *handle, int *no_expand)
5949 {
5950         struct ext4_inode *raw_inode;
5951         struct ext4_xattr_ibody_header *header;
5952         unsigned int inode_size = EXT4_INODE_SIZE(inode->i_sb);
5953         struct ext4_inode_info *ei = EXT4_I(inode);
5954         int error;
5955 
5956         /* this was checked at iget time, but double check for good measure */
5957         if ((EXT4_GOOD_OLD_INODE_SIZE + ei->i_extra_isize > inode_size) ||
5958             (ei->i_extra_isize & 3)) {
5959                 EXT4_ERROR_INODE(inode, "bad extra_isize %u (inode size %u)",
5960                                  ei->i_extra_isize,
5961                                  EXT4_INODE_SIZE(inode->i_sb));
5962                 return -EFSCORRUPTED;
5963         }
5964         if ((new_extra_isize < ei->i_extra_isize) ||
5965             (new_extra_isize < 4) ||
5966             (new_extra_isize > inode_size - EXT4_GOOD_OLD_INODE_SIZE))
5967                 return -EINVAL; /* Should never happen */
5968 
5969         raw_inode = ext4_raw_inode(iloc);
5970 
5971         header = IHDR(inode, raw_inode);
5972 
5973         /* No extended attributes present */
5974         if (!ext4_test_inode_state(inode, EXT4_STATE_XATTR) ||
5975             header->h_magic != cpu_to_le32(EXT4_XATTR_MAGIC)) {
5976                 memset((void *)raw_inode + EXT4_GOOD_OLD_INODE_SIZE +
5977                        EXT4_I(inode)->i_extra_isize, 0,
5978                        new_extra_isize - EXT4_I(inode)->i_extra_isize);
5979                 EXT4_I(inode)->i_extra_isize = new_extra_isize;
5980                 return 0;
5981         }
5982 
5983         /* try to expand with EAs present */
5984         error = ext4_expand_extra_isize_ea(inode, new_extra_isize,
5985                                            raw_inode, handle);
5986         if (error) {
5987                 /*
5988                  * Inode size expansion failed; don't try again
5989                  */
5990                 *no_expand = 1;
5991         }
5992 
5993         return error;
5994 }
5995 
5996 /*
5997  * Expand an inode by new_extra_isize bytes.
5998  * Returns 0 on success or negative error number on failure.
5999  */
6000 static int ext4_try_to_expand_extra_isize(struct inode *inode,
6001                                           unsigned int new_extra_isize,
6002                                           struct ext4_iloc iloc,
6003                                           handle_t *handle)
6004 {
6005         int no_expand;
6006         int error;
6007 
6008         if (ext4_test_inode_state(inode, EXT4_STATE_NO_EXPAND))
6009                 return -EOVERFLOW;
6010 
6011         /*
6012          * In nojournal mode, we can immediately attempt to expand
6013          * the inode.  When journaled, we first need to obtain extra
6014          * buffer credits since we may write into the EA block
6015          * with this same handle. If journal_extend fails, then it will
6016          * only result in a minor loss of functionality for that inode.
6017          * If this is felt to be critical, then e2fsck should be run to
6018          * force a large enough s_min_extra_isize.
6019          */
6020         if (ext4_handle_valid(handle) &&
6021             jbd2_journal_extend(handle,
6022                                 EXT4_DATA_TRANS_BLOCKS(inode->i_sb)) != 0)
6023                 return -ENOSPC;
6024 
6025         if (ext4_write_trylock_xattr(inode, &no_expand) == 0)
6026                 return -EBUSY;
6027 
6028         error = __ext4_expand_extra_isize(inode, new_extra_isize, &iloc,
6029                                           handle, &no_expand);
6030         ext4_write_unlock_xattr(inode, &no_expand);
6031 
6032         return error;
6033 }
6034 
6035 int ext4_expand_extra_isize(struct inode *inode,
6036                             unsigned int new_extra_isize,
6037                             struct ext4_iloc *iloc)
6038 {
6039         handle_t *handle;
6040         int no_expand;
6041         int error, rc;
6042 
6043         if (ext4_test_inode_state(inode, EXT4_STATE_NO_EXPAND)) {
6044                 brelse(iloc->bh);
6045                 return -EOVERFLOW;
6046         }
6047 
6048         handle = ext4_journal_start(inode, EXT4_HT_INODE,
6049                                     EXT4_DATA_TRANS_BLOCKS(inode->i_sb));
6050         if (IS_ERR(handle)) {
6051                 error = PTR_ERR(handle);
6052                 brelse(iloc->bh);
6053                 return error;
6054         }
6055 
6056         ext4_write_lock_xattr(inode, &no_expand);
6057 
6058         BUFFER_TRACE(iloc->bh, "get_write_access");
6059         error = ext4_journal_get_write_access(handle, iloc->bh);
6060         if (error) {
6061                 brelse(iloc->bh);
6062                 goto out_unlock;
6063         }
6064 
6065         error = __ext4_expand_extra_isize(inode, new_extra_isize, iloc,
6066                                           handle, &no_expand);
6067 
6068         rc = ext4_mark_iloc_dirty(handle, inode, iloc);
6069         if (!error)
6070                 error = rc;
6071 
6072 out_unlock:
6073         ext4_write_unlock_xattr(inode, &no_expand);
6074         ext4_journal_stop(handle);
6075         return error;
6076 }
6077 
6078 /*
6079  * What we do here is to mark the in-core inode as clean with respect to inode
6080  * dirtiness (it may still be data-dirty).
6081  * This means that the in-core inode may be reaped by prune_icache
6082  * without having to perform any I/O.  This is a very good thing,
6083  * because *any* task may call prune_icache - even ones which
6084  * have a transaction open against a different journal.
6085  *
6086  * Is this cheating?  Not really.  Sure, we haven't written the
6087  * inode out, but prune_icache isn't a user-visible syncing function.
6088  * Whenever the user wants stuff synced (sys_sync, sys_msync, sys_fsync)
6089  * we start and wait on commits.
6090  */
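/*
 * Typical usage mirrors ext4_dirty_inode() below: start a handle with
 * ext4_journal_start(), call ext4_mark_inode_dirty(), then stop the
 * handle with ext4_journal_stop().
 */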
6091 int ext4_mark_inode_dirty(handle_t *handle, struct inode *inode)
6092 {
6093         struct ext4_iloc iloc;
6094         struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
6095         int err;
6096 
6097         might_sleep();
6098         trace_ext4_mark_inode_dirty(inode, _RET_IP_);
6099         err = ext4_reserve_inode_write(handle, inode, &iloc);
6100         if (err)
6101                 return err;
6102 
6103         if (EXT4_I(inode)->i_extra_isize < sbi->s_want_extra_isize)
6104                 ext4_try_to_expand_extra_isize(inode, sbi->s_want_extra_isize,
6105                                                iloc, handle);
6106 
6107         return ext4_mark_iloc_dirty(handle, inode, &iloc);
6108 }
6109 
6110 /*
6111  * ext4_dirty_inode() is called from __mark_inode_dirty()
6112  *
6113  * We're really interested in the case where a file is being extended.
6114  * i_size has been changed by generic_commit_write() and we thus need
6115  * to include the updated inode in the current transaction.
6116  *
6117  * Also, dquot_alloc_block() will always dirty the inode when blocks
6118  * are allocated to the file.
6119  *
6120  * If the inode is marked synchronous, we don't honour that here - doing
6121  * so would cause a commit on atime updates, which we don't bother doing.
6122  * We handle synchronous inodes at the highest possible level.
6123  *
6124  * If only the I_DIRTY_TIME flag is set, we can skip everything.  If
6125  * I_DIRTY_TIME and I_DIRTY_SYNC are set, the only inode fields we need
6126  * to copy into the on-disk inode structure are the timestamp fields.
6127  */
6128 void ext4_dirty_inode(struct inode *inode, int flags)
6129 {
6130         handle_t *handle;
6131 
6132         if (flags == I_DIRTY_TIME)
6133                 return;
6134         handle = ext4_journal_start(inode, EXT4_HT_INODE, 2);
6135         if (IS_ERR(handle))
6136                 goto out;
6137 
6138         ext4_mark_inode_dirty(handle, inode);
6139 
6140         ext4_journal_stop(handle);
6141 out:
6142         return;
6143 }
6144 
6145 int ext4_change_inode_journal_flag(struct inode *inode, int val)
6146 {
6147         journal_t *journal;
6148         handle_t *handle;
6149         int err;
6150         struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
6151 
6152         /*
6153          * We have to be very careful here: changing a data block's
6154          * journaling status dynamically is dangerous.  If we write a
6155          * data block to the journal, change the status and then delete
6156          * that block, we risk forgetting to revoke the old log record
6157          * from the journal and so a subsequent replay can corrupt data.
6158          * So, first we make sure that the journal is empty and that
6159          * nobody is changing anything.
6160          */
6161 
6162         journal = EXT4_JOURNAL(inode);
6163         if (!journal)
6164                 return 0;
6165         if (is_journal_aborted(journal))
6166                 return -EROFS;
6167 
6168         /* Wait for all existing dio workers */
6169         inode_dio_wait(inode);
6170 
6171         /*
6172          * Before flushing the journal and switching inode's aops, we have
6173          * to flush all dirty data the inode has. There can be outstanding
6174          * delayed allocations, there can be unwritten extents created by
6175          * fallocate or buffered writes in dioread_nolock mode covered by
6176          * dirty data which can be converted only after flushing the dirty
6177          * data (and journalled aops don't know how to handle these cases).
6178          */
6179         if (val) {
6180                 down_write(&EXT4_I(inode)->i_mmap_sem);
6181                 err = filemap_write_and_wait(inode->i_mapping);
6182                 if (err < 0) {
6183                         up_write(&EXT4_I(inode)->i_mmap_sem);
6184                         return err;
6185                 }
6186         }
6187 
6188         percpu_down_write(&sbi->s_writepages_rwsem);
6189         jbd2_journal_lock_updates(journal);
6190 
6191         /*
6192          * OK, there are no updates running now, and all cached data is
6193          * synced to disk.  We are now in a completely consistent state
6194          * which doesn't have anything in the journal, and we know that
6195          * no filesystem updates are running, so it is safe to modify
6196          * the inode's in-core data-journaling state flag now.
6197          */
6198 
6199         if (val)
6200                 ext4_set_inode_flag(inode, EXT4_INODE_JOURNAL_DATA);
6201         else {
6202                 err = jbd2_journal_flush(journal);
6203                 if (err < 0) {
6204                         jbd2_journal_unlock_updates(journal);
6205                         percpu_up_write(&sbi->s_writepages_rwsem);
6206                         return err;
6207                 }
6208                 ext4_clear_inode_flag(inode, EXT4_INODE_JOURNAL_DATA);
6209         }
6210         ext4_set_aops(inode);
6211 
6212         jbd2_journal_unlock_updates(journal);
6213         percpu_up_write(&sbi->s_writepages_rwsem);
6214 
6215         if (val)
6216                 up_write(&EXT4_I(inode)->i_mmap_sem);
6217 
6218         /* Finally we can mark the inode as dirty. */
6219 
6220         handle = ext4_journal_start(inode, EXT4_HT_INODE, 1);
6221         if (IS_ERR(handle))
6222                 return PTR_ERR(handle);
6223 
6224         err = ext4_mark_inode_dirty(handle, inode);
6225         ext4_handle_sync(handle);
6226         ext4_journal_stop(handle);
6227         ext4_std_error(inode->i_sb, err);
6228 
6229         return err;
6230 }
6231 
6232 static int ext4_bh_unmapped(handle_t *handle, struct buffer_head *bh)
6233 {
6234         return !buffer_mapped(bh);
6235 }
6236 
6237 vm_fault_t ext4_page_mkwrite(struct vm_fault *vmf)
6238 {
6239         struct vm_area_struct *vma = vmf->vma;
6240         struct page *page = vmf->page;
6241         loff_t size;
6242         unsigned long len;
6243         int err;
6244         vm_fault_t ret;
6245         struct file *file = vma->vm_file;
6246         struct inode *inode = file_inode(file);
6247         struct address_space *mapping = inode->i_mapping;
6248         handle_t *handle;
6249         get_block_t *get_block;
6250         int retries = 0;
6251 
6252         if (unlikely(IS_IMMUTABLE(inode)))
6253                 return VM_FAULT_SIGBUS;
6254 
6255         sb_start_pagefault(inode->i_sb);
6256         file_update_time(vma->vm_file);
6257 
6258         down_read(&EXT4_I(inode)->i_mmap_sem);
6259 
6260         err = ext4_convert_inline_data(inode);
6261         if (err)
6262                 goto out_ret;
6263 
6264         /* Delalloc case is easy... */
6265         if (test_opt(inode->i_sb, DELALLOC) &&
6266             !ext4_should_journal_data(inode) &&
6267             !ext4_nonda_switch(inode->i_sb)) {
6268                 do {
6269                         err = block_page_mkwrite(vma, vmf,
6270                                                    ext4_da_get_block_prep);
6271                 } while (err == -ENOSPC &&
6272                        ext4_should_retry_alloc(inode->i_sb, &retries));
6273                 goto out_ret;
6274         }
6275 
6276         lock_page(page);
6277         size = i_size_read(inode);
6278         /* Page got truncated from under us? */
6279         if (page->mapping != mapping || page_offset(page) > size) {
6280                 unlock_page(page);
6281                 ret = VM_FAULT_NOPAGE;
6282                 goto out;
6283         }
6284 
6285         if (page->index == size >> PAGE_SHIFT)
6286                 len = size & ~PAGE_MASK;
6287         else
6288                 len = PAGE_SIZE;
6289         /*
6290          * Return if we have all the buffers mapped. This avoids the need to do
6291          * journal_start/journal_stop which can block and take a long time
6292          */
6293         if (page_has_buffers(page)) {
6294                 if (!ext4_walk_page_buffers(NULL, page_buffers(page),
6295                                             0, len, NULL,
6296                                             ext4_bh_unmapped)) {
6297                         /* Wait so that we don't change page under IO */
6298                         wait_for_stable_page(page);
6299                         ret = VM_FAULT_LOCKED;
6300                         goto out;
6301                 }
6302         }
6303         unlock_page(page);
6304         /* OK, we need to fill the hole... */
6305         if (ext4_should_dioread_nolock(inode))
6306                 get_block = ext4_get_block_unwritten;
6307         else
6308                 get_block = ext4_get_block;
6309 retry_alloc:
6310         handle = ext4_journal_start(inode, EXT4_HT_WRITE_PAGE,
6311                                     ext4_writepage_trans_blocks(inode));
6312         if (IS_ERR(handle)) {
6313                 ret = VM_FAULT_SIGBUS;
6314                 goto out;
6315         }
6316         err = block_page_mkwrite(vma, vmf, get_block);
6317         if (!err && ext4_should_journal_data(inode)) {
6318                 if (ext4_walk_page_buffers(handle, page_buffers(page), 0,
6319                           PAGE_SIZE, NULL, do_journal_get_write_access)) {
6320                         unlock_page(page);
6321                         ret = VM_FAULT_SIGBUS;
6322                         ext4_journal_stop(handle);
6323                         goto out;
6324                 }
6325                 ext4_set_inode_state(inode, EXT4_STATE_JDATA);
6326         }
6327         ext4_journal_stop(handle);
6328         if (err == -ENOSPC && ext4_should_retry_alloc(inode->i_sb, &retries))
6329                 goto retry_alloc;
6330 out_ret:
6331         ret = block_page_mkwrite_return(err);
6332 out:
6333         up_read(&EXT4_I(inode)->i_mmap_sem);
6334         sb_end_pagefault(inode->i_sb);
6335         return ret;
6336 }
6337 
6338 vm_fault_t ext4_filemap_fault(struct vm_fault *vmf)
6339 {
6340         struct inode *inode = file_inode(vmf->vma->vm_file);
6341         vm_fault_t ret;
6342 
6343         down_read(&EXT4_I(inode)->i_mmap_sem);
6344         ret = filemap_fault(vmf);
6345         up_read(&EXT4_I(inode)->i_mmap_sem);
6346 
6347         return ret;
6348 }
