root/fs/ext4/file.c

DEFINITIONS

This source file includes the following definitions:
  1. ext4_dax_read_iter
  2. ext4_file_read_iter
  3. ext4_release_file
  4. ext4_unwritten_wait
  5. ext4_unaligned_aio
  6. ext4_overwrite_io
  7. ext4_write_checks
  8. ext4_dax_write_iter
  9. ext4_file_write_iter
  10. ext4_dax_huge_fault
  11. ext4_dax_fault
  12. ext4_file_mmap
  13. ext4_sample_last_mounted
  14. ext4_file_open
  15. ext4_llseek

// SPDX-License-Identifier: GPL-2.0
/*
 *  linux/fs/ext4/file.c
 *
 * Copyright (C) 1992, 1993, 1994, 1995
 * Remy Card (card@masi.ibp.fr)
 * Laboratoire MASI - Institut Blaise Pascal
 * Universite Pierre et Marie Curie (Paris VI)
 *
 *  from
 *
 *  linux/fs/minix/file.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *
 *  ext4 fs regular file handling primitives
 *
 *  64-bit file support on 64-bit platforms by Jakub Jelinek
 *      (jj@sunsite.ms.mff.cuni.cz)
 */

#include <linux/time.h>
#include <linux/fs.h>
#include <linux/iomap.h>
#include <linux/mount.h>
#include <linux/path.h>
#include <linux/dax.h>
#include <linux/quotaops.h>
#include <linux/pagevec.h>
#include <linux/uio.h>
#include <linux/mman.h>
#include "ext4.h"
#include "ext4_jbd2.h"
#include "xattr.h"
#include "acl.h"

#ifdef CONFIG_FS_DAX
static ssize_t ext4_dax_read_iter(struct kiocb *iocb, struct iov_iter *to)
{
        struct inode *inode = file_inode(iocb->ki_filp);
        ssize_t ret;

        if (iocb->ki_flags & IOCB_NOWAIT) {
                if (!inode_trylock_shared(inode))
                        return -EAGAIN;
        } else {
                inode_lock_shared(inode);
        }
        /*
         * Recheck under inode lock - at this point we are sure it cannot
         * change anymore
         */
        if (!IS_DAX(inode)) {
                inode_unlock_shared(inode);
                /* Fallback to buffered IO in case we cannot support DAX */
                return generic_file_read_iter(iocb, to);
        }
        ret = dax_iomap_rw(iocb, to, &ext4_iomap_ops);
        inode_unlock_shared(inode);

        file_accessed(iocb->ki_filp);
        return ret;
}
#endif

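/*
 * Illustrative aside (not part of the original file): IOCB_NOWAIT is set
 * when userspace issues a read with RWF_NOWAIT, e.g. via preadv2(2). A
 * minimal userspace sketch of a caller that retries as a blocking read
 * when the non-blocking attempt cannot proceed; the helper name is
 * hypothetical:
 */
#if 0   /* example only, not compiled */
#define _GNU_SOURCE
#include <sys/uio.h>
#include <errno.h>

static ssize_t read_nowait_or_block(int fd, void *buf, size_t len, off_t off)
{
        struct iovec iov = { .iov_base = buf, .iov_len = len };
        /* Fails fast with EAGAIN instead of sleeping on locks or I/O. */
        ssize_t n = preadv2(fd, &iov, 1, off, RWF_NOWAIT);

        if (n < 0 && errno == EAGAIN)
                n = preadv2(fd, &iov, 1, off, 0);       /* blocking retry */
        return n;
}
#endif
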
static ssize_t ext4_file_read_iter(struct kiocb *iocb, struct iov_iter *to)
{
        if (unlikely(ext4_forced_shutdown(EXT4_SB(file_inode(iocb->ki_filp)->i_sb))))
                return -EIO;

        if (!iov_iter_count(to))
                return 0; /* skip atime */

#ifdef CONFIG_FS_DAX
        if (IS_DAX(file_inode(iocb->ki_filp)))
                return ext4_dax_read_iter(iocb, to);
#endif
        return generic_file_read_iter(iocb, to);
}

/*
 * Called when an inode is released. Note that this is different
 * from ext4_file_open: open gets called at every open, but release
 * gets called only when /all/ the files are closed.
 */
static int ext4_release_file(struct inode *inode, struct file *filp)
{
        if (ext4_test_inode_state(inode, EXT4_STATE_DA_ALLOC_CLOSE)) {
                ext4_alloc_da_blocks(inode);
                ext4_clear_inode_state(inode, EXT4_STATE_DA_ALLOC_CLOSE);
        }
        /* if we are the last writer on the inode, drop the block reservation */
        if ((filp->f_mode & FMODE_WRITE) &&
                        (atomic_read(&inode->i_writecount) == 1) &&
                        !EXT4_I(inode)->i_reserved_data_blocks)
        {
                down_write(&EXT4_I(inode)->i_data_sem);
                ext4_discard_preallocations(inode);
                up_write(&EXT4_I(inode)->i_data_sem);
        }
        if (is_dx(inode) && filp->private_data)
                ext4_htree_free_dir_info(filp->private_data);

        return 0;
}

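/*
 * Illustrative aside (not part of the original file): ->release fires once
 * per struct file when its last reference goes away; descriptors created
 * by dup(2) or inherited across fork(2) share one struct file. A sketch,
 * with a hypothetical path:
 */
#if 0   /* example only, not compiled */
#include <fcntl.h>
#include <unistd.h>

int main(void)
{
        int fd = open("/mnt/ext4/data", O_RDWR);        /* hypothetical path */
        int fd2 = dup(fd);

        close(fd);      /* no ->release yet: fd2 still references the file */
        close(fd2);     /* last reference dropped: ext4_release_file() runs */
        return 0;
}
#endif
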
static void ext4_unwritten_wait(struct inode *inode)
{
        wait_queue_head_t *wq = ext4_ioend_wq(inode);

        wait_event(*wq, (atomic_read(&EXT4_I(inode)->i_unwritten) == 0));
}

/*
 * This tests whether the IO in question is block-aligned or not.
 * Ext4 utilizes unwritten extents when hole-filling during direct IO, and they
 * are converted to written only after the IO is complete.  Until they are
 * mapped, these blocks appear as holes, so dio_zero_block() will assume that
 * it needs to zero out portions of the start and/or end block.  If 2 AIO
 * threads are at work on the same unwritten block, they must be synchronized
 * or one thread will zero the other's data, causing corruption.
 */
static int
ext4_unaligned_aio(struct inode *inode, struct iov_iter *from, loff_t pos)
{
        struct super_block *sb = inode->i_sb;
        int blockmask = sb->s_blocksize - 1;

        if (pos >= ALIGN(i_size_read(inode), sb->s_blocksize))
                return 0;

        if ((pos | iov_iter_alignment(from)) & blockmask)
                return 1;

        return 0;
}

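/*
 * Worked example (illustrative): with a 4096-byte block size, blockmask is
 * 0xfff. A direct AIO write at pos 8192 from a 4096-byte, 4096-aligned
 * buffer gives (pos | alignment) & 0xfff == 0, so it is block-aligned; the
 * same write at pos 8200 gives 8200 & 0xfff == 8, so it is treated as
 * unaligned and must be serialized against other AIO on the inode.
 */
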
/* Is IO overwriting allocated and initialized blocks? */
static bool ext4_overwrite_io(struct inode *inode, loff_t pos, loff_t len)
{
        struct ext4_map_blocks map;
        unsigned int blkbits = inode->i_blkbits;
        int err, blklen;

        if (pos + len > i_size_read(inode))
                return false;

        map.m_lblk = pos >> blkbits;
        map.m_len = EXT4_MAX_BLOCKS(len, pos, blkbits);
        blklen = map.m_len;

        err = ext4_map_blocks(NULL, inode, &map, 0);
        /*
         * 'err == blklen' means that all of the blocks have been preallocated,
         * regardless of whether they have been initialized or not. To exclude
         * unwritten extents, we need to check m_flags.
         */
        return err == blklen && (map.m_flags & EXT4_MAP_MAPPED);
}

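/*
 * Illustrative aside (not part of the original file): only blocks that are
 * both allocated and written (EXT4_MAP_MAPPED) count as an overwrite;
 * extents preallocated with fallocate(2) remain unwritten until first
 * written, so a direct write into them does not qualify. A sketch of a
 * caller (fd assumed opened with O_DIRECT, buf and len block-aligned,
 * helper name hypothetical) whose second write takes the overwrite path:
 */
#if 0   /* example only, not compiled */
#include <unistd.h>

static void rewrite_in_place(int fd, const void *buf, size_t len)
{
        pwrite(fd, buf, len, 0);        /* allocates and initializes blocks */
        fsync(fd);
        pwrite(fd, buf, len, 0);        /* same range again: an overwrite */
}
#endif
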
static ssize_t ext4_write_checks(struct kiocb *iocb, struct iov_iter *from)
{
        struct inode *inode = file_inode(iocb->ki_filp);
        ssize_t ret;

        ret = generic_write_checks(iocb, from);
        if (ret <= 0)
                return ret;

        if (unlikely(IS_IMMUTABLE(inode)))
                return -EPERM;

        /*
         * If we have encountered a bitmap-format file, the size limit
         * is smaller than s_maxbytes, which is for extent-mapped files.
         */
        if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))) {
                struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);

                if (iocb->ki_pos >= sbi->s_bitmap_maxbytes)
                        return -EFBIG;
                iov_iter_truncate(from, sbi->s_bitmap_maxbytes - iocb->ki_pos);
        }
        return iov_iter_count(from);
}

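/*
 * Worked example (illustrative): for a block-mapped (non-extent) inode
 * with 4096-byte blocks, each indirect block holds 4096 / 4 = 1024 block
 * numbers, so at most 12 + 1024 + 1024^2 + 1024^3 data blocks are
 * addressable - roughly 4 TiB. That is why s_bitmap_maxbytes is far below
 * the extent-mapped s_maxbytes limit checked by generic_write_checks().
 */
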
#ifdef CONFIG_FS_DAX
static ssize_t
ext4_dax_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
        struct inode *inode = file_inode(iocb->ki_filp);
        ssize_t ret;

        if (iocb->ki_flags & IOCB_NOWAIT) {
                if (!inode_trylock(inode))
                        return -EAGAIN;
        } else {
                inode_lock(inode);
        }
        ret = ext4_write_checks(iocb, from);
        if (ret <= 0)
                goto out;
        ret = file_remove_privs(iocb->ki_filp);
        if (ret)
                goto out;
        ret = file_update_time(iocb->ki_filp);
        if (ret)
                goto out;

        ret = dax_iomap_rw(iocb, from, &ext4_iomap_ops);
out:
        inode_unlock(inode);
        if (ret > 0)
                ret = generic_write_sync(iocb, ret);
        return ret;
}
#endif

static ssize_t
ext4_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
        struct inode *inode = file_inode(iocb->ki_filp);
        int o_direct = iocb->ki_flags & IOCB_DIRECT;
        int unaligned_aio = 0;
        int overwrite = 0;
        ssize_t ret;

        if (unlikely(ext4_forced_shutdown(EXT4_SB(inode->i_sb))))
                return -EIO;

#ifdef CONFIG_FS_DAX
        if (IS_DAX(inode))
                return ext4_dax_write_iter(iocb, from);
#endif

        if (!inode_trylock(inode)) {
                if (iocb->ki_flags & IOCB_NOWAIT)
                        return -EAGAIN;
                inode_lock(inode);
        }

        ret = ext4_write_checks(iocb, from);
        if (ret <= 0)
                goto out;

        /*
         * Unaligned direct AIO requests must be serialized against each
         * other, as zeroing of partial blocks by two competing unaligned
         * AIOs can result in data corruption.
         */
        if (o_direct && ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS) &&
            !is_sync_kiocb(iocb) &&
            ext4_unaligned_aio(inode, from, iocb->ki_pos)) {
                unaligned_aio = 1;
                ext4_unwritten_wait(inode);
        }

        iocb->private = &overwrite;
        /* Check whether we do a DIO overwrite or not */
        if (o_direct && !unaligned_aio) {
                if (ext4_overwrite_io(inode, iocb->ki_pos, iov_iter_count(from))) {
                        if (ext4_should_dioread_nolock(inode))
                                overwrite = 1;
                } else if (iocb->ki_flags & IOCB_NOWAIT) {
                        ret = -EAGAIN;
                        goto out;
                }
        }

        ret = __generic_file_write_iter(iocb, from);
        /*
         * Unaligned direct AIO must be the only IO in flight. Otherwise,
         * overlapping aligned IO issued after the unaligned IO might
         * result in data corruption.
         */
        if (ret == -EIOCBQUEUED && unaligned_aio)
                ext4_unwritten_wait(inode);
        inode_unlock(inode);

        if (ret > 0)
                ret = generic_write_sync(iocb, ret);

        return ret;

out:
        inode_unlock(inode);
        return ret;
}

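/*
 * Illustrative aside (not part of the original file): the -EAGAIN returned
 * above is what a userspace RWF_NOWAIT direct write observes when the
 * target range is not a plain overwrite and allocation could block. A
 * sketch with a hypothetical helper:
 */
#if 0   /* example only, not compiled */
#define _GNU_SOURCE
#include <sys/uio.h>
#include <errno.h>

static ssize_t write_nowait_or_block(int fd, void *buf, size_t len, off_t off)
{
        struct iovec iov = { .iov_base = buf, .iov_len = len };
        ssize_t n = pwritev2(fd, &iov, 1, off, RWF_NOWAIT);

        if (n < 0 && errno == EAGAIN)
                n = pwritev2(fd, &iov, 1, off, 0);      /* blocking retry */
        return n;
}
#endif
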
#ifdef CONFIG_FS_DAX
static vm_fault_t ext4_dax_huge_fault(struct vm_fault *vmf,
                enum page_entry_size pe_size)
{
        int error = 0;
        vm_fault_t result;
        int retries = 0;
        handle_t *handle = NULL;
        struct inode *inode = file_inode(vmf->vma->vm_file);
        struct super_block *sb = inode->i_sb;

        /*
         * We have to distinguish real writes from writes which will result in a
         * COW page; COW writes should *not* poke the journal (the file will not
         * be changed). Doing so would cause unintended failures when mounted
         * read-only.
         *
         * We check for VM_SHARED rather than vmf->cow_page since the latter is
         * unset for pe_size != PE_SIZE_PTE (i.e. only in do_cow_fault); for
         * other sizes, dax_iomap_fault will handle splitting / fallback so that
         * we eventually come back with a COW page.
         */
        bool write = (vmf->flags & FAULT_FLAG_WRITE) &&
                (vmf->vma->vm_flags & VM_SHARED);
        pfn_t pfn;

        if (write) {
                sb_start_pagefault(sb);
                file_update_time(vmf->vma->vm_file);
                down_read(&EXT4_I(inode)->i_mmap_sem);
retry:
                handle = ext4_journal_start_sb(sb, EXT4_HT_WRITE_PAGE,
                                               EXT4_DATA_TRANS_BLOCKS(sb));
                if (IS_ERR(handle)) {
                        up_read(&EXT4_I(inode)->i_mmap_sem);
                        sb_end_pagefault(sb);
                        return VM_FAULT_SIGBUS;
                }
        } else {
                down_read(&EXT4_I(inode)->i_mmap_sem);
        }
        result = dax_iomap_fault(vmf, pe_size, &pfn, &error, &ext4_iomap_ops);
        if (write) {
                ext4_journal_stop(handle);

                if ((result & VM_FAULT_ERROR) && error == -ENOSPC &&
                    ext4_should_retry_alloc(sb, &retries))
                        goto retry;
                /* Handling synchronous page fault? */
                if (result & VM_FAULT_NEEDDSYNC)
                        result = dax_finish_sync_fault(vmf, pe_size, pfn);
                up_read(&EXT4_I(inode)->i_mmap_sem);
                sb_end_pagefault(sb);
        } else {
                up_read(&EXT4_I(inode)->i_mmap_sem);
        }

        return result;
}

static vm_fault_t ext4_dax_fault(struct vm_fault *vmf)
{
        return ext4_dax_huge_fault(vmf, PE_SIZE_PTE);
}

static const struct vm_operations_struct ext4_dax_vm_ops = {
        .fault          = ext4_dax_fault,
        .huge_fault     = ext4_dax_huge_fault,
        .page_mkwrite   = ext4_dax_fault,
        .pfn_mkwrite    = ext4_dax_fault,
};
#else
#define ext4_dax_vm_ops ext4_file_vm_ops
#endif

static const struct vm_operations_struct ext4_file_vm_ops = {
        .fault          = ext4_filemap_fault,
        .map_pages      = filemap_map_pages,
        .page_mkwrite   = ext4_page_mkwrite,
};

static int ext4_file_mmap(struct file *file, struct vm_area_struct *vma)
{
        struct inode *inode = file->f_mapping->host;
        struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
        struct dax_device *dax_dev = sbi->s_daxdev;

        if (unlikely(ext4_forced_shutdown(sbi)))
                return -EIO;

        /*
         * We don't support synchronous mappings for non-DAX files, nor for
         * DAX files whose underlying dax_device is not itself synchronous.
         */
        if (!daxdev_mapping_supported(vma, dax_dev))
                return -EOPNOTSUPP;

        file_accessed(file);
        if (IS_DAX(file_inode(file))) {
                vma->vm_ops = &ext4_dax_vm_ops;
                vma->vm_flags |= VM_HUGEPAGE;
        } else {
                vma->vm_ops = &ext4_file_vm_ops;
        }
        return 0;
}

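/*
 * Illustrative aside (not part of the original file): the synchronous
 * mappings gated above are requested with MAP_SYNC, which is only valid
 * together with MAP_SHARED_VALIDATE. On success, userspace may persist
 * stores to the mapping with CPU cache flushes alone. A minimal sketch
 * with a hypothetical helper:
 */
#if 0   /* example only, not compiled */
#define _GNU_SOURCE
#include <sys/mman.h>

static void *map_sync(int fd, size_t len)
{
        return mmap(NULL, len, PROT_READ | PROT_WRITE,
                    MAP_SHARED_VALIDATE | MAP_SYNC, fd, 0);
}
#endif
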
static int ext4_sample_last_mounted(struct super_block *sb,
                                    struct vfsmount *mnt)
{
        struct ext4_sb_info *sbi = EXT4_SB(sb);
        struct path path;
        char buf[64], *cp;
        handle_t *handle;
        int err;

        if (likely(sbi->s_mount_flags & EXT4_MF_MNTDIR_SAMPLED))
                return 0;

        if (sb_rdonly(sb) || !sb_start_intwrite_trylock(sb))
                return 0;

        sbi->s_mount_flags |= EXT4_MF_MNTDIR_SAMPLED;
        /*
         * Sample where the filesystem has been mounted and
         * store it in the superblock for sysadmin convenience
         * when trying to sort through large numbers of block
         * devices or filesystem images.
         */
        memset(buf, 0, sizeof(buf));
        path.mnt = mnt;
        path.dentry = mnt->mnt_root;
        cp = d_path(&path, buf, sizeof(buf));
        err = 0;
        if (IS_ERR(cp))
                goto out;

        handle = ext4_journal_start_sb(sb, EXT4_HT_MISC, 1);
        err = PTR_ERR(handle);
        if (IS_ERR(handle))
                goto out;
        BUFFER_TRACE(sbi->s_sbh, "get_write_access");
        err = ext4_journal_get_write_access(handle, sbi->s_sbh);
        if (err)
                goto out_journal;
        strlcpy(sbi->s_es->s_last_mounted, cp,
                sizeof(sbi->s_es->s_last_mounted));
        ext4_handle_dirty_super(handle, sb);
out_journal:
        ext4_journal_stop(handle);
out:
        sb_end_intwrite(sb);
        return err;
}

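/*
 * Usage note (illustrative): the sampled path is what dumpe2fs(8) prints
 * as "Last mounted on:" when dumping the superblock. If the mount path
 * does not fit in the 64-byte buffer, d_path() fails and the sample is
 * simply skipped with err left at 0.
 */
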
static int ext4_file_open(struct inode *inode, struct file *filp)
{
        int ret;

        if (unlikely(ext4_forced_shutdown(EXT4_SB(inode->i_sb))))
                return -EIO;

        ret = ext4_sample_last_mounted(inode->i_sb, filp->f_path.mnt);
        if (ret)
                return ret;

        ret = fscrypt_file_open(inode, filp);
        if (ret)
                return ret;

        ret = fsverity_file_open(inode, filp);
        if (ret)
                return ret;

        /*
         * Set up the jbd2_inode if we are opening the inode for
         * writing and the journal is present
         */
        if (filp->f_mode & FMODE_WRITE) {
                ret = ext4_inode_attach_jinode(inode);
                if (ret < 0)
                        return ret;
        }

        filp->f_mode |= FMODE_NOWAIT;
        return dquot_file_open(inode, filp);
}

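/*
 * Usage note (illustrative): setting FMODE_NOWAIT above is what lets the
 * VFS accept RWF_NOWAIT reads and writes on ext4 files in the first
 * place; without it, preadv2(2)/pwritev2(2) with RWF_NOWAIT fail with
 * -EOPNOTSUPP before the IOCB_NOWAIT paths in this file are reached.
 */
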
/*
 * ext4_llseek() handles both block-mapped and extent-mapped maxbytes values
 * by calling generic_file_llseek_size() with the appropriate maxbytes
 * value for each.
 */
loff_t ext4_llseek(struct file *file, loff_t offset, int whence)
{
        struct inode *inode = file->f_mapping->host;
        loff_t maxbytes;

        if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)))
                maxbytes = EXT4_SB(inode->i_sb)->s_bitmap_maxbytes;
        else
                maxbytes = inode->i_sb->s_maxbytes;

        switch (whence) {
        default:
                return generic_file_llseek_size(file, offset, whence,
                                                maxbytes, i_size_read(inode));
        case SEEK_HOLE:
                inode_lock_shared(inode);
                offset = iomap_seek_hole(inode, offset, &ext4_iomap_ops);
                inode_unlock_shared(inode);
                break;
        case SEEK_DATA:
                inode_lock_shared(inode);
                offset = iomap_seek_data(inode, offset, &ext4_iomap_ops);
                inode_unlock_shared(inode);
                break;
        }

        if (offset < 0)
                return offset;
        return vfs_setpos(file, offset, maxbytes);
}

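/*
 * Illustrative aside (not part of the original file): SEEK_HOLE and
 * SEEK_DATA are the lseek(2) whence values handled above. A userspace
 * sketch that walks the data extents of a sparse file (helper name
 * hypothetical):
 */
#if 0   /* example only, not compiled */
#define _GNU_SOURCE
#include <unistd.h>
#include <stdio.h>

static void list_data_extents(int fd)
{
        off_t data = 0, hole;

        /* lseek() returns -1/ENXIO once no data remains past 'data'. */
        while ((data = lseek(fd, data, SEEK_DATA)) >= 0) {
                hole = lseek(fd, data, SEEK_HOLE);      /* end of this run */
                printf("data: [%lld, %lld)\n", (long long)data,
                       (long long)hole);
                data = hole;
        }
}
#endif
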
const struct file_operations ext4_file_operations = {
        .llseek         = ext4_llseek,
        .read_iter      = ext4_file_read_iter,
        .write_iter     = ext4_file_write_iter,
        .unlocked_ioctl = ext4_ioctl,
#ifdef CONFIG_COMPAT
        .compat_ioctl   = ext4_compat_ioctl,
#endif
        .mmap           = ext4_file_mmap,
        .mmap_supported_flags = MAP_SYNC,
        .open           = ext4_file_open,
        .release        = ext4_release_file,
        .fsync          = ext4_sync_file,
        .get_unmapped_area = thp_get_unmapped_area,
        .splice_read    = generic_file_splice_read,
        .splice_write   = iter_file_splice_write,
        .fallocate      = ext4_fallocate,
};

const struct inode_operations ext4_file_inode_operations = {
        .setattr        = ext4_setattr,
        .getattr        = ext4_file_getattr,
        .listxattr      = ext4_listxattr,
        .get_acl        = ext4_get_acl,
        .set_acl        = ext4_set_acl,
        .fiemap         = ext4_fiemap,
};
