root/fs/btrfs/inode.c

DEFINITIONS

This source file includes the following definitions:
  1. btrfs_cleanup_ordered_extents
  2. btrfs_test_inode_set_ops
  3. btrfs_init_inode_security
  4. insert_inline_extent
  5. cow_file_range_inline
  6. add_async_extent
  7. inode_can_compress
  8. inode_need_compress
  9. inode_should_defrag
  10. compress_file_range
  11. free_async_extent_pages
  12. submit_compressed_extents
  13. get_extent_allocation_hint
  14. cow_file_range
  15. async_cow_start
  16. async_cow_submit
  17. async_cow_free
  18. cow_file_range_async
  19. csum_exist_in_range
  20. run_delalloc_nocow
  21. need_force_cow
  22. btrfs_run_delalloc_range
  23. btrfs_split_delalloc_extent
  24. btrfs_merge_delalloc_extent
  25. btrfs_add_delalloc_inodes
  26. __btrfs_del_delalloc_inode
  27. btrfs_del_delalloc_inode
  28. btrfs_set_delalloc_extent
  29. btrfs_clear_delalloc_extent
  30. btrfs_bio_fits_in_stripe
  31. btrfs_submit_bio_start
  32. btrfs_submit_bio_hook
  33. add_pending_csums
  34. btrfs_set_extent_delalloc
  35. btrfs_writepage_fixup_worker
  36. btrfs_writepage_cow_fixup
  37. insert_reserved_file_extent
  38. backref_comp
  39. backref_insert
  40. record_one_backref
  41. record_extent_backrefs
  42. relink_is_mergable
  43. relink_extent_backref
  44. free_sa_defrag_extent
  45. relink_file_extents
  46. record_old_file_extents
  47. btrfs_release_delalloc_bytes
  48. btrfs_finish_ordered_io
  49. finish_ordered_fn
  50. btrfs_writepage_endio_finish_ordered
  51. __readpage_endio_check
  52. btrfs_readpage_end_io_hook
  53. btrfs_add_delayed_iput
  54. run_delayed_iput_locked
  55. btrfs_run_delayed_iput
  56. btrfs_run_delayed_iputs
  57. btrfs_wait_on_delayed_iputs
  58. btrfs_orphan_add
  59. btrfs_orphan_del
  60. btrfs_orphan_cleanup
  61. acls_after_inode_item
  62. btrfs_read_locked_inode
  63. fill_inode_item
  64. btrfs_update_inode_item
  65. btrfs_update_inode
  66. btrfs_update_inode_fallback
  67. __btrfs_unlink_inode
  68. btrfs_unlink_inode
  69. __unlink_start_trans
  70. btrfs_unlink
  71. btrfs_unlink_subvol
  72. may_destroy_subvol
  73. btrfs_prune_dentries
  74. btrfs_delete_subvolume
  75. btrfs_rmdir
  76. btrfs_truncate_inode_items
  77. btrfs_truncate_block
  78. maybe_insert_hole
  79. btrfs_cont_expand
  80. btrfs_setsize
  81. btrfs_setattr
  82. evict_inode_truncate_pages
  83. evict_refill_and_join
  84. btrfs_evict_inode
  85. btrfs_inode_by_name
  86. fixup_tree_root_location
  87. inode_tree_add
  88. inode_tree_del
  89. btrfs_init_locked_inode
  90. btrfs_find_actor
  91. btrfs_iget_locked
  92. btrfs_iget_path
  93. btrfs_iget
  94. new_simple_dir
  95. btrfs_inode_type
  96. btrfs_lookup_dentry
  97. btrfs_dentry_delete
  98. btrfs_lookup
  99. btrfs_opendir
  100. btrfs_filldir
  101. btrfs_real_readdir
  102. btrfs_dirty_inode
  103. btrfs_update_time
  104. btrfs_set_inode_index_count
  105. btrfs_set_inode_index
  106. btrfs_insert_inode_locked
  107. btrfs_inherit_iflags
  108. btrfs_new_inode
  109. btrfs_add_link
  110. btrfs_add_nondir
  111. btrfs_mknod
  112. btrfs_create
  113. btrfs_link
  114. btrfs_mkdir
  115. uncompress_inline
  116. btrfs_get_extent
  117. btrfs_get_extent_fiemap
  118. btrfs_create_dio_extent
  119. btrfs_new_extent_direct
  120. can_nocow_extent
  121. lock_extent_direct
  122. create_io_em
  123. btrfs_get_blocks_direct_read
  124. btrfs_get_blocks_direct_write
  125. btrfs_get_blocks_direct
  126. submit_dio_repair_bio
  127. btrfs_check_dio_repairable
  128. dio_read_error
  129. btrfs_retry_endio_nocsum
  130. __btrfs_correct_data_nocsum
  131. btrfs_retry_endio
  132. __btrfs_subio_endio_read
  133. btrfs_subio_endio_read
  134. btrfs_endio_direct_read
  135. __endio_write_update_ordered
  136. btrfs_endio_direct_write
  137. btrfs_submit_bio_start_direct_io
  138. btrfs_end_dio_bio
  139. btrfs_lookup_and_bind_dio_csum
  140. btrfs_submit_dio_bio
  141. btrfs_submit_direct_hook
  142. btrfs_submit_direct
  143. check_direct_IO
  144. btrfs_direct_IO
  145. btrfs_fiemap
  146. btrfs_readpage
  147. btrfs_writepage
  148. btrfs_writepages
  149. btrfs_readpages
  150. __btrfs_releasepage
  151. btrfs_releasepage
  152. btrfs_invalidatepage
  153. btrfs_page_mkwrite
  154. btrfs_truncate
  155. btrfs_create_subvol_root
  156. btrfs_alloc_inode
  157. btrfs_test_destroy_inode
  158. btrfs_free_inode
  159. btrfs_destroy_inode
  160. btrfs_drop_inode
  161. init_once
  162. btrfs_destroy_cachep
  163. btrfs_init_cachep
  164. btrfs_getattr
  165. btrfs_rename_exchange
  166. btrfs_whiteout_for_rename
  167. btrfs_rename
  168. btrfs_rename2
  169. btrfs_run_delalloc_work
  170. btrfs_alloc_delalloc_work
  171. start_delalloc_inodes
  172. btrfs_start_delalloc_snapshot
  173. btrfs_start_delalloc_roots
  174. btrfs_symlink
  175. __btrfs_prealloc_file_range
  176. btrfs_prealloc_file_range
  177. btrfs_prealloc_file_range_trans
  178. btrfs_set_page_dirty
  179. btrfs_permission
  180. btrfs_tmpfile
  181. btrfs_set_range_writeback
  182. btrfs_add_swapfile_pin
  183. btrfs_free_swapfile_pins
  184. btrfs_add_swap_extent
  185. btrfs_swap_deactivate
  186. btrfs_swap_activate
  187. btrfs_swap_deactivate
  188. btrfs_swap_activate

// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2007 Oracle.  All rights reserved.
 */

#include <linux/kernel.h>
#include <linux/bio.h>
#include <linux/buffer_head.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/pagemap.h>
#include <linux/highmem.h>
#include <linux/time.h>
#include <linux/init.h>
#include <linux/string.h>
#include <linux/backing-dev.h>
#include <linux/writeback.h>
#include <linux/compat.h>
#include <linux/xattr.h>
#include <linux/posix_acl.h>
#include <linux/falloc.h>
#include <linux/slab.h>
#include <linux/ratelimit.h>
#include <linux/btrfs.h>
#include <linux/blkdev.h>
#include <linux/posix_acl_xattr.h>
#include <linux/uio.h>
#include <linux/magic.h>
#include <linux/iversion.h>
#include <linux/swap.h>
#include <linux/sched/mm.h>
#include <asm/unaligned.h>
#include "misc.h"
#include "ctree.h"
#include "disk-io.h"
#include "transaction.h"
#include "btrfs_inode.h"
#include "print-tree.h"
#include "ordered-data.h"
#include "xattr.h"
#include "tree-log.h"
#include "volumes.h"
#include "compression.h"
#include "locking.h"
#include "free-space-cache.h"
#include "inode-map.h"
#include "backref.h"
#include "props.h"
#include "qgroup.h"
#include "delalloc-space.h"
#include "block-group.h"

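/*
 * Arguments passed down to iget5_locked() so that btrfs_find_actor() and
 * btrfs_init_locked_inode() can match and set up an inode from its
 * location key and root.
 */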
struct btrfs_iget_args {
        struct btrfs_key *location;
        struct btrfs_root *root;
};

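/*
 * Per-call state for direct I/O: the outstanding block reservation and the
 * part of the ordered extent range whose bios have not been submitted yet.
 */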
struct btrfs_dio_data {
        u64 reserve;
        u64 unsubmitted_oe_range_start;
        u64 unsubmitted_oe_range_end;
        int overwrite;
};

static const struct inode_operations btrfs_dir_inode_operations;
static const struct inode_operations btrfs_symlink_inode_operations;
static const struct inode_operations btrfs_dir_ro_inode_operations;
static const struct inode_operations btrfs_special_inode_operations;
static const struct inode_operations btrfs_file_inode_operations;
static const struct address_space_operations btrfs_aops;
static const struct file_operations btrfs_dir_file_operations;
static const struct extent_io_ops btrfs_extent_io_ops;

static struct kmem_cache *btrfs_inode_cachep;
struct kmem_cache *btrfs_trans_handle_cachep;
struct kmem_cache *btrfs_path_cachep;
struct kmem_cache *btrfs_free_space_cachep;
struct kmem_cache *btrfs_free_space_bitmap_cachep;

static int btrfs_setsize(struct inode *inode, struct iattr *attr);
static int btrfs_truncate(struct inode *inode, bool skip_writeback);
static int btrfs_finish_ordered_io(struct btrfs_ordered_extent *ordered_extent);
static noinline int cow_file_range(struct inode *inode,
                                   struct page *locked_page,
                                   u64 start, u64 end, int *page_started,
                                   unsigned long *nr_written, int unlock);
static struct extent_map *create_io_em(struct inode *inode, u64 start, u64 len,
                                       u64 orig_start, u64 block_start,
                                       u64 block_len, u64 orig_block_len,
                                       u64 ram_bytes, int compress_type,
                                       int type);

static void __endio_write_update_ordered(struct inode *inode,
                                         const u64 offset, const u64 bytes,
                                         const bool uptodate);

/*
 * Clean up all submitted ordered extents in the specified range to handle
 * errors from the btrfs_run_delalloc_range() callback.
 *
 * NOTE: the caller must ensure that when an error happens, it does not call
 * extent_clear_unlock_delalloc() to clear both the bits EXTENT_DO_ACCOUNTING
 * and EXTENT_DELALLOC simultaneously, because that causes the reserved metadata
 * to be released, which we want to happen only when finishing the ordered
 * extent (btrfs_finish_ordered_io()).
 */
static inline void btrfs_cleanup_ordered_extents(struct inode *inode,
                                                 struct page *locked_page,
                                                 u64 offset, u64 bytes)
{
        unsigned long index = offset >> PAGE_SHIFT;
        unsigned long end_index = (offset + bytes - 1) >> PAGE_SHIFT;
        u64 page_start = page_offset(locked_page);
        u64 page_end = page_start + PAGE_SIZE - 1;

        struct page *page;

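        /*
         * Clear PagePrivate2 on every page of the failed range so no other
         * path tries to finish the ordered extent accounting for these
         * pages; the ordered extent is finalized exactly once below via
         * __endio_write_update_ordered().
         */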
        while (index <= end_index) {
                page = find_get_page(inode->i_mapping, index);
                index++;
                if (!page)
                        continue;
                ClearPagePrivate2(page);
                put_page(page);
        }

        /*
         * In case this page belongs to the delalloc range being instantiated
         * then skip it, since the first page of a range is going to be
         * properly cleaned up by the caller of run_delalloc_range
         */
        if (page_start >= offset && page_end <= (offset + bytes - 1)) {
                offset += PAGE_SIZE;
                bytes -= PAGE_SIZE;
        }

        return __endio_write_update_ordered(inode, offset, bytes, false);
}

static int btrfs_dirty_inode(struct inode *inode);

#ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
void btrfs_test_inode_set_ops(struct inode *inode)
{
        BTRFS_I(inode)->io_tree.ops = &btrfs_extent_io_ops;
}
#endif

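/*
 * Give a new inode its security attributes: inherit ACLs from the parent
 * directory and let the security module install its xattr, all inside the
 * caller's transaction.
 */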
static int btrfs_init_inode_security(struct btrfs_trans_handle *trans,
                                     struct inode *inode,  struct inode *dir,
                                     const struct qstr *qstr)
{
        int err;

        err = btrfs_init_acl(trans, inode, dir);
        if (!err)
                err = btrfs_xattr_security_init(trans, inode, dir, qstr);
        return err;
}

/*
 * this does all the hard work for inserting an inline extent into
 * the btree.  The caller should have done a btrfs_drop_extents so that
 * no overlapping inline items exist in the btree
 */
static int insert_inline_extent(struct btrfs_trans_handle *trans,
                                struct btrfs_path *path, int extent_inserted,
                                struct btrfs_root *root, struct inode *inode,
                                u64 start, size_t size, size_t compressed_size,
                                int compress_type,
                                struct page **compressed_pages)
{
        struct extent_buffer *leaf;
        struct page *page = NULL;
        char *kaddr;
        unsigned long ptr;
        struct btrfs_file_extent_item *ei;
        int ret;
        size_t cur_size = size;
        unsigned long offset;

        ASSERT((compressed_size > 0 && compressed_pages) ||
               (compressed_size == 0 && !compressed_pages));

        if (compressed_size && compressed_pages)
                cur_size = compressed_size;

        inode_add_bytes(inode, size);

        if (!extent_inserted) {
                struct btrfs_key key;
                size_t datasize;

                key.objectid = btrfs_ino(BTRFS_I(inode));
                key.offset = start;
                key.type = BTRFS_EXTENT_DATA_KEY;

                datasize = btrfs_file_extent_calc_inline_size(cur_size);
                path->leave_spinning = 1;
                ret = btrfs_insert_empty_item(trans, root, path, &key,
                                              datasize);
                if (ret)
                        goto fail;
        }
        leaf = path->nodes[0];
        ei = btrfs_item_ptr(leaf, path->slots[0],
                            struct btrfs_file_extent_item);
        btrfs_set_file_extent_generation(leaf, ei, trans->transid);
        btrfs_set_file_extent_type(leaf, ei, BTRFS_FILE_EXTENT_INLINE);
        btrfs_set_file_extent_encryption(leaf, ei, 0);
        btrfs_set_file_extent_other_encoding(leaf, ei, 0);
        btrfs_set_file_extent_ram_bytes(leaf, ei, size);
        ptr = btrfs_file_extent_inline_start(ei);

        if (compress_type != BTRFS_COMPRESS_NONE) {
                struct page *cpage;
                int i = 0;
                while (compressed_size > 0) {
                        cpage = compressed_pages[i];
                        cur_size = min_t(unsigned long, compressed_size,
                                       PAGE_SIZE);

                        kaddr = kmap_atomic(cpage);
                        write_extent_buffer(leaf, kaddr, ptr, cur_size);
                        kunmap_atomic(kaddr);

                        i++;
                        ptr += cur_size;
                        compressed_size -= cur_size;
                }
                btrfs_set_file_extent_compression(leaf, ei,
                                                  compress_type);
        } else {
                page = find_get_page(inode->i_mapping,
                                     start >> PAGE_SHIFT);
                btrfs_set_file_extent_compression(leaf, ei, 0);
                kaddr = kmap_atomic(page);
                offset = offset_in_page(start);
                write_extent_buffer(leaf, kaddr + offset, ptr, size);
                kunmap_atomic(kaddr);
                put_page(page);
        }
        btrfs_mark_buffer_dirty(leaf);
        btrfs_release_path(path);

        /*
         * we're an inline extent, so nobody can
         * extend the file past i_size without locking
         * a page we already have locked.
         *
         * We must do any isize and inode updates
         * before we unlock the pages.  Otherwise we
         * could end up racing with unlink.
         */
        BTRFS_I(inode)->disk_i_size = inode->i_size;
        ret = btrfs_update_inode(trans, root, inode);

fail:
        return ret;
}


/*
 * conditionally insert an inline extent into the file.  This
 * does the checks required to make sure the data is small enough
 * to fit as an inline extent.
 */
static noinline int cow_file_range_inline(struct inode *inode, u64 start,
                                          u64 end, size_t compressed_size,
                                          int compress_type,
                                          struct page **compressed_pages)
{
        struct btrfs_root *root = BTRFS_I(inode)->root;
        struct btrfs_fs_info *fs_info = root->fs_info;
        struct btrfs_trans_handle *trans;
        u64 isize = i_size_read(inode);
        u64 actual_end = min(end + 1, isize);
        u64 inline_len = actual_end - start;
        u64 aligned_end = ALIGN(end, fs_info->sectorsize);
        u64 data_len = inline_len;
        int ret;
        struct btrfs_path *path;
        int extent_inserted = 0;
        u32 extent_item_size;

        if (compressed_size)
                data_len = compressed_size;

        if (start > 0 ||
            actual_end > fs_info->sectorsize ||
            data_len > BTRFS_MAX_INLINE_DATA_SIZE(fs_info) ||
            (!compressed_size &&
            (actual_end & (fs_info->sectorsize - 1)) == 0) ||
            end + 1 < isize ||
            data_len > fs_info->max_inline) {
                return 1;
        }

        path = btrfs_alloc_path();
        if (!path)
                return -ENOMEM;

        trans = btrfs_join_transaction(root);
        if (IS_ERR(trans)) {
                btrfs_free_path(path);
                return PTR_ERR(trans);
        }
        trans->block_rsv = &BTRFS_I(inode)->block_rsv;

        if (compressed_size && compressed_pages)
                extent_item_size = btrfs_file_extent_calc_inline_size(
                   compressed_size);
        else
                extent_item_size = btrfs_file_extent_calc_inline_size(
                    inline_len);

        ret = __btrfs_drop_extents(trans, root, inode, path,
                                   start, aligned_end, NULL,
                                   1, 1, extent_item_size, &extent_inserted);
        if (ret) {
                btrfs_abort_transaction(trans, ret);
                goto out;
        }

        if (isize > actual_end)
                inline_len = min_t(u64, isize, actual_end);
        ret = insert_inline_extent(trans, path, extent_inserted,
                                   root, inode, start,
                                   inline_len, compressed_size,
                                   compress_type, compressed_pages);
        if (ret && ret != -ENOSPC) {
                btrfs_abort_transaction(trans, ret);
                goto out;
        } else if (ret == -ENOSPC) {
                ret = 1;
                goto out;
        }

        set_bit(BTRFS_INODE_NEEDS_FULL_SYNC, &BTRFS_I(inode)->runtime_flags);
        btrfs_drop_extent_cache(BTRFS_I(inode), start, aligned_end - 1, 0);
out:
        /*
         * Don't forget to free the reserved space, as for inlined extent
         * it won't count as data extent, free them directly here.
         * And at reserve time, it's always aligned to page size, so
         * just free one page here.
         */
        btrfs_qgroup_free_data(inode, NULL, 0, PAGE_SIZE);
        btrfs_free_path(path);
        btrfs_end_transaction(trans);
        return ret;
}

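/*
 * One unit of work produced by compress_file_range(): either a compressed
 * extent (pages != NULL) or an uncompressed fallback range (pages == NULL),
 * to be written out later by submit_compressed_extents().
 */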
struct async_extent {
        u64 start;
        u64 ram_size;
        u64 compressed_size;
        struct page **pages;
        unsigned long nr_pages;
        int compress_type;
        struct list_head list;
};

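/*
 * A contiguous piece of a delalloc range handed to one compression worker.
 * All chunks of a range are grouped in an async_cow, whose chunk counter
 * the workers share via @pending so the last chunk to finish can free the
 * whole allocation.
 */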
struct async_chunk {
        struct inode *inode;
        struct page *locked_page;
        u64 start;
        u64 end;
        unsigned int write_flags;
        struct list_head extents;
        struct btrfs_work work;
        atomic_t *pending;
};

struct async_cow {
        /* Number of chunks in flight; must be first in the structure */
        atomic_t num_chunks;
        struct async_chunk chunks[];
};

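/*
 * Record a range (compressed or not) on the chunk's list of extents to be
 * written out during phase two.
 */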
static noinline int add_async_extent(struct async_chunk *cow,
                                     u64 start, u64 ram_size,
                                     u64 compressed_size,
                                     struct page **pages,
                                     unsigned long nr_pages,
                                     int compress_type)
{
        struct async_extent *async_extent;

        async_extent = kmalloc(sizeof(*async_extent), GFP_NOFS);
        BUG_ON(!async_extent); /* -ENOMEM */
        async_extent->start = start;
        async_extent->ram_size = ram_size;
        async_extent->compressed_size = compressed_size;
        async_extent->pages = pages;
        async_extent->nr_pages = nr_pages;
        async_extent->compress_type = compress_type;
        list_add_tail(&async_extent->list, &cow->extents);
        return 0;
}

/*
 * Check if the inode has flags compatible with compression
 */
static inline bool inode_can_compress(struct inode *inode)
{
        if (BTRFS_I(inode)->flags & BTRFS_INODE_NODATACOW ||
            BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM)
                return false;
        return true;
}

/*
 * Check if the inode needs to be submitted to compression, based on mount
 * options, defragmentation, properties or heuristics.
 */
static inline int inode_need_compress(struct inode *inode, u64 start, u64 end)
{
        struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);

        if (!inode_can_compress(inode)) {
                WARN(IS_ENABLED(CONFIG_BTRFS_DEBUG),
                        KERN_ERR "BTRFS: unexpected compression for ino %llu\n",
                        btrfs_ino(BTRFS_I(inode)));
                return 0;
        }
        /* force compress */
        if (btrfs_test_opt(fs_info, FORCE_COMPRESS))
                return 1;
        /* defrag ioctl */
        if (BTRFS_I(inode)->defrag_compress)
                return 1;
        /* bad compression ratios */
        if (BTRFS_I(inode)->flags & BTRFS_INODE_NOCOMPRESS)
                return 0;
        if (btrfs_test_opt(fs_info, COMPRESS) ||
            BTRFS_I(inode)->flags & BTRFS_INODE_COMPRESS ||
            BTRFS_I(inode)->prop_compress)
                return btrfs_compress_heuristic(inode, start, end);
        return 0;
}

static inline void inode_should_defrag(struct btrfs_inode *inode,
                u64 start, u64 end, u64 num_bytes, u64 small_write)
{
        /* If this is a small write inside eof, kick off a defrag */
        if (num_bytes < small_write &&
            (start > 0 || end + 1 < inode->disk_i_size))
                btrfs_add_inode_defrag(NULL, inode);
}

/*
 * we create compressed extents in two phases.  The first
 * phase compresses a range of pages that have already been
 * locked (both pages and state bits are locked).
 *
 * This is done inside an ordered work queue, and the compression
 * is spread across many cpus.  The actual IO submission is step
 * two, and the ordered work queue takes care of making sure that
 * happens in the same order things were put onto the queue by
 * writepages and friends.
 *
 * If this code finds it can't get good compression, it puts an
 * entry onto the work queue to write the uncompressed bytes.  This
 * makes sure that both compressed inodes and uncompressed inodes
 * are written in the same order that the flusher thread sent them
 * down.
 */
static noinline int compress_file_range(struct async_chunk *async_chunk)
{
        struct inode *inode = async_chunk->inode;
        struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
        u64 blocksize = fs_info->sectorsize;
        u64 start = async_chunk->start;
        u64 end = async_chunk->end;
        u64 actual_end;
        u64 i_size;
        int ret = 0;
        struct page **pages = NULL;
        unsigned long nr_pages;
        unsigned long total_compressed = 0;
        unsigned long total_in = 0;
        int i;
        int will_compress;
        int compress_type = fs_info->compress_type;
        int compressed_extents = 0;
        int redirty = 0;

        inode_should_defrag(BTRFS_I(inode), start, end, end - start + 1,
                        SZ_16K);

        /*
         * We need to save i_size before now because it could change in between
         * us evaluating the size and assigning it.  This is because we lock and
         * unlock the page in truncate and fallocate, and then modify the i_size
         * later on.
         *
         * The barriers are to emulate READ_ONCE, remove that once i_size_read
         * does that for us.
         */
        barrier();
        i_size = i_size_read(inode);
        barrier();
        actual_end = min_t(u64, i_size, end + 1);
again:
        will_compress = 0;
        nr_pages = (end >> PAGE_SHIFT) - (start >> PAGE_SHIFT) + 1;
        BUILD_BUG_ON((BTRFS_MAX_COMPRESSED % PAGE_SIZE) != 0);
        nr_pages = min_t(unsigned long, nr_pages,
                        BTRFS_MAX_COMPRESSED / PAGE_SIZE);

        /*
         * we don't want to send crud past the end of i_size through
         * compression, that's just a waste of CPU time.  So, if the
         * end of the file is before the start of our current
         * requested range of bytes, we bail out to the uncompressed
         * cleanup code that can deal with all of this.
         *
         * It isn't really the fastest way to fix things, but this is a
         * very uncommon corner.
         */
        if (actual_end <= start)
                goto cleanup_and_bail_uncompressed;

        total_compressed = actual_end - start;

        /*
         * skip compression for a small file range (<= blocksize) that
         * isn't an inline extent, since it doesn't save disk space at all.
         */
        if (total_compressed <= blocksize &&
           (start > 0 || end + 1 < BTRFS_I(inode)->disk_i_size))
                goto cleanup_and_bail_uncompressed;

        total_compressed = min_t(unsigned long, total_compressed,
                        BTRFS_MAX_UNCOMPRESSED);
        total_in = 0;
        ret = 0;

        /*
         * we do compression for mount -o compress and when the
         * inode has not been flagged as nocompress.  This flag can
         * change at any time if we discover bad compression ratios.
         */
        if (inode_need_compress(inode, start, end)) {
                WARN_ON(pages);
                pages = kcalloc(nr_pages, sizeof(struct page *), GFP_NOFS);
                if (!pages) {
                        /* just bail out to the uncompressed code */
                        nr_pages = 0;
                        goto cont;
                }

                if (BTRFS_I(inode)->defrag_compress)
                        compress_type = BTRFS_I(inode)->defrag_compress;
                else if (BTRFS_I(inode)->prop_compress)
                        compress_type = BTRFS_I(inode)->prop_compress;

                /*
                 * we need to call clear_page_dirty_for_io on each
                 * page in the range.  Otherwise applications with the file
                 * mmap'd can wander in and change the page contents while
                 * we are compressing them.
                 *
                 * If the compression fails for any reason, we set the pages
                 * dirty again later on.
                 *
                 * Note that the remaining part is redirtied, the start pointer
                 * has moved, the end is the original one.
                 */
                if (!redirty) {
                        extent_range_clear_dirty_for_io(inode, start, end);
                        redirty = 1;
                }

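                /*
                 * btrfs_compress_pages() takes the algorithm in the low
                 * four bits of its first argument and the level in the
                 * bits above, which is why the level is shifted in below.
                 */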
                /* Compression level is applied here and only here */
                ret = btrfs_compress_pages(
                        compress_type | (fs_info->compress_level << 4),
                                           inode->i_mapping, start,
                                           pages,
                                           &nr_pages,
                                           &total_in,
                                           &total_compressed);

                if (!ret) {
                        unsigned long offset = offset_in_page(total_compressed);
                        struct page *page = pages[nr_pages - 1];
                        char *kaddr;

                        /* zero the tail end of the last page, we might be
                         * sending it down to disk
                         */
                        if (offset) {
                                kaddr = kmap_atomic(page);
                                memset(kaddr + offset, 0,
                                       PAGE_SIZE - offset);
                                kunmap_atomic(kaddr);
                        }
                        will_compress = 1;
                }
        }
cont:
        if (start == 0) {
                /* let's try to make an inline extent */
                if (ret || total_in < actual_end) {
                        /* we didn't compress the entire range, try
                         * to make an uncompressed inline extent.
                         */
                        ret = cow_file_range_inline(inode, start, end, 0,
                                                    BTRFS_COMPRESS_NONE, NULL);
                } else {
                        /* try making a compressed inline extent */
                        ret = cow_file_range_inline(inode, start, end,
                                                    total_compressed,
                                                    compress_type, pages);
                }
                if (ret <= 0) {
                        unsigned long clear_flags = EXTENT_DELALLOC |
                                EXTENT_DELALLOC_NEW | EXTENT_DEFRAG |
                                EXTENT_DO_ACCOUNTING;
                        unsigned long page_error_op;

                        page_error_op = ret < 0 ? PAGE_SET_ERROR : 0;

                        /*
                         * inline extent creation worked or returned error,
                         * we don't need to create any more async work items.
                         * Unlock and free up our temp pages.
                         *
                         * We use DO_ACCOUNTING here because we need the
                         * delalloc_release_metadata to be done _after_ we drop
                         * our outstanding extent for clearing delalloc for this
                         * range.
                         */
                        extent_clear_unlock_delalloc(inode, start, end, NULL,
                                                     clear_flags,
                                                     PAGE_UNLOCK |
                                                     PAGE_CLEAR_DIRTY |
                                                     PAGE_SET_WRITEBACK |
                                                     page_error_op |
                                                     PAGE_END_WRITEBACK);

                        for (i = 0; i < nr_pages; i++) {
                                WARN_ON(pages[i]->mapping);
                                put_page(pages[i]);
                        }
                        kfree(pages);

                        return 0;
                }
        }

        if (will_compress) {
                /*
                 * we aren't doing an inline extent, so round the compressed
                 * size up to a block size boundary so the allocator does
                 * sane things
                 */
                total_compressed = ALIGN(total_compressed, blocksize);

                /*
                 * one last check to make sure the compression is really a
                 * win, compare the page count read with the blocks on disk,
                 * compression must free at least one sector size
                 */
                total_in = ALIGN(total_in, PAGE_SIZE);
                if (total_compressed + blocksize <= total_in) {
                        compressed_extents++;

                        /*
                         * The async work queues will take care of doing actual
                         * allocation on disk for these compressed pages, and
                         * will submit them to the elevator.
                         */
                        add_async_extent(async_chunk, start, total_in,
                                        total_compressed, pages, nr_pages,
                                        compress_type);

                        if (start + total_in < end) {
                                start += total_in;
                                pages = NULL;
                                cond_resched();
                                goto again;
                        }
                        return compressed_extents;
                }
        }
        if (pages) {
                /*
                 * the compression code ran but failed to make things smaller,
                 * free any pages it allocated and our page pointer array
                 */
                for (i = 0; i < nr_pages; i++) {
                        WARN_ON(pages[i]->mapping);
                        put_page(pages[i]);
                }
                kfree(pages);
                pages = NULL;
                total_compressed = 0;
                nr_pages = 0;

                /* flag the file so we don't compress in the future */
                if (!btrfs_test_opt(fs_info, FORCE_COMPRESS) &&
                    !(BTRFS_I(inode)->prop_compress)) {
                        BTRFS_I(inode)->flags |= BTRFS_INODE_NOCOMPRESS;
                }
        }
cleanup_and_bail_uncompressed:
        /*
         * No compression, but we still need to write the pages in the file
         * we've been given so far.  redirty the locked page if it corresponds
         * to our extent and set things up for the async work queue to run
         * cow_file_range to do the normal delalloc dance.
         */
        if (async_chunk->locked_page &&
            (page_offset(async_chunk->locked_page) >= start &&
             page_offset(async_chunk->locked_page) <= end)) {
                __set_page_dirty_nobuffers(async_chunk->locked_page);
                /* unlocked later on in the async handlers */
        }

        if (redirty)
                extent_range_redirty_for_io(inode, start, end);
        add_async_extent(async_chunk, start, end - start + 1, 0, NULL, 0,
                         BTRFS_COMPRESS_NONE);
        compressed_extents++;

        return compressed_extents;
}

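/*
 * Drop the references on an async_extent's compressed pages and reset its
 * page bookkeeping.
 */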
static void free_async_extent_pages(struct async_extent *async_extent)
{
        int i;

        if (!async_extent->pages)
                return;

        for (i = 0; i < async_extent->nr_pages; i++) {
                WARN_ON(async_extent->pages[i]->mapping);
                put_page(async_extent->pages[i]);
        }
        kfree(async_extent->pages);
        async_extent->nr_pages = 0;
        async_extent->pages = NULL;
}

/*
 * phase two of compressed writeback.  This is the ordered portion
 * of the code, which only gets called in the order the work was
 * queued.  We walk all the async extents created by compress_file_range
 * and send them down to the disk.
 */
static noinline void submit_compressed_extents(struct async_chunk *async_chunk)
{
        struct inode *inode = async_chunk->inode;
        struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
        struct async_extent *async_extent;
        u64 alloc_hint = 0;
        struct btrfs_key ins;
        struct extent_map *em;
        struct btrfs_root *root = BTRFS_I(inode)->root;
        struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
        int ret = 0;

again:
        while (!list_empty(&async_chunk->extents)) {
                async_extent = list_entry(async_chunk->extents.next,
                                          struct async_extent, list);
                list_del(&async_extent->list);

retry:
                lock_extent(io_tree, async_extent->start,
                            async_extent->start + async_extent->ram_size - 1);
                /* did the compression code fall back to uncompressed IO? */
                if (!async_extent->pages) {
                        int page_started = 0;
                        unsigned long nr_written = 0;

                        /* allocate blocks */
                        ret = cow_file_range(inode, async_chunk->locked_page,
                                             async_extent->start,
                                             async_extent->start +
                                             async_extent->ram_size - 1,
                                             &page_started, &nr_written, 0);

                        /* JDM XXX */

                        /*
                         * if page_started, cow_file_range inserted an
                         * inline extent and took care of all the unlocking
                         * and IO for us.  Otherwise, we need to submit
                         * all those pages down to the drive.
                         */
                        if (!page_started && !ret)
                                extent_write_locked_range(inode,
                                                  async_extent->start,
                                                  async_extent->start +
                                                  async_extent->ram_size - 1,
                                                  WB_SYNC_ALL);
                        else if (ret && async_chunk->locked_page)
                                unlock_page(async_chunk->locked_page);
                        kfree(async_extent);
                        cond_resched();
                        continue;
                }

                ret = btrfs_reserve_extent(root, async_extent->ram_size,
                                           async_extent->compressed_size,
                                           async_extent->compressed_size,
                                           0, alloc_hint, &ins, 1, 1);
                if (ret) {
                        free_async_extent_pages(async_extent);

                        if (ret == -ENOSPC) {
                                unlock_extent(io_tree, async_extent->start,
                                              async_extent->start +
                                              async_extent->ram_size - 1);

                                /*
                                 * we need to redirty the pages if we decide to
                                 * fallback to uncompressed IO, otherwise we
                                 * will not submit these pages down to lower
                                 * layers.
                                 */
                                extent_range_redirty_for_io(inode,
                                                async_extent->start,
                                                async_extent->start +
                                                async_extent->ram_size - 1);

                                goto retry;
                        }
                        goto out_free;
                }
                /*
                 * here we're doing allocation and writeback of the
                 * compressed pages
                 */
                em = create_io_em(inode, async_extent->start,
                                  async_extent->ram_size, /* len */
                                  async_extent->start, /* orig_start */
                                  ins.objectid, /* block_start */
                                  ins.offset, /* block_len */
                                  ins.offset, /* orig_block_len */
                                  async_extent->ram_size, /* ram_bytes */
                                  async_extent->compress_type,
                                  BTRFS_ORDERED_COMPRESSED);
                if (IS_ERR(em))
                        /* ret value is not necessary due to void function */
                        goto out_free_reserve;
                free_extent_map(em);

                ret = btrfs_add_ordered_extent_compress(inode,
                                                async_extent->start,
                                                ins.objectid,
                                                async_extent->ram_size,
                                                ins.offset,
                                                BTRFS_ORDERED_COMPRESSED,
                                                async_extent->compress_type);
                if (ret) {
                        btrfs_drop_extent_cache(BTRFS_I(inode),
                                                async_extent->start,
                                                async_extent->start +
                                                async_extent->ram_size - 1, 0);
                        goto out_free_reserve;
                }
                btrfs_dec_block_group_reservations(fs_info, ins.objectid);

                /*
                 * clear dirty, set writeback and unlock the pages.
                 */
                extent_clear_unlock_delalloc(inode, async_extent->start,
                                async_extent->start +
                                async_extent->ram_size - 1,
                                NULL, EXTENT_LOCKED | EXTENT_DELALLOC,
                                PAGE_UNLOCK | PAGE_CLEAR_DIRTY |
                                PAGE_SET_WRITEBACK);
                if (btrfs_submit_compressed_write(inode,
                                    async_extent->start,
                                    async_extent->ram_size,
                                    ins.objectid,
                                    ins.offset, async_extent->pages,
                                    async_extent->nr_pages,
                                    async_chunk->write_flags)) {
                        struct page *p = async_extent->pages[0];
                        const u64 start = async_extent->start;
                        const u64 end = start + async_extent->ram_size - 1;

                        p->mapping = inode->i_mapping;
                        btrfs_writepage_endio_finish_ordered(p, start, end, 0);

                        p->mapping = NULL;
                        extent_clear_unlock_delalloc(inode, start, end,
                                                     NULL, 0,
                                                     PAGE_END_WRITEBACK |
                                                     PAGE_SET_ERROR);
                        free_async_extent_pages(async_extent);
                }
                alloc_hint = ins.objectid + ins.offset;
                kfree(async_extent);
                cond_resched();
        }
        return;
out_free_reserve:
        btrfs_dec_block_group_reservations(fs_info, ins.objectid);
        btrfs_free_reserved_extent(fs_info, ins.objectid, ins.offset, 1);
out_free:
        extent_clear_unlock_delalloc(inode, async_extent->start,
                                     async_extent->start +
                                     async_extent->ram_size - 1,
                                     NULL, EXTENT_LOCKED | EXTENT_DELALLOC |
                                     EXTENT_DELALLOC_NEW |
                                     EXTENT_DEFRAG | EXTENT_DO_ACCOUNTING,
                                     PAGE_UNLOCK | PAGE_CLEAR_DIRTY |
                                     PAGE_SET_WRITEBACK | PAGE_END_WRITEBACK |
                                     PAGE_SET_ERROR);
        free_async_extent_pages(async_extent);
        kfree(async_extent);
        goto again;
}

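/*
 * Find a nearby extent mapping and return its disk block number, so the
 * allocator can try to place this inode's new extents close to existing
 * ones.
 */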
static u64 get_extent_allocation_hint(struct inode *inode, u64 start,
                                      u64 num_bytes)
{
        struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
        struct extent_map *em;
        u64 alloc_hint = 0;

        read_lock(&em_tree->lock);
        em = search_extent_mapping(em_tree, start, num_bytes);
        if (em) {
                /*
                 * if block start isn't an actual block number then find the
                 * first block in this inode and use that as a hint.  If that
                 * block is also bogus then just don't worry about it.
                 */
                if (em->block_start >= EXTENT_MAP_LAST_BYTE) {
                        free_extent_map(em);
                        em = search_extent_mapping(em_tree, 0, 0);
                        if (em && em->block_start < EXTENT_MAP_LAST_BYTE)
                                alloc_hint = em->block_start;
                        if (em)
                                free_extent_map(em);
                } else {
                        alloc_hint = em->block_start;
                        free_extent_map(em);
                }
        }
        read_unlock(&em_tree->lock);

        return alloc_hint;
}

/*
 * when extent_io.c finds a delayed allocation range in the file,
 * the callbacks end up in this code.  The basic idea is to
 * allocate extents on disk for the range, and create ordered data structs
 * in RAM to track those extents.
 *
 * locked_page is the page that writepage had locked already.  We use
 * it to make sure we don't do extra locks or unlocks.
 *
 * *page_started is set to one if we unlock locked_page and do everything
 * required to start IO on it.  It may be clean and already done with
 * IO when we return.
 */
 966 static noinline int cow_file_range(struct inode *inode,
 967                                    struct page *locked_page,
 968                                    u64 start, u64 end, int *page_started,
 969                                    unsigned long *nr_written, int unlock)
 970 {
 971         struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
 972         struct btrfs_root *root = BTRFS_I(inode)->root;
 973         u64 alloc_hint = 0;
 974         u64 num_bytes;
 975         unsigned long ram_size;
 976         u64 cur_alloc_size = 0;
 977         u64 blocksize = fs_info->sectorsize;
 978         struct btrfs_key ins;
 979         struct extent_map *em;
 980         unsigned clear_bits;
 981         unsigned long page_ops;
 982         bool extent_reserved = false;
 983         int ret = 0;
 984 
 985         if (btrfs_is_free_space_inode(BTRFS_I(inode))) {
 986                 WARN_ON_ONCE(1);
 987                 ret = -EINVAL;
 988                 goto out_unlock;
 989         }
 990 
 991         num_bytes = ALIGN(end - start + 1, blocksize);
 992         num_bytes = max(blocksize,  num_bytes);
 993         ASSERT(num_bytes <= btrfs_super_total_bytes(fs_info->super_copy));
 994 
 995         inode_should_defrag(BTRFS_I(inode), start, end, num_bytes, SZ_64K);
 996 
 997         if (start == 0) {
 998                 /* lets try to make an inline extent */
 999                 ret = cow_file_range_inline(inode, start, end, 0,
1000                                             BTRFS_COMPRESS_NONE, NULL);
1001                 if (ret == 0) {
1002                         /*
1003                          * We use DO_ACCOUNTING here because we need the
1004                          * delalloc_release_metadata to be run _after_ we drop
1005                          * our outstanding extent for clearing delalloc for this
1006                          * range.
1007                          */
1008                         extent_clear_unlock_delalloc(inode, start, end, NULL,
1009                                      EXTENT_LOCKED | EXTENT_DELALLOC |
1010                                      EXTENT_DELALLOC_NEW | EXTENT_DEFRAG |
1011                                      EXTENT_DO_ACCOUNTING, PAGE_UNLOCK |
1012                                      PAGE_CLEAR_DIRTY | PAGE_SET_WRITEBACK |
1013                                      PAGE_END_WRITEBACK);
1014                         *nr_written = *nr_written +
1015                              (end - start + PAGE_SIZE) / PAGE_SIZE;
1016                         *page_started = 1;
1017                         goto out;
1018                 } else if (ret < 0) {
1019                         goto out_unlock;
1020                 }
1021         }
1022 
1023         alloc_hint = get_extent_allocation_hint(inode, start, num_bytes);
1024         btrfs_drop_extent_cache(BTRFS_I(inode), start,
1025                         start + num_bytes - 1, 0);
1026 
1027         while (num_bytes > 0) {
1028                 cur_alloc_size = num_bytes;
1029                 ret = btrfs_reserve_extent(root, cur_alloc_size, cur_alloc_size,
1030                                            fs_info->sectorsize, 0, alloc_hint,
1031                                            &ins, 1, 1);
1032                 if (ret < 0)
1033                         goto out_unlock;
1034                 cur_alloc_size = ins.offset;
1035                 extent_reserved = true;
1036 
1037                 ram_size = ins.offset;
1038                 em = create_io_em(inode, start, ins.offset, /* len */
1039                                   start, /* orig_start */
1040                                   ins.objectid, /* block_start */
1041                                   ins.offset, /* block_len */
1042                                   ins.offset, /* orig_block_len */
1043                                   ram_size, /* ram_bytes */
1044                                   BTRFS_COMPRESS_NONE, /* compress_type */
1045                                   BTRFS_ORDERED_REGULAR /* type */);
1046                 if (IS_ERR(em)) {
1047                         ret = PTR_ERR(em);
1048                         goto out_reserve;
1049                 }
1050                 free_extent_map(em);
1051 
1052                 ret = btrfs_add_ordered_extent(inode, start, ins.objectid,
1053                                                ram_size, cur_alloc_size, 0);
1054                 if (ret)
1055                         goto out_drop_extent_cache;
1056 
1057                 if (root->root_key.objectid ==
1058                     BTRFS_DATA_RELOC_TREE_OBJECTID) {
1059                         ret = btrfs_reloc_clone_csums(inode, start,
1060                                                       cur_alloc_size);
1061                         /*
1062                          * Only drop cache here, and process as normal.
1063                          *
1064                          * We must not allow extent_clear_unlock_delalloc()
1065                          * at out_unlock label to free meta of this ordered
1066                          * extent, as its meta should be freed by
1067                          * btrfs_finish_ordered_io().
1068                          *
1069                          * So we must continue until @start is increased to
1070                          * skip current ordered extent.
1071                          */
1072                         if (ret)
1073                                 btrfs_drop_extent_cache(BTRFS_I(inode), start,
1074                                                 start + ram_size - 1, 0);
1075                 }
1076 
1077                 btrfs_dec_block_group_reservations(fs_info, ins.objectid);
1078 
1079                 /* we're not doing compressed IO, don't unlock the first
1080                  * page (which the caller expects to stay locked), don't
1081                  * clear any dirty bits and don't set any writeback bits
1082                  *
1083                  * Do set the Private2 bit so we know this page was properly
1084                  * setup for writepage
1085                  */
1086                 page_ops = unlock ? PAGE_UNLOCK : 0;
1087                 page_ops |= PAGE_SET_PRIVATE2;
1088 
1089                 extent_clear_unlock_delalloc(inode, start,
1090                                              start + ram_size - 1,
1091                                              locked_page,
1092                                              EXTENT_LOCKED | EXTENT_DELALLOC,
1093                                              page_ops);
1094                 if (num_bytes < cur_alloc_size)
1095                         num_bytes = 0;
1096                 else
1097                         num_bytes -= cur_alloc_size;
1098                 alloc_hint = ins.objectid + ins.offset;
1099                 start += cur_alloc_size;
1100                 extent_reserved = false;
1101 
1102                 /*
1103                  * btrfs_reloc_clone_csums() error, since start is increased
1104                  * extent_clear_unlock_delalloc() at out_unlock label won't
1105                  * free metadata of current ordered extent, we're OK to exit.
1106                  */
1107                 if (ret)
1108                         goto out_unlock;
1109         }
1110 out:
1111         return ret;
1112 
1113 out_drop_extent_cache:
1114         btrfs_drop_extent_cache(BTRFS_I(inode), start, start + ram_size - 1, 0);
1115 out_reserve:
1116         btrfs_dec_block_group_reservations(fs_info, ins.objectid);
1117         btrfs_free_reserved_extent(fs_info, ins.objectid, ins.offset, 1);
1118 out_unlock:
1119         clear_bits = EXTENT_LOCKED | EXTENT_DELALLOC | EXTENT_DELALLOC_NEW |
1120                 EXTENT_DEFRAG | EXTENT_CLEAR_META_RESV;
1121         page_ops = PAGE_UNLOCK | PAGE_CLEAR_DIRTY | PAGE_SET_WRITEBACK |
1122                 PAGE_END_WRITEBACK;
1123         /*
1124          * If we reserved an extent for our delalloc range (or a subrange) and
1125          * failed to create the respective ordered extent, then it means that
1126          * when we reserved the extent we decremented the extent's size from
1127          * the data space_info's bytes_may_use counter and incremented the
1128          * space_info's bytes_reserved counter by the same amount. We must make
1129          * sure extent_clear_unlock_delalloc() does not try to decrement again
1130          * the data space_info's bytes_may_use counter, therefore we do not pass
1131          * it the flag EXTENT_CLEAR_DATA_RESV.
1132          */
1133         if (extent_reserved) {
1134                 extent_clear_unlock_delalloc(inode, start,
1135                                              start + cur_alloc_size - 1,
1136                                              locked_page,
1137                                              clear_bits,
1138                                              page_ops);
1139                 start += cur_alloc_size;
1140                 if (start >= end)
1141                         goto out;
1142         }
1143         extent_clear_unlock_delalloc(inode, start, end, locked_page,
1144                                      clear_bits | EXTENT_CLEAR_DATA_RESV,
1145                                      page_ops);
1146         goto out;
1147 }
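
/*
 * A minimal userspace sketch of the loop bookkeeping above, assuming
 * plain C outside the kernel: a delalloc range is consumed in
 * allocation-sized steps, and every sub-range uses an inclusive end
 * offset, which is why the "+ size - 1" pattern recurs throughout this
 * file.  The names below are illustrative only, not kernel API.
 */
#include <stdint.h>
#include <stdio.h>

static void walk_range(uint64_t start, uint64_t end, uint64_t max_alloc)
{
	uint64_t num_bytes = end - start + 1;	/* inclusive range length */

	while (num_bytes > 0) {
		uint64_t cur = num_bytes < max_alloc ? num_bytes : max_alloc;

		/* mirrors ranges such as [start, start + ram_size - 1] */
		printf("extent [%ju, %ju]\n", (uintmax_t)start,
		       (uintmax_t)(start + cur - 1));
		start += cur;
		num_bytes -= cur;
	}
}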
1148 
1149 /*
1150  * work queue callback to start compression on a range of file pages
1151  */
1152 static noinline void async_cow_start(struct btrfs_work *work)
1153 {
1154         struct async_chunk *async_chunk;
1155         int compressed_extents;
1156 
1157         async_chunk = container_of(work, struct async_chunk, work);
1158 
1159         compressed_extents = compress_file_range(async_chunk);
1160         if (compressed_extents == 0) {
1161                 btrfs_add_delayed_iput(async_chunk->inode);
1162                 async_chunk->inode = NULL;
1163         }
1164 }
1165 
1166 /*
1167  * work queue callback to submit previously compressed pages
1168  */
1169 static noinline void async_cow_submit(struct btrfs_work *work)
1170 {
1171         struct async_chunk *async_chunk = container_of(work, struct async_chunk,
1172                                                      work);
1173         struct btrfs_fs_info *fs_info = btrfs_work_owner(work);
1174         unsigned long nr_pages;
1175 
1176         nr_pages = (async_chunk->end - async_chunk->start + PAGE_SIZE) >>
1177                 PAGE_SHIFT;
1178 
1179         /* atomic_sub_return implies a barrier */
1180         if (atomic_sub_return(nr_pages, &fs_info->async_delalloc_pages) <
1181             5 * SZ_1M)
1182                 cond_wake_up_nomb(&fs_info->async_submit_wait);
1183 
1184         /*
1185          * ->inode can be NULL if async_cow_start failed to compress, in
1186          * which case we don't have anything to submit, yet we always need
1187          * to adjust ->async_delalloc_pages as it's paired with the init
1188          * happening in cow_file_range_async.
1189          */
1190         if (async_chunk->inode)
1191                 submit_compressed_extents(async_chunk);
1192 }
1193 
1194 static noinline void async_cow_free(struct btrfs_work *work)
1195 {
1196         struct async_chunk *async_chunk;
1197 
1198         async_chunk = container_of(work, struct async_chunk, work);
1199         if (async_chunk->inode)
1200                 btrfs_add_delayed_iput(async_chunk->inode);
1201         /*
1202          * Since 'pending' sits at the start of the allocation that also
1203          * holds the array of async_chunks, freeing it frees the whole array.
1204          */
1205         if (atomic_dec_and_test(async_chunk->pending))
1206                 kvfree(async_chunk->pending);
1207 }
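
/*
 * A minimal userspace sketch of the freeing scheme above, assuming C11
 * atomics: the shared counter lives at offset zero of the allocation
 * that also holds the chunk array, every chunk keeps a pointer to that
 * counter, and whoever drops the last reference frees the whole
 * allocation.  All names here are illustrative, not kernel API.
 */
#include <stdatomic.h>
#include <stdlib.h>

struct demo_chunk {
	atomic_int *pending;	/* points at demo_ctx.num_chunks */
};

struct demo_ctx {
	atomic_int num_chunks;	/* must stay the first member */
	struct demo_chunk chunks[];
};

static void demo_chunk_free(struct demo_chunk *chunk)
{
	/*
	 * pending == &ctx->num_chunks, and num_chunks is the first
	 * member, so freeing it frees the containing demo_ctx and with
	 * it the whole chunks[] array.
	 */
	if (atomic_fetch_sub(chunk->pending, 1) == 1)
		free(chunk->pending);
}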
1208 
1209 static int cow_file_range_async(struct inode *inode, struct page *locked_page,
1210                                 u64 start, u64 end, int *page_started,
1211                                 unsigned long *nr_written,
1212                                 unsigned int write_flags)
1213 {
1214         struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
1215         struct async_cow *ctx;
1216         struct async_chunk *async_chunk;
1217         unsigned long nr_pages;
1218         u64 cur_end;
1219         u64 num_chunks = DIV_ROUND_UP(end - start, SZ_512K);
1220         int i;
1221         bool should_compress;
1222         unsigned nofs_flag;
1223 
1224         unlock_extent(&BTRFS_I(inode)->io_tree, start, end);
1225 
1226         if (BTRFS_I(inode)->flags & BTRFS_INODE_NOCOMPRESS &&
1227             !btrfs_test_opt(fs_info, FORCE_COMPRESS)) {
1228                 num_chunks = 1;
1229                 should_compress = false;
1230         } else {
1231                 should_compress = true;
1232         }
1233 
1234         nofs_flag = memalloc_nofs_save();
1235         ctx = kvmalloc(struct_size(ctx, chunks, num_chunks), GFP_KERNEL);
1236         memalloc_nofs_restore(nofs_flag);
1237 
1238         if (!ctx) {
1239                 unsigned clear_bits = EXTENT_LOCKED | EXTENT_DELALLOC |
1240                         EXTENT_DELALLOC_NEW | EXTENT_DEFRAG |
1241                         EXTENT_DO_ACCOUNTING;
1242                 unsigned long page_ops = PAGE_UNLOCK | PAGE_CLEAR_DIRTY |
1243                         PAGE_SET_WRITEBACK | PAGE_END_WRITEBACK |
1244                         PAGE_SET_ERROR;
1245 
1246                 extent_clear_unlock_delalloc(inode, start, end, locked_page,
1247                                              clear_bits, page_ops);
1248                 return -ENOMEM;
1249         }
1250 
1251         async_chunk = ctx->chunks;
1252         atomic_set(&ctx->num_chunks, num_chunks);
1253 
1254         for (i = 0; i < num_chunks; i++) {
1255                 if (should_compress)
1256                         cur_end = min(end, start + SZ_512K - 1);
1257                 else
1258                         cur_end = end;
1259 
1260                 /*
1261                  * igrab is called higher up in the call chain, take only the
1262                  * lightweight reference for the callback lifetime
1263                  */
1264                 ihold(inode);
1265                 async_chunk[i].pending = &ctx->num_chunks;
1266                 async_chunk[i].inode = inode;
1267                 async_chunk[i].start = start;
1268                 async_chunk[i].end = cur_end;
1269                 async_chunk[i].write_flags = write_flags;
1270                 INIT_LIST_HEAD(&async_chunk[i].extents);
1271 
1272                 /*
1273                  * The locked_page comes all the way from writepage and it's
1274                  * the original page we were actually given.  As we spread
1275                  * this large delalloc region across multiple async_chunk
1276                  * structs, only the first struct needs a pointer to locked_page
1277                  *
1278                  * This way we don't need racy decisions about who is supposed
1279                  * to unlock it.
1280                  */
1281                 if (locked_page) {
1282                         async_chunk[i].locked_page = locked_page;
1283                         locked_page = NULL;
1284                 } else {
1285                         async_chunk[i].locked_page = NULL;
1286                 }
1287 
1288                 btrfs_init_work(&async_chunk[i].work, async_cow_start,
1289                                 async_cow_submit, async_cow_free);
1290 
1291                 nr_pages = DIV_ROUND_UP(cur_end - start, PAGE_SIZE);
1292                 atomic_add(nr_pages, &fs_info->async_delalloc_pages);
1293 
1294                 btrfs_queue_work(fs_info->delalloc_workers, &async_chunk[i].work);
1295 
1296                 *nr_written += nr_pages;
1297                 start = cur_end + 1;
1298         }
1299         *page_started = 1;
1300         return 0;
1301 }
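
/*
 * Worked example for the chunking above, using illustrative values: for
 * a compressible range with start == 0 and end == SZ_1M - 1, num_chunks
 * is DIV_ROUND_UP(SZ_1M - 1, SZ_512K) == 2 and the loop queues the
 * chunks [0, SZ_512K - 1] and [SZ_512K, SZ_1M - 1].  Only the first
 * chunk carries locked_page, so exactly one worker owns unlocking it.
 */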
1302 
1303 static noinline int csum_exist_in_range(struct btrfs_fs_info *fs_info,
1304                                         u64 bytenr, u64 num_bytes)
1305 {
1306         int ret;
1307         struct btrfs_ordered_sum *sums;
1308         LIST_HEAD(list);
1309 
1310         ret = btrfs_lookup_csums_range(fs_info->csum_root, bytenr,
1311                                        bytenr + num_bytes - 1, &list, 0);
1312         if (ret == 0 && list_empty(&list))
1313                 return 0;
1314 
1315         while (!list_empty(&list)) {
1316                 sums = list_entry(list.next, struct btrfs_ordered_sum, list);
1317                 list_del(&sums->list);
1318                 kfree(sums);
1319         }
1320         if (ret < 0)
1321                 return ret;
1322         return 1;
1323 }
1324 
1325 /*
1326  * Called for nocow writeback.  This checks for snapshots or COW copies
1327  * of the extents that exist in the file, and COWs the file as required.
1328  *
1329  * If no COW copies or snapshots exist, we write directly to the existing
1330  * blocks on disk.
1331  */
1332 static noinline int run_delalloc_nocow(struct inode *inode,
1333                                        struct page *locked_page,
1334                                        const u64 start, const u64 end,
1335                                        int *page_started, int force,
1336                                        unsigned long *nr_written)
1337 {
1338         struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
1339         struct btrfs_root *root = BTRFS_I(inode)->root;
1340         struct btrfs_path *path;
1341         u64 cow_start = (u64)-1;
1342         u64 cur_offset = start;
1343         int ret;
1344         bool check_prev = true;
1345         const bool freespace_inode = btrfs_is_free_space_inode(BTRFS_I(inode));
1346         u64 ino = btrfs_ino(BTRFS_I(inode));
1347         bool nocow = false;
1348         u64 disk_bytenr = 0;
1349 
1350         path = btrfs_alloc_path();
1351         if (!path) {
1352                 extent_clear_unlock_delalloc(inode, start, end, locked_page,
1353                                              EXTENT_LOCKED | EXTENT_DELALLOC |
1354                                              EXTENT_DO_ACCOUNTING |
1355                                              EXTENT_DEFRAG, PAGE_UNLOCK |
1356                                              PAGE_CLEAR_DIRTY |
1357                                              PAGE_SET_WRITEBACK |
1358                                              PAGE_END_WRITEBACK);
1359                 return -ENOMEM;
1360         }
1361 
1362         while (1) {
1363                 struct btrfs_key found_key;
1364                 struct btrfs_file_extent_item *fi;
1365                 struct extent_buffer *leaf;
1366                 u64 extent_end;
1367                 u64 extent_offset;
1368                 u64 num_bytes = 0;
1369                 u64 disk_num_bytes;
1370                 u64 ram_bytes;
1371                 int extent_type;
1372 
1373                 nocow = false;
1374 
1375                 ret = btrfs_lookup_file_extent(NULL, root, path, ino,
1376                                                cur_offset, 0);
1377                 if (ret < 0)
1378                         goto error;
1379 
1380                 /*
1381                  * If there is no extent for our range when doing the initial
1382                  * search, then go back to the previous slot as it will be the
1383                  * one containing the search offset
1384                  */
1385                 if (ret > 0 && path->slots[0] > 0 && check_prev) {
1386                         leaf = path->nodes[0];
1387                         btrfs_item_key_to_cpu(leaf, &found_key,
1388                                               path->slots[0] - 1);
1389                         if (found_key.objectid == ino &&
1390                             found_key.type == BTRFS_EXTENT_DATA_KEY)
1391                                 path->slots[0]--;
1392                 }
1393                 check_prev = false;
1394 next_slot:
1395                 /* Go to next leaf if we have exhausted the current one */
1396                 leaf = path->nodes[0];
1397                 if (path->slots[0] >= btrfs_header_nritems(leaf)) {
1398                         ret = btrfs_next_leaf(root, path);
1399                         if (ret < 0) {
1400                                 if (cow_start != (u64)-1)
1401                                         cur_offset = cow_start;
1402                                 goto error;
1403                         }
1404                         if (ret > 0)
1405                                 break;
1406                         leaf = path->nodes[0];
1407                 }
1408 
1409                 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
1410 
1411                 /* Didn't find anything for our INO */
1412                 if (found_key.objectid > ino)
1413                         break;
1414                 /*
1415                  * Keep searching until we find an EXTENT_DATA key or there
1416                  * are no more extents for this inode.
1417                  */
1418                 if (WARN_ON_ONCE(found_key.objectid < ino) ||
1419                     found_key.type < BTRFS_EXTENT_DATA_KEY) {
1420                         path->slots[0]++;
1421                         goto next_slot;
1422                 }
1423 
1424                 /* Found key is not EXTENT_DATA_KEY or starts after req range */
1425                 if (found_key.type > BTRFS_EXTENT_DATA_KEY ||
1426                     found_key.offset > end)
1427                         break;
1428 
1429                 /*
1430                  * If the found extent starts after requested offset, then
1431                  * adjust extent_end to be right before this extent begins
1432                  */
1433                 if (found_key.offset > cur_offset) {
1434                         extent_end = found_key.offset;
1435                         extent_type = 0;
1436                         goto out_check;
1437                 }
1438 
1439                 /*
1440                  * Found extent which begins before our range and potentially
1441                  * intersects it.
1442                  */
1443                 fi = btrfs_item_ptr(leaf, path->slots[0],
1444                                     struct btrfs_file_extent_item);
1445                 extent_type = btrfs_file_extent_type(leaf, fi);
1446 
1447                 ram_bytes = btrfs_file_extent_ram_bytes(leaf, fi);
1448                 if (extent_type == BTRFS_FILE_EXTENT_REG ||
1449                     extent_type == BTRFS_FILE_EXTENT_PREALLOC) {
1450                         disk_bytenr = btrfs_file_extent_disk_bytenr(leaf, fi);
1451                         extent_offset = btrfs_file_extent_offset(leaf, fi);
1452                         extent_end = found_key.offset +
1453                                 btrfs_file_extent_num_bytes(leaf, fi);
1454                         disk_num_bytes =
1455                                 btrfs_file_extent_disk_num_bytes(leaf, fi);
1456                         /*
1457                          * If the extent we got ends before our current offset,
1458                          * skip to the next extent.
1459                          */
1460                         if (extent_end <= cur_offset) {
1461                                 path->slots[0]++;
1462                                 goto next_slot;
1463                         }
1464                         /* Skip holes */
1465                         if (disk_bytenr == 0)
1466                                 goto out_check;
1467                         /* Skip compressed/encrypted/encoded extents */
1468                         if (btrfs_file_extent_compression(leaf, fi) ||
1469                             btrfs_file_extent_encryption(leaf, fi) ||
1470                             btrfs_file_extent_other_encoding(leaf, fi))
1471                                 goto out_check;
1472                         /*
1473                          * If the extent was created before the last snapshot
1474                          * of this volume, it may be shared with that snapshot,
1475                          * so we can't do nocow.  This is the same check as in
1476                          * btrfs_cross_ref_exist but without calling
1477                          * btrfs_search_slot.
1478                          */
1479                         if (!freespace_inode &&
1480                             btrfs_file_extent_generation(leaf, fi) <=
1481                             btrfs_root_last_snapshot(&root->root_item))
1482                                 goto out_check;
1483                         if (extent_type == BTRFS_FILE_EXTENT_REG && !force)
1484                                 goto out_check;
1485                         /* If extent is RO, we must COW it */
1486                         if (btrfs_extent_readonly(fs_info, disk_bytenr))
1487                                 goto out_check;
1488                         ret = btrfs_cross_ref_exist(root, ino,
1489                                                     found_key.offset -
1490                                                     extent_offset, disk_bytenr);
1491                         if (ret) {
1492                                 /*
1493                                  * ret could be -EIO if the above fails to read
1494                                  * metadata.
1495                                  */
1496                                 if (ret < 0) {
1497                                         if (cow_start != (u64)-1)
1498                                                 cur_offset = cow_start;
1499                                         goto error;
1500                                 }
1501 
1502                                 WARN_ON_ONCE(freespace_inode);
1503                                 goto out_check;
1504                         }
1505                         disk_bytenr += extent_offset;
1506                         disk_bytenr += cur_offset - found_key.offset;
1507                         num_bytes = min(end + 1, extent_end) - cur_offset;
1508                         /*
1509                          * If there are pending snapshots for this root, we
1510                          * fall back to the common COW path.
1511                          */
1512                         if (!freespace_inode && atomic_read(&root->snapshot_force_cow))
1513                                 goto out_check;
1514                         /*
1515                          * Force COW if csums exist in the range.  This
1516                          * ensures that the csums for a given extent are
1517                          * either valid or do not exist.
1518                          */
1519                         ret = csum_exist_in_range(fs_info, disk_bytenr,
1520                                                   num_bytes);
1521                         if (ret) {
1522                                 /*
1523                                  * ret could be -EIO if the above fails to read
1524                                  * metadata.
1525                                  */
1526                                 if (ret < 0) {
1527                                         if (cow_start != (u64)-1)
1528                                                 cur_offset = cow_start;
1529                                         goto error;
1530                                 }
1531                                 WARN_ON_ONCE(freespace_inode);
1532                                 goto out_check;
1533                         }
1534                         if (!btrfs_inc_nocow_writers(fs_info, disk_bytenr))
1535                                 goto out_check;
1536                         nocow = true;
1537                 } else if (extent_type == BTRFS_FILE_EXTENT_INLINE) {
1538                         extent_end = found_key.offset + ram_bytes;
1539                         extent_end = ALIGN(extent_end, fs_info->sectorsize);
1540                         /* Skip extents outside of our requested range */
1541                         if (extent_end <= start) {
1542                                 path->slots[0]++;
1543                                 goto next_slot;
1544                         }
1545                 } else {
1546                         /* If this triggers then we have a memory corruption */
1547                         BUG();
1548                 }
1549 out_check:
1550                 /*
1551                  * If nocow is false then record the beginning of the range
1552                  * that needs to be COWed
1553                  */
1554                 if (!nocow) {
1555                         if (cow_start == (u64)-1)
1556                                 cow_start = cur_offset;
1557                         cur_offset = extent_end;
1558                         if (cur_offset > end)
1559                                 break;
1560                         path->slots[0]++;
1561                         goto next_slot;
1562                 }
1563 
1564                 btrfs_release_path(path);
1565 
1566                 /*
1567                  * COW the range from cow_start to found_key.offset - 1.  The
1568                  * key contains the start of the first extent that can be
1569                  * NOCOW'ed, which follows the range that needs to be COW'ed.
1570                  */
1571                 if (cow_start != (u64)-1) {
1572                         ret = cow_file_range(inode, locked_page,
1573                                              cow_start, found_key.offset - 1,
1574                                              page_started, nr_written, 1);
1575                         if (ret) {
1576                                 if (nocow)
1577                                         btrfs_dec_nocow_writers(fs_info,
1578                                                                 disk_bytenr);
1579                                 goto error;
1580                         }
1581                         cow_start = (u64)-1;
1582                 }
1583 
1584                 if (extent_type == BTRFS_FILE_EXTENT_PREALLOC) {
1585                         u64 orig_start = found_key.offset - extent_offset;
1586                         struct extent_map *em;
1587 
1588                         em = create_io_em(inode, cur_offset, num_bytes,
1589                                           orig_start,
1590                                           disk_bytenr, /* block_start */
1591                                           num_bytes, /* block_len */
1592                                           disk_num_bytes, /* orig_block_len */
1593                                           ram_bytes, BTRFS_COMPRESS_NONE,
1594                                           BTRFS_ORDERED_PREALLOC);
1595                         if (IS_ERR(em)) {
1596                                 if (nocow)
1597                                         btrfs_dec_nocow_writers(fs_info,
1598                                                                 disk_bytenr);
1599                                 ret = PTR_ERR(em);
1600                                 goto error;
1601                         }
1602                         free_extent_map(em);
1603                         ret = btrfs_add_ordered_extent(inode, cur_offset,
1604                                                        disk_bytenr, num_bytes,
1605                                                        num_bytes,
1606                                                        BTRFS_ORDERED_PREALLOC);
1607                         if (ret) {
1608                                 btrfs_drop_extent_cache(BTRFS_I(inode),
1609                                                         cur_offset,
1610                                                         cur_offset + num_bytes - 1,
1611                                                         0);
1612                                 goto error;
1613                         }
1614                 } else {
1615                         ret = btrfs_add_ordered_extent(inode, cur_offset,
1616                                                        disk_bytenr, num_bytes,
1617                                                        num_bytes,
1618                                                        BTRFS_ORDERED_NOCOW);
1619                         if (ret)
1620                                 goto error;
1621                 }
1622 
1623                 if (nocow)
1624                         btrfs_dec_nocow_writers(fs_info, disk_bytenr);
1625                 nocow = false;
1626 
1627                 if (root->root_key.objectid ==
1628                     BTRFS_DATA_RELOC_TREE_OBJECTID)
1629                         /*
1630                          * The error is handled later, as we must prevent
1631                          * extent_clear_unlock_delalloc() in the error handler
1632                          * from freeing metadata of the created ordered extent.
1633                          */
1634                         ret = btrfs_reloc_clone_csums(inode, cur_offset,
1635                                                       num_bytes);
1636 
1637                 extent_clear_unlock_delalloc(inode, cur_offset,
1638                                              cur_offset + num_bytes - 1,
1639                                              locked_page, EXTENT_LOCKED |
1640                                              EXTENT_DELALLOC |
1641                                              EXTENT_CLEAR_DATA_RESV,
1642                                              PAGE_UNLOCK | PAGE_SET_PRIVATE2);
1643 
1644                 cur_offset = extent_end;
1645 
1646                 /*
1647                  * On btrfs_reloc_clone_csums() error we're now OK to call the
1648                  * error handler, as metadata for the created ordered extent
1649                  * will only be freed by btrfs_finish_ordered_io().
1650                  */
1651                 if (ret)
1652                         goto error;
1653                 if (cur_offset > end)
1654                         break;
1655         }
1656         btrfs_release_path(path);
1657 
1658         if (cur_offset <= end && cow_start == (u64)-1)
1659                 cow_start = cur_offset;
1660 
1661         if (cow_start != (u64)-1) {
1662                 cur_offset = end;
1663                 ret = cow_file_range(inode, locked_page, cow_start, end,
1664                                      page_started, nr_written, 1);
1665                 if (ret)
1666                         goto error;
1667         }
1668 
1669 error:
1670         if (nocow)
1671                 btrfs_dec_nocow_writers(fs_info, disk_bytenr);
1672 
1673         if (ret && cur_offset < end)
1674                 extent_clear_unlock_delalloc(inode, cur_offset, end,
1675                                              locked_page, EXTENT_LOCKED |
1676                                              EXTENT_DELALLOC | EXTENT_DEFRAG |
1677                                              EXTENT_DO_ACCOUNTING, PAGE_UNLOCK |
1678                                              PAGE_CLEAR_DIRTY |
1679                                              PAGE_SET_WRITEBACK |
1680                                              PAGE_END_WRITEBACK);
1681         btrfs_free_path(path);
1682         return ret;
1683 }
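
/*
 * Summary of the per-extent conditions checked above before writing
 * without COW: the extent must be REG or PREALLOC, not a hole, not
 * compressed/encrypted/encoded, created after the root's last snapshot,
 * not in a read-only block group, not shared with another root
 * (btrfs_cross_ref_exist), with no csums covering the range, no pending
 * snapshot forcing COW, and the nocow writers count must be taken.
 * Failing any check sends that sub-range through cow_file_range().
 */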
1684 
1685 static inline int need_force_cow(struct inode *inode, u64 start, u64 end)
1686 {
1687 
1688         if (!(BTRFS_I(inode)->flags & BTRFS_INODE_NODATACOW) &&
1689             !(BTRFS_I(inode)->flags & BTRFS_INODE_PREALLOC))
1690                 return 0;
1691 
1692         /*
1693          * @defrag_bytes is a hint value, read without any spinlock held;
1694          * if it is not zero, it means the file is being defragged.
1695          * Force COW if the given extent needs to be defragged.
1696          */
1697         if (BTRFS_I(inode)->defrag_bytes &&
1698             test_range_bit(&BTRFS_I(inode)->io_tree, start, end,
1699                            EXTENT_DEFRAG, 0, NULL))
1700                 return 1;
1701 
1702         return 0;
1703 }
1704 
1705 /*
1706  * Function to process delayed allocation (create CoW) for ranges which are
1707  * being touched for the first time.
1708  */
1709 int btrfs_run_delalloc_range(struct inode *inode, struct page *locked_page,
1710                 u64 start, u64 end, int *page_started, unsigned long *nr_written,
1711                 struct writeback_control *wbc)
1712 {
1713         int ret;
1714         int force_cow = need_force_cow(inode, start, end);
1715         unsigned int write_flags = wbc_to_write_flags(wbc);
1716 
1717         if (BTRFS_I(inode)->flags & BTRFS_INODE_NODATACOW && !force_cow) {
1718                 ret = run_delalloc_nocow(inode, locked_page, start, end,
1719                                          page_started, 1, nr_written);
1720         } else if (BTRFS_I(inode)->flags & BTRFS_INODE_PREALLOC && !force_cow) {
1721                 ret = run_delalloc_nocow(inode, locked_page, start, end,
1722                                          page_started, 0, nr_written);
1723         } else if (!inode_can_compress(inode) ||
1724                    !inode_need_compress(inode, start, end)) {
1725                 ret = cow_file_range(inode, locked_page, start, end,
1726                                       page_started, nr_written, 1);
1727         } else {
1728                 set_bit(BTRFS_INODE_HAS_ASYNC_EXTENT,
1729                         &BTRFS_I(inode)->runtime_flags);
1730                 ret = cow_file_range_async(inode, locked_page, start, end,
1731                                            page_started, nr_written,
1732                                            write_flags);
1733         }
1734         if (ret)
1735                 btrfs_cleanup_ordered_extents(inode, locked_page, start,
1736                                               end - start + 1);
1737         return ret;
1738 }
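
/*
 * Dispatch summary for the function above: NODATACOW inodes take the
 * nocow path with force == 1 (regular extents may be overwritten too),
 * PREALLOC inodes take it with force == 0 (only preallocated extents
 * are written in place), ranges that cannot or need not be compressed
 * go through plain cow_file_range(), and everything else is handed to
 * the async compression path.  Any failure cleans up ordered extents
 * already created for the range.
 */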
1739 
1740 void btrfs_split_delalloc_extent(struct inode *inode,
1741                                  struct extent_state *orig, u64 split)
1742 {
1743         u64 size;
1744 
1745         /* not delalloc, ignore it */
1746         if (!(orig->state & EXTENT_DELALLOC))
1747                 return;
1748 
1749         size = orig->end - orig->start + 1;
1750         if (size > BTRFS_MAX_EXTENT_SIZE) {
1751                 u32 num_extents;
1752                 u64 new_size;
1753 
1754                 /*
1755                  * See the explanation in btrfs_merge_delalloc_extent, the same
1756                  * applies here, just in reverse.
1757                  */
1758                 new_size = orig->end - split + 1;
1759                 num_extents = count_max_extents(new_size);
1760                 new_size = split - orig->start;
1761                 num_extents += count_max_extents(new_size);
1762                 if (count_max_extents(size) >= num_extents)
1763                         return;
1764         }
1765 
1766         spin_lock(&BTRFS_I(inode)->lock);
1767         btrfs_mod_outstanding_extents(BTRFS_I(inode), 1);
1768         spin_unlock(&BTRFS_I(inode)->lock);
1769 }
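
/*
 * Worked example for the split accounting above, assuming the in-tree
 * values BTRFS_MAX_EXTENT_SIZE == SZ_128M and count_max_extents(size)
 * == DIV_ROUND_UP(size, BTRFS_MAX_EXTENT_SIZE): splitting a 128M + 4K
 * extent 4K in from its start leaves a 4K piece (1 outstanding extent)
 * and a 128M piece (1 more), 2 in total.  The unsplit extent also
 * accounted for count_max_extents(128M + 4K) == 2, so nothing extra
 * needs to be reserved and the function returns before modifying the
 * count.
 */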
1770 
1771 /*
1772  * Handle merged delayed allocation extents so we can keep track of new extents
1773  * that are just merged onto old extents, such as when we are doing sequential
1774  * writes, so we can properly account for the metadata space we'll need.
1775  */
1776 void btrfs_merge_delalloc_extent(struct inode *inode, struct extent_state *new,
1777                                  struct extent_state *other)
1778 {
1779         u64 new_size, old_size;
1780         u32 num_extents;
1781 
1782         /* not delalloc, ignore it */
1783         if (!(other->state & EXTENT_DELALLOC))
1784                 return;
1785 
1786         if (new->start > other->start)
1787                 new_size = new->end - other->start + 1;
1788         else
1789                 new_size = other->end - new->start + 1;
1790 
1791         /* we're not bigger than the max, unreserve the space and go */
1792         if (new_size <= BTRFS_MAX_EXTENT_SIZE) {
1793                 spin_lock(&BTRFS_I(inode)->lock);
1794                 btrfs_mod_outstanding_extents(BTRFS_I(inode), -1);
1795                 spin_unlock(&BTRFS_I(inode)->lock);
1796                 return;
1797         }
1798 
1799         /*
1800          * We have to add up either side to figure out how many extents were
1801          * accounted for before we merged into one big extent.  If the number of
1802          * extents we accounted for is <= the amount we need for the new range
1803          * then we can return, otherwise drop.  Think of it like this
1804          *
1805          * [ 4k][MAX_SIZE]
1806          *
1807          * So we've grown the extent by a MAX_SIZE extent, this would mean we
1808          * need 2 outstanding extents, on one side we have 1 and the other side
1809          * we have 1 so they are == and we can return.  But in this case
1810          *
1811          * [MAX_SIZE+4k][MAX_SIZE+4k]
1812          *
1813          * Each range on their own accounts for 2 extents, but merged together
1814          * they are only 3 extents worth of accounting, so we need to drop in
1815          * this case.
1816          */
1817         old_size = other->end - other->start + 1;
1818         num_extents = count_max_extents(old_size);
1819         old_size = new->end - new->start + 1;
1820         num_extents += count_max_extents(old_size);
1821         if (count_max_extents(new_size) >= num_extents)
1822                 return;
1823 
1824         spin_lock(&BTRFS_I(inode)->lock);
1825         btrfs_mod_outstanding_extents(BTRFS_I(inode), -1);
1826         spin_unlock(&BTRFS_I(inode)->lock);
1827 }
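
/*
 * Checking the second example above with BTRFS_MAX_EXTENT_SIZE ==
 * SZ_128M: each (MAX_SIZE + 4K) range accounts for 2 outstanding
 * extents, 4 in total, while the merged (2 * MAX_SIZE + 8K) range only
 * needs count_max_extents == 3, so 3 < 4 and one outstanding extent is
 * dropped via btrfs_mod_outstanding_extents(..., -1).
 */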
1828 
1829 static void btrfs_add_delalloc_inodes(struct btrfs_root *root,
1830                                       struct inode *inode)
1831 {
1832         struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
1833 
1834         spin_lock(&root->delalloc_lock);
1835         if (list_empty(&BTRFS_I(inode)->delalloc_inodes)) {
1836                 list_add_tail(&BTRFS_I(inode)->delalloc_inodes,
1837                               &root->delalloc_inodes);
1838                 set_bit(BTRFS_INODE_IN_DELALLOC_LIST,
1839                         &BTRFS_I(inode)->runtime_flags);
1840                 root->nr_delalloc_inodes++;
1841                 if (root->nr_delalloc_inodes == 1) {
1842                         spin_lock(&fs_info->delalloc_root_lock);
1843                         BUG_ON(!list_empty(&root->delalloc_root));
1844                         list_add_tail(&root->delalloc_root,
1845                                       &fs_info->delalloc_roots);
1846                         spin_unlock(&fs_info->delalloc_root_lock);
1847                 }
1848         }
1849         spin_unlock(&root->delalloc_lock);
1850 }
1851 
1852 
1853 void __btrfs_del_delalloc_inode(struct btrfs_root *root,
1854                                 struct btrfs_inode *inode)
1855 {
1856         struct btrfs_fs_info *fs_info = root->fs_info;
1857 
1858         if (!list_empty(&inode->delalloc_inodes)) {
1859                 list_del_init(&inode->delalloc_inodes);
1860                 clear_bit(BTRFS_INODE_IN_DELALLOC_LIST,
1861                           &inode->runtime_flags);
1862                 root->nr_delalloc_inodes--;
1863                 if (!root->nr_delalloc_inodes) {
1864                         ASSERT(list_empty(&root->delalloc_inodes));
1865                         spin_lock(&fs_info->delalloc_root_lock);
1866                         BUG_ON(list_empty(&root->delalloc_root));
1867                         list_del_init(&root->delalloc_root);
1868                         spin_unlock(&fs_info->delalloc_root_lock);
1869                 }
1870         }
1871 }
1872 
1873 static void btrfs_del_delalloc_inode(struct btrfs_root *root,
1874                                      struct btrfs_inode *inode)
1875 {
1876         spin_lock(&root->delalloc_lock);
1877         __btrfs_del_delalloc_inode(root, inode);
1878         spin_unlock(&root->delalloc_lock);
1879 }
1880 
1881 /*
1882  * Properly track delayed allocation bytes in the inode and maintain the
1883  * list of inodes that have pending delalloc work to be done.
1884  */
1885 void btrfs_set_delalloc_extent(struct inode *inode, struct extent_state *state,
1886                                unsigned *bits)
1887 {
1888         struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
1889 
1890         if ((*bits & EXTENT_DEFRAG) && !(*bits & EXTENT_DELALLOC))
1891                 WARN_ON(1);
1892         /*
1893          * set_bit and clear_bit hooks normally require _irqsave/restore
1894          * but in this case, we are only testing for the DELALLOC
1895          * bit, which is only set or cleared with irqs on
1896          */
1897         if (!(state->state & EXTENT_DELALLOC) && (*bits & EXTENT_DELALLOC)) {
1898                 struct btrfs_root *root = BTRFS_I(inode)->root;
1899                 u64 len = state->end + 1 - state->start;
1900                 u32 num_extents = count_max_extents(len);
1901                 bool do_list = !btrfs_is_free_space_inode(BTRFS_I(inode));
1902 
1903                 spin_lock(&BTRFS_I(inode)->lock);
1904                 btrfs_mod_outstanding_extents(BTRFS_I(inode), num_extents);
1905                 spin_unlock(&BTRFS_I(inode)->lock);
1906 
1907                 /* For sanity tests */
1908                 if (btrfs_is_testing(fs_info))
1909                         return;
1910 
1911                 percpu_counter_add_batch(&fs_info->delalloc_bytes, len,
1912                                          fs_info->delalloc_batch);
1913                 spin_lock(&BTRFS_I(inode)->lock);
1914                 BTRFS_I(inode)->delalloc_bytes += len;
1915                 if (*bits & EXTENT_DEFRAG)
1916                         BTRFS_I(inode)->defrag_bytes += len;
1917                 if (do_list && !test_bit(BTRFS_INODE_IN_DELALLOC_LIST,
1918                                          &BTRFS_I(inode)->runtime_flags))
1919                         btrfs_add_delalloc_inodes(root, inode);
1920                 spin_unlock(&BTRFS_I(inode)->lock);
1921         }
1922 
1923         if (!(state->state & EXTENT_DELALLOC_NEW) &&
1924             (*bits & EXTENT_DELALLOC_NEW)) {
1925                 spin_lock(&BTRFS_I(inode)->lock);
1926                 BTRFS_I(inode)->new_delalloc_bytes += state->end + 1 -
1927                         state->start;
1928                 spin_unlock(&BTRFS_I(inode)->lock);
1929         }
1930 }
1931 
1932 /*
1933  * Once a range is no longer delalloc this function ensures that proper
1934  * accounting happens.
1935  */
1936 void btrfs_clear_delalloc_extent(struct inode *vfs_inode,
1937                                  struct extent_state *state, unsigned *bits)
1938 {
1939         struct btrfs_inode *inode = BTRFS_I(vfs_inode);
1940         struct btrfs_fs_info *fs_info = btrfs_sb(vfs_inode->i_sb);
1941         u64 len = state->end + 1 - state->start;
1942         u32 num_extents = count_max_extents(len);
1943 
1944         if ((state->state & EXTENT_DEFRAG) && (*bits & EXTENT_DEFRAG)) {
1945                 spin_lock(&inode->lock);
1946                 inode->defrag_bytes -= len;
1947                 spin_unlock(&inode->lock);
1948         }
1949 
1950         /*
1951          * set_bit and clear_bit hooks normally require _irqsave/restore
1952          * but in this case, we are only testing for the DELALLOC
1953          * bit, which is only set or cleared with irqs on
1954          */
1955         if ((state->state & EXTENT_DELALLOC) && (*bits & EXTENT_DELALLOC)) {
1956                 struct btrfs_root *root = inode->root;
1957                 bool do_list = !btrfs_is_free_space_inode(inode);
1958 
1959                 spin_lock(&inode->lock);
1960                 btrfs_mod_outstanding_extents(inode, -num_extents);
1961                 spin_unlock(&inode->lock);
1962 
1963                 /*
1964                  * We don't reserve metadata space for space cache inodes so we
1965                  * don't need to call delalloc_release_metadata if there is an
1966                  * error.
1967                  */
1968                 if (*bits & EXTENT_CLEAR_META_RESV &&
1969                     root != fs_info->tree_root)
1970                         btrfs_delalloc_release_metadata(inode, len, false);
1971 
1972                 /* For sanity tests. */
1973                 if (btrfs_is_testing(fs_info))
1974                         return;
1975 
1976                 if (root->root_key.objectid != BTRFS_DATA_RELOC_TREE_OBJECTID &&
1977                     do_list && !(state->state & EXTENT_NORESERVE) &&
1978                     (*bits & EXTENT_CLEAR_DATA_RESV))
1979                         btrfs_free_reserved_data_space_noquota(
1980                                         &inode->vfs_inode,
1981                                         state->start, len);
1982 
1983                 percpu_counter_add_batch(&fs_info->delalloc_bytes, -len,
1984                                          fs_info->delalloc_batch);
1985                 spin_lock(&inode->lock);
1986                 inode->delalloc_bytes -= len;
1987                 if (do_list && inode->delalloc_bytes == 0 &&
1988                     test_bit(BTRFS_INODE_IN_DELALLOC_LIST,
1989                                         &inode->runtime_flags))
1990                         btrfs_del_delalloc_inode(root, inode);
1991                 spin_unlock(&inode->lock);
1992         }
1993 
1994         if ((state->state & EXTENT_DELALLOC_NEW) &&
1995             (*bits & EXTENT_DELALLOC_NEW)) {
1996                 spin_lock(&inode->lock);
1997                 ASSERT(inode->new_delalloc_bytes >= len);
1998                 inode->new_delalloc_bytes -= len;
1999                 spin_unlock(&inode->lock);
2000         }
2001 }
2002 
2003 /*
2004  * btrfs_bio_fits_in_stripe - Checks whether the size of the given bio will fit
2005  * in a chunk's stripe. This function ensures that bios do not span a
2006  * stripe/chunk
2007  *
2008  * @page - The page we are about to add to the bio
2009  * @size - size we want to add to the bio
2010  * @bio - bio we want to ensure is smaller than a stripe
2011  * @bio_flags - flags of the bio
2012  *
2013  * return 1 if page cannot be added to the bio
2014  * return 0 if page can be added to the bio
2015  * return error otherwise
2016  */
2017 int btrfs_bio_fits_in_stripe(struct page *page, size_t size, struct bio *bio,
2018                              unsigned long bio_flags)
2019 {
2020         struct inode *inode = page->mapping->host;
2021         struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
2022         u64 logical = (u64)bio->bi_iter.bi_sector << 9;
2023         u64 length = 0;
2024         u64 map_length;
2025         int ret;
2026         struct btrfs_io_geometry geom;
2027 
2028         if (bio_flags & EXTENT_BIO_COMPRESSED)
2029                 return 0;
2030 
2031         length = bio->bi_iter.bi_size;
2032         map_length = length;
2033         ret = btrfs_get_io_geometry(fs_info, btrfs_op(bio), logical, map_length,
2034                                     &geom);
2035         if (ret < 0)
2036                 return ret;
2037 
2038         if (geom.len < length + size)
2039                 return 1;
2040         return 0;
2041 }
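
/*
 * Example of the geometry check above, with illustrative numbers: for a
 * bio whose bi_sector starts 60K into a 64K stripe, geom.len comes back
 * as 4K.  If the bio already holds 4K and we try to add another 4K
 * page, length + size == 8K exceeds geom.len, so the function returns 1
 * and the caller must start a new bio at the stripe boundary.
 */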
2042 
2043 /*
2044  * In order to insert checksums into the metadata in large chunks, we
2045  * wait until bio submission time.  All the pages in the bio are
2046  * checksummed and sums are attached onto the ordered extent record.
2047  *
2048  * At IO completion time the csums attached on the ordered extent record
2049  * are inserted into the btree.
2050  */
2051 static blk_status_t btrfs_submit_bio_start(void *private_data, struct bio *bio,
2052                                     u64 bio_offset)
2053 {
2054         struct inode *inode = private_data;
2055         blk_status_t ret = 0;
2056 
2057         ret = btrfs_csum_one_bio(inode, bio, 0, 0);
2058         BUG_ON(ret); /* -ENOMEM */
2059         return 0;
2060 }
2061 
2062 /*
2063  * extent_io.c submission hook. This does the right thing for csum calculation
2064  * on write, or reading the csums from the tree before a read.
2065  *
2066  * Rules about async/sync submit,
2067  * a) read:                             sync submit
2068  *
2069  * b) write without checksum:           sync submit
2070  *
2071  * c) write with checksum:
2072  *    c-1) if bio is issued by fsync:   sync submit
2073  *         (sync_writers != 0)
2074  *
2075  *    c-2) if root is reloc root:       sync submit
2076  *         (only in case of buffered IO)
2077  *
2078  *    c-3) otherwise:                   async submit
2079  */
2080 static blk_status_t btrfs_submit_bio_hook(struct inode *inode, struct bio *bio,
2081                                           int mirror_num,
2082                                           unsigned long bio_flags)
2083 
2084 {
2085         struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
2086         struct btrfs_root *root = BTRFS_I(inode)->root;
2087         enum btrfs_wq_endio_type metadata = BTRFS_WQ_ENDIO_DATA;
2088         blk_status_t ret = 0;
2089         int skip_sum;
2090         int async = !atomic_read(&BTRFS_I(inode)->sync_writers);
2091 
2092         skip_sum = BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM;
2093 
2094         if (btrfs_is_free_space_inode(BTRFS_I(inode)))
2095                 metadata = BTRFS_WQ_ENDIO_FREE_SPACE;
2096 
2097         if (bio_op(bio) != REQ_OP_WRITE) {
2098                 ret = btrfs_bio_wq_end_io(fs_info, bio, metadata);
2099                 if (ret)
2100                         goto out;
2101 
2102                 if (bio_flags & EXTENT_BIO_COMPRESSED) {
2103                         ret = btrfs_submit_compressed_read(inode, bio,
2104                                                            mirror_num,
2105                                                            bio_flags);
2106                         goto out;
2107                 } else if (!skip_sum) {
2108                         ret = btrfs_lookup_bio_sums(inode, bio, NULL);
2109                         if (ret)
2110                                 goto out;
2111                 }
2112                 goto mapit;
2113         } else if (async && !skip_sum) {
2114                 /* csum items have already been cloned */
2115                 if (root->root_key.objectid == BTRFS_DATA_RELOC_TREE_OBJECTID)
2116                         goto mapit;
2117                 /* we're doing a write, do the async checksumming */
2118                 ret = btrfs_wq_submit_bio(fs_info, bio, mirror_num, bio_flags,
2119                                           0, inode, btrfs_submit_bio_start);
2120                 goto out;
2121         } else if (!skip_sum) {
2122                 ret = btrfs_csum_one_bio(inode, bio, 0, 0);
2123                 if (ret)
2124                         goto out;
2125         }
2126 
2127 mapit:
2128         ret = btrfs_map_bio(fs_info, bio, mirror_num, 0);
2129 
2130 out:
2131         if (ret) {
2132                 bio->bi_status = ret;
2133                 bio_endio(bio);
2134         }
2135         return ret;
2136 }
2137 
2138 /*
2139  * Given a list of ordered sums, record them in the inode.  This happens
2140  * at IO completion time based on sums calculated at bio submission time.
2141  */
2142 static noinline int add_pending_csums(struct btrfs_trans_handle *trans,
2143                              struct inode *inode, struct list_head *list)
2144 {
2145         struct btrfs_ordered_sum *sum;
2146         int ret;
2147 
2148         list_for_each_entry(sum, list, list) {
2149                 trans->adding_csums = true;
2150                 ret = btrfs_csum_file_blocks(trans,
2151                        BTRFS_I(inode)->root->fs_info->csum_root, sum);
2152                 trans->adding_csums = false;
2153                 if (ret)
2154                         return ret;
2155         }
2156         return 0;
2157 }
2158 
2159 int btrfs_set_extent_delalloc(struct inode *inode, u64 start, u64 end,
2160                               unsigned int extra_bits,
2161                               struct extent_state **cached_state)
2162 {
2163         WARN_ON(PAGE_ALIGNED(end));
2164         return set_extent_delalloc(&BTRFS_I(inode)->io_tree, start, end,
2165                                    extra_bits, cached_state);
2166 }
2167 
2168 /* see btrfs_writepage_cow_fixup for details on why this is required */
2169 struct btrfs_writepage_fixup {
2170         struct page *page;
2171         struct inode *inode;
2172         struct btrfs_work work;
2173 };
2174 
2175 static void btrfs_writepage_fixup_worker(struct btrfs_work *work)
2176 {
2177         struct btrfs_writepage_fixup *fixup;
2178         struct btrfs_ordered_extent *ordered;
2179         struct extent_state *cached_state = NULL;
2180         struct extent_changeset *data_reserved = NULL;
2181         struct page *page;
2182         struct inode *inode;
2183         u64 page_start;
2184         u64 page_end;
2185         int ret = 0;
2186         bool free_delalloc_space = true;
2187 
2188         fixup = container_of(work, struct btrfs_writepage_fixup, work);
2189         page = fixup->page;
2190         inode = fixup->inode;
2191         page_start = page_offset(page);
2192         page_end = page_offset(page) + PAGE_SIZE - 1;
2193 
2194         /*
2195          * This is similar to page_mkwrite, we need to reserve the space before
2196          * we take the page lock.
2197          */
2198         ret = btrfs_delalloc_reserve_space(inode, &data_reserved, page_start,
2199                                            PAGE_SIZE);
2200 again:
2201         lock_page(page);
2202 
2203         /*
2204          * Before we queued this fixup, we took a reference on the page.
2205          * page->mapping may go NULL, but it shouldn't be moved to a different
2206          * address space.
2207          */
2208         if (!page->mapping || !PageDirty(page) || !PageChecked(page)) {
2209                 /*
2210                  * Unfortunately this is a little tricky, either
2211                  *
2212                  * 1) We got here and our page had already been dealt with and
2213                  *    we reserved our space, thus ret == 0, so we need to just
2214                  *    drop our space reservation and bail.  This can happen the
2215                  *    first time we come into the fixup worker, or could happen
2216                  *    while waiting for the ordered extent.
2217                  * 2) Our page was already dealt with, but we happened to get an
2218                  *    ENOSPC above from the btrfs_delalloc_reserve_space.  In
2219                  *    this case we obviously don't have anything to release, but
2220                  *    because the page was already dealt with we don't want to
2221                  *    mark the page with an error, so make sure we're resetting
2222                  *    ret to 0.  This is why we have this check _before_ the ret
2223                  *    check, because we do not want to have a surprise ENOSPC
2224                  *    when the page was already properly dealt with.
2225                  */
2226                 if (!ret) {
2227                         btrfs_delalloc_release_extents(BTRFS_I(inode),
2228                                                        PAGE_SIZE);
2229                         btrfs_delalloc_release_space(inode, data_reserved,
2230                                                      page_start, PAGE_SIZE,
2231                                                      true);
2232                 }
2233                 ret = 0;
2234                 goto out_page;
2235         }
2236 
2237         /*
2238          * We can't mess with the page state unless it is locked, so now that
2239          * it is locked bail if we failed to make our space reservation.
2240          */
2241         if (ret)
2242                 goto out_page;
2243 
2244         lock_extent_bits(&BTRFS_I(inode)->io_tree, page_start, page_end,
2245                          &cached_state);
2246 
2247         /* already ordered? We're done */
2248         if (PagePrivate2(page))
2249                 goto out_reserved;
2250 
2251         ordered = btrfs_lookup_ordered_range(BTRFS_I(inode), page_start,
2252                                         PAGE_SIZE);
2253         if (ordered) {
2254                 unlock_extent_cached(&BTRFS_I(inode)->io_tree, page_start,
2255                                      page_end, &cached_state);
2256                 unlock_page(page);
2257                 btrfs_start_ordered_extent(inode, ordered, 1);
2258                 btrfs_put_ordered_extent(ordered);
2259                 goto again;
2260         }
2261 
2262         ret = btrfs_set_extent_delalloc(inode, page_start, page_end, 0,
2263                                         &cached_state);
2264         if (ret)
2265                 goto out_reserved;
2266 
2267         /*
2268          * Everything went as planned, we're now the owner of a dirty page with
2269          * delayed allocation bits set and space reserved for our COW
2270          * destination.
2271          *
2272          * The page was dirty when we started, nothing should have cleaned it.
2273          */
2274         BUG_ON(!PageDirty(page));
2275         free_delalloc_space = false;
2276 out_reserved:
2277         btrfs_delalloc_release_extents(BTRFS_I(inode), PAGE_SIZE);
2278         if (free_delalloc_space)
2279                 btrfs_delalloc_release_space(inode, data_reserved, page_start,
2280                                              PAGE_SIZE, true);
2281         unlock_extent_cached(&BTRFS_I(inode)->io_tree, page_start, page_end,
2282                              &cached_state);
2283 out_page:
2284         if (ret) {
2285                 /*
2286                  * We hit ENOSPC or other errors.  Update the mapping and page
2287                  * to reflect the errors and clean the page.
2288                  */
2289                 mapping_set_error(page->mapping, ret);
2290                 end_extent_writepage(page, ret, page_start, page_end);
2291                 clear_page_dirty_for_io(page);
2292                 SetPageError(page);
2293         }
2294         ClearPageChecked(page);
2295         unlock_page(page);
2296         put_page(page);
2297         kfree(fixup);
2298         extent_changeset_free(data_reserved);
2299         /*
2300          * As a precaution, do a delayed iput in case it would be the last iput
2301          * that could need flushing space. Recursing back to fixup worker would
2302          * deadlock.
2303          */
2304         btrfs_add_delayed_iput(inode);
2305 }
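
/*
 * The worker above deliberately mirrors the page_mkwrite ordering:
 * reserve delalloc space first, then take the page lock, bail out if
 * the page has already been dealt with, lock the extent range, wait for
 * any ordered extent covering the page, and only then mark the range
 * delalloc.  On failure the reservation is released and the page is
 * errored out rather than being left dirty with no reservation.
 */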
2306 
2307 /*
2308  * There are a few paths in the higher layers of the kernel that directly
2309  * set the page dirty bit without asking the filesystem if it is a
2310  * good idea.  This causes problems because we want to make sure COW
2311  * properly happens and the data=ordered rules are followed.
2312  *
2313  * In our case any range that doesn't have the ORDERED bit set
2314  * hasn't been properly setup for IO.  We kick off an async process
2315  * to fix it up.  The async helper will wait for ordered extents, set
2316  * the delalloc bit and make it safe to write the page.
2317  */
2318 int btrfs_writepage_cow_fixup(struct page *page, u64 start, u64 end)
2319 {
2320         struct inode *inode = page->mapping->host;
2321         struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
2322         struct btrfs_writepage_fixup *fixup;
2323 
2324         /* this page is properly in the ordered list */
2325         if (TestClearPagePrivate2(page))
2326                 return 0;
2327 
2328         /*
2329          * PageChecked is set below when we create a fixup worker for this page,
2330          * don't try to create another one if we're already PageChecked()
2331          *
2332          * The extent_io writepage code will redirty the page if we send back
2333          * EAGAIN.
2334          */
2335         if (PageChecked(page))
2336                 return -EAGAIN;
2337 
2338         fixup = kzalloc(sizeof(*fixup), GFP_NOFS);
2339         if (!fixup)
2340                 return -EAGAIN;
2341 
2342         /*
2343          * We are already holding a reference to this inode from
2344          * write_cache_pages.  We need to hold it because the space reservation
2345          * takes place outside of the page lock, and we can't trust
2346          * page->mapping outside of the page lock.
2347          */
2348         ihold(inode);
2349         SetPageChecked(page);
2350         get_page(page);
2351         btrfs_init_work(&fixup->work, btrfs_writepage_fixup_worker, NULL, NULL);
2352         fixup->page = page;
2353         fixup->inode = inode;
2354         btrfs_queue_work(fs_info->fixup_workers, &fixup->work);
2355 
2356         return -EAGAIN;
2357 }
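
/*
 * Illustrative sketch, not part of inode.c: the function above follows a
 * common deferral shape -- pin what you need (ihold() the inode, get_page()
 * the page), mark the object so the work is queued at most once, then hand
 * the heavy lifting to a workqueue.  The same shape with the stock
 * workqueue API instead of btrfs's wrappers; 'my_fixup' and friends are
 * hypothetical names:
 */
struct my_fixup {
	struct work_struct work;
	struct page *page;
	struct inode *inode;
};

static void my_fixup_worker(struct work_struct *work)
{
	struct my_fixup *fixup = container_of(work, struct my_fixup, work);

	/* ... operate on fixup->page and fixup->inode ... */
	put_page(fixup->page);		/* drop the refs taken at queue time */
	iput(fixup->inode);
	kfree(fixup);
}

static int my_queue_fixup(struct inode *inode, struct page *page)
{
	struct my_fixup *fixup = kzalloc(sizeof(*fixup), GFP_NOFS);

	if (!fixup)
		return -ENOMEM;
	ihold(inode);			/* both dropped by the worker */
	get_page(page);
	fixup->inode = inode;
	fixup->page = page;
	INIT_WORK(&fixup->work, my_fixup_worker);
	schedule_work(&fixup->work);
	return 0;
}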
2358 
2359 static int insert_reserved_file_extent(struct btrfs_trans_handle *trans,
2360                                        struct inode *inode, u64 file_pos,
2361                                        u64 disk_bytenr, u64 disk_num_bytes,
2362                                        u64 num_bytes, u64 ram_bytes,
2363                                        u8 compression, u8 encryption,
2364                                        u16 other_encoding, int extent_type)
2365 {
2366         struct btrfs_root *root = BTRFS_I(inode)->root;
2367         struct btrfs_file_extent_item *fi;
2368         struct btrfs_path *path;
2369         struct extent_buffer *leaf;
2370         struct btrfs_key ins;
2371         u64 qg_released;
2372         int extent_inserted = 0;
2373         int ret;
2374 
2375         path = btrfs_alloc_path();
2376         if (!path)
2377                 return -ENOMEM;
2378 
2379         /*
2380          * we may be replacing one extent in the tree with another.
2381          * The new extent is pinned in the extent map, and we don't want
2382          * to drop it from the cache until it is completely in the btree.
2383          *
2384          * So, tell btrfs_drop_extents to leave this extent in the cache.
2385          * The caller is expected to unpin it and allow it to be merged
2386          * with the others.
2387          */
2388         ret = __btrfs_drop_extents(trans, root, inode, path, file_pos,
2389                                    file_pos + num_bytes, NULL, 0,
2390                                    1, sizeof(*fi), &extent_inserted);
2391         if (ret)
2392                 goto out;
2393 
2394         if (!extent_inserted) {
2395                 ins.objectid = btrfs_ino(BTRFS_I(inode));
2396                 ins.offset = file_pos;
2397                 ins.type = BTRFS_EXTENT_DATA_KEY;
2398 
2399                 path->leave_spinning = 1;
2400                 ret = btrfs_insert_empty_item(trans, root, path, &ins,
2401                                               sizeof(*fi));
2402                 if (ret)
2403                         goto out;
2404         }
2405         leaf = path->nodes[0];
2406         fi = btrfs_item_ptr(leaf, path->slots[0],
2407                             struct btrfs_file_extent_item);
2408         btrfs_set_file_extent_generation(leaf, fi, trans->transid);
2409         btrfs_set_file_extent_type(leaf, fi, extent_type);
2410         btrfs_set_file_extent_disk_bytenr(leaf, fi, disk_bytenr);
2411         btrfs_set_file_extent_disk_num_bytes(leaf, fi, disk_num_bytes);
2412         btrfs_set_file_extent_offset(leaf, fi, 0);
2413         btrfs_set_file_extent_num_bytes(leaf, fi, num_bytes);
2414         btrfs_set_file_extent_ram_bytes(leaf, fi, ram_bytes);
2415         btrfs_set_file_extent_compression(leaf, fi, compression);
2416         btrfs_set_file_extent_encryption(leaf, fi, encryption);
2417         btrfs_set_file_extent_other_encoding(leaf, fi, other_encoding);
2418 
2419         btrfs_mark_buffer_dirty(leaf);
2420         btrfs_release_path(path);
2421 
2422         inode_add_bytes(inode, num_bytes);
2423 
2424         ins.objectid = disk_bytenr;
2425         ins.offset = disk_num_bytes;
2426         ins.type = BTRFS_EXTENT_ITEM_KEY;
2427 
2428         /*
2429          * Release the reserved range from inode dirty range map, as it is
2430          * already moved into delayed_ref_head
2431          */
2432         ret = btrfs_qgroup_release_data(inode, file_pos, ram_bytes);
2433         if (ret < 0)
2434                 goto out;
2435         qg_released = ret;
2436         ret = btrfs_alloc_reserved_file_extent(trans, root,
2437                                                btrfs_ino(BTRFS_I(inode)),
2438                                                file_pos, qg_released, &ins);
2439 out:
2440         btrfs_free_path(path);
2441 
2442         return ret;
2443 }
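
/*
 * Illustrative note, added for orientation (to this editor's best
 * understanding of btrfs_file_extent_item):
 *
 *   disk_bytenr/disk_num_bytes: where the extent lives on disk and its
 *       on-disk (possibly compressed) size;
 *   offset:    byte offset into the decompressed extent at which this
 *              file extent item starts reading (0 above: a fresh extent
 *              is mapped from its beginning);
 *   num_bytes: how many bytes of the file this item covers;
 *   ram_bytes: decompressed size of the whole extent.
 *
 * For an uncompressed, unshared extent these collapse to num_bytes ==
 * ram_bytes == disk_num_bytes and offset == 0.
 */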
2444 
2445 /* snapshot-aware defrag */
2446 struct sa_defrag_extent_backref {
2447         struct rb_node node;
2448         struct old_sa_defrag_extent *old;
2449         u64 root_id;
2450         u64 inum;
2451         u64 file_pos;
2452         u64 extent_offset;
2453         u64 num_bytes;
2454         u64 generation;
2455 };
2456 
2457 struct old_sa_defrag_extent {
2458         struct list_head list;
2459         struct new_sa_defrag_extent *new;
2460 
2461         u64 extent_offset;
2462         u64 bytenr;
2463         u64 offset;
2464         u64 len;
2465         int count;
2466 };
2467 
2468 struct new_sa_defrag_extent {
2469         struct rb_root root;
2470         struct list_head head;
2471         struct btrfs_path *path;
2472         struct inode *inode;
2473         u64 file_pos;
2474         u64 len;
2475         u64 bytenr;
2476         u64 disk_len;
2477         u8 compress_type;
2478 };
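
/*
 * Illustrative note, added for orientation: how the three structures above
 * relate during snapshot-aware defrag.  One new_sa_defrag_extent is built
 * per defragged ordered extent; its ->head lists the old_sa_defrag_extents
 * (the pre-defrag extents that covered this file range), and its ->root
 * collects one sa_defrag_extent_backref per (root, inode, file_pos)
 * reference found for those old extents.  Each old extent points back at
 * its new extent, and each backref at the old extent it was recorded for.
 */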
2479 
2480 static int backref_comp(struct sa_defrag_extent_backref *b1,
2481                         struct sa_defrag_extent_backref *b2)
2482 {
2483         if (b1->root_id < b2->root_id)
2484                 return -1;
2485         else if (b1->root_id > b2->root_id)
2486                 return 1;
2487 
2488         if (b1->inum < b2->inum)
2489                 return -1;
2490         else if (b1->inum > b2->inum)
2491                 return 1;
2492 
2493         if (b1->file_pos < b2->file_pos)
2494                 return -1;
2495         else if (b1->file_pos > b2->file_pos)
2496                 return 1;
2497 
2498         /*
2499          * [------------------------------] ===> (a range of space)
2500          *     |<--->|   |<---->| =============> (fs/file tree A)
2501          * |<---------------------------->| ===> (fs/file tree B)
2502          *
2503          * A range of space can refer to two file extents in one tree while
2504          * referring to only one file extent in another tree.
2505          *
2506          * So we may process a disk offset more than once (two extents in A)
2507          * that lands on the same extent (one extent in B), and thus insert two
2508          * identical backrefs (both referring to the extent in B).
2509          */
2510         return 0;
2511 }
2512 
2513 static void backref_insert(struct rb_root *root,
2514                            struct sa_defrag_extent_backref *backref)
2515 {
2516         struct rb_node **p = &root->rb_node;
2517         struct rb_node *parent = NULL;
2518         struct sa_defrag_extent_backref *entry;
2519         int ret;
2520 
2521         while (*p) {
2522                 parent = *p;
2523                 entry = rb_entry(parent, struct sa_defrag_extent_backref, node);
2524 
2525                 ret = backref_comp(backref, entry);
2526                 if (ret < 0)
2527                         p = &(*p)->rb_left;
2528                 else
2529                         p = &(*p)->rb_right;
2530         }
2531 
2532         rb_link_node(&backref->node, parent, p);
2533         rb_insert_color(&backref->node, root);
2534 }
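
/*
 * Illustrative sketch, not part of inode.c: note that backref_insert()
 * sends equal keys (backref_comp() == 0) to the right, so duplicate
 * backrefs can coexist in the tree.  A lookup counterpart, were one
 * needed, would walk the same way; 'backref_search' is a hypothetical
 * helper:
 */
static struct sa_defrag_extent_backref *
backref_search(struct rb_root *root, struct sa_defrag_extent_backref *key)
{
	struct rb_node *n = root->rb_node;

	while (n) {
		struct sa_defrag_extent_backref *entry =
			rb_entry(n, struct sa_defrag_extent_backref, node);
		int cmp = backref_comp(key, entry);

		if (cmp < 0)
			n = n->rb_left;
		else if (cmp > 0)
			n = n->rb_right;
		else
			return entry;	/* any one of possibly several equals */
	}
	return NULL;
}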
2535 
2536 /*
2537  * Note the backref might have changed, and in this case we just return 0.
2538  */
2539 static noinline int record_one_backref(u64 inum, u64 offset, u64 root_id,
2540                                        void *ctx)
2541 {
2542         struct btrfs_file_extent_item *extent;
2543         struct old_sa_defrag_extent *old = ctx;
2544         struct new_sa_defrag_extent *new = old->new;
2545         struct btrfs_path *path = new->path;
2546         struct btrfs_key key;
2547         struct btrfs_root *root;
2548         struct sa_defrag_extent_backref *backref;
2549         struct extent_buffer *leaf;
2550         struct inode *inode = new->inode;
2551         struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
2552         int slot;
2553         int ret;
2554         u64 extent_offset;
2555         u64 num_bytes;
2556 
2557         if (BTRFS_I(inode)->root->root_key.objectid == root_id &&
2558             inum == btrfs_ino(BTRFS_I(inode)))
2559                 return 0;
2560 
2561         key.objectid = root_id;
2562         key.type = BTRFS_ROOT_ITEM_KEY;
2563         key.offset = (u64)-1;
2564 
2565         root = btrfs_read_fs_root_no_name(fs_info, &key);
2566         if (IS_ERR(root)) {
2567                 if (PTR_ERR(root) == -ENOENT)
2568                         return 0;
2569                 WARN_ON(1);
2570                 btrfs_debug(fs_info, "inum=%llu, offset=%llu, root_id=%llu",
2571                          inum, offset, root_id);
2572                 return PTR_ERR(root);
2573         }
2574 
2575         key.objectid = inum;
2576         key.type = BTRFS_EXTENT_DATA_KEY;
2577         if (offset > (u64)-1 << 32)
2578                 key.offset = 0;
2579         else
2580                 key.offset = offset;
2581 
2582         ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
2583         if (WARN_ON(ret < 0))
2584                 return ret;
2585         ret = 0;
2586 
2587         while (1) {
2588                 cond_resched();
2589 
2590                 leaf = path->nodes[0];
2591                 slot = path->slots[0];
2592 
2593                 if (slot >= btrfs_header_nritems(leaf)) {
2594                         ret = btrfs_next_leaf(root, path);
2595                         if (ret < 0) {
2596                                 goto out;
2597                         } else if (ret > 0) {
2598                                 ret = 0;
2599                                 goto out;
2600                         }
2601                         continue;
2602                 }
2603 
2604                 path->slots[0]++;
2605 
2606                 btrfs_item_key_to_cpu(leaf, &key, slot);
2607 
2608                 if (key.objectid > inum)
2609                         goto out;
2610 
2611                 if (key.objectid < inum || key.type != BTRFS_EXTENT_DATA_KEY)
2612                         continue;
2613 
2614                 extent = btrfs_item_ptr(leaf, slot,
2615                                         struct btrfs_file_extent_item);
2616 
2617                 if (btrfs_file_extent_disk_bytenr(leaf, extent) != old->bytenr)
2618                         continue;
2619 
2620                 /*
2621                  * 'offset' refers to the exact key.offset,
2622                  * NOT the 'offset' field in btrfs_extent_data_ref, i.e.
2623                  * (key.offset - extent_offset).
2624                  */
2625                 if (key.offset != offset)
2626                         continue;
2627 
2628                 extent_offset = btrfs_file_extent_offset(leaf, extent);
2629                 num_bytes = btrfs_file_extent_num_bytes(leaf, extent);
2630 
2631                 if (extent_offset >= old->extent_offset + old->offset +
2632                     old->len || extent_offset + num_bytes <=
2633                     old->extent_offset + old->offset)
2634                         continue;
2635                 break;
2636         }
2637 
2638         backref = kmalloc(sizeof(*backref), GFP_NOFS);
2639         if (!backref) {
2640                 ret = -ENOMEM;
2641                 goto out;
2642         }
2643 
2644         backref->root_id = root_id;
2645         backref->inum = inum;
2646         backref->file_pos = offset;
2647         backref->num_bytes = num_bytes;
2648         backref->extent_offset = extent_offset;
2649         backref->generation = btrfs_file_extent_generation(leaf, extent);
2650         backref->old = old;
2651         backref_insert(&new->root, backref);
2652         old->count++;
2653 out:
2654         btrfs_release_path(path);
2655         WARN_ON(ret);
2656         return ret;
2657 }
2658 
2659 static noinline bool record_extent_backrefs(struct btrfs_path *path,
2660                                    struct new_sa_defrag_extent *new)
2661 {
2662         struct btrfs_fs_info *fs_info = btrfs_sb(new->inode->i_sb);
2663         struct old_sa_defrag_extent *old, *tmp;
2664         int ret;
2665 
2666         new->path = path;
2667 
2668         list_for_each_entry_safe(old, tmp, &new->head, list) {
2669                 ret = iterate_inodes_from_logical(old->bytenr +
2670                                                   old->extent_offset, fs_info,
2671                                                   path, record_one_backref,
2672                                                   old, false);
2673                 if (ret < 0 && ret != -ENOENT)
2674                         return false;
2675 
2676                 /* no backref to be processed for this extent */
2677                 if (!old->count) {
2678                         list_del(&old->list);
2679                         kfree(old);
2680                 }
2681         }
2682 
2683         if (list_empty(&new->head))
2684                 return false;
2685 
2686         return true;
2687 }
2688 
2689 static int relink_is_mergable(struct extent_buffer *leaf,
2690                               struct btrfs_file_extent_item *fi,
2691                               struct new_sa_defrag_extent *new)
2692 {
2693         if (btrfs_file_extent_disk_bytenr(leaf, fi) != new->bytenr)
2694                 return 0;
2695 
2696         if (btrfs_file_extent_type(leaf, fi) != BTRFS_FILE_EXTENT_REG)
2697                 return 0;
2698 
2699         if (btrfs_file_extent_compression(leaf, fi) != new->compress_type)
2700                 return 0;
2701 
2702         if (btrfs_file_extent_encryption(leaf, fi) ||
2703             btrfs_file_extent_other_encoding(leaf, fi))
2704                 return 0;
2705 
2706         return 1;
2707 }
2708 
2709 /*
2710  * Note the backref might have changed, and in this case we just return 0.
2711  */
2712 static noinline int relink_extent_backref(struct btrfs_path *path,
2713                                  struct sa_defrag_extent_backref *prev,
2714                                  struct sa_defrag_extent_backref *backref)
2715 {
2716         struct btrfs_file_extent_item *extent;
2717         struct btrfs_file_extent_item *item;
2718         struct btrfs_ordered_extent *ordered;
2719         struct btrfs_trans_handle *trans;
2720         struct btrfs_ref ref = { 0 };
2721         struct btrfs_root *root;
2722         struct btrfs_key key;
2723         struct extent_buffer *leaf;
2724         struct old_sa_defrag_extent *old = backref->old;
2725         struct new_sa_defrag_extent *new = old->new;
2726         struct btrfs_fs_info *fs_info = btrfs_sb(new->inode->i_sb);
2727         struct inode *inode;
2728         struct extent_state *cached = NULL;
2729         int ret = 0;
2730         u64 start;
2731         u64 len;
2732         u64 lock_start;
2733         u64 lock_end;
2734         bool merge = false;
2735         int index;
2736 
2737         if (prev && prev->root_id == backref->root_id &&
2738             prev->inum == backref->inum &&
2739             prev->file_pos + prev->num_bytes == backref->file_pos)
2740                 merge = true;
2741 
2742         /* step 1: get root */
2743         key.objectid = backref->root_id;
2744         key.type = BTRFS_ROOT_ITEM_KEY;
2745         key.offset = (u64)-1;
2746 
2747         index = srcu_read_lock(&fs_info->subvol_srcu);
2748 
2749         root = btrfs_read_fs_root_no_name(fs_info, &key);
2750         if (IS_ERR(root)) {
2751                 srcu_read_unlock(&fs_info->subvol_srcu, index);
2752                 if (PTR_ERR(root) == -ENOENT)
2753                         return 0;
2754                 return PTR_ERR(root);
2755         }
2756 
2757         if (btrfs_root_readonly(root)) {
2758                 srcu_read_unlock(&fs_info->subvol_srcu, index);
2759                 return 0;
2760         }
2761 
2762         /* step 2: get inode */
2763         key.objectid = backref->inum;
2764         key.type = BTRFS_INODE_ITEM_KEY;
2765         key.offset = 0;
2766 
2767         inode = btrfs_iget(fs_info->sb, &key, root, NULL);
2768         if (IS_ERR(inode)) {
2769                 srcu_read_unlock(&fs_info->subvol_srcu, index);
2770                 return 0;
2771         }
2772 
2773         srcu_read_unlock(&fs_info->subvol_srcu, index);
2774 
2775         /* step 3: relink backref */
2776         lock_start = backref->file_pos;
2777         lock_end = backref->file_pos + backref->num_bytes - 1;
2778         lock_extent_bits(&BTRFS_I(inode)->io_tree, lock_start, lock_end,
2779                          &cached);
2780 
2781         ordered = btrfs_lookup_first_ordered_extent(inode, lock_end);
2782         if (ordered) {
2783                 btrfs_put_ordered_extent(ordered);
2784                 goto out_unlock;
2785         }
2786 
2787         trans = btrfs_join_transaction(root);
2788         if (IS_ERR(trans)) {
2789                 ret = PTR_ERR(trans);
2790                 goto out_unlock;
2791         }
2792 
2793         key.objectid = backref->inum;
2794         key.type = BTRFS_EXTENT_DATA_KEY;
2795         key.offset = backref->file_pos;
2796 
2797         ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
2798         if (ret < 0) {
2799                 goto out_free_path;
2800         } else if (ret > 0) {
2801                 ret = 0;
2802                 goto out_free_path;
2803         }
2804 
2805         extent = btrfs_item_ptr(path->nodes[0], path->slots[0],
2806                                 struct btrfs_file_extent_item);
2807 
2808         if (btrfs_file_extent_generation(path->nodes[0], extent) !=
2809             backref->generation)
2810                 goto out_free_path;
2811 
2812         btrfs_release_path(path);
2813 
2814         start = backref->file_pos;
2815         if (backref->extent_offset < old->extent_offset + old->offset)
2816                 start += old->extent_offset + old->offset -
2817                          backref->extent_offset;
2818 
2819         len = min(backref->extent_offset + backref->num_bytes,
2820                   old->extent_offset + old->offset + old->len);
2821         len -= max(backref->extent_offset, old->extent_offset + old->offset);
2822 
2823         ret = btrfs_drop_extents(trans, root, inode, start,
2824                                  start + len, 1);
2825         if (ret)
2826                 goto out_free_path;
2827 again:
2828         key.objectid = btrfs_ino(BTRFS_I(inode));
2829         key.type = BTRFS_EXTENT_DATA_KEY;
2830         key.offset = start;
2831 
2832         path->leave_spinning = 1;
2833         if (merge) {
2834                 struct btrfs_file_extent_item *fi;
2835                 u64 extent_len;
2836                 struct btrfs_key found_key;
2837 
2838                 ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
2839                 if (ret < 0)
2840                         goto out_free_path;
2841 
2842                 path->slots[0]--;
2843                 leaf = path->nodes[0];
2844                 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
2845 
2846                 fi = btrfs_item_ptr(leaf, path->slots[0],
2847                                     struct btrfs_file_extent_item);
2848                 extent_len = btrfs_file_extent_num_bytes(leaf, fi);
2849 
2850                 if (extent_len + found_key.offset == start &&
2851                     relink_is_mergable(leaf, fi, new)) {
2852                         btrfs_set_file_extent_num_bytes(leaf, fi,
2853                                                         extent_len + len);
2854                         btrfs_mark_buffer_dirty(leaf);
2855                         inode_add_bytes(inode, len);
2856 
2857                         ret = 1;
2858                         goto out_free_path;
2859                 } else {
2860                         merge = false;
2861                         btrfs_release_path(path);
2862                         goto again;
2863                 }
2864         }
2865 
2866         ret = btrfs_insert_empty_item(trans, root, path, &key,
2867                                         sizeof(*extent));
2868         if (ret) {
2869                 btrfs_abort_transaction(trans, ret);
2870                 goto out_free_path;
2871         }
2872 
2873         leaf = path->nodes[0];
2874         item = btrfs_item_ptr(leaf, path->slots[0],
2875                                 struct btrfs_file_extent_item);
2876         btrfs_set_file_extent_disk_bytenr(leaf, item, new->bytenr);
2877         btrfs_set_file_extent_disk_num_bytes(leaf, item, new->disk_len);
2878         btrfs_set_file_extent_offset(leaf, item, start - new->file_pos);
2879         btrfs_set_file_extent_num_bytes(leaf, item, len);
2880         btrfs_set_file_extent_ram_bytes(leaf, item, new->len);
2881         btrfs_set_file_extent_generation(leaf, item, trans->transid);
2882         btrfs_set_file_extent_type(leaf, item, BTRFS_FILE_EXTENT_REG);
2883         btrfs_set_file_extent_compression(leaf, item, new->compress_type);
2884         btrfs_set_file_extent_encryption(leaf, item, 0);
2885         btrfs_set_file_extent_other_encoding(leaf, item, 0);
2886 
2887         btrfs_mark_buffer_dirty(leaf);
2888         inode_add_bytes(inode, len);
2889         btrfs_release_path(path);
2890 
2891         btrfs_init_generic_ref(&ref, BTRFS_ADD_DELAYED_REF, new->bytenr,
2892                                new->disk_len, 0);
2893         btrfs_init_data_ref(&ref, backref->root_id, backref->inum,
2894                             new->file_pos);  /* start - extent_offset */
2895         ret = btrfs_inc_extent_ref(trans, &ref);
2896         if (ret) {
2897                 btrfs_abort_transaction(trans, ret);
2898                 goto out_free_path;
2899         }
2900 
2901         ret = 1;
2902 out_free_path:
2903         btrfs_release_path(path);
2904         path->leave_spinning = 0;
2905         btrfs_end_transaction(trans);
2906 out_unlock:
2907         unlock_extent_cached(&BTRFS_I(inode)->io_tree, lock_start, lock_end,
2908                              &cached);
2909         iput(inode);
2910         return ret;
2911 }
2912 
2913 static void free_sa_defrag_extent(struct new_sa_defrag_extent *new)
2914 {
2915         struct old_sa_defrag_extent *old, *tmp;
2916 
2917         if (!new)
2918                 return;
2919 
2920         list_for_each_entry_safe(old, tmp, &new->head, list) {
2921                 kfree(old);
2922         }
2923         kfree(new);
2924 }
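
/*
 * Illustrative note: the _safe iterator matters above.  'tmp' caches the
 * next node before the loop body runs, so kfree(old) cannot poison the
 * traversal; plain list_for_each_entry() would read freed memory on the
 * next step.  No list_del() is needed because the list head itself dies
 * with 'new'.
 */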
2925 
2926 static void relink_file_extents(struct new_sa_defrag_extent *new)
2927 {
2928         struct btrfs_fs_info *fs_info = btrfs_sb(new->inode->i_sb);
2929         struct btrfs_path *path;
2930         struct sa_defrag_extent_backref *backref;
2931         struct sa_defrag_extent_backref *prev = NULL;
2932         struct rb_node *node;
2933         int ret;
2934 
2935         path = btrfs_alloc_path();
2936         if (!path)
2937                 return;
2938 
2939         if (!record_extent_backrefs(path, new)) {
2940                 btrfs_free_path(path);
2941                 goto out;
2942         }
2943         btrfs_release_path(path);
2944 
2945         while (1) {
2946                 node = rb_first(&new->root);
2947                 if (!node)
2948                         break;
2949                 rb_erase(node, &new->root);
2950 
2951                 backref = rb_entry(node, struct sa_defrag_extent_backref, node);
2952 
2953                 ret = relink_extent_backref(path, prev, backref);
2954                 WARN_ON(ret < 0);
2955 
2956                 kfree(prev);
2957 
2958                 if (ret == 1)
2959                         prev = backref;
2960                 else
2961                         prev = NULL;
2962                 cond_resched();
2963         }
2964         kfree(prev);
2965 
2966         btrfs_free_path(path);
2967 out:
2968         free_sa_defrag_extent(new);
2969 
2970         atomic_dec(&fs_info->defrag_running);
2971         wake_up(&fs_info->transaction_wait);
2972 }
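
/*
 * Illustrative note: the 'prev' handling above is what enables merging.
 * A backref is remembered as 'prev' only when relink_extent_backref()
 * returned 1 (it actually wrote a file extent item); the next backref can
 * then check whether it is contiguous with 'prev' in the same root and
 * inode and extend that item in place instead of inserting a new one.
 */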
2973 
2974 static struct new_sa_defrag_extent *
2975 record_old_file_extents(struct inode *inode,
2976                         struct btrfs_ordered_extent *ordered)
2977 {
2978         struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
2979         struct btrfs_root *root = BTRFS_I(inode)->root;
2980         struct btrfs_path *path;
2981         struct btrfs_key key;
2982         struct old_sa_defrag_extent *old;
2983         struct new_sa_defrag_extent *new;
2984         int ret;
2985 
2986         new = kmalloc(sizeof(*new), GFP_NOFS);
2987         if (!new)
2988                 return NULL;
2989 
2990         new->inode = inode;
2991         new->file_pos = ordered->file_offset;
2992         new->len = ordered->len;
2993         new->bytenr = ordered->start;
2994         new->disk_len = ordered->disk_len;
2995         new->compress_type = ordered->compress_type;
2996         new->root = RB_ROOT;
2997         INIT_LIST_HEAD(&new->head);
2998 
2999         path = btrfs_alloc_path();
3000         if (!path)
3001                 goto out_kfree;
3002 
3003         key.objectid = btrfs_ino(BTRFS_I(inode));
3004         key.type = BTRFS_EXTENT_DATA_KEY;
3005         key.offset = new->file_pos;
3006 
3007         ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
3008         if (ret < 0)
3009                 goto out_free_path;
3010         if (ret > 0 && path->slots[0] > 0)
3011                 path->slots[0]--;
3012 
3013         /* find out all the old extents for the file range */
3014         while (1) {
3015                 struct btrfs_file_extent_item *extent;
3016                 struct extent_buffer *l;
3017                 int slot;
3018                 u64 num_bytes;
3019                 u64 offset;
3020                 u64 end;
3021                 u64 disk_bytenr;
3022                 u64 extent_offset;
3023 
3024                 l = path->nodes[0];
3025                 slot = path->slots[0];
3026 
3027                 if (slot >= btrfs_header_nritems(l)) {
3028                         ret = btrfs_next_leaf(root, path);
3029                         if (ret < 0)
3030                                 goto out_free_path;
3031                         else if (ret > 0)
3032                                 break;
3033                         continue;
3034                 }
3035 
3036                 btrfs_item_key_to_cpu(l, &key, slot);
3037 
3038                 if (key.objectid != btrfs_ino(BTRFS_I(inode)))
3039                         break;
3040                 if (key.type != BTRFS_EXTENT_DATA_KEY)
3041                         break;
3042                 if (key.offset >= new->file_pos + new->len)
3043                         break;
3044 
3045                 extent = btrfs_item_ptr(l, slot, struct btrfs_file_extent_item);
3046 
3047                 num_bytes = btrfs_file_extent_num_bytes(l, extent);
3048                 if (key.offset + num_bytes < new->file_pos)
3049                         goto next;
3050 
3051                 disk_bytenr = btrfs_file_extent_disk_bytenr(l, extent);
3052                 if (!disk_bytenr)
3053                         goto next;
3054 
3055                 extent_offset = btrfs_file_extent_offset(l, extent);
3056 
3057                 old = kmalloc(sizeof(*old), GFP_NOFS);
3058                 if (!old)
3059                         goto out_free_path;
3060 
3061                 offset = max(new->file_pos, key.offset);
3062                 end = min(new->file_pos + new->len, key.offset + num_bytes);
3063 
3064                 old->bytenr = disk_bytenr;
3065                 old->extent_offset = extent_offset;
3066                 old->offset = offset - key.offset;
3067                 old->len = end - offset;
3068                 old->new = new;
3069                 old->count = 0;
3070                 list_add_tail(&old->list, &new->head);
3071 next:
3072                 path->slots[0]++;
3073                 cond_resched();
3074         }
3075 
3076         btrfs_free_path(path);
3077         atomic_inc(&fs_info->defrag_running);
3078 
3079         return new;
3080 
3081 out_free_path:
3082         btrfs_free_path(path);
3083 out_kfree:
3084         free_sa_defrag_extent(new);
3085         return NULL;
3086 }
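
/*
 * Illustrative sketch, not part of inode.c: the max()/min() pair above is
 * the usual half-open interval intersection.  Factored out, with a worked
 * example: [100, 200) vs [160, 240) overlaps in [160, 200), 40 bytes.
 * 'overlap_len' is a hypothetical helper:
 */
static u64 overlap_len(u64 a_start, u64 a_len, u64 b_start, u64 b_len)
{
	u64 start = max(a_start, b_start);
	u64 end = min(a_start + a_len, b_start + b_len);

	return end > start ? end - start : 0;
}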
3087 
3088 static void btrfs_release_delalloc_bytes(struct btrfs_fs_info *fs_info,
3089                                          u64 start, u64 len)
3090 {
3091         struct btrfs_block_group_cache *cache;
3092 
3093         cache = btrfs_lookup_block_group(fs_info, start);
3094         ASSERT(cache);
3095 
3096         spin_lock(&cache->lock);
3097         cache->delalloc_bytes -= len;
3098         spin_unlock(&cache->lock);
3099 
3100         btrfs_put_block_group(cache);
3101 }
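
/*
 * Illustrative note: the unconditional btrfs_put_block_group() reflects
 * that the lookup returns its result with a reference held; the spinlock
 * protects only the delalloc_bytes update, not the lifetime of the block
 * group itself.
 */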
3102 
3103 /* as ordered data IO finishes, this gets called so we can finish
3104  * an ordered extent if the range of bytes in the file it covers is
3105  * fully written.
3106  */
3107 static int btrfs_finish_ordered_io(struct btrfs_ordered_extent *ordered_extent)
3108 {
3109         struct inode *inode = ordered_extent->inode;
3110         struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
3111         struct btrfs_root *root = BTRFS_I(inode)->root;
3112         struct btrfs_trans_handle *trans = NULL;
3113         struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
3114         struct extent_state *cached_state = NULL;
3115         struct new_sa_defrag_extent *new = NULL;
3116         int compress_type = 0;
3117         int ret = 0;
3118         u64 logical_len = ordered_extent->len;
3119         bool nolock;
3120         bool truncated = false;
3121         bool range_locked = false;
3122         bool clear_new_delalloc_bytes = false;
3123         bool clear_reserved_extent = true;
3124 
3125         if (!test_bit(BTRFS_ORDERED_NOCOW, &ordered_extent->flags) &&
3126             !test_bit(BTRFS_ORDERED_PREALLOC, &ordered_extent->flags) &&
3127             !test_bit(BTRFS_ORDERED_DIRECT, &ordered_extent->flags))
3128                 clear_new_delalloc_bytes = true;
3129 
3130         nolock = btrfs_is_free_space_inode(BTRFS_I(inode));
3131 
3132         if (test_bit(BTRFS_ORDERED_IOERR, &ordered_extent->flags)) {
3133                 ret = -EIO;
3134                 goto out;
3135         }
3136 
3137         btrfs_free_io_failure_record(BTRFS_I(inode),
3138                         ordered_extent->file_offset,
3139                         ordered_extent->file_offset +
3140                         ordered_extent->len - 1);
3141 
3142         if (test_bit(BTRFS_ORDERED_TRUNCATED, &ordered_extent->flags)) {
3143                 truncated = true;
3144                 logical_len = ordered_extent->truncated_len;
3145                 /* Truncated the entire extent, don't bother adding */
3146                 if (!logical_len)
3147                         goto out;
3148         }
3149 
3150         if (test_bit(BTRFS_ORDERED_NOCOW, &ordered_extent->flags)) {
3151                 BUG_ON(!list_empty(&ordered_extent->list)); /* Logic error */
3152 
3153                 /*
3154                  * For the mwrite (mmap + memset to write) case, we still
3155                  * reserve space for the NOCOW range.
3156                  * As NOCOW won't cause a new delayed ref, just free the space.
3157                  */
3158                 btrfs_qgroup_free_data(inode, NULL, ordered_extent->file_offset,
3159                                        ordered_extent->len);
3160                 btrfs_ordered_update_i_size(inode, 0, ordered_extent);
3161                 if (nolock)
3162                         trans = btrfs_join_transaction_nolock(root);
3163                 else
3164                         trans = btrfs_join_transaction(root);
3165                 if (IS_ERR(trans)) {
3166                         ret = PTR_ERR(trans);
3167                         trans = NULL;
3168                         goto out;
3169                 }
3170                 trans->block_rsv = &BTRFS_I(inode)->block_rsv;
3171                 ret = btrfs_update_inode_fallback(trans, root, inode);
3172                 if (ret) /* -ENOMEM or corruption */
3173                         btrfs_abort_transaction(trans, ret);
3174                 goto out;
3175         }
3176 
3177         range_locked = true;
3178         lock_extent_bits(io_tree, ordered_extent->file_offset,
3179                          ordered_extent->file_offset + ordered_extent->len - 1,
3180                          &cached_state);
3181 
3182         ret = test_range_bit(io_tree, ordered_extent->file_offset,
3183                         ordered_extent->file_offset + ordered_extent->len - 1,
3184                         EXTENT_DEFRAG, 0, cached_state);
3185         if (ret) {
3186                 u64 last_snapshot = btrfs_root_last_snapshot(&root->root_item);
3187                 if (0 && last_snapshot >= BTRFS_I(inode)->generation)
3188                         /* the inode is shared */
3189                         new = record_old_file_extents(inode, ordered_extent);
3190 
3191                 clear_extent_bit(io_tree, ordered_extent->file_offset,
3192                         ordered_extent->file_offset + ordered_extent->len - 1,
3193                         EXTENT_DEFRAG, 0, 0, &cached_state);
3194         }
3195 
3196         if (nolock)
3197                 trans = btrfs_join_transaction_nolock(root);
3198         else
3199                 trans = btrfs_join_transaction(root);
3200         if (IS_ERR(trans)) {
3201                 ret = PTR_ERR(trans);
3202                 trans = NULL;
3203                 goto out;
3204         }
3205 
3206         trans->block_rsv = &BTRFS_I(inode)->block_rsv;
3207 
3208         if (test_bit(BTRFS_ORDERED_COMPRESSED, &ordered_extent->flags))
3209                 compress_type = ordered_extent->compress_type;
3210         if (test_bit(BTRFS_ORDERED_PREALLOC, &ordered_extent->flags)) {
3211                 BUG_ON(compress_type);
3212                 btrfs_qgroup_free_data(inode, NULL, ordered_extent->file_offset,
3213                                        ordered_extent->len);
3214                 ret = btrfs_mark_extent_written(trans, BTRFS_I(inode),
3215                                                 ordered_extent->file_offset,
3216                                                 ordered_extent->file_offset +
3217                                                 logical_len);
3218         } else {
3219                 BUG_ON(root == fs_info->tree_root);
3220                 ret = insert_reserved_file_extent(trans, inode,
3221                                                 ordered_extent->file_offset,
3222                                                 ordered_extent->start,
3223                                                 ordered_extent->disk_len,
3224                                                 logical_len, logical_len,
3225                                                 compress_type, 0, 0,
3226                                                 BTRFS_FILE_EXTENT_REG);
3227                 if (!ret) {
3228                         clear_reserved_extent = false;
3229                         btrfs_release_delalloc_bytes(fs_info,
3230                                                      ordered_extent->start,
3231                                                      ordered_extent->disk_len);
3232                 }
3233         }
3234         unpin_extent_cache(&BTRFS_I(inode)->extent_tree,
3235                            ordered_extent->file_offset, ordered_extent->len,
3236                            trans->transid);
3237         if (ret < 0) {
3238                 btrfs_abort_transaction(trans, ret);
3239                 goto out;
3240         }
3241 
3242         ret = add_pending_csums(trans, inode, &ordered_extent->list);
3243         if (ret) {
3244                 btrfs_abort_transaction(trans, ret);
3245                 goto out;
3246         }
3247 
3248         btrfs_ordered_update_i_size(inode, 0, ordered_extent);
3249         ret = btrfs_update_inode_fallback(trans, root, inode);
3250         if (ret) { /* -ENOMEM or corruption */
3251                 btrfs_abort_transaction(trans, ret);
3252                 goto out;
3253         }
3254         ret = 0;
3255 out:
3256         if (range_locked || clear_new_delalloc_bytes) {
3257                 unsigned int clear_bits = 0;
3258 
3259                 if (range_locked)
3260                         clear_bits |= EXTENT_LOCKED;
3261                 if (clear_new_delalloc_bytes)
3262                         clear_bits |= EXTENT_DELALLOC_NEW;
3263                 clear_extent_bit(&BTRFS_I(inode)->io_tree,
3264                                  ordered_extent->file_offset,
3265                                  ordered_extent->file_offset +
3266                                  ordered_extent->len - 1,
3267                                  clear_bits,
3268                                  (clear_bits & EXTENT_LOCKED) ? 1 : 0,
3269                                  0, &cached_state);
3270         }
3271 
3272         if (trans)
3273                 btrfs_end_transaction(trans);
3274 
3275         if (ret || truncated) {
3276                 u64 start, end;
3277 
3278                 if (truncated)
3279                         start = ordered_extent->file_offset + logical_len;
3280                 else
3281                         start = ordered_extent->file_offset;
3282                 end = ordered_extent->file_offset + ordered_extent->len - 1;
3283                 clear_extent_uptodate(io_tree, start, end, NULL);
3284 
3285                 /* Drop the cache for the part of the extent we didn't write. */
3286                 btrfs_drop_extent_cache(BTRFS_I(inode), start, end, 0);
3287 
3288                 /*
3289                  * If the ordered extent had an IOERR or something else went
3290                  * wrong we need to return the space for this ordered extent
3291                  * back to the allocator.  We only free the extent in the
3292                  * truncated case if we didn't write out the extent at all.
3293                  *
3294                  * If we made it past insert_reserved_file_extent before we
3295                  * errored out then we don't need to do this as the accounting
3296                  * has already been done.
3297                  */
3298                 if ((ret || !logical_len) &&
3299                     clear_reserved_extent &&
3300                     !test_bit(BTRFS_ORDERED_NOCOW, &ordered_extent->flags) &&
3301                     !test_bit(BTRFS_ORDERED_PREALLOC, &ordered_extent->flags))
3302                         btrfs_free_reserved_extent(fs_info,
3303                                                    ordered_extent->start,
3304                                                    ordered_extent->disk_len, 1);
3305         }
3306 
3307 
3308         /*
3309          * This needs to be done to make sure anybody waiting knows we are done
3310          * updating everything for this ordered extent.
3311          */
3312         btrfs_remove_ordered_extent(inode, ordered_extent);
3313 
3314         /* for snapshot-aware defrag */
3315         if (new) {
3316                 if (ret) {
3317                         free_sa_defrag_extent(new);
3318                         atomic_dec(&fs_info->defrag_running);
3319                 } else {
3320                         relink_file_extents(new);
3321                 }
3322         }
3323 
3324         /* once for us */
3325         btrfs_put_ordered_extent(ordered_extent);
3326         /* once for the tree */
3327         btrfs_put_ordered_extent(ordered_extent);
3328 
3329         return ret;
3330 }
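
/*
 * Illustrative note: the overall shape of btrfs_finish_ordered_io() is
 * easier to see from a distance.  Bail out early on IO errors or a fully
 * truncated extent; NOCOW writes only need i_size and inode item updates
 * (the extents already exist); PREALLOC writes flip the preallocated
 * extent to written; everything else inserts the reserved file extent.
 * The out: path then unwinds whatever was set up (locked range, delalloc
 * bits, the reserved extent on error) and drops both ordered extent
 * references.
 */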
3331 
3332 static void finish_ordered_fn(struct btrfs_work *work)
3333 {
3334         struct btrfs_ordered_extent *ordered_extent;
3335         ordered_extent = container_of(work, struct btrfs_ordered_extent, work);
3336         btrfs_finish_ordered_io(ordered_extent);
3337 }
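
/*
 * Illustrative sketch, not part of inode.c: container_of() recovers the
 * enclosing structure from a pointer to one of its members by subtracting
 * the member's offset.  Hand-expanded for finish_ordered_fn() above, it is
 * roughly ('work_to_ordered' is a hypothetical helper):
 */
static struct btrfs_ordered_extent *work_to_ordered(struct btrfs_work *work)
{
	return (struct btrfs_ordered_extent *)((char *)work -
			offsetof(struct btrfs_ordered_extent, work));
}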
3338 
3339 void btrfs_writepage_endio_finish_ordered(struct page *page, u64 start,
3340                                           u64 end, int uptodate)
3341 {
3342         struct inode *inode = page->mapping->host;
3343         struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
3344         struct btrfs_ordered_extent *ordered_extent = NULL;
3345         struct btrfs_workqueue *wq;
3346 
3347         trace_btrfs_writepage_end_io_hook(page, start, end, uptodate);
3348 
3349         ClearPagePrivate2(page);
3350         if (!btrfs_dec_test_ordered_pending(inode, &ordered_extent, start,
3351                                             end - start + 1, uptodate))
3352                 return;
3353 
3354         if (btrfs_is_free_space_inode(BTRFS_I(inode)))
3355                 wq = fs_info->endio_freespace_worker;
3356         else
3357                 wq = fs_info->endio_write_workers;
3358 
3359         btrfs_init_work(&ordered_extent->work, finish_ordered_fn, NULL, NULL);
3360         btrfs_queue_work(wq, &ordered_extent->work);
3361 }
3362 
3363 static int __readpage_endio_check(struct inode *inode,
3364                                   struct btrfs_io_bio *io_bio,
3365                                   int icsum, struct page *page,
3366                                   int pgoff, u64 start, size_t len)
3367 {
3368         struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
3369         SHASH_DESC_ON_STACK(shash, fs_info->csum_shash);
3370         char *kaddr;
3371         u16 csum_size = btrfs_super_csum_size(fs_info->super_copy);
3372         u8 *csum_expected;
3373         u8 csum[BTRFS_CSUM_SIZE];
3374 
3375         csum_expected = ((u8 *)io_bio->csum) + icsum * csum_size;
3376 
3377         kaddr = kmap_atomic(page);
3378         shash->tfm = fs_info->csum_shash;
3379 
3380         crypto_shash_init(shash);
3381         crypto_shash_update(shash, kaddr + pgoff, len);
3382         crypto_shash_final(shash, csum);
3383 
3384         if (memcmp(csum, csum_expected, csum_size))
3385                 goto zeroit;
3386 
3387         kunmap_atomic(kaddr);
3388         return 0;
3389 zeroit:
3390         btrfs_print_data_csum_error(BTRFS_I(inode), start, csum, csum_expected,
3391                                     io_bio->mirror_num);
3392         memset(kaddr + pgoff, 1, len);
3393         flush_dcache_page(page);
3394         kunmap_atomic(kaddr);
3395         return -EIO;
3396 }
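
/*
 * Illustrative sketch, not part of inode.c: the init/update/final calls
 * above are the streaming form of the shash API.  For one contiguous
 * buffer, crypto_shash_digest() collapses them into a single call;
 * 'csum_one_block' is a hypothetical helper:
 */
static int csum_one_block(struct crypto_shash *tfm, const u8 *data,
			  unsigned int len, u8 *out)
{
	SHASH_DESC_ON_STACK(shash, tfm);

	shash->tfm = tfm;
	return crypto_shash_digest(shash, data, len, out);
}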
3397 
3398 /*
3399  * When reads are done, we need to check csums to verify the data is correct.
3400  * If there's a match, we allow the bio to finish.  If not, the code in
3401  * extent_io.c will try to find good copies for us.
3402  */
3403 static int btrfs_readpage_end_io_hook(struct btrfs_io_bio *io_bio,
3404                                       u64 phy_offset, struct page *page,
3405                                       u64 start, u64 end, int mirror)
3406 {
3407         size_t offset = start - page_offset(page);
3408         struct inode *inode = page->mapping->host;
3409         struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
3410         struct btrfs_root *root = BTRFS_I(inode)->root;
3411 
3412         if (PageChecked(page)) {
3413                 ClearPageChecked(page);
3414                 return 0;
3415         }
3416 
3417         if (BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM)
3418                 return 0;
3419 
3420         if (root->root_key.objectid == BTRFS_DATA_RELOC_TREE_OBJECTID &&
3421             test_range_bit(io_tree, start, end, EXTENT_NODATASUM, 1, NULL)) {
3422                 clear_extent_bits(io_tree, start, end, EXTENT_NODATASUM);
3423                 return 0;
3424         }
3425 
3426         phy_offset >>= inode->i_sb->s_blocksize_bits;
3427         return __readpage_endio_check(inode, io_bio, phy_offset, page, offset,
3428                                       start, (size_t)(end - start + 1));
3429 }
3430 
3431 /*
3432  * btrfs_add_delayed_iput - perform a delayed iput on @inode
3433  *
3434  * @inode: The inode we want to perform iput on
3435  *
3436  * This function uses the generic vfs_inode::i_count to track whether we should
3437  * just decrement it (in case it's > 1) or if this is the last iput then link
3438  * the inode to the delayed iput machinery. Delayed iputs are processed at
3439  * transaction commit time/superblock commit/cleaner kthread.
3440  */
3441 void btrfs_add_delayed_iput(struct inode *inode)
3442 {
3443         struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
3444         struct btrfs_inode *binode = BTRFS_I(inode);
3445 
3446         if (atomic_add_unless(&inode->i_count, -1, 1))
3447                 return;
3448 
3449         atomic_inc(&fs_info->nr_delayed_iputs);
3450         spin_lock(&fs_info->delayed_iput_lock);
3451         ASSERT(list_empty(&binode->delayed_iput));
3452         list_add_tail(&binode->delayed_iput, &fs_info->delayed_iputs);
3453         spin_unlock(&fs_info->delayed_iput_lock);
3454         if (!test_bit(BTRFS_FS_CLEANER_RUNNING, &fs_info->flags))
3455                 wake_up_process(fs_info->cleaner_kthread);
3456 }
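
/*
 * Illustrative sketch, not part of inode.c: atomic_add_unless(v, a, u)
 * atomically performs "if (*v != u) *v += a" and returns true iff the add
 * happened, so the test above reads "drop one i_count reference unless it
 * is the last one".  Written out non-atomically (and therefore racily --
 * the atomic variant exists precisely to close this race):
 */
static bool iput_fastpath(struct inode *inode)	/* hypothetical */
{
	if (atomic_read(&inode->i_count) == 1)
		return false;		/* last ref: take the delayed path */
	atomic_dec(&inode->i_count);	/* racy when spelled like this! */
	return true;
}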
3457 
3458 static void run_delayed_iput_locked(struct btrfs_fs_info *fs_info,
3459                                     struct btrfs_inode *inode)
3460 {
3461         list_del_init(&inode->delayed_iput);
3462         spin_unlock(&fs_info->delayed_iput_lock);
3463         iput(&inode->vfs_inode);
3464         if (atomic_dec_and_test(&fs_info->nr_delayed_iputs))
3465                 wake_up(&fs_info->delayed_iputs_wait);
3466         spin_lock(&fs_info->delayed_iput_lock);
3467 }
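
/*
 * Illustrative note: the unlock/iput/relock shape above is deliberate.
 * iput() can fall into inode eviction and do real work, so it must not run
 * under delayed_iput_lock; the caller's loop tolerates the lock gap by
 * re-reading the list head on every iteration instead of caching an
 * iterator.
 */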
3468 
3469 static void btrfs_run_delayed_iput(struct btrfs_fs_info *fs_info,
3470                                    struct btrfs_inode *inode)
3471 {
3472         if (!list_empty(&inode->delayed_iput)) {
3473                 spin_lock(&fs_info->delayed_iput_lock);
3474                 if (!list_empty(&inode->delayed_iput))
3475                         run_delayed_iput_locked(fs_info, inode);
3476                 spin_unlock(&fs_info->delayed_iput_lock);
3477         }
3478 }
3479 
3480 void btrfs_run_delayed_iputs(struct btrfs_fs_info *fs_info)
3481 {
3482 
3483         spin_lock(&fs_info->delayed_iput_lock);
3484         while (!list_empty(&fs_info->delayed_iputs)) {
3485                 struct btrfs_inode *inode;
3486 
3487                 inode = list_first_entry(&fs_info->delayed_iputs,
3488                                 struct btrfs_inode, delayed_iput);
3489                 run_delayed_iput_locked(fs_info, inode);
3490         }
3491         spin_unlock(&fs_info->delayed_iput_lock);
3492 }
3493 
3494 /**
3495  * btrfs_wait_on_delayed_iputs - wait on the delayed iputs to be done running
3496  * @fs_info: the fs_info for this fs
3497  * Return: -EINTR if we were killed, 0 if nothing's pending
3498  *
3499  * This will wait on any delayed iputs that are currently running with KILLABLE
3500  * set.  Once they are all done running we will return, unless we are killed in
3501  * which case we return EINTR. This helps in user operations like fallocate etc
3502  * that might get blocked on the iputs.
3503  */
3504 int btrfs_wait_on_delayed_iputs(struct btrfs_fs_info *fs_info)
3505 {
3506         int ret = wait_event_killable(fs_info->delayed_iputs_wait,
3507                         atomic_read(&fs_info->nr_delayed_iputs) == 0);
3508         if (ret)
3509                 return -EINTR;
3510         return 0;
3511 }
3512 
3513 /*
3514  * This creates an orphan entry for the given inode in case something goes wrong
3515  * in the middle of an unlink.
3516  */
3517 int btrfs_orphan_add(struct btrfs_trans_handle *trans,
3518                      struct btrfs_inode *inode)
3519 {
3520         int ret;
3521 
3522         ret = btrfs_insert_orphan_item(trans, inode->root, btrfs_ino(inode));
3523         if (ret && ret != -EEXIST) {
3524                 btrfs_abort_transaction(trans, ret);
3525                 return ret;
3526         }
3527 
3528         return 0;
3529 }
3530 
3531 /*
3532  * We have done the delete so we can go ahead and remove the orphan item for
3533  * this particular inode.
3534  */
3535 static int btrfs_orphan_del(struct btrfs_trans_handle *trans,
3536                             struct btrfs_inode *inode)
3537 {
3538         return btrfs_del_orphan_item(trans, inode->root, btrfs_ino(inode));
3539 }
3540 
3541 /*
3542  * this cleans up any orphans that may be left on the list from the last use
3543  * of this root.
3544  */
3545 int btrfs_orphan_cleanup(struct btrfs_root *root)
3546 {
3547         struct btrfs_fs_info *fs_info = root->fs_info;
3548         struct btrfs_path *path;
3549         struct extent_buffer *leaf;
3550         struct btrfs_key key, found_key;
3551         struct btrfs_trans_handle *trans;
3552         struct inode *inode;
3553         u64 last_objectid = 0;
3554         int ret = 0, nr_unlink = 0;
3555 
3556         if (cmpxchg(&root->orphan_cleanup_state, 0, ORPHAN_CLEANUP_STARTED))
3557                 return 0;
3558 
3559         path = btrfs_alloc_path();
3560         if (!path) {
3561                 ret = -ENOMEM;
3562                 goto out;
3563         }
3564         path->reada = READA_BACK;
3565 
3566         key.objectid = BTRFS_ORPHAN_OBJECTID;
3567         key.type = BTRFS_ORPHAN_ITEM_KEY;
3568         key.offset = (u64)-1;
3569 
3570         while (1) {
3571                 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
3572                 if (ret < 0)
3573                         goto out;
3574 
3575                 /*
3576                  * ret == 0 means we found what we were searching for, which
3577                  * is weird but possible, so only adjust the path if we didn't
3578                  * find the key, and then see if we have entries that match
3579                  */
3580                 if (ret > 0) {
3581                         ret = 0;
3582                         if (path->slots[0] == 0)
3583                                 break;
3584                         path->slots[0]--;
3585                 }
3586 
3587                 /* pull out the item */
3588                 leaf = path->nodes[0];
3589                 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
3590 
3591                 /* make sure the item matches what we want */
3592                 if (found_key.objectid != BTRFS_ORPHAN_OBJECTID)
3593                         break;
3594                 if (found_key.type != BTRFS_ORPHAN_ITEM_KEY)
3595                         break;
3596 
3597                 /* release the path since we're done with it */
3598                 btrfs_release_path(path);
3599 
3600                 /*
3601                  * this is where we are basically btrfs_lookup, without the
3602                  * crossing root thing.  We store the inode number in the
3603                  * offset of the orphan item.
3604                  */
3605 
3606                 if (found_key.offset == last_objectid) {
3607                         btrfs_err(fs_info,
3608                                   "Error removing orphan entry, stopping orphan cleanup");
3609                         ret = -EINVAL;
3610                         goto out;
3611                 }
3612 
3613                 last_objectid = found_key.offset;
3614 
3615                 found_key.objectid = found_key.offset;
3616                 found_key.type = BTRFS_INODE_ITEM_KEY;
3617                 found_key.offset = 0;
3618                 inode = btrfs_iget(fs_info->sb, &found_key, root, NULL);
3619                 ret = PTR_ERR_OR_ZERO(inode);
3620                 if (ret && ret != -ENOENT)
3621                         goto out;
3622 
3623                 if (ret == -ENOENT && root == fs_info->tree_root) {
3624                         struct btrfs_root *dead_root;
3625                         struct btrfs_fs_info *fs_info = root->fs_info;
3626                         int is_dead_root = 0;
3627 
3628                         /*
3629                          * this is an orphan in the tree root. Currently these
3630                          * could come from 2 sources:
3631                          *  a) a snapshot deletion in progress
3632                          *  b) a free space cache inode
3633                          * We need to distinguish those two, as the snapshot
3634                          * orphan must not get deleted.
3635                          * find_dead_roots already ran before us, so if this
3636                          * is a snapshot deletion, we should find the root
3637                          * in the dead_roots list
3638                          */
3639                         spin_lock(&fs_info->trans_lock);
3640                         list_for_each_entry(dead_root, &fs_info->dead_roots,
3641                                             root_list) {
3642                                 if (dead_root->root_key.objectid ==
3643                                     found_key.objectid) {
3644                                         is_dead_root = 1;
3645                                         break;
3646                                 }
3647                         }
3648                         spin_unlock(&fs_info->trans_lock);
3649                         if (is_dead_root) {
3650                                 /* prevent this orphan from being found again */
3651                                 key.offset = found_key.objectid - 1;
3652                                 continue;
3653                         }
3654 
3655                 }
3656 
3657                 /*
3658                  * If we have an inode with links, there are a couple of
3659                  * possibilities. Old kernels (before v3.12) used to create an
3660                  * orphan item for truncate indicating that there were possibly
3661                  * extent items past i_size that needed to be deleted. In v3.12,
3662                  * truncate was changed to update i_size in sync with the extent
3663                  * items, but the (useless) orphan item was still created. Since
3664                  * v4.18, we don't create the orphan item for truncate at all.
3665                  *
3666                  * So, this item could mean that we need to do a truncate, but
3667                  * only if this filesystem was last used on a pre-v3.12 kernel
3668                  * and was not cleanly unmounted. The odds of that are quite
3669                  * slim, and it's a pain to do the truncate now, so just delete
3670                  * the orphan item.
3671                  *
3672                  * It's also possible that this orphan item was supposed to be
3673                  * deleted but wasn't. The inode number may have been reused,
3674                  * but either way, we can delete the orphan item.
3675                  */
3676                 if (ret == -ENOENT || inode->i_nlink) {
3677                         if (!ret)
3678                                 iput(inode);
3679                         trans = btrfs_start_transaction(root, 1);
3680                         if (IS_ERR(trans)) {
3681                                 ret = PTR_ERR(trans);
3682                                 goto out;
3683                         }
3684                         btrfs_debug(fs_info, "auto deleting %Lu",
3685                                     found_key.objectid);
3686                         ret = btrfs_del_orphan_item(trans, root,
3687                                                     found_key.objectid);
3688                         btrfs_end_transaction(trans);
3689                         if (ret)
3690                                 goto out;
3691                         continue;
3692                 }
3693 
3694                 nr_unlink++;
3695 
3696                 /* this will do delete_inode and everything for us */
3697                 iput(inode);
3698         }
3699         /* release the path since we're done with it */
3700         btrfs_release_path(path);
3701 
3702         root->orphan_cleanup_state = ORPHAN_CLEANUP_DONE;
3703 
3704         if (test_bit(BTRFS_ROOT_ORPHAN_ITEM_INSERTED, &root->state)) {
3705                 trans = btrfs_join_transaction(root);
3706                 if (!IS_ERR(trans))
3707                         btrfs_end_transaction(trans);
3708         }
3709 
3710         if (nr_unlink)
3711                 btrfs_debug(fs_info, "unlinked %d orphans", nr_unlink);
3712 
3713 out:
3714         if (ret)
3715                 btrfs_err(fs_info, "could not do orphan cleanup %d", ret);
3716         btrfs_free_path(path);
3717         return ret;
3718 }
3719 
3720 /*
3721  * very simple check to peek ahead in the leaf looking for xattrs.  If we
3722  * don't find any xattrs, we know there can't be any acls.
3723  *
3724  * slot is the slot the inode is in, objectid is the objectid of the inode
3725  */
3726 static noinline int acls_after_inode_item(struct extent_buffer *leaf,
3727                                           int slot, u64 objectid,
3728                                           int *first_xattr_slot)
3729 {
3730         u32 nritems = btrfs_header_nritems(leaf);
3731         struct btrfs_key found_key;
3732         static u64 xattr_access = 0;
3733         static u64 xattr_default = 0;
3734         int scanned = 0;
3735 
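        /*
         * Editorial comment, not part of the original file: the ACL xattr
         * name hashes never change, so compute them once on first use and
         * reuse the cached values on later calls.
         */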
3736         if (!xattr_access) {
3737                 xattr_access = btrfs_name_hash(XATTR_NAME_POSIX_ACL_ACCESS,
3738                                         strlen(XATTR_NAME_POSIX_ACL_ACCESS));
3739                 xattr_default = btrfs_name_hash(XATTR_NAME_POSIX_ACL_DEFAULT,
3740                                         strlen(XATTR_NAME_POSIX_ACL_DEFAULT));
3741         }
3742 
3743         slot++;
3744         *first_xattr_slot = -1;
3745         while (slot < nritems) {
3746                 btrfs_item_key_to_cpu(leaf, &found_key, slot);
3747 
3748                 /* we found a different objectid, there must not be acls */
3749                 if (found_key.objectid != objectid)
3750                         return 0;
3751 
3752                 /* we found an xattr, assume we've got an acl */
3753                 if (found_key.type == BTRFS_XATTR_ITEM_KEY) {
3754                         if (*first_xattr_slot == -1)
3755                                 *first_xattr_slot = slot;
3756                         if (found_key.offset == xattr_access ||
3757                             found_key.offset == xattr_default)
3758                                 return 1;
3759                 }
3760 
3761                 /*
3762                  * we found a key greater than an xattr key, there can't
3763                  * be any acls later on
3764                  */
3765                 if (found_key.type > BTRFS_XATTR_ITEM_KEY)
3766                         return 0;
3767 
3768                 slot++;
3769                 scanned++;
3770 
3771                 /*
3772                  * Items are ordered: inode item, inode backrefs, xattrs,
3773                  * then extents.  If an inode has a ton of hard links
3774                  * there can be a lot of backrefs, so don't waste time
3775                  * searching too hard; this is just an optimization.
3776                  */
3777                 if (scanned >= 8)
3778                         break;
3779         }
3780         /*
3781          * We hit the end of the leaf before finding an xattr or anything
3782          * larger than an xattr, so we have to assume the inode has acls.
3783          */
3784         if (*first_xattr_slot == -1)
3785                 *first_xattr_slot = slot;
3786         return 1;
3787 }
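
/*
 * Editorial illustration, not part of the original file: a minimal sketch of
 * how a reader consumes the peek-ahead result above to pre-seed the VFS ACL
 * cache.  It mirrors the cache_acl step in btrfs_read_locked_inode() below;
 * the function name is hypothetical.
 */
static void __maybe_unused example_precache_acl(struct extent_buffer *leaf,
						struct btrfs_path *path,
						struct inode *inode)
{
	int first_xattr_slot;
	int maybe_acls;

	maybe_acls = acls_after_inode_item(leaf, path->slots[0],
					   btrfs_ino(BTRFS_I(inode)),
					   &first_xattr_slot);
	if (!maybe_acls)
		cache_no_acl(inode);	/* no xattrs at all, so no ACLs */
}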
3788 
3789 /*
3790  * read an inode from the btree into the in-memory inode
3791  */
3792 static int btrfs_read_locked_inode(struct inode *inode,
3793                                    struct btrfs_path *in_path)
3794 {
3795         struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
3796         struct btrfs_path *path = in_path;
3797         struct extent_buffer *leaf;
3798         struct btrfs_inode_item *inode_item;
3799         struct btrfs_root *root = BTRFS_I(inode)->root;
3800         struct btrfs_key location;
3801         unsigned long ptr;
3802         int maybe_acls;
3803         u32 rdev;
3804         int ret;
3805         bool filled = false;
3806         int first_xattr_slot;
3807 
3808         ret = btrfs_fill_inode(inode, &rdev);
3809         if (!ret)
3810                 filled = true;
3811 
3812         if (!path) {
3813                 path = btrfs_alloc_path();
3814                 if (!path)
3815                         return -ENOMEM;
3816         }
3817 
3818         memcpy(&location, &BTRFS_I(inode)->location, sizeof(location));
3819 
3820         ret = btrfs_lookup_inode(NULL, root, path, &location, 0);
3821         if (ret) {
3822                 if (path != in_path)
3823                         btrfs_free_path(path);
3824                 return ret;
3825         }
3826 
3827         leaf = path->nodes[0];
3828 
3829         if (filled)
3830                 goto cache_index;
3831 
3832         inode_item = btrfs_item_ptr(leaf, path->slots[0],
3833                                     struct btrfs_inode_item);
3834         inode->i_mode = btrfs_inode_mode(leaf, inode_item);
3835         set_nlink(inode, btrfs_inode_nlink(leaf, inode_item));
3836         i_uid_write(inode, btrfs_inode_uid(leaf, inode_item));
3837         i_gid_write(inode, btrfs_inode_gid(leaf, inode_item));
3838         btrfs_i_size_write(BTRFS_I(inode), btrfs_inode_size(leaf, inode_item));
3839 
3840         inode->i_atime.tv_sec = btrfs_timespec_sec(leaf, &inode_item->atime);
3841         inode->i_atime.tv_nsec = btrfs_timespec_nsec(leaf, &inode_item->atime);
3842 
3843         inode->i_mtime.tv_sec = btrfs_timespec_sec(leaf, &inode_item->mtime);
3844         inode->i_mtime.tv_nsec = btrfs_timespec_nsec(leaf, &inode_item->mtime);
3845 
3846         inode->i_ctime.tv_sec = btrfs_timespec_sec(leaf, &inode_item->ctime);
3847         inode->i_ctime.tv_nsec = btrfs_timespec_nsec(leaf, &inode_item->ctime);
3848 
3849         BTRFS_I(inode)->i_otime.tv_sec =
3850                 btrfs_timespec_sec(leaf, &inode_item->otime);
3851         BTRFS_I(inode)->i_otime.tv_nsec =
3852                 btrfs_timespec_nsec(leaf, &inode_item->otime);
3853 
3854         inode_set_bytes(inode, btrfs_inode_nbytes(leaf, inode_item));
3855         BTRFS_I(inode)->generation = btrfs_inode_generation(leaf, inode_item);
3856         BTRFS_I(inode)->last_trans = btrfs_inode_transid(leaf, inode_item);
3857 
3858         inode_set_iversion_queried(inode,
3859                                    btrfs_inode_sequence(leaf, inode_item));
3860         inode->i_generation = BTRFS_I(inode)->generation;
3861         inode->i_rdev = 0;
3862         rdev = btrfs_inode_rdev(leaf, inode_item);
3863 
3864         BTRFS_I(inode)->index_cnt = (u64)-1;
3865         BTRFS_I(inode)->flags = btrfs_inode_flags(leaf, inode_item);
3866 
3867 cache_index:
3868         /*
3869          * If we were modified in the current generation and evicted from memory
3870          * and then re-read we need to do a full sync since we don't have any
3871          * idea about which extents were modified before we were evicted from
3872          * cache.
3873          *
3874          * This is required for both inode re-read from disk and delayed inode
3875          * in delayed_nodes_tree.
3876          */
3877         if (BTRFS_I(inode)->last_trans == fs_info->generation)
3878                 set_bit(BTRFS_INODE_NEEDS_FULL_SYNC,
3879                         &BTRFS_I(inode)->runtime_flags);
3880 
3881         /*
3882          * We don't persist the id of the transaction where an unlink operation
3883          * against the inode was last made. So here we assume the inode might
3884          * have been evicted, and therefore the exact value of last_unlink_trans
3885          * lost, and set it to last_trans to avoid metadata inconsistencies
3886          * between the inode and its parent if the inode is fsync'ed and the log
3887          * replayed. For example, in the scenario:
3888          *
3889          * touch mydir/foo
3890          * ln mydir/foo mydir/bar
3891          * sync
3892          * unlink mydir/bar
3893          * echo 2 > /proc/sys/vm/drop_caches   # evicts inode
3894          * xfs_io -c fsync mydir/foo
3895          * <power failure>
3896          * mount fs, triggers fsync log replay
3897          *
3898          * We must make sure that when we fsync our inode foo we also log its
3899          * parent inode, otherwise after log replay the parent still has the
3900          * dentry with the "bar" name but our inode foo has a link count of 1
3901          * and doesn't have an inode ref with the name "bar" anymore.
3902          *
3903          * Setting last_unlink_trans to last_trans is a pessimistic approach,
3904          * but it guarantees correctness at the expense of occasional full
3905          * transaction commits on fsync if our inode is a directory, or if our
3906          * inode is not a directory, logging its parent unnecessarily.
3907          */
3908         BTRFS_I(inode)->last_unlink_trans = BTRFS_I(inode)->last_trans;
3909 
3910         path->slots[0]++;
3911         if (inode->i_nlink != 1 ||
3912             path->slots[0] >= btrfs_header_nritems(leaf))
3913                 goto cache_acl;
3914 
3915         btrfs_item_key_to_cpu(leaf, &location, path->slots[0]);
3916         if (location.objectid != btrfs_ino(BTRFS_I(inode)))
3917                 goto cache_acl;
3918 
3919         ptr = btrfs_item_ptr_offset(leaf, path->slots[0]);
3920         if (location.type == BTRFS_INODE_REF_KEY) {
3921                 struct btrfs_inode_ref *ref;
3922 
3923                 ref = (struct btrfs_inode_ref *)ptr;
3924                 BTRFS_I(inode)->dir_index = btrfs_inode_ref_index(leaf, ref);
3925         } else if (location.type == BTRFS_INODE_EXTREF_KEY) {
3926                 struct btrfs_inode_extref *extref;
3927 
3928                 extref = (struct btrfs_inode_extref *)ptr;
3929                 BTRFS_I(inode)->dir_index = btrfs_inode_extref_index(leaf,
3930                                                                      extref);
3931         }
3932 cache_acl:
3933         /*
3934          * try to precache a NULL acl entry for files that don't have
3935          * any xattrs or acls
3936          */
3937         maybe_acls = acls_after_inode_item(leaf, path->slots[0],
3938                         btrfs_ino(BTRFS_I(inode)), &first_xattr_slot);
3939         if (first_xattr_slot != -1) {
3940                 path->slots[0] = first_xattr_slot;
3941                 ret = btrfs_load_inode_props(inode, path);
3942                 if (ret)
3943                         btrfs_err(fs_info,
3944                                   "error loading props for ino %llu (root %llu): %d",
3945                                   btrfs_ino(BTRFS_I(inode)),
3946                                   root->root_key.objectid, ret);
3947         }
3948         if (path != in_path)
3949                 btrfs_free_path(path);
3950 
3951         if (!maybe_acls)
3952                 cache_no_acl(inode);
3953 
3954         switch (inode->i_mode & S_IFMT) {
3955         case S_IFREG:
3956                 inode->i_mapping->a_ops = &btrfs_aops;
3957                 BTRFS_I(inode)->io_tree.ops = &btrfs_extent_io_ops;
3958                 inode->i_fop = &btrfs_file_operations;
3959                 inode->i_op = &btrfs_file_inode_operations;
3960                 break;
3961         case S_IFDIR:
3962                 inode->i_fop = &btrfs_dir_file_operations;
3963                 inode->i_op = &btrfs_dir_inode_operations;
3964                 break;
3965         case S_IFLNK:
3966                 inode->i_op = &btrfs_symlink_inode_operations;
3967                 inode_nohighmem(inode);
3968                 inode->i_mapping->a_ops = &btrfs_aops;
3969                 break;
3970         default:
3971                 inode->i_op = &btrfs_special_inode_operations;
3972                 init_special_inode(inode, inode->i_mode, rdev);
3973                 break;
3974         }
3975 
3976         btrfs_sync_inode_flags_to_i_flags(inode);
3977         return 0;
3978 }
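
/*
 * Editorial note, not part of the original file: this reader is driven by
 * btrfs_iget()/btrfs_iget_path(), which look up or allocate the VFS inode
 * and, when it is new, call here to fill it from the tree before unlocking
 * it.
 */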
3979 
3980 /*
3981  * given a leaf and an inode, copy the inode fields into the leaf
3982  */
3983 static void fill_inode_item(struct btrfs_trans_handle *trans,
3984                             struct extent_buffer *leaf,
3985                             struct btrfs_inode_item *item,
3986                             struct inode *inode)
3987 {
3988         struct btrfs_map_token token;
3989 
3990         btrfs_init_map_token(&token, leaf);
3991 
3992         btrfs_set_token_inode_uid(leaf, item, i_uid_read(inode), &token);
3993         btrfs_set_token_inode_gid(leaf, item, i_gid_read(inode), &token);
3994         btrfs_set_token_inode_size(leaf, item, BTRFS_I(inode)->disk_i_size,
3995                                    &token);
3996         btrfs_set_token_inode_mode(leaf, item, inode->i_mode, &token);
3997         btrfs_set_token_inode_nlink(leaf, item, inode->i_nlink, &token);
3998 
3999         btrfs_set_token_timespec_sec(leaf, &item->atime,
4000                                      inode->i_atime.tv_sec, &token);
4001         btrfs_set_token_timespec_nsec(leaf, &item->atime,
4002                                       inode->i_atime.tv_nsec, &token);
4003 
4004         btrfs_set_token_timespec_sec(leaf, &item->mtime,
4005                                      inode->i_mtime.tv_sec, &token);
4006         btrfs_set_token_timespec_nsec(leaf, &item->mtime,
4007                                       inode->i_mtime.tv_nsec, &token);
4008 
4009         btrfs_set_token_timespec_sec(leaf, &item->ctime,
4010                                      inode->i_ctime.tv_sec, &token);
4011         btrfs_set_token_timespec_nsec(leaf, &item->ctime,
4012                                       inode->i_ctime.tv_nsec, &token);
4013 
4014         btrfs_set_token_timespec_sec(leaf, &item->otime,
4015                                      BTRFS_I(inode)->i_otime.tv_sec, &token);
4016         btrfs_set_token_timespec_nsec(leaf, &item->otime,
4017                                       BTRFS_I(inode)->i_otime.tv_nsec, &token);
4018 
4019         btrfs_set_token_inode_nbytes(leaf, item, inode_get_bytes(inode),
4020                                      &token);
4021         btrfs_set_token_inode_generation(leaf, item, BTRFS_I(inode)->generation,
4022                                          &token);
4023         btrfs_set_token_inode_sequence(leaf, item, inode_peek_iversion(inode),
4024                                        &token);
4025         btrfs_set_token_inode_transid(leaf, item, trans->transid, &token);
4026         btrfs_set_token_inode_rdev(leaf, item, inode->i_rdev, &token);
4027         btrfs_set_token_inode_flags(leaf, item, BTRFS_I(inode)->flags, &token);
4028         btrfs_set_token_inode_block_group(leaf, item, 0, &token);
4029 }
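
/*
 * Editorial note, not part of the original file: the map_token above caches
 * the currently mapped extent_buffer page across the btrfs_set_token_*
 * calls, so this run of adjacent field writes avoids re-mapping the
 * metadata page for every setter.
 */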
4030 
4031 /*
4032  * copy everything in the in-memory inode into the btree.
4033  */
4034 static noinline int btrfs_update_inode_item(struct btrfs_trans_handle *trans,
4035                                 struct btrfs_root *root, struct inode *inode)
4036 {
4037         struct btrfs_inode_item *inode_item;
4038         struct btrfs_path *path;
4039         struct extent_buffer *leaf;
4040         int ret;
4041 
4042         path = btrfs_alloc_path();
4043         if (!path)
4044                 return -ENOMEM;
4045 
4046         path->leave_spinning = 1;
4047         ret = btrfs_lookup_inode(trans, root, path, &BTRFS_I(inode)->location,
4048                                  1);
4049         if (ret) {
4050                 if (ret > 0)
4051                         ret = -ENOENT;
4052                 goto failed;
4053         }
4054 
4055         leaf = path->nodes[0];
4056         inode_item = btrfs_item_ptr(leaf, path->slots[0],
4057                                     struct btrfs_inode_item);
4058 
4059         fill_inode_item(trans, leaf, inode_item, inode);
4060         btrfs_mark_buffer_dirty(leaf);
4061         btrfs_set_inode_last_trans(trans, inode);
4062         ret = 0;
4063 failed:
4064         btrfs_free_path(path);
4065         return ret;
4066 }
4067 
4068 /*
4069  * copy everything in the in-memory inode into the btree.
4070  */
4071 noinline int btrfs_update_inode(struct btrfs_trans_handle *trans,
4072                                 struct btrfs_root *root, struct inode *inode)
4073 {
4074         struct btrfs_fs_info *fs_info = root->fs_info;
4075         int ret;
4076 
4077         /*
4078          * If the inode is a free space inode, we can deadlock during commit
4079          * if we put it into the delayed code.
4080          *
4081          * The data relocation inode should also be directly updated
4082          * without delay.
4083          */
4084         if (!btrfs_is_free_space_inode(BTRFS_I(inode)) &&
4085             root->root_key.objectid != BTRFS_DATA_RELOC_TREE_OBJECTID &&
4086             !test_bit(BTRFS_FS_LOG_RECOVERING, &fs_info->flags)) {
4087                 btrfs_update_root_times(trans, root);
4088 
4089                 ret = btrfs_delayed_update_inode(trans, root, inode);
4090                 if (!ret)
4091                         btrfs_set_inode_last_trans(trans, inode);
4092                 return ret;
4093         }
4094 
4095         return btrfs_update_inode_item(trans, root, inode);
4096 }
4097 
4098 noinline int btrfs_update_inode_fallback(struct btrfs_trans_handle *trans,
4099                                          struct btrfs_root *root,
4100                                          struct inode *inode)
4101 {
4102         int ret;
4103 
4104         ret = btrfs_update_inode(trans, root, inode);
4105         if (ret == -ENOSPC)
4106                 return btrfs_update_inode_item(trans, root, inode);
4107         return ret;
4108 }
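
/*
 * Editorial usage sketch, not part of the original file: callers modify the
 * in-memory inode first and then mirror it into the btree from inside an
 * open transaction; btrfs_update_inode() prefers the delayed-inode path and
 * falls back to a direct item update.  The function name is hypothetical and
 * error handling is trimmed.
 */
static int __maybe_unused example_touch_ctime(struct btrfs_trans_handle *trans,
					      struct btrfs_root *root,
					      struct inode *inode)
{
	inode->i_ctime = current_time(inode);
	inode_inc_iversion(inode);
	return btrfs_update_inode(trans, root, inode);
}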
4109 
4110 /*
4111  * unlink helper that gets used here in inode.c and in the tree logging
4112  * recovery code.  It removes a link in a directory with a given name, and
4113  * also drops the back refs in the inode to the directory
4114  */
4115 static int __btrfs_unlink_inode(struct btrfs_trans_handle *trans,
4116                                 struct btrfs_root *root,
4117                                 struct btrfs_inode *dir,
4118                                 struct btrfs_inode *inode,
4119                                 const char *name, int name_len)
4120 {
4121         struct btrfs_fs_info *fs_info = root->fs_info;
4122         struct btrfs_path *path;
4123         int ret = 0;
4124         struct btrfs_dir_item *di;
4125         u64 index;
4126         u64 ino = btrfs_ino(inode);
4127         u64 dir_ino = btrfs_ino(dir);
4128 
4129         path = btrfs_alloc_path();
4130         if (!path) {
4131                 ret = -ENOMEM;
4132                 goto out;
4133         }
4134 
4135         path->leave_spinning = 1;
4136         di = btrfs_lookup_dir_item(trans, root, path, dir_ino,
4137                                     name, name_len, -1);
4138         if (IS_ERR_OR_NULL(di)) {
4139                 ret = di ? PTR_ERR(di) : -ENOENT;
4140                 goto err;
4141         }
4142         ret = btrfs_delete_one_dir_name(trans, root, path, di);
4143         if (ret)
4144                 goto err;
4145         btrfs_release_path(path);
4146 
4147         /*
4148          * If we don't have a cached dir index, we have to look it up by
4149          * searching for the inode ref.  Since that search hands us the
4150          * ref itself, we remove it on the spot; a delayed deletion would
4151          * gain nothing.
4152          *
4153          * But if we do have the dir index, there is no need to search
4154          * for the inode ref.  The ref sits close to the inode item, so it
4155          * is cheaper to delay its deletion until we update the inode item.
4156          */
4157         if (inode->dir_index) {
4158                 ret = btrfs_delayed_delete_inode_ref(inode);
4159                 if (!ret) {
4160                         index = inode->dir_index;
4161                         goto skip_backref;
4162                 }
4163         }
4164 
4165         ret = btrfs_del_inode_ref(trans, root, name, name_len, ino,
4166                                   dir_ino, &index);
4167         if (ret) {
4168                 btrfs_info(fs_info,
4169                         "failed to delete reference to %.*s, inode %llu parent %llu",
4170                         name_len, name, ino, dir_ino);
4171                 btrfs_abort_transaction(trans, ret);
4172                 goto err;
4173         }
4174 skip_backref:
4175         ret = btrfs_delete_delayed_dir_index(trans, dir, index);
4176         if (ret) {
4177                 btrfs_abort_transaction(trans, ret);
4178                 goto err;
4179         }
4180 
4181         ret = btrfs_del_inode_ref_in_log(trans, root, name, name_len, inode,
4182                         dir_ino);
4183         if (ret != 0 && ret != -ENOENT) {
4184                 btrfs_abort_transaction(trans, ret);
4185                 goto err;
4186         }
4187 
4188         ret = btrfs_del_dir_entries_in_log(trans, root, name, name_len, dir,
4189                         index);
4190         if (ret == -ENOENT)
4191                 ret = 0;
4192         else if (ret)
4193                 btrfs_abort_transaction(trans, ret);
4194 
4195         /*
4196          * If we have a pending delayed iput we could end up with the final iput
4197          * being run in btrfs-cleaner context.  If we have enough of these built
4198          * up we can end up burning a lot of time in btrfs-cleaner without any
4199          * way to throttle the unlinks.  Since we're currently holding a ref on
4200          * the inode we can run the delayed iput here without any issues as the
4201          * final iput won't be done until after we drop the ref we're currently
4202          * holding.
4203          */
4204         btrfs_run_delayed_iput(fs_info, inode);
4205 err:
4206         btrfs_free_path(path);
4207         if (ret)
4208                 goto out;
4209 
4210         btrfs_i_size_write(dir, dir->vfs_inode.i_size - name_len * 2);
4211         inode_inc_iversion(&inode->vfs_inode);
4212         inode_inc_iversion(&dir->vfs_inode);
4213         inode->vfs_inode.i_ctime = dir->vfs_inode.i_mtime =
4214                 dir->vfs_inode.i_ctime = current_time(&inode->vfs_inode);
4215         ret = btrfs_update_inode(trans, root, &dir->vfs_inode);
4216 out:
4217         return ret;
4218 }
4219 
4220 int btrfs_unlink_inode(struct btrfs_trans_handle *trans,
4221                        struct btrfs_root *root,
4222                        struct btrfs_inode *dir, struct btrfs_inode *inode,
4223                        const char *name, int name_len)
4224 {
4225         int ret;
4226         ret = __btrfs_unlink_inode(trans, root, dir, inode, name, name_len);
4227         if (!ret) {
4228                 drop_nlink(&inode->vfs_inode);
4229                 ret = btrfs_update_inode(trans, root, &inode->vfs_inode);
4230         }
4231         return ret;
4232 }
4233 
4234 /*
4235  * helper to start transaction for unlink and rmdir.
4236  *
4237  * unlink and rmdir are special in btrfs: they do not always free space.
4238  * So if we cannot make our reservations the normal way, check whether
4239  * there is plenty of slack room in the global reserve to migrate from;
4240  * otherwise we cannot allow the unlink to occur.
4241  */
4242 static struct btrfs_trans_handle *__unlink_start_trans(struct inode *dir)
4243 {
4244         struct btrfs_root *root = BTRFS_I(dir)->root;
4245 
4246         /*
4247          * 1 for the possible orphan item
4248          * 1 for the dir item
4249          * 1 for the dir index
4250          * 1 for the inode ref
4251          * 1 for the inode
4252          */
4253         return btrfs_start_transaction_fallback_global_rsv(root, 5, 5);
4254 }
4255 
4256 static int btrfs_unlink(struct inode *dir, struct dentry *dentry)
4257 {
4258         struct btrfs_root *root = BTRFS_I(dir)->root;
4259         struct btrfs_trans_handle *trans;
4260         struct inode *inode = d_inode(dentry);
4261         int ret;
4262 
4263         trans = __unlink_start_trans(dir);
4264         if (IS_ERR(trans))
4265                 return PTR_ERR(trans);
4266 
4267         btrfs_record_unlink_dir(trans, BTRFS_I(dir), BTRFS_I(d_inode(dentry)),
4268                         0);
4269 
4270         ret = btrfs_unlink_inode(trans, root, BTRFS_I(dir),
4271                         BTRFS_I(d_inode(dentry)), dentry->d_name.name,
4272                         dentry->d_name.len);
4273         if (ret)
4274                 goto out;
4275 
4276         if (inode->i_nlink == 0) {
4277                 ret = btrfs_orphan_add(trans, BTRFS_I(inode));
4278                 if (ret)
4279                         goto out;
4280         }
4281 
4282 out:
4283         btrfs_end_transaction(trans);
4284         btrfs_btree_balance_dirty(root->fs_info);
4285         return ret;
4286 }
4287 
4288 static int btrfs_unlink_subvol(struct btrfs_trans_handle *trans,
4289                                struct inode *dir, struct dentry *dentry)
4290 {
4291         struct btrfs_root *root = BTRFS_I(dir)->root;
4292         struct btrfs_inode *inode = BTRFS_I(d_inode(dentry));
4293         struct btrfs_path *path;
4294         struct extent_buffer *leaf;
4295         struct btrfs_dir_item *di;
4296         struct btrfs_key key;
4297         const char *name = dentry->d_name.name;
4298         int name_len = dentry->d_name.len;
4299         u64 index;
4300         int ret;
4301         u64 objectid;
4302         u64 dir_ino = btrfs_ino(BTRFS_I(dir));
4303 
4304         if (btrfs_ino(inode) == BTRFS_FIRST_FREE_OBJECTID) {
4305                 objectid = inode->root->root_key.objectid;
4306         } else if (btrfs_ino(inode) == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID) {
4307                 objectid = inode->location.objectid;
4308         } else {
4309                 WARN_ON(1);
4310                 return -EINVAL;
4311         }
4312 
4313         path = btrfs_alloc_path();
4314         if (!path)
4315                 return -ENOMEM;
4316 
4317         di = btrfs_lookup_dir_item(trans, root, path, dir_ino,
4318                                    name, name_len, -1);
4319         if (IS_ERR_OR_NULL(di)) {
4320                 ret = di ? PTR_ERR(di) : -ENOENT;
4321                 goto out;
4322         }
4323 
4324         leaf = path->nodes[0];
4325         btrfs_dir_item_key_to_cpu(leaf, di, &key);
4326         WARN_ON(key.type != BTRFS_ROOT_ITEM_KEY || key.objectid != objectid);
4327         ret = btrfs_delete_one_dir_name(trans, root, path, di);
4328         if (ret) {
4329                 btrfs_abort_transaction(trans, ret);
4330                 goto out;
4331         }
4332         btrfs_release_path(path);
4333 
4334         /*
4335          * This is a placeholder inode for a subvolume we didn't have a
4336          * reference to at the time of the snapshot creation.  In the meantime
4337          * we could have renamed the real subvol link into our snapshot, so
4338          * depending on btrfs_del_root_ref to return -ENOENT here is incorrect.
4339          * Instead simply lookup the dir_index_item for this entry so we can
4340          * remove it.  Otherwise we know we have a ref to the root and we can
4341          * call btrfs_del_root_ref, and it _shouldn't_ fail.
4342          */
4343         if (btrfs_ino(inode) == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID) {
4344                 di = btrfs_search_dir_index_item(root, path, dir_ino,
4345                                                  name, name_len);
4346                 if (IS_ERR_OR_NULL(di)) {
4347                         if (!di)
4348                                 ret = -ENOENT;
4349                         else
4350                                 ret = PTR_ERR(di);
4351                         btrfs_abort_transaction(trans, ret);
4352                         goto out;
4353                 }
4354 
4355                 leaf = path->nodes[0];
4356                 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
4357                 index = key.offset;
4358                 btrfs_release_path(path);
4359         } else {
4360                 ret = btrfs_del_root_ref(trans, objectid,
4361                                          root->root_key.objectid, dir_ino,
4362                                          &index, name, name_len);
4363                 if (ret) {
4364                         btrfs_abort_transaction(trans, ret);
4365                         goto out;
4366                 }
4367         }
4368 
4369         ret = btrfs_delete_delayed_dir_index(trans, BTRFS_I(dir), index);
4370         if (ret) {
4371                 btrfs_abort_transaction(trans, ret);
4372                 goto out;
4373         }
4374 
4375         btrfs_i_size_write(BTRFS_I(dir), dir->i_size - name_len * 2);
4376         inode_inc_iversion(dir);
4377         dir->i_mtime = dir->i_ctime = current_time(dir);
4378         ret = btrfs_update_inode_fallback(trans, root, dir);
4379         if (ret)
4380                 btrfs_abort_transaction(trans, ret);
4381 out:
4382         btrfs_free_path(path);
4383         return ret;
4384 }
4385 
4386 /*
4387  * Helper to check if the subvolume references other subvolumes or if it's
4388  * default.
4389  */
4390 static noinline int may_destroy_subvol(struct btrfs_root *root)
4391 {
4392         struct btrfs_fs_info *fs_info = root->fs_info;
4393         struct btrfs_path *path;
4394         struct btrfs_dir_item *di;
4395         struct btrfs_key key;
4396         u64 dir_id;
4397         int ret;
4398 
4399         path = btrfs_alloc_path();
4400         if (!path)
4401                 return -ENOMEM;
4402 
4403         /* Make sure this root isn't set as the default subvol */
4404         dir_id = btrfs_super_root_dir(fs_info->super_copy);
4405         di = btrfs_lookup_dir_item(NULL, fs_info->tree_root, path,
4406                                    dir_id, "default", 7, 0);
4407         if (di && !IS_ERR(di)) {
4408                 btrfs_dir_item_key_to_cpu(path->nodes[0], di, &key);
4409                 if (key.objectid == root->root_key.objectid) {
4410                         ret = -EPERM;
4411                         btrfs_err(fs_info,
4412                                   "deleting default subvolume %llu is not allowed",
4413                                   key.objectid);
4414                         goto out;
4415                 }
4416                 btrfs_release_path(path);
4417         }
4418 
4419         key.objectid = root->root_key.objectid;
4420         key.type = BTRFS_ROOT_REF_KEY;
4421         key.offset = (u64)-1;
4422 
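        /*
         * Editorial comment, not part of the original file: no ROOT_REF key
         * can have offset (u64)-1, so this search lands just past the last
         * possible ref for this root; stepping back one slot below tells us
         * whether any ROOT_REF items exist at all.
         */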
4423         ret = btrfs_search_slot(NULL, fs_info->tree_root, &key, path, 0, 0);
4424         if (ret < 0)
4425                 goto out;
4426         BUG_ON(ret == 0);
4427 
4428         ret = 0;
4429         if (path->slots[0] > 0) {
4430                 path->slots[0]--;
4431                 btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
4432                 if (key.objectid == root->root_key.objectid &&
4433                     key.type == BTRFS_ROOT_REF_KEY)
4434                         ret = -ENOTEMPTY;
4435         }
4436 out:
4437         btrfs_free_path(path);
4438         return ret;
4439 }
4440 
4441 /* Delete all dentries for inodes belonging to the root */
4442 static void btrfs_prune_dentries(struct btrfs_root *root)
4443 {
4444         struct btrfs_fs_info *fs_info = root->fs_info;
4445         struct rb_node *node;
4446         struct rb_node *prev;
4447         struct btrfs_inode *entry;
4448         struct inode *inode;
4449         u64 objectid = 0;
4450 
4451         if (!test_bit(BTRFS_FS_STATE_ERROR, &fs_info->fs_state))
4452                 WARN_ON(btrfs_root_refs(&root->root_item) != 0);
4453 
4454         spin_lock(&root->inode_lock);
4455 again:
4456         node = root->inode_tree.rb_node;
4457         prev = NULL;
4458         while (node) {
4459                 prev = node;
4460                 entry = rb_entry(node, struct btrfs_inode, rb_node);
4461 
4462                 if (objectid < btrfs_ino(entry))
4463                         node = node->rb_left;
4464                 else if (objectid > btrfs_ino(entry))
4465                         node = node->rb_right;
4466                 else
4467                         break;
4468         }
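        /*
         * Editorial comment, not part of the original file: if no inode with
         * exactly this objectid is cached, resume the walk from the first
         * in-memory inode whose number is greater than or equal to objectid.
         */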
4469         if (!node) {
4470                 while (prev) {
4471                         entry = rb_entry(prev, struct btrfs_inode, rb_node);
4472                         if (objectid <= btrfs_ino(entry)) {
4473                                 node = prev;
4474                                 break;
4475                         }
4476                         prev = rb_next(prev);
4477                 }
4478         }
4479         while (node) {
4480                 entry = rb_entry(node, struct btrfs_inode, rb_node);
4481                 objectid = btrfs_ino(entry) + 1;
4482                 inode = igrab(&entry->vfs_inode);
4483                 if (inode) {
4484                         spin_unlock(&root->inode_lock);
4485                         if (atomic_read(&inode->i_count) > 1)
4486                                 d_prune_aliases(inode);
4487                         /*
4488                          * btrfs_drop_inode will have it removed from the inode
4489                          * cache when its usage count hits zero.
4490                          */
4491                         iput(inode);
4492                         cond_resched();
4493                         spin_lock(&root->inode_lock);
4494                         goto again;
4495                 }
4496 
4497                 if (cond_resched_lock(&root->inode_lock))
4498                         goto again;
4499 
4500                 node = rb_next(node);
4501         }
4502         spin_unlock(&root->inode_lock);
4503 }
4504 
4505 int btrfs_delete_subvolume(struct inode *dir, struct dentry *dentry)
4506 {
4507         struct btrfs_fs_info *fs_info = btrfs_sb(dentry->d_sb);
4508         struct btrfs_root *root = BTRFS_I(dir)->root;
4509         struct inode *inode = d_inode(dentry);
4510         struct btrfs_root *dest = BTRFS_I(inode)->root;
4511         struct btrfs_trans_handle *trans;
4512         struct btrfs_block_rsv block_rsv;
4513         u64 root_flags;
4514         int ret;
4515         int err;
4516 
4517         /*
4518          * Don't allow deleting a subvolume while a send is in progress.
4519          * This runs inside the inode lock, so the error handling that has
4520          * to drop the SUBVOL_DEAD flag again cannot run concurrently.
4521          */
4522         spin_lock(&dest->root_item_lock);
4523         if (dest->send_in_progress) {
4524                 spin_unlock(&dest->root_item_lock);
4525                 btrfs_warn(fs_info,
4526                            "attempt to delete subvolume %llu during send",
4527                            dest->root_key.objectid);
4528                 return -EPERM;
4529         }
4530         root_flags = btrfs_root_flags(&dest->root_item);
4531         btrfs_set_root_flags(&dest->root_item,
4532                              root_flags | BTRFS_ROOT_SUBVOL_DEAD);
4533         spin_unlock(&dest->root_item_lock);
4534 
4535         down_write(&fs_info->subvol_sem);
4536 
4537         err = may_destroy_subvol(dest);
4538         if (err)
4539                 goto out_up_write;
4540 
4541         btrfs_init_block_rsv(&block_rsv, BTRFS_BLOCK_RSV_TEMP);
4542         /*
4543          * One for dir inode,
4544          * two for dir entries,
4545          * two for root ref/backref.
4546          */
4547         err = btrfs_subvolume_reserve_metadata(root, &block_rsv, 5, true);
4548         if (err)
4549                 goto out_up_write;
4550 
4551         trans = btrfs_start_transaction(root, 0);
4552         if (IS_ERR(trans)) {
4553                 err = PTR_ERR(trans);
4554                 goto out_release;
4555         }
4556         trans->block_rsv = &block_rsv;
4557         trans->bytes_reserved = block_rsv.size;
4558 
4559         btrfs_record_snapshot_destroy(trans, BTRFS_I(dir));
4560 
4561         ret = btrfs_unlink_subvol(trans, dir, dentry);
4562         if (ret) {
4563                 err = ret;
4564                 btrfs_abort_transaction(trans, ret);
4565                 goto out_end_trans;
4566         }
4567 
4568         btrfs_record_root_in_trans(trans, dest);
4569 
4570         memset(&dest->root_item.drop_progress, 0,
4571                 sizeof(dest->root_item.drop_progress));
4572         dest->root_item.drop_level = 0;
4573         btrfs_set_root_refs(&dest->root_item, 0);
4574 
4575         if (!test_and_set_bit(BTRFS_ROOT_ORPHAN_ITEM_INSERTED, &dest->state)) {
4576                 ret = btrfs_insert_orphan_item(trans,
4577                                         fs_info->tree_root,
4578                                         dest->root_key.objectid);
4579                 if (ret) {
4580                         btrfs_abort_transaction(trans, ret);
4581                         err = ret;
4582                         goto out_end_trans;
4583                 }
4584         }
4585 
4586         ret = btrfs_uuid_tree_remove(trans, dest->root_item.uuid,
4587                                   BTRFS_UUID_KEY_SUBVOL,
4588                                   dest->root_key.objectid);
4589         if (ret && ret != -ENOENT) {
4590                 btrfs_abort_transaction(trans, ret);
4591                 err = ret;
4592                 goto out_end_trans;
4593         }
4594         if (!btrfs_is_empty_uuid(dest->root_item.received_uuid)) {
4595                 ret = btrfs_uuid_tree_remove(trans,
4596                                           dest->root_item.received_uuid,
4597                                           BTRFS_UUID_KEY_RECEIVED_SUBVOL,
4598                                           dest->root_key.objectid);
4599                 if (ret && ret != -ENOENT) {
4600                         btrfs_abort_transaction(trans, ret);
4601                         err = ret;
4602                         goto out_end_trans;
4603                 }
4604         }
4605 
4606 out_end_trans:
4607         trans->block_rsv = NULL;
4608         trans->bytes_reserved = 0;
4609         ret = btrfs_end_transaction(trans);
4610         if (ret && !err)
4611                 err = ret;
4612         inode->i_flags |= S_DEAD;
4613 out_release:
4614         btrfs_subvolume_release_metadata(fs_info, &block_rsv);
4615 out_up_write:
4616         up_write(&fs_info->subvol_sem);
4617         if (err) {
4618                 spin_lock(&dest->root_item_lock);
4619                 root_flags = btrfs_root_flags(&dest->root_item);
4620                 btrfs_set_root_flags(&dest->root_item,
4621                                 root_flags & ~BTRFS_ROOT_SUBVOL_DEAD);
4622                 spin_unlock(&dest->root_item_lock);
4623         } else {
4624                 d_invalidate(dentry);
4625                 btrfs_prune_dentries(dest);
4626                 ASSERT(dest->send_in_progress == 0);
4627 
4628                 /* the last ref */
4629                 if (dest->ino_cache_inode) {
4630                         iput(dest->ino_cache_inode);
4631                         dest->ino_cache_inode = NULL;
4632                 }
4633         }
4634 
4635         return err;
4636 }
4637 
4638 static int btrfs_rmdir(struct inode *dir, struct dentry *dentry)
4639 {
4640         struct inode *inode = d_inode(dentry);
4641         int err = 0;
4642         struct btrfs_root *root = BTRFS_I(dir)->root;
4643         struct btrfs_trans_handle *trans;
4644         u64 last_unlink_trans;
4645 
4646         if (inode->i_size > BTRFS_EMPTY_DIR_SIZE)
4647                 return -ENOTEMPTY;
4648         if (btrfs_ino(BTRFS_I(inode)) == BTRFS_FIRST_FREE_OBJECTID)
4649                 return btrfs_delete_subvolume(dir, dentry);
4650 
4651         trans = __unlink_start_trans(dir);
4652         if (IS_ERR(trans))
4653                 return PTR_ERR(trans);
4654 
4655         if (unlikely(btrfs_ino(BTRFS_I(inode)) == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID)) {
4656                 err = btrfs_unlink_subvol(trans, dir, dentry);
4657                 goto out;
4658         }
4659 
4660         err = btrfs_orphan_add(trans, BTRFS_I(inode));
4661         if (err)
4662                 goto out;
4663 
4664         last_unlink_trans = BTRFS_I(inode)->last_unlink_trans;
4665 
4666         /* now the directory is empty */
4667         err = btrfs_unlink_inode(trans, root, BTRFS_I(dir),
4668                         BTRFS_I(d_inode(dentry)), dentry->d_name.name,
4669                         dentry->d_name.len);
4670         if (!err) {
4671                 btrfs_i_size_write(BTRFS_I(inode), 0);
4672                 /*
4673                  * Propagate the last_unlink_trans value of the deleted dir to
4674                  * its parent directory. This is to prevent an unrecoverable
4675                  * log tree in the case we do something like this:
4676                  * 1) create dir foo
4677                  * 2) create snapshot under dir foo
4678                  * 3) delete the snapshot
4679                  * 4) rmdir foo
4680                  * 5) mkdir foo
4681                  * 6) fsync foo or some file inside foo
4682                  */
4683                 if (last_unlink_trans >= trans->transid)
4684                         BTRFS_I(dir)->last_unlink_trans = last_unlink_trans;
4685         }
4686 out:
4687         btrfs_end_transaction(trans);
4688         btrfs_btree_balance_dirty(root->fs_info);
4689 
4690         return err;
4691 }
4692 
4693 /*
4694  * Return this if we need to call truncate_block for the last bit of the
4695  * truncate.
4696  */
4697 #define NEED_TRUNCATE_BLOCK 1
4698 
4699 /*
4700  * this can truncate away extent items, csum items and directory items.
4701  * It starts at a high offset and removes keys until it can't find
4702  * any higher than new_size
4703  *
4704  * csum items that cross the new i_size are truncated to the new size
4705  * as well.
4706  *
4707  * min_type is the minimum key type to truncate down to.  If set to 0, this
4708  * will kill all the items on this inode, including the INODE_ITEM_KEY.
4709  */
4710 int btrfs_truncate_inode_items(struct btrfs_trans_handle *trans,
4711                                struct btrfs_root *root,
4712                                struct inode *inode,
4713                                u64 new_size, u32 min_type)
4714 {
4715         struct btrfs_fs_info *fs_info = root->fs_info;
4716         struct btrfs_path *path;
4717         struct extent_buffer *leaf;
4718         struct btrfs_file_extent_item *fi;
4719         struct btrfs_key key;
4720         struct btrfs_key found_key;
4721         u64 extent_start = 0;
4722         u64 extent_num_bytes = 0;
4723         u64 extent_offset = 0;
4724         u64 item_end = 0;
4725         u64 last_size = new_size;
4726         u32 found_type = (u8)-1;
4727         int found_extent;
4728         int del_item;
4729         int pending_del_nr = 0;
4730         int pending_del_slot = 0;
4731         int extent_type = -1;
4732         int ret;
4733         u64 ino = btrfs_ino(BTRFS_I(inode));
4734         u64 bytes_deleted = 0;
4735         bool be_nice = false;
4736         bool should_throttle = false;
4737         const u64 lock_start = ALIGN_DOWN(new_size, fs_info->sectorsize);
4738         struct extent_state *cached_state = NULL;
4739 
4740         BUG_ON(new_size > 0 && min_type != BTRFS_EXTENT_DATA_KEY);
4741 
4742         /*
4743          * for non-free space inodes and ref cows, we want to back off from
4744          * time to time
4745          */
4746         if (!btrfs_is_free_space_inode(BTRFS_I(inode)) &&
4747             test_bit(BTRFS_ROOT_REF_COWS, &root->state))
4748                 be_nice = true;
4749 
4750         path = btrfs_alloc_path();
4751         if (!path)
4752                 return -ENOMEM;
4753         path->reada = READA_BACK;
4754 
4755         if (root->root_key.objectid != BTRFS_TREE_LOG_OBJECTID)
4756                 lock_extent_bits(&BTRFS_I(inode)->io_tree, lock_start, (u64)-1,
4757                                  &cached_state);
4758 
4759         /*
4760          * We want to drop from the next block forward in case this new size is
4761          * not block aligned since we will be keeping the last block of the
4762          * extent just the way it is.
4763          */
4764         if (test_bit(BTRFS_ROOT_REF_COWS, &root->state) ||
4765             root == fs_info->tree_root)
4766                 btrfs_drop_extent_cache(BTRFS_I(inode), ALIGN(new_size,
4767                                         fs_info->sectorsize),
4768                                         (u64)-1, 0);
4769 
4770         /*
4771          * This function is also used to drop the items in the log tree before
4772          * we relog the inode, so if root != BTRFS_I(inode)->root, it means
4773          * it is used to drop the logged items. So we shouldn't kill the delayed
4774          * items.
4775          */
4776         if (min_type == 0 && root == BTRFS_I(inode)->root)
4777                 btrfs_kill_delayed_inode_items(BTRFS_I(inode));
4778 
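        /*
         * Editorial comment, not part of the original file: start at the
         * largest possible key for this inode and walk backwards.
         */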
4779         key.objectid = ino;
4780         key.offset = (u64)-1;
4781         key.type = (u8)-1;
4782 
4783 search_again:
4784         /*
4785          * with a 16K leaf size and 128MB extents, you can actually queue
4786          * up a huge file in a single leaf.  Most of the time that
4787          * up a huge file in a single leaf.  Most of the time, when
4788          * bytes_deleted is > 0, it will be huge by the time we get here.
4789         if (be_nice && bytes_deleted > SZ_32M &&
4790             btrfs_should_end_transaction(trans)) {
4791                 ret = -EAGAIN;
4792                 goto out;
4793         }
4794 
4795         ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
4796         if (ret < 0)
4797                 goto out;
4798 
4799         if (ret > 0) {
4800                 ret = 0;
4801                 /* there are no items in the tree for us to truncate, we're
4802                  * done
4803                  */
4804                 if (path->slots[0] == 0)
4805                         goto out;
4806                 path->slots[0]--;
4807         }
4808 
4809         while (1) {
4810                 fi = NULL;
4811                 leaf = path->nodes[0];
4812                 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
4813                 found_type = found_key.type;
4814 
4815                 if (found_key.objectid != ino)
4816                         break;
4817 
4818                 if (found_type < min_type)
4819                         break;
4820 
4821                 item_end = found_key.offset;
4822                 if (found_type == BTRFS_EXTENT_DATA_KEY) {
4823                         fi = btrfs_item_ptr(leaf, path->slots[0],
4824                                             struct btrfs_file_extent_item);
4825                         extent_type = btrfs_file_extent_type(leaf, fi);
4826                         if (extent_type != BTRFS_FILE_EXTENT_INLINE) {
4827                                 item_end +=
4828                                     btrfs_file_extent_num_bytes(leaf, fi);
4829 
4830                                 trace_btrfs_truncate_show_fi_regular(
4831                                         BTRFS_I(inode), leaf, fi,
4832                                         found_key.offset);
4833                         } else if (extent_type == BTRFS_FILE_EXTENT_INLINE) {
4834                                 item_end += btrfs_file_extent_ram_bytes(leaf,
4835                                                                         fi);
4836 
4837                                 trace_btrfs_truncate_show_fi_inline(
4838                                         BTRFS_I(inode), leaf, fi, path->slots[0],
4839                                         found_key.offset);
4840                         }
4841                         item_end--;
4842                 }
4843                 if (found_type > min_type) {
4844                         del_item = 1;
4845                 } else {
4846                         if (item_end < new_size)
4847                                 break;
4848                         if (found_key.offset >= new_size)
4849                                 del_item = 1;
4850                         else
4851                                 del_item = 0;
4852                 }
4853                 found_extent = 0;
4854                 /* FIXME, shrink the extent if the ref count is only 1 */
4855                 if (found_type != BTRFS_EXTENT_DATA_KEY)
4856                         goto delete;
4857 
4858                 if (extent_type != BTRFS_FILE_EXTENT_INLINE) {
4859                         u64 num_dec;
4860                         extent_start = btrfs_file_extent_disk_bytenr(leaf, fi);
4861                         if (!del_item) {
4862                                 u64 orig_num_bytes =
4863                                         btrfs_file_extent_num_bytes(leaf, fi);
4864                                 extent_num_bytes = ALIGN(new_size -
4865                                                 found_key.offset,
4866                                                 fs_info->sectorsize);
4867                                 btrfs_set_file_extent_num_bytes(leaf, fi,
4868                                                          extent_num_bytes);
4869                                 num_dec = (orig_num_bytes -
4870                                            extent_num_bytes);
4871                                 if (test_bit(BTRFS_ROOT_REF_COWS,
4872                                              &root->state) &&
4873                                     extent_start != 0)
4874                                         inode_sub_bytes(inode, num_dec);
4875                                 btrfs_mark_buffer_dirty(leaf);
4876                         } else {
4877                                 extent_num_bytes =
4878                                         btrfs_file_extent_disk_num_bytes(leaf,
4879                                                                          fi);
4880                                 extent_offset = found_key.offset -
4881                                         btrfs_file_extent_offset(leaf, fi);
4882 
4883                                 /* FIXME blocksize != 4096 */
4884                                 num_dec = btrfs_file_extent_num_bytes(leaf, fi);
4885                                 if (extent_start != 0) {
4886                                         found_extent = 1;
4887                                         if (test_bit(BTRFS_ROOT_REF_COWS,
4888                                                      &root->state))
4889                                                 inode_sub_bytes(inode, num_dec);
4890                                 }
4891                         }
4892                 } else if (extent_type == BTRFS_FILE_EXTENT_INLINE) {
4893                         /*
4894                          * we can't truncate inline items that have had
4895                          * special encodings
4896                          */
4897                         if (!del_item &&
4898                             btrfs_file_extent_encryption(leaf, fi) == 0 &&
4899                             btrfs_file_extent_other_encoding(leaf, fi) == 0 &&
4900                             btrfs_file_extent_compression(leaf, fi) == 0) {
4901                                 u32 size = (u32)(new_size - found_key.offset);
4902 
4903                                 btrfs_set_file_extent_ram_bytes(leaf, fi, size);
4904                                 size = btrfs_file_extent_calc_inline_size(size);
4905                                 btrfs_truncate_item(path, size, 1);
4906                         } else if (!del_item) {
4907                                 /*
4908                                  * We have to bail so the last_size is set to
4909                                  * just before this extent.
4910                                  */
4911                                 ret = NEED_TRUNCATE_BLOCK;
4912                                 break;
4913                         }
4914 
4915                         if (test_bit(BTRFS_ROOT_REF_COWS, &root->state))
4916                                 inode_sub_bytes(inode, item_end + 1 - new_size);
4917                 }
4918 delete:
4919                 if (del_item)
4920                         last_size = found_key.offset;
4921                 else
4922                         last_size = new_size;
4923                 if (del_item) {
4924                         if (!pending_del_nr) {
4925                                 /* no pending yet, add ourselves */
4926                                 pending_del_slot = path->slots[0];
4927                                 pending_del_nr = 1;
4928                         } else if (pending_del_nr &&
4929                                    path->slots[0] + 1 == pending_del_slot) {
4930                                 /* hop on the pending chunk */
4931                                 pending_del_nr++;
4932                                 pending_del_slot = path->slots[0];
4933                         } else {
4934                                 BUG();
4935                         }
4936                 } else {
4937                         break;
4938                 }
4939                 should_throttle = false;
4940 
4941                 if (found_extent &&
4942                     (test_bit(BTRFS_ROOT_REF_COWS, &root->state) ||
4943                      root == fs_info->tree_root)) {
4944                         struct btrfs_ref ref = { 0 };
4945 
4946                         bytes_deleted += extent_num_bytes;
4947 
4948                         btrfs_init_generic_ref(&ref, BTRFS_DROP_DELAYED_REF,
4949                                         extent_start, extent_num_bytes, 0);
4950                         ref.real_root = root->root_key.objectid;
4951                         btrfs_init_data_ref(&ref, btrfs_header_owner(leaf),
4952                                         ino, extent_offset);
4953                         ret = btrfs_free_extent(trans, &ref);
4954                         if (ret) {
4955                                 btrfs_abort_transaction(trans, ret);
4956                                 break;
4957                         }
4958                         if (be_nice) {
4959                                 if (btrfs_should_throttle_delayed_refs(trans))
4960                                         should_throttle = true;
4961                         }
4962                 }
4963 
4964                 if (found_type == BTRFS_INODE_ITEM_KEY)
4965                         break;
4966 
4967                 if (path->slots[0] == 0 ||
4968                     path->slots[0] != pending_del_slot ||
4969                     should_throttle) {
4970                         if (pending_del_nr) {
4971                                 ret = btrfs_del_items(trans, root, path,
4972                                                 pending_del_slot,
4973                                                 pending_del_nr);
4974                                 if (ret) {
4975                                         btrfs_abort_transaction(trans, ret);
4976                                         break;
4977                                 }
4978                                 pending_del_nr = 0;
4979                         }
4980                         btrfs_release_path(path);
4981 
4982                         /*
4983                          * We can generate a lot of delayed refs, so we need to
4984                          * throttle every once in a while and make sure we're
4985                          * adding enough space to keep up with the work we are
4986                          * generating.  Since we hold a transaction here we
4987                          * can't flush, and we don't want to FLUSH_LIMIT because
4988                          * we could have generated too many delayed refs to
4989                          * actually allocate, so just bail if we're short and
4990                          * let the normal reservation dance happen higher up.
4991                          */
4992                         if (should_throttle) {
4993                                 ret = btrfs_delayed_refs_rsv_refill(fs_info,
4994                                                         BTRFS_RESERVE_NO_FLUSH);
4995                                 if (ret) {
4996                                         ret = -EAGAIN;
4997                                         break;
4998                                 }
4999                         }
5000                         goto search_again;
5001                 } else {
5002                         path->slots[0]--;
5003                 }
5004         }
5005 out:
5006         if (ret >= 0 && pending_del_nr) {
5007                 int err;
5008 
5009                 err = btrfs_del_items(trans, root, path, pending_del_slot,
5010                                       pending_del_nr);
5011                 if (err) {
5012                         btrfs_abort_transaction(trans, err);
5013                         ret = err;
5014                 }
5015         }
5016         if (root->root_key.objectid != BTRFS_TREE_LOG_OBJECTID) {
5017                 ASSERT(last_size >= new_size);
5018                 if (!ret && last_size > new_size)
5019                         last_size = new_size;
5020                 btrfs_ordered_update_i_size(inode, last_size, NULL);
5021                 unlock_extent_cached(&BTRFS_I(inode)->io_tree, lock_start,
5022                                      (u64)-1, &cached_state);
5023         }
5024 
5025         btrfs_free_path(path);
5026         return ret;
5027 }
5028 
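/*
 * A minimal sketch of the batched deletion pattern used above (the slot
 * values are hypothetical):
 *
 *	pending_del_slot = 5; pending_del_nr = 3;
 *	ret = btrfs_del_items(trans, root, path, pending_del_slot,
 *			      pending_del_nr);	// drops leaf slots 5..7
 *
 * Batching contiguous slots into one btrfs_del_items() call shifts the
 * remaining leaf items once, instead of once per deleted item.
 */
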
5029 /*
5030  * btrfs_truncate_block - read, zero a chunk and write a block
5031  * @inode - inode that we're zeroing
5032  * @from - the offset to start zeroing
5033  * @len - the length to zero, 0 to zero the entire range relative to the
5034  *      offset
5035  * @front - zero up to the offset instead of from the offset on
5036  *
5037  * This will find the block containing the "from" offset, COW it, and zero
5038  * the part we want.  This is used with truncate and hole punching.
5039  */
5040 int btrfs_truncate_block(struct inode *inode, loff_t from, loff_t len,
5041                         int front)
5042 {
5043         struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
5044         struct address_space *mapping = inode->i_mapping;
5045         struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
5046         struct btrfs_ordered_extent *ordered;
5047         struct extent_state *cached_state = NULL;
5048         struct extent_changeset *data_reserved = NULL;
5049         char *kaddr;
5050         u32 blocksize = fs_info->sectorsize;
5051         pgoff_t index = from >> PAGE_SHIFT;
5052         unsigned offset = from & (blocksize - 1);
5053         struct page *page;
5054         gfp_t mask = btrfs_alloc_write_mask(mapping);
5055         int ret = 0;
5056         u64 block_start;
5057         u64 block_end;
5058 
5059         if (IS_ALIGNED(offset, blocksize) &&
5060             (!len || IS_ALIGNED(len, blocksize)))
5061                 goto out;
5062 
5063         block_start = round_down(from, blocksize);
5064         block_end = block_start + blocksize - 1;
5065 
5066         ret = btrfs_delalloc_reserve_space(inode, &data_reserved,
5067                                            block_start, blocksize);
5068         if (ret)
5069                 goto out;
5070 
5071 again:
5072         page = find_or_create_page(mapping, index, mask);
5073         if (!page) {
5074                 btrfs_delalloc_release_space(inode, data_reserved,
5075                                              block_start, blocksize, true);
5076                 btrfs_delalloc_release_extents(BTRFS_I(inode), blocksize);
5077                 ret = -ENOMEM;
5078                 goto out;
5079         }
5080 
5081         if (!PageUptodate(page)) {
5082                 ret = btrfs_readpage(NULL, page);
5083                 lock_page(page);
5084                 if (page->mapping != mapping) {
5085                         unlock_page(page);
5086                         put_page(page);
5087                         goto again;
5088                 }
5089                 if (!PageUptodate(page)) {
5090                         ret = -EIO;
5091                         goto out_unlock;
5092                 }
5093         }
5094         wait_on_page_writeback(page);
5095 
5096         lock_extent_bits(io_tree, block_start, block_end, &cached_state);
5097         set_page_extent_mapped(page);
5098 
5099         ordered = btrfs_lookup_ordered_extent(inode, block_start);
5100         if (ordered) {
5101                 unlock_extent_cached(io_tree, block_start, block_end,
5102                                      &cached_state);
5103                 unlock_page(page);
5104                 put_page(page);
5105                 btrfs_start_ordered_extent(inode, ordered, 1);
5106                 btrfs_put_ordered_extent(ordered);
5107                 goto again;
5108         }
5109 
5110         clear_extent_bit(&BTRFS_I(inode)->io_tree, block_start, block_end,
5111                          EXTENT_DELALLOC | EXTENT_DO_ACCOUNTING | EXTENT_DEFRAG,
5112                          0, 0, &cached_state);
5113 
5114         ret = btrfs_set_extent_delalloc(inode, block_start, block_end, 0,
5115                                         &cached_state);
5116         if (ret) {
5117                 unlock_extent_cached(io_tree, block_start, block_end,
5118                                      &cached_state);
5119                 goto out_unlock;
5120         }
5121 
5122         if (offset != blocksize) {
5123                 if (!len)
5124                         len = blocksize - offset;
5125                 kaddr = kmap(page);
5126                 if (front)
5127                         memset(kaddr + (block_start - page_offset(page)),
5128                                 0, offset);
5129                 else
5130                         memset(kaddr + (block_start - page_offset(page)) + offset,
5131                                 0, len);
5132                 flush_dcache_page(page);
5133                 kunmap(page);
5134         }
5135         ClearPageChecked(page);
5136         set_page_dirty(page);
5137         unlock_extent_cached(io_tree, block_start, block_end, &cached_state);
5138 
5139 out_unlock:
5140         if (ret)
5141                 btrfs_delalloc_release_space(inode, data_reserved, block_start,
5142                                              blocksize, true);
5143         btrfs_delalloc_release_extents(BTRFS_I(inode), blocksize);
5144         unlock_page(page);
5145         put_page(page);
5146 out:
5147         extent_changeset_free(data_reserved);
5148         return ret;
5149 }
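
/*
 * Usage sketch for btrfs_truncate_block() (offsets illustrative): when
 * punching a hole that is not block aligned, the partial block at each end
 * is zeroed in place rather than dropped:
 *
 *	ret = btrfs_truncate_block(inode, offset, 0, 0);	// zero offset..end of block
 *	ret = btrfs_truncate_block(inode, offset + len, 0, 1);	// zero start of block..offset+len
 *
 * With len == 0 the function zeroes up to the block boundary implied by
 * @front.
 */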
5150 
5151 static int maybe_insert_hole(struct btrfs_root *root, struct inode *inode,
5152                              u64 offset, u64 len)
5153 {
5154         struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
5155         struct btrfs_trans_handle *trans;
5156         int ret;
5157 
5158         /*
5159          * Still need to make sure the inode looks like it's been updated so
5160          * that any holes get logged if we fsync.
5161          */
5162         if (btrfs_fs_incompat(fs_info, NO_HOLES)) {
5163                 BTRFS_I(inode)->last_trans = fs_info->generation;
5164                 BTRFS_I(inode)->last_sub_trans = root->log_transid;
5165                 BTRFS_I(inode)->last_log_commit = root->last_log_commit;
5166                 return 0;
5167         }
5168 
5169         /*
5170          * 1 - for the one we're dropping
5171          * 1 - for the one we're adding
5172          * 1 - for updating the inode.
5173          */
5174         trans = btrfs_start_transaction(root, 3);
5175         if (IS_ERR(trans))
5176                 return PTR_ERR(trans);
5177 
5178         ret = btrfs_drop_extents(trans, root, inode, offset, offset + len, 1);
5179         if (ret) {
5180                 btrfs_abort_transaction(trans, ret);
5181                 btrfs_end_transaction(trans);
5182                 return ret;
5183         }
5184 
5185         ret = btrfs_insert_file_extent(trans, root, btrfs_ino(BTRFS_I(inode)),
5186                         offset, 0, 0, len, 0, len, 0, 0, 0);
5187         if (ret)
5188                 btrfs_abort_transaction(trans, ret);
5189         else
5190                 btrfs_update_inode(trans, root, inode);
5191         btrfs_end_transaction(trans);
5192         return ret;
5193 }
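
/*
 * Illustration of what the btrfs_insert_file_extent() call above creates
 * (following its arguments): without the NO_HOLES feature a hole is an
 * explicit file extent item with a zero disk bytenr:
 *
 *	key: (ino, BTRFS_EXTENT_DATA_KEY, offset)
 *	disk_bytenr == 0, disk_num_bytes == 0, num_bytes == len
 *
 * With NO_HOLES the gap between extent items is itself the hole, so only
 * the in-memory last_trans/last_sub_trans/last_log_commit state is updated.
 */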
5194 
5195 /*
5196  * This function puts in dummy file extents for the area we're creating a hole
5197  * for.  So if we are truncating this file to a larger size we need to insert
5198  * these file extents so that btrfs_get_extent will return an EXTENT_MAP_HOLE
5199  * for the range between oldsize and size.
5200  */
5201 int btrfs_cont_expand(struct inode *inode, loff_t oldsize, loff_t size)
5202 {
5203         struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
5204         struct btrfs_root *root = BTRFS_I(inode)->root;
5205         struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
5206         struct extent_map *em = NULL;
5207         struct extent_state *cached_state = NULL;
5208         struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
5209         u64 hole_start = ALIGN(oldsize, fs_info->sectorsize);
5210         u64 block_end = ALIGN(size, fs_info->sectorsize);
5211         u64 last_byte;
5212         u64 cur_offset;
5213         u64 hole_size;
5214         int err = 0;
5215 
5216         /*
5217          * If our size started in the middle of a block we need to zero out the
5218          * rest of the block before we expand the i_size, otherwise we could
5219          * expose stale data.
5220          */
5221         err = btrfs_truncate_block(inode, oldsize, 0, 0);
5222         if (err)
5223                 return err;
5224 
5225         if (size <= hole_start)
5226                 return 0;
5227 
5228         btrfs_lock_and_flush_ordered_range(io_tree, BTRFS_I(inode), hole_start,
5229                                            block_end - 1, &cached_state);
5230         cur_offset = hole_start;
5231         while (1) {
5232                 em = btrfs_get_extent(BTRFS_I(inode), NULL, 0, cur_offset,
5233                                 block_end - cur_offset, 0);
5234                 if (IS_ERR(em)) {
5235                         err = PTR_ERR(em);
5236                         em = NULL;
5237                         break;
5238                 }
5239                 last_byte = min(extent_map_end(em), block_end);
5240                 last_byte = ALIGN(last_byte, fs_info->sectorsize);
5241                 if (!test_bit(EXTENT_FLAG_PREALLOC, &em->flags)) {
5242                         struct extent_map *hole_em;
5243                         hole_size = last_byte - cur_offset;
5244 
5245                         err = maybe_insert_hole(root, inode, cur_offset,
5246                                                 hole_size);
5247                         if (err)
5248                                 break;
5249                         btrfs_drop_extent_cache(BTRFS_I(inode), cur_offset,
5250                                                 cur_offset + hole_size - 1, 0);
5251                         hole_em = alloc_extent_map();
5252                         if (!hole_em) {
5253                                 set_bit(BTRFS_INODE_NEEDS_FULL_SYNC,
5254                                         &BTRFS_I(inode)->runtime_flags);
5255                                 goto next;
5256                         }
5257                         hole_em->start = cur_offset;
5258                         hole_em->len = hole_size;
5259                         hole_em->orig_start = cur_offset;
5260 
5261                         hole_em->block_start = EXTENT_MAP_HOLE;
5262                         hole_em->block_len = 0;
5263                         hole_em->orig_block_len = 0;
5264                         hole_em->ram_bytes = hole_size;
5265                         hole_em->bdev = fs_info->fs_devices->latest_bdev;
5266                         hole_em->compress_type = BTRFS_COMPRESS_NONE;
5267                         hole_em->generation = fs_info->generation;
5268 
5269                         while (1) {
5270                                 write_lock(&em_tree->lock);
5271                                 err = add_extent_mapping(em_tree, hole_em, 1);
5272                                 write_unlock(&em_tree->lock);
5273                                 if (err != -EEXIST)
5274                                         break;
5275                                 btrfs_drop_extent_cache(BTRFS_I(inode),
5276                                                         cur_offset,
5277                                                         cur_offset +
5278                                                         hole_size - 1, 0);
5279                         }
5280                         free_extent_map(hole_em);
5281                 }
5282 next:
5283                 free_extent_map(em);
5284                 em = NULL;
5285                 cur_offset = last_byte;
5286                 if (cur_offset >= block_end)
5287                         break;
5288         }
5289         free_extent_map(em);
5290         unlock_extent_cached(io_tree, hole_start, block_end - 1, &cached_state);
5291         return err;
5292 }
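
/*
 * Worked example (numbers hypothetical, sectorsize == 4096): expanding a
 * file from oldsize == 5000 to size == 20000 first zeroes bytes 5000..8191
 * of the partially used block via btrfs_truncate_block().  Then hole_start
 * == ALIGN(5000, 4096) == 8192 and block_end == ALIGN(20000, 4096) == 20480,
 * so hole extents (or NO_HOLES bookkeeping) cover 8192..20479.
 */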
5293 
5294 static int btrfs_setsize(struct inode *inode, struct iattr *attr)
5295 {
5296         struct btrfs_root *root = BTRFS_I(inode)->root;
5297         struct btrfs_trans_handle *trans;
5298         loff_t oldsize = i_size_read(inode);
5299         loff_t newsize = attr->ia_size;
5300         int mask = attr->ia_valid;
5301         int ret;
5302 
5303         /*
5304          * The regular truncate() case without ATTR_CTIME and ATTR_MTIME is a
5305          * special case where we need to update the times despite not having
5306          * these flags set.  For all other operations the VFS sets these flags
5307          * explicitly if it wants a timestamp update.
5308          */
5309         if (newsize != oldsize) {
5310                 inode_inc_iversion(inode);
5311                 if (!(mask & (ATTR_CTIME | ATTR_MTIME)))
5312                         inode->i_ctime = inode->i_mtime =
5313                                 current_time(inode);
5314         }
5315 
5316         if (newsize > oldsize) {
5317                 /*
5318                  * Don't do an expanding truncate while snapshotting is ongoing.
5319                  * This is to ensure the snapshot captures a fully consistent
5320                  * state of this file - if the snapshot captures this expanding
5321                  * truncation, it must capture all writes that happened before
5322                  * this truncation.
5323                  */
5324                 btrfs_wait_for_snapshot_creation(root);
5325                 ret = btrfs_cont_expand(inode, oldsize, newsize);
5326                 if (ret) {
5327                         btrfs_end_write_no_snapshotting(root);
5328                         return ret;
5329                 }
5330 
5331                 trans = btrfs_start_transaction(root, 1);
5332                 if (IS_ERR(trans)) {
5333                         btrfs_end_write_no_snapshotting(root);
5334                         return PTR_ERR(trans);
5335                 }
5336 
5337                 i_size_write(inode, newsize);
5338                 btrfs_ordered_update_i_size(inode, i_size_read(inode), NULL);
5339                 pagecache_isize_extended(inode, oldsize, newsize);
5340                 ret = btrfs_update_inode(trans, root, inode);
5341                 btrfs_end_write_no_snapshotting(root);
5342                 btrfs_end_transaction(trans);
5343         } else {
5344 
5345                 /*
5346                  * We're truncating a file that used to have good data down to
5347                  * zero. Make sure it gets into the ordered flush list so that
5348                  * any new writes get down to disk quickly.
5349                  */
5350                 if (newsize == 0)
5351                         set_bit(BTRFS_INODE_ORDERED_DATA_CLOSE,
5352                                 &BTRFS_I(inode)->runtime_flags);
5353 
5354                 truncate_setsize(inode, newsize);
5355 
5356                 /* Disable lockless read DIO to avoid the endless truncate */
5357                 btrfs_inode_block_unlocked_dio(BTRFS_I(inode));
5358                 inode_dio_wait(inode);
5359                 btrfs_inode_resume_unlocked_dio(BTRFS_I(inode));
5360 
5361                 ret = btrfs_truncate(inode, newsize == oldsize);
5362                 if (ret && inode->i_nlink) {
5363                         int err;
5364 
5365                         /*
5366                          * Truncate failed, so fix up the in-memory size. We
5367                          * adjusted disk_i_size down as we removed extents, so
5368                          * wait for disk_i_size to be stable and then update the
5369                          * in-memory size to match.
5370                          */
5371                         err = btrfs_wait_ordered_range(inode, 0, (u64)-1);
5372                         if (err)
5373                                 return err;
5374                         i_size_write(inode, BTRFS_I(inode)->disk_i_size);
5375                 }
5376         }
5377 
5378         return ret;
5379 }
5380 
5381 static int btrfs_setattr(struct dentry *dentry, struct iattr *attr)
5382 {
5383         struct inode *inode = d_inode(dentry);
5384         struct btrfs_root *root = BTRFS_I(inode)->root;
5385         int err;
5386 
5387         if (btrfs_root_readonly(root))
5388                 return -EROFS;
5389 
5390         err = setattr_prepare(dentry, attr);
5391         if (err)
5392                 return err;
5393 
5394         if (S_ISREG(inode->i_mode) && (attr->ia_valid & ATTR_SIZE)) {
5395                 err = btrfs_setsize(inode, attr);
5396                 if (err)
5397                         return err;
5398         }
5399 
5400         if (attr->ia_valid) {
5401                 setattr_copy(inode, attr);
5402                 inode_inc_iversion(inode);
5403                 err = btrfs_dirty_inode(inode);
5404 
5405                 if (!err && attr->ia_valid & ATTR_MODE)
5406                         err = posix_acl_chmod(inode, inode->i_mode);
5407         }
5408 
5409         return err;
5410 }
5411 
5412 /*
5413  * While truncating the inode pages during eviction, we get the VFS calling
5414  * btrfs_invalidatepage() against each page of the inode. This is slow because
5415  * the calls to btrfs_invalidatepage() result in a huge amount of calls to
5416  * lock_extent_bits() and clear_extent_bit(), which keep merging and splitting
5417  * extent_state structures over and over, wasting lots of time.
5418  *
5419  * Therefore if the inode is being evicted, let btrfs_invalidatepage() skip all
5420  * those expensive operations on a per-page basis and do only the ordered io
5421  * finishing, while we release here the extent_map and extent_state structures,
5422  * without the excessive merging and splitting.
5423  */
5424 static void evict_inode_truncate_pages(struct inode *inode)
5425 {
5426         struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
5427         struct extent_map_tree *map_tree = &BTRFS_I(inode)->extent_tree;
5428         struct rb_node *node;
5429 
5430         ASSERT(inode->i_state & I_FREEING);
5431         truncate_inode_pages_final(&inode->i_data);
5432 
5433         write_lock(&map_tree->lock);
5434         while (!RB_EMPTY_ROOT(&map_tree->map.rb_root)) {
5435                 struct extent_map *em;
5436 
5437                 node = rb_first_cached(&map_tree->map);
5438                 em = rb_entry(node, struct extent_map, rb_node);
5439                 clear_bit(EXTENT_FLAG_PINNED, &em->flags);
5440                 clear_bit(EXTENT_FLAG_LOGGING, &em->flags);
5441                 remove_extent_mapping(map_tree, em);
5442                 free_extent_map(em);
5443                 if (need_resched()) {
5444                         write_unlock(&map_tree->lock);
5445                         cond_resched();
5446                         write_lock(&map_tree->lock);
5447                 }
5448         }
5449         write_unlock(&map_tree->lock);
5450 
5451         /*
5452          * Keep looping until we have no more ranges in the io tree.
5453          * We can have ongoing bios started by readpages (called from readahead)
5454          * that have their endio callback (extent_io.c:end_bio_extent_readpage)
5455          * still in progress (unlocked the pages in the bio but did not yet
5456  * unlock the ranges in the io tree). This means some
5457          * ranges can still be locked and eviction started because before
5458          * submitting those bios, which are executed by a separate task (work
5459          * queue kthread), inode references (inode->i_count) were not taken
5460          * (which would be dropped in the end io callback of each bio).
5461          * Therefore here we effectively end up waiting for those bios and
5462          * anyone else holding locked ranges without having bumped the inode's
5463          * reference count - if we don't do it, when they access the inode's
5464  * io_tree to unlock a range it may be too late, leading to a
5465          * use-after-free issue.
5466          */
5467         spin_lock(&io_tree->lock);
5468         while (!RB_EMPTY_ROOT(&io_tree->state)) {
5469                 struct extent_state *state;
5470                 struct extent_state *cached_state = NULL;
5471                 u64 start;
5472                 u64 end;
5473                 unsigned state_flags;
5474 
5475                 node = rb_first(&io_tree->state);
5476                 state = rb_entry(node, struct extent_state, rb_node);
5477                 start = state->start;
5478                 end = state->end;
5479                 state_flags = state->state;
5480                 spin_unlock(&io_tree->lock);
5481 
5482                 lock_extent_bits(io_tree, start, end, &cached_state);
5483 
5484                 /*
5485                  * If the range still has the DELALLOC flag, the extent didn't
5486                  * reach disk, and its reserved space won't be freed by a delayed ref.
5487                  * So we need to free its reserved space here.
5488                  * (Refer to comment in btrfs_invalidatepage, case 2)
5489                  *
5490                  * Note, end is the bytenr of last byte, so we need + 1 here.
5491                  */
5492                 if (state_flags & EXTENT_DELALLOC)
5493                         btrfs_qgroup_free_data(inode, NULL, start, end - start + 1);
5494 
5495                 clear_extent_bit(io_tree, start, end,
5496                                  EXTENT_LOCKED | EXTENT_DELALLOC |
5497                                  EXTENT_DO_ACCOUNTING | EXTENT_DEFRAG, 1, 1,
5498                                  &cached_state);
5499 
5500                 cond_resched();
5501                 spin_lock(&io_tree->lock);
5502         }
5503         spin_unlock(&io_tree->lock);
5504 }
5505 
5506 static struct btrfs_trans_handle *evict_refill_and_join(struct btrfs_root *root,
5507                                                         struct btrfs_block_rsv *rsv)
5508 {
5509         struct btrfs_fs_info *fs_info = root->fs_info;
5510         struct btrfs_block_rsv *global_rsv = &fs_info->global_block_rsv;
5511         struct btrfs_trans_handle *trans;
5512         u64 delayed_refs_extra = btrfs_calc_insert_metadata_size(fs_info, 1);
5513         int ret;
5514 
5515         /*
5516          * Eviction should be taking place somewhere safe with respect to our
5517          * delayed iputs.  However the normal flushing code will run delayed
5518          * iputs, so we cannot use FLUSH_ALL otherwise we'll deadlock.
5519          *
5520          * We reserve the delayed_refs_extra here again because we can't use
5521          * btrfs_start_transaction(root, 0) for the same deadlocky reason as
5522          * above.  We reserve our extra bit here because we generate a ton of
5523          * delayed refs activity by truncating.
5524          *
5525          * If we cannot make our reservation we'll attempt to steal from the
5526          * global reserve, because we really want to be able to free up space.
5527          */
5528         ret = btrfs_block_rsv_refill(root, rsv, rsv->size + delayed_refs_extra,
5529                                      BTRFS_RESERVE_FLUSH_EVICT);
5530         if (ret) {
5531                 /*
5532                  * Try to steal from the global reserve if there is space for
5533                  * it.
5534                  */
5535                 if (btrfs_check_space_for_delayed_refs(fs_info) ||
5536                     btrfs_block_rsv_migrate(global_rsv, rsv, rsv->size, 0)) {
5537                         btrfs_warn(fs_info,
5538                                    "could not allocate space for delete; will truncate on mount");
5539                         return ERR_PTR(-ENOSPC);
5540                 }
5541                 delayed_refs_extra = 0;
5542         }
5543 
5544         trans = btrfs_join_transaction(root);
5545         if (IS_ERR(trans))
5546                 return trans;
5547 
5548         if (delayed_refs_extra) {
5549                 trans->block_rsv = &fs_info->trans_block_rsv;
5550                 trans->bytes_reserved = delayed_refs_extra;
5551                 btrfs_block_rsv_migrate(rsv, trans->block_rsv,
5552                                         delayed_refs_extra, 1);
5553         }
5554         return trans;
5555 }
5556 
5557 void btrfs_evict_inode(struct inode *inode)
5558 {
5559         struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
5560         struct btrfs_trans_handle *trans;
5561         struct btrfs_root *root = BTRFS_I(inode)->root;
5562         struct btrfs_block_rsv *rsv;
5563         int ret;
5564 
5565         trace_btrfs_inode_evict(inode);
5566 
5567         if (!root) {
5568                 clear_inode(inode);
5569                 return;
5570         }
5571 
5572         evict_inode_truncate_pages(inode);
5573 
5574         if (inode->i_nlink &&
5575             ((btrfs_root_refs(&root->root_item) != 0 &&
5576               root->root_key.objectid != BTRFS_ROOT_TREE_OBJECTID) ||
5577              btrfs_is_free_space_inode(BTRFS_I(inode))))
5578                 goto no_delete;
5579 
5580         if (is_bad_inode(inode))
5581                 goto no_delete;
5582 
5583         btrfs_free_io_failure_record(BTRFS_I(inode), 0, (u64)-1);
5584 
5585         if (test_bit(BTRFS_FS_LOG_RECOVERING, &fs_info->flags))
5586                 goto no_delete;
5587 
5588         if (inode->i_nlink > 0) {
5589                 BUG_ON(btrfs_root_refs(&root->root_item) != 0 &&
5590                        root->root_key.objectid != BTRFS_ROOT_TREE_OBJECTID);
5591                 goto no_delete;
5592         }
5593 
5594         ret = btrfs_commit_inode_delayed_inode(BTRFS_I(inode));
5595         if (ret)
5596                 goto no_delete;
5597 
5598         rsv = btrfs_alloc_block_rsv(fs_info, BTRFS_BLOCK_RSV_TEMP);
5599         if (!rsv)
5600                 goto no_delete;
5601         rsv->size = btrfs_calc_metadata_size(fs_info, 1);
5602         rsv->failfast = 1;
5603 
5604         btrfs_i_size_write(BTRFS_I(inode), 0);
5605 
5606         while (1) {
5607                 trans = evict_refill_and_join(root, rsv);
5608                 if (IS_ERR(trans))
5609                         goto free_rsv;
5610 
5611                 trans->block_rsv = rsv;
5612 
5613                 ret = btrfs_truncate_inode_items(trans, root, inode, 0, 0);
5614                 trans->block_rsv = &fs_info->trans_block_rsv;
5615                 btrfs_end_transaction(trans);
5616                 btrfs_btree_balance_dirty(fs_info);
5617                 if (ret && ret != -ENOSPC && ret != -EAGAIN)
5618                         goto free_rsv;
5619                 else if (!ret)
5620                         break;
5621         }
5622 
5623         /*
5624          * Errors here aren't a big deal, it just means we leave orphan items in
5625          * the tree. They will be cleaned up on the next mount. If the inode
5626          * number gets reused, cleanup deletes the orphan item without doing
5627          * anything, and unlink reuses the existing orphan item.
5628          *
5629          * If it turns out that we are dropping too many of these, we might want
5630          * to add a mechanism for retrying these after a commit.
5631          */
5632         trans = evict_refill_and_join(root, rsv);
5633         if (!IS_ERR(trans)) {
5634                 trans->block_rsv = rsv;
5635                 btrfs_orphan_del(trans, BTRFS_I(inode));
5636                 trans->block_rsv = &fs_info->trans_block_rsv;
5637                 btrfs_end_transaction(trans);
5638         }
5639 
5640         if (!(root == fs_info->tree_root ||
5641               root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID))
5642                 btrfs_return_ino(root, btrfs_ino(BTRFS_I(inode)));
5643 
5644 free_rsv:
5645         btrfs_free_block_rsv(fs_info, rsv);
5646 no_delete:
5647         /*
5648          * If we didn't successfully delete, the orphan item will still be in
5649          * the tree and we'll retry on the next mount. Again, we might also want
5650          * to retry these periodically in the future.
5651          */
5652         btrfs_remove_delayed_node(BTRFS_I(inode));
5653         clear_inode(inode);
5654 }
5655 
5656 /*
5657  * Return the key found in the dir entry in the location pointer, fill @type
5658  * with BTRFS_FT_*, and return 0.
5659  *
5660  * If no dir entries were found, returns -ENOENT.
5661  * If the location found in the dir entry is corrupted, returns -EUCLEAN.
5662  */
5663 static int btrfs_inode_by_name(struct inode *dir, struct dentry *dentry,
5664                                struct btrfs_key *location, u8 *type)
5665 {
5666         const char *name = dentry->d_name.name;
5667         int namelen = dentry->d_name.len;
5668         struct btrfs_dir_item *di;
5669         struct btrfs_path *path;
5670         struct btrfs_root *root = BTRFS_I(dir)->root;
5671         int ret = 0;
5672 
5673         path = btrfs_alloc_path();
5674         if (!path)
5675                 return -ENOMEM;
5676 
5677         di = btrfs_lookup_dir_item(NULL, root, path, btrfs_ino(BTRFS_I(dir)),
5678                         name, namelen, 0);
5679         if (IS_ERR_OR_NULL(di)) {
5680                 ret = di ? PTR_ERR(di) : -ENOENT;
5681                 goto out;
5682         }
5683 
5684         btrfs_dir_item_key_to_cpu(path->nodes[0], di, location);
5685         if (location->type != BTRFS_INODE_ITEM_KEY &&
5686             location->type != BTRFS_ROOT_ITEM_KEY) {
5687                 ret = -EUCLEAN;
5688                 btrfs_warn(root->fs_info,
5689 "%s gets something invalid in DIR_ITEM (name %s, directory ino %llu, location(%llu %u %llu))",
5690                            __func__, name, btrfs_ino(BTRFS_I(dir)),
5691                            location->objectid, location->type, location->offset);
5692         }
5693         if (!ret)
5694                 *type = btrfs_dir_type(path->nodes[0], di);
5695 out:
5696         btrfs_free_path(path);
5697         return ret;
5698 }
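
/*
 * Sketch of the two location types a dir entry can resolve to (ids
 * hypothetical).  A plain file or directory yields
 *
 *	(objectid == inode number, BTRFS_INODE_ITEM_KEY, offset == 0)
 *
 * while an entry that is a subvolume or snapshot yields
 *
 *	(objectid == subvolume id, BTRFS_ROOT_ITEM_KEY, offset typically (u64)-1)
 *
 * and is handed to fixup_tree_root_location() by btrfs_lookup_dentry().
 */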
5699 
5700 /*
5701  * when we hit a tree root in a directory, the btrfs part of the inode
5702  * needs to be changed to reflect the root directory of the tree root.  This
5703  * is kind of like crossing a mount point.
5704  */
5705 static int fixup_tree_root_location(struct btrfs_fs_info *fs_info,
5706                                     struct inode *dir,
5707                                     struct dentry *dentry,
5708                                     struct btrfs_key *location,
5709                                     struct btrfs_root **sub_root)
5710 {
5711         struct btrfs_path *path;
5712         struct btrfs_root *new_root;
5713         struct btrfs_root_ref *ref;
5714         struct extent_buffer *leaf;
5715         struct btrfs_key key;
5716         int ret;
5717         int err = 0;
5718 
5719         path = btrfs_alloc_path();
5720         if (!path) {
5721                 err = -ENOMEM;
5722                 goto out;
5723         }
5724 
5725         err = -ENOENT;
5726         key.objectid = BTRFS_I(dir)->root->root_key.objectid;
5727         key.type = BTRFS_ROOT_REF_KEY;
5728         key.offset = location->objectid;
5729 
5730         ret = btrfs_search_slot(NULL, fs_info->tree_root, &key, path, 0, 0);
5731         if (ret) {
5732                 if (ret < 0)
5733                         err = ret;
5734                 goto out;
5735         }
5736 
5737         leaf = path->nodes[0];
5738         ref = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_root_ref);
5739         if (btrfs_root_ref_dirid(leaf, ref) != btrfs_ino(BTRFS_I(dir)) ||
5740             btrfs_root_ref_name_len(leaf, ref) != dentry->d_name.len)
5741                 goto out;
5742 
5743         ret = memcmp_extent_buffer(leaf, dentry->d_name.name,
5744                                    (unsigned long)(ref + 1),
5745                                    dentry->d_name.len);
5746         if (ret)
5747                 goto out;
5748 
5749         btrfs_release_path(path);
5750 
5751         new_root = btrfs_read_fs_root_no_name(fs_info, location);
5752         if (IS_ERR(new_root)) {
5753                 err = PTR_ERR(new_root);
5754                 goto out;
5755         }
5756 
5757         *sub_root = new_root;
5758         location->objectid = btrfs_root_dirid(&new_root->root_item);
5759         location->type = BTRFS_INODE_ITEM_KEY;
5760         location->offset = 0;
5761         err = 0;
5762 out:
5763         btrfs_free_path(path);
5764         return err;
5765 }
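
/*
 * Example of the ROOT_REF item consulted above (ids hypothetical): a
 * subvolume with id 257, linked as "snap" in directory ino 256 of the
 * top-level subvolume (id 5), is described in the root tree by
 *
 *	key: (5, BTRFS_ROOT_REF_KEY, 257)
 *	item: dirid == 256, name == "snap"
 *
 * Matching dirid and name here confirms the dentry really refers to that
 * subvolume before the lookup crosses into its root.
 */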
5766 
5767 static void inode_tree_add(struct inode *inode)
5768 {
5769         struct btrfs_root *root = BTRFS_I(inode)->root;
5770         struct btrfs_inode *entry;
5771         struct rb_node **p;
5772         struct rb_node *parent;
5773         struct rb_node *new = &BTRFS_I(inode)->rb_node;
5774         u64 ino = btrfs_ino(BTRFS_I(inode));
5775 
5776         if (inode_unhashed(inode))
5777                 return;
5778         parent = NULL;
5779         spin_lock(&root->inode_lock);
5780         p = &root->inode_tree.rb_node;
5781         while (*p) {
5782                 parent = *p;
5783                 entry = rb_entry(parent, struct btrfs_inode, rb_node);
5784 
5785                 if (ino < btrfs_ino(entry))
5786                         p = &parent->rb_left;
5787                 else if (ino > btrfs_ino(entry))
5788                         p = &parent->rb_right;
5789                 else {
5790                         WARN_ON(!(entry->vfs_inode.i_state &
5791                                   (I_WILL_FREE | I_FREEING)));
5792                         rb_replace_node(parent, new, &root->inode_tree);
5793                         RB_CLEAR_NODE(parent);
5794                         spin_unlock(&root->inode_lock);
5795                         return;
5796                 }
5797         }
5798         rb_link_node(new, parent, p);
5799         rb_insert_color(new, &root->inode_tree);
5800         spin_unlock(&root->inode_lock);
5801 }
5802 
5803 static void inode_tree_del(struct inode *inode)
5804 {
5805         struct btrfs_root *root = BTRFS_I(inode)->root;
5806         int empty = 0;
5807 
5808         spin_lock(&root->inode_lock);
5809         if (!RB_EMPTY_NODE(&BTRFS_I(inode)->rb_node)) {
5810                 rb_erase(&BTRFS_I(inode)->rb_node, &root->inode_tree);
5811                 RB_CLEAR_NODE(&BTRFS_I(inode)->rb_node);
5812                 empty = RB_EMPTY_ROOT(&root->inode_tree);
5813         }
5814         spin_unlock(&root->inode_lock);
5815 
5816         if (empty && btrfs_root_refs(&root->root_item) == 0) {
5817                 spin_lock(&root->inode_lock);
5818                 empty = RB_EMPTY_ROOT(&root->inode_tree);
5819                 spin_unlock(&root->inode_lock);
5820                 if (empty)
5821                         btrfs_add_dead_root(root);
5822         }
5823 }
5824 
5825 
5826 static int btrfs_init_locked_inode(struct inode *inode, void *p)
5827 {
5828         struct btrfs_iget_args *args = p;
5829         inode->i_ino = args->location->objectid;
5830         memcpy(&BTRFS_I(inode)->location, args->location,
5831                sizeof(*args->location));
5832         BTRFS_I(inode)->root = args->root;
5833         return 0;
5834 }
5835 
5836 static int btrfs_find_actor(struct inode *inode, void *opaque)
5837 {
5838         struct btrfs_iget_args *args = opaque;
5839         return args->location->objectid == BTRFS_I(inode)->location.objectid &&
5840                 args->root == BTRFS_I(inode)->root;
5841 }
5842 
5843 static struct inode *btrfs_iget_locked(struct super_block *s,
5844                                        struct btrfs_key *location,
5845                                        struct btrfs_root *root)
5846 {
5847         struct inode *inode;
5848         struct btrfs_iget_args args;
5849         unsigned long hashval = btrfs_inode_hash(location->objectid, root);
5850 
5851         args.location = location;
5852         args.root = root;
5853 
5854         inode = iget5_locked(s, hashval, btrfs_find_actor,
5855                              btrfs_init_locked_inode,
5856                              (void *)&args);
5857         return inode;
5858 }
5859 
5860 /* Get an inode object given its location and corresponding root.
5861  * Returns in *new whether the inode was read from disk.
5862  */
5863 struct inode *btrfs_iget_path(struct super_block *s, struct btrfs_key *location,
5864                               struct btrfs_root *root, int *new,
5865                               struct btrfs_path *path)
5866 {
5867         struct inode *inode;
5868 
5869         inode = btrfs_iget_locked(s, location, root);
5870         if (!inode)
5871                 return ERR_PTR(-ENOMEM);
5872 
5873         if (inode->i_state & I_NEW) {
5874                 int ret;
5875 
5876                 ret = btrfs_read_locked_inode(inode, path);
5877                 if (!ret) {
5878                         inode_tree_add(inode);
5879                         unlock_new_inode(inode);
5880                         if (new)
5881                                 *new = 1;
5882                 } else {
5883                         iget_failed(inode);
5884                         /*
5885                          * ret > 0 can come from btrfs_search_slot called by
5886                          * btrfs_read_locked_inode, which means the inode item
5887                          * was not found.
5888                          */
5889                         if (ret > 0)
5890                                 ret = -ENOENT;
5891                         inode = ERR_PTR(ret);
5892                 }
5893         }
5894 
5895         return inode;
5896 }
5897 
5898 struct inode *btrfs_iget(struct super_block *s, struct btrfs_key *location,
5899                          struct btrfs_root *root, int *new)
5900 {
5901         return btrfs_iget_path(s, location, root, new, NULL);
5902 }
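
/*
 * Minimal usage sketch for btrfs_iget() (the inode number, "sb" and "root"
 * are placeholders):
 *
 *	struct btrfs_key location = {
 *		.objectid = 257,	// inode number
 *		.type = BTRFS_INODE_ITEM_KEY,
 *		.offset = 0,
 *	};
 *	struct inode *inode = btrfs_iget(sb, &location, root, NULL);
 *	if (IS_ERR(inode))
 *		return PTR_ERR(inode);
 *
 * A NULL "new" pointer means the caller doesn't care whether the inode was
 * read from disk or found in the inode cache.
 */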
5903 
5904 static struct inode *new_simple_dir(struct super_block *s,
5905                                     struct btrfs_key *key,
5906                                     struct btrfs_root *root)
5907 {
5908         struct inode *inode = new_inode(s);
5909 
5910         if (!inode)
5911                 return ERR_PTR(-ENOMEM);
5912 
5913         BTRFS_I(inode)->root = root;
5914         memcpy(&BTRFS_I(inode)->location, key, sizeof(*key));
5915         set_bit(BTRFS_INODE_DUMMY, &BTRFS_I(inode)->runtime_flags);
5916 
5917         inode->i_ino = BTRFS_EMPTY_SUBVOL_DIR_OBJECTID;
5918         inode->i_op = &btrfs_dir_ro_inode_operations;
5919         inode->i_opflags &= ~IOP_XATTR;
5920         inode->i_fop = &simple_dir_operations;
5921         inode->i_mode = S_IFDIR | S_IRUGO | S_IWUSR | S_IXUGO;
5922         inode->i_mtime = current_time(inode);
5923         inode->i_atime = inode->i_mtime;
5924         inode->i_ctime = inode->i_mtime;
5925         BTRFS_I(inode)->i_otime = inode->i_mtime;
5926 
5927         return inode;
5928 }
5929 
5930 static inline u8 btrfs_inode_type(struct inode *inode)
5931 {
5932         /*
5933          * Compile-time asserts that generic FT_* types still match
5934          * BTRFS_FT_* types
5935          */
5936         BUILD_BUG_ON(BTRFS_FT_UNKNOWN != FT_UNKNOWN);
5937         BUILD_BUG_ON(BTRFS_FT_REG_FILE != FT_REG_FILE);
5938         BUILD_BUG_ON(BTRFS_FT_DIR != FT_DIR);
5939         BUILD_BUG_ON(BTRFS_FT_CHRDEV != FT_CHRDEV);
5940         BUILD_BUG_ON(BTRFS_FT_BLKDEV != FT_BLKDEV);
5941         BUILD_BUG_ON(BTRFS_FT_FIFO != FT_FIFO);
5942         BUILD_BUG_ON(BTRFS_FT_SOCK != FT_SOCK);
5943         BUILD_BUG_ON(BTRFS_FT_SYMLINK != FT_SYMLINK);
5944 
5945         return fs_umode_to_ftype(inode->i_mode);
5946 }
5947 
5948 struct inode *btrfs_lookup_dentry(struct inode *dir, struct dentry *dentry)
5949 {
5950         struct btrfs_fs_info *fs_info = btrfs_sb(dir->i_sb);
5951         struct inode *inode;
5952         struct btrfs_root *root = BTRFS_I(dir)->root;
5953         struct btrfs_root *sub_root = root;
5954         struct btrfs_key location;
5955         u8 di_type = 0;
5956         int index;
5957         int ret = 0;
5958 
5959         if (dentry->d_name.len > BTRFS_NAME_LEN)
5960                 return ERR_PTR(-ENAMETOOLONG);
5961 
5962         ret = btrfs_inode_by_name(dir, dentry, &location, &di_type);
5963         if (ret < 0)
5964                 return ERR_PTR(ret);
5965 
5966         if (location.type == BTRFS_INODE_ITEM_KEY) {
5967                 inode = btrfs_iget(dir->i_sb, &location, root, NULL);
5968                 if (IS_ERR(inode))
5969                         return inode;
5970 
5971                 /* Do extra check against inode mode with di_type */
5972                 if (btrfs_inode_type(inode) != di_type) {
5973                         btrfs_crit(fs_info,
5974 "inode mode mismatch with dir: inode mode=0%o btrfs type=%u dir type=%u",
5975                                   inode->i_mode, btrfs_inode_type(inode),
5976                                   di_type);
5977                         iput(inode);
5978                         return ERR_PTR(-EUCLEAN);
5979                 }
5980                 return inode;
5981         }
5982 
5983         index = srcu_read_lock(&fs_info->subvol_srcu);
5984         ret = fixup_tree_root_location(fs_info, dir, dentry,
5985                                        &location, &sub_root);
5986         if (ret < 0) {
5987                 if (ret != -ENOENT)
5988                         inode = ERR_PTR(ret);
5989                 else
5990                         inode = new_simple_dir(dir->i_sb, &location, sub_root);
5991         } else {
5992                 inode = btrfs_iget(dir->i_sb, &location, sub_root, NULL);
5993         }
5994         srcu_read_unlock(&fs_info->subvol_srcu, index);
5995 
5996         if (!IS_ERR(inode) && root != sub_root) {
5997                 down_read(&fs_info->cleanup_work_sem);
5998                 if (!sb_rdonly(inode->i_sb))
5999                         ret = btrfs_orphan_cleanup(sub_root);
6000                 up_read(&fs_info->cleanup_work_sem);
6001                 if (ret) {
6002                         iput(inode);
6003                         inode = ERR_PTR(ret);
6004                 }
6005         }
6006 
6007         return inode;
6008 }
6009 
6010 static int btrfs_dentry_delete(const struct dentry *dentry)
6011 {
6012         struct btrfs_root *root;
6013         struct inode *inode = d_inode(dentry);
6014 
6015         if (!inode && !IS_ROOT(dentry))
6016                 inode = d_inode(dentry->d_parent);
6017 
6018         if (inode) {
6019                 root = BTRFS_I(inode)->root;
6020                 if (btrfs_root_refs(&root->root_item) == 0)
6021                         return 1;
6022 
6023                 if (btrfs_ino(BTRFS_I(inode)) == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID)
6024                         return 1;
6025         }
6026         return 0;
6027 }
6028 
6029 static struct dentry *btrfs_lookup(struct inode *dir, struct dentry *dentry,
6030                                    unsigned int flags)
6031 {
6032         struct inode *inode = btrfs_lookup_dentry(dir, dentry);
6033 
6034         if (inode == ERR_PTR(-ENOENT))
6035                 inode = NULL;
6036         return d_splice_alias(inode, dentry);
6037 }
6038 
6039 /*
6040  * All this infrastructure exists because dir_emit can fault, and we are holding
6041  * the tree lock when doing readdir.  For now just allocate a buffer and copy
6042  * our information into that, and then dir_emit from the buffer.  This is
6043  * similar to what NFS does, only we don't keep the buffer around in pagecache
6044  * because I'm afraid I'll mess that up.  Long term we need to make filldir do
6045  * copy_to_user_inatomic so we don't have to worry about page faulting under the
6046  * tree lock.
6047  */
6048 static int btrfs_opendir(struct inode *inode, struct file *file)
6049 {
6050         struct btrfs_file_private *private;
6051 
6052         private = kzalloc(sizeof(struct btrfs_file_private), GFP_KERNEL);
6053         if (!private)
6054                 return -ENOMEM;
6055         private->filldir_buf = kzalloc(PAGE_SIZE, GFP_KERNEL);
6056         if (!private->filldir_buf) {
6057                 kfree(private);
6058                 return -ENOMEM;
6059         }
6060         file->private_data = private;
6061         return 0;
6062 }
6063 
6064 struct dir_entry {
6065         u64 ino;
6066         u64 offset;
6067         unsigned type;
6068         int name_len;
6069 };
6070 
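/*
 * Layout of private->filldir_buf as filled by btrfs_real_readdir() and
 * drained by btrfs_filldir() below (a sketch; entry sizes vary with the
 * name length):
 *
 *	[struct dir_entry][name bytes][struct dir_entry][name bytes]...
 *
 * Because names have arbitrary lengths, subsequent dir_entry headers are
 * not naturally aligned, which is why every field access goes through
 * get_unaligned()/put_unaligned().
 */
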
6071 static int btrfs_filldir(void *addr, int entries, struct dir_context *ctx)
6072 {
6073         while (entries--) {
6074                 struct dir_entry *entry = addr;
6075                 char *name = (char *)(entry + 1);
6076 
6077                 ctx->pos = get_unaligned(&entry->offset);
6078                 if (!dir_emit(ctx, name, get_unaligned(&entry->name_len),
6079                                          get_unaligned(&entry->ino),
6080                                          get_unaligned(&entry->type)))
6081                         return 1;
6082                 addr += sizeof(struct dir_entry) +
6083                         get_unaligned(&entry->name_len);
6084                 ctx->pos++;
6085         }
6086         return 0;
6087 }
6088 
6089 static int btrfs_real_readdir(struct file *file, struct dir_context *ctx)
6090 {
6091         struct inode *inode = file_inode(file);
6092         struct btrfs_root *root = BTRFS_I(inode)->root;
6093         struct btrfs_file_private *private = file->private_data;
6094         struct btrfs_dir_item *di;
6095         struct btrfs_key key;
6096         struct btrfs_key found_key;
6097         struct btrfs_path *path;
6098         void *addr;
6099         struct list_head ins_list;
6100         struct list_head del_list;
6101         int ret;
6102         struct extent_buffer *leaf;
6103         int slot;
6104         char *name_ptr;
6105         int name_len;
6106         int entries = 0;
6107         int total_len = 0;
6108         bool put = false;
6109         struct btrfs_key location;
6110 
6111         if (!dir_emit_dots(file, ctx))
6112                 return 0;
6113 
6114         path = btrfs_alloc_path();
6115         if (!path)
6116                 return -ENOMEM;
6117 
6118         addr = private->filldir_buf;
6119         path->reada = READA_FORWARD;
6120 
6121         INIT_LIST_HEAD(&ins_list);
6122         INIT_LIST_HEAD(&del_list);
6123         put = btrfs_readdir_get_delayed_items(inode, &ins_list, &del_list);
6124 
6125 again:
6126         key.type = BTRFS_DIR_INDEX_KEY;
6127         key.offset = ctx->pos;
6128         key.objectid = btrfs_ino(BTRFS_I(inode));
6129 
6130         ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
6131         if (ret < 0)
6132                 goto err;
6133 
6134         while (1) {
6135                 struct dir_entry *entry;
6136 
6137                 leaf = path->nodes[0];
6138                 slot = path->slots[0];
6139                 if (slot >= btrfs_header_nritems(leaf)) {
6140                         ret = btrfs_next_leaf(root, path);
6141                         if (ret < 0)
6142                                 goto err;
6143                         else if (ret > 0)
6144                                 break;
6145                         continue;
6146                 }
6147 
6148                 btrfs_item_key_to_cpu(leaf, &found_key, slot);
6149 
6150                 if (found_key.objectid != key.objectid)
6151                         break;
6152                 if (found_key.type != BTRFS_DIR_INDEX_KEY)
6153                         break;
6154                 if (found_key.offset < ctx->pos)
6155                         goto next;
6156                 if (btrfs_should_delete_dir_index(&del_list, found_key.offset))
6157                         goto next;
6158                 di = btrfs_item_ptr(leaf, slot, struct btrfs_dir_item);
6159                 name_len = btrfs_dir_name_len(leaf, di);
6160                 if ((total_len + sizeof(struct dir_entry) + name_len) >=
6161                     PAGE_SIZE) {
6162                         btrfs_release_path(path);
6163                         ret = btrfs_filldir(private->filldir_buf, entries, ctx);
6164                         if (ret)
6165                                 goto nopos;
6166                         addr = private->filldir_buf;
6167                         entries = 0;
6168                         total_len = 0;
6169                         goto again;
6170                 }
6171 
6172                 entry = addr;
6173                 put_unaligned(name_len, &entry->name_len);
6174                 name_ptr = (char *)(entry + 1);
6175                 read_extent_buffer(leaf, name_ptr, (unsigned long)(di + 1),
6176                                    name_len);
6177                 put_unaligned(fs_ftype_to_dtype(btrfs_dir_type(leaf, di)),
6178                                 &entry->type);
6179                 btrfs_dir_item_key_to_cpu(leaf, di, &location);
6180                 put_unaligned(location.objectid, &entry->ino);
6181                 put_unaligned(found_key.offset, &entry->offset);
6182                 entries++;
6183                 addr += sizeof(struct dir_entry) + name_len;
6184                 total_len += sizeof(struct dir_entry) + name_len;
6185 next:
6186                 path->slots[0]++;
6187         }
6188         btrfs_release_path(path);
6189 
6190         ret = btrfs_filldir(private->filldir_buf, entries, ctx);
6191         if (ret)
6192                 goto nopos;
6193 
6194         ret = btrfs_readdir_delayed_dir_index(ctx, &ins_list);
6195         if (ret)
6196                 goto nopos;
6197 
6198         /*
6199          * Stop new entries from being returned after we return the last
6200          * entry.
6201          *
6202          * New directory entries are assigned a strictly increasing
6203          * offset.  This means that new entries created during readdir
6204          * are *guaranteed* to be seen in the future by that readdir.
6205          * This has broken buggy programs which operate on names as
6206          * they're returned by readdir.  Until we re-use freed offsets
6207          * we have this hack to stop new entries from being returned
6208          * under the assumption that they'll never reach this huge
6209          * offset.
6210          *
6211          * This is being careful not to overflow 32bit loff_t unless the
6212          * last entry requires it because doing so has broken 32bit apps
6213          * in the past.
6214          */
6215         if (ctx->pos >= INT_MAX)
6216                 ctx->pos = LLONG_MAX;
6217         else
6218                 ctx->pos = INT_MAX;
6219 nopos:
6220         ret = 0;
6221 err:
6222         if (put)
6223                 btrfs_readdir_put_delayed_items(inode, &ins_list, &del_list);
6224         btrfs_free_path(path);
6225         return ret;
6226 }
6227 
6228 /*
6229  * This is somewhat expensive, updating the tree every time the
6230  * inode changes.  But, it is most likely to find the inode in cache.
6231  * FIXME: needs more benchmarking; there are no reasons other than performance
6232  * to keep or drop this code.
6233  */
6234 static int btrfs_dirty_inode(struct inode *inode)
6235 {
6236         struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
6237         struct btrfs_root *root = BTRFS_I(inode)->root;
6238         struct btrfs_trans_handle *trans;
6239         int ret;
6240 
6241         if (test_bit(BTRFS_INODE_DUMMY, &BTRFS_I(inode)->runtime_flags))
6242                 return 0;
6243 
6244         trans = btrfs_join_transaction(root);
6245         if (IS_ERR(trans))
6246                 return PTR_ERR(trans);
6247 
6248         ret = btrfs_update_inode(trans, root, inode);
6249         if (ret == -ENOSPC) {
6250                 /* whoops, let's try again with the full transaction */
6251                 btrfs_end_transaction(trans);
6252                 trans = btrfs_start_transaction(root, 1);
6253                 if (IS_ERR(trans))
6254                         return PTR_ERR(trans);
6255 
6256                 ret = btrfs_update_inode(trans, root, inode);
6257         }
6258         btrfs_end_transaction(trans);
6259         if (BTRFS_I(inode)->delayed_node)
6260                 btrfs_balance_delayed_items(fs_info);
6261 
6262         return ret;
6263 }
6264 
6265 /*
6266  * This is a copy of file_update_time.  We need it so we can return an error
6267  * on ENOSPC when updating the inode for file writes and mmap writes.
6268  */
6269 static int btrfs_update_time(struct inode *inode, struct timespec64 *now,
6270                              int flags)
6271 {
6272         struct btrfs_root *root = BTRFS_I(inode)->root;
6273         bool dirty = flags & ~S_VERSION;
6274 
6275         if (btrfs_root_readonly(root))
6276                 return -EROFS;
6277 
6278         if (flags & S_VERSION)
6279                 dirty |= inode_maybe_inc_iversion(inode, dirty);
6280         if (flags & S_CTIME)
6281                 inode->i_ctime = *now;
6282         if (flags & S_MTIME)
6283                 inode->i_mtime = *now;
6284         if (flags & S_ATIME)
6285                 inode->i_atime = *now;
6286         return dirty ? btrfs_dirty_inode(inode) : 0;
6287 }
6288 
6289 /*
6290  * find the highest existing sequence number in a directory
6291  * and then set the in-memory index_cnt variable to the next
6292  * free sequence number
6293  */
6294 static int btrfs_set_inode_index_count(struct btrfs_inode *inode)
6295 {
6296         struct btrfs_root *root = inode->root;
6297         struct btrfs_key key, found_key;
6298         struct btrfs_path *path;
6299         struct extent_buffer *leaf;
6300         int ret;
6301 
6302         key.objectid = btrfs_ino(inode);
6303         key.type = BTRFS_DIR_INDEX_KEY;
6304         key.offset = (u64)-1;
6305 
6306         path = btrfs_alloc_path();
6307         if (!path)
6308                 return -ENOMEM;
6309 
6310         ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
6311         if (ret < 0)
6312                 goto out;
6313         /* FIXME: we should be able to handle this */
6314         if (ret == 0)
6315                 goto out;
6316         ret = 0;
6317 
6318         /*
6319          * MAGIC NUMBER EXPLANATION:
6320          * since we search a directory based on f_pos, and '.' and '..'
6321          * have f_pos of 0 and 1 respectively, every other entry
6322          * has to start at 2
6323          */
6324         if (path->slots[0] == 0) {
6325                 inode->index_cnt = 2;
6326                 goto out;
6327         }
6328 
6329         path->slots[0]--;
6330 
6331         leaf = path->nodes[0];
6332         btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
6333 
6334         if (found_key.objectid != btrfs_ino(inode) ||
6335             found_key.type != BTRFS_DIR_INDEX_KEY) {
6336                 inode->index_cnt = 2;
6337                 goto out;
6338         }
6339 
6340         inode->index_cnt = found_key.offset + 1;
6341 out:
6342         btrfs_free_path(path);
6343         return ret;
6344 }
6345 
6346 /*
6347  * helper to find a free sequence number in a given directory.  The current
6348  * code is very simple; later versions will do smarter things in the btree
6349  */
6350 int btrfs_set_inode_index(struct btrfs_inode *dir, u64 *index)
6351 {
6352         int ret = 0;
6353 
6354         if (dir->index_cnt == (u64)-1) {
6355                 ret = btrfs_inode_delayed_dir_index_count(dir);
6356                 if (ret) {
6357                         ret = btrfs_set_inode_index_count(dir);
6358                         if (ret)
6359                                 return ret;
6360                 }
6361         }
6362 
6363         *index = dir->index_cnt;
6364         dir->index_cnt++;
6365 
6366         return ret;
6367 }
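
/*
 * Usage sketch (variable names hypothetical): callers creating a directory
 * entry grab the next index first and use it as the BTRFS_DIR_INDEX_KEY
 * offset of the new entry:
 *
 *	u64 index;
 *	ret = btrfs_set_inode_index(BTRFS_I(dir), &index);
 *	if (ret)
 *		return ret;
 *	// "index" becomes the dir index offset for the new name
 *
 * btrfs_new_inode() below follows exactly this pattern.
 */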
6368 
6369 static int btrfs_insert_inode_locked(struct inode *inode)
6370 {
6371         struct btrfs_iget_args args;
6372         args.location = &BTRFS_I(inode)->location;
6373         args.root = BTRFS_I(inode)->root;
6374 
6375         return insert_inode_locked4(inode,
6376                    btrfs_inode_hash(inode->i_ino, BTRFS_I(inode)->root),
6377                    btrfs_find_actor, &args);
6378 }
6379 
6380 /*
6381  * Inherit flags from the parent inode.
6382  *
6383  * Currently only the compression flags and the cow flags are inherited.
6384  */
6385 static void btrfs_inherit_iflags(struct inode *inode, struct inode *dir)
6386 {
6387         unsigned int flags;
6388 
6389         if (!dir)
6390                 return;
6391 
6392         flags = BTRFS_I(dir)->flags;
6393 
6394         if (flags & BTRFS_INODE_NOCOMPRESS) {
6395                 BTRFS_I(inode)->flags &= ~BTRFS_INODE_COMPRESS;
6396                 BTRFS_I(inode)->flags |= BTRFS_INODE_NOCOMPRESS;
6397         } else if (flags & BTRFS_INODE_COMPRESS) {
6398                 BTRFS_I(inode)->flags &= ~BTRFS_INODE_NOCOMPRESS;
6399                 BTRFS_I(inode)->flags |= BTRFS_INODE_COMPRESS;
6400         }
6401 
6402         if (flags & BTRFS_INODE_NODATACOW) {
6403                 BTRFS_I(inode)->flags |= BTRFS_INODE_NODATACOW;
6404                 if (S_ISREG(inode->i_mode))
6405                         BTRFS_I(inode)->flags |= BTRFS_INODE_NODATASUM;
6406         }
6407 
6408         btrfs_sync_inode_flags_to_i_flags(inode);
6409 }
6410 
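/*
 * A hedged userspace sketch (illustration only, not part of inode.c):
 * marking a directory NOCOW via FS_IOC_SETFLAGS (what chattr +C does), so
 * that files created inside it pick up BTRFS_INODE_NODATACOW through
 * btrfs_inherit_iflags() above.  Error handling is trimmed for brevity.
 */
#include <fcntl.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/fs.h>

int make_dir_nocow(const char *path)
{
        int flags = 0;
        int ret = -1;
        int fd = open(path, O_RDONLY | O_DIRECTORY);

        if (fd < 0)
                return -1;
        if (ioctl(fd, FS_IOC_GETFLAGS, &flags) == 0) {
                flags |= FS_NOCOW_FL;   /* becomes BTRFS_INODE_NODATACOW */
                ret = ioctl(fd, FS_IOC_SETFLAGS, &flags);
        }
        close(fd);
        return ret;
}
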
6411 static struct inode *btrfs_new_inode(struct btrfs_trans_handle *trans,
6412                                      struct btrfs_root *root,
6413                                      struct inode *dir,
6414                                      const char *name, int name_len,
6415                                      u64 ref_objectid, u64 objectid,
6416                                      umode_t mode, u64 *index)
6417 {
6418         struct btrfs_fs_info *fs_info = root->fs_info;
6419         struct inode *inode;
6420         struct btrfs_inode_item *inode_item;
6421         struct btrfs_key *location;
6422         struct btrfs_path *path;
6423         struct btrfs_inode_ref *ref;
6424         struct btrfs_key key[2];
6425         u32 sizes[2];
6426         int nitems = name ? 2 : 1;
6427         unsigned long ptr;
6428         unsigned int nofs_flag;
6429         int ret;
6430 
6431         path = btrfs_alloc_path();
6432         if (!path)
6433                 return ERR_PTR(-ENOMEM);
6434 
6435         nofs_flag = memalloc_nofs_save();
6436         inode = new_inode(fs_info->sb);
6437         memalloc_nofs_restore(nofs_flag);
6438         if (!inode) {
6439                 btrfs_free_path(path);
6440                 return ERR_PTR(-ENOMEM);
6441         }
6442 
6443         /*
6444          * O_TMPFILE: set the link count to 0, so that from this point on
6445          * we fill in the inode item with the correct link count.
6446          */
6447         if (!name)
6448                 set_nlink(inode, 0);
6449 
6450         /*
6451          * we have to initialize this early, so we can reclaim the inode
6452          * number if we fail afterwards in this function.
6453          */
6454         inode->i_ino = objectid;
6455 
6456         if (dir && name) {
6457                 trace_btrfs_inode_request(dir);
6458 
6459                 ret = btrfs_set_inode_index(BTRFS_I(dir), index);
6460                 if (ret) {
6461                         btrfs_free_path(path);
6462                         iput(inode);
6463                         return ERR_PTR(ret);
6464                 }
6465         } else if (dir) {
6466                 *index = 0;
6467         }
6468         /*
6469          * index_cnt is ignored for everything but a dir;
6470          * btrfs_set_inode_index_count() has an explanation for the magic
6471          * number.
6472          */
6473         BTRFS_I(inode)->index_cnt = 2;
6474         BTRFS_I(inode)->dir_index = *index;
6475         BTRFS_I(inode)->root = root;
6476         BTRFS_I(inode)->generation = trans->transid;
6477         inode->i_generation = BTRFS_I(inode)->generation;
6478 
6479         /*
6480          * We could have gotten an inode number from somebody who was fsynced
6481          * and then removed in this same transaction, so let's just set full
6482          * sync since it will be a full sync anyway and this will blow away the
6483          * old info in the log.
6484          */
6485         set_bit(BTRFS_INODE_NEEDS_FULL_SYNC, &BTRFS_I(inode)->runtime_flags);
6486 
6487         key[0].objectid = objectid;
6488         key[0].type = BTRFS_INODE_ITEM_KEY;
6489         key[0].offset = 0;
6490 
6491         sizes[0] = sizeof(struct btrfs_inode_item);
6492 
6493         if (name) {
6494                 /*
6495                  * Start new inodes with an inode_ref. This is slightly more
6496                  * efficient for small numbers of hard links since they will
6497                  * be packed into one item. Extended refs will kick in if we
6498                  * add more hard links than can fit in the ref item.
6499                  */
6500                 key[1].objectid = objectid;
6501                 key[1].type = BTRFS_INODE_REF_KEY;
6502                 key[1].offset = ref_objectid;
6503 
6504                 sizes[1] = name_len + sizeof(*ref);
6505         }
6506 
6507         location = &BTRFS_I(inode)->location;
6508         location->objectid = objectid;
6509         location->offset = 0;
6510         location->type = BTRFS_INODE_ITEM_KEY;
6511 
6512         ret = btrfs_insert_inode_locked(inode);
6513         if (ret < 0) {
6514                 iput(inode);
6515                 goto fail;
6516         }
6517 
6518         path->leave_spinning = 1;
6519         ret = btrfs_insert_empty_items(trans, root, path, key, sizes, nitems);
6520         if (ret != 0)
6521                 goto fail_unlock;
6522 
6523         inode_init_owner(inode, dir, mode);
6524         inode_set_bytes(inode, 0);
6525 
6526         inode->i_mtime = current_time(inode);
6527         inode->i_atime = inode->i_mtime;
6528         inode->i_ctime = inode->i_mtime;
6529         BTRFS_I(inode)->i_otime = inode->i_mtime;
6530 
6531         inode_item = btrfs_item_ptr(path->nodes[0], path->slots[0],
6532                                   struct btrfs_inode_item);
6533         memzero_extent_buffer(path->nodes[0], (unsigned long)inode_item,
6534                              sizeof(*inode_item));
6535         fill_inode_item(trans, path->nodes[0], inode_item, inode);
6536 
6537         if (name) {
6538                 ref = btrfs_item_ptr(path->nodes[0], path->slots[0] + 1,
6539                                      struct btrfs_inode_ref);
6540                 btrfs_set_inode_ref_name_len(path->nodes[0], ref, name_len);
6541                 btrfs_set_inode_ref_index(path->nodes[0], ref, *index);
6542                 ptr = (unsigned long)(ref + 1);
6543                 write_extent_buffer(path->nodes[0], name, ptr, name_len);
6544         }
6545 
6546         btrfs_mark_buffer_dirty(path->nodes[0]);
6547         btrfs_free_path(path);
6548 
6549         btrfs_inherit_iflags(inode, dir);
6550 
6551         if (S_ISREG(mode)) {
6552                 if (btrfs_test_opt(fs_info, NODATASUM))
6553                         BTRFS_I(inode)->flags |= BTRFS_INODE_NODATASUM;
6554                 if (btrfs_test_opt(fs_info, NODATACOW))
6555                         BTRFS_I(inode)->flags |= BTRFS_INODE_NODATACOW |
6556                                 BTRFS_INODE_NODATASUM;
6557         }
6558 
6559         inode_tree_add(inode);
6560 
6561         trace_btrfs_inode_new(inode);
6562         btrfs_set_inode_last_trans(trans, inode);
6563 
6564         btrfs_update_root_times(trans, root);
6565 
6566         ret = btrfs_inode_inherit_props(trans, inode, dir);
6567         if (ret)
6568                 btrfs_err(fs_info,
6569                           "error inheriting props for ino %llu (root %llu): %d",
6570                         btrfs_ino(BTRFS_I(inode)), root->root_key.objectid, ret);
6571 
6572         return inode;
6573 
6574 fail_unlock:
6575         discard_new_inode(inode);
6576 fail:
6577         if (dir && name)
6578                 BTRFS_I(dir)->index_cnt--;
6579         btrfs_free_path(path);
6580         return ERR_PTR(ret);
6581 }
6582 
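/*
 * A short userspace sketch (assumed btrfs mount path, illustration only):
 * open(2) with O_TMPFILE is the caller that reaches btrfs_new_inode() with
 * name == NULL, which is why the link count is set to 0 above; an orphan
 * item then protects the inode until it is linked in or released.
 */
#define _GNU_SOURCE
#include <fcntl.h>

int open_unnamed_tmpfile(const char *btrfs_dir)
{
        /* unnamed inode, nlink == 0; vanishes on close() unless linked */
        return open(btrfs_dir, O_TMPFILE | O_RDWR, 0600);
}
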
6583 /*
6584  * Utility function to add 'inode' into 'parent_inode' with
6585  * a given name and a given sequence number.
6586  * If 'add_backref' is true, also insert a backref from the
6587  * inode to the parent directory.
6588  */
6589 int btrfs_add_link(struct btrfs_trans_handle *trans,
6590                    struct btrfs_inode *parent_inode, struct btrfs_inode *inode,
6591                    const char *name, int name_len, int add_backref, u64 index)
6592 {
6593         int ret = 0;
6594         struct btrfs_key key;
6595         struct btrfs_root *root = parent_inode->root;
6596         u64 ino = btrfs_ino(inode);
6597         u64 parent_ino = btrfs_ino(parent_inode);
6598 
6599         if (unlikely(ino == BTRFS_FIRST_FREE_OBJECTID)) {
6600                 memcpy(&key, &inode->root->root_key, sizeof(key));
6601         } else {
6602                 key.objectid = ino;
6603                 key.type = BTRFS_INODE_ITEM_KEY;
6604                 key.offset = 0;
6605         }
6606 
6607         if (unlikely(ino == BTRFS_FIRST_FREE_OBJECTID)) {
6608                 ret = btrfs_add_root_ref(trans, key.objectid,
6609                                          root->root_key.objectid, parent_ino,
6610                                          index, name, name_len);
6611         } else if (add_backref) {
6612                 ret = btrfs_insert_inode_ref(trans, root, name, name_len, ino,
6613                                              parent_ino, index);
6614         }
6615 
6616         /* Nothing to clean up yet */
6617         if (ret)
6618                 return ret;
6619 
6620         ret = btrfs_insert_dir_item(trans, name, name_len, parent_inode, &key,
6621                                     btrfs_inode_type(&inode->vfs_inode), index);
6622         if (ret == -EEXIST || ret == -EOVERFLOW)
6623                 goto fail_dir_item;
6624         else if (ret) {
6625                 btrfs_abort_transaction(trans, ret);
6626                 return ret;
6627         }
6628 
6629         btrfs_i_size_write(parent_inode, parent_inode->vfs_inode.i_size +
6630                            name_len * 2);
6631         inode_inc_iversion(&parent_inode->vfs_inode);
6632         /*
6633          * If we are replaying a log tree, we do not want to update the mtime
6634          * and ctime of the parent directory with the current time, since the
6635          * log replay procedure is responsible for setting them to their correct
6636          * values (the ones it had when the fsync was done).
6637          */
6638         if (!test_bit(BTRFS_FS_LOG_RECOVERING, &root->fs_info->flags)) {
6639                 struct timespec64 now = current_time(&parent_inode->vfs_inode);
6640 
6641                 parent_inode->vfs_inode.i_mtime = now;
6642                 parent_inode->vfs_inode.i_ctime = now;
6643         }
6644         ret = btrfs_update_inode(trans, root, &parent_inode->vfs_inode);
6645         if (ret)
6646                 btrfs_abort_transaction(trans, ret);
6647         return ret;
6648 
6649 fail_dir_item:
6650         if (unlikely(ino == BTRFS_FIRST_FREE_OBJECTID)) {
6651                 u64 local_index;
6652                 int err;
6653                 err = btrfs_del_root_ref(trans, key.objectid,
6654                                          root->root_key.objectid, parent_ino,
6655                                          &local_index, name, name_len);
6656                 if (err)
6657                         btrfs_abort_transaction(trans, err);
6658         } else if (add_backref) {
6659                 u64 local_index;
6660                 int err;
6661 
6662                 err = btrfs_del_inode_ref(trans, root, name, name_len,
6663                                           ino, parent_ino, &local_index);
6664                 if (err)
6665                         btrfs_abort_transaction(trans, err);
6666         }
6667 
6668         /* Return the original error code */
6669         return ret;
6670 }
6671 
6672 static int btrfs_add_nondir(struct btrfs_trans_handle *trans,
6673                             struct btrfs_inode *dir, struct dentry *dentry,
6674                             struct btrfs_inode *inode, int backref, u64 index)
6675 {
6676         int err = btrfs_add_link(trans, dir, inode,
6677                                  dentry->d_name.name, dentry->d_name.len,
6678                                  backref, index);
6679         if (err > 0)
6680                 err = -EEXIST;
6681         return err;
6682 }
6683 
6684 static int btrfs_mknod(struct inode *dir, struct dentry *dentry,
6685                         umode_t mode, dev_t rdev)
6686 {
6687         struct btrfs_fs_info *fs_info = btrfs_sb(dir->i_sb);
6688         struct btrfs_trans_handle *trans;
6689         struct btrfs_root *root = BTRFS_I(dir)->root;
6690         struct inode *inode = NULL;
6691         int err;
6692         u64 objectid;
6693         u64 index = 0;
6694 
6695         /*
6696          * 2 for inode item and ref
6697          * 2 for dir items
6698          * 1 for xattr if selinux is on
6699          */
6700         trans = btrfs_start_transaction(root, 5);
6701         if (IS_ERR(trans))
6702                 return PTR_ERR(trans);
6703 
6704         err = btrfs_find_free_ino(root, &objectid);
6705         if (err)
6706                 goto out_unlock;
6707 
6708         inode = btrfs_new_inode(trans, root, dir, dentry->d_name.name,
6709                         dentry->d_name.len, btrfs_ino(BTRFS_I(dir)), objectid,
6710                         mode, &index);
6711         if (IS_ERR(inode)) {
6712                 err = PTR_ERR(inode);
6713                 inode = NULL;
6714                 goto out_unlock;
6715         }
6716 
6717         /*
6718          * If the active LSM wants to access the inode during
6719          * d_instantiate it needs these. Smack checks to see
6720          * if the filesystem supports xattrs by looking at the
6721          * ops vector.
6722         */
6723         inode->i_op = &btrfs_special_inode_operations;
6724         init_special_inode(inode, inode->i_mode, rdev);
6725 
6726         err = btrfs_init_inode_security(trans, inode, dir, &dentry->d_name);
6727         if (err)
6728                 goto out_unlock;
6729 
6730         err = btrfs_add_nondir(trans, BTRFS_I(dir), dentry, BTRFS_I(inode),
6731                         0, index);
6732         if (err)
6733                 goto out_unlock;
6734 
6735         btrfs_update_inode(trans, root, inode);
6736         d_instantiate_new(dentry, inode);
6737 
6738 out_unlock:
6739         btrfs_end_transaction(trans);
6740         btrfs_btree_balance_dirty(fs_info);
6741         if (err && inode) {
6742                 inode_dec_link_count(inode);
6743                 discard_new_inode(inode);
6744         }
6745         return err;
6746 }
6747 
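/*
 * A small userspace sketch (hypothetical path, illustration only): a
 * mknod(2) call like this one lands in btrfs_mknod() above, where the
 * inode gets btrfs_special_inode_operations and init_special_inode().
 */
#include <sys/types.h>
#include <sys/stat.h>

int make_fifo_example(void)
{
        /* "/mnt/btrfs/fifo" is a placeholder mount point */
        return mknod("/mnt/btrfs/fifo", S_IFIFO | 0600, 0);
}
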
6748 static int btrfs_create(struct inode *dir, struct dentry *dentry,
6749                         umode_t mode, bool excl)
6750 {
6751         struct btrfs_fs_info *fs_info = btrfs_sb(dir->i_sb);
6752         struct btrfs_trans_handle *trans;
6753         struct btrfs_root *root = BTRFS_I(dir)->root;
6754         struct inode *inode = NULL;
6755         int err;
6756         u64 objectid;
6757         u64 index = 0;
6758 
6759         /*
6760          * 2 for inode item and ref
6761          * 2 for dir items
6762          * 1 for xattr if selinux is on
6763          */
6764         trans = btrfs_start_transaction(root, 5);
6765         if (IS_ERR(trans))
6766                 return PTR_ERR(trans);
6767 
6768         err = btrfs_find_free_ino(root, &objectid);
6769         if (err)
6770                 goto out_unlock;
6771 
6772         inode = btrfs_new_inode(trans, root, dir, dentry->d_name.name,
6773                         dentry->d_name.len, btrfs_ino(BTRFS_I(dir)), objectid,
6774                         mode, &index);
6775         if (IS_ERR(inode)) {
6776                 err = PTR_ERR(inode);
6777                 inode = NULL;
6778                 goto out_unlock;
6779         }
6780         /*
6781          * If the active LSM wants to access the inode during
6782          * d_instantiate it needs these. Smack checks to see
6783          * if the filesystem supports xattrs by looking at the
6784          * ops vector.
6785         */
6786         inode->i_fop = &btrfs_file_operations;
6787         inode->i_op = &btrfs_file_inode_operations;
6788         inode->i_mapping->a_ops = &btrfs_aops;
6789 
6790         err = btrfs_init_inode_security(trans, inode, dir, &dentry->d_name);
6791         if (err)
6792                 goto out_unlock;
6793 
6794         err = btrfs_update_inode(trans, root, inode);
6795         if (err)
6796                 goto out_unlock;
6797 
6798         err = btrfs_add_nondir(trans, BTRFS_I(dir), dentry, BTRFS_I(inode),
6799                         0, index);
6800         if (err)
6801                 goto out_unlock;
6802 
6803         BTRFS_I(inode)->io_tree.ops = &btrfs_extent_io_ops;
6804         d_instantiate_new(dentry, inode);
6805 
6806 out_unlock:
6807         btrfs_end_transaction(trans);
6808         if (err && inode) {
6809                 inode_dec_link_count(inode);
6810                 discard_new_inode(inode);
6811         }
6812         btrfs_btree_balance_dirty(fs_info);
6813         return err;
6814 }
6815 
6816 static int btrfs_link(struct dentry *old_dentry, struct inode *dir,
6817                       struct dentry *dentry)
6818 {
6819         struct btrfs_trans_handle *trans = NULL;
6820         struct btrfs_root *root = BTRFS_I(dir)->root;
6821         struct inode *inode = d_inode(old_dentry);
6822         struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
6823         u64 index;
6824         int err;
6825         int drop_inode = 0;
6826 
6827         /* do not allow sys_link() across subvolumes of the same device */
6828         if (root->root_key.objectid != BTRFS_I(inode)->root->root_key.objectid)
6829                 return -EXDEV;
6830 
6831         if (inode->i_nlink >= BTRFS_LINK_MAX)
6832                 return -EMLINK;
6833 
6834         err = btrfs_set_inode_index(BTRFS_I(dir), &index);
6835         if (err)
6836                 goto fail;
6837 
6838         /*
6839          * 2 items for inode and inode ref
6840          * 2 items for dir items
6841          * 1 item for parent inode
6842          * 1 item for orphan item deletion if O_TMPFILE
6843          */
6844         trans = btrfs_start_transaction(root, inode->i_nlink ? 5 : 6);
6845         if (IS_ERR(trans)) {
6846                 err = PTR_ERR(trans);
6847                 trans = NULL;
6848                 goto fail;
6849         }
6850 
6851         /* There are several dir indexes for this inode, clear the cache. */
6852         BTRFS_I(inode)->dir_index = 0ULL;
6853         inc_nlink(inode);
6854         inode_inc_iversion(inode);
6855         inode->i_ctime = current_time(inode);
6856         ihold(inode);
6857         set_bit(BTRFS_INODE_COPY_EVERYTHING, &BTRFS_I(inode)->runtime_flags);
6858 
6859         err = btrfs_add_nondir(trans, BTRFS_I(dir), dentry, BTRFS_I(inode),
6860                         1, index);
6861 
6862         if (err) {
6863                 drop_inode = 1;
6864         } else {
6865                 struct dentry *parent = dentry->d_parent;
6866                 int ret;
6867 
6868                 err = btrfs_update_inode(trans, root, inode);
6869                 if (err)
6870                         goto fail;
6871                 if (inode->i_nlink == 1) {
6872                         /*
6873                          * If the new hard link count is 1, it's a file
6874                          * created with open(2)'s O_TMPFILE flag.
6875                          */
6876                         err = btrfs_orphan_del(trans, BTRFS_I(inode));
6877                         if (err)
6878                                 goto fail;
6879                 }
6880                 d_instantiate(dentry, inode);
6881                 ret = btrfs_log_new_name(trans, BTRFS_I(inode), NULL, parent,
6882                                          true, NULL);
6883                 if (ret == BTRFS_NEED_TRANS_COMMIT) {
6884                         err = btrfs_commit_transaction(trans);
6885                         trans = NULL;
6886                 }
6887         }
6888 
6889 fail:
6890         if (trans)
6891                 btrfs_end_transaction(trans);
6892         if (drop_inode) {
6893                 inode_dec_link_count(inode);
6894                 iput(inode);
6895         }
6896         btrfs_btree_balance_dirty(fs_info);
6897         return err;
6898 }
6899 
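/*
 * A hedged userspace sketch (illustration only): giving an O_TMPFILE a
 * name with linkat(2).  This drives the i_nlink 0 -> 1 transition handled
 * in btrfs_link() above, where the orphan item is deleted once the first
 * link exists.
 */
#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int materialize_tmpfile(const char *dir, const char *path)
{
        char proc_path[64];
        int ret;
        int fd = open(dir, O_TMPFILE | O_WRONLY, 0644);

        if (fd < 0)
                return -1;
        snprintf(proc_path, sizeof(proc_path), "/proc/self/fd/%d", fd);
        /* AT_SYMLINK_FOLLOW makes linkat() resolve the proc symlink */
        ret = linkat(AT_FDCWD, proc_path, AT_FDCWD, path, AT_SYMLINK_FOLLOW);
        close(fd);
        return ret;
}
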
6900 static int btrfs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
6901 {
6902         struct btrfs_fs_info *fs_info = btrfs_sb(dir->i_sb);
6903         struct inode *inode = NULL;
6904         struct btrfs_trans_handle *trans;
6905         struct btrfs_root *root = BTRFS_I(dir)->root;
6906         int err = 0;
6907         u64 objectid = 0;
6908         u64 index = 0;
6909 
6910         /*
6911          * 2 items for inode and ref
6912          * 2 items for dir items
6913          * 1 for xattr if selinux is on
6914          */
6915         trans = btrfs_start_transaction(root, 5);
6916         if (IS_ERR(trans))
6917                 return PTR_ERR(trans);
6918 
6919         err = btrfs_find_free_ino(root, &objectid);
6920         if (err)
6921                 goto out_fail;
6922 
6923         inode = btrfs_new_inode(trans, root, dir, dentry->d_name.name,
6924                         dentry->d_name.len, btrfs_ino(BTRFS_I(dir)), objectid,
6925                         S_IFDIR | mode, &index);
6926         if (IS_ERR(inode)) {
6927                 err = PTR_ERR(inode);
6928                 inode = NULL;
6929                 goto out_fail;
6930         }
6931 
6932         /* these must be set before we unlock the inode */
6933         inode->i_op = &btrfs_dir_inode_operations;
6934         inode->i_fop = &btrfs_dir_file_operations;
6935 
6936         err = btrfs_init_inode_security(trans, inode, dir, &dentry->d_name);
6937         if (err)
6938                 goto out_fail;
6939 
6940         btrfs_i_size_write(BTRFS_I(inode), 0);
6941         err = btrfs_update_inode(trans, root, inode);
6942         if (err)
6943                 goto out_fail;
6944 
6945         err = btrfs_add_link(trans, BTRFS_I(dir), BTRFS_I(inode),
6946                         dentry->d_name.name,
6947                         dentry->d_name.len, 0, index);
6948         if (err)
6949                 goto out_fail;
6950 
6951         d_instantiate_new(dentry, inode);
6952 
6953 out_fail:
6954         btrfs_end_transaction(trans);
6955         if (err && inode) {
6956                 inode_dec_link_count(inode);
6957                 discard_new_inode(inode);
6958         }
6959         btrfs_btree_balance_dirty(fs_info);
6960         return err;
6961 }
6962 
6963 static noinline int uncompress_inline(struct btrfs_path *path,
6964                                       struct page *page,
6965                                       size_t pg_offset, u64 extent_offset,
6966                                       struct btrfs_file_extent_item *item)
6967 {
6968         int ret;
6969         struct extent_buffer *leaf = path->nodes[0];
6970         char *tmp;
6971         size_t max_size;
6972         unsigned long inline_size;
6973         unsigned long ptr;
6974         int compress_type;
6975 
6976         WARN_ON(pg_offset != 0);
6977         compress_type = btrfs_file_extent_compression(leaf, item);
6978         max_size = btrfs_file_extent_ram_bytes(leaf, item);
6979         inline_size = btrfs_file_extent_inline_item_len(leaf,
6980                                         btrfs_item_nr(path->slots[0]));
6981         tmp = kmalloc(inline_size, GFP_NOFS);
6982         if (!tmp)
6983                 return -ENOMEM;
6984         ptr = btrfs_file_extent_inline_start(item);
6985 
6986         read_extent_buffer(leaf, tmp, ptr, inline_size);
6987 
6988         max_size = min_t(unsigned long, PAGE_SIZE, max_size);
6989         ret = btrfs_decompress(compress_type, tmp, page,
6990                                extent_offset, inline_size, max_size);
6991 
6992         /*
6993          * decompression code contains a memset to fill in any space between the end
6994          * of the uncompressed data and the end of max_size in case the decompressed
6995          * data ends up shorter than ram_bytes.  That doesn't cover the hole between
6996          * the end of an inline extent and the beginning of the next block, so we
6997          * cover that region here.
6998          */
6999 
7000         if (max_size + pg_offset < PAGE_SIZE) {
7001                 char *map = kmap(page);
7002                 memset(map + pg_offset + max_size, 0, PAGE_SIZE - max_size - pg_offset);
7003                 kunmap(page);
7004         }
7005         kfree(tmp);
7006         return ret;
7007 }
7008 
7009 /*
7010  * A bit scary: this does extent mapping from the logical file offset to
7011  * the disk.  The ugly parts come from merging extents from the disk with
7012  * the in-ram representation.  This gets more complex because of the
7013  * data=ordered code, where in-ram extents might be locked pending its completion.
7014  *
7015  * This also copies inline extents directly into the page.
7016  */
7017 struct extent_map *btrfs_get_extent(struct btrfs_inode *inode,
7018                                     struct page *page,
7019                                     size_t pg_offset, u64 start, u64 len,
7020                                     int create)
7021 {
7022         struct btrfs_fs_info *fs_info = inode->root->fs_info;
7023         int ret;
7024         int err = 0;
7025         u64 extent_start = 0;
7026         u64 extent_end = 0;
7027         u64 objectid = btrfs_ino(inode);
7028         int extent_type = -1;
7029         struct btrfs_path *path = NULL;
7030         struct btrfs_root *root = inode->root;
7031         struct btrfs_file_extent_item *item;
7032         struct extent_buffer *leaf;
7033         struct btrfs_key found_key;
7034         struct extent_map *em = NULL;
7035         struct extent_map_tree *em_tree = &inode->extent_tree;
7036         struct extent_io_tree *io_tree = &inode->io_tree;
7037         const bool new_inline = !page || create;
7038 
7039         read_lock(&em_tree->lock);
7040         em = lookup_extent_mapping(em_tree, start, len);
7041         if (em)
7042                 em->bdev = fs_info->fs_devices->latest_bdev;
7043         read_unlock(&em_tree->lock);
7044 
7045         if (em) {
7046                 if (em->start > start || em->start + em->len <= start)
7047                         free_extent_map(em);
7048                 else if (em->block_start == EXTENT_MAP_INLINE && page)
7049                         free_extent_map(em);
7050                 else
7051                         goto out;
7052         }
7053         em = alloc_extent_map();
7054         if (!em) {
7055                 err = -ENOMEM;
7056                 goto out;
7057         }
7058         em->bdev = fs_info->fs_devices->latest_bdev;
7059         em->start = EXTENT_MAP_HOLE;
7060         em->orig_start = EXTENT_MAP_HOLE;
7061         em->len = (u64)-1;
7062         em->block_len = (u64)-1;
7063 
7064         path = btrfs_alloc_path();
7065         if (!path) {
7066                 err = -ENOMEM;
7067                 goto out;
7068         }
7069 
7070         /* Chances are we'll be called again, so go ahead and do readahead */
7071         path->reada = READA_FORWARD;
7072 
7073         /*
7074          * Unless we're going to uncompress the inline extent, we won't
7075          * sleep here.
7076          */
7077         path->leave_spinning = 1;
7078 
7079         ret = btrfs_lookup_file_extent(NULL, root, path, objectid, start, 0);
7080         if (ret < 0) {
7081                 err = ret;
7082                 goto out;
7083         } else if (ret > 0) {
7084                 if (path->slots[0] == 0)
7085                         goto not_found;
7086                 path->slots[0]--;
7087         }
7088 
7089         leaf = path->nodes[0];
7090         item = btrfs_item_ptr(leaf, path->slots[0],
7091                               struct btrfs_file_extent_item);
7092         btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
7093         if (found_key.objectid != objectid ||
7094             found_key.type != BTRFS_EXTENT_DATA_KEY) {
7095                 /*
7096                  * If we back up past the first extent we want to move forward
7097                  * and see if there is an extent in front of us, otherwise we'll
7098                  * say there is a hole for our whole search range, which can
7099                  * cause problems.
7100                  */
7101                 extent_end = start;
7102                 goto next;
7103         }
7104 
7105         extent_type = btrfs_file_extent_type(leaf, item);
7106         extent_start = found_key.offset;
7107         if (extent_type == BTRFS_FILE_EXTENT_REG ||
7108             extent_type == BTRFS_FILE_EXTENT_PREALLOC) {
7109                 /* Only a regular file can have regular/prealloc extents */
7110                 if (!S_ISREG(inode->vfs_inode.i_mode)) {
7111                         ret = -EUCLEAN;
7112                         btrfs_crit(fs_info,
7113                 "regular/prealloc extent found for non-regular inode %llu",
7114                                    btrfs_ino(inode));
7115                         goto out;
7116                 }
7117                 extent_end = extent_start +
7118                        btrfs_file_extent_num_bytes(leaf, item);
7119 
7120                 trace_btrfs_get_extent_show_fi_regular(inode, leaf, item,
7121                                                        extent_start);
7122         } else if (extent_type == BTRFS_FILE_EXTENT_INLINE) {
7123                 size_t size;
7124 
7125                 size = btrfs_file_extent_ram_bytes(leaf, item);
7126                 extent_end = ALIGN(extent_start + size,
7127                                    fs_info->sectorsize);
7128 
7129                 trace_btrfs_get_extent_show_fi_inline(inode, leaf, item,
7130                                                       path->slots[0],
7131                                                       extent_start);
7132         }
7133 next:
7134         if (start >= extent_end) {
7135                 path->slots[0]++;
7136                 if (path->slots[0] >= btrfs_header_nritems(leaf)) {
7137                         ret = btrfs_next_leaf(root, path);
7138                         if (ret < 0) {
7139                                 err = ret;
7140                                 goto out;
7141                         } else if (ret > 0) {
7142                                 goto not_found;
7143                         }
7144                         leaf = path->nodes[0];
7145                 }
7146                 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
7147                 if (found_key.objectid != objectid ||
7148                     found_key.type != BTRFS_EXTENT_DATA_KEY)
7149                         goto not_found;
7150                 if (start + len <= found_key.offset)
7151                         goto not_found;
7152                 if (start > found_key.offset)
7153                         goto next;
7154 
7155                 /* New extent overlaps with existing one */
7156                 em->start = start;
7157                 em->orig_start = start;
7158                 em->len = found_key.offset - start;
7159                 em->block_start = EXTENT_MAP_HOLE;
7160                 goto insert;
7161         }
7162 
7163         btrfs_extent_item_to_extent_map(inode, path, item,
7164                         new_inline, em);
7165 
7166         if (extent_type == BTRFS_FILE_EXTENT_REG ||
7167             extent_type == BTRFS_FILE_EXTENT_PREALLOC) {
7168                 goto insert;
7169         } else if (extent_type == BTRFS_FILE_EXTENT_INLINE) {
7170                 unsigned long ptr;
7171                 char *map;
7172                 size_t size;
7173                 size_t extent_offset;
7174                 size_t copy_size;
7175 
7176                 if (new_inline)
7177                         goto out;
7178 
7179                 size = btrfs_file_extent_ram_bytes(leaf, item);
7180                 extent_offset = page_offset(page) + pg_offset - extent_start;
7181                 copy_size = min_t(u64, PAGE_SIZE - pg_offset,
7182                                   size - extent_offset);
7183                 em->start = extent_start + extent_offset;
7184                 em->len = ALIGN(copy_size, fs_info->sectorsize);
7185                 em->orig_block_len = em->len;
7186                 em->orig_start = em->start;
7187                 ptr = btrfs_file_extent_inline_start(item) + extent_offset;
7188 
7189                 btrfs_set_path_blocking(path);
7190                 if (!PageUptodate(page)) {
7191                         if (btrfs_file_extent_compression(leaf, item) !=
7192                             BTRFS_COMPRESS_NONE) {
7193                                 ret = uncompress_inline(path, page, pg_offset,
7194                                                         extent_offset, item);
7195                                 if (ret) {
7196                                         err = ret;
7197                                         goto out;
7198                                 }
7199                         } else {
7200                                 map = kmap(page);
7201                                 read_extent_buffer(leaf, map + pg_offset, ptr,
7202                                                    copy_size);
7203                                 if (pg_offset + copy_size < PAGE_SIZE) {
7204                                         memset(map + pg_offset + copy_size, 0,
7205                                                PAGE_SIZE - pg_offset -
7206                                                copy_size);
7207                                 }
7208                                 kunmap(page);
7209                         }
7210                         flush_dcache_page(page);
7211                 }
7212                 set_extent_uptodate(io_tree, em->start,
7213                                     extent_map_end(em) - 1, NULL, GFP_NOFS);
7214                 goto insert;
7215         }
7216 not_found:
7217         em->start = start;
7218         em->orig_start = start;
7219         em->len = len;
7220         em->block_start = EXTENT_MAP_HOLE;
7221 insert:
7222         btrfs_release_path(path);
7223         if (em->start > start || extent_map_end(em) <= start) {
7224                 btrfs_err(fs_info,
7225                           "bad extent! em: [%llu %llu] passed [%llu %llu]",
7226                           em->start, em->len, start, len);
7227                 err = -EIO;
7228                 goto out;
7229         }
7230 
7231         err = 0;
7232         write_lock(&em_tree->lock);
7233         err = btrfs_add_extent_mapping(fs_info, em_tree, &em, start, len);
7234         write_unlock(&em_tree->lock);
7235 out:
7236         btrfs_free_path(path);
7237 
7238         trace_btrfs_get_extent(root, inode, em);
7239 
7240         if (err) {
7241                 free_extent_map(em);
7242                 return ERR_PTR(err);
7243         }
7244         BUG_ON(!em); /* Error is always set */
7245         return em;
7246 }
7247 
7248 struct extent_map *btrfs_get_extent_fiemap(struct btrfs_inode *inode,
7249                                            u64 start, u64 len)
7250 {
7251         struct extent_map *em;
7252         struct extent_map *hole_em = NULL;
7253         u64 delalloc_start = start;
7254         u64 end;
7255         u64 delalloc_len;
7256         u64 delalloc_end;
7257         int err = 0;
7258 
7259         em = btrfs_get_extent(inode, NULL, 0, start, len, 0);
7260         if (IS_ERR(em))
7261                 return em;
7262         /*
7263          * If our em maps to:
7264          * - a hole or
7265          * - a pre-alloc extent,
7266          * there might actually be delalloc bytes behind it.
7267          */
7268         if (em->block_start != EXTENT_MAP_HOLE &&
7269             !test_bit(EXTENT_FLAG_PREALLOC, &em->flags))
7270                 return em;
7271         else
7272                 hole_em = em;
7273 
7274         /* check to see if we've wrapped (len == -1 or similar) */
7275         end = start + len;
7276         if (end < start)
7277                 end = (u64)-1;
7278         else
7279                 end -= 1;
7280 
7281         em = NULL;
7282 
7283         /* ok, we didn't find anything; let's look for delalloc */
7284         delalloc_len = count_range_bits(&inode->io_tree, &delalloc_start,
7285                                  end, len, EXTENT_DELALLOC, 1);
7286         delalloc_end = delalloc_start + delalloc_len;
7287         if (delalloc_end < delalloc_start)
7288                 delalloc_end = (u64)-1;
7289 
7290         /*
7291          * We didn't find anything useful, return the original results from
7292          * get_extent()
7293          */
7294         if (delalloc_start > end || delalloc_end <= start) {
7295                 em = hole_em;
7296                 hole_em = NULL;
7297                 goto out;
7298         }
7299 
7300         /*
7301          * Adjust the delalloc_start to make sure it doesn't go backwards from
7302          * the start they passed in
7303          */
7304         delalloc_start = max(start, delalloc_start);
7305         delalloc_len = delalloc_end - delalloc_start;
7306 
7307         if (delalloc_len > 0) {
7308                 u64 hole_start;
7309                 u64 hole_len;
7310                 const u64 hole_end = extent_map_end(hole_em);
7311 
7312                 em = alloc_extent_map();
7313                 if (!em) {
7314                         err = -ENOMEM;
7315                         goto out;
7316                 }
7317                 em->bdev = NULL;
7318 
7319                 ASSERT(hole_em);
7320                 /*
7321                  * When btrfs_get_extent can't find anything it returns one
7322                  * huge hole.
7323                  *
7324                  * Make sure what it found really fits our range, and adjust
7325                  * to make sure it is based on the start from the caller.
7326                  */
7327                 if (hole_end <= start || hole_em->start > end) {
7328                         free_extent_map(hole_em);
7329                         hole_em = NULL;
7330                 } else {
7331                         hole_start = max(hole_em->start, start);
7332                         hole_len = hole_end - hole_start;
7333                 }
7334 
7335                 if (hole_em && delalloc_start > hole_start) {
7336                         /*
7337                          * Our hole starts before our delalloc, so we have to
7338                          * return just the parts of the hole that go until the
7339                          * delalloc starts
7340                          */
7341                         em->len = min(hole_len, delalloc_start - hole_start);
7342                         em->start = hole_start;
7343                         em->orig_start = hole_start;
7344                         /*
7345                          * Don't adjust block start at all, it is fixed at
7346                          * EXTENT_MAP_HOLE
7347                          */
7348                         em->block_start = hole_em->block_start;
7349                         em->block_len = hole_len;
7350                         if (test_bit(EXTENT_FLAG_PREALLOC, &hole_em->flags))
7351                                 set_bit(EXTENT_FLAG_PREALLOC, &em->flags);
7352                 } else {
7353                         /*
7354                          * Hole is out of passed range or it starts after
7355                          * delalloc range
7356                          */
7357                         em->start = delalloc_start;
7358                         em->len = delalloc_len;
7359                         em->orig_start = delalloc_start;
7360                         em->block_start = EXTENT_MAP_DELALLOC;
7361                         em->block_len = delalloc_len;
7362                 }
7363         } else {
7364                 return hole_em;
7365         }
7366 out:
7367 
7368         free_extent_map(hole_em);
7369         if (err) {
7370                 free_extent_map(em);
7371                 return ERR_PTR(err);
7372         }
7373         return em;
7374 }
7375 
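/*
 * A hedged userspace sketch (illustration only): fetching the first mapped
 * extent of a file with the FIEMAP ioctl, whose hole and delalloc
 * reporting is built on btrfs_get_extent_fiemap() above.
 */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/fs.h>
#include <linux/fiemap.h>

int main(int argc, char **argv)
{
        struct {
                struct fiemap fm;
                struct fiemap_extent ext[1];
        } req;
        int fd;

        if (argc < 2 || (fd = open(argv[1], O_RDONLY)) < 0)
                return 1;
        memset(&req, 0, sizeof(req));
        req.fm.fm_length = FIEMAP_MAX_OFFSET;
        req.fm.fm_extent_count = 1;
        if (ioctl(fd, FS_IOC_FIEMAP, &req.fm) == 0 && req.fm.fm_mapped_extents)
                printf("logical=%llu physical=%llu len=%llu flags=0x%x\n",
                       (unsigned long long)req.fm.fm_extents[0].fe_logical,
                       (unsigned long long)req.fm.fm_extents[0].fe_physical,
                       (unsigned long long)req.fm.fm_extents[0].fe_length,
                       req.fm.fm_extents[0].fe_flags);
        close(fd);
        return 0;
}
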
7376 static struct extent_map *btrfs_create_dio_extent(struct inode *inode,
7377                                                   const u64 start,
7378                                                   const u64 len,
7379                                                   const u64 orig_start,
7380                                                   const u64 block_start,
7381                                                   const u64 block_len,
7382                                                   const u64 orig_block_len,
7383                                                   const u64 ram_bytes,
7384                                                   const int type)
7385 {
7386         struct extent_map *em = NULL;
7387         int ret;
7388 
7389         if (type != BTRFS_ORDERED_NOCOW) {
7390                 em = create_io_em(inode, start, len, orig_start,
7391                                   block_start, block_len, orig_block_len,
7392                                   ram_bytes,
7393                                   BTRFS_COMPRESS_NONE, /* compress_type */
7394                                   type);
7395                 if (IS_ERR(em))
7396                         goto out;
7397         }
7398         ret = btrfs_add_ordered_extent_dio(inode, start, block_start,
7399                                            len, block_len, type);
7400         if (ret) {
7401                 if (em) {
7402                         free_extent_map(em);
7403                         btrfs_drop_extent_cache(BTRFS_I(inode), start,
7404                                                 start + len - 1, 0);
7405                 }
7406                 em = ERR_PTR(ret);
7407         }
7408  out:
7409 
7410         return em;
7411 }
7412 
7413 static struct extent_map *btrfs_new_extent_direct(struct inode *inode,
7414                                                   u64 start, u64 len)
7415 {
7416         struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
7417         struct btrfs_root *root = BTRFS_I(inode)->root;
7418         struct extent_map *em;
7419         struct btrfs_key ins;
7420         u64 alloc_hint;
7421         int ret;
7422 
7423         alloc_hint = get_extent_allocation_hint(inode, start, len);
7424         ret = btrfs_reserve_extent(root, len, len, fs_info->sectorsize,
7425                                    0, alloc_hint, &ins, 1, 1);
7426         if (ret)
7427                 return ERR_PTR(ret);
7428 
7429         em = btrfs_create_dio_extent(inode, start, ins.offset, start,
7430                                      ins.objectid, ins.offset, ins.offset,
7431                                      ins.offset, BTRFS_ORDERED_REGULAR);
7432         btrfs_dec_block_group_reservations(fs_info, ins.objectid);
7433         if (IS_ERR(em))
7434                 btrfs_free_reserved_extent(fs_info, ins.objectid,
7435                                            ins.offset, 1);
7436 
7437         return em;
7438 }
7439 
7440 /*
7441  * returns 1 when the nocow is safe, < 0 on error, 0 if the
7442  * block must be cow'd
7443  */
7444 noinline int can_nocow_extent(struct inode *inode, u64 offset, u64 *len,
7445                               u64 *orig_start, u64 *orig_block_len,
7446                               u64 *ram_bytes)
7447 {
7448         struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
7449         struct btrfs_path *path;
7450         int ret;
7451         struct extent_buffer *leaf;
7452         struct btrfs_root *root = BTRFS_I(inode)->root;
7453         struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
7454         struct btrfs_file_extent_item *fi;
7455         struct btrfs_key key;
7456         u64 disk_bytenr;
7457         u64 backref_offset;
7458         u64 extent_end;
7459         u64 num_bytes;
7460         int slot;
7461         int found_type;
7462         bool nocow = (BTRFS_I(inode)->flags & BTRFS_INODE_NODATACOW);
7463 
7464         path = btrfs_alloc_path();
7465         if (!path)
7466                 return -ENOMEM;
7467 
7468         ret = btrfs_lookup_file_extent(NULL, root, path,
7469                         btrfs_ino(BTRFS_I(inode)), offset, 0);
7470         if (ret < 0)
7471                 goto out;
7472 
7473         slot = path->slots[0];
7474         if (ret == 1) {
7475                 if (slot == 0) {
7476                         /* can't find the item, must cow */
7477                         ret = 0;
7478                         goto out;
7479                 }
7480                 slot--;
7481         }
7482         ret = 0;
7483         leaf = path->nodes[0];
7484         btrfs_item_key_to_cpu(leaf, &key, slot);
7485         if (key.objectid != btrfs_ino(BTRFS_I(inode)) ||
7486             key.type != BTRFS_EXTENT_DATA_KEY) {
7487                 /* not our file or wrong item type, must cow */
7488                 goto out;
7489         }
7490 
7491         if (key.offset > offset) {
7492                 /* Wrong offset, must cow */
7493                 goto out;
7494         }
7495 
7496         fi = btrfs_item_ptr(leaf, slot, struct btrfs_file_extent_item);
7497         found_type = btrfs_file_extent_type(leaf, fi);
7498         if (found_type != BTRFS_FILE_EXTENT_REG &&
7499             found_type != BTRFS_FILE_EXTENT_PREALLOC) {
7500                 /* not a regular extent, must cow */
7501                 goto out;
7502         }
7503 
7504         if (!nocow && found_type == BTRFS_FILE_EXTENT_REG)
7505                 goto out;
7506 
7507         extent_end = key.offset + btrfs_file_extent_num_bytes(leaf, fi);
7508         if (extent_end <= offset)
7509                 goto out;
7510 
7511         disk_bytenr = btrfs_file_extent_disk_bytenr(leaf, fi);
7512         if (disk_bytenr == 0)
7513                 goto out;
7514 
7515         if (btrfs_file_extent_compression(leaf, fi) ||
7516             btrfs_file_extent_encryption(leaf, fi) ||
7517             btrfs_file_extent_other_encoding(leaf, fi))
7518                 goto out;
7519 
7520         /*
7521          * Do the same check as in btrfs_cross_ref_exist but without the
7522          * unnecessary search.
7523          */
7524         if (btrfs_file_extent_generation(leaf, fi) <=
7525             btrfs_root_last_snapshot(&root->root_item))
7526                 goto out;
7527 
7528         backref_offset = btrfs_file_extent_offset(leaf, fi);
7529 
7530         if (orig_start) {
7531                 *orig_start = key.offset - backref_offset;
7532                 *orig_block_len = btrfs_file_extent_disk_num_bytes(leaf, fi);
7533                 *ram_bytes = btrfs_file_extent_ram_bytes(leaf, fi);
7534         }
7535 
7536         if (btrfs_extent_readonly(fs_info, disk_bytenr))
7537                 goto out;
7538 
7539         num_bytes = min(offset + *len, extent_end) - offset;
7540         if (!nocow && found_type == BTRFS_FILE_EXTENT_PREALLOC) {
7541                 u64 range_end;
7542 
7543                 range_end = round_up(offset + num_bytes,
7544                                      root->fs_info->sectorsize) - 1;
7545                 ret = test_range_bit(io_tree, offset, range_end,
7546                                      EXTENT_DELALLOC, 0, NULL);
7547                 if (ret) {
7548                         ret = -EAGAIN;
7549                         goto out;
7550                 }
7551         }
7552 
7553         btrfs_release_path(path);
7554 
7555         /*
7556          * look for other files referencing this extent; if we
7557          * find any we must cow
7558          */
7559 
7560         ret = btrfs_cross_ref_exist(root, btrfs_ino(BTRFS_I(inode)),
7561                                     key.offset - backref_offset, disk_bytenr);
7562         if (ret) {
7563                 ret = 0;
7564                 goto out;
7565         }
7566 
7567         /*
7568          * adjust disk_bytenr and num_bytes to cover just the bytes
7569          * in this extent we are about to write.  If there
7570          * are any csums in that range we have to cow in order
7571          * to keep the csums correct
7572          */
7573         disk_bytenr += backref_offset;
7574         disk_bytenr += offset - key.offset;
7575         if (csum_exist_in_range(fs_info, disk_bytenr, num_bytes))
7576                 goto out;
7577         /*
7578          * all of the above checks have passed; it is safe to overwrite
7579          * this extent without cow
7580          */
7581         *len = num_bytes;
7582         ret = 1;
7583 out:
7584         btrfs_free_path(path);
7585         return ret;
7586 }
7587 
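/*
 * A brief userspace sketch (illustration only, hypothetical fd): besides
 * the NODATACOW inode flag, fallocate(2) creates the
 * BTRFS_FILE_EXTENT_PREALLOC extents that can_nocow_extent() above allows
 * to be overwritten in place.
 */
#define _GNU_SOURCE
#include <fcntl.h>

int prealloc_16m(int fd)
{
        /* mode 0: allocate and extend i_size; written as prealloc extents */
        return fallocate(fd, 0, 0, 16 * 1024 * 1024);
}
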
7588 static int lock_extent_direct(struct inode *inode, u64 lockstart, u64 lockend,
7589                               struct extent_state **cached_state, int writing)
7590 {
7591         struct btrfs_ordered_extent *ordered;
7592         int ret = 0;
7593 
7594         while (1) {
7595                 lock_extent_bits(&BTRFS_I(inode)->io_tree, lockstart, lockend,
7596                                  cached_state);
7597                 /*
7598                  * We're concerned with the entire range that we're going to be
7599                  * doing DIO to, so we need to make sure there's no ordered
7600                  * extents in this range.
7601                  */
7602                 ordered = btrfs_lookup_ordered_range(BTRFS_I(inode), lockstart,
7603                                                      lockend - lockstart + 1);
7604 
7605                 /*
7606                  * We need to make sure there are no buffered pages in this
7607                  * range either, we could have raced between the invalidate in
7608                  * generic_file_direct_write and locking the extent.  The
7609                  * invalidate needs to happen so that reads after a write do not
7610                  * get stale data.
7611                  */
7612                 if (!ordered &&
7613                     (!writing || !filemap_range_has_page(inode->i_mapping,
7614                                                          lockstart, lockend)))
7615                         break;
7616 
7617                 unlock_extent_cached(&BTRFS_I(inode)->io_tree, lockstart, lockend,
7618                                      cached_state);
7619 
7620                 if (ordered) {
7621                         /*
7622                          * If we are doing a DIO read and the ordered extent we
7623                          * found is for a buffered write, we can not wait for it
7624                          * to complete and retry, because if we do so we can
7625                          * deadlock with concurrent buffered writes on page
7626                          * locks. This happens only if our DIO read covers more
7627                          * than one extent map, if at this point it has already
7628                          * created an ordered extent for a previous extent map
7629                          * and locked its range in the inode's io tree, and a
7630                          * concurrent write against that previous extent map's
7631                          * range and this range started (we unlock the ranges
7632                          * in the io tree only when the bios complete and
7633                          * buffered writes always lock pages before attempting
7634                          * to lock range in the io tree).
7635                          */
7636                         if (writing ||
7637                             test_bit(BTRFS_ORDERED_DIRECT, &ordered->flags))
7638                                 btrfs_start_ordered_extent(inode, ordered, 1);
7639                         else
7640                                 ret = -ENOTBLK;
7641                         btrfs_put_ordered_extent(ordered);
7642                 } else {
7643                         /*
7644                          * We could trigger writeback for this range (and wait
7645                          * for it to complete) and then invalidate the pages for
7646                          * this range (through invalidate_inode_pages2_range()),
7647                          * but that can lead us to a deadlock with a concurrent
7648                          * call to readpages() (a buffered read or a defrag call
7649                          * triggered a readahead) on a page lock due to an
7650                          * ordered dio extent we created before but did not have
7651                          * yet a corresponding bio submitted (whence it can not
7652                          * complete), which makes readpages() wait for that
7653                          * ordered extent to complete while holding a lock on
7654                          * that page.
7655                          */
7656                         ret = -ENOTBLK;
7657                 }
7658 
7659                 if (ret)
7660                         break;
7661 
7662                 cond_resched();
7663         }
7664 
7665         return ret;
7666 }
7667 
7668 /* The callers of this must take lock_extent() */
7669 static struct extent_map *create_io_em(struct inode *inode, u64 start, u64 len,
7670                                        u64 orig_start, u64 block_start,
7671                                        u64 block_len, u64 orig_block_len,
7672                                        u64 ram_bytes, int compress_type,
7673                                        int type)
7674 {
7675         struct extent_map_tree *em_tree;
7676         struct extent_map *em;
7677         struct btrfs_root *root = BTRFS_I(inode)->root;
7678         int ret;
7679 
7680         ASSERT(type == BTRFS_ORDERED_PREALLOC ||
7681                type == BTRFS_ORDERED_COMPRESSED ||
7682                type == BTRFS_ORDERED_NOCOW ||
7683                type == BTRFS_ORDERED_REGULAR);
7684 
7685         em_tree = &BTRFS_I(inode)->extent_tree;
7686         em = alloc_extent_map();
7687         if (!em)
7688                 return ERR_PTR(-ENOMEM);
7689 
7690         em->start = start;
7691         em->orig_start = orig_start;
7692         em->len = len;
7693         em->block_len = block_len;
7694         em->block_start = block_start;
7695         em->bdev = root->fs_info->fs_devices->latest_bdev;
7696         em->orig_block_len = orig_block_len;
7697         em->ram_bytes = ram_bytes;
7698         em->generation = -1;
7699         set_bit(EXTENT_FLAG_PINNED, &em->flags);
7700         if (type == BTRFS_ORDERED_PREALLOC) {
7701                 set_bit(EXTENT_FLAG_FILLING, &em->flags);
7702         } else if (type == BTRFS_ORDERED_COMPRESSED) {
7703                 set_bit(EXTENT_FLAG_COMPRESSED, &em->flags);
7704                 em->compress_type = compress_type;
7705         }
7706 
7707         do {
7708                 btrfs_drop_extent_cache(BTRFS_I(inode), em->start,
7709                                 em->start + em->len - 1, 0);
7710                 write_lock(&em_tree->lock);
7711                 ret = add_extent_mapping(em_tree, em, 1);
7712                 write_unlock(&em_tree->lock);
7713                 /*
7714                  * The caller has taken lock_extent(), so nobody should race
7715                  * with us to add this em; retry until the stale one is dropped.
7716                  */
7717         } while (ret == -EEXIST);
7718 
7719         if (ret) {
7720                 free_extent_map(em);
7721                 return ERR_PTR(ret);
7722         }
7723 
7724         /* em got 2 refs now; the caller needs to do free_extent_map once. */
7725         return em;
7726 }
7727 
7728 
7729 static int btrfs_get_blocks_direct_read(struct extent_map *em,
7730                                         struct buffer_head *bh_result,
7731                                         struct inode *inode,
7732                                         u64 start, u64 len)
7733 {
7734         if (em->block_start == EXTENT_MAP_HOLE ||
7735                         test_bit(EXTENT_FLAG_PREALLOC, &em->flags))
7736                 return -ENOENT;
7737 
7738         len = min(len, em->len - (start - em->start));
7739 
7740         bh_result->b_blocknr = (em->block_start + (start - em->start)) >>
7741                 inode->i_blkbits;
7742         bh_result->b_size = len;
7743         bh_result->b_bdev = em->bdev;
7744         set_buffer_mapped(bh_result);
7745 
7746         return 0;
7747 }
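
/*
 * Editor's note: a minimal sketch (not part of the original file) of the
 * buffer_head mapping arithmetic in btrfs_get_blocks_direct_read() above.
 * For a file offset 'start' inside an extent map beginning at em_start whose
 * data starts at disk byte block_start, the device block number is the disk
 * byte address shifted down by the inode's block-size bits.  Names here are
 * hypothetical.
 */
static unsigned long long demo_disk_block(unsigned long long em_start,
                                          unsigned long long block_start,
                                          unsigned long long start,
                                          unsigned int blkbits)
{
        /* disk byte of 'start' is block_start plus its offset in the extent */
        return (block_start + (start - em_start)) >> blkbits;
}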
7748 
7749 static int btrfs_get_blocks_direct_write(struct extent_map **map,
7750                                          struct buffer_head *bh_result,
7751                                          struct inode *inode,
7752                                          struct btrfs_dio_data *dio_data,
7753                                          u64 start, u64 len)
7754 {
7755         struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
7756         struct extent_map *em = *map;
7757         int ret = 0;
7758 
7759         /*
7760          * We don't allocate a new extent in the following cases:
7761          *
7762          * 1) The inode is marked as NODATACOW. In this case we'll just use the
7763          * existing extent.
7764          *
7765          * 2) The extent is marked as PREALLOC. We're good to go here and can
7766          * just use the extent.
7767          */
7768         if (test_bit(EXTENT_FLAG_PREALLOC, &em->flags) ||
7769             ((BTRFS_I(inode)->flags & BTRFS_INODE_NODATACOW) &&
7770              em->block_start != EXTENT_MAP_HOLE)) {
7771                 int type;
7772                 u64 block_start, orig_start, orig_block_len, ram_bytes;
7773 
7774                 if (test_bit(EXTENT_FLAG_PREALLOC, &em->flags))
7775                         type = BTRFS_ORDERED_PREALLOC;
7776                 else
7777                         type = BTRFS_ORDERED_NOCOW;
7778                 len = min(len, em->len - (start - em->start));
7779                 block_start = em->block_start + (start - em->start);
7780 
7781                 if (can_nocow_extent(inode, start, &len, &orig_start,
7782                                      &orig_block_len, &ram_bytes) == 1 &&
7783                     btrfs_inc_nocow_writers(fs_info, block_start)) {
7784                         struct extent_map *em2;
7785 
7786                         em2 = btrfs_create_dio_extent(inode, start, len,
7787                                                       orig_start, block_start,
7788                                                       len, orig_block_len,
7789                                                       ram_bytes, type);
7790                         btrfs_dec_nocow_writers(fs_info, block_start);
7791                         if (type == BTRFS_ORDERED_PREALLOC) {
7792                                 free_extent_map(em);
7793                                 *map = em = em2;
7794                         }
7795 
7796                         if (em2 && IS_ERR(em2)) {
7797                                 ret = PTR_ERR(em2);
7798                                 goto out;
7799                         }
7800                 /*
7801                  * For an inode marked NODATACOW or an extent marked PREALLOC,
7802                  * we use the existing or preallocated extent, so we do not
7803                  * need to adjust btrfs_space_info's bytes_may_use.
7804                  */
7805                         btrfs_free_reserved_data_space_noquota(inode, start,
7806                                                                len);
7807                         goto skip_cow;
7808                 }
7809         }
7810 
7811         /* this will cow the extent */
7812         len = bh_result->b_size;
7813         free_extent_map(em);
7814         *map = em = btrfs_new_extent_direct(inode, start, len);
7815         if (IS_ERR(em)) {
7816                 ret = PTR_ERR(em);
7817                 goto out;
7818         }
7819 
7820         len = min(len, em->len - (start - em->start));
7821 
7822 skip_cow:
7823         bh_result->b_blocknr = (em->block_start + (start - em->start)) >>
7824                 inode->i_blkbits;
7825         bh_result->b_size = len;
7826         bh_result->b_bdev = em->bdev;
7827         set_buffer_mapped(bh_result);
7828 
7829         if (!test_bit(EXTENT_FLAG_PREALLOC, &em->flags))
7830                 set_buffer_new(bh_result);
7831 
7832         /*
7833          * Need to update the i_size under the extent lock so buffered
7834          * readers will get the updated i_size when we unlock.
7835          */
7836         if (!dio_data->overwrite && start + len > i_size_read(inode))
7837                 i_size_write(inode, start + len);
7838 
7839         WARN_ON(dio_data->reserve < len);
7840         dio_data->reserve -= len;
7841         dio_data->unsubmitted_oe_range_end = start + len;
7842         current->journal_info = dio_data;
7843 out:
7844         return ret;
7845 }
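
/*
 * Editor's note: a hypothetical condensation (not part of the original file)
 * of the write-path decision in btrfs_get_blocks_direct_write() above: the
 * extent can be reused without COW only when it is preallocated, or when the
 * inode is NODATACOW and the extent is not a hole; everything else gets a
 * freshly allocated (COW) extent.
 */
static int demo_can_skip_cow(int prealloc, int nodatacow, int is_hole)
{
        return prealloc || (nodatacow && !is_hole);
}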
7846 
7847 static int btrfs_get_blocks_direct(struct inode *inode, sector_t iblock,
7848                                    struct buffer_head *bh_result, int create)
7849 {
7850         struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
7851         struct extent_map *em;
7852         struct extent_state *cached_state = NULL;
7853         struct btrfs_dio_data *dio_data = NULL;
7854         u64 start = iblock << inode->i_blkbits;
7855         u64 lockstart, lockend;
7856         u64 len = bh_result->b_size;
7857         int ret = 0;
7858 
7859         if (!create)
7860                 len = min_t(u64, len, fs_info->sectorsize);
7861 
7862         lockstart = start;
7863         lockend = start + len - 1;
7864 
7865         if (current->journal_info) {
7866                 /*
7867                  * We need to pull our outstanding extents and set
7868                  * journal_info to NULL so that anything that needs to check
7869                  * if there's a transaction doesn't get confused.
7870                  */
7871                 dio_data = current->journal_info;
7872                 current->journal_info = NULL;
7873         }
7874 
7875         /*
7876          * If this errors out it's because we couldn't invalidate pagecache for
7877          * this range and we need to fallback to buffered.
7878          */
7879         if (lock_extent_direct(inode, lockstart, lockend, &cached_state,
7880                                create)) {
7881                 ret = -ENOTBLK;
7882                 goto err;
7883         }
7884 
7885         em = btrfs_get_extent(BTRFS_I(inode), NULL, 0, start, len, 0);
7886         if (IS_ERR(em)) {
7887                 ret = PTR_ERR(em);
7888                 goto unlock_err;
7889         }
7890 
7891         /*
7892          * For INLINE and COMPRESSED extents we need to fall back to buffered
7893          * IO.  INLINE is special, and we could probably kludge it in here, but
7894          * it's still buffered so for safety let's just fall back to the generic
7895          * buffered path.
7896          *
7897          * For COMPRESSED we _have_ to read the entire extent in so we can
7898          * decompress it, so there will be buffering required no matter what we
7899          * do, so go ahead and fall back to buffered.
7900          *
7901          * We return -ENOTBLK because that's what makes DIO go ahead and fall
7902          * back to buffered IO.  Don't blame me, this is the price we pay for
7903          * using the generic code.
7904          */
7905         if (test_bit(EXTENT_FLAG_COMPRESSED, &em->flags) ||
7906             em->block_start == EXTENT_MAP_INLINE) {
7907                 free_extent_map(em);
7908                 ret = -ENOTBLK;
7909                 goto unlock_err;
7910         }
7911 
7912         if (create) {
7913                 ret = btrfs_get_blocks_direct_write(&em, bh_result, inode,
7914                                                     dio_data, start, len);
7915                 if (ret < 0)
7916                         goto unlock_err;
7917 
7918                 unlock_extent_cached(&BTRFS_I(inode)->io_tree, lockstart,
7919                                      lockend, &cached_state);
7920         } else {
7921                 ret = btrfs_get_blocks_direct_read(em, bh_result, inode,
7922                                                    start, len);
7923                 /* Can be negative only if we read from a hole */
7924                 if (ret < 0) {
7925                         ret = 0;
7926                         free_extent_map(em);
7927                         goto unlock_err;
7928                 }
7929                 /*
7930                  * We need to unlock only the end area that we aren't using.
7931                  * The rest is going to be unlocked by the endio routine.
7932                  */
7933                 lockstart = start + bh_result->b_size;
7934                 if (lockstart < lockend) {
7935                         unlock_extent_cached(&BTRFS_I(inode)->io_tree,
7936                                              lockstart, lockend, &cached_state);
7937                 } else {
7938                         free_extent_state(cached_state);
7939                 }
7940         }
7941 
7942         free_extent_map(em);
7943 
7944         return 0;
7945 
7946 unlock_err:
7947         unlock_extent_cached(&BTRFS_I(inode)->io_tree, lockstart, lockend,
7948                              &cached_state);
7949 err:
7950         if (dio_data)
7951                 current->journal_info = dio_data;
7952         return ret;
7953 }
7954 
7955 static inline blk_status_t submit_dio_repair_bio(struct inode *inode,
7956                                                  struct bio *bio,
7957                                                  int mirror_num)
7958 {
7959         struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
7960         blk_status_t ret;
7961 
7962         BUG_ON(bio_op(bio) == REQ_OP_WRITE);
7963 
7964         ret = btrfs_bio_wq_end_io(fs_info, bio, BTRFS_WQ_ENDIO_DIO_REPAIR);
7965         if (ret)
7966                 return ret;
7967 
7968         ret = btrfs_map_bio(fs_info, bio, mirror_num, 0);
7969 
7970         return ret;
7971 }
7972 
7973 static int btrfs_check_dio_repairable(struct inode *inode,
7974                                       struct bio *failed_bio,
7975                                       struct io_failure_record *failrec,
7976                                       int failed_mirror)
7977 {
7978         struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
7979         int num_copies;
7980 
7981         num_copies = btrfs_num_copies(fs_info, failrec->logical, failrec->len);
7982         if (num_copies == 1) {
7983                 /*
7984                  * We only have a single copy of the data, so don't bother
7985                  * with all the retry and error correction code that follows.
7986                  * No matter what the error is, it is very likely to persist.
7987                  */
7988                 btrfs_debug(fs_info,
7989                         "Check DIO Repairable: cannot repair, num_copies=%d, next_mirror %d, failed_mirror %d",
7990                         num_copies, failrec->this_mirror, failed_mirror);
7991                 return 0;
7992         }
7993 
7994         failrec->failed_mirror = failed_mirror;
7995         failrec->this_mirror++;
7996         if (failrec->this_mirror == failed_mirror)
7997                 failrec->this_mirror++;
7998 
7999         if (failrec->this_mirror > num_copies) {
8000                 btrfs_debug(fs_info,
8001                         "Check DIO Repairable: (fail) num_copies=%d, next_mirror %d, failed_mirror %d",
8002                         num_copies, failrec->this_mirror, failed_mirror);
8003                 return 0;
8004         }
8005 
8006         return 1;
8007 }
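
/*
 * Editor's note: a self-contained sketch (not part of the original file) of
 * the mirror-selection logic in btrfs_check_dio_repairable() above: advance
 * this_mirror, skip the mirror that just failed, and give up once every copy
 * has been tried.  Returns 1 if a repair read should be issued.
 */
static int demo_pick_next_mirror(int num_copies, int failed_mirror,
                                 int *this_mirror)
{
        if (num_copies == 1)
                return 0;       /* single copy: the error will likely persist */

        (*this_mirror)++;
        if (*this_mirror == failed_mirror)
                (*this_mirror)++;       /* never re-read the failed mirror */

        return *this_mirror <= num_copies;
}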
8008 
8009 static blk_status_t dio_read_error(struct inode *inode, struct bio *failed_bio,
8010                                    struct page *page, unsigned int pgoff,
8011                                    u64 start, u64 end, int failed_mirror,
8012                                    bio_end_io_t *repair_endio, void *repair_arg)
8013 {
8014         struct io_failure_record *failrec;
8015         struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
8016         struct extent_io_tree *failure_tree = &BTRFS_I(inode)->io_failure_tree;
8017         struct bio *bio;
8018         int isector;
8019         unsigned int read_mode = 0;
8020         int segs;
8021         int ret;
8022         blk_status_t status;
8023         struct bio_vec bvec;
8024 
8025         BUG_ON(bio_op(failed_bio) == REQ_OP_WRITE);
8026 
8027         ret = btrfs_get_io_failure_record(inode, start, end, &failrec);
8028         if (ret)
8029                 return errno_to_blk_status(ret);
8030 
8031         ret = btrfs_check_dio_repairable(inode, failed_bio, failrec,
8032                                          failed_mirror);
8033         if (!ret) {
8034                 free_io_failure(failure_tree, io_tree, failrec);
8035                 return BLK_STS_IOERR;
8036         }
8037 
8038         segs = bio_segments(failed_bio);
8039         bio_get_first_bvec(failed_bio, &bvec);
8040         if (segs > 1 ||
8041             (bvec.bv_len > btrfs_inode_sectorsize(inode)))
8042                 read_mode |= REQ_FAILFAST_DEV;
8043 
8044         isector = start - btrfs_io_bio(failed_bio)->logical;
8045         isector >>= inode->i_sb->s_blocksize_bits;
8046         bio = btrfs_create_repair_bio(inode, failed_bio, failrec, page,
8047                                 pgoff, isector, repair_endio, repair_arg);
8048         bio->bi_opf = REQ_OP_READ | read_mode;
8049 
8050         btrfs_debug(BTRFS_I(inode)->root->fs_info,
8051                     "repair DIO read error: submitting new dio read[%#x] to this_mirror=%d, in_validation=%d",
8052                     read_mode, failrec->this_mirror, failrec->in_validation);
8053 
8054         status = submit_dio_repair_bio(inode, bio, failrec->this_mirror);
8055         if (status) {
8056                 free_io_failure(failure_tree, io_tree, failrec);
8057                 bio_put(bio);
8058         }
8059 
8060         return status;
8061 }
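
/*
 * Editor's note: a minimal sketch (not part of the original file) of the
 * repair-bio sector arithmetic in dio_read_error() above: the sector index
 * within the failed bio is the byte distance from the bio's logical start,
 * scaled down by the block size.  Names are hypothetical.
 */
static int demo_repair_sector_index(unsigned long long start,
                                    unsigned long long bio_logical,
                                    unsigned int blocksize_bits)
{
        return (int)((start - bio_logical) >> blocksize_bits);
}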
8062 
8063 struct btrfs_retry_complete {
8064         struct completion done;
8065         struct inode *inode;
8066         u64 start;
8067         int uptodate;
8068 };
8069 
8070 static void btrfs_retry_endio_nocsum(struct bio *bio)
8071 {
8072         struct btrfs_retry_complete *done = bio->bi_private;
8073         struct inode *inode = done->inode;
8074         struct bio_vec *bvec;
8075         struct extent_io_tree *io_tree, *failure_tree;
8076         struct bvec_iter_all iter_all;
8077 
8078         if (bio->bi_status)
8079                 goto end;
8080 
8081         ASSERT(bio->bi_vcnt == 1);
8082         io_tree = &BTRFS_I(inode)->io_tree;
8083         failure_tree = &BTRFS_I(inode)->io_failure_tree;
8084         ASSERT(bio_first_bvec_all(bio)->bv_len == btrfs_inode_sectorsize(inode));
8085 
8086         done->uptodate = 1;
8087         ASSERT(!bio_flagged(bio, BIO_CLONED));
8088         bio_for_each_segment_all(bvec, bio, iter_all)
8089                 clean_io_failure(BTRFS_I(inode)->root->fs_info, failure_tree,
8090                                  io_tree, done->start, bvec->bv_page,
8091                                  btrfs_ino(BTRFS_I(inode)), 0);
8092 end:
8093         complete(&done->done);
8094         bio_put(bio);
8095 }
8096 
8097 static blk_status_t __btrfs_correct_data_nocsum(struct inode *inode,
8098                                                 struct btrfs_io_bio *io_bio)
8099 {
8100         struct btrfs_fs_info *fs_info;
8101         struct bio_vec bvec;
8102         struct bvec_iter iter;
8103         struct btrfs_retry_complete done;
8104         u64 start;
8105         unsigned int pgoff;
8106         u32 sectorsize;
8107         int nr_sectors;
8108         blk_status_t ret;
8109         blk_status_t err = BLK_STS_OK;
8110 
8111         fs_info = BTRFS_I(inode)->root->fs_info;
8112         sectorsize = fs_info->sectorsize;
8113 
8114         start = io_bio->logical;
8115         done.inode = inode;
8116         io_bio->bio.bi_iter = io_bio->iter;
8117 
8118         bio_for_each_segment(bvec, &io_bio->bio, iter) {
8119                 nr_sectors = BTRFS_BYTES_TO_BLKS(fs_info, bvec.bv_len);
8120                 pgoff = bvec.bv_offset;
8121 
8122 next_block_or_try_again:
8123                 done.uptodate = 0;
8124                 done.start = start;
8125                 init_completion(&done.done);
8126 
8127                 ret = dio_read_error(inode, &io_bio->bio, bvec.bv_page,
8128                                 pgoff, start, start + sectorsize - 1,
8129                                 io_bio->mirror_num,
8130                                 btrfs_retry_endio_nocsum, &done);
8131                 if (ret) {
8132                         err = ret;
8133                         goto next;
8134                 }
8135 
8136                 wait_for_completion_io(&done.done);
8137 
8138                 if (!done.uptodate) {
8139                         /* We might have another mirror, so try again */
8140                         goto next_block_or_try_again;
8141                 }
8142 
8143 next:
8144                 start += sectorsize;
8145 
8146                 nr_sectors--;
8147                 if (nr_sectors) {
8148                         pgoff += sectorsize;
8149                         ASSERT(pgoff < PAGE_SIZE);
8150                         goto next_block_or_try_again;
8151                 }
8152         }
8153 
8154         return err;
8155 }
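
/*
 * Editor's note: a self-contained sketch (not part of the original file) of
 * the per-sector walk in __btrfs_correct_data_nocsum() above: a bio segment
 * of bv_len bytes starting at page offset pgoff is processed one sector at a
 * time, advancing both the logical offset and the page offset.  demo_repair
 * is a hypothetical per-sector retry callback.
 */
static void demo_walk_segment(unsigned int bv_len, unsigned int pgoff,
                              unsigned long long start, unsigned int sectorsize,
                              void (*demo_repair)(unsigned long long start,
                                                  unsigned int pgoff))
{
        unsigned int nr_sectors = bv_len / sectorsize;

        while (nr_sectors--) {
                demo_repair(start, pgoff);      /* retry this one sector */
                start += sectorsize;
                pgoff += sectorsize;
        }
}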
8156 
8157 static void btrfs_retry_endio(struct bio *bio)
8158 {
8159         struct btrfs_retry_complete *done = bio->bi_private;
8160         struct btrfs_io_bio *io_bio = btrfs_io_bio(bio);
8161         struct extent_io_tree *io_tree, *failure_tree;
8162         struct inode *inode = done->inode;
8163         struct bio_vec *bvec;
8164         int uptodate;
8165         int ret;
8166         int i = 0;
8167         struct bvec_iter_all iter_all;
8168 
8169         if (bio->bi_status)
8170                 goto end;
8171 
8172         uptodate = 1;
8173 
8174         ASSERT(bio->bi_vcnt == 1);
8175         ASSERT(bio_first_bvec_all(bio)->bv_len == btrfs_inode_sectorsize(done->inode));
8176 
8177         io_tree = &BTRFS_I(inode)->io_tree;
8178         failure_tree = &BTRFS_I(inode)->io_failure_tree;
8179 
8180         ASSERT(!bio_flagged(bio, BIO_CLONED));
8181         bio_for_each_segment_all(bvec, bio, iter_all) {
8182                 ret = __readpage_endio_check(inode, io_bio, i, bvec->bv_page,
8183                                              bvec->bv_offset, done->start,
8184                                              bvec->bv_len);
8185                 if (!ret)
8186                         clean_io_failure(BTRFS_I(inode)->root->fs_info,
8187                                          failure_tree, io_tree, done->start,
8188                                          bvec->bv_page,
8189                                          btrfs_ino(BTRFS_I(inode)),
8190                                          bvec->bv_offset);
8191                 else
8192                         uptodate = 0;
8193                 i++;
8194         }
8195 
8196         done->uptodate = uptodate;
8197 end:
8198         complete(&done->done);
8199         bio_put(bio);
8200 }
8201 
8202 static blk_status_t __btrfs_subio_endio_read(struct inode *inode,
8203                 struct btrfs_io_bio *io_bio, blk_status_t err)
8204 {
8205         struct btrfs_fs_info *fs_info;
8206         struct bio_vec bvec;
8207         struct bvec_iter iter;
8208         struct btrfs_retry_complete done;
8209         u64 start;
8210         u64 offset = 0;
8211         u32 sectorsize;
8212         int nr_sectors;
8213         unsigned int pgoff;
8214         int csum_pos;
8215         bool uptodate = (err == 0);
8216         int ret;
8217         blk_status_t status;
8218 
8219         fs_info = BTRFS_I(inode)->root->fs_info;
8220         sectorsize = fs_info->sectorsize;
8221 
8222         err = BLK_STS_OK;
8223         start = io_bio->logical;
8224         done.inode = inode;
8225         io_bio->bio.bi_iter = io_bio->iter;
8226 
8227         bio_for_each_segment(bvec, &io_bio->bio, iter) {
8228                 nr_sectors = BTRFS_BYTES_TO_BLKS(fs_info, bvec.bv_len);
8229 
8230                 pgoff = bvec.bv_offset;
8231 next_block:
8232                 if (uptodate) {
8233                         csum_pos = BTRFS_BYTES_TO_BLKS(fs_info, offset);
8234                         ret = __readpage_endio_check(inode, io_bio, csum_pos,
8235                                         bvec.bv_page, pgoff, start, sectorsize);
8236                         if (likely(!ret))
8237                                 goto next;
8238                 }
8239 try_again:
8240                 done.uptodate = 0;
8241                 done.start = start;
8242                 init_completion(&done.done);
8243 
8244                 status = dio_read_error(inode, &io_bio->bio, bvec.bv_page,
8245                                         pgoff, start, start + sectorsize - 1,
8246                                         io_bio->mirror_num, btrfs_retry_endio,
8247                                         &done);
8248                 if (status) {
8249                         err = status;
8250                         goto next;
8251                 }
8252 
8253                 wait_for_completion_io(&done.done);
8254 
8255                 if (!done.uptodate) {
8256                         /* We might have another mirror, so try again */
8257                         goto try_again;
8258                 }
8259 next:
8260                 offset += sectorsize;
8261                 start += sectorsize;
8262 
8263                 ASSERT(nr_sectors);
8264 
8265                 nr_sectors--;
8266                 if (nr_sectors) {
8267                         pgoff += sectorsize;
8268                         ASSERT(pgoff < PAGE_SIZE);
8269                         goto next_block;
8270                 }
8271         }
8272 
8273         return err;
8274 }
8275 
8276 static blk_status_t btrfs_subio_endio_read(struct inode *inode,
8277                 struct btrfs_io_bio *io_bio, blk_status_t err)
8278 {
8279         bool skip_csum = BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM;
8280 
8281         if (skip_csum) {
8282                 if (unlikely(err))
8283                         return __btrfs_correct_data_nocsum(inode, io_bio);
8284                 else
8285                         return BLK_STS_OK;
8286         } else {
8287                 return __btrfs_subio_endio_read(inode, io_bio, err);
8288         }
8289 }
8290 
8291 static void btrfs_endio_direct_read(struct bio *bio)
8292 {
8293         struct btrfs_dio_private *dip = bio->bi_private;
8294         struct inode *inode = dip->inode;
8295         struct bio *dio_bio;
8296         struct btrfs_io_bio *io_bio = btrfs_io_bio(bio);
8297         blk_status_t err = bio->bi_status;
8298 
8299         if (dip->flags & BTRFS_DIO_ORIG_BIO_SUBMITTED)
8300                 err = btrfs_subio_endio_read(inode, io_bio, err);
8301 
8302         unlock_extent(&BTRFS_I(inode)->io_tree, dip->logical_offset,
8303                       dip->logical_offset + dip->bytes - 1);
8304         dio_bio = dip->dio_bio;
8305 
8306         kfree(dip);
8307 
8308         dio_bio->bi_status = err;
8309         dio_end_io(dio_bio);
8310         btrfs_io_bio_free_csum(io_bio);
8311         bio_put(bio);
8312 }
8313 
8314 static void __endio_write_update_ordered(struct inode *inode,
8315                                          const u64 offset, const u64 bytes,
8316                                          const bool uptodate)
8317 {
8318         struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
8319         struct btrfs_ordered_extent *ordered = NULL;
8320         struct btrfs_workqueue *wq;
8321         u64 ordered_offset = offset;
8322         u64 ordered_bytes = bytes;
8323         u64 last_offset;
8324 
8325         if (btrfs_is_free_space_inode(BTRFS_I(inode)))
8326                 wq = fs_info->endio_freespace_worker;
8327         else
8328                 wq = fs_info->endio_write_workers;
8329 
8330         while (ordered_offset < offset + bytes) {
8331                 last_offset = ordered_offset;
8332                 if (btrfs_dec_test_first_ordered_pending(inode, &ordered,
8333                                                            &ordered_offset,
8334                                                            ordered_bytes,
8335                                                            uptodate)) {
8336                         btrfs_init_work(&ordered->work, finish_ordered_fn, NULL,
8337                                         NULL);
8338                         btrfs_queue_work(wq, &ordered->work);
8339                 }
8340                 /*
8341                  * If btrfs_dec_test_first_ordered_pending() does not find
8342                  * any ordered extent in the range, we can exit.
8343                  */
8344                 if (ordered_offset == last_offset)
8345                         return;
8346                 /*
8347                  * Our bio might span multiple ordered extents. In this case
8348                  * we keep going until we have accounted the whole dio.
8349                  */
8350                 if (ordered_offset < offset + bytes) {
8351                         ordered_bytes = offset + bytes - ordered_offset;
8352                         ordered = NULL;
8353                 }
8354         }
8355 }
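
/*
 * Editor's note: a self-contained sketch (not part of the original file) of
 * the accounting walk in __endio_write_update_ordered() above.  A single DIO
 * write may span several ordered extents, so completion keeps going until
 * the whole [offset, offset + bytes) range is accounted, bailing out when no
 * ordered extent is found.  demo_complete_next is a hypothetical callback
 * returning the new offset, or the old offset if nothing was found.
 */
static void demo_account_range(unsigned long long offset,
                               unsigned long long bytes,
                               unsigned long long (*demo_complete_next)(
                                       unsigned long long cur,
                                       unsigned long long remaining))
{
        unsigned long long cur = offset;

        while (cur < offset + bytes) {
                unsigned long long next =
                        demo_complete_next(cur, offset + bytes - cur);

                if (next == cur)        /* no ordered extent found: done */
                        return;
                cur = next;
        }
}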
8356 
8357 static void btrfs_endio_direct_write(struct bio *bio)
8358 {
8359         struct btrfs_dio_private *dip = bio->bi_private;
8360         struct bio *dio_bio = dip->dio_bio;
8361 
8362         __endio_write_update_ordered(dip->inode, dip->logical_offset,
8363                                      dip->bytes, !bio->bi_status);
8364 
8365         kfree(dip);
8366 
8367         dio_bio->bi_status = bio->bi_status;
8368         dio_end_io(dio_bio);
8369         bio_put(bio);
8370 }
8371 
8372 static blk_status_t btrfs_submit_bio_start_direct_io(void *private_data,
8373                                     struct bio *bio, u64 offset)
8374 {
8375         struct inode *inode = private_data;
8376         blk_status_t ret;
8377         ret = btrfs_csum_one_bio(inode, bio, offset, 1);
8378         BUG_ON(ret); /* -ENOMEM */
8379         return 0;
8380 }
8381 
8382 static void btrfs_end_dio_bio(struct bio *bio)
8383 {
8384         struct btrfs_dio_private *dip = bio->bi_private;
8385         blk_status_t err = bio->bi_status;
8386 
8387         if (err)
8388                 btrfs_warn(BTRFS_I(dip->inode)->root->fs_info,
8389                            "direct IO failed ino %llu rw %d,%u sector %#Lx len %u err no %d",
8390                            btrfs_ino(BTRFS_I(dip->inode)), bio_op(bio),
8391                            bio->bi_opf,
8392                            (unsigned long long)bio->bi_iter.bi_sector,
8393                            bio->bi_iter.bi_size, err);
8394 
8395         if (dip->subio_endio)
8396                 err = dip->subio_endio(dip->inode, btrfs_io_bio(bio), err);
8397 
8398         if (err) {
8399                 /*
8400                  * We want the setting of the errors flag to be visible
8401                  * before decrementing the reference count. We don't need a
8402                  * barrier since atomic operations with a return value are
8403                  * fully ordered as per atomic_t.txt.
8404                  */
8405                 dip->errors = 1;
8406         }
8407 
8408         /* if there are more bios still pending for this dio, just exit */
8409         if (!atomic_dec_and_test(&dip->pending_bios))
8410                 goto out;
8411 
8412         if (dip->errors) {
8413                 bio_io_error(dip->orig_bio);
8414         } else {
8415                 dip->dio_bio->bi_status = BLK_STS_OK;
8416                 bio_endio(dip->orig_bio);
8417         }
8418 out:
8419         bio_put(bio);
8420 }
8421 
8422 static inline blk_status_t btrfs_lookup_and_bind_dio_csum(struct inode *inode,
8423                                                  struct btrfs_dio_private *dip,
8424                                                  struct bio *bio,
8425                                                  u64 file_offset)
8426 {
8427         struct btrfs_io_bio *io_bio = btrfs_io_bio(bio);
8428         struct btrfs_io_bio *orig_io_bio = btrfs_io_bio(dip->orig_bio);
8429         u16 csum_size;
8430         blk_status_t ret;
8431 
8432         /*
8433          * We load all the csum data we need when we submit
8434          * the first bio to reduce the csum tree search and
8435          * contention.
8436          */
8437         if (dip->logical_offset == file_offset) {
8438                 ret = btrfs_lookup_bio_sums_dio(inode, dip->orig_bio,
8439                                                 file_offset);
8440                 if (ret)
8441                         return ret;
8442         }
8443 
8444         if (bio == dip->orig_bio)
8445                 return 0;
8446 
8447         file_offset -= dip->logical_offset;
8448         file_offset >>= inode->i_sb->s_blocksize_bits;
8449         csum_size = btrfs_super_csum_size(btrfs_sb(inode->i_sb)->super_copy);
8450         io_bio->csum = orig_io_bio->csum + csum_size * file_offset;
8451 
8452         return 0;
8453 }
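
/*
 * Editor's note: a minimal sketch (not part of the original file) of the
 * checksum-binding arithmetic in btrfs_lookup_and_bind_dio_csum() above: a
 * cloned bio starting file_offset bytes after the original bio reuses the
 * original's csum array, advanced by one csum_size entry per block.  Names
 * are hypothetical.
 */
static unsigned char *demo_csum_for_clone(unsigned char *orig_csums,
                                          unsigned long long file_offset,
                                          unsigned long long orig_offset,
                                          unsigned int blocksize_bits,
                                          unsigned short csum_size)
{
        unsigned long long block_index =
                (file_offset - orig_offset) >> blocksize_bits;

        return orig_csums + (unsigned long long)csum_size * block_index;
}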
8454 
8455 static inline blk_status_t btrfs_submit_dio_bio(struct bio *bio,
8456                 struct inode *inode, u64 file_offset, int async_submit)
8457 {
8458         struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
8459         struct btrfs_dio_private *dip = bio->bi_private;
8460         bool write = bio_op(bio) == REQ_OP_WRITE;
8461         blk_status_t ret;
8462 
8463         /* Check btrfs_submit_bio_hook() for rules about async submit. */
8464         if (async_submit)
8465                 async_submit = !atomic_read(&BTRFS_I(inode)->sync_writers);
8466 
8467         if (!write) {
8468                 ret = btrfs_bio_wq_end_io(fs_info, bio, BTRFS_WQ_ENDIO_DATA);
8469                 if (ret)
8470                         goto err;
8471         }
8472 
8473         if (BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM)
8474                 goto map;
8475 
8476         if (write && async_submit) {
8477                 ret = btrfs_wq_submit_bio(fs_info, bio, 0, 0,
8478                                           file_offset, inode,
8479                                           btrfs_submit_bio_start_direct_io);
8480                 goto err;
8481         } else if (write) {
8482                 /*
8483                  * If we aren't doing async submit, calculate the csum of the
8484                  * bio now.
8485                  */
8486                 ret = btrfs_csum_one_bio(inode, bio, file_offset, 1);
8487                 if (ret)
8488                         goto err;
8489         } else {
8490                 ret = btrfs_lookup_and_bind_dio_csum(inode, dip, bio,
8491                                                      file_offset);
8492                 if (ret)
8493                         goto err;
8494         }
8495 map:
8496         ret = btrfs_map_bio(fs_info, bio, 0, 0);
8497 err:
8498         return ret;
8499 }
8500 
8501 static int btrfs_submit_direct_hook(struct btrfs_dio_private *dip)
8502 {
8503         struct inode *inode = dip->inode;
8504         struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
8505         struct bio *bio;
8506         struct bio *orig_bio = dip->orig_bio;
8507         u64 start_sector = orig_bio->bi_iter.bi_sector;
8508         u64 file_offset = dip->logical_offset;
8509         int async_submit = 0;
8510         u64 submit_len;
8511         int clone_offset = 0;
8512         int clone_len;
8513         int ret;
8514         blk_status_t status;
8515         struct btrfs_io_geometry geom;
8516 
8517         submit_len = orig_bio->bi_iter.bi_size;
8518         ret = btrfs_get_io_geometry(fs_info, btrfs_op(orig_bio),
8519                                     start_sector << 9, submit_len, &geom);
8520         if (ret)
8521                 return -EIO;
8522 
8523         if (geom.len >= submit_len) {
8524                 bio = orig_bio;
8525                 dip->flags |= BTRFS_DIO_ORIG_BIO_SUBMITTED;
8526                 goto submit;
8527         }
8528 
8529         /* Async crcs make it difficult to collect full stripe writes. */
8530         if (btrfs_data_alloc_profile(fs_info) & BTRFS_BLOCK_GROUP_RAID56_MASK)
8531                 async_submit = 0;
8532         else
8533                 async_submit = 1;
8534 
8535         /* bio split */
8536         ASSERT(geom.len <= INT_MAX);
8537         atomic_inc(&dip->pending_bios);
8538         do {
8539                 clone_len = min_t(int, submit_len, geom.len);
8540 
8541                 /*
8542                  * This will never fail as it's passing GFP_NOFS and
8543                  * the allocation is backed by btrfs_bioset.
8544                  */
8545                 bio = btrfs_bio_clone_partial(orig_bio, clone_offset,
8546                                               clone_len);
8547                 bio->bi_private = dip;
8548                 bio->bi_end_io = btrfs_end_dio_bio;
8549                 btrfs_io_bio(bio)->logical = file_offset;
8550 
8551                 ASSERT(submit_len >= clone_len);
8552                 submit_len -= clone_len;
8553                 if (submit_len == 0)
8554                         break;
8555 
8556                 /*
8557                  * Increase the count before we submit the bio so we know
8558                  * the end IO handler won't happen before we increase the
8559                  * count. Otherwise, the dip might get freed before we're
8560                  * done setting it up.
8561                  */
8562                 atomic_inc(&dip->pending_bios);
8563 
8564                 status = btrfs_submit_dio_bio(bio, inode, file_offset,
8565                                                 async_submit);
8566                 if (status) {
8567                         bio_put(bio);
8568                         atomic_dec(&dip->pending_bios);
8569                         goto out_err;
8570                 }
8571 
8572                 clone_offset += clone_len;
8573                 start_sector += clone_len >> 9;
8574                 file_offset += clone_len;
8575 
8576                 ret = btrfs_get_io_geometry(fs_info, btrfs_op(orig_bio),
8577                                       start_sector << 9, submit_len, &geom);
8578                 if (ret)
8579                         goto out_err;
8580         } while (submit_len > 0);
8581 
8582 submit:
8583         status = btrfs_submit_dio_bio(bio, inode, file_offset, async_submit);
8584         if (!status)
8585                 return 0;
8586 
8587         bio_put(bio);
8588 out_err:
8589         dip->errors = 1;
8590         /*
8591          * Before the atomic variable reaches zero, we must make sure
8592          * dip->errors is perceived to be set. This ordering is ensured by
8593          * the fact that atomic operations with a return value are fully
8594          * ordered as per atomic_t.txt.
8595          */
8596         if (atomic_dec_and_test(&dip->pending_bios))
8597                 bio_io_error(dip->orig_bio);
8598 
8599         /* bio_end_io() will handle error, so we needn't return it */
8600         return 0;
8601 }
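
/*
 * Editor's note: a self-contained sketch (not part of the original file) of
 * the split loop in btrfs_submit_direct_hook() above: a large DIO is carved
 * into clones that never cross a stripe boundary, with the per-offset limit
 * coming from a geometry callback.  demo_geom_len stands in for
 * btrfs_get_io_geometry() and is hypothetical.
 */
static void demo_split_and_submit(unsigned long long offset,
                                  unsigned long long len,
                                  unsigned long long (*demo_geom_len)(
                                          unsigned long long offset),
                                  void (*demo_submit)(unsigned long long offset,
                                                      unsigned long long len))
{
        while (len) {
                /* largest chunk that stays inside the current stripe */
                unsigned long long chunk = demo_geom_len(offset);

                if (chunk > len)
                        chunk = len;
                demo_submit(offset, chunk);
                offset += chunk;
                len -= chunk;
        }
}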
8602 
8603 static void btrfs_submit_direct(struct bio *dio_bio, struct inode *inode,
8604                                 loff_t file_offset)
8605 {
8606         struct btrfs_dio_private *dip = NULL;
8607         struct bio *bio = NULL;
8608         struct btrfs_io_bio *io_bio;
8609         bool write = (bio_op(dio_bio) == REQ_OP_WRITE);
8610         int ret = 0;
8611 
8612         bio = btrfs_bio_clone(dio_bio);
8613 
8614         dip = kzalloc(sizeof(*dip), GFP_NOFS);
8615         if (!dip) {
8616                 ret = -ENOMEM;
8617                 goto free_ordered;
8618         }
8619 
8620         dip->private = dio_bio->bi_private;
8621         dip->inode = inode;
8622         dip->logical_offset = file_offset;
8623         dip->bytes = dio_bio->bi_iter.bi_size;
8624         dip->disk_bytenr = (u64)dio_bio->bi_iter.bi_sector << 9;
8625         bio->bi_private = dip;
8626         dip->orig_bio = bio;
8627         dip->dio_bio = dio_bio;
8628         atomic_set(&dip->pending_bios, 0);
8629         io_bio = btrfs_io_bio(bio);
8630         io_bio->logical = file_offset;
8631 
8632         if (write) {
8633                 bio->bi_end_io = btrfs_endio_direct_write;
8634         } else {
8635                 bio->bi_end_io = btrfs_endio_direct_read;
8636                 dip->subio_endio = btrfs_subio_endio_read;
8637         }
8638 
8639         /*
8640          * Reset the range for unsubmitted ordered extents (to a 0 length range)
8641          * even if we fail to submit a bio, because in such case we do the
8642          * corresponding error handling below and it must not be done a second
8643          * time by btrfs_direct_IO().
8644          */
8645         if (write) {
8646                 struct btrfs_dio_data *dio_data = current->journal_info;
8647 
8648                 dio_data->unsubmitted_oe_range_end = dip->logical_offset +
8649                         dip->bytes;
8650                 dio_data->unsubmitted_oe_range_start =
8651                         dio_data->unsubmitted_oe_range_end;
8652         }
8653 
8654         ret = btrfs_submit_direct_hook(dip);
8655         if (!ret)
8656                 return;
8657 
8658         btrfs_io_bio_free_csum(io_bio);
8659 
8660 free_ordered:
8661         /*
8662          * If we arrived here it means we either failed to submit the dip,
8663          * failed to clone the dio_bio, or failed to allocate the dip. If we
8664          * cloned the dio_bio and allocated the dip, we can just call
8665          * bio_endio against our io_bio so that we get proper resource
8666          * cleanup if we fail to submit the dip; otherwise, we must do the
8667          * same as btrfs_endio_direct_[write|read] because we can't call these
8668          * callbacks - they require an allocated dip and a clone of dio_bio.
8669          */
8670         if (bio && dip) {
8671                 bio_io_error(bio);
8672                 /*
8673                  * The end io callbacks free our dip, do the final put on bio
8674                  * and all the cleanup and final put for dio_bio (through
8675                  * dio_end_io()).
8676                  */
8677                 dip = NULL;
8678                 bio = NULL;
8679         } else {
8680                 if (write)
8681                         __endio_write_update_ordered(inode,
8682                                                 file_offset,
8683                                                 dio_bio->bi_iter.bi_size,
8684                                                 false);
8685                 else
8686                         unlock_extent(&BTRFS_I(inode)->io_tree, file_offset,
8687                               file_offset + dio_bio->bi_iter.bi_size - 1);
8688 
8689                 dio_bio->bi_status = BLK_STS_IOERR;
8690                 /*
8691                  * Releases and cleans up our dio_bio, no need to bio_put()
8692                  * nor bio_endio()/bio_io_error() against dio_bio.
8693                  */
8694                 dio_end_io(dio_bio);
8695         }
8696         if (bio)
8697                 bio_put(bio);
8698         kfree(dip);
8699 }
8700 
8701 static ssize_t check_direct_IO(struct btrfs_fs_info *fs_info,
8702                                const struct iov_iter *iter, loff_t offset)
8703 {
8704         int seg;
8705         int i;
8706         unsigned int blocksize_mask = fs_info->sectorsize - 1;
8707         ssize_t retval = -EINVAL;
8708 
8709         if (offset & blocksize_mask)
8710                 goto out;
8711 
8712         if (iov_iter_alignment(iter) & blocksize_mask)
8713                 goto out;
8714 
8715         /* If this is a write we don't need to check anymore */
8716         if (iov_iter_rw(iter) != READ || !iter_is_iovec(iter))
8717                 return 0;
8718         /*
8719          * Check to make sure we don't have duplicate iov_base's in this
8720          * iovec; if we do, return -EINVAL, otherwise we'll get csum errors
8721          * when reading back.
8722          */
8723         for (seg = 0; seg < iter->nr_segs; seg++) {
8724                 for (i = seg + 1; i < iter->nr_segs; i++) {
8725                         if (iter->iov[seg].iov_base == iter->iov[i].iov_base)
8726                                 goto out;
8727                 }
8728         }
8729         retval = 0;
8730 out:
8731         return retval;
8732 }
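
/*
 * Editor's note: a self-contained sketch (not part of the original file) of
 * the checks in check_direct_IO() above: the file offset and the iovec
 * alignment must both be multiples of the sector size, and for reads no two
 * iovecs may share a base address.  The demo_iovec type is hypothetical.
 */
struct demo_iovec {
        void *iov_base;
        unsigned long iov_len;
};

static int demo_dio_ok(unsigned long long offset, unsigned int sectorsize,
                       const struct demo_iovec *iov, int nr_segs, int is_read)
{
        unsigned int mask = sectorsize - 1;
        int i, j;

        if (offset & mask)
                return 0;               /* misaligned file offset */
        for (i = 0; i < nr_segs; i++) {
                if (((unsigned long)iov[i].iov_base | iov[i].iov_len) & mask)
                        return 0;       /* misaligned buffer or length */
                if (!is_read)
                        continue;
                for (j = i + 1; j < nr_segs; j++)
                        if (iov[i].iov_base == iov[j].iov_base)
                                return 0;       /* duplicate base: csum risk */
        }
        return 1;
}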
8733 
8734 static ssize_t btrfs_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
8735 {
8736         struct file *file = iocb->ki_filp;
8737         struct inode *inode = file->f_mapping->host;
8738         struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
8739         struct btrfs_dio_data dio_data = { 0 };
8740         struct extent_changeset *data_reserved = NULL;
8741         loff_t offset = iocb->ki_pos;
8742         size_t count = 0;
8743         int flags = 0;
8744         bool wakeup = true;
8745         bool relock = false;
8746         ssize_t ret;
8747 
8748         if (check_direct_IO(fs_info, iter, offset))
8749                 return 0;
8750 
8751         inode_dio_begin(inode);
8752 
8753         /*
8754          * The generic stuff only does filemap_write_and_wait_range, which
8755          * isn't enough if we've written compressed pages to this area, so
8756          * we need to flush the dirty pages again to make absolutely sure
8757          * that any outstanding dirty pages are on disk.
8758          */
8759         count = iov_iter_count(iter);
8760         if (test_bit(BTRFS_INODE_HAS_ASYNC_EXTENT,
8761                      &BTRFS_I(inode)->runtime_flags))
8762                 filemap_fdatawrite_range(inode->i_mapping, offset,
8763                                          offset + count - 1);
8764 
8765         if (iov_iter_rw(iter) == WRITE) {
8766                 /*
8767                  * If the write DIO is beyond the EOF, we need to update
8768                  * the isize, but it is protected by i_mutex, so we cannot
8769                  * unlock the i_mutex in this case.
8770                  */
8771                 if (offset + count <= inode->i_size) {
8772                         dio_data.overwrite = 1;
8773                         inode_unlock(inode);
8774                         relock = true;
8775                 } else if (iocb->ki_flags & IOCB_NOWAIT) {
8776                         ret = -EAGAIN;
8777                         goto out;
8778                 }
8779                 ret = btrfs_delalloc_reserve_space(inode, &data_reserved,
8780                                                    offset, count);
8781                 if (ret)
8782                         goto out;
8783 
8784                 /*
8785                  * We need to know how many extents we reserved so that we can
8786                  * do the accounting properly if we go over the number we
8787                  * originally calculated.  Abuse current->journal_info for this.
8788                  */
8789                 dio_data.reserve = round_up(count,
8790                                             fs_info->sectorsize);
8791                 dio_data.unsubmitted_oe_range_start = (u64)offset;
8792                 dio_data.unsubmitted_oe_range_end = (u64)offset;
8793                 current->journal_info = &dio_data;
8794                 down_read(&BTRFS_I(inode)->dio_sem);
8795         } else if (test_bit(BTRFS_INODE_READDIO_NEED_LOCK,
8796                                      &BTRFS_I(inode)->runtime_flags)) {
8797                 inode_dio_end(inode);
8798                 flags = DIO_LOCKING | DIO_SKIP_HOLES;
8799                 wakeup = false;
8800         }
8801 
8802         ret = __blockdev_direct_IO(iocb, inode,
8803                                    fs_info->fs_devices->latest_bdev,
8804                                    iter, btrfs_get_blocks_direct, NULL,
8805                                    btrfs_submit_direct, flags);
8806         if (iov_iter_rw(iter) == WRITE) {
8807                 up_read(&BTRFS_I(inode)->dio_sem);
8808                 current->journal_info = NULL;
8809                 if (ret < 0 && ret != -EIOCBQUEUED) {
8810                         if (dio_data.reserve)
8811                                 btrfs_delalloc_release_space(inode, data_reserved,
8812                                         offset, dio_data.reserve, true);
8813                         /*
8814                          * On error we might have left some ordered extents
8815                          * without submitting corresponding bios for them, so
8816                          * clean them up to avoid other tasks getting them
8817                          * and waiting for them to complete forever.
8818                          */
8819                         if (dio_data.unsubmitted_oe_range_start <
8820                             dio_data.unsubmitted_oe_range_end)
8821                                 __endio_write_update_ordered(inode,
8822                                         dio_data.unsubmitted_oe_range_start,
8823                                         dio_data.unsubmitted_oe_range_end -
8824                                         dio_data.unsubmitted_oe_range_start,
8825                                         false);
8826                 } else if (ret >= 0 && (size_t)ret < count)
8827                         btrfs_delalloc_release_space(inode, data_reserved,
8828                                         offset, count - (size_t)ret, true);
8829                 btrfs_delalloc_release_extents(BTRFS_I(inode), count);
8830         }
8831 out:
8832         if (wakeup)
8833                 inode_dio_end(inode);
8834         if (relock)
8835                 inode_lock(inode);
8836 
8837         extent_changeset_free(data_reserved);
8838         return ret;
8839 }
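
/*
 * Editor's note: a standalone user-space illustration (not part of the
 * original file, guarded out of any build) of the alignment rules that
 * btrfs_direct_IO() enforces: with O_DIRECT both the buffer and the file
 * offset must be sector aligned, hence posix_memalign().  The path is
 * hypothetical and error handling is trimmed for brevity.
 */
#if 0   /* user-space demo only */
#include <fcntl.h>
#include <stdlib.h>
#include <unistd.h>

int main(void)
{
        void *buf;
        int fd = open("/mnt/btrfs/file", O_RDONLY | O_DIRECT);

        if (fd < 0 || posix_memalign(&buf, 4096, 4096))
                return 1;
        pread(fd, buf, 4096, 0);        /* aligned length and offset */
        free(buf);
        close(fd);
        return 0;
}
#endif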
8840 
8841 #define BTRFS_FIEMAP_FLAGS      (FIEMAP_FLAG_SYNC)
8842 
8843 static int btrfs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
8844                 __u64 start, __u64 len)
8845 {
8846         int     ret;
8847 
8848         ret = fiemap_check_flags(fieinfo, BTRFS_FIEMAP_FLAGS);
8849         if (ret)
8850                 return ret;
8851 
8852         return extent_fiemap(inode, fieinfo, start, len);
8853 }
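
/*
 * Editor's note: a standalone user-space illustration (not part of the
 * original file, guarded out of any build) of exercising btrfs_fiemap()
 * above through the FS_IOC_FIEMAP ioctl.  FIEMAP_FLAG_SYNC is the only flag
 * accepted here (BTRFS_FIEMAP_FLAGS).  Error handling is trimmed.
 */
#if 0   /* user-space demo only */
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/ioctl.h>
#include <linux/fs.h>
#include <linux/fiemap.h>

int main(int argc, char **argv)
{
        struct fiemap *fm = calloc(1, sizeof(*fm) +
                                   32 * sizeof(struct fiemap_extent));
        int fd = open(argv[1], O_RDONLY);

        if (!fm || fd < 0)
                return 1;
        fm->fm_length = ~0ULL;                  /* map the whole file */
        fm->fm_flags = FIEMAP_FLAG_SYNC;        /* flush dirty data first */
        fm->fm_extent_count = 32;
        if (ioctl(fd, FS_IOC_FIEMAP, fm))
                return 1;
        for (unsigned int i = 0; i < fm->fm_mapped_extents; i++)
                printf("logical %llu physical %llu len %llu\n",
                       (unsigned long long)fm->fm_extents[i].fe_logical,
                       (unsigned long long)fm->fm_extents[i].fe_physical,
                       (unsigned long long)fm->fm_extents[i].fe_length);
        return 0;
}
#endif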
8854 
8855 int btrfs_readpage(struct file *file, struct page *page)
8856 {
8857         struct extent_io_tree *tree;
8858         tree = &BTRFS_I(page->mapping->host)->io_tree;
8859         return extent_read_full_page(tree, page, btrfs_get_extent, 0);
8860 }
8861 
8862 static int btrfs_writepage(struct page *page, struct writeback_control *wbc)
8863 {
8864         struct inode *inode = page->mapping->host;
8865         int ret;
8866 
8867         if (current->flags & PF_MEMALLOC) {
8868                 redirty_page_for_writepage(wbc, page);
8869                 unlock_page(page);
8870                 return 0;
8871         }
8872 
8873         /*
8874          * If we are under memory pressure we will call this directly from the
8875          * VM, we need to make sure we have the inode referenced for the ordered
8876          * extent.  If not just return like we didn't do anything.
8877          */
8878         if (!igrab(inode)) {
8879                 redirty_page_for_writepage(wbc, page);
8880                 return AOP_WRITEPAGE_ACTIVATE;
8881         }
8882         ret = extent_write_full_page(page, wbc);
8883         btrfs_add_delayed_iput(inode);
8884         return ret;
8885 }
8886 
8887 static int btrfs_writepages(struct address_space *mapping,
8888                             struct writeback_control *wbc)
8889 {
8890         return extent_writepages(mapping, wbc);
8891 }
8892 
8893 static int
8894 btrfs_readpages(struct file *file, struct address_space *mapping,
8895                 struct list_head *pages, unsigned nr_pages)
8896 {
8897         return extent_readpages(mapping, pages, nr_pages);
8898 }
8899 
8900 static int __btrfs_releasepage(struct page *page, gfp_t gfp_flags)
8901 {
8902         int ret = try_release_extent_mapping(page, gfp_flags);
8903         if (ret == 1) {
8904                 ClearPagePrivate(page);
8905                 set_page_private(page, 0);
8906                 put_page(page);
8907         }
8908         return ret;
8909 }
8910 
8911 static int btrfs_releasepage(struct page *page, gfp_t gfp_flags)
8912 {
8913         if (PageWriteback(page) || PageDirty(page))
8914                 return 0;
8915         return __btrfs_releasepage(page, gfp_flags);
8916 }
8917 
8918 static void btrfs_invalidatepage(struct page *page, unsigned int offset,
8919                                  unsigned int length)
8920 {
8921         struct inode *inode = page->mapping->host;
8922         struct extent_io_tree *tree;
8923         struct btrfs_ordered_extent *ordered;
8924         struct extent_state *cached_state = NULL;
8925         u64 page_start = page_offset(page);
8926         u64 page_end = page_start + PAGE_SIZE - 1;
8927         u64 start;
8928         u64 end;
8929         int inode_evicting = inode->i_state & I_FREEING;
8930 
8931         /*
8932          * We have the page locked, so new writeback can't start,
8933          * and the dirty bit won't be cleared while we are here.
8934          *
8935          * Wait for IO on this page so that we can safely clear
8936          * the PagePrivate2 bit and do ordered accounting.
8937          */
8938         wait_on_page_writeback(page);
8939 
8940         tree = &BTRFS_I(inode)->io_tree;
8941         if (offset) {
8942                 btrfs_releasepage(page, GFP_NOFS);
8943                 return;
8944         }
8945 
8946         if (!inode_evicting)
8947                 lock_extent_bits(tree, page_start, page_end, &cached_state);
8948 again:
8949         start = page_start;
8950         ordered = btrfs_lookup_ordered_range(BTRFS_I(inode), start,
8951                                         page_end - start + 1);
8952         if (ordered) {
8953                 end = min(page_end, ordered->file_offset + ordered->len - 1);
8954                 /*
8955                  * IO on this page will never be started, so we need
8956                  * to account for any ordered extents now
8957                  */
8958                 if (!inode_evicting)
8959                         clear_extent_bit(tree, start, end,
8960                                          EXTENT_DELALLOC | EXTENT_DELALLOC_NEW |
8961                                          EXTENT_LOCKED | EXTENT_DO_ACCOUNTING |
8962                                          EXTENT_DEFRAG, 1, 0, &cached_state);
8963                 /*
8964                  * Whoever cleared the private bit is responsible
8965                  * for the finish_ordered_io call.
8966                  */
8967                 if (TestClearPagePrivate2(page)) {
8968                         struct btrfs_ordered_inode_tree *tree;
8969                         u64 new_len;
8970 
8971                         tree = &BTRFS_I(inode)->ordered_tree;
8972 
8973                         spin_lock_irq(&tree->lock);
8974                         set_bit(BTRFS_ORDERED_TRUNCATED, &ordered->flags);
8975                         new_len = start - ordered->file_offset;
8976                         if (new_len < ordered->truncated_len)
8977                                 ordered->truncated_len = new_len;
8978                         spin_unlock_irq(&tree->lock);
8979 
8980                         if (btrfs_dec_test_ordered_pending(inode, &ordered,
8981                                                            start,
8982                                                            end - start + 1, 1))
8983                                 btrfs_finish_ordered_io(ordered);
8984                 }
8985                 btrfs_put_ordered_extent(ordered);
8986                 if (!inode_evicting) {
8987                         cached_state = NULL;
8988                         lock_extent_bits(tree, start, end,
8989                                          &cached_state);
8990                 }
8991 
8992                 start = end + 1;
8993                 if (start < page_end)
8994                         goto again;
8995         }
8996 
8997         /*
8998          * Qgroup reserved space handler
8999          * The page here will be in one of two states:
9000          * 1) Already written to disk
9001          *    In this case, its reserved space is released from the data rsv
9002          *    map and will finally be freed by the delayed_ref handler.
9003          *    So even if we call qgroup_free_data(), it won't decrease the
9004          *    reserved space.
9005          * 2) Not written to disk
9006          *    This means the reserved space should be freed here. However,
9007          *    if a truncate invalidates the page (by clearing PageDirty)
9008          *    and the page is accounted for while allocating the extent
9009          *    in btrfs_check_data_free_space(), we let the delayed_ref
9010          *    handler free the entire extent.
9011          */
9012         if (PageDirty(page))
9013                 btrfs_qgroup_free_data(inode, NULL, page_start, PAGE_SIZE);
9014         if (!inode_evicting) {
9015                 clear_extent_bit(tree, page_start, page_end, EXTENT_LOCKED |
9016                                  EXTENT_DELALLOC | EXTENT_DELALLOC_NEW |
9017                                  EXTENT_DO_ACCOUNTING | EXTENT_DEFRAG, 1, 1,
9018                                  &cached_state);
9019 
9020                 __btrfs_releasepage(page, GFP_NOFS);
9021         }
9022 
9023         ClearPageChecked(page);
9024         if (PagePrivate(page)) {
9025                 ClearPagePrivate(page);
9026                 set_page_private(page, 0);
9027                 put_page(page);
9028         }
9029 }
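
/*
 * Editor's note: a minimal sketch (not part of the original file) of the
 * truncated-length update in btrfs_invalidatepage() above: when a page
 * inside an ordered extent is invalidated, the extent's surviving length
 * can only shrink, never grow.
 */
static unsigned long long demo_truncated_len(unsigned long long invalidate_start,
                                             unsigned long long oe_file_offset,
                                             unsigned long long cur_truncated)
{
        unsigned long long new_len = invalidate_start - oe_file_offset;

        return new_len < cur_truncated ? new_len : cur_truncated;
}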
9030 
9031 /*
9032  * btrfs_page_mkwrite() is not allowed to change the file size as it gets
9033  * called from a page fault handler when a page is first dirtied. Hence we must
9034  * be careful to check for EOF conditions here. We set the page up correctly
9035  * for a written page which means we get ENOSPC checking when writing into
9036  * holes and correct delalloc and unwritten extent mapping on filesystems that
9037  * support these features.
9038  *
9039  * We are not allowed to take the i_mutex here so we have to play games to
9040  * protect against truncate races as the page could now be beyond EOF.  Because
9041  * truncate_setsize() writes the inode size before removing pages, once we have
9042  * the page lock we can determine safely if the page is beyond EOF. If it is not
9043  * beyond EOF, then the page is guaranteed safe against truncation until we
9044  * unlock the page.
9045  */
9046 vm_fault_t btrfs_page_mkwrite(struct vm_fault *vmf)
9047 {
9048         struct page *page = vmf->page;
9049         struct inode *inode = file_inode(vmf->vma->vm_file);
9050         struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
9051         struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
9052         struct btrfs_ordered_extent *ordered;
9053         struct extent_state *cached_state = NULL;
9054         struct extent_changeset *data_reserved = NULL;
9055         char *kaddr;
9056         unsigned long zero_start;
9057         loff_t size;
9058         vm_fault_t ret;
9059         int ret2;
9060         int reserved = 0;
9061         u64 reserved_space;
9062         u64 page_start;
9063         u64 page_end;
9064         u64 end;
9065 
9066         reserved_space = PAGE_SIZE;
9067 
9068         sb_start_pagefault(inode->i_sb);
9069         page_start = page_offset(page);
9070         page_end = page_start + PAGE_SIZE - 1;
9071         end = page_end;
9072 
9073         /*
9074          * Reserving delalloc space after obtaining the page lock can lead to
9075          * deadlock. For example, if a dirty page is locked by this function
9076          * and the call to btrfs_delalloc_reserve_space() ends up triggering
9077          * dirty page write out, then the btrfs_writepage() function could
9078          * end up waiting indefinitely to get a lock on the page currently
9079          * being processed by btrfs_page_mkwrite() function.
9080          */
9081         ret2 = btrfs_delalloc_reserve_space(inode, &data_reserved, page_start,
9082                                            reserved_space);
9083         if (!ret2) {
9084                 ret2 = file_update_time(vmf->vma->vm_file);
9085                 reserved = 1;
9086         }
9087         if (ret2) {
9088                 ret = vmf_error(ret2);
9089                 if (reserved)
9090                         goto out;
9091                 goto out_noreserve;
9092         }
9093 
9094         ret = VM_FAULT_NOPAGE; /* make the VM retry the fault */
9095 again:
9096         lock_page(page);
9097         size = i_size_read(inode);
9098 
9099         if ((page->mapping != inode->i_mapping) ||
9100             (page_start >= size)) {
9101                 /* page got truncated out from underneath us */
9102                 goto out_unlock;
9103         }
9104         wait_on_page_writeback(page);
9105 
9106         lock_extent_bits(io_tree, page_start, page_end, &cached_state);
9107         set_page_extent_mapped(page);
9108 
9109         /*
9110          * We can't set the delalloc bits if there are pending ordered
9111          * extents.  Drop our locks and wait for them to finish.
9112          */
9113         ordered = btrfs_lookup_ordered_range(BTRFS_I(inode), page_start,
9114                         PAGE_SIZE);
9115         if (ordered) {
9116                 unlock_extent_cached(io_tree, page_start, page_end,
9117                                      &cached_state);
9118                 unlock_page(page);
9119                 btrfs_start_ordered_extent(inode, ordered, 1);
9120                 btrfs_put_ordered_extent(ordered);
9121                 goto again;
9122         }
9123 
9124         if (page->index == ((size - 1) >> PAGE_SHIFT)) {
9125                 reserved_space = round_up(size - page_start,
9126                                           fs_info->sectorsize);
9127                 if (reserved_space < PAGE_SIZE) {
9128                         end = page_start + reserved_space - 1;
9129                         btrfs_delalloc_release_space(inode, data_reserved,
9130                                         page_start, PAGE_SIZE - reserved_space,
9131                                         true);
9132                 }
9133         }
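        /*
         * The release above only matters when the sectorsize is smaller
         * than the page size: e.g. with 64K pages and a 4K sectorsize, a
         * size 10K past page_start rounds up to a 12K reservation, and the
         * remaining 52K of this page's reservation is returned.
         */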
9134 
9135         /*
9136          * page_mkwrite gets called when the page is first dirtied after it is
9137          * faulted in, but write(2) could also dirty a page and set delalloc
9138          * bits. In that case, for space accounting reasons, we still need to
9139          * clear any delalloc bits within this page range, since we had to
9140          * reserve data and metadata space before lock_page() (see above).
9141          */
9142         clear_extent_bit(&BTRFS_I(inode)->io_tree, page_start, end,
9143                           EXTENT_DELALLOC | EXTENT_DO_ACCOUNTING |
9144                           EXTENT_DEFRAG, 0, 0, &cached_state);
9145 
9146         ret2 = btrfs_set_extent_delalloc(inode, page_start, end, 0,
9147                                         &cached_state);
9148         if (ret2) {
9149                 unlock_extent_cached(io_tree, page_start, page_end,
9150                                      &cached_state);
9151                 ret = VM_FAULT_SIGBUS;
9152                 goto out_unlock;
9153         }
9154         ret2 = 0;
9155 
9156         /* page is wholly or partially inside EOF */
9157         if (page_start + PAGE_SIZE > size)
9158                 zero_start = offset_in_page(size);
9159         else
9160                 zero_start = PAGE_SIZE;
9161 
9162         if (zero_start != PAGE_SIZE) {
9163                 kaddr = kmap(page);
9164                 memset(kaddr + zero_start, 0, PAGE_SIZE - zero_start);
9165                 flush_dcache_page(page);
9166                 kunmap(page);
9167         }
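        /*
         * E.g. on a 4K page system, if i_size is 9000 the page covering
         * [8192, 12288) gets zero_start = 808, so bytes 808..4095 of the
         * page are zeroed here rather than exposing stale data past EOF.
         */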
9168         ClearPageChecked(page);
9169         set_page_dirty(page);
9170         SetPageUptodate(page);
9171 
9172         BTRFS_I(inode)->last_trans = fs_info->generation;
9173         BTRFS_I(inode)->last_sub_trans = BTRFS_I(inode)->root->log_transid;
9174         BTRFS_I(inode)->last_log_commit = BTRFS_I(inode)->root->last_log_commit;
9175 
9176         unlock_extent_cached(io_tree, page_start, page_end, &cached_state);
9177 
9178         if (!ret2) {
9179                 btrfs_delalloc_release_extents(BTRFS_I(inode), PAGE_SIZE);
9180                 sb_end_pagefault(inode->i_sb);
9181                 extent_changeset_free(data_reserved);
9182                 return VM_FAULT_LOCKED;
9183         }
9184 
9185 out_unlock:
9186         unlock_page(page);
9187 out:
9188         btrfs_delalloc_release_extents(BTRFS_I(inode), PAGE_SIZE);
9189         btrfs_delalloc_release_space(inode, data_reserved, page_start,
9190                                      reserved_space, (ret != 0));
9191 out_noreserve:
9192         sb_end_pagefault(inode->i_sb);
9193         extent_changeset_free(data_reserved);
9194         return ret;
9195 }
9196 
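/*
 * Truncate the inode down to inode->i_size, dropping the file extent items
 * beyond the new size.  The work is done in a loop of restartable
 * transactions so the truncate reservation can be refilled between
 * iterations, while the inode item itself is updated from a separate
 * reservation (see the comment below for why the two must be distinct).
 */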
9197 static int btrfs_truncate(struct inode *inode, bool skip_writeback)
9198 {
9199         struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
9200         struct btrfs_root *root = BTRFS_I(inode)->root;
9201         struct btrfs_block_rsv *rsv;
9202         int ret;
9203         struct btrfs_trans_handle *trans;
9204         u64 mask = fs_info->sectorsize - 1;
9205         u64 min_size = btrfs_calc_metadata_size(fs_info, 1);
9206 
9207         if (!skip_writeback) {
9208                 ret = btrfs_wait_ordered_range(inode, inode->i_size & (~mask),
9209                                                (u64)-1);
9210                 if (ret)
9211                         return ret;
9212         }
9213 
9214         /*
9215          * Yes ladies and gentlemen, this is indeed ugly.  We have a couple of
9216          * things going on here:
9217          *
9218          * 1) We need to reserve space to update our inode.
9219          *
9220          * 2) We need to have something to cache all the space that is going to
9221          * be freed up by the truncate operation, but also have some slack
9222          * space reserved in case it uses space during the truncate (thank you
9223          * very much snapshotting).
9224          *
9225          * And we need these to be separate.  The fact is we can use a lot of
9226          * space doing the truncate, and we have no earthly idea how much space
9227          * we will use, so we need the truncate reservation to be separate so it
9228          * doesn't end up using space reserved for updating the inode.  We also
9229          * need to be able to stop the transaction and start a new one, which
9230          * means we need to be able to update the inode several times, and we
9231          * have no way of knowing how many times that will be, so we can't just
9232          * reserve 1 item for the entirety of the operation, so that has to be
9233          * done separately as well.
9234          *
9235          * So that leaves us with
9236          *
9237          * 1) rsv - for the truncate reservation, which we will steal from the
9238          * transaction reservation.
9239          * 2) fs_info->trans_block_rsv - this will have 1 item's worth left for
9240          * updating the inode.
9241          */
9242         rsv = btrfs_alloc_block_rsv(fs_info, BTRFS_BLOCK_RSV_TEMP);
9243         if (!rsv)
9244                 return -ENOMEM;
9245         rsv->size = min_size;
9246         rsv->failfast = 1;
9247 
9248         /*
9249          * 1 for the truncate slack space
9250          * 1 for updating the inode.
9251          */
9252         trans = btrfs_start_transaction(root, 2);
9253         if (IS_ERR(trans)) {
9254                 ret = PTR_ERR(trans);
9255                 goto out;
9256         }
9257 
9258         /* Migrate the slack space for the truncate to our reserve */
9259         ret = btrfs_block_rsv_migrate(&fs_info->trans_block_rsv, rsv,
9260                                       min_size, false);
9261         BUG_ON(ret);
9262 
9263         /*
9264          * If we truncate and then write and fsync, we normally would just
9265          * write the extents that changed, which is a problem if we need to
9266          * first truncate that entire inode.  So set this flag so we write out
9267          * all of the extents in the inode to the sync log so we're completely
9268          * safe.
9269          */
9270         set_bit(BTRFS_INODE_NEEDS_FULL_SYNC, &BTRFS_I(inode)->runtime_flags);
9271         trans->block_rsv = rsv;
9272 
9273         while (1) {
9274                 ret = btrfs_truncate_inode_items(trans, root, inode,
9275                                                  inode->i_size,
9276                                                  BTRFS_EXTENT_DATA_KEY);
9277                 trans->block_rsv = &fs_info->trans_block_rsv;
9278                 if (ret != -ENOSPC && ret != -EAGAIN)
9279                         break;
9280 
9281                 ret = btrfs_update_inode(trans, root, inode);
9282                 if (ret)
9283                         break;
9284 
9285                 btrfs_end_transaction(trans);
9286                 btrfs_btree_balance_dirty(fs_info);
9287 
9288                 trans = btrfs_start_transaction(root, 2);
9289                 if (IS_ERR(trans)) {
9290                         ret = PTR_ERR(trans);
9291                         trans = NULL;
9292                         break;
9293                 }
9294 
9295                 btrfs_block_rsv_release(fs_info, rsv, -1);
9296                 ret = btrfs_block_rsv_migrate(&fs_info->trans_block_rsv,
9297                                               rsv, min_size, false);
9298                 BUG_ON(ret);    /* shouldn't happen */
9299                 trans->block_rsv = rsv;
9300         }
9301 
9302         /*
9303          * We can't call btrfs_truncate_block inside a trans handle as we could
9304          * deadlock with freeze.  If we got NEED_TRUNCATE_BLOCK then we know
9305          * we've truncated everything except the last little bit, and can do
9306          * btrfs_truncate_block and then update the disk_i_size.
9307          */
9308         if (ret == NEED_TRUNCATE_BLOCK) {
9309                 btrfs_end_transaction(trans);
9310                 btrfs_btree_balance_dirty(fs_info);
9311 
9312                 ret = btrfs_truncate_block(inode, inode->i_size, 0, 0);
9313                 if (ret)
9314                         goto out;
9315                 trans = btrfs_start_transaction(root, 1);
9316                 if (IS_ERR(trans)) {
9317                         ret = PTR_ERR(trans);
9318                         goto out;
9319                 }
9320                 btrfs_ordered_update_i_size(inode, inode->i_size, NULL);
9321         }
9322 
9323         if (trans) {
9324                 int ret2;
9325 
9326                 trans->block_rsv = &fs_info->trans_block_rsv;
9327                 ret2 = btrfs_update_inode(trans, root, inode);
9328                 if (ret2 && !ret)
9329                         ret = ret2;
9330 
9331                 ret2 = btrfs_end_transaction(trans);
9332                 if (ret2 && !ret)
9333                         ret = ret2;
9334                 btrfs_btree_balance_dirty(fs_info);
9335         }
9336 out:
9337         btrfs_free_block_rsv(fs_info, rsv);
9338 
9339         return ret;
9340 }
9341 
9342 /*
9343  * create a new subvolume directory/inode (helper for the ioctl).
9344  */
9345 int btrfs_create_subvol_root(struct btrfs_trans_handle *trans,
9346                              struct btrfs_root *new_root,
9347                              struct btrfs_root *parent_root,
9348                              u64 new_dirid)
9349 {
9350         struct inode *inode;
9351         int err;
9352         u64 index = 0;
9353 
9354         inode = btrfs_new_inode(trans, new_root, NULL, "..", 2,
9355                                 new_dirid, new_dirid,
9356                                 S_IFDIR | (~current_umask() & S_IRWXUGO),
9357                                 &index);
9358         if (IS_ERR(inode))
9359                 return PTR_ERR(inode);
9360         inode->i_op = &btrfs_dir_inode_operations;
9361         inode->i_fop = &btrfs_dir_file_operations;
9362 
9363         set_nlink(inode, 1);
9364         btrfs_i_size_write(BTRFS_I(inode), 0);
9365         unlock_new_inode(inode);
9366 
9367         err = btrfs_subvol_inherit_props(trans, new_root, parent_root);
9368         if (err)
9369                 btrfs_err(new_root->fs_info,
9370                           "error inheriting subvolume %llu properties: %d",
9371                           new_root->root_key.objectid, err);
9372 
9373         err = btrfs_update_inode(trans, new_root, inode);
9374 
9375         iput(inode);
9376         return err;
9377 }
9378 
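/*
 * Allocate and initialize an in-memory btrfs inode.  Called by the VFS
 * through super_operations::alloc_inode; returns NULL on allocation
 * failure.
 */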
9379 struct inode *btrfs_alloc_inode(struct super_block *sb)
9380 {
9381         struct btrfs_fs_info *fs_info = btrfs_sb(sb);
9382         struct btrfs_inode *ei;
9383         struct inode *inode;
9384 
9385         ei = kmem_cache_alloc(btrfs_inode_cachep, GFP_KERNEL);
9386         if (!ei)
9387                 return NULL;
9388 
9389         ei->root = NULL;
9390         ei->generation = 0;
9391         ei->last_trans = 0;
9392         ei->last_sub_trans = 0;
9393         ei->logged_trans = 0;
9394         ei->delalloc_bytes = 0;
9395         ei->new_delalloc_bytes = 0;
9396         ei->defrag_bytes = 0;
9397         ei->disk_i_size = 0;
9398         ei->flags = 0;
9399         ei->csum_bytes = 0;
9400         ei->index_cnt = (u64)-1;
9401         ei->dir_index = 0;
9402         ei->last_unlink_trans = 0;
9403         ei->last_log_commit = 0;
9404 
9405         spin_lock_init(&ei->lock);
9406         ei->outstanding_extents = 0;
9407         if (sb->s_magic != BTRFS_TEST_MAGIC)
9408                 btrfs_init_metadata_block_rsv(fs_info, &ei->block_rsv,
9409                                               BTRFS_BLOCK_RSV_DELALLOC);
9410         ei->runtime_flags = 0;
9411         ei->prop_compress = BTRFS_COMPRESS_NONE;
9412         ei->defrag_compress = BTRFS_COMPRESS_NONE;
9413 
9414         ei->delayed_node = NULL;
9415 
9416         ei->i_otime.tv_sec = 0;
9417         ei->i_otime.tv_nsec = 0;
9418 
9419         inode = &ei->vfs_inode;
9420         extent_map_tree_init(&ei->extent_tree);
9421         extent_io_tree_init(fs_info, &ei->io_tree, IO_TREE_INODE_IO, inode);
9422         extent_io_tree_init(fs_info, &ei->io_failure_tree,
9423                             IO_TREE_INODE_IO_FAILURE, inode);
9424         ei->io_tree.track_uptodate = true;
9425         ei->io_failure_tree.track_uptodate = true;
9426         atomic_set(&ei->sync_writers, 0);
9427         mutex_init(&ei->log_mutex);
9428         mutex_init(&ei->delalloc_mutex);
9429         btrfs_ordered_inode_tree_init(&ei->ordered_tree);
9430         INIT_LIST_HEAD(&ei->delalloc_inodes);
9431         INIT_LIST_HEAD(&ei->delayed_iput);
9432         RB_CLEAR_NODE(&ei->rb_node);
9433         init_rwsem(&ei->dio_sem);
9434 
9435         return inode;
9436 }
9437 
9438 #ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
9439 void btrfs_test_destroy_inode(struct inode *inode)
9440 {
9441         btrfs_drop_extent_cache(BTRFS_I(inode), 0, (u64)-1, 0);
9442         kmem_cache_free(btrfs_inode_cachep, BTRFS_I(inode));
9443 }
9444 #endif
9445 
9446 void btrfs_free_inode(struct inode *inode)
9447 {
9448         kmem_cache_free(btrfs_inode_cachep, BTRFS_I(inode));
9449 }
9450 
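/*
 * Tear down the in-memory state of an inode that is going away.  Any
 * ordered extent still attached at this point indicates a bug, so it is
 * logged and dropped: one put for the reference taken by the lookup
 * below, one for the reference held by the ordered tree itself.
 */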
9451 void btrfs_destroy_inode(struct inode *inode)
9452 {
9453         struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
9454         struct btrfs_ordered_extent *ordered;
9455         struct btrfs_root *root = BTRFS_I(inode)->root;
9456 
9457         WARN_ON(!hlist_empty(&inode->i_dentry));
9458         WARN_ON(inode->i_data.nrpages);
9459         WARN_ON(BTRFS_I(inode)->block_rsv.reserved);
9460         WARN_ON(BTRFS_I(inode)->block_rsv.size);
9461         WARN_ON(BTRFS_I(inode)->outstanding_extents);
9462         WARN_ON(BTRFS_I(inode)->delalloc_bytes);
9463         WARN_ON(BTRFS_I(inode)->new_delalloc_bytes);
9464         WARN_ON(BTRFS_I(inode)->csum_bytes);
9465         WARN_ON(BTRFS_I(inode)->defrag_bytes);
9466 
9467         /*
9468          * This can happen when we create an inode, but somebody else also
9469          * created the same inode and we need to destroy the one we already
9470          * created.
9471          */
9472         if (!root)
9473                 return;
9474 
9475         while (1) {
9476                 ordered = btrfs_lookup_first_ordered_extent(inode, (u64)-1);
9477                 if (!ordered)
9478                         break;
9479                 btrfs_err(fs_info,
9480                           "found ordered extent %llu %llu on inode cleanup",
9481                           ordered->file_offset, ordered->len);
9482                 btrfs_remove_ordered_extent(inode, ordered);
9483                 btrfs_put_ordered_extent(ordered);
9484                 btrfs_put_ordered_extent(ordered);
9485         }
9488         btrfs_qgroup_check_reserved_leak(inode);
9489         inode_tree_del(inode);
9490         btrfs_drop_extent_cache(BTRFS_I(inode), 0, (u64)-1, 0);
9491 }
9492 
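/*
 * Decide whether the VFS should evict this inode from the icache once
 * its last reference is dropped.  Returning 1 forces eviction, which we
 * want if the root is gone or the snapshot/subvolume is being deleted.
 */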
9493 int btrfs_drop_inode(struct inode *inode)
9494 {
9495         struct btrfs_root *root = BTRFS_I(inode)->root;
9496 
9497         if (root == NULL)
9498                 return 1;
9499 
9500         /* the snap/subvol tree is on deleting */
9501         if (btrfs_root_refs(&root->root_item) == 0)
9502                 return 1;
9503         else
9504                 return generic_drop_inode(inode);
9505 }
9506 
9507 static void init_once(void *foo)
9508 {
9509         struct btrfs_inode *ei = (struct btrfs_inode *) foo;
9510 
9511         inode_init_once(&ei->vfs_inode);
9512 }
9513 
9514 void __cold btrfs_destroy_cachep(void)
9515 {
9516         /*
9517          * Make sure all delayed rcu free inodes are flushed before we
9518          * destroy the caches.
9519          */
9520         rcu_barrier();
9521         kmem_cache_destroy(btrfs_inode_cachep);
9522         kmem_cache_destroy(btrfs_trans_handle_cachep);
9523         kmem_cache_destroy(btrfs_path_cachep);
9524         kmem_cache_destroy(btrfs_free_space_cachep);
9525         kmem_cache_destroy(btrfs_free_space_bitmap_cachep);
9526 }
9527 
9528 int __init btrfs_init_cachep(void)
9529 {
9530         btrfs_inode_cachep = kmem_cache_create("btrfs_inode",
9531                         sizeof(struct btrfs_inode), 0,
9532                         SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD | SLAB_ACCOUNT,
9533                         init_once);
9534         if (!btrfs_inode_cachep)
9535                 goto fail;
9536 
9537         btrfs_trans_handle_cachep = kmem_cache_create("btrfs_trans_handle",
9538                         sizeof(struct btrfs_trans_handle), 0,
9539                         SLAB_TEMPORARY | SLAB_MEM_SPREAD, NULL);
9540         if (!btrfs_trans_handle_cachep)
9541                 goto fail;
9542 
9543         btrfs_path_cachep = kmem_cache_create("btrfs_path",
9544                         sizeof(struct btrfs_path), 0,
9545                         SLAB_MEM_SPREAD, NULL);
9546         if (!btrfs_path_cachep)
9547                 goto fail;
9548 
9549         btrfs_free_space_cachep = kmem_cache_create("btrfs_free_space",
9550                         sizeof(struct btrfs_free_space), 0,
9551                         SLAB_MEM_SPREAD, NULL);
9552         if (!btrfs_free_space_cachep)
9553                 goto fail;
9554 
9555         btrfs_free_space_bitmap_cachep = kmem_cache_create("btrfs_free_space_bitmap",
9556                                                         PAGE_SIZE, PAGE_SIZE,
9557                                                         SLAB_RED_ZONE, NULL);
9558         if (!btrfs_free_space_bitmap_cachep)
9559                 goto fail;
9560 
9561         return 0;
9562 fail:
9563         btrfs_destroy_cachep();
9564         return -ENOMEM;
9565 }
9566 
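/*
 * Fill in a kstat for stat(2)/statx(2).  On top of the generic attributes
 * we report the subvolume's anonymous device, the inode creation time as
 * btime, and fold not-yet-allocated delalloc bytes into st_blocks (in
 * 512-byte units) so a stat right after a buffered write reflects the
 * space the data will consume.
 *
 * Illustrative userspace example (not part of this file): the btime
 * reported here can be queried with
 *
 *	struct statx stx;
 *	statx(AT_FDCWD, "file", 0, STATX_BTIME, &stx);
 *
 * and then read from stx.stx_btime.
 */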
9567 static int btrfs_getattr(const struct path *path, struct kstat *stat,
9568                          u32 request_mask, unsigned int flags)
9569 {
9570         u64 delalloc_bytes;
9571         struct inode *inode = d_inode(path->dentry);
9572         u32 blocksize = inode->i_sb->s_blocksize;
9573         u32 bi_flags = BTRFS_I(inode)->flags;
9574 
9575         stat->result_mask |= STATX_BTIME;
9576         stat->btime.tv_sec = BTRFS_I(inode)->i_otime.tv_sec;
9577         stat->btime.tv_nsec = BTRFS_I(inode)->i_otime.tv_nsec;
9578         if (bi_flags & BTRFS_INODE_APPEND)
9579                 stat->attributes |= STATX_ATTR_APPEND;
9580         if (bi_flags & BTRFS_INODE_COMPRESS)
9581                 stat->attributes |= STATX_ATTR_COMPRESSED;
9582         if (bi_flags & BTRFS_INODE_IMMUTABLE)
9583                 stat->attributes |= STATX_ATTR_IMMUTABLE;
9584         if (bi_flags & BTRFS_INODE_NODUMP)
9585                 stat->attributes |= STATX_ATTR_NODUMP;
9586 
9587         stat->attributes_mask |= (STATX_ATTR_APPEND |
9588                                   STATX_ATTR_COMPRESSED |
9589                                   STATX_ATTR_IMMUTABLE |
9590                                   STATX_ATTR_NODUMP);
9591 
9592         generic_fillattr(inode, stat);
9593         stat->dev = BTRFS_I(inode)->root->anon_dev;
9594 
9595         spin_lock(&BTRFS_I(inode)->lock);
9596         delalloc_bytes = BTRFS_I(inode)->new_delalloc_bytes;
9597         spin_unlock(&BTRFS_I(inode)->lock);
9598         stat->blocks = (ALIGN(inode_get_bytes(inode), blocksize) +
9599                         ALIGN(delalloc_bytes, blocksize)) >> 9;
9600         return 0;
9601 }
9602 
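/*
 * Atomically exchange two directory entries (RENAME_EXCHANGE).  Both
 * names must exist; each inode is unlinked from its old parent and linked
 * into the other, and the tree log is either pinned and updated with the
 * new names or forced to a full commit when a subvolume is involved.
 */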
9603 static int btrfs_rename_exchange(struct inode *old_dir,
9604                               struct dentry *old_dentry,
9605                               struct inode *new_dir,
9606                               struct dentry *new_dentry)
9607 {
9608         struct btrfs_fs_info *fs_info = btrfs_sb(old_dir->i_sb);
9609         struct btrfs_trans_handle *trans;
9610         struct btrfs_root *root = BTRFS_I(old_dir)->root;
9611         struct btrfs_root *dest = BTRFS_I(new_dir)->root;
9612         struct inode *new_inode = new_dentry->d_inode;
9613         struct inode *old_inode = old_dentry->d_inode;
9614         struct timespec64 ctime = current_time(old_inode);
9615         struct dentry *parent;
9616         u64 old_ino = btrfs_ino(BTRFS_I(old_inode));
9617         u64 new_ino = btrfs_ino(BTRFS_I(new_inode));
9618         u64 old_idx = 0;
9619         u64 new_idx = 0;
9620         int ret;
9621         bool root_log_pinned = false;
9622         bool dest_log_pinned = false;
9623         struct btrfs_log_ctx ctx_root;
9624         struct btrfs_log_ctx ctx_dest;
9625         bool sync_log_root = false;
9626         bool sync_log_dest = false;
9627         bool commit_transaction = false;
9628 
9629         /* we only allow renaming subvolume links between subvolumes */
9630         if (old_ino != BTRFS_FIRST_FREE_OBJECTID && root != dest)
9631                 return -EXDEV;
9632 
9633         btrfs_init_log_ctx(&ctx_root, old_inode);
9634         btrfs_init_log_ctx(&ctx_dest, new_inode);
9635 
9636         /* close the race window with snapshot create/destroy ioctl */
9637         if (old_ino == BTRFS_FIRST_FREE_OBJECTID ||
9638             new_ino == BTRFS_FIRST_FREE_OBJECTID)
9639                 down_read(&fs_info->subvol_sem);
9640 
9641         /*
9642          * We want to reserve the absolute worst case amount of items.  So if
9643          * both inodes are subvols and we need to unlink them then that would
9644          * require 4 item modifications, but if they are both normal inodes it
9645          * would require 5 item modifications, so we'll assume they're normal
9646          * inodes.  So 5 * 2 is 10, plus 2 for the new links, so 12 total items
9647          * should cover the worst case number of items we'll modify.
9648          */
9649         trans = btrfs_start_transaction(root, 12);
9650         if (IS_ERR(trans)) {
9651                 ret = PTR_ERR(trans);
9652                 goto out_notrans;
9653         }
9654 
9655         if (dest != root)
9656                 btrfs_record_root_in_trans(trans, dest);
9657 
9658         /*
9659          * We need to find a free sequence number both in the source and
9660          * in the destination directory for the exchange.
9661          */
9662         ret = btrfs_set_inode_index(BTRFS_I(new_dir), &old_idx);
9663         if (ret)
9664                 goto out_fail;
9665         ret = btrfs_set_inode_index(BTRFS_I(old_dir), &new_idx);
9666         if (ret)
9667                 goto out_fail;
9668 
9669         BTRFS_I(old_inode)->dir_index = 0ULL;
9670         BTRFS_I(new_inode)->dir_index = 0ULL;
9671 
9672         /* Reference for the source. */
9673         if (old_ino == BTRFS_FIRST_FREE_OBJECTID) {
9674                 /* force full log commit if subvolume involved. */
9675                 btrfs_set_log_full_commit(trans);
9676         } else {
9677                 btrfs_pin_log_trans(root);
9678                 root_log_pinned = true;
9679                 ret = btrfs_insert_inode_ref(trans, dest,
9680                                              new_dentry->d_name.name,
9681                                              new_dentry->d_name.len,
9682                                              old_ino,
9683                                              btrfs_ino(BTRFS_I(new_dir)),
9684                                              old_idx);
9685                 if (ret)
9686                         goto out_fail;
9687         }
9688 
9689         /* And now for the dest. */
9690         if (new_ino == BTRFS_FIRST_FREE_OBJECTID) {
9691                 /* force full log commit if subvolume involved. */
9692                 btrfs_set_log_full_commit(trans);
9693         } else {
9694                 btrfs_pin_log_trans(dest);
9695                 dest_log_pinned = true;
9696                 ret = btrfs_insert_inode_ref(trans, root,
9697                                              old_dentry->d_name.name,
9698                                              old_dentry->d_name.len,
9699                                              new_ino,
9700                                              btrfs_ino(BTRFS_I(old_dir)),
9701                                              new_idx);
9702                 if (ret)
9703                         goto out_fail;
9704         }
9705 
9706         /* Update inode version and ctime/mtime. */
9707         inode_inc_iversion(old_dir);
9708         inode_inc_iversion(new_dir);
9709         inode_inc_iversion(old_inode);
9710         inode_inc_iversion(new_inode);
9711         old_dir->i_ctime = old_dir->i_mtime = ctime;
9712         new_dir->i_ctime = new_dir->i_mtime = ctime;
9713         old_inode->i_ctime = ctime;
9714         new_inode->i_ctime = ctime;
9715 
9716         if (old_dentry->d_parent != new_dentry->d_parent) {
9717                 btrfs_record_unlink_dir(trans, BTRFS_I(old_dir),
9718                                 BTRFS_I(old_inode), 1);
9719                 btrfs_record_unlink_dir(trans, BTRFS_I(new_dir),
9720                                 BTRFS_I(new_inode), 1);
9721         }
9722 
9723         /* src is a subvolume */
9724         if (old_ino == BTRFS_FIRST_FREE_OBJECTID) {
9725                 ret = btrfs_unlink_subvol(trans, old_dir, old_dentry);
9726         } else { /* src is an inode */
9727                 ret = __btrfs_unlink_inode(trans, root, BTRFS_I(old_dir),
9728                                            BTRFS_I(old_dentry->d_inode),
9729                                            old_dentry->d_name.name,
9730                                            old_dentry->d_name.len);
9731                 if (!ret)
9732                         ret = btrfs_update_inode(trans, root, old_inode);
9733         }
9734         if (ret) {
9735                 btrfs_abort_transaction(trans, ret);
9736                 goto out_fail;
9737         }
9738 
9739         /* dest is a subvolume */
9740         if (new_ino == BTRFS_FIRST_FREE_OBJECTID) {
9741                 ret = btrfs_unlink_subvol(trans, new_dir, new_dentry);
9742         } else { /* dest is an inode */
9743                 ret = __btrfs_unlink_inode(trans, dest, BTRFS_I(new_dir),
9744                                            BTRFS_I(new_dentry->d_inode),
9745                                            new_dentry->d_name.name,
9746                                            new_dentry->d_name.len);
9747                 if (!ret)
9748                         ret = btrfs_update_inode(trans, dest, new_inode);
9749         }
9750         if (ret) {
9751                 btrfs_abort_transaction(trans, ret);
9752                 goto out_fail;
9753         }
9754 
9755         ret = btrfs_add_link(trans, BTRFS_I(new_dir), BTRFS_I(old_inode),
9756                              new_dentry->d_name.name,
9757                              new_dentry->d_name.len, 0, old_idx);
9758         if (ret) {
9759                 btrfs_abort_transaction(trans, ret);
9760                 goto out_fail;
9761         }
9762 
9763         ret = btrfs_add_link(trans, BTRFS_I(old_dir), BTRFS_I(new_inode),
9764                              old_dentry->d_name.name,
9765                              old_dentry->d_name.len, 0, new_idx);
9766         if (ret) {
9767                 btrfs_abort_transaction(trans, ret);
9768                 goto out_fail;
9769         }
9770 
9771         if (old_inode->i_nlink == 1)
9772                 BTRFS_I(old_inode)->dir_index = old_idx;
9773         if (new_inode->i_nlink == 1)
9774                 BTRFS_I(new_inode)->dir_index = new_idx;
9775 
9776         if (root_log_pinned) {
9777                 parent = new_dentry->d_parent;
9778                 ret = btrfs_log_new_name(trans, BTRFS_I(old_inode),
9779                                          BTRFS_I(old_dir), parent,
9780                                          false, &ctx_root);
9781                 if (ret == BTRFS_NEED_LOG_SYNC)
9782                         sync_log_root = true;
9783                 else if (ret == BTRFS_NEED_TRANS_COMMIT)
9784                         commit_transaction = true;
9785                 ret = 0;
9786                 btrfs_end_log_trans(root);
9787                 root_log_pinned = false;
9788         }
9789         if (dest_log_pinned) {
9790                 if (!commit_transaction) {
9791                         parent = old_dentry->d_parent;
9792                         ret = btrfs_log_new_name(trans, BTRFS_I(new_inode),
9793                                                  BTRFS_I(new_dir), parent,
9794                                                  false, &ctx_dest);
9795                         if (ret == BTRFS_NEED_LOG_SYNC)
9796                                 sync_log_dest = true;
9797                         else if (ret == BTRFS_NEED_TRANS_COMMIT)
9798                                 commit_transaction = true;
9799                         ret = 0;
9800                 }
9801                 btrfs_end_log_trans(dest);
9802                 dest_log_pinned = false;
9803         }
9804 out_fail:
9805         /*
9806          * If we have pinned a log and an error happened, we unpin tasks
9807          * trying to sync the log and force them to fall back to a transaction
9808          * commit if the log currently contains any of the inodes involved in
9809          * this rename operation (to ensure we do not persist a log with an
9810          * inconsistent state for any of these inodes, which could lead to
9811          * inconsistencies when replayed). If the transaction was aborted, the
9812          * reason for the abort is propagated to userspace when attempting to
9813          * commit the transaction. If the log does not contain any of these
9814          * inodes, we allow the tasks to sync it.
9815          */
9816         if (ret && (root_log_pinned || dest_log_pinned)) {
9817                 if (btrfs_inode_in_log(BTRFS_I(old_dir), fs_info->generation) ||
9818                     btrfs_inode_in_log(BTRFS_I(new_dir), fs_info->generation) ||
9819                     btrfs_inode_in_log(BTRFS_I(old_inode), fs_info->generation) ||
9820                     (new_inode &&
9821                      btrfs_inode_in_log(BTRFS_I(new_inode), fs_info->generation)))
9822                         btrfs_set_log_full_commit(trans);
9823 
9824                 if (root_log_pinned) {
9825                         btrfs_end_log_trans(root);
9826                         root_log_pinned = false;
9827                 }
9828                 if (dest_log_pinned) {
9829                         btrfs_end_log_trans(dest);
9830                         dest_log_pinned = false;
9831                 }
9832         }
9833         if (!ret && sync_log_root && !commit_transaction) {
9834                 ret = btrfs_sync_log(trans, BTRFS_I(old_inode)->root,
9835                                      &ctx_root);
9836                 if (ret)
9837                         commit_transaction = true;
9838         }
9839         if (!ret && sync_log_dest && !commit_transaction) {
9840                 ret = btrfs_sync_log(trans, BTRFS_I(new_inode)->root,
9841                                      &ctx_dest);
9842                 if (ret)
9843                         commit_transaction = true;
9844         }
9845         if (commit_transaction) {
9846                 /*
9847                  * We may have set commit_transaction when logging the new name
9848                  * in the destination root, in which case we left the source
9849          * root context in the list of log contexts. So make sure we
9850                  * remove it to avoid invalid memory accesses, since the context
9851                  * was allocated in our stack frame.
9852                  */
9853                 if (sync_log_root) {
9854                         mutex_lock(&root->log_mutex);
9855                         list_del_init(&ctx_root.list);
9856                         mutex_unlock(&root->log_mutex);
9857                 }
9858                 ret = btrfs_commit_transaction(trans);
9859         } else {
9860                 int ret2;
9861 
9862                 ret2 = btrfs_end_transaction(trans);
9863                 ret = ret ? ret : ret2;
9864         }
9865 out_notrans:
9866         if (new_ino == BTRFS_FIRST_FREE_OBJECTID ||
9867             old_ino == BTRFS_FIRST_FREE_OBJECTID)
9868                 up_read(&fs_info->subvol_sem);
9869 
9870         ASSERT(list_empty(&ctx_root.list));
9871         ASSERT(list_empty(&ctx_dest.list));
9872 
9873         return ret;
9874 }
9875 
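/*
 * Create the whiteout inode for a RENAME_WHITEOUT rename: a character
 * device with device number WHITEOUT_DEV, inserted at the old name so
 * that overlayfs-style users see the source name as deleted.
 */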
9876 static int btrfs_whiteout_for_rename(struct btrfs_trans_handle *trans,
9877                                      struct btrfs_root *root,
9878                                      struct inode *dir,
9879                                      struct dentry *dentry)
9880 {
9881         int ret;
9882         struct inode *inode;
9883         u64 objectid;
9884         u64 index;
9885 
9886         ret = btrfs_find_free_ino(root, &objectid);
9887         if (ret)
9888                 return ret;
9889 
9890         inode = btrfs_new_inode(trans, root, dir,
9891                                 dentry->d_name.name,
9892                                 dentry->d_name.len,
9893                                 btrfs_ino(BTRFS_I(dir)),
9894                                 objectid,
9895                                 S_IFCHR | WHITEOUT_MODE,
9896                                 &index);
9897 
9898         if (IS_ERR(inode)) {
9899                 ret = PTR_ERR(inode);
9900                 return ret;
9901         }
9902 
9903         inode->i_op = &btrfs_special_inode_operations;
9904         init_special_inode(inode, inode->i_mode,
9905                 WHITEOUT_DEV);
9906 
9907         ret = btrfs_init_inode_security(trans, inode, dir,
9908                                 &dentry->d_name);
9909         if (ret)
9910                 goto out;
9911 
9912         ret = btrfs_add_nondir(trans, BTRFS_I(dir), dentry,
9913                                 BTRFS_I(inode), 0, index);
9914         if (ret)
9915                 goto out;
9916 
9917         ret = btrfs_update_inode(trans, root, inode);
9918 out:
9919         unlock_new_inode(inode);
9920         if (ret)
9921                 inode_dec_link_count(inode);
9922         iput(inode);
9923 
9924         return ret;
9925 }
9926 
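/*
 * Plain rename (with optional RENAME_WHITEOUT): unlink the old name, drop
 * the target name if it exists (adding the victim to the orphan list when
 * its link count hits zero), link the new name, and log the new name
 * unless a subvolume forces a full transaction commit.
 */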
9927 static int btrfs_rename(struct inode *old_dir, struct dentry *old_dentry,
9928                            struct inode *new_dir, struct dentry *new_dentry,
9929                            unsigned int flags)
9930 {
9931         struct btrfs_fs_info *fs_info = btrfs_sb(old_dir->i_sb);
9932         struct btrfs_trans_handle *trans;
9933         unsigned int trans_num_items;
9934         struct btrfs_root *root = BTRFS_I(old_dir)->root;
9935         struct btrfs_root *dest = BTRFS_I(new_dir)->root;
9936         struct inode *new_inode = d_inode(new_dentry);
9937         struct inode *old_inode = d_inode(old_dentry);
9938         u64 index = 0;
9939         int ret;
9940         u64 old_ino = btrfs_ino(BTRFS_I(old_inode));
9941         bool log_pinned = false;
9942         struct btrfs_log_ctx ctx;
9943         bool sync_log = false;
9944         bool commit_transaction = false;
9945 
9946         if (btrfs_ino(BTRFS_I(new_dir)) == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID)
9947                 return -EPERM;
9948 
9949         /* we only allow renaming subvolume links between subvolumes */
9950         if (old_ino != BTRFS_FIRST_FREE_OBJECTID && root != dest)
9951                 return -EXDEV;
9952 
9953         if (old_ino == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID ||
9954             (new_inode && btrfs_ino(BTRFS_I(new_inode)) == BTRFS_FIRST_FREE_OBJECTID))
9955                 return -ENOTEMPTY;
9956 
9957         if (S_ISDIR(old_inode->i_mode) && new_inode &&
9958             new_inode->i_size > BTRFS_EMPTY_DIR_SIZE)
9959                 return -ENOTEMPTY;
9960 
9961         /* check for collisions, even if the name isn't there */
9963         ret = btrfs_check_dir_item_collision(dest, new_dir->i_ino,
9964                              new_dentry->d_name.name,
9965                              new_dentry->d_name.len);
9966 
9967         if (ret) {
9968                 if (ret == -EEXIST) {
9969                         /*
9970                          * We shouldn't get -EEXIST without a new_inode.
9971                          */
9972                         if (WARN_ON(!new_inode))
9973                                 return ret;
9974                 } else {
9975                         /* maybe -EOVERFLOW */
9976                         return ret;
9977                 }
9978         }
9979         ret = 0;
9980 
9981         /*
9982          * We're using rename to replace one file with another.  Start IO on it
9983          * now so we don't add too much work to the end of the transaction.
9984          */
9985         if (new_inode && S_ISREG(old_inode->i_mode) && new_inode->i_size)
9986                 filemap_flush(old_inode->i_mapping);
9987 
9988         /* close the race window with snapshot create/destroy ioctl */
9989         if (old_ino == BTRFS_FIRST_FREE_OBJECTID)
9990                 down_read(&fs_info->subvol_sem);
9991         /*
9992          * We want to reserve the absolute worst case amount of items.  So if
9993          * both inodes are subvols and we need to unlink them then that would
9994          * require 4 item modifications, but if they are both normal inodes it
9995          * would require 5 item modifications, so we'll assume they are normal
9996          * inodes.  So 5 * 2 is 10, plus 1 for the new link, so 11 total items
9997          * should cover the worst case number of items we'll modify.
9998          * If our rename has the whiteout flag, we need 5 more units for the
9999          * new inode (1 inode item, 1 inode ref, 2 dir items and 1 xattr item
10000          * when selinux is enabled).
10001          */
10002         trans_num_items = 11;
10003         if (flags & RENAME_WHITEOUT)
10004                 trans_num_items += 5;
10005         trans = btrfs_start_transaction(root, trans_num_items);
10006         if (IS_ERR(trans)) {
10007                 ret = PTR_ERR(trans);
10008                 goto out_notrans;
10009         }
10010 
10011         if (dest != root)
10012                 btrfs_record_root_in_trans(trans, dest);
10013 
10014         ret = btrfs_set_inode_index(BTRFS_I(new_dir), &index);
10015         if (ret)
10016                 goto out_fail;
10017 
10018         BTRFS_I(old_inode)->dir_index = 0ULL;
10019         if (unlikely(old_ino == BTRFS_FIRST_FREE_OBJECTID)) {
10020                 /* force full log commit if subvolume involved. */
10021                 btrfs_set_log_full_commit(trans);
10022         } else {
10023                 btrfs_pin_log_trans(root);
10024                 log_pinned = true;
10025                 ret = btrfs_insert_inode_ref(trans, dest,
10026                                              new_dentry->d_name.name,
10027                                              new_dentry->d_name.len,
10028                                              old_ino,
10029                                              btrfs_ino(BTRFS_I(new_dir)), index);
10030                 if (ret)
10031                         goto out_fail;
10032         }
10033 
10034         inode_inc_iversion(old_dir);
10035         inode_inc_iversion(new_dir);
10036         inode_inc_iversion(old_inode);
10037         old_dir->i_ctime = old_dir->i_mtime =
10038         new_dir->i_ctime = new_dir->i_mtime =
10039         old_inode->i_ctime = current_time(old_dir);
10040 
10041         if (old_dentry->d_parent != new_dentry->d_parent)
10042                 btrfs_record_unlink_dir(trans, BTRFS_I(old_dir),
10043                                 BTRFS_I(old_inode), 1);
10044 
10045         if (unlikely(old_ino == BTRFS_FIRST_FREE_OBJECTID)) {
10046                 ret = btrfs_unlink_subvol(trans, old_dir, old_dentry);
10047         } else {
10048                 ret = __btrfs_unlink_inode(trans, root, BTRFS_I(old_dir),
10049                                         BTRFS_I(d_inode(old_dentry)),
10050                                         old_dentry->d_name.name,
10051                                         old_dentry->d_name.len);
10052                 if (!ret)
10053                         ret = btrfs_update_inode(trans, root, old_inode);
10054         }
10055         if (ret) {
10056                 btrfs_abort_transaction(trans, ret);
10057                 goto out_fail;
10058         }
10059 
10060         if (new_inode) {
10061                 inode_inc_iversion(new_inode);
10062                 new_inode->i_ctime = current_time(new_inode);
10063                 if (unlikely(btrfs_ino(BTRFS_I(new_inode)) ==
10064                              BTRFS_EMPTY_SUBVOL_DIR_OBJECTID)) {
10065                         ret = btrfs_unlink_subvol(trans, new_dir, new_dentry);
10066                         BUG_ON(new_inode->i_nlink == 0);
10067                 } else {
10068                         ret = btrfs_unlink_inode(trans, dest, BTRFS_I(new_dir),
10069                                                  BTRFS_I(d_inode(new_dentry)),
10070                                                  new_dentry->d_name.name,
10071                                                  new_dentry->d_name.len);
10072                 }
10073                 if (!ret && new_inode->i_nlink == 0)
10074                         ret = btrfs_orphan_add(trans,
10075                                         BTRFS_I(d_inode(new_dentry)));
10076                 if (ret) {
10077                         btrfs_abort_transaction(trans, ret);
10078                         goto out_fail;
10079                 }
10080         }
10081 
10082         ret = btrfs_add_link(trans, BTRFS_I(new_dir), BTRFS_I(old_inode),
10083                              new_dentry->d_name.name,
10084                              new_dentry->d_name.len, 0, index);
10085         if (ret) {
10086                 btrfs_abort_transaction(trans, ret);
10087                 goto out_fail;
10088         }
10089 
10090         if (old_inode->i_nlink == 1)
10091                 BTRFS_I(old_inode)->dir_index = index;
10092 
10093         if (log_pinned) {
10094                 struct dentry *parent = new_dentry->d_parent;
10095 
10096                 btrfs_init_log_ctx(&ctx, old_inode);
10097                 ret = btrfs_log_new_name(trans, BTRFS_I(old_inode),
10098                                          BTRFS_I(old_dir), parent,
10099                                          false, &ctx);
10100                 if (ret == BTRFS_NEED_LOG_SYNC)
10101                         sync_log = true;
10102                 else if (ret == BTRFS_NEED_TRANS_COMMIT)
10103                         commit_transaction = true;
10104                 ret = 0;
10105                 btrfs_end_log_trans(root);
10106                 log_pinned = false;
10107         }
10108 
10109         if (flags & RENAME_WHITEOUT) {
10110                 ret = btrfs_whiteout_for_rename(trans, root, old_dir,
10111                                                 old_dentry);
10112 
10113                 if (ret) {
10114                         btrfs_abort_transaction(trans, ret);
10115                         goto out_fail;
10116                 }
10117         }
10118 out_fail:
10119         /*
10120          * If we have pinned the log and an error happened, we unpin tasks
10121          * trying to sync the log and force them to fall back to a transaction
10122          * commit if the log currently contains any of the inodes involved in
10123          * this rename operation (to ensure we do not persist a log with an
10124          * inconsistent state for any of these inodes, which could lead to
10125          * inconsistencies when replayed). If the transaction was aborted, the
10126          * reason for the abort is propagated to userspace when attempting to
10127          * commit the transaction. If the log does not contain any of these
10128          * inodes, we allow the tasks to sync it.
10129          */
10130         if (ret && log_pinned) {
10131                 if (btrfs_inode_in_log(BTRFS_I(old_dir), fs_info->generation) ||
10132                     btrfs_inode_in_log(BTRFS_I(new_dir), fs_info->generation) ||
10133                     btrfs_inode_in_log(BTRFS_I(old_inode), fs_info->generation) ||
10134                     (new_inode &&
10135                      btrfs_inode_in_log(BTRFS_I(new_inode), fs_info->generation)))
10136                         btrfs_set_log_full_commit(trans);
10137 
10138                 btrfs_end_log_trans(root);
10139                 log_pinned = false;
10140         }
10141         if (!ret && sync_log) {
10142                 ret = btrfs_sync_log(trans, BTRFS_I(old_inode)->root, &ctx);
10143                 if (ret)
10144                         commit_transaction = true;
10145         } else if (sync_log) {
10146                 mutex_lock(&root->log_mutex);
10147                 list_del(&ctx.list);
10148                 mutex_unlock(&root->log_mutex);
10149         }
10150         if (commit_transaction) {
10151                 ret = btrfs_commit_transaction(trans);
10152         } else {
10153                 int ret2;
10154 
10155                 ret2 = btrfs_end_transaction(trans);
10156                 ret = ret ? ret : ret2;
10157         }
10158 out_notrans:
10159         if (old_ino == BTRFS_FIRST_FREE_OBJECTID)
10160                 up_read(&fs_info->subvol_sem);
10161 
10162         return ret;
10163 }
10164 
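/*
 * rename(2)/renameat2(2) entry point: reject unsupported flags and
 * dispatch RENAME_EXCHANGE to btrfs_rename_exchange().  RENAME_NOREPLACE
 * is enforced by the VFS before we are called, so it needs no special
 * handling here.
 */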
10165 static int btrfs_rename2(struct inode *old_dir, struct dentry *old_dentry,
10166                          struct inode *new_dir, struct dentry *new_dentry,
10167                          unsigned int flags)
10168 {
10169         if (flags & ~(RENAME_NOREPLACE | RENAME_EXCHANGE | RENAME_WHITEOUT))
10170                 return -EINVAL;
10171 
10172         if (flags & RENAME_EXCHANGE)
10173                 return btrfs_rename_exchange(old_dir, old_dentry, new_dir,
10174                                           new_dentry);
10175 
10176         return btrfs_rename(old_dir, old_dentry, new_dir, new_dentry, flags);
10177 }
10178 
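/*
 * One unit of delalloc flushing work: flushes the dirty pages of a single
 * inode from a worker thread and signals @completion when done.
 */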
10179 struct btrfs_delalloc_work {
10180         struct inode *inode;
10181         struct completion completion;
10182         struct list_head list;
10183         struct btrfs_work work;
10184 };
10185 
10186 static void btrfs_run_delalloc_work(struct btrfs_work *work)
10187 {
10188         struct btrfs_delalloc_work *delalloc_work;
10189         struct inode *inode;
10190 
10191         delalloc_work = container_of(work, struct btrfs_delalloc_work,
10192                                      work);
10193         inode = delalloc_work->inode;
10194         filemap_flush(inode->i_mapping);
10195         if (test_bit(BTRFS_INODE_HAS_ASYNC_EXTENT,
10196                                 &BTRFS_I(inode)->runtime_flags))
10197                 filemap_flush(inode->i_mapping);
10198 
10199         iput(inode);
10200         complete(&delalloc_work->completion);
10201 }
10202 
10203 static struct btrfs_delalloc_work *btrfs_alloc_delalloc_work(struct inode *inode)
10204 {
10205         struct btrfs_delalloc_work *work;
10206 
10207         work = kmalloc(sizeof(*work), GFP_NOFS);
10208         if (!work)
10209                 return NULL;
10210 
10211         init_completion(&work->completion);
10212         INIT_LIST_HEAD(&work->list);
10213         work->inode = inode;
10214         btrfs_init_work(&work->work, btrfs_run_delalloc_work, NULL, NULL);
10215 
10216         return work;
10217 }
10218 
10219 /*
10220  * Some fairly slow code that needs optimization. This walks the list
10221  * of all the inodes with pending delalloc and forces them to disk.
10222  * @nr is the maximum number of inodes to queue for flushing, or -1 to
10223  * queue them all; the return value is the number queued, or a negative
10224  * errno on failure.
10225  */
10223 static int start_delalloc_inodes(struct btrfs_root *root, int nr, bool snapshot)
10224 {
10225         struct btrfs_inode *binode;
10226         struct inode *inode;
10227         struct btrfs_delalloc_work *work, *next;
10228         struct list_head works;
10229         struct list_head splice;
10230         int ret = 0;
10231 
10232         INIT_LIST_HEAD(&works);
10233         INIT_LIST_HEAD(&splice);
10234 
10235         mutex_lock(&root->delalloc_mutex);
10236         spin_lock(&root->delalloc_lock);
10237         list_splice_init(&root->delalloc_inodes, &splice);
10238         while (!list_empty(&splice)) {
10239                 binode = list_entry(splice.next, struct btrfs_inode,
10240                                     delalloc_inodes);
10241 
10242                 list_move_tail(&binode->delalloc_inodes,
10243                                &root->delalloc_inodes);
10244                 inode = igrab(&binode->vfs_inode);
10245                 if (!inode) {
10246                         cond_resched_lock(&root->delalloc_lock);
10247                         continue;
10248                 }
10249                 spin_unlock(&root->delalloc_lock);
10250 
10251                 if (snapshot)
10252                         set_bit(BTRFS_INODE_SNAPSHOT_FLUSH,
10253                                 &binode->runtime_flags);
10254                 work = btrfs_alloc_delalloc_work(inode);
10255                 if (!work) {
10256                         iput(inode);
10257                         ret = -ENOMEM;
10258                         goto out;
10259                 }
10260                 list_add_tail(&work->list, &works);
10261                 btrfs_queue_work(root->fs_info->flush_workers,
10262                                  &work->work);
10263                 ret++;
10264                 if (nr != -1 && ret >= nr)
10265                         goto out;
10266                 cond_resched();
10267                 spin_lock(&root->delalloc_lock);
10268         }
10269         spin_unlock(&root->delalloc_lock);
10270 
10271 out:
10272         list_for_each_entry_safe(work, next, &works, list) {
10273                 list_del_init(&work->list);
10274                 wait_for_completion(&work->completion);
10275                 kfree(work);
10276         }
10277 
10278         if (!list_empty(&splice)) {
10279                 spin_lock(&root->delalloc_lock);
10280                 list_splice_tail(&splice, &root->delalloc_inodes);
10281                 spin_unlock(&root->delalloc_lock);
10282         }
10283         mutex_unlock(&root->delalloc_mutex);
10284         return ret;
10285 }
10286 
10287 int btrfs_start_delalloc_snapshot(struct btrfs_root *root)
10288 {
10289         struct btrfs_fs_info *fs_info = root->fs_info;
10290         int ret;
10291 
10292         if (test_bit(BTRFS_FS_STATE_ERROR, &fs_info->fs_state))
10293                 return -EROFS;
10294 
10295         ret = start_delalloc_inodes(root, -1, true);
10296         if (ret > 0)
10297                 ret = 0;
10298         return ret;
10299 }
10300 
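/*
 * Flush delalloc for up to @nr inodes across all roots with pending
 * delalloc (all of them if @nr is -1), rotating each root to the tail of
 * the list as it is processed.
 */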
10301 int btrfs_start_delalloc_roots(struct btrfs_fs_info *fs_info, int nr)
10302 {
10303         struct btrfs_root *root;
10304         struct list_head splice;
10305         int ret;
10306 
10307         if (test_bit(BTRFS_FS_STATE_ERROR, &fs_info->fs_state))
10308                 return -EROFS;
10309 
10310         INIT_LIST_HEAD(&splice);
10311 
10312         mutex_lock(&fs_info->delalloc_root_mutex);
10313         spin_lock(&fs_info->delalloc_root_lock);
10314         list_splice_init(&fs_info->delalloc_roots, &splice);
10315         while (!list_empty(&splice) && nr) {
10316                 root = list_first_entry(&splice, struct btrfs_root,
10317                                         delalloc_root);
10318                 root = btrfs_grab_fs_root(root);
10319                 BUG_ON(!root);
10320                 list_move_tail(&root->delalloc_root,
10321                                &fs_info->delalloc_roots);
10322                 spin_unlock(&fs_info->delalloc_root_lock);
10323 
10324                 ret = start_delalloc_inodes(root, nr, false);
10325                 btrfs_put_fs_root(root);
10326                 if (ret < 0)
10327                         goto out;
10328 
10329                 if (nr != -1) {
10330                         nr -= ret;
10331                         WARN_ON(nr < 0);
10332                 }
10333                 spin_lock(&fs_info->delalloc_root_lock);
10334         }
10335         spin_unlock(&fs_info->delalloc_root_lock);
10336 
10337         ret = 0;
10338 out:
10339         if (!list_empty(&splice)) {
10340                 spin_lock(&fs_info->delalloc_root_lock);
10341                 list_splice_tail(&splice, &fs_info->delalloc_roots);
10342                 spin_unlock(&fs_info->delalloc_root_lock);
10343         }
10344         mutex_unlock(&fs_info->delalloc_root_mutex);
10345         return ret;
10346 }
10347 
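/*
 * Create a symlink.  The target string is stored as an inline file extent
 * in the subvolume tree, which is why targets longer than
 * BTRFS_MAX_INLINE_DATA_SIZE() are rejected with -ENAMETOOLONG.
 */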
10348 static int btrfs_symlink(struct inode *dir, struct dentry *dentry,
10349                          const char *symname)
10350 {
10351         struct btrfs_fs_info *fs_info = btrfs_sb(dir->i_sb);
10352         struct btrfs_trans_handle *trans;
10353         struct btrfs_root *root = BTRFS_I(dir)->root;
10354         struct btrfs_path *path;
10355         struct btrfs_key key;
10356         struct inode *inode = NULL;
10357         int err;
10358         u64 objectid;
10359         u64 index = 0;
10360         int name_len;
10361         int datasize;
10362         unsigned long ptr;
10363         struct btrfs_file_extent_item *ei;
10364         struct extent_buffer *leaf;
10365 
10366         name_len = strlen(symname);
10367         if (name_len > BTRFS_MAX_INLINE_DATA_SIZE(fs_info))
10368                 return -ENAMETOOLONG;
10369 
10370         /*
10371          * 2 items for inode item and ref
10372          * 2 items for dir items
10373          * 1 item for updating parent inode item
10374          * 1 item for the inline extent item
10375          * 1 item for xattr if selinux is on
10376          */
10377         trans = btrfs_start_transaction(root, 7);
10378         if (IS_ERR(trans))
10379                 return PTR_ERR(trans);
10380 
10381         err = btrfs_find_free_ino(root, &objectid);
10382         if (err)
10383                 goto out_unlock;
10384 
10385         inode = btrfs_new_inode(trans, root, dir, dentry->d_name.name,
10386                                 dentry->d_name.len, btrfs_ino(BTRFS_I(dir)),
10387                                 objectid, S_IFLNK|S_IRWXUGO, &index);
10388         if (IS_ERR(inode)) {
10389                 err = PTR_ERR(inode);
10390                 inode = NULL;
10391                 goto out_unlock;
10392         }
10393 
10394         /*
10395         * If the active LSM wants to access the inode during
10396         * d_instantiate it needs these. Smack checks to see
10397         * if the filesystem supports xattrs by looking at the
10398         * ops vector.
10399         */
10400         inode->i_fop = &btrfs_file_operations;
10401         inode->i_op = &btrfs_file_inode_operations;
10402         inode->i_mapping->a_ops = &btrfs_aops;
10403         BTRFS_I(inode)->io_tree.ops = &btrfs_extent_io_ops;
10404 
10405         err = btrfs_init_inode_security(trans, inode, dir, &dentry->d_name);
10406         if (err)
10407                 goto out_unlock;
10408 
10409         path = btrfs_alloc_path();
10410         if (!path) {
10411                 err = -ENOMEM;
10412                 goto out_unlock;
10413         }
10414         key.objectid = btrfs_ino(BTRFS_I(inode));
10415         key.offset = 0;
10416         key.type = BTRFS_EXTENT_DATA_KEY;
10417         datasize = btrfs_file_extent_calc_inline_size(name_len);
10418         err = btrfs_insert_empty_item(trans, root, path, &key,
10419                                       datasize);
10420         if (err) {
10421                 btrfs_free_path(path);
10422                 goto out_unlock;
10423         }
10424         leaf = path->nodes[0];
10425         ei = btrfs_item_ptr(leaf, path->slots[0],
10426                             struct btrfs_file_extent_item);
10427         btrfs_set_file_extent_generation(leaf, ei, trans->transid);
10428         btrfs_set_file_extent_type(leaf, ei,
10429                                    BTRFS_FILE_EXTENT_INLINE);
10430         btrfs_set_file_extent_encryption(leaf, ei, 0);
10431         btrfs_set_file_extent_compression(leaf, ei, 0);
10432         btrfs_set_file_extent_other_encoding(leaf, ei, 0);
10433         btrfs_set_file_extent_ram_bytes(leaf, ei, name_len);
10434 
10435         ptr = btrfs_file_extent_inline_start(ei);
10436         write_extent_buffer(leaf, symname, ptr, name_len);
10437         btrfs_mark_buffer_dirty(leaf);
10438         btrfs_free_path(path);
10439 
10440         inode->i_op = &btrfs_symlink_inode_operations;
10441         inode_nohighmem(inode);
10442         inode_set_bytes(inode, name_len);
10443         btrfs_i_size_write(BTRFS_I(inode), name_len);
10444         err = btrfs_update_inode(trans, root, inode);
10445         /*
10446          * Last step: add directory indexes for our symlink inode. Doing this
10447          * last means we don't have to clean up those indexes if an error
10448          * happens in any of the steps above.
10449          */
10450         if (!err)
10451                 err = btrfs_add_nondir(trans, BTRFS_I(dir), dentry,
10452                                 BTRFS_I(inode), 0, index);
10453         if (err)
10454                 goto out_unlock;
10455 
10456         d_instantiate_new(dentry, inode);
10457 
10458 out_unlock:
10459         btrfs_end_transaction(trans);
10460         if (err && inode) {
10461                 inode_dec_link_count(inode);
10462                 discard_new_inode(inode);
10463         }
10464         btrfs_btree_balance_dirty(fs_info);
10465         return err;
10466 }
10467 
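      /*
       * Preallocate extents for the range [start, start + num_bytes).
       * Space is reserved in chunks of at most SZ_256M and inserted as
       * BTRFS_FILE_EXTENT_PREALLOC file extent items.  If @trans is NULL a
       * transaction is started (and ended) for each extent allocated;
       * otherwise the caller's handle is used.  Any data space reserved but
       * not turned into an extent by the time we stop (tracked through
       * clear_offset) is released at the end.
       */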
10468 static int __btrfs_prealloc_file_range(struct inode *inode, int mode,
10469                                        u64 start, u64 num_bytes, u64 min_size,
10470                                        loff_t actual_len, u64 *alloc_hint,
10471                                        struct btrfs_trans_handle *trans)
10472 {
10473         struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
10474         struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
10475         struct extent_map *em;
10476         struct btrfs_root *root = BTRFS_I(inode)->root;
10477         struct btrfs_key ins;
10478         u64 cur_offset = start;
10479         u64 clear_offset = start;
10480         u64 i_size;
10481         u64 cur_bytes;
10482         u64 last_alloc = (u64)-1;
10483         int ret = 0;
10484         bool own_trans = true;
10485         u64 end = start + num_bytes - 1;
10486 
10487         if (trans)
10488                 own_trans = false;
10489         while (num_bytes > 0) {
10490                 if (own_trans) {
10491                         trans = btrfs_start_transaction(root, 3);
10492                         if (IS_ERR(trans)) {
10493                                 ret = PTR_ERR(trans);
10494                                 break;
10495                         }
10496                 }
10497 
10498                 cur_bytes = min_t(u64, num_bytes, SZ_256M);
10499                 cur_bytes = max(cur_bytes, min_size);
10500                 /*
10501                  * If we are severely fragmented we could end up with really
10502                  * small allocations, so if the allocator is returning small
10503                  * chunks let's make its job easier by only searching for
10504                  * chunks of that size.
10505                  */
10506                 cur_bytes = min(cur_bytes, last_alloc);
10507                 ret = btrfs_reserve_extent(root, cur_bytes, cur_bytes,
10508                                 min_size, 0, *alloc_hint, &ins, 1, 0);
10509                 if (ret) {
10510                         if (own_trans)
10511                                 btrfs_end_transaction(trans);
10512                         break;
10513                 }
10514 
10515                 /*
10516                  * We've reserved this space, and thus converted it from
10517                  * ->bytes_may_use to ->bytes_reserved.  If an error happens
10518                  * from here on out, we only need to clear our reservation
10519                  * for the remaining unreserved area, so advance
10520                  * clear_offset by the size of this extent.
10521                  */
10522                 clear_offset += ins.offset;
10523                 btrfs_dec_block_group_reservations(fs_info, ins.objectid);
10524 
10525                 last_alloc = ins.offset;
10526                 ret = insert_reserved_file_extent(trans, inode,
10527                                                   cur_offset, ins.objectid,
10528                                                   ins.offset, ins.offset,
10529                                                   ins.offset, 0, 0, 0,
10530                                                   BTRFS_FILE_EXTENT_PREALLOC);
10531                 if (ret) {
10532                         btrfs_free_reserved_extent(fs_info, ins.objectid,
10533                                                    ins.offset, 0);
10534                         btrfs_abort_transaction(trans, ret);
10535                         if (own_trans)
10536                                 btrfs_end_transaction(trans);
10537                         break;
10538                 }
10539 
10540                 btrfs_drop_extent_cache(BTRFS_I(inode), cur_offset,
10541                                         cur_offset + ins.offset - 1, 0);
10542 
10543                 em = alloc_extent_map();
10544                 if (!em) {
10545                         set_bit(BTRFS_INODE_NEEDS_FULL_SYNC,
10546                                 &BTRFS_I(inode)->runtime_flags);
10547                         goto next;
10548                 }
10549 
10550                 em->start = cur_offset;
10551                 em->orig_start = cur_offset;
10552                 em->len = ins.offset;
10553                 em->block_start = ins.objectid;
10554                 em->block_len = ins.offset;
10555                 em->orig_block_len = ins.offset;
10556                 em->ram_bytes = ins.offset;
10557                 em->bdev = fs_info->fs_devices->latest_bdev;
10558                 set_bit(EXTENT_FLAG_PREALLOC, &em->flags);
10559                 em->generation = trans->transid;
10560 
10561                 while (1) {
10562                         write_lock(&em_tree->lock);
10563                         ret = add_extent_mapping(em_tree, em, 1);
10564                         write_unlock(&em_tree->lock);
10565                         if (ret != -EEXIST)
10566                                 break;
10567                         btrfs_drop_extent_cache(BTRFS_I(inode), cur_offset,
10568                                                 cur_offset + ins.offset - 1,
10569                                                 0);
10570                 }
10571                 free_extent_map(em);
10572 next:
10573                 num_bytes -= ins.offset;
10574                 cur_offset += ins.offset;
10575                 *alloc_hint = ins.objectid + ins.offset;
10576 
10577                 inode_inc_iversion(inode);
10578                 inode->i_ctime = current_time(inode);
10579                 BTRFS_I(inode)->flags |= BTRFS_INODE_PREALLOC;
10580                 if (!(mode & FALLOC_FL_KEEP_SIZE) &&
10581                     (actual_len > inode->i_size) &&
10582                     (cur_offset > inode->i_size)) {
10583                         if (cur_offset > actual_len)
10584                                 i_size = actual_len;
10585                         else
10586                                 i_size = cur_offset;
10587                         i_size_write(inode, i_size);
10588                         btrfs_ordered_update_i_size(inode, i_size, NULL);
10589                 }
10590 
10591                 ret = btrfs_update_inode(trans, root, inode);
10592 
10593                 if (ret) {
10594                         btrfs_abort_transaction(trans, ret);
10595                         if (own_trans)
10596                                 btrfs_end_transaction(trans);
10597                         break;
10598                 }
10599 
10600                 if (own_trans)
10601                         btrfs_end_transaction(trans);
10602         }
10603         if (clear_offset < end)
10604                 btrfs_free_reserved_data_space(inode, NULL, clear_offset,
10605                         end - clear_offset + 1);
10606         return ret;
10607 }
10608 
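      /*
       * Public wrappers: the first lets __btrfs_prealloc_file_range() manage
       * its own transactions, the second runs inside a transaction handle
       * supplied by the caller.  A caller such as btrfs_fallocate() would do,
       * roughly (a sketch, not a verbatim call site; names are illustrative):
       *
       *        ret = btrfs_prealloc_file_range(inode, mode, cur_offset,
       *                                        last_byte - cur_offset, blocksize,
       *                                        offset + len, &alloc_hint);
       */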
10609 int btrfs_prealloc_file_range(struct inode *inode, int mode,
10610                               u64 start, u64 num_bytes, u64 min_size,
10611                               loff_t actual_len, u64 *alloc_hint)
10612 {
10613         return __btrfs_prealloc_file_range(inode, mode, start, num_bytes,
10614                                            min_size, actual_len, alloc_hint,
10615                                            NULL);
10616 }
10617 
10618 int btrfs_prealloc_file_range_trans(struct inode *inode,
10619                                     struct btrfs_trans_handle *trans, int mode,
10620                                     u64 start, u64 num_bytes, u64 min_size,
10621                                     loff_t actual_len, u64 *alloc_hint)
10622 {
10623         return __btrfs_prealloc_file_range(inode, mode, start, num_bytes,
10624                                            min_size, actual_len, alloc_hint, trans);
10625 }
10626 
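      /* btrfs keeps no buffer heads on its pages, so nobuffers dirtying suffices. */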
10627 static int btrfs_set_page_dirty(struct page *page)
10628 {
10629         return __set_page_dirty_nobuffers(page);
10630 }
10631 
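      /*
       * Deny write access early for read-only subvolumes and for inodes
       * carrying the read-only flag, then fall through to the generic
       * permission checks.
       */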
10632 static int btrfs_permission(struct inode *inode, int mask)
10633 {
10634         struct btrfs_root *root = BTRFS_I(inode)->root;
10635         umode_t mode = inode->i_mode;
10636 
10637         if (mask & MAY_WRITE &&
10638             (S_ISREG(mode) || S_ISDIR(mode) || S_ISLNK(mode))) {
10639                 if (btrfs_root_readonly(root))
10640                         return -EROFS;
10641                 if (BTRFS_I(inode)->flags & BTRFS_INODE_READONLY)
10642                         return -EACCES;
10643         }
10644         return generic_permission(inode, mask);
10645 }
10646 
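      /*
       * O_TMPFILE support: create an unnamed inode.  It starts out with
       * zero links and is put on the orphan list, so if we crash before
       * it is either linked in or released, the next mount cleans it up.
       */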
10647 static int btrfs_tmpfile(struct inode *dir, struct dentry *dentry, umode_t mode)
10648 {
10649         struct btrfs_fs_info *fs_info = btrfs_sb(dir->i_sb);
10650         struct btrfs_trans_handle *trans;
10651         struct btrfs_root *root = BTRFS_I(dir)->root;
10652         struct inode *inode = NULL;
10653         u64 objectid;
10654         u64 index;
10655         int ret = 0;
10656 
10657         /*
10658          * 5 transaction units are required for adding the orphan entry
10659          */
10660         trans = btrfs_start_transaction(root, 5);
10661         if (IS_ERR(trans))
10662                 return PTR_ERR(trans);
10663 
10664         ret = btrfs_find_free_ino(root, &objectid);
10665         if (ret)
10666                 goto out;
10667 
10668         inode = btrfs_new_inode(trans, root, dir, NULL, 0,
10669                         btrfs_ino(BTRFS_I(dir)), objectid, mode, &index);
10670         if (IS_ERR(inode)) {
10671                 ret = PTR_ERR(inode);
10672                 inode = NULL;
10673                 goto out;
10674         }
10675 
10676         inode->i_fop = &btrfs_file_operations;
10677         inode->i_op = &btrfs_file_inode_operations;
10678 
10679         inode->i_mapping->a_ops = &btrfs_aops;
10680         BTRFS_I(inode)->io_tree.ops = &btrfs_extent_io_ops;
10681 
10682         ret = btrfs_init_inode_security(trans, inode, dir, NULL);
10683         if (ret)
10684                 goto out;
10685 
10686         ret = btrfs_update_inode(trans, root, inode);
10687         if (ret)
10688                 goto out;
10689         ret = btrfs_orphan_add(trans, BTRFS_I(inode));
10690         if (ret)
10691                 goto out;
10692 
10693         /*
10694          * We set the number of links to 0 in btrfs_new_inode(). Here we set
10695          * it to 1 because drop_nlink() warns if the count is already 0, and
10696          * d_tmpfile() decrements it via:
10697          *
10698          *    d_tmpfile() -> inode_dec_link_count() -> drop_nlink()
10699          */
10700         set_nlink(inode, 1);
10701         d_tmpfile(dentry, inode);
10702         unlock_new_inode(inode);
10703         mark_inode_dirty(inode);
10704 out:
10705         btrfs_end_transaction(trans);
10706         if (ret && inode)
10707                 discard_new_inode(inode);
10708         btrfs_btree_balance_dirty(fs_info);
10709         return ret;
10710 }
10711 
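      /*
       * Mark every page backing the byte range [start, end] as under
       * writeback.  The pages are expected to already be present in the
       * page cache; see the ASSERT() below.
       */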
10712 void btrfs_set_range_writeback(struct extent_io_tree *tree, u64 start, u64 end)
10713 {
10714         struct inode *inode = tree->private_data;
10715         unsigned long index = start >> PAGE_SHIFT;
10716         unsigned long end_index = end >> PAGE_SHIFT;
10717         struct page *page;
10718 
10719         while (index <= end_index) {
10720                 page = find_get_page(inode->i_mapping, index);
10721                 ASSERT(page); /* Pages should be in the extent_io_tree */
10722                 set_page_writeback(page);
10723                 put_page(page);
10724                 index++;
10725         }
10726 }
10727 
10728 #ifdef CONFIG_SWAP
10729 /*
10730  * Add an entry indicating a block group or device which is pinned by a
10731  * swapfile. Returns 0 on success, 1 if there is already an entry for it, or a
10732  * negative errno on failure.
10733  */
10734 static int btrfs_add_swapfile_pin(struct inode *inode, void *ptr,
10735                                   bool is_block_group)
10736 {
10737         struct btrfs_fs_info *fs_info = BTRFS_I(inode)->root->fs_info;
10738         struct btrfs_swapfile_pin *sp, *entry;
10739         struct rb_node **p;
10740         struct rb_node *parent = NULL;
10741 
10742         sp = kmalloc(sizeof(*sp), GFP_NOFS);
10743         if (!sp)
10744                 return -ENOMEM;
10745         sp->ptr = ptr;
10746         sp->inode = inode;
10747         sp->is_block_group = is_block_group;
10748 
10749         spin_lock(&fs_info->swapfile_pins_lock);
10750         p = &fs_info->swapfile_pins.rb_node;
10751         while (*p) {
10752                 parent = *p;
10753                 entry = rb_entry(parent, struct btrfs_swapfile_pin, node);
10754                 if (sp->ptr < entry->ptr ||
10755                     (sp->ptr == entry->ptr && sp->inode < entry->inode)) {
10756                         p = &(*p)->rb_left;
10757                 } else if (sp->ptr > entry->ptr ||
10758                            (sp->ptr == entry->ptr && sp->inode > entry->inode)) {
10759                         p = &(*p)->rb_right;
10760                 } else {
10761                         spin_unlock(&fs_info->swapfile_pins_lock);
10762                         kfree(sp);
10763                         return 1;
10764                 }
10765         }
10766         rb_link_node(&sp->node, parent, p);
10767         rb_insert_color(&sp->node, &fs_info->swapfile_pins);
10768         spin_unlock(&fs_info->swapfile_pins_lock);
10769         return 0;
10770 }
10771 
10772 /* Free all of the entries pinned by this swapfile. */
10773 static void btrfs_free_swapfile_pins(struct inode *inode)
10774 {
10775         struct btrfs_fs_info *fs_info = BTRFS_I(inode)->root->fs_info;
10776         struct btrfs_swapfile_pin *sp;
10777         struct rb_node *node, *next;
10778 
10779         spin_lock(&fs_info->swapfile_pins_lock);
10780         node = rb_first(&fs_info->swapfile_pins);
10781         while (node) {
10782                 next = rb_next(node);
10783                 sp = rb_entry(node, struct btrfs_swapfile_pin, node);
10784                 if (sp->inode == inode) {
10785                         rb_erase(&sp->node, &fs_info->swapfile_pins);
10786                         if (sp->is_block_group)
10787                                 btrfs_put_block_group(sp->ptr);
10788                         kfree(sp);
10789                 }
10790                 node = next;
10791         }
10792         spin_unlock(&fs_info->swapfile_pins_lock);
10793 }
10794 
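      /*
       * State carried while walking a swap file's extents: the run of
       * physically contiguous blocks currently being accumulated (start,
       * block_start, block_len) plus the totals reported back to the
       * swap code.
       */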
10795 struct btrfs_swap_info {
10796         u64 start;
10797         u64 block_start;
10798         u64 block_len;
10799         u64 lowest_ppage;
10800         u64 highest_ppage;
10801         unsigned long nr_pages;
10802         int nr_extents;
10803 };
10804 
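      /*
       * Flush the physically contiguous run accumulated in @bsi into the
       * swap extent map.  Partial pages at either end of the run are
       * trimmed off, since swap works in whole pages.
       */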
10805 static int btrfs_add_swap_extent(struct swap_info_struct *sis,
10806                                  struct btrfs_swap_info *bsi)
10807 {
10808         unsigned long nr_pages;
10809         u64 first_ppage, first_ppage_reported, next_ppage;
10810         int ret;
10811 
10812         first_ppage = ALIGN(bsi->block_start, PAGE_SIZE) >> PAGE_SHIFT;
10813         next_ppage = ALIGN_DOWN(bsi->block_start + bsi->block_len,
10814                                 PAGE_SIZE) >> PAGE_SHIFT;
10815 
10816         if (first_ppage >= next_ppage)
10817                 return 0;
10818         nr_pages = next_ppage - first_ppage;
10819 
10820         first_ppage_reported = first_ppage;
10821         if (bsi->start == 0)
10822                 first_ppage_reported++;
10823         if (bsi->lowest_ppage > first_ppage_reported)
10824                 bsi->lowest_ppage = first_ppage_reported;
10825         if (bsi->highest_ppage < (next_ppage - 1))
10826                 bsi->highest_ppage = next_ppage - 1;
10827 
10828         ret = add_swap_extent(sis, bsi->nr_pages, nr_pages, first_ppage);
10829         if (ret < 0)
10830                 return ret;
10831         bsi->nr_extents += ret;
10832         bsi->nr_pages += nr_pages;
10833         return 0;
10834 }
10835 
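      /* Undo the pinning and accounting done at activation time. */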
10836 static void btrfs_swap_deactivate(struct file *file)
10837 {
10838         struct inode *inode = file_inode(file);
10839 
10840         btrfs_free_swapfile_pins(inode);
10841         atomic_dec(&BTRFS_I(inode)->root->nr_swapfiles);
10842 }
10843 
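      /*
       * Validate and map a file for use as swap space.  The file must be
       * fully allocated NOCOW space: not compressed, not checksummed, no
       * holes and no inline extents, using the single data profile on a
       * single device.  Every block group and the device backing the file
       * are pinned so balance and device remove/replace/resize leave them
       * alone while the swap file is active.
       */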
10844 static int btrfs_swap_activate(struct swap_info_struct *sis, struct file *file,
10845                                sector_t *span)
10846 {
10847         struct inode *inode = file_inode(file);
10848         struct btrfs_fs_info *fs_info = BTRFS_I(inode)->root->fs_info;
10849         struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
10850         struct extent_state *cached_state = NULL;
10851         struct extent_map *em = NULL;
10852         struct btrfs_device *device = NULL;
10853         struct btrfs_swap_info bsi = {
10854                 .lowest_ppage = (sector_t)-1ULL,
10855         };
10856         int ret = 0;
10857         u64 isize;
10858         u64 start;
10859 
10860         /*
10861          * If the swap file was just created, make sure delalloc is done. If the
10862          * file changes again after this, the user is doing something stupid and
10863          * we don't really care.
10864          */
10865         ret = btrfs_wait_ordered_range(inode, 0, (u64)-1);
10866         if (ret)
10867                 return ret;
10868 
10869         /*
10870          * The inode is locked, so these flags won't change after we check them.
10871          */
10872         if (BTRFS_I(inode)->flags & BTRFS_INODE_COMPRESS) {
10873                 btrfs_warn(fs_info, "swapfile must not be compressed");
10874                 return -EINVAL;
10875         }
10876         if (!(BTRFS_I(inode)->flags & BTRFS_INODE_NODATACOW)) {
10877                 btrfs_warn(fs_info, "swapfile must not be copy-on-write");
10878                 return -EINVAL;
10879         }
10880         if (!(BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM)) {
10881                 btrfs_warn(fs_info, "swapfile must not be checksummed");
10882                 return -EINVAL;
10883         }
10884 
10885         /*
10886          * Balance or device remove/replace/resize can move stuff around from
10887          * under us. The EXCL_OP flag makes sure they aren't running/won't run
10888          * concurrently while we are mapping the swap extents, and
10889          * fs_info->swapfile_pins prevents them from running while the swap file
10890          * is active and moving the extents. Note that this also blocks a
10891          * concurrent device add, which isn't strictly necessary, but allowing
10892          * it isn't worth the trouble.
10893          */
10894         if (test_and_set_bit(BTRFS_FS_EXCL_OP, &fs_info->flags)) {
10895                 btrfs_warn(fs_info,
10896            "cannot activate swapfile while exclusive operation is running");
10897                 return -EBUSY;
10898         }
10899         /*
10900          * Snapshots can create extents which require COW even if NODATACOW is
10901          * set. We use this counter to prevent snapshots. We must increment it
10902          * before walking the extents because we don't want a concurrent
10903          * snapshot to run after we've already checked the extents.
10904          */
10905         atomic_inc(&BTRFS_I(inode)->root->nr_swapfiles);
10906 
10907         isize = ALIGN_DOWN(inode->i_size, fs_info->sectorsize);
10908 
10909         lock_extent_bits(io_tree, 0, isize - 1, &cached_state);
10910         start = 0;
10911         while (start < isize) {
10912                 u64 logical_block_start, physical_block_start;
10913                 struct btrfs_block_group_cache *bg;
10914                 u64 len = isize - start;
10915 
10916                 em = btrfs_get_extent(BTRFS_I(inode), NULL, 0, start, len, 0);
10917                 if (IS_ERR(em)) {
10918                         ret = PTR_ERR(em);
10919                         goto out;
10920                 }
10921 
10922                 if (em->block_start == EXTENT_MAP_HOLE) {
10923                         btrfs_warn(fs_info, "swapfile must not have holes");
10924                         ret = -EINVAL;
10925                         goto out;
10926                 }
10927                 if (em->block_start == EXTENT_MAP_INLINE) {
10928                         /*
10929                          * It's unlikely we'll ever actually find ourselves
10930                          * here, as a file small enough to fit inline won't be
10931                          * big enough to store more than the swap header, but in
10932                          * case something changes in the future, let's catch it
10933                          * here rather than later.
10934                          */
10935                         btrfs_warn(fs_info, "swapfile must not be inline");
10936                         ret = -EINVAL;
10937                         goto out;
10938                 }
10939                 if (test_bit(EXTENT_FLAG_COMPRESSED, &em->flags)) {
10940                         btrfs_warn(fs_info, "swapfile must not be compressed");
10941                         ret = -EINVAL;
10942                         goto out;
10943                 }
10944 
10945                 logical_block_start = em->block_start + (start - em->start);
10946                 len = min(len, em->len - (start - em->start));
10947                 free_extent_map(em);
10948                 em = NULL;
10949 
10950                 ret = can_nocow_extent(inode, start, &len, NULL, NULL, NULL);
10951                 if (ret < 0) {
10952                         goto out;
10953                 } else if (ret) {
10954                         ret = 0;
10955                 } else {
10956                         btrfs_warn(fs_info,
10957                                    "swapfile must not be copy-on-write");
10958                         ret = -EINVAL;
10959                         goto out;
10960                 }
10961 
10962                 em = btrfs_get_chunk_map(fs_info, logical_block_start, len);
10963                 if (IS_ERR(em)) {
10964                         ret = PTR_ERR(em);
10965                         goto out;
10966                 }
10967 
10968                 if (em->map_lookup->type & BTRFS_BLOCK_GROUP_PROFILE_MASK) {
10969                         btrfs_warn(fs_info,
10970                                    "swapfile must have single data profile");
10971                         ret = -EINVAL;
10972                         goto out;
10973                 }
10974 
10975                 if (device == NULL) {
10976                         device = em->map_lookup->stripes[0].dev;
10977                         ret = btrfs_add_swapfile_pin(inode, device, false);
10978                         if (ret == 1)
10979                                 ret = 0;
10980                         else if (ret)
10981                                 goto out;
10982                 } else if (device != em->map_lookup->stripes[0].dev) {
10983                         btrfs_warn(fs_info, "swapfile must be on one device");
10984                         ret = -EINVAL;
10985                         goto out;
10986                 }
10987 
10988                 physical_block_start = (em->map_lookup->stripes[0].physical +
10989                                         (logical_block_start - em->start));
10990                 len = min(len, em->len - (logical_block_start - em->start));
10991                 free_extent_map(em);
10992                 em = NULL;
10993 
10994                 bg = btrfs_lookup_block_group(fs_info, logical_block_start);
10995                 if (!bg) {
10996                         btrfs_warn(fs_info,
10997                            "could not find block group containing swapfile");
10998                         ret = -EINVAL;
10999                         goto out;
11000                 }
11001 
11002                 ret = btrfs_add_swapfile_pin(inode, bg, true);
11003                 if (ret) {
11004                         btrfs_put_block_group(bg);
11005                         if (ret == 1)
11006                                 ret = 0;
11007                         else
11008                                 goto out;
11009                 }
11010 
11011                 if (bsi.block_len &&
11012                     bsi.block_start + bsi.block_len == physical_block_start) {
11013                         bsi.block_len += len;
11014                 } else {
11015                         if (bsi.block_len) {
11016                                 ret = btrfs_add_swap_extent(sis, &bsi);
11017                                 if (ret)
11018                                         goto out;
11019                         }
11020                         bsi.start = start;
11021                         bsi.block_start = physical_block_start;
11022                         bsi.block_len = len;
11023                 }
11024 
11025                 start += len;
11026         }
11027 
11028         if (bsi.block_len)
11029                 ret = btrfs_add_swap_extent(sis, &bsi);
11030 
11031 out:
11032         if (!IS_ERR_OR_NULL(em))
11033                 free_extent_map(em);
11034 
11035         unlock_extent_cached(io_tree, 0, isize - 1, &cached_state);
11036 
11037         if (ret)
11038                 btrfs_swap_deactivate(file);
11039 
11040         clear_bit(BTRFS_FS_EXCL_OP, &fs_info->flags);
11041 
11042         if (ret)
11043                 return ret;
11044 
11045         if (device)
11046                 sis->bdev = device->bdev;
11047         *span = bsi.highest_ppage - bsi.lowest_ppage + 1;
11048         sis->max = bsi.nr_pages;
11049         sis->pages = bsi.nr_pages - 1;
11050         sis->highest_bit = bsi.nr_pages - 1;
11051         return bsi.nr_extents;
11052 }
11053 #else
11054 static void btrfs_swap_deactivate(struct file *file)
11055 {
11056 }
11057 
11058 static int btrfs_swap_activate(struct swap_info_struct *sis, struct file *file,
11059                                sector_t *span)
11060 {
11061         return -EOPNOTSUPP;
11062 }
11063 #endif
11064 
11065 static const struct inode_operations btrfs_dir_inode_operations = {
11066         .getattr        = btrfs_getattr,
11067         .lookup         = btrfs_lookup,
11068         .create         = btrfs_create,
11069         .unlink         = btrfs_unlink,
11070         .link           = btrfs_link,
11071         .mkdir          = btrfs_mkdir,
11072         .rmdir          = btrfs_rmdir,
11073         .rename         = btrfs_rename2,
11074         .symlink        = btrfs_symlink,
11075         .setattr        = btrfs_setattr,
11076         .mknod          = btrfs_mknod,
11077         .listxattr      = btrfs_listxattr,
11078         .permission     = btrfs_permission,
11079         .get_acl        = btrfs_get_acl,
11080         .set_acl        = btrfs_set_acl,
11081         .update_time    = btrfs_update_time,
11082         .tmpfile        = btrfs_tmpfile,
11083 };
11084 static const struct inode_operations btrfs_dir_ro_inode_operations = {
11085         .lookup         = btrfs_lookup,
11086         .permission     = btrfs_permission,
11087         .update_time    = btrfs_update_time,
11088 };
11089 
11090 static const struct file_operations btrfs_dir_file_operations = {
11091         .llseek         = generic_file_llseek,
11092         .read           = generic_read_dir,
11093         .iterate_shared = btrfs_real_readdir,
11094         .open           = btrfs_opendir,
11095         .unlocked_ioctl = btrfs_ioctl,
11096 #ifdef CONFIG_COMPAT
11097         .compat_ioctl   = btrfs_compat_ioctl,
11098 #endif
11099         .release        = btrfs_release_file,
11100         .fsync          = btrfs_sync_file,
11101 };
11102 
11103 static const struct extent_io_ops btrfs_extent_io_ops = {
11104         /* mandatory callbacks */
11105         .submit_bio_hook = btrfs_submit_bio_hook,
11106         .readpage_end_io_hook = btrfs_readpage_end_io_hook,
11107 };
11108 
11109 /*
11110  * btrfs doesn't support the bmap operation because swapfiles
11111  * use bmap to make a mapping of extents in the file.  They assume
11112  * these extents won't change over the life of the file and they
11113  * use the bmap result to do IO directly to the drive.
11114  *
11115  * A btrfs bmap call would return logical addresses that aren't
11116  * suitable for IO, and they will also change frequently as COW
11117  * operations happen.  So, swapfile + btrfs == corruption.
11118  *
11119  * For now we're avoiding this by dropping bmap.
11120  */
11121 static const struct address_space_operations btrfs_aops = {
11122         .readpage       = btrfs_readpage,
11123         .writepage      = btrfs_writepage,
11124         .writepages     = btrfs_writepages,
11125         .readpages      = btrfs_readpages,
11126         .direct_IO      = btrfs_direct_IO,
11127         .invalidatepage = btrfs_invalidatepage,
11128         .releasepage    = btrfs_releasepage,
11129         .set_page_dirty = btrfs_set_page_dirty,
11130         .error_remove_page = generic_error_remove_page,
11131         .swap_activate  = btrfs_swap_activate,
11132         .swap_deactivate = btrfs_swap_deactivate,
11133 };
11134 
11135 static const struct inode_operations btrfs_file_inode_operations = {
11136         .getattr        = btrfs_getattr,
11137         .setattr        = btrfs_setattr,
11138         .listxattr      = btrfs_listxattr,
11139         .permission     = btrfs_permission,
11140         .fiemap         = btrfs_fiemap,
11141         .get_acl        = btrfs_get_acl,
11142         .set_acl        = btrfs_set_acl,
11143         .update_time    = btrfs_update_time,
11144 };
11145 static const struct inode_operations btrfs_special_inode_operations = {
11146         .getattr        = btrfs_getattr,
11147         .setattr        = btrfs_setattr,
11148         .permission     = btrfs_permission,
11149         .listxattr      = btrfs_listxattr,
11150         .get_acl        = btrfs_get_acl,
11151         .set_acl        = btrfs_set_acl,
11152         .update_time    = btrfs_update_time,
11153 };
11154 static const struct inode_operations btrfs_symlink_inode_operations = {
11155         .get_link       = page_get_link,
11156         .getattr        = btrfs_getattr,
11157         .setattr        = btrfs_setattr,
11158         .permission     = btrfs_permission,
11159         .listxattr      = btrfs_listxattr,
11160         .update_time    = btrfs_update_time,
11161 };
11162 
11163 const struct dentry_operations btrfs_dentry_operations = {
11164         .d_delete       = btrfs_dentry_delete,
11165 };
