root/fs/btrfs/ordered-data.c

DEFINITIONS

This source file includes the following definitions.
  1. entry_end
  2. tree_insert
  3. ordered_data_tree_panic
  4. __tree_search
  5. offset_in_entry
  6. range_overlaps
  7. tree_search
  8. __btrfs_add_ordered_extent
  9. btrfs_add_ordered_extent
  10. btrfs_add_ordered_extent_dio
  11. btrfs_add_ordered_extent_compress
  12. btrfs_add_ordered_sum
  13. btrfs_dec_test_first_ordered_pending
  14. btrfs_dec_test_ordered_pending
  15. btrfs_put_ordered_extent
  16. btrfs_remove_ordered_extent
  17. btrfs_run_ordered_extent_work
  18. btrfs_wait_ordered_extents
  19. btrfs_wait_ordered_roots
  20. btrfs_start_ordered_extent
  21. btrfs_wait_ordered_range
  22. btrfs_lookup_ordered_extent
  23. btrfs_lookup_ordered_range
  24. btrfs_lookup_first_ordered_extent
  25. btrfs_ordered_update_i_size
  26. btrfs_find_ordered_sum
  27. btrfs_lock_and_flush_ordered_range
  28. ordered_data_init
  29. ordered_data_exit

// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2007 Oracle.  All rights reserved.
 */

#include <linux/slab.h>
#include <linux/blkdev.h>
#include <linux/writeback.h>
#include <linux/sched/mm.h>
#include "misc.h"
#include "ctree.h"
#include "transaction.h"
#include "btrfs_inode.h"
#include "extent_io.h"
#include "disk-io.h"
#include "compression.h"
#include "delalloc-space.h"

static struct kmem_cache *btrfs_ordered_extent_cache;

static u64 entry_end(struct btrfs_ordered_extent *entry)
{
        if (entry->file_offset + entry->len < entry->file_offset)
                return (u64)-1;
        return entry->file_offset + entry->len;
}
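
/*
 * Example (illustrative values): entry_end() saturates instead of wrapping.
 * A hypothetical entry with file_offset = (u64)-4096 and len = 8192 would
 * overflow u64 arithmetic, so the end is clamped to (u64)-1 rather than
 * producing a small wrapped offset that would corrupt the tree ordering.
 */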

/* returns NULL if the insertion worked, or it returns the node already
 * present in the tree that the new entry conflicts with
 */
static struct rb_node *tree_insert(struct rb_root *root, u64 file_offset,
                                   struct rb_node *node)
{
        struct rb_node **p = &root->rb_node;
        struct rb_node *parent = NULL;
        struct btrfs_ordered_extent *entry;

        while (*p) {
                parent = *p;
                entry = rb_entry(parent, struct btrfs_ordered_extent, rb_node);

                if (file_offset < entry->file_offset)
                        p = &(*p)->rb_left;
                else if (file_offset >= entry_end(entry))
                        p = &(*p)->rb_right;
                else
                        return parent;
        }

        rb_link_node(node, parent, p);
        rb_insert_color(node, root);
        return NULL;
}

static void ordered_data_tree_panic(struct inode *inode, int errno,
                                    u64 offset)
{
        struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);

        btrfs_panic(fs_info, errno,
                    "Inconsistency in ordered tree at offset %llu", offset);
}

/*
 * look for a given offset in the tree, and if it can't be found return the
 * first lesser offset
 */
static struct rb_node *__tree_search(struct rb_root *root, u64 file_offset,
                                     struct rb_node **prev_ret)
{
        struct rb_node *n = root->rb_node;
        struct rb_node *prev = NULL;
        struct rb_node *test;
        struct btrfs_ordered_extent *entry;
        struct btrfs_ordered_extent *prev_entry = NULL;

        while (n) {
                entry = rb_entry(n, struct btrfs_ordered_extent, rb_node);
                prev = n;
                prev_entry = entry;

                if (file_offset < entry->file_offset)
                        n = n->rb_left;
                else if (file_offset >= entry_end(entry))
                        n = n->rb_right;
                else
                        return n;
        }
        if (!prev_ret)
                return NULL;

        while (prev && file_offset >= entry_end(prev_entry)) {
                test = rb_next(prev);
                if (!test)
                        break;
                prev_entry = rb_entry(test, struct btrfs_ordered_extent,
                                      rb_node);
                if (file_offset < entry_end(prev_entry))
                        break;

                prev = test;
        }
        if (prev)
                prev_entry = rb_entry(prev, struct btrfs_ordered_extent,
                                      rb_node);
        while (prev && file_offset < entry_end(prev_entry)) {
                test = rb_prev(prev);
                if (!test)
                        break;
                prev_entry = rb_entry(test, struct btrfs_ordered_extent,
                                      rb_node);
                prev = test;
        }
        *prev_ret = prev;
        return NULL;
}

/*
 * helper to check if a given offset is inside a given entry
 */
static int offset_in_entry(struct btrfs_ordered_extent *entry, u64 file_offset)
{
        if (file_offset < entry->file_offset ||
            entry->file_offset + entry->len <= file_offset)
                return 0;
        return 1;
}

static int range_overlaps(struct btrfs_ordered_extent *entry, u64 file_offset,
                          u64 len)
{
        if (file_offset + len <= entry->file_offset ||
            entry->file_offset + entry->len <= file_offset)
                return 0;
        return 1;
}

/*
 * find the first ordered struct that has this offset, otherwise
 * the first one less than this offset
 */
static inline struct rb_node *tree_search(struct btrfs_ordered_inode_tree *tree,
                                          u64 file_offset)
{
        struct rb_root *root = &tree->tree;
        struct rb_node *prev = NULL;
        struct rb_node *ret;
        struct btrfs_ordered_extent *entry;

        if (tree->last) {
                entry = rb_entry(tree->last, struct btrfs_ordered_extent,
                                 rb_node);
                if (offset_in_entry(entry, file_offset))
                        return tree->last;
        }
        ret = __tree_search(root, file_offset, &prev);
        if (!ret)
                ret = prev;
        if (ret)
                tree->last = ret;
        return ret;
}

/* allocate and add a new ordered_extent into the per-inode tree.
 * file_offset is the logical offset in the file
 *
 * start is the disk block number of an extent already reserved in the
 * extent allocation tree
 *
 * len is the length of the extent
 *
 * The tree is given a single reference on the ordered extent that was
 * inserted.
 */
static int __btrfs_add_ordered_extent(struct inode *inode, u64 file_offset,
                                      u64 start, u64 len, u64 disk_len,
                                      int type, int dio, int compress_type)
{
        struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
        struct btrfs_root *root = BTRFS_I(inode)->root;
        struct btrfs_ordered_inode_tree *tree;
        struct rb_node *node;
        struct btrfs_ordered_extent *entry;

        tree = &BTRFS_I(inode)->ordered_tree;
        entry = kmem_cache_zalloc(btrfs_ordered_extent_cache, GFP_NOFS);
        if (!entry)
                return -ENOMEM;

        entry->file_offset = file_offset;
        entry->start = start;
        entry->len = len;
        entry->disk_len = disk_len;
        entry->bytes_left = len;
        entry->inode = igrab(inode);
        entry->compress_type = compress_type;
        entry->truncated_len = (u64)-1;
        if (type != BTRFS_ORDERED_IO_DONE && type != BTRFS_ORDERED_COMPLETE)
                set_bit(type, &entry->flags);

        if (dio) {
                percpu_counter_add_batch(&fs_info->dio_bytes, len,
                                         fs_info->delalloc_batch);
                set_bit(BTRFS_ORDERED_DIRECT, &entry->flags);
        }

        /* one ref for the tree */
        refcount_set(&entry->refs, 1);
        init_waitqueue_head(&entry->wait);
        INIT_LIST_HEAD(&entry->list);
        INIT_LIST_HEAD(&entry->root_extent_list);
        INIT_LIST_HEAD(&entry->work_list);
        init_completion(&entry->completion);
        INIT_LIST_HEAD(&entry->log_list);
        INIT_LIST_HEAD(&entry->trans_list);

        trace_btrfs_ordered_extent_add(inode, entry);

        spin_lock_irq(&tree->lock);
        node = tree_insert(&tree->tree, file_offset,
                           &entry->rb_node);
        if (node)
                ordered_data_tree_panic(inode, -EEXIST, file_offset);
        spin_unlock_irq(&tree->lock);

        spin_lock(&root->ordered_extent_lock);
        list_add_tail(&entry->root_extent_list,
                      &root->ordered_extents);
        root->nr_ordered_extents++;
        if (root->nr_ordered_extents == 1) {
                spin_lock(&fs_info->ordered_root_lock);
                BUG_ON(!list_empty(&root->ordered_root));
                list_add_tail(&root->ordered_root, &fs_info->ordered_roots);
                spin_unlock(&fs_info->ordered_root_lock);
        }
        spin_unlock(&root->ordered_extent_lock);

        /*
         * We don't need the count_max_extents here, we can assume that all of
         * that work has been done at higher layers, so this is truly the
         * smallest the extent is going to get.
         */
        spin_lock(&BTRFS_I(inode)->lock);
        btrfs_mod_outstanding_extents(BTRFS_I(inode), 1);
        spin_unlock(&BTRFS_I(inode)->lock);

        return 0;
}

int btrfs_add_ordered_extent(struct inode *inode, u64 file_offset,
                             u64 start, u64 len, u64 disk_len, int type)
{
        return __btrfs_add_ordered_extent(inode, file_offset, start, len,
                                          disk_len, type, 0,
                                          BTRFS_COMPRESS_NONE);
}

int btrfs_add_ordered_extent_dio(struct inode *inode, u64 file_offset,
                                 u64 start, u64 len, u64 disk_len, int type)
{
        return __btrfs_add_ordered_extent(inode, file_offset, start, len,
                                          disk_len, type, 1,
                                          BTRFS_COMPRESS_NONE);
}

int btrfs_add_ordered_extent_compress(struct inode *inode, u64 file_offset,
                                      u64 start, u64 len, u64 disk_len,
                                      int type, int compress_type)
{
        return __btrfs_add_ordered_extent(inode, file_offset, start, len,
                                          disk_len, type, 0,
                                          compress_type);
}
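
/*
 * Caller sketch (hypothetical values, modeled on the cow_file_range()-style
 * writeback paths): once a disk extent has been reserved, it is pinned by an
 * ordered extent before the pages are handed to the block layer, e.g.:
 *
 *      ret = btrfs_add_ordered_extent(inode, file_offset, ins.objectid,
 *                                     ram_size, ins.offset,
 *                                     BTRFS_ORDERED_REGULAR);
 *      if (ret)
 *              goto out_free_reserve;
 *
 * where "ins" is assumed to be the btrfs_key returned by the extent
 * allocator (objectid = disk bytenr, offset = reserved length).
 */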

/*
 * Add a struct btrfs_ordered_sum into the list of checksums to be inserted
 * when an ordered extent is finished.  If the list covers more than one
 * ordered extent, it is split across multiple ordered extents.
 */
void btrfs_add_ordered_sum(struct btrfs_ordered_extent *entry,
                           struct btrfs_ordered_sum *sum)
{
        struct btrfs_ordered_inode_tree *tree;

        tree = &BTRFS_I(entry->inode)->ordered_tree;
        spin_lock_irq(&tree->lock);
        list_add_tail(&sum->list, &entry->list);
        spin_unlock_irq(&tree->lock);
}

/*
 * this is used to account for finished IO across a given range
 * of the file.  The IO may span ordered extents.  If
 * a given ordered_extent is completely done, 1 is returned, otherwise
 * 0.
 *
 * test_and_set_bit on a flag in the struct btrfs_ordered_extent is used
 * to make sure this function only returns 1 once for a given ordered extent.
 *
 * file_offset is updated to one byte past the range that is recorded as
 * complete.  This allows you to walk forward in the file.
 */
int btrfs_dec_test_first_ordered_pending(struct inode *inode,
                                   struct btrfs_ordered_extent **cached,
                                   u64 *file_offset, u64 io_size, int uptodate)
{
        struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
        struct btrfs_ordered_inode_tree *tree;
        struct rb_node *node;
        struct btrfs_ordered_extent *entry = NULL;
        int ret;
        unsigned long flags;
        u64 dec_end;
        u64 dec_start;
        u64 to_dec;

        tree = &BTRFS_I(inode)->ordered_tree;
        spin_lock_irqsave(&tree->lock, flags);
        node = tree_search(tree, *file_offset);
        if (!node) {
                ret = 1;
                goto out;
        }

        entry = rb_entry(node, struct btrfs_ordered_extent, rb_node);
        if (!offset_in_entry(entry, *file_offset)) {
                ret = 1;
                goto out;
        }

        dec_start = max(*file_offset, entry->file_offset);
        dec_end = min(*file_offset + io_size, entry->file_offset +
                      entry->len);
        *file_offset = dec_end;
        if (dec_start > dec_end) {
                btrfs_crit(fs_info, "bad ordering dec_start %llu end %llu",
                           dec_start, dec_end);
        }
        to_dec = dec_end - dec_start;
        if (to_dec > entry->bytes_left) {
                btrfs_crit(fs_info,
                           "bad ordered accounting left %llu size %llu",
                           entry->bytes_left, to_dec);
        }
        entry->bytes_left -= to_dec;
        if (!uptodate)
                set_bit(BTRFS_ORDERED_IOERR, &entry->flags);

        if (entry->bytes_left == 0) {
                ret = test_and_set_bit(BTRFS_ORDERED_IO_DONE, &entry->flags);
                /* test_and_set_bit implies a barrier */
                cond_wake_up_nomb(&entry->wait);
        } else {
                ret = 1;
        }
out:
        if (!ret && cached && entry) {
                *cached = entry;
                refcount_inc(&entry->refs);
        }
        spin_unlock_irqrestore(&tree->lock, flags);
        return ret == 0;
}
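
/*
 * Usage sketch (modeled on the direct IO completion path; finish_ordered_io()
 * is a stand-in for the caller's completion work and is assumed to consume
 * the reference taken via the cached argument): because *file_offset is
 * advanced each time a range is accounted, one bio completion can walk
 * across several ordered extents:
 *
 *      u64 cur = start;                // first byte the bio covered
 *      const u64 end = start + len;    // one past the last byte
 *
 *      while (cur < end) {
 *              struct btrfs_ordered_extent *ordered = NULL;
 *              u64 last = cur;
 *
 *              if (btrfs_dec_test_first_ordered_pending(inode, &ordered,
 *                                                       &cur, end - cur,
 *                                                       uptodate))
 *                      finish_ordered_io(ordered);
 *              if (cur == last)        // no ordered extent found, stop
 *                      break;
 *      }
 */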

/*
 * this is used to account for finished IO across a given range
 * of the file.  The IO should not span ordered extents.  If
 * a given ordered_extent is completely done, 1 is returned, otherwise
 * 0.
 *
 * test_and_set_bit on a flag in the struct btrfs_ordered_extent is used
 * to make sure this function only returns 1 once for a given ordered extent.
 */
int btrfs_dec_test_ordered_pending(struct inode *inode,
                                   struct btrfs_ordered_extent **cached,
                                   u64 file_offset, u64 io_size, int uptodate)
{
        struct btrfs_ordered_inode_tree *tree;
        struct rb_node *node;
        struct btrfs_ordered_extent *entry = NULL;
        unsigned long flags;
        int ret;

        tree = &BTRFS_I(inode)->ordered_tree;
        spin_lock_irqsave(&tree->lock, flags);
        if (cached && *cached) {
                entry = *cached;
                goto have_entry;
        }

        node = tree_search(tree, file_offset);
        if (!node) {
                ret = 1;
                goto out;
        }

        entry = rb_entry(node, struct btrfs_ordered_extent, rb_node);
have_entry:
        if (!offset_in_entry(entry, file_offset)) {
                ret = 1;
                goto out;
        }

        if (io_size > entry->bytes_left) {
                btrfs_crit(BTRFS_I(inode)->root->fs_info,
                           "bad ordered accounting left %llu size %llu",
                           entry->bytes_left, io_size);
        }
        entry->bytes_left -= io_size;
        if (!uptodate)
                set_bit(BTRFS_ORDERED_IOERR, &entry->flags);

        if (entry->bytes_left == 0) {
                ret = test_and_set_bit(BTRFS_ORDERED_IO_DONE, &entry->flags);
                /* test_and_set_bit implies a barrier */
                cond_wake_up_nomb(&entry->wait);
        } else {
                ret = 1;
        }
out:
        if (!ret && cached && entry) {
                *cached = entry;
                refcount_inc(&entry->refs);
        }
        spin_unlock_irqrestore(&tree->lock, flags);
        return ret == 0;
}

/*
 * used to drop a reference on an ordered extent.  This will free
 * the extent if the last reference is dropped
 */
void btrfs_put_ordered_extent(struct btrfs_ordered_extent *entry)
{
        struct list_head *cur;
        struct btrfs_ordered_sum *sum;

        trace_btrfs_ordered_extent_put(entry->inode, entry);

        if (refcount_dec_and_test(&entry->refs)) {
                ASSERT(list_empty(&entry->log_list));
                ASSERT(list_empty(&entry->trans_list));
                ASSERT(list_empty(&entry->root_extent_list));
                ASSERT(RB_EMPTY_NODE(&entry->rb_node));
                if (entry->inode)
                        btrfs_add_delayed_iput(entry->inode);
                while (!list_empty(&entry->list)) {
                        cur = entry->list.next;
                        sum = list_entry(cur, struct btrfs_ordered_sum, list);
                        list_del(&sum->list);
                        kvfree(sum);
                }
                kmem_cache_free(btrfs_ordered_extent_cache, entry);
        }
}

/*
 * remove an ordered extent from the tree.  No references are dropped
 * and waiters are woken up.
 */
void btrfs_remove_ordered_extent(struct inode *inode,
                                 struct btrfs_ordered_extent *entry)
{
        struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
        struct btrfs_ordered_inode_tree *tree;
        struct btrfs_inode *btrfs_inode = BTRFS_I(inode);
        struct btrfs_root *root = btrfs_inode->root;
        struct rb_node *node;

        /* This is paired with btrfs_add_ordered_extent. */
        spin_lock(&btrfs_inode->lock);
        btrfs_mod_outstanding_extents(btrfs_inode, -1);
        spin_unlock(&btrfs_inode->lock);
        if (root != fs_info->tree_root)
                btrfs_delalloc_release_metadata(btrfs_inode, entry->len, false);

        if (test_bit(BTRFS_ORDERED_DIRECT, &entry->flags))
                percpu_counter_add_batch(&fs_info->dio_bytes, -entry->len,
                                         fs_info->delalloc_batch);

        tree = &btrfs_inode->ordered_tree;
        spin_lock_irq(&tree->lock);
        node = &entry->rb_node;
        rb_erase(node, &tree->tree);
        RB_CLEAR_NODE(node);
        if (tree->last == node)
                tree->last = NULL;
        set_bit(BTRFS_ORDERED_COMPLETE, &entry->flags);
        spin_unlock_irq(&tree->lock);

        spin_lock(&root->ordered_extent_lock);
        list_del_init(&entry->root_extent_list);
        root->nr_ordered_extents--;

        trace_btrfs_ordered_extent_remove(inode, entry);

        if (!root->nr_ordered_extents) {
                spin_lock(&fs_info->ordered_root_lock);
                BUG_ON(list_empty(&root->ordered_root));
                list_del_init(&root->ordered_root);
                spin_unlock(&fs_info->ordered_root_lock);
        }
        spin_unlock(&root->ordered_extent_lock);
        wake_up(&entry->wait);
}
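
/*
 * Lifecycle summary (recap of the functions above, not new behavior):
 *
 *      btrfs_add_ordered_extent()          creates it, tree holds one ref
 *      btrfs_dec_test_ordered_pending()    accounts IO until bytes_left == 0
 *      btrfs_remove_ordered_extent()       unlinks it and wakes waiters
 *      btrfs_put_ordered_extent()          drops the final reference
 *
 * Note that BTRFS_ORDERED_COMPLETE is set under the tree lock above, before
 * the wake_up(), which is the condition btrfs_start_ordered_extent() waits
 * on.
 */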

static void btrfs_run_ordered_extent_work(struct btrfs_work *work)
{
        struct btrfs_ordered_extent *ordered;

        ordered = container_of(work, struct btrfs_ordered_extent, flush_work);
        btrfs_start_ordered_extent(ordered->inode, ordered, 1);
        complete(&ordered->completion);
}

/*
 * wait for all the ordered extents in a root.  This is done when balancing
 * space between drives.
 */
u64 btrfs_wait_ordered_extents(struct btrfs_root *root, u64 nr,
                               const u64 range_start, const u64 range_len)
{
        struct btrfs_fs_info *fs_info = root->fs_info;
        LIST_HEAD(splice);
        LIST_HEAD(skipped);
        LIST_HEAD(works);
        struct btrfs_ordered_extent *ordered, *next;
        u64 count = 0;
        const u64 range_end = range_start + range_len;

        mutex_lock(&root->ordered_extent_mutex);
        spin_lock(&root->ordered_extent_lock);
        list_splice_init(&root->ordered_extents, &splice);
        while (!list_empty(&splice) && nr) {
                ordered = list_first_entry(&splice, struct btrfs_ordered_extent,
                                           root_extent_list);

                if (range_end <= ordered->start ||
                    ordered->start + ordered->disk_len <= range_start) {
                        list_move_tail(&ordered->root_extent_list, &skipped);
                        cond_resched_lock(&root->ordered_extent_lock);
                        continue;
                }

                list_move_tail(&ordered->root_extent_list,
                               &root->ordered_extents);
                refcount_inc(&ordered->refs);
                spin_unlock(&root->ordered_extent_lock);

                btrfs_init_work(&ordered->flush_work,
                                btrfs_run_ordered_extent_work, NULL, NULL);
                list_add_tail(&ordered->work_list, &works);
                btrfs_queue_work(fs_info->flush_workers, &ordered->flush_work);

                cond_resched();
                spin_lock(&root->ordered_extent_lock);
                if (nr != U64_MAX)
                        nr--;
                count++;
        }
        list_splice_tail(&skipped, &root->ordered_extents);
        list_splice_tail(&splice, &root->ordered_extents);
        spin_unlock(&root->ordered_extent_lock);

        list_for_each_entry_safe(ordered, next, &works, work_list) {
                list_del_init(&ordered->work_list);
                wait_for_completion(&ordered->completion);
                btrfs_put_ordered_extent(ordered);
                cond_resched();
        }
        mutex_unlock(&root->ordered_extent_mutex);

        return count;
}

u64 btrfs_wait_ordered_roots(struct btrfs_fs_info *fs_info, u64 nr,
                             const u64 range_start, const u64 range_len)
{
        struct btrfs_root *root;
        struct list_head splice;
        u64 total_done = 0;
        u64 done;

        INIT_LIST_HEAD(&splice);

        mutex_lock(&fs_info->ordered_operations_mutex);
        spin_lock(&fs_info->ordered_root_lock);
        list_splice_init(&fs_info->ordered_roots, &splice);
        while (!list_empty(&splice) && nr) {
                root = list_first_entry(&splice, struct btrfs_root,
                                        ordered_root);
                root = btrfs_grab_fs_root(root);
                BUG_ON(!root);
                list_move_tail(&root->ordered_root,
                               &fs_info->ordered_roots);
                spin_unlock(&fs_info->ordered_root_lock);

                done = btrfs_wait_ordered_extents(root, nr,
                                                  range_start, range_len);
                btrfs_put_fs_root(root);
                total_done += done;

                spin_lock(&fs_info->ordered_root_lock);
                if (nr != U64_MAX)
                        nr -= done;
        }
        list_splice_tail(&splice, &fs_info->ordered_roots);
        spin_unlock(&fs_info->ordered_root_lock);
        mutex_unlock(&fs_info->ordered_operations_mutex);

        return total_done;
}

/*
 * Used to start IO or wait for a given ordered extent to finish.
 *
 * If wait is one, this effectively waits on page writeback for all the pages
 * in the extent, and it waits on the io completion code to insert
 * metadata into the btree corresponding to the extent.
 */
void btrfs_start_ordered_extent(struct inode *inode,
                                struct btrfs_ordered_extent *entry,
                                int wait)
{
        u64 start = entry->file_offset;
        u64 end = start + entry->len - 1;

        trace_btrfs_ordered_extent_start(inode, entry);

        /*
         * pages in the range can be dirty, clean or writeback.  We
         * start IO on any dirty ones so the wait doesn't stall waiting
         * for the flusher thread to find them
         */
        if (!test_bit(BTRFS_ORDERED_DIRECT, &entry->flags))
                filemap_fdatawrite_range(inode->i_mapping, start, end);
        if (wait) {
                wait_event(entry->wait, test_bit(BTRFS_ORDERED_COMPLETE,
                                                 &entry->flags));
        }
}
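
/*
 * Waiter sketch (hypothetical caller): to make a file range stable, look the
 * ordered extent up, wait for it, then drop the lookup reference:
 *
 *      struct btrfs_ordered_extent *ordered;
 *
 *      ordered = btrfs_lookup_ordered_extent(inode, page_offset(page));
 *      if (ordered) {
 *              btrfs_start_ordered_extent(inode, ordered, 1);
 *              btrfs_put_ordered_extent(ordered);
 *      }
 */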

/*
 * Used to wait on ordered extents across a large range of bytes.
 */
int btrfs_wait_ordered_range(struct inode *inode, u64 start, u64 len)
{
        int ret = 0;
        int ret_wb = 0;
        u64 end;
        u64 orig_end;
        struct btrfs_ordered_extent *ordered;

        if (start + len < start) {
                orig_end = INT_LIMIT(loff_t);
        } else {
                orig_end = start + len - 1;
                if (orig_end > INT_LIMIT(loff_t))
                        orig_end = INT_LIMIT(loff_t);
        }

        /* start IO across the range first to instantiate any delalloc
         * extents
         */
        ret = btrfs_fdatawrite_range(inode, start, orig_end);
        if (ret)
                return ret;

        /*
         * If we have a writeback error don't return immediately. Wait first
         * for any ordered extents that haven't completed yet. This is to make
         * sure no one can dirty the same page ranges and call writepages()
         * before the ordered extents complete - to avoid failures (-EEXIST)
         * when adding the new ordered extents to the ordered tree.
         */
        ret_wb = filemap_fdatawait_range(inode->i_mapping, start, orig_end);

        end = orig_end;
        while (1) {
                ordered = btrfs_lookup_first_ordered_extent(inode, end);
                if (!ordered)
                        break;
                if (ordered->file_offset > orig_end) {
                        btrfs_put_ordered_extent(ordered);
                        break;
                }
                if (ordered->file_offset + ordered->len <= start) {
                        btrfs_put_ordered_extent(ordered);
                        break;
                }
                btrfs_start_ordered_extent(inode, ordered, 1);
                end = ordered->file_offset;
                /*
                 * If the ordered extent had an error save the error but don't
                 * exit without waiting first for all other ordered extents in
                 * the range to complete.
                 */
                if (test_bit(BTRFS_ORDERED_IOERR, &ordered->flags))
                        ret = -EIO;
                btrfs_put_ordered_extent(ordered);
                if (end == 0 || end == start)
                        break;
                end--;
        }
        return ret_wb ? ret_wb : ret;
}
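
/*
 * Usage sketch: callers that need every ordered extent of an inode finished
 * (truncate-style paths, for instance) typically pass a length that covers
 * the whole file:
 *
 *      ret = btrfs_wait_ordered_range(inode, 0, (u64)-1);
 *
 * The start + len overflow check at the top of the function exists precisely
 * so this "whole file" form clamps cleanly to the maximum loff_t.
 */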

/*
 * find an ordered extent corresponding to file_offset.  return NULL if
 * nothing is found, otherwise take a reference on the extent and return it
 */
struct btrfs_ordered_extent *btrfs_lookup_ordered_extent(struct inode *inode,
                                                         u64 file_offset)
{
        struct btrfs_ordered_inode_tree *tree;
        struct rb_node *node;
        struct btrfs_ordered_extent *entry = NULL;

        tree = &BTRFS_I(inode)->ordered_tree;
        spin_lock_irq(&tree->lock);
        node = tree_search(tree, file_offset);
        if (!node)
                goto out;

        entry = rb_entry(node, struct btrfs_ordered_extent, rb_node);
        if (!offset_in_entry(entry, file_offset))
                entry = NULL;
        if (entry)
                refcount_inc(&entry->refs);
out:
        spin_unlock_irq(&tree->lock);
        return entry;
}
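
/*
 * Non-blocking sketch (assumed page-release style check): the lookup alone
 * can be used to detect in-flight ordered IO without waiting on it:
 *
 *      ordered = btrfs_lookup_ordered_extent(inode, page_offset(page));
 *      if (ordered) {
 *              btrfs_put_ordered_extent(ordered);
 *              return 0;       // range is busy, back off
 *      }
 *      return 1;               // nothing ordered is pending here
 */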

/*
 * Since the DIO code tries to lock a wide area we need to look for any ordered
 * extents that exist in the range, rather than just the start of the range.
 */
struct btrfs_ordered_extent *btrfs_lookup_ordered_range(
                struct btrfs_inode *inode, u64 file_offset, u64 len)
{
        struct btrfs_ordered_inode_tree *tree;
        struct rb_node *node;
        struct btrfs_ordered_extent *entry = NULL;

        tree = &inode->ordered_tree;
        spin_lock_irq(&tree->lock);
        node = tree_search(tree, file_offset);
        if (!node) {
                node = tree_search(tree, file_offset + len);
                if (!node)
                        goto out;
        }

        while (1) {
                entry = rb_entry(node, struct btrfs_ordered_extent, rb_node);
                if (range_overlaps(entry, file_offset, len))
                        break;

                if (entry->file_offset >= file_offset + len) {
                        entry = NULL;
                        break;
                }
                entry = NULL;
                node = rb_next(node);
                if (!node)
                        break;
        }
out:
        if (entry)
                refcount_inc(&entry->refs);
        spin_unlock_irq(&tree->lock);
        return entry;
}

/*
 * lookup and return any extent before 'file_offset'.  NULL is returned
 * if none is found
 */
struct btrfs_ordered_extent *
btrfs_lookup_first_ordered_extent(struct inode *inode, u64 file_offset)
{
        struct btrfs_ordered_inode_tree *tree;
        struct rb_node *node;
        struct btrfs_ordered_extent *entry = NULL;

        tree = &BTRFS_I(inode)->ordered_tree;
        spin_lock_irq(&tree->lock);
        node = tree_search(tree, file_offset);
        if (!node)
                goto out;

        entry = rb_entry(node, struct btrfs_ordered_extent, rb_node);
        refcount_inc(&entry->refs);
out:
        spin_unlock_irq(&tree->lock);
        return entry;
}

/*
 * After an extent is done, call this to conditionally update the on disk
 * i_size.  i_size is updated to cover any fully written part of the file.
 */
int btrfs_ordered_update_i_size(struct inode *inode, u64 offset,
                                struct btrfs_ordered_extent *ordered)
{
        struct btrfs_ordered_inode_tree *tree = &BTRFS_I(inode)->ordered_tree;
        u64 disk_i_size;
        u64 new_i_size;
        u64 i_size = i_size_read(inode);
        struct rb_node *node;
        struct rb_node *prev = NULL;
        struct btrfs_ordered_extent *test;
        int ret = 1;
        u64 orig_offset = offset;

        spin_lock_irq(&tree->lock);
        if (ordered) {
                offset = entry_end(ordered);
                if (test_bit(BTRFS_ORDERED_TRUNCATED, &ordered->flags))
                        offset = min(offset,
                                     ordered->file_offset +
                                     ordered->truncated_len);
        } else {
                offset = ALIGN(offset, btrfs_inode_sectorsize(inode));
        }
        disk_i_size = BTRFS_I(inode)->disk_i_size;

        /*
         * truncate file.
         * If ordered is not NULL, then this is called from endio and
         * disk_i_size will be updated by either truncate itself or any
         * in-flight IOs which are inside the disk_i_size.
         *
         * Because btrfs_setsize() may set i_size with disk_i_size if truncate
         * fails somehow, we need to make sure we have a precise disk_i_size by
         * updating it as usual.
         */
        if (!ordered && disk_i_size > i_size) {
                BTRFS_I(inode)->disk_i_size = orig_offset;
                ret = 0;
                goto out;
        }

        /*
         * if the disk i_size is already at the inode->i_size, or
         * this ordered extent is inside the disk i_size, we're done
         */
        if (disk_i_size == i_size)
                goto out;

        /*
         * We still need to update disk_i_size if outstanding_isize is greater
         * than disk_i_size.
         */
        if (offset <= disk_i_size &&
            (!ordered || ordered->outstanding_isize <= disk_i_size))
                goto out;

        /*
         * walk backward from this ordered extent to disk_i_size.
         * if we find an ordered extent then we can't update disk i_size
         * yet
         */
        if (ordered) {
                node = rb_prev(&ordered->rb_node);
        } else {
                prev = tree_search(tree, offset);
                /*
                 * we insert file extents without involving ordered struct,
                 * so there should be no ordered struct covering this offset
                 */
                if (prev) {
                        test = rb_entry(prev, struct btrfs_ordered_extent,
                                        rb_node);
                        BUG_ON(offset_in_entry(test, offset));
                }
                node = prev;
        }
        for (; node; node = rb_prev(node)) {
                test = rb_entry(node, struct btrfs_ordered_extent, rb_node);

                /* We treat this entry as if it doesn't exist */
                if (test_bit(BTRFS_ORDERED_UPDATED_ISIZE, &test->flags))
                        continue;

                if (entry_end(test) <= disk_i_size)
                        break;
                if (test->file_offset >= i_size)
                        break;
                /*
                 * We don't update disk_i_size now, so record this pending
                 * i_size update; otherwise we would lose track of the real
                 * i_size.
                 */
                if (test->outstanding_isize < offset)
                        test->outstanding_isize = offset;
                if (ordered &&
                    ordered->outstanding_isize > test->outstanding_isize)
                        test->outstanding_isize = ordered->outstanding_isize;
                goto out;
        }
        new_i_size = min_t(u64, offset, i_size);

        /*
         * Some ordered extents may have completed before the current one, and
         * we hold the real i_size in ->outstanding_isize.
         */
        if (ordered && ordered->outstanding_isize > new_i_size)
                new_i_size = min_t(u64, ordered->outstanding_isize, i_size);
        BTRFS_I(inode)->disk_i_size = new_i_size;
        ret = 0;
out:
        /*
         * We need to do this because we can't remove ordered extents until
         * after the i_disk_size has been updated and then the inode has been
         * updated to reflect the change, so we need to tell anybody who finds
         * this ordered extent that we've already done all the real work, we
         * just haven't completed all the other work.
         */
        if (ordered)
                set_bit(BTRFS_ORDERED_UPDATED_ISIZE, &ordered->flags);
        spin_unlock_irq(&tree->lock);
        return ret;
}
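
/*
 * Worked example (illustrative numbers): let disk_i_size be 0 with two
 * ordered extents covering [0, 4096) and [4096, 8192).  If the second one
 * finishes first, the backward walk above finds the still-pending first
 * extent, records 8192 in its ->outstanding_isize and returns without
 * touching disk_i_size.  When the first extent later finishes, no pending
 * predecessor remains and disk_i_size jumps straight to the recorded 8192.
 */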

/*
 * search the ordered extents for one corresponding to 'offset' and
 * try to find a checksum.  This is used because we allow pages to
 * be reclaimed before their checksum is actually put into the btree
 */
int btrfs_find_ordered_sum(struct inode *inode, u64 offset, u64 disk_bytenr,
                           u8 *sum, int len)
{
        struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
        struct btrfs_ordered_sum *ordered_sum;
        struct btrfs_ordered_extent *ordered;
        struct btrfs_ordered_inode_tree *tree = &BTRFS_I(inode)->ordered_tree;
        unsigned long num_sectors;
        unsigned long i;
        u32 sectorsize = btrfs_inode_sectorsize(inode);
        const u16 csum_size = btrfs_super_csum_size(fs_info->super_copy);
        int index = 0;

        ordered = btrfs_lookup_ordered_extent(inode, offset);
        if (!ordered)
                return 0;

        spin_lock_irq(&tree->lock);
        list_for_each_entry_reverse(ordered_sum, &ordered->list, list) {
                if (disk_bytenr >= ordered_sum->bytenr &&
                    disk_bytenr < ordered_sum->bytenr + ordered_sum->len) {
                        i = (disk_bytenr - ordered_sum->bytenr) >>
                            inode->i_sb->s_blocksize_bits;
                        num_sectors = ordered_sum->len >>
                                      inode->i_sb->s_blocksize_bits;
                        num_sectors = min_t(int, len - index, num_sectors - i);
                        memcpy(sum + index, ordered_sum->sums + i * csum_size,
                               num_sectors * csum_size);

                        index += (int)num_sectors * csum_size;
                        if (index == len)
                                goto out;
                        disk_bytenr += num_sectors * sectorsize;
                }
        }
out:
        spin_unlock_irq(&tree->lock);
        btrfs_put_ordered_extent(ordered);
        return index;
}

/*
 * btrfs_lock_and_flush_ordered_range - Lock the passed range and ensure all
 * pending ordered extents in it are run to completion.
 *
 * @tree:         IO tree used for locking out other users of the range
 * @inode:        Inode whose ordered tree is to be searched
 * @start:        Beginning of range to flush
 * @end:          Last byte of range to lock
 * @cached_state: If passed, will return the extent state responsible for the
 * locked range. It's the caller's responsibility to free the cached state.
 *
 * This function always returns with the given range locked, ensuring after
 * it's called no ordered extent can be pending.
 */
void btrfs_lock_and_flush_ordered_range(struct extent_io_tree *tree,
                                        struct btrfs_inode *inode, u64 start,
                                        u64 end,
                                        struct extent_state **cached_state)
{
        struct btrfs_ordered_extent *ordered;
        struct extent_state *cache = NULL;
        struct extent_state **cachedp = &cache;

        if (cached_state)
                cachedp = cached_state;

        while (1) {
                lock_extent_bits(tree, start, end, cachedp);
                ordered = btrfs_lookup_ordered_range(inode, start,
                                                     end - start + 1);
                if (!ordered) {
                        /*
                         * If no external cached_state has been passed then
                         * decrement the extra ref taken for cachedp since we
                         * aren't exposing it outside of this function
                         */
                        if (!cached_state)
                                refcount_dec(&cache->refs);
                        break;
                }
                unlock_extent_cached(tree, start, end, cachedp);
                btrfs_start_ordered_extent(&inode->vfs_inode, ordered, 1);
                btrfs_put_ordered_extent(ordered);
        }
}
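
/*
 * Usage sketch (assumed read-path caller): lock a range, knowing no ordered
 * extent can still be pending against it, then unlock with the cached state:
 *
 *      struct extent_state *cached = NULL;
 *
 *      btrfs_lock_and_flush_ordered_range(io_tree, BTRFS_I(inode),
 *                                         start, end, &cached);
 *      ... operate on the now-stable locked range ...
 *      unlock_extent_cached(io_tree, start, end, &cached);
 */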

int __init ordered_data_init(void)
{
        btrfs_ordered_extent_cache = kmem_cache_create("btrfs_ordered_extent",
                                     sizeof(struct btrfs_ordered_extent), 0,
                                     SLAB_MEM_SPREAD,
                                     NULL);
        if (!btrfs_ordered_extent_cache)
                return -ENOMEM;

        return 0;
}
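
/*
 * Wiring note (assumption about callers outside this file): this init/exit
 * pair is expected to run once from the btrfs module load and unload paths,
 * e.g.:
 *
 *      ret = ordered_data_init();
 *      if (ret)
 *              goto free_prev_caches;  // hypothetical unwind label
 */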

void __cold ordered_data_exit(void)
{
        kmem_cache_destroy(btrfs_ordered_extent_cache);
}
