root/fs/btrfs/delayed-inode.c


DEFINITIONS

This source file includes the following definitions.
  1. btrfs_delayed_inode_init
  2. btrfs_delayed_inode_exit
  3. btrfs_init_delayed_node
  4. btrfs_is_continuous_delayed_item
  5. btrfs_get_delayed_node
  6. btrfs_get_or_create_delayed_node
  7. btrfs_queue_delayed_node
  8. btrfs_dequeue_delayed_node
  9. btrfs_first_delayed_node
  10. btrfs_next_delayed_node
  11. __btrfs_release_delayed_node
  12. btrfs_release_delayed_node
  13. btrfs_first_prepared_delayed_node
  14. btrfs_release_prepared_delayed_node
  15. btrfs_alloc_delayed_item
  16. __btrfs_lookup_delayed_item
  17. __btrfs_lookup_delayed_insertion_item
  18. __btrfs_add_delayed_item
  19. __btrfs_add_delayed_insertion_item
  20. __btrfs_add_delayed_deletion_item
  21. finish_one_item
  22. __btrfs_remove_delayed_item
  23. btrfs_release_delayed_item
  24. __btrfs_first_delayed_insertion_item
  25. __btrfs_first_delayed_deletion_item
  26. __btrfs_next_delayed_item
  27. btrfs_delayed_item_reserve_metadata
  28. btrfs_delayed_item_release_metadata
  29. btrfs_delayed_inode_reserve_metadata
  30. btrfs_delayed_inode_release_metadata
  31. btrfs_batch_insert_items
  32. btrfs_insert_delayed_item
  33. btrfs_insert_delayed_items
  34. btrfs_batch_delete_items
  35. btrfs_delete_delayed_items
  36. btrfs_release_delayed_inode
  37. btrfs_release_delayed_iref
  38. __btrfs_update_delayed_inode
  39. btrfs_update_delayed_inode
  40. __btrfs_commit_inode_delayed_items
  41. __btrfs_run_delayed_items
  42. btrfs_run_delayed_items
  43. btrfs_run_delayed_items_nr
  44. btrfs_commit_inode_delayed_items
  45. btrfs_commit_inode_delayed_inode
  46. btrfs_remove_delayed_node
  47. btrfs_async_run_delayed_root
  48. btrfs_wq_run_delayed_node
  49. btrfs_assert_delayed_root_empty
  50. could_end_wait
  51. btrfs_balance_delayed_items
  52. btrfs_insert_delayed_dir_index
  53. btrfs_delete_delayed_insertion_item
  54. btrfs_delete_delayed_dir_index
  55. btrfs_inode_delayed_dir_index_count
  56. btrfs_readdir_get_delayed_items
  57. btrfs_readdir_put_delayed_items
  58. btrfs_should_delete_dir_index
  59. btrfs_readdir_delayed_dir_index
  60. fill_stack_inode_item
  61. btrfs_fill_inode
  62. btrfs_delayed_update_inode
  63. btrfs_delayed_delete_inode_ref
  64. __btrfs_kill_delayed_node
  65. btrfs_kill_delayed_inode_items
  66. btrfs_kill_all_delayed_nodes
  67. btrfs_destroy_delayed_inodes

   1 // SPDX-License-Identifier: GPL-2.0
   2 /*
   3  * Copyright (C) 2011 Fujitsu.  All rights reserved.
   4  * Written by Miao Xie <miaox@cn.fujitsu.com>
   5  */
   6 
   7 #include <linux/slab.h>
   8 #include <linux/iversion.h>
   9 #include <linux/sched/mm.h>
  10 #include "misc.h"
  11 #include "delayed-inode.h"
  12 #include "disk-io.h"
  13 #include "transaction.h"
  14 #include "ctree.h"
  15 #include "qgroup.h"
  16 
  17 #define BTRFS_DELAYED_WRITEBACK         512
  18 #define BTRFS_DELAYED_BACKGROUND        128
  19 #define BTRFS_DELAYED_BATCH             16
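
     /*
      * Rough summary of how these thresholds are used below: once the number
      * of pending delayed items reaches BTRFS_DELAYED_BACKGROUND, background
      * flushing kicks in (and waiters are woken when the count drops back
      * below it); at BTRFS_DELAYED_WRITEBACK, btrfs_balance_delayed_items()
      * also waits for the async flush; BTRFS_DELAYED_BATCH is the flush and
      * wakeup granularity.
      */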
  20 
  21 static struct kmem_cache *delayed_node_cache;
  22 
  23 int __init btrfs_delayed_inode_init(void)
  24 {
  25         delayed_node_cache = kmem_cache_create("btrfs_delayed_node",
  26                                         sizeof(struct btrfs_delayed_node),
  27                                         0,
  28                                         SLAB_MEM_SPREAD,
  29                                         NULL);
  30         if (!delayed_node_cache)
  31                 return -ENOMEM;
  32         return 0;
  33 }
  34 
  35 void __cold btrfs_delayed_inode_exit(void)
  36 {
  37         kmem_cache_destroy(delayed_node_cache);
  38 }
  39 
  40 static inline void btrfs_init_delayed_node(
  41                                 struct btrfs_delayed_node *delayed_node,
  42                                 struct btrfs_root *root, u64 inode_id)
  43 {
  44         delayed_node->root = root;
  45         delayed_node->inode_id = inode_id;
  46         refcount_set(&delayed_node->refs, 0);
  47         delayed_node->ins_root = RB_ROOT_CACHED;
  48         delayed_node->del_root = RB_ROOT_CACHED;
  49         mutex_init(&delayed_node->mutex);
  50         INIT_LIST_HEAD(&delayed_node->n_list);
  51         INIT_LIST_HEAD(&delayed_node->p_list);
  52 }
  53 
  54 static inline int btrfs_is_continuous_delayed_item(
  55                                         struct btrfs_delayed_item *item1,
  56                                         struct btrfs_delayed_item *item2)
  57 {
  58         if (item1->key.type == BTRFS_DIR_INDEX_KEY &&
  59             item1->key.objectid == item2->key.objectid &&
  60             item1->key.type == item2->key.type &&
  61             item1->key.offset + 1 == item2->key.offset)
  62                 return 1;
  63         return 0;
  64 }
  65 
  66 static struct btrfs_delayed_node *btrfs_get_delayed_node(
  67                 struct btrfs_inode *btrfs_inode)
  68 {
  69         struct btrfs_root *root = btrfs_inode->root;
  70         u64 ino = btrfs_ino(btrfs_inode);
  71         struct btrfs_delayed_node *node;
  72 
  73         node = READ_ONCE(btrfs_inode->delayed_node);
  74         if (node) {
  75                 refcount_inc(&node->refs);
  76                 return node;
  77         }
  78 
  79         spin_lock(&root->inode_lock);
  80         node = radix_tree_lookup(&root->delayed_nodes_tree, ino);
  81 
  82         if (node) {
  83                 if (btrfs_inode->delayed_node) {
  84                         refcount_inc(&node->refs);      /* can be accessed */
  85                         BUG_ON(btrfs_inode->delayed_node != node);
  86                         spin_unlock(&root->inode_lock);
  87                         return node;
  88                 }
  89 
  90                 /*
  91                  * It's possible that we're racing into the middle of removing
  92                  * this node from the radix tree.  In this case, the refcount
  93                  * was zero and it should never go back to one.  Just return
  94                  * NULL like it was never in the radix at all; our release
  95                  * function is in the process of removing it.
  96                  *
  97                  * Some implementations of refcount_inc refuse to bump the
  98                  * refcount once it has hit zero.  If we don't do this dance
  99                  * here, refcount_inc() may decide to just WARN_ONCE() instead
 100                  * of actually bumping the refcount.
 101                  *
 102                  * If this node is properly in the radix, we want to bump the
 103                  * refcount twice, once for the inode and once for this get
 104                  * operation.
 105                  */
 106                 if (refcount_inc_not_zero(&node->refs)) {
 107                         refcount_inc(&node->refs);
 108                         btrfs_inode->delayed_node = node;
 109                 } else {
 110                         node = NULL;
 111                 }
 112 
 113                 spin_unlock(&root->inode_lock);
 114                 return node;
 115         }
 116         spin_unlock(&root->inode_lock);
 117 
 118         return NULL;
 119 }
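
     /*
      * Usage sketch (illustrative): every node returned by
      * btrfs_get_delayed_node() or btrfs_get_or_create_delayed_node() carries
      * a reference that the caller must drop with btrfs_release_delayed_node():
      *
      *      node = btrfs_get_delayed_node(inode);
      *      if (node) {
      *              ... work on the node, typically under node->mutex ...
      *              btrfs_release_delayed_node(node);
      *      }
      */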
 120 
 121 /* Will return either the node or PTR_ERR(-ENOMEM) */
 122 static struct btrfs_delayed_node *btrfs_get_or_create_delayed_node(
 123                 struct btrfs_inode *btrfs_inode)
 124 {
 125         struct btrfs_delayed_node *node;
 126         struct btrfs_root *root = btrfs_inode->root;
 127         u64 ino = btrfs_ino(btrfs_inode);
 128         int ret;
 129 
 130 again:
 131         node = btrfs_get_delayed_node(btrfs_inode);
 132         if (node)
 133                 return node;
 134 
 135         node = kmem_cache_zalloc(delayed_node_cache, GFP_NOFS);
 136         if (!node)
 137                 return ERR_PTR(-ENOMEM);
 138         btrfs_init_delayed_node(node, root, ino);
 139 
 140         /* cached in the btrfs inode and can be accessed */
 141         refcount_set(&node->refs, 2);
 142 
 143         ret = radix_tree_preload(GFP_NOFS);
 144         if (ret) {
 145                 kmem_cache_free(delayed_node_cache, node);
 146                 return ERR_PTR(ret);
 147         }
 148 
 149         spin_lock(&root->inode_lock);
 150         ret = radix_tree_insert(&root->delayed_nodes_tree, ino, node);
 151         if (ret == -EEXIST) {
 152                 spin_unlock(&root->inode_lock);
 153                 kmem_cache_free(delayed_node_cache, node);
 154                 radix_tree_preload_end();
 155                 goto again;
 156         }
 157         btrfs_inode->delayed_node = node;
 158         spin_unlock(&root->inode_lock);
 159         radix_tree_preload_end();
 160 
 161         return node;
 162 }
 163 
  164 /*
  165  * Call this while holding delayed_node->mutex.
  166  *
  167  * If mod = 1, also add this node to the prepared list.
  168  */
 169 static void btrfs_queue_delayed_node(struct btrfs_delayed_root *root,
 170                                      struct btrfs_delayed_node *node,
 171                                      int mod)
 172 {
 173         spin_lock(&root->lock);
 174         if (test_bit(BTRFS_DELAYED_NODE_IN_LIST, &node->flags)) {
 175                 if (!list_empty(&node->p_list))
 176                         list_move_tail(&node->p_list, &root->prepare_list);
 177                 else if (mod)
 178                         list_add_tail(&node->p_list, &root->prepare_list);
 179         } else {
 180                 list_add_tail(&node->n_list, &root->node_list);
 181                 list_add_tail(&node->p_list, &root->prepare_list);
 182                 refcount_inc(&node->refs);      /* inserted into list */
 183                 root->nodes++;
 184                 set_bit(BTRFS_DELAYED_NODE_IN_LIST, &node->flags);
 185         }
 186         spin_unlock(&root->lock);
 187 }
 188 
 189 /* Call it when holding delayed_node->mutex */
 190 static void btrfs_dequeue_delayed_node(struct btrfs_delayed_root *root,
 191                                        struct btrfs_delayed_node *node)
 192 {
 193         spin_lock(&root->lock);
 194         if (test_bit(BTRFS_DELAYED_NODE_IN_LIST, &node->flags)) {
 195                 root->nodes--;
 196                 refcount_dec(&node->refs);      /* not in the list */
 197                 list_del_init(&node->n_list);
 198                 if (!list_empty(&node->p_list))
 199                         list_del_init(&node->p_list);
 200                 clear_bit(BTRFS_DELAYED_NODE_IN_LIST, &node->flags);
 201         }
 202         spin_unlock(&root->lock);
 203 }
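
     /*
      * Usage sketch: both helpers above expect delayed_node->mutex to be held,
      * as __btrfs_release_delayed_node() does further below:
      *
      *      mutex_lock(&delayed_node->mutex);
      *      if (delayed_node->count)
      *              btrfs_queue_delayed_node(delayed_root, delayed_node, mod);
      *      else
      *              btrfs_dequeue_delayed_node(delayed_root, delayed_node);
      *      mutex_unlock(&delayed_node->mutex);
      */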
 204 
 205 static struct btrfs_delayed_node *btrfs_first_delayed_node(
 206                         struct btrfs_delayed_root *delayed_root)
 207 {
 208         struct list_head *p;
 209         struct btrfs_delayed_node *node = NULL;
 210 
 211         spin_lock(&delayed_root->lock);
 212         if (list_empty(&delayed_root->node_list))
 213                 goto out;
 214 
 215         p = delayed_root->node_list.next;
 216         node = list_entry(p, struct btrfs_delayed_node, n_list);
 217         refcount_inc(&node->refs);
 218 out:
 219         spin_unlock(&delayed_root->lock);
 220 
 221         return node;
 222 }
 223 
 224 static struct btrfs_delayed_node *btrfs_next_delayed_node(
 225                                                 struct btrfs_delayed_node *node)
 226 {
 227         struct btrfs_delayed_root *delayed_root;
 228         struct list_head *p;
 229         struct btrfs_delayed_node *next = NULL;
 230 
 231         delayed_root = node->root->fs_info->delayed_root;
 232         spin_lock(&delayed_root->lock);
 233         if (!test_bit(BTRFS_DELAYED_NODE_IN_LIST, &node->flags)) {
 234                 /* not in the list */
 235                 if (list_empty(&delayed_root->node_list))
 236                         goto out;
 237                 p = delayed_root->node_list.next;
 238         } else if (list_is_last(&node->n_list, &delayed_root->node_list))
 239                 goto out;
 240         else
 241                 p = node->n_list.next;
 242 
 243         next = list_entry(p, struct btrfs_delayed_node, n_list);
 244         refcount_inc(&next->refs);
 245 out:
 246         spin_unlock(&delayed_root->lock);
 247 
 248         return next;
 249 }
 250 
 251 static void __btrfs_release_delayed_node(
 252                                 struct btrfs_delayed_node *delayed_node,
 253                                 int mod)
 254 {
 255         struct btrfs_delayed_root *delayed_root;
 256 
 257         if (!delayed_node)
 258                 return;
 259 
 260         delayed_root = delayed_node->root->fs_info->delayed_root;
 261 
 262         mutex_lock(&delayed_node->mutex);
 263         if (delayed_node->count)
 264                 btrfs_queue_delayed_node(delayed_root, delayed_node, mod);
 265         else
 266                 btrfs_dequeue_delayed_node(delayed_root, delayed_node);
 267         mutex_unlock(&delayed_node->mutex);
 268 
 269         if (refcount_dec_and_test(&delayed_node->refs)) {
 270                 struct btrfs_root *root = delayed_node->root;
 271 
 272                 spin_lock(&root->inode_lock);
 273                 /*
 274                  * Once our refcount goes to zero, nobody is allowed to bump it
 275                  * back up.  We can delete it now.
 276                  */
 277                 ASSERT(refcount_read(&delayed_node->refs) == 0);
 278                 radix_tree_delete(&root->delayed_nodes_tree,
 279                                   delayed_node->inode_id);
 280                 spin_unlock(&root->inode_lock);
 281                 kmem_cache_free(delayed_node_cache, delayed_node);
 282         }
 283 }
 284 
 285 static inline void btrfs_release_delayed_node(struct btrfs_delayed_node *node)
 286 {
 287         __btrfs_release_delayed_node(node, 0);
 288 }
 289 
 290 static struct btrfs_delayed_node *btrfs_first_prepared_delayed_node(
 291                                         struct btrfs_delayed_root *delayed_root)
 292 {
 293         struct list_head *p;
 294         struct btrfs_delayed_node *node = NULL;
 295 
 296         spin_lock(&delayed_root->lock);
 297         if (list_empty(&delayed_root->prepare_list))
 298                 goto out;
 299 
 300         p = delayed_root->prepare_list.next;
 301         list_del_init(p);
 302         node = list_entry(p, struct btrfs_delayed_node, p_list);
 303         refcount_inc(&node->refs);
 304 out:
 305         spin_unlock(&delayed_root->lock);
 306 
 307         return node;
 308 }
 309 
 310 static inline void btrfs_release_prepared_delayed_node(
 311                                         struct btrfs_delayed_node *node)
 312 {
 313         __btrfs_release_delayed_node(node, 1);
 314 }
 315 
 316 static struct btrfs_delayed_item *btrfs_alloc_delayed_item(u32 data_len)
 317 {
 318         struct btrfs_delayed_item *item;
 319         item = kmalloc(sizeof(*item) + data_len, GFP_NOFS);
 320         if (item) {
 321                 item->data_len = data_len;
 322                 item->ins_or_del = 0;
 323                 item->bytes_reserved = 0;
 324                 item->delayed_node = NULL;
 325                 refcount_set(&item->refs, 1);
 326         }
 327         return item;
 328 }
 329 
  330 /*
  331  * __btrfs_lookup_delayed_item - look up a delayed item by key
  332  * @root:          the rb-tree root to search (insertion or deletion tree)
  333  * @key:           the key to look up
  334  * @prev:          used to store the previous item if the right item isn't found
  335  * @next:          used to store the next item if the right item isn't found
  336  *
  337  * Note: if we don't find the right item, we return NULL and store the
  338  * previous and next items instead (see the usage sketch after this function).
  339  */
 340 static struct btrfs_delayed_item *__btrfs_lookup_delayed_item(
 341                                 struct rb_root *root,
 342                                 struct btrfs_key *key,
 343                                 struct btrfs_delayed_item **prev,
 344                                 struct btrfs_delayed_item **next)
 345 {
 346         struct rb_node *node, *prev_node = NULL;
 347         struct btrfs_delayed_item *delayed_item = NULL;
 348         int ret = 0;
 349 
 350         node = root->rb_node;
 351 
 352         while (node) {
 353                 delayed_item = rb_entry(node, struct btrfs_delayed_item,
 354                                         rb_node);
 355                 prev_node = node;
 356                 ret = btrfs_comp_cpu_keys(&delayed_item->key, key);
 357                 if (ret < 0)
 358                         node = node->rb_right;
 359                 else if (ret > 0)
 360                         node = node->rb_left;
 361                 else
 362                         return delayed_item;
 363         }
 364 
 365         if (prev) {
 366                 if (!prev_node)
 367                         *prev = NULL;
 368                 else if (ret < 0)
 369                         *prev = delayed_item;
 370                 else if ((node = rb_prev(prev_node)) != NULL) {
 371                         *prev = rb_entry(node, struct btrfs_delayed_item,
 372                                          rb_node);
 373                 } else
 374                         *prev = NULL;
 375         }
 376 
 377         if (next) {
 378                 if (!prev_node)
 379                         *next = NULL;
 380                 else if (ret > 0)
 381                         *next = delayed_item;
 382                 else if ((node = rb_next(prev_node)) != NULL) {
 383                         *next = rb_entry(node, struct btrfs_delayed_item,
 384                                          rb_node);
 385                 } else
 386                         *next = NULL;
 387         }
 388         return NULL;
 389 }
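
     /*
      * Usage sketch (illustrative): a caller interested in the neighbours of a
      * key that is not queued could use the @prev/@next out-parameters:
      *
      *      struct btrfs_delayed_item *prev = NULL, *next = NULL;
      *      struct btrfs_delayed_item *item;
      *
      *      item = __btrfs_lookup_delayed_item(&node->ins_root.rb_root, &key,
      *                                         &prev, &next);
      *      if (!item) {
      *              ... key not queued; prev/next bracket where it would sit ...
      *      }
      */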
 390 
 391 static struct btrfs_delayed_item *__btrfs_lookup_delayed_insertion_item(
 392                                         struct btrfs_delayed_node *delayed_node,
 393                                         struct btrfs_key *key)
 394 {
 395         return __btrfs_lookup_delayed_item(&delayed_node->ins_root.rb_root, key,
 396                                            NULL, NULL);
 397 }
 398 
 399 static int __btrfs_add_delayed_item(struct btrfs_delayed_node *delayed_node,
 400                                     struct btrfs_delayed_item *ins,
 401                                     int action)
 402 {
 403         struct rb_node **p, *node;
 404         struct rb_node *parent_node = NULL;
 405         struct rb_root_cached *root;
 406         struct btrfs_delayed_item *item;
 407         int cmp;
 408         bool leftmost = true;
 409 
 410         if (action == BTRFS_DELAYED_INSERTION_ITEM)
 411                 root = &delayed_node->ins_root;
 412         else if (action == BTRFS_DELAYED_DELETION_ITEM)
 413                 root = &delayed_node->del_root;
 414         else
 415                 BUG();
 416         p = &root->rb_root.rb_node;
 417         node = &ins->rb_node;
 418 
 419         while (*p) {
 420                 parent_node = *p;
 421                 item = rb_entry(parent_node, struct btrfs_delayed_item,
 422                                  rb_node);
 423 
 424                 cmp = btrfs_comp_cpu_keys(&item->key, &ins->key);
 425                 if (cmp < 0) {
 426                         p = &(*p)->rb_right;
 427                         leftmost = false;
 428                 } else if (cmp > 0) {
 429                         p = &(*p)->rb_left;
 430                 } else {
 431                         return -EEXIST;
 432                 }
 433         }
 434 
 435         rb_link_node(node, parent_node, p);
 436         rb_insert_color_cached(node, root, leftmost);
 437         ins->delayed_node = delayed_node;
 438         ins->ins_or_del = action;
 439 
 440         if (ins->key.type == BTRFS_DIR_INDEX_KEY &&
 441             action == BTRFS_DELAYED_INSERTION_ITEM &&
 442             ins->key.offset >= delayed_node->index_cnt)
 443                         delayed_node->index_cnt = ins->key.offset + 1;
 444 
 445         delayed_node->count++;
 446         atomic_inc(&delayed_node->root->fs_info->delayed_root->items);
 447         return 0;
 448 }
 449 
 450 static int __btrfs_add_delayed_insertion_item(struct btrfs_delayed_node *node,
 451                                               struct btrfs_delayed_item *item)
 452 {
 453         return __btrfs_add_delayed_item(node, item,
 454                                         BTRFS_DELAYED_INSERTION_ITEM);
 455 }
 456 
 457 static int __btrfs_add_delayed_deletion_item(struct btrfs_delayed_node *node,
 458                                              struct btrfs_delayed_item *item)
 459 {
 460         return __btrfs_add_delayed_item(node, item,
 461                                         BTRFS_DELAYED_DELETION_ITEM);
 462 }
 463 
 464 static void finish_one_item(struct btrfs_delayed_root *delayed_root)
 465 {
 466         int seq = atomic_inc_return(&delayed_root->items_seq);
 467 
 468         /* atomic_dec_return implies a barrier */
 469         if ((atomic_dec_return(&delayed_root->items) <
 470             BTRFS_DELAYED_BACKGROUND || seq % BTRFS_DELAYED_BATCH == 0))
 471                 cond_wake_up_nomb(&delayed_root->wait);
 472 }
 473 
 474 static void __btrfs_remove_delayed_item(struct btrfs_delayed_item *delayed_item)
 475 {
 476         struct rb_root_cached *root;
 477         struct btrfs_delayed_root *delayed_root;
 478 
 479         /* Not associated with any delayed_node */
 480         if (!delayed_item->delayed_node)
 481                 return;
 482         delayed_root = delayed_item->delayed_node->root->fs_info->delayed_root;
 483 
 484         BUG_ON(!delayed_root);
 485         BUG_ON(delayed_item->ins_or_del != BTRFS_DELAYED_DELETION_ITEM &&
 486                delayed_item->ins_or_del != BTRFS_DELAYED_INSERTION_ITEM);
 487 
 488         if (delayed_item->ins_or_del == BTRFS_DELAYED_INSERTION_ITEM)
 489                 root = &delayed_item->delayed_node->ins_root;
 490         else
 491                 root = &delayed_item->delayed_node->del_root;
 492 
 493         rb_erase_cached(&delayed_item->rb_node, root);
 494         delayed_item->delayed_node->count--;
 495 
 496         finish_one_item(delayed_root);
 497 }
 498 
 499 static void btrfs_release_delayed_item(struct btrfs_delayed_item *item)
 500 {
 501         if (item) {
 502                 __btrfs_remove_delayed_item(item);
 503                 if (refcount_dec_and_test(&item->refs))
 504                         kfree(item);
 505         }
 506 }
 507 
 508 static struct btrfs_delayed_item *__btrfs_first_delayed_insertion_item(
 509                                         struct btrfs_delayed_node *delayed_node)
 510 {
 511         struct rb_node *p;
 512         struct btrfs_delayed_item *item = NULL;
 513 
 514         p = rb_first_cached(&delayed_node->ins_root);
 515         if (p)
 516                 item = rb_entry(p, struct btrfs_delayed_item, rb_node);
 517 
 518         return item;
 519 }
 520 
 521 static struct btrfs_delayed_item *__btrfs_first_delayed_deletion_item(
 522                                         struct btrfs_delayed_node *delayed_node)
 523 {
 524         struct rb_node *p;
 525         struct btrfs_delayed_item *item = NULL;
 526 
 527         p = rb_first_cached(&delayed_node->del_root);
 528         if (p)
 529                 item = rb_entry(p, struct btrfs_delayed_item, rb_node);
 530 
 531         return item;
 532 }
 533 
 534 static struct btrfs_delayed_item *__btrfs_next_delayed_item(
 535                                                 struct btrfs_delayed_item *item)
 536 {
 537         struct rb_node *p;
 538         struct btrfs_delayed_item *next = NULL;
 539 
 540         p = rb_next(&item->rb_node);
 541         if (p)
 542                 next = rb_entry(p, struct btrfs_delayed_item, rb_node);
 543 
 544         return next;
 545 }
 546 
 547 static int btrfs_delayed_item_reserve_metadata(struct btrfs_trans_handle *trans,
 548                                                struct btrfs_root *root,
 549                                                struct btrfs_delayed_item *item)
 550 {
 551         struct btrfs_block_rsv *src_rsv;
 552         struct btrfs_block_rsv *dst_rsv;
 553         struct btrfs_fs_info *fs_info = root->fs_info;
 554         u64 num_bytes;
 555         int ret;
 556 
 557         if (!trans->bytes_reserved)
 558                 return 0;
 559 
 560         src_rsv = trans->block_rsv;
 561         dst_rsv = &fs_info->delayed_block_rsv;
 562 
 563         num_bytes = btrfs_calc_insert_metadata_size(fs_info, 1);
 564 
  565         /*
  566          * Here we migrate space rsv from the transaction rsv, since we have
  567          * already reserved space when starting the transaction.  So there is
  568          * no need to reserve qgroup space here.
  569          */
 570         ret = btrfs_block_rsv_migrate(src_rsv, dst_rsv, num_bytes, true);
 571         if (!ret) {
 572                 trace_btrfs_space_reservation(fs_info, "delayed_item",
 573                                               item->key.objectid,
 574                                               num_bytes, 1);
 575                 item->bytes_reserved = num_bytes;
 576         }
 577 
 578         return ret;
 579 }
 580 
 581 static void btrfs_delayed_item_release_metadata(struct btrfs_root *root,
 582                                                 struct btrfs_delayed_item *item)
 583 {
 584         struct btrfs_block_rsv *rsv;
 585         struct btrfs_fs_info *fs_info = root->fs_info;
 586 
 587         if (!item->bytes_reserved)
 588                 return;
 589 
 590         rsv = &fs_info->delayed_block_rsv;
 591         /*
 592          * Check btrfs_delayed_item_reserve_metadata() to see why we don't need
 593          * to release/reserve qgroup space.
 594          */
 595         trace_btrfs_space_reservation(fs_info, "delayed_item",
 596                                       item->key.objectid, item->bytes_reserved,
 597                                       0);
 598         btrfs_block_rsv_release(fs_info, rsv,
 599                                 item->bytes_reserved);
 600 }
 601 
 602 static int btrfs_delayed_inode_reserve_metadata(
 603                                         struct btrfs_trans_handle *trans,
 604                                         struct btrfs_root *root,
 605                                         struct btrfs_inode *inode,
 606                                         struct btrfs_delayed_node *node)
 607 {
 608         struct btrfs_fs_info *fs_info = root->fs_info;
 609         struct btrfs_block_rsv *src_rsv;
 610         struct btrfs_block_rsv *dst_rsv;
 611         u64 num_bytes;
 612         int ret;
 613 
 614         src_rsv = trans->block_rsv;
 615         dst_rsv = &fs_info->delayed_block_rsv;
 616 
 617         num_bytes = btrfs_calc_metadata_size(fs_info, 1);
 618 
  619         /*
  620          * btrfs_dirty_inode will update the inode under btrfs_join_transaction
  621          * which, for speed, doesn't reserve space.  This is a problem since we
  622          * still need to reserve space for this update, so try to reserve the
  623          * space here (see the sketch after this function).
  624          *
  625          * Now if src_rsv == delalloc_block_rsv we'll let it just steal, since
  626          * we always reserve enough to update the inode item.
  627          */
 628         if (!src_rsv || (!trans->bytes_reserved &&
 629                          src_rsv->type != BTRFS_BLOCK_RSV_DELALLOC)) {
 630                 ret = btrfs_qgroup_reserve_meta_prealloc(root,
 631                                 fs_info->nodesize, true);
 632                 if (ret < 0)
 633                         return ret;
 634                 ret = btrfs_block_rsv_add(root, dst_rsv, num_bytes,
 635                                           BTRFS_RESERVE_NO_FLUSH);
 636                 /*
 637                  * Since we're under a transaction reserve_metadata_bytes could
 638                  * try to commit the transaction which will make it return
 639                  * EAGAIN to make us stop the transaction we have, so return
 640                  * ENOSPC instead so that btrfs_dirty_inode knows what to do.
 641                  */
 642                 if (ret == -EAGAIN) {
 643                         ret = -ENOSPC;
 644                         btrfs_qgroup_free_meta_prealloc(root, num_bytes);
 645                 }
 646                 if (!ret) {
 647                         node->bytes_reserved = num_bytes;
 648                         trace_btrfs_space_reservation(fs_info,
 649                                                       "delayed_inode",
 650                                                       btrfs_ino(inode),
 651                                                       num_bytes, 1);
 652                 } else {
 653                         btrfs_qgroup_free_meta_prealloc(root, fs_info->nodesize);
 654                 }
 655                 return ret;
 656         }
 657 
 658         ret = btrfs_block_rsv_migrate(src_rsv, dst_rsv, num_bytes, true);
 659         if (!ret) {
 660                 trace_btrfs_space_reservation(fs_info, "delayed_inode",
 661                                               btrfs_ino(inode), num_bytes, 1);
 662                 node->bytes_reserved = num_bytes;
 663         }
 664 
 665         return ret;
 666 }
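
     /*
      * Sketch of how the reservation above is consumed (taken from the run
      * paths further below): trans->block_rsv is temporarily pointed at the
      * delayed rsv while the delayed items and inode are flushed:
      *
      *      block_rsv = trans->block_rsv;
      *      trans->block_rsv = &fs_info->delayed_block_rsv;
      *      __btrfs_commit_inode_delayed_items(trans, path, delayed_node);
      *      trans->block_rsv = block_rsv;
      */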
 667 
 668 static void btrfs_delayed_inode_release_metadata(struct btrfs_fs_info *fs_info,
 669                                                 struct btrfs_delayed_node *node,
 670                                                 bool qgroup_free)
 671 {
 672         struct btrfs_block_rsv *rsv;
 673 
 674         if (!node->bytes_reserved)
 675                 return;
 676 
 677         rsv = &fs_info->delayed_block_rsv;
 678         trace_btrfs_space_reservation(fs_info, "delayed_inode",
 679                                       node->inode_id, node->bytes_reserved, 0);
 680         btrfs_block_rsv_release(fs_info, rsv,
 681                                 node->bytes_reserved);
 682         if (qgroup_free)
 683                 btrfs_qgroup_free_meta_prealloc(node->root,
 684                                 node->bytes_reserved);
 685         else
 686                 btrfs_qgroup_convert_reserved_meta(node->root,
 687                                 node->bytes_reserved);
 688         node->bytes_reserved = 0;
 689 }
 690 
  691 /*
  692  * This helper inserts a batch of contiguous items into the same leaf,
  693  * according to the leaf's free space (see the worked example below).
  694  */
 695 static int btrfs_batch_insert_items(struct btrfs_root *root,
 696                                     struct btrfs_path *path,
 697                                     struct btrfs_delayed_item *item)
 698 {
 699         struct btrfs_delayed_item *curr, *next;
 700         int free_space;
 701         int total_data_size = 0, total_size = 0;
 702         struct extent_buffer *leaf;
 703         char *data_ptr;
 704         struct btrfs_key *keys;
 705         u32 *data_size;
 706         struct list_head head;
 707         int slot;
 708         int nitems;
 709         int i;
 710         int ret = 0;
 711 
 712         BUG_ON(!path->nodes[0]);
 713 
 714         leaf = path->nodes[0];
 715         free_space = btrfs_leaf_free_space(leaf);
 716         INIT_LIST_HEAD(&head);
 717 
 718         next = item;
 719         nitems = 0;
 720 
  721         /*
  722          * Count the number of contiguous items that we can insert in a batch.
  723          */
 724         while (total_size + next->data_len + sizeof(struct btrfs_item) <=
 725                free_space) {
 726                 total_data_size += next->data_len;
 727                 total_size += next->data_len + sizeof(struct btrfs_item);
 728                 list_add_tail(&next->tree_list, &head);
 729                 nitems++;
 730 
 731                 curr = next;
 732                 next = __btrfs_next_delayed_item(curr);
 733                 if (!next)
 734                         break;
 735 
 736                 if (!btrfs_is_continuous_delayed_item(curr, next))
 737                         break;
 738         }
 739 
 740         if (!nitems) {
 741                 ret = 0;
 742                 goto out;
 743         }
 744 
  745         /*
  746          * We need to allocate some memory, which might cause the task to
  747          * sleep, so we set all locked nodes in the path to blocking locks
  748          * first.
  749          */
 750         btrfs_set_path_blocking(path);
 751 
 752         keys = kmalloc_array(nitems, sizeof(struct btrfs_key), GFP_NOFS);
 753         if (!keys) {
 754                 ret = -ENOMEM;
 755                 goto out;
 756         }
 757 
 758         data_size = kmalloc_array(nitems, sizeof(u32), GFP_NOFS);
 759         if (!data_size) {
 760                 ret = -ENOMEM;
 761                 goto error;
 762         }
 763 
 764         /* get keys of all the delayed items */
 765         i = 0;
 766         list_for_each_entry(next, &head, tree_list) {
 767                 keys[i] = next->key;
 768                 data_size[i] = next->data_len;
 769                 i++;
 770         }
 771 
 772         /* insert the keys of the items */
 773         setup_items_for_insert(root, path, keys, data_size,
 774                                total_data_size, total_size, nitems);
 775 
 776         /* insert the dir index items */
 777         slot = path->slots[0];
 778         list_for_each_entry_safe(curr, next, &head, tree_list) {
 779                 data_ptr = btrfs_item_ptr(leaf, slot, char);
 780                 write_extent_buffer(leaf, &curr->data,
 781                                     (unsigned long)data_ptr,
 782                                     curr->data_len);
 783                 slot++;
 784 
 785                 btrfs_delayed_item_release_metadata(root, curr);
 786 
 787                 list_del(&curr->tree_list);
 788                 btrfs_release_delayed_item(curr);
 789         }
 790 
 791 error:
 792         kfree(data_size);
 793         kfree(keys);
 794 out:
 795         return ret;
 796 }
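
     /*
      * Worked example (numbers are illustrative): with free_space = 8000 bytes
      * and consecutive dir index items whose data_len is 40 bytes, each item
      * consumes 40 + sizeof(struct btrfs_item) bytes of leaf space, so the loop
      * above can batch roughly 8000 / (40 + sizeof(struct btrfs_item)) items
      * before the leaf fills up or a non-consecutive key is hit.
      */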
 797 
  798 /*
  799  * This helper handles simple insertions that don't need to extend an item
  800  * for new data, such as directory name index insertion and inode insertion.
  801  */
 802 static int btrfs_insert_delayed_item(struct btrfs_trans_handle *trans,
 803                                      struct btrfs_root *root,
 804                                      struct btrfs_path *path,
 805                                      struct btrfs_delayed_item *delayed_item)
 806 {
 807         struct extent_buffer *leaf;
 808         unsigned int nofs_flag;
 809         char *ptr;
 810         int ret;
 811 
 812         nofs_flag = memalloc_nofs_save();
 813         ret = btrfs_insert_empty_item(trans, root, path, &delayed_item->key,
 814                                       delayed_item->data_len);
 815         memalloc_nofs_restore(nofs_flag);
 816         if (ret < 0 && ret != -EEXIST)
 817                 return ret;
 818 
 819         leaf = path->nodes[0];
 820 
 821         ptr = btrfs_item_ptr(leaf, path->slots[0], char);
 822 
 823         write_extent_buffer(leaf, delayed_item->data, (unsigned long)ptr,
 824                             delayed_item->data_len);
 825         btrfs_mark_buffer_dirty(leaf);
 826 
 827         btrfs_delayed_item_release_metadata(root, delayed_item);
 828         return 0;
 829 }
 830 
  831 /*
  832  * We insert an item first, and then, if there are contiguous items following
  833  * it, we try to insert them into the same leaf.
  834  */
 835 static int btrfs_insert_delayed_items(struct btrfs_trans_handle *trans,
 836                                       struct btrfs_path *path,
 837                                       struct btrfs_root *root,
 838                                       struct btrfs_delayed_node *node)
 839 {
 840         struct btrfs_delayed_item *curr, *prev;
 841         int ret = 0;
 842 
 843 do_again:
 844         mutex_lock(&node->mutex);
 845         curr = __btrfs_first_delayed_insertion_item(node);
 846         if (!curr)
 847                 goto insert_end;
 848 
 849         ret = btrfs_insert_delayed_item(trans, root, path, curr);
 850         if (ret < 0) {
 851                 btrfs_release_path(path);
 852                 goto insert_end;
 853         }
 854 
 855         prev = curr;
 856         curr = __btrfs_next_delayed_item(prev);
 857         if (curr && btrfs_is_continuous_delayed_item(prev, curr)) {
 858                 /* insert the continuous items into the same leaf */
 859                 path->slots[0]++;
 860                 btrfs_batch_insert_items(root, path, curr);
 861         }
 862         btrfs_release_delayed_item(prev);
 863         btrfs_mark_buffer_dirty(path->nodes[0]);
 864 
 865         btrfs_release_path(path);
 866         mutex_unlock(&node->mutex);
 867         goto do_again;
 868 
 869 insert_end:
 870         mutex_unlock(&node->mutex);
 871         return ret;
 872 }
 873 
 874 static int btrfs_batch_delete_items(struct btrfs_trans_handle *trans,
 875                                     struct btrfs_root *root,
 876                                     struct btrfs_path *path,
 877                                     struct btrfs_delayed_item *item)
 878 {
 879         struct btrfs_delayed_item *curr, *next;
 880         struct extent_buffer *leaf;
 881         struct btrfs_key key;
 882         struct list_head head;
 883         int nitems, i, last_item;
 884         int ret = 0;
 885 
 886         BUG_ON(!path->nodes[0]);
 887 
 888         leaf = path->nodes[0];
 889 
 890         i = path->slots[0];
 891         last_item = btrfs_header_nritems(leaf) - 1;
 892         if (i > last_item)
 893                 return -ENOENT; /* FIXME: Is errno suitable? */
 894 
 895         next = item;
 896         INIT_LIST_HEAD(&head);
 897         btrfs_item_key_to_cpu(leaf, &key, i);
 898         nitems = 0;
  899         /*
  900          * Count the number of dir index items that we can delete in a batch.
  901          */
 902         while (btrfs_comp_cpu_keys(&next->key, &key) == 0) {
 903                 list_add_tail(&next->tree_list, &head);
 904                 nitems++;
 905 
 906                 curr = next;
 907                 next = __btrfs_next_delayed_item(curr);
 908                 if (!next)
 909                         break;
 910 
 911                 if (!btrfs_is_continuous_delayed_item(curr, next))
 912                         break;
 913 
 914                 i++;
 915                 if (i > last_item)
 916                         break;
 917                 btrfs_item_key_to_cpu(leaf, &key, i);
 918         }
 919 
 920         if (!nitems)
 921                 return 0;
 922 
 923         ret = btrfs_del_items(trans, root, path, path->slots[0], nitems);
 924         if (ret)
 925                 goto out;
 926 
 927         list_for_each_entry_safe(curr, next, &head, tree_list) {
 928                 btrfs_delayed_item_release_metadata(root, curr);
 929                 list_del(&curr->tree_list);
 930                 btrfs_release_delayed_item(curr);
 931         }
 932 
 933 out:
 934         return ret;
 935 }
 936 
 937 static int btrfs_delete_delayed_items(struct btrfs_trans_handle *trans,
 938                                       struct btrfs_path *path,
 939                                       struct btrfs_root *root,
 940                                       struct btrfs_delayed_node *node)
 941 {
 942         struct btrfs_delayed_item *curr, *prev;
 943         unsigned int nofs_flag;
 944         int ret = 0;
 945 
 946 do_again:
 947         mutex_lock(&node->mutex);
 948         curr = __btrfs_first_delayed_deletion_item(node);
 949         if (!curr)
 950                 goto delete_fail;
 951 
 952         nofs_flag = memalloc_nofs_save();
 953         ret = btrfs_search_slot(trans, root, &curr->key, path, -1, 1);
 954         memalloc_nofs_restore(nofs_flag);
 955         if (ret < 0)
 956                 goto delete_fail;
 957         else if (ret > 0) {
  958                 /*
  959                  * We can't find the item that this delayed item points to,
  960                  * so the delayed item is stale; just drop it.
  961                  */
 962                 prev = curr;
 963                 curr = __btrfs_next_delayed_item(prev);
 964                 btrfs_release_delayed_item(prev);
 965                 ret = 0;
 966                 btrfs_release_path(path);
 967                 if (curr) {
 968                         mutex_unlock(&node->mutex);
 969                         goto do_again;
 970                 } else
 971                         goto delete_fail;
 972         }
 973 
 974         btrfs_batch_delete_items(trans, root, path, curr);
 975         btrfs_release_path(path);
 976         mutex_unlock(&node->mutex);
 977         goto do_again;
 978 
 979 delete_fail:
 980         btrfs_release_path(path);
 981         mutex_unlock(&node->mutex);
 982         return ret;
 983 }
 984 
 985 static void btrfs_release_delayed_inode(struct btrfs_delayed_node *delayed_node)
 986 {
 987         struct btrfs_delayed_root *delayed_root;
 988 
 989         if (delayed_node &&
 990             test_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &delayed_node->flags)) {
 991                 BUG_ON(!delayed_node->root);
 992                 clear_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &delayed_node->flags);
 993                 delayed_node->count--;
 994 
 995                 delayed_root = delayed_node->root->fs_info->delayed_root;
 996                 finish_one_item(delayed_root);
 997         }
 998 }
 999 
1000 static void btrfs_release_delayed_iref(struct btrfs_delayed_node *delayed_node)
1001 {
1002         struct btrfs_delayed_root *delayed_root;
1003 
1004         ASSERT(delayed_node->root);
1005         clear_bit(BTRFS_DELAYED_NODE_DEL_IREF, &delayed_node->flags);
1006         delayed_node->count--;
1007 
1008         delayed_root = delayed_node->root->fs_info->delayed_root;
1009         finish_one_item(delayed_root);
1010 }
1011 
1012 static int __btrfs_update_delayed_inode(struct btrfs_trans_handle *trans,
1013                                         struct btrfs_root *root,
1014                                         struct btrfs_path *path,
1015                                         struct btrfs_delayed_node *node)
1016 {
1017         struct btrfs_fs_info *fs_info = root->fs_info;
1018         struct btrfs_key key;
1019         struct btrfs_inode_item *inode_item;
1020         struct extent_buffer *leaf;
1021         unsigned int nofs_flag;
1022         int mod;
1023         int ret;
1024 
1025         key.objectid = node->inode_id;
1026         key.type = BTRFS_INODE_ITEM_KEY;
1027         key.offset = 0;
1028 
1029         if (test_bit(BTRFS_DELAYED_NODE_DEL_IREF, &node->flags))
1030                 mod = -1;
1031         else
1032                 mod = 1;
1033 
1034         nofs_flag = memalloc_nofs_save();
1035         ret = btrfs_lookup_inode(trans, root, path, &key, mod);
1036         memalloc_nofs_restore(nofs_flag);
1037         if (ret > 0) {
1038                 btrfs_release_path(path);
1039                 return -ENOENT;
1040         } else if (ret < 0) {
1041                 return ret;
1042         }
1043 
1044         leaf = path->nodes[0];
1045         inode_item = btrfs_item_ptr(leaf, path->slots[0],
1046                                     struct btrfs_inode_item);
1047         write_extent_buffer(leaf, &node->inode_item, (unsigned long)inode_item,
1048                             sizeof(struct btrfs_inode_item));
1049         btrfs_mark_buffer_dirty(leaf);
1050 
1051         if (!test_bit(BTRFS_DELAYED_NODE_DEL_IREF, &node->flags))
1052                 goto no_iref;
1053 
1054         path->slots[0]++;
1055         if (path->slots[0] >= btrfs_header_nritems(leaf))
1056                 goto search;
1057 again:
1058         btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
1059         if (key.objectid != node->inode_id)
1060                 goto out;
1061 
1062         if (key.type != BTRFS_INODE_REF_KEY &&
1063             key.type != BTRFS_INODE_EXTREF_KEY)
1064                 goto out;
1065 
 1066         /*
 1067          * Delayed iref deletion is only used for an inode that has a single
 1068          * link, so there is only one iref.  The case of several irefs sharing
 1069          * the same item doesn't exist.
 1070          */
1071         btrfs_del_item(trans, root, path);
1072 out:
1073         btrfs_release_delayed_iref(node);
1074 no_iref:
1075         btrfs_release_path(path);
1076 err_out:
1077         btrfs_delayed_inode_release_metadata(fs_info, node, (ret < 0));
1078         btrfs_release_delayed_inode(node);
1079 
1080         return ret;
1081 
1082 search:
1083         btrfs_release_path(path);
1084 
1085         key.type = BTRFS_INODE_EXTREF_KEY;
1086         key.offset = -1;
1087 
1088         nofs_flag = memalloc_nofs_save();
1089         ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
1090         memalloc_nofs_restore(nofs_flag);
1091         if (ret < 0)
1092                 goto err_out;
1093         ASSERT(ret);
1094 
1095         ret = 0;
1096         leaf = path->nodes[0];
1097         path->slots[0]--;
1098         goto again;
1099 }
1100 
1101 static inline int btrfs_update_delayed_inode(struct btrfs_trans_handle *trans,
1102                                              struct btrfs_root *root,
1103                                              struct btrfs_path *path,
1104                                              struct btrfs_delayed_node *node)
1105 {
1106         int ret;
1107 
1108         mutex_lock(&node->mutex);
1109         if (!test_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &node->flags)) {
1110                 mutex_unlock(&node->mutex);
1111                 return 0;
1112         }
1113 
1114         ret = __btrfs_update_delayed_inode(trans, root, path, node);
1115         mutex_unlock(&node->mutex);
1116         return ret;
1117 }
1118 
1119 static inline int
1120 __btrfs_commit_inode_delayed_items(struct btrfs_trans_handle *trans,
1121                                    struct btrfs_path *path,
1122                                    struct btrfs_delayed_node *node)
1123 {
1124         int ret;
1125 
1126         ret = btrfs_insert_delayed_items(trans, path, node->root, node);
1127         if (ret)
1128                 return ret;
1129 
1130         ret = btrfs_delete_delayed_items(trans, path, node->root, node);
1131         if (ret)
1132                 return ret;
1133 
1134         ret = btrfs_update_delayed_inode(trans, node->root, path, node);
1135         return ret;
1136 }
1137 
 1138 /*
 1139  * Called when committing the transaction.
 1140  * Returns 0 on success.
 1141  * Returns < 0 on error; in that case the transaction is aborted and any
 1142  * outstanding delayed items are cleaned up.
 1143  */
1144 static int __btrfs_run_delayed_items(struct btrfs_trans_handle *trans, int nr)
1145 {
1146         struct btrfs_fs_info *fs_info = trans->fs_info;
1147         struct btrfs_delayed_root *delayed_root;
1148         struct btrfs_delayed_node *curr_node, *prev_node;
1149         struct btrfs_path *path;
1150         struct btrfs_block_rsv *block_rsv;
1151         int ret = 0;
1152         bool count = (nr > 0);
1153 
1154         if (trans->aborted)
1155                 return -EIO;
1156 
1157         path = btrfs_alloc_path();
1158         if (!path)
1159                 return -ENOMEM;
1160         path->leave_spinning = 1;
1161 
1162         block_rsv = trans->block_rsv;
1163         trans->block_rsv = &fs_info->delayed_block_rsv;
1164 
1165         delayed_root = fs_info->delayed_root;
1166 
1167         curr_node = btrfs_first_delayed_node(delayed_root);
1168         while (curr_node && (!count || (count && nr--))) {
1169                 ret = __btrfs_commit_inode_delayed_items(trans, path,
1170                                                          curr_node);
1171                 if (ret) {
1172                         btrfs_release_delayed_node(curr_node);
1173                         curr_node = NULL;
1174                         btrfs_abort_transaction(trans, ret);
1175                         break;
1176                 }
1177 
1178                 prev_node = curr_node;
1179                 curr_node = btrfs_next_delayed_node(curr_node);
1180                 btrfs_release_delayed_node(prev_node);
1181         }
1182 
1183         if (curr_node)
1184                 btrfs_release_delayed_node(curr_node);
1185         btrfs_free_path(path);
1186         trans->block_rsv = block_rsv;
1187 
1188         return ret;
1189 }
1190 
1191 int btrfs_run_delayed_items(struct btrfs_trans_handle *trans)
1192 {
1193         return __btrfs_run_delayed_items(trans, -1);
1194 }
1195 
1196 int btrfs_run_delayed_items_nr(struct btrfs_trans_handle *trans, int nr)
1197 {
1198         return __btrfs_run_delayed_items(trans, nr);
1199 }
1200 
1201 int btrfs_commit_inode_delayed_items(struct btrfs_trans_handle *trans,
1202                                      struct btrfs_inode *inode)
1203 {
1204         struct btrfs_delayed_node *delayed_node = btrfs_get_delayed_node(inode);
1205         struct btrfs_path *path;
1206         struct btrfs_block_rsv *block_rsv;
1207         int ret;
1208 
1209         if (!delayed_node)
1210                 return 0;
1211 
1212         mutex_lock(&delayed_node->mutex);
1213         if (!delayed_node->count) {
1214                 mutex_unlock(&delayed_node->mutex);
1215                 btrfs_release_delayed_node(delayed_node);
1216                 return 0;
1217         }
1218         mutex_unlock(&delayed_node->mutex);
1219 
1220         path = btrfs_alloc_path();
1221         if (!path) {
1222                 btrfs_release_delayed_node(delayed_node);
1223                 return -ENOMEM;
1224         }
1225         path->leave_spinning = 1;
1226 
1227         block_rsv = trans->block_rsv;
1228         trans->block_rsv = &delayed_node->root->fs_info->delayed_block_rsv;
1229 
1230         ret = __btrfs_commit_inode_delayed_items(trans, path, delayed_node);
1231 
1232         btrfs_release_delayed_node(delayed_node);
1233         btrfs_free_path(path);
1234         trans->block_rsv = block_rsv;
1235 
1236         return ret;
1237 }
1238 
1239 int btrfs_commit_inode_delayed_inode(struct btrfs_inode *inode)
1240 {
1241         struct btrfs_fs_info *fs_info = inode->root->fs_info;
1242         struct btrfs_trans_handle *trans;
1243         struct btrfs_delayed_node *delayed_node = btrfs_get_delayed_node(inode);
1244         struct btrfs_path *path;
1245         struct btrfs_block_rsv *block_rsv;
1246         int ret;
1247 
1248         if (!delayed_node)
1249                 return 0;
1250 
1251         mutex_lock(&delayed_node->mutex);
1252         if (!test_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &delayed_node->flags)) {
1253                 mutex_unlock(&delayed_node->mutex);
1254                 btrfs_release_delayed_node(delayed_node);
1255                 return 0;
1256         }
1257         mutex_unlock(&delayed_node->mutex);
1258 
1259         trans = btrfs_join_transaction(delayed_node->root);
1260         if (IS_ERR(trans)) {
1261                 ret = PTR_ERR(trans);
1262                 goto out;
1263         }
1264 
1265         path = btrfs_alloc_path();
1266         if (!path) {
1267                 ret = -ENOMEM;
1268                 goto trans_out;
1269         }
1270         path->leave_spinning = 1;
1271 
1272         block_rsv = trans->block_rsv;
1273         trans->block_rsv = &fs_info->delayed_block_rsv;
1274 
1275         mutex_lock(&delayed_node->mutex);
1276         if (test_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &delayed_node->flags))
1277                 ret = __btrfs_update_delayed_inode(trans, delayed_node->root,
1278                                                    path, delayed_node);
1279         else
1280                 ret = 0;
1281         mutex_unlock(&delayed_node->mutex);
1282 
1283         btrfs_free_path(path);
1284         trans->block_rsv = block_rsv;
1285 trans_out:
1286         btrfs_end_transaction(trans);
1287         btrfs_btree_balance_dirty(fs_info);
1288 out:
1289         btrfs_release_delayed_node(delayed_node);
1290 
1291         return ret;
1292 }
1293 
1294 void btrfs_remove_delayed_node(struct btrfs_inode *inode)
1295 {
1296         struct btrfs_delayed_node *delayed_node;
1297 
1298         delayed_node = READ_ONCE(inode->delayed_node);
1299         if (!delayed_node)
1300                 return;
1301 
1302         inode->delayed_node = NULL;
1303         btrfs_release_delayed_node(delayed_node);
1304 }
1305 
1306 struct btrfs_async_delayed_work {
1307         struct btrfs_delayed_root *delayed_root;
1308         int nr;
1309         struct btrfs_work work;
1310 };
1311 
1312 static void btrfs_async_run_delayed_root(struct btrfs_work *work)
1313 {
1314         struct btrfs_async_delayed_work *async_work;
1315         struct btrfs_delayed_root *delayed_root;
1316         struct btrfs_trans_handle *trans;
1317         struct btrfs_path *path;
1318         struct btrfs_delayed_node *delayed_node = NULL;
1319         struct btrfs_root *root;
1320         struct btrfs_block_rsv *block_rsv;
1321         int total_done = 0;
1322 
1323         async_work = container_of(work, struct btrfs_async_delayed_work, work);
1324         delayed_root = async_work->delayed_root;
1325 
1326         path = btrfs_alloc_path();
1327         if (!path)
1328                 goto out;
1329 
1330         do {
1331                 if (atomic_read(&delayed_root->items) <
1332                     BTRFS_DELAYED_BACKGROUND / 2)
1333                         break;
1334 
1335                 delayed_node = btrfs_first_prepared_delayed_node(delayed_root);
1336                 if (!delayed_node)
1337                         break;
1338 
1339                 path->leave_spinning = 1;
1340                 root = delayed_node->root;
1341 
1342                 trans = btrfs_join_transaction(root);
1343                 if (IS_ERR(trans)) {
1344                         btrfs_release_path(path);
1345                         btrfs_release_prepared_delayed_node(delayed_node);
1346                         total_done++;
1347                         continue;
1348                 }
1349 
1350                 block_rsv = trans->block_rsv;
1351                 trans->block_rsv = &root->fs_info->delayed_block_rsv;
1352 
1353                 __btrfs_commit_inode_delayed_items(trans, path, delayed_node);
1354 
1355                 trans->block_rsv = block_rsv;
1356                 btrfs_end_transaction(trans);
1357                 btrfs_btree_balance_dirty_nodelay(root->fs_info);
1358 
1359                 btrfs_release_path(path);
1360                 btrfs_release_prepared_delayed_node(delayed_node);
1361                 total_done++;
1362 
1363         } while ((async_work->nr == 0 && total_done < BTRFS_DELAYED_WRITEBACK)
1364                  || total_done < async_work->nr);
1365 
1366         btrfs_free_path(path);
1367 out:
1368         wake_up(&delayed_root->wait);
1369         kfree(async_work);
1370 }
1371 
1372 
1373 static int btrfs_wq_run_delayed_node(struct btrfs_delayed_root *delayed_root,
1374                                      struct btrfs_fs_info *fs_info, int nr)
1375 {
1376         struct btrfs_async_delayed_work *async_work;
1377 
1378         async_work = kmalloc(sizeof(*async_work), GFP_NOFS);
1379         if (!async_work)
1380                 return -ENOMEM;
1381 
1382         async_work->delayed_root = delayed_root;
1383         btrfs_init_work(&async_work->work, btrfs_async_run_delayed_root, NULL,
1384                         NULL);
1385         async_work->nr = nr;
1386 
1387         btrfs_queue_work(fs_info->delayed_workers, &async_work->work);
1388         return 0;
1389 }
1390 
1391 void btrfs_assert_delayed_root_empty(struct btrfs_fs_info *fs_info)
1392 {
1393         WARN_ON(btrfs_first_delayed_node(fs_info->delayed_root));
1394 }
1395 
1396 static int could_end_wait(struct btrfs_delayed_root *delayed_root, int seq)
1397 {
1398         int val = atomic_read(&delayed_root->items_seq);
1399 
1400         if (val < seq || val >= seq + BTRFS_DELAYED_BATCH)
1401                 return 1;
1402 
1403         if (atomic_read(&delayed_root->items) < BTRFS_DELAYED_BACKGROUND)
1404                 return 1;
1405 
1406         return 0;
1407 }
1408 
1409 void btrfs_balance_delayed_items(struct btrfs_fs_info *fs_info)
1410 {
1411         struct btrfs_delayed_root *delayed_root = fs_info->delayed_root;
1412 
1413         if ((atomic_read(&delayed_root->items) < BTRFS_DELAYED_BACKGROUND) ||
1414                 btrfs_workqueue_normal_congested(fs_info->delayed_workers))
1415                 return;
1416 
1417         if (atomic_read(&delayed_root->items) >= BTRFS_DELAYED_WRITEBACK) {
1418                 int seq;
1419                 int ret;
1420 
1421                 seq = atomic_read(&delayed_root->items_seq);
1422 
1423                 ret = btrfs_wq_run_delayed_node(delayed_root, fs_info, 0);
1424                 if (ret)
1425                         return;
1426 
1427                 wait_event_interruptible(delayed_root->wait,
1428                                          could_end_wait(delayed_root, seq));
1429                 return;
1430         }
1431 
1432         btrfs_wq_run_delayed_node(delayed_root, fs_info, BTRFS_DELAYED_BATCH);
1433 }
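
/*
 * Taken together, the constants at the top of this file define the
 * throttling policy implemented above: nothing happens while fewer than
 * BTRFS_DELAYED_BACKGROUND items are queued (or the delayed workqueue is
 * congested); above that threshold an async worker is queued to flush
 * BTRFS_DELAYED_BATCH delayed nodes; and once BTRFS_DELAYED_WRITEBACK items
 * have piled up the caller queues a full flush (nr == 0) and blocks in
 * could_end_wait() until either items_seq has advanced by a full batch or
 * the backlog has dropped back under BTRFS_DELAYED_BACKGROUND.
 *
 * A hypothetical call site (illustrative only, not a function from this
 * file) would look like:
 *
 *	struct btrfs_fs_info *fs_info = dir->root->fs_info;
 *
 *	(... directory-modifying operation under trans ...)
 *	btrfs_end_transaction(trans);
 *	btrfs_balance_delayed_items(fs_info);
 */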
1434 
1435 /* Will return 0 or -ENOMEM */
1436 int btrfs_insert_delayed_dir_index(struct btrfs_trans_handle *trans,
1437                                    const char *name, int name_len,
1438                                    struct btrfs_inode *dir,
1439                                    struct btrfs_disk_key *disk_key, u8 type,
1440                                    u64 index)
1441 {
1442         struct btrfs_delayed_node *delayed_node;
1443         struct btrfs_delayed_item *delayed_item;
1444         struct btrfs_dir_item *dir_item;
1445         int ret;
1446 
1447         delayed_node = btrfs_get_or_create_delayed_node(dir);
1448         if (IS_ERR(delayed_node))
1449                 return PTR_ERR(delayed_node);
1450 
1451         delayed_item = btrfs_alloc_delayed_item(sizeof(*dir_item) + name_len);
1452         if (!delayed_item) {
1453                 ret = -ENOMEM;
1454                 goto release_node;
1455         }
1456 
1457         delayed_item->key.objectid = btrfs_ino(dir);
1458         delayed_item->key.type = BTRFS_DIR_INDEX_KEY;
1459         delayed_item->key.offset = index;
1460 
1461         dir_item = (struct btrfs_dir_item *)delayed_item->data;
1462         dir_item->location = *disk_key;
1463         btrfs_set_stack_dir_transid(dir_item, trans->transid);
1464         btrfs_set_stack_dir_data_len(dir_item, 0);
1465         btrfs_set_stack_dir_name_len(dir_item, name_len);
1466         btrfs_set_stack_dir_type(dir_item, type);
1467         memcpy((char *)(dir_item + 1), name, name_len);
1468 
1469         ret = btrfs_delayed_item_reserve_metadata(trans, dir->root, delayed_item);
1470         /*
1471          * We reserved enough space when we started the transaction, so a
1472          * metadata reservation failure here should be impossible.
1473          */
1474         BUG_ON(ret);
1475 
1476         mutex_lock(&delayed_node->mutex);
1477         ret = __btrfs_add_delayed_insertion_item(delayed_node, delayed_item);
1478         if (unlikely(ret)) {
1479                 btrfs_err(trans->fs_info,
1480                           "err add delayed dir index item(name: %.*s) into the insertion tree of the delayed node(root id: %llu, inode id: %llu, errno: %d)",
1481                           name_len, name, delayed_node->root->root_key.objectid,
1482                           delayed_node->inode_id, ret);
1483                 BUG();
1484         }
1485         mutex_unlock(&delayed_node->mutex);
1486 
1487 release_node:
1488         btrfs_release_delayed_node(delayed_node);
1489         return ret;
1490 }
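
/*
 * Illustrative caller of btrfs_insert_delayed_dir_index() (a sketch, not a
 * function from this file): when a new directory entry is created, the
 * caller already holds a transaction and knows the child's key and the next
 * free dir index, so the BTRFS_DIR_INDEX_KEY item can be queued in memory
 * here instead of being inserted into the b-tree right away.
 *
 *	static int example_add_dir_index(struct btrfs_trans_handle *trans,
 *					 struct btrfs_inode *dir,
 *					 struct btrfs_key *child_key,
 *					 const char *name, int name_len,
 *					 u64 index)
 *	{
 *		struct btrfs_disk_key disk_key;
 *
 *		btrfs_cpu_key_to_disk(&disk_key, child_key);
 *		return btrfs_insert_delayed_dir_index(trans, name, name_len,
 *						      dir, &disk_key,
 *						      BTRFS_FT_REG_FILE,
 *						      index);
 *	}
 */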
1491 
1492 static int btrfs_delete_delayed_insertion_item(struct btrfs_fs_info *fs_info,
1493                                                struct btrfs_delayed_node *node,
1494                                                struct btrfs_key *key)
1495 {
1496         struct btrfs_delayed_item *item;
1497 
1498         mutex_lock(&node->mutex);
1499         item = __btrfs_lookup_delayed_insertion_item(node, key);
1500         if (!item) {
1501                 mutex_unlock(&node->mutex);
1502                 return 1;
1503         }
1504 
1505         btrfs_delayed_item_release_metadata(node->root, item);
1506         btrfs_release_delayed_item(item);
1507         mutex_unlock(&node->mutex);
1508         return 0;
1509 }
1510 
1511 int btrfs_delete_delayed_dir_index(struct btrfs_trans_handle *trans,
1512                                    struct btrfs_inode *dir, u64 index)
1513 {
1514         struct btrfs_delayed_node *node;
1515         struct btrfs_delayed_item *item;
1516         struct btrfs_key item_key;
1517         int ret;
1518 
1519         node = btrfs_get_or_create_delayed_node(dir);
1520         if (IS_ERR(node))
1521                 return PTR_ERR(node);
1522 
1523         item_key.objectid = btrfs_ino(dir);
1524         item_key.type = BTRFS_DIR_INDEX_KEY;
1525         item_key.offset = index;
1526 
1527         ret = btrfs_delete_delayed_insertion_item(trans->fs_info, node,
1528                                                   &item_key);
1529         if (!ret)
1530                 goto end;
1531 
1532         item = btrfs_alloc_delayed_item(0);
1533         if (!item) {
1534                 ret = -ENOMEM;
1535                 goto end;
1536         }
1537 
1538         item->key = item_key;
1539 
1540         ret = btrfs_delayed_item_reserve_metadata(trans, dir->root, item);
1541         /*
1542          * We reserved enough space when we started the transaction, so a
1543          * metadata reservation failure here should be impossible.
1544          */
1545         if (ret < 0) {
1546                 btrfs_err(trans->fs_info,
1547 "metadata reservation failed for delayed dir item deltiona, should have been reserved");
1548                 btrfs_release_delayed_item(item);
1549                 goto end;
1550         }
1551 
1552         mutex_lock(&node->mutex);
1553         ret = __btrfs_add_delayed_deletion_item(node, item);
1554         if (unlikely(ret)) {
1555                 btrfs_err(trans->fs_info,
1556                           "err add delayed dir index item(index: %llu) into the deletion tree of the delayed node(root id: %llu, inode id: %llu, errno: %d)",
1557                           index, node->root->root_key.objectid,
1558                           node->inode_id, ret);
1559                 btrfs_delayed_item_release_metadata(dir->root, item);
1560                 btrfs_release_delayed_item(item);
1561         }
1562         mutex_unlock(&node->mutex);
1563 end:
1564         btrfs_release_delayed_node(node);
1565         return ret;
1566 }
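
/*
 * Note how this pairs with btrfs_insert_delayed_dir_index() above: if the
 * dir index being deleted is still sitting in the delayed insertion tree
 * (the entry was created and removed before either change hit disk), the
 * two operations simply cancel out in btrfs_delete_delayed_insertion_item()
 * and nothing is queued.  Only otherwise is a zero-sized deletion item
 * added, so that the on-disk BTRFS_DIR_INDEX_KEY item is removed when the
 * delayed node is run.
 */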
1567 
1568 int btrfs_inode_delayed_dir_index_count(struct btrfs_inode *inode)
1569 {
1570         struct btrfs_delayed_node *delayed_node = btrfs_get_delayed_node(inode);
1571 
1572         if (!delayed_node)
1573                 return -ENOENT;
1574 
1575         /*
1576          * Since we hold the i_mutex of this directory, no new directory index
1577          * can be added to the delayed node and index_cnt cannot be updated
1578          * right now, so we needn't lock the delayed node.
1579          */
1580         if (!delayed_node->index_cnt) {
1581                 btrfs_release_delayed_node(delayed_node);
1582                 return -EINVAL;
1583         }
1584 
1585         inode->index_cnt = delayed_node->index_cnt;
1586         btrfs_release_delayed_node(delayed_node);
1587         return 0;
1588 }
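
/*
 * A hedged usage sketch (the helper named below is hypothetical): callers
 * that need the next free dir index can try the cached value first and
 * fall back to scanning the highest on-disk BTRFS_DIR_INDEX_KEY item only
 * when this returns -ENOENT or -EINVAL.
 *
 *	if (btrfs_inode_delayed_dir_index_count(dir))
 *		example_scan_dir_index_on_disk(dir);
 *	index = dir->index_cnt++;
 */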
1589 
1590 bool btrfs_readdir_get_delayed_items(struct inode *inode,
1591                                      struct list_head *ins_list,
1592                                      struct list_head *del_list)
1593 {
1594         struct btrfs_delayed_node *delayed_node;
1595         struct btrfs_delayed_item *item;
1596 
1597         delayed_node = btrfs_get_delayed_node(BTRFS_I(inode));
1598         if (!delayed_node)
1599                 return false;
1600 
1601         /*
1602          * We can only do one readdir with delayed items at a time because of
1603          * item->readdir_list.
1604          */
1605         inode_unlock_shared(inode);
1606         inode_lock(inode);
1607 
1608         mutex_lock(&delayed_node->mutex);
1609         item = __btrfs_first_delayed_insertion_item(delayed_node);
1610         while (item) {
1611                 refcount_inc(&item->refs);
1612                 list_add_tail(&item->readdir_list, ins_list);
1613                 item = __btrfs_next_delayed_item(item);
1614         }
1615 
1616         item = __btrfs_first_delayed_deletion_item(delayed_node);
1617         while (item) {
1618                 refcount_inc(&item->refs);
1619                 list_add_tail(&item->readdir_list, del_list);
1620                 item = __btrfs_next_delayed_item(item);
1621         }
1622         mutex_unlock(&delayed_node->mutex);
1623         /*
1624          * This delayed node is still cached in the btrfs inode, so refs
1625          * must be > 1 now, and we needn't check whether it is going to be
1626          * freed or not.
1627          *
1628          * Besides that, this function is only used for readdir, and no
1629          * delayed items are inserted or deleted during that period, so we
1630          * also needn't requeue or dequeue this delayed node.
1631          */
1632         refcount_dec(&delayed_node->refs);
1633 
1634         return true;
1635 }
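
/*
 * Note the lock dance above: readdir is normally entered with the inode
 * lock held shared, but because each delayed item has only a single
 * readdir_list link, the helper drops the shared lock and takes the
 * exclusive lock so that only one readdir can splice items at a time.  The
 * matching downgrade_write() happens in btrfs_readdir_put_delayed_items()
 * below.
 */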
1636 
1637 void btrfs_readdir_put_delayed_items(struct inode *inode,
1638                                      struct list_head *ins_list,
1639                                      struct list_head *del_list)
1640 {
1641         struct btrfs_delayed_item *curr, *next;
1642 
1643         list_for_each_entry_safe(curr, next, ins_list, readdir_list) {
1644                 list_del(&curr->readdir_list);
1645                 if (refcount_dec_and_test(&curr->refs))
1646                         kfree(curr);
1647         }
1648 
1649         list_for_each_entry_safe(curr, next, del_list, readdir_list) {
1650                 list_del(&curr->readdir_list);
1651                 if (refcount_dec_and_test(&curr->refs))
1652                         kfree(curr);
1653         }
1654 
1655         /*
1656          * The VFS is going to do up_read(), so we need to downgrade back to a
1657          * read lock.
1658          */
1659         downgrade_write(&inode->i_rwsem);
1660 }
1661 
1662 int btrfs_should_delete_dir_index(struct list_head *del_list,
1663                                   u64 index)
1664 {
1665         struct btrfs_delayed_item *curr;
1666         int ret = 0;
1667 
1668         list_for_each_entry(curr, del_list, readdir_list) {
1669                 if (curr->key.offset > index)
1670                         break;
1671                 if (curr->key.offset == index) {
1672                         ret = 1;
1673                         break;
1674                 }
1675         }
1676         return ret;
1677 }
1678 
1679 /*
1680  * btrfs_readdir_delayed_dir_index - read dir info stored in the delayed tree
1681  * Emit the delayed dir index items in @ins_list to the readdir context.
1682  */
1683 int btrfs_readdir_delayed_dir_index(struct dir_context *ctx,
1684                                     struct list_head *ins_list)
1685 {
1686         struct btrfs_dir_item *di;
1687         struct btrfs_delayed_item *curr, *next;
1688         struct btrfs_key location;
1689         char *name;
1690         int name_len;
1691         int over = 0;
1692         unsigned char d_type;
1693 
1694         if (list_empty(ins_list))
1695                 return 0;
1696 
1697         /*
1698          * The data of a delayed item cannot change, so we needn't lock
1699          * the items. And since we hold the i_mutex of the directory,
1700          * nobody can delete any directory index now.
1701          */
1702         list_for_each_entry_safe(curr, next, ins_list, readdir_list) {
1703                 list_del(&curr->readdir_list);
1704 
1705                 if (curr->key.offset < ctx->pos) {
1706                         if (refcount_dec_and_test(&curr->refs))
1707                                 kfree(curr);
1708                         continue;
1709                 }
1710 
1711                 ctx->pos = curr->key.offset;
1712 
1713                 di = (struct btrfs_dir_item *)curr->data;
1714                 name = (char *)(di + 1);
1715                 name_len = btrfs_stack_dir_name_len(di);
1716 
1717                 d_type = fs_ftype_to_dtype(di->type);
1718                 btrfs_disk_key_to_cpu(&location, &di->location);
1719 
1720                 over = !dir_emit(ctx, name, name_len,
1721                                location.objectid, d_type);
1722 
1723                 if (refcount_dec_and_test(&curr->refs))
1724                         kfree(curr);
1725 
1726                 if (over)
1727                         return 1;
1728                 ctx->pos++;
1729         }
1730         return 0;
1731 }
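
/*
 * Putting the three readdir helpers together, a hypothetical consumer
 * (sketch only; the real one lives in the readdir path in inode.c) is
 * structured roughly like this:
 *
 *	LIST_HEAD(ins_list);
 *	LIST_HEAD(del_list);
 *	bool put;
 *
 *	put = btrfs_readdir_get_delayed_items(inode, &ins_list, &del_list);
 *
 *	(walk the on-disk BTRFS_DIR_INDEX_KEY items:)
 *		if (btrfs_should_delete_dir_index(&del_list, found_index))
 *			continue;	(entry was deleted in memory)
 *		if (!dir_emit(ctx, name, name_len, ino, d_type))
 *			goto done;
 *
 *	ret = btrfs_readdir_delayed_dir_index(ctx, &ins_list);
 * done:
 *	if (put)
 *		btrfs_readdir_put_delayed_items(inode, &ins_list, &del_list);
 */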
1732 
1733 static void fill_stack_inode_item(struct btrfs_trans_handle *trans,
1734                                   struct btrfs_inode_item *inode_item,
1735                                   struct inode *inode)
1736 {
1737         btrfs_set_stack_inode_uid(inode_item, i_uid_read(inode));
1738         btrfs_set_stack_inode_gid(inode_item, i_gid_read(inode));
1739         btrfs_set_stack_inode_size(inode_item, BTRFS_I(inode)->disk_i_size);
1740         btrfs_set_stack_inode_mode(inode_item, inode->i_mode);
1741         btrfs_set_stack_inode_nlink(inode_item, inode->i_nlink);
1742         btrfs_set_stack_inode_nbytes(inode_item, inode_get_bytes(inode));
1743         btrfs_set_stack_inode_generation(inode_item,
1744                                          BTRFS_I(inode)->generation);
1745         btrfs_set_stack_inode_sequence(inode_item,
1746                                        inode_peek_iversion(inode));
1747         btrfs_set_stack_inode_transid(inode_item, trans->transid);
1748         btrfs_set_stack_inode_rdev(inode_item, inode->i_rdev);
1749         btrfs_set_stack_inode_flags(inode_item, BTRFS_I(inode)->flags);
1750         btrfs_set_stack_inode_block_group(inode_item, 0);
1751 
1752         btrfs_set_stack_timespec_sec(&inode_item->atime,
1753                                      inode->i_atime.tv_sec);
1754         btrfs_set_stack_timespec_nsec(&inode_item->atime,
1755                                       inode->i_atime.tv_nsec);
1756 
1757         btrfs_set_stack_timespec_sec(&inode_item->mtime,
1758                                      inode->i_mtime.tv_sec);
1759         btrfs_set_stack_timespec_nsec(&inode_item->mtime,
1760                                       inode->i_mtime.tv_nsec);
1761 
1762         btrfs_set_stack_timespec_sec(&inode_item->ctime,
1763                                      inode->i_ctime.tv_sec);
1764         btrfs_set_stack_timespec_nsec(&inode_item->ctime,
1765                                       inode->i_ctime.tv_nsec);
1766 
1767         btrfs_set_stack_timespec_sec(&inode_item->otime,
1768                                      BTRFS_I(inode)->i_otime.tv_sec);
1769         btrfs_set_stack_timespec_nsec(&inode_item->otime,
1770                                      BTRFS_I(inode)->i_otime.tv_nsec);
1771 }
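
/*
 * fill_stack_inode_item() and btrfs_fill_inode() below are inverses of each
 * other: the former copies the in-memory VFS inode state into the stack
 * inode item cached in the delayed node, while the latter copies a cached
 * delayed inode item back into a freshly read VFS inode so the item does
 * not have to be read from the b-tree.
 */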
1772 
1773 int btrfs_fill_inode(struct inode *inode, u32 *rdev)
1774 {
1775         struct btrfs_delayed_node *delayed_node;
1776         struct btrfs_inode_item *inode_item;
1777 
1778         delayed_node = btrfs_get_delayed_node(BTRFS_I(inode));
1779         if (!delayed_node)
1780                 return -ENOENT;
1781 
1782         mutex_lock(&delayed_node->mutex);
1783         if (!test_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &delayed_node->flags)) {
1784                 mutex_unlock(&delayed_node->mutex);
1785                 btrfs_release_delayed_node(delayed_node);
1786                 return -ENOENT;
1787         }
1788 
1789         inode_item = &delayed_node->inode_item;
1790 
1791         i_uid_write(inode, btrfs_stack_inode_uid(inode_item));
1792         i_gid_write(inode, btrfs_stack_inode_gid(inode_item));
1793         btrfs_i_size_write(BTRFS_I(inode), btrfs_stack_inode_size(inode_item));
1794         inode->i_mode = btrfs_stack_inode_mode(inode_item);
1795         set_nlink(inode, btrfs_stack_inode_nlink(inode_item));
1796         inode_set_bytes(inode, btrfs_stack_inode_nbytes(inode_item));
1797         BTRFS_I(inode)->generation = btrfs_stack_inode_generation(inode_item);
1798         BTRFS_I(inode)->last_trans = btrfs_stack_inode_transid(inode_item);
1799 
1800         inode_set_iversion_queried(inode,
1801                                    btrfs_stack_inode_sequence(inode_item));
1802         inode->i_rdev = 0;
1803         *rdev = btrfs_stack_inode_rdev(inode_item);
1804         BTRFS_I(inode)->flags = btrfs_stack_inode_flags(inode_item);
1805 
1806         inode->i_atime.tv_sec = btrfs_stack_timespec_sec(&inode_item->atime);
1807         inode->i_atime.tv_nsec = btrfs_stack_timespec_nsec(&inode_item->atime);
1808 
1809         inode->i_mtime.tv_sec = btrfs_stack_timespec_sec(&inode_item->mtime);
1810         inode->i_mtime.tv_nsec = btrfs_stack_timespec_nsec(&inode_item->mtime);
1811 
1812         inode->i_ctime.tv_sec = btrfs_stack_timespec_sec(&inode_item->ctime);
1813         inode->i_ctime.tv_nsec = btrfs_stack_timespec_nsec(&inode_item->ctime);
1814 
1815         BTRFS_I(inode)->i_otime.tv_sec =
1816                 btrfs_stack_timespec_sec(&inode_item->otime);
1817         BTRFS_I(inode)->i_otime.tv_nsec =
1818                 btrfs_stack_timespec_nsec(&inode_item->otime);
1819 
1820         inode->i_generation = BTRFS_I(inode)->generation;
1821         BTRFS_I(inode)->index_cnt = (u64)-1;
1822 
1823         mutex_unlock(&delayed_node->mutex);
1824         btrfs_release_delayed_node(delayed_node);
1825         return 0;
1826 }
1827 
1828 int btrfs_delayed_update_inode(struct btrfs_trans_handle *trans,
1829                                struct btrfs_root *root, struct inode *inode)
1830 {
1831         struct btrfs_delayed_node *delayed_node;
1832         int ret = 0;
1833 
1834         delayed_node = btrfs_get_or_create_delayed_node(BTRFS_I(inode));
1835         if (IS_ERR(delayed_node))
1836                 return PTR_ERR(delayed_node);
1837 
1838         mutex_lock(&delayed_node->mutex);
1839         if (test_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &delayed_node->flags)) {
1840                 fill_stack_inode_item(trans, &delayed_node->inode_item, inode);
1841                 goto release_node;
1842         }
1843 
1844         ret = btrfs_delayed_inode_reserve_metadata(trans, root, BTRFS_I(inode),
1845                                                    delayed_node);
1846         if (ret)
1847                 goto release_node;
1848 
1849         fill_stack_inode_item(trans, &delayed_node->inode_item, inode);
1850         set_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &delayed_node->flags);
1851         delayed_node->count++;
1852         atomic_inc(&root->fs_info->delayed_root->items);
1853 release_node:
1854         mutex_unlock(&delayed_node->mutex);
1855         btrfs_release_delayed_node(delayed_node);
1856         return ret;
1857 }
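
/*
 * Hypothetical caller sketch for btrfs_delayed_update_inode() (the fallback
 * helper named below is illustrative, not from this file): instead of
 * cow-ing the b-tree leaf on every timestamp or size change, the inode
 * update is stashed in the delayed node and written back by
 * __btrfs_update_delayed_inode() when the node is run or the transaction
 * commits.
 *
 *	ret = btrfs_delayed_update_inode(trans, root, &inode->vfs_inode);
 *	if (ret)
 *		ret = example_update_inode_item(trans, root, inode);
 */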
1858 
1859 int btrfs_delayed_delete_inode_ref(struct btrfs_inode *inode)
1860 {
1861         struct btrfs_fs_info *fs_info = inode->root->fs_info;
1862         struct btrfs_delayed_node *delayed_node;
1863 
1864         /*
1865          * We don't do delayed inode updates during log recovery because it
1866          * leads to enospc problems.  This means we also can't do delayed
1867          * inode refs.
1868          */
1869         if (test_bit(BTRFS_FS_LOG_RECOVERING, &fs_info->flags))
1870                 return -EAGAIN;
1871 
1872         delayed_node = btrfs_get_or_create_delayed_node(inode);
1873         if (IS_ERR(delayed_node))
1874                 return PTR_ERR(delayed_node);
1875 
1876         /*
1877          * We don't reserve space for inode ref deletion because:
1878          * - We ONLY do async inode ref deletion for an inode that has only
1879          *   one link (i_nlink == 1), which means there is only one inode ref.
1880          *   In most cases, the inode ref and the inode item are in the
1881          *   same leaf, and we will deal with them at the same time.
1882          *   Since we are sure we will reserve the space for the inode item,
1883          *   it is unnecessary to reserve space for inode ref deletion.
1884          * - If the inode ref and the inode item are not in the same leaf,
1885          *   we needn't worry about an enospc problem either, because we
1886          *   reserve much more space for the inode update than it needs.
1887          * - At worst, we can steal some space from the global reservation,
1888          *   which is very rare.
1889          */
1890         mutex_lock(&delayed_node->mutex);
1891         if (test_bit(BTRFS_DELAYED_NODE_DEL_IREF, &delayed_node->flags))
1892                 goto release_node;
1893 
1894         set_bit(BTRFS_DELAYED_NODE_DEL_IREF, &delayed_node->flags);
1895         delayed_node->count++;
1896         atomic_inc(&fs_info->delayed_root->items);
1897 release_node:
1898         mutex_unlock(&delayed_node->mutex);
1899         btrfs_release_delayed_node(delayed_node);
1900         return 0;
1901 }
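
/*
 * Usage note: btrfs_delayed_delete_inode_ref() only makes sense for the
 * final unlink of an inode with a single link, where the one and only
 * inode ref item (BTRFS_INODE_REF_KEY) can be dropped together with the
 * delayed inode item when the node is run (see
 * __btrfs_update_delayed_inode()).  Callers must handle -EAGAIN during log
 * recovery and fall back to deleting the ref in the b-tree directly.
 */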
1902 
1903 static void __btrfs_kill_delayed_node(struct btrfs_delayed_node *delayed_node)
1904 {
1905         struct btrfs_root *root = delayed_node->root;
1906         struct btrfs_fs_info *fs_info = root->fs_info;
1907         struct btrfs_delayed_item *curr_item, *prev_item;
1908 
1909         mutex_lock(&delayed_node->mutex);
1910         curr_item = __btrfs_first_delayed_insertion_item(delayed_node);
1911         while (curr_item) {
1912                 btrfs_delayed_item_release_metadata(root, curr_item);
1913                 prev_item = curr_item;
1914                 curr_item = __btrfs_next_delayed_item(prev_item);
1915                 btrfs_release_delayed_item(prev_item);
1916         }
1917 
1918         curr_item = __btrfs_first_delayed_deletion_item(delayed_node);
1919         while (curr_item) {
1920                 btrfs_delayed_item_release_metadata(root, curr_item);
1921                 prev_item = curr_item;
1922                 curr_item = __btrfs_next_delayed_item(prev_item);
1923                 btrfs_release_delayed_item(prev_item);
1924         }
1925 
1926         if (test_bit(BTRFS_DELAYED_NODE_DEL_IREF, &delayed_node->flags))
1927                 btrfs_release_delayed_iref(delayed_node);
1928 
1929         if (test_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &delayed_node->flags)) {
1930                 btrfs_delayed_inode_release_metadata(fs_info, delayed_node, false);
1931                 btrfs_release_delayed_inode(delayed_node);
1932         }
1933         mutex_unlock(&delayed_node->mutex);
1934 }
1935 
1936 void btrfs_kill_delayed_inode_items(struct btrfs_inode *inode)
1937 {
1938         struct btrfs_delayed_node *delayed_node;
1939 
1940         delayed_node = btrfs_get_delayed_node(inode);
1941         if (!delayed_node)
1942                 return;
1943 
1944         __btrfs_kill_delayed_node(delayed_node);
1945         btrfs_release_delayed_node(delayed_node);
1946 }
1947 
1948 void btrfs_kill_all_delayed_nodes(struct btrfs_root *root)
1949 {
1950         u64 inode_id = 0;
1951         struct btrfs_delayed_node *delayed_nodes[8];
1952         int i, n;
1953 
1954         while (1) {
1955                 spin_lock(&root->inode_lock);
1956                 n = radix_tree_gang_lookup(&root->delayed_nodes_tree,
1957                                            (void **)delayed_nodes, inode_id,
1958                                            ARRAY_SIZE(delayed_nodes));
1959                 if (!n) {
1960                         spin_unlock(&root->inode_lock);
1961                         break;
1962                 }
1963 
1964                 inode_id = delayed_nodes[n - 1]->inode_id + 1;
1965                 for (i = 0; i < n; i++) {
1966                         /*
1967                          * Don't increase refs in case the node is dead and
1968                          * about to be removed from the tree in the loop below
1969                          */
1970                         if (!refcount_inc_not_zero(&delayed_nodes[i]->refs))
1971                                 delayed_nodes[i] = NULL;
1972                 }
1973                 spin_unlock(&root->inode_lock);
1974 
1975                 for (i = 0; i < n; i++) {
1976                         if (!delayed_nodes[i])
1977                                 continue;
1978                         __btrfs_kill_delayed_node(delayed_nodes[i]);
1979                         btrfs_release_delayed_node(delayed_nodes[i]);
1980                 }
1981         }
1982 }
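
/*
 * The gang lookup above scans root->delayed_nodes_tree in batches of
 * ARRAY_SIZE(delayed_nodes) (currently 8), restarting each pass at the
 * inode id just past the last node found, so the walk keeps making forward
 * progress even while nodes are concurrently added to or removed from the
 * radix tree.
 */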
1983 
1984 void btrfs_destroy_delayed_inodes(struct btrfs_fs_info *fs_info)
1985 {
1986         struct btrfs_delayed_node *curr_node, *prev_node;
1987 
1988         curr_node = btrfs_first_delayed_node(fs_info->delayed_root);
1989         while (curr_node) {
1990                 __btrfs_kill_delayed_node(curr_node);
1991 
1992                 prev_node = curr_node;
1993                 curr_node = btrfs_next_delayed_node(curr_node);
1994                 btrfs_release_delayed_node(prev_node);
1995         }
1996 }
1997 
