root/fs/btrfs/block-group.c

DEFINITIONS

This source file includes the following definitions:
  1. get_restripe_target
  2. btrfs_reduce_alloc_profile
  3. get_alloc_profile
  4. btrfs_get_alloc_profile
  5. btrfs_get_block_group
  6. btrfs_put_block_group
  7. btrfs_add_block_group_cache
  8. block_group_cache_tree_search
  9. btrfs_lookup_first_block_group
  10. btrfs_lookup_block_group
  11. btrfs_next_block_group
  12. btrfs_inc_nocow_writers
  13. btrfs_dec_nocow_writers
  14. btrfs_wait_nocow_writers
  15. btrfs_dec_block_group_reservations
  16. btrfs_wait_block_group_reservations
  17. btrfs_get_caching_control
  18. btrfs_put_caching_control
  19. btrfs_wait_block_group_cache_progress
  20. btrfs_wait_block_group_cache_done
  21. fragment_free_space
  22. add_new_free_space
  23. load_extent_tree_free
  24. caching_thread
  25. btrfs_cache_block_group
  26. clear_avail_alloc_bits
  27. clear_incompat_bg_bits
  28. btrfs_remove_block_group
  29. btrfs_start_trans_remove_block_group
  30. inc_block_group_ro
  31. btrfs_delete_unused_bgs
  32. btrfs_mark_bg_unused
  33. find_first_block_group
  34. set_avail_alloc_bits
  35. exclude_super_stripes
  36. link_block_group
  37. btrfs_create_block_group_cache
  38. check_chunk_block_group_mappings
  39. btrfs_read_block_groups
  40. btrfs_create_pending_block_groups
  41. btrfs_make_block_group
  42. update_block_group_flags
  43. btrfs_inc_block_group_ro
  44. btrfs_dec_block_group_ro
  45. write_one_cache_group
  46. cache_save_setup
  47. btrfs_setup_space_cache
  48. btrfs_start_dirty_block_groups
  49. btrfs_write_dirty_block_groups
  50. btrfs_update_block_group
  51. btrfs_add_reserved_bytes
  52. btrfs_free_reserved_bytes
  53. force_metadata_allocation
  54. should_alloc_chunk
  55. btrfs_force_chunk_alloc
  56. btrfs_chunk_alloc
  57. get_profile_num_devs
  58. check_system_chunk
  59. btrfs_put_block_group_cache
  60. btrfs_free_block_groups

   1 // SPDX-License-Identifier: GPL-2.0
   2 
   3 #include "misc.h"
   4 #include "ctree.h"
   5 #include "block-group.h"
   6 #include "space-info.h"
   7 #include "disk-io.h"
   8 #include "free-space-cache.h"
   9 #include "free-space-tree.h"
  11 #include "volumes.h"
  12 #include "transaction.h"
  13 #include "ref-verify.h"
  14 #include "sysfs.h"
  15 #include "tree-log.h"
  16 #include "delalloc-space.h"
  17 
  18 /*
  19  * Return target flags in extended format or 0 if restripe for this chunk_type
  20  * is not in progress
  21  *
  22  * Should be called with balance_lock held
  23  */
  24 static u64 get_restripe_target(struct btrfs_fs_info *fs_info, u64 flags)
  25 {
  26         struct btrfs_balance_control *bctl = fs_info->balance_ctl;
  27         u64 target = 0;
  28 
  29         if (!bctl)
  30                 return 0;
  31 
  32         if (flags & BTRFS_BLOCK_GROUP_DATA &&
  33             bctl->data.flags & BTRFS_BALANCE_ARGS_CONVERT) {
  34                 target = BTRFS_BLOCK_GROUP_DATA | bctl->data.target;
  35         } else if (flags & BTRFS_BLOCK_GROUP_SYSTEM &&
  36                    bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT) {
  37                 target = BTRFS_BLOCK_GROUP_SYSTEM | bctl->sys.target;
  38         } else if (flags & BTRFS_BLOCK_GROUP_METADATA &&
  39                    bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT) {
  40                 target = BTRFS_BLOCK_GROUP_METADATA | bctl->meta.target;
  41         }
  42 
  43         return target;
  44 }
  45 
  46 /*
  47  * @flags: available profiles in extended format (see ctree.h)
  48  *
  49  * Return reduced profile in chunk format.  If profile changing is in progress
   50  * (either running or paused), pick the target profile (if it's already
   51  * available); otherwise fall back to plain reducing.
  52  */
  53 static u64 btrfs_reduce_alloc_profile(struct btrfs_fs_info *fs_info, u64 flags)
  54 {
  55         u64 num_devices = fs_info->fs_devices->rw_devices;
  56         u64 target;
  57         u64 raid_type;
  58         u64 allowed = 0;
  59 
  60         /*
   61          * See if restripe for this chunk_type is in progress; if so, try to
   62          * reduce to the target profile.
  63          */
  64         spin_lock(&fs_info->balance_lock);
  65         target = get_restripe_target(fs_info, flags);
  66         if (target) {
  67                 /* Pick target profile only if it's already available */
  68                 if ((flags & target) & BTRFS_EXTENDED_PROFILE_MASK) {
  69                         spin_unlock(&fs_info->balance_lock);
  70                         return extended_to_chunk(target);
  71                 }
  72         }
  73         spin_unlock(&fs_info->balance_lock);
  74 
  75         /* First, mask out the RAID levels which aren't possible */
  76         for (raid_type = 0; raid_type < BTRFS_NR_RAID_TYPES; raid_type++) {
  77                 if (num_devices >= btrfs_raid_array[raid_type].devs_min)
  78                         allowed |= btrfs_raid_array[raid_type].bg_flag;
  79         }
  80         allowed &= flags;
  81 
  82         if (allowed & BTRFS_BLOCK_GROUP_RAID6)
  83                 allowed = BTRFS_BLOCK_GROUP_RAID6;
  84         else if (allowed & BTRFS_BLOCK_GROUP_RAID5)
  85                 allowed = BTRFS_BLOCK_GROUP_RAID5;
  86         else if (allowed & BTRFS_BLOCK_GROUP_RAID10)
  87                 allowed = BTRFS_BLOCK_GROUP_RAID10;
  88         else if (allowed & BTRFS_BLOCK_GROUP_RAID1)
  89                 allowed = BTRFS_BLOCK_GROUP_RAID1;
  90         else if (allowed & BTRFS_BLOCK_GROUP_RAID0)
  91                 allowed = BTRFS_BLOCK_GROUP_RAID0;
  92 
  93         flags &= ~BTRFS_BLOCK_GROUP_PROFILE_MASK;
  94 
  95         return extended_to_chunk(flags | allowed);
  96 }
  97 
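      /*
       * Collect the available allocation bits for the requested block group
       * type under the profiles_lock seqlock, retrying if a concurrent
       * profile update races with us, and reduce the result to a usable
       * profile in chunk format.
       */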
  98 static u64 get_alloc_profile(struct btrfs_fs_info *fs_info, u64 orig_flags)
  99 {
 100         unsigned seq;
 101         u64 flags;
 102 
 103         do {
 104                 flags = orig_flags;
 105                 seq = read_seqbegin(&fs_info->profiles_lock);
 106 
 107                 if (flags & BTRFS_BLOCK_GROUP_DATA)
 108                         flags |= fs_info->avail_data_alloc_bits;
 109                 else if (flags & BTRFS_BLOCK_GROUP_SYSTEM)
 110                         flags |= fs_info->avail_system_alloc_bits;
 111                 else if (flags & BTRFS_BLOCK_GROUP_METADATA)
 112                         flags |= fs_info->avail_metadata_alloc_bits;
 113         } while (read_seqretry(&fs_info->profiles_lock, seq));
 114 
 115         return btrfs_reduce_alloc_profile(fs_info, flags);
 116 }
 117 
 118 u64 btrfs_get_alloc_profile(struct btrfs_fs_info *fs_info, u64 orig_flags)
 119 {
 120         return get_alloc_profile(fs_info, orig_flags);
 121 }
 122 
 123 void btrfs_get_block_group(struct btrfs_block_group_cache *cache)
 124 {
 125         atomic_inc(&cache->count);
 126 }
 127 
 128 void btrfs_put_block_group(struct btrfs_block_group_cache *cache)
 129 {
 130         if (atomic_dec_and_test(&cache->count)) {
 131                 WARN_ON(cache->pinned > 0);
 132                 WARN_ON(cache->reserved > 0);
 133 
 134                 /*
  135                  * If not empty, someone is still holding a mutex from
  136                  * full_stripe_locks_root, which can only be released by its
  137                  * caller, so freeing the block group now would lead to a
  138                  * use-after-free when that caller releases the full stripe lock.
  139                  *
  140                  * There is no better way to resolve this, so just warn.
 141                  */
 142                 WARN_ON(!RB_EMPTY_ROOT(&cache->full_stripe_locks_root.root));
 143                 kfree(cache->free_space_ctl);
 144                 kfree(cache);
 145         }
 146 }
 147 
 148 /*
 149  * This adds the block group to the fs_info rb tree for the block group cache
 150  */
 151 static int btrfs_add_block_group_cache(struct btrfs_fs_info *info,
 152                                 struct btrfs_block_group_cache *block_group)
 153 {
 154         struct rb_node **p;
 155         struct rb_node *parent = NULL;
 156         struct btrfs_block_group_cache *cache;
 157 
 158         spin_lock(&info->block_group_cache_lock);
 159         p = &info->block_group_cache_tree.rb_node;
 160 
 161         while (*p) {
 162                 parent = *p;
 163                 cache = rb_entry(parent, struct btrfs_block_group_cache,
 164                                  cache_node);
 165                 if (block_group->key.objectid < cache->key.objectid) {
 166                         p = &(*p)->rb_left;
 167                 } else if (block_group->key.objectid > cache->key.objectid) {
 168                         p = &(*p)->rb_right;
 169                 } else {
 170                         spin_unlock(&info->block_group_cache_lock);
 171                         return -EEXIST;
 172                 }
 173         }
 174 
 175         rb_link_node(&block_group->cache_node, parent, p);
 176         rb_insert_color(&block_group->cache_node,
 177                         &info->block_group_cache_tree);
 178 
 179         if (info->first_logical_byte > block_group->key.objectid)
 180                 info->first_logical_byte = block_group->key.objectid;
 181 
 182         spin_unlock(&info->block_group_cache_lock);
 183 
 184         return 0;
 185 }
 186 
 187 /*
 188  * This will return the block group at or after bytenr if contains is 0, else
 189  * it will return the block group that contains the bytenr
 190  */
 191 static struct btrfs_block_group_cache *block_group_cache_tree_search(
 192                 struct btrfs_fs_info *info, u64 bytenr, int contains)
 193 {
 194         struct btrfs_block_group_cache *cache, *ret = NULL;
 195         struct rb_node *n;
 196         u64 end, start;
 197 
 198         spin_lock(&info->block_group_cache_lock);
 199         n = info->block_group_cache_tree.rb_node;
 200 
 201         while (n) {
 202                 cache = rb_entry(n, struct btrfs_block_group_cache,
 203                                  cache_node);
 204                 end = cache->key.objectid + cache->key.offset - 1;
 205                 start = cache->key.objectid;
 206 
 207                 if (bytenr < start) {
 208                         if (!contains && (!ret || start < ret->key.objectid))
 209                                 ret = cache;
 210                         n = n->rb_left;
 211                 } else if (bytenr > start) {
 212                         if (contains && bytenr <= end) {
 213                                 ret = cache;
 214                                 break;
 215                         }
 216                         n = n->rb_right;
 217                 } else {
 218                         ret = cache;
 219                         break;
 220                 }
 221         }
 222         if (ret) {
 223                 btrfs_get_block_group(ret);
 224                 if (bytenr == 0 && info->first_logical_byte > ret->key.objectid)
 225                         info->first_logical_byte = ret->key.objectid;
 226         }
 227         spin_unlock(&info->block_group_cache_lock);
 228 
 229         return ret;
 230 }
 231 
 232 /*
 233  * Return the block group that starts at or after bytenr
 234  */
 235 struct btrfs_block_group_cache *btrfs_lookup_first_block_group(
 236                 struct btrfs_fs_info *info, u64 bytenr)
 237 {
 238         return block_group_cache_tree_search(info, bytenr, 0);
 239 }
 240 
 241 /*
 242  * Return the block group that contains the given bytenr
 243  */
 244 struct btrfs_block_group_cache *btrfs_lookup_block_group(
 245                 struct btrfs_fs_info *info, u64 bytenr)
 246 {
 247         return block_group_cache_tree_search(info, bytenr, 1);
 248 }
 249 
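      /*
       * Advance to the block group following @cache in the rbtree.  Drops the
       * caller's reference on @cache and takes a reference on the returned
       * block group (or returns NULL after the last one).  If @cache was
       * already removed from the tree, fall back to a fresh lookup at the
       * next bytenr.
       */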
 250 struct btrfs_block_group_cache *btrfs_next_block_group(
 251                 struct btrfs_block_group_cache *cache)
 252 {
 253         struct btrfs_fs_info *fs_info = cache->fs_info;
 254         struct rb_node *node;
 255 
 256         spin_lock(&fs_info->block_group_cache_lock);
 257 
 258         /* If our block group was removed, we need a full search. */
 259         if (RB_EMPTY_NODE(&cache->cache_node)) {
 260                 const u64 next_bytenr = cache->key.objectid + cache->key.offset;
 261 
 262                 spin_unlock(&fs_info->block_group_cache_lock);
 263                 btrfs_put_block_group(cache);
  264                 cache = btrfs_lookup_first_block_group(fs_info, next_bytenr);
                      return cache;
 265         }
 266         node = rb_next(&cache->cache_node);
 267         btrfs_put_block_group(cache);
 268         if (node) {
 269                 cache = rb_entry(node, struct btrfs_block_group_cache,
 270                                  cache_node);
 271                 btrfs_get_block_group(cache);
 272         } else
 273                 cache = NULL;
 274         spin_unlock(&fs_info->block_group_cache_lock);
 275         return cache;
 276 }
 277 
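      /*
       * Account one more NOCOW writer in the block group containing @bytenr.
       * Returns false if the block group does not exist or is read-only.  On
       * success the block group reference taken here is kept until the
       * matching btrfs_dec_nocow_writers() call drops it.
       */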
 278 bool btrfs_inc_nocow_writers(struct btrfs_fs_info *fs_info, u64 bytenr)
 279 {
 280         struct btrfs_block_group_cache *bg;
 281         bool ret = true;
 282 
 283         bg = btrfs_lookup_block_group(fs_info, bytenr);
 284         if (!bg)
 285                 return false;
 286 
 287         spin_lock(&bg->lock);
 288         if (bg->ro)
 289                 ret = false;
 290         else
 291                 atomic_inc(&bg->nocow_writers);
 292         spin_unlock(&bg->lock);
 293 
 294         /* No put on block group, done by btrfs_dec_nocow_writers */
 295         if (!ret)
 296                 btrfs_put_block_group(bg);
 297 
 298         return ret;
 299 }
 300 
 301 void btrfs_dec_nocow_writers(struct btrfs_fs_info *fs_info, u64 bytenr)
 302 {
 303         struct btrfs_block_group_cache *bg;
 304 
 305         bg = btrfs_lookup_block_group(fs_info, bytenr);
 306         ASSERT(bg);
 307         if (atomic_dec_and_test(&bg->nocow_writers))
 308                 wake_up_var(&bg->nocow_writers);
 309         /*
 310          * Once for our lookup and once for the lookup done by a previous call
 311          * to btrfs_inc_nocow_writers()
 312          */
 313         btrfs_put_block_group(bg);
 314         btrfs_put_block_group(bg);
 315 }
 316 
 317 void btrfs_wait_nocow_writers(struct btrfs_block_group_cache *bg)
 318 {
 319         wait_var_event(&bg->nocow_writers, !atomic_read(&bg->nocow_writers));
 320 }
 321 
 322 void btrfs_dec_block_group_reservations(struct btrfs_fs_info *fs_info,
 323                                         const u64 start)
 324 {
 325         struct btrfs_block_group_cache *bg;
 326 
 327         bg = btrfs_lookup_block_group(fs_info, start);
 328         ASSERT(bg);
 329         if (atomic_dec_and_test(&bg->reservations))
 330                 wake_up_var(&bg->reservations);
 331         btrfs_put_block_group(bg);
 332 }
 333 
 334 void btrfs_wait_block_group_reservations(struct btrfs_block_group_cache *bg)
 335 {
 336         struct btrfs_space_info *space_info = bg->space_info;
 337 
 338         ASSERT(bg->ro);
 339 
 340         if (!(bg->flags & BTRFS_BLOCK_GROUP_DATA))
 341                 return;
 342 
 343         /*
 344          * Our block group is read only but before we set it to read only,
  345          * some task might have allocated an extent from it already, but it
 346          * has not yet created a respective ordered extent (and added it to a
 347          * root's list of ordered extents).
 348          * Therefore wait for any task currently allocating extents, since the
 349          * block group's reservations counter is incremented while a read lock
 350          * on the groups' semaphore is held and decremented after releasing
 351          * the read access on that semaphore and creating the ordered extent.
 352          */
 353         down_write(&space_info->groups_sem);
 354         up_write(&space_info->groups_sem);
 355 
 356         wait_var_event(&bg->reservations, !atomic_read(&bg->reservations));
 357 }
 358 
 359 struct btrfs_caching_control *btrfs_get_caching_control(
 360                 struct btrfs_block_group_cache *cache)
 361 {
 362         struct btrfs_caching_control *ctl;
 363 
 364         spin_lock(&cache->lock);
 365         if (!cache->caching_ctl) {
 366                 spin_unlock(&cache->lock);
 367                 return NULL;
 368         }
 369 
 370         ctl = cache->caching_ctl;
 371         refcount_inc(&ctl->count);
 372         spin_unlock(&cache->lock);
 373         return ctl;
 374 }
 375 
 376 void btrfs_put_caching_control(struct btrfs_caching_control *ctl)
 377 {
 378         if (refcount_dec_and_test(&ctl->count))
 379                 kfree(ctl);
 380 }
 381 
 382 /*
  383  * When we wait for progress in the block group caching, it's because our
 384  * allocation attempt failed at least once.  So, we must sleep and let some
 385  * progress happen before we try again.
 386  *
 387  * This function will sleep at least once waiting for new free space to show
 388  * up, and then it will check the block group free space numbers for our min
 389  * num_bytes.  Another option is to have it go ahead and look in the rbtree for
 390  * a free extent of a given size, but this is a good start.
 391  *
 392  * Callers of this must check if cache->cached == BTRFS_CACHE_ERROR before using
 393  * any of the information in this block group.
 394  */
 395 void btrfs_wait_block_group_cache_progress(struct btrfs_block_group_cache *cache,
 396                                            u64 num_bytes)
 397 {
 398         struct btrfs_caching_control *caching_ctl;
 399 
 400         caching_ctl = btrfs_get_caching_control(cache);
 401         if (!caching_ctl)
 402                 return;
 403 
 404         wait_event(caching_ctl->wait, btrfs_block_group_cache_done(cache) ||
 405                    (cache->free_space_ctl->free_space >= num_bytes));
 406 
 407         btrfs_put_caching_control(caching_ctl);
 408 }
 409 
 410 int btrfs_wait_block_group_cache_done(struct btrfs_block_group_cache *cache)
 411 {
 412         struct btrfs_caching_control *caching_ctl;
 413         int ret = 0;
 414 
 415         caching_ctl = btrfs_get_caching_control(cache);
 416         if (!caching_ctl)
 417                 return (cache->cached == BTRFS_CACHE_ERROR) ? -EIO : 0;
 418 
 419         wait_event(caching_ctl->wait, btrfs_block_group_cache_done(cache));
 420         if (cache->cached == BTRFS_CACHE_ERROR)
 421                 ret = -EIO;
 422         btrfs_put_caching_control(caching_ctl);
 423         return ret;
 424 }
 425 
 426 #ifdef CONFIG_BTRFS_DEBUG
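      /*
       * Debug helper: remove every other chunk of free space from the block
       * group so allocator behaviour on a heavily fragmented group can be
       * exercised.
       */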
 427 static void fragment_free_space(struct btrfs_block_group_cache *block_group)
 428 {
 429         struct btrfs_fs_info *fs_info = block_group->fs_info;
 430         u64 start = block_group->key.objectid;
 431         u64 len = block_group->key.offset;
 432         u64 chunk = block_group->flags & BTRFS_BLOCK_GROUP_METADATA ?
 433                 fs_info->nodesize : fs_info->sectorsize;
 434         u64 step = chunk << 1;
 435 
 436         while (len > chunk) {
 437                 btrfs_remove_free_space(block_group, start, chunk);
 438                 start += step;
 439                 if (len < step)
 440                         len = 0;
 441                 else
 442                         len -= step;
 443         }
 444 }
 445 #endif
 446 
 447 /*
  448  * This is only called by btrfs_cache_block_group().  Since we could have freed
  449  * extents, we need to check the pinned_extents for any extents that can't be
  450  * used yet, since their free space will be released as soon as the transaction
 451  * commits.
 452  */
 453 u64 add_new_free_space(struct btrfs_block_group_cache *block_group,
 454                        u64 start, u64 end)
 455 {
 456         struct btrfs_fs_info *info = block_group->fs_info;
 457         u64 extent_start, extent_end, size, total_added = 0;
 458         int ret;
 459 
 460         while (start < end) {
 461                 ret = find_first_extent_bit(info->pinned_extents, start,
 462                                             &extent_start, &extent_end,
 463                                             EXTENT_DIRTY | EXTENT_UPTODATE,
 464                                             NULL);
 465                 if (ret)
 466                         break;
 467 
 468                 if (extent_start <= start) {
 469                         start = extent_end + 1;
 470                 } else if (extent_start > start && extent_start < end) {
 471                         size = extent_start - start;
 472                         total_added += size;
 473                         ret = btrfs_add_free_space(block_group, start,
 474                                                    size);
 475                         BUG_ON(ret); /* -ENOMEM or logic error */
 476                         start = extent_end + 1;
 477                 } else {
 478                         break;
 479                 }
 480         }
 481 
 482         if (start < end) {
 483                 size = end - start;
 484                 total_added += size;
 485                 ret = btrfs_add_free_space(block_group, start, size);
 486                 BUG_ON(ret); /* -ENOMEM or logic error */
 487         }
 488 
 489         return total_added;
 490 }
 491 
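      /*
       * Build the free space cache for a block group by walking the extent
       * tree's commit root: every gap between allocated extents inside the
       * block group's range is added as free space.  The commit_root_sem and
       * the caching mutex are dropped periodically to let other work make
       * progress, and waiters are woken once enough free space is found.
       */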
 492 static int load_extent_tree_free(struct btrfs_caching_control *caching_ctl)
 493 {
 494         struct btrfs_block_group_cache *block_group = caching_ctl->block_group;
 495         struct btrfs_fs_info *fs_info = block_group->fs_info;
 496         struct btrfs_root *extent_root = fs_info->extent_root;
 497         struct btrfs_path *path;
 498         struct extent_buffer *leaf;
 499         struct btrfs_key key;
 500         u64 total_found = 0;
 501         u64 last = 0;
 502         u32 nritems;
 503         int ret;
 504         bool wakeup = true;
 505 
 506         path = btrfs_alloc_path();
 507         if (!path)
 508                 return -ENOMEM;
 509 
 510         last = max_t(u64, block_group->key.objectid, BTRFS_SUPER_INFO_OFFSET);
 511 
 512 #ifdef CONFIG_BTRFS_DEBUG
 513         /*
 514          * If we're fragmenting we don't want to make anybody think we can
 515          * allocate from this block group until we've had a chance to fragment
 516          * the free space.
 517          */
 518         if (btrfs_should_fragment_free_space(block_group))
 519                 wakeup = false;
 520 #endif
 521         /*
 522          * We don't want to deadlock with somebody trying to allocate a new
 523          * extent for the extent root while also trying to search the extent
 524          * root to add free space.  So we skip locking and search the commit
  525          * root, since it's read-only.
 526          */
 527         path->skip_locking = 1;
 528         path->search_commit_root = 1;
 529         path->reada = READA_FORWARD;
 530 
 531         key.objectid = last;
 532         key.offset = 0;
 533         key.type = BTRFS_EXTENT_ITEM_KEY;
 534 
 535 next:
 536         ret = btrfs_search_slot(NULL, extent_root, &key, path, 0, 0);
 537         if (ret < 0)
 538                 goto out;
 539 
 540         leaf = path->nodes[0];
 541         nritems = btrfs_header_nritems(leaf);
 542 
 543         while (1) {
 544                 if (btrfs_fs_closing(fs_info) > 1) {
 545                         last = (u64)-1;
 546                         break;
 547                 }
 548 
 549                 if (path->slots[0] < nritems) {
 550                         btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
 551                 } else {
 552                         ret = btrfs_find_next_key(extent_root, path, &key, 0, 0);
 553                         if (ret)
 554                                 break;
 555 
 556                         if (need_resched() ||
 557                             rwsem_is_contended(&fs_info->commit_root_sem)) {
 558                                 if (wakeup)
 559                                         caching_ctl->progress = last;
 560                                 btrfs_release_path(path);
 561                                 up_read(&fs_info->commit_root_sem);
 562                                 mutex_unlock(&caching_ctl->mutex);
 563                                 cond_resched();
 564                                 mutex_lock(&caching_ctl->mutex);
 565                                 down_read(&fs_info->commit_root_sem);
 566                                 goto next;
 567                         }
 568 
 569                         ret = btrfs_next_leaf(extent_root, path);
 570                         if (ret < 0)
 571                                 goto out;
 572                         if (ret)
 573                                 break;
 574                         leaf = path->nodes[0];
 575                         nritems = btrfs_header_nritems(leaf);
 576                         continue;
 577                 }
 578 
 579                 if (key.objectid < last) {
 580                         key.objectid = last;
 581                         key.offset = 0;
 582                         key.type = BTRFS_EXTENT_ITEM_KEY;
 583 
 584                         if (wakeup)
 585                                 caching_ctl->progress = last;
 586                         btrfs_release_path(path);
 587                         goto next;
 588                 }
 589 
 590                 if (key.objectid < block_group->key.objectid) {
 591                         path->slots[0]++;
 592                         continue;
 593                 }
 594 
 595                 if (key.objectid >= block_group->key.objectid +
 596                     block_group->key.offset)
 597                         break;
 598 
 599                 if (key.type == BTRFS_EXTENT_ITEM_KEY ||
 600                     key.type == BTRFS_METADATA_ITEM_KEY) {
 601                         total_found += add_new_free_space(block_group, last,
 602                                                           key.objectid);
 603                         if (key.type == BTRFS_METADATA_ITEM_KEY)
 604                                 last = key.objectid +
 605                                         fs_info->nodesize;
 606                         else
 607                                 last = key.objectid + key.offset;
 608 
 609                         if (total_found > CACHING_CTL_WAKE_UP) {
 610                                 total_found = 0;
 611                                 if (wakeup)
 612                                         wake_up(&caching_ctl->wait);
 613                         }
 614                 }
 615                 path->slots[0]++;
 616         }
 617         ret = 0;
 618 
 619         total_found += add_new_free_space(block_group, last,
 620                                           block_group->key.objectid +
 621                                           block_group->key.offset);
 622         caching_ctl->progress = (u64)-1;
 623 
 624 out:
 625         btrfs_free_path(path);
 626         return ret;
 627 }
 628 
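      /*
       * Background worker that fills in a block group's free space info,
       * using the free space tree when available and the extent tree
       * otherwise.  When done it marks the group BTRFS_CACHE_FINISHED (or
       * BTRFS_CACHE_ERROR), releases the excluded extents and wakes anyone
       * waiting on the caching control.
       */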
 629 static noinline void caching_thread(struct btrfs_work *work)
 630 {
 631         struct btrfs_block_group_cache *block_group;
 632         struct btrfs_fs_info *fs_info;
 633         struct btrfs_caching_control *caching_ctl;
 634         int ret;
 635 
 636         caching_ctl = container_of(work, struct btrfs_caching_control, work);
 637         block_group = caching_ctl->block_group;
 638         fs_info = block_group->fs_info;
 639 
 640         mutex_lock(&caching_ctl->mutex);
 641         down_read(&fs_info->commit_root_sem);
 642 
 643         if (btrfs_fs_compat_ro(fs_info, FREE_SPACE_TREE))
 644                 ret = load_free_space_tree(caching_ctl);
 645         else
 646                 ret = load_extent_tree_free(caching_ctl);
 647 
 648         spin_lock(&block_group->lock);
 649         block_group->caching_ctl = NULL;
 650         block_group->cached = ret ? BTRFS_CACHE_ERROR : BTRFS_CACHE_FINISHED;
 651         spin_unlock(&block_group->lock);
 652 
 653 #ifdef CONFIG_BTRFS_DEBUG
 654         if (btrfs_should_fragment_free_space(block_group)) {
 655                 u64 bytes_used;
 656 
 657                 spin_lock(&block_group->space_info->lock);
 658                 spin_lock(&block_group->lock);
 659                 bytes_used = block_group->key.offset -
 660                         btrfs_block_group_used(&block_group->item);
 661                 block_group->space_info->bytes_used += bytes_used >> 1;
 662                 spin_unlock(&block_group->lock);
 663                 spin_unlock(&block_group->space_info->lock);
 664                 fragment_free_space(block_group);
 665         }
 666 #endif
 667 
 668         caching_ctl->progress = (u64)-1;
 669 
 670         up_read(&fs_info->commit_root_sem);
 671         btrfs_free_excluded_extents(block_group);
 672         mutex_unlock(&caching_ctl->mutex);
 673 
 674         wake_up(&caching_ctl->wait);
 675 
 676         btrfs_put_caching_control(caching_ctl);
 677         btrfs_put_block_group(block_group);
 678 }
 679 
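      /*
       * Start caching a block group's free space.  When mounted with
       * space_cache, try to load the on-disk free space cache synchronously
       * first; if that succeeds the group is marked cached right away,
       * otherwise (unless @load_cache_only is set) caching_thread() is queued
       * to build the information in the background.
       */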
 680 int btrfs_cache_block_group(struct btrfs_block_group_cache *cache,
 681                             int load_cache_only)
 682 {
 683         DEFINE_WAIT(wait);
 684         struct btrfs_fs_info *fs_info = cache->fs_info;
 685         struct btrfs_caching_control *caching_ctl;
 686         int ret = 0;
 687 
 688         caching_ctl = kzalloc(sizeof(*caching_ctl), GFP_NOFS);
 689         if (!caching_ctl)
 690                 return -ENOMEM;
 691 
 692         INIT_LIST_HEAD(&caching_ctl->list);
 693         mutex_init(&caching_ctl->mutex);
 694         init_waitqueue_head(&caching_ctl->wait);
 695         caching_ctl->block_group = cache;
 696         caching_ctl->progress = cache->key.objectid;
 697         refcount_set(&caching_ctl->count, 1);
 698         btrfs_init_work(&caching_ctl->work, caching_thread, NULL, NULL);
 699 
 700         spin_lock(&cache->lock);
 701         /*
 702          * This should be a rare occasion, but this could happen I think in the
 703          * case where one thread starts to load the space cache info, and then
 704          * some other thread starts a transaction commit which tries to do an
 705          * allocation while the other thread is still loading the space cache
 706          * info.  The previous loop should have kept us from choosing this block
 707          * group, but if we've moved to the state where we will wait on caching
 708          * block groups we need to first check if we're doing a fast load here,
 709          * so we can wait for it to finish, otherwise we could end up allocating
  710          * from a block group whose cache gets evicted for one reason or
 711          * another.
 712          */
 713         while (cache->cached == BTRFS_CACHE_FAST) {
 714                 struct btrfs_caching_control *ctl;
 715 
 716                 ctl = cache->caching_ctl;
 717                 refcount_inc(&ctl->count);
 718                 prepare_to_wait(&ctl->wait, &wait, TASK_UNINTERRUPTIBLE);
 719                 spin_unlock(&cache->lock);
 720 
 721                 schedule();
 722 
 723                 finish_wait(&ctl->wait, &wait);
 724                 btrfs_put_caching_control(ctl);
 725                 spin_lock(&cache->lock);
 726         }
 727 
 728         if (cache->cached != BTRFS_CACHE_NO) {
 729                 spin_unlock(&cache->lock);
 730                 kfree(caching_ctl);
 731                 return 0;
 732         }
 733         WARN_ON(cache->caching_ctl);
 734         cache->caching_ctl = caching_ctl;
 735         cache->cached = BTRFS_CACHE_FAST;
 736         spin_unlock(&cache->lock);
 737 
 738         if (btrfs_test_opt(fs_info, SPACE_CACHE)) {
 739                 mutex_lock(&caching_ctl->mutex);
 740                 ret = load_free_space_cache(cache);
 741 
 742                 spin_lock(&cache->lock);
 743                 if (ret == 1) {
 744                         cache->caching_ctl = NULL;
 745                         cache->cached = BTRFS_CACHE_FINISHED;
 746                         cache->last_byte_to_unpin = (u64)-1;
 747                         caching_ctl->progress = (u64)-1;
 748                 } else {
 749                         if (load_cache_only) {
 750                                 cache->caching_ctl = NULL;
 751                                 cache->cached = BTRFS_CACHE_NO;
 752                         } else {
 753                                 cache->cached = BTRFS_CACHE_STARTED;
 754                                 cache->has_caching_ctl = 1;
 755                         }
 756                 }
 757                 spin_unlock(&cache->lock);
 758 #ifdef CONFIG_BTRFS_DEBUG
 759                 if (ret == 1 &&
 760                     btrfs_should_fragment_free_space(cache)) {
 761                         u64 bytes_used;
 762 
 763                         spin_lock(&cache->space_info->lock);
 764                         spin_lock(&cache->lock);
 765                         bytes_used = cache->key.offset -
 766                                 btrfs_block_group_used(&cache->item);
 767                         cache->space_info->bytes_used += bytes_used >> 1;
 768                         spin_unlock(&cache->lock);
 769                         spin_unlock(&cache->space_info->lock);
 770                         fragment_free_space(cache);
 771                 }
 772 #endif
 773                 mutex_unlock(&caching_ctl->mutex);
 774 
 775                 wake_up(&caching_ctl->wait);
 776                 if (ret == 1) {
 777                         btrfs_put_caching_control(caching_ctl);
 778                         btrfs_free_excluded_extents(cache);
 779                         return 0;
 780                 }
 781         } else {
 782                 /*
 783                  * We're either using the free space tree or no caching at all.
  784                  * Set cached to the appropriate value and wake up any waiters.
 785                  */
 786                 spin_lock(&cache->lock);
 787                 if (load_cache_only) {
 788                         cache->caching_ctl = NULL;
 789                         cache->cached = BTRFS_CACHE_NO;
 790                 } else {
 791                         cache->cached = BTRFS_CACHE_STARTED;
 792                         cache->has_caching_ctl = 1;
 793                 }
 794                 spin_unlock(&cache->lock);
 795                 wake_up(&caching_ctl->wait);
 796         }
 797 
 798         if (load_cache_only) {
 799                 btrfs_put_caching_control(caching_ctl);
 800                 return 0;
 801         }
 802 
 803         down_write(&fs_info->commit_root_sem);
 804         refcount_inc(&caching_ctl->count);
 805         list_add_tail(&caching_ctl->list, &fs_info->caching_block_groups);
 806         up_write(&fs_info->commit_root_sem);
 807 
 808         btrfs_get_block_group(cache);
 809 
 810         btrfs_queue_work(fs_info->caching_workers, &caching_ctl->work);
 811 
 812         return ret;
 813 }
 814 
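      /*
       * Clear the extended profile bits of @flags from the per-type available
       * allocation bits.  Called from btrfs_remove_block_group() once the last
       * block group using that profile index is gone.
       */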
 815 static void clear_avail_alloc_bits(struct btrfs_fs_info *fs_info, u64 flags)
 816 {
 817         u64 extra_flags = chunk_to_extended(flags) &
 818                                 BTRFS_EXTENDED_PROFILE_MASK;
 819 
 820         write_seqlock(&fs_info->profiles_lock);
 821         if (flags & BTRFS_BLOCK_GROUP_DATA)
 822                 fs_info->avail_data_alloc_bits &= ~extra_flags;
 823         if (flags & BTRFS_BLOCK_GROUP_METADATA)
 824                 fs_info->avail_metadata_alloc_bits &= ~extra_flags;
 825         if (flags & BTRFS_BLOCK_GROUP_SYSTEM)
 826                 fs_info->avail_system_alloc_bits &= ~extra_flags;
 827         write_sequnlock(&fs_info->profiles_lock);
 828 }
 829 
 830 /*
 831  * Clear incompat bits for the following feature(s):
 832  *
 833  * - RAID56 - in case there's neither RAID5 nor RAID6 profile block group
 834  *            in the whole filesystem
 835  */
 836 static void clear_incompat_bg_bits(struct btrfs_fs_info *fs_info, u64 flags)
 837 {
 838         if (flags & BTRFS_BLOCK_GROUP_RAID56_MASK) {
 839                 struct list_head *head = &fs_info->space_info;
 840                 struct btrfs_space_info *sinfo;
 841 
 842                 list_for_each_entry_rcu(sinfo, head, list) {
 843                         bool found = false;
 844 
 845                         down_read(&sinfo->groups_sem);
 846                         if (!list_empty(&sinfo->block_groups[BTRFS_RAID_RAID5]))
 847                                 found = true;
 848                         if (!list_empty(&sinfo->block_groups[BTRFS_RAID_RAID6]))
 849                                 found = true;
 850                         up_read(&sinfo->groups_sem);
 851 
 852                         if (found)
 853                                 return;
 854                 }
 855                 btrfs_clear_fs_incompat(fs_info, RAID56);
 856         }
 857 }
 858 
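      /*
       * Remove an empty, read-only block group: drop its free space cache
       * inode, unlink it from the block group cache rbtree and its space_info,
       * wait for any in-progress caching, adjust the space accounting and
       * delete the block group item from the extent tree.  The chunk tree
       * update itself is done separately at btrfs_remove_chunk().
       */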
 859 int btrfs_remove_block_group(struct btrfs_trans_handle *trans,
 860                              u64 group_start, struct extent_map *em)
 861 {
 862         struct btrfs_fs_info *fs_info = trans->fs_info;
 863         struct btrfs_root *root = fs_info->extent_root;
 864         struct btrfs_path *path;
 865         struct btrfs_block_group_cache *block_group;
 866         struct btrfs_free_cluster *cluster;
 867         struct btrfs_root *tree_root = fs_info->tree_root;
 868         struct btrfs_key key;
 869         struct inode *inode;
 870         struct kobject *kobj = NULL;
 871         int ret;
 872         int index;
 873         int factor;
 874         struct btrfs_caching_control *caching_ctl = NULL;
 875         bool remove_em;
 876         bool remove_rsv = false;
 877 
 878         block_group = btrfs_lookup_block_group(fs_info, group_start);
 879         BUG_ON(!block_group);
 880         BUG_ON(!block_group->ro);
 881 
 882         trace_btrfs_remove_block_group(block_group);
 883         /*
 884          * Free the reserved super bytes from this block group before
 885          * remove it.
 886          */
 887         btrfs_free_excluded_extents(block_group);
 888         btrfs_free_ref_tree_range(fs_info, block_group->key.objectid,
 889                                   block_group->key.offset);
 890 
 891         memcpy(&key, &block_group->key, sizeof(key));
 892         index = btrfs_bg_flags_to_raid_index(block_group->flags);
 893         factor = btrfs_bg_type_to_factor(block_group->flags);
 894 
 895         /* make sure this block group isn't part of an allocation cluster */
 896         cluster = &fs_info->data_alloc_cluster;
 897         spin_lock(&cluster->refill_lock);
 898         btrfs_return_cluster_to_free_space(block_group, cluster);
 899         spin_unlock(&cluster->refill_lock);
 900 
 901         /*
 902          * make sure this block group isn't part of a metadata
 903          * allocation cluster
 904          */
 905         cluster = &fs_info->meta_alloc_cluster;
 906         spin_lock(&cluster->refill_lock);
 907         btrfs_return_cluster_to_free_space(block_group, cluster);
 908         spin_unlock(&cluster->refill_lock);
 909 
 910         path = btrfs_alloc_path();
 911         if (!path) {
 912                 ret = -ENOMEM;
 913                 goto out_put_group;
 914         }
 915 
 916         /*
 917          * get the inode first so any iput calls done for the io_list
 918          * aren't the final iput (no unlinks allowed now)
 919          */
 920         inode = lookup_free_space_inode(block_group, path);
 921 
 922         mutex_lock(&trans->transaction->cache_write_mutex);
 923         /*
 924          * Make sure our free space cache IO is done before removing the
 925          * free space inode
 926          */
 927         spin_lock(&trans->transaction->dirty_bgs_lock);
 928         if (!list_empty(&block_group->io_list)) {
 929                 list_del_init(&block_group->io_list);
 930 
 931                 WARN_ON(!IS_ERR(inode) && inode != block_group->io_ctl.inode);
 932 
 933                 spin_unlock(&trans->transaction->dirty_bgs_lock);
 934                 btrfs_wait_cache_io(trans, block_group, path);
 935                 btrfs_put_block_group(block_group);
 936                 spin_lock(&trans->transaction->dirty_bgs_lock);
 937         }
 938 
 939         if (!list_empty(&block_group->dirty_list)) {
 940                 list_del_init(&block_group->dirty_list);
 941                 remove_rsv = true;
 942                 btrfs_put_block_group(block_group);
 943         }
 944         spin_unlock(&trans->transaction->dirty_bgs_lock);
 945         mutex_unlock(&trans->transaction->cache_write_mutex);
 946 
 947         if (!IS_ERR(inode)) {
 948                 ret = btrfs_orphan_add(trans, BTRFS_I(inode));
 949                 if (ret) {
 950                         btrfs_add_delayed_iput(inode);
 951                         goto out_put_group;
 952                 }
 953                 clear_nlink(inode);
 954                 /* One for the block groups ref */
 955                 spin_lock(&block_group->lock);
 956                 if (block_group->iref) {
 957                         block_group->iref = 0;
 958                         block_group->inode = NULL;
 959                         spin_unlock(&block_group->lock);
 960                         iput(inode);
 961                 } else {
 962                         spin_unlock(&block_group->lock);
 963                 }
 964                 /* One for our lookup ref */
 965                 btrfs_add_delayed_iput(inode);
 966         }
 967 
 968         key.objectid = BTRFS_FREE_SPACE_OBJECTID;
 969         key.offset = block_group->key.objectid;
 970         key.type = 0;
 971 
 972         ret = btrfs_search_slot(trans, tree_root, &key, path, -1, 1);
 973         if (ret < 0)
 974                 goto out_put_group;
 975         if (ret > 0)
 976                 btrfs_release_path(path);
 977         if (ret == 0) {
 978                 ret = btrfs_del_item(trans, tree_root, path);
 979                 if (ret)
 980                         goto out_put_group;
 981                 btrfs_release_path(path);
 982         }
 983 
 984         spin_lock(&fs_info->block_group_cache_lock);
 985         rb_erase(&block_group->cache_node,
 986                  &fs_info->block_group_cache_tree);
 987         RB_CLEAR_NODE(&block_group->cache_node);
 988 
 989         if (fs_info->first_logical_byte == block_group->key.objectid)
 990                 fs_info->first_logical_byte = (u64)-1;
 991         spin_unlock(&fs_info->block_group_cache_lock);
 992 
 993         down_write(&block_group->space_info->groups_sem);
 994         /*
 995          * we must use list_del_init so people can check to see if they
 996          * are still on the list after taking the semaphore
 997          */
 998         list_del_init(&block_group->list);
 999         if (list_empty(&block_group->space_info->block_groups[index])) {
1000                 kobj = block_group->space_info->block_group_kobjs[index];
1001                 block_group->space_info->block_group_kobjs[index] = NULL;
1002                 clear_avail_alloc_bits(fs_info, block_group->flags);
1003         }
1004         up_write(&block_group->space_info->groups_sem);
1005         clear_incompat_bg_bits(fs_info, block_group->flags);
1006         if (kobj) {
1007                 kobject_del(kobj);
1008                 kobject_put(kobj);
1009         }
1010 
1011         if (block_group->has_caching_ctl)
1012                 caching_ctl = btrfs_get_caching_control(block_group);
1013         if (block_group->cached == BTRFS_CACHE_STARTED)
1014                 btrfs_wait_block_group_cache_done(block_group);
1015         if (block_group->has_caching_ctl) {
1016                 down_write(&fs_info->commit_root_sem);
1017                 if (!caching_ctl) {
1018                         struct btrfs_caching_control *ctl;
1019 
1020                         list_for_each_entry(ctl,
1021                                     &fs_info->caching_block_groups, list)
1022                                 if (ctl->block_group == block_group) {
1023                                         caching_ctl = ctl;
1024                                         refcount_inc(&caching_ctl->count);
1025                                         break;
1026                                 }
1027                 }
1028                 if (caching_ctl)
1029                         list_del_init(&caching_ctl->list);
1030                 up_write(&fs_info->commit_root_sem);
1031                 if (caching_ctl) {
1032                         /* Once for the caching bgs list and once for us. */
1033                         btrfs_put_caching_control(caching_ctl);
1034                         btrfs_put_caching_control(caching_ctl);
1035                 }
1036         }
1037 
1038         spin_lock(&trans->transaction->dirty_bgs_lock);
1039         WARN_ON(!list_empty(&block_group->dirty_list));
1040         WARN_ON(!list_empty(&block_group->io_list));
1041         spin_unlock(&trans->transaction->dirty_bgs_lock);
1042 
1043         btrfs_remove_free_space_cache(block_group);
1044 
1045         spin_lock(&block_group->space_info->lock);
1046         list_del_init(&block_group->ro_list);
1047 
1048         if (btrfs_test_opt(fs_info, ENOSPC_DEBUG)) {
1049                 WARN_ON(block_group->space_info->total_bytes
1050                         < block_group->key.offset);
1051                 WARN_ON(block_group->space_info->bytes_readonly
1052                         < block_group->key.offset);
1053                 WARN_ON(block_group->space_info->disk_total
1054                         < block_group->key.offset * factor);
1055         }
1056         block_group->space_info->total_bytes -= block_group->key.offset;
1057         block_group->space_info->bytes_readonly -= block_group->key.offset;
1058         block_group->space_info->disk_total -= block_group->key.offset * factor;
1059 
1060         spin_unlock(&block_group->space_info->lock);
1061 
1062         memcpy(&key, &block_group->key, sizeof(key));
1063 
1064         mutex_lock(&fs_info->chunk_mutex);
1065         spin_lock(&block_group->lock);
1066         block_group->removed = 1;
1067         /*
1068          * At this point trimming can't start on this block group, because we
1069          * removed the block group from the tree fs_info->block_group_cache_tree
 1070          * so no one can find it anymore, and even if someone already got this
1071          * block group before we removed it from the rbtree, they have already
1072          * incremented block_group->trimming - if they didn't, they won't find
1073          * any free space entries because we already removed them all when we
1074          * called btrfs_remove_free_space_cache().
1075          *
1076          * And we must not remove the extent map from the fs_info->mapping_tree
1077          * to prevent the same logical address range and physical device space
1078          * ranges from being reused for a new block group. This is because our
1079          * fs trim operation (btrfs_trim_fs() / btrfs_ioctl_fitrim()) is
1080          * completely transactionless, so while it is trimming a range the
1081          * currently running transaction might finish and a new one start,
1082          * allowing for new block groups to be created that can reuse the same
1083          * physical device locations unless we take this special care.
1084          *
1085          * There may also be an implicit trim operation if the file system
1086          * is mounted with -odiscard. The same protections must remain
1087          * in place until the extents have been discarded completely when
1088          * the transaction commit has completed.
1089          */
1090         remove_em = (atomic_read(&block_group->trimming) == 0);
1091         spin_unlock(&block_group->lock);
1092 
1093         mutex_unlock(&fs_info->chunk_mutex);
1094 
1095         ret = remove_block_group_free_space(trans, block_group);
1096         if (ret)
1097                 goto out_put_group;
1098 
1099         /* Once for the block groups rbtree */
1100         btrfs_put_block_group(block_group);
1101 
1102         ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
1103         if (ret > 0)
1104                 ret = -EIO;
1105         if (ret < 0)
1106                 goto out;
1107 
1108         ret = btrfs_del_item(trans, root, path);
1109         if (ret)
1110                 goto out;
1111 
1112         if (remove_em) {
1113                 struct extent_map_tree *em_tree;
1114 
1115                 em_tree = &fs_info->mapping_tree;
1116                 write_lock(&em_tree->lock);
1117                 remove_extent_mapping(em_tree, em);
1118                 write_unlock(&em_tree->lock);
1119                 /* once for the tree */
1120                 free_extent_map(em);
1121         }
1122 
1123 out_put_group:
1124         /* Once for the lookup reference */
1125         btrfs_put_block_group(block_group);
1126 out:
1127         if (remove_rsv)
1128                 btrfs_delayed_refs_rsv_release(fs_info, 1);
1129         btrfs_free_path(path);
1130         return ret;
1131 }
1132 
1133 struct btrfs_trans_handle *btrfs_start_trans_remove_block_group(
1134                 struct btrfs_fs_info *fs_info, const u64 chunk_offset)
1135 {
1136         struct extent_map_tree *em_tree = &fs_info->mapping_tree;
1137         struct extent_map *em;
1138         struct map_lookup *map;
1139         unsigned int num_items;
1140 
1141         read_lock(&em_tree->lock);
1142         em = lookup_extent_mapping(em_tree, chunk_offset, 1);
1143         read_unlock(&em_tree->lock);
1144         ASSERT(em && em->start == chunk_offset);
1145 
1146         /*
1147          * We need to reserve 3 + N units from the metadata space info in order
1148          * to remove a block group (done at btrfs_remove_chunk() and at
1149          * btrfs_remove_block_group()), which are used for:
1150          *
1151          * 1 unit for adding the free space inode's orphan (located in the tree
1152          * of tree roots).
1153          * 1 unit for deleting the block group item (located in the extent
1154          * tree).
1155          * 1 unit for deleting the free space item (located in tree of tree
1156          * roots).
1157          * N units for deleting N device extent items corresponding to each
1158          * stripe (located in the device tree).
1159          *
1160          * In order to remove a block group we also need to reserve units in the
1161          * system space info in order to update the chunk tree (update one or
1162          * more device items and remove one chunk item), but this is done at
1163          * btrfs_remove_chunk() through a call to check_system_chunk().
1164          */
1165         map = em->map_lookup;
1166         num_items = 3 + map->num_stripes;
1167         free_extent_map(em);
1168 
1169         return btrfs_start_transaction_fallback_global_rsv(fs_info->extent_root,
1170                                                            num_items, 1);
1171 }
1172 
1173 /*
1174  * Mark block group @cache read-only, so later write won't happen to block
1175  * group @cache.
1176  *
1177  * If @force is not set, this function will only mark the block group readonly
1178  * if we have enough free space (1M) in other metadata/system block groups.
 1179  * If @force is set, this function will mark the block group readonly
1180  * without checking free space.
1181  *
1182  * NOTE: This function doesn't care if other block groups can contain all the
1183  * data in this block group. That check should be done by relocation routine,
1184  * not this function.
1185  */
1186 static int inc_block_group_ro(struct btrfs_block_group_cache *cache, int force)
1187 {
1188         struct btrfs_space_info *sinfo = cache->space_info;
1189         u64 num_bytes;
1190         u64 sinfo_used;
1191         u64 min_allocable_bytes;
1192         int ret = -ENOSPC;
1193 
1194         /*
1195          * We need some metadata space and system metadata space for
 1196          * allocating chunks in some corner cases, so keep some space free
 1197          * unless we are forced to set the group read-only.
1198          */
1199         if ((sinfo->flags &
1200              (BTRFS_BLOCK_GROUP_SYSTEM | BTRFS_BLOCK_GROUP_METADATA)) &&
1201             !force)
1202                 min_allocable_bytes = SZ_1M;
1203         else
1204                 min_allocable_bytes = 0;
1205 
1206         spin_lock(&sinfo->lock);
1207         spin_lock(&cache->lock);
1208 
1209         if (cache->ro) {
1210                 cache->ro++;
1211                 ret = 0;
1212                 goto out;
1213         }
1214 
1215         num_bytes = cache->key.offset - cache->reserved - cache->pinned -
1216                     cache->bytes_super - btrfs_block_group_used(&cache->item);
1217         sinfo_used = btrfs_space_info_used(sinfo, true);
1218 
1219         /*
 1220          * sinfo_used + num_bytes should always be <= sinfo->total_bytes.
1221          *
1222          * Here we make sure if we mark this bg RO, we still have enough
1223          * free space as buffer (if min_allocable_bytes is not 0).
1224          */
1225         if (sinfo_used + num_bytes + min_allocable_bytes <=
1226             sinfo->total_bytes) {
1227                 sinfo->bytes_readonly += num_bytes;
1228                 cache->ro++;
1229                 list_add_tail(&cache->ro_list, &sinfo->ro_bgs);
1230                 ret = 0;
1231         }
1232 out:
1233         spin_unlock(&cache->lock);
1234         spin_unlock(&sinfo->lock);
1235         if (ret == -ENOSPC && btrfs_test_opt(cache->fs_info, ENOSPC_DEBUG)) {
1236                 btrfs_info(cache->fs_info,
1237                         "unable to make block group %llu ro",
1238                         cache->key.objectid);
1239                 btrfs_info(cache->fs_info,
1240                         "sinfo_used=%llu bg_num_bytes=%llu min_allocable=%llu",
1241                         sinfo_used, num_bytes, min_allocable_bytes);
1242                 btrfs_dump_space_info(cache->fs_info, cache->space_info, 0, 0);
1243         }
1244         return ret;
1245 }
1246 
1247 /*
1248  * Process the unused_bgs list and remove any that don't have any allocated
1249  * space inside of them.
1250  */
1251 void btrfs_delete_unused_bgs(struct btrfs_fs_info *fs_info)
1252 {
1253         struct btrfs_block_group_cache *block_group;
1254         struct btrfs_space_info *space_info;
1255         struct btrfs_trans_handle *trans;
1256         int ret = 0;
1257 
1258         if (!test_bit(BTRFS_FS_OPEN, &fs_info->flags))
1259                 return;
1260 
1261         spin_lock(&fs_info->unused_bgs_lock);
1262         while (!list_empty(&fs_info->unused_bgs)) {
1263                 u64 start, end;
1264                 int trimming;
1265 
1266                 block_group = list_first_entry(&fs_info->unused_bgs,
1267                                                struct btrfs_block_group_cache,
1268                                                bg_list);
1269                 list_del_init(&block_group->bg_list);
1270 
1271                 space_info = block_group->space_info;
1272 
1273                 if (ret || btrfs_mixed_space_info(space_info)) {
1274                         btrfs_put_block_group(block_group);
1275                         continue;
1276                 }
1277                 spin_unlock(&fs_info->unused_bgs_lock);
1278 
1279                 mutex_lock(&fs_info->delete_unused_bgs_mutex);
1280 
1281                 /* Don't want to race with allocators so take the groups_sem */
1282                 down_write(&space_info->groups_sem);
1283                 spin_lock(&block_group->lock);
1284                 if (block_group->reserved || block_group->pinned ||
1285                     btrfs_block_group_used(&block_group->item) ||
1286                     block_group->ro ||
1287                     list_is_singular(&block_group->list)) {
1288                         /*
1289                          * We want to bail if we made new allocations or have
1290                          * outstanding allocations in this block group.  We do
1291                          * the ro check in case balance is currently acting on
1292                          * this block group.
1293                          */
1294                         trace_btrfs_skip_unused_block_group(block_group);
1295                         spin_unlock(&block_group->lock);
1296                         up_write(&space_info->groups_sem);
1297                         goto next;
1298                 }
1299                 spin_unlock(&block_group->lock);
1300 
1301                 /* We don't want to force the issue, only flip if it's ok. */
1302                 ret = inc_block_group_ro(block_group, 0);
1303                 up_write(&space_info->groups_sem);
1304                 if (ret < 0) {
1305                         ret = 0;
1306                         goto next;
1307                 }
1308 
1309                 /*
1310          * We want to do this before anything else so that we can recover
1311          * properly if we fail to join the transaction.
1312                  */
1313                 trans = btrfs_start_trans_remove_block_group(fs_info,
1314                                                      block_group->key.objectid);
1315                 if (IS_ERR(trans)) {
1316                         btrfs_dec_block_group_ro(block_group);
1317                         ret = PTR_ERR(trans);
1318                         goto next;
1319                 }
1320 
1321                 /*
1322                  * We could have pending pinned extents for this block group,
1323                  * just delete them, we don't care about them anymore.
1324                  */
1325                 start = block_group->key.objectid;
1326                 end = start + block_group->key.offset - 1;
1327                 /*
1328                  * Hold the unused_bg_unpin_mutex lock to avoid racing with
1329                  * btrfs_finish_extent_commit(). If we are at transaction N,
1330                  * another task might be running finish_extent_commit() for the
1331                  * previous transaction N - 1, and have seen a range belonging
1332                  * to the block group in freed_extents[] before we were able to
1333                  * clear the whole block group range from freed_extents[]. This
1334          * means that task might look up the block group after we have
1335                  * unpinned it from freed_extents[] and removed it, leading to
1336                  * a BUG_ON() at btrfs_unpin_extent_range().
1337                  */
1338                 mutex_lock(&fs_info->unused_bg_unpin_mutex);
1339                 ret = clear_extent_bits(&fs_info->freed_extents[0], start, end,
1340                                   EXTENT_DIRTY);
1341                 if (ret) {
1342                         mutex_unlock(&fs_info->unused_bg_unpin_mutex);
1343                         btrfs_dec_block_group_ro(block_group);
1344                         goto end_trans;
1345                 }
1346                 ret = clear_extent_bits(&fs_info->freed_extents[1], start, end,
1347                                   EXTENT_DIRTY);
1348                 if (ret) {
1349                         mutex_unlock(&fs_info->unused_bg_unpin_mutex);
1350                         btrfs_dec_block_group_ro(block_group);
1351                         goto end_trans;
1352                 }
1353                 mutex_unlock(&fs_info->unused_bg_unpin_mutex);
1354 
1355                 /* Reset pinned so btrfs_put_block_group doesn't complain */
1356                 spin_lock(&space_info->lock);
1357                 spin_lock(&block_group->lock);
1358 
1359                 btrfs_space_info_update_bytes_pinned(fs_info, space_info,
1360                                                      -block_group->pinned);
1361                 space_info->bytes_readonly += block_group->pinned;
1362                 percpu_counter_add_batch(&space_info->total_bytes_pinned,
1363                                    -block_group->pinned,
1364                                    BTRFS_TOTAL_BYTES_PINNED_BATCH);
1365                 block_group->pinned = 0;
1366 
1367                 spin_unlock(&block_group->lock);
1368                 spin_unlock(&space_info->lock);
1369 
1370                 /* DISCARD can flip during remount */
1371                 trimming = btrfs_test_opt(fs_info, DISCARD);
1372 
1373                 /* Implicit trim during transaction commit. */
1374                 if (trimming)
1375                         btrfs_get_block_group_trimming(block_group);
1376 
1377                 /*
1378                  * btrfs_remove_chunk() will abort the transaction if things go
1379                  * horribly wrong.
1380                  */
1381                 ret = btrfs_remove_chunk(trans, block_group->key.objectid);
1382 
1383                 if (ret) {
1384                         if (trimming)
1385                                 btrfs_put_block_group_trimming(block_group);
1386                         goto end_trans;
1387                 }
1388 
1389                 /*
1390                  * If we're not mounted with -odiscard, we can just forget
1391                  * about this block group. Otherwise we'll need to wait
1392                  * until transaction commit to do the actual discard.
1393                  */
1394                 if (trimming) {
1395                         spin_lock(&fs_info->unused_bgs_lock);
1396                         /*
1397                          * A concurrent scrub might have added us to the list
1398                          * fs_info->unused_bgs, so use a list_move operation
1399                          * to add the block group to the deleted_bgs list.
1400                          */
1401                         list_move(&block_group->bg_list,
1402                                   &trans->transaction->deleted_bgs);
1403                         spin_unlock(&fs_info->unused_bgs_lock);
1404                         btrfs_get_block_group(block_group);
1405                 }
1406 end_trans:
1407                 btrfs_end_transaction(trans);
1408 next:
1409                 mutex_unlock(&fs_info->delete_unused_bgs_mutex);
1410                 btrfs_put_block_group(block_group);
1411                 spin_lock(&fs_info->unused_bgs_lock);
1412         }
1413         spin_unlock(&fs_info->unused_bgs_lock);
1414 }
1415 
1416 void btrfs_mark_bg_unused(struct btrfs_block_group_cache *bg)
1417 {
1418         struct btrfs_fs_info *fs_info = bg->fs_info;
1419 
1420         spin_lock(&fs_info->unused_bgs_lock);
1421         if (list_empty(&bg->bg_list)) {
1422                 btrfs_get_block_group(bg);
1423                 trace_btrfs_add_unused_block_group(bg);
1424                 list_add_tail(&bg->bg_list, &fs_info->unused_bgs);
1425         }
1426         spin_unlock(&fs_info->unused_bgs_lock);
1427 }
1428 
1429 static int find_first_block_group(struct btrfs_fs_info *fs_info,
1430                                   struct btrfs_path *path,
1431                                   struct btrfs_key *key)
1432 {
1433         struct btrfs_root *root = fs_info->extent_root;
1434         int ret = 0;
1435         struct btrfs_key found_key;
1436         struct extent_buffer *leaf;
1437         struct btrfs_block_group_item bg;
1438         u64 flags;
1439         int slot;
1440 
1441         ret = btrfs_search_slot(NULL, root, key, path, 0, 0);
1442         if (ret < 0)
1443                 goto out;
1444 
1445         while (1) {
1446                 slot = path->slots[0];
1447                 leaf = path->nodes[0];
1448                 if (slot >= btrfs_header_nritems(leaf)) {
1449                         ret = btrfs_next_leaf(root, path);
1450                         if (ret == 0)
1451                                 continue;
1452                         if (ret < 0)
1453                                 goto out;
1454                         break;
1455                 }
1456                 btrfs_item_key_to_cpu(leaf, &found_key, slot);
1457 
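                     /*
                      * Once the next block group item is found, cross-check it
                      * against its chunk mapping: start, length and type flags
                      * must all match, otherwise the fs image is inconsistent.
                      */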
1458                 if (found_key.objectid >= key->objectid &&
1459                     found_key.type == BTRFS_BLOCK_GROUP_ITEM_KEY) {
1460                         struct extent_map_tree *em_tree;
1461                         struct extent_map *em;
1462 
1463                         em_tree = &root->fs_info->mapping_tree;
1464                         read_lock(&em_tree->lock);
1465                         em = lookup_extent_mapping(em_tree, found_key.objectid,
1466                                                    found_key.offset);
1467                         read_unlock(&em_tree->lock);
1468                         if (!em) {
1469                                 btrfs_err(fs_info,
1470                         "logical %llu len %llu found bg but no related chunk",
1471                                           found_key.objectid, found_key.offset);
1472                                 ret = -ENOENT;
1473                         } else if (em->start != found_key.objectid ||
1474                                    em->len != found_key.offset) {
1475                                 btrfs_err(fs_info,
1476                 "block group %llu len %llu mismatch with chunk %llu len %llu",
1477                                           found_key.objectid, found_key.offset,
1478                                           em->start, em->len);
1479                                 ret = -EUCLEAN;
1480                         } else {
1481                                 read_extent_buffer(leaf, &bg,
1482                                         btrfs_item_ptr_offset(leaf, slot),
1483                                         sizeof(bg));
1484                                 flags = btrfs_block_group_flags(&bg) &
1485                                         BTRFS_BLOCK_GROUP_TYPE_MASK;
1486 
1487                                 if (flags != (em->map_lookup->type &
1488                                               BTRFS_BLOCK_GROUP_TYPE_MASK)) {
1489                                         btrfs_err(fs_info,
1490 "block group %llu len %llu type flags 0x%llx mismatch with chunk type flags 0x%llx",
1491                                                 found_key.objectid,
1492                                                 found_key.offset, flags,
1493                                                 (BTRFS_BLOCK_GROUP_TYPE_MASK &
1494                                                  em->map_lookup->type));
1495                                         ret = -EUCLEAN;
1496                                 } else {
1497                                         ret = 0;
1498                                 }
1499                         }
1500                         free_extent_map(em);
1501                         goto out;
1502                 }
1503                 path->slots[0]++;
1504         }
1505 out:
1506         return ret;
1507 }
1508 
1509 static void set_avail_alloc_bits(struct btrfs_fs_info *fs_info, u64 flags)
1510 {
1511         u64 extra_flags = chunk_to_extended(flags) &
1512                                 BTRFS_EXTENDED_PROFILE_MASK;
1513 
1514         write_seqlock(&fs_info->profiles_lock);
1515         if (flags & BTRFS_BLOCK_GROUP_DATA)
1516                 fs_info->avail_data_alloc_bits |= extra_flags;
1517         if (flags & BTRFS_BLOCK_GROUP_METADATA)
1518                 fs_info->avail_metadata_alloc_bits |= extra_flags;
1519         if (flags & BTRFS_BLOCK_GROUP_SYSTEM)
1520                 fs_info->avail_system_alloc_bits |= extra_flags;
1521         write_sequnlock(&fs_info->profiles_lock);
1522 }
1523 
1524 static int exclude_super_stripes(struct btrfs_block_group_cache *cache)
1525 {
1526         struct btrfs_fs_info *fs_info = cache->fs_info;
1527         u64 bytenr;
1528         u64 *logical;
1529         int stripe_len;
1530         int i, nr, ret;
1531 
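             /*
              * A block group that starts below the primary superblock offset
              * (BTRFS_SUPER_INFO_OFFSET, 64KiB): treat the range from the
              * group's start up to that offset as super bytes and exclude it
              * from free space.
              */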
1532         if (cache->key.objectid < BTRFS_SUPER_INFO_OFFSET) {
1533                 stripe_len = BTRFS_SUPER_INFO_OFFSET - cache->key.objectid;
1534                 cache->bytes_super += stripe_len;
1535                 ret = btrfs_add_excluded_extent(fs_info, cache->key.objectid,
1536                                                 stripe_len);
1537                 if (ret)
1538                         return ret;
1539         }
1540 
1541         for (i = 0; i < BTRFS_SUPER_MIRROR_MAX; i++) {
1542                 bytenr = btrfs_sb_offset(i);
1543                 ret = btrfs_rmap_block(fs_info, cache->key.objectid,
1544                                        bytenr, &logical, &nr, &stripe_len);
1545                 if (ret)
1546                         return ret;
1547 
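                     /*
                      * logical[] now holds the logical addresses inside this
                      * block group's chunk that map to superblock mirror i (if
                      * any); clamp each stripe to the block group range and
                      * exclude the overlapping bytes from free space.
                      */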
1548                 while (nr--) {
1549                         u64 start, len;
1550 
1551                         if (logical[nr] > cache->key.objectid +
1552                             cache->key.offset)
1553                                 continue;
1554 
1555                         if (logical[nr] + stripe_len <= cache->key.objectid)
1556                                 continue;
1557 
1558                         start = logical[nr];
1559                         if (start < cache->key.objectid) {
1560                                 start = cache->key.objectid;
1561                                 len = (logical[nr] + stripe_len) - start;
1562                         } else {
1563                                 len = min_t(u64, stripe_len,
1564                                             cache->key.objectid +
1565                                             cache->key.offset - start);
1566                         }
1567 
1568                         cache->bytes_super += len;
1569                         ret = btrfs_add_excluded_extent(fs_info, start, len);
1570                         if (ret) {
1571                                 kfree(logical);
1572                                 return ret;
1573                         }
1574                 }
1575 
1576                 kfree(logical);
1577         }
1578         return 0;
1579 }
1580 
1581 static void link_block_group(struct btrfs_block_group_cache *cache)
1582 {
1583         struct btrfs_space_info *space_info = cache->space_info;
1584         int index = btrfs_bg_flags_to_raid_index(cache->flags);
1585         bool first = false;
1586 
1587         down_write(&space_info->groups_sem);
1588         if (list_empty(&space_info->block_groups[index]))
1589                 first = true;
1590         list_add_tail(&cache->list, &space_info->block_groups[index]);
1591         up_write(&space_info->groups_sem);
1592 
1593         if (first)
1594                 btrfs_sysfs_add_block_group_type(cache);
1595 }
1596 
1597 static struct btrfs_block_group_cache *btrfs_create_block_group_cache(
1598                 struct btrfs_fs_info *fs_info, u64 start, u64 size)
1599 {
1600         struct btrfs_block_group_cache *cache;
1601 
1602         cache = kzalloc(sizeof(*cache), GFP_NOFS);
1603         if (!cache)
1604                 return NULL;
1605 
1606         cache->free_space_ctl = kzalloc(sizeof(*cache->free_space_ctl),
1607                                         GFP_NOFS);
1608         if (!cache->free_space_ctl) {
1609                 kfree(cache);
1610                 return NULL;
1611         }
1612 
1613         cache->key.objectid = start;
1614         cache->key.offset = size;
1615         cache->key.type = BTRFS_BLOCK_GROUP_ITEM_KEY;
1616 
1617         cache->fs_info = fs_info;
1618         cache->full_stripe_len = btrfs_full_stripe_len(fs_info, start);
1619         set_free_space_tree_thresholds(cache);
1620 
1621         atomic_set(&cache->count, 1);
1622         spin_lock_init(&cache->lock);
1623         init_rwsem(&cache->data_rwsem);
1624         INIT_LIST_HEAD(&cache->list);
1625         INIT_LIST_HEAD(&cache->cluster_list);
1626         INIT_LIST_HEAD(&cache->bg_list);
1627         INIT_LIST_HEAD(&cache->ro_list);
1628         INIT_LIST_HEAD(&cache->dirty_list);
1629         INIT_LIST_HEAD(&cache->io_list);
1630         btrfs_init_free_space_ctl(cache);
1631         atomic_set(&cache->trimming, 0);
1632         mutex_init(&cache->free_space_lock);
1633         btrfs_init_full_stripe_locks_tree(&cache->full_stripe_locks_root);
1634 
1635         return cache;
1636 }
1637 
1638 /*
1639  * Iterate all chunks and verify that each of them has the corresponding block
1640  * group
1641  */
1642 static int check_chunk_block_group_mappings(struct btrfs_fs_info *fs_info)
1643 {
1644         struct extent_map_tree *map_tree = &fs_info->mapping_tree;
1645         struct extent_map *em;
1646         struct btrfs_block_group_cache *bg;
1647         u64 start = 0;
1648         int ret = 0;
1649 
1650         while (1) {
1651                 read_lock(&map_tree->lock);
1652                 /*
1653                  * lookup_extent_mapping will return the first extent map
1654                  * intersecting the range, so setting @len to 1 is enough to
1655                  * get the first chunk.
1656                  */
1657                 em = lookup_extent_mapping(map_tree, start, 1);
1658                 read_unlock(&map_tree->lock);
1659                 if (!em)
1660                         break;
1661 
1662                 bg = btrfs_lookup_block_group(fs_info, em->start);
1663                 if (!bg) {
1664                         btrfs_err(fs_info,
1665         "chunk start=%llu len=%llu doesn't have corresponding block group",
1666                                      em->start, em->len);
1667                         ret = -EUCLEAN;
1668                         free_extent_map(em);
1669                         break;
1670                 }
1671                 if (bg->key.objectid != em->start ||
1672                     bg->key.offset != em->len ||
1673                     (bg->flags & BTRFS_BLOCK_GROUP_TYPE_MASK) !=
1674                     (em->map_lookup->type & BTRFS_BLOCK_GROUP_TYPE_MASK)) {
1675                         btrfs_err(fs_info,
1676 "chunk start=%llu len=%llu flags=0x%llx doesn't match block group start=%llu len=%llu flags=0x%llx",
1677                                 em->start, em->len,
1678                                 em->map_lookup->type & BTRFS_BLOCK_GROUP_TYPE_MASK,
1679                                 bg->key.objectid, bg->key.offset,
1680                                 bg->flags & BTRFS_BLOCK_GROUP_TYPE_MASK);
1681                         ret = -EUCLEAN;
1682                         free_extent_map(em);
1683                         btrfs_put_block_group(bg);
1684                         break;
1685                 }
1686                 start = em->start + em->len;
1687                 free_extent_map(em);
1688                 btrfs_put_block_group(bg);
1689         }
1690         return ret;
1691 }
1692 
1693 int btrfs_read_block_groups(struct btrfs_fs_info *info)
1694 {
1695         struct btrfs_path *path;
1696         int ret;
1697         struct btrfs_block_group_cache *cache;
1698         struct btrfs_space_info *space_info;
1699         struct btrfs_key key;
1700         struct btrfs_key found_key;
1701         struct extent_buffer *leaf;
1702         int need_clear = 0;
1703         u64 cache_gen;
1704         u64 feature;
1705         int mixed;
1706 
1707         feature = btrfs_super_incompat_flags(info->super_copy);
1708         mixed = !!(feature & BTRFS_FEATURE_INCOMPAT_MIXED_GROUPS);
1709 
1710         key.objectid = 0;
1711         key.offset = 0;
1712         key.type = BTRFS_BLOCK_GROUP_ITEM_KEY;
1713         path = btrfs_alloc_path();
1714         if (!path)
1715                 return -ENOMEM;
1716         path->reada = READA_FORWARD;
1717 
1718         cache_gen = btrfs_super_cache_generation(info->super_copy);
1719         if (btrfs_test_opt(info, SPACE_CACHE) &&
1720             btrfs_super_generation(info->super_copy) != cache_gen)
1721                 need_clear = 1;
1722         if (btrfs_test_opt(info, CLEAR_CACHE))
1723                 need_clear = 1;
1724 
1725         while (1) {
1726                 ret = find_first_block_group(info, path, &key);
1727                 if (ret > 0)
1728                         break;
1729                 if (ret != 0)
1730                         goto error;
1731 
1732                 leaf = path->nodes[0];
1733                 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
1734 
1735                 cache = btrfs_create_block_group_cache(info, found_key.objectid,
1736                                                        found_key.offset);
1737                 if (!cache) {
1738                         ret = -ENOMEM;
1739                         goto error;
1740                 }
1741 
1742                 if (need_clear) {
1743                         /*
1744                          * When we mount with an old space cache, we need to
1745                          * set BTRFS_DC_CLEAR and set the dirty flag.
1746                          *
1747                          * a) Setting 'BTRFS_DC_CLEAR' makes sure that we
1748                          *    truncate the old free space cache inode and
1749                          *    setup a new one.
1750                          * b) Setting 'dirty flag' makes sure that we flush
1751                          *    the new space cache info onto disk.
1752                          */
1753                         if (btrfs_test_opt(info, SPACE_CACHE))
1754                                 cache->disk_cache_state = BTRFS_DC_CLEAR;
1755                 }
1756 
1757                 read_extent_buffer(leaf, &cache->item,
1758                                    btrfs_item_ptr_offset(leaf, path->slots[0]),
1759                                    sizeof(cache->item));
1760                 cache->flags = btrfs_block_group_flags(&cache->item);
1761                 if (!mixed &&
1762                     ((cache->flags & BTRFS_BLOCK_GROUP_METADATA) &&
1763                     (cache->flags & BTRFS_BLOCK_GROUP_DATA))) {
1764                         btrfs_err(info,
1765 "bg %llu is a mixed block group but filesystem hasn't enabled mixed block groups",
1766                                   cache->key.objectid);
1767                         btrfs_put_block_group(cache);
1768                         ret = -EINVAL;
1769                         goto error;
1770                 }
1771 
1772                 key.objectid = found_key.objectid + found_key.offset;
1773                 btrfs_release_path(path);
1774 
1775                 /*
1776                  * We need to exclude the super stripes now so that the space
1777                  * info has super bytes accounted for, otherwise we'll think
1778                  * we have more space than we actually do.
1779                  */
1780                 ret = exclude_super_stripes(cache);
1781                 if (ret) {
1782                         /*
1783                          * We may have excluded something, so call this just in
1784                          * case.
1785                          */
1786                         btrfs_free_excluded_extents(cache);
1787                         btrfs_put_block_group(cache);
1788                         goto error;
1789                 }
1790 
1791                 /*
1792                  * Check for two cases: either we are full, and therefore
1793                  * don't need to bother with the caching work since we won't
1794                  * find any space, or we are empty, and we can just add all
1795                  * the space in and be done with it.  This saves us _a_lot_ of
1796                  * time, particularly in the full case.
1797                  */
1798                 if (found_key.offset == btrfs_block_group_used(&cache->item)) {
1799                         cache->last_byte_to_unpin = (u64)-1;
1800                         cache->cached = BTRFS_CACHE_FINISHED;
1801                         btrfs_free_excluded_extents(cache);
1802                 } else if (btrfs_block_group_used(&cache->item) == 0) {
1803                         cache->last_byte_to_unpin = (u64)-1;
1804                         cache->cached = BTRFS_CACHE_FINISHED;
1805                         add_new_free_space(cache, found_key.objectid,
1806                                            found_key.objectid +
1807                                            found_key.offset);
1808                         btrfs_free_excluded_extents(cache);
1809                 }
1810 
1811                 ret = btrfs_add_block_group_cache(info, cache);
1812                 if (ret) {
1813                         btrfs_remove_free_space_cache(cache);
1814                         btrfs_put_block_group(cache);
1815                         goto error;
1816                 }
1817 
1818                 trace_btrfs_add_block_group(info, cache, 0);
1819                 btrfs_update_space_info(info, cache->flags, found_key.offset,
1820                                         btrfs_block_group_used(&cache->item),
1821                                         cache->bytes_super, &space_info);
1822 
1823                 cache->space_info = space_info;
1824 
1825                 link_block_group(cache);
1826 
1827                 set_avail_alloc_bits(info, cache->flags);
1828                 if (btrfs_chunk_readonly(info, cache->key.objectid)) {
1829                         inc_block_group_ro(cache, 1);
1830                 } else if (btrfs_block_group_used(&cache->item) == 0) {
1831                         ASSERT(list_empty(&cache->bg_list));
1832                         btrfs_mark_bg_unused(cache);
1833                 }
1834         }
1835 
1836         rcu_read_lock();
1837         list_for_each_entry_rcu(space_info, &info->space_info, list) {
1838                 if (!(btrfs_get_alloc_profile(info, space_info->flags) &
1839                       (BTRFS_BLOCK_GROUP_RAID10 |
1840                        BTRFS_BLOCK_GROUP_RAID1_MASK |
1841                        BTRFS_BLOCK_GROUP_RAID56_MASK |
1842                        BTRFS_BLOCK_GROUP_DUP)))
1843                         continue;
1844                 /*
1845                  * Avoid allocating from un-mirrored block group if there are
1846                  * mirrored block groups.
1847                  */
1848                 list_for_each_entry(cache,
1849                                 &space_info->block_groups[BTRFS_RAID_RAID0],
1850                                 list)
1851                         inc_block_group_ro(cache, 1);
1852                 list_for_each_entry(cache,
1853                                 &space_info->block_groups[BTRFS_RAID_SINGLE],
1854                                 list)
1855                         inc_block_group_ro(cache, 1);
1856         }
1857         rcu_read_unlock();
1858 
1859         btrfs_init_global_block_rsv(info);
1860         ret = check_chunk_block_group_mappings(info);
1861 error:
1862         btrfs_free_path(path);
1863         return ret;
1864 }
1865 
1866 void btrfs_create_pending_block_groups(struct btrfs_trans_handle *trans)
1867 {
1868         struct btrfs_fs_info *fs_info = trans->fs_info;
1869         struct btrfs_block_group_cache *block_group;
1870         struct btrfs_root *extent_root = fs_info->extent_root;
1871         struct btrfs_block_group_item item;
1872         struct btrfs_key key;
1873         int ret = 0;
1874 
1875         if (!trans->can_flush_pending_bgs)
1876                 return;
1877 
1878         while (!list_empty(&trans->new_bgs)) {
1879                 block_group = list_first_entry(&trans->new_bgs,
1880                                                struct btrfs_block_group_cache,
1881                                                bg_list);
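                     /*
                      * If a previous iteration failed we have already aborted
                      * the transaction; keep draining the list so the delayed
                      * refs reservations are released, but skip the on-disk
                      * work.
                      */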
1882                 if (ret)
1883                         goto next;
1884 
1885                 spin_lock(&block_group->lock);
1886                 memcpy(&item, &block_group->item, sizeof(item));
1887                 memcpy(&key, &block_group->key, sizeof(key));
1888                 spin_unlock(&block_group->lock);
1889 
1890                 ret = btrfs_insert_item(trans, extent_root, &key, &item,
1891                                         sizeof(item));
1892                 if (ret)
1893                         btrfs_abort_transaction(trans, ret);
1894                 ret = btrfs_finish_chunk_alloc(trans, key.objectid, key.offset);
1895                 if (ret)
1896                         btrfs_abort_transaction(trans, ret);
1897                 add_block_group_free_space(trans, block_group);
1898                 /* Already aborted the transaction if it failed. */
1899 next:
1900                 btrfs_delayed_refs_rsv_release(fs_info, 1);
1901                 list_del_init(&block_group->bg_list);
1902         }
1903         btrfs_trans_release_chunk_metadata(trans);
1904 }
1905 
1906 int btrfs_make_block_group(struct btrfs_trans_handle *trans, u64 bytes_used,
1907                            u64 type, u64 chunk_offset, u64 size)
1908 {
1909         struct btrfs_fs_info *fs_info = trans->fs_info;
1910         struct btrfs_block_group_cache *cache;
1911         int ret;
1912 
1913         btrfs_set_log_full_commit(trans);
1914 
1915         cache = btrfs_create_block_group_cache(fs_info, chunk_offset, size);
1916         if (!cache)
1917                 return -ENOMEM;
1918 
1919         btrfs_set_block_group_used(&cache->item, bytes_used);
1920         btrfs_set_block_group_chunk_objectid(&cache->item,
1921                                              BTRFS_FIRST_CHUNK_TREE_OBJECTID);
1922         btrfs_set_block_group_flags(&cache->item, type);
1923 
1924         cache->flags = type;
1925         cache->last_byte_to_unpin = (u64)-1;
1926         cache->cached = BTRFS_CACHE_FINISHED;
1927         cache->needs_free_space = 1;
1928         ret = exclude_super_stripes(cache);
1929         if (ret) {
1930                 /* We may have excluded something, so call this just in case */
1931                 btrfs_free_excluded_extents(cache);
1932                 btrfs_put_block_group(cache);
1933                 return ret;
1934         }
1935 
1936         add_new_free_space(cache, chunk_offset, chunk_offset + size);
1937 
1938         btrfs_free_excluded_extents(cache);
1939 
1940 #ifdef CONFIG_BTRFS_DEBUG
1941         if (btrfs_should_fragment_free_space(cache)) {
1942                 u64 new_bytes_used = size - bytes_used;
1943 
1944                 bytes_used += new_bytes_used >> 1;
1945                 fragment_free_space(cache);
1946         }
1947 #endif
1948         /*
1949          * Ensure the corresponding space_info object is created and
1950          * assigned to our block group. We want our bg to be added to the rbtree
1951          * with its ->space_info set.
1952          */
1953         cache->space_info = btrfs_find_space_info(fs_info, cache->flags);
1954         ASSERT(cache->space_info);
1955 
1956         ret = btrfs_add_block_group_cache(fs_info, cache);
1957         if (ret) {
1958                 btrfs_remove_free_space_cache(cache);
1959                 btrfs_put_block_group(cache);
1960                 return ret;
1961         }
1962 
1963         /*
1964          * Now that our block group has its ->space_info set and is inserted in
1965          * the rbtree, update the space info's counters.
1966          */
1967         trace_btrfs_add_block_group(fs_info, cache, 1);
1968         btrfs_update_space_info(fs_info, cache->flags, size, bytes_used,
1969                                 cache->bytes_super, &cache->space_info);
1970         btrfs_update_global_block_rsv(fs_info);
1971 
1972         link_block_group(cache);
1973 
1974         list_add_tail(&cache->bg_list, &trans->new_bgs);
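             /*
              * The block group item itself is inserted later, from
              * btrfs_create_pending_block_groups(); account one delayed ref
              * update now so the delayed refs rsv covers that insertion (it
              * is released again when the pending list is processed).
              */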
1975         trans->delayed_ref_updates++;
1976         btrfs_update_delayed_refs_rsv(trans);
1977 
1978         set_avail_alloc_bits(fs_info, type);
1979         return 0;
1980 }
1981 
1982 static u64 update_block_group_flags(struct btrfs_fs_info *fs_info, u64 flags)
1983 {
1984         u64 num_devices;
1985         u64 stripped;
1986 
1987         /*
1988          * If restripe for this chunk_type is on, pick the target profile and
1989          * return, otherwise do the usual balance.
1990          */
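             /*
              * Example of the fallback below (a sketch, not exhaustive): on a
              * single-device filesystem RAID1/RAID10 chunks degrade to DUP and
              * RAID0 to SINGLE, while with multiple writable devices a DUP
              * chunk is upgraded to RAID1.
              */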
1991         stripped = get_restripe_target(fs_info, flags);
1992         if (stripped)
1993                 return extended_to_chunk(stripped);
1994 
1995         num_devices = fs_info->fs_devices->rw_devices;
1996 
1997         stripped = BTRFS_BLOCK_GROUP_RAID0 | BTRFS_BLOCK_GROUP_RAID56_MASK |
1998                 BTRFS_BLOCK_GROUP_RAID1_MASK | BTRFS_BLOCK_GROUP_RAID10;
1999 
2000         if (num_devices == 1) {
2001                 stripped |= BTRFS_BLOCK_GROUP_DUP;
2002                 stripped = flags & ~stripped;
2003 
2004                 /* turn raid0 into single device chunks */
2005                 if (flags & BTRFS_BLOCK_GROUP_RAID0)
2006                         return stripped;
2007 
2008                 /* turn mirroring into duplication */
2009                 if (flags & (BTRFS_BLOCK_GROUP_RAID1_MASK |
2010                              BTRFS_BLOCK_GROUP_RAID10))
2011                         return stripped | BTRFS_BLOCK_GROUP_DUP;
2012         } else {
2013                 /* they already had raid on here, just return */
2014                 if (flags & stripped)
2015                         return flags;
2016 
2017                 stripped |= BTRFS_BLOCK_GROUP_DUP;
2018                 stripped = flags & ~stripped;
2019 
2020                 /* switch duplicated blocks with raid1 */
2021                 if (flags & BTRFS_BLOCK_GROUP_DUP)
2022                         return stripped | BTRFS_BLOCK_GROUP_RAID1;
2023 
2024                 /* this is drive concat, leave it alone */
2025         }
2026 
2027         return flags;
2028 }
2029 
2030 int btrfs_inc_block_group_ro(struct btrfs_block_group_cache *cache)
2031 
2032 {
2033         struct btrfs_fs_info *fs_info = cache->fs_info;
2034         struct btrfs_trans_handle *trans;
2035         u64 alloc_flags;
2036         int ret;
2037 
2038 again:
2039         trans = btrfs_join_transaction(fs_info->extent_root);
2040         if (IS_ERR(trans))
2041                 return PTR_ERR(trans);
2042 
2043         /*
2044          * We're not allowed to set block groups read-only after writeout of
2045          * the dirty block group caches has started.  If it has already started,
2046          * back off and let this transaction commit.
2047          */
2048         mutex_lock(&fs_info->ro_block_group_mutex);
2049         if (test_bit(BTRFS_TRANS_DIRTY_BG_RUN, &trans->transaction->flags)) {
2050                 u64 transid = trans->transid;
2051 
2052                 mutex_unlock(&fs_info->ro_block_group_mutex);
2053                 btrfs_end_transaction(trans);
2054 
2055                 ret = btrfs_wait_for_commit(fs_info, transid);
2056                 if (ret)
2057                         return ret;
2058                 goto again;
2059         }
2060 
2061         /*
2062          * if we are changing raid levels, try to allocate a corresponding
2063          * block group with the new raid level.
2064          */
2065         alloc_flags = update_block_group_flags(fs_info, cache->flags);
2066         if (alloc_flags != cache->flags) {
2067                 ret = btrfs_chunk_alloc(trans, alloc_flags, CHUNK_ALLOC_FORCE);
2068                 /*
2069                  * ENOSPC is allowed here, we may have enough space
2070                  * already allocated at the new raid level to
2071                  * carry on
2072                  */
2073                 if (ret == -ENOSPC)
2074                         ret = 0;
2075                 if (ret < 0)
2076                         goto out;
2077         }
2078 
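             /*
              * First try to mark the group read-only without forcing, which
              * only succeeds if the space info keeps enough free space around.
              * If that fails, force-allocate a fresh chunk for this profile
              * and try once more.
              */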
2079         ret = inc_block_group_ro(cache, 0);
2080         if (!ret)
2081                 goto out;
2082         alloc_flags = btrfs_get_alloc_profile(fs_info, cache->space_info->flags);
2083         ret = btrfs_chunk_alloc(trans, alloc_flags, CHUNK_ALLOC_FORCE);
2084         if (ret < 0)
2085                 goto out;
2086         ret = inc_block_group_ro(cache, 0);
2087 out:
2088         if (cache->flags & BTRFS_BLOCK_GROUP_SYSTEM) {
2089                 alloc_flags = update_block_group_flags(fs_info, cache->flags);
2090                 mutex_lock(&fs_info->chunk_mutex);
2091                 check_system_chunk(trans, alloc_flags);
2092                 mutex_unlock(&fs_info->chunk_mutex);
2093         }
2094         mutex_unlock(&fs_info->ro_block_group_mutex);
2095 
2096         btrfs_end_transaction(trans);
2097         return ret;
2098 }
2099 
2100 void btrfs_dec_block_group_ro(struct btrfs_block_group_cache *cache)
2101 {
2102         struct btrfs_space_info *sinfo = cache->space_info;
2103         u64 num_bytes;
2104 
2105         BUG_ON(!cache->ro);
2106 
2107         spin_lock(&sinfo->lock);
2108         spin_lock(&cache->lock);
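             /*
              * Dropping the last read-only reference gives back the unused
              * part of the block group (everything not reserved, pinned, used
              * or super bytes) that inc_block_group_ro() accounted as
              * readonly.
              */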
2109         if (!--cache->ro) {
2110                 num_bytes = cache->key.offset - cache->reserved -
2111                             cache->pinned - cache->bytes_super -
2112                             btrfs_block_group_used(&cache->item);
2113                 sinfo->bytes_readonly -= num_bytes;
2114                 list_del_init(&cache->ro_list);
2115         }
2116         spin_unlock(&cache->lock);
2117         spin_unlock(&sinfo->lock);
2118 }
2119 
2120 static int write_one_cache_group(struct btrfs_trans_handle *trans,
2121                                  struct btrfs_path *path,
2122                                  struct btrfs_block_group_cache *cache)
2123 {
2124         struct btrfs_fs_info *fs_info = trans->fs_info;
2125         int ret;
2126         struct btrfs_root *extent_root = fs_info->extent_root;
2127         unsigned long bi;
2128         struct extent_buffer *leaf;
2129 
2130         ret = btrfs_search_slot(trans, extent_root, &cache->key, path, 0, 1);
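             /*
              * btrfs_search_slot() returns > 0 when the exact key does not
              * exist; report a missing block group item as -ENOENT.
              */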
2131         if (ret) {
2132                 if (ret > 0)
2133                         ret = -ENOENT;
2134                 goto fail;
2135         }
2136 
2137         leaf = path->nodes[0];
2138         bi = btrfs_item_ptr_offset(leaf, path->slots[0]);
2139         write_extent_buffer(leaf, &cache->item, bi, sizeof(cache->item));
2140         btrfs_mark_buffer_dirty(leaf);
2141 fail:
2142         btrfs_release_path(path);
2143         return ret;
2144 
2145 }
2146 
2147 static int cache_save_setup(struct btrfs_block_group_cache *block_group,
2148                             struct btrfs_trans_handle *trans,
2149                             struct btrfs_path *path)
2150 {
2151         struct btrfs_fs_info *fs_info = block_group->fs_info;
2152         struct btrfs_root *root = fs_info->tree_root;
2153         struct inode *inode = NULL;
2154         struct extent_changeset *data_reserved = NULL;
2155         u64 alloc_hint = 0;
2156         int dcs = BTRFS_DC_ERROR;
2157         u64 num_pages = 0;
2158         int retries = 0;
2159         int ret = 0;
2160 
2161         /*
2162          * If this block group is smaller than 100 megs don't bother caching the
2163          * block group.
2164          */
2165         if (block_group->key.offset < (100 * SZ_1M)) {
2166                 spin_lock(&block_group->lock);
2167                 block_group->disk_cache_state = BTRFS_DC_WRITTEN;
2168                 spin_unlock(&block_group->lock);
2169                 return 0;
2170         }
2171 
2172         if (trans->aborted)
2173                 return 0;
2174 again:
2175         inode = lookup_free_space_inode(block_group, path);
2176         if (IS_ERR(inode) && PTR_ERR(inode) != -ENOENT) {
2177                 ret = PTR_ERR(inode);
2178                 btrfs_release_path(path);
2179                 goto out;
2180         }
2181 
2182         if (IS_ERR(inode)) {
2183                 BUG_ON(retries);
2184                 retries++;
2185 
2186                 if (block_group->ro)
2187                         goto out_free;
2188 
2189                 ret = create_free_space_inode(trans, block_group, path);
2190                 if (ret)
2191                         goto out_free;
2192                 goto again;
2193         }
2194 
2195         /*
2196          * We want to set the generation to 0 so that if anything goes wrong
2197          * from here on out we know not to trust this cache when we load up next
2198          * time.
2199          */
2200         BTRFS_I(inode)->generation = 0;
2201         ret = btrfs_update_inode(trans, root, inode);
2202         if (ret) {
2203                 /*
2204                  * So theoretically we could recover from this, simply set the
2205                  * super cache generation to 0 so we know to invalidate the
2206                  * cache, but then we'd have to keep track of the block groups
2207                  * that fail this way so we know we _have_ to reset this cache
2208                  * before the next commit or risk reading stale cache.  So to
2209                  * limit our exposure to horrible edge cases let's just abort the
2210                  * transaction; this only happens in really bad situations
2211                  * anyway.
2212                  */
2213                 btrfs_abort_transaction(trans, ret);
2214                 goto out_put;
2215         }
2216         WARN_ON(ret);
2217 
2218         /* We've already set up this transaction, go ahead and exit */
2219         if (block_group->cache_generation == trans->transid &&
2220             i_size_read(inode)) {
2221                 dcs = BTRFS_DC_SETUP;
2222                 goto out_put;
2223         }
2224 
2225         if (i_size_read(inode) > 0) {
2226                 ret = btrfs_check_trunc_cache_free_space(fs_info,
2227                                         &fs_info->global_block_rsv);
2228                 if (ret)
2229                         goto out_put;
2230 
2231                 ret = btrfs_truncate_free_space_cache(trans, NULL, inode);
2232                 if (ret)
2233                         goto out_put;
2234         }
2235 
2236         spin_lock(&block_group->lock);
2237         if (block_group->cached != BTRFS_CACHE_FINISHED ||
2238             !btrfs_test_opt(fs_info, SPACE_CACHE)) {
2239                 /*
2240                  * don't bother trying to write stuff out _if_
2241                  * a) we're not cached,
2242                  * b) we're mounted with the nospace_cache option,
2243                  * c) we're using the v2 space cache (FREE_SPACE_TREE).
2244                  */
2245                 dcs = BTRFS_DC_WRITTEN;
2246                 spin_unlock(&block_group->lock);
2247                 goto out_put;
2248         }
2249         spin_unlock(&block_group->lock);
2250 
2251         /*
2252          * We hit an ENOSPC when setting up the cache in this transaction, just
2253          * skip doing the setup, we've already cleared the cache so we're safe.
2254          */
2255         if (test_bit(BTRFS_TRANS_CACHE_ENOSPC, &trans->transaction->flags)) {
2256                 ret = -ENOSPC;
2257                 goto out_put;
2258         }
2259 
2260         /*
2261          * Try to preallocate enough space based on how big the block group is.
2262          * Keep in mind this has to include any pinned space which could end up
2263          * taking up quite a bit since it's not folded into the other space
2264          * cache.
2265          */
2266         num_pages = div_u64(block_group->key.offset, SZ_256M);
2267         if (!num_pages)
2268                 num_pages = 1;
2269 
2270         num_pages *= 16;
2271         num_pages *= PAGE_SIZE;
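             /*
              * Note that num_pages now holds a byte count.  Example (assuming
              * 4KiB pages): a 1GiB block group gives div_u64(1GiB, 256MiB) = 4,
              * so 4 * 16 = 64 pages, i.e. 256KiB preallocated for the free
              * space cache file.
              */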
2272 
2273         ret = btrfs_check_data_free_space(inode, &data_reserved, 0, num_pages);
2274         if (ret)
2275                 goto out_put;
2276 
2277         ret = btrfs_prealloc_file_range_trans(inode, trans, 0, 0, num_pages,
2278                                               num_pages, num_pages,
2279                                               &alloc_hint);
2280         /*
2281          * Our cache requires contiguous chunks so that we don't modify a bunch
2282          * of metadata or split extents when writing the cache out, which means
2283          * we can hit ENOSPC if we are heavily fragmented, in addition to just normal
2284          * out of space conditions.  So if we hit this just skip setting up any
2285          * other block groups for this transaction, maybe we'll unpin enough
2286          * space the next time around.
2287          */
2288         if (!ret)
2289                 dcs = BTRFS_DC_SETUP;
2290         else if (ret == -ENOSPC)
2291                 set_bit(BTRFS_TRANS_CACHE_ENOSPC, &trans->transaction->flags);
2292 
2293 out_put:
2294         iput(inode);
2295 out_free:
2296         btrfs_release_path(path);
2297 out:
2298         spin_lock(&block_group->lock);
2299         if (!ret && dcs == BTRFS_DC_SETUP)
2300                 block_group->cache_generation = trans->transid;
2301         block_group->disk_cache_state = dcs;
2302         spin_unlock(&block_group->lock);
2303 
2304         extent_changeset_free(data_reserved);
2305         return ret;
2306 }
2307 
2308 int btrfs_setup_space_cache(struct btrfs_trans_handle *trans)
2309 {
2310         struct btrfs_fs_info *fs_info = trans->fs_info;
2311         struct btrfs_block_group_cache *cache, *tmp;
2312         struct btrfs_transaction *cur_trans = trans->transaction;
2313         struct btrfs_path *path;
2314 
2315         if (list_empty(&cur_trans->dirty_bgs) ||
2316             !btrfs_test_opt(fs_info, SPACE_CACHE))
2317                 return 0;
2318 
2319         path = btrfs_alloc_path();
2320         if (!path)
2321                 return -ENOMEM;
2322 
2323         /* Could add new block groups, use _safe just in case */
2324         list_for_each_entry_safe(cache, tmp, &cur_trans->dirty_bgs,
2325                                  dirty_list) {
2326                 if (cache->disk_cache_state == BTRFS_DC_CLEAR)
2327                         cache_save_setup(cache, trans, path);
2328         }
2329 
2330         btrfs_free_path(path);
2331         return 0;
2332 }
2333 
2334 /*
2335  * Transaction commit does final block group cache writeback during a critical
2336  * section where nothing is allowed to change the FS.  This is required in
2337  * order for the cache to actually match the block group, but can introduce a
2338  * lot of latency into the commit.
2339  *
2340  * So, btrfs_start_dirty_block_groups is here to kick off block group cache IO.
2341  * There's a chance we'll have to redo some of it if the block group changes
2342  * again during the commit, but it greatly reduces the commit latency by
2343  * getting rid of the easy block groups while we're still allowing others to
2344  * join the commit.
2345  */
2346 int btrfs_start_dirty_block_groups(struct btrfs_trans_handle *trans)
2347 {
2348         struct btrfs_fs_info *fs_info = trans->fs_info;
2349         struct btrfs_block_group_cache *cache;
2350         struct btrfs_transaction *cur_trans = trans->transaction;
2351         int ret = 0;
2352         int should_put;
2353         struct btrfs_path *path = NULL;
2354         LIST_HEAD(dirty);
2355         struct list_head *io = &cur_trans->io_bgs;
2356         int num_started = 0;
2357         int loops = 0;
2358 
2359         spin_lock(&cur_trans->dirty_bgs_lock);
2360         if (list_empty(&cur_trans->dirty_bgs)) {
2361                 spin_unlock(&cur_trans->dirty_bgs_lock);
2362                 return 0;
2363         }
2364         list_splice_init(&cur_trans->dirty_bgs, &dirty);
2365         spin_unlock(&cur_trans->dirty_bgs_lock);
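             /*
              * Work on a private copy of the dirty list; anything re-dirtied
              * while we are writing lands back on cur_trans->dirty_bgs and is
              * picked up by the single extra pass at the bottom of this
              * function.
              */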
2366 
2367 again:
2368         /* Make sure all the block groups on our dirty list actually exist */
2369         btrfs_create_pending_block_groups(trans);
2370 
2371         if (!path) {
2372                 path = btrfs_alloc_path();
2373                 if (!path)
2374                         return -ENOMEM;
2375         }
2376 
2377         /*
2378          * cache_write_mutex is here only to protect us from balance, or from
2379          * automatic removal of empty block groups, deleting this block group
2380          * while we are writing out the cache.
2381          */
2382         mutex_lock(&trans->transaction->cache_write_mutex);
2383         while (!list_empty(&dirty)) {
2384                 bool drop_reserve = true;
2385 
2386                 cache = list_first_entry(&dirty,
2387                                          struct btrfs_block_group_cache,
2388                                          dirty_list);
2389                 /*
2390                  * This can happen if something re-dirties a block group that
2391                  * is already under IO.  Just wait for it to finish and then do
2392                  * it all again
2393                  */
2394                 if (!list_empty(&cache->io_list)) {
2395                         list_del_init(&cache->io_list);
2396                         btrfs_wait_cache_io(trans, cache, path);
2397                         btrfs_put_block_group(cache);
2398                 }
2399 
2400 
2401                 /*
2402                  * btrfs_wait_cache_io uses the cache->dirty_list to decide if
2403                  * it should update the cache_state.  Don't delete until after
2404                  * we wait.
2405                  *
2406                  * Since we're not running in the commit critical section
2407                  * we need the dirty_bgs_lock to protect from update_block_group
2408                  */
2409                 spin_lock(&cur_trans->dirty_bgs_lock);
2410                 list_del_init(&cache->dirty_list);
2411                 spin_unlock(&cur_trans->dirty_bgs_lock);
2412 
2413                 should_put = 1;
2414 
2415                 cache_save_setup(cache, trans, path);
2416 
2417                 if (cache->disk_cache_state == BTRFS_DC_SETUP) {
2418                         cache->io_ctl.inode = NULL;
2419                         ret = btrfs_write_out_cache(trans, cache, path);
2420                         if (ret == 0 && cache->io_ctl.inode) {
2421                                 num_started++;
2422                                 should_put = 0;
2423 
2424                                 /*
2425                                  * The cache_write_mutex is protecting the
2426                                  * io_list, also refer to the definition of
2427                                  * btrfs_transaction::io_bgs for more details
2428                                  */
2429                                 list_add_tail(&cache->io_list, io);
2430                         } else {
2431                                 /*
2432                                  * If we failed to write the cache, the
2433                                  * generation will be bad and life goes on
2434                                  */
2435                                 ret = 0;
2436                         }
2437                 }
2438                 if (!ret) {
2439                         ret = write_one_cache_group(trans, path, cache);
2440                         /*
2441                          * Our block group might still be attached to the list
2442                          * of new block groups in the transaction handle of some
2443                          * other task (struct btrfs_trans_handle->new_bgs). This
2444                          * means its block group item isn't yet in the extent
2445                          * tree. If this happens ignore the error, as we will
2446                          * try again later in the critical section of the
2447                          * transaction commit.
2448                          */
2449                         if (ret == -ENOENT) {
2450                                 ret = 0;
2451                                 spin_lock(&cur_trans->dirty_bgs_lock);
2452                                 if (list_empty(&cache->dirty_list)) {
2453                                         list_add_tail(&cache->dirty_list,
2454                                                       &cur_trans->dirty_bgs);
2455                                         btrfs_get_block_group(cache);
2456                                         drop_reserve = false;
2457                                 }
2458                                 spin_unlock(&cur_trans->dirty_bgs_lock);
2459                         } else if (ret) {
2460                                 btrfs_abort_transaction(trans, ret);
2461                         }
2462                 }
2463 
2464                 /* If it's not on the io list, we need to put the block group */
2465                 if (should_put)
2466                         btrfs_put_block_group(cache);
2467                 if (drop_reserve)
2468                         btrfs_delayed_refs_rsv_release(fs_info, 1);
2469 
2470                 if (ret)
2471                         break;
2472 
2473                 /*
2474                  * Avoid blocking other tasks for too long. It might even save
2475                  * us from writing caches for block groups that are going to be
2476                  * removed.
2477                  */
2478                 mutex_unlock(&trans->transaction->cache_write_mutex);
2479                 mutex_lock(&trans->transaction->cache_write_mutex);
2480         }
2481         mutex_unlock(&trans->transaction->cache_write_mutex);
2482 
2483         /*
2484          * Go through delayed refs for all the stuff we've just kicked off
2485          * and then loop back (just once)
2486          */
2487         ret = btrfs_run_delayed_refs(trans, 0);
2488         if (!ret && loops == 0) {
2489                 loops++;
2490                 spin_lock(&cur_trans->dirty_bgs_lock);
2491                 list_splice_init(&cur_trans->dirty_bgs, &dirty);
2492                 /*
2493                  * dirty_bgs_lock protects us from concurrent block group
2494                  * deletes too (not just cache_write_mutex).
2495                  */
2496                 if (!list_empty(&dirty)) {
2497                         spin_unlock(&cur_trans->dirty_bgs_lock);
2498                         goto again;
2499                 }
2500                 spin_unlock(&cur_trans->dirty_bgs_lock);
2501         } else if (ret < 0) {
2502                 btrfs_cleanup_dirty_bgs(cur_trans, fs_info);
2503         }
2504 
2505         btrfs_free_path(path);
2506         return ret;
2507 }
2508 
2509 int btrfs_write_dirty_block_groups(struct btrfs_trans_handle *trans)
2510 {
2511         struct btrfs_fs_info *fs_info = trans->fs_info;
2512         struct btrfs_block_group_cache *cache;
2513         struct btrfs_transaction *cur_trans = trans->transaction;
2514         int ret = 0;
2515         int should_put;
2516         struct btrfs_path *path;
2517         struct list_head *io = &cur_trans->io_bgs;
2518         int num_started = 0;
2519 
2520         path = btrfs_alloc_path();
2521         if (!path)
2522                 return -ENOMEM;
2523 
2524         /*
2525          * Even though we are in the critical section of the transaction commit,
2526          * we can still have concurrent tasks adding elements to this
2527          * transaction's list of dirty block groups. These tasks correspond to
2528          * endio free space workers started when writeback finishes for a
2529          * space cache, which run inode.c:btrfs_finish_ordered_io(), and can
2530          * allocate new block groups as a result of COWing nodes of the root
2531          * tree when updating the free space inode. The writeback for the space
2532          * caches is triggered by an earlier call to
2533          * btrfs_start_dirty_block_groups() and iterations of the following
2534          * loop.
2535          * Also we want to do the cache_save_setup first and then run the
2536          * delayed refs to make sure we have the best chance at doing this all
2537          * in one shot.
2538          */
2539         spin_lock(&cur_trans->dirty_bgs_lock);
2540         while (!list_empty(&cur_trans->dirty_bgs)) {
2541                 cache = list_first_entry(&cur_trans->dirty_bgs,
2542                                          struct btrfs_block_group_cache,
2543                                          dirty_list);
2544 
2545                 /*
2546                  * This can happen if cache_save_setup re-dirties a block group
2547                  * that is already under IO.  Just wait for it to finish and
2548                  * then do it all again
2549                  */
2550                 if (!list_empty(&cache->io_list)) {
2551                         spin_unlock(&cur_trans->dirty_bgs_lock);
2552                         list_del_init(&cache->io_list);
2553                         btrfs_wait_cache_io(trans, cache, path);
2554                         btrfs_put_block_group(cache);
2555                         spin_lock(&cur_trans->dirty_bgs_lock);
2556                 }
2557 
2558                 /*
2559                  * Don't remove from the dirty list until after we've waited on
2560                  * any pending IO
2561                  */
2562                 list_del_init(&cache->dirty_list);
2563                 spin_unlock(&cur_trans->dirty_bgs_lock);
2564                 should_put = 1;
2565 
2566                 cache_save_setup(cache, trans, path);
2567 
2568                 if (!ret)
2569                         ret = btrfs_run_delayed_refs(trans,
2570                                                      (unsigned long) -1);
2571 
2572                 if (!ret && cache->disk_cache_state == BTRFS_DC_SETUP) {
2573                         cache->io_ctl.inode = NULL;
2574                         ret = btrfs_write_out_cache(trans, cache, path);
2575                         if (ret == 0 && cache->io_ctl.inode) {
2576                                 num_started++;
2577                                 should_put = 0;
2578                                 list_add_tail(&cache->io_list, io);
2579                         } else {
2580                                 /*
2581                                  * If we failed to write the cache, the
2582                                  * generation will be bad and life goes on
2583                                  */
2584                                 ret = 0;
2585                         }
2586                 }
2587                 if (!ret) {
2588                         ret = write_one_cache_group(trans, path, cache);
2589                         /*
2590                          * One of the free space endio workers might have
2591                          * created a new block group while updating a free space
2592                          * cache's inode (at inode.c:btrfs_finish_ordered_io())
2593                          * and hasn't released its transaction handle yet, in
2594                          * which case the new block group is still attached to
2595                          * its transaction handle and its creation has not
2596                          * finished yet (no block group item in the extent tree
2597                          * yet, etc). If this is the case, wait for all free
2598                          * space endio workers to finish and retry. This is
2599                          * a very rare case, so no need for a more efficient
2600                          * and complex approach.
2601                          */
2602                         if (ret == -ENOENT) {
2603                                 wait_event(cur_trans->writer_wait,
2604                                    atomic_read(&cur_trans->num_writers) == 1);
2605                                 ret = write_one_cache_group(trans, path, cache);
2606                         }
2607                         if (ret)
2608                                 btrfs_abort_transaction(trans, ret);
2609                 }
2610 
2611                 /* If it's not on the io list, we need to put the block group */
2612                 if (should_put)
2613                         btrfs_put_block_group(cache);
2614                 btrfs_delayed_refs_rsv_release(fs_info, 1);
2615                 spin_lock(&cur_trans->dirty_bgs_lock);
2616         }
2617         spin_unlock(&cur_trans->dirty_bgs_lock);
2618 
2619         /*
2620          * Refer to the definition of the io_bgs member for details on why
2621          * it's safe to use it without any locking
2622          */
2623         while (!list_empty(io)) {
2624                 cache = list_first_entry(io, struct btrfs_block_group_cache,
2625                                          io_list);
2626                 list_del_init(&cache->io_list);
2627                 btrfs_wait_cache_io(trans, cache, path);
2628                 btrfs_put_block_group(cache);
2629         }
2630 
2631         btrfs_free_path(path);
2632         return ret;
2633 }
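
/*
 * Lifecycle sketch (illustrative note, not part of the original source) of a
 * block group's free space cache write-out in the function above:
 *
 *      cache_save_setup()       marks the cache BTRFS_DC_SETUP
 *      btrfs_write_out_cache()  starts writeback; on success io_ctl.inode is
 *                               still set and the group is parked on io_bgs
 *      btrfs_wait_cache_io()    waits for the writeback, after which the
 *                               reference is dropped with btrfs_put_block_group()
 *
 * A group that is re-dirtied while its cache write is still in flight is
 * waited on first (the io_list check at the top of the loop) and then
 * processed again.
 */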
2634 
2635 int btrfs_update_block_group(struct btrfs_trans_handle *trans,
2636                              u64 bytenr, u64 num_bytes, int alloc)
2637 {
2638         struct btrfs_fs_info *info = trans->fs_info;
2639         struct btrfs_block_group_cache *cache = NULL;
2640         u64 total = num_bytes;
2641         u64 old_val;
2642         u64 byte_in_group;
2643         int factor;
2644         int ret = 0;
2645 
2646         /* Block accounting for super block */
2647         spin_lock(&info->delalloc_root_lock);
2648         old_val = btrfs_super_bytes_used(info->super_copy);
2649         if (alloc)
2650                 old_val += num_bytes;
2651         else
2652                 old_val -= num_bytes;
2653         btrfs_set_super_bytes_used(info->super_copy, old_val);
2654         spin_unlock(&info->delalloc_root_lock);
2655 
2656         while (total) {
2657                 cache = btrfs_lookup_block_group(info, bytenr);
2658                 if (!cache) {
2659                         ret = -ENOENT;
2660                         break;
2661                 }
2662                 factor = btrfs_bg_type_to_factor(cache->flags);
2663 
2664                 /*
2665                  * If this block group has free space cache written out, we
2666                  * need to make sure to load it if we are removing space.  This
2667                  * is because we need the unpinning stage to actually add the
2668                  * space back to the block group, otherwise we will leak space.
2669                  */
2670                 if (!alloc && !btrfs_block_group_cache_done(cache))
2671                         btrfs_cache_block_group(cache, 1);
2672 
2673                 byte_in_group = bytenr - cache->key.objectid;
2674                 WARN_ON(byte_in_group > cache->key.offset);
2675 
2676                 spin_lock(&cache->space_info->lock);
2677                 spin_lock(&cache->lock);
2678 
2679                 if (btrfs_test_opt(info, SPACE_CACHE) &&
2680                     cache->disk_cache_state < BTRFS_DC_CLEAR)
2681                         cache->disk_cache_state = BTRFS_DC_CLEAR;
2682 
2683                 old_val = btrfs_block_group_used(&cache->item);
2684                 num_bytes = min(total, cache->key.offset - byte_in_group);
2685                 if (alloc) {
2686                         old_val += num_bytes;
2687                         btrfs_set_block_group_used(&cache->item, old_val);
2688                         cache->reserved -= num_bytes;
2689                         cache->space_info->bytes_reserved -= num_bytes;
2690                         cache->space_info->bytes_used += num_bytes;
2691                         cache->space_info->disk_used += num_bytes * factor;
2692                         spin_unlock(&cache->lock);
2693                         spin_unlock(&cache->space_info->lock);
2694                 } else {
2695                         old_val -= num_bytes;
2696                         btrfs_set_block_group_used(&cache->item, old_val);
2697                         cache->pinned += num_bytes;
2698                         btrfs_space_info_update_bytes_pinned(info,
2699                                         cache->space_info, num_bytes);
2700                         cache->space_info->bytes_used -= num_bytes;
2701                         cache->space_info->disk_used -= num_bytes * factor;
2702                         spin_unlock(&cache->lock);
2703                         spin_unlock(&cache->space_info->lock);
2704 
2705                         percpu_counter_add_batch(
2706                                         &cache->space_info->total_bytes_pinned,
2707                                         num_bytes,
2708                                         BTRFS_TOTAL_BYTES_PINNED_BATCH);
2709                         set_extent_dirty(info->pinned_extents,
2710                                          bytenr, bytenr + num_bytes - 1,
2711                                          GFP_NOFS | __GFP_NOFAIL);
2712                 }
2713 
2714                 spin_lock(&trans->transaction->dirty_bgs_lock);
2715                 if (list_empty(&cache->dirty_list)) {
2716                         list_add_tail(&cache->dirty_list,
2717                                       &trans->transaction->dirty_bgs);
2718                         trans->delayed_ref_updates++;
2719                         btrfs_get_block_group(cache);
2720                 }
2721                 spin_unlock(&trans->transaction->dirty_bgs_lock);
2722 
2723                 /*
2724                  * No longer have used bytes in this block group, queue it for
2725                  * deletion. We do this after adding the block group to the
2726          * dirty list to avoid races between the cleaner kthread and space
2727                  * cache writeout.
2728                  */
2729                 if (!alloc && old_val == 0)
2730                         btrfs_mark_bg_unused(cache);
2731 
2732                 btrfs_put_block_group(cache);
2733                 total -= num_bytes;
2734                 bytenr += num_bytes;
2735         }
2736 
2737         /* Modified block groups are accounted for in the delayed_refs_rsv. */
2738         btrfs_update_delayed_refs_rsv(trans);
2739         return ret;
2740 }
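
/*
 * Illustrative example (not part of the original source): the effect of the
 * accounting above when 1 MiB is allocated from a RAID1 block group, where
 * btrfs_bg_type_to_factor() returns 2:
 *
 *      cache->reserved                -= 1 MiB;
 *      cache->space_info->bytes_used  += 1 MiB;
 *      cache->space_info->disk_used   += 2 MiB;   (num_bytes * factor)
 *
 * On the free path the same amounts move from bytes_used/disk_used to pinned
 * instead, and the range is marked dirty in pinned_extents so it is handed
 * back to the free space cache when the transaction commits (unpin time).
 */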
2741 
2742 /**
2743  * btrfs_add_reserved_bytes - update the block_group and space info counters
2744  * @cache:      The cache we are manipulating
2745  * @ram_bytes:  The number of bytes of file content; this is the same as
2746  *              @num_bytes except on the compressed write path.
2747  * @num_bytes:  The number of bytes in question
2748  * @delalloc:   The blocks are allocated for the delalloc write
2749  *
2750  * This is called by the allocator when it reserves space. If this is a
2751  * reservation and the block group has become read only we cannot make the
2752  * reservation and return -EAGAIN, otherwise this function always succeeds.
2753  */
2754 int btrfs_add_reserved_bytes(struct btrfs_block_group_cache *cache,
2755                              u64 ram_bytes, u64 num_bytes, int delalloc)
2756 {
2757         struct btrfs_space_info *space_info = cache->space_info;
2758         int ret = 0;
2759 
2760         spin_lock(&space_info->lock);
2761         spin_lock(&cache->lock);
2762         if (cache->ro) {
2763                 ret = -EAGAIN;
2764         } else {
2765                 cache->reserved += num_bytes;
2766                 space_info->bytes_reserved += num_bytes;
2767                 trace_btrfs_space_reservation(cache->fs_info, "space_info",
2768                                               space_info->flags, num_bytes, 1);
2769                 btrfs_space_info_update_bytes_may_use(cache->fs_info,
2770                                                       space_info, -ram_bytes);
2771                 if (delalloc)
2772                         cache->delalloc_bytes += num_bytes;
2773         }
2774         spin_unlock(&cache->lock);
2775         spin_unlock(&space_info->lock);
2776         return ret;
2777 }
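
/*
 * Illustrative usage sketch (not part of the original source; the control
 * flow and label are hypothetical): the allocator reserves from a candidate
 * block group and moves on if this group was flipped read-only:
 *
 *      ret = btrfs_add_reserved_bytes(cache, ram_bytes, num_bytes, delalloc);
 *      if (ret == -EAGAIN)
 *              goto next_block_group;   (group went read-only, e.g. for scrub)
 *
 * For a compressed delalloc write, @ram_bytes is the uncompressed length that
 * was previously accounted in bytes_may_use, while @num_bytes is the smaller
 * on-disk extent size actually reserved here; otherwise the two are equal.
 */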
2778 
2779 /**
2780  * btrfs_free_reserved_bytes - update the block_group and space info counters
2781  * @cache:      The cache we are manipulating
2782  * @num_bytes:  The number of bytes in question
2783  * @delalloc:   The blocks are allocated for the delalloc write
2784  *
2785  * This is called by somebody who is freeing space that was never actually used
2786  * on disk.  For example if you reserve some space for a new leaf in transaction
2787  * A and before transaction A commits you free that leaf, you call this to
2788  * clear the reservation.
2789  */
2790 void btrfs_free_reserved_bytes(struct btrfs_block_group_cache *cache,
2791                                u64 num_bytes, int delalloc)
2792 {
2793         struct btrfs_space_info *space_info = cache->space_info;
2794 
2795         spin_lock(&space_info->lock);
2796         spin_lock(&cache->lock);
2797         if (cache->ro)
2798                 space_info->bytes_readonly += num_bytes;
2799         cache->reserved -= num_bytes;
2800         space_info->bytes_reserved -= num_bytes;
2801         space_info->max_extent_size = 0;
2802 
2803         if (delalloc)
2804                 cache->delalloc_bytes -= num_bytes;
2805         spin_unlock(&cache->lock);
2806         spin_unlock(&space_info->lock);
2807 }
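
/*
 * Illustrative pairing (sketch, not part of the original source): a caller
 * that reserved space but never ends up writing it releases the reservation
 * again, e.g. when a newly COWed node is freed within the same transaction:
 *
 *      btrfs_add_reserved_bytes(cache, len, len, 0);
 *      ...
 *      btrfs_free_reserved_bytes(cache, len, 0);
 *
 * If the group went read-only in the meantime, the bytes are accounted to
 * bytes_readonly instead of becoming allocatable again.
 */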
2808 
2809 static void force_metadata_allocation(struct btrfs_fs_info *info)
2810 {
2811         struct list_head *head = &info->space_info;
2812         struct btrfs_space_info *found;
2813 
2814         rcu_read_lock();
2815         list_for_each_entry_rcu(found, head, list) {
2816                 if (found->flags & BTRFS_BLOCK_GROUP_METADATA)
2817                         found->force_alloc = CHUNK_ALLOC_FORCE;
2818         }
2819         rcu_read_unlock();
2820 }
2821 
2822 static int should_alloc_chunk(struct btrfs_fs_info *fs_info,
2823                               struct btrfs_space_info *sinfo, int force)
2824 {
2825         u64 bytes_used = btrfs_space_info_used(sinfo, false);
2826         u64 thresh;
2827 
2828         if (force == CHUNK_ALLOC_FORCE)
2829                 return 1;
2830 
2831         /*
2832          * in limited mode, we want to have some free space up to
2833          * about 1% of the FS size.
2834          */
2835         if (force == CHUNK_ALLOC_LIMITED) {
2836                 thresh = btrfs_super_total_bytes(fs_info->super_copy);
2837                 thresh = max_t(u64, SZ_64M, div_factor_fine(thresh, 1));
2838 
2839                 if (sinfo->total_bytes - bytes_used < thresh)
2840                         return 1;
2841         }
2842 
2843         if (bytes_used + SZ_2M < div_factor(sinfo->total_bytes, 8))
2844                 return 0;
2845         return 1;
2846 }
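
/*
 * Worked example for the thresholds above (illustrative, not part of the
 * original source), assuming div_factor(x, 8) is 80% of x and
 * div_factor_fine(x, 1) is 1% of x, on a 100 GiB filesystem:
 *
 *      CHUNK_ALLOC_LIMITED:  thresh = max(64M, 1 GiB) = 1 GiB, so a new chunk
 *                            is requested once the space_info has less than
 *                            1 GiB of unallocated room left.
 *
 *      CHUNK_ALLOC_NO_FORCE: a new chunk is requested once
 *                            bytes_used + 2M >= 80% of sinfo->total_bytes.
 */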
2847 
2848 int btrfs_force_chunk_alloc(struct btrfs_trans_handle *trans, u64 type)
2849 {
2850         u64 alloc_flags = btrfs_get_alloc_profile(trans->fs_info, type);
2851 
2852         return btrfs_chunk_alloc(trans, alloc_flags, CHUNK_ALLOC_FORCE);
2853 }
2854 
2855 /*
2856  * If force is CHUNK_ALLOC_FORCE:
2857  *    - return 1 if it successfully allocates a chunk,
2858  *    - return errors including -ENOSPC otherwise.
2859  * If force is NOT CHUNK_ALLOC_FORCE:
2860  *    - return 0 if it doesn't need to allocate a new chunk,
2861  *    - return 1 if it successfully allocates a chunk,
2862  *    - return errors including -ENOSPC otherwise.
2863  */
2864 int btrfs_chunk_alloc(struct btrfs_trans_handle *trans, u64 flags,
2865                       enum btrfs_chunk_alloc_enum force)
2866 {
2867         struct btrfs_fs_info *fs_info = trans->fs_info;
2868         struct btrfs_space_info *space_info;
2869         bool wait_for_alloc = false;
2870         bool should_alloc = false;
2871         int ret = 0;
2872 
2873         /* Don't re-enter if we're already allocating a chunk */
2874         if (trans->allocating_chunk)
2875                 return -ENOSPC;
2876 
2877         space_info = btrfs_find_space_info(fs_info, flags);
2878         ASSERT(space_info);
2879 
2880         do {
2881                 spin_lock(&space_info->lock);
2882                 if (force < space_info->force_alloc)
2883                         force = space_info->force_alloc;
2884                 should_alloc = should_alloc_chunk(fs_info, space_info, force);
2885                 if (space_info->full) {
2886                         /* No more free physical space */
2887                         if (should_alloc)
2888                                 ret = -ENOSPC;
2889                         else
2890                                 ret = 0;
2891                         spin_unlock(&space_info->lock);
2892                         return ret;
2893                 } else if (!should_alloc) {
2894                         spin_unlock(&space_info->lock);
2895                         return 0;
2896                 } else if (space_info->chunk_alloc) {
2897                         /*
2898                          * Someone is already allocating, so we need to block
2899                          * until this someone is finished and then loop to
2900                          * recheck if we should continue with our allocation
2901                          * attempt.
2902                          */
2903                         wait_for_alloc = true;
2904                         spin_unlock(&space_info->lock);
2905                         mutex_lock(&fs_info->chunk_mutex);
2906                         mutex_unlock(&fs_info->chunk_mutex);
2907                 } else {
2908                         /* Proceed with allocation */
2909                         space_info->chunk_alloc = 1;
2910                         wait_for_alloc = false;
2911                         spin_unlock(&space_info->lock);
2912                 }
2913 
2914                 cond_resched();
2915         } while (wait_for_alloc);
2916 
2917         mutex_lock(&fs_info->chunk_mutex);
2918         trans->allocating_chunk = true;
2919 
2920         /*
2921          * If we have mixed data/metadata chunks we want to make sure we keep
2922          * allocating mixed chunks instead of individual chunks.
2923          */
2924         if (btrfs_mixed_space_info(space_info))
2925                 flags |= (BTRFS_BLOCK_GROUP_DATA | BTRFS_BLOCK_GROUP_METADATA);
2926 
2927         /*
2928          * if we're doing a data chunk, go ahead and make sure that
2929          * we keep a reasonable number of metadata chunks allocated in the
2930          * FS as well.
2931          */
2932         if (flags & BTRFS_BLOCK_GROUP_DATA && fs_info->metadata_ratio) {
2933                 fs_info->data_chunk_allocations++;
2934                 if (!(fs_info->data_chunk_allocations %
2935                       fs_info->metadata_ratio))
2936                         force_metadata_allocation(fs_info);
2937         }
2938 
2939         /*
2940          * Check if we have enough space in SYSTEM chunk because we may need
2941          * to update devices.
2942          */
2943         check_system_chunk(trans, flags);
2944 
2945         ret = btrfs_alloc_chunk(trans, flags);
2946         trans->allocating_chunk = false;
2947 
2948         spin_lock(&space_info->lock);
2949         if (ret < 0) {
2950                 if (ret == -ENOSPC)
2951                         space_info->full = 1;
2952                 else
2953                         goto out;
2954         } else {
2955                 ret = 1;
2956                 space_info->max_extent_size = 0;
2957         }
2958 
2959         space_info->force_alloc = CHUNK_ALLOC_NO_FORCE;
2960 out:
2961         space_info->chunk_alloc = 0;
2962         spin_unlock(&space_info->lock);
2963         mutex_unlock(&fs_info->chunk_mutex);
2964         /*
2965          * When we allocate a new chunk we reserve space in the chunk block
2966          * reserve to make sure we can COW nodes/leafs in the chunk tree or
2967          * add new nodes/leafs to it if we end up needing to do it when
2968          * inserting the chunk item and updating device items as part of the
2969          * second phase of chunk allocation, performed by
2970          * btrfs_finish_chunk_alloc(). So make sure we don't accumulate a
2971          * large number of new block groups to create in our transaction
2972          * handle's new_bgs list to avoid exhausting the chunk block reserve
2973          * in extreme cases - like having a single transaction create many new
2974          * block groups when starting to write out the free space caches of all
2975          * the block groups that were made dirty during the lifetime of the
2976          * transaction.
2977          */
2978         if (trans->chunk_bytes_reserved >= (u64)SZ_2M)
2979                 btrfs_create_pending_block_groups(trans);
2980 
2981         return ret;
2982 }
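
/*
 * Illustrative caller sketch (not part of the original source; the flow and
 * variable names are hypothetical) showing how the return convention
 * documented above might be consumed:
 *
 *      ret = btrfs_chunk_alloc(trans, alloc_flags, CHUNK_ALLOC_NO_FORCE);
 *      if (ret < 0 && ret != -ENOSPC)
 *              btrfs_abort_transaction(trans, ret);
 *      else if (ret == 1)
 *              (a new block group now backs this space_info)
 *      else
 *              (ret == 0, no allocation was needed)
 */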
2983 
2984 static u64 get_profile_num_devs(struct btrfs_fs_info *fs_info, u64 type)
2985 {
2986         u64 num_dev;
2987 
2988         num_dev = btrfs_raid_array[btrfs_bg_flags_to_raid_index(type)].devs_max;
2989         if (!num_dev)
2990                 num_dev = fs_info->fs_devices->rw_devices;
2991 
2992         return num_dev;
2993 }
2994 
2995 /*
2996  * Reserve space in the system space info for the metadata updates needed
2997  * when allocating or removing a chunk of the given @type: the device item
2998  * updates and the chunk item itself.
2999  */
3000 void check_system_chunk(struct btrfs_trans_handle *trans, u64 type)
3001 {
3002         struct btrfs_fs_info *fs_info = trans->fs_info;
3003         struct btrfs_space_info *info;
3004         u64 left;
3005         u64 thresh;
3006         int ret = 0;
3007         u64 num_devs;
3008 
3009         /*
3010          * The chunk_mutex must be held: we may allocate a system chunk here
3011          * and need an atomic, race free reservation in the chunk block reserve.
3012          */
3013         lockdep_assert_held(&fs_info->chunk_mutex);
3014 
3015         info = btrfs_find_space_info(fs_info, BTRFS_BLOCK_GROUP_SYSTEM);
3016         spin_lock(&info->lock);
3017         left = info->total_bytes - btrfs_space_info_used(info, true);
3018         spin_unlock(&info->lock);
3019 
3020         num_devs = get_profile_num_devs(fs_info, type);
3021 
3022         /* num_devs device items to update and 1 chunk item to add or remove */
3023         thresh = btrfs_calc_metadata_size(fs_info, num_devs) +
3024                 btrfs_calc_insert_metadata_size(fs_info, 1);
3025 
3026         if (left < thresh && btrfs_test_opt(fs_info, ENOSPC_DEBUG)) {
3027                 btrfs_info(fs_info, "left=%llu, need=%llu, flags=%llu",
3028                            left, thresh, type);
3029                 btrfs_dump_space_info(fs_info, info, 0, 0);
3030         }
3031 
3032         if (left < thresh) {
3033                 u64 flags = btrfs_system_alloc_profile(fs_info);
3034 
3035                 /*
3036                  * Ignore failure to create system chunk. We might end up not
3037                  * needing it, as we might not need to COW all nodes/leafs from
3038                  * the paths we visit in the chunk tree (they were already COWed
3039                  * or created in the current transaction for example).
3040                  */
3041                 ret = btrfs_alloc_chunk(trans, flags);
3042         }
3043 
3044         if (!ret) {
3045                 ret = btrfs_block_rsv_add(fs_info->chunk_root,
3046                                           &fs_info->chunk_block_rsv,
3047                                           thresh, BTRFS_RESERVE_NO_FLUSH);
3048                 if (!ret)
3049                         trans->chunk_bytes_reserved += thresh;
3050         }
3051 }
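
/*
 * Illustrative example (not part of the original source): for a RAID0 data
 * chunk on a filesystem with 4 writable devices, btrfs_raid_array has
 * devs_max == 0 (no limit), so get_profile_num_devs() falls back to
 * rw_devices and num_devs == 4.  The reservation then has to cover 4 device
 * item updates plus 1 chunk item insertion:
 *
 *      thresh = btrfs_calc_metadata_size(fs_info, 4) +
 *               btrfs_calc_insert_metadata_size(fs_info, 1);
 *
 * If the SYSTEM space info cannot cover that, a new system chunk is allocated
 * first (ignoring failure, as explained in the comment above).
 */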
3052 
3053 void btrfs_put_block_group_cache(struct btrfs_fs_info *info)
3054 {
3055         struct btrfs_block_group_cache *block_group;
3056         u64 last = 0;
3057 
3058         while (1) {
3059                 struct inode *inode;
3060 
3061                 block_group = btrfs_lookup_first_block_group(info, last);
3062                 while (block_group) {
3063                         btrfs_wait_block_group_cache_done(block_group);
3064                         spin_lock(&block_group->lock);
3065                         if (block_group->iref)
3066                                 break;
3067                         spin_unlock(&block_group->lock);
3068                         block_group = btrfs_next_block_group(block_group);
3069                 }
3070                 if (!block_group) {
3071                         if (last == 0)
3072                                 break;
3073                         last = 0;
3074                         continue;
3075                 }
3076 
3077                 inode = block_group->inode;
3078                 block_group->iref = 0;
3079                 block_group->inode = NULL;
3080                 spin_unlock(&block_group->lock);
3081                 ASSERT(block_group->io_ctl.inode == NULL);
3082                 iput(inode);
3083                 last = block_group->key.objectid + block_group->key.offset;
3084                 btrfs_put_block_group(block_group);
3085         }
3086 }
3087 
3088 /*
3089  * Must be called only after stopping all workers, since we could have block
3090  * group caching kthreads running, and therefore they could race with us if we
3091  * freed the block groups before stopping them.
3092  */
3093 int btrfs_free_block_groups(struct btrfs_fs_info *info)
3094 {
3095         struct btrfs_block_group_cache *block_group;
3096         struct btrfs_space_info *space_info;
3097         struct btrfs_caching_control *caching_ctl;
3098         struct rb_node *n;
3099 
3100         down_write(&info->commit_root_sem);
3101         while (!list_empty(&info->caching_block_groups)) {
3102                 caching_ctl = list_entry(info->caching_block_groups.next,
3103                                          struct btrfs_caching_control, list);
3104                 list_del(&caching_ctl->list);
3105                 btrfs_put_caching_control(caching_ctl);
3106         }
3107         up_write(&info->commit_root_sem);
3108 
3109         spin_lock(&info->unused_bgs_lock);
3110         while (!list_empty(&info->unused_bgs)) {
3111                 block_group = list_first_entry(&info->unused_bgs,
3112                                                struct btrfs_block_group_cache,
3113                                                bg_list);
3114                 list_del_init(&block_group->bg_list);
3115                 btrfs_put_block_group(block_group);
3116         }
3117         spin_unlock(&info->unused_bgs_lock);
3118 
3119         spin_lock(&info->block_group_cache_lock);
3120         while ((n = rb_last(&info->block_group_cache_tree)) != NULL) {
3121                 block_group = rb_entry(n, struct btrfs_block_group_cache,
3122                                        cache_node);
3123                 rb_erase(&block_group->cache_node,
3124                          &info->block_group_cache_tree);
3125                 RB_CLEAR_NODE(&block_group->cache_node);
3126                 spin_unlock(&info->block_group_cache_lock);
3127 
3128                 down_write(&block_group->space_info->groups_sem);
3129                 list_del(&block_group->list);
3130                 up_write(&block_group->space_info->groups_sem);
3131 
3132                 /*
3133                  * We haven't cached this block group, which means we could
3134                  * possibly have excluded extents on this block group.
3135                  */
3136                 if (block_group->cached == BTRFS_CACHE_NO ||
3137                     block_group->cached == BTRFS_CACHE_ERROR)
3138                         btrfs_free_excluded_extents(block_group);
3139 
3140                 btrfs_remove_free_space_cache(block_group);
3141                 ASSERT(block_group->cached != BTRFS_CACHE_STARTED);
3142                 ASSERT(list_empty(&block_group->dirty_list));
3143                 ASSERT(list_empty(&block_group->io_list));
3144                 ASSERT(list_empty(&block_group->bg_list));
3145                 ASSERT(atomic_read(&block_group->count) == 1);
3146                 btrfs_put_block_group(block_group);
3147 
3148                 spin_lock(&info->block_group_cache_lock);
3149         }
3150         spin_unlock(&info->block_group_cache_lock);
3151 
3152         /*
3153          * Now that all the block groups are freed, go through and free all the
3154          * space_info structs.  This is only called during the final stages of
3155          * unmount, and so we know nobody is using them.  We call
3156          * synchronize_rcu() once before we start, just to be on the safe side.
3157          */
3158         synchronize_rcu();
3159 
3160         btrfs_release_global_block_rsv(info);
3161 
3162         while (!list_empty(&info->space_info)) {
3163                 space_info = list_entry(info->space_info.next,
3164                                         struct btrfs_space_info,
3165                                         list);
3166 
3167                 /*
3168                  * Do not hide this behind enospc_debug, this is actually
3169                  * important and indicates a real bug if this happens.
3170                  */
3171                 if (WARN_ON(space_info->bytes_pinned > 0 ||
3172                             space_info->bytes_reserved > 0 ||
3173                             space_info->bytes_may_use > 0))
3174                         btrfs_dump_space_info(info, space_info, 0, 0);
3175                 list_del(&space_info->list);
3176                 btrfs_sysfs_remove_space_info(space_info);
3177         }
3178         return 0;
3179 }
