1/* 2 * Copyright (C) 2007 Oracle. All rights reserved. 3 * 4 * This program is free software; you can redistribute it and/or 5 * modify it under the terms of the GNU General Public 6 * License v2 as published by the Free Software Foundation. 7 * 8 * This program is distributed in the hope that it will be useful, 9 * but WITHOUT ANY WARRANTY; without even the implied warranty of 10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU 11 * General Public License for more details. 12 * 13 * You should have received a copy of the GNU General Public 14 * License along with this program; if not, write to the 15 * Free Software Foundation, Inc., 59 Temple Place - Suite 330, 16 * Boston, MA 021110-1307, USA. 17 */ 18 19#include <linux/kernel.h> 20#include <linux/bio.h> 21#include <linux/buffer_head.h> 22#include <linux/file.h> 23#include <linux/fs.h> 24#include <linux/pagemap.h> 25#include <linux/highmem.h> 26#include <linux/time.h> 27#include <linux/init.h> 28#include <linux/string.h> 29#include <linux/backing-dev.h> 30#include <linux/mpage.h> 31#include <linux/swap.h> 32#include <linux/writeback.h> 33#include <linux/statfs.h> 34#include <linux/compat.h> 35#include <linux/bit_spinlock.h> 36#include <linux/xattr.h> 37#include <linux/posix_acl.h> 38#include <linux/falloc.h> 39#include <linux/slab.h> 40#include <linux/ratelimit.h> 41#include <linux/mount.h> 42#include <linux/btrfs.h> 43#include <linux/blkdev.h> 44#include <linux/posix_acl_xattr.h> 45#include <linux/uio.h> 46#include "ctree.h" 47#include "disk-io.h" 48#include "transaction.h" 49#include "btrfs_inode.h" 50#include "print-tree.h" 51#include "ordered-data.h" 52#include "xattr.h" 53#include "tree-log.h" 54#include "volumes.h" 55#include "compression.h" 56#include "locking.h" 57#include "free-space-cache.h" 58#include "inode-map.h" 59#include "backref.h" 60#include "hash.h" 61#include "props.h" 62#include "qgroup.h" 63 64struct btrfs_iget_args { 65 struct btrfs_key *location; 66 struct btrfs_root *root; 67}; 68 69static const struct inode_operations btrfs_dir_inode_operations; 70static const struct inode_operations btrfs_symlink_inode_operations; 71static const struct inode_operations btrfs_dir_ro_inode_operations; 72static const struct inode_operations btrfs_special_inode_operations; 73static const struct inode_operations btrfs_file_inode_operations; 74static const struct address_space_operations btrfs_aops; 75static const struct address_space_operations btrfs_symlink_aops; 76static const struct file_operations btrfs_dir_file_operations; 77static struct extent_io_ops btrfs_extent_io_ops; 78 79static struct kmem_cache *btrfs_inode_cachep; 80static struct kmem_cache *btrfs_delalloc_work_cachep; 81struct kmem_cache *btrfs_trans_handle_cachep; 82struct kmem_cache *btrfs_transaction_cachep; 83struct kmem_cache *btrfs_path_cachep; 84struct kmem_cache *btrfs_free_space_cachep; 85 86#define S_SHIFT 12 87static unsigned char btrfs_type_by_mode[S_IFMT >> S_SHIFT] = { 88 [S_IFREG >> S_SHIFT] = BTRFS_FT_REG_FILE, 89 [S_IFDIR >> S_SHIFT] = BTRFS_FT_DIR, 90 [S_IFCHR >> S_SHIFT] = BTRFS_FT_CHRDEV, 91 [S_IFBLK >> S_SHIFT] = BTRFS_FT_BLKDEV, 92 [S_IFIFO >> S_SHIFT] = BTRFS_FT_FIFO, 93 [S_IFSOCK >> S_SHIFT] = BTRFS_FT_SOCK, 94 [S_IFLNK >> S_SHIFT] = BTRFS_FT_SYMLINK, 95}; 96 97static int btrfs_setsize(struct inode *inode, struct iattr *attr); 98static int btrfs_truncate(struct inode *inode); 99static int btrfs_finish_ordered_io(struct btrfs_ordered_extent *ordered_extent); 100static noinline int cow_file_range(struct inode *inode, 101 
struct page *locked_page, 102 u64 start, u64 end, int *page_started, 103 unsigned long *nr_written, int unlock); 104static struct extent_map *create_pinned_em(struct inode *inode, u64 start, 105 u64 len, u64 orig_start, 106 u64 block_start, u64 block_len, 107 u64 orig_block_len, u64 ram_bytes, 108 int type); 109 110static int btrfs_dirty_inode(struct inode *inode); 111 112#ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS 113void btrfs_test_inode_set_ops(struct inode *inode) 114{ 115 BTRFS_I(inode)->io_tree.ops = &btrfs_extent_io_ops; 116} 117#endif 118 119static int btrfs_init_inode_security(struct btrfs_trans_handle *trans, 120 struct inode *inode, struct inode *dir, 121 const struct qstr *qstr) 122{ 123 int err; 124 125 err = btrfs_init_acl(trans, inode, dir); 126 if (!err) 127 err = btrfs_xattr_security_init(trans, inode, dir, qstr); 128 return err; 129} 130 131/* 132 * this does all the hard work for inserting an inline extent into 133 * the btree. The caller should have done a btrfs_drop_extents so that 134 * no overlapping inline items exist in the btree 135 */ 136static int insert_inline_extent(struct btrfs_trans_handle *trans, 137 struct btrfs_path *path, int extent_inserted, 138 struct btrfs_root *root, struct inode *inode, 139 u64 start, size_t size, size_t compressed_size, 140 int compress_type, 141 struct page **compressed_pages) 142{ 143 struct extent_buffer *leaf; 144 struct page *page = NULL; 145 char *kaddr; 146 unsigned long ptr; 147 struct btrfs_file_extent_item *ei; 148 int err = 0; 149 int ret; 150 size_t cur_size = size; 151 unsigned long offset; 152 153 if (compressed_size && compressed_pages) 154 cur_size = compressed_size; 155 156 inode_add_bytes(inode, size); 157 158 if (!extent_inserted) { 159 struct btrfs_key key; 160 size_t datasize; 161 162 key.objectid = btrfs_ino(inode); 163 key.offset = start; 164 key.type = BTRFS_EXTENT_DATA_KEY; 165 166 datasize = btrfs_file_extent_calc_inline_size(cur_size); 167 path->leave_spinning = 1; 168 ret = btrfs_insert_empty_item(trans, root, path, &key, 169 datasize); 170 if (ret) { 171 err = ret; 172 goto fail; 173 } 174 } 175 leaf = path->nodes[0]; 176 ei = btrfs_item_ptr(leaf, path->slots[0], 177 struct btrfs_file_extent_item); 178 btrfs_set_file_extent_generation(leaf, ei, trans->transid); 179 btrfs_set_file_extent_type(leaf, ei, BTRFS_FILE_EXTENT_INLINE); 180 btrfs_set_file_extent_encryption(leaf, ei, 0); 181 btrfs_set_file_extent_other_encoding(leaf, ei, 0); 182 btrfs_set_file_extent_ram_bytes(leaf, ei, size); 183 ptr = btrfs_file_extent_inline_start(ei); 184 185 if (compress_type != BTRFS_COMPRESS_NONE) { 186 struct page *cpage; 187 int i = 0; 188 while (compressed_size > 0) { 189 cpage = compressed_pages[i]; 190 cur_size = min_t(unsigned long, compressed_size, 191 PAGE_CACHE_SIZE); 192 193 kaddr = kmap_atomic(cpage); 194 write_extent_buffer(leaf, kaddr, ptr, cur_size); 195 kunmap_atomic(kaddr); 196 197 i++; 198 ptr += cur_size; 199 compressed_size -= cur_size; 200 } 201 btrfs_set_file_extent_compression(leaf, ei, 202 compress_type); 203 } else { 204 page = find_get_page(inode->i_mapping, 205 start >> PAGE_CACHE_SHIFT); 206 btrfs_set_file_extent_compression(leaf, ei, 0); 207 kaddr = kmap_atomic(page); 208 offset = start & (PAGE_CACHE_SIZE - 1); 209 write_extent_buffer(leaf, kaddr + offset, ptr, size); 210 kunmap_atomic(kaddr); 211 page_cache_release(page); 212 } 213 btrfs_mark_buffer_dirty(leaf); 214 btrfs_release_path(path); 215 216 /* 217 * we're an inline extent, so nobody can 218 * extend the file past i_size without locking 219 
* a page we already have locked. 220 * 221 * We must do any isize and inode updates 222 * before we unlock the pages. Otherwise we 223 * could end up racing with unlink. 224 */ 225 BTRFS_I(inode)->disk_i_size = inode->i_size; 226 ret = btrfs_update_inode(trans, root, inode); 227 228 return ret; 229fail: 230 return err; 231} 232 233 234/* 235 * conditionally insert an inline extent into the file. This 236 * does the checks required to make sure the data is small enough 237 * to fit as an inline extent. 238 */ 239static noinline int cow_file_range_inline(struct btrfs_root *root, 240 struct inode *inode, u64 start, 241 u64 end, size_t compressed_size, 242 int compress_type, 243 struct page **compressed_pages) 244{ 245 struct btrfs_trans_handle *trans; 246 u64 isize = i_size_read(inode); 247 u64 actual_end = min(end + 1, isize); 248 u64 inline_len = actual_end - start; 249 u64 aligned_end = ALIGN(end, root->sectorsize); 250 u64 data_len = inline_len; 251 int ret; 252 struct btrfs_path *path; 253 int extent_inserted = 0; 254 u32 extent_item_size; 255 256 if (compressed_size) 257 data_len = compressed_size; 258 259 if (start > 0 || 260 actual_end > PAGE_CACHE_SIZE || 261 data_len > BTRFS_MAX_INLINE_DATA_SIZE(root) || 262 (!compressed_size && 263 (actual_end & (root->sectorsize - 1)) == 0) || 264 end + 1 < isize || 265 data_len > root->fs_info->max_inline) { 266 return 1; 267 } 268 269 path = btrfs_alloc_path(); 270 if (!path) 271 return -ENOMEM; 272 273 trans = btrfs_join_transaction(root); 274 if (IS_ERR(trans)) { 275 btrfs_free_path(path); 276 return PTR_ERR(trans); 277 } 278 trans->block_rsv = &root->fs_info->delalloc_block_rsv; 279 280 if (compressed_size && compressed_pages) 281 extent_item_size = btrfs_file_extent_calc_inline_size( 282 compressed_size); 283 else 284 extent_item_size = btrfs_file_extent_calc_inline_size( 285 inline_len); 286 287 ret = __btrfs_drop_extents(trans, root, inode, path, 288 start, aligned_end, NULL, 289 1, 1, extent_item_size, &extent_inserted); 290 if (ret) { 291 btrfs_abort_transaction(trans, root, ret); 292 goto out; 293 } 294 295 if (isize > actual_end) 296 inline_len = min_t(u64, isize, actual_end); 297 ret = insert_inline_extent(trans, path, extent_inserted, 298 root, inode, start, 299 inline_len, compressed_size, 300 compress_type, compressed_pages); 301 if (ret && ret != -ENOSPC) { 302 btrfs_abort_transaction(trans, root, ret); 303 goto out; 304 } else if (ret == -ENOSPC) { 305 ret = 1; 306 goto out; 307 } 308 309 set_bit(BTRFS_INODE_NEEDS_FULL_SYNC, &BTRFS_I(inode)->runtime_flags); 310 btrfs_delalloc_release_metadata(inode, end + 1 - start); 311 btrfs_drop_extent_cache(inode, start, aligned_end - 1, 0); 312out: 313 btrfs_free_path(path); 314 btrfs_end_transaction(trans, root); 315 return ret; 316} 317 318struct async_extent { 319 u64 start; 320 u64 ram_size; 321 u64 compressed_size; 322 struct page **pages; 323 unsigned long nr_pages; 324 int compress_type; 325 struct list_head list; 326}; 327 328struct async_cow { 329 struct inode *inode; 330 struct btrfs_root *root; 331 struct page *locked_page; 332 u64 start; 333 u64 end; 334 struct list_head extents; 335 struct btrfs_work work; 336}; 337 338static noinline int add_async_extent(struct async_cow *cow, 339 u64 start, u64 ram_size, 340 u64 compressed_size, 341 struct page **pages, 342 unsigned long nr_pages, 343 int compress_type) 344{ 345 struct async_extent *async_extent; 346 347 async_extent = kmalloc(sizeof(*async_extent), GFP_NOFS); 348 BUG_ON(!async_extent); /* -ENOMEM */ 349 async_extent->start = 
start; 350 async_extent->ram_size = ram_size; 351 async_extent->compressed_size = compressed_size; 352 async_extent->pages = pages; 353 async_extent->nr_pages = nr_pages; 354 async_extent->compress_type = compress_type; 355 list_add_tail(&async_extent->list, &cow->extents); 356 return 0; 357} 358 359static inline int inode_need_compress(struct inode *inode) 360{ 361 struct btrfs_root *root = BTRFS_I(inode)->root; 362 363 /* force compress */ 364 if (btrfs_test_opt(root, FORCE_COMPRESS)) 365 return 1; 366 /* bad compression ratios */ 367 if (BTRFS_I(inode)->flags & BTRFS_INODE_NOCOMPRESS) 368 return 0; 369 if (btrfs_test_opt(root, COMPRESS) || 370 BTRFS_I(inode)->flags & BTRFS_INODE_COMPRESS || 371 BTRFS_I(inode)->force_compress) 372 return 1; 373 return 0; 374} 375 376/* 377 * we create compressed extents in two phases. The first 378 * phase compresses a range of pages that have already been 379 * locked (both pages and state bits are locked). 380 * 381 * This is done inside an ordered work queue, and the compression 382 * is spread across many cpus. The actual IO submission is step 383 * two, and the ordered work queue takes care of making sure that 384 * happens in the same order things were put onto the queue by 385 * writepages and friends. 386 * 387 * If this code finds it can't get good compression, it puts an 388 * entry onto the work queue to write the uncompressed bytes. This 389 * makes sure that both compressed inodes and uncompressed inodes 390 * are written in the same order that the flusher thread sent them 391 * down. 392 */ 393static noinline void compress_file_range(struct inode *inode, 394 struct page *locked_page, 395 u64 start, u64 end, 396 struct async_cow *async_cow, 397 int *num_added) 398{ 399 struct btrfs_root *root = BTRFS_I(inode)->root; 400 u64 num_bytes; 401 u64 blocksize = root->sectorsize; 402 u64 actual_end; 403 u64 isize = i_size_read(inode); 404 int ret = 0; 405 struct page **pages = NULL; 406 unsigned long nr_pages; 407 unsigned long nr_pages_ret = 0; 408 unsigned long total_compressed = 0; 409 unsigned long total_in = 0; 410 unsigned long max_compressed = 128 * 1024; 411 unsigned long max_uncompressed = 128 * 1024; 412 int i; 413 int will_compress; 414 int compress_type = root->fs_info->compress_type; 415 int redirty = 0; 416 417 /* if this is a small write inside eof, kick off a defrag */ 418 if ((end - start + 1) < 16 * 1024 && 419 (start > 0 || end + 1 < BTRFS_I(inode)->disk_i_size)) 420 btrfs_add_inode_defrag(NULL, inode); 421 422 actual_end = min_t(u64, isize, end + 1); 423again: 424 will_compress = 0; 425 nr_pages = (end >> PAGE_CACHE_SHIFT) - (start >> PAGE_CACHE_SHIFT) + 1; 426 nr_pages = min(nr_pages, (128 * 1024UL) / PAGE_CACHE_SIZE); 427 428 /* 429 * we don't want to send crud past the end of i_size through 430 * compression, that's just a waste of CPU time. So, if the 431 * end of the file is before the start of our current 432 * requested range of bytes, we bail out to the uncompressed 433 * cleanup code that can deal with all of this. 434 * 435 * It isn't really the fastest way to fix things, but this is a 436 * very uncommon corner. 437 */ 438 if (actual_end <= start) 439 goto cleanup_and_bail_uncompressed; 440 441 total_compressed = actual_end - start; 442 443 /* 444 * skip compression for a small file range(<=blocksize) that 445 * isn't an inline extent, since it dosen't save disk space at all. 
446 */ 447 if (total_compressed <= blocksize && 448 (start > 0 || end + 1 < BTRFS_I(inode)->disk_i_size)) 449 goto cleanup_and_bail_uncompressed; 450 451 /* we want to make sure that amount of ram required to uncompress 452 * an extent is reasonable, so we limit the total size in ram 453 * of a compressed extent to 128k. This is a crucial number 454 * because it also controls how easily we can spread reads across 455 * cpus for decompression. 456 * 457 * We also want to make sure the amount of IO required to do 458 * a random read is reasonably small, so we limit the size of 459 * a compressed extent to 128k. 460 */ 461 total_compressed = min(total_compressed, max_uncompressed); 462 num_bytes = ALIGN(end - start + 1, blocksize); 463 num_bytes = max(blocksize, num_bytes); 464 total_in = 0; 465 ret = 0; 466 467 /* 468 * we do compression for mount -o compress and when the 469 * inode has not been flagged as nocompress. This flag can 470 * change at any time if we discover bad compression ratios. 471 */ 472 if (inode_need_compress(inode)) { 473 WARN_ON(pages); 474 pages = kcalloc(nr_pages, sizeof(struct page *), GFP_NOFS); 475 if (!pages) { 476 /* just bail out to the uncompressed code */ 477 goto cont; 478 } 479 480 if (BTRFS_I(inode)->force_compress) 481 compress_type = BTRFS_I(inode)->force_compress; 482 483 /* 484 * we need to call clear_page_dirty_for_io on each 485 * page in the range. Otherwise applications with the file 486 * mmap'd can wander in and change the page contents while 487 * we are compressing them. 488 * 489 * If the compression fails for any reason, we set the pages 490 * dirty again later on. 491 */ 492 extent_range_clear_dirty_for_io(inode, start, end); 493 redirty = 1; 494 ret = btrfs_compress_pages(compress_type, 495 inode->i_mapping, start, 496 total_compressed, pages, 497 nr_pages, &nr_pages_ret, 498 &total_in, 499 &total_compressed, 500 max_compressed); 501 502 if (!ret) { 503 unsigned long offset = total_compressed & 504 (PAGE_CACHE_SIZE - 1); 505 struct page *page = pages[nr_pages_ret - 1]; 506 char *kaddr; 507 508 /* zero the tail end of the last page, we might be 509 * sending it down to disk 510 */ 511 if (offset) { 512 kaddr = kmap_atomic(page); 513 memset(kaddr + offset, 0, 514 PAGE_CACHE_SIZE - offset); 515 kunmap_atomic(kaddr); 516 } 517 will_compress = 1; 518 } 519 } 520cont: 521 if (start == 0) { 522 /* lets try to make an inline extent */ 523 if (ret || total_in < (actual_end - start)) { 524 /* we didn't compress the entire range, try 525 * to make an uncompressed inline extent. 526 */ 527 ret = cow_file_range_inline(root, inode, start, end, 528 0, 0, NULL); 529 } else { 530 /* try making a compressed inline extent */ 531 ret = cow_file_range_inline(root, inode, start, end, 532 total_compressed, 533 compress_type, pages); 534 } 535 if (ret <= 0) { 536 unsigned long clear_flags = EXTENT_DELALLOC | 537 EXTENT_DEFRAG; 538 unsigned long page_error_op; 539 540 clear_flags |= (ret < 0) ? EXTENT_DO_ACCOUNTING : 0; 541 page_error_op = ret < 0 ? PAGE_SET_ERROR : 0; 542 543 /* 544 * inline extent creation worked or returned error, 545 * we don't need to create any more async work items. 546 * Unlock and free up our temp pages. 
547 */ 548 extent_clear_unlock_delalloc(inode, start, end, NULL, 549 clear_flags, PAGE_UNLOCK | 550 PAGE_CLEAR_DIRTY | 551 PAGE_SET_WRITEBACK | 552 page_error_op | 553 PAGE_END_WRITEBACK); 554 goto free_pages_out; 555 } 556 } 557 558 if (will_compress) { 559 /* 560 * we aren't doing an inline extent round the compressed size 561 * up to a block size boundary so the allocator does sane 562 * things 563 */ 564 total_compressed = ALIGN(total_compressed, blocksize); 565 566 /* 567 * one last check to make sure the compression is really a 568 * win, compare the page count read with the blocks on disk 569 */ 570 total_in = ALIGN(total_in, PAGE_CACHE_SIZE); 571 if (total_compressed >= total_in) { 572 will_compress = 0; 573 } else { 574 num_bytes = total_in; 575 } 576 } 577 if (!will_compress && pages) { 578 /* 579 * the compression code ran but failed to make things smaller, 580 * free any pages it allocated and our page pointer array 581 */ 582 for (i = 0; i < nr_pages_ret; i++) { 583 WARN_ON(pages[i]->mapping); 584 page_cache_release(pages[i]); 585 } 586 kfree(pages); 587 pages = NULL; 588 total_compressed = 0; 589 nr_pages_ret = 0; 590 591 /* flag the file so we don't compress in the future */ 592 if (!btrfs_test_opt(root, FORCE_COMPRESS) && 593 !(BTRFS_I(inode)->force_compress)) { 594 BTRFS_I(inode)->flags |= BTRFS_INODE_NOCOMPRESS; 595 } 596 } 597 if (will_compress) { 598 *num_added += 1; 599 600 /* the async work queues will take care of doing actual 601 * allocation on disk for these compressed pages, 602 * and will submit them to the elevator. 603 */ 604 add_async_extent(async_cow, start, num_bytes, 605 total_compressed, pages, nr_pages_ret, 606 compress_type); 607 608 if (start + num_bytes < end) { 609 start += num_bytes; 610 pages = NULL; 611 cond_resched(); 612 goto again; 613 } 614 } else { 615cleanup_and_bail_uncompressed: 616 /* 617 * No compression, but we still need to write the pages in 618 * the file we've been given so far. redirty the locked 619 * page if it corresponds to our extent and set things up 620 * for the async work queue to run cow_file_range to do 621 * the normal delalloc dance 622 */ 623 if (page_offset(locked_page) >= start && 624 page_offset(locked_page) <= end) { 625 __set_page_dirty_nobuffers(locked_page); 626 /* unlocked later on in the async handlers */ 627 } 628 if (redirty) 629 extent_range_redirty_for_io(inode, start, end); 630 add_async_extent(async_cow, start, end - start + 1, 631 0, NULL, 0, BTRFS_COMPRESS_NONE); 632 *num_added += 1; 633 } 634 635 return; 636 637free_pages_out: 638 for (i = 0; i < nr_pages_ret; i++) { 639 WARN_ON(pages[i]->mapping); 640 page_cache_release(pages[i]); 641 } 642 kfree(pages); 643} 644 645static void free_async_extent_pages(struct async_extent *async_extent) 646{ 647 int i; 648 649 if (!async_extent->pages) 650 return; 651 652 for (i = 0; i < async_extent->nr_pages; i++) { 653 WARN_ON(async_extent->pages[i]->mapping); 654 page_cache_release(async_extent->pages[i]); 655 } 656 kfree(async_extent->pages); 657 async_extent->nr_pages = 0; 658 async_extent->pages = NULL; 659} 660 661/* 662 * phase two of compressed writeback. This is the ordered portion 663 * of the code, which only gets called in the order the work was 664 * queued. We walk all the async extents created by compress_file_range 665 * and send them down to the disk. 
666 */ 667static noinline void submit_compressed_extents(struct inode *inode, 668 struct async_cow *async_cow) 669{ 670 struct async_extent *async_extent; 671 u64 alloc_hint = 0; 672 struct btrfs_key ins; 673 struct extent_map *em; 674 struct btrfs_root *root = BTRFS_I(inode)->root; 675 struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree; 676 struct extent_io_tree *io_tree; 677 int ret = 0; 678 679again: 680 while (!list_empty(&async_cow->extents)) { 681 async_extent = list_entry(async_cow->extents.next, 682 struct async_extent, list); 683 list_del(&async_extent->list); 684 685 io_tree = &BTRFS_I(inode)->io_tree; 686 687retry: 688 /* did the compression code fall back to uncompressed IO? */ 689 if (!async_extent->pages) { 690 int page_started = 0; 691 unsigned long nr_written = 0; 692 693 lock_extent(io_tree, async_extent->start, 694 async_extent->start + 695 async_extent->ram_size - 1); 696 697 /* allocate blocks */ 698 ret = cow_file_range(inode, async_cow->locked_page, 699 async_extent->start, 700 async_extent->start + 701 async_extent->ram_size - 1, 702 &page_started, &nr_written, 0); 703 704 /* JDM XXX */ 705 706 /* 707 * if page_started, cow_file_range inserted an 708 * inline extent and took care of all the unlocking 709 * and IO for us. Otherwise, we need to submit 710 * all those pages down to the drive. 711 */ 712 if (!page_started && !ret) 713 extent_write_locked_range(io_tree, 714 inode, async_extent->start, 715 async_extent->start + 716 async_extent->ram_size - 1, 717 btrfs_get_extent, 718 WB_SYNC_ALL); 719 else if (ret) 720 unlock_page(async_cow->locked_page); 721 kfree(async_extent); 722 cond_resched(); 723 continue; 724 } 725 726 lock_extent(io_tree, async_extent->start, 727 async_extent->start + async_extent->ram_size - 1); 728 729 ret = btrfs_reserve_extent(root, 730 async_extent->compressed_size, 731 async_extent->compressed_size, 732 0, alloc_hint, &ins, 1, 1); 733 if (ret) { 734 free_async_extent_pages(async_extent); 735 736 if (ret == -ENOSPC) { 737 unlock_extent(io_tree, async_extent->start, 738 async_extent->start + 739 async_extent->ram_size - 1); 740 741 /* 742 * we need to redirty the pages if we decide to 743 * fallback to uncompressed IO, otherwise we 744 * will not submit these pages down to lower 745 * layers. 
746 */ 747 extent_range_redirty_for_io(inode, 748 async_extent->start, 749 async_extent->start + 750 async_extent->ram_size - 1); 751 752 goto retry; 753 } 754 goto out_free; 755 } 756 /* 757 * here we're doing allocation and writeback of the 758 * compressed pages 759 */ 760 btrfs_drop_extent_cache(inode, async_extent->start, 761 async_extent->start + 762 async_extent->ram_size - 1, 0); 763 764 em = alloc_extent_map(); 765 if (!em) { 766 ret = -ENOMEM; 767 goto out_free_reserve; 768 } 769 em->start = async_extent->start; 770 em->len = async_extent->ram_size; 771 em->orig_start = em->start; 772 em->mod_start = em->start; 773 em->mod_len = em->len; 774 775 em->block_start = ins.objectid; 776 em->block_len = ins.offset; 777 em->orig_block_len = ins.offset; 778 em->ram_bytes = async_extent->ram_size; 779 em->bdev = root->fs_info->fs_devices->latest_bdev; 780 em->compress_type = async_extent->compress_type; 781 set_bit(EXTENT_FLAG_PINNED, &em->flags); 782 set_bit(EXTENT_FLAG_COMPRESSED, &em->flags); 783 em->generation = -1; 784 785 while (1) { 786 write_lock(&em_tree->lock); 787 ret = add_extent_mapping(em_tree, em, 1); 788 write_unlock(&em_tree->lock); 789 if (ret != -EEXIST) { 790 free_extent_map(em); 791 break; 792 } 793 btrfs_drop_extent_cache(inode, async_extent->start, 794 async_extent->start + 795 async_extent->ram_size - 1, 0); 796 } 797 798 if (ret) 799 goto out_free_reserve; 800 801 ret = btrfs_add_ordered_extent_compress(inode, 802 async_extent->start, 803 ins.objectid, 804 async_extent->ram_size, 805 ins.offset, 806 BTRFS_ORDERED_COMPRESSED, 807 async_extent->compress_type); 808 if (ret) { 809 btrfs_drop_extent_cache(inode, async_extent->start, 810 async_extent->start + 811 async_extent->ram_size - 1, 0); 812 goto out_free_reserve; 813 } 814 815 /* 816 * clear dirty, set writeback and unlock the pages. 
817 */ 818 extent_clear_unlock_delalloc(inode, async_extent->start, 819 async_extent->start + 820 async_extent->ram_size - 1, 821 NULL, EXTENT_LOCKED | EXTENT_DELALLOC, 822 PAGE_UNLOCK | PAGE_CLEAR_DIRTY | 823 PAGE_SET_WRITEBACK); 824 ret = btrfs_submit_compressed_write(inode, 825 async_extent->start, 826 async_extent->ram_size, 827 ins.objectid, 828 ins.offset, async_extent->pages, 829 async_extent->nr_pages); 830 if (ret) { 831 struct extent_io_tree *tree = &BTRFS_I(inode)->io_tree; 832 struct page *p = async_extent->pages[0]; 833 const u64 start = async_extent->start; 834 const u64 end = start + async_extent->ram_size - 1; 835 836 p->mapping = inode->i_mapping; 837 tree->ops->writepage_end_io_hook(p, start, end, 838 NULL, 0); 839 p->mapping = NULL; 840 extent_clear_unlock_delalloc(inode, start, end, NULL, 0, 841 PAGE_END_WRITEBACK | 842 PAGE_SET_ERROR); 843 free_async_extent_pages(async_extent); 844 } 845 alloc_hint = ins.objectid + ins.offset; 846 kfree(async_extent); 847 cond_resched(); 848 } 849 return; 850out_free_reserve: 851 btrfs_free_reserved_extent(root, ins.objectid, ins.offset, 1); 852out_free: 853 extent_clear_unlock_delalloc(inode, async_extent->start, 854 async_extent->start + 855 async_extent->ram_size - 1, 856 NULL, EXTENT_LOCKED | EXTENT_DELALLOC | 857 EXTENT_DEFRAG | EXTENT_DO_ACCOUNTING, 858 PAGE_UNLOCK | PAGE_CLEAR_DIRTY | 859 PAGE_SET_WRITEBACK | PAGE_END_WRITEBACK | 860 PAGE_SET_ERROR); 861 free_async_extent_pages(async_extent); 862 kfree(async_extent); 863 goto again; 864} 865 866static u64 get_extent_allocation_hint(struct inode *inode, u64 start, 867 u64 num_bytes) 868{ 869 struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree; 870 struct extent_map *em; 871 u64 alloc_hint = 0; 872 873 read_lock(&em_tree->lock); 874 em = search_extent_mapping(em_tree, start, num_bytes); 875 if (em) { 876 /* 877 * if block start isn't an actual block number then find the 878 * first block in this inode and use that as a hint. If that 879 * block is also bogus then just don't worry about it. 880 */ 881 if (em->block_start >= EXTENT_MAP_LAST_BYTE) { 882 free_extent_map(em); 883 em = search_extent_mapping(em_tree, 0, 0); 884 if (em && em->block_start < EXTENT_MAP_LAST_BYTE) 885 alloc_hint = em->block_start; 886 if (em) 887 free_extent_map(em); 888 } else { 889 alloc_hint = em->block_start; 890 free_extent_map(em); 891 } 892 } 893 read_unlock(&em_tree->lock); 894 895 return alloc_hint; 896} 897 898/* 899 * when extent_io.c finds a delayed allocation range in the file, 900 * the call backs end up in this code. The basic idea is to 901 * allocate extents on disk for the range, and create ordered data structs 902 * in ram to track those extents. 903 * 904 * locked_page is the page that writepage had locked already. We use 905 * it to make sure we don't do extra locks or unlocks. 906 * 907 * *page_started is set to one if we unlock locked_page and do everything 908 * required to start IO on it. It may be clean and already done with 909 * IO when we return. 
910 */ 911static noinline int cow_file_range(struct inode *inode, 912 struct page *locked_page, 913 u64 start, u64 end, int *page_started, 914 unsigned long *nr_written, 915 int unlock) 916{ 917 struct btrfs_root *root = BTRFS_I(inode)->root; 918 u64 alloc_hint = 0; 919 u64 num_bytes; 920 unsigned long ram_size; 921 u64 disk_num_bytes; 922 u64 cur_alloc_size; 923 u64 blocksize = root->sectorsize; 924 struct btrfs_key ins; 925 struct extent_map *em; 926 struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree; 927 int ret = 0; 928 929 if (btrfs_is_free_space_inode(inode)) { 930 WARN_ON_ONCE(1); 931 ret = -EINVAL; 932 goto out_unlock; 933 } 934 935 num_bytes = ALIGN(end - start + 1, blocksize); 936 num_bytes = max(blocksize, num_bytes); 937 disk_num_bytes = num_bytes; 938 939 /* if this is a small write inside eof, kick off defrag */ 940 if (num_bytes < 64 * 1024 && 941 (start > 0 || end + 1 < BTRFS_I(inode)->disk_i_size)) 942 btrfs_add_inode_defrag(NULL, inode); 943 944 if (start == 0) { 945 /* lets try to make an inline extent */ 946 ret = cow_file_range_inline(root, inode, start, end, 0, 0, 947 NULL); 948 if (ret == 0) { 949 extent_clear_unlock_delalloc(inode, start, end, NULL, 950 EXTENT_LOCKED | EXTENT_DELALLOC | 951 EXTENT_DEFRAG, PAGE_UNLOCK | 952 PAGE_CLEAR_DIRTY | PAGE_SET_WRITEBACK | 953 PAGE_END_WRITEBACK); 954 955 *nr_written = *nr_written + 956 (end - start + PAGE_CACHE_SIZE) / PAGE_CACHE_SIZE; 957 *page_started = 1; 958 goto out; 959 } else if (ret < 0) { 960 goto out_unlock; 961 } 962 } 963 964 BUG_ON(disk_num_bytes > 965 btrfs_super_total_bytes(root->fs_info->super_copy)); 966 967 alloc_hint = get_extent_allocation_hint(inode, start, num_bytes); 968 btrfs_drop_extent_cache(inode, start, start + num_bytes - 1, 0); 969 970 while (disk_num_bytes > 0) { 971 unsigned long op; 972 973 cur_alloc_size = disk_num_bytes; 974 ret = btrfs_reserve_extent(root, cur_alloc_size, 975 root->sectorsize, 0, alloc_hint, 976 &ins, 1, 1); 977 if (ret < 0) 978 goto out_unlock; 979 980 em = alloc_extent_map(); 981 if (!em) { 982 ret = -ENOMEM; 983 goto out_reserve; 984 } 985 em->start = start; 986 em->orig_start = em->start; 987 ram_size = ins.offset; 988 em->len = ins.offset; 989 em->mod_start = em->start; 990 em->mod_len = em->len; 991 992 em->block_start = ins.objectid; 993 em->block_len = ins.offset; 994 em->orig_block_len = ins.offset; 995 em->ram_bytes = ram_size; 996 em->bdev = root->fs_info->fs_devices->latest_bdev; 997 set_bit(EXTENT_FLAG_PINNED, &em->flags); 998 em->generation = -1; 999 1000 while (1) { 1001 write_lock(&em_tree->lock); 1002 ret = add_extent_mapping(em_tree, em, 1); 1003 write_unlock(&em_tree->lock); 1004 if (ret != -EEXIST) { 1005 free_extent_map(em); 1006 break; 1007 } 1008 btrfs_drop_extent_cache(inode, start, 1009 start + ram_size - 1, 0); 1010 } 1011 if (ret) 1012 goto out_reserve; 1013 1014 cur_alloc_size = ins.offset; 1015 ret = btrfs_add_ordered_extent(inode, start, ins.objectid, 1016 ram_size, cur_alloc_size, 0); 1017 if (ret) 1018 goto out_drop_extent_cache; 1019 1020 if (root->root_key.objectid == 1021 BTRFS_DATA_RELOC_TREE_OBJECTID) { 1022 ret = btrfs_reloc_clone_csums(inode, start, 1023 cur_alloc_size); 1024 if (ret) 1025 goto out_drop_extent_cache; 1026 } 1027 1028 if (disk_num_bytes < cur_alloc_size) 1029 break; 1030 1031 /* we're not doing compressed IO, don't unlock the first 1032 * page (which the caller expects to stay locked), don't 1033 * clear any dirty bits and don't set any writeback bits 1034 * 1035 * Do set the Private2 bit so we know this 
page was properly 1036 * setup for writepage 1037 */ 1038 op = unlock ? PAGE_UNLOCK : 0; 1039 op |= PAGE_SET_PRIVATE2; 1040 1041 extent_clear_unlock_delalloc(inode, start, 1042 start + ram_size - 1, locked_page, 1043 EXTENT_LOCKED | EXTENT_DELALLOC, 1044 op); 1045 disk_num_bytes -= cur_alloc_size; 1046 num_bytes -= cur_alloc_size; 1047 alloc_hint = ins.objectid + ins.offset; 1048 start += cur_alloc_size; 1049 } 1050out: 1051 return ret; 1052 1053out_drop_extent_cache: 1054 btrfs_drop_extent_cache(inode, start, start + ram_size - 1, 0); 1055out_reserve: 1056 btrfs_free_reserved_extent(root, ins.objectid, ins.offset, 1); 1057out_unlock: 1058 extent_clear_unlock_delalloc(inode, start, end, locked_page, 1059 EXTENT_LOCKED | EXTENT_DO_ACCOUNTING | 1060 EXTENT_DELALLOC | EXTENT_DEFRAG, 1061 PAGE_UNLOCK | PAGE_CLEAR_DIRTY | 1062 PAGE_SET_WRITEBACK | PAGE_END_WRITEBACK); 1063 goto out; 1064} 1065 1066/* 1067 * work queue call back to started compression on a file and pages 1068 */ 1069static noinline void async_cow_start(struct btrfs_work *work) 1070{ 1071 struct async_cow *async_cow; 1072 int num_added = 0; 1073 async_cow = container_of(work, struct async_cow, work); 1074 1075 compress_file_range(async_cow->inode, async_cow->locked_page, 1076 async_cow->start, async_cow->end, async_cow, 1077 &num_added); 1078 if (num_added == 0) { 1079 btrfs_add_delayed_iput(async_cow->inode); 1080 async_cow->inode = NULL; 1081 } 1082} 1083 1084/* 1085 * work queue call back to submit previously compressed pages 1086 */ 1087static noinline void async_cow_submit(struct btrfs_work *work) 1088{ 1089 struct async_cow *async_cow; 1090 struct btrfs_root *root; 1091 unsigned long nr_pages; 1092 1093 async_cow = container_of(work, struct async_cow, work); 1094 1095 root = async_cow->root; 1096 nr_pages = (async_cow->end - async_cow->start + PAGE_CACHE_SIZE) >> 1097 PAGE_CACHE_SHIFT; 1098 1099 if (atomic_sub_return(nr_pages, &root->fs_info->async_delalloc_pages) < 1100 5 * 1024 * 1024 && 1101 waitqueue_active(&root->fs_info->async_submit_wait)) 1102 wake_up(&root->fs_info->async_submit_wait); 1103 1104 if (async_cow->inode) 1105 submit_compressed_extents(async_cow->inode, async_cow); 1106} 1107 1108static noinline void async_cow_free(struct btrfs_work *work) 1109{ 1110 struct async_cow *async_cow; 1111 async_cow = container_of(work, struct async_cow, work); 1112 if (async_cow->inode) 1113 btrfs_add_delayed_iput(async_cow->inode); 1114 kfree(async_cow); 1115} 1116 1117static int cow_file_range_async(struct inode *inode, struct page *locked_page, 1118 u64 start, u64 end, int *page_started, 1119 unsigned long *nr_written) 1120{ 1121 struct async_cow *async_cow; 1122 struct btrfs_root *root = BTRFS_I(inode)->root; 1123 unsigned long nr_pages; 1124 u64 cur_end; 1125 int limit = 10 * 1024 * 1024; 1126 1127 clear_extent_bit(&BTRFS_I(inode)->io_tree, start, end, EXTENT_LOCKED, 1128 1, 0, NULL, GFP_NOFS); 1129 while (start < end) { 1130 async_cow = kmalloc(sizeof(*async_cow), GFP_NOFS); 1131 BUG_ON(!async_cow); /* -ENOMEM */ 1132 async_cow->inode = igrab(inode); 1133 async_cow->root = root; 1134 async_cow->locked_page = locked_page; 1135 async_cow->start = start; 1136 1137 if (BTRFS_I(inode)->flags & BTRFS_INODE_NOCOMPRESS && 1138 !btrfs_test_opt(root, FORCE_COMPRESS)) 1139 cur_end = end; 1140 else 1141 cur_end = min(end, start + 512 * 1024 - 1); 1142 1143 async_cow->end = cur_end; 1144 INIT_LIST_HEAD(&async_cow->extents); 1145 1146 btrfs_init_work(&async_cow->work, 1147 btrfs_delalloc_helper, 1148 async_cow_start, 
async_cow_submit, 1149 async_cow_free); 1150 1151 nr_pages = (cur_end - start + PAGE_CACHE_SIZE) >> 1152 PAGE_CACHE_SHIFT; 1153 atomic_add(nr_pages, &root->fs_info->async_delalloc_pages); 1154 1155 btrfs_queue_work(root->fs_info->delalloc_workers, 1156 &async_cow->work); 1157 1158 if (atomic_read(&root->fs_info->async_delalloc_pages) > limit) { 1159 wait_event(root->fs_info->async_submit_wait, 1160 (atomic_read(&root->fs_info->async_delalloc_pages) < 1161 limit)); 1162 } 1163 1164 while (atomic_read(&root->fs_info->async_submit_draining) && 1165 atomic_read(&root->fs_info->async_delalloc_pages)) { 1166 wait_event(root->fs_info->async_submit_wait, 1167 (atomic_read(&root->fs_info->async_delalloc_pages) == 1168 0)); 1169 } 1170 1171 *nr_written += nr_pages; 1172 start = cur_end + 1; 1173 } 1174 *page_started = 1; 1175 return 0; 1176} 1177 1178static noinline int csum_exist_in_range(struct btrfs_root *root, 1179 u64 bytenr, u64 num_bytes) 1180{ 1181 int ret; 1182 struct btrfs_ordered_sum *sums; 1183 LIST_HEAD(list); 1184 1185 ret = btrfs_lookup_csums_range(root->fs_info->csum_root, bytenr, 1186 bytenr + num_bytes - 1, &list, 0); 1187 if (ret == 0 && list_empty(&list)) 1188 return 0; 1189 1190 while (!list_empty(&list)) { 1191 sums = list_entry(list.next, struct btrfs_ordered_sum, list); 1192 list_del(&sums->list); 1193 kfree(sums); 1194 } 1195 return 1; 1196} 1197 1198/* 1199 * when nowcow writeback call back. This checks for snapshots or COW copies 1200 * of the extents that exist in the file, and COWs the file as required. 1201 * 1202 * If no cow copies or snapshots exist, we write directly to the existing 1203 * blocks on disk 1204 */ 1205static noinline int run_delalloc_nocow(struct inode *inode, 1206 struct page *locked_page, 1207 u64 start, u64 end, int *page_started, int force, 1208 unsigned long *nr_written) 1209{ 1210 struct btrfs_root *root = BTRFS_I(inode)->root; 1211 struct btrfs_trans_handle *trans; 1212 struct extent_buffer *leaf; 1213 struct btrfs_path *path; 1214 struct btrfs_file_extent_item *fi; 1215 struct btrfs_key found_key; 1216 u64 cow_start; 1217 u64 cur_offset; 1218 u64 extent_end; 1219 u64 extent_offset; 1220 u64 disk_bytenr; 1221 u64 num_bytes; 1222 u64 disk_num_bytes; 1223 u64 ram_bytes; 1224 int extent_type; 1225 int ret, err; 1226 int type; 1227 int nocow; 1228 int check_prev = 1; 1229 bool nolock; 1230 u64 ino = btrfs_ino(inode); 1231 1232 path = btrfs_alloc_path(); 1233 if (!path) { 1234 extent_clear_unlock_delalloc(inode, start, end, locked_page, 1235 EXTENT_LOCKED | EXTENT_DELALLOC | 1236 EXTENT_DO_ACCOUNTING | 1237 EXTENT_DEFRAG, PAGE_UNLOCK | 1238 PAGE_CLEAR_DIRTY | 1239 PAGE_SET_WRITEBACK | 1240 PAGE_END_WRITEBACK); 1241 return -ENOMEM; 1242 } 1243 1244 nolock = btrfs_is_free_space_inode(inode); 1245 1246 if (nolock) 1247 trans = btrfs_join_transaction_nolock(root); 1248 else 1249 trans = btrfs_join_transaction(root); 1250 1251 if (IS_ERR(trans)) { 1252 extent_clear_unlock_delalloc(inode, start, end, locked_page, 1253 EXTENT_LOCKED | EXTENT_DELALLOC | 1254 EXTENT_DO_ACCOUNTING | 1255 EXTENT_DEFRAG, PAGE_UNLOCK | 1256 PAGE_CLEAR_DIRTY | 1257 PAGE_SET_WRITEBACK | 1258 PAGE_END_WRITEBACK); 1259 btrfs_free_path(path); 1260 return PTR_ERR(trans); 1261 } 1262 1263 trans->block_rsv = &root->fs_info->delalloc_block_rsv; 1264 1265 cow_start = (u64)-1; 1266 cur_offset = start; 1267 while (1) { 1268 ret = btrfs_lookup_file_extent(trans, root, path, ino, 1269 cur_offset, 0); 1270 if (ret < 0) 1271 goto error; 1272 if (ret > 0 && path->slots[0] > 0 && check_prev) { 
1273 leaf = path->nodes[0]; 1274 btrfs_item_key_to_cpu(leaf, &found_key, 1275 path->slots[0] - 1); 1276 if (found_key.objectid == ino && 1277 found_key.type == BTRFS_EXTENT_DATA_KEY) 1278 path->slots[0]--; 1279 } 1280 check_prev = 0; 1281next_slot: 1282 leaf = path->nodes[0]; 1283 if (path->slots[0] >= btrfs_header_nritems(leaf)) { 1284 ret = btrfs_next_leaf(root, path); 1285 if (ret < 0) 1286 goto error; 1287 if (ret > 0) 1288 break; 1289 leaf = path->nodes[0]; 1290 } 1291 1292 nocow = 0; 1293 disk_bytenr = 0; 1294 num_bytes = 0; 1295 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]); 1296 1297 if (found_key.objectid > ino) 1298 break; 1299 if (WARN_ON_ONCE(found_key.objectid < ino) || 1300 found_key.type < BTRFS_EXTENT_DATA_KEY) { 1301 path->slots[0]++; 1302 goto next_slot; 1303 } 1304 if (found_key.type > BTRFS_EXTENT_DATA_KEY || 1305 found_key.offset > end) 1306 break; 1307 1308 if (found_key.offset > cur_offset) { 1309 extent_end = found_key.offset; 1310 extent_type = 0; 1311 goto out_check; 1312 } 1313 1314 fi = btrfs_item_ptr(leaf, path->slots[0], 1315 struct btrfs_file_extent_item); 1316 extent_type = btrfs_file_extent_type(leaf, fi); 1317 1318 ram_bytes = btrfs_file_extent_ram_bytes(leaf, fi); 1319 if (extent_type == BTRFS_FILE_EXTENT_REG || 1320 extent_type == BTRFS_FILE_EXTENT_PREALLOC) { 1321 disk_bytenr = btrfs_file_extent_disk_bytenr(leaf, fi); 1322 extent_offset = btrfs_file_extent_offset(leaf, fi); 1323 extent_end = found_key.offset + 1324 btrfs_file_extent_num_bytes(leaf, fi); 1325 disk_num_bytes = 1326 btrfs_file_extent_disk_num_bytes(leaf, fi); 1327 if (extent_end <= start) { 1328 path->slots[0]++; 1329 goto next_slot; 1330 } 1331 if (disk_bytenr == 0) 1332 goto out_check; 1333 if (btrfs_file_extent_compression(leaf, fi) || 1334 btrfs_file_extent_encryption(leaf, fi) || 1335 btrfs_file_extent_other_encoding(leaf, fi)) 1336 goto out_check; 1337 if (extent_type == BTRFS_FILE_EXTENT_REG && !force) 1338 goto out_check; 1339 if (btrfs_extent_readonly(root, disk_bytenr)) 1340 goto out_check; 1341 if (btrfs_cross_ref_exist(trans, root, ino, 1342 found_key.offset - 1343 extent_offset, disk_bytenr)) 1344 goto out_check; 1345 disk_bytenr += extent_offset; 1346 disk_bytenr += cur_offset - found_key.offset; 1347 num_bytes = min(end + 1, extent_end) - cur_offset; 1348 /* 1349 * if there are pending snapshots for this root, 1350 * we fall into common COW way. 1351 */ 1352 if (!nolock) { 1353 err = btrfs_start_write_no_snapshoting(root); 1354 if (!err) 1355 goto out_check; 1356 } 1357 /* 1358 * force cow if csum exists in the range. 1359 * this ensure that csum for a given extent are 1360 * either valid or do not exist. 
1361 */ 1362 if (csum_exist_in_range(root, disk_bytenr, num_bytes)) 1363 goto out_check; 1364 nocow = 1; 1365 } else if (extent_type == BTRFS_FILE_EXTENT_INLINE) { 1366 extent_end = found_key.offset + 1367 btrfs_file_extent_inline_len(leaf, 1368 path->slots[0], fi); 1369 extent_end = ALIGN(extent_end, root->sectorsize); 1370 } else { 1371 BUG_ON(1); 1372 } 1373out_check: 1374 if (extent_end <= start) { 1375 path->slots[0]++; 1376 if (!nolock && nocow) 1377 btrfs_end_write_no_snapshoting(root); 1378 goto next_slot; 1379 } 1380 if (!nocow) { 1381 if (cow_start == (u64)-1) 1382 cow_start = cur_offset; 1383 cur_offset = extent_end; 1384 if (cur_offset > end) 1385 break; 1386 path->slots[0]++; 1387 goto next_slot; 1388 } 1389 1390 btrfs_release_path(path); 1391 if (cow_start != (u64)-1) { 1392 ret = cow_file_range(inode, locked_page, 1393 cow_start, found_key.offset - 1, 1394 page_started, nr_written, 1); 1395 if (ret) { 1396 if (!nolock && nocow) 1397 btrfs_end_write_no_snapshoting(root); 1398 goto error; 1399 } 1400 cow_start = (u64)-1; 1401 } 1402 1403 if (extent_type == BTRFS_FILE_EXTENT_PREALLOC) { 1404 struct extent_map *em; 1405 struct extent_map_tree *em_tree; 1406 em_tree = &BTRFS_I(inode)->extent_tree; 1407 em = alloc_extent_map(); 1408 BUG_ON(!em); /* -ENOMEM */ 1409 em->start = cur_offset; 1410 em->orig_start = found_key.offset - extent_offset; 1411 em->len = num_bytes; 1412 em->block_len = num_bytes; 1413 em->block_start = disk_bytenr; 1414 em->orig_block_len = disk_num_bytes; 1415 em->ram_bytes = ram_bytes; 1416 em->bdev = root->fs_info->fs_devices->latest_bdev; 1417 em->mod_start = em->start; 1418 em->mod_len = em->len; 1419 set_bit(EXTENT_FLAG_PINNED, &em->flags); 1420 set_bit(EXTENT_FLAG_FILLING, &em->flags); 1421 em->generation = -1; 1422 while (1) { 1423 write_lock(&em_tree->lock); 1424 ret = add_extent_mapping(em_tree, em, 1); 1425 write_unlock(&em_tree->lock); 1426 if (ret != -EEXIST) { 1427 free_extent_map(em); 1428 break; 1429 } 1430 btrfs_drop_extent_cache(inode, em->start, 1431 em->start + em->len - 1, 0); 1432 } 1433 type = BTRFS_ORDERED_PREALLOC; 1434 } else { 1435 type = BTRFS_ORDERED_NOCOW; 1436 } 1437 1438 ret = btrfs_add_ordered_extent(inode, cur_offset, disk_bytenr, 1439 num_bytes, num_bytes, type); 1440 BUG_ON(ret); /* -ENOMEM */ 1441 1442 if (root->root_key.objectid == 1443 BTRFS_DATA_RELOC_TREE_OBJECTID) { 1444 ret = btrfs_reloc_clone_csums(inode, cur_offset, 1445 num_bytes); 1446 if (ret) { 1447 if (!nolock && nocow) 1448 btrfs_end_write_no_snapshoting(root); 1449 goto error; 1450 } 1451 } 1452 1453 extent_clear_unlock_delalloc(inode, cur_offset, 1454 cur_offset + num_bytes - 1, 1455 locked_page, EXTENT_LOCKED | 1456 EXTENT_DELALLOC, PAGE_UNLOCK | 1457 PAGE_SET_PRIVATE2); 1458 if (!nolock && nocow) 1459 btrfs_end_write_no_snapshoting(root); 1460 cur_offset = extent_end; 1461 if (cur_offset > end) 1462 break; 1463 } 1464 btrfs_release_path(path); 1465 1466 if (cur_offset <= end && cow_start == (u64)-1) { 1467 cow_start = cur_offset; 1468 cur_offset = end; 1469 } 1470 1471 if (cow_start != (u64)-1) { 1472 ret = cow_file_range(inode, locked_page, cow_start, end, 1473 page_started, nr_written, 1); 1474 if (ret) 1475 goto error; 1476 } 1477 1478error: 1479 err = btrfs_end_transaction(trans, root); 1480 if (!ret) 1481 ret = err; 1482 1483 if (ret && cur_offset < end) 1484 extent_clear_unlock_delalloc(inode, cur_offset, end, 1485 locked_page, EXTENT_LOCKED | 1486 EXTENT_DELALLOC | EXTENT_DEFRAG | 1487 EXTENT_DO_ACCOUNTING, PAGE_UNLOCK | 1488 PAGE_CLEAR_DIRTY | 1489 
PAGE_SET_WRITEBACK | 1490 PAGE_END_WRITEBACK); 1491 btrfs_free_path(path); 1492 return ret; 1493} 1494 1495static inline int need_force_cow(struct inode *inode, u64 start, u64 end) 1496{ 1497 1498 if (!(BTRFS_I(inode)->flags & BTRFS_INODE_NODATACOW) && 1499 !(BTRFS_I(inode)->flags & BTRFS_INODE_PREALLOC)) 1500 return 0; 1501 1502 /* 1503 * @defrag_bytes is a hint value, no spinlock held here, 1504 * if is not zero, it means the file is defragging. 1505 * Force cow if given extent needs to be defragged. 1506 */ 1507 if (BTRFS_I(inode)->defrag_bytes && 1508 test_range_bit(&BTRFS_I(inode)->io_tree, start, end, 1509 EXTENT_DEFRAG, 0, NULL)) 1510 return 1; 1511 1512 return 0; 1513} 1514 1515/* 1516 * extent_io.c call back to do delayed allocation processing 1517 */ 1518static int run_delalloc_range(struct inode *inode, struct page *locked_page, 1519 u64 start, u64 end, int *page_started, 1520 unsigned long *nr_written) 1521{ 1522 int ret; 1523 int force_cow = need_force_cow(inode, start, end); 1524 1525 if (BTRFS_I(inode)->flags & BTRFS_INODE_NODATACOW && !force_cow) { 1526 ret = run_delalloc_nocow(inode, locked_page, start, end, 1527 page_started, 1, nr_written); 1528 } else if (BTRFS_I(inode)->flags & BTRFS_INODE_PREALLOC && !force_cow) { 1529 ret = run_delalloc_nocow(inode, locked_page, start, end, 1530 page_started, 0, nr_written); 1531 } else if (!inode_need_compress(inode)) { 1532 ret = cow_file_range(inode, locked_page, start, end, 1533 page_started, nr_written, 1); 1534 } else { 1535 set_bit(BTRFS_INODE_HAS_ASYNC_EXTENT, 1536 &BTRFS_I(inode)->runtime_flags); 1537 ret = cow_file_range_async(inode, locked_page, start, end, 1538 page_started, nr_written); 1539 } 1540 return ret; 1541} 1542 1543static void btrfs_split_extent_hook(struct inode *inode, 1544 struct extent_state *orig, u64 split) 1545{ 1546 u64 size; 1547 1548 /* not delalloc, ignore it */ 1549 if (!(orig->state & EXTENT_DELALLOC)) 1550 return; 1551 1552 size = orig->end - orig->start + 1; 1553 if (size > BTRFS_MAX_EXTENT_SIZE) { 1554 u64 num_extents; 1555 u64 new_size; 1556 1557 /* 1558 * See the explanation in btrfs_merge_extent_hook, the same 1559 * applies here, just in reverse. 1560 */ 1561 new_size = orig->end - split + 1; 1562 num_extents = div64_u64(new_size + BTRFS_MAX_EXTENT_SIZE - 1, 1563 BTRFS_MAX_EXTENT_SIZE); 1564 new_size = split - orig->start; 1565 num_extents += div64_u64(new_size + BTRFS_MAX_EXTENT_SIZE - 1, 1566 BTRFS_MAX_EXTENT_SIZE); 1567 if (div64_u64(size + BTRFS_MAX_EXTENT_SIZE - 1, 1568 BTRFS_MAX_EXTENT_SIZE) >= num_extents) 1569 return; 1570 } 1571 1572 spin_lock(&BTRFS_I(inode)->lock); 1573 BTRFS_I(inode)->outstanding_extents++; 1574 spin_unlock(&BTRFS_I(inode)->lock); 1575} 1576 1577/* 1578 * extent_io.c merge_extent_hook, used to track merged delayed allocation 1579 * extents so we can keep track of new extents that are just merged onto old 1580 * extents, such as when we are doing sequential writes, so we can properly 1581 * account for the metadata space we'll need. 
1582 */ 1583static void btrfs_merge_extent_hook(struct inode *inode, 1584 struct extent_state *new, 1585 struct extent_state *other) 1586{ 1587 u64 new_size, old_size; 1588 u64 num_extents; 1589 1590 /* not delalloc, ignore it */ 1591 if (!(other->state & EXTENT_DELALLOC)) 1592 return; 1593 1594 if (new->start > other->start) 1595 new_size = new->end - other->start + 1; 1596 else 1597 new_size = other->end - new->start + 1; 1598 1599 /* we're not bigger than the max, unreserve the space and go */ 1600 if (new_size <= BTRFS_MAX_EXTENT_SIZE) { 1601 spin_lock(&BTRFS_I(inode)->lock); 1602 BTRFS_I(inode)->outstanding_extents--; 1603 spin_unlock(&BTRFS_I(inode)->lock); 1604 return; 1605 } 1606 1607 /* 1608 * We have to add up either side to figure out how many extents were 1609 * accounted for before we merged into one big extent. If the number of 1610 * extents we accounted for is <= the amount we need for the new range 1611 * then we can return, otherwise drop. Think of it like this 1612 * 1613 * [ 4k][MAX_SIZE] 1614 * 1615 * So we've grown the extent by a MAX_SIZE extent, this would mean we 1616 * need 2 outstanding extents, on one side we have 1 and the other side 1617 * we have 1 so they are == and we can return. But in this case 1618 * 1619 * [MAX_SIZE+4k][MAX_SIZE+4k] 1620 * 1621 * Each range on their own accounts for 2 extents, but merged together 1622 * they are only 3 extents worth of accounting, so we need to drop in 1623 * this case. 1624 */ 1625 old_size = other->end - other->start + 1; 1626 num_extents = div64_u64(old_size + BTRFS_MAX_EXTENT_SIZE - 1, 1627 BTRFS_MAX_EXTENT_SIZE); 1628 old_size = new->end - new->start + 1; 1629 num_extents += div64_u64(old_size + BTRFS_MAX_EXTENT_SIZE - 1, 1630 BTRFS_MAX_EXTENT_SIZE); 1631 1632 if (div64_u64(new_size + BTRFS_MAX_EXTENT_SIZE - 1, 1633 BTRFS_MAX_EXTENT_SIZE) >= num_extents) 1634 return; 1635 1636 spin_lock(&BTRFS_I(inode)->lock); 1637 BTRFS_I(inode)->outstanding_extents--; 1638 spin_unlock(&BTRFS_I(inode)->lock); 1639} 1640 1641static void btrfs_add_delalloc_inodes(struct btrfs_root *root, 1642 struct inode *inode) 1643{ 1644 spin_lock(&root->delalloc_lock); 1645 if (list_empty(&BTRFS_I(inode)->delalloc_inodes)) { 1646 list_add_tail(&BTRFS_I(inode)->delalloc_inodes, 1647 &root->delalloc_inodes); 1648 set_bit(BTRFS_INODE_IN_DELALLOC_LIST, 1649 &BTRFS_I(inode)->runtime_flags); 1650 root->nr_delalloc_inodes++; 1651 if (root->nr_delalloc_inodes == 1) { 1652 spin_lock(&root->fs_info->delalloc_root_lock); 1653 BUG_ON(!list_empty(&root->delalloc_root)); 1654 list_add_tail(&root->delalloc_root, 1655 &root->fs_info->delalloc_roots); 1656 spin_unlock(&root->fs_info->delalloc_root_lock); 1657 } 1658 } 1659 spin_unlock(&root->delalloc_lock); 1660} 1661 1662static void btrfs_del_delalloc_inode(struct btrfs_root *root, 1663 struct inode *inode) 1664{ 1665 spin_lock(&root->delalloc_lock); 1666 if (!list_empty(&BTRFS_I(inode)->delalloc_inodes)) { 1667 list_del_init(&BTRFS_I(inode)->delalloc_inodes); 1668 clear_bit(BTRFS_INODE_IN_DELALLOC_LIST, 1669 &BTRFS_I(inode)->runtime_flags); 1670 root->nr_delalloc_inodes--; 1671 if (!root->nr_delalloc_inodes) { 1672 spin_lock(&root->fs_info->delalloc_root_lock); 1673 BUG_ON(list_empty(&root->delalloc_root)); 1674 list_del_init(&root->delalloc_root); 1675 spin_unlock(&root->fs_info->delalloc_root_lock); 1676 } 1677 } 1678 spin_unlock(&root->delalloc_lock); 1679} 1680 1681/* 1682 * extent_io.c set_bit_hook, used to track delayed allocation 1683 * bytes in this file, and to maintain the list of inodes that 1684 * 
have pending delalloc work to be done. 1685 */ 1686static void btrfs_set_bit_hook(struct inode *inode, 1687 struct extent_state *state, unsigned *bits) 1688{ 1689 1690 if ((*bits & EXTENT_DEFRAG) && !(*bits & EXTENT_DELALLOC)) 1691 WARN_ON(1); 1692 /* 1693 * set_bit and clear bit hooks normally require _irqsave/restore 1694 * but in this case, we are only testing for the DELALLOC 1695 * bit, which is only set or cleared with irqs on 1696 */ 1697 if (!(state->state & EXTENT_DELALLOC) && (*bits & EXTENT_DELALLOC)) { 1698 struct btrfs_root *root = BTRFS_I(inode)->root; 1699 u64 len = state->end + 1 - state->start; 1700 bool do_list = !btrfs_is_free_space_inode(inode); 1701 1702 if (*bits & EXTENT_FIRST_DELALLOC) { 1703 *bits &= ~EXTENT_FIRST_DELALLOC; 1704 } else { 1705 spin_lock(&BTRFS_I(inode)->lock); 1706 BTRFS_I(inode)->outstanding_extents++; 1707 spin_unlock(&BTRFS_I(inode)->lock); 1708 } 1709 1710 /* For sanity tests */ 1711 if (btrfs_test_is_dummy_root(root)) 1712 return; 1713 1714 __percpu_counter_add(&root->fs_info->delalloc_bytes, len, 1715 root->fs_info->delalloc_batch); 1716 spin_lock(&BTRFS_I(inode)->lock); 1717 BTRFS_I(inode)->delalloc_bytes += len; 1718 if (*bits & EXTENT_DEFRAG) 1719 BTRFS_I(inode)->defrag_bytes += len; 1720 if (do_list && !test_bit(BTRFS_INODE_IN_DELALLOC_LIST, 1721 &BTRFS_I(inode)->runtime_flags)) 1722 btrfs_add_delalloc_inodes(root, inode); 1723 spin_unlock(&BTRFS_I(inode)->lock); 1724 } 1725} 1726 1727/* 1728 * extent_io.c clear_bit_hook, see set_bit_hook for why 1729 */ 1730static void btrfs_clear_bit_hook(struct inode *inode, 1731 struct extent_state *state, 1732 unsigned *bits) 1733{ 1734 u64 len = state->end + 1 - state->start; 1735 u64 num_extents = div64_u64(len + BTRFS_MAX_EXTENT_SIZE -1, 1736 BTRFS_MAX_EXTENT_SIZE); 1737 1738 spin_lock(&BTRFS_I(inode)->lock); 1739 if ((state->state & EXTENT_DEFRAG) && (*bits & EXTENT_DEFRAG)) 1740 BTRFS_I(inode)->defrag_bytes -= len; 1741 spin_unlock(&BTRFS_I(inode)->lock); 1742 1743 /* 1744 * set_bit and clear bit hooks normally require _irqsave/restore 1745 * but in this case, we are only testing for the DELALLOC 1746 * bit, which is only set or cleared with irqs on 1747 */ 1748 if ((state->state & EXTENT_DELALLOC) && (*bits & EXTENT_DELALLOC)) { 1749 struct btrfs_root *root = BTRFS_I(inode)->root; 1750 bool do_list = !btrfs_is_free_space_inode(inode); 1751 1752 if (*bits & EXTENT_FIRST_DELALLOC) { 1753 *bits &= ~EXTENT_FIRST_DELALLOC; 1754 } else if (!(*bits & EXTENT_DO_ACCOUNTING)) { 1755 spin_lock(&BTRFS_I(inode)->lock); 1756 BTRFS_I(inode)->outstanding_extents -= num_extents; 1757 spin_unlock(&BTRFS_I(inode)->lock); 1758 } 1759 1760 /* 1761 * We don't reserve metadata space for space cache inodes so we 1762 * don't need to call dellalloc_release_metadata if there is an 1763 * error. 1764 */ 1765 if (*bits & EXTENT_DO_ACCOUNTING && 1766 root != root->fs_info->tree_root) 1767 btrfs_delalloc_release_metadata(inode, len); 1768 1769 /* For sanity tests. 
*/ 1770 if (btrfs_test_is_dummy_root(root)) 1771 return; 1772 1773 if (root->root_key.objectid != BTRFS_DATA_RELOC_TREE_OBJECTID 1774 && do_list && !(state->state & EXTENT_NORESERVE)) 1775 btrfs_free_reserved_data_space(inode, len); 1776 1777 __percpu_counter_add(&root->fs_info->delalloc_bytes, -len, 1778 root->fs_info->delalloc_batch); 1779 spin_lock(&BTRFS_I(inode)->lock); 1780 BTRFS_I(inode)->delalloc_bytes -= len; 1781 if (do_list && BTRFS_I(inode)->delalloc_bytes == 0 && 1782 test_bit(BTRFS_INODE_IN_DELALLOC_LIST, 1783 &BTRFS_I(inode)->runtime_flags)) 1784 btrfs_del_delalloc_inode(root, inode); 1785 spin_unlock(&BTRFS_I(inode)->lock); 1786 } 1787} 1788 1789/* 1790 * extent_io.c merge_bio_hook, this must check the chunk tree to make sure 1791 * we don't create bios that span stripes or chunks 1792 */ 1793int btrfs_merge_bio_hook(int rw, struct page *page, unsigned long offset, 1794 size_t size, struct bio *bio, 1795 unsigned long bio_flags) 1796{ 1797 struct btrfs_root *root = BTRFS_I(page->mapping->host)->root; 1798 u64 logical = (u64)bio->bi_iter.bi_sector << 9; 1799 u64 length = 0; 1800 u64 map_length; 1801 int ret; 1802 1803 if (bio_flags & EXTENT_BIO_COMPRESSED) 1804 return 0; 1805 1806 length = bio->bi_iter.bi_size; 1807 map_length = length; 1808 ret = btrfs_map_block(root->fs_info, rw, logical, 1809 &map_length, NULL, 0); 1810 /* Will always return 0 with map_multi == NULL */ 1811 BUG_ON(ret < 0); 1812 if (map_length < length + size) 1813 return 1; 1814 return 0; 1815} 1816 1817/* 1818 * in order to insert checksums into the metadata in large chunks, 1819 * we wait until bio submission time. All the pages in the bio are 1820 * checksummed and sums are attached onto the ordered extent record. 1821 * 1822 * At IO completion time the cums attached on the ordered extent record 1823 * are inserted into the btree 1824 */ 1825static int __btrfs_submit_bio_start(struct inode *inode, int rw, 1826 struct bio *bio, int mirror_num, 1827 unsigned long bio_flags, 1828 u64 bio_offset) 1829{ 1830 struct btrfs_root *root = BTRFS_I(inode)->root; 1831 int ret = 0; 1832 1833 ret = btrfs_csum_one_bio(root, inode, bio, 0, 0); 1834 BUG_ON(ret); /* -ENOMEM */ 1835 return 0; 1836} 1837 1838/* 1839 * in order to insert checksums into the metadata in large chunks, 1840 * we wait until bio submission time. All the pages in the bio are 1841 * checksummed and sums are attached onto the ordered extent record. 1842 * 1843 * At IO completion time the cums attached on the ordered extent record 1844 * are inserted into the btree 1845 */ 1846static int __btrfs_submit_bio_done(struct inode *inode, int rw, struct bio *bio, 1847 int mirror_num, unsigned long bio_flags, 1848 u64 bio_offset) 1849{ 1850 struct btrfs_root *root = BTRFS_I(inode)->root; 1851 int ret; 1852 1853 ret = btrfs_map_bio(root, rw, bio, mirror_num, 1); 1854 if (ret) 1855 bio_endio(bio, ret); 1856 return ret; 1857} 1858 1859/* 1860 * extent_io.c submission hook. 
This does the right thing for csum calculation 1861 * on write, or reading the csums from the tree before a read 1862 */ 1863static int btrfs_submit_bio_hook(struct inode *inode, int rw, struct bio *bio, 1864 int mirror_num, unsigned long bio_flags, 1865 u64 bio_offset) 1866{ 1867 struct btrfs_root *root = BTRFS_I(inode)->root; 1868 int ret = 0; 1869 int skip_sum; 1870 int metadata = 0; 1871 int async = !atomic_read(&BTRFS_I(inode)->sync_writers); 1872 1873 skip_sum = BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM; 1874 1875 if (btrfs_is_free_space_inode(inode)) 1876 metadata = 2; 1877 1878 if (!(rw & REQ_WRITE)) { 1879 ret = btrfs_bio_wq_end_io(root->fs_info, bio, metadata); 1880 if (ret) 1881 goto out; 1882 1883 if (bio_flags & EXTENT_BIO_COMPRESSED) { 1884 ret = btrfs_submit_compressed_read(inode, bio, 1885 mirror_num, 1886 bio_flags); 1887 goto out; 1888 } else if (!skip_sum) { 1889 ret = btrfs_lookup_bio_sums(root, inode, bio, NULL); 1890 if (ret) 1891 goto out; 1892 } 1893 goto mapit; 1894 } else if (async && !skip_sum) { 1895 /* csum items have already been cloned */ 1896 if (root->root_key.objectid == BTRFS_DATA_RELOC_TREE_OBJECTID) 1897 goto mapit; 1898 /* we're doing a write, do the async checksumming */ 1899 ret = btrfs_wq_submit_bio(BTRFS_I(inode)->root->fs_info, 1900 inode, rw, bio, mirror_num, 1901 bio_flags, bio_offset, 1902 __btrfs_submit_bio_start, 1903 __btrfs_submit_bio_done); 1904 goto out; 1905 } else if (!skip_sum) { 1906 ret = btrfs_csum_one_bio(root, inode, bio, 0, 0); 1907 if (ret) 1908 goto out; 1909 } 1910 1911mapit: 1912 ret = btrfs_map_bio(root, rw, bio, mirror_num, 0); 1913 1914out: 1915 if (ret < 0) 1916 bio_endio(bio, ret); 1917 return ret; 1918} 1919 1920/* 1921 * given a list of ordered sums record them in the inode. This happens 1922 * at IO completion time based on sums calculated at bio submission time. 
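 * (The csums themselves were attached by btrfs_csum_one_bio() in the
 * submission hooks above; here we only walk the list and write each sum
 * into the csum tree with btrfs_csum_file_blocks().)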
1923 */ 1924static noinline int add_pending_csums(struct btrfs_trans_handle *trans, 1925 struct inode *inode, u64 file_offset, 1926 struct list_head *list) 1927{ 1928 struct btrfs_ordered_sum *sum; 1929 1930 list_for_each_entry(sum, list, list) { 1931 trans->adding_csums = 1; 1932 btrfs_csum_file_blocks(trans, 1933 BTRFS_I(inode)->root->fs_info->csum_root, sum); 1934 trans->adding_csums = 0; 1935 } 1936 return 0; 1937} 1938 1939int btrfs_set_extent_delalloc(struct inode *inode, u64 start, u64 end, 1940 struct extent_state **cached_state) 1941{ 1942 WARN_ON((end & (PAGE_CACHE_SIZE - 1)) == 0); 1943 return set_extent_delalloc(&BTRFS_I(inode)->io_tree, start, end, 1944 cached_state, GFP_NOFS); 1945} 1946 1947/* see btrfs_writepage_start_hook for details on why this is required */ 1948struct btrfs_writepage_fixup { 1949 struct page *page; 1950 struct btrfs_work work; 1951}; 1952 1953static void btrfs_writepage_fixup_worker(struct btrfs_work *work) 1954{ 1955 struct btrfs_writepage_fixup *fixup; 1956 struct btrfs_ordered_extent *ordered; 1957 struct extent_state *cached_state = NULL; 1958 struct page *page; 1959 struct inode *inode; 1960 u64 page_start; 1961 u64 page_end; 1962 int ret; 1963 1964 fixup = container_of(work, struct btrfs_writepage_fixup, work); 1965 page = fixup->page; 1966again: 1967 lock_page(page); 1968 if (!page->mapping || !PageDirty(page) || !PageChecked(page)) { 1969 ClearPageChecked(page); 1970 goto out_page; 1971 } 1972 1973 inode = page->mapping->host; 1974 page_start = page_offset(page); 1975 page_end = page_offset(page) + PAGE_CACHE_SIZE - 1; 1976 1977 lock_extent_bits(&BTRFS_I(inode)->io_tree, page_start, page_end, 0, 1978 &cached_state); 1979 1980 /* already ordered? We're done */ 1981 if (PagePrivate2(page)) 1982 goto out; 1983 1984 ordered = btrfs_lookup_ordered_extent(inode, page_start); 1985 if (ordered) { 1986 unlock_extent_cached(&BTRFS_I(inode)->io_tree, page_start, 1987 page_end, &cached_state, GFP_NOFS); 1988 unlock_page(page); 1989 btrfs_start_ordered_extent(inode, ordered, 1); 1990 btrfs_put_ordered_extent(ordered); 1991 goto again; 1992 } 1993 1994 ret = btrfs_delalloc_reserve_space(inode, PAGE_CACHE_SIZE); 1995 if (ret) { 1996 mapping_set_error(page->mapping, ret); 1997 end_extent_writepage(page, ret, page_start, page_end); 1998 ClearPageChecked(page); 1999 goto out; 2000 } 2001 2002 btrfs_set_extent_delalloc(inode, page_start, page_end, &cached_state); 2003 ClearPageChecked(page); 2004 set_page_dirty(page); 2005out: 2006 unlock_extent_cached(&BTRFS_I(inode)->io_tree, page_start, page_end, 2007 &cached_state, GFP_NOFS); 2008out_page: 2009 unlock_page(page); 2010 page_cache_release(page); 2011 kfree(fixup); 2012} 2013 2014/* 2015 * There are a few paths in the higher layers of the kernel that directly 2016 * set the page dirty bit without asking the filesystem if it is a 2017 * good idea. This causes problems because we want to make sure COW 2018 * properly happens and the data=ordered rules are followed. 2019 * 2020 * In our case any range that doesn't have the ORDERED bit set 2021 * hasn't been properly setup for IO. We kick off an async process 2022 * to fix it up. The async helper will wait for ordered extents, set 2023 * the delalloc bit and make it safe to write the page. 
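 *
 * Return values, as implemented below: 0 when the range is already covered
 * by an ordered extent, -EAGAIN when a fixup is already pending or the
 * fixup structure cannot be allocated, and -EBUSY once the async fixup
 * work has been queued.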
2024 */ 2025static int btrfs_writepage_start_hook(struct page *page, u64 start, u64 end) 2026{ 2027 struct inode *inode = page->mapping->host; 2028 struct btrfs_writepage_fixup *fixup; 2029 struct btrfs_root *root = BTRFS_I(inode)->root; 2030 2031 /* this page is properly in the ordered list */ 2032 if (TestClearPagePrivate2(page)) 2033 return 0; 2034 2035 if (PageChecked(page)) 2036 return -EAGAIN; 2037 2038 fixup = kzalloc(sizeof(*fixup), GFP_NOFS); 2039 if (!fixup) 2040 return -EAGAIN; 2041 2042 SetPageChecked(page); 2043 page_cache_get(page); 2044 btrfs_init_work(&fixup->work, btrfs_fixup_helper, 2045 btrfs_writepage_fixup_worker, NULL, NULL); 2046 fixup->page = page; 2047 btrfs_queue_work(root->fs_info->fixup_workers, &fixup->work); 2048 return -EBUSY; 2049} 2050 2051static int insert_reserved_file_extent(struct btrfs_trans_handle *trans, 2052 struct inode *inode, u64 file_pos, 2053 u64 disk_bytenr, u64 disk_num_bytes, 2054 u64 num_bytes, u64 ram_bytes, 2055 u8 compression, u8 encryption, 2056 u16 other_encoding, int extent_type) 2057{ 2058 struct btrfs_root *root = BTRFS_I(inode)->root; 2059 struct btrfs_file_extent_item *fi; 2060 struct btrfs_path *path; 2061 struct extent_buffer *leaf; 2062 struct btrfs_key ins; 2063 int extent_inserted = 0; 2064 int ret; 2065 2066 path = btrfs_alloc_path(); 2067 if (!path) 2068 return -ENOMEM; 2069 2070 /* 2071 * we may be replacing one extent in the tree with another. 2072 * The new extent is pinned in the extent map, and we don't want 2073 * to drop it from the cache until it is completely in the btree. 2074 * 2075 * So, tell btrfs_drop_extents to leave this extent in the cache. 2076 * the caller is expected to unpin it and allow it to be merged 2077 * with the others. 2078 */ 2079 ret = __btrfs_drop_extents(trans, root, inode, path, file_pos, 2080 file_pos + num_bytes, NULL, 0, 2081 1, sizeof(*fi), &extent_inserted); 2082 if (ret) 2083 goto out; 2084 2085 if (!extent_inserted) { 2086 ins.objectid = btrfs_ino(inode); 2087 ins.offset = file_pos; 2088 ins.type = BTRFS_EXTENT_DATA_KEY; 2089 2090 path->leave_spinning = 1; 2091 ret = btrfs_insert_empty_item(trans, root, path, &ins, 2092 sizeof(*fi)); 2093 if (ret) 2094 goto out; 2095 } 2096 leaf = path->nodes[0]; 2097 fi = btrfs_item_ptr(leaf, path->slots[0], 2098 struct btrfs_file_extent_item); 2099 btrfs_set_file_extent_generation(leaf, fi, trans->transid); 2100 btrfs_set_file_extent_type(leaf, fi, extent_type); 2101 btrfs_set_file_extent_disk_bytenr(leaf, fi, disk_bytenr); 2102 btrfs_set_file_extent_disk_num_bytes(leaf, fi, disk_num_bytes); 2103 btrfs_set_file_extent_offset(leaf, fi, 0); 2104 btrfs_set_file_extent_num_bytes(leaf, fi, num_bytes); 2105 btrfs_set_file_extent_ram_bytes(leaf, fi, ram_bytes); 2106 btrfs_set_file_extent_compression(leaf, fi, compression); 2107 btrfs_set_file_extent_encryption(leaf, fi, encryption); 2108 btrfs_set_file_extent_other_encoding(leaf, fi, other_encoding); 2109 2110 btrfs_mark_buffer_dirty(leaf); 2111 btrfs_release_path(path); 2112 2113 inode_add_bytes(inode, num_bytes); 2114 2115 ins.objectid = disk_bytenr; 2116 ins.offset = disk_num_bytes; 2117 ins.type = BTRFS_EXTENT_ITEM_KEY; 2118 ret = btrfs_alloc_reserved_file_extent(trans, root, 2119 root->root_key.objectid, 2120 btrfs_ino(inode), file_pos, &ins); 2121out: 2122 btrfs_free_path(path); 2123 2124 return ret; 2125} 2126 2127/* snapshot-aware defrag */ 2128struct sa_defrag_extent_backref { 2129 struct rb_node node; 2130 struct old_sa_defrag_extent *old; 2131 u64 root_id; 2132 u64 inum; 2133 u64 file_pos; 2134 
	u64 extent_offset;
	u64 num_bytes;
	u64 generation;
};

struct old_sa_defrag_extent {
	struct list_head list;
	struct new_sa_defrag_extent *new;

	u64 extent_offset;
	u64 bytenr;
	u64 offset;
	u64 len;
	int count;
};

struct new_sa_defrag_extent {
	struct rb_root root;
	struct list_head head;
	struct btrfs_path *path;
	struct inode *inode;
	u64 file_pos;
	u64 len;
	u64 bytenr;
	u64 disk_len;
	u8 compress_type;
};

static int backref_comp(struct sa_defrag_extent_backref *b1,
			struct sa_defrag_extent_backref *b2)
{
	if (b1->root_id < b2->root_id)
		return -1;
	else if (b1->root_id > b2->root_id)
		return 1;

	if (b1->inum < b2->inum)
		return -1;
	else if (b1->inum > b2->inum)
		return 1;

	if (b1->file_pos < b2->file_pos)
		return -1;
	else if (b1->file_pos > b2->file_pos)
		return 1;

	/*
	 * [------------------------------] ===> (a range of space)
	 *  |<--->|   |<---->|  =============> (fs/file tree A)
	 * |<---------------------------->| ===> (fs/file tree B)
	 *
	 * A range of space can refer to two file extents in one tree while
	 * referring to only one file extent in another tree.
	 *
	 * So we may process a disk offset more than once (two extents in A)
	 * that land on the same extent (one extent in B), and then insert
	 * two identical backrefs (both referring to the extent in B).
	 */
	return 0;
}

static void backref_insert(struct rb_root *root,
			   struct sa_defrag_extent_backref *backref)
{
	struct rb_node **p = &root->rb_node;
	struct rb_node *parent = NULL;
	struct sa_defrag_extent_backref *entry;
	int ret;

	while (*p) {
		parent = *p;
		entry = rb_entry(parent, struct sa_defrag_extent_backref, node);

		ret = backref_comp(backref, entry);
		if (ret < 0)
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}

	rb_link_node(&backref->node, parent, p);
	rb_insert_color(&backref->node, root);
}

/*
 * Note the backref might have changed, and in this case we just return 0.
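 *
 * This is the iterate_inodes_from_logical() callback used by
 * record_extent_backrefs() below; it records one backref for each
 * (root, inode, offset) that still references the old extent.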
2220 */ 2221static noinline int record_one_backref(u64 inum, u64 offset, u64 root_id, 2222 void *ctx) 2223{ 2224 struct btrfs_file_extent_item *extent; 2225 struct btrfs_fs_info *fs_info; 2226 struct old_sa_defrag_extent *old = ctx; 2227 struct new_sa_defrag_extent *new = old->new; 2228 struct btrfs_path *path = new->path; 2229 struct btrfs_key key; 2230 struct btrfs_root *root; 2231 struct sa_defrag_extent_backref *backref; 2232 struct extent_buffer *leaf; 2233 struct inode *inode = new->inode; 2234 int slot; 2235 int ret; 2236 u64 extent_offset; 2237 u64 num_bytes; 2238 2239 if (BTRFS_I(inode)->root->root_key.objectid == root_id && 2240 inum == btrfs_ino(inode)) 2241 return 0; 2242 2243 key.objectid = root_id; 2244 key.type = BTRFS_ROOT_ITEM_KEY; 2245 key.offset = (u64)-1; 2246 2247 fs_info = BTRFS_I(inode)->root->fs_info; 2248 root = btrfs_read_fs_root_no_name(fs_info, &key); 2249 if (IS_ERR(root)) { 2250 if (PTR_ERR(root) == -ENOENT) 2251 return 0; 2252 WARN_ON(1); 2253 pr_debug("inum=%llu, offset=%llu, root_id=%llu\n", 2254 inum, offset, root_id); 2255 return PTR_ERR(root); 2256 } 2257 2258 key.objectid = inum; 2259 key.type = BTRFS_EXTENT_DATA_KEY; 2260 if (offset > (u64)-1 << 32) 2261 key.offset = 0; 2262 else 2263 key.offset = offset; 2264 2265 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0); 2266 if (WARN_ON(ret < 0)) 2267 return ret; 2268 ret = 0; 2269 2270 while (1) { 2271 cond_resched(); 2272 2273 leaf = path->nodes[0]; 2274 slot = path->slots[0]; 2275 2276 if (slot >= btrfs_header_nritems(leaf)) { 2277 ret = btrfs_next_leaf(root, path); 2278 if (ret < 0) { 2279 goto out; 2280 } else if (ret > 0) { 2281 ret = 0; 2282 goto out; 2283 } 2284 continue; 2285 } 2286 2287 path->slots[0]++; 2288 2289 btrfs_item_key_to_cpu(leaf, &key, slot); 2290 2291 if (key.objectid > inum) 2292 goto out; 2293 2294 if (key.objectid < inum || key.type != BTRFS_EXTENT_DATA_KEY) 2295 continue; 2296 2297 extent = btrfs_item_ptr(leaf, slot, 2298 struct btrfs_file_extent_item); 2299 2300 if (btrfs_file_extent_disk_bytenr(leaf, extent) != old->bytenr) 2301 continue; 2302 2303 /* 2304 * 'offset' refers to the exact key.offset, 2305 * NOT the 'offset' field in btrfs_extent_data_ref, ie. 2306 * (key.offset - extent_offset). 
		 */
		if (key.offset != offset)
			continue;

		extent_offset = btrfs_file_extent_offset(leaf, extent);
		num_bytes = btrfs_file_extent_num_bytes(leaf, extent);

		if (extent_offset >= old->extent_offset + old->offset +
		    old->len || extent_offset + num_bytes <=
		    old->extent_offset + old->offset)
			continue;
		break;
	}

	backref = kmalloc(sizeof(*backref), GFP_NOFS);
	if (!backref) {
		ret = -ENOENT;
		goto out;
	}

	backref->root_id = root_id;
	backref->inum = inum;
	backref->file_pos = offset;
	backref->num_bytes = num_bytes;
	backref->extent_offset = extent_offset;
	backref->generation = btrfs_file_extent_generation(leaf, extent);
	backref->old = old;
	backref_insert(&new->root, backref);
	old->count++;
out:
	btrfs_release_path(path);
	WARN_ON(ret);
	return ret;
}

static noinline bool record_extent_backrefs(struct btrfs_path *path,
					    struct new_sa_defrag_extent *new)
{
	struct btrfs_fs_info *fs_info = BTRFS_I(new->inode)->root->fs_info;
	struct old_sa_defrag_extent *old, *tmp;
	int ret;

	new->path = path;

	list_for_each_entry_safe(old, tmp, &new->head, list) {
		ret = iterate_inodes_from_logical(old->bytenr +
						  old->extent_offset, fs_info,
						  path, record_one_backref,
						  old);
		if (ret < 0 && ret != -ENOENT)
			return false;

		/* no backref to be processed for this extent */
		if (!old->count) {
			list_del(&old->list);
			kfree(old);
		}
	}

	if (list_empty(&new->head))
		return false;

	return true;
}

static int relink_is_mergable(struct extent_buffer *leaf,
			      struct btrfs_file_extent_item *fi,
			      struct new_sa_defrag_extent *new)
{
	if (btrfs_file_extent_disk_bytenr(leaf, fi) != new->bytenr)
		return 0;

	if (btrfs_file_extent_type(leaf, fi) != BTRFS_FILE_EXTENT_REG)
		return 0;

	if (btrfs_file_extent_compression(leaf, fi) != new->compress_type)
		return 0;

	if (btrfs_file_extent_encryption(leaf, fi) ||
	    btrfs_file_extent_other_encoding(leaf, fi))
		return 0;

	return 1;
}

/*
 * Note the backref might have changed, and in this case we just return 0.
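 *
 * relink_extent_backref() works in the three steps marked in its body:
 * look up the root for the backref, look up the inode in that root, and
 * then drop the old file extent item and insert one pointing at the new
 * (defragged) extent.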
2394 */ 2395static noinline int relink_extent_backref(struct btrfs_path *path, 2396 struct sa_defrag_extent_backref *prev, 2397 struct sa_defrag_extent_backref *backref) 2398{ 2399 struct btrfs_file_extent_item *extent; 2400 struct btrfs_file_extent_item *item; 2401 struct btrfs_ordered_extent *ordered; 2402 struct btrfs_trans_handle *trans; 2403 struct btrfs_fs_info *fs_info; 2404 struct btrfs_root *root; 2405 struct btrfs_key key; 2406 struct extent_buffer *leaf; 2407 struct old_sa_defrag_extent *old = backref->old; 2408 struct new_sa_defrag_extent *new = old->new; 2409 struct inode *src_inode = new->inode; 2410 struct inode *inode; 2411 struct extent_state *cached = NULL; 2412 int ret = 0; 2413 u64 start; 2414 u64 len; 2415 u64 lock_start; 2416 u64 lock_end; 2417 bool merge = false; 2418 int index; 2419 2420 if (prev && prev->root_id == backref->root_id && 2421 prev->inum == backref->inum && 2422 prev->file_pos + prev->num_bytes == backref->file_pos) 2423 merge = true; 2424 2425 /* step 1: get root */ 2426 key.objectid = backref->root_id; 2427 key.type = BTRFS_ROOT_ITEM_KEY; 2428 key.offset = (u64)-1; 2429 2430 fs_info = BTRFS_I(src_inode)->root->fs_info; 2431 index = srcu_read_lock(&fs_info->subvol_srcu); 2432 2433 root = btrfs_read_fs_root_no_name(fs_info, &key); 2434 if (IS_ERR(root)) { 2435 srcu_read_unlock(&fs_info->subvol_srcu, index); 2436 if (PTR_ERR(root) == -ENOENT) 2437 return 0; 2438 return PTR_ERR(root); 2439 } 2440 2441 if (btrfs_root_readonly(root)) { 2442 srcu_read_unlock(&fs_info->subvol_srcu, index); 2443 return 0; 2444 } 2445 2446 /* step 2: get inode */ 2447 key.objectid = backref->inum; 2448 key.type = BTRFS_INODE_ITEM_KEY; 2449 key.offset = 0; 2450 2451 inode = btrfs_iget(fs_info->sb, &key, root, NULL); 2452 if (IS_ERR(inode)) { 2453 srcu_read_unlock(&fs_info->subvol_srcu, index); 2454 return 0; 2455 } 2456 2457 srcu_read_unlock(&fs_info->subvol_srcu, index); 2458 2459 /* step 3: relink backref */ 2460 lock_start = backref->file_pos; 2461 lock_end = backref->file_pos + backref->num_bytes - 1; 2462 lock_extent_bits(&BTRFS_I(inode)->io_tree, lock_start, lock_end, 2463 0, &cached); 2464 2465 ordered = btrfs_lookup_first_ordered_extent(inode, lock_end); 2466 if (ordered) { 2467 btrfs_put_ordered_extent(ordered); 2468 goto out_unlock; 2469 } 2470 2471 trans = btrfs_join_transaction(root); 2472 if (IS_ERR(trans)) { 2473 ret = PTR_ERR(trans); 2474 goto out_unlock; 2475 } 2476 2477 key.objectid = backref->inum; 2478 key.type = BTRFS_EXTENT_DATA_KEY; 2479 key.offset = backref->file_pos; 2480 2481 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0); 2482 if (ret < 0) { 2483 goto out_free_path; 2484 } else if (ret > 0) { 2485 ret = 0; 2486 goto out_free_path; 2487 } 2488 2489 extent = btrfs_item_ptr(path->nodes[0], path->slots[0], 2490 struct btrfs_file_extent_item); 2491 2492 if (btrfs_file_extent_generation(path->nodes[0], extent) != 2493 backref->generation) 2494 goto out_free_path; 2495 2496 btrfs_release_path(path); 2497 2498 start = backref->file_pos; 2499 if (backref->extent_offset < old->extent_offset + old->offset) 2500 start += old->extent_offset + old->offset - 2501 backref->extent_offset; 2502 2503 len = min(backref->extent_offset + backref->num_bytes, 2504 old->extent_offset + old->offset + old->len); 2505 len -= max(backref->extent_offset, old->extent_offset + old->offset); 2506 2507 ret = btrfs_drop_extents(trans, root, inode, start, 2508 start + len, 1); 2509 if (ret) 2510 goto out_free_path; 2511again: 2512 key.objectid = btrfs_ino(inode); 2513 key.type = 
BTRFS_EXTENT_DATA_KEY; 2514 key.offset = start; 2515 2516 path->leave_spinning = 1; 2517 if (merge) { 2518 struct btrfs_file_extent_item *fi; 2519 u64 extent_len; 2520 struct btrfs_key found_key; 2521 2522 ret = btrfs_search_slot(trans, root, &key, path, 0, 1); 2523 if (ret < 0) 2524 goto out_free_path; 2525 2526 path->slots[0]--; 2527 leaf = path->nodes[0]; 2528 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]); 2529 2530 fi = btrfs_item_ptr(leaf, path->slots[0], 2531 struct btrfs_file_extent_item); 2532 extent_len = btrfs_file_extent_num_bytes(leaf, fi); 2533 2534 if (extent_len + found_key.offset == start && 2535 relink_is_mergable(leaf, fi, new)) { 2536 btrfs_set_file_extent_num_bytes(leaf, fi, 2537 extent_len + len); 2538 btrfs_mark_buffer_dirty(leaf); 2539 inode_add_bytes(inode, len); 2540 2541 ret = 1; 2542 goto out_free_path; 2543 } else { 2544 merge = false; 2545 btrfs_release_path(path); 2546 goto again; 2547 } 2548 } 2549 2550 ret = btrfs_insert_empty_item(trans, root, path, &key, 2551 sizeof(*extent)); 2552 if (ret) { 2553 btrfs_abort_transaction(trans, root, ret); 2554 goto out_free_path; 2555 } 2556 2557 leaf = path->nodes[0]; 2558 item = btrfs_item_ptr(leaf, path->slots[0], 2559 struct btrfs_file_extent_item); 2560 btrfs_set_file_extent_disk_bytenr(leaf, item, new->bytenr); 2561 btrfs_set_file_extent_disk_num_bytes(leaf, item, new->disk_len); 2562 btrfs_set_file_extent_offset(leaf, item, start - new->file_pos); 2563 btrfs_set_file_extent_num_bytes(leaf, item, len); 2564 btrfs_set_file_extent_ram_bytes(leaf, item, new->len); 2565 btrfs_set_file_extent_generation(leaf, item, trans->transid); 2566 btrfs_set_file_extent_type(leaf, item, BTRFS_FILE_EXTENT_REG); 2567 btrfs_set_file_extent_compression(leaf, item, new->compress_type); 2568 btrfs_set_file_extent_encryption(leaf, item, 0); 2569 btrfs_set_file_extent_other_encoding(leaf, item, 0); 2570 2571 btrfs_mark_buffer_dirty(leaf); 2572 inode_add_bytes(inode, len); 2573 btrfs_release_path(path); 2574 2575 ret = btrfs_inc_extent_ref(trans, root, new->bytenr, 2576 new->disk_len, 0, 2577 backref->root_id, backref->inum, 2578 new->file_pos, 0); /* start - extent_offset */ 2579 if (ret) { 2580 btrfs_abort_transaction(trans, root, ret); 2581 goto out_free_path; 2582 } 2583 2584 ret = 1; 2585out_free_path: 2586 btrfs_release_path(path); 2587 path->leave_spinning = 0; 2588 btrfs_end_transaction(trans, root); 2589out_unlock: 2590 unlock_extent_cached(&BTRFS_I(inode)->io_tree, lock_start, lock_end, 2591 &cached, GFP_NOFS); 2592 iput(inode); 2593 return ret; 2594} 2595 2596static void free_sa_defrag_extent(struct new_sa_defrag_extent *new) 2597{ 2598 struct old_sa_defrag_extent *old, *tmp; 2599 2600 if (!new) 2601 return; 2602 2603 list_for_each_entry_safe(old, tmp, &new->head, list) { 2604 list_del(&old->list); 2605 kfree(old); 2606 } 2607 kfree(new); 2608} 2609 2610static void relink_file_extents(struct new_sa_defrag_extent *new) 2611{ 2612 struct btrfs_path *path; 2613 struct sa_defrag_extent_backref *backref; 2614 struct sa_defrag_extent_backref *prev = NULL; 2615 struct inode *inode; 2616 struct btrfs_root *root; 2617 struct rb_node *node; 2618 int ret; 2619 2620 inode = new->inode; 2621 root = BTRFS_I(inode)->root; 2622 2623 path = btrfs_alloc_path(); 2624 if (!path) 2625 return; 2626 2627 if (!record_extent_backrefs(path, new)) { 2628 btrfs_free_path(path); 2629 goto out; 2630 } 2631 btrfs_release_path(path); 2632 2633 while (1) { 2634 node = rb_first(&new->root); 2635 if (!node) 2636 break; 2637 rb_erase(node, &new->root); 
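		/*
		 * Each backref is relinked at most once; on success (ret == 1)
		 * it is kept as 'prev' so a following contiguous backref from
		 * the same inode can be merged with it, otherwise 'prev' is
		 * reset to NULL below.
		 */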
2638 2639 backref = rb_entry(node, struct sa_defrag_extent_backref, node); 2640 2641 ret = relink_extent_backref(path, prev, backref); 2642 WARN_ON(ret < 0); 2643 2644 kfree(prev); 2645 2646 if (ret == 1) 2647 prev = backref; 2648 else 2649 prev = NULL; 2650 cond_resched(); 2651 } 2652 kfree(prev); 2653 2654 btrfs_free_path(path); 2655out: 2656 free_sa_defrag_extent(new); 2657 2658 atomic_dec(&root->fs_info->defrag_running); 2659 wake_up(&root->fs_info->transaction_wait); 2660} 2661 2662static struct new_sa_defrag_extent * 2663record_old_file_extents(struct inode *inode, 2664 struct btrfs_ordered_extent *ordered) 2665{ 2666 struct btrfs_root *root = BTRFS_I(inode)->root; 2667 struct btrfs_path *path; 2668 struct btrfs_key key; 2669 struct old_sa_defrag_extent *old; 2670 struct new_sa_defrag_extent *new; 2671 int ret; 2672 2673 new = kmalloc(sizeof(*new), GFP_NOFS); 2674 if (!new) 2675 return NULL; 2676 2677 new->inode = inode; 2678 new->file_pos = ordered->file_offset; 2679 new->len = ordered->len; 2680 new->bytenr = ordered->start; 2681 new->disk_len = ordered->disk_len; 2682 new->compress_type = ordered->compress_type; 2683 new->root = RB_ROOT; 2684 INIT_LIST_HEAD(&new->head); 2685 2686 path = btrfs_alloc_path(); 2687 if (!path) 2688 goto out_kfree; 2689 2690 key.objectid = btrfs_ino(inode); 2691 key.type = BTRFS_EXTENT_DATA_KEY; 2692 key.offset = new->file_pos; 2693 2694 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0); 2695 if (ret < 0) 2696 goto out_free_path; 2697 if (ret > 0 && path->slots[0] > 0) 2698 path->slots[0]--; 2699 2700 /* find out all the old extents for the file range */ 2701 while (1) { 2702 struct btrfs_file_extent_item *extent; 2703 struct extent_buffer *l; 2704 int slot; 2705 u64 num_bytes; 2706 u64 offset; 2707 u64 end; 2708 u64 disk_bytenr; 2709 u64 extent_offset; 2710 2711 l = path->nodes[0]; 2712 slot = path->slots[0]; 2713 2714 if (slot >= btrfs_header_nritems(l)) { 2715 ret = btrfs_next_leaf(root, path); 2716 if (ret < 0) 2717 goto out_free_path; 2718 else if (ret > 0) 2719 break; 2720 continue; 2721 } 2722 2723 btrfs_item_key_to_cpu(l, &key, slot); 2724 2725 if (key.objectid != btrfs_ino(inode)) 2726 break; 2727 if (key.type != BTRFS_EXTENT_DATA_KEY) 2728 break; 2729 if (key.offset >= new->file_pos + new->len) 2730 break; 2731 2732 extent = btrfs_item_ptr(l, slot, struct btrfs_file_extent_item); 2733 2734 num_bytes = btrfs_file_extent_num_bytes(l, extent); 2735 if (key.offset + num_bytes < new->file_pos) 2736 goto next; 2737 2738 disk_bytenr = btrfs_file_extent_disk_bytenr(l, extent); 2739 if (!disk_bytenr) 2740 goto next; 2741 2742 extent_offset = btrfs_file_extent_offset(l, extent); 2743 2744 old = kmalloc(sizeof(*old), GFP_NOFS); 2745 if (!old) 2746 goto out_free_path; 2747 2748 offset = max(new->file_pos, key.offset); 2749 end = min(new->file_pos + new->len, key.offset + num_bytes); 2750 2751 old->bytenr = disk_bytenr; 2752 old->extent_offset = extent_offset; 2753 old->offset = offset - key.offset; 2754 old->len = end - offset; 2755 old->new = new; 2756 old->count = 0; 2757 list_add_tail(&old->list, &new->head); 2758next: 2759 path->slots[0]++; 2760 cond_resched(); 2761 } 2762 2763 btrfs_free_path(path); 2764 atomic_inc(&root->fs_info->defrag_running); 2765 2766 return new; 2767 2768out_free_path: 2769 btrfs_free_path(path); 2770out_kfree: 2771 free_sa_defrag_extent(new); 2772 return NULL; 2773} 2774 2775static void btrfs_release_delalloc_bytes(struct btrfs_root *root, 2776 u64 start, u64 len) 2777{ 2778 struct btrfs_block_group_cache *cache; 2779 
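	/*
	 * Look up the block group containing 'start' and drop 'len' bytes
	 * from its delalloc accounting now that the ordered extent has
	 * completed.
	 */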
2780 cache = btrfs_lookup_block_group(root->fs_info, start); 2781 ASSERT(cache); 2782 2783 spin_lock(&cache->lock); 2784 cache->delalloc_bytes -= len; 2785 spin_unlock(&cache->lock); 2786 2787 btrfs_put_block_group(cache); 2788} 2789 2790/* as ordered data IO finishes, this gets called so we can finish 2791 * an ordered extent if the range of bytes in the file it covers are 2792 * fully written. 2793 */ 2794static int btrfs_finish_ordered_io(struct btrfs_ordered_extent *ordered_extent) 2795{ 2796 struct inode *inode = ordered_extent->inode; 2797 struct btrfs_root *root = BTRFS_I(inode)->root; 2798 struct btrfs_trans_handle *trans = NULL; 2799 struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree; 2800 struct extent_state *cached_state = NULL; 2801 struct new_sa_defrag_extent *new = NULL; 2802 int compress_type = 0; 2803 int ret = 0; 2804 u64 logical_len = ordered_extent->len; 2805 bool nolock; 2806 bool truncated = false; 2807 2808 nolock = btrfs_is_free_space_inode(inode); 2809 2810 if (test_bit(BTRFS_ORDERED_IOERR, &ordered_extent->flags)) { 2811 ret = -EIO; 2812 goto out; 2813 } 2814 2815 btrfs_free_io_failure_record(inode, ordered_extent->file_offset, 2816 ordered_extent->file_offset + 2817 ordered_extent->len - 1); 2818 2819 if (test_bit(BTRFS_ORDERED_TRUNCATED, &ordered_extent->flags)) { 2820 truncated = true; 2821 logical_len = ordered_extent->truncated_len; 2822 /* Truncated the entire extent, don't bother adding */ 2823 if (!logical_len) 2824 goto out; 2825 } 2826 2827 if (test_bit(BTRFS_ORDERED_NOCOW, &ordered_extent->flags)) { 2828 BUG_ON(!list_empty(&ordered_extent->list)); /* Logic error */ 2829 btrfs_ordered_update_i_size(inode, 0, ordered_extent); 2830 if (nolock) 2831 trans = btrfs_join_transaction_nolock(root); 2832 else 2833 trans = btrfs_join_transaction(root); 2834 if (IS_ERR(trans)) { 2835 ret = PTR_ERR(trans); 2836 trans = NULL; 2837 goto out; 2838 } 2839 trans->block_rsv = &root->fs_info->delalloc_block_rsv; 2840 ret = btrfs_update_inode_fallback(trans, root, inode); 2841 if (ret) /* -ENOMEM or corruption */ 2842 btrfs_abort_transaction(trans, root, ret); 2843 goto out; 2844 } 2845 2846 lock_extent_bits(io_tree, ordered_extent->file_offset, 2847 ordered_extent->file_offset + ordered_extent->len - 1, 2848 0, &cached_state); 2849 2850 ret = test_range_bit(io_tree, ordered_extent->file_offset, 2851 ordered_extent->file_offset + ordered_extent->len - 1, 2852 EXTENT_DEFRAG, 1, cached_state); 2853 if (ret) { 2854 u64 last_snapshot = btrfs_root_last_snapshot(&root->root_item); 2855 if (0 && last_snapshot >= BTRFS_I(inode)->generation) 2856 /* the inode is shared */ 2857 new = record_old_file_extents(inode, ordered_extent); 2858 2859 clear_extent_bit(io_tree, ordered_extent->file_offset, 2860 ordered_extent->file_offset + ordered_extent->len - 1, 2861 EXTENT_DEFRAG, 0, 0, &cached_state, GFP_NOFS); 2862 } 2863 2864 if (nolock) 2865 trans = btrfs_join_transaction_nolock(root); 2866 else 2867 trans = btrfs_join_transaction(root); 2868 if (IS_ERR(trans)) { 2869 ret = PTR_ERR(trans); 2870 trans = NULL; 2871 goto out_unlock; 2872 } 2873 2874 trans->block_rsv = &root->fs_info->delalloc_block_rsv; 2875 2876 if (test_bit(BTRFS_ORDERED_COMPRESSED, &ordered_extent->flags)) 2877 compress_type = ordered_extent->compress_type; 2878 if (test_bit(BTRFS_ORDERED_PREALLOC, &ordered_extent->flags)) { 2879 BUG_ON(compress_type); 2880 ret = btrfs_mark_extent_written(trans, inode, 2881 ordered_extent->file_offset, 2882 ordered_extent->file_offset + 2883 logical_len); 2884 } else { 2885 
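		/*
		 * Not a preallocated extent, so insert a new file extent item
		 * and release the per-block-group delalloc accounting via
		 * btrfs_release_delalloc_bytes().
		 */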
BUG_ON(root == root->fs_info->tree_root); 2886 ret = insert_reserved_file_extent(trans, inode, 2887 ordered_extent->file_offset, 2888 ordered_extent->start, 2889 ordered_extent->disk_len, 2890 logical_len, logical_len, 2891 compress_type, 0, 0, 2892 BTRFS_FILE_EXTENT_REG); 2893 if (!ret) 2894 btrfs_release_delalloc_bytes(root, 2895 ordered_extent->start, 2896 ordered_extent->disk_len); 2897 } 2898 unpin_extent_cache(&BTRFS_I(inode)->extent_tree, 2899 ordered_extent->file_offset, ordered_extent->len, 2900 trans->transid); 2901 if (ret < 0) { 2902 btrfs_abort_transaction(trans, root, ret); 2903 goto out_unlock; 2904 } 2905 2906 add_pending_csums(trans, inode, ordered_extent->file_offset, 2907 &ordered_extent->list); 2908 2909 btrfs_ordered_update_i_size(inode, 0, ordered_extent); 2910 ret = btrfs_update_inode_fallback(trans, root, inode); 2911 if (ret) { /* -ENOMEM or corruption */ 2912 btrfs_abort_transaction(trans, root, ret); 2913 goto out_unlock; 2914 } 2915 ret = 0; 2916out_unlock: 2917 unlock_extent_cached(io_tree, ordered_extent->file_offset, 2918 ordered_extent->file_offset + 2919 ordered_extent->len - 1, &cached_state, GFP_NOFS); 2920out: 2921 if (root != root->fs_info->tree_root) 2922 btrfs_delalloc_release_metadata(inode, ordered_extent->len); 2923 if (trans) 2924 btrfs_end_transaction(trans, root); 2925 2926 if (ret || truncated) { 2927 u64 start, end; 2928 2929 if (truncated) 2930 start = ordered_extent->file_offset + logical_len; 2931 else 2932 start = ordered_extent->file_offset; 2933 end = ordered_extent->file_offset + ordered_extent->len - 1; 2934 clear_extent_uptodate(io_tree, start, end, NULL, GFP_NOFS); 2935 2936 /* Drop the cache for the part of the extent we didn't write. */ 2937 btrfs_drop_extent_cache(inode, start, end, 0); 2938 2939 /* 2940 * If the ordered extent had an IOERR or something else went 2941 * wrong we need to return the space for this ordered extent 2942 * back to the allocator. We only free the extent in the 2943 * truncated case if we didn't write out the extent at all. 2944 */ 2945 if ((ret || !logical_len) && 2946 !test_bit(BTRFS_ORDERED_NOCOW, &ordered_extent->flags) && 2947 !test_bit(BTRFS_ORDERED_PREALLOC, &ordered_extent->flags)) 2948 btrfs_free_reserved_extent(root, ordered_extent->start, 2949 ordered_extent->disk_len, 1); 2950 } 2951 2952 2953 /* 2954 * This needs to be done to make sure anybody waiting knows we are done 2955 * updating everything for this ordered extent. 
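 * Note that we reach this point on both the success and the error paths,
 * so waiters are never left hanging on a failed ordered extent.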
2956 */ 2957 btrfs_remove_ordered_extent(inode, ordered_extent); 2958 2959 /* for snapshot-aware defrag */ 2960 if (new) { 2961 if (ret) { 2962 free_sa_defrag_extent(new); 2963 atomic_dec(&root->fs_info->defrag_running); 2964 } else { 2965 relink_file_extents(new); 2966 } 2967 } 2968 2969 /* once for us */ 2970 btrfs_put_ordered_extent(ordered_extent); 2971 /* once for the tree */ 2972 btrfs_put_ordered_extent(ordered_extent); 2973 2974 return ret; 2975} 2976 2977static void finish_ordered_fn(struct btrfs_work *work) 2978{ 2979 struct btrfs_ordered_extent *ordered_extent; 2980 ordered_extent = container_of(work, struct btrfs_ordered_extent, work); 2981 btrfs_finish_ordered_io(ordered_extent); 2982} 2983 2984static int btrfs_writepage_end_io_hook(struct page *page, u64 start, u64 end, 2985 struct extent_state *state, int uptodate) 2986{ 2987 struct inode *inode = page->mapping->host; 2988 struct btrfs_root *root = BTRFS_I(inode)->root; 2989 struct btrfs_ordered_extent *ordered_extent = NULL; 2990 struct btrfs_workqueue *wq; 2991 btrfs_work_func_t func; 2992 2993 trace_btrfs_writepage_end_io_hook(page, start, end, uptodate); 2994 2995 ClearPagePrivate2(page); 2996 if (!btrfs_dec_test_ordered_pending(inode, &ordered_extent, start, 2997 end - start + 1, uptodate)) 2998 return 0; 2999 3000 if (btrfs_is_free_space_inode(inode)) { 3001 wq = root->fs_info->endio_freespace_worker; 3002 func = btrfs_freespace_write_helper; 3003 } else { 3004 wq = root->fs_info->endio_write_workers; 3005 func = btrfs_endio_write_helper; 3006 } 3007 3008 btrfs_init_work(&ordered_extent->work, func, finish_ordered_fn, NULL, 3009 NULL); 3010 btrfs_queue_work(wq, &ordered_extent->work); 3011 3012 return 0; 3013} 3014 3015static int __readpage_endio_check(struct inode *inode, 3016 struct btrfs_io_bio *io_bio, 3017 int icsum, struct page *page, 3018 int pgoff, u64 start, size_t len) 3019{ 3020 char *kaddr; 3021 u32 csum_expected; 3022 u32 csum = ~(u32)0; 3023 static DEFINE_RATELIMIT_STATE(_rs, DEFAULT_RATELIMIT_INTERVAL, 3024 DEFAULT_RATELIMIT_BURST); 3025 3026 csum_expected = *(((u32 *)io_bio->csum) + icsum); 3027 3028 kaddr = kmap_atomic(page); 3029 csum = btrfs_csum_data(kaddr + pgoff, csum, len); 3030 btrfs_csum_final(csum, (char *)&csum); 3031 if (csum != csum_expected) 3032 goto zeroit; 3033 3034 kunmap_atomic(kaddr); 3035 return 0; 3036zeroit: 3037 if (__ratelimit(&_rs)) 3038 btrfs_warn(BTRFS_I(inode)->root->fs_info, 3039 "csum failed ino %llu off %llu csum %u expected csum %u", 3040 btrfs_ino(inode), start, csum, csum_expected); 3041 memset(kaddr + pgoff, 1, len); 3042 flush_dcache_page(page); 3043 kunmap_atomic(kaddr); 3044 if (csum_expected == 0) 3045 return 0; 3046 return -EIO; 3047} 3048 3049/* 3050 * when reads are done, we need to check csums to verify the data is correct 3051 * if there's a match, we allow the bio to finish. If not, the code in 3052 * extent_io.c will try to find good copies for us. 
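 *
 * Pages flagged PageChecked and inodes with NODATASUM set skip the check
 * entirely, as do data reloc tree ranges marked EXTENT_NODATASUM.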
3053 */ 3054static int btrfs_readpage_end_io_hook(struct btrfs_io_bio *io_bio, 3055 u64 phy_offset, struct page *page, 3056 u64 start, u64 end, int mirror) 3057{ 3058 size_t offset = start - page_offset(page); 3059 struct inode *inode = page->mapping->host; 3060 struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree; 3061 struct btrfs_root *root = BTRFS_I(inode)->root; 3062 3063 if (PageChecked(page)) { 3064 ClearPageChecked(page); 3065 return 0; 3066 } 3067 3068 if (BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM) 3069 return 0; 3070 3071 if (root->root_key.objectid == BTRFS_DATA_RELOC_TREE_OBJECTID && 3072 test_range_bit(io_tree, start, end, EXTENT_NODATASUM, 1, NULL)) { 3073 clear_extent_bits(io_tree, start, end, EXTENT_NODATASUM, 3074 GFP_NOFS); 3075 return 0; 3076 } 3077 3078 phy_offset >>= inode->i_sb->s_blocksize_bits; 3079 return __readpage_endio_check(inode, io_bio, phy_offset, page, offset, 3080 start, (size_t)(end - start + 1)); 3081} 3082 3083void btrfs_add_delayed_iput(struct inode *inode) 3084{ 3085 struct btrfs_fs_info *fs_info = BTRFS_I(inode)->root->fs_info; 3086 struct btrfs_inode *binode = BTRFS_I(inode); 3087 3088 if (atomic_add_unless(&inode->i_count, -1, 1)) 3089 return; 3090 3091 spin_lock(&fs_info->delayed_iput_lock); 3092 if (binode->delayed_iput_count == 0) { 3093 ASSERT(list_empty(&binode->delayed_iput)); 3094 list_add_tail(&binode->delayed_iput, &fs_info->delayed_iputs); 3095 } else { 3096 binode->delayed_iput_count++; 3097 } 3098 spin_unlock(&fs_info->delayed_iput_lock); 3099} 3100 3101void btrfs_run_delayed_iputs(struct btrfs_root *root) 3102{ 3103 struct btrfs_fs_info *fs_info = root->fs_info; 3104 3105 spin_lock(&fs_info->delayed_iput_lock); 3106 while (!list_empty(&fs_info->delayed_iputs)) { 3107 struct btrfs_inode *inode; 3108 3109 inode = list_first_entry(&fs_info->delayed_iputs, 3110 struct btrfs_inode, delayed_iput); 3111 if (inode->delayed_iput_count) { 3112 inode->delayed_iput_count--; 3113 list_move_tail(&inode->delayed_iput, 3114 &fs_info->delayed_iputs); 3115 } else { 3116 list_del_init(&inode->delayed_iput); 3117 } 3118 spin_unlock(&fs_info->delayed_iput_lock); 3119 iput(&inode->vfs_inode); 3120 spin_lock(&fs_info->delayed_iput_lock); 3121 } 3122 spin_unlock(&fs_info->delayed_iput_lock); 3123} 3124 3125/* 3126 * This is called in transaction commit time. If there are no orphan 3127 * files in the subvolume, it removes orphan item and frees block_rsv 3128 * structure. 
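 *
 * The orphan_inodes count and cleanup state are re-checked under
 * orphan_lock before the block_rsv is torn down, so a concurrent
 * btrfs_orphan_add() is not missed.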
3129 */ 3130void btrfs_orphan_commit_root(struct btrfs_trans_handle *trans, 3131 struct btrfs_root *root) 3132{ 3133 struct btrfs_block_rsv *block_rsv; 3134 int ret; 3135 3136 if (atomic_read(&root->orphan_inodes) || 3137 root->orphan_cleanup_state != ORPHAN_CLEANUP_DONE) 3138 return; 3139 3140 spin_lock(&root->orphan_lock); 3141 if (atomic_read(&root->orphan_inodes)) { 3142 spin_unlock(&root->orphan_lock); 3143 return; 3144 } 3145 3146 if (root->orphan_cleanup_state != ORPHAN_CLEANUP_DONE) { 3147 spin_unlock(&root->orphan_lock); 3148 return; 3149 } 3150 3151 block_rsv = root->orphan_block_rsv; 3152 root->orphan_block_rsv = NULL; 3153 spin_unlock(&root->orphan_lock); 3154 3155 if (test_bit(BTRFS_ROOT_ORPHAN_ITEM_INSERTED, &root->state) && 3156 btrfs_root_refs(&root->root_item) > 0) { 3157 ret = btrfs_del_orphan_item(trans, root->fs_info->tree_root, 3158 root->root_key.objectid); 3159 if (ret) 3160 btrfs_abort_transaction(trans, root, ret); 3161 else 3162 clear_bit(BTRFS_ROOT_ORPHAN_ITEM_INSERTED, 3163 &root->state); 3164 } 3165 3166 if (block_rsv) { 3167 WARN_ON(block_rsv->size > 0); 3168 btrfs_free_block_rsv(root, block_rsv); 3169 } 3170} 3171 3172/* 3173 * This creates an orphan entry for the given inode in case something goes 3174 * wrong in the middle of an unlink/truncate. 3175 * 3176 * NOTE: caller of this function should reserve 5 units of metadata for 3177 * this function. 3178 */ 3179int btrfs_orphan_add(struct btrfs_trans_handle *trans, struct inode *inode) 3180{ 3181 struct btrfs_root *root = BTRFS_I(inode)->root; 3182 struct btrfs_block_rsv *block_rsv = NULL; 3183 int reserve = 0; 3184 int insert = 0; 3185 int ret; 3186 3187 if (!root->orphan_block_rsv) { 3188 block_rsv = btrfs_alloc_block_rsv(root, BTRFS_BLOCK_RSV_TEMP); 3189 if (!block_rsv) 3190 return -ENOMEM; 3191 } 3192 3193 spin_lock(&root->orphan_lock); 3194 if (!root->orphan_block_rsv) { 3195 root->orphan_block_rsv = block_rsv; 3196 } else if (block_rsv) { 3197 btrfs_free_block_rsv(root, block_rsv); 3198 block_rsv = NULL; 3199 } 3200 3201 if (!test_and_set_bit(BTRFS_INODE_HAS_ORPHAN_ITEM, 3202 &BTRFS_I(inode)->runtime_flags)) { 3203#if 0 3204 /* 3205 * For proper ENOSPC handling, we should do orphan 3206 * cleanup when mounting. But this introduces backward 3207 * compatibility issue. 3208 */ 3209 if (!xchg(&root->orphan_item_inserted, 1)) 3210 insert = 2; 3211 else 3212 insert = 1; 3213#endif 3214 insert = 1; 3215 atomic_inc(&root->orphan_inodes); 3216 } 3217 3218 if (!test_and_set_bit(BTRFS_INODE_ORPHAN_META_RESERVED, 3219 &BTRFS_I(inode)->runtime_flags)) 3220 reserve = 1; 3221 spin_unlock(&root->orphan_lock); 3222 3223 /* grab metadata reservation from transaction handle */ 3224 if (reserve) { 3225 ret = btrfs_orphan_reserve_metadata(trans, inode); 3226 BUG_ON(ret); /* -ENOSPC in reservation; Logic error? 
JDM */ 3227 } 3228 3229 /* insert an orphan item to track this unlinked/truncated file */ 3230 if (insert >= 1) { 3231 ret = btrfs_insert_orphan_item(trans, root, btrfs_ino(inode)); 3232 if (ret) { 3233 atomic_dec(&root->orphan_inodes); 3234 if (reserve) { 3235 clear_bit(BTRFS_INODE_ORPHAN_META_RESERVED, 3236 &BTRFS_I(inode)->runtime_flags); 3237 btrfs_orphan_release_metadata(inode); 3238 } 3239 if (ret != -EEXIST) { 3240 clear_bit(BTRFS_INODE_HAS_ORPHAN_ITEM, 3241 &BTRFS_I(inode)->runtime_flags); 3242 btrfs_abort_transaction(trans, root, ret); 3243 return ret; 3244 } 3245 } 3246 ret = 0; 3247 } 3248 3249 /* insert an orphan item to track subvolume contains orphan files */ 3250 if (insert >= 2) { 3251 ret = btrfs_insert_orphan_item(trans, root->fs_info->tree_root, 3252 root->root_key.objectid); 3253 if (ret && ret != -EEXIST) { 3254 btrfs_abort_transaction(trans, root, ret); 3255 return ret; 3256 } 3257 } 3258 return 0; 3259} 3260 3261/* 3262 * We have done the truncate/delete so we can go ahead and remove the orphan 3263 * item for this particular inode. 3264 */ 3265static int btrfs_orphan_del(struct btrfs_trans_handle *trans, 3266 struct inode *inode) 3267{ 3268 struct btrfs_root *root = BTRFS_I(inode)->root; 3269 int delete_item = 0; 3270 int release_rsv = 0; 3271 int ret = 0; 3272 3273 spin_lock(&root->orphan_lock); 3274 if (test_and_clear_bit(BTRFS_INODE_HAS_ORPHAN_ITEM, 3275 &BTRFS_I(inode)->runtime_flags)) 3276 delete_item = 1; 3277 3278 if (test_and_clear_bit(BTRFS_INODE_ORPHAN_META_RESERVED, 3279 &BTRFS_I(inode)->runtime_flags)) 3280 release_rsv = 1; 3281 spin_unlock(&root->orphan_lock); 3282 3283 if (delete_item) { 3284 atomic_dec(&root->orphan_inodes); 3285 if (trans) 3286 ret = btrfs_del_orphan_item(trans, root, 3287 btrfs_ino(inode)); 3288 } 3289 3290 if (release_rsv) 3291 btrfs_orphan_release_metadata(inode); 3292 3293 return ret; 3294} 3295 3296/* 3297 * this cleans up any orphans that may be left on the list from the last use 3298 * of this root. 
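 *
 * Orphan items are walked from the highest key downwards; inodes that
 * still have a link count are leftover truncates and are truncated again,
 * everything else is fully deleted on the final iput().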
3299 */ 3300int btrfs_orphan_cleanup(struct btrfs_root *root) 3301{ 3302 struct btrfs_path *path; 3303 struct extent_buffer *leaf; 3304 struct btrfs_key key, found_key; 3305 struct btrfs_trans_handle *trans; 3306 struct inode *inode; 3307 u64 last_objectid = 0; 3308 int ret = 0, nr_unlink = 0, nr_truncate = 0; 3309 3310 if (cmpxchg(&root->orphan_cleanup_state, 0, ORPHAN_CLEANUP_STARTED)) 3311 return 0; 3312 3313 path = btrfs_alloc_path(); 3314 if (!path) { 3315 ret = -ENOMEM; 3316 goto out; 3317 } 3318 path->reada = -1; 3319 3320 key.objectid = BTRFS_ORPHAN_OBJECTID; 3321 key.type = BTRFS_ORPHAN_ITEM_KEY; 3322 key.offset = (u64)-1; 3323 3324 while (1) { 3325 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0); 3326 if (ret < 0) 3327 goto out; 3328 3329 /* 3330 * if ret == 0 means we found what we were searching for, which 3331 * is weird, but possible, so only screw with path if we didn't 3332 * find the key and see if we have stuff that matches 3333 */ 3334 if (ret > 0) { 3335 ret = 0; 3336 if (path->slots[0] == 0) 3337 break; 3338 path->slots[0]--; 3339 } 3340 3341 /* pull out the item */ 3342 leaf = path->nodes[0]; 3343 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]); 3344 3345 /* make sure the item matches what we want */ 3346 if (found_key.objectid != BTRFS_ORPHAN_OBJECTID) 3347 break; 3348 if (found_key.type != BTRFS_ORPHAN_ITEM_KEY) 3349 break; 3350 3351 /* release the path since we're done with it */ 3352 btrfs_release_path(path); 3353 3354 /* 3355 * this is where we are basically btrfs_lookup, without the 3356 * crossing root thing. we store the inode number in the 3357 * offset of the orphan item. 3358 */ 3359 3360 if (found_key.offset == last_objectid) { 3361 btrfs_err(root->fs_info, 3362 "Error removing orphan entry, stopping orphan cleanup"); 3363 ret = -EINVAL; 3364 goto out; 3365 } 3366 3367 last_objectid = found_key.offset; 3368 3369 found_key.objectid = found_key.offset; 3370 found_key.type = BTRFS_INODE_ITEM_KEY; 3371 found_key.offset = 0; 3372 inode = btrfs_iget(root->fs_info->sb, &found_key, root, NULL); 3373 ret = PTR_ERR_OR_ZERO(inode); 3374 if (ret && ret != -ESTALE) 3375 goto out; 3376 3377 if (ret == -ESTALE && root == root->fs_info->tree_root) { 3378 struct btrfs_root *dead_root; 3379 struct btrfs_fs_info *fs_info = root->fs_info; 3380 int is_dead_root = 0; 3381 3382 /* 3383 * this is an orphan in the tree root. Currently these 3384 * could come from 2 sources: 3385 * a) a snapshot deletion in progress 3386 * b) a free space cache inode 3387 * We need to distinguish those two, as the snapshot 3388 * orphan must not get deleted. 3389 * find_dead_roots already ran before us, so if this 3390 * is a snapshot deletion, we should find the root 3391 * in the dead_roots list 3392 */ 3393 spin_lock(&fs_info->trans_lock); 3394 list_for_each_entry(dead_root, &fs_info->dead_roots, 3395 root_list) { 3396 if (dead_root->root_key.objectid == 3397 found_key.objectid) { 3398 is_dead_root = 1; 3399 break; 3400 } 3401 } 3402 spin_unlock(&fs_info->trans_lock); 3403 if (is_dead_root) { 3404 /* prevent this orphan from being found again */ 3405 key.offset = found_key.objectid - 1; 3406 continue; 3407 } 3408 } 3409 /* 3410 * Inode is already gone but the orphan item is still there, 3411 * kill the orphan item. 
3412 */ 3413 if (ret == -ESTALE) { 3414 trans = btrfs_start_transaction(root, 1); 3415 if (IS_ERR(trans)) { 3416 ret = PTR_ERR(trans); 3417 goto out; 3418 } 3419 btrfs_debug(root->fs_info, "auto deleting %Lu", 3420 found_key.objectid); 3421 ret = btrfs_del_orphan_item(trans, root, 3422 found_key.objectid); 3423 btrfs_end_transaction(trans, root); 3424 if (ret) 3425 goto out; 3426 continue; 3427 } 3428 3429 /* 3430 * add this inode to the orphan list so btrfs_orphan_del does 3431 * the proper thing when we hit it 3432 */ 3433 set_bit(BTRFS_INODE_HAS_ORPHAN_ITEM, 3434 &BTRFS_I(inode)->runtime_flags); 3435 atomic_inc(&root->orphan_inodes); 3436 3437 /* if we have links, this was a truncate, lets do that */ 3438 if (inode->i_nlink) { 3439 if (WARN_ON(!S_ISREG(inode->i_mode))) { 3440 iput(inode); 3441 continue; 3442 } 3443 nr_truncate++; 3444 3445 /* 1 for the orphan item deletion. */ 3446 trans = btrfs_start_transaction(root, 1); 3447 if (IS_ERR(trans)) { 3448 iput(inode); 3449 ret = PTR_ERR(trans); 3450 goto out; 3451 } 3452 ret = btrfs_orphan_add(trans, inode); 3453 btrfs_end_transaction(trans, root); 3454 if (ret) { 3455 iput(inode); 3456 goto out; 3457 } 3458 3459 ret = btrfs_truncate(inode); 3460 if (ret) 3461 btrfs_orphan_del(NULL, inode); 3462 } else { 3463 nr_unlink++; 3464 } 3465 3466 /* this will do delete_inode and everything for us */ 3467 iput(inode); 3468 if (ret) 3469 goto out; 3470 } 3471 /* release the path since we're done with it */ 3472 btrfs_release_path(path); 3473 3474 root->orphan_cleanup_state = ORPHAN_CLEANUP_DONE; 3475 3476 if (root->orphan_block_rsv) 3477 btrfs_block_rsv_release(root, root->orphan_block_rsv, 3478 (u64)-1); 3479 3480 if (root->orphan_block_rsv || 3481 test_bit(BTRFS_ROOT_ORPHAN_ITEM_INSERTED, &root->state)) { 3482 trans = btrfs_join_transaction(root); 3483 if (!IS_ERR(trans)) 3484 btrfs_end_transaction(trans, root); 3485 } 3486 3487 if (nr_unlink) 3488 btrfs_debug(root->fs_info, "unlinked %d orphans", nr_unlink); 3489 if (nr_truncate) 3490 btrfs_debug(root->fs_info, "truncated %d orphans", nr_truncate); 3491 3492out: 3493 if (ret) 3494 btrfs_err(root->fs_info, 3495 "could not do orphan cleanup %d", ret); 3496 btrfs_free_path(path); 3497 return ret; 3498} 3499 3500/* 3501 * very simple check to peek ahead in the leaf looking for xattrs. If we 3502 * don't find any xattrs, we know there can't be any acls. 
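 *
 * Returns 1 if the inode may have acls (including the case where we gave
 * up scanning after a few slots), 0 if it definitely has none; the slot
 * of the first xattr found is stored in *first_xattr_slot.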
3503 * 3504 * slot is the slot the inode is in, objectid is the objectid of the inode 3505 */ 3506static noinline int acls_after_inode_item(struct extent_buffer *leaf, 3507 int slot, u64 objectid, 3508 int *first_xattr_slot) 3509{ 3510 u32 nritems = btrfs_header_nritems(leaf); 3511 struct btrfs_key found_key; 3512 static u64 xattr_access = 0; 3513 static u64 xattr_default = 0; 3514 int scanned = 0; 3515 3516 if (!xattr_access) { 3517 xattr_access = btrfs_name_hash(POSIX_ACL_XATTR_ACCESS, 3518 strlen(POSIX_ACL_XATTR_ACCESS)); 3519 xattr_default = btrfs_name_hash(POSIX_ACL_XATTR_DEFAULT, 3520 strlen(POSIX_ACL_XATTR_DEFAULT)); 3521 } 3522 3523 slot++; 3524 *first_xattr_slot = -1; 3525 while (slot < nritems) { 3526 btrfs_item_key_to_cpu(leaf, &found_key, slot); 3527 3528 /* we found a different objectid, there must not be acls */ 3529 if (found_key.objectid != objectid) 3530 return 0; 3531 3532 /* we found an xattr, assume we've got an acl */ 3533 if (found_key.type == BTRFS_XATTR_ITEM_KEY) { 3534 if (*first_xattr_slot == -1) 3535 *first_xattr_slot = slot; 3536 if (found_key.offset == xattr_access || 3537 found_key.offset == xattr_default) 3538 return 1; 3539 } 3540 3541 /* 3542 * we found a key greater than an xattr key, there can't 3543 * be any acls later on 3544 */ 3545 if (found_key.type > BTRFS_XATTR_ITEM_KEY) 3546 return 0; 3547 3548 slot++; 3549 scanned++; 3550 3551 /* 3552 * it goes inode, inode backrefs, xattrs, extents, 3553 * so if there are a ton of hard links to an inode there can 3554 * be a lot of backrefs. Don't waste time searching too hard, 3555 * this is just an optimization 3556 */ 3557 if (scanned >= 8) 3558 break; 3559 } 3560 /* we hit the end of the leaf before we found an xattr or 3561 * something larger than an xattr. We have to assume the inode 3562 * has acls 3563 */ 3564 if (*first_xattr_slot == -1) 3565 *first_xattr_slot = slot; 3566 return 1; 3567} 3568 3569/* 3570 * read an inode from the btree into the in-memory inode 3571 */ 3572static void btrfs_read_locked_inode(struct inode *inode) 3573{ 3574 struct btrfs_path *path; 3575 struct extent_buffer *leaf; 3576 struct btrfs_inode_item *inode_item; 3577 struct btrfs_root *root = BTRFS_I(inode)->root; 3578 struct btrfs_key location; 3579 unsigned long ptr; 3580 int maybe_acls; 3581 u32 rdev; 3582 int ret; 3583 bool filled = false; 3584 int first_xattr_slot; 3585 3586 ret = btrfs_fill_inode(inode, &rdev); 3587 if (!ret) 3588 filled = true; 3589 3590 path = btrfs_alloc_path(); 3591 if (!path) 3592 goto make_bad; 3593 3594 memcpy(&location, &BTRFS_I(inode)->location, sizeof(location)); 3595 3596 ret = btrfs_lookup_inode(NULL, root, path, &location, 0); 3597 if (ret) 3598 goto make_bad; 3599 3600 leaf = path->nodes[0]; 3601 3602 if (filled) 3603 goto cache_index; 3604 3605 inode_item = btrfs_item_ptr(leaf, path->slots[0], 3606 struct btrfs_inode_item); 3607 inode->i_mode = btrfs_inode_mode(leaf, inode_item); 3608 set_nlink(inode, btrfs_inode_nlink(leaf, inode_item)); 3609 i_uid_write(inode, btrfs_inode_uid(leaf, inode_item)); 3610 i_gid_write(inode, btrfs_inode_gid(leaf, inode_item)); 3611 btrfs_i_size_write(inode, btrfs_inode_size(leaf, inode_item)); 3612 3613 inode->i_atime.tv_sec = btrfs_timespec_sec(leaf, &inode_item->atime); 3614 inode->i_atime.tv_nsec = btrfs_timespec_nsec(leaf, &inode_item->atime); 3615 3616 inode->i_mtime.tv_sec = btrfs_timespec_sec(leaf, &inode_item->mtime); 3617 inode->i_mtime.tv_nsec = btrfs_timespec_nsec(leaf, &inode_item->mtime); 3618 3619 inode->i_ctime.tv_sec = btrfs_timespec_sec(leaf, 
&inode_item->ctime); 3620 inode->i_ctime.tv_nsec = btrfs_timespec_nsec(leaf, &inode_item->ctime); 3621 3622 BTRFS_I(inode)->i_otime.tv_sec = 3623 btrfs_timespec_sec(leaf, &inode_item->otime); 3624 BTRFS_I(inode)->i_otime.tv_nsec = 3625 btrfs_timespec_nsec(leaf, &inode_item->otime); 3626 3627 inode_set_bytes(inode, btrfs_inode_nbytes(leaf, inode_item)); 3628 BTRFS_I(inode)->generation = btrfs_inode_generation(leaf, inode_item); 3629 BTRFS_I(inode)->last_trans = btrfs_inode_transid(leaf, inode_item); 3630 3631 inode->i_version = btrfs_inode_sequence(leaf, inode_item); 3632 inode->i_generation = BTRFS_I(inode)->generation; 3633 inode->i_rdev = 0; 3634 rdev = btrfs_inode_rdev(leaf, inode_item); 3635 3636 BTRFS_I(inode)->index_cnt = (u64)-1; 3637 BTRFS_I(inode)->flags = btrfs_inode_flags(leaf, inode_item); 3638 3639cache_index: 3640 /* 3641 * If we were modified in the current generation and evicted from memory 3642 * and then re-read we need to do a full sync since we don't have any 3643 * idea about which extents were modified before we were evicted from 3644 * cache. 3645 * 3646 * This is required for both inode re-read from disk and delayed inode 3647 * in delayed_nodes_tree. 3648 */ 3649 if (BTRFS_I(inode)->last_trans == root->fs_info->generation) 3650 set_bit(BTRFS_INODE_NEEDS_FULL_SYNC, 3651 &BTRFS_I(inode)->runtime_flags); 3652 3653 path->slots[0]++; 3654 if (inode->i_nlink != 1 || 3655 path->slots[0] >= btrfs_header_nritems(leaf)) 3656 goto cache_acl; 3657 3658 btrfs_item_key_to_cpu(leaf, &location, path->slots[0]); 3659 if (location.objectid != btrfs_ino(inode)) 3660 goto cache_acl; 3661 3662 ptr = btrfs_item_ptr_offset(leaf, path->slots[0]); 3663 if (location.type == BTRFS_INODE_REF_KEY) { 3664 struct btrfs_inode_ref *ref; 3665 3666 ref = (struct btrfs_inode_ref *)ptr; 3667 BTRFS_I(inode)->dir_index = btrfs_inode_ref_index(leaf, ref); 3668 } else if (location.type == BTRFS_INODE_EXTREF_KEY) { 3669 struct btrfs_inode_extref *extref; 3670 3671 extref = (struct btrfs_inode_extref *)ptr; 3672 BTRFS_I(inode)->dir_index = btrfs_inode_extref_index(leaf, 3673 extref); 3674 } 3675cache_acl: 3676 /* 3677 * try to precache a NULL acl entry for files that don't have 3678 * any xattrs or acls 3679 */ 3680 maybe_acls = acls_after_inode_item(leaf, path->slots[0], 3681 btrfs_ino(inode), &first_xattr_slot); 3682 if (first_xattr_slot != -1) { 3683 path->slots[0] = first_xattr_slot; 3684 ret = btrfs_load_inode_props(inode, path); 3685 if (ret) 3686 btrfs_err(root->fs_info, 3687 "error loading props for ino %llu (root %llu): %d", 3688 btrfs_ino(inode), 3689 root->root_key.objectid, ret); 3690 } 3691 btrfs_free_path(path); 3692 3693 if (!maybe_acls) 3694 cache_no_acl(inode); 3695 3696 switch (inode->i_mode & S_IFMT) { 3697 case S_IFREG: 3698 inode->i_mapping->a_ops = &btrfs_aops; 3699 BTRFS_I(inode)->io_tree.ops = &btrfs_extent_io_ops; 3700 inode->i_fop = &btrfs_file_operations; 3701 inode->i_op = &btrfs_file_inode_operations; 3702 break; 3703 case S_IFDIR: 3704 inode->i_fop = &btrfs_dir_file_operations; 3705 if (root == root->fs_info->tree_root) 3706 inode->i_op = &btrfs_dir_ro_inode_operations; 3707 else 3708 inode->i_op = &btrfs_dir_inode_operations; 3709 break; 3710 case S_IFLNK: 3711 inode->i_op = &btrfs_symlink_inode_operations; 3712 inode->i_mapping->a_ops = &btrfs_symlink_aops; 3713 break; 3714 default: 3715 inode->i_op = &btrfs_special_inode_operations; 3716 init_special_inode(inode, inode->i_mode, rdev); 3717 break; 3718 } 3719 3720 btrfs_update_iflags(inode); 3721 return; 3722 3723make_bad: 
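	/*
	 * We either failed to allocate a path or could not find the inode
	 * item in the btree; mark the in-core inode bad.
	 */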
3724 btrfs_free_path(path); 3725 make_bad_inode(inode); 3726} 3727 3728/* 3729 * given a leaf and an inode, copy the inode fields into the leaf 3730 */ 3731static void fill_inode_item(struct btrfs_trans_handle *trans, 3732 struct extent_buffer *leaf, 3733 struct btrfs_inode_item *item, 3734 struct inode *inode) 3735{ 3736 struct btrfs_map_token token; 3737 3738 btrfs_init_map_token(&token); 3739 3740 btrfs_set_token_inode_uid(leaf, item, i_uid_read(inode), &token); 3741 btrfs_set_token_inode_gid(leaf, item, i_gid_read(inode), &token); 3742 btrfs_set_token_inode_size(leaf, item, BTRFS_I(inode)->disk_i_size, 3743 &token); 3744 btrfs_set_token_inode_mode(leaf, item, inode->i_mode, &token); 3745 btrfs_set_token_inode_nlink(leaf, item, inode->i_nlink, &token); 3746 3747 btrfs_set_token_timespec_sec(leaf, &item->atime, 3748 inode->i_atime.tv_sec, &token); 3749 btrfs_set_token_timespec_nsec(leaf, &item->atime, 3750 inode->i_atime.tv_nsec, &token); 3751 3752 btrfs_set_token_timespec_sec(leaf, &item->mtime, 3753 inode->i_mtime.tv_sec, &token); 3754 btrfs_set_token_timespec_nsec(leaf, &item->mtime, 3755 inode->i_mtime.tv_nsec, &token); 3756 3757 btrfs_set_token_timespec_sec(leaf, &item->ctime, 3758 inode->i_ctime.tv_sec, &token); 3759 btrfs_set_token_timespec_nsec(leaf, &item->ctime, 3760 inode->i_ctime.tv_nsec, &token); 3761 3762 btrfs_set_token_timespec_sec(leaf, &item->otime, 3763 BTRFS_I(inode)->i_otime.tv_sec, &token); 3764 btrfs_set_token_timespec_nsec(leaf, &item->otime, 3765 BTRFS_I(inode)->i_otime.tv_nsec, &token); 3766 3767 btrfs_set_token_inode_nbytes(leaf, item, inode_get_bytes(inode), 3768 &token); 3769 btrfs_set_token_inode_generation(leaf, item, BTRFS_I(inode)->generation, 3770 &token); 3771 btrfs_set_token_inode_sequence(leaf, item, inode->i_version, &token); 3772 btrfs_set_token_inode_transid(leaf, item, trans->transid, &token); 3773 btrfs_set_token_inode_rdev(leaf, item, inode->i_rdev, &token); 3774 btrfs_set_token_inode_flags(leaf, item, BTRFS_I(inode)->flags, &token); 3775 btrfs_set_token_inode_block_group(leaf, item, 0, &token); 3776} 3777 3778/* 3779 * copy everything in the in-memory inode into the btree. 3780 */ 3781static noinline int btrfs_update_inode_item(struct btrfs_trans_handle *trans, 3782 struct btrfs_root *root, struct inode *inode) 3783{ 3784 struct btrfs_inode_item *inode_item; 3785 struct btrfs_path *path; 3786 struct extent_buffer *leaf; 3787 int ret; 3788 3789 path = btrfs_alloc_path(); 3790 if (!path) 3791 return -ENOMEM; 3792 3793 path->leave_spinning = 1; 3794 ret = btrfs_lookup_inode(trans, root, path, &BTRFS_I(inode)->location, 3795 1); 3796 if (ret) { 3797 if (ret > 0) 3798 ret = -ENOENT; 3799 goto failed; 3800 } 3801 3802 leaf = path->nodes[0]; 3803 inode_item = btrfs_item_ptr(leaf, path->slots[0], 3804 struct btrfs_inode_item); 3805 3806 fill_inode_item(trans, leaf, inode_item, inode); 3807 btrfs_mark_buffer_dirty(leaf); 3808 btrfs_set_inode_last_trans(trans, inode); 3809 ret = 0; 3810failed: 3811 btrfs_free_path(path); 3812 return ret; 3813} 3814 3815/* 3816 * copy everything in the in-memory inode into the btree. 3817 */ 3818noinline int btrfs_update_inode(struct btrfs_trans_handle *trans, 3819 struct btrfs_root *root, struct inode *inode) 3820{ 3821 int ret; 3822 3823 /* 3824 * If the inode is a free space inode, we can deadlock during commit 3825 * if we put it into the delayed code. 
3826 * 3827 * The data relocation inode should also be directly updated 3828 * without delay 3829 */ 3830 if (!btrfs_is_free_space_inode(inode) 3831 && root->root_key.objectid != BTRFS_DATA_RELOC_TREE_OBJECTID 3832 && !root->fs_info->log_root_recovering) { 3833 btrfs_update_root_times(trans, root); 3834 3835 ret = btrfs_delayed_update_inode(trans, root, inode); 3836 if (!ret) 3837 btrfs_set_inode_last_trans(trans, inode); 3838 return ret; 3839 } 3840 3841 return btrfs_update_inode_item(trans, root, inode); 3842} 3843 3844noinline int btrfs_update_inode_fallback(struct btrfs_trans_handle *trans, 3845 struct btrfs_root *root, 3846 struct inode *inode) 3847{ 3848 int ret; 3849 3850 ret = btrfs_update_inode(trans, root, inode); 3851 if (ret == -ENOSPC) 3852 return btrfs_update_inode_item(trans, root, inode); 3853 return ret; 3854} 3855 3856/* 3857 * unlink helper that gets used here in inode.c and in the tree logging 3858 * recovery code. It remove a link in a directory with a given name, and 3859 * also drops the back refs in the inode to the directory 3860 */ 3861static int __btrfs_unlink_inode(struct btrfs_trans_handle *trans, 3862 struct btrfs_root *root, 3863 struct inode *dir, struct inode *inode, 3864 const char *name, int name_len) 3865{ 3866 struct btrfs_path *path; 3867 int ret = 0; 3868 struct extent_buffer *leaf; 3869 struct btrfs_dir_item *di; 3870 struct btrfs_key key; 3871 u64 index; 3872 u64 ino = btrfs_ino(inode); 3873 u64 dir_ino = btrfs_ino(dir); 3874 3875 path = btrfs_alloc_path(); 3876 if (!path) { 3877 ret = -ENOMEM; 3878 goto out; 3879 } 3880 3881 path->leave_spinning = 1; 3882 di = btrfs_lookup_dir_item(trans, root, path, dir_ino, 3883 name, name_len, -1); 3884 if (IS_ERR(di)) { 3885 ret = PTR_ERR(di); 3886 goto err; 3887 } 3888 if (!di) { 3889 ret = -ENOENT; 3890 goto err; 3891 } 3892 leaf = path->nodes[0]; 3893 btrfs_dir_item_key_to_cpu(leaf, di, &key); 3894 ret = btrfs_delete_one_dir_name(trans, root, path, di); 3895 if (ret) 3896 goto err; 3897 btrfs_release_path(path); 3898 3899 /* 3900 * If we don't have dir index, we have to get it by looking up 3901 * the inode ref, since we get the inode ref, remove it directly, 3902 * it is unnecessary to do delayed deletion. 3903 * 3904 * But if we have dir index, needn't search inode ref to get it. 3905 * Since the inode ref is close to the inode item, it is better 3906 * that we delay to delete it, and just do this deletion when 3907 * we update the inode item. 
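 * Put differently: when dir_index is cached in the in-memory inode we reuse
 * it and let btrfs_delayed_delete_inode_ref() queue the INODE_REF removal
 * alongside the next delayed inode update; only when it is not cached do we
 * delete the INODE_REF item directly, which also hands back the index we need.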
3908 */ 3909 if (BTRFS_I(inode)->dir_index) { 3910 ret = btrfs_delayed_delete_inode_ref(inode); 3911 if (!ret) { 3912 index = BTRFS_I(inode)->dir_index; 3913 goto skip_backref; 3914 } 3915 } 3916 3917 ret = btrfs_del_inode_ref(trans, root, name, name_len, ino, 3918 dir_ino, &index); 3919 if (ret) { 3920 btrfs_info(root->fs_info, 3921 "failed to delete reference to %.*s, inode %llu parent %llu", 3922 name_len, name, ino, dir_ino); 3923 btrfs_abort_transaction(trans, root, ret); 3924 goto err; 3925 } 3926skip_backref: 3927 ret = btrfs_delete_delayed_dir_index(trans, root, dir, index); 3928 if (ret) { 3929 btrfs_abort_transaction(trans, root, ret); 3930 goto err; 3931 } 3932 3933 ret = btrfs_del_inode_ref_in_log(trans, root, name, name_len, 3934 inode, dir_ino); 3935 if (ret != 0 && ret != -ENOENT) { 3936 btrfs_abort_transaction(trans, root, ret); 3937 goto err; 3938 } 3939 3940 ret = btrfs_del_dir_entries_in_log(trans, root, name, name_len, 3941 dir, index); 3942 if (ret == -ENOENT) 3943 ret = 0; 3944 else if (ret) 3945 btrfs_abort_transaction(trans, root, ret); 3946err: 3947 btrfs_free_path(path); 3948 if (ret) 3949 goto out; 3950 3951 btrfs_i_size_write(dir, dir->i_size - name_len * 2); 3952 inode_inc_iversion(inode); 3953 inode_inc_iversion(dir); 3954 inode->i_ctime = dir->i_mtime = dir->i_ctime = CURRENT_TIME; 3955 ret = btrfs_update_inode(trans, root, dir); 3956out: 3957 return ret; 3958} 3959 3960int btrfs_unlink_inode(struct btrfs_trans_handle *trans, 3961 struct btrfs_root *root, 3962 struct inode *dir, struct inode *inode, 3963 const char *name, int name_len) 3964{ 3965 int ret; 3966 ret = __btrfs_unlink_inode(trans, root, dir, inode, name, name_len); 3967 if (!ret) { 3968 drop_nlink(inode); 3969 ret = btrfs_update_inode(trans, root, inode); 3970 } 3971 return ret; 3972} 3973 3974/* 3975 * helper to start transaction for unlink and rmdir. 3976 * 3977 * unlink and rmdir are special in btrfs, they do not always free space, so 3978 * if we cannot make our reservations the normal way try and see if there is 3979 * plenty of slack room in the global reserve to migrate, otherwise we cannot 3980 * allow the unlink to occur. 
3981 */ 3982static struct btrfs_trans_handle *__unlink_start_trans(struct inode *dir) 3983{ 3984 struct btrfs_trans_handle *trans; 3985 struct btrfs_root *root = BTRFS_I(dir)->root; 3986 int ret; 3987 3988 /* 3989 * 1 for the possible orphan item 3990 * 1 for the dir item 3991 * 1 for the dir index 3992 * 1 for the inode ref 3993 * 1 for the inode 3994 */ 3995 trans = btrfs_start_transaction(root, 5); 3996 if (!IS_ERR(trans) || PTR_ERR(trans) != -ENOSPC) 3997 return trans; 3998 3999 if (PTR_ERR(trans) == -ENOSPC) { 4000 u64 num_bytes = btrfs_calc_trans_metadata_size(root, 5); 4001 4002 trans = btrfs_start_transaction(root, 0); 4003 if (IS_ERR(trans)) 4004 return trans; 4005 ret = btrfs_cond_migrate_bytes(root->fs_info, 4006 &root->fs_info->trans_block_rsv, 4007 num_bytes, 5); 4008 if (ret) { 4009 btrfs_end_transaction(trans, root); 4010 return ERR_PTR(ret); 4011 } 4012 trans->block_rsv = &root->fs_info->trans_block_rsv; 4013 trans->bytes_reserved = num_bytes; 4014 } 4015 return trans; 4016} 4017 4018static int btrfs_unlink(struct inode *dir, struct dentry *dentry) 4019{ 4020 struct btrfs_root *root = BTRFS_I(dir)->root; 4021 struct btrfs_trans_handle *trans; 4022 struct inode *inode = d_inode(dentry); 4023 int ret; 4024 4025 trans = __unlink_start_trans(dir); 4026 if (IS_ERR(trans)) 4027 return PTR_ERR(trans); 4028 4029 btrfs_record_unlink_dir(trans, dir, d_inode(dentry), 0); 4030 4031 ret = btrfs_unlink_inode(trans, root, dir, d_inode(dentry), 4032 dentry->d_name.name, dentry->d_name.len); 4033 if (ret) 4034 goto out; 4035 4036 if (inode->i_nlink == 0) { 4037 ret = btrfs_orphan_add(trans, inode); 4038 if (ret) 4039 goto out; 4040 } 4041 4042out: 4043 btrfs_end_transaction(trans, root); 4044 btrfs_btree_balance_dirty(root); 4045 return ret; 4046} 4047 4048int btrfs_unlink_subvol(struct btrfs_trans_handle *trans, 4049 struct btrfs_root *root, 4050 struct inode *dir, u64 objectid, 4051 const char *name, int name_len) 4052{ 4053 struct btrfs_path *path; 4054 struct extent_buffer *leaf; 4055 struct btrfs_dir_item *di; 4056 struct btrfs_key key; 4057 u64 index; 4058 int ret; 4059 u64 dir_ino = btrfs_ino(dir); 4060 4061 path = btrfs_alloc_path(); 4062 if (!path) 4063 return -ENOMEM; 4064 4065 di = btrfs_lookup_dir_item(trans, root, path, dir_ino, 4066 name, name_len, -1); 4067 if (IS_ERR_OR_NULL(di)) { 4068 if (!di) 4069 ret = -ENOENT; 4070 else 4071 ret = PTR_ERR(di); 4072 goto out; 4073 } 4074 4075 leaf = path->nodes[0]; 4076 btrfs_dir_item_key_to_cpu(leaf, di, &key); 4077 WARN_ON(key.type != BTRFS_ROOT_ITEM_KEY || key.objectid != objectid); 4078 ret = btrfs_delete_one_dir_name(trans, root, path, di); 4079 if (ret) { 4080 btrfs_abort_transaction(trans, root, ret); 4081 goto out; 4082 } 4083 btrfs_release_path(path); 4084 4085 ret = btrfs_del_root_ref(trans, root->fs_info->tree_root, 4086 objectid, root->root_key.objectid, 4087 dir_ino, &index, name, name_len); 4088 if (ret < 0) { 4089 if (ret != -ENOENT) { 4090 btrfs_abort_transaction(trans, root, ret); 4091 goto out; 4092 } 4093 di = btrfs_search_dir_index_item(root, path, dir_ino, 4094 name, name_len); 4095 if (IS_ERR_OR_NULL(di)) { 4096 if (!di) 4097 ret = -ENOENT; 4098 else 4099 ret = PTR_ERR(di); 4100 btrfs_abort_transaction(trans, root, ret); 4101 goto out; 4102 } 4103 4104 leaf = path->nodes[0]; 4105 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]); 4106 btrfs_release_path(path); 4107 index = key.offset; 4108 } 4109 btrfs_release_path(path); 4110 4111 ret = btrfs_delete_delayed_dir_index(trans, root, dir, index); 4112 if (ret) { 4113 
btrfs_abort_transaction(trans, root, ret); 4114 goto out; 4115 } 4116 4117 btrfs_i_size_write(dir, dir->i_size - name_len * 2); 4118 inode_inc_iversion(dir); 4119 dir->i_mtime = dir->i_ctime = CURRENT_TIME; 4120 ret = btrfs_update_inode_fallback(trans, root, dir); 4121 if (ret) 4122 btrfs_abort_transaction(trans, root, ret); 4123out: 4124 btrfs_free_path(path); 4125 return ret; 4126} 4127 4128static int btrfs_rmdir(struct inode *dir, struct dentry *dentry) 4129{ 4130 struct inode *inode = d_inode(dentry); 4131 int err = 0; 4132 struct btrfs_root *root = BTRFS_I(dir)->root; 4133 struct btrfs_trans_handle *trans; 4134 4135 if (inode->i_size > BTRFS_EMPTY_DIR_SIZE) 4136 return -ENOTEMPTY; 4137 if (btrfs_ino(inode) == BTRFS_FIRST_FREE_OBJECTID) 4138 return -EPERM; 4139 4140 trans = __unlink_start_trans(dir); 4141 if (IS_ERR(trans)) 4142 return PTR_ERR(trans); 4143 4144 if (unlikely(btrfs_ino(inode) == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID)) { 4145 err = btrfs_unlink_subvol(trans, root, dir, 4146 BTRFS_I(inode)->location.objectid, 4147 dentry->d_name.name, 4148 dentry->d_name.len); 4149 goto out; 4150 } 4151 4152 err = btrfs_orphan_add(trans, inode); 4153 if (err) 4154 goto out; 4155 4156 /* now the directory is empty */ 4157 err = btrfs_unlink_inode(trans, root, dir, d_inode(dentry), 4158 dentry->d_name.name, dentry->d_name.len); 4159 if (!err) 4160 btrfs_i_size_write(inode, 0); 4161out: 4162 btrfs_end_transaction(trans, root); 4163 btrfs_btree_balance_dirty(root); 4164 4165 return err; 4166} 4167 4168static int truncate_space_check(struct btrfs_trans_handle *trans, 4169 struct btrfs_root *root, 4170 u64 bytes_deleted) 4171{ 4172 int ret; 4173 4174 bytes_deleted = btrfs_csum_bytes_to_leaves(root, bytes_deleted); 4175 ret = btrfs_block_rsv_add(root, &root->fs_info->trans_block_rsv, 4176 bytes_deleted, BTRFS_RESERVE_NO_FLUSH); 4177 if (!ret) 4178 trans->bytes_reserved += bytes_deleted; 4179 return ret; 4180 4181} 4182 4183static int truncate_inline_extent(struct inode *inode, 4184 struct btrfs_path *path, 4185 struct btrfs_key *found_key, 4186 const u64 item_end, 4187 const u64 new_size) 4188{ 4189 struct extent_buffer *leaf = path->nodes[0]; 4190 int slot = path->slots[0]; 4191 struct btrfs_file_extent_item *fi; 4192 u32 size = (u32)(new_size - found_key->offset); 4193 struct btrfs_root *root = BTRFS_I(inode)->root; 4194 4195 fi = btrfs_item_ptr(leaf, slot, struct btrfs_file_extent_item); 4196 4197 if (btrfs_file_extent_compression(leaf, fi) != BTRFS_COMPRESS_NONE) { 4198 loff_t offset = new_size; 4199 loff_t page_end = ALIGN(offset, PAGE_CACHE_SIZE); 4200 4201 /* 4202 * Zero out the remaining of the last page of our inline extent, 4203 * instead of directly truncating our inline extent here - that 4204 * would be much more complex (decompressing all the data, then 4205 * compressing the truncated data, which might be bigger than 4206 * the size of the inline extent, resize the extent, etc). 4207 * We release the path because to get the page we might need to 4208 * read the extent item from disk (data not in the page cache). 
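 * (The inline item itself is left untouched in this case; zeroing the tail
 * of the page is enough, because the new, smaller i_size is what limits how
 * much of the extent can be read back afterwards.)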
4209 */ 4210 btrfs_release_path(path); 4211 return btrfs_truncate_page(inode, offset, page_end - offset, 0); 4212 } 4213 4214 btrfs_set_file_extent_ram_bytes(leaf, fi, size); 4215 size = btrfs_file_extent_calc_inline_size(size); 4216 btrfs_truncate_item(root, path, size, 1); 4217 4218 if (test_bit(BTRFS_ROOT_REF_COWS, &root->state)) 4219 inode_sub_bytes(inode, item_end + 1 - new_size); 4220 4221 return 0; 4222} 4223 4224/* 4225 * this can truncate away extent items, csum items and directory items. 4226 * It starts at a high offset and removes keys until it can't find 4227 * any higher than new_size 4228 * 4229 * csum items that cross the new i_size are truncated to the new size 4230 * as well. 4231 * 4232 * min_type is the minimum key type to truncate down to. If set to 0, this 4233 * will kill all the items on this inode, including the INODE_ITEM_KEY. 4234 */ 4235int btrfs_truncate_inode_items(struct btrfs_trans_handle *trans, 4236 struct btrfs_root *root, 4237 struct inode *inode, 4238 u64 new_size, u32 min_type) 4239{ 4240 struct btrfs_path *path; 4241 struct extent_buffer *leaf; 4242 struct btrfs_file_extent_item *fi; 4243 struct btrfs_key key; 4244 struct btrfs_key found_key; 4245 u64 extent_start = 0; 4246 u64 extent_num_bytes = 0; 4247 u64 extent_offset = 0; 4248 u64 item_end = 0; 4249 u64 last_size = (u64)-1; 4250 u32 found_type = (u8)-1; 4251 int found_extent; 4252 int del_item; 4253 int pending_del_nr = 0; 4254 int pending_del_slot = 0; 4255 int extent_type = -1; 4256 int ret; 4257 int err = 0; 4258 u64 ino = btrfs_ino(inode); 4259 u64 bytes_deleted = 0; 4260 bool be_nice = 0; 4261 bool should_throttle = 0; 4262 bool should_end = 0; 4263 4264 BUG_ON(new_size > 0 && min_type != BTRFS_EXTENT_DATA_KEY); 4265 4266 /* 4267 * for non-free space inodes and ref cows, we want to back off from 4268 * time to time 4269 */ 4270 if (!btrfs_is_free_space_inode(inode) && 4271 test_bit(BTRFS_ROOT_REF_COWS, &root->state)) 4272 be_nice = 1; 4273 4274 path = btrfs_alloc_path(); 4275 if (!path) 4276 return -ENOMEM; 4277 path->reada = -1; 4278 4279 /* 4280 * We want to drop from the next block forward in case this new size is 4281 * not block aligned since we will be keeping the last block of the 4282 * extent just the way it is. 4283 */ 4284 if (test_bit(BTRFS_ROOT_REF_COWS, &root->state) || 4285 root == root->fs_info->tree_root) 4286 btrfs_drop_extent_cache(inode, ALIGN(new_size, 4287 root->sectorsize), (u64)-1, 0); 4288 4289 /* 4290 * This function is also used to drop the items in the log tree before 4291 * we relog the inode, so if root != BTRFS_I(inode)->root, it means 4292 * it is used to drop the loged items. So we shouldn't kill the delayed 4293 * items. 4294 */ 4295 if (min_type == 0 && root == BTRFS_I(inode)->root) 4296 btrfs_kill_delayed_inode_items(inode); 4297 4298 key.objectid = ino; 4299 key.offset = (u64)-1; 4300 key.type = (u8)-1; 4301 4302search_again: 4303 /* 4304 * with a 16K leaf size and 128MB extents, you can actually queue 4305 * up a huge file in a single leaf. 
Most of the time that 4306 * bytes_deleted is > 0, it will be huge by the time we get here 4307 */ 4308 if (be_nice && bytes_deleted > 32 * 1024 * 1024) { 4309 if (btrfs_should_end_transaction(trans, root)) { 4310 err = -EAGAIN; 4311 goto error; 4312 } 4313 } 4314 4315 4316 path->leave_spinning = 1; 4317 ret = btrfs_search_slot(trans, root, &key, path, -1, 1); 4318 if (ret < 0) { 4319 err = ret; 4320 goto out; 4321 } 4322 4323 if (ret > 0) { 4324 /* there are no items in the tree for us to truncate, we're 4325 * done 4326 */ 4327 if (path->slots[0] == 0) 4328 goto out; 4329 path->slots[0]--; 4330 } 4331 4332 while (1) { 4333 fi = NULL; 4334 leaf = path->nodes[0]; 4335 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]); 4336 found_type = found_key.type; 4337 4338 if (found_key.objectid != ino) 4339 break; 4340 4341 if (found_type < min_type) 4342 break; 4343 4344 item_end = found_key.offset; 4345 if (found_type == BTRFS_EXTENT_DATA_KEY) { 4346 fi = btrfs_item_ptr(leaf, path->slots[0], 4347 struct btrfs_file_extent_item); 4348 extent_type = btrfs_file_extent_type(leaf, fi); 4349 if (extent_type != BTRFS_FILE_EXTENT_INLINE) { 4350 item_end += 4351 btrfs_file_extent_num_bytes(leaf, fi); 4352 } else if (extent_type == BTRFS_FILE_EXTENT_INLINE) { 4353 item_end += btrfs_file_extent_inline_len(leaf, 4354 path->slots[0], fi); 4355 } 4356 item_end--; 4357 } 4358 if (found_type > min_type) { 4359 del_item = 1; 4360 } else { 4361 if (item_end < new_size) 4362 break; 4363 if (found_key.offset >= new_size) 4364 del_item = 1; 4365 else 4366 del_item = 0; 4367 } 4368 found_extent = 0; 4369 /* FIXME, shrink the extent if the ref count is only 1 */ 4370 if (found_type != BTRFS_EXTENT_DATA_KEY) 4371 goto delete; 4372 4373 if (del_item) 4374 last_size = found_key.offset; 4375 else 4376 last_size = new_size; 4377 4378 if (extent_type != BTRFS_FILE_EXTENT_INLINE) { 4379 u64 num_dec; 4380 extent_start = btrfs_file_extent_disk_bytenr(leaf, fi); 4381 if (!del_item) { 4382 u64 orig_num_bytes = 4383 btrfs_file_extent_num_bytes(leaf, fi); 4384 extent_num_bytes = ALIGN(new_size - 4385 found_key.offset, 4386 root->sectorsize); 4387 btrfs_set_file_extent_num_bytes(leaf, fi, 4388 extent_num_bytes); 4389 num_dec = (orig_num_bytes - 4390 extent_num_bytes); 4391 if (test_bit(BTRFS_ROOT_REF_COWS, 4392 &root->state) && 4393 extent_start != 0) 4394 inode_sub_bytes(inode, num_dec); 4395 btrfs_mark_buffer_dirty(leaf); 4396 } else { 4397 extent_num_bytes = 4398 btrfs_file_extent_disk_num_bytes(leaf, 4399 fi); 4400 extent_offset = found_key.offset - 4401 btrfs_file_extent_offset(leaf, fi); 4402 4403 /* FIXME blocksize != 4096 */ 4404 num_dec = btrfs_file_extent_num_bytes(leaf, fi); 4405 if (extent_start != 0) { 4406 found_extent = 1; 4407 if (test_bit(BTRFS_ROOT_REF_COWS, 4408 &root->state)) 4409 inode_sub_bytes(inode, num_dec); 4410 } 4411 } 4412 } else if (extent_type == BTRFS_FILE_EXTENT_INLINE) { 4413 /* 4414 * we can't truncate inline items that have had 4415 * special encodings 4416 */ 4417 if (!del_item && 4418 btrfs_file_extent_encryption(leaf, fi) == 0 && 4419 btrfs_file_extent_other_encoding(leaf, fi) == 0) { 4420 4421 /* 4422 * Need to release path in order to truncate a 4423 * compressed extent. So delete any accumulated 4424 * extent items so far. 
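 * (The pending deletions are tracked as a run of slot numbers in the leaf
 * this path currently points at, so they must be applied before the path
 * can be dropped and the search restarted.)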
4425 */ 4426 if (btrfs_file_extent_compression(leaf, fi) != 4427 BTRFS_COMPRESS_NONE && pending_del_nr) { 4428 err = btrfs_del_items(trans, root, path, 4429 pending_del_slot, 4430 pending_del_nr); 4431 if (err) { 4432 btrfs_abort_transaction(trans, 4433 root, 4434 err); 4435 goto error; 4436 } 4437 pending_del_nr = 0; 4438 } 4439 4440 err = truncate_inline_extent(inode, path, 4441 &found_key, 4442 item_end, 4443 new_size); 4444 if (err) { 4445 btrfs_abort_transaction(trans, 4446 root, err); 4447 goto error; 4448 } 4449 } else if (test_bit(BTRFS_ROOT_REF_COWS, 4450 &root->state)) { 4451 inode_sub_bytes(inode, item_end + 1 - new_size); 4452 } 4453 } 4454delete: 4455 if (del_item) { 4456 if (!pending_del_nr) { 4457 /* no pending yet, add ourselves */ 4458 pending_del_slot = path->slots[0]; 4459 pending_del_nr = 1; 4460 } else if (pending_del_nr && 4461 path->slots[0] + 1 == pending_del_slot) { 4462 /* hop on the pending chunk */ 4463 pending_del_nr++; 4464 pending_del_slot = path->slots[0]; 4465 } else { 4466 BUG(); 4467 } 4468 } else { 4469 break; 4470 } 4471 should_throttle = 0; 4472 4473 if (found_extent && 4474 (test_bit(BTRFS_ROOT_REF_COWS, &root->state) || 4475 root == root->fs_info->tree_root)) { 4476 btrfs_set_path_blocking(path); 4477 bytes_deleted += extent_num_bytes; 4478 ret = btrfs_free_extent(trans, root, extent_start, 4479 extent_num_bytes, 0, 4480 btrfs_header_owner(leaf), 4481 ino, extent_offset, 0); 4482 BUG_ON(ret); 4483 if (btrfs_should_throttle_delayed_refs(trans, root)) 4484 btrfs_async_run_delayed_refs(root, 4485 trans->delayed_ref_updates * 2, 0); 4486 if (be_nice) { 4487 if (truncate_space_check(trans, root, 4488 extent_num_bytes)) { 4489 should_end = 1; 4490 } 4491 if (btrfs_should_throttle_delayed_refs(trans, 4492 root)) { 4493 should_throttle = 1; 4494 } 4495 } 4496 } 4497 4498 if (found_type == BTRFS_INODE_ITEM_KEY) 4499 break; 4500 4501 if (path->slots[0] == 0 || 4502 path->slots[0] != pending_del_slot || 4503 should_throttle || should_end) { 4504 if (pending_del_nr) { 4505 ret = btrfs_del_items(trans, root, path, 4506 pending_del_slot, 4507 pending_del_nr); 4508 if (ret) { 4509 btrfs_abort_transaction(trans, 4510 root, ret); 4511 goto error; 4512 } 4513 pending_del_nr = 0; 4514 } 4515 btrfs_release_path(path); 4516 if (should_throttle) { 4517 unsigned long updates = trans->delayed_ref_updates; 4518 if (updates) { 4519 trans->delayed_ref_updates = 0; 4520 ret = btrfs_run_delayed_refs(trans, root, updates * 2); 4521 if (ret && !err) 4522 err = ret; 4523 } 4524 } 4525 /* 4526 * if we failed to refill our space rsv, bail out 4527 * and let the transaction restart 4528 */ 4529 if (should_end) { 4530 err = -EAGAIN; 4531 goto error; 4532 } 4533 goto search_again; 4534 } else { 4535 path->slots[0]--; 4536 } 4537 } 4538out: 4539 if (pending_del_nr) { 4540 ret = btrfs_del_items(trans, root, path, pending_del_slot, 4541 pending_del_nr); 4542 if (ret) 4543 btrfs_abort_transaction(trans, root, ret); 4544 } 4545error: 4546 if (last_size != (u64)-1 && 4547 root->root_key.objectid != BTRFS_TREE_LOG_OBJECTID) 4548 btrfs_ordered_update_i_size(inode, last_size, NULL); 4549 4550 btrfs_free_path(path); 4551 4552 if (be_nice && bytes_deleted > 32 * 1024 * 1024) { 4553 unsigned long updates = trans->delayed_ref_updates; 4554 if (updates) { 4555 trans->delayed_ref_updates = 0; 4556 ret = btrfs_run_delayed_refs(trans, root, updates * 2); 4557 if (ret && !err) 4558 err = ret; 4559 } 4560 } 4561 return err; 4562} 4563 4564/* 4565 * btrfs_truncate_page - read, zero a chunk and write a page 
4566 * @inode - inode that we're zeroing 4567 * @from - the offset to start zeroing 4568 * @len - the length to zero, 0 to zero the entire range respective to the 4569 * offset 4570 * @front - zero up to the offset instead of from the offset on 4571 * 4572 * This will find the page for the "from" offset and cow the page and zero the 4573 * part we want to zero. This is used with truncate and hole punching. 4574 */ 4575int btrfs_truncate_page(struct inode *inode, loff_t from, loff_t len, 4576 int front) 4577{ 4578 struct address_space *mapping = inode->i_mapping; 4579 struct btrfs_root *root = BTRFS_I(inode)->root; 4580 struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree; 4581 struct btrfs_ordered_extent *ordered; 4582 struct extent_state *cached_state = NULL; 4583 char *kaddr; 4584 u32 blocksize = root->sectorsize; 4585 pgoff_t index = from >> PAGE_CACHE_SHIFT; 4586 unsigned offset = from & (PAGE_CACHE_SIZE-1); 4587 struct page *page; 4588 gfp_t mask = btrfs_alloc_write_mask(mapping); 4589 int ret = 0; 4590 u64 page_start; 4591 u64 page_end; 4592 4593 if ((offset & (blocksize - 1)) == 0 && 4594 (!len || ((len & (blocksize - 1)) == 0))) 4595 goto out; 4596 ret = btrfs_delalloc_reserve_space(inode, PAGE_CACHE_SIZE); 4597 if (ret) 4598 goto out; 4599 4600again: 4601 page = find_or_create_page(mapping, index, mask); 4602 if (!page) { 4603 btrfs_delalloc_release_space(inode, PAGE_CACHE_SIZE); 4604 ret = -ENOMEM; 4605 goto out; 4606 } 4607 4608 page_start = page_offset(page); 4609 page_end = page_start + PAGE_CACHE_SIZE - 1; 4610 4611 if (!PageUptodate(page)) { 4612 ret = btrfs_readpage(NULL, page); 4613 lock_page(page); 4614 if (page->mapping != mapping) { 4615 unlock_page(page); 4616 page_cache_release(page); 4617 goto again; 4618 } 4619 if (!PageUptodate(page)) { 4620 ret = -EIO; 4621 goto out_unlock; 4622 } 4623 } 4624 wait_on_page_writeback(page); 4625 4626 lock_extent_bits(io_tree, page_start, page_end, 0, &cached_state); 4627 set_page_extent_mapped(page); 4628 4629 ordered = btrfs_lookup_ordered_extent(inode, page_start); 4630 if (ordered) { 4631 unlock_extent_cached(io_tree, page_start, page_end, 4632 &cached_state, GFP_NOFS); 4633 unlock_page(page); 4634 page_cache_release(page); 4635 btrfs_start_ordered_extent(inode, ordered, 1); 4636 btrfs_put_ordered_extent(ordered); 4637 goto again; 4638 } 4639 4640 clear_extent_bit(&BTRFS_I(inode)->io_tree, page_start, page_end, 4641 EXTENT_DIRTY | EXTENT_DELALLOC | 4642 EXTENT_DO_ACCOUNTING | EXTENT_DEFRAG, 4643 0, 0, &cached_state, GFP_NOFS); 4644 4645 ret = btrfs_set_extent_delalloc(inode, page_start, page_end, 4646 &cached_state); 4647 if (ret) { 4648 unlock_extent_cached(io_tree, page_start, page_end, 4649 &cached_state, GFP_NOFS); 4650 goto out_unlock; 4651 } 4652 4653 if (offset != PAGE_CACHE_SIZE) { 4654 if (!len) 4655 len = PAGE_CACHE_SIZE - offset; 4656 kaddr = kmap(page); 4657 if (front) 4658 memset(kaddr, 0, offset); 4659 else 4660 memset(kaddr + offset, 0, len); 4661 flush_dcache_page(page); 4662 kunmap(page); 4663 } 4664 ClearPageChecked(page); 4665 set_page_dirty(page); 4666 unlock_extent_cached(io_tree, page_start, page_end, &cached_state, 4667 GFP_NOFS); 4668 4669out_unlock: 4670 if (ret) 4671 btrfs_delalloc_release_space(inode, PAGE_CACHE_SIZE); 4672 unlock_page(page); 4673 page_cache_release(page); 4674out: 4675 return ret; 4676} 4677 4678static int maybe_insert_hole(struct btrfs_root *root, struct inode *inode, 4679 u64 offset, u64 len) 4680{ 4681 struct btrfs_trans_handle *trans; 4682 int ret; 4683 4684 /* 4685 * Still 
need to make sure the inode looks like it's been updated so 4686 * that any holes get logged if we fsync. 4687 */ 4688 if (btrfs_fs_incompat(root->fs_info, NO_HOLES)) { 4689 BTRFS_I(inode)->last_trans = root->fs_info->generation; 4690 BTRFS_I(inode)->last_sub_trans = root->log_transid; 4691 BTRFS_I(inode)->last_log_commit = root->last_log_commit; 4692 return 0; 4693 } 4694 4695 /* 4696 * 1 - for the one we're dropping 4697 * 1 - for the one we're adding 4698 * 1 - for updating the inode. 4699 */ 4700 trans = btrfs_start_transaction(root, 3); 4701 if (IS_ERR(trans)) 4702 return PTR_ERR(trans); 4703 4704 ret = btrfs_drop_extents(trans, root, inode, offset, offset + len, 1); 4705 if (ret) { 4706 btrfs_abort_transaction(trans, root, ret); 4707 btrfs_end_transaction(trans, root); 4708 return ret; 4709 } 4710 4711 ret = btrfs_insert_file_extent(trans, root, btrfs_ino(inode), offset, 4712 0, 0, len, 0, len, 0, 0, 0); 4713 if (ret) 4714 btrfs_abort_transaction(trans, root, ret); 4715 else 4716 btrfs_update_inode(trans, root, inode); 4717 btrfs_end_transaction(trans, root); 4718 return ret; 4719} 4720 4721/* 4722 * This function puts in dummy file extents for the area we're creating a hole 4723 * for. So if we are truncating this file to a larger size we need to insert 4724 * these file extents so that btrfs_get_extent will return an EXTENT_MAP_HOLE for 4725 * the range between oldsize and size 4726 */ 4727int btrfs_cont_expand(struct inode *inode, loff_t oldsize, loff_t size) 4728{ 4729 struct btrfs_root *root = BTRFS_I(inode)->root; 4730 struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree; 4731 struct extent_map *em = NULL; 4732 struct extent_state *cached_state = NULL; 4733 struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree; 4734 u64 hole_start = ALIGN(oldsize, root->sectorsize); 4735 u64 block_end = ALIGN(size, root->sectorsize); 4736 u64 last_byte; 4737 u64 cur_offset; 4738 u64 hole_size; 4739 int err = 0; 4740 4741 /* 4742 * If our size started in the middle of a page we need to zero out the 4743 * rest of the page before we expand the i_size, otherwise we could 4744 * expose stale data.
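 * For example, assuming 4K sectors and pages: growing a 10000 byte file to
 * 1M first zeroes file bytes 10000..12287 of that last partial page via
 * btrfs_truncate_page() below, and then inserts hole extents covering the
 * aligned range 12288..1048575.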
4745 */ 4746 err = btrfs_truncate_page(inode, oldsize, 0, 0); 4747 if (err) 4748 return err; 4749 4750 if (size <= hole_start) 4751 return 0; 4752 4753 while (1) { 4754 struct btrfs_ordered_extent *ordered; 4755 4756 lock_extent_bits(io_tree, hole_start, block_end - 1, 0, 4757 &cached_state); 4758 ordered = btrfs_lookup_ordered_range(inode, hole_start, 4759 block_end - hole_start); 4760 if (!ordered) 4761 break; 4762 unlock_extent_cached(io_tree, hole_start, block_end - 1, 4763 &cached_state, GFP_NOFS); 4764 btrfs_start_ordered_extent(inode, ordered, 1); 4765 btrfs_put_ordered_extent(ordered); 4766 } 4767 4768 cur_offset = hole_start; 4769 while (1) { 4770 em = btrfs_get_extent(inode, NULL, 0, cur_offset, 4771 block_end - cur_offset, 0); 4772 if (IS_ERR(em)) { 4773 err = PTR_ERR(em); 4774 em = NULL; 4775 break; 4776 } 4777 last_byte = min(extent_map_end(em), block_end); 4778 last_byte = ALIGN(last_byte , root->sectorsize); 4779 if (!test_bit(EXTENT_FLAG_PREALLOC, &em->flags)) { 4780 struct extent_map *hole_em; 4781 hole_size = last_byte - cur_offset; 4782 4783 err = maybe_insert_hole(root, inode, cur_offset, 4784 hole_size); 4785 if (err) 4786 break; 4787 btrfs_drop_extent_cache(inode, cur_offset, 4788 cur_offset + hole_size - 1, 0); 4789 hole_em = alloc_extent_map(); 4790 if (!hole_em) { 4791 set_bit(BTRFS_INODE_NEEDS_FULL_SYNC, 4792 &BTRFS_I(inode)->runtime_flags); 4793 goto next; 4794 } 4795 hole_em->start = cur_offset; 4796 hole_em->len = hole_size; 4797 hole_em->orig_start = cur_offset; 4798 4799 hole_em->block_start = EXTENT_MAP_HOLE; 4800 hole_em->block_len = 0; 4801 hole_em->orig_block_len = 0; 4802 hole_em->ram_bytes = hole_size; 4803 hole_em->bdev = root->fs_info->fs_devices->latest_bdev; 4804 hole_em->compress_type = BTRFS_COMPRESS_NONE; 4805 hole_em->generation = root->fs_info->generation; 4806 4807 while (1) { 4808 write_lock(&em_tree->lock); 4809 err = add_extent_mapping(em_tree, hole_em, 1); 4810 write_unlock(&em_tree->lock); 4811 if (err != -EEXIST) 4812 break; 4813 btrfs_drop_extent_cache(inode, cur_offset, 4814 cur_offset + 4815 hole_size - 1, 0); 4816 } 4817 free_extent_map(hole_em); 4818 } 4819next: 4820 free_extent_map(em); 4821 em = NULL; 4822 cur_offset = last_byte; 4823 if (cur_offset >= block_end) 4824 break; 4825 } 4826 free_extent_map(em); 4827 unlock_extent_cached(io_tree, hole_start, block_end - 1, &cached_state, 4828 GFP_NOFS); 4829 return err; 4830} 4831 4832static int wait_snapshoting_atomic_t(atomic_t *a) 4833{ 4834 schedule(); 4835 return 0; 4836} 4837 4838static void wait_for_snapshot_creation(struct btrfs_root *root) 4839{ 4840 while (true) { 4841 int ret; 4842 4843 ret = btrfs_start_write_no_snapshoting(root); 4844 if (ret) 4845 break; 4846 wait_on_atomic_t(&root->will_be_snapshoted, 4847 wait_snapshoting_atomic_t, 4848 TASK_UNINTERRUPTIBLE); 4849 } 4850} 4851 4852static int btrfs_setsize(struct inode *inode, struct iattr *attr) 4853{ 4854 struct btrfs_root *root = BTRFS_I(inode)->root; 4855 struct btrfs_trans_handle *trans; 4856 loff_t oldsize = i_size_read(inode); 4857 loff_t newsize = attr->ia_size; 4858 int mask = attr->ia_valid; 4859 int ret; 4860 4861 /* 4862 * The regular truncate() case without ATTR_CTIME and ATTR_MTIME is a 4863 * special case where we need to update the times despite not having 4864 * these flags set. For all other operations the VFS set these flags 4865 * explicitly if it wants a timestamp update. 
4866 */ 4867 if (newsize != oldsize) { 4868 inode_inc_iversion(inode); 4869 if (!(mask & (ATTR_CTIME | ATTR_MTIME))) 4870 inode->i_ctime = inode->i_mtime = 4871 current_fs_time(inode->i_sb); 4872 } 4873 4874 if (newsize > oldsize) { 4875 truncate_pagecache(inode, newsize); 4876 /* 4877 * Don't do an expanding truncate while snapshoting is ongoing. 4878 * This is to ensure the snapshot captures a fully consistent 4879 * state of this file - if the snapshot captures this expanding 4880 * truncation, it must capture all writes that happened before 4881 * this truncation. 4882 */ 4883 wait_for_snapshot_creation(root); 4884 ret = btrfs_cont_expand(inode, oldsize, newsize); 4885 if (ret) { 4886 btrfs_end_write_no_snapshoting(root); 4887 return ret; 4888 } 4889 4890 trans = btrfs_start_transaction(root, 1); 4891 if (IS_ERR(trans)) { 4892 btrfs_end_write_no_snapshoting(root); 4893 return PTR_ERR(trans); 4894 } 4895 4896 i_size_write(inode, newsize); 4897 btrfs_ordered_update_i_size(inode, i_size_read(inode), NULL); 4898 ret = btrfs_update_inode(trans, root, inode); 4899 btrfs_end_write_no_snapshoting(root); 4900 btrfs_end_transaction(trans, root); 4901 } else { 4902 4903 /* 4904 * We're truncating a file that used to have good data down to 4905 * zero. Make sure it gets into the ordered flush list so that 4906 * any new writes get down to disk quickly. 4907 */ 4908 if (newsize == 0) 4909 set_bit(BTRFS_INODE_ORDERED_DATA_CLOSE, 4910 &BTRFS_I(inode)->runtime_flags); 4911 4912 /* 4913 * 1 for the orphan item we're going to add 4914 * 1 for the orphan item deletion. 4915 */ 4916 trans = btrfs_start_transaction(root, 2); 4917 if (IS_ERR(trans)) 4918 return PTR_ERR(trans); 4919 4920 /* 4921 * We need to do this in case we fail at _any_ point during the 4922 * actual truncate. Once we do the truncate_setsize we could 4923 * invalidate pages which forces any outstanding ordered io to 4924 * be instantly completed which will give us extents that need 4925 * to be truncated. If we fail to get an orphan inode down we 4926 * could have left over extents that were never meant to live, 4927 * so we need to garuntee from this point on that everything 4928 * will be consistent. 4929 */ 4930 ret = btrfs_orphan_add(trans, inode); 4931 btrfs_end_transaction(trans, root); 4932 if (ret) 4933 return ret; 4934 4935 /* we don't support swapfiles, so vmtruncate shouldn't fail */ 4936 truncate_setsize(inode, newsize); 4937 4938 /* Disable nonlocked read DIO to avoid the end less truncate */ 4939 btrfs_inode_block_unlocked_dio(inode); 4940 inode_dio_wait(inode); 4941 btrfs_inode_resume_unlocked_dio(inode); 4942 4943 ret = btrfs_truncate(inode); 4944 if (ret && inode->i_nlink) { 4945 int err; 4946 4947 /* 4948 * failed to truncate, disk_i_size is only adjusted down 4949 * as we remove extents, so it should represent the true 4950 * size of the inode, so reset the in memory size and 4951 * delete our orphan entry. 
4952 */ 4953 trans = btrfs_join_transaction(root); 4954 if (IS_ERR(trans)) { 4955 btrfs_orphan_del(NULL, inode); 4956 return ret; 4957 } 4958 i_size_write(inode, BTRFS_I(inode)->disk_i_size); 4959 err = btrfs_orphan_del(trans, inode); 4960 if (err) 4961 btrfs_abort_transaction(trans, root, err); 4962 btrfs_end_transaction(trans, root); 4963 } 4964 } 4965 4966 return ret; 4967} 4968 4969static int btrfs_setattr(struct dentry *dentry, struct iattr *attr) 4970{ 4971 struct inode *inode = d_inode(dentry); 4972 struct btrfs_root *root = BTRFS_I(inode)->root; 4973 int err; 4974 4975 if (btrfs_root_readonly(root)) 4976 return -EROFS; 4977 4978 err = inode_change_ok(inode, attr); 4979 if (err) 4980 return err; 4981 4982 if (S_ISREG(inode->i_mode) && (attr->ia_valid & ATTR_SIZE)) { 4983 err = btrfs_setsize(inode, attr); 4984 if (err) 4985 return err; 4986 } 4987 4988 if (attr->ia_valid) { 4989 setattr_copy(inode, attr); 4990 inode_inc_iversion(inode); 4991 err = btrfs_dirty_inode(inode); 4992 4993 if (!err && attr->ia_valid & ATTR_MODE) 4994 err = posix_acl_chmod(inode, inode->i_mode); 4995 } 4996 4997 return err; 4998} 4999 5000/* 5001 * While truncating the inode pages during eviction, we get the VFS calling 5002 * btrfs_invalidatepage() against each page of the inode. This is slow because 5003 * the calls to btrfs_invalidatepage() result in a huge amount of calls to 5004 * lock_extent_bits() and clear_extent_bit(), which keep merging and splitting 5005 * extent_state structures over and over, wasting lots of time. 5006 * 5007 * Therefore if the inode is being evicted, let btrfs_invalidatepage() skip all 5008 * those expensive operations on a per page basis and do only the ordered io 5009 * finishing, while we release here the extent_map and extent_state structures, 5010 * without the excessive merging and splitting. 
5011 */ 5012static void evict_inode_truncate_pages(struct inode *inode) 5013{ 5014 struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree; 5015 struct extent_map_tree *map_tree = &BTRFS_I(inode)->extent_tree; 5016 struct rb_node *node; 5017 5018 ASSERT(inode->i_state & I_FREEING); 5019 truncate_inode_pages_final(&inode->i_data); 5020 5021 write_lock(&map_tree->lock); 5022 while (!RB_EMPTY_ROOT(&map_tree->map)) { 5023 struct extent_map *em; 5024 5025 node = rb_first(&map_tree->map); 5026 em = rb_entry(node, struct extent_map, rb_node); 5027 clear_bit(EXTENT_FLAG_PINNED, &em->flags); 5028 clear_bit(EXTENT_FLAG_LOGGING, &em->flags); 5029 remove_extent_mapping(map_tree, em); 5030 free_extent_map(em); 5031 if (need_resched()) { 5032 write_unlock(&map_tree->lock); 5033 cond_resched(); 5034 write_lock(&map_tree->lock); 5035 } 5036 } 5037 write_unlock(&map_tree->lock); 5038 5039 spin_lock(&io_tree->lock); 5040 while (!RB_EMPTY_ROOT(&io_tree->state)) { 5041 struct extent_state *state; 5042 struct extent_state *cached_state = NULL; 5043 5044 node = rb_first(&io_tree->state); 5045 state = rb_entry(node, struct extent_state, rb_node); 5046 atomic_inc(&state->refs); 5047 spin_unlock(&io_tree->lock); 5048 5049 lock_extent_bits(io_tree, state->start, state->end, 5050 0, &cached_state); 5051 clear_extent_bit(io_tree, state->start, state->end, 5052 EXTENT_LOCKED | EXTENT_DIRTY | 5053 EXTENT_DELALLOC | EXTENT_DO_ACCOUNTING | 5054 EXTENT_DEFRAG, 1, 1, 5055 &cached_state, GFP_NOFS); 5056 free_extent_state(state); 5057 5058 cond_resched(); 5059 spin_lock(&io_tree->lock); 5060 } 5061 spin_unlock(&io_tree->lock); 5062} 5063 5064void btrfs_evict_inode(struct inode *inode) 5065{ 5066 struct btrfs_trans_handle *trans; 5067 struct btrfs_root *root = BTRFS_I(inode)->root; 5068 struct btrfs_block_rsv *rsv, *global_rsv; 5069 int steal_from_global = 0; 5070 u64 min_size = btrfs_calc_trunc_metadata_size(root, 1); 5071 int ret; 5072 5073 trace_btrfs_inode_evict(inode); 5074 5075 evict_inode_truncate_pages(inode); 5076 5077 if (inode->i_nlink && 5078 ((btrfs_root_refs(&root->root_item) != 0 && 5079 root->root_key.objectid != BTRFS_ROOT_TREE_OBJECTID) || 5080 btrfs_is_free_space_inode(inode))) 5081 goto no_delete; 5082 5083 if (is_bad_inode(inode)) { 5084 btrfs_orphan_del(NULL, inode); 5085 goto no_delete; 5086 } 5087 /* do we really want it for ->i_nlink > 0 and zero btrfs_root_refs? 
*/ 5088 if (!special_file(inode->i_mode)) 5089 btrfs_wait_ordered_range(inode, 0, (u64)-1); 5090 5091 btrfs_free_io_failure_record(inode, 0, (u64)-1); 5092 5093 if (root->fs_info->log_root_recovering) { 5094 BUG_ON(test_bit(BTRFS_INODE_HAS_ORPHAN_ITEM, 5095 &BTRFS_I(inode)->runtime_flags)); 5096 goto no_delete; 5097 } 5098 5099 if (inode->i_nlink > 0) { 5100 BUG_ON(btrfs_root_refs(&root->root_item) != 0 && 5101 root->root_key.objectid != BTRFS_ROOT_TREE_OBJECTID); 5102 goto no_delete; 5103 } 5104 5105 ret = btrfs_commit_inode_delayed_inode(inode); 5106 if (ret) { 5107 btrfs_orphan_del(NULL, inode); 5108 goto no_delete; 5109 } 5110 5111 rsv = btrfs_alloc_block_rsv(root, BTRFS_BLOCK_RSV_TEMP); 5112 if (!rsv) { 5113 btrfs_orphan_del(NULL, inode); 5114 goto no_delete; 5115 } 5116 rsv->size = min_size; 5117 rsv->failfast = 1; 5118 global_rsv = &root->fs_info->global_block_rsv; 5119 5120 btrfs_i_size_write(inode, 0); 5121 5122 /* 5123 * This is a bit simpler than btrfs_truncate since we've already 5124 * reserved our space for our orphan item in the unlink, so we just 5125 * need to reserve some slack space in case we add bytes and update 5126 * inode item when doing the truncate. 5127 */ 5128 while (1) { 5129 ret = btrfs_block_rsv_refill(root, rsv, min_size, 5130 BTRFS_RESERVE_FLUSH_LIMIT); 5131 5132 /* 5133 * Try and steal from the global reserve since we will 5134 * likely not use this space anyway, we want to try as 5135 * hard as possible to get this to work. 5136 */ 5137 if (ret) 5138 steal_from_global++; 5139 else 5140 steal_from_global = 0; 5141 ret = 0; 5142 5143 /* 5144 * steal_from_global == 0: we reserved stuff, hooray! 5145 * steal_from_global == 1: we didn't reserve stuff, boo! 5146 * steal_from_global == 2: we've committed, still not a lot of 5147 * room but maybe we'll have room in the global reserve this 5148 * time. 5149 * steal_from_global == 3: abandon all hope! 5150 */ 5151 if (steal_from_global > 2) { 5152 btrfs_warn(root->fs_info, 5153 "Could not get space for a delete, will truncate on mount %d", 5154 ret); 5155 btrfs_orphan_del(NULL, inode); 5156 btrfs_free_block_rsv(root, rsv); 5157 goto no_delete; 5158 } 5159 5160 trans = btrfs_join_transaction(root); 5161 if (IS_ERR(trans)) { 5162 btrfs_orphan_del(NULL, inode); 5163 btrfs_free_block_rsv(root, rsv); 5164 goto no_delete; 5165 } 5166 5167 /* 5168 * We can't just steal from the global reserve, we need tomake 5169 * sure there is room to do it, if not we need to commit and try 5170 * again. 5171 */ 5172 if (steal_from_global) { 5173 if (!btrfs_check_space_for_delayed_refs(trans, root)) 5174 ret = btrfs_block_rsv_migrate(global_rsv, rsv, 5175 min_size); 5176 else 5177 ret = -ENOSPC; 5178 } 5179 5180 /* 5181 * Couldn't steal from the global reserve, we have too much 5182 * pending stuff built up, commit the transaction and try it 5183 * again. 
5184 */ 5185 if (ret) { 5186 ret = btrfs_commit_transaction(trans, root); 5187 if (ret) { 5188 btrfs_orphan_del(NULL, inode); 5189 btrfs_free_block_rsv(root, rsv); 5190 goto no_delete; 5191 } 5192 continue; 5193 } else { 5194 steal_from_global = 0; 5195 } 5196 5197 trans->block_rsv = rsv; 5198 5199 ret = btrfs_truncate_inode_items(trans, root, inode, 0, 0); 5200 if (ret != -ENOSPC && ret != -EAGAIN) 5201 break; 5202 5203 trans->block_rsv = &root->fs_info->trans_block_rsv; 5204 btrfs_end_transaction(trans, root); 5205 trans = NULL; 5206 btrfs_btree_balance_dirty(root); 5207 } 5208 5209 btrfs_free_block_rsv(root, rsv); 5210 5211 /* 5212 * Errors here aren't a big deal, it just means we leave orphan items 5213 * in the tree. They will be cleaned up on the next mount. 5214 */ 5215 if (ret == 0) { 5216 trans->block_rsv = root->orphan_block_rsv; 5217 btrfs_orphan_del(trans, inode); 5218 } else { 5219 btrfs_orphan_del(NULL, inode); 5220 } 5221 5222 trans->block_rsv = &root->fs_info->trans_block_rsv; 5223 if (!(root == root->fs_info->tree_root || 5224 root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID)) 5225 btrfs_return_ino(root, btrfs_ino(inode)); 5226 5227 btrfs_end_transaction(trans, root); 5228 btrfs_btree_balance_dirty(root); 5229no_delete: 5230 btrfs_remove_delayed_node(inode); 5231 clear_inode(inode); 5232 return; 5233} 5234 5235/* 5236 * this returns the key found in the dir entry in the location pointer. 5237 * If no dir entries were found, location->objectid is 0. 5238 */ 5239static int btrfs_inode_by_name(struct inode *dir, struct dentry *dentry, 5240 struct btrfs_key *location) 5241{ 5242 const char *name = dentry->d_name.name; 5243 int namelen = dentry->d_name.len; 5244 struct btrfs_dir_item *di; 5245 struct btrfs_path *path; 5246 struct btrfs_root *root = BTRFS_I(dir)->root; 5247 int ret = 0; 5248 5249 path = btrfs_alloc_path(); 5250 if (!path) 5251 return -ENOMEM; 5252 5253 di = btrfs_lookup_dir_item(NULL, root, path, btrfs_ino(dir), name, 5254 namelen, 0); 5255 if (IS_ERR(di)) 5256 ret = PTR_ERR(di); 5257 5258 if (IS_ERR_OR_NULL(di)) 5259 goto out_err; 5260 5261 btrfs_dir_item_key_to_cpu(path->nodes[0], di, location); 5262out: 5263 btrfs_free_path(path); 5264 return ret; 5265out_err: 5266 location->objectid = 0; 5267 goto out; 5268} 5269 5270/* 5271 * when we hit a tree root in a directory, the btrfs part of the inode 5272 * needs to be changed to reflect the root directory of the tree root. This 5273 * is kind of like crossing a mount point. 
5274 */ 5275static int fixup_tree_root_location(struct btrfs_root *root, 5276 struct inode *dir, 5277 struct dentry *dentry, 5278 struct btrfs_key *location, 5279 struct btrfs_root **sub_root) 5280{ 5281 struct btrfs_path *path; 5282 struct btrfs_root *new_root; 5283 struct btrfs_root_ref *ref; 5284 struct extent_buffer *leaf; 5285 struct btrfs_key key; 5286 int ret; 5287 int err = 0; 5288 5289 path = btrfs_alloc_path(); 5290 if (!path) { 5291 err = -ENOMEM; 5292 goto out; 5293 } 5294 5295 err = -ENOENT; 5296 key.objectid = BTRFS_I(dir)->root->root_key.objectid; 5297 key.type = BTRFS_ROOT_REF_KEY; 5298 key.offset = location->objectid; 5299 5300 ret = btrfs_search_slot(NULL, root->fs_info->tree_root, &key, path, 5301 0, 0); 5302 if (ret) { 5303 if (ret < 0) 5304 err = ret; 5305 goto out; 5306 } 5307 5308 leaf = path->nodes[0]; 5309 ref = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_root_ref); 5310 if (btrfs_root_ref_dirid(leaf, ref) != btrfs_ino(dir) || 5311 btrfs_root_ref_name_len(leaf, ref) != dentry->d_name.len) 5312 goto out; 5313 5314 ret = memcmp_extent_buffer(leaf, dentry->d_name.name, 5315 (unsigned long)(ref + 1), 5316 dentry->d_name.len); 5317 if (ret) 5318 goto out; 5319 5320 btrfs_release_path(path); 5321 5322 new_root = btrfs_read_fs_root_no_name(root->fs_info, location); 5323 if (IS_ERR(new_root)) { 5324 err = PTR_ERR(new_root); 5325 goto out; 5326 } 5327 5328 *sub_root = new_root; 5329 location->objectid = btrfs_root_dirid(&new_root->root_item); 5330 location->type = BTRFS_INODE_ITEM_KEY; 5331 location->offset = 0; 5332 err = 0; 5333out: 5334 btrfs_free_path(path); 5335 return err; 5336} 5337 5338static void inode_tree_add(struct inode *inode) 5339{ 5340 struct btrfs_root *root = BTRFS_I(inode)->root; 5341 struct btrfs_inode *entry; 5342 struct rb_node **p; 5343 struct rb_node *parent; 5344 struct rb_node *new = &BTRFS_I(inode)->rb_node; 5345 u64 ino = btrfs_ino(inode); 5346 5347 if (inode_unhashed(inode)) 5348 return; 5349 parent = NULL; 5350 spin_lock(&root->inode_lock); 5351 p = &root->inode_tree.rb_node; 5352 while (*p) { 5353 parent = *p; 5354 entry = rb_entry(parent, struct btrfs_inode, rb_node); 5355 5356 if (ino < btrfs_ino(&entry->vfs_inode)) 5357 p = &parent->rb_left; 5358 else if (ino > btrfs_ino(&entry->vfs_inode)) 5359 p = &parent->rb_right; 5360 else { 5361 WARN_ON(!(entry->vfs_inode.i_state & 5362 (I_WILL_FREE | I_FREEING))); 5363 rb_replace_node(parent, new, &root->inode_tree); 5364 RB_CLEAR_NODE(parent); 5365 spin_unlock(&root->inode_lock); 5366 return; 5367 } 5368 } 5369 rb_link_node(new, parent, p); 5370 rb_insert_color(new, &root->inode_tree); 5371 spin_unlock(&root->inode_lock); 5372} 5373 5374static void inode_tree_del(struct inode *inode) 5375{ 5376 struct btrfs_root *root = BTRFS_I(inode)->root; 5377 int empty = 0; 5378 5379 spin_lock(&root->inode_lock); 5380 if (!RB_EMPTY_NODE(&BTRFS_I(inode)->rb_node)) { 5381 rb_erase(&BTRFS_I(inode)->rb_node, &root->inode_tree); 5382 RB_CLEAR_NODE(&BTRFS_I(inode)->rb_node); 5383 empty = RB_EMPTY_ROOT(&root->inode_tree); 5384 } 5385 spin_unlock(&root->inode_lock); 5386 5387 if (empty && btrfs_root_refs(&root->root_item) == 0) { 5388 synchronize_srcu(&root->fs_info->subvol_srcu); 5389 spin_lock(&root->inode_lock); 5390 empty = RB_EMPTY_ROOT(&root->inode_tree); 5391 spin_unlock(&root->inode_lock); 5392 if (empty) 5393 btrfs_add_dead_root(root); 5394 } 5395} 5396 5397void btrfs_invalidate_inodes(struct btrfs_root *root) 5398{ 5399 struct rb_node *node; 5400 struct rb_node *prev; 5401 struct btrfs_inode *entry; 5402 
struct inode *inode; 5403 u64 objectid = 0; 5404 5405 if (!test_bit(BTRFS_FS_STATE_ERROR, &root->fs_info->fs_state)) 5406 WARN_ON(btrfs_root_refs(&root->root_item) != 0); 5407 5408 spin_lock(&root->inode_lock); 5409again: 5410 node = root->inode_tree.rb_node; 5411 prev = NULL; 5412 while (node) { 5413 prev = node; 5414 entry = rb_entry(node, struct btrfs_inode, rb_node); 5415 5416 if (objectid < btrfs_ino(&entry->vfs_inode)) 5417 node = node->rb_left; 5418 else if (objectid > btrfs_ino(&entry->vfs_inode)) 5419 node = node->rb_right; 5420 else 5421 break; 5422 } 5423 if (!node) { 5424 while (prev) { 5425 entry = rb_entry(prev, struct btrfs_inode, rb_node); 5426 if (objectid <= btrfs_ino(&entry->vfs_inode)) { 5427 node = prev; 5428 break; 5429 } 5430 prev = rb_next(prev); 5431 } 5432 } 5433 while (node) { 5434 entry = rb_entry(node, struct btrfs_inode, rb_node); 5435 objectid = btrfs_ino(&entry->vfs_inode) + 1; 5436 inode = igrab(&entry->vfs_inode); 5437 if (inode) { 5438 spin_unlock(&root->inode_lock); 5439 if (atomic_read(&inode->i_count) > 1) 5440 d_prune_aliases(inode); 5441 /* 5442 * btrfs_drop_inode will have it removed from 5443 * the inode cache when its usage count 5444 * hits zero. 5445 */ 5446 iput(inode); 5447 cond_resched(); 5448 spin_lock(&root->inode_lock); 5449 goto again; 5450 } 5451 5452 if (cond_resched_lock(&root->inode_lock)) 5453 goto again; 5454 5455 node = rb_next(node); 5456 } 5457 spin_unlock(&root->inode_lock); 5458} 5459 5460static int btrfs_init_locked_inode(struct inode *inode, void *p) 5461{ 5462 struct btrfs_iget_args *args = p; 5463 inode->i_ino = args->location->objectid; 5464 memcpy(&BTRFS_I(inode)->location, args->location, 5465 sizeof(*args->location)); 5466 BTRFS_I(inode)->root = args->root; 5467 return 0; 5468} 5469 5470static int btrfs_find_actor(struct inode *inode, void *opaque) 5471{ 5472 struct btrfs_iget_args *args = opaque; 5473 return args->location->objectid == BTRFS_I(inode)->location.objectid && 5474 args->root == BTRFS_I(inode)->root; 5475} 5476 5477static struct inode *btrfs_iget_locked(struct super_block *s, 5478 struct btrfs_key *location, 5479 struct btrfs_root *root) 5480{ 5481 struct inode *inode; 5482 struct btrfs_iget_args args; 5483 unsigned long hashval = btrfs_inode_hash(location->objectid, root); 5484 5485 args.location = location; 5486 args.root = root; 5487 5488 inode = iget5_locked(s, hashval, btrfs_find_actor, 5489 btrfs_init_locked_inode, 5490 (void *)&args); 5491 return inode; 5492} 5493 5494/* Get an inode object given its location and corresponding root. 
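 * The in-core inode is found through the VFS inode hash, keyed on the
 * object id hashed together with the root; btrfs_find_actor() requires both
 * to match, so the same object id in different subvolumes yields distinct
 * inodes.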
5495 * Returns in *is_new if the inode was read from disk 5496 */ 5497struct inode *btrfs_iget(struct super_block *s, struct btrfs_key *location, 5498 struct btrfs_root *root, int *new) 5499{ 5500 struct inode *inode; 5501 5502 inode = btrfs_iget_locked(s, location, root); 5503 if (!inode) 5504 return ERR_PTR(-ENOMEM); 5505 5506 if (inode->i_state & I_NEW) { 5507 btrfs_read_locked_inode(inode); 5508 if (!is_bad_inode(inode)) { 5509 inode_tree_add(inode); 5510 unlock_new_inode(inode); 5511 if (new) 5512 *new = 1; 5513 } else { 5514 unlock_new_inode(inode); 5515 iput(inode); 5516 inode = ERR_PTR(-ESTALE); 5517 } 5518 } 5519 5520 return inode; 5521} 5522 5523static struct inode *new_simple_dir(struct super_block *s, 5524 struct btrfs_key *key, 5525 struct btrfs_root *root) 5526{ 5527 struct inode *inode = new_inode(s); 5528 5529 if (!inode) 5530 return ERR_PTR(-ENOMEM); 5531 5532 BTRFS_I(inode)->root = root; 5533 memcpy(&BTRFS_I(inode)->location, key, sizeof(*key)); 5534 set_bit(BTRFS_INODE_DUMMY, &BTRFS_I(inode)->runtime_flags); 5535 5536 inode->i_ino = BTRFS_EMPTY_SUBVOL_DIR_OBJECTID; 5537 inode->i_op = &btrfs_dir_ro_inode_operations; 5538 inode->i_fop = &simple_dir_operations; 5539 inode->i_mode = S_IFDIR | S_IRUGO | S_IWUSR | S_IXUGO; 5540 inode->i_mtime = CURRENT_TIME; 5541 inode->i_atime = inode->i_mtime; 5542 inode->i_ctime = inode->i_mtime; 5543 BTRFS_I(inode)->i_otime = inode->i_mtime; 5544 5545 return inode; 5546} 5547 5548struct inode *btrfs_lookup_dentry(struct inode *dir, struct dentry *dentry) 5549{ 5550 struct inode *inode; 5551 struct btrfs_root *root = BTRFS_I(dir)->root; 5552 struct btrfs_root *sub_root = root; 5553 struct btrfs_key location; 5554 int index; 5555 int ret = 0; 5556 5557 if (dentry->d_name.len > BTRFS_NAME_LEN) 5558 return ERR_PTR(-ENAMETOOLONG); 5559 5560 ret = btrfs_inode_by_name(dir, dentry, &location); 5561 if (ret < 0) 5562 return ERR_PTR(ret); 5563 5564 if (location.objectid == 0) 5565 return ERR_PTR(-ENOENT); 5566 5567 if (location.type == BTRFS_INODE_ITEM_KEY) { 5568 inode = btrfs_iget(dir->i_sb, &location, root, NULL); 5569 return inode; 5570 } 5571 5572 BUG_ON(location.type != BTRFS_ROOT_ITEM_KEY); 5573 5574 index = srcu_read_lock(&root->fs_info->subvol_srcu); 5575 ret = fixup_tree_root_location(root, dir, dentry, 5576 &location, &sub_root); 5577 if (ret < 0) { 5578 if (ret != -ENOENT) 5579 inode = ERR_PTR(ret); 5580 else 5581 inode = new_simple_dir(dir->i_sb, &location, sub_root); 5582 } else { 5583 inode = btrfs_iget(dir->i_sb, &location, sub_root, NULL); 5584 } 5585 srcu_read_unlock(&root->fs_info->subvol_srcu, index); 5586 5587 if (!IS_ERR(inode) && root != sub_root) { 5588 down_read(&root->fs_info->cleanup_work_sem); 5589 if (!(inode->i_sb->s_flags & MS_RDONLY)) 5590 ret = btrfs_orphan_cleanup(sub_root); 5591 up_read(&root->fs_info->cleanup_work_sem); 5592 if (ret) { 5593 iput(inode); 5594 inode = ERR_PTR(ret); 5595 } 5596 } 5597 5598 return inode; 5599} 5600 5601static int btrfs_dentry_delete(const struct dentry *dentry) 5602{ 5603 struct btrfs_root *root; 5604 struct inode *inode = d_inode(dentry); 5605 5606 if (!inode && !IS_ROOT(dentry)) 5607 inode = d_inode(dentry->d_parent); 5608 5609 if (inode) { 5610 root = BTRFS_I(inode)->root; 5611 if (btrfs_root_refs(&root->root_item) == 0) 5612 return 1; 5613 5614 if (btrfs_ino(inode) == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID) 5615 return 1; 5616 } 5617 return 0; 5618} 5619 5620static void btrfs_dentry_release(struct dentry *dentry) 5621{ 5622 kfree(dentry->d_fsdata); 5623} 5624 5625static struct dentry 
*btrfs_lookup(struct inode *dir, struct dentry *dentry, 5626 unsigned int flags) 5627{ 5628 struct inode *inode; 5629 5630 inode = btrfs_lookup_dentry(dir, dentry); 5631 if (IS_ERR(inode)) { 5632 if (PTR_ERR(inode) == -ENOENT) 5633 inode = NULL; 5634 else 5635 return ERR_CAST(inode); 5636 } 5637 5638 return d_splice_alias(inode, dentry); 5639} 5640 5641unsigned char btrfs_filetype_table[] = { 5642 DT_UNKNOWN, DT_REG, DT_DIR, DT_CHR, DT_BLK, DT_FIFO, DT_SOCK, DT_LNK 5643}; 5644 5645static int btrfs_real_readdir(struct file *file, struct dir_context *ctx) 5646{ 5647 struct inode *inode = file_inode(file); 5648 struct btrfs_root *root = BTRFS_I(inode)->root; 5649 struct btrfs_item *item; 5650 struct btrfs_dir_item *di; 5651 struct btrfs_key key; 5652 struct btrfs_key found_key; 5653 struct btrfs_path *path; 5654 struct list_head ins_list; 5655 struct list_head del_list; 5656 int ret; 5657 struct extent_buffer *leaf; 5658 int slot; 5659 unsigned char d_type; 5660 int over = 0; 5661 u32 di_cur; 5662 u32 di_total; 5663 u32 di_len; 5664 int key_type = BTRFS_DIR_INDEX_KEY; 5665 char tmp_name[32]; 5666 char *name_ptr; 5667 int name_len; 5668 int is_curr = 0; /* ctx->pos points to the current index? */ 5669 bool emitted; 5670 5671 /* FIXME, use a real flag for deciding about the key type */ 5672 if (root->fs_info->tree_root == root) 5673 key_type = BTRFS_DIR_ITEM_KEY; 5674 5675 if (!dir_emit_dots(file, ctx)) 5676 return 0; 5677 5678 path = btrfs_alloc_path(); 5679 if (!path) 5680 return -ENOMEM; 5681 5682 path->reada = 1; 5683 5684 if (key_type == BTRFS_DIR_INDEX_KEY) { 5685 INIT_LIST_HEAD(&ins_list); 5686 INIT_LIST_HEAD(&del_list); 5687 btrfs_get_delayed_items(inode, &ins_list, &del_list); 5688 } 5689 5690 key.type = key_type; 5691 key.offset = ctx->pos; 5692 key.objectid = btrfs_ino(inode); 5693 5694 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0); 5695 if (ret < 0) 5696 goto err; 5697 5698 emitted = false; 5699 while (1) { 5700 leaf = path->nodes[0]; 5701 slot = path->slots[0]; 5702 if (slot >= btrfs_header_nritems(leaf)) { 5703 ret = btrfs_next_leaf(root, path); 5704 if (ret < 0) 5705 goto err; 5706 else if (ret > 0) 5707 break; 5708 continue; 5709 } 5710 5711 item = btrfs_item_nr(slot); 5712 btrfs_item_key_to_cpu(leaf, &found_key, slot); 5713 5714 if (found_key.objectid != key.objectid) 5715 break; 5716 if (found_key.type != key_type) 5717 break; 5718 if (found_key.offset < ctx->pos) 5719 goto next; 5720 if (key_type == BTRFS_DIR_INDEX_KEY && 5721 btrfs_should_delete_dir_index(&del_list, 5722 found_key.offset)) 5723 goto next; 5724 5725 ctx->pos = found_key.offset; 5726 is_curr = 1; 5727 5728 di = btrfs_item_ptr(leaf, slot, struct btrfs_dir_item); 5729 di_cur = 0; 5730 di_total = btrfs_item_size(leaf, item); 5731 5732 while (di_cur < di_total) { 5733 struct btrfs_key location; 5734 5735 if (verify_dir_item(root, leaf, di)) 5736 break; 5737 5738 name_len = btrfs_dir_name_len(leaf, di); 5739 if (name_len <= sizeof(tmp_name)) { 5740 name_ptr = tmp_name; 5741 } else { 5742 name_ptr = kmalloc(name_len, GFP_NOFS); 5743 if (!name_ptr) { 5744 ret = -ENOMEM; 5745 goto err; 5746 } 5747 } 5748 read_extent_buffer(leaf, name_ptr, 5749 (unsigned long)(di + 1), name_len); 5750 5751 d_type = btrfs_filetype_table[btrfs_dir_type(leaf, di)]; 5752 btrfs_dir_item_key_to_cpu(leaf, di, &location); 5753 5754 5755 /* is this a reference to our own snapshot? If so 5756 * skip it. 
5757 * 5758 * In contrast to old kernels, we insert the snapshot's 5759 * dir item and dir index after it has been created, so 5760 * we won't find a reference to our own snapshot. We 5761 * still keep the following code for backward 5762 * compatibility. 5763 */ 5764 if (location.type == BTRFS_ROOT_ITEM_KEY && 5765 location.objectid == root->root_key.objectid) { 5766 over = 0; 5767 goto skip; 5768 } 5769 over = !dir_emit(ctx, name_ptr, name_len, 5770 location.objectid, d_type); 5771 5772skip: 5773 if (name_ptr != tmp_name) 5774 kfree(name_ptr); 5775 5776 if (over) 5777 goto nopos; 5778 emitted = true; 5779 di_len = btrfs_dir_name_len(leaf, di) + 5780 btrfs_dir_data_len(leaf, di) + sizeof(*di); 5781 di_cur += di_len; 5782 di = (struct btrfs_dir_item *)((char *)di + di_len); 5783 } 5784next: 5785 path->slots[0]++; 5786 } 5787 5788 if (key_type == BTRFS_DIR_INDEX_KEY) { 5789 if (is_curr) 5790 ctx->pos++; 5791 ret = btrfs_readdir_delayed_dir_index(ctx, &ins_list, &emitted); 5792 if (ret) 5793 goto nopos; 5794 } 5795 5796 /* 5797 * If we haven't emitted any dir entry, we must not touch ctx->pos as 5798 * it was was set to the termination value in previous call. We assume 5799 * that "." and ".." were emitted if we reach this point and set the 5800 * termination value as well for an empty directory. 5801 */ 5802 if (ctx->pos > 2 && !emitted) 5803 goto nopos; 5804 5805 /* Reached end of directory/root. Bump pos past the last item. */ 5806 ctx->pos++; 5807 5808 /* 5809 * Stop new entries from being returned after we return the last 5810 * entry. 5811 * 5812 * New directory entries are assigned a strictly increasing 5813 * offset. This means that new entries created during readdir 5814 * are *guaranteed* to be seen in the future by that readdir. 5815 * This has broken buggy programs which operate on names as 5816 * they're returned by readdir. Until we re-use freed offsets 5817 * we have this hack to stop new entries from being returned 5818 * under the assumption that they'll never reach this huge 5819 * offset. 5820 * 5821 * This is being careful not to overflow 32bit loff_t unless the 5822 * last entry requires it because doing so has broken 32bit apps 5823 * in the past. 5824 */ 5825 if (key_type == BTRFS_DIR_INDEX_KEY) { 5826 if (ctx->pos >= INT_MAX) 5827 ctx->pos = LLONG_MAX; 5828 else 5829 ctx->pos = INT_MAX; 5830 } 5831nopos: 5832 ret = 0; 5833err: 5834 if (key_type == BTRFS_DIR_INDEX_KEY) 5835 btrfs_put_delayed_items(&ins_list, &del_list); 5836 btrfs_free_path(path); 5837 return ret; 5838} 5839 5840int btrfs_write_inode(struct inode *inode, struct writeback_control *wbc) 5841{ 5842 struct btrfs_root *root = BTRFS_I(inode)->root; 5843 struct btrfs_trans_handle *trans; 5844 int ret = 0; 5845 bool nolock = false; 5846 5847 if (test_bit(BTRFS_INODE_DUMMY, &BTRFS_I(inode)->runtime_flags)) 5848 return 0; 5849 5850 if (btrfs_fs_closing(root->fs_info) && btrfs_is_free_space_inode(inode)) 5851 nolock = true; 5852 5853 if (wbc->sync_mode == WB_SYNC_ALL) { 5854 if (nolock) 5855 trans = btrfs_join_transaction_nolock(root); 5856 else 5857 trans = btrfs_join_transaction(root); 5858 if (IS_ERR(trans)) 5859 return PTR_ERR(trans); 5860 ret = btrfs_commit_transaction(trans, root); 5861 } 5862 return ret; 5863} 5864 5865/* 5866 * This is somewhat expensive, updating the tree every time the 5867 * inode changes. But, it is most likely to find the inode in cache. 5868 * FIXME, needs more benchmarking...there are no reasons other than performance 5869 * to keep or drop this code. 
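 * We first try the cheap update through a joined transaction (normally the
 * delayed inode path); only if that fails with -ENOSPC do we retry with a
 * real one item transaction reservation.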
5870 */ 5871static int btrfs_dirty_inode(struct inode *inode) 5872{ 5873 struct btrfs_root *root = BTRFS_I(inode)->root; 5874 struct btrfs_trans_handle *trans; 5875 int ret; 5876 5877 if (test_bit(BTRFS_INODE_DUMMY, &BTRFS_I(inode)->runtime_flags)) 5878 return 0; 5879 5880 trans = btrfs_join_transaction(root); 5881 if (IS_ERR(trans)) 5882 return PTR_ERR(trans); 5883 5884 ret = btrfs_update_inode(trans, root, inode); 5885 if (ret && ret == -ENOSPC) { 5886 /* whoops, lets try again with the full transaction */ 5887 btrfs_end_transaction(trans, root); 5888 trans = btrfs_start_transaction(root, 1); 5889 if (IS_ERR(trans)) 5890 return PTR_ERR(trans); 5891 5892 ret = btrfs_update_inode(trans, root, inode); 5893 } 5894 btrfs_end_transaction(trans, root); 5895 if (BTRFS_I(inode)->delayed_node) 5896 btrfs_balance_delayed_items(root); 5897 5898 return ret; 5899} 5900 5901/* 5902 * This is a copy of file_update_time. We need this so we can return error on 5903 * ENOSPC for updating the inode in the case of file write and mmap writes. 5904 */ 5905static int btrfs_update_time(struct inode *inode, struct timespec *now, 5906 int flags) 5907{ 5908 struct btrfs_root *root = BTRFS_I(inode)->root; 5909 5910 if (btrfs_root_readonly(root)) 5911 return -EROFS; 5912 5913 if (flags & S_VERSION) 5914 inode_inc_iversion(inode); 5915 if (flags & S_CTIME) 5916 inode->i_ctime = *now; 5917 if (flags & S_MTIME) 5918 inode->i_mtime = *now; 5919 if (flags & S_ATIME) 5920 inode->i_atime = *now; 5921 return btrfs_dirty_inode(inode); 5922} 5923 5924/* 5925 * find the highest existing sequence number in a directory 5926 * and then set the in-memory index_cnt variable to reflect 5927 * free sequence numbers 5928 */ 5929static int btrfs_set_inode_index_count(struct inode *inode) 5930{ 5931 struct btrfs_root *root = BTRFS_I(inode)->root; 5932 struct btrfs_key key, found_key; 5933 struct btrfs_path *path; 5934 struct extent_buffer *leaf; 5935 int ret; 5936 5937 key.objectid = btrfs_ino(inode); 5938 key.type = BTRFS_DIR_INDEX_KEY; 5939 key.offset = (u64)-1; 5940 5941 path = btrfs_alloc_path(); 5942 if (!path) 5943 return -ENOMEM; 5944 5945 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0); 5946 if (ret < 0) 5947 goto out; 5948 /* FIXME: we should be able to handle this */ 5949 if (ret == 0) 5950 goto out; 5951 ret = 0; 5952 5953 /* 5954 * MAGIC NUMBER EXPLANATION: 5955 * since we search a directory based on f_pos we have to start at 2 5956 * since '.' and '..' have f_pos of 0 and 1 respectively, so everybody 5957 * else has to start at 2 5958 */ 5959 if (path->slots[0] == 0) { 5960 BTRFS_I(inode)->index_cnt = 2; 5961 goto out; 5962 } 5963 5964 path->slots[0]--; 5965 5966 leaf = path->nodes[0]; 5967 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]); 5968 5969 if (found_key.objectid != btrfs_ino(inode) || 5970 found_key.type != BTRFS_DIR_INDEX_KEY) { 5971 BTRFS_I(inode)->index_cnt = 2; 5972 goto out; 5973 } 5974 5975 BTRFS_I(inode)->index_cnt = found_key.offset + 1; 5976out: 5977 btrfs_free_path(path); 5978 return ret; 5979} 5980 5981/* 5982 * helper to find a free sequence number in a given directory. 
This current 5983 * code is very simple, later versions will do smarter things in the btree 5984 */ 5985int btrfs_set_inode_index(struct inode *dir, u64 *index) 5986{ 5987 int ret = 0; 5988 5989 if (BTRFS_I(dir)->index_cnt == (u64)-1) { 5990 ret = btrfs_inode_delayed_dir_index_count(dir); 5991 if (ret) { 5992 ret = btrfs_set_inode_index_count(dir); 5993 if (ret) 5994 return ret; 5995 } 5996 } 5997 5998 *index = BTRFS_I(dir)->index_cnt; 5999 BTRFS_I(dir)->index_cnt++; 6000 6001 return ret; 6002} 6003 6004static int btrfs_insert_inode_locked(struct inode *inode) 6005{ 6006 struct btrfs_iget_args args; 6007 args.location = &BTRFS_I(inode)->location; 6008 args.root = BTRFS_I(inode)->root; 6009 6010 return insert_inode_locked4(inode, 6011 btrfs_inode_hash(inode->i_ino, BTRFS_I(inode)->root), 6012 btrfs_find_actor, &args); 6013} 6014 6015static struct inode *btrfs_new_inode(struct btrfs_trans_handle *trans, 6016 struct btrfs_root *root, 6017 struct inode *dir, 6018 const char *name, int name_len, 6019 u64 ref_objectid, u64 objectid, 6020 umode_t mode, u64 *index) 6021{ 6022 struct inode *inode; 6023 struct btrfs_inode_item *inode_item; 6024 struct btrfs_key *location; 6025 struct btrfs_path *path; 6026 struct btrfs_inode_ref *ref; 6027 struct btrfs_key key[2]; 6028 u32 sizes[2]; 6029 int nitems = name ? 2 : 1; 6030 unsigned long ptr; 6031 int ret; 6032 6033 path = btrfs_alloc_path(); 6034 if (!path) 6035 return ERR_PTR(-ENOMEM); 6036 6037 inode = new_inode(root->fs_info->sb); 6038 if (!inode) { 6039 btrfs_free_path(path); 6040 return ERR_PTR(-ENOMEM); 6041 } 6042 6043 /* 6044 * O_TMPFILE, set link count to 0, so that after this point, 6045 * we fill in an inode item with the correct link count. 6046 */ 6047 if (!name) 6048 set_nlink(inode, 0); 6049 6050 /* 6051 * we have to initialize this early, so we can reclaim the inode 6052 * number if we fail afterwards in this function. 6053 */ 6054 inode->i_ino = objectid; 6055 6056 if (dir && name) { 6057 trace_btrfs_inode_request(dir); 6058 6059 ret = btrfs_set_inode_index(dir, index); 6060 if (ret) { 6061 btrfs_free_path(path); 6062 iput(inode); 6063 return ERR_PTR(ret); 6064 } 6065 } else if (dir) { 6066 *index = 0; 6067 } 6068 /* 6069 * index_cnt is ignored for everything but a dir, 6070 * btrfs_get_inode_index_count has an explanation for the magic 6071 * number 6072 */ 6073 BTRFS_I(inode)->index_cnt = 2; 6074 BTRFS_I(inode)->dir_index = *index; 6075 BTRFS_I(inode)->root = root; 6076 BTRFS_I(inode)->generation = trans->transid; 6077 inode->i_generation = BTRFS_I(inode)->generation; 6078 6079 /* 6080 * We could have gotten an inode number from somebody who was fsynced 6081 * and then removed in this same transaction, so let's just set full 6082 * sync since it will be a full sync anyway and this will blow away the 6083 * old info in the log. 6084 */ 6085 set_bit(BTRFS_INODE_NEEDS_FULL_SYNC, &BTRFS_I(inode)->runtime_flags); 6086 6087 key[0].objectid = objectid; 6088 key[0].type = BTRFS_INODE_ITEM_KEY; 6089 key[0].offset = 0; 6090 6091 sizes[0] = sizeof(struct btrfs_inode_item); 6092 6093 if (name) { 6094 /* 6095 * Start new inodes with an inode_ref. This is slightly more 6096 * efficient for small numbers of hard links since they will 6097 * be packed into one item. Extended refs will kick in if we 6098 * add more hard links than can fit in the ref item. 
6099 */ 6100 key[1].objectid = objectid; 6101 key[1].type = BTRFS_INODE_REF_KEY; 6102 key[1].offset = ref_objectid; 6103 6104 sizes[1] = name_len + sizeof(*ref); 6105 } 6106 6107 location = &BTRFS_I(inode)->location; 6108 location->objectid = objectid; 6109 location->offset = 0; 6110 location->type = BTRFS_INODE_ITEM_KEY; 6111 6112 ret = btrfs_insert_inode_locked(inode); 6113 if (ret < 0) 6114 goto fail; 6115 6116 path->leave_spinning = 1; 6117 ret = btrfs_insert_empty_items(trans, root, path, key, sizes, nitems); 6118 if (ret != 0) 6119 goto fail_unlock; 6120 6121 inode_init_owner(inode, dir, mode); 6122 inode_set_bytes(inode, 0); 6123 6124 inode->i_mtime = CURRENT_TIME; 6125 inode->i_atime = inode->i_mtime; 6126 inode->i_ctime = inode->i_mtime; 6127 BTRFS_I(inode)->i_otime = inode->i_mtime; 6128 6129 inode_item = btrfs_item_ptr(path->nodes[0], path->slots[0], 6130 struct btrfs_inode_item); 6131 memset_extent_buffer(path->nodes[0], 0, (unsigned long)inode_item, 6132 sizeof(*inode_item)); 6133 fill_inode_item(trans, path->nodes[0], inode_item, inode); 6134 6135 if (name) { 6136 ref = btrfs_item_ptr(path->nodes[0], path->slots[0] + 1, 6137 struct btrfs_inode_ref); 6138 btrfs_set_inode_ref_name_len(path->nodes[0], ref, name_len); 6139 btrfs_set_inode_ref_index(path->nodes[0], ref, *index); 6140 ptr = (unsigned long)(ref + 1); 6141 write_extent_buffer(path->nodes[0], name, ptr, name_len); 6142 } 6143 6144 btrfs_mark_buffer_dirty(path->nodes[0]); 6145 btrfs_free_path(path); 6146 6147 btrfs_inherit_iflags(inode, dir); 6148 6149 if (S_ISREG(mode)) { 6150 if (btrfs_test_opt(root, NODATASUM)) 6151 BTRFS_I(inode)->flags |= BTRFS_INODE_NODATASUM; 6152 if (btrfs_test_opt(root, NODATACOW)) 6153 BTRFS_I(inode)->flags |= BTRFS_INODE_NODATACOW | 6154 BTRFS_INODE_NODATASUM; 6155 } 6156 6157 inode_tree_add(inode); 6158 6159 trace_btrfs_inode_new(inode); 6160 btrfs_set_inode_last_trans(trans, inode); 6161 6162 btrfs_update_root_times(trans, root); 6163 6164 ret = btrfs_inode_inherit_props(trans, inode, dir); 6165 if (ret) 6166 btrfs_err(root->fs_info, 6167 "error inheriting props for ino %llu (root %llu): %d", 6168 btrfs_ino(inode), root->root_key.objectid, ret); 6169 6170 return inode; 6171 6172fail_unlock: 6173 unlock_new_inode(inode); 6174fail: 6175 if (dir && name) 6176 BTRFS_I(dir)->index_cnt--; 6177 btrfs_free_path(path); 6178 iput(inode); 6179 return ERR_PTR(ret); 6180} 6181 6182static inline u8 btrfs_inode_type(struct inode *inode) 6183{ 6184 return btrfs_type_by_mode[(inode->i_mode & S_IFMT) >> S_SHIFT]; 6185} 6186 6187/* 6188 * utility function to add 'inode' into 'parent_inode' with 6189 * a give name and a given sequence number. 6190 * if 'add_backref' is true, also insert a backref from the 6191 * inode to the parent directory. 
6192 */ 6193int btrfs_add_link(struct btrfs_trans_handle *trans, 6194 struct inode *parent_inode, struct inode *inode, 6195 const char *name, int name_len, int add_backref, u64 index) 6196{ 6197 int ret = 0; 6198 struct btrfs_key key; 6199 struct btrfs_root *root = BTRFS_I(parent_inode)->root; 6200 u64 ino = btrfs_ino(inode); 6201 u64 parent_ino = btrfs_ino(parent_inode); 6202 6203 if (unlikely(ino == BTRFS_FIRST_FREE_OBJECTID)) { 6204 memcpy(&key, &BTRFS_I(inode)->root->root_key, sizeof(key)); 6205 } else { 6206 key.objectid = ino; 6207 key.type = BTRFS_INODE_ITEM_KEY; 6208 key.offset = 0; 6209 } 6210 6211 if (unlikely(ino == BTRFS_FIRST_FREE_OBJECTID)) { 6212 ret = btrfs_add_root_ref(trans, root->fs_info->tree_root, 6213 key.objectid, root->root_key.objectid, 6214 parent_ino, index, name, name_len); 6215 } else if (add_backref) { 6216 ret = btrfs_insert_inode_ref(trans, root, name, name_len, ino, 6217 parent_ino, index); 6218 } 6219 6220 /* Nothing to clean up yet */ 6221 if (ret) 6222 return ret; 6223 6224 ret = btrfs_insert_dir_item(trans, root, name, name_len, 6225 parent_inode, &key, 6226 btrfs_inode_type(inode), index); 6227 if (ret == -EEXIST || ret == -EOVERFLOW) 6228 goto fail_dir_item; 6229 else if (ret) { 6230 btrfs_abort_transaction(trans, root, ret); 6231 return ret; 6232 } 6233 6234 btrfs_i_size_write(parent_inode, parent_inode->i_size + 6235 name_len * 2); 6236 inode_inc_iversion(parent_inode); 6237 parent_inode->i_mtime = parent_inode->i_ctime = CURRENT_TIME; 6238 ret = btrfs_update_inode(trans, root, parent_inode); 6239 if (ret) 6240 btrfs_abort_transaction(trans, root, ret); 6241 return ret; 6242 6243fail_dir_item: 6244 if (unlikely(ino == BTRFS_FIRST_FREE_OBJECTID)) { 6245 u64 local_index; 6246 int err; 6247 err = btrfs_del_root_ref(trans, root->fs_info->tree_root, 6248 key.objectid, root->root_key.objectid, 6249 parent_ino, &local_index, name, name_len); 6250 6251 } else if (add_backref) { 6252 u64 local_index; 6253 int err; 6254 6255 err = btrfs_del_inode_ref(trans, root, name, name_len, 6256 ino, parent_ino, &local_index); 6257 } 6258 return ret; 6259} 6260 6261static int btrfs_add_nondir(struct btrfs_trans_handle *trans, 6262 struct inode *dir, struct dentry *dentry, 6263 struct inode *inode, int backref, u64 index) 6264{ 6265 int err = btrfs_add_link(trans, dir, inode, 6266 dentry->d_name.name, dentry->d_name.len, 6267 backref, index); 6268 if (err > 0) 6269 err = -EEXIST; 6270 return err; 6271} 6272 6273static int btrfs_mknod(struct inode *dir, struct dentry *dentry, 6274 umode_t mode, dev_t rdev) 6275{ 6276 struct btrfs_trans_handle *trans; 6277 struct btrfs_root *root = BTRFS_I(dir)->root; 6278 struct inode *inode = NULL; 6279 int err; 6280 int drop_inode = 0; 6281 u64 objectid; 6282 u64 index = 0; 6283 6284 if (!new_valid_dev(rdev)) 6285 return -EINVAL; 6286 6287 /* 6288 * 2 for inode item and ref 6289 * 2 for dir items 6290 * 1 for xattr if selinux is on 6291 */ 6292 trans = btrfs_start_transaction(root, 5); 6293 if (IS_ERR(trans)) 6294 return PTR_ERR(trans); 6295 6296 err = btrfs_find_free_ino(root, &objectid); 6297 if (err) 6298 goto out_unlock; 6299 6300 inode = btrfs_new_inode(trans, root, dir, dentry->d_name.name, 6301 dentry->d_name.len, btrfs_ino(dir), objectid, 6302 mode, &index); 6303 if (IS_ERR(inode)) { 6304 err = PTR_ERR(inode); 6305 goto out_unlock; 6306 } 6307 6308 /* 6309 * If the active LSM wants to access the inode during 6310 * d_instantiate it needs these. 
Smack checks to see 6311 * if the filesystem supports xattrs by looking at the 6312 * ops vector. 6313 */ 6314 inode->i_op = &btrfs_special_inode_operations; 6315 init_special_inode(inode, inode->i_mode, rdev); 6316 6317 err = btrfs_init_inode_security(trans, inode, dir, &dentry->d_name); 6318 if (err) 6319 goto out_unlock_inode; 6320 6321 err = btrfs_add_nondir(trans, dir, dentry, inode, 0, index); 6322 if (err) { 6323 goto out_unlock_inode; 6324 } else { 6325 btrfs_update_inode(trans, root, inode); 6326 unlock_new_inode(inode); 6327 d_instantiate(dentry, inode); 6328 } 6329 6330out_unlock: 6331 btrfs_end_transaction(trans, root); 6332 btrfs_balance_delayed_items(root); 6333 btrfs_btree_balance_dirty(root); 6334 if (drop_inode) { 6335 inode_dec_link_count(inode); 6336 iput(inode); 6337 } 6338 return err; 6339 6340out_unlock_inode: 6341 drop_inode = 1; 6342 unlock_new_inode(inode); 6343 goto out_unlock; 6344 6345} 6346 6347static int btrfs_create(struct inode *dir, struct dentry *dentry, 6348 umode_t mode, bool excl) 6349{ 6350 struct btrfs_trans_handle *trans; 6351 struct btrfs_root *root = BTRFS_I(dir)->root; 6352 struct inode *inode = NULL; 6353 int drop_inode_on_err = 0; 6354 int err; 6355 u64 objectid; 6356 u64 index = 0; 6357 6358 /* 6359 * 2 for inode item and ref 6360 * 2 for dir items 6361 * 1 for xattr if selinux is on 6362 */ 6363 trans = btrfs_start_transaction(root, 5); 6364 if (IS_ERR(trans)) 6365 return PTR_ERR(trans); 6366 6367 err = btrfs_find_free_ino(root, &objectid); 6368 if (err) 6369 goto out_unlock; 6370 6371 inode = btrfs_new_inode(trans, root, dir, dentry->d_name.name, 6372 dentry->d_name.len, btrfs_ino(dir), objectid, 6373 mode, &index); 6374 if (IS_ERR(inode)) { 6375 err = PTR_ERR(inode); 6376 goto out_unlock; 6377 } 6378 drop_inode_on_err = 1; 6379 /* 6380 * If the active LSM wants to access the inode during 6381 * d_instantiate it needs these. Smack checks to see 6382 * if the filesystem supports xattrs by looking at the 6383 * ops vector. 
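	 * That is also why i_op, i_fop and a_ops are assigned right below,
	 * before d_instantiate() is called.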
6384 */ 6385 inode->i_fop = &btrfs_file_operations; 6386 inode->i_op = &btrfs_file_inode_operations; 6387 inode->i_mapping->a_ops = &btrfs_aops; 6388 6389 err = btrfs_init_inode_security(trans, inode, dir, &dentry->d_name); 6390 if (err) 6391 goto out_unlock_inode; 6392 6393 err = btrfs_update_inode(trans, root, inode); 6394 if (err) 6395 goto out_unlock_inode; 6396 6397 err = btrfs_add_nondir(trans, dir, dentry, inode, 0, index); 6398 if (err) 6399 goto out_unlock_inode; 6400 6401 BTRFS_I(inode)->io_tree.ops = &btrfs_extent_io_ops; 6402 unlock_new_inode(inode); 6403 d_instantiate(dentry, inode); 6404 6405out_unlock: 6406 btrfs_end_transaction(trans, root); 6407 if (err && drop_inode_on_err) { 6408 inode_dec_link_count(inode); 6409 iput(inode); 6410 } 6411 btrfs_balance_delayed_items(root); 6412 btrfs_btree_balance_dirty(root); 6413 return err; 6414 6415out_unlock_inode: 6416 unlock_new_inode(inode); 6417 goto out_unlock; 6418 6419} 6420 6421static int btrfs_link(struct dentry *old_dentry, struct inode *dir, 6422 struct dentry *dentry) 6423{ 6424 struct btrfs_trans_handle *trans = NULL; 6425 struct btrfs_root *root = BTRFS_I(dir)->root; 6426 struct inode *inode = d_inode(old_dentry); 6427 u64 index; 6428 int err; 6429 int drop_inode = 0; 6430 6431 /* do not allow sys_link's with other subvols of the same device */ 6432 if (root->objectid != BTRFS_I(inode)->root->objectid) 6433 return -EXDEV; 6434 6435 if (inode->i_nlink >= BTRFS_LINK_MAX) 6436 return -EMLINK; 6437 6438 err = btrfs_set_inode_index(dir, &index); 6439 if (err) 6440 goto fail; 6441 6442 /* 6443 * 2 items for inode and inode ref 6444 * 2 items for dir items 6445 * 1 item for parent inode 6446 */ 6447 trans = btrfs_start_transaction(root, 5); 6448 if (IS_ERR(trans)) { 6449 err = PTR_ERR(trans); 6450 trans = NULL; 6451 goto fail; 6452 } 6453 6454 /* There are several dir indexes for this inode, clear the cache. */ 6455 BTRFS_I(inode)->dir_index = 0ULL; 6456 inc_nlink(inode); 6457 inode_inc_iversion(inode); 6458 inode->i_ctime = CURRENT_TIME; 6459 ihold(inode); 6460 set_bit(BTRFS_INODE_COPY_EVERYTHING, &BTRFS_I(inode)->runtime_flags); 6461 6462 err = btrfs_add_nondir(trans, dir, dentry, inode, 1, index); 6463 6464 if (err) { 6465 drop_inode = 1; 6466 } else { 6467 struct dentry *parent = dentry->d_parent; 6468 err = btrfs_update_inode(trans, root, inode); 6469 if (err) 6470 goto fail; 6471 if (inode->i_nlink == 1) { 6472 /* 6473 * If new hard link count is 1, it's a file created 6474 * with open(2) O_TMPFILE flag. 
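			 * The inode was put on the orphan list when it was
			 * created without a name; now that it has one, the
			 * orphan item can be removed (btrfs_orphan_del() below).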
			 */
			err = btrfs_orphan_del(trans, inode);
			if (err)
				goto fail;
		}
		d_instantiate(dentry, inode);
		btrfs_log_new_name(trans, inode, NULL, parent);
	}

	btrfs_balance_delayed_items(root);
fail:
	if (trans)
		btrfs_end_transaction(trans, root);
	if (drop_inode) {
		inode_dec_link_count(inode);
		iput(inode);
	}
	btrfs_btree_balance_dirty(root);
	return err;
}

static int btrfs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
{
	struct inode *inode = NULL;
	struct btrfs_trans_handle *trans;
	struct btrfs_root *root = BTRFS_I(dir)->root;
	int err = 0;
	int drop_on_err = 0;
	u64 objectid = 0;
	u64 index = 0;

	/*
	 * 2 items for inode and ref
	 * 2 items for dir items
	 * 1 for xattr if selinux is on
	 */
	trans = btrfs_start_transaction(root, 5);
	if (IS_ERR(trans))
		return PTR_ERR(trans);

	err = btrfs_find_free_ino(root, &objectid);
	if (err)
		goto out_fail;

	inode = btrfs_new_inode(trans, root, dir, dentry->d_name.name,
				dentry->d_name.len, btrfs_ino(dir), objectid,
				S_IFDIR | mode, &index);
	if (IS_ERR(inode)) {
		err = PTR_ERR(inode);
		goto out_fail;
	}

	drop_on_err = 1;
	/* these must be set before we unlock the inode */
	inode->i_op = &btrfs_dir_inode_operations;
	inode->i_fop = &btrfs_dir_file_operations;

	err = btrfs_init_inode_security(trans, inode, dir, &dentry->d_name);
	if (err)
		goto out_fail_inode;

	btrfs_i_size_write(inode, 0);
	err = btrfs_update_inode(trans, root, inode);
	if (err)
		goto out_fail_inode;

	err = btrfs_add_link(trans, dir, inode, dentry->d_name.name,
			     dentry->d_name.len, 0, index);
	if (err)
		goto out_fail_inode;

	d_instantiate(dentry, inode);
	/*
	 * mkdir is special.  We're unlocking after we call d_instantiate
	 * to avoid a race with nfsd calling d_instantiate.
	 */
	unlock_new_inode(inode);
	drop_on_err = 0;

out_fail:
	btrfs_end_transaction(trans, root);
	if (drop_on_err) {
		inode_dec_link_count(inode);
		iput(inode);
	}
	btrfs_balance_delayed_items(root);
	btrfs_btree_balance_dirty(root);
	return err;

out_fail_inode:
	unlock_new_inode(inode);
	goto out_fail;
}

/* Find next extent map of a given extent map, caller needs to ensure locks */
static struct extent_map *next_extent_map(struct extent_map *em)
{
	struct rb_node *next;

	next = rb_next(&em->rb_node);
	if (!next)
		return NULL;
	return container_of(next, struct extent_map, rb_node);
}

static struct extent_map *prev_extent_map(struct extent_map *em)
{
	struct rb_node *prev;

	prev = rb_prev(&em->rb_node);
	if (!prev)
		return NULL;
	return container_of(prev, struct extent_map, rb_node);
}

/* helper for btrfs_get_extent.  Given an existing extent in the tree
 * (the nearest extent to map_start) and an extent that we want to insert,
 * deal with the overlap and insert the best-fitting new extent into the tree.
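 * The new extent is clamped to the gap between the existing extent's tree
 * neighbours before it is added with add_extent_mapping().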
6594 */ 6595static int merge_extent_mapping(struct extent_map_tree *em_tree, 6596 struct extent_map *existing, 6597 struct extent_map *em, 6598 u64 map_start) 6599{ 6600 struct extent_map *prev; 6601 struct extent_map *next; 6602 u64 start; 6603 u64 end; 6604 u64 start_diff; 6605 6606 BUG_ON(map_start < em->start || map_start >= extent_map_end(em)); 6607 6608 if (existing->start > map_start) { 6609 next = existing; 6610 prev = prev_extent_map(next); 6611 } else { 6612 prev = existing; 6613 next = next_extent_map(prev); 6614 } 6615 6616 start = prev ? extent_map_end(prev) : em->start; 6617 start = max_t(u64, start, em->start); 6618 end = next ? next->start : extent_map_end(em); 6619 end = min_t(u64, end, extent_map_end(em)); 6620 start_diff = start - em->start; 6621 em->start = start; 6622 em->len = end - start; 6623 if (em->block_start < EXTENT_MAP_LAST_BYTE && 6624 !test_bit(EXTENT_FLAG_COMPRESSED, &em->flags)) { 6625 em->block_start += start_diff; 6626 em->block_len -= start_diff; 6627 } 6628 return add_extent_mapping(em_tree, em, 0); 6629} 6630 6631static noinline int uncompress_inline(struct btrfs_path *path, 6632 struct inode *inode, struct page *page, 6633 size_t pg_offset, u64 extent_offset, 6634 struct btrfs_file_extent_item *item) 6635{ 6636 int ret; 6637 struct extent_buffer *leaf = path->nodes[0]; 6638 char *tmp; 6639 size_t max_size; 6640 unsigned long inline_size; 6641 unsigned long ptr; 6642 int compress_type; 6643 6644 WARN_ON(pg_offset != 0); 6645 compress_type = btrfs_file_extent_compression(leaf, item); 6646 max_size = btrfs_file_extent_ram_bytes(leaf, item); 6647 inline_size = btrfs_file_extent_inline_item_len(leaf, 6648 btrfs_item_nr(path->slots[0])); 6649 tmp = kmalloc(inline_size, GFP_NOFS); 6650 if (!tmp) 6651 return -ENOMEM; 6652 ptr = btrfs_file_extent_inline_start(item); 6653 6654 read_extent_buffer(leaf, tmp, ptr, inline_size); 6655 6656 max_size = min_t(unsigned long, PAGE_CACHE_SIZE, max_size); 6657 ret = btrfs_decompress(compress_type, tmp, page, 6658 extent_offset, inline_size, max_size); 6659 kfree(tmp); 6660 return ret; 6661} 6662 6663/* 6664 * a bit scary, this does extent mapping from logical file offset to the disk. 6665 * the ugly parts come from merging extents from the disk with the in-ram 6666 * representation. This gets more complex because of the data=ordered code, 6667 * where the in-ram extents might be locked pending data=ordered completion. 6668 * 6669 * This also copies inline extents directly into the page. 
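 *
 * The returned extent_map may describe a hole (block_start == EXTENT_MAP_HOLE)
 * when nothing is found; callers drop their reference with free_extent_map().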
6670 */ 6671 6672struct extent_map *btrfs_get_extent(struct inode *inode, struct page *page, 6673 size_t pg_offset, u64 start, u64 len, 6674 int create) 6675{ 6676 int ret; 6677 int err = 0; 6678 u64 extent_start = 0; 6679 u64 extent_end = 0; 6680 u64 objectid = btrfs_ino(inode); 6681 u32 found_type; 6682 struct btrfs_path *path = NULL; 6683 struct btrfs_root *root = BTRFS_I(inode)->root; 6684 struct btrfs_file_extent_item *item; 6685 struct extent_buffer *leaf; 6686 struct btrfs_key found_key; 6687 struct extent_map *em = NULL; 6688 struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree; 6689 struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree; 6690 struct btrfs_trans_handle *trans = NULL; 6691 const bool new_inline = !page || create; 6692 6693again: 6694 read_lock(&em_tree->lock); 6695 em = lookup_extent_mapping(em_tree, start, len); 6696 if (em) 6697 em->bdev = root->fs_info->fs_devices->latest_bdev; 6698 read_unlock(&em_tree->lock); 6699 6700 if (em) { 6701 if (em->start > start || em->start + em->len <= start) 6702 free_extent_map(em); 6703 else if (em->block_start == EXTENT_MAP_INLINE && page) 6704 free_extent_map(em); 6705 else 6706 goto out; 6707 } 6708 em = alloc_extent_map(); 6709 if (!em) { 6710 err = -ENOMEM; 6711 goto out; 6712 } 6713 em->bdev = root->fs_info->fs_devices->latest_bdev; 6714 em->start = EXTENT_MAP_HOLE; 6715 em->orig_start = EXTENT_MAP_HOLE; 6716 em->len = (u64)-1; 6717 em->block_len = (u64)-1; 6718 6719 if (!path) { 6720 path = btrfs_alloc_path(); 6721 if (!path) { 6722 err = -ENOMEM; 6723 goto out; 6724 } 6725 /* 6726 * Chances are we'll be called again, so go ahead and do 6727 * readahead 6728 */ 6729 path->reada = 1; 6730 } 6731 6732 ret = btrfs_lookup_file_extent(trans, root, path, 6733 objectid, start, trans != NULL); 6734 if (ret < 0) { 6735 err = ret; 6736 goto out; 6737 } 6738 6739 if (ret != 0) { 6740 if (path->slots[0] == 0) 6741 goto not_found; 6742 path->slots[0]--; 6743 } 6744 6745 leaf = path->nodes[0]; 6746 item = btrfs_item_ptr(leaf, path->slots[0], 6747 struct btrfs_file_extent_item); 6748 /* are we inside the extent that was found? */ 6749 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]); 6750 found_type = found_key.type; 6751 if (found_key.objectid != objectid || 6752 found_type != BTRFS_EXTENT_DATA_KEY) { 6753 /* 6754 * If we backup past the first extent we want to move forward 6755 * and see if there is an extent in front of us, otherwise we'll 6756 * say there is a hole for our whole search range which can 6757 * cause problems. 
6758 */ 6759 extent_end = start; 6760 goto next; 6761 } 6762 6763 found_type = btrfs_file_extent_type(leaf, item); 6764 extent_start = found_key.offset; 6765 if (found_type == BTRFS_FILE_EXTENT_REG || 6766 found_type == BTRFS_FILE_EXTENT_PREALLOC) { 6767 extent_end = extent_start + 6768 btrfs_file_extent_num_bytes(leaf, item); 6769 } else if (found_type == BTRFS_FILE_EXTENT_INLINE) { 6770 size_t size; 6771 size = btrfs_file_extent_inline_len(leaf, path->slots[0], item); 6772 extent_end = ALIGN(extent_start + size, root->sectorsize); 6773 } 6774next: 6775 if (start >= extent_end) { 6776 path->slots[0]++; 6777 if (path->slots[0] >= btrfs_header_nritems(leaf)) { 6778 ret = btrfs_next_leaf(root, path); 6779 if (ret < 0) { 6780 err = ret; 6781 goto out; 6782 } 6783 if (ret > 0) 6784 goto not_found; 6785 leaf = path->nodes[0]; 6786 } 6787 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]); 6788 if (found_key.objectid != objectid || 6789 found_key.type != BTRFS_EXTENT_DATA_KEY) 6790 goto not_found; 6791 if (start + len <= found_key.offset) 6792 goto not_found; 6793 if (start > found_key.offset) 6794 goto next; 6795 em->start = start; 6796 em->orig_start = start; 6797 em->len = found_key.offset - start; 6798 goto not_found_em; 6799 } 6800 6801 btrfs_extent_item_to_extent_map(inode, path, item, new_inline, em); 6802 6803 if (found_type == BTRFS_FILE_EXTENT_REG || 6804 found_type == BTRFS_FILE_EXTENT_PREALLOC) { 6805 goto insert; 6806 } else if (found_type == BTRFS_FILE_EXTENT_INLINE) { 6807 unsigned long ptr; 6808 char *map; 6809 size_t size; 6810 size_t extent_offset; 6811 size_t copy_size; 6812 6813 if (new_inline) 6814 goto out; 6815 6816 size = btrfs_file_extent_inline_len(leaf, path->slots[0], item); 6817 extent_offset = page_offset(page) + pg_offset - extent_start; 6818 copy_size = min_t(u64, PAGE_CACHE_SIZE - pg_offset, 6819 size - extent_offset); 6820 em->start = extent_start + extent_offset; 6821 em->len = ALIGN(copy_size, root->sectorsize); 6822 em->orig_block_len = em->len; 6823 em->orig_start = em->start; 6824 ptr = btrfs_file_extent_inline_start(item) + extent_offset; 6825 if (create == 0 && !PageUptodate(page)) { 6826 if (btrfs_file_extent_compression(leaf, item) != 6827 BTRFS_COMPRESS_NONE) { 6828 ret = uncompress_inline(path, inode, page, 6829 pg_offset, 6830 extent_offset, item); 6831 if (ret) { 6832 err = ret; 6833 goto out; 6834 } 6835 } else { 6836 map = kmap(page); 6837 read_extent_buffer(leaf, map + pg_offset, ptr, 6838 copy_size); 6839 if (pg_offset + copy_size < PAGE_CACHE_SIZE) { 6840 memset(map + pg_offset + copy_size, 0, 6841 PAGE_CACHE_SIZE - pg_offset - 6842 copy_size); 6843 } 6844 kunmap(page); 6845 } 6846 flush_dcache_page(page); 6847 } else if (create && PageUptodate(page)) { 6848 BUG(); 6849 if (!trans) { 6850 kunmap(page); 6851 free_extent_map(em); 6852 em = NULL; 6853 6854 btrfs_release_path(path); 6855 trans = btrfs_join_transaction(root); 6856 6857 if (IS_ERR(trans)) 6858 return ERR_CAST(trans); 6859 goto again; 6860 } 6861 map = kmap(page); 6862 write_extent_buffer(leaf, map + pg_offset, ptr, 6863 copy_size); 6864 kunmap(page); 6865 btrfs_mark_buffer_dirty(leaf); 6866 } 6867 set_extent_uptodate(io_tree, em->start, 6868 extent_map_end(em) - 1, NULL, GFP_NOFS); 6869 goto insert; 6870 } 6871not_found: 6872 em->start = start; 6873 em->orig_start = start; 6874 em->len = len; 6875not_found_em: 6876 em->block_start = EXTENT_MAP_HOLE; 6877 set_bit(EXTENT_FLAG_VACANCY, &em->flags); 6878insert: 6879 btrfs_release_path(path); 6880 if (em->start > start || 
extent_map_end(em) <= start) { 6881 btrfs_err(root->fs_info, "bad extent! em: [%llu %llu] passed [%llu %llu]", 6882 em->start, em->len, start, len); 6883 err = -EIO; 6884 goto out; 6885 } 6886 6887 err = 0; 6888 write_lock(&em_tree->lock); 6889 ret = add_extent_mapping(em_tree, em, 0); 6890 /* it is possible that someone inserted the extent into the tree 6891 * while we had the lock dropped. It is also possible that 6892 * an overlapping map exists in the tree 6893 */ 6894 if (ret == -EEXIST) { 6895 struct extent_map *existing; 6896 6897 ret = 0; 6898 6899 existing = search_extent_mapping(em_tree, start, len); 6900 /* 6901 * existing will always be non-NULL, since there must be 6902 * extent causing the -EEXIST. 6903 */ 6904 if (start >= extent_map_end(existing) || 6905 start <= existing->start) { 6906 /* 6907 * The existing extent map is the one nearest to 6908 * the [start, start + len) range which overlaps 6909 */ 6910 err = merge_extent_mapping(em_tree, existing, 6911 em, start); 6912 free_extent_map(existing); 6913 if (err) { 6914 free_extent_map(em); 6915 em = NULL; 6916 } 6917 } else { 6918 free_extent_map(em); 6919 em = existing; 6920 err = 0; 6921 } 6922 } 6923 write_unlock(&em_tree->lock); 6924out: 6925 6926 trace_btrfs_get_extent(root, em); 6927 6928 if (path) 6929 btrfs_free_path(path); 6930 if (trans) { 6931 ret = btrfs_end_transaction(trans, root); 6932 if (!err) 6933 err = ret; 6934 } 6935 if (err) { 6936 free_extent_map(em); 6937 return ERR_PTR(err); 6938 } 6939 BUG_ON(!em); /* Error is always set */ 6940 return em; 6941} 6942 6943struct extent_map *btrfs_get_extent_fiemap(struct inode *inode, struct page *page, 6944 size_t pg_offset, u64 start, u64 len, 6945 int create) 6946{ 6947 struct extent_map *em; 6948 struct extent_map *hole_em = NULL; 6949 u64 range_start = start; 6950 u64 end; 6951 u64 found; 6952 u64 found_end; 6953 int err = 0; 6954 6955 em = btrfs_get_extent(inode, page, pg_offset, start, len, create); 6956 if (IS_ERR(em)) 6957 return em; 6958 if (em) { 6959 /* 6960 * if our em maps to 6961 * - a hole or 6962 * - a pre-alloc extent, 6963 * there might actually be delalloc bytes behind it. 
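		 * In that case the delalloc range is looked up below and a
		 * fake EXTENT_MAP_DELALLOC extent is returned to cover it.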
6964 */ 6965 if (em->block_start != EXTENT_MAP_HOLE && 6966 !test_bit(EXTENT_FLAG_PREALLOC, &em->flags)) 6967 return em; 6968 else 6969 hole_em = em; 6970 } 6971 6972 /* check to see if we've wrapped (len == -1 or similar) */ 6973 end = start + len; 6974 if (end < start) 6975 end = (u64)-1; 6976 else 6977 end -= 1; 6978 6979 em = NULL; 6980 6981 /* ok, we didn't find anything, lets look for delalloc */ 6982 found = count_range_bits(&BTRFS_I(inode)->io_tree, &range_start, 6983 end, len, EXTENT_DELALLOC, 1); 6984 found_end = range_start + found; 6985 if (found_end < range_start) 6986 found_end = (u64)-1; 6987 6988 /* 6989 * we didn't find anything useful, return 6990 * the original results from get_extent() 6991 */ 6992 if (range_start > end || found_end <= start) { 6993 em = hole_em; 6994 hole_em = NULL; 6995 goto out; 6996 } 6997 6998 /* adjust the range_start to make sure it doesn't 6999 * go backwards from the start they passed in 7000 */ 7001 range_start = max(start, range_start); 7002 found = found_end - range_start; 7003 7004 if (found > 0) { 7005 u64 hole_start = start; 7006 u64 hole_len = len; 7007 7008 em = alloc_extent_map(); 7009 if (!em) { 7010 err = -ENOMEM; 7011 goto out; 7012 } 7013 /* 7014 * when btrfs_get_extent can't find anything it 7015 * returns one huge hole 7016 * 7017 * make sure what it found really fits our range, and 7018 * adjust to make sure it is based on the start from 7019 * the caller 7020 */ 7021 if (hole_em) { 7022 u64 calc_end = extent_map_end(hole_em); 7023 7024 if (calc_end <= start || (hole_em->start > end)) { 7025 free_extent_map(hole_em); 7026 hole_em = NULL; 7027 } else { 7028 hole_start = max(hole_em->start, start); 7029 hole_len = calc_end - hole_start; 7030 } 7031 } 7032 em->bdev = NULL; 7033 if (hole_em && range_start > hole_start) { 7034 /* our hole starts before our delalloc, so we 7035 * have to return just the parts of the hole 7036 * that go until the delalloc starts 7037 */ 7038 em->len = min(hole_len, 7039 range_start - hole_start); 7040 em->start = hole_start; 7041 em->orig_start = hole_start; 7042 /* 7043 * don't adjust block start at all, 7044 * it is fixed at EXTENT_MAP_HOLE 7045 */ 7046 em->block_start = hole_em->block_start; 7047 em->block_len = hole_len; 7048 if (test_bit(EXTENT_FLAG_PREALLOC, &hole_em->flags)) 7049 set_bit(EXTENT_FLAG_PREALLOC, &em->flags); 7050 } else { 7051 em->start = range_start; 7052 em->len = found; 7053 em->orig_start = range_start; 7054 em->block_start = EXTENT_MAP_DELALLOC; 7055 em->block_len = found; 7056 } 7057 } else if (hole_em) { 7058 return hole_em; 7059 } 7060out: 7061 7062 free_extent_map(hole_em); 7063 if (err) { 7064 free_extent_map(em); 7065 return ERR_PTR(err); 7066 } 7067 return em; 7068} 7069 7070static struct extent_map *btrfs_new_extent_direct(struct inode *inode, 7071 u64 start, u64 len) 7072{ 7073 struct btrfs_root *root = BTRFS_I(inode)->root; 7074 struct extent_map *em; 7075 struct btrfs_key ins; 7076 u64 alloc_hint; 7077 int ret; 7078 7079 alloc_hint = get_extent_allocation_hint(inode, start, len); 7080 ret = btrfs_reserve_extent(root, len, root->sectorsize, 0, 7081 alloc_hint, &ins, 1, 1); 7082 if (ret) 7083 return ERR_PTR(ret); 7084 7085 em = create_pinned_em(inode, start, ins.offset, start, ins.objectid, 7086 ins.offset, ins.offset, ins.offset, 0); 7087 if (IS_ERR(em)) { 7088 btrfs_free_reserved_extent(root, ins.objectid, ins.offset, 1); 7089 return em; 7090 } 7091 7092 ret = btrfs_add_ordered_extent_dio(inode, start, ins.objectid, 7093 ins.offset, ins.offset, 0); 7094 if (ret) { 
7095 btrfs_free_reserved_extent(root, ins.objectid, ins.offset, 1); 7096 free_extent_map(em); 7097 return ERR_PTR(ret); 7098 } 7099 7100 return em; 7101} 7102 7103/* 7104 * returns 1 when the nocow is safe, < 1 on error, 0 if the 7105 * block must be cow'd 7106 */ 7107noinline int can_nocow_extent(struct inode *inode, u64 offset, u64 *len, 7108 u64 *orig_start, u64 *orig_block_len, 7109 u64 *ram_bytes) 7110{ 7111 struct btrfs_trans_handle *trans; 7112 struct btrfs_path *path; 7113 int ret; 7114 struct extent_buffer *leaf; 7115 struct btrfs_root *root = BTRFS_I(inode)->root; 7116 struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree; 7117 struct btrfs_file_extent_item *fi; 7118 struct btrfs_key key; 7119 u64 disk_bytenr; 7120 u64 backref_offset; 7121 u64 extent_end; 7122 u64 num_bytes; 7123 int slot; 7124 int found_type; 7125 bool nocow = (BTRFS_I(inode)->flags & BTRFS_INODE_NODATACOW); 7126 7127 path = btrfs_alloc_path(); 7128 if (!path) 7129 return -ENOMEM; 7130 7131 ret = btrfs_lookup_file_extent(NULL, root, path, btrfs_ino(inode), 7132 offset, 0); 7133 if (ret < 0) 7134 goto out; 7135 7136 slot = path->slots[0]; 7137 if (ret == 1) { 7138 if (slot == 0) { 7139 /* can't find the item, must cow */ 7140 ret = 0; 7141 goto out; 7142 } 7143 slot--; 7144 } 7145 ret = 0; 7146 leaf = path->nodes[0]; 7147 btrfs_item_key_to_cpu(leaf, &key, slot); 7148 if (key.objectid != btrfs_ino(inode) || 7149 key.type != BTRFS_EXTENT_DATA_KEY) { 7150 /* not our file or wrong item type, must cow */ 7151 goto out; 7152 } 7153 7154 if (key.offset > offset) { 7155 /* Wrong offset, must cow */ 7156 goto out; 7157 } 7158 7159 fi = btrfs_item_ptr(leaf, slot, struct btrfs_file_extent_item); 7160 found_type = btrfs_file_extent_type(leaf, fi); 7161 if (found_type != BTRFS_FILE_EXTENT_REG && 7162 found_type != BTRFS_FILE_EXTENT_PREALLOC) { 7163 /* not a regular extent, must cow */ 7164 goto out; 7165 } 7166 7167 if (!nocow && found_type == BTRFS_FILE_EXTENT_REG) 7168 goto out; 7169 7170 extent_end = key.offset + btrfs_file_extent_num_bytes(leaf, fi); 7171 if (extent_end <= offset) 7172 goto out; 7173 7174 disk_bytenr = btrfs_file_extent_disk_bytenr(leaf, fi); 7175 if (disk_bytenr == 0) 7176 goto out; 7177 7178 if (btrfs_file_extent_compression(leaf, fi) || 7179 btrfs_file_extent_encryption(leaf, fi) || 7180 btrfs_file_extent_other_encoding(leaf, fi)) 7181 goto out; 7182 7183 backref_offset = btrfs_file_extent_offset(leaf, fi); 7184 7185 if (orig_start) { 7186 *orig_start = key.offset - backref_offset; 7187 *orig_block_len = btrfs_file_extent_disk_num_bytes(leaf, fi); 7188 *ram_bytes = btrfs_file_extent_ram_bytes(leaf, fi); 7189 } 7190 7191 if (btrfs_extent_readonly(root, disk_bytenr)) 7192 goto out; 7193 7194 num_bytes = min(offset + *len, extent_end) - offset; 7195 if (!nocow && found_type == BTRFS_FILE_EXTENT_PREALLOC) { 7196 u64 range_end; 7197 7198 range_end = round_up(offset + num_bytes, root->sectorsize) - 1; 7199 ret = test_range_bit(io_tree, offset, range_end, 7200 EXTENT_DELALLOC, 0, NULL); 7201 if (ret) { 7202 ret = -EAGAIN; 7203 goto out; 7204 } 7205 } 7206 7207 btrfs_release_path(path); 7208 7209 /* 7210 * look for other files referencing this extent, if we 7211 * find any we must cow 7212 */ 7213 trans = btrfs_join_transaction(root); 7214 if (IS_ERR(trans)) { 7215 ret = 0; 7216 goto out; 7217 } 7218 7219 ret = btrfs_cross_ref_exist(trans, root, btrfs_ino(inode), 7220 key.offset - backref_offset, disk_bytenr); 7221 btrfs_end_transaction(trans, root); 7222 if (ret) { 7223 ret = 0; 7224 goto out; 7225 } 7226 
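	/*
	 * No other inode references this extent, so only the csum check
	 * below can still force a cow.
	 */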
7227 /* 7228 * adjust disk_bytenr and num_bytes to cover just the bytes 7229 * in this extent we are about to write. If there 7230 * are any csums in that range we have to cow in order 7231 * to keep the csums correct 7232 */ 7233 disk_bytenr += backref_offset; 7234 disk_bytenr += offset - key.offset; 7235 if (csum_exist_in_range(root, disk_bytenr, num_bytes)) 7236 goto out; 7237 /* 7238 * all of the above have passed, it is safe to overwrite this extent 7239 * without cow 7240 */ 7241 *len = num_bytes; 7242 ret = 1; 7243out: 7244 btrfs_free_path(path); 7245 return ret; 7246} 7247 7248bool btrfs_page_exists_in_range(struct inode *inode, loff_t start, loff_t end) 7249{ 7250 struct radix_tree_root *root = &inode->i_mapping->page_tree; 7251 int found = false; 7252 void **pagep = NULL; 7253 struct page *page = NULL; 7254 int start_idx; 7255 int end_idx; 7256 7257 start_idx = start >> PAGE_CACHE_SHIFT; 7258 7259 /* 7260 * end is the last byte in the last page. end == start is legal 7261 */ 7262 end_idx = end >> PAGE_CACHE_SHIFT; 7263 7264 rcu_read_lock(); 7265 7266 /* Most of the code in this while loop is lifted from 7267 * find_get_page. It's been modified to begin searching from a 7268 * page and return just the first page found in that range. If the 7269 * found idx is less than or equal to the end idx then we know that 7270 * a page exists. If no pages are found or if those pages are 7271 * outside of the range then we're fine (yay!) */ 7272 while (page == NULL && 7273 radix_tree_gang_lookup_slot(root, &pagep, NULL, start_idx, 1)) { 7274 page = radix_tree_deref_slot(pagep); 7275 if (unlikely(!page)) 7276 break; 7277 7278 if (radix_tree_exception(page)) { 7279 if (radix_tree_deref_retry(page)) { 7280 page = NULL; 7281 continue; 7282 } 7283 /* 7284 * Otherwise, shmem/tmpfs must be storing a swap entry 7285 * here as an exceptional entry: so return it without 7286 * attempting to raise page count. 7287 */ 7288 page = NULL; 7289 break; /* TODO: Is this relevant for this use case? */ 7290 } 7291 7292 if (!page_cache_get_speculative(page)) { 7293 page = NULL; 7294 continue; 7295 } 7296 7297 /* 7298 * Has the page moved? 7299 * This is part of the lockless pagecache protocol. See 7300 * include/linux/pagemap.h for details. 7301 */ 7302 if (unlikely(page != *pagep)) { 7303 page_cache_release(page); 7304 page = NULL; 7305 } 7306 } 7307 7308 if (page) { 7309 if (page->index <= end_idx) 7310 found = true; 7311 page_cache_release(page); 7312 } 7313 7314 rcu_read_unlock(); 7315 return found; 7316} 7317 7318static int lock_extent_direct(struct inode *inode, u64 lockstart, u64 lockend, 7319 struct extent_state **cached_state, int writing) 7320{ 7321 struct btrfs_ordered_extent *ordered; 7322 int ret = 0; 7323 7324 while (1) { 7325 lock_extent_bits(&BTRFS_I(inode)->io_tree, lockstart, lockend, 7326 0, cached_state); 7327 /* 7328 * We're concerned with the entire range that we're going to be 7329 * doing DIO to, so we need to make sure theres no ordered 7330 * extents in this range. 7331 */ 7332 ordered = btrfs_lookup_ordered_range(inode, lockstart, 7333 lockend - lockstart + 1); 7334 7335 /* 7336 * We need to make sure there are no buffered pages in this 7337 * range either, we could have raced between the invalidate in 7338 * generic_file_direct_write and locking the extent. The 7339 * invalidate needs to happen so that reads after a write do not 7340 * get stale data. 
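		 * If either check fails we drop the extent lock, wait for or
		 * flush out the conflicting state, and retry.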
7341 */ 7342 if (!ordered && 7343 (!writing || 7344 !btrfs_page_exists_in_range(inode, lockstart, lockend))) 7345 break; 7346 7347 unlock_extent_cached(&BTRFS_I(inode)->io_tree, lockstart, lockend, 7348 cached_state, GFP_NOFS); 7349 7350 if (ordered) { 7351 btrfs_start_ordered_extent(inode, ordered, 1); 7352 btrfs_put_ordered_extent(ordered); 7353 } else { 7354 /* Screw you mmap */ 7355 ret = btrfs_fdatawrite_range(inode, lockstart, lockend); 7356 if (ret) 7357 break; 7358 ret = filemap_fdatawait_range(inode->i_mapping, 7359 lockstart, 7360 lockend); 7361 if (ret) 7362 break; 7363 7364 /* 7365 * If we found a page that couldn't be invalidated just 7366 * fall back to buffered. 7367 */ 7368 ret = invalidate_inode_pages2_range(inode->i_mapping, 7369 lockstart >> PAGE_CACHE_SHIFT, 7370 lockend >> PAGE_CACHE_SHIFT); 7371 if (ret) 7372 break; 7373 } 7374 7375 cond_resched(); 7376 } 7377 7378 return ret; 7379} 7380 7381static struct extent_map *create_pinned_em(struct inode *inode, u64 start, 7382 u64 len, u64 orig_start, 7383 u64 block_start, u64 block_len, 7384 u64 orig_block_len, u64 ram_bytes, 7385 int type) 7386{ 7387 struct extent_map_tree *em_tree; 7388 struct extent_map *em; 7389 struct btrfs_root *root = BTRFS_I(inode)->root; 7390 int ret; 7391 7392 em_tree = &BTRFS_I(inode)->extent_tree; 7393 em = alloc_extent_map(); 7394 if (!em) 7395 return ERR_PTR(-ENOMEM); 7396 7397 em->start = start; 7398 em->orig_start = orig_start; 7399 em->mod_start = start; 7400 em->mod_len = len; 7401 em->len = len; 7402 em->block_len = block_len; 7403 em->block_start = block_start; 7404 em->bdev = root->fs_info->fs_devices->latest_bdev; 7405 em->orig_block_len = orig_block_len; 7406 em->ram_bytes = ram_bytes; 7407 em->generation = -1; 7408 set_bit(EXTENT_FLAG_PINNED, &em->flags); 7409 if (type == BTRFS_ORDERED_PREALLOC) 7410 set_bit(EXTENT_FLAG_FILLING, &em->flags); 7411 7412 do { 7413 btrfs_drop_extent_cache(inode, em->start, 7414 em->start + em->len - 1, 0); 7415 write_lock(&em_tree->lock); 7416 ret = add_extent_mapping(em_tree, em, 1); 7417 write_unlock(&em_tree->lock); 7418 } while (ret == -EEXIST); 7419 7420 if (ret) { 7421 free_extent_map(em); 7422 return ERR_PTR(ret); 7423 } 7424 7425 return em; 7426} 7427 7428 7429static int btrfs_get_blocks_direct(struct inode *inode, sector_t iblock, 7430 struct buffer_head *bh_result, int create) 7431{ 7432 struct extent_map *em; 7433 struct btrfs_root *root = BTRFS_I(inode)->root; 7434 struct extent_state *cached_state = NULL; 7435 u64 start = iblock << inode->i_blkbits; 7436 u64 lockstart, lockend; 7437 u64 len = bh_result->b_size; 7438 u64 *outstanding_extents = NULL; 7439 int unlock_bits = EXTENT_LOCKED; 7440 int ret = 0; 7441 7442 if (create) 7443 unlock_bits |= EXTENT_DIRTY; 7444 else 7445 len = min_t(u64, len, root->sectorsize); 7446 7447 lockstart = start; 7448 lockend = start + len - 1; 7449 7450 if (current->journal_info) { 7451 /* 7452 * Need to pull our outstanding extents and set journal_info to NULL so 7453 * that anything that needs to check if there's a transction doesn't get 7454 * confused. 7455 */ 7456 outstanding_extents = current->journal_info; 7457 current->journal_info = NULL; 7458 } 7459 7460 /* 7461 * If this errors out it's because we couldn't invalidate pagecache for 7462 * this range and we need to fallback to buffered. 
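	 * That case is reported as -ENOTBLK, which tells the DIO code to
	 * retry through the buffered path.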
	 */
	if (lock_extent_direct(inode, lockstart, lockend, &cached_state, create))
		return -ENOTBLK;

	em = btrfs_get_extent(inode, NULL, 0, start, len, 0);
	if (IS_ERR(em)) {
		ret = PTR_ERR(em);
		goto unlock_err;
	}

	/*
	 * Ok for INLINE and COMPRESSED extents we need to fall back to
	 * buffered IO.  INLINE is special, and we could probably kludge it in
	 * here, but it's still buffered so for safety let's just fall back to
	 * the generic buffered path.
	 *
	 * For COMPRESSED we _have_ to read the entire extent in so we can
	 * decompress it, so there will be buffering required no matter what we
	 * do, so go ahead and fall back to buffered.
	 *
	 * We return -ENOTBLK because that's what makes DIO go ahead and go
	 * back to buffered IO.  Don't blame me, this is the price we pay for
	 * using the generic code.
	 */
	if (test_bit(EXTENT_FLAG_COMPRESSED, &em->flags) ||
	    em->block_start == EXTENT_MAP_INLINE) {
		free_extent_map(em);
		ret = -ENOTBLK;
		goto unlock_err;
	}

	/* Just a good old-fashioned hole, return */
	if (!create && (em->block_start == EXTENT_MAP_HOLE ||
			test_bit(EXTENT_FLAG_PREALLOC, &em->flags))) {
		free_extent_map(em);
		goto unlock_err;
	}

	/*
	 * We don't allocate a new extent in the following cases
	 *
	 * 1) The inode is marked as NODATACOW.  In this case we'll just use
	 * the existing extent.
	 * 2) The extent is marked as PREALLOC.  We're good to go here and can
	 * just use the extent.
	 *
	 */
	if (!create) {
		len = min(len, em->len - (start - em->start));
		lockstart = start + len;
		goto unlock;
	}

	if (test_bit(EXTENT_FLAG_PREALLOC, &em->flags) ||
	    ((BTRFS_I(inode)->flags & BTRFS_INODE_NODATACOW) &&
	     em->block_start != EXTENT_MAP_HOLE)) {
		int type;
		u64 block_start, orig_start, orig_block_len, ram_bytes;

		if (test_bit(EXTENT_FLAG_PREALLOC, &em->flags))
			type = BTRFS_ORDERED_PREALLOC;
		else
			type = BTRFS_ORDERED_NOCOW;
		len = min(len, em->len - (start - em->start));
		block_start = em->block_start + (start - em->start);

		if (can_nocow_extent(inode, start, &len, &orig_start,
				     &orig_block_len, &ram_bytes) == 1) {
			if (type == BTRFS_ORDERED_PREALLOC) {
				free_extent_map(em);
				em = create_pinned_em(inode, start, len,
						      orig_start,
						      block_start, len,
						      orig_block_len,
						      ram_bytes, type);
				if (IS_ERR(em)) {
					ret = PTR_ERR(em);
					goto unlock_err;
				}
			}

			ret = btrfs_add_ordered_extent_dio(inode, start,
					   block_start, len, len, type);
			if (ret) {
				free_extent_map(em);
				goto unlock_err;
			}
			goto unlock;
		}
	}

	/*
	 * this will cow the extent, reset the len in case we changed
	 * it above
	 */
	len = bh_result->b_size;
	free_extent_map(em);
	em = btrfs_new_extent_direct(inode, start, len);
	if (IS_ERR(em)) {
		ret = PTR_ERR(em);
		goto unlock_err;
	}
	len = min(len, em->len - (start - em->start));
unlock:
	bh_result->b_blocknr = (em->block_start + (start - em->start)) >>
		inode->i_blkbits;
	bh_result->b_size = len;
	bh_result->b_bdev = em->bdev;
	set_buffer_mapped(bh_result);
	if (create) {
		if (!test_bit(EXTENT_FLAG_PREALLOC, &em->flags))
			set_buffer_new(bh_result);

		/*
		 * Need to update the i_size under the extent lock so buffered
		 * readers will get
the updated i_size when we unlock. 7579 */ 7580 if (start + len > i_size_read(inode)) 7581 i_size_write(inode, start + len); 7582 7583 /* 7584 * If we have an outstanding_extents count still set then we're 7585 * within our reservation, otherwise we need to adjust our inode 7586 * counter appropriately. 7587 */ 7588 if (*outstanding_extents) { 7589 (*outstanding_extents)--; 7590 } else { 7591 spin_lock(&BTRFS_I(inode)->lock); 7592 BTRFS_I(inode)->outstanding_extents++; 7593 spin_unlock(&BTRFS_I(inode)->lock); 7594 } 7595 7596 current->journal_info = outstanding_extents; 7597 btrfs_free_reserved_data_space(inode, len); 7598 } 7599 7600 /* 7601 * In the case of write we need to clear and unlock the entire range, 7602 * in the case of read we need to unlock only the end area that we 7603 * aren't using if there is any left over space. 7604 */ 7605 if (lockstart < lockend) { 7606 clear_extent_bit(&BTRFS_I(inode)->io_tree, lockstart, 7607 lockend, unlock_bits, 1, 0, 7608 &cached_state, GFP_NOFS); 7609 } else { 7610 free_extent_state(cached_state); 7611 } 7612 7613 free_extent_map(em); 7614 7615 return 0; 7616 7617unlock_err: 7618 clear_extent_bit(&BTRFS_I(inode)->io_tree, lockstart, lockend, 7619 unlock_bits, 1, 0, &cached_state, GFP_NOFS); 7620 if (outstanding_extents) 7621 current->journal_info = outstanding_extents; 7622 return ret; 7623} 7624 7625static inline int submit_dio_repair_bio(struct inode *inode, struct bio *bio, 7626 int rw, int mirror_num) 7627{ 7628 struct btrfs_root *root = BTRFS_I(inode)->root; 7629 int ret; 7630 7631 BUG_ON(rw & REQ_WRITE); 7632 7633 bio_get(bio); 7634 7635 ret = btrfs_bio_wq_end_io(root->fs_info, bio, 7636 BTRFS_WQ_ENDIO_DIO_REPAIR); 7637 if (ret) 7638 goto err; 7639 7640 ret = btrfs_map_bio(root, rw, bio, mirror_num, 0); 7641err: 7642 bio_put(bio); 7643 return ret; 7644} 7645 7646static int btrfs_check_dio_repairable(struct inode *inode, 7647 struct bio *failed_bio, 7648 struct io_failure_record *failrec, 7649 int failed_mirror) 7650{ 7651 int num_copies; 7652 7653 num_copies = btrfs_num_copies(BTRFS_I(inode)->root->fs_info, 7654 failrec->logical, failrec->len); 7655 if (num_copies == 1) { 7656 /* 7657 * we only have a single copy of the data, so don't bother with 7658 * all the retry and error correction code that follows. no 7659 * matter what the error is, it is very likely to persist. 
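		 * Returning 0 here makes dio_read_error() give up and fail
		 * the read with -EIO instead of resubmitting.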
7660 */ 7661 pr_debug("Check DIO Repairable: cannot repair, num_copies=%d, next_mirror %d, failed_mirror %d\n", 7662 num_copies, failrec->this_mirror, failed_mirror); 7663 return 0; 7664 } 7665 7666 failrec->failed_mirror = failed_mirror; 7667 failrec->this_mirror++; 7668 if (failrec->this_mirror == failed_mirror) 7669 failrec->this_mirror++; 7670 7671 if (failrec->this_mirror > num_copies) { 7672 pr_debug("Check DIO Repairable: (fail) num_copies=%d, next_mirror %d, failed_mirror %d\n", 7673 num_copies, failrec->this_mirror, failed_mirror); 7674 return 0; 7675 } 7676 7677 return 1; 7678} 7679 7680static int dio_read_error(struct inode *inode, struct bio *failed_bio, 7681 struct page *page, u64 start, u64 end, 7682 int failed_mirror, bio_end_io_t *repair_endio, 7683 void *repair_arg) 7684{ 7685 struct io_failure_record *failrec; 7686 struct bio *bio; 7687 int isector; 7688 int read_mode; 7689 int ret; 7690 7691 BUG_ON(failed_bio->bi_rw & REQ_WRITE); 7692 7693 ret = btrfs_get_io_failure_record(inode, start, end, &failrec); 7694 if (ret) 7695 return ret; 7696 7697 ret = btrfs_check_dio_repairable(inode, failed_bio, failrec, 7698 failed_mirror); 7699 if (!ret) { 7700 free_io_failure(inode, failrec); 7701 return -EIO; 7702 } 7703 7704 if (failed_bio->bi_vcnt > 1) 7705 read_mode = READ_SYNC | REQ_FAILFAST_DEV; 7706 else 7707 read_mode = READ_SYNC; 7708 7709 isector = start - btrfs_io_bio(failed_bio)->logical; 7710 isector >>= inode->i_sb->s_blocksize_bits; 7711 bio = btrfs_create_repair_bio(inode, failed_bio, failrec, page, 7712 0, isector, repair_endio, repair_arg); 7713 if (!bio) { 7714 free_io_failure(inode, failrec); 7715 return -EIO; 7716 } 7717 7718 btrfs_debug(BTRFS_I(inode)->root->fs_info, 7719 "Repair DIO Read Error: submitting new dio read[%#x] to this_mirror=%d, in_validation=%d\n", 7720 read_mode, failrec->this_mirror, failrec->in_validation); 7721 7722 ret = submit_dio_repair_bio(inode, bio, read_mode, 7723 failrec->this_mirror); 7724 if (ret) { 7725 free_io_failure(inode, failrec); 7726 bio_put(bio); 7727 } 7728 7729 return ret; 7730} 7731 7732struct btrfs_retry_complete { 7733 struct completion done; 7734 struct inode *inode; 7735 u64 start; 7736 int uptodate; 7737}; 7738 7739static void btrfs_retry_endio_nocsum(struct bio *bio, int err) 7740{ 7741 struct btrfs_retry_complete *done = bio->bi_private; 7742 struct bio_vec *bvec; 7743 int i; 7744 7745 if (err) 7746 goto end; 7747 7748 done->uptodate = 1; 7749 bio_for_each_segment_all(bvec, bio, i) 7750 clean_io_failure(done->inode, done->start, bvec->bv_page, 0); 7751end: 7752 complete(&done->done); 7753 bio_put(bio); 7754} 7755 7756static int __btrfs_correct_data_nocsum(struct inode *inode, 7757 struct btrfs_io_bio *io_bio) 7758{ 7759 struct bio_vec *bvec; 7760 struct btrfs_retry_complete done; 7761 u64 start; 7762 int i; 7763 int ret; 7764 7765 start = io_bio->logical; 7766 done.inode = inode; 7767 7768 bio_for_each_segment_all(bvec, &io_bio->bio, i) { 7769try_again: 7770 done.uptodate = 0; 7771 done.start = start; 7772 init_completion(&done.done); 7773 7774 ret = dio_read_error(inode, &io_bio->bio, bvec->bv_page, start, 7775 start + bvec->bv_len - 1, 7776 io_bio->mirror_num, 7777 btrfs_retry_endio_nocsum, &done); 7778 if (ret) 7779 return ret; 7780 7781 wait_for_completion(&done.done); 7782 7783 if (!done.uptodate) { 7784 /* We might have another mirror, so try again */ 7785 goto try_again; 7786 } 7787 7788 start += bvec->bv_len; 7789 } 7790 7791 return 0; 7792} 7793 7794static void btrfs_retry_endio(struct bio *bio, int err) 
7795{ 7796 struct btrfs_retry_complete *done = bio->bi_private; 7797 struct btrfs_io_bio *io_bio = btrfs_io_bio(bio); 7798 struct bio_vec *bvec; 7799 int uptodate; 7800 int ret; 7801 int i; 7802 7803 if (err) 7804 goto end; 7805 7806 uptodate = 1; 7807 bio_for_each_segment_all(bvec, bio, i) { 7808 ret = __readpage_endio_check(done->inode, io_bio, i, 7809 bvec->bv_page, 0, 7810 done->start, bvec->bv_len); 7811 if (!ret) 7812 clean_io_failure(done->inode, done->start, 7813 bvec->bv_page, 0); 7814 else 7815 uptodate = 0; 7816 } 7817 7818 done->uptodate = uptodate; 7819end: 7820 complete(&done->done); 7821 bio_put(bio); 7822} 7823 7824static int __btrfs_subio_endio_read(struct inode *inode, 7825 struct btrfs_io_bio *io_bio, int err) 7826{ 7827 struct bio_vec *bvec; 7828 struct btrfs_retry_complete done; 7829 u64 start; 7830 u64 offset = 0; 7831 int i; 7832 int ret; 7833 7834 err = 0; 7835 start = io_bio->logical; 7836 done.inode = inode; 7837 7838 bio_for_each_segment_all(bvec, &io_bio->bio, i) { 7839 ret = __readpage_endio_check(inode, io_bio, i, bvec->bv_page, 7840 0, start, bvec->bv_len); 7841 if (likely(!ret)) 7842 goto next; 7843try_again: 7844 done.uptodate = 0; 7845 done.start = start; 7846 init_completion(&done.done); 7847 7848 ret = dio_read_error(inode, &io_bio->bio, bvec->bv_page, start, 7849 start + bvec->bv_len - 1, 7850 io_bio->mirror_num, 7851 btrfs_retry_endio, &done); 7852 if (ret) { 7853 err = ret; 7854 goto next; 7855 } 7856 7857 wait_for_completion(&done.done); 7858 7859 if (!done.uptodate) { 7860 /* We might have another mirror, so try again */ 7861 goto try_again; 7862 } 7863next: 7864 offset += bvec->bv_len; 7865 start += bvec->bv_len; 7866 } 7867 7868 return err; 7869} 7870 7871static int btrfs_subio_endio_read(struct inode *inode, 7872 struct btrfs_io_bio *io_bio, int err) 7873{ 7874 bool skip_csum = BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM; 7875 7876 if (skip_csum) { 7877 if (unlikely(err)) 7878 return __btrfs_correct_data_nocsum(inode, io_bio); 7879 else 7880 return 0; 7881 } else { 7882 return __btrfs_subio_endio_read(inode, io_bio, err); 7883 } 7884} 7885 7886static void btrfs_endio_direct_read(struct bio *bio, int err) 7887{ 7888 struct btrfs_dio_private *dip = bio->bi_private; 7889 struct inode *inode = dip->inode; 7890 struct bio *dio_bio; 7891 struct btrfs_io_bio *io_bio = btrfs_io_bio(bio); 7892 7893 if (dip->flags & BTRFS_DIO_ORIG_BIO_SUBMITTED) 7894 err = btrfs_subio_endio_read(inode, io_bio, err); 7895 7896 unlock_extent(&BTRFS_I(inode)->io_tree, dip->logical_offset, 7897 dip->logical_offset + dip->bytes - 1); 7898 dio_bio = dip->dio_bio; 7899 7900 kfree(dip); 7901 7902 /* If we had a csum failure make sure to clear the uptodate flag */ 7903 if (err) 7904 clear_bit(BIO_UPTODATE, &dio_bio->bi_flags); 7905 dio_end_io(dio_bio, err); 7906 7907 if (io_bio->end_io) 7908 io_bio->end_io(io_bio, err); 7909 bio_put(bio); 7910} 7911 7912static void btrfs_endio_direct_write(struct bio *bio, int err) 7913{ 7914 struct btrfs_dio_private *dip = bio->bi_private; 7915 struct inode *inode = dip->inode; 7916 struct btrfs_root *root = BTRFS_I(inode)->root; 7917 struct btrfs_ordered_extent *ordered = NULL; 7918 u64 ordered_offset = dip->logical_offset; 7919 u64 ordered_bytes = dip->bytes; 7920 struct bio *dio_bio; 7921 int ret; 7922 7923 if (err) 7924 goto out_done; 7925again: 7926 ret = btrfs_dec_test_first_ordered_pending(inode, &ordered, 7927 &ordered_offset, 7928 ordered_bytes, !err); 7929 if (!ret) 7930 goto out_test; 7931 7932 btrfs_init_work(&ordered->work, 
btrfs_endio_write_helper, 7933 finish_ordered_fn, NULL, NULL); 7934 btrfs_queue_work(root->fs_info->endio_write_workers, 7935 &ordered->work); 7936out_test: 7937 /* 7938 * our bio might span multiple ordered extents. If we haven't 7939 * completed the accounting for the whole dio, go back and try again 7940 */ 7941 if (ordered_offset < dip->logical_offset + dip->bytes) { 7942 ordered_bytes = dip->logical_offset + dip->bytes - 7943 ordered_offset; 7944 ordered = NULL; 7945 goto again; 7946 } 7947out_done: 7948 dio_bio = dip->dio_bio; 7949 7950 kfree(dip); 7951 7952 /* If we had an error make sure to clear the uptodate flag */ 7953 if (err) 7954 clear_bit(BIO_UPTODATE, &dio_bio->bi_flags); 7955 dio_end_io(dio_bio, err); 7956 bio_put(bio); 7957} 7958 7959static int __btrfs_submit_bio_start_direct_io(struct inode *inode, int rw, 7960 struct bio *bio, int mirror_num, 7961 unsigned long bio_flags, u64 offset) 7962{ 7963 int ret; 7964 struct btrfs_root *root = BTRFS_I(inode)->root; 7965 ret = btrfs_csum_one_bio(root, inode, bio, offset, 1); 7966 BUG_ON(ret); /* -ENOMEM */ 7967 return 0; 7968} 7969 7970static void btrfs_end_dio_bio(struct bio *bio, int err) 7971{ 7972 struct btrfs_dio_private *dip = bio->bi_private; 7973 7974 if (err) 7975 btrfs_warn(BTRFS_I(dip->inode)->root->fs_info, 7976 "direct IO failed ino %llu rw %lu sector %#Lx len %u err no %d", 7977 btrfs_ino(dip->inode), bio->bi_rw, 7978 (unsigned long long)bio->bi_iter.bi_sector, 7979 bio->bi_iter.bi_size, err); 7980 7981 if (dip->subio_endio) 7982 err = dip->subio_endio(dip->inode, btrfs_io_bio(bio), err); 7983 7984 if (err) { 7985 dip->errors = 1; 7986 7987 /* 7988 * before atomic variable goto zero, we must make sure 7989 * dip->errors is perceived to be set. 7990 */ 7991 smp_mb__before_atomic(); 7992 } 7993 7994 /* if there are more bios still pending for this dio, just exit */ 7995 if (!atomic_dec_and_test(&dip->pending_bios)) 7996 goto out; 7997 7998 if (dip->errors) { 7999 bio_io_error(dip->orig_bio); 8000 } else { 8001 set_bit(BIO_UPTODATE, &dip->dio_bio->bi_flags); 8002 bio_endio(dip->orig_bio, 0); 8003 } 8004out: 8005 bio_put(bio); 8006} 8007 8008static struct bio *btrfs_dio_bio_alloc(struct block_device *bdev, 8009 u64 first_sector, gfp_t gfp_flags) 8010{ 8011 int nr_vecs = bio_get_nr_vecs(bdev); 8012 return btrfs_bio_alloc(bdev, first_sector, nr_vecs, gfp_flags); 8013} 8014 8015static inline int btrfs_lookup_and_bind_dio_csum(struct btrfs_root *root, 8016 struct inode *inode, 8017 struct btrfs_dio_private *dip, 8018 struct bio *bio, 8019 u64 file_offset) 8020{ 8021 struct btrfs_io_bio *io_bio = btrfs_io_bio(bio); 8022 struct btrfs_io_bio *orig_io_bio = btrfs_io_bio(dip->orig_bio); 8023 int ret; 8024 8025 /* 8026 * We load all the csum data we need when we submit 8027 * the first bio to reduce the csum tree search and 8028 * contention. 
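	 * Sub-bios never repeat that lookup; their csum pointer is simply
	 * aimed into the original bio's csum array at the block offset
	 * worked out below.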
8029 */ 8030 if (dip->logical_offset == file_offset) { 8031 ret = btrfs_lookup_bio_sums_dio(root, inode, dip->orig_bio, 8032 file_offset); 8033 if (ret) 8034 return ret; 8035 } 8036 8037 if (bio == dip->orig_bio) 8038 return 0; 8039 8040 file_offset -= dip->logical_offset; 8041 file_offset >>= inode->i_sb->s_blocksize_bits; 8042 io_bio->csum = (u8 *)(((u32 *)orig_io_bio->csum) + file_offset); 8043 8044 return 0; 8045} 8046 8047static inline int __btrfs_submit_dio_bio(struct bio *bio, struct inode *inode, 8048 int rw, u64 file_offset, int skip_sum, 8049 int async_submit) 8050{ 8051 struct btrfs_dio_private *dip = bio->bi_private; 8052 int write = rw & REQ_WRITE; 8053 struct btrfs_root *root = BTRFS_I(inode)->root; 8054 int ret; 8055 8056 if (async_submit) 8057 async_submit = !atomic_read(&BTRFS_I(inode)->sync_writers); 8058 8059 bio_get(bio); 8060 8061 if (!write) { 8062 ret = btrfs_bio_wq_end_io(root->fs_info, bio, 8063 BTRFS_WQ_ENDIO_DATA); 8064 if (ret) 8065 goto err; 8066 } 8067 8068 if (skip_sum) 8069 goto map; 8070 8071 if (write && async_submit) { 8072 ret = btrfs_wq_submit_bio(root->fs_info, 8073 inode, rw, bio, 0, 0, 8074 file_offset, 8075 __btrfs_submit_bio_start_direct_io, 8076 __btrfs_submit_bio_done); 8077 goto err; 8078 } else if (write) { 8079 /* 8080 * If we aren't doing async submit, calculate the csum of the 8081 * bio now. 8082 */ 8083 ret = btrfs_csum_one_bio(root, inode, bio, file_offset, 1); 8084 if (ret) 8085 goto err; 8086 } else { 8087 ret = btrfs_lookup_and_bind_dio_csum(root, inode, dip, bio, 8088 file_offset); 8089 if (ret) 8090 goto err; 8091 } 8092map: 8093 ret = btrfs_map_bio(root, rw, bio, 0, async_submit); 8094err: 8095 bio_put(bio); 8096 return ret; 8097} 8098 8099static int btrfs_submit_direct_hook(int rw, struct btrfs_dio_private *dip, 8100 int skip_sum) 8101{ 8102 struct inode *inode = dip->inode; 8103 struct btrfs_root *root = BTRFS_I(inode)->root; 8104 struct bio *bio; 8105 struct bio *orig_bio = dip->orig_bio; 8106 struct bio_vec *bvec = orig_bio->bi_io_vec; 8107 u64 start_sector = orig_bio->bi_iter.bi_sector; 8108 u64 file_offset = dip->logical_offset; 8109 u64 submit_len = 0; 8110 u64 map_length; 8111 int nr_pages = 0; 8112 int ret; 8113 int async_submit = 0; 8114 8115 map_length = orig_bio->bi_iter.bi_size; 8116 ret = btrfs_map_block(root->fs_info, rw, start_sector << 9, 8117 &map_length, NULL, 0); 8118 if (ret) 8119 return -EIO; 8120 8121 if (map_length >= orig_bio->bi_iter.bi_size) { 8122 bio = orig_bio; 8123 dip->flags |= BTRFS_DIO_ORIG_BIO_SUBMITTED; 8124 goto submit; 8125 } 8126 8127 /* async crcs make it difficult to collect full stripe writes. */ 8128 if (btrfs_get_alloc_profile(root, 1) & BTRFS_BLOCK_GROUP_RAID56_MASK) 8129 async_submit = 0; 8130 else 8131 async_submit = 1; 8132 8133 bio = btrfs_dio_bio_alloc(orig_bio->bi_bdev, start_sector, GFP_NOFS); 8134 if (!bio) 8135 return -ENOMEM; 8136 8137 bio->bi_private = dip; 8138 bio->bi_end_io = btrfs_end_dio_bio; 8139 btrfs_io_bio(bio)->logical = file_offset; 8140 atomic_inc(&dip->pending_bios); 8141 8142 while (bvec <= (orig_bio->bi_io_vec + orig_bio->bi_vcnt - 1)) { 8143 if (map_length < submit_len + bvec->bv_len || 8144 bio_add_page(bio, bvec->bv_page, bvec->bv_len, 8145 bvec->bv_offset) < bvec->bv_len) { 8146 /* 8147 * inc the count before we submit the bio so 8148 * we know the end IO handler won't happen before 8149 * we inc the count. 
Otherwise, the dip might get freed 8150 * before we're done setting it up 8151 */ 8152 atomic_inc(&dip->pending_bios); 8153 ret = __btrfs_submit_dio_bio(bio, inode, rw, 8154 file_offset, skip_sum, 8155 async_submit); 8156 if (ret) { 8157 bio_put(bio); 8158 atomic_dec(&dip->pending_bios); 8159 goto out_err; 8160 } 8161 8162 start_sector += submit_len >> 9; 8163 file_offset += submit_len; 8164 8165 submit_len = 0; 8166 nr_pages = 0; 8167 8168 bio = btrfs_dio_bio_alloc(orig_bio->bi_bdev, 8169 start_sector, GFP_NOFS); 8170 if (!bio) 8171 goto out_err; 8172 bio->bi_private = dip; 8173 bio->bi_end_io = btrfs_end_dio_bio; 8174 btrfs_io_bio(bio)->logical = file_offset; 8175 8176 map_length = orig_bio->bi_iter.bi_size; 8177 ret = btrfs_map_block(root->fs_info, rw, 8178 start_sector << 9, 8179 &map_length, NULL, 0); 8180 if (ret) { 8181 bio_put(bio); 8182 goto out_err; 8183 } 8184 } else { 8185 submit_len += bvec->bv_len; 8186 nr_pages++; 8187 bvec++; 8188 } 8189 } 8190 8191submit: 8192 ret = __btrfs_submit_dio_bio(bio, inode, rw, file_offset, skip_sum, 8193 async_submit); 8194 if (!ret) 8195 return 0; 8196 8197 bio_put(bio); 8198out_err: 8199 dip->errors = 1; 8200 /* 8201 * before atomic variable goto zero, we must 8202 * make sure dip->errors is perceived to be set. 8203 */ 8204 smp_mb__before_atomic(); 8205 if (atomic_dec_and_test(&dip->pending_bios)) 8206 bio_io_error(dip->orig_bio); 8207 8208 /* bio_end_io() will handle error, so we needn't return it */ 8209 return 0; 8210} 8211 8212static void btrfs_submit_direct(int rw, struct bio *dio_bio, 8213 struct inode *inode, loff_t file_offset) 8214{ 8215 struct btrfs_root *root = BTRFS_I(inode)->root; 8216 struct btrfs_dio_private *dip; 8217 struct bio *io_bio; 8218 struct btrfs_io_bio *btrfs_bio; 8219 int skip_sum; 8220 int write = rw & REQ_WRITE; 8221 int ret = 0; 8222 8223 skip_sum = BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM; 8224 8225 io_bio = btrfs_bio_clone(dio_bio, GFP_NOFS); 8226 if (!io_bio) { 8227 ret = -ENOMEM; 8228 goto free_ordered; 8229 } 8230 8231 dip = kzalloc(sizeof(*dip), GFP_NOFS); 8232 if (!dip) { 8233 ret = -ENOMEM; 8234 goto free_io_bio; 8235 } 8236 8237 dip->private = dio_bio->bi_private; 8238 dip->inode = inode; 8239 dip->logical_offset = file_offset; 8240 dip->bytes = dio_bio->bi_iter.bi_size; 8241 dip->disk_bytenr = (u64)dio_bio->bi_iter.bi_sector << 9; 8242 io_bio->bi_private = dip; 8243 dip->orig_bio = io_bio; 8244 dip->dio_bio = dio_bio; 8245 atomic_set(&dip->pending_bios, 0); 8246 btrfs_bio = btrfs_io_bio(io_bio); 8247 btrfs_bio->logical = file_offset; 8248 8249 if (write) { 8250 io_bio->bi_end_io = btrfs_endio_direct_write; 8251 } else { 8252 io_bio->bi_end_io = btrfs_endio_direct_read; 8253 dip->subio_endio = btrfs_subio_endio_read; 8254 } 8255 8256 ret = btrfs_submit_direct_hook(rw, dip, skip_sum); 8257 if (!ret) 8258 return; 8259 8260 if (btrfs_bio->end_io) 8261 btrfs_bio->end_io(btrfs_bio, ret); 8262free_io_bio: 8263 bio_put(io_bio); 8264 8265free_ordered: 8266 /* 8267 * If this is a write, we need to clean up the reserved space and kill 8268 * the ordered extent. 
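	 * The ordered extent is put twice below: once for the reference
	 * taken by the lookup, and once more to drop it for good, since
	 * the normal completion path will never run for this failed
	 * submission.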
8269 */ 8270 if (write) { 8271 struct btrfs_ordered_extent *ordered; 8272 ordered = btrfs_lookup_ordered_extent(inode, file_offset); 8273 if (!test_bit(BTRFS_ORDERED_PREALLOC, &ordered->flags) && 8274 !test_bit(BTRFS_ORDERED_NOCOW, &ordered->flags)) 8275 btrfs_free_reserved_extent(root, ordered->start, 8276 ordered->disk_len, 1); 8277 btrfs_put_ordered_extent(ordered); 8278 btrfs_put_ordered_extent(ordered); 8279 } 8280 bio_endio(dio_bio, ret); 8281} 8282 8283static ssize_t check_direct_IO(struct btrfs_root *root, struct kiocb *iocb, 8284 const struct iov_iter *iter, loff_t offset) 8285{ 8286 int seg; 8287 int i; 8288 unsigned blocksize_mask = root->sectorsize - 1; 8289 ssize_t retval = -EINVAL; 8290 8291 if (offset & blocksize_mask) 8292 goto out; 8293 8294 if (iov_iter_alignment(iter) & blocksize_mask) 8295 goto out; 8296 8297 /* If this is a write we don't need to check anymore */ 8298 if (iov_iter_rw(iter) == WRITE) 8299 return 0; 8300 /* 8301 * Check to make sure we don't have duplicate iov_base's in this 8302 * iovec, if so return EINVAL, otherwise we'll get csum errors 8303 * when reading back. 8304 */ 8305 for (seg = 0; seg < iter->nr_segs; seg++) { 8306 for (i = seg + 1; i < iter->nr_segs; i++) { 8307 if (iter->iov[seg].iov_base == iter->iov[i].iov_base) 8308 goto out; 8309 } 8310 } 8311 retval = 0; 8312out: 8313 return retval; 8314} 8315 8316static ssize_t btrfs_direct_IO(struct kiocb *iocb, struct iov_iter *iter, 8317 loff_t offset) 8318{ 8319 struct file *file = iocb->ki_filp; 8320 struct inode *inode = file->f_mapping->host; 8321 u64 outstanding_extents = 0; 8322 size_t count = 0; 8323 int flags = 0; 8324 bool wakeup = true; 8325 bool relock = false; 8326 ssize_t ret; 8327 8328 if (check_direct_IO(BTRFS_I(inode)->root, iocb, iter, offset)) 8329 return 0; 8330 8331 inode_dio_begin(inode); 8332 smp_mb__after_atomic(); 8333 8334 /* 8335 * The generic stuff only does filemap_write_and_wait_range, which 8336 * isn't enough if we've written compressed pages to this area, so 8337 * we need to flush the dirty pages again to make absolutely sure 8338 * that any outstanding dirty pages are on disk. 8339 */ 8340 count = iov_iter_count(iter); 8341 if (test_bit(BTRFS_INODE_HAS_ASYNC_EXTENT, 8342 &BTRFS_I(inode)->runtime_flags)) 8343 filemap_fdatawrite_range(inode->i_mapping, offset, 8344 offset + count - 1); 8345 8346 if (iov_iter_rw(iter) == WRITE) { 8347 /* 8348 * If the write DIO is beyond the EOF, we need update 8349 * the isize, but it is protected by i_mutex. So we can 8350 * not unlock the i_mutex at this case. 8351 */ 8352 if (offset + count <= inode->i_size) { 8353 mutex_unlock(&inode->i_mutex); 8354 relock = true; 8355 } 8356 ret = btrfs_delalloc_reserve_space(inode, count); 8357 if (ret) 8358 goto out; 8359 outstanding_extents = div64_u64(count + 8360 BTRFS_MAX_EXTENT_SIZE - 1, 8361 BTRFS_MAX_EXTENT_SIZE); 8362 8363 /* 8364 * We need to know how many extents we reserved so that we can 8365 * do the accounting properly if we go over the number we 8366 * originally calculated. Abuse current->journal_info for this. 
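	 * outstanding_extents above is simply count rounded up to whole
	 * BTRFS_MAX_EXTENT_SIZE units, e.g. a write covering two and a
	 * half max-sized extents reserves three.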
8367 */ 8368 current->journal_info = &outstanding_extents; 8369 } else if (test_bit(BTRFS_INODE_READDIO_NEED_LOCK, 8370 &BTRFS_I(inode)->runtime_flags)) { 8371 inode_dio_end(inode); 8372 flags = DIO_LOCKING | DIO_SKIP_HOLES; 8373 wakeup = false; 8374 } 8375 8376 ret = __blockdev_direct_IO(iocb, inode, 8377 BTRFS_I(inode)->root->fs_info->fs_devices->latest_bdev, 8378 iter, offset, btrfs_get_blocks_direct, NULL, 8379 btrfs_submit_direct, flags); 8380 if (iov_iter_rw(iter) == WRITE) { 8381 current->journal_info = NULL; 8382 if (ret < 0 && ret != -EIOCBQUEUED) 8383 btrfs_delalloc_release_space(inode, count); 8384 else if (ret >= 0 && (size_t)ret < count) 8385 btrfs_delalloc_release_space(inode, 8386 count - (size_t)ret); 8387 } 8388out: 8389 if (wakeup) 8390 inode_dio_end(inode); 8391 if (relock) 8392 mutex_lock(&inode->i_mutex); 8393 8394 return ret; 8395} 8396 8397#define BTRFS_FIEMAP_FLAGS (FIEMAP_FLAG_SYNC) 8398 8399static int btrfs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo, 8400 __u64 start, __u64 len) 8401{ 8402 int ret; 8403 8404 ret = fiemap_check_flags(fieinfo, BTRFS_FIEMAP_FLAGS); 8405 if (ret) 8406 return ret; 8407 8408 return extent_fiemap(inode, fieinfo, start, len, btrfs_get_extent_fiemap); 8409} 8410 8411int btrfs_readpage(struct file *file, struct page *page) 8412{ 8413 struct extent_io_tree *tree; 8414 tree = &BTRFS_I(page->mapping->host)->io_tree; 8415 return extent_read_full_page(tree, page, btrfs_get_extent, 0); 8416} 8417 8418static int btrfs_writepage(struct page *page, struct writeback_control *wbc) 8419{ 8420 struct extent_io_tree *tree; 8421 struct inode *inode = page->mapping->host; 8422 int ret; 8423 8424 if (current->flags & PF_MEMALLOC) { 8425 redirty_page_for_writepage(wbc, page); 8426 unlock_page(page); 8427 return 0; 8428 } 8429 8430 /* 8431 * If we are under memory pressure we will call this directly from the 8432 * VM, we need to make sure we have the inode referenced for the ordered 8433 * extent. If not just return like we didn't do anything. 
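	 * Returning AOP_WRITEPAGE_ACTIVATE after redirtying tells the
	 * reclaim code that nothing was written and the page should go
	 * back on the active list.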
8434 */ 8435 if (!igrab(inode)) { 8436 redirty_page_for_writepage(wbc, page); 8437 return AOP_WRITEPAGE_ACTIVATE; 8438 } 8439 tree = &BTRFS_I(page->mapping->host)->io_tree; 8440 ret = extent_write_full_page(tree, page, btrfs_get_extent, wbc); 8441 btrfs_add_delayed_iput(inode); 8442 return ret; 8443} 8444 8445static int btrfs_writepages(struct address_space *mapping, 8446 struct writeback_control *wbc) 8447{ 8448 struct extent_io_tree *tree; 8449 8450 tree = &BTRFS_I(mapping->host)->io_tree; 8451 return extent_writepages(tree, mapping, btrfs_get_extent, wbc); 8452} 8453 8454static int 8455btrfs_readpages(struct file *file, struct address_space *mapping, 8456 struct list_head *pages, unsigned nr_pages) 8457{ 8458 struct extent_io_tree *tree; 8459 tree = &BTRFS_I(mapping->host)->io_tree; 8460 return extent_readpages(tree, mapping, pages, nr_pages, 8461 btrfs_get_extent); 8462} 8463static int __btrfs_releasepage(struct page *page, gfp_t gfp_flags) 8464{ 8465 struct extent_io_tree *tree; 8466 struct extent_map_tree *map; 8467 int ret; 8468 8469 tree = &BTRFS_I(page->mapping->host)->io_tree; 8470 map = &BTRFS_I(page->mapping->host)->extent_tree; 8471 ret = try_release_extent_mapping(map, tree, page, gfp_flags); 8472 if (ret == 1) { 8473 ClearPagePrivate(page); 8474 set_page_private(page, 0); 8475 page_cache_release(page); 8476 } 8477 return ret; 8478} 8479 8480static int btrfs_releasepage(struct page *page, gfp_t gfp_flags) 8481{ 8482 if (PageWriteback(page) || PageDirty(page)) 8483 return 0; 8484 return __btrfs_releasepage(page, gfp_flags & GFP_NOFS); 8485} 8486 8487static void btrfs_invalidatepage(struct page *page, unsigned int offset, 8488 unsigned int length) 8489{ 8490 struct inode *inode = page->mapping->host; 8491 struct extent_io_tree *tree; 8492 struct btrfs_ordered_extent *ordered; 8493 struct extent_state *cached_state = NULL; 8494 u64 page_start = page_offset(page); 8495 u64 page_end = page_start + PAGE_CACHE_SIZE - 1; 8496 int inode_evicting = inode->i_state & I_FREEING; 8497 8498 /* 8499 * we have the page locked, so new writeback can't start, 8500 * and the dirty bit won't be cleared while we are here. 
8501 * 8502 * Wait for IO on this page so that we can safely clear 8503 * the PagePrivate2 bit and do ordered accounting 8504 */ 8505 wait_on_page_writeback(page); 8506 8507 tree = &BTRFS_I(inode)->io_tree; 8508 if (offset) { 8509 btrfs_releasepage(page, GFP_NOFS); 8510 return; 8511 } 8512 8513 if (!inode_evicting) 8514 lock_extent_bits(tree, page_start, page_end, 0, &cached_state); 8515 ordered = btrfs_lookup_ordered_extent(inode, page_start); 8516 if (ordered) { 8517 /* 8518 * IO on this page will never be started, so we need 8519 * to account for any ordered extents now 8520 */ 8521 if (!inode_evicting) 8522 clear_extent_bit(tree, page_start, page_end, 8523 EXTENT_DIRTY | EXTENT_DELALLOC | 8524 EXTENT_LOCKED | EXTENT_DO_ACCOUNTING | 8525 EXTENT_DEFRAG, 1, 0, &cached_state, 8526 GFP_NOFS); 8527 /* 8528 * whoever cleared the private bit is responsible 8529 * for the finish_ordered_io 8530 */ 8531 if (TestClearPagePrivate2(page)) { 8532 struct btrfs_ordered_inode_tree *tree; 8533 u64 new_len; 8534 8535 tree = &BTRFS_I(inode)->ordered_tree; 8536 8537 spin_lock_irq(&tree->lock); 8538 set_bit(BTRFS_ORDERED_TRUNCATED, &ordered->flags); 8539 new_len = page_start - ordered->file_offset; 8540 if (new_len < ordered->truncated_len) 8541 ordered->truncated_len = new_len; 8542 spin_unlock_irq(&tree->lock); 8543 8544 if (btrfs_dec_test_ordered_pending(inode, &ordered, 8545 page_start, 8546 PAGE_CACHE_SIZE, 1)) 8547 btrfs_finish_ordered_io(ordered); 8548 } 8549 btrfs_put_ordered_extent(ordered); 8550 if (!inode_evicting) { 8551 cached_state = NULL; 8552 lock_extent_bits(tree, page_start, page_end, 0, 8553 &cached_state); 8554 } 8555 } 8556 8557 if (!inode_evicting) { 8558 clear_extent_bit(tree, page_start, page_end, 8559 EXTENT_LOCKED | EXTENT_DIRTY | 8560 EXTENT_DELALLOC | EXTENT_DO_ACCOUNTING | 8561 EXTENT_DEFRAG, 1, 1, 8562 &cached_state, GFP_NOFS); 8563 8564 __btrfs_releasepage(page, GFP_NOFS); 8565 } 8566 8567 ClearPageChecked(page); 8568 if (PagePrivate(page)) { 8569 ClearPagePrivate(page); 8570 set_page_private(page, 0); 8571 page_cache_release(page); 8572 } 8573} 8574 8575/* 8576 * btrfs_page_mkwrite() is not allowed to change the file size as it gets 8577 * called from a page fault handler when a page is first dirtied. Hence we must 8578 * be careful to check for EOF conditions here. We set the page up correctly 8579 * for a written page which means we get ENOSPC checking when writing into 8580 * holes and correct delalloc and unwritten extent mapping on filesystems that 8581 * support these features. 8582 * 8583 * We are not allowed to take the i_mutex here so we have to play games to 8584 * protect against truncate races as the page could now be beyond EOF. Because 8585 * vmtruncate() writes the inode size before removing pages, once we have the 8586 * page lock we can determine safely if the page is beyond EOF. If it is not 8587 * beyond EOF, then the page is guaranteed safe against truncation until we 8588 * unlock the page. 
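 *
 * On success the page is left locked and dirty and VM_FAULT_LOCKED is
 * returned; the error paths unlock the page and release any delalloc
 * space that was reserved at the start of the handler.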
8589 */ 8590int btrfs_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf) 8591{ 8592 struct page *page = vmf->page; 8593 struct inode *inode = file_inode(vma->vm_file); 8594 struct btrfs_root *root = BTRFS_I(inode)->root; 8595 struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree; 8596 struct btrfs_ordered_extent *ordered; 8597 struct extent_state *cached_state = NULL; 8598 char *kaddr; 8599 unsigned long zero_start; 8600 loff_t size; 8601 int ret; 8602 int reserved = 0; 8603 u64 page_start; 8604 u64 page_end; 8605 8606 sb_start_pagefault(inode->i_sb); 8607 ret = btrfs_delalloc_reserve_space(inode, PAGE_CACHE_SIZE); 8608 if (!ret) { 8609 ret = file_update_time(vma->vm_file); 8610 reserved = 1; 8611 } 8612 if (ret) { 8613 if (ret == -ENOMEM) 8614 ret = VM_FAULT_OOM; 8615 else /* -ENOSPC, -EIO, etc */ 8616 ret = VM_FAULT_SIGBUS; 8617 if (reserved) 8618 goto out; 8619 goto out_noreserve; 8620 } 8621 8622 ret = VM_FAULT_NOPAGE; /* make the VM retry the fault */ 8623again: 8624 lock_page(page); 8625 size = i_size_read(inode); 8626 page_start = page_offset(page); 8627 page_end = page_start + PAGE_CACHE_SIZE - 1; 8628 8629 if ((page->mapping != inode->i_mapping) || 8630 (page_start >= size)) { 8631 /* page got truncated out from underneath us */ 8632 goto out_unlock; 8633 } 8634 wait_on_page_writeback(page); 8635 8636 lock_extent_bits(io_tree, page_start, page_end, 0, &cached_state); 8637 set_page_extent_mapped(page); 8638 8639 /* 8640 * we can't set the delalloc bits if there are pending ordered 8641 * extents. Drop our locks and wait for them to finish 8642 */ 8643 ordered = btrfs_lookup_ordered_extent(inode, page_start); 8644 if (ordered) { 8645 unlock_extent_cached(io_tree, page_start, page_end, 8646 &cached_state, GFP_NOFS); 8647 unlock_page(page); 8648 btrfs_start_ordered_extent(inode, ordered, 1); 8649 btrfs_put_ordered_extent(ordered); 8650 goto again; 8651 } 8652 8653 /* 8654 * XXX - page_mkwrite gets called every time the page is dirtied, even 8655 * if it was already dirty, so for space accounting reasons we need to 8656 * clear any delalloc bits for the range we are fixing to save. There 8657 * is probably a better way to do this, but for now keep consistent with 8658 * prepare_pages in the normal write path. 
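	 * Clearing EXTENT_DO_ACCOUNTING along with the delalloc bits drops
	 * the old reservation for this range so it is not counted twice
	 * when btrfs_set_extent_delalloc marks it again below.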
8659 */ 8660 clear_extent_bit(&BTRFS_I(inode)->io_tree, page_start, page_end, 8661 EXTENT_DIRTY | EXTENT_DELALLOC | 8662 EXTENT_DO_ACCOUNTING | EXTENT_DEFRAG, 8663 0, 0, &cached_state, GFP_NOFS); 8664 8665 ret = btrfs_set_extent_delalloc(inode, page_start, page_end, 8666 &cached_state); 8667 if (ret) { 8668 unlock_extent_cached(io_tree, page_start, page_end, 8669 &cached_state, GFP_NOFS); 8670 ret = VM_FAULT_SIGBUS; 8671 goto out_unlock; 8672 } 8673 ret = 0; 8674 8675 /* page is wholly or partially inside EOF */ 8676 if (page_start + PAGE_CACHE_SIZE > size) 8677 zero_start = size & ~PAGE_CACHE_MASK; 8678 else 8679 zero_start = PAGE_CACHE_SIZE; 8680 8681 if (zero_start != PAGE_CACHE_SIZE) { 8682 kaddr = kmap(page); 8683 memset(kaddr + zero_start, 0, PAGE_CACHE_SIZE - zero_start); 8684 flush_dcache_page(page); 8685 kunmap(page); 8686 } 8687 ClearPageChecked(page); 8688 set_page_dirty(page); 8689 SetPageUptodate(page); 8690 8691 BTRFS_I(inode)->last_trans = root->fs_info->generation; 8692 BTRFS_I(inode)->last_sub_trans = BTRFS_I(inode)->root->log_transid; 8693 BTRFS_I(inode)->last_log_commit = BTRFS_I(inode)->root->last_log_commit; 8694 8695 unlock_extent_cached(io_tree, page_start, page_end, &cached_state, GFP_NOFS); 8696 8697out_unlock: 8698 if (!ret) { 8699 sb_end_pagefault(inode->i_sb); 8700 return VM_FAULT_LOCKED; 8701 } 8702 unlock_page(page); 8703out: 8704 btrfs_delalloc_release_space(inode, PAGE_CACHE_SIZE); 8705out_noreserve: 8706 sb_end_pagefault(inode->i_sb); 8707 return ret; 8708} 8709 8710static int btrfs_truncate(struct inode *inode) 8711{ 8712 struct btrfs_root *root = BTRFS_I(inode)->root; 8713 struct btrfs_block_rsv *rsv; 8714 int ret = 0; 8715 int err = 0; 8716 struct btrfs_trans_handle *trans; 8717 u64 mask = root->sectorsize - 1; 8718 u64 min_size = btrfs_calc_trunc_metadata_size(root, 1); 8719 8720 ret = btrfs_wait_ordered_range(inode, inode->i_size & (~mask), 8721 (u64)-1); 8722 if (ret) 8723 return ret; 8724 8725 /* 8726 * Yes ladies and gentlemen, this is indeed ugly. The fact is we have 8727 * 3 things going on here 8728 * 8729 * 1) We need to reserve space for our orphan item and the space to 8730 * delete our orphan item. Lord knows we don't want to have a dangling 8731 * orphan item because we didn't reserve space to remove it. 8732 * 8733 * 2) We need to reserve space to update our inode. 8734 * 8735 * 3) We need to have something to cache all the space that is going to 8736 * be free'd up by the truncate operation, but also have some slack 8737 * space reserved in case it uses space during the truncate (thank you 8738 * very much snapshotting). 8739 * 8740 * And we need these to all be separate. The fact is we can use a lot of 8741 * space doing the truncate, and we have no earthly idea how much space 8742 * we will use, so we need the truncate reservation to be separate so it 8743 * doesn't end up using space reserved for updating the inode or 8744 * removing the orphan item. We also need to be able to stop the 8745 * transaction and start a new one, which means we need to be able to 8746 * update the inode several times, and we have no way of knowing how 8747 * many times that will be, so we can't just reserve 1 item for the 8748 * entirety of the operation, so that has to be done separately as well. 8749 * Then there is the orphan item, which does indeed need to be held on 8750 * to for the whole operation, and we need nobody to touch this reserved 8751 * space except the orphan code.
8752 * 8753 * So that leaves us with 8754 * 8755 * 1) root->orphan_block_rsv - for the orphan deletion. 8756 * 2) rsv - for the truncate reservation, which we will steal from the 8757 * transaction reservation. 8758 * 3) fs_info->trans_block_rsv - this will have 1 items worth left for 8759 * updating the inode. 8760 */ 8761 rsv = btrfs_alloc_block_rsv(root, BTRFS_BLOCK_RSV_TEMP); 8762 if (!rsv) 8763 return -ENOMEM; 8764 rsv->size = min_size; 8765 rsv->failfast = 1; 8766 8767 /* 8768 * 1 for the truncate slack space 8769 * 1 for updating the inode. 8770 */ 8771 trans = btrfs_start_transaction(root, 2); 8772 if (IS_ERR(trans)) { 8773 err = PTR_ERR(trans); 8774 goto out; 8775 } 8776 8777 /* Migrate the slack space for the truncate to our reserve */ 8778 ret = btrfs_block_rsv_migrate(&root->fs_info->trans_block_rsv, rsv, 8779 min_size); 8780 BUG_ON(ret); 8781 8782 /* 8783 * So if we truncate and then write and fsync we normally would just 8784 * write the extents that changed, which is a problem if we need to 8785 * first truncate that entire inode. So set this flag so we write out 8786 * all of the extents in the inode to the sync log so we're completely 8787 * safe. 8788 */ 8789 set_bit(BTRFS_INODE_NEEDS_FULL_SYNC, &BTRFS_I(inode)->runtime_flags); 8790 trans->block_rsv = rsv; 8791 8792 while (1) { 8793 ret = btrfs_truncate_inode_items(trans, root, inode, 8794 inode->i_size, 8795 BTRFS_EXTENT_DATA_KEY); 8796 if (ret != -ENOSPC && ret != -EAGAIN) { 8797 err = ret; 8798 break; 8799 } 8800 8801 trans->block_rsv = &root->fs_info->trans_block_rsv; 8802 ret = btrfs_update_inode(trans, root, inode); 8803 if (ret) { 8804 err = ret; 8805 break; 8806 } 8807 8808 btrfs_end_transaction(trans, root); 8809 btrfs_btree_balance_dirty(root); 8810 8811 trans = btrfs_start_transaction(root, 2); 8812 if (IS_ERR(trans)) { 8813 ret = err = PTR_ERR(trans); 8814 trans = NULL; 8815 break; 8816 } 8817 8818 ret = btrfs_block_rsv_migrate(&root->fs_info->trans_block_rsv, 8819 rsv, min_size); 8820 BUG_ON(ret); /* shouldn't happen */ 8821 trans->block_rsv = rsv; 8822 } 8823 8824 if (ret == 0 && inode->i_nlink > 0) { 8825 trans->block_rsv = root->orphan_block_rsv; 8826 ret = btrfs_orphan_del(trans, inode); 8827 if (ret) 8828 err = ret; 8829 } 8830 8831 if (trans) { 8832 trans->block_rsv = &root->fs_info->trans_block_rsv; 8833 ret = btrfs_update_inode(trans, root, inode); 8834 if (ret && !err) 8835 err = ret; 8836 8837 ret = btrfs_end_transaction(trans, root); 8838 btrfs_btree_balance_dirty(root); 8839 } 8840 8841out: 8842 btrfs_free_block_rsv(root, rsv); 8843 8844 if (ret && !err) 8845 err = ret; 8846 8847 return err; 8848} 8849 8850/* 8851 * create a new subvolume directory/inode (helper for the ioctl). 
8852 */ 8853int btrfs_create_subvol_root(struct btrfs_trans_handle *trans, 8854 struct btrfs_root *new_root, 8855 struct btrfs_root *parent_root, 8856 u64 new_dirid) 8857{ 8858 struct inode *inode; 8859 int err; 8860 u64 index = 0; 8861 8862 inode = btrfs_new_inode(trans, new_root, NULL, "..", 2, 8863 new_dirid, new_dirid, 8864 S_IFDIR | (~current_umask() & S_IRWXUGO), 8865 &index); 8866 if (IS_ERR(inode)) 8867 return PTR_ERR(inode); 8868 inode->i_op = &btrfs_dir_inode_operations; 8869 inode->i_fop = &btrfs_dir_file_operations; 8870 8871 set_nlink(inode, 1); 8872 btrfs_i_size_write(inode, 0); 8873 unlock_new_inode(inode); 8874 8875 err = btrfs_subvol_inherit_props(trans, new_root, parent_root); 8876 if (err) 8877 btrfs_err(new_root->fs_info, 8878 "error inheriting subvolume %llu properties: %d", 8879 new_root->root_key.objectid, err); 8880 8881 err = btrfs_update_inode(trans, new_root, inode); 8882 8883 iput(inode); 8884 return err; 8885} 8886 8887struct inode *btrfs_alloc_inode(struct super_block *sb) 8888{ 8889 struct btrfs_inode *ei; 8890 struct inode *inode; 8891 8892 ei = kmem_cache_alloc(btrfs_inode_cachep, GFP_NOFS); 8893 if (!ei) 8894 return NULL; 8895 8896 ei->root = NULL; 8897 ei->generation = 0; 8898 ei->last_trans = 0; 8899 ei->last_sub_trans = 0; 8900 ei->logged_trans = 0; 8901 ei->delalloc_bytes = 0; 8902 ei->defrag_bytes = 0; 8903 ei->disk_i_size = 0; 8904 ei->flags = 0; 8905 ei->csum_bytes = 0; 8906 ei->index_cnt = (u64)-1; 8907 ei->dir_index = 0; 8908 ei->last_unlink_trans = 0; 8909 ei->last_log_commit = 0; 8910 ei->delayed_iput_count = 0; 8911 8912 spin_lock_init(&ei->lock); 8913 ei->outstanding_extents = 0; 8914 ei->reserved_extents = 0; 8915 8916 ei->runtime_flags = 0; 8917 ei->force_compress = BTRFS_COMPRESS_NONE; 8918 8919 ei->delayed_node = NULL; 8920 8921 ei->i_otime.tv_sec = 0; 8922 ei->i_otime.tv_nsec = 0; 8923 8924 inode = &ei->vfs_inode; 8925 extent_map_tree_init(&ei->extent_tree); 8926 extent_io_tree_init(&ei->io_tree, &inode->i_data); 8927 extent_io_tree_init(&ei->io_failure_tree, &inode->i_data); 8928 ei->io_tree.track_uptodate = 1; 8929 ei->io_failure_tree.track_uptodate = 1; 8930 atomic_set(&ei->sync_writers, 0); 8931 mutex_init(&ei->log_mutex); 8932 mutex_init(&ei->delalloc_mutex); 8933 btrfs_ordered_inode_tree_init(&ei->ordered_tree); 8934 INIT_LIST_HEAD(&ei->delalloc_inodes); 8935 INIT_LIST_HEAD(&ei->delayed_iput); 8936 RB_CLEAR_NODE(&ei->rb_node); 8937 8938 return inode; 8939} 8940 8941#ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS 8942void btrfs_test_destroy_inode(struct inode *inode) 8943{ 8944 btrfs_drop_extent_cache(inode, 0, (u64)-1, 0); 8945 kmem_cache_free(btrfs_inode_cachep, BTRFS_I(inode)); 8946} 8947#endif 8948 8949static void btrfs_i_callback(struct rcu_head *head) 8950{ 8951 struct inode *inode = container_of(head, struct inode, i_rcu); 8952 kmem_cache_free(btrfs_inode_cachep, BTRFS_I(inode)); 8953} 8954 8955void btrfs_destroy_inode(struct inode *inode) 8956{ 8957 struct btrfs_ordered_extent *ordered; 8958 struct btrfs_root *root = BTRFS_I(inode)->root; 8959 8960 WARN_ON(!hlist_empty(&inode->i_dentry)); 8961 WARN_ON(inode->i_data.nrpages); 8962 WARN_ON(BTRFS_I(inode)->outstanding_extents); 8963 WARN_ON(BTRFS_I(inode)->reserved_extents); 8964 WARN_ON(BTRFS_I(inode)->delalloc_bytes); 8965 WARN_ON(BTRFS_I(inode)->csum_bytes); 8966 WARN_ON(BTRFS_I(inode)->defrag_bytes); 8967 8968 /* 8969 * This can happen where we create an inode, but somebody else also 8970 * created the same inode and we need to destroy the one we already 8971 * created. 
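 * In that case the inode never had a root assigned, so every bit of
 * per-root cleanup below is skipped and the in-memory inode is simply
 * freed.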
8972 */ 8973 if (!root) 8974 goto free; 8975 8976 if (test_bit(BTRFS_INODE_HAS_ORPHAN_ITEM, 8977 &BTRFS_I(inode)->runtime_flags)) { 8978 btrfs_info(root->fs_info, "inode %llu still on the orphan list", 8979 btrfs_ino(inode)); 8980 atomic_dec(&root->orphan_inodes); 8981 } 8982 8983 while (1) { 8984 ordered = btrfs_lookup_first_ordered_extent(inode, (u64)-1); 8985 if (!ordered) 8986 break; 8987 else { 8988 btrfs_err(root->fs_info, "found ordered extent %llu %llu on inode cleanup", 8989 ordered->file_offset, ordered->len); 8990 btrfs_remove_ordered_extent(inode, ordered); 8991 btrfs_put_ordered_extent(ordered); 8992 btrfs_put_ordered_extent(ordered); 8993 } 8994 } 8995 inode_tree_del(inode); 8996 btrfs_drop_extent_cache(inode, 0, (u64)-1, 0); 8997free: 8998 call_rcu(&inode->i_rcu, btrfs_i_callback); 8999} 9000 9001int btrfs_drop_inode(struct inode *inode) 9002{ 9003 struct btrfs_root *root = BTRFS_I(inode)->root; 9004 9005 if (root == NULL) 9006 return 1; 9007 9008 /* the snap/subvol tree is on deleting */ 9009 if (btrfs_root_refs(&root->root_item) == 0) 9010 return 1; 9011 else 9012 return generic_drop_inode(inode); 9013} 9014 9015static void init_once(void *foo) 9016{ 9017 struct btrfs_inode *ei = (struct btrfs_inode *) foo; 9018 9019 inode_init_once(&ei->vfs_inode); 9020} 9021 9022void btrfs_destroy_cachep(void) 9023{ 9024 /* 9025 * Make sure all delayed rcu free inodes are flushed before we 9026 * destroy cache. 9027 */ 9028 rcu_barrier(); 9029 if (btrfs_inode_cachep) 9030 kmem_cache_destroy(btrfs_inode_cachep); 9031 if (btrfs_trans_handle_cachep) 9032 kmem_cache_destroy(btrfs_trans_handle_cachep); 9033 if (btrfs_transaction_cachep) 9034 kmem_cache_destroy(btrfs_transaction_cachep); 9035 if (btrfs_path_cachep) 9036 kmem_cache_destroy(btrfs_path_cachep); 9037 if (btrfs_free_space_cachep) 9038 kmem_cache_destroy(btrfs_free_space_cachep); 9039 if (btrfs_delalloc_work_cachep) 9040 kmem_cache_destroy(btrfs_delalloc_work_cachep); 9041} 9042 9043int btrfs_init_cachep(void) 9044{ 9045 btrfs_inode_cachep = kmem_cache_create("btrfs_inode", 9046 sizeof(struct btrfs_inode), 0, 9047 SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD, init_once); 9048 if (!btrfs_inode_cachep) 9049 goto fail; 9050 9051 btrfs_trans_handle_cachep = kmem_cache_create("btrfs_trans_handle", 9052 sizeof(struct btrfs_trans_handle), 0, 9053 SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD, NULL); 9054 if (!btrfs_trans_handle_cachep) 9055 goto fail; 9056 9057 btrfs_transaction_cachep = kmem_cache_create("btrfs_transaction", 9058 sizeof(struct btrfs_transaction), 0, 9059 SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD, NULL); 9060 if (!btrfs_transaction_cachep) 9061 goto fail; 9062 9063 btrfs_path_cachep = kmem_cache_create("btrfs_path", 9064 sizeof(struct btrfs_path), 0, 9065 SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD, NULL); 9066 if (!btrfs_path_cachep) 9067 goto fail; 9068 9069 btrfs_free_space_cachep = kmem_cache_create("btrfs_free_space", 9070 sizeof(struct btrfs_free_space), 0, 9071 SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD, NULL); 9072 if (!btrfs_free_space_cachep) 9073 goto fail; 9074 9075 btrfs_delalloc_work_cachep = kmem_cache_create("btrfs_delalloc_work", 9076 sizeof(struct btrfs_delalloc_work), 0, 9077 SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD, 9078 NULL); 9079 if (!btrfs_delalloc_work_cachep) 9080 goto fail; 9081 9082 return 0; 9083fail: 9084 btrfs_destroy_cachep(); 9085 return -ENOMEM; 9086} 9087 9088static int btrfs_getattr(struct vfsmount *mnt, 9089 struct dentry *dentry, struct kstat *stat) 9090{ 9091 u64 delalloc_bytes; 9092 struct inode *inode = 
d_inode(dentry); 9093 u32 blocksize = inode->i_sb->s_blocksize; 9094 9095 generic_fillattr(inode, stat); 9096 stat->dev = BTRFS_I(inode)->root->anon_dev; 9097 stat->blksize = PAGE_CACHE_SIZE; 9098 9099 spin_lock(&BTRFS_I(inode)->lock); 9100 delalloc_bytes = BTRFS_I(inode)->delalloc_bytes; 9101 spin_unlock(&BTRFS_I(inode)->lock); 9102 stat->blocks = (ALIGN(inode_get_bytes(inode), blocksize) + 9103 ALIGN(delalloc_bytes, blocksize)) >> 9; 9104 return 0; 9105} 9106 9107static int btrfs_rename(struct inode *old_dir, struct dentry *old_dentry, 9108 struct inode *new_dir, struct dentry *new_dentry) 9109{ 9110 struct btrfs_trans_handle *trans; 9111 struct btrfs_root *root = BTRFS_I(old_dir)->root; 9112 struct btrfs_root *dest = BTRFS_I(new_dir)->root; 9113 struct inode *new_inode = d_inode(new_dentry); 9114 struct inode *old_inode = d_inode(old_dentry); 9115 struct timespec ctime = CURRENT_TIME; 9116 u64 index = 0; 9117 u64 root_objectid; 9118 int ret; 9119 u64 old_ino = btrfs_ino(old_inode); 9120 9121 if (btrfs_ino(new_dir) == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID) 9122 return -EPERM; 9123 9124 /* we only allow rename subvolume link between subvolumes */ 9125 if (old_ino != BTRFS_FIRST_FREE_OBJECTID && root != dest) 9126 return -EXDEV; 9127 9128 if (old_ino == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID || 9129 (new_inode && btrfs_ino(new_inode) == BTRFS_FIRST_FREE_OBJECTID)) 9130 return -ENOTEMPTY; 9131 9132 if (S_ISDIR(old_inode->i_mode) && new_inode && 9133 new_inode->i_size > BTRFS_EMPTY_DIR_SIZE) 9134 return -ENOTEMPTY; 9135 9136 9137 /* check for collisions, even if the name isn't there */ 9138 ret = btrfs_check_dir_item_collision(dest, new_dir->i_ino, 9139 new_dentry->d_name.name, 9140 new_dentry->d_name.len); 9141 9142 if (ret) { 9143 if (ret == -EEXIST) { 9144 /* we shouldn't get 9145 * eexist without a new_inode */ 9146 if (WARN_ON(!new_inode)) { 9147 return ret; 9148 } 9149 } else { 9150 /* maybe -EOVERFLOW */ 9151 return ret; 9152 } 9153 } 9154 ret = 0; 9155 9156 /* 9157 * we're using rename to replace one file with another. Start IO on it 9158 * now so we don't add too much work to the end of the transaction 9159 */ 9160 if (new_inode && S_ISREG(old_inode->i_mode) && new_inode->i_size) 9161 filemap_flush(old_inode->i_mapping); 9162 9163 /* close the racy window with snapshot create/destroy ioctl */ 9164 if (old_ino == BTRFS_FIRST_FREE_OBJECTID) 9165 down_read(&root->fs_info->subvol_sem); 9166 /* 9167 * We want to reserve the absolute worst case amount of items. So if 9168 * both inodes are subvols and we need to unlink them then that would 9169 * require 4 item modifications, but if they are both normal inodes it 9170 * would require 5 item modifications, so we'll assume their normal 9171 * inodes. So 5 * 2 is 10, plus 1 for the new link, so 11 total items 9172 * should cover the worst case number of items we'll modify. 9173 */ 9174 trans = btrfs_start_transaction(root, 11); 9175 if (IS_ERR(trans)) { 9176 ret = PTR_ERR(trans); 9177 goto out_notrans; 9178 } 9179 9180 if (dest != root) 9181 btrfs_record_root_in_trans(trans, dest); 9182 9183 ret = btrfs_set_inode_index(new_dir, &index); 9184 if (ret) 9185 goto out_fail; 9186 9187 BTRFS_I(old_inode)->dir_index = 0ULL; 9188 if (unlikely(old_ino == BTRFS_FIRST_FREE_OBJECTID)) { 9189 /* force full log commit if subvolume involved. 
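		 * btrfs_set_log_full_commit makes any fsync in this
		 * transaction fall back to a full transaction commit
		 * instead of relying on the tree log.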
*/ 9190 btrfs_set_log_full_commit(root->fs_info, trans); 9191 } else { 9192 ret = btrfs_insert_inode_ref(trans, dest, 9193 new_dentry->d_name.name, 9194 new_dentry->d_name.len, 9195 old_ino, 9196 btrfs_ino(new_dir), index); 9197 if (ret) 9198 goto out_fail; 9199 /* 9200 * this is an ugly little race, but the rename is required 9201 * to make sure that if we crash, the inode is either at the 9202 * old name or the new one. pinning the log transaction lets 9203 * us make sure we don't allow a log commit to come in after 9204 * we unlink the name but before we add the new name back in. 9205 */ 9206 btrfs_pin_log_trans(root); 9207 } 9208 9209 inode_inc_iversion(old_dir); 9210 inode_inc_iversion(new_dir); 9211 inode_inc_iversion(old_inode); 9212 old_dir->i_ctime = old_dir->i_mtime = ctime; 9213 new_dir->i_ctime = new_dir->i_mtime = ctime; 9214 old_inode->i_ctime = ctime; 9215 9216 if (old_dentry->d_parent != new_dentry->d_parent) 9217 btrfs_record_unlink_dir(trans, old_dir, old_inode, 1); 9218 9219 if (unlikely(old_ino == BTRFS_FIRST_FREE_OBJECTID)) { 9220 root_objectid = BTRFS_I(old_inode)->root->root_key.objectid; 9221 ret = btrfs_unlink_subvol(trans, root, old_dir, root_objectid, 9222 old_dentry->d_name.name, 9223 old_dentry->d_name.len); 9224 } else { 9225 ret = __btrfs_unlink_inode(trans, root, old_dir, 9226 d_inode(old_dentry), 9227 old_dentry->d_name.name, 9228 old_dentry->d_name.len); 9229 if (!ret) 9230 ret = btrfs_update_inode(trans, root, old_inode); 9231 } 9232 if (ret) { 9233 btrfs_abort_transaction(trans, root, ret); 9234 goto out_fail; 9235 } 9236 9237 if (new_inode) { 9238 inode_inc_iversion(new_inode); 9239 new_inode->i_ctime = CURRENT_TIME; 9240 if (unlikely(btrfs_ino(new_inode) == 9241 BTRFS_EMPTY_SUBVOL_DIR_OBJECTID)) { 9242 root_objectid = BTRFS_I(new_inode)->location.objectid; 9243 ret = btrfs_unlink_subvol(trans, dest, new_dir, 9244 root_objectid, 9245 new_dentry->d_name.name, 9246 new_dentry->d_name.len); 9247 BUG_ON(new_inode->i_nlink == 0); 9248 } else { 9249 ret = btrfs_unlink_inode(trans, dest, new_dir, 9250 d_inode(new_dentry), 9251 new_dentry->d_name.name, 9252 new_dentry->d_name.len); 9253 } 9254 if (!ret && new_inode->i_nlink == 0) 9255 ret = btrfs_orphan_add(trans, d_inode(new_dentry)); 9256 if (ret) { 9257 btrfs_abort_transaction(trans, root, ret); 9258 goto out_fail; 9259 } 9260 } 9261 9262 ret = btrfs_add_link(trans, new_dir, old_inode, 9263 new_dentry->d_name.name, 9264 new_dentry->d_name.len, 0, index); 9265 if (ret) { 9266 btrfs_abort_transaction(trans, root, ret); 9267 goto out_fail; 9268 } 9269 9270 if (old_inode->i_nlink == 1) 9271 BTRFS_I(old_inode)->dir_index = index; 9272 9273 if (old_ino != BTRFS_FIRST_FREE_OBJECTID) { 9274 struct dentry *parent = new_dentry->d_parent; 9275 btrfs_log_new_name(trans, old_inode, old_dir, parent); 9276 btrfs_end_log_trans(root); 9277 } 9278out_fail: 9279 btrfs_end_transaction(trans, root); 9280out_notrans: 9281 if (old_ino == BTRFS_FIRST_FREE_OBJECTID) 9282 up_read(&root->fs_info->subvol_sem); 9283 9284 return ret; 9285} 9286 9287static int btrfs_rename2(struct inode *old_dir, struct dentry *old_dentry, 9288 struct inode *new_dir, struct dentry *new_dentry, 9289 unsigned int flags) 9290{ 9291 if (flags & ~RENAME_NOREPLACE) 9292 return -EINVAL; 9293 9294 return btrfs_rename(old_dir, old_dentry, new_dir, new_dentry); 9295} 9296 9297static void btrfs_run_delalloc_work(struct btrfs_work *work) 9298{ 9299 struct btrfs_delalloc_work *delalloc_work; 9300 struct inode *inode; 9301 9302 delalloc_work = container_of(work, struct 
btrfs_delalloc_work, 9303 work); 9304 inode = delalloc_work->inode; 9305 if (delalloc_work->wait) { 9306 btrfs_wait_ordered_range(inode, 0, (u64)-1); 9307 } else { 9308 filemap_flush(inode->i_mapping); 9309 if (test_bit(BTRFS_INODE_HAS_ASYNC_EXTENT, 9310 &BTRFS_I(inode)->runtime_flags)) 9311 filemap_flush(inode->i_mapping); 9312 } 9313 9314 if (delalloc_work->delay_iput) 9315 btrfs_add_delayed_iput(inode); 9316 else 9317 iput(inode); 9318 complete(&delalloc_work->completion); 9319} 9320 9321struct btrfs_delalloc_work *btrfs_alloc_delalloc_work(struct inode *inode, 9322 int wait, int delay_iput) 9323{ 9324 struct btrfs_delalloc_work *work; 9325 9326 work = kmem_cache_zalloc(btrfs_delalloc_work_cachep, GFP_NOFS); 9327 if (!work) 9328 return NULL; 9329 9330 init_completion(&work->completion); 9331 INIT_LIST_HEAD(&work->list); 9332 work->inode = inode; 9333 work->wait = wait; 9334 work->delay_iput = delay_iput; 9335 WARN_ON_ONCE(!inode); 9336 btrfs_init_work(&work->work, btrfs_flush_delalloc_helper, 9337 btrfs_run_delalloc_work, NULL, NULL); 9338 9339 return work; 9340} 9341 9342void btrfs_wait_and_free_delalloc_work(struct btrfs_delalloc_work *work) 9343{ 9344 wait_for_completion(&work->completion); 9345 kmem_cache_free(btrfs_delalloc_work_cachep, work); 9346} 9347 9348/* 9349 * some fairly slow code that needs optimization. This walks the list 9350 * of all the inodes with pending delalloc and forces them to disk. 9351 */ 9352static int __start_delalloc_inodes(struct btrfs_root *root, int delay_iput, 9353 int nr) 9354{ 9355 struct btrfs_inode *binode; 9356 struct inode *inode; 9357 struct btrfs_delalloc_work *work, *next; 9358 struct list_head works; 9359 struct list_head splice; 9360 int ret = 0; 9361 9362 INIT_LIST_HEAD(&works); 9363 INIT_LIST_HEAD(&splice); 9364 9365 mutex_lock(&root->delalloc_mutex); 9366 spin_lock(&root->delalloc_lock); 9367 list_splice_init(&root->delalloc_inodes, &splice); 9368 while (!list_empty(&splice)) { 9369 binode = list_entry(splice.next, struct btrfs_inode, 9370 delalloc_inodes); 9371 9372 list_move_tail(&binode->delalloc_inodes, 9373 &root->delalloc_inodes); 9374 inode = igrab(&binode->vfs_inode); 9375 if (!inode) { 9376 cond_resched_lock(&root->delalloc_lock); 9377 continue; 9378 } 9379 spin_unlock(&root->delalloc_lock); 9380 9381 work = btrfs_alloc_delalloc_work(inode, 0, delay_iput); 9382 if (!work) { 9383 if (delay_iput) 9384 btrfs_add_delayed_iput(inode); 9385 else 9386 iput(inode); 9387 ret = -ENOMEM; 9388 goto out; 9389 } 9390 list_add_tail(&work->list, &works); 9391 btrfs_queue_work(root->fs_info->flush_workers, 9392 &work->work); 9393 ret++; 9394 if (nr != -1 && ret >= nr) 9395 goto out; 9396 cond_resched(); 9397 spin_lock(&root->delalloc_lock); 9398 } 9399 spin_unlock(&root->delalloc_lock); 9400 9401out: 9402 list_for_each_entry_safe(work, next, &works, list) { 9403 list_del_init(&work->list); 9404 btrfs_wait_and_free_delalloc_work(work); 9405 } 9406 9407 if (!list_empty_careful(&splice)) { 9408 spin_lock(&root->delalloc_lock); 9409 list_splice_tail(&splice, &root->delalloc_inodes); 9410 spin_unlock(&root->delalloc_lock); 9411 } 9412 mutex_unlock(&root->delalloc_mutex); 9413 return ret; 9414} 9415 9416int btrfs_start_delalloc_inodes(struct btrfs_root *root, int delay_iput) 9417{ 9418 int ret; 9419 9420 if (test_bit(BTRFS_FS_STATE_ERROR, &root->fs_info->fs_state)) 9421 return -EROFS; 9422 9423 ret = __start_delalloc_inodes(root, delay_iput, -1); 9424 if (ret > 0) 9425 ret = 0; 9426 /* 9427 * the filemap_flush will queue IO into the worker threads, 
but 9428 * we have to make sure the IO is actually started and that 9429 * ordered extents get created before we return 9430 */ 9431 atomic_inc(&root->fs_info->async_submit_draining); 9432 while (atomic_read(&root->fs_info->nr_async_submits) || 9433 atomic_read(&root->fs_info->async_delalloc_pages)) { 9434 wait_event(root->fs_info->async_submit_wait, 9435 (atomic_read(&root->fs_info->nr_async_submits) == 0 && 9436 atomic_read(&root->fs_info->async_delalloc_pages) == 0)); 9437 } 9438 atomic_dec(&root->fs_info->async_submit_draining); 9439 return ret; 9440} 9441 9442int btrfs_start_delalloc_roots(struct btrfs_fs_info *fs_info, int delay_iput, 9443 int nr) 9444{ 9445 struct btrfs_root *root; 9446 struct list_head splice; 9447 int ret; 9448 9449 if (test_bit(BTRFS_FS_STATE_ERROR, &fs_info->fs_state)) 9450 return -EROFS; 9451 9452 INIT_LIST_HEAD(&splice); 9453 9454 mutex_lock(&fs_info->delalloc_root_mutex); 9455 spin_lock(&fs_info->delalloc_root_lock); 9456 list_splice_init(&fs_info->delalloc_roots, &splice); 9457 while (!list_empty(&splice) && nr) { 9458 root = list_first_entry(&splice, struct btrfs_root, 9459 delalloc_root); 9460 root = btrfs_grab_fs_root(root); 9461 BUG_ON(!root); 9462 list_move_tail(&root->delalloc_root, 9463 &fs_info->delalloc_roots); 9464 spin_unlock(&fs_info->delalloc_root_lock); 9465 9466 ret = __start_delalloc_inodes(root, delay_iput, nr); 9467 btrfs_put_fs_root(root); 9468 if (ret < 0) 9469 goto out; 9470 9471 if (nr != -1) { 9472 nr -= ret; 9473 WARN_ON(nr < 0); 9474 } 9475 spin_lock(&fs_info->delalloc_root_lock); 9476 } 9477 spin_unlock(&fs_info->delalloc_root_lock); 9478 9479 ret = 0; 9480 atomic_inc(&fs_info->async_submit_draining); 9481 while (atomic_read(&fs_info->nr_async_submits) || 9482 atomic_read(&fs_info->async_delalloc_pages)) { 9483 wait_event(fs_info->async_submit_wait, 9484 (atomic_read(&fs_info->nr_async_submits) == 0 && 9485 atomic_read(&fs_info->async_delalloc_pages) == 0)); 9486 } 9487 atomic_dec(&fs_info->async_submit_draining); 9488out: 9489 if (!list_empty_careful(&splice)) { 9490 spin_lock(&fs_info->delalloc_root_lock); 9491 list_splice_tail(&splice, &fs_info->delalloc_roots); 9492 spin_unlock(&fs_info->delalloc_root_lock); 9493 } 9494 mutex_unlock(&fs_info->delalloc_root_mutex); 9495 return ret; 9496} 9497 9498static int btrfs_symlink(struct inode *dir, struct dentry *dentry, 9499 const char *symname) 9500{ 9501 struct btrfs_trans_handle *trans; 9502 struct btrfs_root *root = BTRFS_I(dir)->root; 9503 struct btrfs_path *path; 9504 struct btrfs_key key; 9505 struct inode *inode = NULL; 9506 int err; 9507 int drop_inode = 0; 9508 u64 objectid; 9509 u64 index = 0; 9510 int name_len; 9511 int datasize; 9512 unsigned long ptr; 9513 struct btrfs_file_extent_item *ei; 9514 struct extent_buffer *leaf; 9515 9516 name_len = strlen(symname); 9517 if (name_len > BTRFS_MAX_INLINE_DATA_SIZE(root)) 9518 return -ENAMETOOLONG; 9519 9520 /* 9521 * 2 items for inode item and ref 9522 * 2 items for dir items 9523 * 1 item for updating parent inode item 9524 * 1 item for the inline extent item 9525 * 1 item for xattr if selinux is on 9526 */ 9527 trans = btrfs_start_transaction(root, 7); 9528 if (IS_ERR(trans)) 9529 return PTR_ERR(trans); 9530 9531 err = btrfs_find_free_ino(root, &objectid); 9532 if (err) 9533 goto out_unlock; 9534 9535 inode = btrfs_new_inode(trans, root, dir, dentry->d_name.name, 9536 dentry->d_name.len, btrfs_ino(dir), objectid, 9537 S_IFLNK|S_IRWXUGO, &index); 9538 if (IS_ERR(inode)) { 9539 err = PTR_ERR(inode); 9540 goto out_unlock; 9541 } 
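	/*
	 * From here on, failures unwind through out_unlock_inode so the
	 * still-locked new inode is unlocked before it is dropped.
	 */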
9542 9543 /* 9544 * If the active LSM wants to access the inode during 9545 * d_instantiate it needs these. Smack checks to see 9546 * if the filesystem supports xattrs by looking at the 9547 * ops vector. 9548 */ 9549 inode->i_fop = &btrfs_file_operations; 9550 inode->i_op = &btrfs_file_inode_operations; 9551 inode->i_mapping->a_ops = &btrfs_aops; 9552 BTRFS_I(inode)->io_tree.ops = &btrfs_extent_io_ops; 9553 9554 err = btrfs_init_inode_security(trans, inode, dir, &dentry->d_name); 9555 if (err) 9556 goto out_unlock_inode; 9557 9558 err = btrfs_add_nondir(trans, dir, dentry, inode, 0, index); 9559 if (err) 9560 goto out_unlock_inode; 9561 9562 path = btrfs_alloc_path(); 9563 if (!path) { 9564 err = -ENOMEM; 9565 goto out_unlock_inode; 9566 } 9567 key.objectid = btrfs_ino(inode); 9568 key.offset = 0; 9569 key.type = BTRFS_EXTENT_DATA_KEY; 9570 datasize = btrfs_file_extent_calc_inline_size(name_len); 9571 err = btrfs_insert_empty_item(trans, root, path, &key, 9572 datasize); 9573 if (err) { 9574 btrfs_free_path(path); 9575 goto out_unlock_inode; 9576 } 9577 leaf = path->nodes[0]; 9578 ei = btrfs_item_ptr(leaf, path->slots[0], 9579 struct btrfs_file_extent_item); 9580 btrfs_set_file_extent_generation(leaf, ei, trans->transid); 9581 btrfs_set_file_extent_type(leaf, ei, 9582 BTRFS_FILE_EXTENT_INLINE); 9583 btrfs_set_file_extent_encryption(leaf, ei, 0); 9584 btrfs_set_file_extent_compression(leaf, ei, 0); 9585 btrfs_set_file_extent_other_encoding(leaf, ei, 0); 9586 btrfs_set_file_extent_ram_bytes(leaf, ei, name_len); 9587 9588 ptr = btrfs_file_extent_inline_start(ei); 9589 write_extent_buffer(leaf, symname, ptr, name_len); 9590 btrfs_mark_buffer_dirty(leaf); 9591 btrfs_free_path(path); 9592 9593 inode->i_op = &btrfs_symlink_inode_operations; 9594 inode->i_mapping->a_ops = &btrfs_symlink_aops; 9595 inode_set_bytes(inode, name_len); 9596 btrfs_i_size_write(inode, name_len); 9597 err = btrfs_update_inode(trans, root, inode); 9598 if (err) { 9599 drop_inode = 1; 9600 goto out_unlock_inode; 9601 } 9602 9603 unlock_new_inode(inode); 9604 d_instantiate(dentry, inode); 9605 9606out_unlock: 9607 btrfs_end_transaction(trans, root); 9608 if (drop_inode) { 9609 inode_dec_link_count(inode); 9610 iput(inode); 9611 } 9612 btrfs_btree_balance_dirty(root); 9613 return err; 9614 9615out_unlock_inode: 9616 drop_inode = 1; 9617 unlock_new_inode(inode); 9618 goto out_unlock; 9619} 9620 9621static int __btrfs_prealloc_file_range(struct inode *inode, int mode, 9622 u64 start, u64 num_bytes, u64 min_size, 9623 loff_t actual_len, u64 *alloc_hint, 9624 struct btrfs_trans_handle *trans) 9625{ 9626 struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree; 9627 struct extent_map *em; 9628 struct btrfs_root *root = BTRFS_I(inode)->root; 9629 struct btrfs_key ins; 9630 u64 cur_offset = start; 9631 u64 i_size; 9632 u64 cur_bytes; 9633 int ret = 0; 9634 bool own_trans = true; 9635 9636 if (trans) 9637 own_trans = false; 9638 while (num_bytes > 0) { 9639 if (own_trans) { 9640 trans = btrfs_start_transaction(root, 3); 9641 if (IS_ERR(trans)) { 9642 ret = PTR_ERR(trans); 9643 break; 9644 } 9645 } 9646 9647 cur_bytes = min(num_bytes, 256ULL * 1024 * 1024); 9648 cur_bytes = max(cur_bytes, min_size); 9649 ret = btrfs_reserve_extent(root, cur_bytes, min_size, 0, 9650 *alloc_hint, &ins, 1, 0); 9651 if (ret) { 9652 if (own_trans) 9653 btrfs_end_transaction(trans, root); 9654 break; 9655 } 9656 9657 ret = insert_reserved_file_extent(trans, inode, 9658 cur_offset, ins.objectid, 9659 ins.offset, ins.offset, 9660 ins.offset, 0, 0, 0, 
9661 BTRFS_FILE_EXTENT_PREALLOC); 9662 if (ret) { 9663 btrfs_free_reserved_extent(root, ins.objectid, 9664 ins.offset, 0); 9665 btrfs_abort_transaction(trans, root, ret); 9666 if (own_trans) 9667 btrfs_end_transaction(trans, root); 9668 break; 9669 } 9670 9671 btrfs_drop_extent_cache(inode, cur_offset, 9672 cur_offset + ins.offset -1, 0); 9673 9674 em = alloc_extent_map(); 9675 if (!em) { 9676 set_bit(BTRFS_INODE_NEEDS_FULL_SYNC, 9677 &BTRFS_I(inode)->runtime_flags); 9678 goto next; 9679 } 9680 9681 em->start = cur_offset; 9682 em->orig_start = cur_offset; 9683 em->len = ins.offset; 9684 em->block_start = ins.objectid; 9685 em->block_len = ins.offset; 9686 em->orig_block_len = ins.offset; 9687 em->ram_bytes = ins.offset; 9688 em->bdev = root->fs_info->fs_devices->latest_bdev; 9689 set_bit(EXTENT_FLAG_PREALLOC, &em->flags); 9690 em->generation = trans->transid; 9691 9692 while (1) { 9693 write_lock(&em_tree->lock); 9694 ret = add_extent_mapping(em_tree, em, 1); 9695 write_unlock(&em_tree->lock); 9696 if (ret != -EEXIST) 9697 break; 9698 btrfs_drop_extent_cache(inode, cur_offset, 9699 cur_offset + ins.offset - 1, 9700 0); 9701 } 9702 free_extent_map(em); 9703next: 9704 num_bytes -= ins.offset; 9705 cur_offset += ins.offset; 9706 *alloc_hint = ins.objectid + ins.offset; 9707 9708 inode_inc_iversion(inode); 9709 inode->i_ctime = CURRENT_TIME; 9710 BTRFS_I(inode)->flags |= BTRFS_INODE_PREALLOC; 9711 if (!(mode & FALLOC_FL_KEEP_SIZE) && 9712 (actual_len > inode->i_size) && 9713 (cur_offset > inode->i_size)) { 9714 if (cur_offset > actual_len) 9715 i_size = actual_len; 9716 else 9717 i_size = cur_offset; 9718 i_size_write(inode, i_size); 9719 btrfs_ordered_update_i_size(inode, i_size, NULL); 9720 } 9721 9722 ret = btrfs_update_inode(trans, root, inode); 9723 9724 if (ret) { 9725 btrfs_abort_transaction(trans, root, ret); 9726 if (own_trans) 9727 btrfs_end_transaction(trans, root); 9728 break; 9729 } 9730 9731 if (own_trans) 9732 btrfs_end_transaction(trans, root); 9733 } 9734 return ret; 9735} 9736 9737int btrfs_prealloc_file_range(struct inode *inode, int mode, 9738 u64 start, u64 num_bytes, u64 min_size, 9739 loff_t actual_len, u64 *alloc_hint) 9740{ 9741 return __btrfs_prealloc_file_range(inode, mode, start, num_bytes, 9742 min_size, actual_len, alloc_hint, 9743 NULL); 9744} 9745 9746int btrfs_prealloc_file_range_trans(struct inode *inode, 9747 struct btrfs_trans_handle *trans, int mode, 9748 u64 start, u64 num_bytes, u64 min_size, 9749 loff_t actual_len, u64 *alloc_hint) 9750{ 9751 return __btrfs_prealloc_file_range(inode, mode, start, num_bytes, 9752 min_size, actual_len, alloc_hint, trans); 9753} 9754 9755static int btrfs_set_page_dirty(struct page *page) 9756{ 9757 return __set_page_dirty_nobuffers(page); 9758} 9759 9760static int btrfs_permission(struct inode *inode, int mask) 9761{ 9762 struct btrfs_root *root = BTRFS_I(inode)->root; 9763 umode_t mode = inode->i_mode; 9764 9765 if (mask & MAY_WRITE && 9766 (S_ISREG(mode) || S_ISDIR(mode) || S_ISLNK(mode))) { 9767 if (btrfs_root_readonly(root)) 9768 return -EROFS; 9769 if (BTRFS_I(inode)->flags & BTRFS_INODE_READONLY) 9770 return -EACCES; 9771 } 9772 return generic_permission(inode, mask); 9773} 9774 9775static int btrfs_tmpfile(struct inode *dir, struct dentry *dentry, umode_t mode) 9776{ 9777 struct btrfs_trans_handle *trans; 9778 struct btrfs_root *root = BTRFS_I(dir)->root; 9779 struct inode *inode = NULL; 9780 u64 objectid; 9781 u64 index; 9782 int ret = 0; 9783 9784 /* 9785 * 5 units required for adding orphan entry 9786 */ 9787 
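	/*
	 * The new inode gets no directory entry; it is parked on the
	 * orphan list instead, so a crash before the tmpfile is linked
	 * or closed just leaves it for orphan cleanup to delete.
	 */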
trans = btrfs_start_transaction(root, 5); 9788 if (IS_ERR(trans)) 9789 return PTR_ERR(trans); 9790 9791 ret = btrfs_find_free_ino(root, &objectid); 9792 if (ret) 9793 goto out; 9794 9795 inode = btrfs_new_inode(trans, root, dir, NULL, 0, 9796 btrfs_ino(dir), objectid, mode, &index); 9797 if (IS_ERR(inode)) { 9798 ret = PTR_ERR(inode); 9799 inode = NULL; 9800 goto out; 9801 } 9802 9803 inode->i_fop = &btrfs_file_operations; 9804 inode->i_op = &btrfs_file_inode_operations; 9805 9806 inode->i_mapping->a_ops = &btrfs_aops; 9807 BTRFS_I(inode)->io_tree.ops = &btrfs_extent_io_ops; 9808 9809 ret = btrfs_init_inode_security(trans, inode, dir, NULL); 9810 if (ret) 9811 goto out_inode; 9812 9813 ret = btrfs_update_inode(trans, root, inode); 9814 if (ret) 9815 goto out_inode; 9816 ret = btrfs_orphan_add(trans, inode); 9817 if (ret) 9818 goto out_inode; 9819 9820 /* 9821 * We set number of links to 0 in btrfs_new_inode(), and here we set 9822 * it to 1 because d_tmpfile() will issue a warning if the count is 0, 9823 * through: 9824 * 9825 * d_tmpfile() -> inode_dec_link_count() -> drop_nlink() 9826 */ 9827 set_nlink(inode, 1); 9828 unlock_new_inode(inode); 9829 d_tmpfile(dentry, inode); 9830 mark_inode_dirty(inode); 9831 9832out: 9833 btrfs_end_transaction(trans, root); 9834 if (ret) 9835 iput(inode); 9836 btrfs_balance_delayed_items(root); 9837 btrfs_btree_balance_dirty(root); 9838 return ret; 9839 9840out_inode: 9841 unlock_new_inode(inode); 9842 goto out; 9843 9844} 9845 9846/* Inspired by filemap_check_errors() */ 9847int btrfs_inode_check_errors(struct inode *inode) 9848{ 9849 int ret = 0; 9850 9851 if (test_bit(AS_ENOSPC, &inode->i_mapping->flags) && 9852 test_and_clear_bit(AS_ENOSPC, &inode->i_mapping->flags)) 9853 ret = -ENOSPC; 9854 if (test_bit(AS_EIO, &inode->i_mapping->flags) && 9855 test_and_clear_bit(AS_EIO, &inode->i_mapping->flags)) 9856 ret = -EIO; 9857 9858 return ret; 9859} 9860 9861static const struct inode_operations btrfs_dir_inode_operations = { 9862 .getattr = btrfs_getattr, 9863 .lookup = btrfs_lookup, 9864 .create = btrfs_create, 9865 .unlink = btrfs_unlink, 9866 .link = btrfs_link, 9867 .mkdir = btrfs_mkdir, 9868 .rmdir = btrfs_rmdir, 9869 .rename2 = btrfs_rename2, 9870 .symlink = btrfs_symlink, 9871 .setattr = btrfs_setattr, 9872 .mknod = btrfs_mknod, 9873 .setxattr = btrfs_setxattr, 9874 .getxattr = btrfs_getxattr, 9875 .listxattr = btrfs_listxattr, 9876 .removexattr = btrfs_removexattr, 9877 .permission = btrfs_permission, 9878 .get_acl = btrfs_get_acl, 9879 .set_acl = btrfs_set_acl, 9880 .update_time = btrfs_update_time, 9881 .tmpfile = btrfs_tmpfile, 9882}; 9883static const struct inode_operations btrfs_dir_ro_inode_operations = { 9884 .lookup = btrfs_lookup, 9885 .permission = btrfs_permission, 9886 .get_acl = btrfs_get_acl, 9887 .set_acl = btrfs_set_acl, 9888 .update_time = btrfs_update_time, 9889}; 9890 9891static const struct file_operations btrfs_dir_file_operations = { 9892 .llseek = generic_file_llseek, 9893 .read = generic_read_dir, 9894 .iterate = btrfs_real_readdir, 9895 .unlocked_ioctl = btrfs_ioctl, 9896#ifdef CONFIG_COMPAT 9897 .compat_ioctl = btrfs_compat_ioctl, 9898#endif 9899 .release = btrfs_release_file, 9900 .fsync = btrfs_sync_file, 9901}; 9902 9903static struct extent_io_ops btrfs_extent_io_ops = { 9904 .fill_delalloc = run_delalloc_range, 9905 .submit_bio_hook = btrfs_submit_bio_hook, 9906 .merge_bio_hook = btrfs_merge_bio_hook, 9907 .readpage_end_io_hook = btrfs_readpage_end_io_hook, 9908 .writepage_end_io_hook = btrfs_writepage_end_io_hook, 9909 
.writepage_start_hook = btrfs_writepage_start_hook, 9910 .set_bit_hook = btrfs_set_bit_hook, 9911 .clear_bit_hook = btrfs_clear_bit_hook, 9912 .merge_extent_hook = btrfs_merge_extent_hook, 9913 .split_extent_hook = btrfs_split_extent_hook, 9914}; 9915 9916/* 9917 * btrfs doesn't support the bmap operation because swapfiles 9918 * use bmap to make a mapping of extents in the file. They assume 9919 * these extents won't change over the life of the file and they 9920 * use the bmap result to do IO directly to the drive. 9921 * 9922 * the btrfs bmap call would return logical addresses that aren't 9923 * suitable for IO and they also will change frequently as COW 9924 * operations happen. So, swapfile + btrfs == corruption. 9925 * 9926 * For now we're avoiding this by dropping bmap. 9927 */ 9928static const struct address_space_operations btrfs_aops = { 9929 .readpage = btrfs_readpage, 9930 .writepage = btrfs_writepage, 9931 .writepages = btrfs_writepages, 9932 .readpages = btrfs_readpages, 9933 .direct_IO = btrfs_direct_IO, 9934 .invalidatepage = btrfs_invalidatepage, 9935 .releasepage = btrfs_releasepage, 9936 .set_page_dirty = btrfs_set_page_dirty, 9937 .error_remove_page = generic_error_remove_page, 9938}; 9939 9940static const struct address_space_operations btrfs_symlink_aops = { 9941 .readpage = btrfs_readpage, 9942 .writepage = btrfs_writepage, 9943 .invalidatepage = btrfs_invalidatepage, 9944 .releasepage = btrfs_releasepage, 9945}; 9946 9947static const struct inode_operations btrfs_file_inode_operations = { 9948 .getattr = btrfs_getattr, 9949 .setattr = btrfs_setattr, 9950 .setxattr = btrfs_setxattr, 9951 .getxattr = btrfs_getxattr, 9952 .listxattr = btrfs_listxattr, 9953 .removexattr = btrfs_removexattr, 9954 .permission = btrfs_permission, 9955 .fiemap = btrfs_fiemap, 9956 .get_acl = btrfs_get_acl, 9957 .set_acl = btrfs_set_acl, 9958 .update_time = btrfs_update_time, 9959}; 9960static const struct inode_operations btrfs_special_inode_operations = { 9961 .getattr = btrfs_getattr, 9962 .setattr = btrfs_setattr, 9963 .permission = btrfs_permission, 9964 .setxattr = btrfs_setxattr, 9965 .getxattr = btrfs_getxattr, 9966 .listxattr = btrfs_listxattr, 9967 .removexattr = btrfs_removexattr, 9968 .get_acl = btrfs_get_acl, 9969 .set_acl = btrfs_set_acl, 9970 .update_time = btrfs_update_time, 9971}; 9972static const struct inode_operations btrfs_symlink_inode_operations = { 9973 .readlink = generic_readlink, 9974 .follow_link = page_follow_link_light, 9975 .put_link = page_put_link, 9976 .getattr = btrfs_getattr, 9977 .setattr = btrfs_setattr, 9978 .permission = btrfs_permission, 9979 .setxattr = btrfs_setxattr, 9980 .getxattr = btrfs_getxattr, 9981 .listxattr = btrfs_listxattr, 9982 .removexattr = btrfs_removexattr, 9983 .update_time = btrfs_update_time, 9984}; 9985 9986const struct dentry_operations btrfs_dentry_operations = { 9987 .d_delete = btrfs_dentry_delete, 9988 .d_release = btrfs_dentry_release, 9989}; 9990