Lines Matching refs: sctx

95 struct scrub_ctx *sctx; member
116 struct scrub_ctx *sctx; member
132 struct scrub_ctx *sctx; member
208 struct scrub_ctx *sctx; member
224 struct scrub_ctx *sctx; member
242 static void scrub_pending_bio_inc(struct scrub_ctx *sctx);
243 static void scrub_pending_bio_dec(struct scrub_ctx *sctx);
244 static void scrub_pending_trans_workers_inc(struct scrub_ctx *sctx);
245 static void scrub_pending_trans_workers_dec(struct scrub_ctx *sctx);
275 static int scrub_add_page_to_rd_bio(struct scrub_ctx *sctx,
277 static int scrub_pages(struct scrub_ctx *sctx, u64 logical, u64 len,
289 static int scrub_setup_wr_ctx(struct scrub_ctx *sctx,
295 static int scrub_add_page_to_wr_bio(struct scrub_ctx *sctx,
297 static void scrub_wr_submit(struct scrub_ctx *sctx);
300 static int write_page_nocow(struct scrub_ctx *sctx,
304 static int copy_nocow_pages(struct scrub_ctx *sctx, u64 logical, u64 len,
309 static void scrub_put_ctx(struct scrub_ctx *sctx);
312 static void scrub_pending_bio_inc(struct scrub_ctx *sctx) in scrub_pending_bio_inc() argument
314 atomic_inc(&sctx->refs); in scrub_pending_bio_inc()
315 atomic_inc(&sctx->bios_in_flight); in scrub_pending_bio_inc()
318 static void scrub_pending_bio_dec(struct scrub_ctx *sctx) in scrub_pending_bio_dec() argument
320 atomic_dec(&sctx->bios_in_flight); in scrub_pending_bio_dec()
321 wake_up(&sctx->list_wait); in scrub_pending_bio_dec()
322 scrub_put_ctx(sctx); in scrub_pending_bio_dec()
352 static void scrub_pending_trans_workers_inc(struct scrub_ctx *sctx) in scrub_pending_trans_workers_inc() argument
354 struct btrfs_fs_info *fs_info = sctx->dev_root->fs_info; in scrub_pending_trans_workers_inc()
356 atomic_inc(&sctx->refs); in scrub_pending_trans_workers_inc()
380 atomic_inc(&sctx->workers_pending); in scrub_pending_trans_workers_inc()
384 static void scrub_pending_trans_workers_dec(struct scrub_ctx *sctx) in scrub_pending_trans_workers_dec() argument
386 struct btrfs_fs_info *fs_info = sctx->dev_root->fs_info; in scrub_pending_trans_workers_dec()
396 atomic_dec(&sctx->workers_pending); in scrub_pending_trans_workers_dec()
398 wake_up(&sctx->list_wait); in scrub_pending_trans_workers_dec()
399 scrub_put_ctx(sctx); in scrub_pending_trans_workers_dec()
402 static void scrub_free_csums(struct scrub_ctx *sctx) in scrub_free_csums() argument
404 while (!list_empty(&sctx->csum_list)) { in scrub_free_csums()
406 sum = list_first_entry(&sctx->csum_list, in scrub_free_csums()
413 static noinline_for_stack void scrub_free_ctx(struct scrub_ctx *sctx) in scrub_free_ctx() argument
417 if (!sctx) in scrub_free_ctx()
420 scrub_free_wr_ctx(&sctx->wr_ctx); in scrub_free_ctx()
423 if (sctx->curr != -1) { in scrub_free_ctx()
424 struct scrub_bio *sbio = sctx->bios[sctx->curr]; in scrub_free_ctx()
434 struct scrub_bio *sbio = sctx->bios[i]; in scrub_free_ctx()
441 scrub_free_csums(sctx); in scrub_free_ctx()
442 kfree(sctx); in scrub_free_ctx()
445 static void scrub_put_ctx(struct scrub_ctx *sctx) in scrub_put_ctx() argument
447 if (atomic_dec_and_test(&sctx->refs)) in scrub_put_ctx()
448 scrub_free_ctx(sctx); in scrub_put_ctx()
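scrub_put_ctx() is the release half of that pairing: the context is freed only when the last reference, whether the initial one taken in scrub_setup_ctx() or one held by outstanding work, goes away. Continuing the sketch above (the real release path runs scrub_free_ctx(), which also tears down the bios array, the csum list, and wr_ctx before freeing):

    static void scrub_put_ctx_sketch(struct scrub_ctx_sketch *sctx)
    {
            /* atomic_dec_and_test() is true only for the final reference */
            if (atomic_dec_and_test(&sctx->refs))
                    kfree(sctx);        /* sketch; see scrub_free_ctx() above */
    }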
454 struct scrub_ctx *sctx; in scrub_setup_ctx() local
472 sctx = kzalloc(sizeof(*sctx), GFP_NOFS); in scrub_setup_ctx()
473 if (!sctx) in scrub_setup_ctx()
475 atomic_set(&sctx->refs, 1); in scrub_setup_ctx()
476 sctx->is_dev_replace = is_dev_replace; in scrub_setup_ctx()
477 sctx->pages_per_rd_bio = pages_per_rd_bio; in scrub_setup_ctx()
478 sctx->curr = -1; in scrub_setup_ctx()
479 sctx->dev_root = dev->dev_root; in scrub_setup_ctx()
486 sctx->bios[i] = sbio; in scrub_setup_ctx()
489 sbio->sctx = sctx; in scrub_setup_ctx()
495 sctx->bios[i]->next_free = i + 1; in scrub_setup_ctx()
497 sctx->bios[i]->next_free = -1; in scrub_setup_ctx()
499 sctx->first_free = 0; in scrub_setup_ctx()
500 sctx->nodesize = dev->dev_root->nodesize; in scrub_setup_ctx()
501 sctx->sectorsize = dev->dev_root->sectorsize; in scrub_setup_ctx()
502 atomic_set(&sctx->bios_in_flight, 0); in scrub_setup_ctx()
503 atomic_set(&sctx->workers_pending, 0); in scrub_setup_ctx()
504 atomic_set(&sctx->cancel_req, 0); in scrub_setup_ctx()
505 sctx->csum_size = btrfs_super_csum_size(fs_info->super_copy); in scrub_setup_ctx()
506 INIT_LIST_HEAD(&sctx->csum_list); in scrub_setup_ctx()
508 spin_lock_init(&sctx->list_lock); in scrub_setup_ctx()
509 spin_lock_init(&sctx->stat_lock); in scrub_setup_ctx()
510 init_waitqueue_head(&sctx->list_wait); in scrub_setup_ctx()
512 ret = scrub_setup_wr_ctx(sctx, &sctx->wr_ctx, fs_info, in scrub_setup_ctx()
515 scrub_free_ctx(sctx); in scrub_setup_ctx()
518 return sctx; in scrub_setup_ctx()
521 scrub_free_ctx(sctx); in scrub_setup_ctx()
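Lines 495-499 set up the structure that many of the later matches operate on: a free list of scrub_bio slots threaded through integer indices rather than pointers, with -1 serving both as the list terminator and as the "nothing here" marker for curr and first_free. Condensed from the fragments above (SCRUB_BIOS_PER_SCTX is the size of the bios array in scrub.c):

    for (i = 0; i < SCRUB_BIOS_PER_SCTX; ++i)
            sctx->bios[i]->next_free =
                    (i + 1 < SCRUB_BIOS_PER_SCTX) ? i + 1 : -1;
    sctx->first_free = 0;   /* head of the free list */
    sctx->curr = -1;        /* no bio is currently being filled */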
627 fs_info = sblock->sctx->dev_root->fs_info; in scrub_print_warning()
801 struct scrub_ctx *sctx; in scrub_fixup_nodatasum() local
807 sctx = fixup->sctx; in scrub_fixup_nodatasum()
811 spin_lock(&sctx->stat_lock); in scrub_fixup_nodatasum()
812 ++sctx->stat.malloc_errors; in scrub_fixup_nodatasum()
813 spin_unlock(&sctx->stat_lock); in scrub_fixup_nodatasum()
842 spin_lock(&sctx->stat_lock); in scrub_fixup_nodatasum()
843 ++sctx->stat.corrected_errors; in scrub_fixup_nodatasum()
844 spin_unlock(&sctx->stat_lock); in scrub_fixup_nodatasum()
850 spin_lock(&sctx->stat_lock); in scrub_fixup_nodatasum()
851 ++sctx->stat.uncorrectable_errors; in scrub_fixup_nodatasum()
852 spin_unlock(&sctx->stat_lock); in scrub_fixup_nodatasum()
854 &sctx->dev_root->fs_info->dev_replace. in scrub_fixup_nodatasum()
864 scrub_pending_trans_workers_dec(sctx); in scrub_fixup_nodatasum()
890 struct scrub_ctx *sctx = sblock_to_check->sctx; in scrub_handle_errored_block() local
910 fs_info = sctx->dev_root->fs_info; in scrub_handle_errored_block()
917 spin_lock(&sctx->stat_lock); in scrub_handle_errored_block()
918 ++sctx->stat.super_errors; in scrub_handle_errored_block()
919 spin_unlock(&sctx->stat_lock); in scrub_handle_errored_block()
933 if (sctx->is_dev_replace && !is_metadata && !have_csum) { in scrub_handle_errored_block()
970 spin_lock(&sctx->stat_lock); in scrub_handle_errored_block()
971 sctx->stat.malloc_errors++; in scrub_handle_errored_block()
972 sctx->stat.read_errors++; in scrub_handle_errored_block()
973 sctx->stat.uncorrectable_errors++; in scrub_handle_errored_block()
974 spin_unlock(&sctx->stat_lock); in scrub_handle_errored_block()
982 spin_lock(&sctx->stat_lock); in scrub_handle_errored_block()
983 sctx->stat.read_errors++; in scrub_handle_errored_block()
984 sctx->stat.uncorrectable_errors++; in scrub_handle_errored_block()
985 spin_unlock(&sctx->stat_lock); in scrub_handle_errored_block()
994 csum, generation, sctx->csum_size, 1); in scrub_handle_errored_block()
1006 spin_lock(&sctx->stat_lock); in scrub_handle_errored_block()
1007 sctx->stat.unverified_errors++; in scrub_handle_errored_block()
1009 spin_unlock(&sctx->stat_lock); in scrub_handle_errored_block()
1011 if (sctx->is_dev_replace) in scrub_handle_errored_block()
1017 spin_lock(&sctx->stat_lock); in scrub_handle_errored_block()
1018 sctx->stat.read_errors++; in scrub_handle_errored_block()
1019 spin_unlock(&sctx->stat_lock); in scrub_handle_errored_block()
1024 spin_lock(&sctx->stat_lock); in scrub_handle_errored_block()
1025 sctx->stat.csum_errors++; in scrub_handle_errored_block()
1026 spin_unlock(&sctx->stat_lock); in scrub_handle_errored_block()
1032 spin_lock(&sctx->stat_lock); in scrub_handle_errored_block()
1033 sctx->stat.verify_errors++; in scrub_handle_errored_block()
1034 spin_unlock(&sctx->stat_lock); in scrub_handle_errored_block()
1046 if (sctx->readonly) { in scrub_handle_errored_block()
1047 ASSERT(!sctx->is_dev_replace); in scrub_handle_errored_block()
1054 WARN_ON(sctx->is_dev_replace); in scrub_handle_errored_block()
1068 fixup_nodatasum->sctx = sctx; in scrub_handle_errored_block()
1073 scrub_pending_trans_workers_inc(sctx); in scrub_handle_errored_block()
1109 sctx->csum_size, 0); in scrub_handle_errored_block()
1114 if (sctx->is_dev_replace) { in scrub_handle_errored_block()
1126 if (sblock_bad->no_io_error_seen && !sctx->is_dev_replace) in scrub_handle_errored_block()
1160 if (!page_bad->io_error && !sctx->is_dev_replace) in scrub_handle_errored_block()
1180 if (sctx->is_dev_replace) { in scrub_handle_errored_block()
1194 &sctx->dev_root-> in scrub_handle_errored_block()
1210 if (success && !sctx->is_dev_replace) { in scrub_handle_errored_block()
1223 generation, sctx->csum_size, 1); in scrub_handle_errored_block()
1232 spin_lock(&sctx->stat_lock); in scrub_handle_errored_block()
1233 sctx->stat.corrected_errors++; in scrub_handle_errored_block()
1235 spin_unlock(&sctx->stat_lock); in scrub_handle_errored_block()
1242 spin_lock(&sctx->stat_lock); in scrub_handle_errored_block()
1243 sctx->stat.uncorrectable_errors++; in scrub_handle_errored_block()
1244 spin_unlock(&sctx->stat_lock); in scrub_handle_errored_block()
1320 struct scrub_ctx *sctx = original_sblock->sctx; in scrub_setup_recheck_block() local
1321 struct btrfs_fs_info *fs_info = sctx->dev_root->fs_info; in scrub_setup_recheck_block()
1377 sblock->sctx = sctx; in scrub_setup_recheck_block()
1381 spin_lock(&sctx->stat_lock); in scrub_setup_recheck_block()
1382 sctx->stat.malloc_errors++; in scrub_setup_recheck_block()
1383 spin_unlock(&sctx->stat_lock); in scrub_setup_recheck_block()
1654 &sblock_bad->sctx->dev_root->fs_info-> in scrub_repair_page_from_good_copy()
1682 &sblock->sctx->dev_root->fs_info->dev_replace. in scrub_write_block_to_dev_replace()
1700 return scrub_add_page_to_wr_bio(sblock->sctx, spage); in scrub_write_page_to_dev_replace()
1703 static int scrub_add_page_to_wr_bio(struct scrub_ctx *sctx, in scrub_add_page_to_wr_bio() argument
1706 struct scrub_wr_ctx *wr_ctx = &sctx->wr_ctx; in scrub_add_page_to_wr_bio()
1719 wr_ctx->wr_curr_bio->sctx = sctx; in scrub_add_page_to_wr_bio()
1748 scrub_wr_submit(sctx); in scrub_add_page_to_wr_bio()
1760 scrub_wr_submit(sctx); in scrub_add_page_to_wr_bio()
1768 scrub_wr_submit(sctx); in scrub_add_page_to_wr_bio()
1774 static void scrub_wr_submit(struct scrub_ctx *sctx) in scrub_wr_submit() argument
1776 struct scrub_wr_ctx *wr_ctx = &sctx->wr_ctx; in scrub_wr_submit()
1785 scrub_pending_bio_inc(sctx); in scrub_wr_submit()
1809 struct scrub_ctx *sctx = sbio->sctx; in scrub_wr_bio_end_io_worker() local
1815 &sbio->sctx->dev_root->fs_info->dev_replace; in scrub_wr_bio_end_io_worker()
1831 scrub_pending_bio_dec(sctx); in scrub_wr_bio_end_io_worker()
1858 struct scrub_ctx *sctx = sblock->sctx; in scrub_checksum_data() local
1876 len = sctx->sectorsize; in scrub_checksum_data()
1894 if (memcmp(csum, on_disk_csum, sctx->csum_size)) in scrub_checksum_data()
1902 struct scrub_ctx *sctx = sblock->sctx; in scrub_checksum_tree_block() local
1904 struct btrfs_root *root = sctx->dev_root; in scrub_checksum_tree_block()
1922 memcpy(on_disk_csum, h->csum, sctx->csum_size); in scrub_checksum_tree_block()
1943 len = sctx->nodesize - BTRFS_CSUM_SIZE; in scrub_checksum_tree_block()
1965 if (memcmp(calculated_csum, on_disk_csum, sctx->csum_size)) in scrub_checksum_tree_block()
1974 struct scrub_ctx *sctx = sblock->sctx; in scrub_checksum_super() local
1991 memcpy(on_disk_csum, s->csum, sctx->csum_size); in scrub_checksum_super()
2024 if (memcmp(calculated_csum, on_disk_csum, sctx->csum_size)) in scrub_checksum_super()
2033 spin_lock(&sctx->stat_lock); in scrub_checksum_super()
2034 ++sctx->stat.super_errors; in scrub_checksum_super()
2035 spin_unlock(&sctx->stat_lock); in scrub_checksum_super()
2080 static void scrub_submit(struct scrub_ctx *sctx) in scrub_submit() argument
2084 if (sctx->curr == -1) in scrub_submit()
2087 sbio = sctx->bios[sctx->curr]; in scrub_submit()
2088 sctx->curr = -1; in scrub_submit()
2089 scrub_pending_bio_inc(sctx); in scrub_submit()
2107 static int scrub_add_page_to_rd_bio(struct scrub_ctx *sctx, in scrub_add_page_to_rd_bio() argument
2118 while (sctx->curr == -1) { in scrub_add_page_to_rd_bio()
2119 spin_lock(&sctx->list_lock); in scrub_add_page_to_rd_bio()
2120 sctx->curr = sctx->first_free; in scrub_add_page_to_rd_bio()
2121 if (sctx->curr != -1) { in scrub_add_page_to_rd_bio()
2122 sctx->first_free = sctx->bios[sctx->curr]->next_free; in scrub_add_page_to_rd_bio()
2123 sctx->bios[sctx->curr]->next_free = -1; in scrub_add_page_to_rd_bio()
2124 sctx->bios[sctx->curr]->page_count = 0; in scrub_add_page_to_rd_bio()
2125 spin_unlock(&sctx->list_lock); in scrub_add_page_to_rd_bio()
2127 spin_unlock(&sctx->list_lock); in scrub_add_page_to_rd_bio()
2128 wait_event(sctx->list_wait, sctx->first_free != -1); in scrub_add_page_to_rd_bio()
2131 sbio = sctx->bios[sctx->curr]; in scrub_add_page_to_rd_bio()
2140 bio = btrfs_io_bio_alloc(GFP_NOFS, sctx->pages_per_rd_bio); in scrub_add_page_to_rd_bio()
2156 scrub_submit(sctx); in scrub_add_page_to_rd_bio()
2168 scrub_submit(sctx); in scrub_add_page_to_rd_bio()
2175 if (sbio->page_count == sctx->pages_per_rd_bio) in scrub_add_page_to_rd_bio()
2176 scrub_submit(sctx); in scrub_add_page_to_rd_bio()
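The fragments from scrub_add_page_to_rd_bio() (lines 2118-2131) and from scrub_bio_end_io_worker() (lines 2302-2305, further down) are the two halves of the free-list protocol initialized in scrub_setup_ctx(): submission pops a slot under list_lock or sleeps on list_wait until one returns, completion pushes its slot back, and the wake-up arrives via scrub_pending_bio_dec(). Reassembled for readability:

    /* checkout: pop the head, or wait for a completion to return a slot */
    while (sctx->curr == -1) {
            spin_lock(&sctx->list_lock);
            sctx->curr = sctx->first_free;
            if (sctx->curr != -1) {
                    sctx->first_free = sctx->bios[sctx->curr]->next_free;
                    sctx->bios[sctx->curr]->next_free = -1;
                    sctx->bios[sctx->curr]->page_count = 0;
                    spin_unlock(&sctx->list_lock);
            } else {
                    spin_unlock(&sctx->list_lock);
                    wait_event(sctx->list_wait, sctx->first_free != -1);
            }
    }

    /* return (in the end_io worker): push the finished bio back */
    spin_lock(&sctx->list_lock);
    sbio->next_free = sctx->first_free;
    sctx->first_free = sbio->index;
    spin_unlock(&sctx->list_lock);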
2181 static int scrub_pages(struct scrub_ctx *sctx, u64 logical, u64 len, in scrub_pages() argument
2191 spin_lock(&sctx->stat_lock); in scrub_pages()
2192 sctx->stat.malloc_errors++; in scrub_pages()
2193 spin_unlock(&sctx->stat_lock); in scrub_pages()
2200 sblock->sctx = sctx; in scrub_pages()
2210 spin_lock(&sctx->stat_lock); in scrub_pages()
2211 sctx->stat.malloc_errors++; in scrub_pages()
2212 spin_unlock(&sctx->stat_lock); in scrub_pages()
2229 memcpy(spage->csum, csum, sctx->csum_size); in scrub_pages()
2248 ret = scrub_add_page_to_rd_bio(sctx, spage); in scrub_pages()
2256 scrub_submit(sctx); in scrub_pages()
2277 struct scrub_ctx *sctx = sbio->sctx; in scrub_bio_end_io_worker() local
2302 spin_lock(&sctx->list_lock); in scrub_bio_end_io_worker()
2303 sbio->next_free = sctx->first_free; in scrub_bio_end_io_worker()
2304 sctx->first_free = sbio->index; in scrub_bio_end_io_worker()
2305 spin_unlock(&sctx->list_lock); in scrub_bio_end_io_worker()
2307 if (sctx->is_dev_replace && in scrub_bio_end_io_worker()
2308 atomic_read(&sctx->wr_ctx.flush_all_writes)) { in scrub_bio_end_io_worker()
2309 mutex_lock(&sctx->wr_ctx.wr_lock); in scrub_bio_end_io_worker()
2310 scrub_wr_submit(sctx); in scrub_bio_end_io_worker()
2311 mutex_unlock(&sctx->wr_ctx.wr_lock); in scrub_bio_end_io_worker()
2314 scrub_pending_bio_dec(sctx); in scrub_bio_end_io_worker()
2323 int sectorsize = sparity->sctx->dev_root->sectorsize; in __scrub_mark_bitmap()
2370 if (!corrupted && sblock->sctx->is_dev_replace) in scrub_block_complete()
2384 static int scrub_find_csum(struct scrub_ctx *sctx, u64 logical, u64 len, in scrub_find_csum() argument
2391 while (!list_empty(&sctx->csum_list)) { in scrub_find_csum()
2392 sum = list_first_entry(&sctx->csum_list, in scrub_find_csum()
2399 ++sctx->stat.csum_discards; in scrub_find_csum()
2407 index = ((u32)(logical - sum->bytenr)) / sctx->sectorsize; in scrub_find_csum()
2408 num_sectors = sum->len / sctx->sectorsize; in scrub_find_csum()
2409 memcpy(csum, sum->sums + index, sctx->csum_size); in scrub_find_csum()
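The arithmetic at lines 2407-2409 locates a single checksum inside a btrfs_ordered_sum, which covers sum->len bytes starting at logical address sum->bytenr with one csum_size checksum per sector. For example, with 4 KiB sectors a logical address 16 KiB past sum->bytenr yields index 4, i.e. the fifth per-sector checksum:

    index = ((u32)(logical - sum->bytenr)) / sctx->sectorsize; /* sector offset */
    num_sectors = sum->len / sctx->sectorsize;                 /* for bounds */
    memcpy(csum, sum->sums + index, sctx->csum_size);          /* copy one out */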
2418 static int scrub_extent(struct scrub_ctx *sctx, u64 logical, u64 len, in scrub_extent() argument
2427 blocksize = sctx->sectorsize; in scrub_extent()
2428 spin_lock(&sctx->stat_lock); in scrub_extent()
2429 sctx->stat.data_extents_scrubbed++; in scrub_extent()
2430 sctx->stat.data_bytes_scrubbed += len; in scrub_extent()
2431 spin_unlock(&sctx->stat_lock); in scrub_extent()
2433 blocksize = sctx->nodesize; in scrub_extent()
2434 spin_lock(&sctx->stat_lock); in scrub_extent()
2435 sctx->stat.tree_extents_scrubbed++; in scrub_extent()
2436 sctx->stat.tree_bytes_scrubbed += len; in scrub_extent()
2437 spin_unlock(&sctx->stat_lock); in scrub_extent()
2439 blocksize = sctx->sectorsize; in scrub_extent()
2449 have_csum = scrub_find_csum(sctx, logical, l, csum); in scrub_extent()
2451 ++sctx->stat.no_csum; in scrub_extent()
2452 if (sctx->is_dev_replace && !have_csum) { in scrub_extent()
2453 ret = copy_nocow_pages(sctx, logical, l, in scrub_extent()
2459 ret = scrub_pages(sctx, logical, l, physical, dev, flags, gen, in scrub_extent()
2478 struct scrub_ctx *sctx = sparity->sctx; in scrub_pages_for_parity() local
2484 spin_lock(&sctx->stat_lock); in scrub_pages_for_parity()
2485 sctx->stat.malloc_errors++; in scrub_pages_for_parity()
2486 spin_unlock(&sctx->stat_lock); in scrub_pages_for_parity()
2493 sblock->sctx = sctx; in scrub_pages_for_parity()
2505 spin_lock(&sctx->stat_lock); in scrub_pages_for_parity()
2506 sctx->stat.malloc_errors++; in scrub_pages_for_parity()
2507 spin_unlock(&sctx->stat_lock); in scrub_pages_for_parity()
2527 memcpy(spage->csum, csum, sctx->csum_size); in scrub_pages_for_parity()
2545 ret = scrub_add_page_to_rd_bio(sctx, spage); in scrub_pages_for_parity()
2562 struct scrub_ctx *sctx = sparity->sctx; in scrub_extent_for_parity() local
2568 blocksize = sctx->sectorsize; in scrub_extent_for_parity()
2570 blocksize = sctx->nodesize; in scrub_extent_for_parity()
2572 blocksize = sctx->sectorsize; in scrub_extent_for_parity()
2582 have_csum = scrub_find_csum(sctx, logical, l, csum); in scrub_extent_for_parity()
2645 struct scrub_ctx *sctx = sparity->sctx; in scrub_free_parity() local
2651 spin_lock(&sctx->stat_lock); in scrub_free_parity()
2652 sctx->stat.read_errors += nbits; in scrub_free_parity()
2653 sctx->stat.uncorrectable_errors += nbits; in scrub_free_parity()
2654 spin_unlock(&sctx->stat_lock); in scrub_free_parity()
2668 struct scrub_ctx *sctx = sparity->sctx; in scrub_parity_bio_endio() local
2675 scrub_pending_bio_dec(sctx); in scrub_parity_bio_endio()
2681 struct scrub_ctx *sctx = sparity->sctx; in scrub_parity_check_and_repair() local
2694 ret = btrfs_map_sblock(sctx->dev_root->fs_info, WRITE, in scrub_parity_check_and_repair()
2708 rbio = raid56_parity_alloc_scrub_rbio(sctx->dev_root, bio, bbio, in scrub_parity_check_and_repair()
2719 scrub_pending_bio_inc(sctx); in scrub_parity_check_and_repair()
2729 spin_lock(&sctx->stat_lock); in scrub_parity_check_and_repair()
2730 sctx->stat.malloc_errors++; in scrub_parity_check_and_repair()
2731 spin_unlock(&sctx->stat_lock); in scrub_parity_check_and_repair()
2754 static noinline_for_stack int scrub_raid56_parity(struct scrub_ctx *sctx, in scrub_raid56_parity() argument
2761 struct btrfs_fs_info *fs_info = sctx->dev_root->fs_info; in scrub_raid56_parity()
2786 spin_lock(&sctx->stat_lock); in scrub_raid56_parity()
2787 sctx->stat.malloc_errors++; in scrub_raid56_parity()
2788 spin_unlock(&sctx->stat_lock); in scrub_raid56_parity()
2794 sparity->sctx = sctx; in scrub_raid56_parity()
2904 &sctx->csum_list, 1); in scrub_raid56_parity()
2917 scrub_free_csums(sctx); in scrub_raid56_parity()
2948 scrub_submit(sctx); in scrub_raid56_parity()
2949 mutex_lock(&sctx->wr_ctx.wr_lock); in scrub_raid56_parity()
2950 scrub_wr_submit(sctx); in scrub_raid56_parity()
2951 mutex_unlock(&sctx->wr_ctx.wr_lock); in scrub_raid56_parity()
2957 static noinline_for_stack int scrub_stripe(struct scrub_ctx *sctx, in scrub_stripe() argument
2964 struct btrfs_fs_info *fs_info = sctx->dev_root->fs_info; in scrub_stripe()
3057 wait_event(sctx->list_wait, in scrub_stripe()
3058 atomic_read(&sctx->bios_in_flight) == 0); in scrub_stripe()
3103 ret = scrub_raid56_parity(sctx, map, scrub_dev, in scrub_stripe()
3115 atomic_read(&sctx->cancel_req)) { in scrub_stripe()
3124 atomic_set(&sctx->wr_ctx.flush_all_writes, 1); in scrub_stripe()
3125 scrub_submit(sctx); in scrub_stripe()
3126 mutex_lock(&sctx->wr_ctx.wr_lock); in scrub_stripe()
3127 scrub_wr_submit(sctx); in scrub_stripe()
3128 mutex_unlock(&sctx->wr_ctx.wr_lock); in scrub_stripe()
3129 wait_event(sctx->list_wait, in scrub_stripe()
3130 atomic_read(&sctx->bios_in_flight) == 0); in scrub_stripe()
3131 atomic_set(&sctx->wr_ctx.flush_all_writes, 0); in scrub_stripe()
3240 &sctx->csum_list, 1); in scrub_stripe()
3244 ret = scrub_extent(sctx, extent_logical, extent_len, in scrub_stripe()
3251 scrub_free_csums(sctx); in scrub_stripe()
3270 ret = scrub_raid56_parity(sctx, in scrub_stripe()
3299 spin_lock(&sctx->stat_lock); in scrub_stripe()
3301 sctx->stat.last_physical = map->stripes[num].physical + in scrub_stripe()
3304 sctx->stat.last_physical = physical; in scrub_stripe()
3305 spin_unlock(&sctx->stat_lock); in scrub_stripe()
3311 scrub_submit(sctx); in scrub_stripe()
3312 mutex_lock(&sctx->wr_ctx.wr_lock); in scrub_stripe()
3313 scrub_wr_submit(sctx); in scrub_stripe()
3314 mutex_unlock(&sctx->wr_ctx.wr_lock); in scrub_stripe()
3322 static noinline_for_stack int scrub_chunk(struct scrub_ctx *sctx, in scrub_chunk() argument
3329 &sctx->dev_root->fs_info->mapping_tree; in scrub_chunk()
3352 ret = scrub_stripe(sctx, map, scrub_dev, i, in scrub_chunk()
3366 int scrub_enumerate_chunks(struct scrub_ctx *sctx, in scrub_enumerate_chunks() argument
3372 struct btrfs_root *root = sctx->dev_root; in scrub_enumerate_chunks()
3452 ret = scrub_chunk(sctx, scrub_dev, chunk_tree, chunk_objectid, in scrub_enumerate_chunks()
3466 atomic_set(&sctx->wr_ctx.flush_all_writes, 1); in scrub_enumerate_chunks()
3467 scrub_submit(sctx); in scrub_enumerate_chunks()
3468 mutex_lock(&sctx->wr_ctx.wr_lock); in scrub_enumerate_chunks()
3469 scrub_wr_submit(sctx); in scrub_enumerate_chunks()
3470 mutex_unlock(&sctx->wr_ctx.wr_lock); in scrub_enumerate_chunks()
3472 wait_event(sctx->list_wait, in scrub_enumerate_chunks()
3473 atomic_read(&sctx->bios_in_flight) == 0); in scrub_enumerate_chunks()
3482 wait_event(sctx->list_wait, in scrub_enumerate_chunks()
3483 atomic_read(&sctx->workers_pending) == 0); in scrub_enumerate_chunks()
3484 atomic_set(&sctx->wr_ctx.flush_all_writes, 0); in scrub_enumerate_chunks()
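Lines 3466-3484 are the quiesce sequence that also appears in scrub_stripe() (lines 3124-3131) and btrfs_scrub_dev() (lines 3734-3738): raise flush_all_writes, push out any partially filled read and write bios, then drain the two counters in order, bios_in_flight first (their completion handlers may still queue workers) and workers_pending second. Condensed from the fragments above:

    atomic_set(&sctx->wr_ctx.flush_all_writes, 1);
    scrub_submit(sctx);                  /* flush the partial read bio */
    mutex_lock(&sctx->wr_ctx.wr_lock);
    scrub_wr_submit(sctx);               /* flush the partial write bio */
    mutex_unlock(&sctx->wr_ctx.wr_lock);
    wait_event(sctx->list_wait,
               atomic_read(&sctx->bios_in_flight) == 0);
    wait_event(sctx->list_wait,
               atomic_read(&sctx->workers_pending) == 0);
    atomic_set(&sctx->wr_ctx.flush_all_writes, 0);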
3500 if (sctx->stat.malloc_errors > 0) { in scrub_enumerate_chunks()
3521 static noinline_for_stack int scrub_supers(struct scrub_ctx *sctx, in scrub_supers() argument
3528 struct btrfs_root *root = sctx->dev_root; in scrub_supers()
3545 ret = scrub_pages(sctx, bytenr, BTRFS_SUPER_INFO_SIZE, bytenr, in scrub_supers()
3551 wait_event(sctx->list_wait, atomic_read(&sctx->bios_in_flight) == 0); in scrub_supers()
3612 struct scrub_ctx *sctx; in btrfs_scrub_dev() local
3701 sctx = scrub_setup_ctx(dev, is_dev_replace); in btrfs_scrub_dev()
3702 if (IS_ERR(sctx)) { in btrfs_scrub_dev()
3706 return PTR_ERR(sctx); in btrfs_scrub_dev()
3708 sctx->readonly = readonly; in btrfs_scrub_dev()
3709 dev->scrub_device = sctx; in btrfs_scrub_dev()
3726 ret = scrub_supers(sctx, dev); in btrfs_scrub_dev()
3731 ret = scrub_enumerate_chunks(sctx, dev, start, end, in btrfs_scrub_dev()
3734 wait_event(sctx->list_wait, atomic_read(&sctx->bios_in_flight) == 0); in btrfs_scrub_dev()
3738 wait_event(sctx->list_wait, atomic_read(&sctx->workers_pending) == 0); in btrfs_scrub_dev()
3741 memcpy(progress, &sctx->stat, sizeof(*progress)); in btrfs_scrub_dev()
3748 scrub_put_ctx(sctx); in btrfs_scrub_dev()
3802 struct scrub_ctx *sctx; in btrfs_scrub_cancel_dev() local
3805 sctx = dev->scrub_device; in btrfs_scrub_cancel_dev()
3806 if (!sctx) { in btrfs_scrub_cancel_dev()
3810 atomic_inc(&sctx->cancel_req); in btrfs_scrub_cancel_dev()
3826 struct scrub_ctx *sctx = NULL; in btrfs_scrub_progress() local
3831 sctx = dev->scrub_device; in btrfs_scrub_progress()
3832 if (sctx) in btrfs_scrub_progress()
3833 memcpy(progress, &sctx->stat, sizeof(*progress)); in btrfs_scrub_progress()
3836 return dev ? (sctx ? 0 : -ENOTCONN) : -ENODEV; in btrfs_scrub_progress()
3864 static int scrub_setup_wr_ctx(struct scrub_ctx *sctx, in scrub_setup_wr_ctx() argument
3893 static int copy_nocow_pages(struct scrub_ctx *sctx, u64 logical, u64 len, in copy_nocow_pages() argument
3897 struct btrfs_fs_info *fs_info = sctx->dev_root->fs_info; in copy_nocow_pages()
3901 spin_lock(&sctx->stat_lock); in copy_nocow_pages()
3902 sctx->stat.malloc_errors++; in copy_nocow_pages()
3903 spin_unlock(&sctx->stat_lock); in copy_nocow_pages()
3907 scrub_pending_trans_workers_inc(sctx); in copy_nocow_pages()
3909 nocow_ctx->sctx = sctx; in copy_nocow_pages()
3944 struct scrub_ctx *sctx = nocow_ctx->sctx; in copy_nocow_pages_worker() local
3956 fs_info = sctx->dev_root->fs_info; in copy_nocow_pages_worker()
3961 spin_lock(&sctx->stat_lock); in copy_nocow_pages_worker()
3962 sctx->stat.malloc_errors++; in copy_nocow_pages_worker()
3963 spin_unlock(&sctx->stat_lock); in copy_nocow_pages_worker()
4021 scrub_pending_trans_workers_dec(sctx); in copy_nocow_pages_worker()
4071 struct btrfs_fs_info *fs_info = nocow_ctx->sctx->dev_root->fs_info; in copy_nocow_pages_for_inode()
4167 err = write_page_nocow(nocow_ctx->sctx, in copy_nocow_pages_for_inode()
4190 static int write_page_nocow(struct scrub_ctx *sctx, in write_page_nocow() argument
4197 dev = sctx->wr_ctx.tgtdev; in write_page_nocow()
4207 spin_lock(&sctx->stat_lock); in write_page_nocow()
4208 sctx->stat.malloc_errors++; in write_page_nocow()
4209 spin_unlock(&sctx->stat_lock); in write_page_nocow()