root/fs/btrfs/compression.c


DEFINITIONS

This source file includes the following definitions:
  1. btrfs_compress_type2str
  2. btrfs_compress_is_valid_type
  3. compressed_bio_size
  4. check_compressed_csum
  5. end_compressed_bio_read
  6. end_compressed_writeback
  7. end_compressed_bio_write
  8. btrfs_submit_compressed_write
  9. bio_end_offset
  10. add_ra_bio_pages
  11. btrfs_submit_compressed_read
  12. heuristic_init_workspace_manager
  13. heuristic_cleanup_workspace_manager
  14. heuristic_get_workspace
  15. heuristic_put_workspace
  16. free_heuristic_ws
  17. alloc_heuristic_ws
  18. btrfs_init_workspace_manager
  19. btrfs_cleanup_workspace_manager
  20. btrfs_get_workspace
  21. get_workspace
  22. btrfs_put_workspace
  23. put_workspace
  24. btrfs_compress_pages
  25. btrfs_decompress_bio
  26. btrfs_decompress
  27. btrfs_init_compress
  28. btrfs_exit_compress
  29. btrfs_decompress_buf2page
  30. ilog2_w
  31. shannon_entropy
  32. get4bits
  33. radix_sort
  34. byte_core_set_size
  35. byte_set_size
  36. sample_repeated_patterns
  37. heuristic_collect_sample
  38. btrfs_compress_heuristic
  39. btrfs_compress_str2level
  40. btrfs_compress_set_level

   1 // SPDX-License-Identifier: GPL-2.0
   2 /*
   3  * Copyright (C) 2008 Oracle.  All rights reserved.
   4  */
   5 
   6 #include <linux/kernel.h>
   7 #include <linux/bio.h>
   8 #include <linux/file.h>
   9 #include <linux/fs.h>
  10 #include <linux/pagemap.h>
  11 #include <linux/highmem.h>
  12 #include <linux/time.h>
  13 #include <linux/init.h>
  14 #include <linux/string.h>
  15 #include <linux/backing-dev.h>
  16 #include <linux/writeback.h>
  17 #include <linux/slab.h>
  18 #include <linux/sched/mm.h>
  19 #include <linux/log2.h>
  20 #include <crypto/hash.h>
  21 #include "misc.h"
  22 #include "ctree.h"
  23 #include "disk-io.h"
  24 #include "transaction.h"
  25 #include "btrfs_inode.h"
  26 #include "volumes.h"
  27 #include "ordered-data.h"
  28 #include "compression.h"
  29 #include "extent_io.h"
  30 #include "extent_map.h"
  31 
  32 static const char * const btrfs_compress_types[] = { "", "zlib", "lzo", "zstd" };
  33 
  34 const char *btrfs_compress_type2str(enum btrfs_compression_type type)
  35 {
  36         switch (type) {
  37         case BTRFS_COMPRESS_ZLIB:
  38         case BTRFS_COMPRESS_LZO:
  39         case BTRFS_COMPRESS_ZSTD:
  40         case BTRFS_COMPRESS_NONE:
  41                 return btrfs_compress_types[type];
  42         }
  43 
  44         return NULL;
  45 }
  46 
  47 bool btrfs_compress_is_valid_type(const char *str, size_t len)
  48 {
  49         int i;
  50 
  51         for (i = 1; i < ARRAY_SIZE(btrfs_compress_types); i++) {
  52                 size_t comp_len = strlen(btrfs_compress_types[i]);
  53 
  54                 if (len < comp_len)
  55                         continue;
  56 
  57                 if (!strncmp(btrfs_compress_types[i], str, comp_len))
  58                         return true;
  59         }
  60         return false;
  61 }
  62 
  63 static int btrfs_decompress_bio(struct compressed_bio *cb);
  64 
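     /*
      * Worked example for compressed_bio_size() below (assuming 4KiB sectors
      * and the common 4-byte crc32c checksum): a 128KiB compressed extent
      * covers 32 sectors, so the allocation is sizeof(struct compressed_bio)
      * plus 32 * 4 = 128 bytes for the per-sector checksums that end up in
      * cb->sums.
      */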
  65 static inline int compressed_bio_size(struct btrfs_fs_info *fs_info,
  66                                       unsigned long disk_size)
  67 {
  68         u16 csum_size = btrfs_super_csum_size(fs_info->super_copy);
  69 
  70         return sizeof(struct compressed_bio) +
  71                 (DIV_ROUND_UP(disk_size, fs_info->sectorsize)) * csum_size;
  72 }
  73 
  74 static int check_compressed_csum(struct btrfs_inode *inode,
  75                                  struct compressed_bio *cb,
  76                                  u64 disk_start)
  77 {
  78         struct btrfs_fs_info *fs_info = inode->root->fs_info;
  79         SHASH_DESC_ON_STACK(shash, fs_info->csum_shash);
  80         const u16 csum_size = btrfs_super_csum_size(fs_info->super_copy);
  81         int ret;
  82         struct page *page;
  83         unsigned long i;
  84         char *kaddr;
  85         u8 csum[BTRFS_CSUM_SIZE];
  86         u8 *cb_sum = cb->sums;
  87 
  88         if (inode->flags & BTRFS_INODE_NODATASUM)
  89                 return 0;
  90 
  91         shash->tfm = fs_info->csum_shash;
  92 
  93         for (i = 0; i < cb->nr_pages; i++) {
  94                 page = cb->compressed_pages[i];
  95 
  96                 crypto_shash_init(shash);
  97                 kaddr = kmap_atomic(page);
  98                 crypto_shash_update(shash, kaddr, PAGE_SIZE);
  99                 kunmap_atomic(kaddr);
 100                 crypto_shash_final(shash, (u8 *)&csum);
 101 
 102                 if (memcmp(&csum, cb_sum, csum_size)) {
 103                         btrfs_print_data_csum_error(inode, disk_start,
 104                                         csum, cb_sum, cb->mirror_num);
 105                         ret = -EIO;
 106                         goto fail;
 107                 }
 108                 cb_sum += csum_size;
 109 
 110         }
 111         ret = 0;
 112 fail:
 113         return ret;
 114 }
 115 
 116 /*
 117  * When we finish reading compressed pages from the disk, we decompress
 118  * them and then run the bio end_io routines on the decompressed pages
 119  * (in the inode address space).
 120  *
 121  * This allows the checksumming and other IO error handling routines to
 122  * work normally.
 123  *
 124  * The compressed pages are freed here; this must run in process context.
 125  */
 126 static void end_compressed_bio_read(struct bio *bio)
 127 {
 128         struct compressed_bio *cb = bio->bi_private;
 129         struct inode *inode;
 130         struct page *page;
 131         unsigned long index;
 132         unsigned int mirror = btrfs_io_bio(bio)->mirror_num;
 133         int ret = 0;
 134 
 135         if (bio->bi_status)
 136                 cb->errors = 1;
 137 
 138         /* if there are more bios still pending for this compressed
 139          * extent, just exit
 140          */
 141         if (!refcount_dec_and_test(&cb->pending_bios))
 142                 goto out;
 143 
 144         /*
 145          * Record the correct mirror_num in cb->orig_bio so that
 146          * read-repair can work properly.
 147          */
 148         ASSERT(btrfs_io_bio(cb->orig_bio));
 149         btrfs_io_bio(cb->orig_bio)->mirror_num = mirror;
 150         cb->mirror_num = mirror;
 151 
 152         /*
 153          * Some IO in this cb has failed, so skip the checksum as there
 154          * is no way it could be correct.
 155          */
 156         if (cb->errors == 1)
 157                 goto csum_failed;
 158 
 159         inode = cb->inode;
 160         ret = check_compressed_csum(BTRFS_I(inode), cb,
 161                                     (u64)bio->bi_iter.bi_sector << 9);
 162         if (ret)
 163                 goto csum_failed;
 164 
 165         /* OK, we're the last bio for this extent, let's start
 166          * the decompression.
 167          */
 168         ret = btrfs_decompress_bio(cb);
 169 
 170 csum_failed:
 171         if (ret)
 172                 cb->errors = 1;
 173 
 174         /* release the compressed pages */
 176         for (index = 0; index < cb->nr_pages; index++) {
 177                 page = cb->compressed_pages[index];
 178                 page->mapping = NULL;
 179                 put_page(page);
 180         }
 181 
 182         /* do io completion on the original bio */
 183         if (cb->errors) {
 184                 bio_io_error(cb->orig_bio);
 185         } else {
 186                 struct bio_vec *bvec;
 187                 struct bvec_iter_all iter_all;
 188 
 189                 /*
 190                  * we have verified the checksum already, set page
 191                  * checked so the end_io handlers know about it
 192                  */
 193                 ASSERT(!bio_flagged(bio, BIO_CLONED));
 194                 bio_for_each_segment_all(bvec, cb->orig_bio, iter_all)
 195                         SetPageChecked(bvec->bv_page);
 196 
 197                 bio_endio(cb->orig_bio);
 198         }
 199 
 200         /* finally free the cb struct */
 201         kfree(cb->compressed_pages);
 202         kfree(cb);
 203 out:
 204         bio_put(bio);
 205 }
 206 
 207 /*
 208  * Clear the writeback bits on all of the file
 209  * pages for a compressed write
 210  */
 211 static noinline void end_compressed_writeback(struct inode *inode,
 212                                               const struct compressed_bio *cb)
 213 {
 214         unsigned long index = cb->start >> PAGE_SHIFT;
 215         unsigned long end_index = (cb->start + cb->len - 1) >> PAGE_SHIFT;
 216         struct page *pages[16];
 217         unsigned long nr_pages = end_index - index + 1;
 218         int i;
 219         int ret;
 220 
 221         if (cb->errors)
 222                 mapping_set_error(inode->i_mapping, -EIO);
 223 
 224         while (nr_pages > 0) {
 225                 ret = find_get_pages_contig(inode->i_mapping, index,
 226                                      min_t(unsigned long,
 227                                      nr_pages, ARRAY_SIZE(pages)), pages);
 228                 if (ret == 0) {
 229                         nr_pages -= 1;
 230                         index += 1;
 231                         continue;
 232                 }
 233                 for (i = 0; i < ret; i++) {
 234                         if (cb->errors)
 235                                 SetPageError(pages[i]);
 236                         end_page_writeback(pages[i]);
 237                         put_page(pages[i]);
 238                 }
 239                 nr_pages -= ret;
 240                 index += ret;
 241         }
 242         /* the inode may be gone now */
 243 }
 244 
 245 /*
 246  * do the cleanup once all the compressed pages hit the disk.
 247  * This will clear writeback on the file pages and free the compressed
 248  * pages.
 249  *
 250  * This also calls the writeback end hooks for the file pages so that
 251  * metadata and checksums can be updated in the file.
 252  */
 253 static void end_compressed_bio_write(struct bio *bio)
 254 {
 255         struct compressed_bio *cb = bio->bi_private;
 256         struct inode *inode;
 257         struct page *page;
 258         unsigned long index;
 259 
 260         if (bio->bi_status)
 261                 cb->errors = 1;
 262 
 263         /* if there are more bios still pending for this compressed
 264          * extent, just exit
 265          */
 266         if (!refcount_dec_and_test(&cb->pending_bios))
 267                 goto out;
 268 
 269         /* OK, we're the last bio for this extent, step one is to
 270          * call back into the FS and do all the end_io operations
 271          */
 272         inode = cb->inode;
 273         cb->compressed_pages[0]->mapping = cb->inode->i_mapping;
 274         btrfs_writepage_endio_finish_ordered(cb->compressed_pages[0],
 275                         cb->start, cb->start + cb->len - 1,
 276                         bio->bi_status == BLK_STS_OK);
 277         cb->compressed_pages[0]->mapping = NULL;
 278 
 279         end_compressed_writeback(inode, cb);
 280         /* note, our inode could be gone now */
 281 
 282         /*
 283          * release the compressed pages, these came from alloc_page and
 284          * are not attached to the inode at all
 285          */
 287         for (index = 0; index < cb->nr_pages; index++) {
 288                 page = cb->compressed_pages[index];
 289                 page->mapping = NULL;
 290                 put_page(page);
 291         }
 292 
 293         /* finally free the cb struct */
 294         kfree(cb->compressed_pages);
 295         kfree(cb);
 296 out:
 297         bio_put(bio);
 298 }
 299 
 300 /*
 301  * worker function to build and submit bios for previously compressed pages.
 302  * The corresponding pages in the inode should be marked for writeback
 303  * and the compressed pages should have a reference on them for dropping
 304  * when the IO is complete.
 305  *
 306  * This also checksums the file bytes and gets things ready for
 307  * the end io hooks.
 308  */
 309 blk_status_t btrfs_submit_compressed_write(struct inode *inode, u64 start,
 310                                  unsigned long len, u64 disk_start,
 311                                  unsigned long compressed_len,
 312                                  struct page **compressed_pages,
 313                                  unsigned long nr_pages,
 314                                  unsigned int write_flags)
 315 {
 316         struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
 317         struct bio *bio = NULL;
 318         struct compressed_bio *cb;
 319         unsigned long bytes_left;
 320         int pg_index = 0;
 321         struct page *page;
 322         u64 first_byte = disk_start;
 323         struct block_device *bdev;
 324         blk_status_t ret;
 325         int skip_sum = BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM;
 326 
 327         WARN_ON(!PAGE_ALIGNED(start));
 328         cb = kmalloc(compressed_bio_size(fs_info, compressed_len), GFP_NOFS);
 329         if (!cb)
 330                 return BLK_STS_RESOURCE;
 331         refcount_set(&cb->pending_bios, 0);
 332         cb->errors = 0;
 333         cb->inode = inode;
 334         cb->start = start;
 335         cb->len = len;
 336         cb->mirror_num = 0;
 337         cb->compressed_pages = compressed_pages;
 338         cb->compressed_len = compressed_len;
 339         cb->orig_bio = NULL;
 340         cb->nr_pages = nr_pages;
 341 
 342         bdev = fs_info->fs_devices->latest_bdev;
 343 
 344         bio = btrfs_bio_alloc(first_byte);
 345         bio_set_dev(bio, bdev);
 346         bio->bi_opf = REQ_OP_WRITE | write_flags;
 347         bio->bi_private = cb;
 348         bio->bi_end_io = end_compressed_bio_write;
 349         refcount_set(&cb->pending_bios, 1);
 350 
 351         /* create and submit bios for the compressed pages */
 352         bytes_left = compressed_len;
 353         for (pg_index = 0; pg_index < cb->nr_pages; pg_index++) {
 354                 int submit = 0;
 355 
 356                 page = compressed_pages[pg_index];
 357                 page->mapping = inode->i_mapping;
 358                 if (bio->bi_iter.bi_size)
 359                         submit = btrfs_bio_fits_in_stripe(page, PAGE_SIZE, bio,
 360                                                           0);
 361 
 362                 page->mapping = NULL;
 363                 if (submit || bio_add_page(bio, page, PAGE_SIZE, 0) <
 364                     PAGE_SIZE) {
 365                         /*
 366                          * Increment the count before we submit the bio so
 367                          * we know the end IO handler won't run before we
 368                          * increment the count.  Otherwise, the cb might get
 369                          * freed before we're done setting it up.
 370                          */
 371                         refcount_inc(&cb->pending_bios);
 372                         ret = btrfs_bio_wq_end_io(fs_info, bio,
 373                                                   BTRFS_WQ_ENDIO_DATA);
 374                         BUG_ON(ret); /* -ENOMEM */
 375 
 376                         if (!skip_sum) {
 377                                 ret = btrfs_csum_one_bio(inode, bio, start, 1);
 378                                 BUG_ON(ret); /* -ENOMEM */
 379                         }
 380 
 381                         ret = btrfs_map_bio(fs_info, bio, 0, 1);
 382                         if (ret) {
 383                                 bio->bi_status = ret;
 384                                 bio_endio(bio);
 385                         }
 386 
 387                         bio = btrfs_bio_alloc(first_byte);
 388                         bio_set_dev(bio, bdev);
 389                         bio->bi_opf = REQ_OP_WRITE | write_flags;
 390                         bio->bi_private = cb;
 391                         bio->bi_end_io = end_compressed_bio_write;
 392                         bio_add_page(bio, page, PAGE_SIZE, 0);
 393                 }
 394                 if (bytes_left < PAGE_SIZE) {
 395                         btrfs_info(fs_info,
 396                                         "bytes left %lu compress len %lu nr %lu",
 397                                bytes_left, cb->compressed_len, cb->nr_pages);
 398                 }
 399                 bytes_left -= PAGE_SIZE;
 400                 first_byte += PAGE_SIZE;
 401                 cond_resched();
 402         }
 403 
 404         ret = btrfs_bio_wq_end_io(fs_info, bio, BTRFS_WQ_ENDIO_DATA);
 405         BUG_ON(ret); /* -ENOMEM */
 406 
 407         if (!skip_sum) {
 408                 ret = btrfs_csum_one_bio(inode, bio, start, 1);
 409                 BUG_ON(ret); /* -ENOMEM */
 410         }
 411 
 412         ret = btrfs_map_bio(fs_info, bio, 0, 1);
 413         if (ret) {
 414                 bio->bi_status = ret;
 415                 bio_endio(bio);
 416         }
 417 
 418         return 0;
 419 }
 420 
 421 static u64 bio_end_offset(struct bio *bio)
 422 {
 423         struct bio_vec *last = bio_last_bvec_all(bio);
 424 
 425         return page_offset(last->bv_page) + last->bv_len + last->bv_offset;
 426 }
 427 
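     /*
      * Readahead helper for compressed reads: try to extend cb->orig_bio
      * with file pages that follow the requested range, as long as they are
      * not already in the page cache, still map to the same compressed
      * extent on disk and fit into the bio.  This widens the range that a
      * single decompression pass can fill.
      */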
 428 static noinline int add_ra_bio_pages(struct inode *inode,
 429                                      u64 compressed_end,
 430                                      struct compressed_bio *cb)
 431 {
 432         unsigned long end_index;
 433         unsigned long pg_index;
 434         u64 last_offset;
 435         u64 isize = i_size_read(inode);
 436         int ret;
 437         struct page *page;
 438         unsigned long nr_pages = 0;
 439         struct extent_map *em;
 440         struct address_space *mapping = inode->i_mapping;
 441         struct extent_map_tree *em_tree;
 442         struct extent_io_tree *tree;
 443         u64 end;
 444         int misses = 0;
 445 
 446         last_offset = bio_end_offset(cb->orig_bio);
 447         em_tree = &BTRFS_I(inode)->extent_tree;
 448         tree = &BTRFS_I(inode)->io_tree;
 449 
 450         if (isize == 0)
 451                 return 0;
 452 
 453         end_index = (i_size_read(inode) - 1) >> PAGE_SHIFT;
 454 
 455         while (last_offset < compressed_end) {
 456                 pg_index = last_offset >> PAGE_SHIFT;
 457 
 458                 if (pg_index > end_index)
 459                         break;
 460 
 461                 page = xa_load(&mapping->i_pages, pg_index);
 462                 if (page && !xa_is_value(page)) {
 463                         misses++;
 464                         if (misses > 4)
 465                                 break;
 466                         goto next;
 467                 }
 468 
 469                 page = __page_cache_alloc(mapping_gfp_constraint(mapping,
 470                                                                  ~__GFP_FS));
 471                 if (!page)
 472                         break;
 473 
 474                 if (add_to_page_cache_lru(page, mapping, pg_index, GFP_NOFS)) {
 475                         put_page(page);
 476                         goto next;
 477                 }
 478 
 479                 end = last_offset + PAGE_SIZE - 1;
 480                 /*
 481                  * at this point, we have a locked page in the page cache
 482                  * for these bytes in the file.  But, we have to make
 483                  * sure they map to this compressed extent on disk.
 484                  */
 485                 set_page_extent_mapped(page);
 486                 lock_extent(tree, last_offset, end);
 487                 read_lock(&em_tree->lock);
 488                 em = lookup_extent_mapping(em_tree, last_offset,
 489                                            PAGE_SIZE);
 490                 read_unlock(&em_tree->lock);
 491 
 492                 if (!em || last_offset < em->start ||
 493                     (last_offset + PAGE_SIZE > extent_map_end(em)) ||
 494                     (em->block_start >> 9) != cb->orig_bio->bi_iter.bi_sector) {
 495                         free_extent_map(em);
 496                         unlock_extent(tree, last_offset, end);
 497                         unlock_page(page);
 498                         put_page(page);
 499                         break;
 500                 }
 501                 free_extent_map(em);
 502 
 503                 if (page->index == end_index) {
 504                         char *userpage;
 505                         size_t zero_offset = offset_in_page(isize);
 506 
 507                         if (zero_offset) {
 508                                 int zeros;
 509                                 zeros = PAGE_SIZE - zero_offset;
 510                                 userpage = kmap_atomic(page);
 511                                 memset(userpage + zero_offset, 0, zeros);
 512                                 flush_dcache_page(page);
 513                                 kunmap_atomic(userpage);
 514                         }
 515                 }
 516 
 517                 ret = bio_add_page(cb->orig_bio, page,
 518                                    PAGE_SIZE, 0);
 519 
 520                 if (ret == PAGE_SIZE) {
 521                         nr_pages++;
 522                         put_page(page);
 523                 } else {
 524                         unlock_extent(tree, last_offset, end);
 525                         unlock_page(page);
 526                         put_page(page);
 527                         break;
 528                 }
 529 next:
 530                 last_offset += PAGE_SIZE;
 531         }
 532         return 0;
 533 }
 534 
 535 /*
 536  * for a compressed read, the bio we get passed has all the inode pages
 537  * in it.  We don't actually do IO on those pages but allocate new ones
 538  * to hold the compressed pages on disk.
 539  *
 540  * bio->bi_iter.bi_sector points to the compressed extent on disk
 541  * bio->bi_io_vec points to all of the inode pages
 542  *
 543  * After the compressed pages are read, we copy the bytes into the
  544  * bio we were passed and then invoke its end_io handlers
 545  */
 546 blk_status_t btrfs_submit_compressed_read(struct inode *inode, struct bio *bio,
 547                                  int mirror_num, unsigned long bio_flags)
 548 {
 549         struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
 550         struct extent_map_tree *em_tree;
 551         struct compressed_bio *cb;
 552         unsigned long compressed_len;
 553         unsigned long nr_pages;
 554         unsigned long pg_index;
 555         struct page *page;
 556         struct block_device *bdev;
 557         struct bio *comp_bio;
 558         u64 cur_disk_byte = (u64)bio->bi_iter.bi_sector << 9;
 559         u64 em_len;
 560         u64 em_start;
 561         struct extent_map *em;
 562         blk_status_t ret = BLK_STS_RESOURCE;
 563         int faili = 0;
 564         const u16 csum_size = btrfs_super_csum_size(fs_info->super_copy);
 565         u8 *sums;
 566 
 567         em_tree = &BTRFS_I(inode)->extent_tree;
 568 
 569         /* we need the actual starting offset of this extent in the file */
 570         read_lock(&em_tree->lock);
 571         em = lookup_extent_mapping(em_tree,
 572                                    page_offset(bio_first_page_all(bio)),
 573                                    PAGE_SIZE);
 574         read_unlock(&em_tree->lock);
 575         if (!em)
 576                 return BLK_STS_IOERR;
 577 
 578         compressed_len = em->block_len;
 579         cb = kmalloc(compressed_bio_size(fs_info, compressed_len), GFP_NOFS);
 580         if (!cb)
 581                 goto out;
 582 
 583         refcount_set(&cb->pending_bios, 0);
 584         cb->errors = 0;
 585         cb->inode = inode;
 586         cb->mirror_num = mirror_num;
 587         sums = cb->sums;
 588 
 589         cb->start = em->orig_start;
 590         em_len = em->len;
 591         em_start = em->start;
 592 
 593         free_extent_map(em);
 594         em = NULL;
 595 
 596         cb->len = bio->bi_iter.bi_size;
 597         cb->compressed_len = compressed_len;
 598         cb->compress_type = extent_compress_type(bio_flags);
 599         cb->orig_bio = bio;
 600 
 601         nr_pages = DIV_ROUND_UP(compressed_len, PAGE_SIZE);
 602         cb->compressed_pages = kcalloc(nr_pages, sizeof(struct page *),
 603                                        GFP_NOFS);
 604         if (!cb->compressed_pages)
 605                 goto fail1;
 606 
 607         bdev = fs_info->fs_devices->latest_bdev;
 608 
 609         for (pg_index = 0; pg_index < nr_pages; pg_index++) {
 610                 cb->compressed_pages[pg_index] = alloc_page(GFP_NOFS |
 611                                                               __GFP_HIGHMEM);
 612                 if (!cb->compressed_pages[pg_index]) {
 613                         faili = pg_index - 1;
 614                         ret = BLK_STS_RESOURCE;
 615                         goto fail2;
 616                 }
 617         }
 618         faili = nr_pages - 1;
 619         cb->nr_pages = nr_pages;
 620 
 621         add_ra_bio_pages(inode, em_start + em_len, cb);
 622 
  623         /* include any pages we added in add_ra_bio_pages */
 624         cb->len = bio->bi_iter.bi_size;
 625 
 626         comp_bio = btrfs_bio_alloc(cur_disk_byte);
 627         bio_set_dev(comp_bio, bdev);
 628         comp_bio->bi_opf = REQ_OP_READ;
 629         comp_bio->bi_private = cb;
 630         comp_bio->bi_end_io = end_compressed_bio_read;
 631         refcount_set(&cb->pending_bios, 1);
 632 
 633         for (pg_index = 0; pg_index < nr_pages; pg_index++) {
 634                 int submit = 0;
 635 
 636                 page = cb->compressed_pages[pg_index];
 637                 page->mapping = inode->i_mapping;
 638                 page->index = em_start >> PAGE_SHIFT;
 639 
 640                 if (comp_bio->bi_iter.bi_size)
 641                         submit = btrfs_bio_fits_in_stripe(page, PAGE_SIZE,
 642                                                           comp_bio, 0);
 643 
 644                 page->mapping = NULL;
 645                 if (submit || bio_add_page(comp_bio, page, PAGE_SIZE, 0) <
 646                     PAGE_SIZE) {
 647                         unsigned int nr_sectors;
 648 
 649                         ret = btrfs_bio_wq_end_io(fs_info, comp_bio,
 650                                                   BTRFS_WQ_ENDIO_DATA);
 651                         BUG_ON(ret); /* -ENOMEM */
 652 
 653                         /*
 654                          * Increment the count before we submit the bio so
 655                          * we know the end IO handler won't run before we
 656                          * increment the count.  Otherwise, the cb might get
 657                          * freed before we're done setting it up.
 658                          */
 659                         refcount_inc(&cb->pending_bios);
 660 
 661                         if (!(BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM)) {
 662                                 ret = btrfs_lookup_bio_sums(inode, comp_bio,
 663                                                             sums);
 664                                 BUG_ON(ret); /* -ENOMEM */
 665                         }
 666 
 667                         nr_sectors = DIV_ROUND_UP(comp_bio->bi_iter.bi_size,
 668                                                   fs_info->sectorsize);
 669                         sums += csum_size * nr_sectors;
 670 
 671                         ret = btrfs_map_bio(fs_info, comp_bio, mirror_num, 0);
 672                         if (ret) {
 673                                 comp_bio->bi_status = ret;
 674                                 bio_endio(comp_bio);
 675                         }
 676 
 677                         comp_bio = btrfs_bio_alloc(cur_disk_byte);
 678                         bio_set_dev(comp_bio, bdev);
 679                         comp_bio->bi_opf = REQ_OP_READ;
 680                         comp_bio->bi_private = cb;
 681                         comp_bio->bi_end_io = end_compressed_bio_read;
 682 
 683                         bio_add_page(comp_bio, page, PAGE_SIZE, 0);
 684                 }
 685                 cur_disk_byte += PAGE_SIZE;
 686         }
 687 
 688         ret = btrfs_bio_wq_end_io(fs_info, comp_bio, BTRFS_WQ_ENDIO_DATA);
 689         BUG_ON(ret); /* -ENOMEM */
 690 
 691         if (!(BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM)) {
 692                 ret = btrfs_lookup_bio_sums(inode, comp_bio, sums);
 693                 BUG_ON(ret); /* -ENOMEM */
 694         }
 695 
 696         ret = btrfs_map_bio(fs_info, comp_bio, mirror_num, 0);
 697         if (ret) {
 698                 comp_bio->bi_status = ret;
 699                 bio_endio(comp_bio);
 700         }
 701 
 702         return 0;
 703 
 704 fail2:
 705         while (faili >= 0) {
 706                 __free_page(cb->compressed_pages[faili]);
 707                 faili--;
 708         }
 709 
 710         kfree(cb->compressed_pages);
 711 fail1:
 712         kfree(cb);
 713 out:
 714         free_extent_map(em);
 715         return ret;
 716 }
 717 
 718 /*
 719  * The heuristic uses systematic sampling to collect data from the input
 720  * range; the logic can be tuned by the following constants:
 721  *
 722  * @SAMPLING_READ_SIZE - how many bytes are copied for each sample
 723  * @SAMPLING_INTERVAL  - how far apart the starts of consecutive samples are
 724  */
 725 #define SAMPLING_READ_SIZE      (16)
 726 #define SAMPLING_INTERVAL       (256)
 727 
 728 /*
 729  * For statistical analysis of the input data we consider bytes that form a
 730  * Galois Field of 256 objects. Each object has an attribute count, ie. how
 731  * many times the object appeared in the sample.
 732  */
 733 #define BUCKET_SIZE             (256)
 734 
 735 /*
 736  * The size of the sample is based on a statistical sampling rule of thumb.
 737  * The common way is to perform sampling tests as long as the number of
 738  * elements in each cell is at least 5.
 739  *
 740  * Instead of 5, we choose 32 to obtain more accurate results.
 741  * If the data contain the maximum number of symbols, which is 256, we obtain a
 742  * sample size bound by 8192.
 743  *
 744  * For a sample of at most 8KB of data per data range: 16 consecutive bytes
 745  * from up to 512 locations.
 746  */
 747 #define MAX_SAMPLE_SIZE         (BTRFS_MAX_UNCOMPRESSED *               \
 748                                  SAMPLING_READ_SIZE / SAMPLING_INTERVAL)
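     /*
      * With BTRFS_MAX_UNCOMPRESSED at 128KiB this works out to
      * 131072 * 16 / 256 = 8192 bytes: 16 consecutive bytes from each of up
      * to 512 sampling locations.
      */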
 749 
 750 struct bucket_item {
 751         u32 count;
 752 };
 753 
 754 struct heuristic_ws {
 755         /* Partial copy of input data */
 756         u8 *sample;
 757         u32 sample_size;
 758         /* Buckets store counters for each byte value */
 759         struct bucket_item *bucket;
 760         /* Sorting buffer */
 761         struct bucket_item *bucket_b;
 762         struct list_head list;
 763 };
 764 
 765 static struct workspace_manager heuristic_wsm;
 766 
 767 static void heuristic_init_workspace_manager(void)
 768 {
 769         btrfs_init_workspace_manager(&heuristic_wsm, &btrfs_heuristic_compress);
 770 }
 771 
 772 static void heuristic_cleanup_workspace_manager(void)
 773 {
 774         btrfs_cleanup_workspace_manager(&heuristic_wsm);
 775 }
 776 
 777 static struct list_head *heuristic_get_workspace(unsigned int level)
 778 {
 779         return btrfs_get_workspace(&heuristic_wsm, level);
 780 }
 781 
 782 static void heuristic_put_workspace(struct list_head *ws)
 783 {
 784         btrfs_put_workspace(&heuristic_wsm, ws);
 785 }
 786 
 787 static void free_heuristic_ws(struct list_head *ws)
 788 {
 789         struct heuristic_ws *workspace;
 790 
 791         workspace = list_entry(ws, struct heuristic_ws, list);
 792 
 793         kvfree(workspace->sample);
 794         kfree(workspace->bucket);
 795         kfree(workspace->bucket_b);
 796         kfree(workspace);
 797 }
 798 
 799 static struct list_head *alloc_heuristic_ws(unsigned int level)
 800 {
 801         struct heuristic_ws *ws;
 802 
 803         ws = kzalloc(sizeof(*ws), GFP_KERNEL);
 804         if (!ws)
 805                 return ERR_PTR(-ENOMEM);
 806 
 807         ws->sample = kvmalloc(MAX_SAMPLE_SIZE, GFP_KERNEL);
 808         if (!ws->sample)
 809                 goto fail;
 810 
 811         ws->bucket = kcalloc(BUCKET_SIZE, sizeof(*ws->bucket), GFP_KERNEL);
 812         if (!ws->bucket)
 813                 goto fail;
 814 
 815         ws->bucket_b = kcalloc(BUCKET_SIZE, sizeof(*ws->bucket_b), GFP_KERNEL);
 816         if (!ws->bucket_b)
 817                 goto fail;
 818 
 819         INIT_LIST_HEAD(&ws->list);
 820         return &ws->list;
 821 fail:
 822         free_heuristic_ws(&ws->list);
 823         return ERR_PTR(-ENOMEM);
 824 }
 825 
 826 const struct btrfs_compress_op btrfs_heuristic_compress = {
 827         .init_workspace_manager = heuristic_init_workspace_manager,
 828         .cleanup_workspace_manager = heuristic_cleanup_workspace_manager,
 829         .get_workspace = heuristic_get_workspace,
 830         .put_workspace = heuristic_put_workspace,
 831         .alloc_workspace = alloc_heuristic_ws,
 832         .free_workspace = free_heuristic_ws,
 833 };
 834 
 835 static const struct btrfs_compress_op * const btrfs_compress_op[] = {
 836         /* The heuristic is represented as compression type 0 */
 837         &btrfs_heuristic_compress,
 838         &btrfs_zlib_compress,
 839         &btrfs_lzo_compress,
 840         &btrfs_zstd_compress,
 841 };
 842 
 843 void btrfs_init_workspace_manager(struct workspace_manager *wsm,
 844                                   const struct btrfs_compress_op *ops)
 845 {
 846         struct list_head *workspace;
 847 
 848         wsm->ops = ops;
 849 
 850         INIT_LIST_HEAD(&wsm->idle_ws);
 851         spin_lock_init(&wsm->ws_lock);
 852         atomic_set(&wsm->total_ws, 0);
 853         init_waitqueue_head(&wsm->ws_wait);
 854 
 855         /*
 856          * Preallocate one workspace for each compression type so we can
 857          * guarantee forward progress in the worst case
 858          */
 859         workspace = wsm->ops->alloc_workspace(0);
 860         if (IS_ERR(workspace)) {
 861                 pr_warn(
 862         "BTRFS: cannot preallocate compression workspace, will try later\n");
 863         } else {
 864                 atomic_set(&wsm->total_ws, 1);
 865                 wsm->free_ws = 1;
 866                 list_add(workspace, &wsm->idle_ws);
 867         }
 868 }
 869 
 870 void btrfs_cleanup_workspace_manager(struct workspace_manager *wsman)
 871 {
 872         struct list_head *ws;
 873 
 874         while (!list_empty(&wsman->idle_ws)) {
 875                 ws = wsman->idle_ws.next;
 876                 list_del(ws);
 877                 wsman->ops->free_workspace(ws);
 878                 atomic_dec(&wsman->total_ws);
 879         }
 880 }
 881 
 882 /*
 883  * This finds an available workspace or allocates a new one.
  884  * If it's not possible to allocate a new one, this waits until one is
  885  * freed.  Preallocation guarantees forward progress and we do not return
  886  * errors.
 887  */
 888 struct list_head *btrfs_get_workspace(struct workspace_manager *wsm,
 889                                       unsigned int level)
 890 {
 891         struct list_head *workspace;
 892         int cpus = num_online_cpus();
 893         unsigned nofs_flag;
 894         struct list_head *idle_ws;
 895         spinlock_t *ws_lock;
 896         atomic_t *total_ws;
 897         wait_queue_head_t *ws_wait;
 898         int *free_ws;
 899 
 900         idle_ws  = &wsm->idle_ws;
 901         ws_lock  = &wsm->ws_lock;
 902         total_ws = &wsm->total_ws;
 903         ws_wait  = &wsm->ws_wait;
 904         free_ws  = &wsm->free_ws;
 905 
 906 again:
 907         spin_lock(ws_lock);
 908         if (!list_empty(idle_ws)) {
 909                 workspace = idle_ws->next;
 910                 list_del(workspace);
 911                 (*free_ws)--;
 912                 spin_unlock(ws_lock);
 913                 return workspace;
  915         }
 916         if (atomic_read(total_ws) > cpus) {
 917                 DEFINE_WAIT(wait);
 918 
 919                 spin_unlock(ws_lock);
 920                 prepare_to_wait(ws_wait, &wait, TASK_UNINTERRUPTIBLE);
 921                 if (atomic_read(total_ws) > cpus && !*free_ws)
 922                         schedule();
 923                 finish_wait(ws_wait, &wait);
 924                 goto again;
 925         }
 926         atomic_inc(total_ws);
 927         spin_unlock(ws_lock);
 928 
 929         /*
 930          * Allocation helpers call vmalloc that can't use GFP_NOFS, so we have
 931          * to turn it off here because we might get called from the restricted
 932          * context of btrfs_compress_bio/btrfs_compress_pages
 933          */
 934         nofs_flag = memalloc_nofs_save();
 935         workspace = wsm->ops->alloc_workspace(level);
 936         memalloc_nofs_restore(nofs_flag);
 937 
 938         if (IS_ERR(workspace)) {
 939                 atomic_dec(total_ws);
 940                 wake_up(ws_wait);
 941 
 942                 /*
 943                  * Do not return the error but go back to waiting. There's a
 944                  * workspace preallocated for each type and the compression
 945                  * time is bounded so we get to a workspace eventually. This
 946                  * makes our caller's life easier.
 947                  *
 948                  * To prevent silent and low-probability deadlocks (when the
 949                  * initial preallocation fails), check if there are any
 950                  * workspaces at all.
 951                  */
 952                 if (atomic_read(total_ws) == 0) {
 953                         static DEFINE_RATELIMIT_STATE(_rs,
 954                                         /* once per minute */ 60 * HZ,
 955                                         /* no burst */ 1);
 956 
 957                         if (__ratelimit(&_rs)) {
 958                                 pr_warn("BTRFS: no compression workspaces, low memory, retrying\n");
 959                         }
 960                 }
 961                 goto again;
 962         }
 963         return workspace;
 964 }
 965 
 966 static struct list_head *get_workspace(int type, int level)
 967 {
 968         return btrfs_compress_op[type]->get_workspace(level);
 969 }
 970 
 971 /*
 972  * put a workspace struct back on the list or free it if we have enough
 973  * idle ones sitting around
 974  */
 975 void btrfs_put_workspace(struct workspace_manager *wsm, struct list_head *ws)
 976 {
 977         struct list_head *idle_ws;
 978         spinlock_t *ws_lock;
 979         atomic_t *total_ws;
 980         wait_queue_head_t *ws_wait;
 981         int *free_ws;
 982 
 983         idle_ws  = &wsm->idle_ws;
 984         ws_lock  = &wsm->ws_lock;
 985         total_ws = &wsm->total_ws;
 986         ws_wait  = &wsm->ws_wait;
 987         free_ws  = &wsm->free_ws;
 988 
 989         spin_lock(ws_lock);
 990         if (*free_ws <= num_online_cpus()) {
 991                 list_add(ws, idle_ws);
 992                 (*free_ws)++;
 993                 spin_unlock(ws_lock);
 994                 goto wake;
 995         }
 996         spin_unlock(ws_lock);
 997 
 998         wsm->ops->free_workspace(ws);
 999         atomic_dec(total_ws);
1000 wake:
1001         cond_wake_up(ws_wait);
1002 }
1003 
1004 static void put_workspace(int type, struct list_head *ws)
1005 {
1006         return btrfs_compress_op[type]->put_workspace(ws);
1007 }
1008 
1009 /*
1010  * Given an address space and start and length, compress the bytes into @pages
1011  * that are allocated on demand.
1012  *
1013  * @type_level is encoded algorithm and level, where level 0 means whatever
1014  * default the algorithm chooses and is opaque here;
1015  * - the compression algorithm is stored in bits 0-3
1016  * - the level is stored in bits 4-7
1017  *
1018  * @out_pages is an in/out parameter, holds maximum number of pages to allocate
1019  * and returns number of actually allocated pages
1020  *
1021  * @total_in is used to return the number of bytes actually read.  It
1022  * may be smaller than the input length if we had to exit early because we
1023  * ran out of room in the pages array or because the compressed output
1024  * crossed the limit implied by @out_pages.
1025  *
1026  * @total_out is an in/out parameter, must be set to the input length and
1027  * will also be used to return the total number of compressed bytes
1031  */
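     /*
      * A worked example of the encoding (based on the btrfs_compress_type()
      * and btrfs_compress_level() helpers in compression.h): zstd at level 3
      * is passed as type_level = 3 | (3 << 4) = 0x33, which the function
      * below unpacks again.
      */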
1032 int btrfs_compress_pages(unsigned int type_level, struct address_space *mapping,
1033                          u64 start, struct page **pages,
1034                          unsigned long *out_pages,
1035                          unsigned long *total_in,
1036                          unsigned long *total_out)
1037 {
1038         int type = btrfs_compress_type(type_level);
1039         int level = btrfs_compress_level(type_level);
1040         struct list_head *workspace;
1041         int ret;
1042 
1043         level = btrfs_compress_set_level(type, level);
1044         workspace = get_workspace(type, level);
1045         ret = btrfs_compress_op[type]->compress_pages(workspace, mapping,
1046                                                       start, pages,
1047                                                       out_pages,
1048                                                       total_in, total_out);
1049         put_workspace(type, workspace);
1050         return ret;
1051 }
1052 
1053 /*
1054  * pages_in is an array of pages with compressed data.
1055  *
1056  * disk_start is the starting logical offset of this array in the file
1057  *
1058  * orig_bio contains the pages from the file that we want to decompress into
1059  *
1060  * srclen is the number of bytes in pages_in
1061  *
1062  * The basic idea is that we have a bio that was created by readpages.
1063  * The pages in the bio are for the uncompressed data, and they may not
1064  * be contiguous.  They all correspond to the range of bytes covered by
1065  * the compressed extent.
1066  */
1067 static int btrfs_decompress_bio(struct compressed_bio *cb)
1068 {
1069         struct list_head *workspace;
1070         int ret;
1071         int type = cb->compress_type;
1072 
1073         workspace = get_workspace(type, 0);
1074         ret = btrfs_compress_op[type]->decompress_bio(workspace, cb);
1075         put_workspace(type, workspace);
1076 
1077         return ret;
1078 }
1079 
1080 /*
1081  * A less complex decompression routine.  Our compressed data fits in a
1082  * single page, and we want to read a single page out of it.
1083  * start_byte tells us the offset into the compressed data we're interested in
1084  */
1085 int btrfs_decompress(int type, unsigned char *data_in, struct page *dest_page,
1086                      unsigned long start_byte, size_t srclen, size_t destlen)
1087 {
1088         struct list_head *workspace;
1089         int ret;
1090 
1091         workspace = get_workspace(type, 0);
1092         ret = btrfs_compress_op[type]->decompress(workspace, data_in,
1093                                                   dest_page, start_byte,
1094                                                   srclen, destlen);
1095         put_workspace(type, workspace);
1096 
1097         return ret;
1098 }
1099 
1100 void __init btrfs_init_compress(void)
1101 {
1102         int i;
1103 
1104         for (i = 0; i < BTRFS_NR_WORKSPACE_MANAGERS; i++)
1105                 btrfs_compress_op[i]->init_workspace_manager();
1106 }
1107 
1108 void __cold btrfs_exit_compress(void)
1109 {
1110         int i;
1111 
1112         for (i = 0; i < BTRFS_NR_WORKSPACE_MANAGERS; i++)
1113                 btrfs_compress_op[i]->cleanup_workspace_manager();
1114 }
1115 
1116 /*
1117  * Copy uncompressed data from working buffer to pages.
1118  *
1119  * buf_start is the byte offset of the start of our working buffer within
1120  * the uncompressed data.
1121  * total_out is the offset of the byte just past the end of that buffer.
1122  */
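     /*
      * Example (assuming 4KiB pages): if the working buffer holds the
      * decompressed bytes [4096, 8192) (buf_start == 4096, total_out ==
      * 8192) and the current bio page covers the same range relative to
      * disk_start, then start_byte == 4096, buf_offset == 0 and one full
      * page is copied.
      */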
1123 int btrfs_decompress_buf2page(const char *buf, unsigned long buf_start,
1124                               unsigned long total_out, u64 disk_start,
1125                               struct bio *bio)
1126 {
1127         unsigned long buf_offset;
1128         unsigned long current_buf_start;
1129         unsigned long start_byte;
1130         unsigned long prev_start_byte;
1131         unsigned long working_bytes = total_out - buf_start;
1132         unsigned long bytes;
1133         char *kaddr;
1134         struct bio_vec bvec = bio_iter_iovec(bio, bio->bi_iter);
1135 
1136         /*
1137          * start byte is the first byte of the page we're currently
1138          * copying into relative to the start of the compressed data.
1139          */
1140         start_byte = page_offset(bvec.bv_page) - disk_start;
1141 
1142         /* we haven't yet hit data corresponding to this page */
1143         if (total_out <= start_byte)
1144                 return 1;
1145 
1146         /*
1147          * the start of the data we care about is offset into
1148          * the middle of our working buffer
1149          */
1150         if (total_out > start_byte && buf_start < start_byte) {
1151                 buf_offset = start_byte - buf_start;
1152                 working_bytes -= buf_offset;
1153         } else {
1154                 buf_offset = 0;
1155         }
1156         current_buf_start = buf_start;
1157 
1158         /* copy bytes from the working buffer into the pages */
1159         while (working_bytes > 0) {
1160                 bytes = min_t(unsigned long, bvec.bv_len,
1161                                 PAGE_SIZE - buf_offset);
1162                 bytes = min(bytes, working_bytes);
1163 
1164                 kaddr = kmap_atomic(bvec.bv_page);
1165                 memcpy(kaddr + bvec.bv_offset, buf + buf_offset, bytes);
1166                 kunmap_atomic(kaddr);
1167                 flush_dcache_page(bvec.bv_page);
1168 
1169                 buf_offset += bytes;
1170                 working_bytes -= bytes;
1171                 current_buf_start += bytes;
1172 
1173                 /* check if we need to pick another page */
1174                 bio_advance(bio, bytes);
1175                 if (!bio->bi_iter.bi_size)
1176                         return 0;
1177                 bvec = bio_iter_iovec(bio, bio->bi_iter);
1178                 prev_start_byte = start_byte;
1179                 start_byte = page_offset(bvec.bv_page) - disk_start;
1180 
1181                 /*
1182                  * We need to make sure we're only adjusting
1183                  * our offset into the compression working buffer when
1184                  * we're switching pages.  Otherwise we can incorrectly
1185                  * keep copying when we were actually done.
1186                  */
1187                 if (start_byte != prev_start_byte) {
1188                         /*
1189                          * make sure our new page is covered by this
1190                          * working buffer
1191                          */
1192                         if (total_out <= start_byte)
1193                                 return 1;
1194 
1195                         /*
1196                          * the next page in the biovec might not be adjacent
1197                          * to the last page, but it might still be found
1198                          * inside this working buffer. bump our offset pointer
1199                          */
1200                         if (total_out > start_byte &&
1201                             current_buf_start < start_byte) {
1202                                 buf_offset = start_byte - buf_start;
1203                                 working_bytes = total_out - start_byte;
1204                                 current_buf_start = buf_start + buf_offset;
1205                         }
1206                 }
1207         }
1208 
1209         return 1;
1210 }
1211 
1212 /*
1213  * Shannon Entropy calculation
1214  *
1215  * Pure byte distribution analysis fails to determine compressibility of data.
1216  * Try calculating entropy to estimate the average minimum number of bits
1217  * needed to encode the sampled data.
1218  *
1219  * For convenience, return the percentage of needed bits, instead of amount of
1220  * bits directly.
1221  *
1222  * @ENTROPY_LVL_ACEPTABLE - below this threshold the sample has low byte
1223  *                          entropy and is very likely compressible
1224  *
1225  * @ENTROPY_LVL_HIGH - data above this threshold are very likely not compressible
1226  *
1227  * Use of ilog2() decreases precision, we lower the LVL to 5 to compensate.
1228  */
1229 #define ENTROPY_LVL_ACEPTABLE           (65)
1230 #define ENTROPY_LVL_HIGH                (80)
1231 
1232 /*
1233  * For increased precision in shannon_entropy calculation,
1234  * let's do pow(n, M) to save more digits after the decimal point:
1235  *
1236  * - maximum int bit length is 64
1237  * - ilog2(MAX_SAMPLE_SIZE)     -> 13
1238  * - 13 * 4 = 52 < 64           -> M = 4
1239  *
1240  * So use pow(n, 4).
1241  */
1242 static inline u32 ilog2_w(u64 n)
1243 {
1244         return ilog2(n * n * n * n);
1245 }
1246 
1247 static u32 shannon_entropy(struct heuristic_ws *ws)
1248 {
1249         const u32 entropy_max = 8 * ilog2_w(2);
1250         u32 entropy_sum = 0;
1251         u32 p, p_base, sz_base;
1252         u32 i;
1253 
1254         sz_base = ilog2_w(ws->sample_size);
1255         for (i = 0; i < BUCKET_SIZE && ws->bucket[i].count > 0; i++) {
1256                 p = ws->bucket[i].count;
1257                 p_base = ilog2_w(p);
1258                 entropy_sum += p * (sz_base - p_base);
1259         }
1260 
1261         entropy_sum /= ws->sample_size;
1262         return entropy_sum * 100 / entropy_max;
1263 }
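     /*
      * A worked reading of the math above: ilog2_w(n) == ilog2(n^4) ==
      * 4 * log2(n), so the loop accumulates
      * sum(count * 4 * log2(sample_size / count)) and
      * entropy_sum / sample_size approximates 4 * H, where H is the Shannon
      * entropy in bits per byte.  entropy_max == 8 * ilog2_w(2) == 32
      * carries the same 4x scaling of the 8-bit maximum, so the result is a
      * percentage of the maximum possible entropy.
      */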
1264 
1265 #define RADIX_BASE              4U
1266 #define COUNTERS_SIZE           (1U << RADIX_BASE)
1267 
1268 static u8 get4bits(u64 num, int shift)
     {
1269         u8 low4bits;
1270 
1271         num >>= shift;
1272         /* Reverse order */
1273         low4bits = (COUNTERS_SIZE - 1) - (num % COUNTERS_SIZE);
1274         return low4bits;
1275 }
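     /*
      * Example for get4bits() above: num == 0x25 with shift == 0 returns
      * 15 - 5 == 10, and with shift == 4 returns 15 - 2 == 13.  Reversing
      * the digits this way makes radix_sort() below produce a descending
      * order, which byte_core_set_size() expects, without an extra pass.
      */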
1276 
1277 /*
1278  * Use 4 bits as radix base
1279  * Use 16 u32 counters for calculating new position in buf array
1280  *
1281  * @array     - array that will be sorted
1282  * @array_buf - buffer array to store sorting results
1283  *              must be equal in size to @array
1284  * @num       - array size
1285  */
1286 static void radix_sort(struct bucket_item *array, struct bucket_item *array_buf,
1287                        int num)
1288 {
1289         u64 max_num;
1290         u64 buf_num;
1291         u32 counters[COUNTERS_SIZE];
1292         u32 new_addr;
1293         u32 addr;
1294         int bitlen;
1295         int shift;
1296         int i;
1297 
1298         /*
1299          * Try to avoid useless loop iterations for small numbers stored in big
1300          * counters.  Example: 48 33 4 ... in 64bit array
1301          */
1302         max_num = array[0].count;
1303         for (i = 1; i < num; i++) {
1304                 buf_num = array[i].count;
1305                 if (buf_num > max_num)
1306                         max_num = buf_num;
1307         }
1308 
1309         buf_num = ilog2(max_num);
1310         bitlen = ALIGN(buf_num, RADIX_BASE * 2);
1311 
1312         shift = 0;
1313         while (shift < bitlen) {
1314                 memset(counters, 0, sizeof(counters));
1315 
1316                 for (i = 0; i < num; i++) {
1317                         buf_num = array[i].count;
1318                         addr = get4bits(buf_num, shift);
1319                         counters[addr]++;
1320                 }
1321 
1322                 for (i = 1; i < COUNTERS_SIZE; i++)
1323                         counters[i] += counters[i - 1];
1324 
1325                 for (i = num - 1; i >= 0; i--) {
1326                         buf_num = array[i].count;
1327                         addr = get4bits(buf_num, shift);
1328                         counters[addr]--;
1329                         new_addr = counters[addr];
1330                         array_buf[new_addr] = array[i];
1331                 }
1332 
1333                 shift += RADIX_BASE;
1334 
1335                 /*
1336                  * A normal radix sort moves data from the temporary array
1337                  * back to the main one, but that requires some CPU time.
1338                  * Avoid that by doing another sort iteration on the
1339                  * original array instead of a memcpy().
1340                  */
1341                 memset(counters, 0, sizeof(counters));
1342 
1343                 for (i = 0; i < num; i++) {
1344                         buf_num = array_buf[i].count;
1345                         addr = get4bits(buf_num, shift);
1346                         counters[addr]++;
1347                 }
1348 
1349                 for (i = 1; i < COUNTERS_SIZE; i++)
1350                         counters[i] += counters[i - 1];
1351 
1352                 for (i = num - 1; i >= 0; i--) {
1353                         buf_num = array_buf[i].count;
1354                         addr = get4bits(buf_num, shift);
1355                         counters[addr]--;
1356                         new_addr = counters[addr];
1357                         array[new_addr] = array_buf[i];
1358                 }
1359 
1360                 shift += RADIX_BASE;
1361         }
1362 }
1363 
1364 /*
1365  * Size of the core byte set - how many bytes cover 90% of the sample
1366  *
1367  * There are several types of structured binary data that use nearly all byte
1368  * values. The distribution can be uniform and counts in all buckets will be
1369  * nearly the same (eg. encrypted data). Unlikely to be compressible.
1370  *
1371  * Another possibility is a normal (Gaussian) distribution, where the data
1372  * could be potentially compressible, but we have to take a few more steps
1373  * to decide how much.
1374  *
1375  * @BYTE_CORE_SET_LOW  - the main part of byte values repeats frequently;
1376  *                       a compression algo can easily fix that
1377  * @BYTE_CORE_SET_HIGH - data have a uniform distribution and with high
1378  *                       probability are not compressible
1379  */
1380 #define BYTE_CORE_SET_LOW               (64)
1381 #define BYTE_CORE_SET_HIGH              (200)
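     /*
      * For example, English text sampled this way typically needs well
      * under 64 byte values to cover 90% of the sample, while encrypted or
      * already-compressed data tends to need more than 200.
      */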
1382 
1383 static int byte_core_set_size(struct heuristic_ws *ws)
1384 {
1385         u32 i;
1386         u32 coreset_sum = 0;
1387         const u32 core_set_threshold = ws->sample_size * 90 / 100;
1388         struct bucket_item *bucket = ws->bucket;
1389 
1390         /* Sort in reverse order */
1391         radix_sort(ws->bucket, ws->bucket_b, BUCKET_SIZE);
1392 
1393         for (i = 0; i < BYTE_CORE_SET_LOW; i++)
1394                 coreset_sum += bucket[i].count;
1395 
1396         if (coreset_sum > core_set_threshold)
1397                 return i;
1398 
1399         for (; i < BYTE_CORE_SET_HIGH && bucket[i].count > 0; i++) {
1400                 coreset_sum += bucket[i].count;
1401                 if (coreset_sum > core_set_threshold)
1402                         break;
1403         }
1404 
1405         return i;
1406 }
1407 
1408 /*
1409  * Count byte values in buckets.
1410  * This heuristic can detect textual data (configs, xml, json, html, etc).
1411  * Because in most text-like data byte set is restricted to limited number of
1412  * possible characters, and that restriction in most cases makes data easy to
1413  * compress.
1414  *
1415  * @BYTE_SET_THRESHOLD - consider all data within this byte set size:
1416  *      less - compressible
1417  *      more - need additional analysis
1418  */
1419 #define BYTE_SET_THRESHOLD              (64)
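     /*
      * For example, data drawn from a small alphabet (hex dumps, numeric
      * logs) often stays below 64 distinct byte values and is caught by
      * this cheap check before the costlier core-set and entropy analysis.
      */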
1420 
1421 static u32 byte_set_size(const struct heuristic_ws *ws)
1422 {
1423         u32 i;
1424         u32 byte_set_size = 0;
1425 
1426         for (i = 0; i < BYTE_SET_THRESHOLD; i++) {
1427                 if (ws->bucket[i].count > 0)
1428                         byte_set_size++;
1429         }
1430 
1431         /*
1432          * Continue collecting count of byte values in buckets.  If the byte
1433          * set size is bigger than the threshold, it's pointless to continue,
1434          * the detection technique would fail for this type of data.
1435          */
1436         for (; i < BUCKET_SIZE; i++) {
1437                 if (ws->bucket[i].count > 0) {
1438                         byte_set_size++;
1439                         if (byte_set_size > BYTE_SET_THRESHOLD)
1440                                 return byte_set_size;
1441                 }
1442         }
1443 
1444         return byte_set_size;
1445 }
1446 
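     /*
      * Compare the first half of the sample with the second half: identical
      * halves indicate strongly periodic (or constant, eg. zeroed) data
      * that is trivially compressible.
      */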
1447 static bool sample_repeated_patterns(struct heuristic_ws *ws)
1448 {
1449         const u32 half_of_sample = ws->sample_size / 2;
1450         const u8 *data = ws->sample;
1451 
1452         return memcmp(&data[0], &data[half_of_sample], half_of_sample) == 0;
1453 }
1454 
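     /*
      * Systematic sampling: from every SAMPLING_INTERVAL-sized stride of
      * the input range, copy the first SAMPLING_READ_SIZE bytes into
      * ws->sample, after clamping the range to BTRFS_MAX_UNCOMPRESSED.
      */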
1455 static void heuristic_collect_sample(struct inode *inode, u64 start, u64 end,
1456                                      struct heuristic_ws *ws)
1457 {
1458         struct page *page;
1459         u64 index, index_end;
1460         u32 i, curr_sample_pos;
1461         u8 *in_data;
1462 
1463         /*
1464          * Compression handles the input data by chunks of 128KiB
1465          * (defined by BTRFS_MAX_UNCOMPRESSED)
1466          *
1467          * We do the same for the heuristic and loop over the whole range.
1468          *
1469          * MAX_SAMPLE_SIZE - calculated under assumption that heuristic will
1470          * process no more than BTRFS_MAX_UNCOMPRESSED at a time.
1471          */
1472         if (end - start > BTRFS_MAX_UNCOMPRESSED)
1473                 end = start + BTRFS_MAX_UNCOMPRESSED;
1474 
1475         index = start >> PAGE_SHIFT;
1476         index_end = end >> PAGE_SHIFT;
1477 
1478         /* Don't miss unaligned end */
1479         if (!IS_ALIGNED(end, PAGE_SIZE))
1480                 index_end++;
1481 
1482         curr_sample_pos = 0;
1483         while (index < index_end) {
1484                 page = find_get_page(inode->i_mapping, index);
1485                 in_data = kmap(page);
1486                 /* Handle case where the start is not aligned to PAGE_SIZE */
1487                 i = start % PAGE_SIZE;
1488                 while (i < PAGE_SIZE - SAMPLING_READ_SIZE) {
1489                         /* Don't sample any garbage from the last page */
1490                         if (start > end - SAMPLING_READ_SIZE)
1491                                 break;
1492                         memcpy(&ws->sample[curr_sample_pos], &in_data[i],
1493                                         SAMPLING_READ_SIZE);
1494                         i += SAMPLING_INTERVAL;
1495                         start += SAMPLING_INTERVAL;
1496                         curr_sample_pos += SAMPLING_READ_SIZE;
1497                 }
1498                 kunmap(page);
1499                 put_page(page);
1500 
1501                 index++;
1502         }
1503 
1504         ws->sample_size = curr_sample_pos;
1505 }
1506 
1507 /*
1508  * Compression heuristic.
1509  *
1510  * For now it's a naive and optimistic 'return true'; we'll extend the logic to
1511  * quickly (compared to direct compression) detect data characteristics
1512  * (compressible/uncompressible) to avoid wasting CPU time on uncompressible
1513  * data.
1514  *
1515  * The following types of analysis can be performed:
1516  * - detect mostly zero data
1517  * - detect data with low "byte set" size (text, etc)
1518  * - detect data with low/high "core byte" set
1519  *
1520  * Return non-zero if the compression should be done, 0 otherwise.
1521  */
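     /*
      * The distinct non-zero return values below only record which test
      * fired: 1 repeated pattern, 2 small byte set, 3 small core set,
      * 4 low entropy, 5 medium entropy.  Callers treat any non-zero value
      * as "worth compressing".
      */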
1522 int btrfs_compress_heuristic(struct inode *inode, u64 start, u64 end)
1523 {
1524         struct list_head *ws_list = get_workspace(0, 0);
1525         struct heuristic_ws *ws;
1526         u32 i;
1527         u8 byte;
1528         int ret = 0;
1529 
1530         ws = list_entry(ws_list, struct heuristic_ws, list);
1531 
1532         heuristic_collect_sample(inode, start, end, ws);
1533 
1534         if (sample_repeated_patterns(ws)) {
1535                 ret = 1;
1536                 goto out;
1537         }
1538 
1539         memset(ws->bucket, 0, sizeof(*ws->bucket) * BUCKET_SIZE);
1540 
1541         for (i = 0; i < ws->sample_size; i++) {
1542                 byte = ws->sample[i];
1543                 ws->bucket[byte].count++;
1544         }
1545 
1546         i = byte_set_size(ws);
1547         if (i < BYTE_SET_THRESHOLD) {
1548                 ret = 2;
1549                 goto out;
1550         }
1551 
1552         i = byte_core_set_size(ws);
1553         if (i <= BYTE_CORE_SET_LOW) {
1554                 ret = 3;
1555                 goto out;
1556         }
1557 
1558         if (i >= BYTE_CORE_SET_HIGH) {
1559                 ret = 0;
1560                 goto out;
1561         }
1562 
1563         i = shannon_entropy(ws);
1564         if (i <= ENTROPY_LVL_ACEPTABLE) {
1565                 ret = 4;
1566                 goto out;
1567         }
1568 
1569         /*
1570          * For the levels below ENTROPY_LVL_HIGH, additional analysis would be
1571          * needed to give green light to compression.
1572          *
1573          * For now just assume that compression at that level is not worth the
1574          * resources because:
1575          *
1576          * 1. it is possible to defrag the data later
1577          *
1578          * 2. the data would turn out to be hardly compressible, eg. 150 byte
1579          * values, every bucket has counter at level ~54. The heuristic would
1580          * be confused. This can happen when data have some internal repeated
1581          * patterns like "abbacbbc...". This can be detected by analyzing
1582          * pairs of bytes, which is too costly.
1583          */
1584         if (i < ENTROPY_LVL_HIGH) {
1585                 ret = 5;
1586                 goto out;
1587         } else {
1588                 ret = 0;
1589                 goto out;
1590         }
1591 
1592 out:
1593         put_workspace(0, ws_list);
1594         return ret;
1595 }
1596 
1597 /*
1598  * Convert the compression suffix (eg. after "zlib" starting with ":") to
1599  * a level; an unrecognized string sets the default level
1600  */
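     /*
      * For example, with the mount option compress=zstd:3 the caller passes
      * the ":3" suffix and gets back level 3; a bare "zlib" (no ':' suffix)
      * falls through to the algorithm's default level.
      */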
1601 unsigned int btrfs_compress_str2level(unsigned int type, const char *str)
1602 {
1603         unsigned int level = 0;
1604         int ret;
1605 
1606         if (!type)
1607                 return 0;
1608 
1609         if (str[0] == ':') {
1610                 ret = kstrtouint(str + 1, 10, &level);
1611                 if (ret)
1612                         level = 0;
1613         }
1614 
1615         level = btrfs_compress_set_level(type, level);
1616 
1617         return level;
1618 }
1619 
1620 /*
1621  * Adjust @level according to the limits of the compression algorithm or
1622  * fallback to default
1623  */
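     /*
      * For example, with the limits defined per algorithm at the time of
      * writing: zlib accepts levels 1-9 and zstd levels 1-15, and both
      * fall back to level 3 when 0 is passed in.
      */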
1624 unsigned int btrfs_compress_set_level(int type, unsigned level)
1625 {
1626         const struct btrfs_compress_op *ops = btrfs_compress_op[type];
1627 
1628         if (level == 0)
1629                 level = ops->default_level;
1630         else
1631                 level = min(level, ops->max_level);
1632 
1633         return level;
1634 }
