fs/ext4/block_validity.c


DEFINITIONS

This source file includes the following definitions.
  1. ext4_init_system_zone
  2. ext4_exit_system_zone
  3. can_merge
  4. release_system_zone
  5. add_system_zone
  6. debug_print_tree
  7. ext4_data_block_valid_rcu
  8. ext4_protect_reserved_inode
  9. ext4_destroy_system_zone
  10. ext4_setup_system_zone
  11. ext4_release_system_zone
  12. ext4_data_block_valid
  13. ext4_check_blockref

// SPDX-License-Identifier: GPL-2.0
/*
 *  linux/fs/ext4/block_validity.c
 *
 * Copyright (C) 2009
 * Theodore Ts'o (tytso@mit.edu)
 *
 * Track which blocks in the filesystem are metadata blocks that
 * should never be used as data blocks by files or directories.
 */

#include <linux/time.h>
#include <linux/fs.h>
#include <linux/namei.h>
#include <linux/quotaops.h>
#include <linux/buffer_head.h>
#include <linux/swap.h>
#include <linux/pagemap.h>
#include <linux/blkdev.h>
#include <linux/slab.h>
#include "ext4.h"

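/*
 * One node in the system-zone rbtree: a contiguous run of 'count'
 * filesystem blocks starting at 'start_blk' that contains metadata.
 */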
struct ext4_system_zone {
        struct rb_node  node;
        ext4_fsblk_t    start_blk;
        unsigned int    count;
};

static struct kmem_cache *ext4_system_zone_cachep;

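/* Create the kmem cache for system zone entries at module init time. */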
int __init ext4_init_system_zone(void)
{
        ext4_system_zone_cachep = KMEM_CACHE(ext4_system_zone, 0);
        if (ext4_system_zone_cachep == NULL)
                return -ENOMEM;
        return 0;
}

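/*
 * Destroy the kmem cache at module exit. rcu_barrier() waits for any
 * outstanding ext4_destroy_system_zone() callbacks to finish so no
 * entry is freed after the cache is gone.
 */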
void ext4_exit_system_zone(void)
{
        rcu_barrier();
        kmem_cache_destroy(ext4_system_zone_cachep);
}

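/*
 * Two zones can be merged when entry1 ends exactly where entry2
 * begins; the tree keeps zones sorted and non-overlapping.
 */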
static inline int can_merge(struct ext4_system_zone *entry1,
                     struct ext4_system_zone *entry2)
{
        if ((entry1->start_blk + entry1->count) == entry2->start_blk)
                return 1;
        return 0;
}

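/*
 * Free all entries in a system-zone rbtree. A post-order walk is safe
 * here because every node is being freed and no rebalancing is needed.
 */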
static void release_system_zone(struct ext4_system_blocks *system_blks)
{
        struct ext4_system_zone *entry, *n;

        rbtree_postorder_for_each_entry_safe(entry, n,
                                &system_blks->root, node)
                kmem_cache_free(ext4_system_zone_cachep, entry);
}

/*
 * Mark a range of blocks as belonging to the "system zone" --- that
 * is, filesystem metadata blocks which should never be used by
 * inodes.
 */
static int add_system_zone(struct ext4_system_blocks *system_blks,
                           ext4_fsblk_t start_blk,
                           unsigned int count)
{
        struct ext4_system_zone *new_entry = NULL, *entry;
        struct rb_node **n = &system_blks->root.rb_node, *node;
        struct rb_node *parent = NULL, *new_node = NULL;

        while (*n) {
                parent = *n;
                entry = rb_entry(parent, struct ext4_system_zone, node);
                if (start_blk < entry->start_blk)
                        n = &(*n)->rb_left;
                else if (start_blk >= (entry->start_blk + entry->count))
                        n = &(*n)->rb_right;
                else {
                        if (start_blk + count > (entry->start_blk +
                                                 entry->count))
                                entry->count = (start_blk + count -
                                                entry->start_blk);
                        new_node = *n;
                        new_entry = rb_entry(new_node, struct ext4_system_zone,
                                             node);
                        break;
                }
        }

        if (!new_entry) {
                new_entry = kmem_cache_alloc(ext4_system_zone_cachep,
                                             GFP_KERNEL);
                if (!new_entry)
                        return -ENOMEM;
                new_entry->start_blk = start_blk;
                new_entry->count = count;
                new_node = &new_entry->node;

                rb_link_node(new_node, parent, n);
                rb_insert_color(new_node, &system_blks->root);
        }

        /* Can we merge to the left? */
        node = rb_prev(new_node);
        if (node) {
                entry = rb_entry(node, struct ext4_system_zone, node);
                if (can_merge(entry, new_entry)) {
                        new_entry->start_blk = entry->start_blk;
                        new_entry->count += entry->count;
                        rb_erase(node, &system_blks->root);
                        kmem_cache_free(ext4_system_zone_cachep, entry);
                }
        }

        /* Can we merge to the right? */
        node = rb_next(new_node);
        if (node) {
                entry = rb_entry(node, struct ext4_system_zone, node);
                if (can_merge(new_entry, entry)) {
                        new_entry->count += entry->count;
                        rb_erase(node, &system_blks->root);
                        kmem_cache_free(ext4_system_zone_cachep, entry);
                }
        }
        return 0;
}

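/*
 * Print the system zones as "first-last" block ranges to the kernel
 * log; only called when the filesystem is mounted with the debug
 * option.
 */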
static void debug_print_tree(struct ext4_sb_info *sbi)
{
        struct rb_node *node;
        struct ext4_system_zone *entry;
        int first = 1;

        printk(KERN_INFO "System zones: ");
        node = rb_first(&sbi->system_blks->root);
        while (node) {
                entry = rb_entry(node, struct ext4_system_zone, node);
                printk(KERN_CONT "%s%llu-%llu", first ? "" : ", ",
                       entry->start_blk, entry->start_blk + entry->count - 1);
                first = 0;
                node = rb_next(node);
        }
        printk(KERN_CONT "\n");
}

/*
 * Returns 1 if the passed-in block region (start_blk,
 * start_blk+count) is valid; 0 if some part of the block region
 * overlaps with filesystem metadata blocks.
 */
static int ext4_data_block_valid_rcu(struct ext4_sb_info *sbi,
                                     struct ext4_system_blocks *system_blks,
                                     ext4_fsblk_t start_blk,
                                     unsigned int count)
{
        struct ext4_system_zone *entry;
        struct rb_node *n;

        if ((start_blk <= le32_to_cpu(sbi->s_es->s_first_data_block)) ||
            (start_blk + count < start_blk) ||
            (start_blk + count > ext4_blocks_count(sbi->s_es))) {
                sbi->s_es->s_last_error_block = cpu_to_le64(start_blk);
                return 0;
        }

        if (system_blks == NULL)
                return 1;

        n = system_blks->root.rb_node;
        while (n) {
                entry = rb_entry(n, struct ext4_system_zone, node);
                if (start_blk + count - 1 < entry->start_blk)
                        n = n->rb_left;
                else if (start_blk >= (entry->start_blk + entry->count))
                        n = n->rb_right;
                else {
                        sbi->s_es->s_last_error_block = cpu_to_le64(start_blk);
                        return 0;
                }
        }
        return 1;
}

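/*
 * Map every block of the given reserved inode (e.g. the journal inode)
 * and add the resulting block ranges to the system zone, so that no
 * other inode may validly reference them. Fails with -EFSCORRUPTED if
 * the inode's blocks already overlap an existing system zone.
 */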
static int ext4_protect_reserved_inode(struct super_block *sb,
                                       struct ext4_system_blocks *system_blks,
                                       u32 ino)
{
        struct inode *inode;
        struct ext4_sb_info *sbi = EXT4_SB(sb);
        struct ext4_map_blocks map;
        u32 i = 0, num;
        int err = 0, n;

        if ((ino < EXT4_ROOT_INO) ||
            (ino > le32_to_cpu(sbi->s_es->s_inodes_count)))
                return -EINVAL;
        inode = ext4_iget(sb, ino, EXT4_IGET_SPECIAL);
        if (IS_ERR(inode))
                return PTR_ERR(inode);
        num = (inode->i_size + sb->s_blocksize - 1) >> sb->s_blocksize_bits;
        while (i < num) {
                cond_resched();
                map.m_lblk = i;
                map.m_len = num - i;
                n = ext4_map_blocks(NULL, inode, &map, 0);
                if (n < 0) {
                        err = n;
                        break;
                }
                if (n == 0) {
                        i++;
                } else {
                        if (!ext4_data_block_valid_rcu(sbi, system_blks,
                                                map.m_pblk, n)) {
                                ext4_error(sb, "blocks %llu-%llu from inode %u "
                                           "overlap system zone", map.m_pblk,
                                           map.m_pblk + map.m_len - 1, ino);
                                err = -EFSCORRUPTED;
                                break;
                        }
                        err = add_system_zone(system_blks, map.m_pblk, n);
                        if (err < 0)
                                break;
                        i += n;
                }
        }
        iput(inode);
        return err;
}

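/*
 * RCU callback: free the rbtree entries and the ext4_system_blocks
 * container once all RCU readers are done with the old tree.
 */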
static void ext4_destroy_system_zone(struct rcu_head *rcu)
{
        struct ext4_system_blocks *system_blks;

        system_blks = container_of(rcu, struct ext4_system_blocks, rcu);
        release_system_zone(system_blks);
        kfree(system_blks);
}

/*
 * Build the system zone rbtree, which is used for block validity checking.
 *
 * The update of the system_blks pointer in this function is protected by
 * the sb->s_umount semaphore. However we have to be careful as we can be
 * racing with ext4_data_block_valid() calls reading the system_blks rbtree,
 * protected only by RCU. That's why we first build the rbtree and then
 * swap it in place.
 */
int ext4_setup_system_zone(struct super_block *sb)
{
        ext4_group_t ngroups = ext4_get_groups_count(sb);
        struct ext4_sb_info *sbi = EXT4_SB(sb);
        struct ext4_system_blocks *system_blks;
        struct ext4_group_desc *gdp;
        ext4_group_t i;
        int flex_size = ext4_flex_bg_size(sbi);
        int ret;

        if (!test_opt(sb, BLOCK_VALIDITY)) {
                if (sbi->system_blks)
                        ext4_release_system_zone(sb);
                return 0;
        }
        if (sbi->system_blks)
                return 0;

        system_blks = kzalloc(sizeof(*system_blks), GFP_KERNEL);
        if (!system_blks)
                return -ENOMEM;

        for (i = 0; i < ngroups; i++) {
                cond_resched();
                if (ext4_bg_has_super(sb, i) &&
                    ((i < 5) || ((i % flex_size) == 0))) {
                        ret = add_system_zone(system_blks,
                                        ext4_group_first_block_no(sb, i),
                                        ext4_bg_num_gdb(sb, i) + 1);
                        if (ret)
                                goto err;
                }
                gdp = ext4_get_group_desc(sb, i, NULL);
                ret = add_system_zone(system_blks,
                                ext4_block_bitmap(sb, gdp), 1);
                if (ret)
                        goto err;
                ret = add_system_zone(system_blks,
                                ext4_inode_bitmap(sb, gdp), 1);
                if (ret)
                        goto err;
                ret = add_system_zone(system_blks,
                                ext4_inode_table(sb, gdp),
                                sbi->s_itb_per_group);
                if (ret)
                        goto err;
        }
        if (ext4_has_feature_journal(sb) && sbi->s_es->s_journal_inum) {
                ret = ext4_protect_reserved_inode(sb, system_blks,
                                le32_to_cpu(sbi->s_es->s_journal_inum));
                if (ret)
                        goto err;
        }

        /*
         * System blks rbtree complete, announce it once to prevent racing
         * with ext4_data_block_valid() accessing the rbtree at the same
         * time.
         */
        rcu_assign_pointer(sbi->system_blks, system_blks);

        if (test_opt(sb, DEBUG))
                debug_print_tree(sbi);
        return 0;
err:
        release_system_zone(system_blks);
        kfree(system_blks);
        return ret;
}

/*
 * Called when the filesystem is unmounted or when remounting it with
 * noblock_validity specified.
 *
 * The update of the system_blks pointer in this function is protected by
 * the sb->s_umount semaphore. However we have to be careful as we can be
 * racing with ext4_data_block_valid() calls reading the system_blks rbtree,
 * protected only by RCU. So we first clear the system_blks pointer and
 * then free the rbtree only after the RCU grace period expires.
 */
void ext4_release_system_zone(struct super_block *sb)
{
        struct ext4_system_blocks *system_blks;

        system_blks = rcu_dereference_protected(EXT4_SB(sb)->system_blks,
                                        lockdep_is_held(&sb->s_umount));
        rcu_assign_pointer(EXT4_SB(sb)->system_blks, NULL);

        if (system_blks)
                call_rcu(&system_blks->rcu, ext4_destroy_system_zone);
}

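/*
 * RCU-protected wrapper around ext4_data_block_valid_rcu(). Returns 1
 * if the block range may be used for file data, 0 if it overlaps
 * filesystem metadata or falls outside the filesystem.
 */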
int ext4_data_block_valid(struct ext4_sb_info *sbi, ext4_fsblk_t start_blk,
                          unsigned int count)
{
        struct ext4_system_blocks *system_blks;
        int ret;

        /*
         * Pin the system zone under RCU so it cannot be released from
         * under us by a remount that flips the "[no]block_validity"
         * mount option.
         */
        rcu_read_lock();
        system_blks = rcu_dereference(sbi->system_blks);
        ret = ext4_data_block_valid_rcu(sbi, system_blks, start_blk,
                                        count);
        rcu_read_unlock();
        return ret;
}

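/*
 * Validate an array of little-endian block references: every non-zero
 * entry must be a valid data block. The journal inode is exempt since
 * its blocks are themselves part of the system zone. Logs an error and
 * returns -EFSCORRUPTED on the first invalid reference.
 */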
int ext4_check_blockref(const char *function, unsigned int line,
                        struct inode *inode, __le32 *p, unsigned int max)
{
        struct ext4_super_block *es = EXT4_SB(inode->i_sb)->s_es;
        __le32 *bref = p;
        unsigned int blk;

        if (ext4_has_feature_journal(inode->i_sb) &&
            (inode->i_ino ==
             le32_to_cpu(EXT4_SB(inode->i_sb)->s_es->s_journal_inum)))
                return 0;

        while (bref < p+max) {
                blk = le32_to_cpu(*bref++);
                if (blk &&
                    unlikely(!ext4_data_block_valid(EXT4_SB(inode->i_sb),
                                                    blk, 1))) {
                        es->s_last_error_block = cpu_to_le64(blk);
                        ext4_error_inode(inode, function, line, blk,
                                         "invalid block");
                        return -EFSCORRUPTED;
                }
        }
        return 0;
}
