fs/xfs/scrub/bitmap.c

DEFINITIONS

This source file includes the following definitions:
  1. xfs_bitmap_set
  2. xfs_bitmap_destroy
  3. xfs_bitmap_init
  4. xfs_bitmap_range_cmp
  5. xfs_bitmap_disunion
  6. xfs_bitmap_set_btcur_path
  7. xfs_bitmap_collect_btblock
  8. xfs_bitmap_set_btblocks

// SPDX-License-Identifier: GPL-2.0+
/*
 * Copyright (C) 2018 Oracle.  All Rights Reserved.
 * Author: Darrick J. Wong <darrick.wong@oracle.com>
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_trans_resv.h"
#include "xfs_mount.h"
#include "xfs_btree.h"
#include "scrub/bitmap.h"

/*
 * Set a range of this bitmap.  Caller must ensure the range is not set.
 *
 * This is the logical equivalent of bitmap |= mask(start, len).
 */
int
xfs_bitmap_set(
        struct xfs_bitmap       *bitmap,
        uint64_t                start,
        uint64_t                len)
{
        struct xfs_bitmap_range *bmr;

        bmr = kmem_alloc(sizeof(struct xfs_bitmap_range), KM_MAYFAIL);
        if (!bmr)
                return -ENOMEM;

        INIT_LIST_HEAD(&bmr->list);
        bmr->start = start;
        bmr->len = len;
        list_add_tail(&bmr->list, &bitmap->list);

        return 0;
}
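
/*
 * Illustrative sketch (not part of the original file): one way a caller
 * might accumulate disjoint ranges with the helpers above.  The function
 * name and the ranges are hypothetical; the only requirement is that the
 * ranges passed to xfs_bitmap_set() never overlap anything already in the
 * bitmap, and that the caller eventually calls xfs_bitmap_destroy(), even
 * on error, to free the range list.
 */
static inline int
example_build_bitmap(
        struct xfs_bitmap       *bitmap)
{
        int                     error;

        xfs_bitmap_init(bitmap);

        /* Record blocks 10-19 as belonging to the old structure. */
        error = xfs_bitmap_set(bitmap, 10, 10);
        if (error)
                return error;

        /* Record blocks 40-44; this range must not overlap the first. */
        return xfs_bitmap_set(bitmap, 40, 5);
}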

/* Free everything related to this bitmap. */
void
xfs_bitmap_destroy(
        struct xfs_bitmap       *bitmap)
{
        struct xfs_bitmap_range *bmr;
        struct xfs_bitmap_range *n;

        for_each_xfs_bitmap_extent(bmr, n, bitmap) {
                list_del(&bmr->list);
                kmem_free(bmr);
        }
}

/* Set up a per-AG block bitmap. */
void
xfs_bitmap_init(
        struct xfs_bitmap       *bitmap)
{
        INIT_LIST_HEAD(&bitmap->list);
}

/* Compare two bitmap ranges by starting offset. */
static int
xfs_bitmap_range_cmp(
        void                    *priv,
        struct list_head        *a,
        struct list_head        *b)
{
        struct xfs_bitmap_range *ap;
        struct xfs_bitmap_range *bp;

        ap = container_of(a, struct xfs_bitmap_range, list);
        bp = container_of(b, struct xfs_bitmap_range, list);

        if (ap->start > bp->start)
                return 1;
        if (ap->start < bp->start)
                return -1;
        return 0;
}

/*
 * Remove all the blocks mentioned in @sub from the extents in @bitmap.
 *
 * The intent is that callers will iterate the rmapbt for all of its records
 * for a given owner to generate @bitmap, and iterate all the blocks of the
 * metadata structures that are not being rebuilt and have the same rmapbt
 * owner to generate @sub.  This routine subtracts all the extents
 * mentioned in @sub from all the extents linked in @bitmap, which leaves
 * @bitmap as the list of blocks that are not accounted for, which we assume
 * are the dead blocks of the old metadata structure.  The blocks mentioned in
 * @bitmap can be reaped.
 *
 * This is the logical equivalent of bitmap &= ~sub.
 */
#define LEFT_ALIGNED    (1 << 0)
#define RIGHT_ALIGNED   (1 << 1)
int
xfs_bitmap_disunion(
        struct xfs_bitmap       *bitmap,
        struct xfs_bitmap       *sub)
{
        struct list_head        *lp;
        struct xfs_bitmap_range *br;
        struct xfs_bitmap_range *new_br;
        struct xfs_bitmap_range *sub_br;
        uint64_t                sub_start;
        uint64_t                sub_len;
        int                     state;
        int                     error = 0;

        if (list_empty(&bitmap->list) || list_empty(&sub->list))
                return 0;
        ASSERT(!list_empty(&sub->list));

        list_sort(NULL, &bitmap->list, xfs_bitmap_range_cmp);
        list_sort(NULL, &sub->list, xfs_bitmap_range_cmp);

        /*
         * Now that we've sorted both lists, we iterate bitmap once, rolling
         * forward through sub and/or bitmap as necessary until we find an
         * overlap or reach the end of either list.  We do not reset lp to the
         * head of bitmap nor do we reset sub_br to the head of sub.  The
         * list traversal is similar to merge sort, but we're deleting
         * instead.  In this manner we avoid O(n^2) operations.
         */
        sub_br = list_first_entry(&sub->list, struct xfs_bitmap_range,
                        list);
        lp = bitmap->list.next;
        while (lp != &bitmap->list) {
                br = list_entry(lp, struct xfs_bitmap_range, list);

                /*
                 * Advance sub_br and/or br until we find a pair that
                 * intersect or we run out of extents.
                 */
                while (sub_br->start + sub_br->len <= br->start) {
                        if (list_is_last(&sub_br->list, &sub->list))
                                goto out;
                        sub_br = list_next_entry(sub_br, list);
                }
                if (sub_br->start >= br->start + br->len) {
                        lp = lp->next;
                        continue;
                }

                /* trim sub_br to fit the extent we have */
                sub_start = sub_br->start;
                sub_len = sub_br->len;
                if (sub_br->start < br->start) {
                        sub_len -= br->start - sub_br->start;
                        sub_start = br->start;
                }
                if (sub_len > br->len)
                        sub_len = br->len;

                state = 0;
                if (sub_start == br->start)
                        state |= LEFT_ALIGNED;
                if (sub_start + sub_len == br->start + br->len)
                        state |= RIGHT_ALIGNED;
                switch (state) {
                case LEFT_ALIGNED:
                        /* Coincides with only the left. */
                        br->start += sub_len;
                        br->len -= sub_len;
                        break;
                case RIGHT_ALIGNED:
                        /* Coincides with only the right. */
                        br->len -= sub_len;
                        lp = lp->next;
                        break;
                case LEFT_ALIGNED | RIGHT_ALIGNED:
                        /* Total overlap, just delete br. */
                        lp = lp->next;
                        list_del(&br->list);
                        kmem_free(br);
                        break;
                case 0:
                        /*
                         * Deleting from the middle: add the new right extent
                         * and then shrink the left extent.
                         */
                        new_br = kmem_alloc(sizeof(struct xfs_bitmap_range),
                                        KM_MAYFAIL);
                        if (!new_br) {
                                error = -ENOMEM;
                                goto out;
                        }
                        INIT_LIST_HEAD(&new_br->list);
                        new_br->start = sub_start + sub_len;
                        new_br->len = br->start + br->len - new_br->start;
                        list_add(&new_br->list, &br->list);
                        br->len = sub_start - br->start;
                        lp = lp->next;
                        break;
                default:
                        ASSERT(0);
                        break;
                }
        }

out:
        return error;
}
#undef LEFT_ALIGNED
#undef RIGHT_ALIGNED
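
/*
 * Illustrative sketch (not part of the original file): a concrete disunion.
 * If @bitmap holds the single extent [0, 100) and @sub holds [10, 20) and
 * [50, 60), then after xfs_bitmap_disunion() @bitmap holds [0, 10),
 * [20, 50) and [60, 100), which is exactly bitmap &= ~sub.  The function
 * name below is hypothetical.
 */
static inline int
example_disunion(void)
{
        struct xfs_bitmap       bitmap;
        struct xfs_bitmap       sub;
        int                     error;

        xfs_bitmap_init(&bitmap);
        xfs_bitmap_init(&sub);

        error = xfs_bitmap_set(&bitmap, 0, 100);
        if (error)
                goto out;
        error = xfs_bitmap_set(&sub, 10, 10);
        if (error)
                goto out;
        error = xfs_bitmap_set(&sub, 50, 10);
        if (error)
                goto out;

        /* Subtract @sub from @bitmap; only the uncovered pieces remain. */
        error = xfs_bitmap_disunion(&bitmap, &sub);
out:
        xfs_bitmap_destroy(&sub);
        xfs_bitmap_destroy(&bitmap);
        return error;
}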

/*
 * Record all btree blocks seen while iterating all records of a btree.
 *
 * We know that the btree query_all function starts at the left edge and walks
 * towards the right edge of the tree.  Therefore, we know that we can walk up
 * the btree cursor towards the root; if the pointer for a given level points
 * to the first record/key in that block, we haven't seen this block before;
 * and therefore we need to remember that we saw this block in the btree.
 *
 * So if our btree is:
 *
 *    4
 *  / | \
 * 1  2  3
 *
 * Pretend for this example that each leaf block has 100 btree records.  For
 * the first btree record, we'll observe that bc_ptrs[0] == 1, so we record
 * that we saw block 1.  Then we observe that bc_ptrs[1] == 1, so we record
 * block 4.  The list is [1, 4].
 *
 * For the second btree record, we see that bc_ptrs[0] == 2, so we exit the
 * loop.  The list remains [1, 4].
 *
 * For the 101st btree record, we've moved onto leaf block 2.  Now
 * bc_ptrs[0] == 1 again, so we record that we saw block 2.  We see that
 * bc_ptrs[1] == 2, so we exit the loop.  The list is now [1, 4, 2].
 *
 * For the 102nd record, bc_ptrs[0] == 2, so we continue.
 *
 * For the 201st record, we've moved on to leaf block 3.  bc_ptrs[0] == 1, so
 * we add 3 to the list.  Now it is [1, 4, 2, 3].
 *
 * For the 300th record we just exit, with the list being [1, 4, 2, 3].
 */

/*
 * Record all the buffers pointed to by the btree cursor.  Callers already
 * engaged in a btree walk should call this function to capture the list of
 * blocks going from the leaf towards the root.
 */
int
xfs_bitmap_set_btcur_path(
        struct xfs_bitmap       *bitmap,
        struct xfs_btree_cur    *cur)
{
        struct xfs_buf          *bp;
        xfs_fsblock_t           fsb;
        int                     i;
        int                     error;

        for (i = 0; i < cur->bc_nlevels && cur->bc_ptrs[i] == 1; i++) {
                xfs_btree_get_block(cur, i, &bp);
                if (!bp)
                        continue;
                fsb = XFS_DADDR_TO_FSB(cur->bc_mp, bp->b_bn);
                error = xfs_bitmap_set(bitmap, fsb, 1);
                if (error)
                        return error;
        }

        return 0;
}
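
/*
 * Illustrative sketch (not part of the original file): this helper is meant
 * to be called from inside a btree record walk, for example from an
 * xfs_btree_query_all() callback, so that the path of blocks leading to each
 * newly visited leaf is recorded exactly once.  The callback name below is
 * hypothetical.
 */
static inline int
example_visit_rec(
        struct xfs_btree_cur    *cur,
        union xfs_btree_rec     *rec,
        void                    *priv)
{
        struct xfs_bitmap       *bitmap = priv;

        /* Remember every block on the path from this record to the root. */
        return xfs_bitmap_set_btcur_path(bitmap, cur);
}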

/* Collect a btree's block in the bitmap. */
STATIC int
xfs_bitmap_collect_btblock(
        struct xfs_btree_cur    *cur,
        int                     level,
        void                    *priv)
{
        struct xfs_bitmap       *bitmap = priv;
        struct xfs_buf          *bp;
        xfs_fsblock_t           fsbno;

        xfs_btree_get_block(cur, level, &bp);
        if (!bp)
                return 0;

        fsbno = XFS_DADDR_TO_FSB(cur->bc_mp, bp->b_bn);
        return xfs_bitmap_set(bitmap, fsbno, 1);
}

/* Walk the btree and mark the bitmap wherever a btree block is found. */
int
xfs_bitmap_set_btblocks(
        struct xfs_bitmap       *bitmap,
        struct xfs_btree_cur    *cur)
{
        return xfs_btree_visit_blocks(cur, xfs_bitmap_collect_btblock, bitmap);
}
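
/*
 * Illustrative sketch (not part of the original file): roughly how the
 * helpers above fit together during a repair.  @old_blocks would already
 * contain every block the rmapbt attributes to the owner being rebuilt;
 * here we only show subtracting the blocks of a btree that is being kept,
 * so whatever remains in @old_blocks afterwards can be reaped.  The
 * function and parameter names are hypothetical.
 */
static inline int
example_find_reapable_blocks(
        struct xfs_bitmap       *old_blocks,
        struct xfs_btree_cur    *keep_cur)
{
        struct xfs_bitmap       keep_blocks;
        int                     error;

        xfs_bitmap_init(&keep_blocks);

        /* Record every block of the btree we intend to keep. */
        error = xfs_bitmap_set_btblocks(&keep_blocks, keep_cur);
        if (error)
                goto out;

        /* old_blocks &= ~keep_blocks; what is left over is reapable. */
        error = xfs_bitmap_disunion(old_blocks, &keep_blocks);
out:
        xfs_bitmap_destroy(&keep_blocks);
        return error;
}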
