root/fs/xfs/libxfs/xfs_bmap_btree.c

DEFINITIONS

This source file includes the following definitions.
  1. xfs_bmdr_to_bmbt
  2. xfs_bmbt_disk_get_all
  3. xfs_bmbt_disk_get_blockcount
  4. xfs_bmbt_disk_get_startoff
  5. xfs_bmbt_disk_set_all
  6. xfs_bmbt_to_bmdr
  7. xfs_bmbt_dup_cursor
  8. xfs_bmbt_update_cursor
  9. xfs_bmbt_alloc_block
  10. xfs_bmbt_free_block
  11. xfs_bmbt_get_minrecs
  12. xfs_bmbt_get_maxrecs
  13. xfs_bmbt_get_dmaxrecs
  14. xfs_bmbt_init_key_from_rec
  15. xfs_bmbt_init_high_key_from_rec
  16. xfs_bmbt_init_rec_from_cur
  17. xfs_bmbt_init_ptr_from_cur
  18. xfs_bmbt_key_diff
  19. xfs_bmbt_diff_two_keys
  20. xfs_bmbt_verify
  21. xfs_bmbt_read_verify
  22. xfs_bmbt_write_verify
  23. xfs_bmbt_keys_inorder
  24. xfs_bmbt_recs_inorder
  25. xfs_bmbt_init_cursor
  26. xfs_bmbt_maxrecs
  27. xfs_bmdr_maxrecs
  28. xfs_bmbt_change_owner
  29. xfs_bmbt_calc_size

// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2000-2003,2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_bit.h"
#include "xfs_mount.h"
#include "xfs_inode.h"
#include "xfs_trans.h"
#include "xfs_alloc.h"
#include "xfs_btree.h"
#include "xfs_bmap_btree.h"
#include "xfs_bmap.h"
#include "xfs_error.h"
#include "xfs_quota.h"
#include "xfs_trace.h"
#include "xfs_rmap.h"

/*
 * Convert on-disk form of btree root to in-memory form.
 */
void
xfs_bmdr_to_bmbt(
        struct xfs_inode        *ip,
        xfs_bmdr_block_t        *dblock,
        int                     dblocklen,
        struct xfs_btree_block  *rblock,
        int                     rblocklen)
{
        struct xfs_mount        *mp = ip->i_mount;
        int                     dmxr;
        xfs_bmbt_key_t          *fkp;
        __be64                  *fpp;
        xfs_bmbt_key_t          *tkp;
        __be64                  *tpp;

        xfs_btree_init_block_int(mp, rblock, XFS_BUF_DADDR_NULL,
                                 XFS_BTNUM_BMAP, 0, 0, ip->i_ino,
                                 XFS_BTREE_LONG_PTRS);
        rblock->bb_level = dblock->bb_level;
        ASSERT(be16_to_cpu(rblock->bb_level) > 0);
        rblock->bb_numrecs = dblock->bb_numrecs;
        dmxr = xfs_bmdr_maxrecs(dblocklen, 0);
        fkp = XFS_BMDR_KEY_ADDR(dblock, 1);
        tkp = XFS_BMBT_KEY_ADDR(mp, rblock, 1);
        fpp = XFS_BMDR_PTR_ADDR(dblock, 1, dmxr);
        tpp = XFS_BMAP_BROOT_PTR_ADDR(mp, rblock, 1, rblocklen);
        dmxr = be16_to_cpu(dblock->bb_numrecs);
        memcpy(tkp, fkp, sizeof(*fkp) * dmxr);
        memcpy(tpp, fpp, sizeof(*fpp) * dmxr);
}

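/*
 * Convert the on-disk (big-endian, packed) form of a bmap extent record to
 * the in-memory form.  The 128-bit record packs four fields:
 *   l0 bit  63                        extent state flag (1 = unwritten)
 *   l0 bits 62..9                     startoff   (54 bits)
 *   l0 bits 8..0, l1 bits 63..21      startblock (52 bits)
 *   l1 bits 20..0                     blockcount (21 bits)
 */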
void
xfs_bmbt_disk_get_all(
        struct xfs_bmbt_rec     *rec,
        struct xfs_bmbt_irec    *irec)
{
        uint64_t                l0 = get_unaligned_be64(&rec->l0);
        uint64_t                l1 = get_unaligned_be64(&rec->l1);

        irec->br_startoff = (l0 & xfs_mask64lo(64 - BMBT_EXNTFLAG_BITLEN)) >> 9;
        irec->br_startblock = ((l0 & xfs_mask64lo(9)) << 43) | (l1 >> 21);
        irec->br_blockcount = l1 & xfs_mask64lo(21);
        if (l0 >> (64 - BMBT_EXNTFLAG_BITLEN))
                irec->br_state = XFS_EXT_UNWRITTEN;
        else
                irec->br_state = XFS_EXT_NORM;
}

/*
 * Extract the blockcount field from an on disk bmap extent record.
 */
xfs_filblks_t
xfs_bmbt_disk_get_blockcount(
        xfs_bmbt_rec_t  *r)
{
        return (xfs_filblks_t)(be64_to_cpu(r->l1) & xfs_mask64lo(21));
}

/*
 * Extract the startoff field from a disk format bmap extent record.
 */
xfs_fileoff_t
xfs_bmbt_disk_get_startoff(
        xfs_bmbt_rec_t  *r)
{
        return ((xfs_fileoff_t)be64_to_cpu(r->l0) &
                 xfs_mask64lo(64 - BMBT_EXNTFLAG_BITLEN)) >> 9;
}

/*
 * Set all the fields in a bmap extent record from the uncompressed form.
 */
void
xfs_bmbt_disk_set_all(
        struct xfs_bmbt_rec     *r,
        struct xfs_bmbt_irec    *s)
{
        int                     extent_flag = (s->br_state != XFS_EXT_NORM);

        ASSERT(s->br_state == XFS_EXT_NORM || s->br_state == XFS_EXT_UNWRITTEN);
        ASSERT(!(s->br_startoff & xfs_mask64hi(64-BMBT_STARTOFF_BITLEN)));
        ASSERT(!(s->br_blockcount & xfs_mask64hi(64-BMBT_BLOCKCOUNT_BITLEN)));
        ASSERT(!(s->br_startblock & xfs_mask64hi(64-BMBT_STARTBLOCK_BITLEN)));

        put_unaligned_be64(
                ((xfs_bmbt_rec_base_t)extent_flag << 63) |
                 ((xfs_bmbt_rec_base_t)s->br_startoff << 9) |
                 ((xfs_bmbt_rec_base_t)s->br_startblock >> 43), &r->l0);
        put_unaligned_be64(
                ((xfs_bmbt_rec_base_t)s->br_startblock << 21) |
                 ((xfs_bmbt_rec_base_t)s->br_blockcount &
                  (xfs_bmbt_rec_base_t)xfs_mask64lo(21)), &r->l1);
}

/*
 * Convert in-memory form of btree root to on-disk form.
 */
void
xfs_bmbt_to_bmdr(
        struct xfs_mount        *mp,
        struct xfs_btree_block  *rblock,
        int                     rblocklen,
        xfs_bmdr_block_t        *dblock,
        int                     dblocklen)
{
        int                     dmxr;
        xfs_bmbt_key_t          *fkp;
        __be64                  *fpp;
        xfs_bmbt_key_t          *tkp;
        __be64                  *tpp;

        if (xfs_sb_version_hascrc(&mp->m_sb)) {
                ASSERT(rblock->bb_magic == cpu_to_be32(XFS_BMAP_CRC_MAGIC));
                ASSERT(uuid_equal(&rblock->bb_u.l.bb_uuid,
                       &mp->m_sb.sb_meta_uuid));
                ASSERT(rblock->bb_u.l.bb_blkno ==
                       cpu_to_be64(XFS_BUF_DADDR_NULL));
        } else
                ASSERT(rblock->bb_magic == cpu_to_be32(XFS_BMAP_MAGIC));
        ASSERT(rblock->bb_u.l.bb_leftsib == cpu_to_be64(NULLFSBLOCK));
        ASSERT(rblock->bb_u.l.bb_rightsib == cpu_to_be64(NULLFSBLOCK));
        ASSERT(rblock->bb_level != 0);
        dblock->bb_level = rblock->bb_level;
        dblock->bb_numrecs = rblock->bb_numrecs;
        dmxr = xfs_bmdr_maxrecs(dblocklen, 0);
        fkp = XFS_BMBT_KEY_ADDR(mp, rblock, 1);
        tkp = XFS_BMDR_KEY_ADDR(dblock, 1);
        fpp = XFS_BMAP_BROOT_PTR_ADDR(mp, rblock, 1, rblocklen);
        tpp = XFS_BMDR_PTR_ADDR(dblock, 1, dmxr);
        dmxr = be16_to_cpu(dblock->bb_numrecs);
        memcpy(tkp, fkp, sizeof(*fkp) * dmxr);
        memcpy(tpp, fpp, sizeof(*fpp) * dmxr);
}

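/*
 * Duplicate a bmap btree cursor: create a new cursor for the same
 * transaction, inode and fork, and carry over the private flags.
 */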
STATIC struct xfs_btree_cur *
xfs_bmbt_dup_cursor(
        struct xfs_btree_cur    *cur)
{
        struct xfs_btree_cur    *new;

        new = xfs_bmbt_init_cursor(cur->bc_mp, cur->bc_tp,
                        cur->bc_private.b.ip, cur->bc_private.b.whichfork);

        /*
         * Copy the flags values, since init cursor doesn't get them.
         */
        new->bc_private.b.flags = cur->bc_private.b.flags;

        return new;
}

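/*
 * Update one cursor from another: copy the allocated block count and the
 * transaction's firstblock from the source cursor, then clear the source's
 * allocated count.
 */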
STATIC void
xfs_bmbt_update_cursor(
        struct xfs_btree_cur    *src,
        struct xfs_btree_cur    *dst)
{
        ASSERT((dst->bc_tp->t_firstblock != NULLFSBLOCK) ||
               (dst->bc_private.b.ip->i_d.di_flags & XFS_DIFLAG_REALTIME));

        dst->bc_private.b.allocated += src->bc_private.b.allocated;
        dst->bc_tp->t_firstblock = src->bc_tp->t_firstblock;

        src->bc_private.b.allocated = 0;
}

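/*
 * Allocate a new block for the bmap btree and account it against the
 * owning inode's block count and quota.
 */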
STATIC int
xfs_bmbt_alloc_block(
        struct xfs_btree_cur    *cur,
        union xfs_btree_ptr     *start,
        union xfs_btree_ptr     *new,
        int                     *stat)
{
        xfs_alloc_arg_t         args;           /* block allocation args */
        int                     error;          /* error return value */

        memset(&args, 0, sizeof(args));
        args.tp = cur->bc_tp;
        args.mp = cur->bc_mp;
        args.fsbno = cur->bc_tp->t_firstblock;
        xfs_rmap_ino_bmbt_owner(&args.oinfo, cur->bc_private.b.ip->i_ino,
                        cur->bc_private.b.whichfork);

        if (args.fsbno == NULLFSBLOCK) {
                args.fsbno = be64_to_cpu(start->l);
                args.type = XFS_ALLOCTYPE_START_BNO;
                /*
                 * Make sure there is sufficient room left in the AG to
                 * complete a full tree split for an extent insert.  If
                 * we are converting the middle part of an extent then
                 * we may need space for two tree splits.
                 *
                 * We are relying on the caller to make the correct block
                 * reservation for this operation to succeed.  If the
                 * reservation amount is insufficient then we may fail a
                 * block allocation here and corrupt the filesystem.
                 */
                args.minleft = args.tp->t_blk_res;
        } else if (cur->bc_tp->t_flags & XFS_TRANS_LOWMODE) {
                args.type = XFS_ALLOCTYPE_START_BNO;
        } else {
                args.type = XFS_ALLOCTYPE_NEAR_BNO;
        }

        args.minlen = args.maxlen = args.prod = 1;
        args.wasdel = cur->bc_private.b.flags & XFS_BTCUR_BPRV_WASDEL;
        if (!args.wasdel && args.tp->t_blk_res == 0) {
                error = -ENOSPC;
                goto error0;
        }
        error = xfs_alloc_vextent(&args);
        if (error)
                goto error0;

        if (args.fsbno == NULLFSBLOCK && args.minleft) {
                /*
                 * Could not find an AG with enough free space to satisfy
                 * a full btree split.  Try again and if
                 * successful activate the lowspace algorithm.
                 */
                args.fsbno = 0;
                args.type = XFS_ALLOCTYPE_FIRST_AG;
                error = xfs_alloc_vextent(&args);
                if (error)
                        goto error0;
                cur->bc_tp->t_flags |= XFS_TRANS_LOWMODE;
        }
        if (WARN_ON_ONCE(args.fsbno == NULLFSBLOCK)) {
                *stat = 0;
                return 0;
        }

        ASSERT(args.len == 1);
        cur->bc_tp->t_firstblock = args.fsbno;
        cur->bc_private.b.allocated++;
        cur->bc_private.b.ip->i_d.di_nblocks++;
        xfs_trans_log_inode(args.tp, cur->bc_private.b.ip, XFS_ILOG_CORE);
        xfs_trans_mod_dquot_byino(args.tp, cur->bc_private.b.ip,
                        XFS_TRANS_DQ_BCOUNT, 1L);

        new->l = cpu_to_be64(args.fsbno);

        *stat = 1;
        return 0;

 error0:
        return error;
}

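/*
 * Free a bmap btree block: queue the block for freeing and adjust the
 * owning inode's block count and quota.
 */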
STATIC int
xfs_bmbt_free_block(
        struct xfs_btree_cur    *cur,
        struct xfs_buf          *bp)
{
        struct xfs_mount        *mp = cur->bc_mp;
        struct xfs_inode        *ip = cur->bc_private.b.ip;
        struct xfs_trans        *tp = cur->bc_tp;
        xfs_fsblock_t           fsbno = XFS_DADDR_TO_FSB(mp, XFS_BUF_ADDR(bp));
        struct xfs_owner_info   oinfo;

        xfs_rmap_ino_bmbt_owner(&oinfo, ip->i_ino, cur->bc_private.b.whichfork);
        xfs_bmap_add_free(cur->bc_tp, fsbno, 1, &oinfo);
        ip->i_d.di_nblocks--;

        xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
        xfs_trans_mod_dquot_byino(tp, ip, XFS_TRANS_DQ_BCOUNT, -1L);
        return 0;
}

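/*
 * Minimum number of records per block for the given level.  The root block
 * lives in the inode fork, so its limit is derived from the current size of
 * the in-core root buffer rather than from the per-mount table.
 */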
STATIC int
xfs_bmbt_get_minrecs(
        struct xfs_btree_cur    *cur,
        int                     level)
{
        if (level == cur->bc_nlevels - 1) {
                struct xfs_ifork        *ifp;

                ifp = XFS_IFORK_PTR(cur->bc_private.b.ip,
                                    cur->bc_private.b.whichfork);

                return xfs_bmbt_maxrecs(cur->bc_mp,
                                        ifp->if_broot_bytes, level == 0) / 2;
        }

        return cur->bc_mp->m_bmap_dmnr[level != 0];
}

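/*
 * Maximum number of records per block for the given level, again sizing the
 * in-inode root from the in-core root buffer.
 */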
int
xfs_bmbt_get_maxrecs(
        struct xfs_btree_cur    *cur,
        int                     level)
{
        if (level == cur->bc_nlevels - 1) {
                struct xfs_ifork        *ifp;

                ifp = XFS_IFORK_PTR(cur->bc_private.b.ip,
                                    cur->bc_private.b.whichfork);

                return xfs_bmbt_maxrecs(cur->bc_mp,
                                        ifp->if_broot_bytes, level == 0);
        }

        return cur->bc_mp->m_bmap_dmxr[level != 0];
}

/*
 * Get the maximum records we could store in the on-disk format.
 *
 * For non-root nodes this is equivalent to xfs_bmbt_get_maxrecs, but
 * for the root node this checks the available space in the dinode fork
 * so that we can resize the in-memory buffer to match it.  After a
 * resize to the maximum size this function returns the same value
 * as xfs_bmbt_get_maxrecs for the root node, too.
 */
STATIC int
xfs_bmbt_get_dmaxrecs(
        struct xfs_btree_cur    *cur,
        int                     level)
{
        if (level != cur->bc_nlevels - 1)
                return cur->bc_mp->m_bmap_dmxr[level != 0];
        return xfs_bmdr_maxrecs(cur->bc_private.b.forksize, level == 0);
}

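/* Initialize the (low) key from a record's startoff. */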
STATIC void
xfs_bmbt_init_key_from_rec(
        union xfs_btree_key     *key,
        union xfs_btree_rec     *rec)
{
        key->bmbt.br_startoff =
                cpu_to_be64(xfs_bmbt_disk_get_startoff(&rec->bmbt));
}

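/* Initialize the high key from the last file offset covered by the record. */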
STATIC void
xfs_bmbt_init_high_key_from_rec(
        union xfs_btree_key     *key,
        union xfs_btree_rec     *rec)
{
        key->bmbt.br_startoff = cpu_to_be64(
                        xfs_bmbt_disk_get_startoff(&rec->bmbt) +
                        xfs_bmbt_disk_get_blockcount(&rec->bmbt) - 1);
}

STATIC void
xfs_bmbt_init_rec_from_cur(
        struct xfs_btree_cur    *cur,
        union xfs_btree_rec     *rec)
{
        xfs_bmbt_disk_set_all(&rec->bmbt, &cur->bc_rec.b);
}

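/* The bmbt root lives in the inode fork, so the root pointer is always zero. */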
STATIC void
xfs_bmbt_init_ptr_from_cur(
        struct xfs_btree_cur    *cur,
        union xfs_btree_ptr     *ptr)
{
        ptr->l = 0;
}

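/*
 * Return the difference between the given key's startoff and the startoff
 * of the record in the cursor.
 */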
STATIC int64_t
xfs_bmbt_key_diff(
        struct xfs_btree_cur    *cur,
        union xfs_btree_key     *key)
{
        return (int64_t)be64_to_cpu(key->bmbt.br_startoff) -
                                      cur->bc_rec.b.br_startoff;
}

STATIC int64_t
xfs_bmbt_diff_two_keys(
        struct xfs_btree_cur    *cur,
        union xfs_btree_key     *k1,
        union xfs_btree_key     *k2)
{
        uint64_t                a = be64_to_cpu(k1->bmbt.br_startoff);
        uint64_t                b = be64_to_cpu(k2->bmbt.br_startoff);

        /*
         * Note: This routine previously cast a and b to int64 and subtracted
         * them to generate a result.  This led to problems if b was the
         * "maximum" key value (all ones), which was then treated as a negative
         * number, hence this somewhat less efficient version.
         */
        if (a > b)
                return 1;
        if (b > a)
                return -1;
        return 0;
}

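/*
 * Verify the structure of a bmbt block: magic number, v5 header (on CRC
 * filesystems), level, and the generic long-pointer block checks.
 */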
static xfs_failaddr_t
xfs_bmbt_verify(
        struct xfs_buf          *bp)
{
        struct xfs_mount        *mp = bp->b_mount;
        struct xfs_btree_block  *block = XFS_BUF_TO_BLOCK(bp);
        xfs_failaddr_t          fa;
        unsigned int            level;

        if (!xfs_verify_magic(bp, block->bb_magic))
                return __this_address;

        if (xfs_sb_version_hascrc(&mp->m_sb)) {
                /*
                 * XXX: need a better way of verifying the owner here. Right now
                 * just make sure there has been one set.
                 */
                fa = xfs_btree_lblock_v5hdr_verify(bp, XFS_RMAP_OWN_UNKNOWN);
                if (fa)
                        return fa;
        }

        /*
         * numrecs and level verification.
         *
         * We don't know what fork we belong to, so just verify that the level
         * is less than the maximum of the two. Later checks will be more
         * precise.
         */
        level = be16_to_cpu(block->bb_level);
        if (level > max(mp->m_bm_maxlevels[0], mp->m_bm_maxlevels[1]))
                return __this_address;

        return xfs_btree_lblock_verify(bp, mp->m_bmap_dmxr[level != 0]);
}

static void
xfs_bmbt_read_verify(
        struct xfs_buf  *bp)
{
        xfs_failaddr_t  fa;

        if (!xfs_btree_lblock_verify_crc(bp))
                xfs_verifier_error(bp, -EFSBADCRC, __this_address);
        else {
                fa = xfs_bmbt_verify(bp);
                if (fa)
                        xfs_verifier_error(bp, -EFSCORRUPTED, fa);
        }

        if (bp->b_error)
                trace_xfs_btree_corrupt(bp, _RET_IP_);
}

static void
xfs_bmbt_write_verify(
        struct xfs_buf  *bp)
{
        xfs_failaddr_t  fa;

        fa = xfs_bmbt_verify(bp);
        if (fa) {
                trace_xfs_btree_corrupt(bp, _RET_IP_);
                xfs_verifier_error(bp, -EFSCORRUPTED, fa);
                return;
        }
        xfs_btree_lblock_calc_crc(bp);
}

const struct xfs_buf_ops xfs_bmbt_buf_ops = {
        .name = "xfs_bmbt",
        .magic = { cpu_to_be32(XFS_BMAP_MAGIC),
                   cpu_to_be32(XFS_BMAP_CRC_MAGIC) },
        .verify_read = xfs_bmbt_read_verify,
        .verify_write = xfs_bmbt_write_verify,
        .verify_struct = xfs_bmbt_verify,
};

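/* Check that key k1 sorts strictly before key k2. */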
STATIC int
xfs_bmbt_keys_inorder(
        struct xfs_btree_cur    *cur,
        union xfs_btree_key     *k1,
        union xfs_btree_key     *k2)
{
        return be64_to_cpu(k1->bmbt.br_startoff) <
                be64_to_cpu(k2->bmbt.br_startoff);
}

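/* Check that record r1 ends at or before the start of record r2. */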
STATIC int
xfs_bmbt_recs_inorder(
        struct xfs_btree_cur    *cur,
        union xfs_btree_rec     *r1,
        union xfs_btree_rec     *r2)
{
        return xfs_bmbt_disk_get_startoff(&r1->bmbt) +
                xfs_bmbt_disk_get_blockcount(&r1->bmbt) <=
                xfs_bmbt_disk_get_startoff(&r2->bmbt);
}

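/* Callback table hooking the bmap btree into the generic btree code. */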
static const struct xfs_btree_ops xfs_bmbt_ops = {
        .rec_len                = sizeof(xfs_bmbt_rec_t),
        .key_len                = sizeof(xfs_bmbt_key_t),

        .dup_cursor             = xfs_bmbt_dup_cursor,
        .update_cursor          = xfs_bmbt_update_cursor,
        .alloc_block            = xfs_bmbt_alloc_block,
        .free_block             = xfs_bmbt_free_block,
        .get_maxrecs            = xfs_bmbt_get_maxrecs,
        .get_minrecs            = xfs_bmbt_get_minrecs,
        .get_dmaxrecs           = xfs_bmbt_get_dmaxrecs,
        .init_key_from_rec      = xfs_bmbt_init_key_from_rec,
        .init_high_key_from_rec = xfs_bmbt_init_high_key_from_rec,
        .init_rec_from_cur      = xfs_bmbt_init_rec_from_cur,
        .init_ptr_from_cur      = xfs_bmbt_init_ptr_from_cur,
        .key_diff               = xfs_bmbt_key_diff,
        .diff_two_keys          = xfs_bmbt_diff_two_keys,
        .buf_ops                = &xfs_bmbt_buf_ops,
        .keys_inorder           = xfs_bmbt_keys_inorder,
        .recs_inorder           = xfs_bmbt_recs_inorder,
};

/*
 * Allocate a new bmap btree cursor.
 */
struct xfs_btree_cur *                          /* new bmap btree cursor */
xfs_bmbt_init_cursor(
        struct xfs_mount        *mp,            /* file system mount point */
        struct xfs_trans        *tp,            /* transaction pointer */
        struct xfs_inode        *ip,            /* inode owning the btree */
        int                     whichfork)      /* data or attr fork */
{
        struct xfs_ifork        *ifp = XFS_IFORK_PTR(ip, whichfork);
        struct xfs_btree_cur    *cur;

        ASSERT(whichfork != XFS_COW_FORK);

        cur = kmem_zone_zalloc(xfs_btree_cur_zone, KM_NOFS);

        cur->bc_tp = tp;
        cur->bc_mp = mp;
        cur->bc_nlevels = be16_to_cpu(ifp->if_broot->bb_level) + 1;
        cur->bc_btnum = XFS_BTNUM_BMAP;
        cur->bc_blocklog = mp->m_sb.sb_blocklog;
        cur->bc_statoff = XFS_STATS_CALC_INDEX(xs_bmbt_2);

        cur->bc_ops = &xfs_bmbt_ops;
        cur->bc_flags = XFS_BTREE_LONG_PTRS | XFS_BTREE_ROOT_IN_INODE;
        if (xfs_sb_version_hascrc(&mp->m_sb))
                cur->bc_flags |= XFS_BTREE_CRC_BLOCKS;

        cur->bc_private.b.forksize = XFS_IFORK_SIZE(ip, whichfork);
        cur->bc_private.b.ip = ip;
        cur->bc_private.b.allocated = 0;
        cur->bc_private.b.flags = 0;
        cur->bc_private.b.whichfork = whichfork;

        return cur;
}

/*
 * Calculate number of records in a bmap btree block.
 */
int
xfs_bmbt_maxrecs(
        struct xfs_mount        *mp,
        int                     blocklen,
        int                     leaf)
{
        blocklen -= XFS_BMBT_BLOCK_LEN(mp);

        if (leaf)
                return blocklen / sizeof(xfs_bmbt_rec_t);
        return blocklen / (sizeof(xfs_bmbt_key_t) + sizeof(xfs_bmbt_ptr_t));
}

/*
 * Calculate number of records in a bmap btree inode root.
 */
int
xfs_bmdr_maxrecs(
        int                     blocklen,
        int                     leaf)
{
        blocklen -= sizeof(xfs_bmdr_block_t);

        if (leaf)
                return blocklen / sizeof(xfs_bmdr_rec_t);
        return blocklen / (sizeof(xfs_bmdr_key_t) + sizeof(xfs_bmdr_ptr_t));
}

/*
 * Change the owner of a btree format fork of the inode passed in.  Change it
 * to the owner that is passed in so that we can change owners before or after
 * we switch forks between inodes.  The operation that the caller is doing will
 * determine whether it needs to change owner before or after the switch.
 *
 * For demand paged transactional modification, the fork switch should be done
 * after reading in all the blocks, modifying them and pinning them in the
 * transaction. For modification when the buffers are already pinned in memory,
 * the fork switch can be done before changing the owner as we won't need to
 * validate the owner until the btree buffers are unpinned and writes can occur
 * again.
 *
 * For recovery based ownership change, there is no transactional context and
 * so a buffer list must be supplied so that we can record the buffers that we
 * modified for the caller to issue IO on.
 */
int
xfs_bmbt_change_owner(
        struct xfs_trans        *tp,
        struct xfs_inode        *ip,
        int                     whichfork,
        xfs_ino_t               new_owner,
        struct list_head        *buffer_list)
{
        struct xfs_btree_cur    *cur;
        int                     error;

        ASSERT(tp || buffer_list);
        ASSERT(!(tp && buffer_list));
        if (whichfork == XFS_DATA_FORK)
                ASSERT(ip->i_d.di_format == XFS_DINODE_FMT_BTREE);
        else
                ASSERT(ip->i_d.di_aformat == XFS_DINODE_FMT_BTREE);

        cur = xfs_bmbt_init_cursor(ip->i_mount, tp, ip, whichfork);
        if (!cur)
                return -ENOMEM;
        cur->bc_private.b.flags |= XFS_BTCUR_BPRV_INVALID_OWNER;

        error = xfs_btree_change_owner(cur, new_owner, buffer_list);
        xfs_btree_del_cursor(cur, error);
        return error;
}

/* Calculate the bmap btree size for some records. */
unsigned long long
xfs_bmbt_calc_size(
        struct xfs_mount        *mp,
        unsigned long long      len)
{
        return xfs_btree_calc_size(mp->m_bmap_dmnr, len);
}
