fs/xfs/xfs_iwalk.c


DEFINITIONS

This source file includes the following definitions.
  1. xfs_iwalk_ichunk_ra
  2. xfs_iwalk_adjust_start
  3. xfs_iwalk_alloc
  4. xfs_iwalk_free
  5. xfs_iwalk_ag_recs
  6. xfs_iwalk_del_inobt
  7. xfs_iwalk_ag_start
  8. xfs_iwalk_run_callbacks
  9. xfs_iwalk_ag
  10. xfs_iwalk_prefetch
  11. xfs_iwalk
  12. xfs_iwalk_ag_work
  13. xfs_iwalk_threaded
  14. xfs_inobt_walk_prefetch
  15. xfs_inobt_walk

// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright (C) 2019 Oracle.  All Rights Reserved.
 * Author: Darrick J. Wong <darrick.wong@oracle.com>
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_mount.h"
#include "xfs_inode.h"
#include "xfs_btree.h"
#include "xfs_ialloc.h"
#include "xfs_ialloc_btree.h"
#include "xfs_iwalk.h"
#include "xfs_error.h"
#include "xfs_trace.h"
#include "xfs_icache.h"
#include "xfs_health.h"
#include "xfs_trans.h"
#include "xfs_pwork.h"

/*
 * Walking Inodes in the Filesystem
 * ================================
 *
 * This iterator function walks a subset of filesystem inodes in increasing
 * order from @startino until there are no more inodes.  For each allocated
 * inode it finds, it calls a walk function with the relevant inode number and
 * a pointer to caller-provided data.  The walk function can return 0 to
 * continue the iteration or a negative error code to stop it; -ECANCELED is
 * the conventional code for a deliberate early stop.  Whichever nonzero value
 * stopped the iteration is returned to the caller.
 *
 * Internally, we allow the walk function to do anything, which means that we
 * cannot maintain the inobt cursor or our lock on the AGI buffer.  We
 * therefore cache the inobt records in kernel memory and only call the walk
 * function when our memory buffer is full.  @nr_recs is the number of records
 * that we've cached, and @sz_recs is the size of our cache.
 *
 * It is the responsibility of the walk function to ensure it accesses
 * allocated inodes, as the inobt records may be stale by the time they are
 * acted upon.
 */

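/*
 * For illustration only: a minimal sketch of a walk function obeying the
 * return-value contract above.  The function and its counter structure are
 * hypothetical, not part of this file.
 */
#if 0   /* example */
struct xfs_example_counter {
        uint64_t                counted;
        uint64_t                limit;
};

static int
xfs_example_count_fn(
        struct xfs_mount        *mp,
        struct xfs_trans        *tp,
        xfs_ino_t               ino,
        void                    *data)
{
        struct xfs_example_counter      *ctr = data;

        if (++ctr->counted >= ctr->limit)
                return -ECANCELED;      /* deliberate early stop */
        return 0;                       /* keep walking */
}
#endif
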
struct xfs_iwalk_ag {
        /* parallel work control data; will be null if single threaded */
        struct xfs_pwork                pwork;

        struct xfs_mount                *mp;
        struct xfs_trans                *tp;

        /* Where do we start the traversal? */
        xfs_ino_t                       startino;

        /* Array of inobt records we cache. */
        struct xfs_inobt_rec_incore     *recs;

        /* Number of entries allocated for the @recs array. */
        unsigned int                    sz_recs;

        /* Number of entries in the @recs array that are in use. */
        unsigned int                    nr_recs;

        /* Inode walk function and data pointer. */
        xfs_iwalk_fn                    iwalk_fn;
        xfs_inobt_walk_fn               inobt_walk_fn;
        void                            *data;

        /*
         * Make it look like the inodes up to startino are free so that
         * bulkstat can start its inode iteration at the correct place without
         * needing to special case everywhere.
         */
        unsigned int                    trim_start:1;

        /* Skip empty inobt records? */
        unsigned int                    skip_empty:1;
};

/*
 * Loop over all clusters in a chunk for a given incore inode allocation btree
 * record.  Do a readahead if there are any allocated inodes in that cluster.
 */
STATIC void
xfs_iwalk_ichunk_ra(
        struct xfs_mount                *mp,
        xfs_agnumber_t                  agno,
        struct xfs_inobt_rec_incore     *irec)
{
        struct xfs_ino_geometry         *igeo = M_IGEO(mp);
        xfs_agblock_t                   agbno;
        struct blk_plug                 plug;
        int                             i;      /* inode chunk index */

        agbno = XFS_AGINO_TO_AGBNO(mp, irec->ir_startino);

        blk_start_plug(&plug);
        for (i = 0; i < XFS_INODES_PER_CHUNK; i += igeo->inodes_per_cluster) {
                xfs_inofree_t   imask;

                imask = xfs_inobt_maskn(i, igeo->inodes_per_cluster);
                if (imask & ~irec->ir_free) {
                        xfs_btree_reada_bufs(mp, agno, agbno,
                                        igeo->blocks_per_cluster,
                                        &xfs_inode_buf_ops);
                }
                agbno += igeo->blocks_per_cluster;
        }
        blk_finish_plug(&plug);
}

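/*
 * Worked example of the mask test above (a sketch, assuming 64-inode chunks
 * and 16-inode clusters): on the second pass, i == 16 and
 * xfs_inobt_maskn(16, 16) sets bits 16-31.  Since ir_free has a bit set for
 * each *free* inode, imask & ~ir_free is nonzero iff at least one inode in
 * that cluster is allocated, which is the only case worth reading ahead.
 */
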
/*
 * Set the bits in @irec's free mask that correspond to the inodes before
 * @agino so that we skip them.  This is how we restart an inode walk that was
 * interrupted in the middle of an inode record.
 */
STATIC void
xfs_iwalk_adjust_start(
        xfs_agino_t                     agino,  /* starting inode of chunk */
        struct xfs_inobt_rec_incore     *irec)  /* btree record */
{
        int                             idx;    /* index into inode chunk */
        int                             i;

        idx = agino - irec->ir_startino;

        /*
         * The record overlaps our starting point, so some inodes in this
         * chunk precede @agino.  Mark those uninteresting inodes free: bump
         * ir_freecount for each one that is currently allocated, then set
         * their bits in the free mask so the walk skips them.
         */
        for (i = 0; i < idx; i++) {
                if (XFS_INOBT_MASK(i) & ~irec->ir_free)
                        irec->ir_freecount++;
        }

        irec->ir_free |= xfs_inobt_maskn(0, idx);
}

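/*
 * Worked example (a sketch): if irec->ir_startino == 64 and @agino == 70,
 * then idx == 6.  Bits 0-5 of ir_free are forced on by xfs_inobt_maskn(0, 6),
 * and ir_freecount grows by one for each of those six inodes that was
 * actually allocated, so the record stays self-consistent while hiding
 * inodes 64-69 from the walk.
 */
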
/* Allocate memory for a walk. */
STATIC int
xfs_iwalk_alloc(
        struct xfs_iwalk_ag     *iwag)
{
        size_t                  size;

        ASSERT(iwag->recs == NULL);
        iwag->nr_recs = 0;

        /* Allocate a prefetch buffer for inobt records. */
        size = iwag->sz_recs * sizeof(struct xfs_inobt_rec_incore);
        iwag->recs = kmem_alloc(size, KM_MAYFAIL);
        if (iwag->recs == NULL)
                return -ENOMEM;

        return 0;
}

/* Free memory we allocated for a walk. */
STATIC void
xfs_iwalk_free(
        struct xfs_iwalk_ag     *iwag)
{
        kmem_free(iwag->recs);
        iwag->recs = NULL;
}

/* For each in-use inode in each cached inobt record, call our function. */
STATIC int
xfs_iwalk_ag_recs(
        struct xfs_iwalk_ag             *iwag)
{
        struct xfs_mount                *mp = iwag->mp;
        struct xfs_trans                *tp = iwag->tp;
        xfs_ino_t                       ino;
        unsigned int                    i, j;
        xfs_agnumber_t                  agno;
        int                             error;

        agno = XFS_INO_TO_AGNO(mp, iwag->startino);
        for (i = 0; i < iwag->nr_recs; i++) {
                struct xfs_inobt_rec_incore     *irec = &iwag->recs[i];

                trace_xfs_iwalk_ag_rec(mp, agno, irec);

                if (xfs_pwork_want_abort(&iwag->pwork))
                        return 0;

                if (iwag->inobt_walk_fn) {
                        error = iwag->inobt_walk_fn(mp, tp, agno, irec,
                                        iwag->data);
                        if (error)
                                return error;
                }

                if (!iwag->iwalk_fn)
                        continue;

                for (j = 0; j < XFS_INODES_PER_CHUNK; j++) {
                        if (xfs_pwork_want_abort(&iwag->pwork))
                                return 0;

                        /* Skip if this inode is free */
                        if (XFS_INOBT_MASK(j) & irec->ir_free)
                                continue;

                        /* Otherwise call our function. */
                        ino = XFS_AGINO_TO_INO(mp, agno, irec->ir_startino + j);
                        error = iwag->iwalk_fn(mp, tp, ino, iwag->data);
                        if (error)
                                return error;
                }
        }

        return 0;
}

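/*
 * For illustration only: a sketch of an inobt walk function like the ones
 * invoked above, matching the call iwag->inobt_walk_fn(mp, tp, agno, irec,
 * data).  The function is hypothetical, not part of this file; it tallies
 * free inodes the way an INUMBERS-style caller might.
 */
#if 0   /* example */
static int
xfs_example_count_free(
        struct xfs_mount                *mp,
        struct xfs_trans                *tp,
        xfs_agnumber_t                  agno,
        struct xfs_inobt_rec_incore     *irec,
        void                            *data)
{
        uint64_t                        *freecount = data;

        *freecount += irec->ir_freecount;
        return 0;
}
#endif
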
/* Delete cursor and let go of AGI. */
static inline void
xfs_iwalk_del_inobt(
        struct xfs_trans        *tp,
        struct xfs_btree_cur    **curpp,
        struct xfs_buf          **agi_bpp,
        int                     error)
{
        if (*curpp) {
                xfs_btree_del_cursor(*curpp, error);
                *curpp = NULL;
        }
        if (*agi_bpp) {
                xfs_trans_brelse(tp, *agi_bpp);
                *agi_bpp = NULL;
        }
}

/*
 * Set ourselves up for walking inobt records starting from a given point in
 * the filesystem.
 *
 * If the caller passed in a nonzero start inode number, load the record from
 * the inobt and make the record look like all the inodes before agino are
 * free so that we skip them, and then move the cursor to the next inobt
 * record.  This is how we support starting an iwalk in the middle of an inode
 * chunk.
 *
 * If the caller passed in a start number of zero, move the cursor to the first
 * inobt record.
 *
 * The caller is responsible for cleaning up the cursor and buffer pointer
 * regardless of the error status.
 */
STATIC int
xfs_iwalk_ag_start(
        struct xfs_iwalk_ag     *iwag,
        xfs_agnumber_t          agno,
        xfs_agino_t             agino,
        struct xfs_btree_cur    **curpp,
        struct xfs_buf          **agi_bpp,
        int                     *has_more)
{
        struct xfs_mount        *mp = iwag->mp;
        struct xfs_trans        *tp = iwag->tp;
        struct xfs_inobt_rec_incore *irec;
        int                     error;

        /* Set up a fresh cursor and empty the inobt cache. */
        iwag->nr_recs = 0;
        error = xfs_inobt_cur(mp, tp, agno, XFS_BTNUM_INO, curpp, agi_bpp);
        if (error)
                return error;

        /* Starting at the beginning of the AG?  That's easy! */
        if (agino == 0)
                return xfs_inobt_lookup(*curpp, 0, XFS_LOOKUP_GE, has_more);

        /*
         * Otherwise, we have to grab the inobt record where we left off, stuff
         * the record into our cache, and then see if there are more records.
         * We require a lookup cache of at least two elements so that the
         * caller doesn't have to deal with tearing down the cursor to walk the
         * records.
         */
        error = xfs_inobt_lookup(*curpp, agino, XFS_LOOKUP_LE, has_more);
        if (error)
                return error;

        /*
         * If the LE lookup at @agino yields no records, jump ahead to the
         * inobt cursor increment to see if there are more records to process.
         */
        if (!*has_more)
                goto out_advance;

        /* Get the record; this lookup should always succeed. */
        irec = &iwag->recs[iwag->nr_recs];
        error = xfs_inobt_get_rec(*curpp, irec, has_more);
        if (error)
                return error;
        XFS_WANT_CORRUPTED_RETURN(mp, *has_more == 1);

        /*
         * If the LE lookup yielded an inobt record before the cursor position,
         * skip it and see if there's another one after it.
         */
        if (irec->ir_startino + XFS_INODES_PER_CHUNK <= agino)
                goto out_advance;

        /*
         * If agino fell in the middle of the inode record, make it look like
         * the inodes up to agino are free so that we don't return them again.
         */
        if (iwag->trim_start)
                xfs_iwalk_adjust_start(agino, irec);

        /*
         * The prefetch calculation is supposed to give us a large enough inobt
         * record cache that this function can stage a partial first record and
         * the loop body can cache a record without having to check for cache
         * space until after it reads an inobt record.
         */
        iwag->nr_recs++;
        ASSERT(iwag->nr_recs < iwag->sz_recs);

out_advance:
        return xfs_btree_increment(*curpp, 0, has_more);
}

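/*
 * Worked example (a sketch, assuming 64-inode chunks): with @agino == 100,
 * the XFS_LOOKUP_LE lookup lands on the record starting at agino 64.  Since
 * 64 + 64 > 100, the record overlaps our starting point, so we trim inodes
 * 64-99 out of it, cache it, and increment the cursor so the caller resumes
 * at the next record.  Had the lookup instead found a record ending at or
 * before @agino, we would have skipped straight to the increment.
 */
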
/*
 * The inobt record cache is full, so preserve the inobt cursor state and
 * run callbacks on the cached inobt records.  When we're done, restore the
 * cursor state to wherever the cursor would have been had the cache not been
 * full (and therefore we could've just incremented the cursor) if *@has_more
 * is true.  On exit, *@has_more will indicate whether or not the caller should
 * try for more inode records.
 */
STATIC int
xfs_iwalk_run_callbacks(
        struct xfs_iwalk_ag             *iwag,
        xfs_agnumber_t                  agno,
        struct xfs_btree_cur            **curpp,
        struct xfs_buf                  **agi_bpp,
        int                             *has_more)
{
        struct xfs_mount                *mp = iwag->mp;
        struct xfs_trans                *tp = iwag->tp;
        struct xfs_inobt_rec_incore     *irec;
        xfs_agino_t                     restart;
        int                             error;

        ASSERT(iwag->nr_recs > 0);

        /* Delete cursor but remember the last record we cached... */
        xfs_iwalk_del_inobt(tp, curpp, agi_bpp, 0);
        irec = &iwag->recs[iwag->nr_recs - 1];
        restart = irec->ir_startino + XFS_INODES_PER_CHUNK - 1;

        error = xfs_iwalk_ag_recs(iwag);
        if (error)
                return error;

        /* ...empty the cache... */
        iwag->nr_recs = 0;

        if (!*has_more)
                return 0;

        /* ...and recreate the cursor just past where we left off. */
        error = xfs_inobt_cur(mp, tp, agno, XFS_BTNUM_INO, curpp, agi_bpp);
        if (error)
                return error;

        return xfs_inobt_lookup(*curpp, restart, XFS_LOOKUP_GE, has_more);
}

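/*
 * Worked example (a sketch, assuming 64-inode chunks): if the last cached
 * record started at agino 512, @restart is 512 + 64 - 1 == 575, the last
 * inode of that chunk.  Because records are keyed by their chunk-aligned
 * starting inode, the XFS_LOOKUP_GE lookup for 575 can only land on the
 * *next* record (e.g. one starting at 576), so no record is visited twice.
 */
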
/* Walk all inodes in a single AG, from @iwag->startino to the end of the AG. */
STATIC int
xfs_iwalk_ag(
        struct xfs_iwalk_ag             *iwag)
{
        struct xfs_mount                *mp = iwag->mp;
        struct xfs_trans                *tp = iwag->tp;
        struct xfs_buf                  *agi_bp = NULL;
        struct xfs_btree_cur            *cur = NULL;
        xfs_agnumber_t                  agno;
        xfs_agino_t                     agino;
        int                             has_more;
        int                             error = 0;

        /* Set up our cursor at the right place in the inode btree. */
        agno = XFS_INO_TO_AGNO(mp, iwag->startino);
        agino = XFS_INO_TO_AGINO(mp, iwag->startino);
        error = xfs_iwalk_ag_start(iwag, agno, agino, &cur, &agi_bp, &has_more);

        while (!error && has_more) {
                struct xfs_inobt_rec_incore     *irec;

                cond_resched();
                if (xfs_pwork_want_abort(&iwag->pwork))
                        goto out;

                /* Fetch the inobt record. */
                irec = &iwag->recs[iwag->nr_recs];
                error = xfs_inobt_get_rec(cur, irec, &has_more);
                if (error || !has_more)
                        break;

                /* No allocated inodes in this chunk; skip it. */
                if (iwag->skip_empty && irec->ir_freecount == irec->ir_count) {
                        error = xfs_btree_increment(cur, 0, &has_more);
                        if (error)
                                break;
                        continue;
                }

                /*
                 * Start readahead for this inode chunk in anticipation of
                 * walking the inodes.
                 */
                if (iwag->iwalk_fn)
                        xfs_iwalk_ichunk_ra(mp, agno, irec);

                /*
                 * If there's space in the buffer for more records, increment
                 * the btree cursor and grab more.
                 */
                if (++iwag->nr_recs < iwag->sz_recs) {
                        error = xfs_btree_increment(cur, 0, &has_more);
                        if (error || !has_more)
                                break;
                        continue;
                }

                /*
                 * Otherwise, we need to save cursor state and run the callback
                 * function on the cached records.  The run_callbacks function
                 * is supposed to return a cursor pointing to the record where
                 * we would be if we had been able to increment like above.
                 */
                ASSERT(has_more);
                error = xfs_iwalk_run_callbacks(iwag, agno, &cur, &agi_bp,
                                &has_more);
        }

        if (iwag->nr_recs == 0 || error)
                goto out;

        /* Walk the unprocessed records in the cache. */
        error = xfs_iwalk_run_callbacks(iwag, agno, &cur, &agi_bp, &has_more);

out:
        xfs_iwalk_del_inobt(tp, &cur, &agi_bp, error);
        return error;
}

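/*
 * Worked example of the caching interplay above (a sketch): with sz_recs ==
 * 4, the loop caches records 0-2 and increments the cursor past each one;
 * caching record 3 fills the buffer, so xfs_iwalk_run_callbacks() processes
 * all four, empties the cache, and repositions the cursor at the record that
 * follows.  If the AG runs out of records while the buffer is partly full,
 * the loop exits and the tail call above flushes whatever is still cached.
 */
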
/*
 * We experimentally determined that the reduction in ioctl call overhead
 * diminishes when userspace asks for more than 2048 inodes, so we'll cap
 * prefetch at this point.
 */
#define IWALK_MAX_INODE_PREFETCH        (2048U)

/*
 * Given the number of inodes to prefetch, set the number of inobt records that
 * we cache in memory, which controls the number of inodes we try to read
 * ahead.  Set the maximum if @inodes == 0.
 */
static inline unsigned int
xfs_iwalk_prefetch(
        unsigned int            inodes)
{
        unsigned int            inobt_records;

        /*
         * If the caller didn't tell us the number of inodes they wanted,
         * assume the maximum prefetch possible for best performance.
         * Otherwise, cap prefetch at that maximum so that we don't start an
         * absurd amount of prefetch.
         */
        if (inodes == 0)
                inodes = IWALK_MAX_INODE_PREFETCH;
        inodes = min(inodes, IWALK_MAX_INODE_PREFETCH);

        /* Round the inode count up to a full chunk. */
        inodes = round_up(inodes, XFS_INODES_PER_CHUNK);

        /*
         * In order to convert the number of inodes to prefetch into an
         * estimate of the number of inobt records to cache, we require a
         * conversion factor that reflects our expectations of the average
         * loading factor of an inode chunk.  Based on data gathered, most
         * (but not all) filesystems manage to keep the inode chunks totally
         * full, so we'll underestimate slightly so that our readahead will
         * still deliver the performance we want on aging filesystems:
         *
         * inobt = inodes / (INODES_PER_CHUNK * (4 / 5));
         *
         * The funny math is to avoid integer division.
         */
        inobt_records = (inodes * 5) / (4 * XFS_INODES_PER_CHUNK);

        /*
         * Allocate enough space to prefetch at least two inobt records so that
         * we can cache both the record where the iwalk started and the next
         * record.  This simplifies the AG inode walk loop setup code.
         */
        return max(inobt_records, 2U);
}

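/*
 * Worked example of the arithmetic above (assuming 64-inode chunks): a
 * request for 1000 inodes rounds up to 1024, and (1024 * 5) / (4 * 64) == 20
 * inobt records, i.e. we budget for chunks that average 80% full.  A request
 * for a single inode rounds up to 64 and yields (64 * 5) / 256 == 1, which
 * the max() raises to the two-record minimum.
 */
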
/*
 * Walk all inodes in the filesystem starting from @startino.  The @iwalk_fn
 * will be called for each allocated inode, being passed the inode's number and
 * @data.  @inode_records controls how many inodes we try to read ahead.
 */
int
xfs_iwalk(
        struct xfs_mount        *mp,
        struct xfs_trans        *tp,
        xfs_ino_t               startino,
        unsigned int            flags,
        xfs_iwalk_fn            iwalk_fn,
        unsigned int            inode_records,
        void                    *data)
{
        struct xfs_iwalk_ag     iwag = {
                .mp             = mp,
                .tp             = tp,
                .iwalk_fn       = iwalk_fn,
                .data           = data,
                .startino       = startino,
                .sz_recs        = xfs_iwalk_prefetch(inode_records),
                .trim_start     = 1,
                .skip_empty     = 1,
                .pwork          = XFS_PWORK_SINGLE_THREADED,
        };
        xfs_agnumber_t          agno = XFS_INO_TO_AGNO(mp, startino);
        int                     error;

        ASSERT(agno < mp->m_sb.sb_agcount);
        ASSERT(!(flags & ~XFS_IWALK_FLAGS_ALL));

        error = xfs_iwalk_alloc(&iwag);
        if (error)
                return error;

        for (; agno < mp->m_sb.sb_agcount; agno++) {
                error = xfs_iwalk_ag(&iwag);
                if (error)
                        break;
                iwag.startino = XFS_AGINO_TO_INO(mp, agno + 1, 0);
                if (flags & XFS_IWALK_SAME_AG)
                        break;
        }

        xfs_iwalk_free(&iwag);
        return error;
}

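/*
 * For illustration only: a hypothetical caller wiring the pieces together.
 * It walks every allocated inode from the start of the filesystem with
 * default prefetch and treats the example callback's -ECANCELED as a normal
 * early exit rather than an error.
 */
#if 0   /* example */
static int
xfs_example_walk_all(
        struct xfs_mount        *mp)
{
        struct xfs_example_counter      ctr = { .limit = 10000 };
        int                             error;

        error = xfs_iwalk(mp, NULL, 0, 0, xfs_example_count_fn, 0, &ctr);
        if (error == -ECANCELED)
                error = 0;      /* we stopped ourselves; not a real error */
        return error;
}
#endif
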
/* Run per-thread iwalk work. */
static int
xfs_iwalk_ag_work(
        struct xfs_mount        *mp,
        struct xfs_pwork        *pwork)
{
        struct xfs_iwalk_ag     *iwag;
        int                     error = 0;

        iwag = container_of(pwork, struct xfs_iwalk_ag, pwork);
        if (xfs_pwork_want_abort(pwork))
                goto out;

        error = xfs_iwalk_alloc(iwag);
        if (error)
                goto out;

        error = xfs_iwalk_ag(iwag);
        xfs_iwalk_free(iwag);
out:
        kmem_free(iwag);
        return error;
}

/*
 * Walk all the inodes in the filesystem using multiple threads to process each
 * AG.
 */
int
xfs_iwalk_threaded(
        struct xfs_mount        *mp,
        xfs_ino_t               startino,
        unsigned int            flags,
        xfs_iwalk_fn            iwalk_fn,
        unsigned int            inode_records,
        bool                    polled,
        void                    *data)
{
        struct xfs_pwork_ctl    pctl;
        xfs_agnumber_t          agno = XFS_INO_TO_AGNO(mp, startino);
        unsigned int            nr_threads;
        int                     error;

        ASSERT(agno < mp->m_sb.sb_agcount);
        ASSERT(!(flags & ~XFS_IWALK_FLAGS_ALL));

        nr_threads = xfs_pwork_guess_datadev_parallelism(mp);
        error = xfs_pwork_init(mp, &pctl, xfs_iwalk_ag_work, "xfs_iwalk",
                        nr_threads);
        if (error)
                return error;

        for (; agno < mp->m_sb.sb_agcount; agno++) {
                struct xfs_iwalk_ag     *iwag;

                if (xfs_pwork_ctl_want_abort(&pctl))
                        break;

                iwag = kmem_zalloc(sizeof(struct xfs_iwalk_ag), 0);
                iwag->mp = mp;
                iwag->iwalk_fn = iwalk_fn;
                iwag->data = data;
                iwag->startino = startino;
                iwag->sz_recs = xfs_iwalk_prefetch(inode_records);
                xfs_pwork_queue(&pctl, &iwag->pwork);
                startino = XFS_AGINO_TO_INO(mp, agno + 1, 0);
                if (flags & XFS_IWALK_SAME_AG)
                        break;
        }

        if (polled)
                xfs_pwork_poll(&pctl);
        return xfs_pwork_destroy(&pctl);
}

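/*
 * For illustration only: a hypothetical threaded caller.  Each AG becomes one
 * work item, so the walk fans out across the pwork thread pool; passing
 * polled == true instead asks xfs_iwalk_threaded() to poll for completion
 * before tearing down, which suits long-running callers.
 */
#if 0   /* example */
static int
xfs_example_walk_all_threaded(
        struct xfs_mount        *mp,
        void                    *data)
{
        return xfs_iwalk_threaded(mp, 0, 0, xfs_example_count_fn, 0, false,
                        data);
}
#endif
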
/*
 * Allow callers to cache up to a page's worth of inobt records.  This reflects
 * the existing inumbers prefetching behavior.  Since the inobt walk does not
 * itself do anything with the inobt records, we can set a fairly high limit
 * here.
 */
#define MAX_INOBT_WALK_PREFETCH \
        (PAGE_SIZE / sizeof(struct xfs_inobt_rec_incore))

/*
 * Given the number of records that the user wanted, set the number of inobt
 * records that we buffer in memory.  Set the maximum if @inobt_records == 0.
 */
static inline unsigned int
xfs_inobt_walk_prefetch(
        unsigned int            inobt_records)
{
        /*
         * If the caller didn't tell us the number of inobt records they
         * wanted, assume the maximum prefetch possible for best performance.
         */
        if (inobt_records == 0)
                inobt_records = MAX_INOBT_WALK_PREFETCH;

        /*
         * Allocate enough space to prefetch at least two inobt records so that
         * we can cache both the record where the iwalk started and the next
         * record.  This simplifies the AG inode walk loop setup code.
         */
        inobt_records = max(inobt_records, 2U);

        /*
         * Cap prefetch at that maximum so that we don't use an absurd amount
         * of memory.
         */
        return min_t(unsigned int, inobt_records, MAX_INOBT_WALK_PREFETCH);
}

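/*
 * Worked example of the cap above (a sketch): struct xfs_inobt_rec_incore
 * packs a 32-bit start inode, a 16-bit hole mask, two 8-bit counts, and a
 * 64-bit free mask into 16 bytes, so on a system with 4096-byte pages
 * MAX_INOBT_WALK_PREFETCH works out to 256 cached records.
 */
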
/*
 * Walk all inode btree records in the filesystem starting from @startino.  The
 * @inobt_walk_fn will be called for each btree record, being passed the incore
 * record and @data.  @inobt_records controls how many inobt records we try to
 * cache ahead of time.
 */
int
xfs_inobt_walk(
        struct xfs_mount        *mp,
        struct xfs_trans        *tp,
        xfs_ino_t               startino,
        unsigned int            flags,
        xfs_inobt_walk_fn       inobt_walk_fn,
        unsigned int            inobt_records,
        void                    *data)
{
        struct xfs_iwalk_ag     iwag = {
                .mp             = mp,
                .tp             = tp,
                .inobt_walk_fn  = inobt_walk_fn,
                .data           = data,
                .startino       = startino,
                .sz_recs        = xfs_inobt_walk_prefetch(inobt_records),
                .pwork          = XFS_PWORK_SINGLE_THREADED,
        };
        xfs_agnumber_t          agno = XFS_INO_TO_AGNO(mp, startino);
        int                     error;

        ASSERT(agno < mp->m_sb.sb_agcount);
        ASSERT(!(flags & ~XFS_INOBT_WALK_FLAGS_ALL));

        error = xfs_iwalk_alloc(&iwag);
        if (error)
                return error;

        for (; agno < mp->m_sb.sb_agcount; agno++) {
                error = xfs_iwalk_ag(&iwag);
                if (error)
                        break;
                iwag.startino = XFS_AGINO_TO_INO(mp, agno + 1, 0);
                if (flags & XFS_INOBT_WALK_SAME_AG)
                        break;
        }

        xfs_iwalk_free(&iwag);
        return error;
}
