fs/xfs/xfs_log_recover.c


DEFINITIONS

This source file includes the following definitions.
  1. xlog_verify_bno
  2. xlog_alloc_buffer
  3. xlog_align
  4. xlog_do_io
  5. xlog_bread_noalign
  6. xlog_bread
  7. xlog_bwrite
  8. xlog_header_check_dump
  9. xlog_header_check_recover
  10. xlog_header_check_mount
  11. xlog_recover_iodone
  12. xlog_find_cycle_start
  13. xlog_find_verify_cycle
  14. xlog_find_verify_log_record
  15. xlog_find_head
  16. xlog_rseek_logrec_hdr
  17. xlog_seek_logrec_hdr
  18. xlog_tail_distance
  19. xlog_verify_tail
  20. xlog_verify_head
  21. xlog_wrap_logbno
  22. xlog_check_unmount_rec
  23. xlog_set_state
  24. xlog_find_tail
  25. xlog_find_zeroed
  26. xlog_add_record
  27. xlog_write_log_records
  28. xlog_clear_stale_blocks
  29. xlog_recover_reorder_trans
  30. xlog_recover_buffer_pass1
  31. xlog_peek_buffer_cancelled
  32. xlog_check_buffer_cancelled
  33. xlog_recover_do_inode_buffer
  34. xlog_recover_get_buf_lsn
  35. xlog_recover_validate_buf_type
  36. xlog_recover_do_reg_buffer
  37. xlog_recover_do_dquot_buffer
  38. xlog_recover_buffer_pass2
  39. xfs_recover_inode_owner_change
  40. xlog_recover_inode_pass2
  41. xlog_recover_quotaoff_pass1
  42. xlog_recover_dquot_pass2
  43. xlog_recover_efi_pass2
  44. xlog_recover_efd_pass2
  45. xlog_recover_rui_pass2
  46. xlog_recover_rud_pass2
  47. xfs_cui_copy_format
  48. xlog_recover_cui_pass2
  49. xlog_recover_cud_pass2
  50. xfs_bui_copy_format
  51. xlog_recover_bui_pass2
  52. xlog_recover_bud_pass2
  53. xlog_recover_do_icreate_pass2
  54. xlog_recover_buffer_ra_pass2
  55. xlog_recover_inode_ra_pass2
  56. xlog_recover_dquot_ra_pass2
  57. xlog_recover_ra_pass2
  58. xlog_recover_commit_pass1
  59. xlog_recover_commit_pass2
  60. xlog_recover_items_pass2
  61. xlog_recover_commit_trans
  62. xlog_recover_add_item
  63. xlog_recover_add_to_cont_trans
  64. xlog_recover_add_to_trans
  65. xlog_recover_free_trans
  66. xlog_recovery_process_trans
  67. xlog_recover_ophdr_to_trans
  68. xlog_recover_process_ophdr
  69. xlog_recover_process_data
  70. xlog_recover_process_efi
  71. xlog_recover_cancel_efi
  72. xlog_recover_process_rui
  73. xlog_recover_cancel_rui
  74. xlog_recover_process_cui
  75. xlog_recover_cancel_cui
  76. xlog_recover_process_bui
  77. xlog_recover_cancel_bui
  78. xlog_item_is_intent
  79. xlog_finish_defer_ops
  80. xlog_recover_process_intents
  81. xlog_recover_cancel_intents
  82. xlog_recover_clear_agi_bucket
  83. xlog_recover_process_one_iunlink
  84. xlog_recover_process_iunlinks
  85. xlog_unpack_data
  86. xlog_recover_process
  87. xlog_valid_rec_header
  88. xlog_do_recovery_pass
  89. xlog_do_log_recovery
  90. xlog_do_recover
  91. xlog_recover
  92. xlog_recover_finish
  93. xlog_recover_cancel
  94. xlog_recover_check_summary

// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2000-2006 Silicon Graphics, Inc.
 * All Rights Reserved.
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_bit.h"
#include "xfs_sb.h"
#include "xfs_mount.h"
#include "xfs_defer.h"
#include "xfs_inode.h"
#include "xfs_trans.h"
#include "xfs_log.h"
#include "xfs_log_priv.h"
#include "xfs_log_recover.h"
#include "xfs_inode_item.h"
#include "xfs_extfree_item.h"
#include "xfs_trans_priv.h"
#include "xfs_alloc.h"
#include "xfs_ialloc.h"
#include "xfs_quota.h"
#include "xfs_trace.h"
#include "xfs_icache.h"
#include "xfs_bmap_btree.h"
#include "xfs_error.h"
#include "xfs_dir2.h"
#include "xfs_rmap_item.h"
#include "xfs_buf_item.h"
#include "xfs_refcount_item.h"
#include "xfs_bmap_item.h"

#define BLK_AVG(blk1, blk2)     ((blk1+blk2) >> 1)

STATIC int
xlog_find_zeroed(
        struct xlog     *,
        xfs_daddr_t     *);
STATIC int
xlog_clear_stale_blocks(
        struct xlog     *,
        xfs_lsn_t);
#if defined(DEBUG)
STATIC void
xlog_recover_check_summary(
        struct xlog *);
#else
#define xlog_recover_check_summary(log)
#endif
STATIC int
xlog_do_recovery_pass(
        struct xlog *, xfs_daddr_t, xfs_daddr_t, int, xfs_daddr_t *);

/*
 * This structure is used during recovery to record the buf log items which
 * have been canceled and should not be replayed.
 */
struct xfs_buf_cancel {
        xfs_daddr_t             bc_blkno;
        uint                    bc_len;
        int                     bc_refcount;
        struct list_head        bc_list;
};

/*
 * Sector aligned buffer routines for buffer create/read/write/access
 */

/*
 * Verify the log-relative block number and length in basic blocks are valid for
 * an operation involving the given XFS log buffer. Returns true if the fields
 * are valid, false otherwise.
 */
static inline bool
xlog_verify_bno(
        struct xlog     *log,
        xfs_daddr_t     blk_no,
        int             bbcount)
{
        if (blk_no < 0 || blk_no >= log->l_logBBsize)
                return false;
        if (bbcount <= 0 || (blk_no + bbcount) > log->l_logBBsize)
                return false;
        return true;
}
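
/*
 * Illustrative example (hypothetical values, not taken from the code above):
 * on a log of l_logBBsize = 1024 basic blocks, blk_no may range over 0..1023
 * and blk_no + bbcount must not exceed 1024, so (blk_no = 1020, bbcount = 4)
 * is accepted while (blk_no = 1020, bbcount = 8) is rejected.
 */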

/*
 * Allocate a buffer to hold log data.  The buffer needs to be able to map to
 * a range of nbblks basic blocks at any valid offset within the log.
 */
static char *
xlog_alloc_buffer(
        struct xlog     *log,
        int             nbblks)
{
        int align_mask = xfs_buftarg_dma_alignment(log->l_targ);

        /*
         * Pass log block 0 since we don't have an addr yet, buffer will be
         * verified on read.
         */
        if (!xlog_verify_bno(log, 0, nbblks)) {
                xfs_warn(log->l_mp, "Invalid block length (0x%x) for buffer",
                        nbblks);
                XFS_ERROR_REPORT(__func__, XFS_ERRLEVEL_HIGH, log->l_mp);
                return NULL;
        }

        /*
         * We do log I/O in units of log sectors (a power-of-2 multiple of the
         * basic block size), so we round up the requested size to accommodate
         * the basic blocks required for complete log sectors.
         *
         * In addition, the buffer may be used for a non-sector-aligned block
         * offset, in which case an I/O of the requested size could extend
         * beyond the end of the buffer.  If the requested size is only 1 basic
         * block it will never straddle a sector boundary, so this won't be an
         * issue.  Nor will this be a problem if the log I/O is done in basic
         * blocks (sector size 1).  But otherwise we extend the buffer by one
         * extra log sector to ensure there's space to accommodate this
         * possibility.
         */
        if (nbblks > 1 && log->l_sectBBsize > 1)
                nbblks += log->l_sectBBsize;
        nbblks = round_up(nbblks, log->l_sectBBsize);
        return kmem_alloc_io(BBTOB(nbblks), align_mask, KM_MAYFAIL | KM_ZERO);
}
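
/*
 * Worked example of the sizing above (hypothetical sector size): with 4 KiB
 * sectors, l_sectBBsize = 8.  A request for nbblks = 3 is first padded to
 * 3 + 8 = 11 to cover a possible non-sector-aligned starting offset, then
 * rounded up to round_up(11, 8) = 16 basic blocks, i.e. an 8 KiB buffer.
 */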

/*
 * Return the address of the start of the given block number's data
 * in a log buffer.  The buffer covers a log sector-aligned region.
 */
static inline unsigned int
xlog_align(
        struct xlog     *log,
        xfs_daddr_t     blk_no)
{
        return BBTOB(blk_no & ((xfs_daddr_t)log->l_sectBBsize - 1));
}
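
/*
 * E.g., assuming l_sectBBsize = 8 (hypothetical): block 13 lies 13 & 7 = 5
 * basic blocks past the start of its sector, so its data begins
 * BBTOB(5) = 2560 bytes into a buffer whose I/O started at the
 * rounded-down block 8.
 */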

static int
xlog_do_io(
        struct xlog             *log,
        xfs_daddr_t             blk_no,
        unsigned int            nbblks,
        char                    *data,
        unsigned int            op)
{
        int                     error;

        if (!xlog_verify_bno(log, blk_no, nbblks)) {
                xfs_warn(log->l_mp,
                         "Invalid log block/length (0x%llx, 0x%x) for buffer",
                         blk_no, nbblks);
                XFS_ERROR_REPORT(__func__, XFS_ERRLEVEL_HIGH, log->l_mp);
                return -EFSCORRUPTED;
        }

        blk_no = round_down(blk_no, log->l_sectBBsize);
        nbblks = round_up(nbblks, log->l_sectBBsize);
        ASSERT(nbblks > 0);

        error = xfs_rw_bdev(log->l_targ->bt_bdev, log->l_logBBstart + blk_no,
                        BBTOB(nbblks), data, op);
        if (error && !XFS_FORCED_SHUTDOWN(log->l_mp)) {
                xfs_alert(log->l_mp,
                          "log recovery %s I/O error at daddr 0x%llx len %d error %d",
                          op == REQ_OP_WRITE ? "write" : "read",
                          blk_no, nbblks, error);
        }
        return error;
}
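
/*
 * Example of the sector widening above (hypothetical values): with
 * l_sectBBsize = 8, a 2-block read at log block 13 is issued as an 8-block
 * I/O at block 8; callers such as xlog_bread() then use xlog_align() to
 * locate the requested data within the widened buffer.
 */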

STATIC int
xlog_bread_noalign(
        struct xlog     *log,
        xfs_daddr_t     blk_no,
        int             nbblks,
        char            *data)
{
        return xlog_do_io(log, blk_no, nbblks, data, REQ_OP_READ);
}

STATIC int
xlog_bread(
        struct xlog     *log,
        xfs_daddr_t     blk_no,
        int             nbblks,
        char            *data,
        char            **offset)
{
        int             error;

        error = xlog_do_io(log, blk_no, nbblks, data, REQ_OP_READ);
        if (!error)
                *offset = data + xlog_align(log, blk_no);
        return error;
}

STATIC int
xlog_bwrite(
        struct xlog     *log,
        xfs_daddr_t     blk_no,
        int             nbblks,
        char            *data)
{
        return xlog_do_io(log, blk_no, nbblks, data, REQ_OP_WRITE);
}

#ifdef DEBUG
/*
 * dump debug superblock and log record information
 */
STATIC void
xlog_header_check_dump(
        xfs_mount_t             *mp,
        xlog_rec_header_t       *head)
{
        xfs_debug(mp, "%s:  SB : uuid = %pU, fmt = %d",
                __func__, &mp->m_sb.sb_uuid, XLOG_FMT);
        xfs_debug(mp, "    log : uuid = %pU, fmt = %d",
                &head->h_fs_uuid, be32_to_cpu(head->h_fmt));
}
#else
#define xlog_header_check_dump(mp, head)
#endif

/*
 * check log record header for recovery
 */
STATIC int
xlog_header_check_recover(
        xfs_mount_t             *mp,
        xlog_rec_header_t       *head)
{
        ASSERT(head->h_magicno == cpu_to_be32(XLOG_HEADER_MAGIC_NUM));

        /*
         * IRIX doesn't write the h_fmt field and leaves it zeroed
         * (XLOG_FMT_UNKNOWN). This stops us from trying to recover
         * a dirty log created in IRIX.
         */
        if (unlikely(head->h_fmt != cpu_to_be32(XLOG_FMT))) {
                xfs_warn(mp,
        "dirty log written in incompatible format - can't recover");
                xlog_header_check_dump(mp, head);
                XFS_ERROR_REPORT("xlog_header_check_recover(1)",
                                 XFS_ERRLEVEL_HIGH, mp);
                return -EFSCORRUPTED;
        } else if (unlikely(!uuid_equal(&mp->m_sb.sb_uuid, &head->h_fs_uuid))) {
                xfs_warn(mp,
        "dirty log entry has mismatched uuid - can't recover");
                xlog_header_check_dump(mp, head);
                XFS_ERROR_REPORT("xlog_header_check_recover(2)",
                                 XFS_ERRLEVEL_HIGH, mp);
                return -EFSCORRUPTED;
        }
        return 0;
}

/*
 * read the head block of the log and check the header
 */
STATIC int
xlog_header_check_mount(
        xfs_mount_t             *mp,
        xlog_rec_header_t       *head)
{
        ASSERT(head->h_magicno == cpu_to_be32(XLOG_HEADER_MAGIC_NUM));

        if (uuid_is_null(&head->h_fs_uuid)) {
                /*
                 * IRIX doesn't write the h_fs_uuid or h_fmt fields. If
                 * h_fs_uuid is null, we assume this log was last mounted
                 * by IRIX and continue.
                 */
                xfs_warn(mp, "null uuid in log - IRIX style log");
        } else if (unlikely(!uuid_equal(&mp->m_sb.sb_uuid, &head->h_fs_uuid))) {
                xfs_warn(mp, "log has mismatched uuid - can't recover");
                xlog_header_check_dump(mp, head);
                XFS_ERROR_REPORT("xlog_header_check_mount",
                                 XFS_ERRLEVEL_HIGH, mp);
                return -EFSCORRUPTED;
        }
        return 0;
}

STATIC void
xlog_recover_iodone(
        struct xfs_buf  *bp)
{
        if (bp->b_error) {
                /*
                 * We're not going to bother about retrying
                 * this during recovery. One strike!
                 */
                if (!XFS_FORCED_SHUTDOWN(bp->b_mount)) {
                        xfs_buf_ioerror_alert(bp, __func__);
                        xfs_force_shutdown(bp->b_mount, SHUTDOWN_META_IO_ERROR);
                }
        }

        /*
         * On v5 supers, a bli could be attached to update the metadata LSN.
         * Clean it up.
         */
        if (bp->b_log_item)
                xfs_buf_item_relse(bp);
        ASSERT(bp->b_log_item == NULL);

        bp->b_iodone = NULL;
        xfs_buf_ioend(bp);
}

/*
 * This routine finds (to an approximation) the first block in the physical
 * log which contains the given cycle.  It uses a binary search algorithm.
 * Note that the algorithm cannot be perfect because the disk will not
 * necessarily be perfect.
 */
STATIC int
xlog_find_cycle_start(
        struct xlog     *log,
        char            *buffer,
        xfs_daddr_t     first_blk,
        xfs_daddr_t     *last_blk,
        uint            cycle)
{
        char            *offset;
        xfs_daddr_t     mid_blk;
        xfs_daddr_t     end_blk;
        uint            mid_cycle;
        int             error;

        end_blk = *last_blk;
        mid_blk = BLK_AVG(first_blk, end_blk);
        while (mid_blk != first_blk && mid_blk != end_blk) {
                error = xlog_bread(log, mid_blk, 1, buffer, &offset);
                if (error)
                        return error;
                mid_cycle = xlog_get_cycle(offset);
                if (mid_cycle == cycle)
                        end_blk = mid_blk;   /* last_half_cycle == mid_cycle */
                else
                        first_blk = mid_blk; /* first_half_cycle == mid_cycle */
                mid_blk = BLK_AVG(first_blk, end_blk);
        }
        ASSERT((mid_blk == first_blk && mid_blk+1 == end_blk) ||
               (mid_blk == end_blk && mid_blk-1 == first_blk));

        *last_blk = end_blk;

        return 0;
}
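
/*
 * Worked example of the bisection above (hypothetical cycle layout):
 * suppose blocks 0..6 carry cycle x, blocks 7..9 carry cycle x - 1, and we
 * search first_blk = 0, *last_blk = 9 for cycle x - 1.  The loop probes
 * mid_blk = BLK_AVG(0, 9) = 4 (cycle x, so first_blk = 4), then 6 (cycle x,
 * first_blk = 6), then 7 (cycle x - 1, end_blk = 7), and stops with
 * *last_blk = 7, the first block stamped with the target cycle.
 */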

/*
 * Check that a range of blocks does not contain stop_on_cycle_no.
 * Fill in *new_blk with the block offset where such a block is
 * found, or with -1 (an invalid block number) if there is no such
 * block in the range.  The scan needs to occur from front to back
 * and the pointer into the region must be updated since a later
 * routine will need to perform another test.
 */
STATIC int
xlog_find_verify_cycle(
        struct xlog     *log,
        xfs_daddr_t     start_blk,
        int             nbblks,
        uint            stop_on_cycle_no,
        xfs_daddr_t     *new_blk)
{
        xfs_daddr_t     i, j;
        uint            cycle;
        char            *buffer;
        xfs_daddr_t     bufblks;
        char            *buf = NULL;
        int             error = 0;

        /*
         * Greedily allocate a buffer big enough to handle the full
         * range of basic blocks we'll be examining.  If that fails,
         * try a smaller size.  We need to be able to read at least
         * a log sector, or we're out of luck.
         */
        bufblks = 1 << ffs(nbblks);
        while (bufblks > log->l_logBBsize)
                bufblks >>= 1;
        while (!(buffer = xlog_alloc_buffer(log, bufblks))) {
                bufblks >>= 1;
                if (bufblks < log->l_sectBBsize)
                        return -ENOMEM;
        }

        for (i = start_blk; i < start_blk + nbblks; i += bufblks) {
                int     bcount;

                bcount = min(bufblks, (start_blk + nbblks - i));

                error = xlog_bread(log, i, bcount, buffer, &buf);
                if (error)
                        goto out;

                for (j = 0; j < bcount; j++) {
                        cycle = xlog_get_cycle(buf);
                        if (cycle == stop_on_cycle_no) {
                                *new_blk = i+j;
                                goto out;
                        }

                        buf += BBSIZE;
                }
        }

        *new_blk = -1;

out:
        kmem_free(buffer);
        return error;
}

/*
 * Potentially backup over partial log record write.
 *
 * In the typical case, last_blk is the number of the block directly after
 * a good log record.  Therefore, we subtract one to get the block number
 * of the last block in the given buffer.  extra_bblks contains the number
 * of blocks we would have read on a previous read.  This happens when the
 * last log record is split over the end of the physical log.
 *
 * extra_bblks is the number of blocks potentially verified on a previous
 * call to this routine.
 */
STATIC int
xlog_find_verify_log_record(
        struct xlog             *log,
        xfs_daddr_t             start_blk,
        xfs_daddr_t             *last_blk,
        int                     extra_bblks)
{
        xfs_daddr_t             i;
        char                    *buffer;
        char                    *offset = NULL;
        xlog_rec_header_t       *head = NULL;
        int                     error = 0;
        int                     smallmem = 0;
        int                     num_blks = *last_blk - start_blk;
        int                     xhdrs;

        ASSERT(start_blk != 0 || *last_blk != start_blk);

        buffer = xlog_alloc_buffer(log, num_blks);
        if (!buffer) {
                buffer = xlog_alloc_buffer(log, 1);
                if (!buffer)
                        return -ENOMEM;
                smallmem = 1;
        } else {
                error = xlog_bread(log, start_blk, num_blks, buffer, &offset);
                if (error)
                        goto out;
                offset += ((num_blks - 1) << BBSHIFT);
        }

        for (i = (*last_blk) - 1; i >= 0; i--) {
                if (i < start_blk) {
                        /* valid log record not found */
                        xfs_warn(log->l_mp,
                "Log inconsistent (didn't find previous header)");
                        ASSERT(0);
                        error = -EIO;
                        goto out;
                }

                if (smallmem) {
                        error = xlog_bread(log, i, 1, buffer, &offset);
                        if (error)
                                goto out;
                }

                head = (xlog_rec_header_t *)offset;

                if (head->h_magicno == cpu_to_be32(XLOG_HEADER_MAGIC_NUM))
                        break;

                if (!smallmem)
                        offset -= BBSIZE;
        }

        /*
         * We hit the beginning of the physical log & still no header.  Return
         * to caller.  If caller can handle a return of -1, then this routine
         * will be called again for the end of the physical log.
         */
        if (i == -1) {
                error = 1;
                goto out;
        }

        /*
         * We have the final block of the good log (the first block
         * of the log record _before_ the head).  So we check the uuid.
         */
        if ((error = xlog_header_check_mount(log->l_mp, head)))
                goto out;

        /*
         * We may have found a log record header before we expected one.
         * last_blk will be the 1st block # with a given cycle #.  We may end
         * up reading an entire log record.  In this case, we don't want to
         * reset last_blk.  Only when last_blk points in the middle of a log
         * record do we update last_blk.
         */
        if (xfs_sb_version_haslogv2(&log->l_mp->m_sb)) {
                uint    h_size = be32_to_cpu(head->h_size);

                xhdrs = h_size / XLOG_HEADER_CYCLE_SIZE;
                if (h_size % XLOG_HEADER_CYCLE_SIZE)
                        xhdrs++;
        } else {
                xhdrs = 1;
        }

        if (*last_blk - i + extra_bblks !=
            BTOBB(be32_to_cpu(head->h_len)) + xhdrs)
                *last_blk = i;

out:
        kmem_free(buffer);
        return error;
}
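
/*
 * E.g. (hypothetical v2 log): with h_size = 64k and XLOG_HEADER_CYCLE_SIZE
 * = 32k, xhdrs = 2, so a complete record of h_len bytes is expected to
 * occupy BTOBB(h_len) + 2 blocks on disk.
 */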

/*
 * Head is defined to be the point of the log where the next log write
 * could go.  This means that incomplete LR writes at the end are
 * eliminated when calculating the head.  We aren't guaranteed that previous
 * LRs have complete transactions.  We only know that a cycle number of
 * current cycle number -1 won't be present in the log if we start writing
 * from our current block number.
 *
 * last_blk contains the block number of the first block with a given
 * cycle number.
 *
 * Return: zero if normal, non-zero if error.
 */
STATIC int
xlog_find_head(
        struct xlog     *log,
        xfs_daddr_t     *return_head_blk)
{
        char            *buffer;
        char            *offset;
        xfs_daddr_t     new_blk, first_blk, start_blk, last_blk, head_blk;
        int             num_scan_bblks;
        uint            first_half_cycle, last_half_cycle;
        uint            stop_on_cycle;
        int             error, log_bbnum = log->l_logBBsize;

        /* Is the end of the log device zeroed? */
        error = xlog_find_zeroed(log, &first_blk);
        if (error < 0) {
                xfs_warn(log->l_mp, "empty log check failed");
                return error;
        }
        if (error == 1) {
                *return_head_blk = first_blk;

                /* Is the whole lot zeroed? */
                if (!first_blk) {
                        /* Linux XFS shouldn't generate totally zeroed logs -
                         * mkfs etc write a dummy unmount record to a fresh
                         * log so we can store the uuid in there
                         */
                        xfs_warn(log->l_mp, "totally zeroed log");
                }

                return 0;
        }

        first_blk = 0;                  /* get cycle # of 1st block */
        buffer = xlog_alloc_buffer(log, 1);
        if (!buffer)
                return -ENOMEM;

        error = xlog_bread(log, 0, 1, buffer, &offset);
        if (error)
                goto out_free_buffer;

        first_half_cycle = xlog_get_cycle(offset);

        last_blk = head_blk = log_bbnum - 1;    /* get cycle # of last block */
        error = xlog_bread(log, last_blk, 1, buffer, &offset);
        if (error)
                goto out_free_buffer;

        last_half_cycle = xlog_get_cycle(offset);
        ASSERT(last_half_cycle != 0);

        /*
         * If the 1st half cycle number is equal to the last half cycle number,
         * then the entire log is stamped with the same cycle number.  In this
         * case, head_blk can't be set to zero (which makes sense).  The below
         * math doesn't work out properly with head_blk equal to zero.  Instead,
         * we set it to log_bbnum which is an invalid block number, but this
         * value makes the math correct.  If head_blk doesn't change through
         * all the tests below, *head_blk is set to zero at the very end rather
         * than log_bbnum.  In a sense, log_bbnum and zero are the same block
         * in a circular file.
         */
        if (first_half_cycle == last_half_cycle) {
                /*
                 * In this case we believe that the entire log should have
                 * cycle number last_half_cycle.  We need to scan backwards
                 * from the end verifying that there are no holes still
                 * containing last_half_cycle - 1.  If we find such a hole,
                 * then the start of that hole will be the new head.  The
                 * simple case looks like
                 *        x | x ... | x - 1 | x
                 * Another case that fits this picture would be
                 *        x | x + 1 | x ... | x
                 * In this case the head really is somewhere at the end of the
                 * log, as one of the latest writes at the beginning was
                 * incomplete.
                 * One more case is
                 *        x | x + 1 | x ... | x - 1 | x
                 * This is really the combination of the above two cases, and
                 * the head has to end up at the start of the x-1 hole at the
                 * end of the log.
                 *
                 * In the 256k log case, we will read from the beginning to the
                 * end of the log and search for cycle numbers equal to x-1.
                 * We don't worry about the x+1 blocks that we encounter,
                 * because we know that they cannot be the head since the log
                 * started with x.
                 */
                head_blk = log_bbnum;
                stop_on_cycle = last_half_cycle - 1;
        } else {
                /*
                 * In this case we want to find the first block with cycle
                 * number matching last_half_cycle.  We expect the log to be
                 * some variation on
                 *        x + 1 ... | x ... | x
                 * The first block with cycle number x (last_half_cycle) will
                 * be where the new head belongs.  First we do a binary search
                 * for the first occurrence of last_half_cycle.  The binary
                 * search may not be totally accurate, so then we scan back
                 * from there looking for occurrences of last_half_cycle before
                 * us.  If that backwards scan wraps around the beginning of
                 * the log, then we look for occurrences of last_half_cycle - 1
                 * at the end of the log.  The cases we're looking for look
                 * like
                 *                               v binary search stopped here
                 *        x + 1 ... | x | x + 1 | x ... | x
                 *                   ^ but we want to locate this spot
                 * or
                 *        <---------> less than scan distance
                 *        x + 1 ... | x ... | x - 1 | x
                 *                           ^ we want to locate this spot
                 */
                stop_on_cycle = last_half_cycle;
                error = xlog_find_cycle_start(log, buffer, first_blk, &head_blk,
                                last_half_cycle);
                if (error)
                        goto out_free_buffer;
        }

        /*
         * Now validate the answer.  Scan back some number of maximum possible
         * blocks and make sure each one has the expected cycle number.  The
         * maximum is determined by the total possible amount of buffering
         * in the in-core log.  The following number can be made tighter if
         * we actually look at the block size of the filesystem.
         */
        num_scan_bblks = min_t(int, log_bbnum, XLOG_TOTAL_REC_SHIFT(log));
        if (head_blk >= num_scan_bblks) {
                /*
                 * We are guaranteed that the entire check can be performed
                 * in one buffer.
                 */
                start_blk = head_blk - num_scan_bblks;
                if ((error = xlog_find_verify_cycle(log,
                                                start_blk, num_scan_bblks,
                                                stop_on_cycle, &new_blk)))
                        goto out_free_buffer;
                if (new_blk != -1)
                        head_blk = new_blk;
        } else {                /* need to read 2 parts of log */
                /*
                 * We are going to scan backwards in the log in two parts.
                 * First we scan the physical end of the log.  In this part
                 * of the log, we are looking for blocks with cycle number
                 * last_half_cycle - 1.
                 * If we find one, then we know that the log starts there, as
                 * we've found a hole that didn't get written in going around
                 * the end of the physical log.  The simple case for this is
                 *        x + 1 ... | x ... | x - 1 | x
                 *        <---------> less than scan distance
                 * If all of the blocks at the end of the log have cycle number
                 * last_half_cycle, then we check the blocks at the start of
                 * the log looking for occurrences of last_half_cycle.  If we
                 * find one, then our current estimate for the location of the
                 * first occurrence of last_half_cycle is wrong and we move
                 * back to the hole we've found.  This case looks like
                 *        x + 1 ... | x | x + 1 | x ...
                 *                               ^ binary search stopped here
                 * Another case we need to handle that only occurs in 256k
                 * logs is
                 *        x + 1 ... | x ... | x+1 | x ...
                 *                   ^ binary search stops here
                 * In a 256k log, the scan at the end of the log will see the
                 * x + 1 blocks.  We need to skip past those since that is
                 * certainly not the head of the log.  By searching for
                 * last_half_cycle-1 we accomplish that.
                 */
                ASSERT(head_blk <= INT_MAX &&
                        (xfs_daddr_t) num_scan_bblks >= head_blk);
                start_blk = log_bbnum - (num_scan_bblks - head_blk);
                if ((error = xlog_find_verify_cycle(log, start_blk,
                                        num_scan_bblks - (int)head_blk,
                                        (stop_on_cycle - 1), &new_blk)))
                        goto out_free_buffer;
                if (new_blk != -1) {
                        head_blk = new_blk;
                        goto validate_head;
                }

                /*
                 * Scan beginning of log now.  The last part of the physical
                 * log is good.  This scan needs to verify that it doesn't find
                 * the last_half_cycle.
                 */
                start_blk = 0;
                ASSERT(head_blk <= INT_MAX);
                if ((error = xlog_find_verify_cycle(log,
                                        start_blk, (int)head_blk,
                                        stop_on_cycle, &new_blk)))
                        goto out_free_buffer;
                if (new_blk != -1)
                        head_blk = new_blk;
        }

validate_head:
        /*
         * Now we need to make sure head_blk is not pointing to a block in
         * the middle of a log record.
         */
        num_scan_bblks = XLOG_REC_SHIFT(log);
        if (head_blk >= num_scan_bblks) {
                start_blk = head_blk - num_scan_bblks; /* don't read head_blk */

                /* start ptr at last block ptr before head_blk */
                error = xlog_find_verify_log_record(log, start_blk, &head_blk, 0);
                if (error == 1)
                        error = -EIO;
                if (error)
                        goto out_free_buffer;
        } else {
                start_blk = 0;
                ASSERT(head_blk <= INT_MAX);
                error = xlog_find_verify_log_record(log, start_blk, &head_blk, 0);
                if (error < 0)
                        goto out_free_buffer;
                if (error == 1) {
                        /* We hit the beginning of the log during our search */
                        start_blk = log_bbnum - (num_scan_bblks - head_blk);
                        new_blk = log_bbnum;
                        ASSERT(start_blk <= INT_MAX &&
                                (xfs_daddr_t) log_bbnum-start_blk >= 0);
                        ASSERT(head_blk <= INT_MAX);
                        error = xlog_find_verify_log_record(log, start_blk,
                                                        &new_blk, (int)head_blk);
                        if (error == 1)
                                error = -EIO;
                        if (error)
                                goto out_free_buffer;
                        if (new_blk != log_bbnum)
                                head_blk = new_blk;
                } else if (error)
                        goto out_free_buffer;
        }

        kmem_free(buffer);
        if (head_blk == log_bbnum)
                *return_head_blk = 0;
        else
                *return_head_blk = head_blk;
        /*
         * When returning here, we have a good block number.  Bad block
         * means that during a previous crash, we didn't have a clean break
         * from cycle number N to cycle number N-1.  In this case, we need
         * to find the first block with cycle number N-1.
         */
        return 0;

out_free_buffer:
        kmem_free(buffer);
        if (error)
                xfs_warn(log->l_mp, "failed to find log head");
        return error;
}

/*
 * Seek backwards in the log for log record headers.
 *
 * Given a starting log block, walk backwards until we find the provided number
 * of records or hit the provided tail block. The return value is the number of
 * records encountered or a negative error code. The log block and buffer
 * pointer of the last record seen are returned in rblk and rhead respectively.
 */
STATIC int
xlog_rseek_logrec_hdr(
        struct xlog             *log,
        xfs_daddr_t             head_blk,
        xfs_daddr_t             tail_blk,
        int                     count,
        char                    *buffer,
        xfs_daddr_t             *rblk,
        struct xlog_rec_header  **rhead,
        bool                    *wrapped)
{
        int                     i;
        int                     error;
        int                     found = 0;
        char                    *offset = NULL;
        xfs_daddr_t             end_blk;

        *wrapped = false;

        /*
         * Walk backwards from the head block until we hit the tail or the first
         * block in the log.
         */
        end_blk = head_blk > tail_blk ? tail_blk : 0;
        for (i = (int) head_blk - 1; i >= end_blk; i--) {
                error = xlog_bread(log, i, 1, buffer, &offset);
                if (error)
                        goto out_error;

                if (*(__be32 *) offset == cpu_to_be32(XLOG_HEADER_MAGIC_NUM)) {
                        *rblk = i;
                        *rhead = (struct xlog_rec_header *) offset;
                        if (++found == count)
                                break;
                }
        }

        /*
         * If we haven't hit the tail block or the log record header count,
         * start looking again from the end of the physical log. Note that
         * callers can pass head == tail if the tail is not yet known.
         */
        if (tail_blk >= head_blk && found != count) {
                for (i = log->l_logBBsize - 1; i >= (int) tail_blk; i--) {
                        error = xlog_bread(log, i, 1, buffer, &offset);
                        if (error)
                                goto out_error;

                        if (*(__be32 *)offset ==
                            cpu_to_be32(XLOG_HEADER_MAGIC_NUM)) {
                                *wrapped = true;
                                *rblk = i;
                                *rhead = (struct xlog_rec_header *) offset;
                                if (++found == count)
                                        break;
                        }
                }
        }

        return found;

out_error:
        return error;
}
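
/*
 * E.g. (hypothetical values): searching backwards from head_blk = 5 with
 * tail_blk = 900 in a 1000-block log first scans blocks 4..0, then wraps
 * to scan 999..900; *wrapped is set only if a header is found in the
 * wrapped region.
 */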

/*
 * Seek forward in the log for log record headers.
 *
 * Given head and tail blocks, walk forward from the tail block until we find
 * the provided number of records or hit the head block. The return value is the
 * number of records encountered or a negative error code. The log block and
 * buffer pointer of the last record seen are returned in rblk and rhead
 * respectively.
 */
STATIC int
xlog_seek_logrec_hdr(
        struct xlog             *log,
        xfs_daddr_t             head_blk,
        xfs_daddr_t             tail_blk,
        int                     count,
        char                    *buffer,
        xfs_daddr_t             *rblk,
        struct xlog_rec_header  **rhead,
        bool                    *wrapped)
{
        int                     i;
        int                     error;
        int                     found = 0;
        char                    *offset = NULL;
        xfs_daddr_t             end_blk;

        *wrapped = false;

        /*
         * Walk forward from the tail block until we hit the head or the last
         * block in the log.
         */
        end_blk = head_blk > tail_blk ? head_blk : log->l_logBBsize - 1;
        for (i = (int) tail_blk; i <= end_blk; i++) {
                error = xlog_bread(log, i, 1, buffer, &offset);
                if (error)
                        goto out_error;

                if (*(__be32 *) offset == cpu_to_be32(XLOG_HEADER_MAGIC_NUM)) {
                        *rblk = i;
                        *rhead = (struct xlog_rec_header *) offset;
                        if (++found == count)
                                break;
                }
        }

        /*
         * If we haven't hit the head block or the log record header count,
         * start looking again from the start of the physical log.
         */
        if (tail_blk > head_blk && found != count) {
                for (i = 0; i < (int) head_blk; i++) {
                        error = xlog_bread(log, i, 1, buffer, &offset);
                        if (error)
                                goto out_error;

                        if (*(__be32 *)offset ==
                            cpu_to_be32(XLOG_HEADER_MAGIC_NUM)) {
                                *wrapped = true;
                                *rblk = i;
                                *rhead = (struct xlog_rec_header *) offset;
                                if (++found == count)
                                        break;
                        }
                }
        }

        return found;

out_error:
        return error;
}

/*
 * Calculate distance from head to tail (i.e., unused space in the log).
 */
static inline int
xlog_tail_distance(
        struct xlog     *log,
        xfs_daddr_t     head_blk,
        xfs_daddr_t     tail_blk)
{
        if (head_blk < tail_blk)
                return tail_blk - head_blk;

        return tail_blk + (log->l_logBBsize - head_blk);
}
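
/*
 * E.g. (hypothetical values): in a 1000-block log with head_blk = 950 and
 * tail_blk = 100, the head has wrapped, so the unused space is
 * 100 + (1000 - 950) = 150 blocks.
 */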

/*
 * Verify the log tail. This is particularly important when torn or incomplete
 * writes have been detected near the front of the log and the head has been
 * walked back accordingly.
 *
 * We also have to handle the case where the tail was pinned and the head
 * blocked behind the tail right before a crash. If the tail had been pushed
 * immediately prior to the crash and the subsequent checkpoint was only
 * partially written, it's possible it overwrote the last referenced tail in the
 * log with garbage. This is not a coherency problem because the tail must have
 * been pushed before it can be overwritten, but appears as log corruption to
 * recovery because we have no way to know the tail was updated if the
 * subsequent checkpoint didn't write successfully.
 *
 * Therefore, CRC check the log from tail to head. If a failure occurs and the
 * offending record is within max iclog bufs from the head, walk the tail
 * forward and retry until a valid tail is found or corruption is detected out
 * of the range of a possible overwrite.
 */
STATIC int
xlog_verify_tail(
        struct xlog             *log,
        xfs_daddr_t             head_blk,
        xfs_daddr_t             *tail_blk,
        int                     hsize)
{
        struct xlog_rec_header  *thead;
        char                    *buffer;
        xfs_daddr_t             first_bad;
        int                     error = 0;
        bool                    wrapped;
        xfs_daddr_t             tmp_tail;
        xfs_daddr_t             orig_tail = *tail_blk;

        buffer = xlog_alloc_buffer(log, 1);
        if (!buffer)
                return -ENOMEM;

        /*
         * Make sure the tail points to a record (returns positive count on
         * success).
         */
        error = xlog_seek_logrec_hdr(log, head_blk, *tail_blk, 1, buffer,
                        &tmp_tail, &thead, &wrapped);
        if (error < 0)
                goto out;
        if (*tail_blk != tmp_tail)
                *tail_blk = tmp_tail;

        /*
         * Run a CRC check from the tail to the head. We can't just check
         * MAX_ICLOGS records past the tail because the tail may point to stale
         * blocks cleared during the search for the head/tail. These blocks are
         * overwritten with zero-length records and thus record count is not a
         * reliable indicator of the iclog state before a crash.
         */
        first_bad = 0;
        error = xlog_do_recovery_pass(log, head_blk, *tail_blk,
                                      XLOG_RECOVER_CRCPASS, &first_bad);
        while ((error == -EFSBADCRC || error == -EFSCORRUPTED) && first_bad) {
                int     tail_distance;

                /*
                 * Is corruption within range of the head? If so, retry from
                 * the next record. Otherwise return an error.
                 */
                tail_distance = xlog_tail_distance(log, head_blk, first_bad);
                if (tail_distance > BTOBB(XLOG_MAX_ICLOGS * hsize))
                        break;

                /* skip to the next record; returns positive count on success */
                error = xlog_seek_logrec_hdr(log, head_blk, first_bad, 2,
                                buffer, &tmp_tail, &thead, &wrapped);
                if (error < 0)
                        goto out;

                *tail_blk = tmp_tail;
                first_bad = 0;
                error = xlog_do_recovery_pass(log, head_blk, *tail_blk,
                                              XLOG_RECOVER_CRCPASS, &first_bad);
        }

        if (!error && *tail_blk != orig_tail)
                xfs_warn(log->l_mp,
                "Tail block (0x%llx) overwrite detected. Updated to 0x%llx",
                         orig_tail, *tail_blk);
out:
        kmem_free(buffer);
        return error;
}

/*
 * Detect and trim torn writes from the head of the log.
 *
 * Storage without sector atomicity guarantees can result in torn writes in the
 * log in the event of a crash. Our only means to detect this scenario is via
 * CRC verification. While we can't always be certain that CRC verification
 * failure is due to a torn write vs. an unrelated corruption, we do know that
 * only a certain number (XLOG_MAX_ICLOGS) of log records can be written out at
 * one time. Therefore, CRC verify up to XLOG_MAX_ICLOGS records at the head of
 * the log and treat failures in this range as torn writes as a matter of
 * policy. In the event of CRC failure, the head is walked back to the last good
 * record in the log and the tail is updated from that record and verified.
 */
STATIC int
xlog_verify_head(
        struct xlog             *log,
        xfs_daddr_t             *head_blk,      /* in/out: unverified head */
        xfs_daddr_t             *tail_blk,      /* out: tail block */
        char                    *buffer,
        xfs_daddr_t             *rhead_blk,     /* start blk of last record */
        struct xlog_rec_header  **rhead,        /* ptr to last record */
        bool                    *wrapped)       /* last rec. wraps phys. log */
{
        struct xlog_rec_header  *tmp_rhead;
        char                    *tmp_buffer;
        xfs_daddr_t             first_bad;
        xfs_daddr_t             tmp_rhead_blk;
        int                     found;
        int                     error;
        bool                    tmp_wrapped;

        /*
         * Check the head of the log for torn writes. Search backwards from the
         * head until we hit the tail or the maximum number of log record I/Os
         * that could have been in flight at one time. Use a temporary buffer so
         * we don't trash the rhead/buffer pointers from the caller.
         */
        tmp_buffer = xlog_alloc_buffer(log, 1);
        if (!tmp_buffer)
                return -ENOMEM;
        error = xlog_rseek_logrec_hdr(log, *head_blk, *tail_blk,
                                      XLOG_MAX_ICLOGS, tmp_buffer,
                                      &tmp_rhead_blk, &tmp_rhead, &tmp_wrapped);
        kmem_free(tmp_buffer);
        if (error < 0)
                return error;

        /*
         * Now run a CRC verification pass over the records starting at the
         * block found above to the current head. If a CRC failure occurs, the
         * log block of the first bad record is saved in first_bad.
         */
        error = xlog_do_recovery_pass(log, *head_blk, tmp_rhead_blk,
                                      XLOG_RECOVER_CRCPASS, &first_bad);
        if ((error == -EFSBADCRC || error == -EFSCORRUPTED) && first_bad) {
                /*
                 * We've hit a potential torn write. Reset the error and warn
                 * about it.
                 */
                error = 0;
                xfs_warn(log->l_mp,
"Torn write (CRC failure) detected at log block 0x%llx. Truncating head block from 0x%llx.",
                         first_bad, *head_blk);

                /*
                 * Get the header block and buffer pointer for the last good
                 * record before the bad record.
                 *
                 * Note that xlog_find_tail() clears the blocks at the new head
                 * (i.e., the records with invalid CRC) if the cycle number
                 * matches the current cycle.
                 */
                found = xlog_rseek_logrec_hdr(log, first_bad, *tail_blk, 1,
                                buffer, rhead_blk, rhead, wrapped);
                if (found < 0)
                        return found;
                if (found == 0)         /* XXX: right thing to do here? */
                        return -EIO;

                /*
                 * Reset the head block to the starting block of the first bad
                 * log record and set the tail block based on the last good
                 * record.
                 *
                 * Bail out if the updated head/tail match as this indicates
                 * possible corruption outside of the acceptable
                 * (XLOG_MAX_ICLOGS) range. This is a job for xfs_repair...
                 */
                *head_blk = first_bad;
                *tail_blk = BLOCK_LSN(be64_to_cpu((*rhead)->h_tail_lsn));
                if (*head_blk == *tail_blk) {
                        ASSERT(0);
                        return 0;
                }
        }
        if (error)
                return error;

        return xlog_verify_tail(log, *head_blk, tail_blk,
                                be32_to_cpu((*rhead)->h_size));
}

/*
 * We need to make sure we handle log wrapping properly, so we can't use the
 * calculated logbno directly. Make sure it wraps to the correct bno inside the
 * log.
 *
 * The log is limited to 32 bit sizes, so we use the appropriate modulus
 * operation here and cast it back to a 64 bit daddr on return.
 */
static inline xfs_daddr_t
xlog_wrap_logbno(
        struct xlog             *log,
        xfs_daddr_t             bno)
{
        int                     mod;

        div_s64_rem(bno, log->l_logBBsize, &mod);
        return mod;
}
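
/*
 * E.g. (hypothetical values): in a 1000-block log, a computed position of
 * bno = 1005 wraps to block 5.
 */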
1178 
1179 /*
1180  * Check whether the head of the log points to an unmount record. In other
1181  * words, determine whether the log is clean. If so, update the in-core state
1182  * appropriately.
1183  */
1184 static int
1185 xlog_check_unmount_rec(
1186         struct xlog             *log,
1187         xfs_daddr_t             *head_blk,
1188         xfs_daddr_t             *tail_blk,
1189         struct xlog_rec_header  *rhead,
1190         xfs_daddr_t             rhead_blk,
1191         char                    *buffer,
1192         bool                    *clean)
1193 {
1194         struct xlog_op_header   *op_head;
1195         xfs_daddr_t             umount_data_blk;
1196         xfs_daddr_t             after_umount_blk;
1197         int                     hblks;
1198         int                     error;
1199         char                    *offset;
1200 
1201         *clean = false;
1202 
1203         /*
1204          * Look for unmount record. If we find it, then we know there was a
1205          * clean unmount. Since 'i' could be the last block in the physical
1206          * log, we convert to a log block before comparing to the head_blk.
1207          *
1208          * Save the current tail lsn to use to pass to xlog_clear_stale_blocks()
1209          * below. We won't want to clear the unmount record if there is one, so
1210          * we pass the lsn of the unmount record rather than the block after it.
1211          */
1212         if (xfs_sb_version_haslogv2(&log->l_mp->m_sb)) {
1213                 int     h_size = be32_to_cpu(rhead->h_size);
1214                 int     h_version = be32_to_cpu(rhead->h_version);
1215 
1216                 if ((h_version & XLOG_VERSION_2) &&
1217                     (h_size > XLOG_HEADER_CYCLE_SIZE)) {
1218                         hblks = h_size / XLOG_HEADER_CYCLE_SIZE;
1219                         if (h_size % XLOG_HEADER_CYCLE_SIZE)
1220                                 hblks++;
1221                 } else {
1222                         hblks = 1;
1223                 }
1224         } else {
1225                 hblks = 1;
1226         }
1227 
1228         after_umount_blk = xlog_wrap_logbno(log,
1229                         rhead_blk + hblks + BTOBB(be32_to_cpu(rhead->h_len)));
1230 
1231         if (*head_blk == after_umount_blk &&
1232             be32_to_cpu(rhead->h_num_logops) == 1) {
1233                 umount_data_blk = xlog_wrap_logbno(log, rhead_blk + hblks);
1234                 error = xlog_bread(log, umount_data_blk, 1, buffer, &offset);
1235                 if (error)
1236                         return error;
1237 
1238                 op_head = (struct xlog_op_header *)offset;
1239                 if (op_head->oh_flags & XLOG_UNMOUNT_TRANS) {
1240                         /*
1241                          * Set tail and last sync so that newly written log
1242                          * records will point recovery to after the current
1243                          * unmount record.
1244                          */
1245                         xlog_assign_atomic_lsn(&log->l_tail_lsn,
1246                                         log->l_curr_cycle, after_umount_blk);
1247                         xlog_assign_atomic_lsn(&log->l_last_sync_lsn,
1248                                         log->l_curr_cycle, after_umount_blk);
1249                         *tail_blk = after_umount_blk;
1250 
1251                         *clean = true;
1252                 }
1253         }
1254 
1255         return 0;
1256 }
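
/*
 * The header-block arithmetic above, restated as a hedged sketch
 * (hypothetical helper; the function above open-codes it): a v2 log with
 * a 256k h_size needs ceil(256k / 32k) == 8 header blocks, since
 * XLOG_HEADER_CYCLE_SIZE is 32k; everything else uses a single block.
 */
static inline int
example_rhead_hblks(int h_version, int h_size)
{
	if ((h_version & XLOG_VERSION_2) && h_size > XLOG_HEADER_CYCLE_SIZE)
		return DIV_ROUND_UP(h_size, XLOG_HEADER_CYCLE_SIZE);
	return 1;		/* v1 logs and small v2 headers */
}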
1257 
1258 static void
1259 xlog_set_state(
1260         struct xlog             *log,
1261         xfs_daddr_t             head_blk,
1262         struct xlog_rec_header  *rhead,
1263         xfs_daddr_t             rhead_blk,
1264         bool                    bump_cycle)
1265 {
1266         /*
1267          * Reset log values according to the state of the log when we
1268          * crashed.  In the case where head_blk == 0, we bump curr_cycle
1269          * one because the next write starts a new cycle rather than
1270          * continuing the cycle of the last good log record.  At this
1271          * point we have guaranteed that all partial log records have been
1272          * accounted for.  Therefore, we know that the last good log record
1273          * written was complete and ended exactly on the end boundary
1274          * of the physical log.
1275          */
1276         log->l_prev_block = rhead_blk;
1277         log->l_curr_block = (int)head_blk;
1278         log->l_curr_cycle = be32_to_cpu(rhead->h_cycle);
1279         if (bump_cycle)
1280                 log->l_curr_cycle++;
1281         atomic64_set(&log->l_tail_lsn, be64_to_cpu(rhead->h_tail_lsn));
1282         atomic64_set(&log->l_last_sync_lsn, be64_to_cpu(rhead->h_lsn));
1283         xlog_assign_grant_head(&log->l_reserve_head.grant, log->l_curr_cycle,
1284                                         BBTOB(log->l_curr_block));
1285         xlog_assign_grant_head(&log->l_write_head.grant, log->l_curr_cycle,
1286                                         BBTOB(log->l_curr_block));
1287 }
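
/*
 * For reference, the LSN layout the assignments above rely on: the cycle
 * number lives in the high 32 bits and the basic block number in the low
 * 32 bits.  A simplified sketch of xlog_assign_lsn()/CYCLE_LSN()/
 * BLOCK_LSN() using plain fixed-width types (not the real definitions,
 * which live in the log headers):
 */
static inline uint64_t example_assign_lsn(uint32_t cycle, uint32_t block)
{
	return ((uint64_t)cycle << 32) | block;	/* cycle high, block low */
}

static inline uint32_t example_cycle_lsn(uint64_t lsn)
{
	return lsn >> 32;		/* like CYCLE_LSN() */
}

static inline uint32_t example_block_lsn(uint64_t lsn)
{
	return (uint32_t)lsn;		/* like BLOCK_LSN() */
}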
1288 
1289 /*
1290  * Find the sync block number or the tail of the log.
1291  *
1292  * This will be the block number of the last record to have its
1293  * associated buffers synced to disk.  Every log record header has
1294  * a sync lsn embedded in it.  LSNs hold block numbers, so it is easy
1295  * to get a sync block number.  The only concern is to figure out which
1296  * log record header to believe.
1297  *
1298  * The following algorithm uses the log record header with the largest
1299  * lsn.  The entire log record does not need to be valid.  We only care
1300  * that the header is valid.
1301  *
1302  * We could speed up the search by using the current head_blk buffer, but it
1303  * is not available.
1304  */
1305 STATIC int
1306 xlog_find_tail(
1307         struct xlog             *log,
1308         xfs_daddr_t             *head_blk,
1309         xfs_daddr_t             *tail_blk)
1310 {
1311         xlog_rec_header_t       *rhead;
1312         char                    *offset = NULL;
1313         char                    *buffer;
1314         int                     error;
1315         xfs_daddr_t             rhead_blk;
1316         xfs_lsn_t               tail_lsn;
1317         bool                    wrapped = false;
1318         bool                    clean = false;
1319 
1320         /*
1321          * Find previous log record
1322          */
1323         if ((error = xlog_find_head(log, head_blk)))
1324                 return error;
1325         ASSERT(*head_blk < INT_MAX);
1326 
1327         buffer = xlog_alloc_buffer(log, 1);
1328         if (!buffer)
1329                 return -ENOMEM;
1330         if (*head_blk == 0) {                           /* special case */
1331                 error = xlog_bread(log, 0, 1, buffer, &offset);
1332                 if (error)
1333                         goto done;
1334 
1335                 if (xlog_get_cycle(offset) == 0) {
1336                         *tail_blk = 0;
1337                         /* leave all other log inited values alone */
1338                         goto done;
1339                 }
1340         }
1341 
1342         /*
1343          * Search backwards through the log looking for the log record header
1344          * block. This wraps all the way back around to the head so something is
1345          * seriously wrong if we can't find it.
1346          */
1347         error = xlog_rseek_logrec_hdr(log, *head_blk, *head_blk, 1, buffer,
1348                                       &rhead_blk, &rhead, &wrapped);
1349         if (!error) {
1350                 xfs_warn(log->l_mp, "%s: couldn't find sync record", __func__);
1351                 error = -EIO;
1352         }
1353         if (error < 0)
1354                 goto done;              /* don't leak the search buffer */
1355         *tail_blk = BLOCK_LSN(be64_to_cpu(rhead->h_tail_lsn));
1356 
1357         /*
1358          * Set the log state based on the current head record.
1359          */
1360         xlog_set_state(log, *head_blk, rhead, rhead_blk, wrapped);
1361         tail_lsn = atomic64_read(&log->l_tail_lsn);
1362 
1363         /*
1364          * Look for an unmount record at the head of the log. This sets the log
1365          * state to determine whether recovery is necessary.
1366          */
1367         error = xlog_check_unmount_rec(log, head_blk, tail_blk, rhead,
1368                                        rhead_blk, buffer, &clean);
1369         if (error)
1370                 goto done;
1371 
1372         /*
1373          * Verify the log head if the log is not clean (e.g., we have anything
1374          * but an unmount record at the head). This uses CRC verification to
1375          * detect and trim torn writes. If discovered, CRC failures are
1376          * considered torn writes and the log head is trimmed accordingly.
1377          *
1378          * Note that we can only run CRC verification when the log is dirty
1379          * because there's no guarantee that the log data behind an unmount
1380          * record is compatible with the current architecture.
1381          */
1382         if (!clean) {
1383                 xfs_daddr_t     orig_head = *head_blk;
1384 
1385                 error = xlog_verify_head(log, head_blk, tail_blk, buffer,
1386                                          &rhead_blk, &rhead, &wrapped);
1387                 if (error)
1388                         goto done;
1389 
1390                 /* update in-core state again if the head changed */
1391                 if (*head_blk != orig_head) {
1392                         xlog_set_state(log, *head_blk, rhead, rhead_blk,
1393                                        wrapped);
1394                         tail_lsn = atomic64_read(&log->l_tail_lsn);
1395                         error = xlog_check_unmount_rec(log, head_blk, tail_blk,
1396                                                        rhead, rhead_blk, buffer,
1397                                                        &clean);
1398                         if (error)
1399                                 goto done;
1400                 }
1401         }
1402 
1403         /*
1404          * Note that the unmount was clean. If the unmount was not clean, we
1405          * need to know this to rebuild the superblock counters from the perag
1406          * headers if we have a filesystem using non-persistent counters.
1407          */
1408         if (clean)
1409                 log->l_mp->m_flags |= XFS_MOUNT_WAS_CLEAN;
1410 
1411         /*
1412          * Make sure that there are no blocks in front of the head
1413          * with the same cycle number as the head.  This can happen
1414          * because we allow multiple outstanding log writes concurrently,
1415          * and the later writes might make it out before earlier ones.
1416          *
1417          * We use the lsn from before modifying it so that we'll never
1418          * overwrite the unmount record after a clean unmount.
1419          *
1420          * Do this only if we are going to recover the filesystem
1421          *
1422          * NOTE: This used to say "if (!readonly)"
1423          * However on Linux, we can & do recover a read-only filesystem.
1424          * We only skip recovery if NORECOVERY is specified on mount,
1425          * in which case we would not be here.
1426          *
1427          * But... if the -device- itself is readonly, just skip this.
1428          * We can't recover this device anyway, so it won't matter.
1429          */
1430         if (!xfs_readonly_buftarg(log->l_targ))
1431                 error = xlog_clear_stale_blocks(log, tail_lsn);
1432 
1433 done:
1434         kmem_free(buffer);
1435 
1436         if (error)
1437                 xfs_warn(log->l_mp, "failed to locate log tail");
1438         return error;
1439 }
1440 
1441 /*
1442  * Is the log zeroed at all?
1443  *
1444  * The last binary search should be changed to perform an X block read
1445  * once X becomes small enough.  You can then search linearly through
1446  * the X blocks.  This will cut down on the number of reads we need to do.
1447  *
1448  * If the log is partially zeroed, this routine will pass back the blkno
1449  * of the first block with cycle number 0.  It won't have a complete LR
1450  * preceding it.
1451  *
1452  * Return:
1453  *      0  => the log is completely written to
1454  *      1  => use *blk_no as the first block of the log
1455  *      <0 => error has occurred
1456  */
1457 STATIC int
1458 xlog_find_zeroed(
1459         struct xlog     *log,
1460         xfs_daddr_t     *blk_no)
1461 {
1462         char            *buffer;
1463         char            *offset;
1464         uint            first_cycle, last_cycle;
1465         xfs_daddr_t     new_blk, last_blk, start_blk;
1466         xfs_daddr_t     num_scan_bblks;
1467         int             error, log_bbnum = log->l_logBBsize;
1468 
1469         *blk_no = 0;
1470 
1471         /* check totally zeroed log */
1472         buffer = xlog_alloc_buffer(log, 1);
1473         if (!buffer)
1474                 return -ENOMEM;
1475         error = xlog_bread(log, 0, 1, buffer, &offset);
1476         if (error)
1477                 goto out_free_buffer;
1478 
1479         first_cycle = xlog_get_cycle(offset);
1480         if (first_cycle == 0) {         /* completely zeroed log */
1481                 *blk_no = 0;
1482                 kmem_free(buffer);
1483                 return 1;
1484         }
1485 
1486         /* check partially zeroed log */
1487         error = xlog_bread(log, log_bbnum-1, 1, buffer, &offset);
1488         if (error)
1489                 goto out_free_buffer;
1490 
1491         last_cycle = xlog_get_cycle(offset);
1492         if (last_cycle != 0) {          /* log completely written to */
1493                 kmem_free(buffer);
1494                 return 0;
1495         }
1496 
1497         /* we have a partially zeroed log */
1498         last_blk = log_bbnum-1;
1499         error = xlog_find_cycle_start(log, buffer, 0, &last_blk, 0);
1500         if (error)
1501                 goto out_free_buffer;
1502 
1503         /*
1504          * Validate the answer.  Because there is no way to guarantee that
1505          * the entire log is made up of log records which are the same size,
1506          * we scan over the defined maximum blocks.  At this point, the maximum
1507          * is not chosen to mean anything special.   XXXmiken
1508          */
1509         num_scan_bblks = XLOG_TOTAL_REC_SHIFT(log);
1510         ASSERT(num_scan_bblks <= INT_MAX);
1511 
1512         if (last_blk < num_scan_bblks)
1513                 num_scan_bblks = last_blk;
1514         start_blk = last_blk - num_scan_bblks;
1515 
1516         /*
1517          * We search for any instances of cycle number 0 that occur before
1518          * our current estimate of the head.  What we're trying to detect is
1519          *        1 ... | 0 | 1 | 0...
1520          *                       ^ binary search ends here
1521          */
1522         if ((error = xlog_find_verify_cycle(log, start_blk,
1523                                          (int)num_scan_bblks, 0, &new_blk)))
1524                 goto out_free_buffer;
1525         if (new_blk != -1)
1526                 last_blk = new_blk;
1527 
1528         /*
1529          * Potentially backup over partial log record write.  We don't need
1530          * to search the end of the log because we know it is zero.
1531          */
1532         error = xlog_find_verify_log_record(log, start_blk, &last_blk, 0);
1533         if (error == 1)
1534                 error = -EIO;
1535         if (error)
1536                 goto out_free_buffer;
1537 
1538         *blk_no = last_blk;
1539 out_free_buffer:
1540         kmem_free(buffer);
1541         if (error)
1542                 return error;
1543         return 1;
1544 }
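
/*
 * The predicate behind the search above, in miniature: every basic block
 * is stamped with the cycle number it was last written under, so a cycle
 * of zero marks a never-written block.  A hedged sketch of what
 * xlog_get_cycle() does (the real helper lives in xfs_log_priv.h):
 */
static inline uint example_get_cycle(char *ptr)
{
	/* record headers store the cycle just after the magic number */
	if (be32_to_cpu(*(__be32 *)ptr) == XLOG_HEADER_MAGIC_NUM)
		return be32_to_cpu(*((__be32 *)ptr + 1));
	return be32_to_cpu(*(__be32 *)ptr);
}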
1545 
1546 /*
1547  * These are simple subroutines used by xlog_clear_stale_blocks() below
1548  * to initialize a buffer full of empty log record headers and write
1549  * them into the log.
1550  */
1551 STATIC void
1552 xlog_add_record(
1553         struct xlog             *log,
1554         char                    *buf,
1555         int                     cycle,
1556         int                     block,
1557         int                     tail_cycle,
1558         int                     tail_block)
1559 {
1560         xlog_rec_header_t       *recp = (xlog_rec_header_t *)buf;
1561 
1562         memset(buf, 0, BBSIZE);
1563         recp->h_magicno = cpu_to_be32(XLOG_HEADER_MAGIC_NUM);
1564         recp->h_cycle = cpu_to_be32(cycle);
1565         recp->h_version = cpu_to_be32(
1566                         xfs_sb_version_haslogv2(&log->l_mp->m_sb) ? 2 : 1);
1567         recp->h_lsn = cpu_to_be64(xlog_assign_lsn(cycle, block));
1568         recp->h_tail_lsn = cpu_to_be64(xlog_assign_lsn(tail_cycle, tail_block));
1569         recp->h_fmt = cpu_to_be32(XLOG_FMT);
1570         memcpy(&recp->h_fs_uuid, &log->l_mp->m_sb.sb_uuid, sizeof(uuid_t));
1571 }
1572 
1573 STATIC int
1574 xlog_write_log_records(
1575         struct xlog     *log,
1576         int             cycle,
1577         int             start_block,
1578         int             blocks,
1579         int             tail_cycle,
1580         int             tail_block)
1581 {
1582         char            *offset;
1583         char            *buffer;
1584         int             balign, ealign;
1585         int             sectbb = log->l_sectBBsize;
1586         int             end_block = start_block + blocks;
1587         int             bufblks;
1588         int             error = 0;
1589         int             i, j = 0;
1590 
1591         /*
1592          * Greedily allocate a buffer big enough to handle the full
1593          * range of basic blocks to be written.  If that fails, try
1594          * a smaller size.  We need to be able to write at least a
1595          * log sector, or we're out of luck.
1596          */
1597         bufblks = 1 << ffs(blocks);
1598         while (bufblks > log->l_logBBsize)
1599                 bufblks >>= 1;
1600         while (!(buffer = xlog_alloc_buffer(log, bufblks))) {
1601                 bufblks >>= 1;
1602                 if (bufblks < sectbb)
1603                         return -ENOMEM;
1604         }
1605 
1606         /* We may need to do a read at the start to fill in part of
1607          * the buffer in the starting sector not covered by the first
1608          * write below.
1609          */
1610         balign = round_down(start_block, sectbb);
1611         if (balign != start_block) {
1612                 error = xlog_bread_noalign(log, start_block, 1, buffer);
1613                 if (error)
1614                         goto out_free_buffer;
1615 
1616                 j = start_block - balign;
1617         }
1618 
1619         for (i = start_block; i < end_block; i += bufblks) {
1620                 int             bcount, endcount;
1621 
1622                 bcount = min(bufblks, end_block - start_block);
1623                 endcount = bcount - j;
1624 
1625                 /* We may need to do a read at the end to fill in part of
1626                  * the buffer in the final sector not covered by the write.
1627                  * If this is the same sector as the above read, skip it.
1628                  */
1629                 ealign = round_down(end_block, sectbb);
1630                 if (j == 0 && (start_block + endcount > ealign)) {
1631                         error = xlog_bread_noalign(log, ealign, sectbb,
1632                                         buffer + BBTOB(ealign - start_block));
1633                         if (error)
1634                                 break;
1635 
1636                 }
1637 
1638                 offset = buffer + xlog_align(log, start_block);
1639                 for (; j < endcount; j++) {
1640                         xlog_add_record(log, offset, cycle, i+j,
1641                                         tail_cycle, tail_block);
1642                         offset += BBSIZE;
1643                 }
1644                 error = xlog_bwrite(log, start_block, endcount, buffer);
1645                 if (error)
1646                         break;
1647                 start_block += endcount;
1648                 j = 0;
1649         }
1650 
1651 out_free_buffer:
1652         kmem_free(buffer);
1653         return error;
1654 }
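
/*
 * Sector alignment in the routine above, by worked example: with 4k
 * sectors (sectbb == 8) and start_block == 11, balign is
 * round_down(11, 8) == 8, so the first sector is read back and j starts
 * at 3; records are then stamped from block 11 on and the full sector is
 * rewritten.  For the power-of-two sector sizes in use here, round_down()
 * reduces to masking off the low bits:
 *
 *	round_down(x, y) == (x) & ~((y) - 1)
 */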
1655 
1656 /*
1657  * This routine is called to blow away any incomplete log writes out
1658  * in front of the log head.  We do this so that we won't become confused
1659  * if we come up, write only a little bit more, and then crash again.
1660  * If we leave the partial log records out there, this situation could
1661  * cause us to think those partial writes are valid blocks since they
1662  * have the current cycle number.  We get rid of them by overwriting them
1663  * with empty log records with the old cycle number rather than the
1664  * current one.
1665  *
1666  * The tail lsn is passed in rather than taken from
1667  * the log so that we will not write over the unmount record after a
1668  * clean unmount in a 512 block log.  Doing so would leave the log without
1669  * any valid log records in it until a new one was written.  If we crashed
1670  * during that time we would not be able to recover.
1671  */
1672 STATIC int
1673 xlog_clear_stale_blocks(
1674         struct xlog     *log,
1675         xfs_lsn_t       tail_lsn)
1676 {
1677         int             tail_cycle, head_cycle;
1678         int             tail_block, head_block;
1679         int             tail_distance, max_distance;
1680         int             distance;
1681         int             error;
1682 
1683         tail_cycle = CYCLE_LSN(tail_lsn);
1684         tail_block = BLOCK_LSN(tail_lsn);
1685         head_cycle = log->l_curr_cycle;
1686         head_block = log->l_curr_block;
1687 
1688         /*
1689          * Figure out the distance between the new head of the log
1690          * and the tail.  We want to write over any blocks beyond the
1691          * head that we may have written just before the crash, but
1692          * we don't want to overwrite the tail of the log.
1693          */
1694         if (head_cycle == tail_cycle) {
1695                 /*
1696                  * The tail is behind the head in the physical log,
1697                  * so the distance from the head to the tail is the
1698                  * distance from the head to the end of the log plus
1699                  * the distance from the beginning of the log to the
1700                  * tail.
1701                  */
1702                 if (unlikely(head_block < tail_block || head_block >= log->l_logBBsize)) {
1703                         XFS_ERROR_REPORT("xlog_clear_stale_blocks(1)",
1704                                          XFS_ERRLEVEL_LOW, log->l_mp);
1705                         return -EFSCORRUPTED;
1706                 }
1707                 tail_distance = tail_block + (log->l_logBBsize - head_block);
1708         } else {
1709                 /*
1710                  * The head is behind the tail in the physical log,
1711                  * so the distance from the head to the tail is just
1712                  * the tail block minus the head block.
1713                  */
1714                 if (unlikely(head_block >= tail_block || head_cycle != (tail_cycle + 1))) {
1715                         XFS_ERROR_REPORT("xlog_clear_stale_blocks(2)",
1716                                          XFS_ERRLEVEL_LOW, log->l_mp);
1717                         return -EFSCORRUPTED;
1718                 }
1719                 tail_distance = tail_block - head_block;
1720         }
1721 
1722         /*
1723          * If the head is right up against the tail, we can't clear
1724          * anything.
1725          */
1726         if (tail_distance <= 0) {
1727                 ASSERT(tail_distance == 0);
1728                 return 0;
1729         }
1730 
1731         max_distance = XLOG_TOTAL_REC_SHIFT(log);
1732         /*
1733          * Take the smaller of the maximum amount of outstanding I/O
1734          * we could have and the distance to the tail to clear out.
1735          * We take the smaller so that we don't overwrite the tail and
1736          * we don't waste all day writing from the head to the tail
1737          * for no reason.
1738          */
1739         max_distance = min(max_distance, tail_distance);
1740 
1741         if ((head_block + max_distance) <= log->l_logBBsize) {
1742                 /*
1743                  * We can stomp all the blocks we need to without
1744                  * wrapping around the end of the log.  Just do it
1745                  * in a single write.  Use the cycle number of the
1746                  * current cycle minus one so that the log will look like:
1747                  *     n ... | n - 1 ...
1748                  */
1749                 error = xlog_write_log_records(log, (head_cycle - 1),
1750                                 head_block, max_distance, tail_cycle,
1751                                 tail_block);
1752                 if (error)
1753                         return error;
1754         } else {
1755                 /*
1756                  * We need to wrap around the end of the physical log in
1757                  * order to clear all the blocks.  Do it in two separate
1758                  * I/Os.  The first write should be from the head to the
1759                  * end of the physical log, and it should use the current
1760                  * cycle number minus one just like above.
1761                  */
1762                 distance = log->l_logBBsize - head_block;
1763                 error = xlog_write_log_records(log, (head_cycle - 1),
1764                                 head_block, distance, tail_cycle,
1765                                 tail_block);
1766 
1767                 if (error)
1768                         return error;
1769 
1770                 /*
1771                  * Now write the blocks at the start of the physical log.
1772                  * This writes the remainder of the blocks we want to clear.
1773                  * It uses the current cycle number since we're now on the
1774                  * same cycle as the head so that we get:
1775                  *    n ... n ... | n - 1 ...
1776                  *    ^^^^^ blocks we're writing
1777                  */
1778                 distance = max_distance - (log->l_logBBsize - head_block);
1779                 error = xlog_write_log_records(log, head_cycle, 0, distance,
1780                                 tail_cycle, tail_block);
1781                 if (error)
1782                         return error;
1783         }
1784 
1785         return 0;
1786 }
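
/*
 * Worked example of the distance computation above, assuming a
 * hypothetical 2000-block log: with head_cycle == tail_cycle,
 * head_block == 1500 and tail_block == 100, the stale region wraps, so
 * tail_distance == 100 + (2000 - 1500) == 600; with
 * head_cycle == tail_cycle + 1, head_block == 100 and tail_block == 1500,
 * it is simply 1500 - 100 == 1400.  At most
 * min(tail_distance, XLOG_TOTAL_REC_SHIFT(log)) blocks are then stamped.
 */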
1787 
1788 /******************************************************************************
1789  *
1790  *              Log recover routines
1791  *
1792  ******************************************************************************
1793  */
1794 
1795 /*
1796  * Sort the log items in the transaction.
1797  *
1798  * The ordering constraints are defined by the inode allocation and unlink
1799  * behaviour. The rules are:
1800  *
1801  *      1. Every item is only logged once in a given transaction. Hence it
1802  *         represents the last logged state of the item. Hence ordering is
1803  *         dependent on the order in which operations need to be performed so
1804  *         required initial conditions are always met.
1805  *
1806  *      2. Cancelled buffers are recorded in pass 1 in a separate table and
1807  *         there's nothing to replay from them so we can simply cull them
1808  *         from the transaction. However, we can't do that until after we've
1809  *         replayed all the other items because they may be dependent on the
1810  *         cancelled buffer and replaying the cancelled buffer can remove it
1811  *         from the cancelled buffer table. Hence they have to be done last.
1812  *
1813  *      3. Inode allocation buffers must be replayed before inode items that
1814  *         read the buffer and replay changes into it. For filesystems using the
1815  *         ICREATE transactions, this means XFS_LI_ICREATE objects need to get
1816  *         treated the same as inode allocation buffers as they create and
1817  *         initialise the buffers directly.
1818  *
1819  *      4. Inode unlink buffers must be replayed after inode items are replayed.
1820  *         This ensures that inodes are completely flushed to the inode buffer
1821  *         in a "free" state before we remove the unlinked inode list pointer.
1822  *
1823  * Hence the ordering needs to be inode allocation buffers first, inode items
1824  * second, inode unlink buffers third and cancelled buffers last.
1825  *
1826  * But there's a problem with that - we can't tell an inode allocation buffer
1827  * apart from a regular buffer, so we can't separate them. We can, however,
1828  * tell an inode unlink buffer from the others, and so we can separate them out
1829  * from all the other buffers and move them to last.
1830  *
1831  * Hence, 4 lists, in order from head to tail:
1832  *      - buffer_list for all buffers except cancelled/inode unlink buffers
1833  *      - item_list for all non-buffer items
1834  *      - inode_buffer_list for inode unlink buffers
1835  *      - cancel_list for the cancelled buffers
1836  *
1837  * Note that we add objects to the tail of the lists so that first-to-last
1838  * ordering is preserved within the lists. Adding objects to the head of the
1839  * list means when we traverse from the head we walk them in last-to-first
1840  * order. For cancelled buffers and inode unlink buffers this doesn't matter,
1841  * but for all other items there may be specific ordering that we need to
1842  * preserve.
1843  */
1844 STATIC int
1845 xlog_recover_reorder_trans(
1846         struct xlog             *log,
1847         struct xlog_recover     *trans,
1848         int                     pass)
1849 {
1850         xlog_recover_item_t     *item, *n;
1851         int                     error = 0;
1852         LIST_HEAD(sort_list);
1853         LIST_HEAD(cancel_list);
1854         LIST_HEAD(buffer_list);
1855         LIST_HEAD(inode_buffer_list);
1856         LIST_HEAD(inode_list);
1857 
1858         list_splice_init(&trans->r_itemq, &sort_list);
1859         list_for_each_entry_safe(item, n, &sort_list, ri_list) {
1860                 xfs_buf_log_format_t    *buf_f = item->ri_buf[0].i_addr;
1861 
1862                 switch (ITEM_TYPE(item)) {
1863                 case XFS_LI_ICREATE:
1864                         list_move_tail(&item->ri_list, &buffer_list);
1865                         break;
1866                 case XFS_LI_BUF:
1867                         if (buf_f->blf_flags & XFS_BLF_CANCEL) {
1868                                 trace_xfs_log_recover_item_reorder_head(log,
1869                                                         trans, item, pass);
1870                                 list_move(&item->ri_list, &cancel_list);
1871                                 break;
1872                         }
1873                         if (buf_f->blf_flags & XFS_BLF_INODE_BUF) {
1874                                 list_move(&item->ri_list, &inode_buffer_list);
1875                                 break;
1876                         }
1877                         list_move_tail(&item->ri_list, &buffer_list);
1878                         break;
1879                 case XFS_LI_INODE:
1880                 case XFS_LI_DQUOT:
1881                 case XFS_LI_QUOTAOFF:
1882                 case XFS_LI_EFD:
1883                 case XFS_LI_EFI:
1884                 case XFS_LI_RUI:
1885                 case XFS_LI_RUD:
1886                 case XFS_LI_CUI:
1887                 case XFS_LI_CUD:
1888                 case XFS_LI_BUI:
1889                 case XFS_LI_BUD:
1890                         trace_xfs_log_recover_item_reorder_tail(log,
1891                                                         trans, item, pass);
1892                         list_move_tail(&item->ri_list, &inode_list);
1893                         break;
1894                 default:
1895                         xfs_warn(log->l_mp,
1896                                 "%s: unrecognized type of log operation",
1897                                 __func__);
1898                         ASSERT(0);
1899                         /*
1900                          * Return the remaining items to the transaction
1901                          * item list so they can be freed by the caller.
1902                          */
1903                         if (!list_empty(&sort_list))
1904                                 list_splice_init(&sort_list, &trans->r_itemq);
1905                         error = -EIO;
1906                         goto out;
1907                 }
1908         }
1909 out:
1910         ASSERT(list_empty(&sort_list));
1911         if (!list_empty(&buffer_list))
1912                 list_splice(&buffer_list, &trans->r_itemq);
1913         if (!list_empty(&inode_list))
1914                 list_splice_tail(&inode_list, &trans->r_itemq);
1915         if (!list_empty(&inode_buffer_list))
1916                 list_splice_tail(&inode_buffer_list, &trans->r_itemq);
1917         if (!list_empty(&cancel_list))
1918                 list_splice_tail(&cancel_list, &trans->r_itemq);
1919         return error;
1920 }
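
/*
 * Net effect of the splices above, by example (hypothetical item names):
 * a transaction that arrived as { INODE, CANCEL-BUF, REG-BUF, ICREATE,
 * UNLINK-BUF } leaves r_itemq as { REG-BUF, ICREATE, INODE, UNLINK-BUF,
 * CANCEL-BUF }: ordinary and icreate buffers first, non-buffer items
 * second, inode unlink buffers third, cancelled buffers last.
 */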
1921 
1922 /*
1923  * Build up the table of buf cancel records so that we don't replay
1924  * cancelled data in the second pass.  For buffer records that are
1925  * not cancel records, there is nothing to do here so we just return.
1926  *
1927  * If we get a cancel record which is already in the table, this indicates
1928  * that the buffer was cancelled multiple times.  In order to ensure
1929  * that during pass 2 we keep the record in the table until we reach its
1930  * last occurrence in the log, we keep a reference count in the cancel
1931  * record in the table to tell us how many times we expect to see this
1932  * record during the second pass.
1933  */
1934 STATIC int
1935 xlog_recover_buffer_pass1(
1936         struct xlog                     *log,
1937         struct xlog_recover_item        *item)
1938 {
1939         xfs_buf_log_format_t    *buf_f = item->ri_buf[0].i_addr;
1940         struct list_head        *bucket;
1941         struct xfs_buf_cancel   *bcp;
1942 
1943         /*
1944          * If this isn't a cancel buffer item, then just return.
1945          */
1946         if (!(buf_f->blf_flags & XFS_BLF_CANCEL)) {
1947                 trace_xfs_log_recover_buf_not_cancel(log, buf_f);
1948                 return 0;
1949         }
1950 
1951         /*
1952          * Insert an xfs_buf_cancel record into the hash table of them.
1953          * If there is already an identical record, bump its reference count.
1954          */
1955         bucket = XLOG_BUF_CANCEL_BUCKET(log, buf_f->blf_blkno);
1956         list_for_each_entry(bcp, bucket, bc_list) {
1957                 if (bcp->bc_blkno == buf_f->blf_blkno &&
1958                     bcp->bc_len == buf_f->blf_len) {
1959                         bcp->bc_refcount++;
1960                         trace_xfs_log_recover_buf_cancel_ref_inc(log, buf_f);
1961                         return 0;
1962                 }
1963         }
1964 
1965         bcp = kmem_alloc(sizeof(struct xfs_buf_cancel), 0);
1966         bcp->bc_blkno = buf_f->blf_blkno;
1967         bcp->bc_len = buf_f->blf_len;
1968         bcp->bc_refcount = 1;
1969         list_add_tail(&bcp->bc_list, bucket);
1970 
1971         trace_xfs_log_recover_buf_cancel_add(log, buf_f);
1972         return 0;
1973 }
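
/*
 * A sketch of the bucketing assumed by XLOG_BUF_CANCEL_BUCKET() above:
 * the cancel table is a simple open hash over the buffer's start block.
 * The real macro and bucket count live in xfs_log_priv.h; the 64 here is
 * illustrative only.
 */
static inline struct list_head *
example_buf_cancel_bucket(struct xlog *log, xfs_daddr_t blkno)
{
	return log->l_buf_cancel_table + ((uint64_t)blkno % 64);
}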
1974 
1975 /*
1976  * Check to see whether the buffer being recovered has a corresponding
1977  * entry in the buffer cancel record table. If it is, return the cancel
1978  * buffer structure to the caller.
1979  */
1980 STATIC struct xfs_buf_cancel *
1981 xlog_peek_buffer_cancelled(
1982         struct xlog             *log,
1983         xfs_daddr_t             blkno,
1984         uint                    len,
1985         unsigned short          flags)
1986 {
1987         struct list_head        *bucket;
1988         struct xfs_buf_cancel   *bcp;
1989 
1990         if (!log->l_buf_cancel_table) {
1991                 /* empty table means no cancelled buffers in the log */
1992                 ASSERT(!(flags & XFS_BLF_CANCEL));
1993                 return NULL;
1994         }
1995 
1996         bucket = XLOG_BUF_CANCEL_BUCKET(log, blkno);
1997         list_for_each_entry(bcp, bucket, bc_list) {
1998                 if (bcp->bc_blkno == blkno && bcp->bc_len == len)
1999                         return bcp;
2000         }
2001 
2002         /*
2003          * We didn't find a corresponding entry in the table, so return NULL
2004          * so that the buffer is NOT cancelled.
2005          */
2006         ASSERT(!(flags & XFS_BLF_CANCEL));
2007         return NULL;
2008 }
2009 
2010 /*
2011  * If the buffer is being cancelled then return 1 so that it will be cancelled,
2012  * otherwise return 0.  If the buffer is actually a buffer cancel item
2013  * (XFS_BLF_CANCEL is set), then decrement the refcount on the entry in the
2014  * table and remove it from the table if this is the last reference.
2015  *
2016  * We remove the cancel record from the table when we encounter its last
2017  * occurrence in the log so that if the same buffer is re-used again after its
2018  * last cancellation we actually replay the changes made at that point.
2019  */
2020 STATIC int
2021 xlog_check_buffer_cancelled(
2022         struct xlog             *log,
2023         xfs_daddr_t             blkno,
2024         uint                    len,
2025         unsigned short          flags)
2026 {
2027         struct xfs_buf_cancel   *bcp;
2028 
2029         bcp = xlog_peek_buffer_cancelled(log, blkno, len, flags);
2030         if (!bcp)
2031                 return 0;
2032 
2033         /*
2034          * We've got a match, so return 1 so that the recovery of this buffer
2035          * is cancelled.  If this buffer is actually a buffer cancel log
2036          * item, then decrement the refcount on the one in the table and
2037          * remove it if this is the last reference.
2038          */
2039         if (flags & XFS_BLF_CANCEL) {
2040                 if (--bcp->bc_refcount == 0) {
2041                         list_del(&bcp->bc_list);
2042                         kmem_free(bcp);
2043                 }
2044         }
2045         return 1;
2046 }
2047 
2048 /*
2049  * Perform recovery for a buffer full of inodes.  In these buffers, the only
2050  * data which should be recovered is that which corresponds to the
2051  * di_next_unlinked pointers in the on disk inode structures.  The rest of the
2052  * data for the inodes is always logged through the inodes themselves rather
2053  * than the inode buffer and is recovered in xlog_recover_inode_pass2().
2054  *
2055  * The only time when buffers full of inodes are fully recovered is when the
2056  * buffer is full of newly allocated inodes.  In this case the buffer will
2057  * not be marked as an inode buffer and so will be sent to
2058  * xlog_recover_do_reg_buffer() below during recovery.
2059  */
2060 STATIC int
2061 xlog_recover_do_inode_buffer(
2062         struct xfs_mount        *mp,
2063         xlog_recover_item_t     *item,
2064         struct xfs_buf          *bp,
2065         xfs_buf_log_format_t    *buf_f)
2066 {
2067         int                     i;
2068         int                     item_index = 0;
2069         int                     bit = 0;
2070         int                     nbits = 0;
2071         int                     reg_buf_offset = 0;
2072         int                     reg_buf_bytes = 0;
2073         int                     next_unlinked_offset;
2074         int                     inodes_per_buf;
2075         xfs_agino_t             *logged_nextp;
2076         xfs_agino_t             *buffer_nextp;
2077 
2078         trace_xfs_log_recover_buf_inode_buf(mp->m_log, buf_f);
2079 
2080         /*
2081          * Post recovery validation only works properly on CRC enabled
2082          * filesystems.
2083          */
2084         if (xfs_sb_version_hascrc(&mp->m_sb))
2085                 bp->b_ops = &xfs_inode_buf_ops;
2086 
2087         inodes_per_buf = BBTOB(bp->b_length) >> mp->m_sb.sb_inodelog;
2088         for (i = 0; i < inodes_per_buf; i++) {
2089                 next_unlinked_offset = (i * mp->m_sb.sb_inodesize) +
2090                         offsetof(xfs_dinode_t, di_next_unlinked);
2091 
2092                 while (next_unlinked_offset >=
2093                        (reg_buf_offset + reg_buf_bytes)) {
2094                         /*
2095                          * The next di_next_unlinked field is beyond
2096                          * the current logged region.  Find the next
2097                          * logged region that contains or is beyond
2098                          * the current di_next_unlinked field.
2099                          */
2100                         bit += nbits;
2101                         bit = xfs_next_bit(buf_f->blf_data_map,
2102                                            buf_f->blf_map_size, bit);
2103 
2104                         /*
2105                          * If there are no more logged regions in the
2106                          * buffer, then we're done.
2107                          */
2108                         if (bit == -1)
2109                                 return 0;
2110 
2111                         nbits = xfs_contig_bits(buf_f->blf_data_map,
2112                                                 buf_f->blf_map_size, bit);
2113                         ASSERT(nbits > 0);
2114                         reg_buf_offset = bit << XFS_BLF_SHIFT;
2115                         reg_buf_bytes = nbits << XFS_BLF_SHIFT;
2116                         item_index++;
2117                 }
2118 
2119                 /*
2120                  * If the current logged region starts after the current
2121                  * di_next_unlinked field, then move on to the next
2122                  * di_next_unlinked field.
2123                  */
2124                 if (next_unlinked_offset < reg_buf_offset)
2125                         continue;
2126 
2127                 ASSERT(item->ri_buf[item_index].i_addr != NULL);
2128                 ASSERT((item->ri_buf[item_index].i_len % XFS_BLF_CHUNK) == 0);
2129                 ASSERT((reg_buf_offset + reg_buf_bytes) <= BBTOB(bp->b_length));
2130 
2131                 /*
2132                  * The current logged region contains a copy of the
2133                  * current di_next_unlinked field.  Extract its value
2134                  * and copy it to the buffer copy.
2135                  */
2136                 logged_nextp = item->ri_buf[item_index].i_addr +
2137                                 next_unlinked_offset - reg_buf_offset;
2138                 if (unlikely(*logged_nextp == 0)) {
2139                         xfs_alert(mp,
2140                 "Bad inode buffer log record (ptr = "PTR_FMT", bp = "PTR_FMT"). "
2141                 "Trying to replay bad (0) inode di_next_unlinked field.",
2142                                 item, bp);
2143                         XFS_ERROR_REPORT("xlog_recover_do_inode_buf",
2144                                          XFS_ERRLEVEL_LOW, mp);
2145                         return -EFSCORRUPTED;
2146                 }
2147 
2148                 buffer_nextp = xfs_buf_offset(bp, next_unlinked_offset);
2149                 *buffer_nextp = *logged_nextp;
2150 
2151                 /*
2152                  * If necessary, recalculate the CRC in the on-disk inode. We
2153                  * have to leave the inode in a consistent state for whoever
2154                  * reads it next....
2155                  */
2156                 xfs_dinode_calc_crc(mp,
2157                                 xfs_buf_offset(bp, i * mp->m_sb.sb_inodesize));
2158 
2159         }
2160 
2161         return 0;
2162 }
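
/*
 * The bitmap arithmetic used above, in miniature: each bit in
 * blf_data_map covers one XFS_BLF_CHUNK (128 byte) region of the logged
 * buffer, so a run of set bits maps to a byte range by shifting with
 * XFS_BLF_SHIFT.  A hypothetical helper:
 */
static inline void
example_region_from_bits(int bit, int nbits, int *offset, int *bytes)
{
	*offset = bit << XFS_BLF_SHIFT;		/* bit * 128 */
	*bytes = nbits << XFS_BLF_SHIFT;	/* nbits * 128 */
}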
2163 
2164 /*
2165  * V5 filesystems know the age of the buffer on disk being recovered. We can
2166  * have newer objects on disk than we are replaying, and so for these cases we
2167  * don't want to replay the current change as that will make the buffer contents
2168  * temporarily invalid on disk.
2169  *
2170  * The magic number might not match the buffer type we are going to recover
2171  * (e.g. reallocated blocks), so we ignore the xfs_buf_log_format flags.  Hence
2172  * extract the LSN of the existing object in the buffer based on its current
2173  * magic number.  If we don't recognise the magic number in the buffer, then
2174  * return an LSN of -1 so that the caller knows it was an unrecognised block and
2175  * so can recover the buffer.
2176  *
2177  * Note: we cannot rely solely on magic number matches to determine that the
2178  * buffer has a valid LSN - we also need to verify that it belongs to this
2179  * filesystem, so we need to extract the object's LSN and compare it to that
2180  * which we read from the superblock. If the UUIDs don't match, then we've got a
2181  * stale metadata block from an old filesystem instance that we need to recover
2182  * over the top of.
2183  */
2184 static xfs_lsn_t
2185 xlog_recover_get_buf_lsn(
2186         struct xfs_mount        *mp,
2187         struct xfs_buf          *bp)
2188 {
2189         uint32_t                magic32;
2190         uint16_t                magic16;
2191         uint16_t                magicda;
2192         void                    *blk = bp->b_addr;
2193         uuid_t                  *uuid;
2194         xfs_lsn_t               lsn = -1;
2195 
2196         /* v4 filesystems always recover immediately */
2197         if (!xfs_sb_version_hascrc(&mp->m_sb))
2198                 goto recover_immediately;
2199 
2200         magic32 = be32_to_cpu(*(__be32 *)blk);
2201         switch (magic32) {
2202         case XFS_ABTB_CRC_MAGIC:
2203         case XFS_ABTC_CRC_MAGIC:
2204         case XFS_ABTB_MAGIC:
2205         case XFS_ABTC_MAGIC:
2206         case XFS_RMAP_CRC_MAGIC:
2207         case XFS_REFC_CRC_MAGIC:
2208         case XFS_IBT_CRC_MAGIC:
2209         case XFS_IBT_MAGIC: {
2210                 struct xfs_btree_block *btb = blk;
2211 
2212                 lsn = be64_to_cpu(btb->bb_u.s.bb_lsn);
2213                 uuid = &btb->bb_u.s.bb_uuid;
2214                 break;
2215         }
2216         case XFS_BMAP_CRC_MAGIC:
2217         case XFS_BMAP_MAGIC: {
2218                 struct xfs_btree_block *btb = blk;
2219 
2220                 lsn = be64_to_cpu(btb->bb_u.l.bb_lsn);
2221                 uuid = &btb->bb_u.l.bb_uuid;
2222                 break;
2223         }
2224         case XFS_AGF_MAGIC:
2225                 lsn = be64_to_cpu(((struct xfs_agf *)blk)->agf_lsn);
2226                 uuid = &((struct xfs_agf *)blk)->agf_uuid;
2227                 break;
2228         case XFS_AGFL_MAGIC:
2229                 lsn = be64_to_cpu(((struct xfs_agfl *)blk)->agfl_lsn);
2230                 uuid = &((struct xfs_agfl *)blk)->agfl_uuid;
2231                 break;
2232         case XFS_AGI_MAGIC:
2233                 lsn = be64_to_cpu(((struct xfs_agi *)blk)->agi_lsn);
2234                 uuid = &((struct xfs_agi *)blk)->agi_uuid;
2235                 break;
2236         case XFS_SYMLINK_MAGIC:
2237                 lsn = be64_to_cpu(((struct xfs_dsymlink_hdr *)blk)->sl_lsn);
2238                 uuid = &((struct xfs_dsymlink_hdr *)blk)->sl_uuid;
2239                 break;
2240         case XFS_DIR3_BLOCK_MAGIC:
2241         case XFS_DIR3_DATA_MAGIC:
2242         case XFS_DIR3_FREE_MAGIC:
2243                 lsn = be64_to_cpu(((struct xfs_dir3_blk_hdr *)blk)->lsn);
2244                 uuid = &((struct xfs_dir3_blk_hdr *)blk)->uuid;
2245                 break;
2246         case XFS_ATTR3_RMT_MAGIC:
2247                 /*
2248                  * Remote attr blocks are written synchronously, rather than
2249                  * being logged. That means they do not contain a valid LSN
2250                  * (i.e. transactionally ordered) in them, and hence any time we
2251                  * see a buffer to replay over the top of a remote attribute
2252                  * block we should simply do so.
2253                  */
2254                 goto recover_immediately;
2255         case XFS_SB_MAGIC:
2256                 /*
2257                  * superblock uuids are magic. We may or may not have a
2258                  * sb_meta_uuid on disk, but it will be set in the in-core
2259                  * superblock. We set the uuid pointer for verification
2260                  * according to the superblock feature mask to ensure we check
2261                  * the relevant UUID in the superblock.
2262                  */
2263                 lsn = be64_to_cpu(((struct xfs_dsb *)blk)->sb_lsn);
2264                 if (xfs_sb_version_hasmetauuid(&mp->m_sb))
2265                         uuid = &((struct xfs_dsb *)blk)->sb_meta_uuid;
2266                 else
2267                         uuid = &((struct xfs_dsb *)blk)->sb_uuid;
2268                 break;
2269         default:
2270                 break;
2271         }
2272 
2273         if (lsn != (xfs_lsn_t)-1) {
2274                 if (!uuid_equal(&mp->m_sb.sb_meta_uuid, uuid))
2275                         goto recover_immediately;
2276                 return lsn;
2277         }
2278 
2279         magicda = be16_to_cpu(((struct xfs_da_blkinfo *)blk)->magic);
2280         switch (magicda) {
2281         case XFS_DIR3_LEAF1_MAGIC:
2282         case XFS_DIR3_LEAFN_MAGIC:
2283         case XFS_DA3_NODE_MAGIC:
2284                 lsn = be64_to_cpu(((struct xfs_da3_blkinfo *)blk)->lsn);
2285                 uuid = &((struct xfs_da3_blkinfo *)blk)->uuid;
2286                 break;
2287         default:
2288                 break;
2289         }
2290 
2291         if (lsn != (xfs_lsn_t)-1) {
2292                 if (!uuid_equal(&mp->m_sb.sb_uuid, uuid))
2293                         goto recover_immediately;
2294                 return lsn;
2295         }
2296 
2297         /*
2298          * We do individual object checks on dquot and inode buffers as they
2299          * have their own individual LSN records. Also, we could have a stale
2300          * buffer here, so we have to at least recognise these buffer types.
2301          *
2302          * A noted complexity here is inode unlinked list processing - it logs
2303          * the inode directly in the buffer, but we don't know which inodes have
2304          * been modified, and there is no global buffer LSN. Hence we need to
2305          * recover all inode buffer types immediately. This problem will be
2306          * fixed by logical logging of the unlinked list modifications.
2307          */
2308         magic16 = be16_to_cpu(*(__be16 *)blk);
2309         switch (magic16) {
2310         case XFS_DQUOT_MAGIC:
2311         case XFS_DINODE_MAGIC:
2312                 goto recover_immediately;
2313         default:
2314                 break;
2315         }
2316 
2317         /* unknown buffer contents, recover immediately */
2318 
2319 recover_immediately:
2320         return (xfs_lsn_t)-1;
2321 
2322 }
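
/*
 * How the returned LSN gates replay, sketched from the caller's side (a
 * hedged restatement of the check made in xlog_recover_buffer_pass2();
 * an LSN of -1 means "always recover"):
 */
static inline bool
example_should_replay(xfs_lsn_t buf_lsn, xfs_lsn_t current_lsn)
{
	/* on-disk object already at or beyond this transaction: skip it */
	if (buf_lsn != (xfs_lsn_t)-1 &&
	    XFS_LSN_CMP(buf_lsn, current_lsn) >= 0)
		return false;
	return true;
}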
2323 
2324 /*
2325  * Validate the recovered buffer is of the correct type and attach the
2326  * appropriate buffer operations to them for writeback. Magic numbers are in a
2327  * few places:
2328  *      the first 16 bits of the buffer (inode buffer, dquot buffer),
2329  *      the first 32 bits of the buffer (most blocks),
2330  *      inside a struct xfs_da_blkinfo at the start of the buffer.
2331  */
2332 static void
2333 xlog_recover_validate_buf_type(
2334         struct xfs_mount        *mp,
2335         struct xfs_buf          *bp,
2336         xfs_buf_log_format_t    *buf_f,
2337         xfs_lsn_t               current_lsn)
2338 {
2339         struct xfs_da_blkinfo   *info = bp->b_addr;
2340         uint32_t                magic32;
2341         uint16_t                magic16;
2342         uint16_t                magicda;
2343         char                    *warnmsg = NULL;
2344 
2345         /*
2346          * We can only do post recovery validation on items on CRC enabled
2347          * filesystems as we need to know when the buffer was written to be able
2348          * to determine if we should have replayed the item. If we replay old
2349          * metadata over a newer buffer, then it will enter a temporarily
2350          * inconsistent state resulting in verification failures. Hence for now
2351          * just avoid the verification stage for non-crc filesystems.
2352          */
2353         if (!xfs_sb_version_hascrc(&mp->m_sb))
2354                 return;
2355 
2356         magic32 = be32_to_cpu(*(__be32 *)bp->b_addr);
2357         magic16 = be16_to_cpu(*(__be16 *)bp->b_addr);
2358         magicda = be16_to_cpu(info->magic);
2359         switch (xfs_blft_from_flags(buf_f)) {
2360         case XFS_BLFT_BTREE_BUF:
2361                 switch (magic32) {
2362                 case XFS_ABTB_CRC_MAGIC:
2363                 case XFS_ABTB_MAGIC:
2364                         bp->b_ops = &xfs_bnobt_buf_ops;
2365                         break;
2366                 case XFS_ABTC_CRC_MAGIC:
2367                 case XFS_ABTC_MAGIC:
2368                         bp->b_ops = &xfs_cntbt_buf_ops;
2369                         break;
2370                 case XFS_IBT_CRC_MAGIC:
2371                 case XFS_IBT_MAGIC:
2372                         bp->b_ops = &xfs_inobt_buf_ops;
2373                         break;
2374                 case XFS_FIBT_CRC_MAGIC:
2375                 case XFS_FIBT_MAGIC:
2376                         bp->b_ops = &xfs_finobt_buf_ops;
2377                         break;
2378                 case XFS_BMAP_CRC_MAGIC:
2379                 case XFS_BMAP_MAGIC:
2380                         bp->b_ops = &xfs_bmbt_buf_ops;
2381                         break;
2382                 case XFS_RMAP_CRC_MAGIC:
2383                         bp->b_ops = &xfs_rmapbt_buf_ops;
2384                         break;
2385                 case XFS_REFC_CRC_MAGIC:
2386                         bp->b_ops = &xfs_refcountbt_buf_ops;
2387                         break;
2388                 default:
2389                         warnmsg = "Bad btree block magic!";
2390                         break;
2391                 }
2392                 break;
2393         case XFS_BLFT_AGF_BUF:
2394                 if (magic32 != XFS_AGF_MAGIC) {
2395                         warnmsg = "Bad AGF block magic!";
2396                         break;
2397                 }
2398                 bp->b_ops = &xfs_agf_buf_ops;
2399                 break;
2400         case XFS_BLFT_AGFL_BUF:
2401                 if (magic32 != XFS_AGFL_MAGIC) {
2402                         warnmsg = "Bad AGFL block magic!";
2403                         break;
2404                 }
2405                 bp->b_ops = &xfs_agfl_buf_ops;
2406                 break;
2407         case XFS_BLFT_AGI_BUF:
2408                 if (magic32 != XFS_AGI_MAGIC) {
2409                         warnmsg = "Bad AGI block magic!";
2410                         break;
2411                 }
2412                 bp->b_ops = &xfs_agi_buf_ops;
2413                 break;
2414         case XFS_BLFT_UDQUOT_BUF:
2415         case XFS_BLFT_PDQUOT_BUF:
2416         case XFS_BLFT_GDQUOT_BUF:
2417 #ifdef CONFIG_XFS_QUOTA
2418                 if (magic16 != XFS_DQUOT_MAGIC) {
2419                         warnmsg = "Bad DQUOT block magic!";
2420                         break;
2421                 }
2422                 bp->b_ops = &xfs_dquot_buf_ops;
2423 #else
2424                 xfs_alert(mp,
2425         "Trying to recover dquots without QUOTA support built in!");
2426                 ASSERT(0);
2427 #endif
2428                 break;
2429         case XFS_BLFT_DINO_BUF:
2430                 if (magic16 != XFS_DINODE_MAGIC) {
2431                         warnmsg = "Bad INODE block magic!";
2432                         break;
2433                 }
2434                 bp->b_ops = &xfs_inode_buf_ops;
2435                 break;
2436         case XFS_BLFT_SYMLINK_BUF:
2437                 if (magic32 != XFS_SYMLINK_MAGIC) {
2438                         warnmsg = "Bad symlink block magic!";
2439                         break;
2440                 }
2441                 bp->b_ops = &xfs_symlink_buf_ops;
2442                 break;
2443         case XFS_BLFT_DIR_BLOCK_BUF:
2444                 if (magic32 != XFS_DIR2_BLOCK_MAGIC &&
2445                     magic32 != XFS_DIR3_BLOCK_MAGIC) {
2446                         warnmsg = "Bad dir block magic!";
2447                         break;
2448                 }
2449                 bp->b_ops = &xfs_dir3_block_buf_ops;
2450                 break;
2451         case XFS_BLFT_DIR_DATA_BUF:
2452                 if (magic32 != XFS_DIR2_DATA_MAGIC &&
2453                     magic32 != XFS_DIR3_DATA_MAGIC) {
2454                         warnmsg = "Bad dir data magic!";
2455                         break;
2456                 }
2457                 bp->b_ops = &xfs_dir3_data_buf_ops;
2458                 break;
2459         case XFS_BLFT_DIR_FREE_BUF:
2460                 if (magic32 != XFS_DIR2_FREE_MAGIC &&
2461                     magic32 != XFS_DIR3_FREE_MAGIC) {
2462                         warnmsg = "Bad dir3 free magic!";
2463                         break;
2464                 }
2465                 bp->b_ops = &xfs_dir3_free_buf_ops;
2466                 break;
2467         case XFS_BLFT_DIR_LEAF1_BUF:
2468                 if (magicda != XFS_DIR2_LEAF1_MAGIC &&
2469                     magicda != XFS_DIR3_LEAF1_MAGIC) {
2470                         warnmsg = "Bad dir leaf1 magic!";
2471                         break;
2472                 }
2473                 bp->b_ops = &xfs_dir3_leaf1_buf_ops;
2474                 break;
2475         case XFS_BLFT_DIR_LEAFN_BUF:
2476                 if (magicda != XFS_DIR2_LEAFN_MAGIC &&
2477                     magicda != XFS_DIR3_LEAFN_MAGIC) {
2478                         warnmsg = "Bad dir leafn magic!";
2479                         break;
2480                 }
2481                 bp->b_ops = &xfs_dir3_leafn_buf_ops;
2482                 break;
2483         case XFS_BLFT_DA_NODE_BUF:
2484                 if (magicda != XFS_DA_NODE_MAGIC &&
2485                     magicda != XFS_DA3_NODE_MAGIC) {
2486                         warnmsg = "Bad da node magic!";
2487                         break;
2488                 }
2489                 bp->b_ops = &xfs_da3_node_buf_ops;
2490                 break;
2491         case XFS_BLFT_ATTR_LEAF_BUF:
2492                 if (magicda != XFS_ATTR_LEAF_MAGIC &&
2493                     magicda != XFS_ATTR3_LEAF_MAGIC) {
2494                         warnmsg = "Bad attr leaf magic!";
2495                         break;
2496                 }
2497                 bp->b_ops = &xfs_attr3_leaf_buf_ops;
2498                 break;
2499         case XFS_BLFT_ATTR_RMT_BUF:
2500                 if (magic32 != XFS_ATTR3_RMT_MAGIC) {
2501                         warnmsg = "Bad attr remote magic!";
2502                         break;
2503                 }
2504                 bp->b_ops = &xfs_attr3_rmt_buf_ops;
2505                 break;
2506         case XFS_BLFT_SB_BUF:
2507                 if (magic32 != XFS_SB_MAGIC) {
2508                         warnmsg = "Bad SB block magic!";
2509                         break;
2510                 }
2511                 bp->b_ops = &xfs_sb_buf_ops;
2512                 break;
2513 #ifdef CONFIG_XFS_RT
2514         case XFS_BLFT_RTBITMAP_BUF:
2515         case XFS_BLFT_RTSUMMARY_BUF:
2516                 /* no magic numbers for verification of RT buffers */
2517                 bp->b_ops = &xfs_rtbuf_ops;
2518                 break;
2519 #endif /* CONFIG_XFS_RT */
2520         default:
2521                 xfs_warn(mp, "Unknown buffer type %d!",
2522                          xfs_blft_from_flags(buf_f));
2523                 break;
2524         }
2525 
2526         /*
2527          * Nothing else to do in the case of a NULL current LSN as this means
2528          * the buffer is more recent than the change in the log and will be
2529          * skipped.
2530          */
2531         if (current_lsn == NULLCOMMITLSN)
2532                 return;
2533 
2534         if (warnmsg) {
2535                 xfs_warn(mp, warnmsg);
2536                 ASSERT(0);
2537         }
2538 
2539         /*
2540          * We must update the metadata LSN of the buffer as it is written out to
2541          * ensure that older transactions never replay over this one and corrupt
2542          * the buffer. This can occur if log recovery is interrupted at some
2543          * point after the current transaction completes, at which point a
2544          * subsequent mount starts recovery from the beginning.
2545          *
2546          * Write verifiers update the metadata LSN from log items attached to
2547          * the buffer. Therefore, initialize a bli purely to carry the LSN to
2548          * the verifier. We'll clean it up in our ->iodone() callback.
2549          */
2550         if (bp->b_ops) {
2551                 struct xfs_buf_log_item *bip;
2552 
2553                 ASSERT(!bp->b_iodone || bp->b_iodone == xlog_recover_iodone);
2554                 bp->b_iodone = xlog_recover_iodone;
2555                 xfs_buf_item_init(bp, mp);
2556                 bip = bp->b_log_item;
2557                 bip->bli_item.li_lsn = current_lsn;
2558         }
2559 }
2560 
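/*
 * A hedged, standalone sketch of the LSN hand-off above: recovery cannot
 * stamp the metadata LSN into the buffer itself (the write verifier does
 * that), so it hangs a minimal log item off the buffer whose only job is
 * to carry li_lsn to the verifier at write time. The types below are
 * simplified stand-ins for xfs_buf and xfs_buf_log_item.
 */
#include <stdint.h>
#include <stddef.h>

struct log_item { int64_t li_lsn; };

struct buf {
        const void      *b_ops;         /* write verifier, if any */
        struct log_item *b_log_item;    /* carries the replay LSN */
};

static void stamp_replay_lsn(struct buf *bp, struct log_item *bip,
                             int64_t current_lsn)
{
        if (!bp->b_ops)
                return;                 /* nothing will consume the LSN */
        bp->b_log_item = bip;
        bip->li_lsn = current_lsn;      /* read back at write time */
}
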
2561 /*
2562  * Perform a 'normal' buffer recovery.  Each logged region of the
2563  * buffer should be copied over the corresponding region in the
2564  * given buffer.  The bitmap in the buf log format structure indicates
2565  * where to place the logged data.
2566  */
2567 STATIC void
2568 xlog_recover_do_reg_buffer(
2569         struct xfs_mount        *mp,
2570         xlog_recover_item_t     *item,
2571         struct xfs_buf          *bp,
2572         xfs_buf_log_format_t    *buf_f,
2573         xfs_lsn_t               current_lsn)
2574 {
2575         int                     i;
2576         int                     bit;
2577         int                     nbits;
2578         xfs_failaddr_t          fa;
2579 
2580         trace_xfs_log_recover_buf_reg_buf(mp->m_log, buf_f);
2581 
2582         bit = 0;
2583         i = 1;  /* 0 is the buf format structure */
2584         while (1) {
2585                 bit = xfs_next_bit(buf_f->blf_data_map,
2586                                    buf_f->blf_map_size, bit);
2587                 if (bit == -1)
2588                         break;
2589                 nbits = xfs_contig_bits(buf_f->blf_data_map,
2590                                         buf_f->blf_map_size, bit);
2591                 ASSERT(nbits > 0);
2592                 ASSERT(item->ri_buf[i].i_addr != NULL);
2593                 ASSERT(item->ri_buf[i].i_len % XFS_BLF_CHUNK == 0);
2594                 ASSERT(BBTOB(bp->b_length) >=
2595                        ((uint)bit << XFS_BLF_SHIFT) + (nbits << XFS_BLF_SHIFT));
2596 
2597                 /*
2598                  * The dirty regions logged in the buffer, even though
2599                  * contiguous, may span multiple chunks. This is because the
2600                  * dirty region may span a physical page boundary in a buffer
2601                  * and hence be split into two separate vectors for writing into
2602                  * the log. Hence we need to trim nbits back to the length of
2603                  * the current region being copied out of the log.
2604                  */
2605                 if (item->ri_buf[i].i_len < (nbits << XFS_BLF_SHIFT))
2606                         nbits = item->ri_buf[i].i_len >> XFS_BLF_SHIFT;
2607 
2608                 /*
2609                  * Do a sanity check if this is a dquot buffer. Just checking
2610                  * the first dquot in the buffer should do. XXX This is
2611                  * probably worth doing for other buffer types as well.
2612                  */
2613                 fa = NULL;
2614                 if (buf_f->blf_flags &
2615                    (XFS_BLF_UDQUOT_BUF|XFS_BLF_PDQUOT_BUF|XFS_BLF_GDQUOT_BUF)) {
2616                         if (item->ri_buf[i].i_addr == NULL) {
2617                                 xfs_alert(mp,
2618                                         "XFS: NULL dquot in %s.", __func__);
2619                                 goto next;
2620                         }
2621                         if (item->ri_buf[i].i_len < sizeof(xfs_disk_dquot_t)) {
2622                                 xfs_alert(mp,
2623                                         "XFS: dquot too small (%d) in %s.",
2624                                         item->ri_buf[i].i_len, __func__);
2625                                 goto next;
2626                         }
2627                         fa = xfs_dquot_verify(mp, item->ri_buf[i].i_addr,
2628                                                -1, 0);
2629                         if (fa) {
2630                                 xfs_alert(mp,
2631         "dquot corrupt at %pS trying to replay into block 0x%llx",
2632                                         fa, bp->b_bn);
2633                                 goto next;
2634                         }
2635                 }
2636 
2637                 memcpy(xfs_buf_offset(bp,
2638                         (uint)bit << XFS_BLF_SHIFT),    /* dest */
2639                         item->ri_buf[i].i_addr,         /* source */
2640                         nbits<<XFS_BLF_SHIFT);          /* length */
2641  next:
2642                 i++;
2643                 bit += nbits;
2644         }
2645 
2646         /* Shouldn't be any more regions */
2647         ASSERT(i == item->ri_total);
2648 
2649         xlog_recover_validate_buf_type(mp, bp, buf_f, current_lsn);
2650 }
2651 
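/*
 * A hedged, standalone sketch of the dirty-region walk above: the buf log
 * format's data map carries one bit per 128-byte chunk (XFS_BLF_CHUNK, so
 * CHUNK_SHIFT below stands in for XFS_BLF_SHIFT == 7), and each contiguous
 * run of set bits maps to one byte range to copy into the buffer.
 * find_next_set()/count_contig() are simplified stand-ins for
 * xfs_next_bit()/xfs_contig_bits().
 */
#include <stdio.h>

#define CHUNK_SHIFT     7       /* 128-byte chunks */

static int find_next_set(const unsigned int *map, int nwords, int start)
{
        int bit;

        for (bit = start; bit < nwords * 32; bit++)
                if (map[bit / 32] & (1u << (bit % 32)))
                        return bit;
        return -1;
}

static int count_contig(const unsigned int *map, int nwords, int start)
{
        int bit = start;

        while (bit < nwords * 32 && (map[bit / 32] & (1u << (bit % 32))))
                bit++;
        return bit - start;
}

int main(void)
{
        unsigned int map[1] = { 0x0000c071 };   /* bits 0, 4-6, 14-15 set */
        int bit = 0, nbits;

        while ((bit = find_next_set(map, 1, bit)) != -1) {
                nbits = count_contig(map, 1, bit);
                /* each run covers bytes [bit << 7, (bit + nbits) << 7) */
                printf("copy %d bytes at offset %d\n",
                       nbits << CHUNK_SHIFT, bit << CHUNK_SHIFT);
                bit += nbits;
        }
        return 0;
}
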
2652 /*
2653  * Perform a dquot buffer recovery.
2654  * Simple algorithm: if we have found a QUOTAOFF log item of the same type
2655  * (i.e. USR or GRP), then just toss this buffer away; don't recover it.
2656  * Else, treat it as a regular buffer and do recovery.
2657  *
2658  * Return false if the buffer was tossed and true if we recovered it, so
2659  * the caller knows whether the buffer needs writing.
2660  */
2661 STATIC bool
2662 xlog_recover_do_dquot_buffer(
2663         struct xfs_mount                *mp,
2664         struct xlog                     *log,
2665         struct xlog_recover_item        *item,
2666         struct xfs_buf                  *bp,
2667         struct xfs_buf_log_format       *buf_f)
2668 {
2669         uint                    type;
2670 
2671         trace_xfs_log_recover_buf_dquot_buf(log, buf_f);
2672 
2673         /*
2674          * Filesystems are required to send in quota flags at mount time.
2675          */
2676         if (!mp->m_qflags)
2677                 return false;
2678 
2679         type = 0;
2680         if (buf_f->blf_flags & XFS_BLF_UDQUOT_BUF)
2681                 type |= XFS_DQ_USER;
2682         if (buf_f->blf_flags & XFS_BLF_PDQUOT_BUF)
2683                 type |= XFS_DQ_PROJ;
2684         if (buf_f->blf_flags & XFS_BLF_GDQUOT_BUF)
2685                 type |= XFS_DQ_GROUP;
2686         /*
2687          * This type of quota was turned off, so ignore this buffer.
2688          */
2689         if (log->l_quotaoffs_flag & type)
2690                 return false;
2691 
2692         xlog_recover_do_reg_buffer(mp, item, bp, buf_f, NULLCOMMITLSN);
2693         return true;
2694 }
2695 
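/*
 * A hedged sketch of the quotaoff filtering used above and in
 * xlog_recover_quotaoff_pass1() below: pass 1 ORs the quota types that
 * were turned off into a mask, and pass 2 simply drops any dquot or dquot
 * buffer whose type bits intersect that mask. The flag values here are
 * illustrative, not the real XFS_DQ_* definitions.
 */
#include <stdbool.h>

enum { DQ_USER = 0x1, DQ_PROJ = 0x2, DQ_GROUP = 0x4 };

static unsigned int quotaoffs_mask;     /* built during pass 1 */

static void note_quotaoff(unsigned int types)
{
        quotaoffs_mask |= types;        /* e.g. DQ_USER | DQ_GROUP */
}

static bool should_replay_dquot(unsigned int type)
{
        return !(quotaoffs_mask & type);
}
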
2696 /*
2697  * This routine replays a modification made to a buffer at runtime.
2698  * There are actually two types of buffer, regular and inode, which
2699  * are handled differently.  Inode buffers are handled differently
2700  * in that we only recover a specific set of data from them, namely
2701  * the inode di_next_unlinked fields.  This is because all other inode
2702  * data is actually logged via inode records and any data we replay
2703  * here which overlaps that may be stale.
2704  *
2705  * When meta-data buffers are freed at run time we log a buffer item
2706  * with the XFS_BLF_CANCEL bit set to indicate that previous copies
2707  * of the buffer in the log should not be replayed at recovery time.
2708  * This is so that if the blocks covered by the buffer are reused for
2709  * file data before we crash we don't end up replaying old, freed
2710  * meta-data into a user's file.
2711  *
2712  * To handle the cancellation of buffer log items, we make two passes
2713  * over the log during recovery.  During the first we build a table of
2714  * those buffers which have been cancelled, and during the second we
2715  * only replay those buffers which do not have corresponding cancel
2716  * records in the table.  See xlog_recover_buffer_pass[1,2] above
2717  * for more details on the implementation of the table of cancel records.
2718  */
2719 STATIC int
2720 xlog_recover_buffer_pass2(
2721         struct xlog                     *log,
2722         struct list_head                *buffer_list,
2723         struct xlog_recover_item        *item,
2724         xfs_lsn_t                       current_lsn)
2725 {
2726         xfs_buf_log_format_t    *buf_f = item->ri_buf[0].i_addr;
2727         xfs_mount_t             *mp = log->l_mp;
2728         xfs_buf_t               *bp;
2729         int                     error;
2730         uint                    buf_flags;
2731         xfs_lsn_t               lsn;
2732 
2733         /*
2734          * In this pass we only want to recover the buffers which have
2735          * not been cancelled and are not cancellation buffers themselves.
2736          */
2737         if (xlog_check_buffer_cancelled(log, buf_f->blf_blkno,
2738                         buf_f->blf_len, buf_f->blf_flags)) {
2739                 trace_xfs_log_recover_buf_cancel(log, buf_f);
2740                 return 0;
2741         }
2742 
2743         trace_xfs_log_recover_buf_recover(log, buf_f);
2744 
2745         buf_flags = 0;
2746         if (buf_f->blf_flags & XFS_BLF_INODE_BUF)
2747                 buf_flags |= XBF_UNMAPPED;
2748 
2749         bp = xfs_buf_read(mp->m_ddev_targp, buf_f->blf_blkno, buf_f->blf_len,
2750                           buf_flags, NULL);
2751         if (!bp)
2752                 return -ENOMEM;
2753         error = bp->b_error;
2754         if (error) {
2755                 xfs_buf_ioerror_alert(bp, "xlog_recover_do..(read#1)");
2756                 goto out_release;
2757         }
2758 
2759         /*
2760          * Recover the buffer only if we get an LSN from it and it's less than
2761          * the lsn of the transaction we are replaying.
2762          *
2763          * Note that we have to be extremely careful of readahead here.
2764          * Readahead does not attach verifiers to the buffers, so if we
2765          * don't actually do any replay after readahead because the LSN we
2766          * found in the buffer is more recent than the current transaction,
2767          * then we need to attach the verifier directly. Failure to do so
2768          * means that future recovery actions (e.g. EFI and unlinked list
2769          * recovery) can operate on the buffers without the verifier
2770          * attached, which can leave blocks on disk with the correct
2771          * content but a stale CRC.
2772          *
2773          * It is safe to assume these clean buffers are currently up to date.
2774          * If the buffer is dirtied by a later transaction being replayed, then
2775          * the verifier will be reset to match whatever recovery turns that
2776          * buffer into.
2777          */
2778         lsn = xlog_recover_get_buf_lsn(mp, bp);
2779         if (lsn && lsn != -1 && XFS_LSN_CMP(lsn, current_lsn) >= 0) {
2780                 trace_xfs_log_recover_buf_skip(log, buf_f);
2781                 xlog_recover_validate_buf_type(mp, bp, buf_f, NULLCOMMITLSN);
2782                 goto out_release;
2783         }
2784 
2785         if (buf_f->blf_flags & XFS_BLF_INODE_BUF) {
2786                 error = xlog_recover_do_inode_buffer(mp, item, bp, buf_f);
2787                 if (error)
2788                         goto out_release;
2789         } else if (buf_f->blf_flags &
2790                   (XFS_BLF_UDQUOT_BUF|XFS_BLF_PDQUOT_BUF|XFS_BLF_GDQUOT_BUF)) {
2791                 bool    dirty;
2792 
2793                 dirty = xlog_recover_do_dquot_buffer(mp, log, item, bp, buf_f);
2794                 if (!dirty)
2795                         goto out_release;
2796         } else {
2797                 xlog_recover_do_reg_buffer(mp, item, bp, buf_f, current_lsn);
2798         }
2799 
2800         /*
2801          * Perform delayed write on the buffer.  Asynchronous writes will be
2802          * slower when taking into account all the buffers to be flushed.
2803          *
2804          * Also make sure that only inode buffers with good sizes stay in
2805          * the buffer cache.  The kernel moves inodes in buffers of 1 block
2806          * or inode_cluster_size bytes, whichever is bigger.  The inode
2807          * buffers in the log can be a different size if the log was generated
2808          * by an older kernel using unclustered inode buffers or a newer kernel
2809          * running with a different inode cluster size.  Regardless, if
2810          * the inode buffer size isn't max(blocksize, inode_cluster_size)
2811          * for *our* value of inode_cluster_size, then we need to keep
2812          * the buffer out of the buffer cache so that the buffer won't
2813          * overlap with future reads of those inodes.
2814          */
2815         if (XFS_DINODE_MAGIC ==
2816             be16_to_cpu(*((__be16 *)xfs_buf_offset(bp, 0))) &&
2817             (BBTOB(bp->b_length) != M_IGEO(log->l_mp)->inode_cluster_size)) {
2818                 xfs_buf_stale(bp);
2819                 error = xfs_bwrite(bp);
2820         } else {
2821                 ASSERT(bp->b_mount == mp);
2822                 bp->b_iodone = xlog_recover_iodone;
2823                 xfs_buf_delwri_queue(bp, buffer_list);
2824         }
2825 
2826 out_release:
2827         xfs_buf_relse(bp);
2828         return error;
2829 }
2830 
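/*
 * A hedged sketch of the ordering test above: an xfs_lsn_t packs the log
 * cycle number into the high 32 bits and the log block number into the
 * low 32 bits, so "newer" means a later cycle, or the same cycle and a
 * later block. The helpers below are simplified stand-ins for
 * CYCLE_LSN()/BLOCK_LSN() and XFS_LSN_CMP().
 */
#include <stdint.h>

typedef int64_t xfs_lsn_t;

static uint32_t cycle_lsn(xfs_lsn_t lsn) { return (uint32_t)(lsn >> 32); }
static uint32_t block_lsn(xfs_lsn_t lsn) { return (uint32_t)lsn; }

static int lsn_cmp(xfs_lsn_t a, xfs_lsn_t b)
{
        if (cycle_lsn(a) != cycle_lsn(b))
                return cycle_lsn(a) < cycle_lsn(b) ? -1 : 1;
        if (block_lsn(a) != block_lsn(b))
                return block_lsn(a) < block_lsn(b) ? -1 : 1;
        return 0;
}

/* Replay only if the on-disk copy predates the transaction being replayed. */
static int should_replay(xfs_lsn_t disk_lsn, xfs_lsn_t current_lsn)
{
        return lsn_cmp(disk_lsn, current_lsn) < 0;
}
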
2831 /*
2832  * Inode fork owner changes
2833  *
2834  * If we have been told that we have to reparent the inode fork, it's because an
2835  * extent swap operation on a CRC enabled filesystem has been done and we are
2836  * replaying it. We need to walk the BMBT of the appropriate fork and change the
2837  * owners of it.
2838  *
2839  * The complexity here is that we don't have an inode context to work with, so
2840  * after we've replayed the inode we need to instantiate one.  This is where the
2841  * fun begins.
2842  *
2843  * We are in the middle of log recovery, so we can't run transactions. That
2844  * means we cannot use cache coherent inode instantiation via xfs_iget(), as
2845  * that will result in the corresponding iput() running the inode through
2846  * xfs_inactive(). If we've just replayed an inode core that changes the link
2847  * count to zero (i.e. it's been unlinked), then xfs_inactive() will run
2848  * transactions (bad!).
2849  *
2850  * So, to avoid this, we instantiate an inode directly from the inode core we've
2851  * just recovered. We have the buffer still locked, and all we really need to
2852  * instantiate is the inode core and the forks being modified. We can do this
2853  * manually, then run the inode btree owner change, and then tear down the
2854  * xfs_inode without having to run any transactions at all.
2855  *
2856  * Also, we don't have a transaction context available here, but we still
2857  * need to gather all the buffers we modify for writeback, so we pass the
2858  * buffer_list to the operation for it to use instead.
2859  */
2860 
2861 STATIC int
2862 xfs_recover_inode_owner_change(
2863         struct xfs_mount        *mp,
2864         struct xfs_dinode       *dip,
2865         struct xfs_inode_log_format *in_f,
2866         struct list_head        *buffer_list)
2867 {
2868         struct xfs_inode        *ip;
2869         int                     error;
2870 
2871         ASSERT(in_f->ilf_fields & (XFS_ILOG_DOWNER|XFS_ILOG_AOWNER));
2872 
2873         ip = xfs_inode_alloc(mp, in_f->ilf_ino);
2874         if (!ip)
2875                 return -ENOMEM;
2876 
2877         /* instantiate the inode */
2878         xfs_inode_from_disk(ip, dip);
2879         ASSERT(ip->i_d.di_version >= 3);
2880 
2881         error = xfs_iformat_fork(ip, dip);
2882         if (error)
2883                 goto out_free_ip;
2884 
2885         if (!xfs_inode_verify_forks(ip)) {
2886                 error = -EFSCORRUPTED;
2887                 goto out_free_ip;
2888         }
2889 
2890         if (in_f->ilf_fields & XFS_ILOG_DOWNER) {
2891                 ASSERT(in_f->ilf_fields & XFS_ILOG_DBROOT);
2892                 error = xfs_bmbt_change_owner(NULL, ip, XFS_DATA_FORK,
2893                                               ip->i_ino, buffer_list);
2894                 if (error)
2895                         goto out_free_ip;
2896         }
2897 
2898         if (in_f->ilf_fields & XFS_ILOG_AOWNER) {
2899                 ASSERT(in_f->ilf_fields & XFS_ILOG_ABROOT);
2900                 error = xfs_bmbt_change_owner(NULL, ip, XFS_ATTR_FORK,
2901                                               ip->i_ino, buffer_list);
2902                 if (error)
2903                         goto out_free_ip;
2904         }
2905 
2906 out_free_ip:
2907         xfs_inode_free(ip);
2908         return error;
2909 }
2910 
2911 STATIC int
2912 xlog_recover_inode_pass2(
2913         struct xlog                     *log,
2914         struct list_head                *buffer_list,
2915         struct xlog_recover_item        *item,
2916         xfs_lsn_t                       current_lsn)
2917 {
2918         struct xfs_inode_log_format     *in_f;
2919         xfs_mount_t             *mp = log->l_mp;
2920         xfs_buf_t               *bp;
2921         xfs_dinode_t            *dip;
2922         int                     len;
2923         char                    *src;
2924         char                    *dest;
2925         int                     error;
2926         int                     attr_index;
2927         uint                    fields;
2928         struct xfs_log_dinode   *ldip;
2929         uint                    isize;
2930         int                     need_free = 0;
2931 
2932         if (item->ri_buf[0].i_len == sizeof(struct xfs_inode_log_format)) {
2933                 in_f = item->ri_buf[0].i_addr;
2934         } else {
2935                 in_f = kmem_alloc(sizeof(struct xfs_inode_log_format), 0);
2936                 need_free = 1;
2937                 error = xfs_inode_item_format_convert(&item->ri_buf[0], in_f);
2938                 if (error)
2939                         goto error;
2940         }
2941 
2942         /*
2943          * Inode buffers can be freed; look out for that case
2944          * and do not replay the inode.
2945          */
2946         if (xlog_check_buffer_cancelled(log, in_f->ilf_blkno,
2947                                         in_f->ilf_len, 0)) {
2948                 error = 0;
2949                 trace_xfs_log_recover_inode_cancel(log, in_f);
2950                 goto error;
2951         }
2952         trace_xfs_log_recover_inode_recover(log, in_f);
2953 
2954         bp = xfs_buf_read(mp->m_ddev_targp, in_f->ilf_blkno, in_f->ilf_len, 0,
2955                           &xfs_inode_buf_ops);
2956         if (!bp) {
2957                 error = -ENOMEM;
2958                 goto error;
2959         }
2960         error = bp->b_error;
2961         if (error) {
2962                 xfs_buf_ioerror_alert(bp, "xlog_recover_do..(read#2)");
2963                 goto out_release;
2964         }
2965         ASSERT(in_f->ilf_fields & XFS_ILOG_CORE);
2966         dip = xfs_buf_offset(bp, in_f->ilf_boffset);
2967 
2968         /*
2969          * Make sure the place we're flushing out to really looks
2970          * like an inode!
2971          */
2972         if (unlikely(!xfs_verify_magic16(bp, dip->di_magic))) {
2973                 xfs_alert(mp,
2974         "%s: Bad inode magic number, dip = "PTR_FMT", dino bp = "PTR_FMT", ino = %Ld",
2975                         __func__, dip, bp, in_f->ilf_ino);
2976                 XFS_ERROR_REPORT("xlog_recover_inode_pass2(1)",
2977                                  XFS_ERRLEVEL_LOW, mp);
2978                 error = -EFSCORRUPTED;
2979                 goto out_release;
2980         }
2981         ldip = item->ri_buf[1].i_addr;
2982         if (unlikely(ldip->di_magic != XFS_DINODE_MAGIC)) {
2983                 xfs_alert(mp,
2984                         "%s: Bad inode log record, rec ptr "PTR_FMT", ino %Ld",
2985                         __func__, item, in_f->ilf_ino);
2986                 XFS_ERROR_REPORT("xlog_recover_inode_pass2(2)",
2987                                  XFS_ERRLEVEL_LOW, mp);
2988                 error = -EFSCORRUPTED;
2989                 goto out_release;
2990         }
2991 
2992         /*
2993          * If the inode has an LSN in it, recover the inode only if it's less
2994          * than the lsn of the transaction we are replaying. Note: we still
2995          * need to replay an owner change even though the inode is more recent
2996          * than the transaction as there is no guarantee that all the btree
2997          * blocks are more recent than this transaction, too.
2998          */
2999         if (dip->di_version >= 3) {
3000                 xfs_lsn_t       lsn = be64_to_cpu(dip->di_lsn);
3001 
3002                 if (lsn && lsn != -1 && XFS_LSN_CMP(lsn, current_lsn) >= 0) {
3003                         trace_xfs_log_recover_inode_skip(log, in_f);
3004                         error = 0;
3005                         goto out_owner_change;
3006                 }
3007         }
3008 
3009         /*
3010          * di_flushiter is only valid for v1/2 inodes. All changes for v3 inodes
3011          * are transactional, and if ordering is necessary we can determine that
3012          * more accurately via the LSN field in the V3 inode core. Don't trust
3013          * the inode versions as we might be changing them here - use the
3014          * superblock flag to determine whether we need to look at di_flushiter
3015          * to skip replay when the on-disk inode is newer than the log one.
3016          */
3017         if (!xfs_sb_version_hascrc(&mp->m_sb) &&
3018             ldip->di_flushiter < be16_to_cpu(dip->di_flushiter)) {
3019                 /*
3020                  * Deal with the wrap case: DI_MAX_FLUSH is treated as
3021                  * older than the small values the counter wraps to.
3022                  */
3023                 if (be16_to_cpu(dip->di_flushiter) == DI_MAX_FLUSH &&
3024                     ldip->di_flushiter < (DI_MAX_FLUSH >> 1)) {
3025                         /* do nothing */
3026                 } else {
3027                         trace_xfs_log_recover_inode_skip(log, in_f);
3028                         error = 0;
3029                         goto out_release;
3030                 }
3031         }
3032 
3033         /* Take the opportunity to reset the flush iteration count */
3034         ldip->di_flushiter = 0;
3035 
3036         if (unlikely(S_ISREG(ldip->di_mode))) {
3037                 if ((ldip->di_format != XFS_DINODE_FMT_EXTENTS) &&
3038                     (ldip->di_format != XFS_DINODE_FMT_BTREE)) {
3039                         XFS_CORRUPTION_ERROR("xlog_recover_inode_pass2(3)",
3040                                          XFS_ERRLEVEL_LOW, mp, ldip,
3041                                          sizeof(*ldip));
3042                         xfs_alert(mp,
3043                 "%s: Bad regular inode log record, rec ptr "PTR_FMT", "
3044                 "ino ptr = "PTR_FMT", ino bp = "PTR_FMT", ino %Ld",
3045                                 __func__, item, dip, bp, in_f->ilf_ino);
3046                         error = -EFSCORRUPTED;
3047                         goto out_release;
3048                 }
3049         } else if (unlikely(S_ISDIR(ldip->di_mode))) {
3050                 if ((ldip->di_format != XFS_DINODE_FMT_EXTENTS) &&
3051                     (ldip->di_format != XFS_DINODE_FMT_BTREE) &&
3052                     (ldip->di_format != XFS_DINODE_FMT_LOCAL)) {
3053                         XFS_CORRUPTION_ERROR("xlog_recover_inode_pass2(4)",
3054                                              XFS_ERRLEVEL_LOW, mp, ldip,
3055                                              sizeof(*ldip));
3056                         xfs_alert(mp,
3057                 "%s: Bad dir inode log record, rec ptr "PTR_FMT", "
3058                 "ino ptr = "PTR_FMT", ino bp = "PTR_FMT", ino %Ld",
3059                                 __func__, item, dip, bp, in_f->ilf_ino);
3060                         error = -EFSCORRUPTED;
3061                         goto out_release;
3062                 }
3063         }
3064         if (unlikely(ldip->di_nextents + ldip->di_anextents > ldip->di_nblocks)){
3065                 XFS_CORRUPTION_ERROR("xlog_recover_inode_pass2(5)",
3066                                      XFS_ERRLEVEL_LOW, mp, ldip,
3067                                      sizeof(*ldip));
3068                 xfs_alert(mp,
3069         "%s: Bad inode log record, rec ptr "PTR_FMT", dino ptr "PTR_FMT", "
3070         "dino bp "PTR_FMT", ino %Ld, total extents = %d, nblocks = %Ld",
3071                         __func__, item, dip, bp, in_f->ilf_ino,
3072                         ldip->di_nextents + ldip->di_anextents,
3073                         ldip->di_nblocks);
3074                 error = -EFSCORRUPTED;
3075                 goto out_release;
3076         }
3077         if (unlikely(ldip->di_forkoff > mp->m_sb.sb_inodesize)) {
3078                 XFS_CORRUPTION_ERROR("xlog_recover_inode_pass2(6)",
3079                                      XFS_ERRLEVEL_LOW, mp, ldip,
3080                                      sizeof(*ldip));
3081                 xfs_alert(mp,
3082         "%s: Bad inode log record, rec ptr "PTR_FMT", dino ptr "PTR_FMT", "
3083         "dino bp "PTR_FMT", ino %Ld, forkoff 0x%x", __func__,
3084                         item, dip, bp, in_f->ilf_ino, ldip->di_forkoff);
3085                 error = -EFSCORRUPTED;
3086                 goto out_release;
3087         }
3088         isize = xfs_log_dinode_size(ldip->di_version);
3089         if (unlikely(item->ri_buf[1].i_len > isize)) {
3090                 XFS_CORRUPTION_ERROR("xlog_recover_inode_pass2(7)",
3091                                      XFS_ERRLEVEL_LOW, mp, ldip,
3092                                      sizeof(*ldip));
3093                 xfs_alert(mp,
3094                         "%s: Bad inode log record length %d, rec ptr "PTR_FMT,
3095                         __func__, item->ri_buf[1].i_len, item);
3096                 error = -EFSCORRUPTED;
3097                 goto out_release;
3098         }
3099 
3100         /* recover the log dinode into the on-disk inode */
3101         xfs_log_dinode_to_disk(ldip, dip);
3102 
3103         fields = in_f->ilf_fields;
3104         if (fields & XFS_ILOG_DEV)
3105                 xfs_dinode_put_rdev(dip, in_f->ilf_u.ilfu_rdev);
3106 
3107         if (in_f->ilf_size == 2)
3108                 goto out_owner_change;
3109         len = item->ri_buf[2].i_len;
3110         src = item->ri_buf[2].i_addr;
3111         ASSERT(in_f->ilf_size <= 4);
3112         ASSERT((in_f->ilf_size == 3) || (fields & XFS_ILOG_AFORK));
3113         ASSERT(!(fields & XFS_ILOG_DFORK) ||
3114                (len == in_f->ilf_dsize));
3115 
3116         switch (fields & XFS_ILOG_DFORK) {
3117         case XFS_ILOG_DDATA:
3118         case XFS_ILOG_DEXT:
3119                 memcpy(XFS_DFORK_DPTR(dip), src, len);
3120                 break;
3121 
3122         case XFS_ILOG_DBROOT:
3123                 xfs_bmbt_to_bmdr(mp, (struct xfs_btree_block *)src, len,
3124                                  (xfs_bmdr_block_t *)XFS_DFORK_DPTR(dip),
3125                                  XFS_DFORK_DSIZE(dip, mp));
3126                 break;
3127 
3128         default:
3129                 /*
3130                  * There are no data fork flags set.
3131                  */
3132                 ASSERT((fields & XFS_ILOG_DFORK) == 0);
3133                 break;
3134         }
3135 
3136         /*
3137          * If we logged any attribute data, recover it.  There may or
3138          * may not have been any other non-core data logged in this
3139          * transaction.
3140          */
3141         if (in_f->ilf_fields & XFS_ILOG_AFORK) {
3142                 if (in_f->ilf_fields & XFS_ILOG_DFORK) {
3143                         attr_index = 3;
3144                 } else {
3145                         attr_index = 2;
3146                 }
3147                 len = item->ri_buf[attr_index].i_len;
3148                 src = item->ri_buf[attr_index].i_addr;
3149                 ASSERT(len == in_f->ilf_asize);
3150 
3151                 switch (in_f->ilf_fields & XFS_ILOG_AFORK) {
3152                 case XFS_ILOG_ADATA:
3153                 case XFS_ILOG_AEXT:
3154                         dest = XFS_DFORK_APTR(dip);
3155                         ASSERT(len <= XFS_DFORK_ASIZE(dip, mp));
3156                         memcpy(dest, src, len);
3157                         break;
3158 
3159                 case XFS_ILOG_ABROOT:
3160                         dest = XFS_DFORK_APTR(dip);
3161                         xfs_bmbt_to_bmdr(mp, (struct xfs_btree_block *)src,
3162                                          len, (xfs_bmdr_block_t*)dest,
3163                                          XFS_DFORK_ASIZE(dip, mp));
3164                         break;
3165 
3166                 default:
3167                         xfs_warn(log->l_mp, "%s: Invalid flag", __func__);
3168                         ASSERT(0);
3169                         error = -EIO;
3170                         goto out_release;
3171                 }
3172         }
3173 
3174 out_owner_change:
3175         /* Recover the swapext owner change unless inode has been deleted */
3176         if ((in_f->ilf_fields & (XFS_ILOG_DOWNER|XFS_ILOG_AOWNER)) &&
3177             (dip->di_mode != 0))
3178                 error = xfs_recover_inode_owner_change(mp, dip, in_f,
3179                                                        buffer_list);
3180         /* re-generate the checksum. */
3181         xfs_dinode_calc_crc(log->l_mp, dip);
3182 
3183         ASSERT(bp->b_mount == mp);
3184         bp->b_iodone = xlog_recover_iodone;
3185         xfs_buf_delwri_queue(bp, buffer_list);
3186 
3187 out_release:
3188         xfs_buf_relse(bp);
3189 error:
3190         if (need_free)
3191                 kmem_free(in_f);
3192         return error;
3193 }
3194 
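/*
 * A hedged sketch of the di_flushiter wrap test above for pre-CRC
 * filesystems: the 16-bit flush counter tops out at DI_MAX_FLUSH, and a
 * log value that has wrapped back to small numbers must still count as
 * newer than an on-disk value sitting at the ceiling.
 */
#include <stdint.h>
#include <stdbool.h>

#define DI_MAX_FLUSH    0xffff

static bool log_inode_is_newer(uint16_t log_iter, uint16_t disk_iter)
{
        if (log_iter >= disk_iter)
                return true;
        /* wrap case: disk copy at the ceiling, log restarted near zero */
        return disk_iter == DI_MAX_FLUSH && log_iter < (DI_MAX_FLUSH >> 1);
}
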
3195 /*
3196  * Recover QUOTAOFF records. We simply make a note of them in the xlog
3197  * structure, so that we know not to do any dquot item or dquot buffer
3198  * recovery of that type.
3199  */
3200 STATIC int
3201 xlog_recover_quotaoff_pass1(
3202         struct xlog                     *log,
3203         struct xlog_recover_item        *item)
3204 {
3205         xfs_qoff_logformat_t    *qoff_f = item->ri_buf[0].i_addr;
3206         ASSERT(qoff_f);
3207 
3208         /*
3209          * The logitem format's flag tells us if this was user quotaoff,
3210          * group/project quotaoff or both.
3211          */
3212         if (qoff_f->qf_flags & XFS_UQUOTA_ACCT)
3213                 log->l_quotaoffs_flag |= XFS_DQ_USER;
3214         if (qoff_f->qf_flags & XFS_PQUOTA_ACCT)
3215                 log->l_quotaoffs_flag |= XFS_DQ_PROJ;
3216         if (qoff_f->qf_flags & XFS_GQUOTA_ACCT)
3217                 log->l_quotaoffs_flag |= XFS_DQ_GROUP;
3218 
3219         return 0;
3220 }
3221 
3222 /*
3223  * Recover a dquot record
3224  */
3225 STATIC int
3226 xlog_recover_dquot_pass2(
3227         struct xlog                     *log,
3228         struct list_head                *buffer_list,
3229         struct xlog_recover_item        *item,
3230         xfs_lsn_t                       current_lsn)
3231 {
3232         xfs_mount_t             *mp = log->l_mp;
3233         xfs_buf_t               *bp;
3234         struct xfs_disk_dquot   *ddq, *recddq;
3235         xfs_failaddr_t          fa;
3236         int                     error;
3237         xfs_dq_logformat_t      *dq_f;
3238         uint                    type;
3239 
3240 
3241         /*
3242          * Filesystems are required to send in quota flags at mount time.
3243          */
3244         if (mp->m_qflags == 0)
3245                 return 0;
3246 
3247         recddq = item->ri_buf[1].i_addr;
3248         if (recddq == NULL) {
3249                 xfs_alert(log->l_mp, "NULL dquot in %s.", __func__);
3250                 return -EIO;
3251         }
3252         if (item->ri_buf[1].i_len < sizeof(xfs_disk_dquot_t)) {
3253                 xfs_alert(log->l_mp, "dquot too small (%d) in %s.",
3254                         item->ri_buf[1].i_len, __func__);
3255                 return -EIO;
3256         }
3257 
3258         /*
3259          * This type of quota was turned off, so ignore this record.
3260          */
3261         type = recddq->d_flags & (XFS_DQ_USER | XFS_DQ_PROJ | XFS_DQ_GROUP);
3262         ASSERT(type);
3263         if (log->l_quotaoffs_flag & type)
3264                 return 0;
3265 
3266         /*
3267          * At this point we know that quota was _not_ turned off.
3268          * Since the mount flags are not indicating to us otherwise, this
3269          * must mean that quota is on, and the dquot needs to be replayed.
3270          * Remember that we may not have fully recovered the superblock yet,
3271          * so we can't do the usual trick of looking at the SB quota bits.
3272          *
3273          * The other possibility, of course, is that the quota subsystem was
3274          * removed since the last mount - ENOSYS.
3275          */
3276         dq_f = item->ri_buf[0].i_addr;
3277         ASSERT(dq_f);
3278         fa = xfs_dquot_verify(mp, recddq, dq_f->qlf_id, 0);
3279         if (fa) {
3280                 xfs_alert(mp, "corrupt dquot ID 0x%x in log at %pS",
3281                                 dq_f->qlf_id, fa);
3282                 return -EIO;
3283         }
3284         ASSERT(dq_f->qlf_len == 1);
3285 
3286         /*
3287          * At this point we are assuming that the dquots have been allocated
3288          * and hence the buffer has valid dquots stamped in it. It should,
3289          * therefore, pass verifier validation. If the dquot is bad, then
3290          * we'll return an error here, so we don't need to specifically check
3291          * the dquot in the buffer after the verifier has run.
3292          */
3293         error = xfs_trans_read_buf(mp, NULL, mp->m_ddev_targp, dq_f->qlf_blkno,
3294                                    XFS_FSB_TO_BB(mp, dq_f->qlf_len), 0, &bp,
3295                                    &xfs_dquot_buf_ops);
3296         if (error)
3297                 return error;
3298 
3299         ASSERT(bp);
3300         ddq = xfs_buf_offset(bp, dq_f->qlf_boffset);
3301 
3302         /*
3303          * If the dquot has an LSN in it, recover the dquot only if it's less
3304          * than the lsn of the transaction we are replaying.
3305          */
3306         if (xfs_sb_version_hascrc(&mp->m_sb)) {
3307                 struct xfs_dqblk *dqb = (struct xfs_dqblk *)ddq;
3308                 xfs_lsn_t       lsn = be64_to_cpu(dqb->dd_lsn);
3309 
3310                 if (lsn && lsn != -1 && XFS_LSN_CMP(lsn, current_lsn) >= 0) {
3311                         goto out_release;
3312                 }
3313         }
3314 
3315         memcpy(ddq, recddq, item->ri_buf[1].i_len);
3316         if (xfs_sb_version_hascrc(&mp->m_sb)) {
3317                 xfs_update_cksum((char *)ddq, sizeof(struct xfs_dqblk),
3318                                  XFS_DQUOT_CRC_OFF);
3319         }
3320 
3321         ASSERT(dq_f->qlf_size == 2);
3322         ASSERT(bp->b_mount == mp);
3323         bp->b_iodone = xlog_recover_iodone;
3324         xfs_buf_delwri_queue(bp, buffer_list);
3325 
3326 out_release:
3327         xfs_buf_relse(bp);
3328         return 0;
3329 }
3330 
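/*
 * A hedged sketch of the replay step above: copy the logged dquot over
 * the on-disk copy, then re-stamp the checksum so it covers the new
 * contents. toy_cksum() is an illustrative stand-in for the crc32c used
 * by xfs_update_cksum(); the struct layout is likewise simplified.
 */
#include <stdint.h>
#include <string.h>

struct dqblk {
        unsigned char   dd_core[104];   /* illustrative size only */
        uint32_t        dd_crc;
};

static uint32_t toy_cksum(const unsigned char *p, size_t len)
{
        uint32_t sum = 0;

        while (len--)
                sum = (sum << 1) ^ *p++;
        return sum;
}

static void replay_dquot(struct dqblk *disk, const void *logged, size_t len)
{
        memcpy(disk, logged, len);      /* replay the logged contents */
        disk->dd_crc = 0;               /* CRC field is zero while summing */
        disk->dd_crc = toy_cksum((unsigned char *)disk, sizeof(*disk));
}
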
3331 /*
3332  * This routine is called to create an in-core extent free intent
3333  * item from the efi format structure which was logged on disk.
3334  * It allocates an in-core efi, copies the extents from the format
3335  * structure into it, and adds the efi to the AIL with the given
3336  * LSN.
3337  */
3338 STATIC int
3339 xlog_recover_efi_pass2(
3340         struct xlog                     *log,
3341         struct xlog_recover_item        *item,
3342         xfs_lsn_t                       lsn)
3343 {
3344         int                             error;
3345         struct xfs_mount                *mp = log->l_mp;
3346         struct xfs_efi_log_item         *efip;
3347         struct xfs_efi_log_format       *efi_formatp;
3348 
3349         efi_formatp = item->ri_buf[0].i_addr;
3350 
3351         efip = xfs_efi_init(mp, efi_formatp->efi_nextents);
3352         error = xfs_efi_copy_format(&item->ri_buf[0], &efip->efi_format);
3353         if (error) {
3354                 xfs_efi_item_free(efip);
3355                 return error;
3356         }
3357         atomic_set(&efip->efi_next_extent, efi_formatp->efi_nextents);
3358 
3359         spin_lock(&log->l_ailp->ail_lock);
3360         /*
3361          * The EFI has two references. One for the EFD and one for EFI to ensure
3362          * it makes it into the AIL. Insert the EFI into the AIL directly and
3363          * drop the EFI reference. Note that xfs_trans_ail_update() drops the
3364          * AIL lock.
3365          */
3366         xfs_trans_ail_update(log->l_ailp, &efip->efi_item, lsn);
3367         xfs_efi_release(efip);
3368         return 0;
3369 }
3370 
3371 
3372 /*
3373  * This routine is called when an EFD format structure is found in a committed
3374  * transaction in the log. Its purpose is to cancel the corresponding EFI if it
3375  * was still in the log. To do this it searches the AIL for the EFI with an id
3376  * equal to that in the EFD format structure. If we find it we drop the EFD
3377  * reference, which removes the EFI from the AIL and frees it.
3378  */
3379 STATIC int
3380 xlog_recover_efd_pass2(
3381         struct xlog                     *log,
3382         struct xlog_recover_item        *item)
3383 {
3384         xfs_efd_log_format_t    *efd_formatp;
3385         xfs_efi_log_item_t      *efip = NULL;
3386         struct xfs_log_item     *lip;
3387         uint64_t                efi_id;
3388         struct xfs_ail_cursor   cur;
3389         struct xfs_ail          *ailp = log->l_ailp;
3390 
3391         efd_formatp = item->ri_buf[0].i_addr;
3392         ASSERT((item->ri_buf[0].i_len == (sizeof(xfs_efd_log_format_32_t) +
3393                 ((efd_formatp->efd_nextents - 1) * sizeof(xfs_extent_32_t)))) ||
3394                (item->ri_buf[0].i_len == (sizeof(xfs_efd_log_format_64_t) +
3395                 ((efd_formatp->efd_nextents - 1) * sizeof(xfs_extent_64_t)))));
3396         efi_id = efd_formatp->efd_efi_id;
3397 
3398         /*
3399          * Search for the EFI with the id in the EFD format structure in the
3400          * AIL.
3401          */
3402         spin_lock(&ailp->ail_lock);
3403         lip = xfs_trans_ail_cursor_first(ailp, &cur, 0);
3404         while (lip != NULL) {
3405                 if (lip->li_type == XFS_LI_EFI) {
3406                         efip = (xfs_efi_log_item_t *)lip;
3407                         if (efip->efi_format.efi_id == efi_id) {
3408                                 /*
3409                                  * Drop the EFD reference to the EFI. This
3410                                  * removes the EFI from the AIL and frees it.
3411                                  */
3412                                 spin_unlock(&ailp->ail_lock);
3413                                 xfs_efi_release(efip);
3414                                 spin_lock(&ailp->ail_lock);
3415                                 break;
3416                         }
3417                 }
3418                 lip = xfs_trans_ail_cursor_next(ailp, &cur);
3419         }
3420 
3421         xfs_trans_ail_cursor_done(&cur);
3422         spin_unlock(&ailp->ail_lock);
3423 
3424         return 0;
3425 }
3426 
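/*
 * A hedged sketch of the intent/done pairing implemented above and
 * repeated for the RUI/RUD, CUI/CUD and BUI/BUD handlers below: replaying
 * the intent re-inserts it into the AIL pinned by a reference, and
 * replaying the matching done item walks the AIL and drops that
 * reference, which removes and frees the intent. A plain linked list
 * stands in for the AIL cursor machinery.
 */
#include <stdint.h>
#include <stddef.h>

struct intent {
        struct intent   *next;
        uint64_t        id;             /* matches the done item's id */
        int             refcount;
};

static void release_matching_intent(struct intent *ail, uint64_t done_id)
{
        struct intent   *ip;

        for (ip = ail; ip != NULL; ip = ip->next) {
                if (ip->id == done_id) {
                        ip->refcount--; /* removed/freed when it hits zero */
                        break;
                }
        }
}
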
3427 /*
3428  * This routine is called to create an in-core extent rmap update
3429  * item from the rui format structure which was logged on disk.
3430  * It allocates an in-core rui, copies the extents from the format
3431  * structure into it, and adds the rui to the AIL with the given
3432  * LSN.
3433  */
3434 STATIC int
3435 xlog_recover_rui_pass2(
3436         struct xlog                     *log,
3437         struct xlog_recover_item        *item,
3438         xfs_lsn_t                       lsn)
3439 {
3440         int                             error;
3441         struct xfs_mount                *mp = log->l_mp;
3442         struct xfs_rui_log_item         *ruip;
3443         struct xfs_rui_log_format       *rui_formatp;
3444 
3445         rui_formatp = item->ri_buf[0].i_addr;
3446 
3447         ruip = xfs_rui_init(mp, rui_formatp->rui_nextents);
3448         error = xfs_rui_copy_format(&item->ri_buf[0], &ruip->rui_format);
3449         if (error) {
3450                 xfs_rui_item_free(ruip);
3451                 return error;
3452         }
3453         atomic_set(&ruip->rui_next_extent, rui_formatp->rui_nextents);
3454 
3455         spin_lock(&log->l_ailp->ail_lock);
3456         /*
3457          * The RUI has two references. One for the RUD and one for RUI to ensure
3458          * it makes it into the AIL. Insert the RUI into the AIL directly and
3459          * drop the RUI reference. Note that xfs_trans_ail_update() drops the
3460          * AIL lock.
3461          */
3462         xfs_trans_ail_update(log->l_ailp, &ruip->rui_item, lsn);
3463         xfs_rui_release(ruip);
3464         return 0;
3465 }
3466 
3467 
3468 /*
3469  * This routine is called when an RUD format structure is found in a committed
3470  * transaction in the log. Its purpose is to cancel the corresponding RUI if it
3471  * was still in the log. To do this it searches the AIL for the RUI with an id
3472  * equal to that in the RUD format structure. If we find it we drop the RUD
3473  * reference, which removes the RUI from the AIL and frees it.
3474  */
3475 STATIC int
3476 xlog_recover_rud_pass2(
3477         struct xlog                     *log,
3478         struct xlog_recover_item        *item)
3479 {
3480         struct xfs_rud_log_format       *rud_formatp;
3481         struct xfs_rui_log_item         *ruip = NULL;
3482         struct xfs_log_item             *lip;
3483         uint64_t                        rui_id;
3484         struct xfs_ail_cursor           cur;
3485         struct xfs_ail                  *ailp = log->l_ailp;
3486 
3487         rud_formatp = item->ri_buf[0].i_addr;
3488         ASSERT(item->ri_buf[0].i_len == sizeof(struct xfs_rud_log_format));
3489         rui_id = rud_formatp->rud_rui_id;
3490 
3491         /*
3492          * Search for the RUI with the id in the RUD format structure in the
3493          * AIL.
3494          */
3495         spin_lock(&ailp->ail_lock);
3496         lip = xfs_trans_ail_cursor_first(ailp, &cur, 0);
3497         while (lip != NULL) {
3498                 if (lip->li_type == XFS_LI_RUI) {
3499                         ruip = (struct xfs_rui_log_item *)lip;
3500                         if (ruip->rui_format.rui_id == rui_id) {
3501                                 /*
3502                                  * Drop the RUD reference to the RUI. This
3503                                  * removes the RUI from the AIL and frees it.
3504                                  */
3505                                 spin_unlock(&ailp->ail_lock);
3506                                 xfs_rui_release(ruip);
3507                                 spin_lock(&ailp->ail_lock);
3508                                 break;
3509                         }
3510                 }
3511                 lip = xfs_trans_ail_cursor_next(ailp, &cur);
3512         }
3513 
3514         xfs_trans_ail_cursor_done(&cur);
3515         spin_unlock(&ailp->ail_lock);
3516 
3517         return 0;
3518 }
3519 
3520 /*
3521  * Copy a CUI format buffer from the given buf, and into the destination
3522  * CUI format structure.  The CUI/CUD items were designed not to need any
3523  * special alignment handling.
3524  */
3525 static int
3526 xfs_cui_copy_format(
3527         struct xfs_log_iovec            *buf,
3528         struct xfs_cui_log_format       *dst_cui_fmt)
3529 {
3530         struct xfs_cui_log_format       *src_cui_fmt;
3531         uint                            len;
3532 
3533         src_cui_fmt = buf->i_addr;
3534         len = xfs_cui_log_format_sizeof(src_cui_fmt->cui_nextents);
3535 
3536         if (buf->i_len == len) {
3537                 memcpy(dst_cui_fmt, src_cui_fmt, len);
3538                 return 0;
3539         }
3540         return -EFSCORRUPTED;
3541 }
3542 
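/*
 * A hedged sketch of the length check above: a CUI log format is a fixed
 * header followed by a flexible array of extents, so the only acceptable
 * incoming length is the header size plus nextents extents. The layout
 * below is a simplified stand-in for xfs_cui_log_format.
 */
#include <stdint.h>
#include <string.h>

struct cui_fmt {
        uint32_t        nextents;
        uint32_t        id_placeholder;         /* other header fields */
        struct {
                uint64_t        startblock;
                uint32_t        blockcount;
        } extents[];
};

static int copy_cui_format(const void *buf, size_t buf_len,
                           struct cui_fmt *dst)
{
        const struct cui_fmt *src = buf;
        size_t len = sizeof(*src) + src->nextents * sizeof(src->extents[0]);

        if (buf_len != len)
                return -1;      /* -EFSCORRUPTED in the code above */
        memcpy(dst, src, len);
        return 0;
}
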
3543 /*
3544  * This routine is called to create an in-core extent refcount update
3545  * item from the cui format structure which was logged on disk.
3546  * It allocates an in-core cui, copies the extents from the format
3547  * structure into it, and adds the cui to the AIL with the given
3548  * LSN.
3549  */
3550 STATIC int
3551 xlog_recover_cui_pass2(
3552         struct xlog                     *log,
3553         struct xlog_recover_item        *item,
3554         xfs_lsn_t                       lsn)
3555 {
3556         int                             error;
3557         struct xfs_mount                *mp = log->l_mp;
3558         struct xfs_cui_log_item         *cuip;
3559         struct xfs_cui_log_format       *cui_formatp;
3560 
3561         cui_formatp = item->ri_buf[0].i_addr;
3562 
3563         cuip = xfs_cui_init(mp, cui_formatp->cui_nextents);
3564         error = xfs_cui_copy_format(&item->ri_buf[0], &cuip->cui_format);
3565         if (error) {
3566                 xfs_cui_item_free(cuip);
3567                 return error;
3568         }
3569         atomic_set(&cuip->cui_next_extent, cui_formatp->cui_nextents);
3570 
3571         spin_lock(&log->l_ailp->ail_lock);
3572         /*
3573          * The CUI has two references. One for the CUD and one for CUI to ensure
3574          * it makes it into the AIL. Insert the CUI into the AIL directly and
3575          * drop the CUI reference. Note that xfs_trans_ail_update() drops the
3576          * AIL lock.
3577          */
3578         xfs_trans_ail_update(log->l_ailp, &cuip->cui_item, lsn);
3579         xfs_cui_release(cuip);
3580         return 0;
3581 }
3582 
3583 
3584 /*
3585  * This routine is called when a CUD format structure is found in a committed
3586  * transaction in the log. Its purpose is to cancel the corresponding CUI if it
3587  * was still in the log. To do this it searches the AIL for the CUI with an id
3588  * equal to that in the CUD format structure. If we find it we drop the CUD
3589  * reference, which removes the CUI from the AIL and frees it.
3590  */
3591 STATIC int
3592 xlog_recover_cud_pass2(
3593         struct xlog                     *log,
3594         struct xlog_recover_item        *item)
3595 {
3596         struct xfs_cud_log_format       *cud_formatp;
3597         struct xfs_cui_log_item         *cuip = NULL;
3598         struct xfs_log_item             *lip;
3599         uint64_t                        cui_id;
3600         struct xfs_ail_cursor           cur;
3601         struct xfs_ail                  *ailp = log->l_ailp;
3602 
3603         cud_formatp = item->ri_buf[0].i_addr;
3604         if (item->ri_buf[0].i_len != sizeof(struct xfs_cud_log_format))
3605                 return -EFSCORRUPTED;
3606         cui_id = cud_formatp->cud_cui_id;
3607 
3608         /*
3609          * Search for the CUI with the id in the CUD format structure in the
3610          * AIL.
3611          */
3612         spin_lock(&ailp->ail_lock);
3613         lip = xfs_trans_ail_cursor_first(ailp, &cur, 0);
3614         while (lip != NULL) {
3615                 if (lip->li_type == XFS_LI_CUI) {
3616                         cuip = (struct xfs_cui_log_item *)lip;
3617                         if (cuip->cui_format.cui_id == cui_id) {
3618                                 /*
3619                                  * Drop the CUD reference to the CUI. This
3620                                  * removes the CUI from the AIL and frees it.
3621                                  */
3622                                 spin_unlock(&ailp->ail_lock);
3623                                 xfs_cui_release(cuip);
3624                                 spin_lock(&ailp->ail_lock);
3625                                 break;
3626                         }
3627                 }
3628                 lip = xfs_trans_ail_cursor_next(ailp, &cur);
3629         }
3630 
3631         xfs_trans_ail_cursor_done(&cur);
3632         spin_unlock(&ailp->ail_lock);
3633 
3634         return 0;
3635 }
3636 
3637 /*
3638  * Copy a BUI format buffer from the given buf, and into the destination
3639  * BUI format structure.  The BUI/BUD items were designed not to need any
3640  * special alignment handling.
3641  */
3642 static int
3643 xfs_bui_copy_format(
3644         struct xfs_log_iovec            *buf,
3645         struct xfs_bui_log_format       *dst_bui_fmt)
3646 {
3647         struct xfs_bui_log_format       *src_bui_fmt;
3648         uint                            len;
3649 
3650         src_bui_fmt = buf->i_addr;
3651         len = xfs_bui_log_format_sizeof(src_bui_fmt->bui_nextents);
3652 
3653         if (buf->i_len == len) {
3654                 memcpy(dst_bui_fmt, src_bui_fmt, len);
3655                 return 0;
3656         }
3657         return -EFSCORRUPTED;
3658 }
3659 
3660 /*
3661  * This routine is called to create an in-core extent bmap update
3662  * item from the bui format structure which was logged on disk.
3663  * It allocates an in-core bui, copies the extents from the format
3664  * structure into it, and adds the bui to the AIL with the given
3665  * LSN.
3666  */
3667 STATIC int
3668 xlog_recover_bui_pass2(
3669         struct xlog                     *log,
3670         struct xlog_recover_item        *item,
3671         xfs_lsn_t                       lsn)
3672 {
3673         int                             error;
3674         struct xfs_mount                *mp = log->l_mp;
3675         struct xfs_bui_log_item         *buip;
3676         struct xfs_bui_log_format       *bui_formatp;
3677 
3678         bui_formatp = item->ri_buf[0].i_addr;
3679 
3680         if (bui_formatp->bui_nextents != XFS_BUI_MAX_FAST_EXTENTS)
3681                 return -EFSCORRUPTED;
3682         buip = xfs_bui_init(mp);
3683         error = xfs_bui_copy_format(&item->ri_buf[0], &buip->bui_format);
3684         if (error) {
3685                 xfs_bui_item_free(buip);
3686                 return error;
3687         }
3688         atomic_set(&buip->bui_next_extent, bui_formatp->bui_nextents);
3689 
3690         spin_lock(&log->l_ailp->ail_lock);
3691         /*
3692          * The BUI has two references. One for the BUD and one for BUI to ensure
3693          * it makes it into the AIL. Insert the BUI into the AIL directly and
3694          * drop the BUI reference. Note that xfs_trans_ail_update() drops the
3695          * AIL lock.
3696          */
3697         xfs_trans_ail_update(log->l_ailp, &buip->bui_item, lsn);
3698         xfs_bui_release(buip);
3699         return 0;
3700 }
3701 
3702 
3703 /*
3704  * This routine is called when a BUD format structure is found in a committed
3705  * transaction in the log. Its purpose is to cancel the corresponding BUI if it
3706  * was still in the log. To do this it searches the AIL for the BUI with an id
3707  * equal to that in the BUD format structure. If we find it we drop the BUD
3708  * reference, which removes the BUI from the AIL and frees it.
3709  */
3710 STATIC int
3711 xlog_recover_bud_pass2(
3712         struct xlog                     *log,
3713         struct xlog_recover_item        *item)
3714 {
3715         struct xfs_bud_log_format       *bud_formatp;
3716         struct xfs_bui_log_item         *buip = NULL;
3717         struct xfs_log_item             *lip;
3718         uint64_t                        bui_id;
3719         struct xfs_ail_cursor           cur;
3720         struct xfs_ail                  *ailp = log->l_ailp;
3721 
3722         bud_formatp = item->ri_buf[0].i_addr;
3723         if (item->ri_buf[0].i_len != sizeof(struct xfs_bud_log_format))
3724                 return -EFSCORRUPTED;
3725         bui_id = bud_formatp->bud_bui_id;
3726 
3727         /*
3728          * Search for the BUI with the id in the BUD format structure in the
3729          * AIL.
3730          */
3731         spin_lock(&ailp->ail_lock);
3732         lip = xfs_trans_ail_cursor_first(ailp, &cur, 0);
3733         while (lip != NULL) {
3734                 if (lip->li_type == XFS_LI_BUI) {
3735                         buip = (struct xfs_bui_log_item *)lip;
3736                         if (buip->bui_format.bui_id == bui_id) {
3737                                 /*
3738                                  * Drop the BUD reference to the BUI. This
3739                                  * removes the BUI from the AIL and frees it.
3740                                  */
3741                                 spin_unlock(&ailp->ail_lock);
3742                                 xfs_bui_release(buip);
3743                                 spin_lock(&ailp->ail_lock);
3744                                 break;
3745                         }
3746                 }
3747                 lip = xfs_trans_ail_cursor_next(ailp, &cur);
3748         }
3749 
3750         xfs_trans_ail_cursor_done(&cur);
3751         spin_unlock(&ailp->ail_lock);
3752 
3753         return 0;
3754 }
3755 
3756 /*
3757  * This routine is called when an inode create format structure is found in a
3758  * committed transaction in the log.  Its purpose is to initialise the inodes
3759  * being allocated on disk. This requires us to get inode cluster buffers that
3760  * match the range to be initialised, stamp them with inode templates and queue
3761  * them for delayed write so that subsequent modifications will hit the cached
3762  * buffer and only need writing out at the end of recovery.
3763  */
3764 STATIC int
3765 xlog_recover_do_icreate_pass2(
3766         struct xlog             *log,
3767         struct list_head        *buffer_list,
3768         xlog_recover_item_t     *item)
3769 {
3770         struct xfs_mount        *mp = log->l_mp;
3771         struct xfs_icreate_log  *icl;
3772         struct xfs_ino_geometry *igeo = M_IGEO(mp);
3773         xfs_agnumber_t          agno;
3774         xfs_agblock_t           agbno;
3775         unsigned int            count;
3776         unsigned int            isize;
3777         xfs_agblock_t           length;
3778         int                     bb_per_cluster;
3779         int                     cancel_count;
3780         int                     nbufs;
3781         int                     i;
3782 
3783         icl = (struct xfs_icreate_log *)item->ri_buf[0].i_addr;
3784         if (icl->icl_type != XFS_LI_ICREATE) {
3785                 xfs_warn(log->l_mp, "xlog_recover_do_icreate_trans: bad type");
3786                 return -EINVAL;
3787         }
3788 
3789         if (icl->icl_size != 1) {
3790                 xfs_warn(log->l_mp, "xlog_recover_do_icreate_trans: bad icl size");
3791                 return -EINVAL;
3792         }
3793 
3794         agno = be32_to_cpu(icl->icl_ag);
3795         if (agno >= mp->m_sb.sb_agcount) {
3796                 xfs_warn(log->l_mp, "xlog_recover_do_icreate_trans: bad agno");
3797                 return -EINVAL;
3798         }
3799         agbno = be32_to_cpu(icl->icl_agbno);
3800         if (!agbno || agbno == NULLAGBLOCK || agbno >= mp->m_sb.sb_agblocks) {
3801                 xfs_warn(log->l_mp, "xlog_recover_do_icreate_trans: bad agbno");
3802                 return -EINVAL;
3803         }
3804         isize = be32_to_cpu(icl->icl_isize);
3805         if (isize != mp->m_sb.sb_inodesize) {
3806                 xfs_warn(log->l_mp, "xlog_recover_do_icreate_trans: bad isize");
3807                 return -EINVAL;
3808         }
3809         count = be32_to_cpu(icl->icl_count);
3810         if (!count) {
3811                 xfs_warn(log->l_mp, "%s: bad count", __func__);
3812                 return -EINVAL;
3813         }
3814         length = be32_to_cpu(icl->icl_length);
3815         if (!length || length >= mp->m_sb.sb_agblocks) {
3816                 xfs_warn(log->l_mp, "%s: bad length", __func__);
3817                 return -EINVAL;
3818         }
3819 
3820         /*
3821          * The inode chunk is either full or sparse and we only support
3822          * m_ino_geo.ialloc_min_blks sized sparse allocations at this time.
3823          */
3824         if (length != igeo->ialloc_blks &&
3825             length != igeo->ialloc_min_blks) {
3826                 xfs_warn(log->l_mp,
3827                          "%s: unsupported chunk length", __func__);
3828                 return -EINVAL;
3829         }
3830 
3831         /* verify inode count is consistent with extent length */
3832         if ((count >> mp->m_sb.sb_inopblog) != length) {
3833                 xfs_warn(log->l_mp,
3834                          "%s: inconsistent inode count and chunk length",
3835                          __func__);
3836                 return -EINVAL;
3837         }
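             /*
              * Worked example, assuming 4k blocks and 512 byte inodes:
              * sb_inopblog is 3 (8 inodes per block), so a 64 inode chunk
              * is consistent only with 64 >> 3 == 8 == length.
              */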
3838 
3839         /*
3840          * The icreate transaction can cover multiple cluster buffers and these
3841          * buffers could have been freed and reused. Check the individual
3842          * buffers for cancellation so we don't overwrite anything written after
3843          * a cancellation.
3844          */
3845         bb_per_cluster = XFS_FSB_TO_BB(mp, igeo->blocks_per_cluster);
3846         nbufs = length / igeo->blocks_per_cluster;
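             /*
              * e.g. an 8 block chunk with 2 blocks per inode cluster spans
              * nbufs == 4 cluster buffers of bb_per_cluster basic blocks
              * each.
              */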
3847         for (i = 0, cancel_count = 0; i < nbufs; i++) {
3848                 xfs_daddr_t     daddr;
3849 
3850                 daddr = XFS_AGB_TO_DADDR(mp, agno,
3851                                 agbno + i * igeo->blocks_per_cluster);
3852                 if (xlog_check_buffer_cancelled(log, daddr, bb_per_cluster, 0))
3853                         cancel_count++;
3854         }
3855 
3856         /*
3857          * We currently only use icreate for a single allocation at a time. This
3858          * means we should expect either all or none of the buffers to be
3859          * cancelled. Be conservative and skip replay if at least one buffer is
3860          * cancelled, but warn the user that something is awry if the buffers
3861          * are not consistent.
3862          *
3863          * XXX: This must be refined to only skip cancelled clusters once we use
3864          * icreate for multiple chunk allocations.
3865          */
3866         ASSERT(!cancel_count || cancel_count == nbufs);
3867         if (cancel_count) {
3868                 if (cancel_count != nbufs)
3869                         xfs_warn(mp,
3870         "WARNING: partial inode chunk cancellation, skipped icreate.");
3871                 trace_xfs_log_recover_icreate_cancel(log, icl);
3872                 return 0;
3873         }
3874 
3875         trace_xfs_log_recover_icreate_recover(log, icl);
3876         return xfs_ialloc_inode_init(mp, NULL, buffer_list, count, agno, agbno,
3877                                      length, be32_to_cpu(icl->icl_gen));
3878 }
3879 
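     /*
      * Pass 2 readahead helpers. Each decodes just enough of its log item to
      * find the buffer the item targets, then issues readahead for it so the
      * buffer is likely to be cached by the time the item is replayed.
      * Buffers already known to be cancelled are skipped to avoid wasted I/O.
      */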
3880 STATIC void
3881 xlog_recover_buffer_ra_pass2(
3882         struct xlog                     *log,
3883         struct xlog_recover_item        *item)
3884 {
3885         struct xfs_buf_log_format       *buf_f = item->ri_buf[0].i_addr;
3886         struct xfs_mount                *mp = log->l_mp;
3887 
3888         if (xlog_peek_buffer_cancelled(log, buf_f->blf_blkno,
3889                         buf_f->blf_len, buf_f->blf_flags)) {
3890                 return;
3891         }
3892 
3893         xfs_buf_readahead(mp->m_ddev_targp, buf_f->blf_blkno,
3894                                 buf_f->blf_len, NULL);
3895 }
3896 
3897 STATIC void
3898 xlog_recover_inode_ra_pass2(
3899         struct xlog                     *log,
3900         struct xlog_recover_item        *item)
3901 {
3902         struct xfs_inode_log_format     ilf_buf;
3903         struct xfs_inode_log_format     *ilfp;
3904         struct xfs_mount                *mp = log->l_mp;
3905         int                     error;
3906 
3907         if (item->ri_buf[0].i_len == sizeof(struct xfs_inode_log_format)) {
3908                 ilfp = item->ri_buf[0].i_addr;
3909         } else {
3910                 ilfp = &ilf_buf;
3911                 memset(ilfp, 0, sizeof(*ilfp));
3912                 error = xfs_inode_item_format_convert(&item->ri_buf[0], ilfp);
3913                 if (error)
3914                         return;
3915         }
3916 
3917         if (xlog_peek_buffer_cancelled(log, ilfp->ilf_blkno, ilfp->ilf_len, 0))
3918                 return;
3919 
3920         xfs_buf_readahead(mp->m_ddev_targp, ilfp->ilf_blkno,
3921                                 ilfp->ilf_len, &xfs_inode_buf_ra_ops);
3922 }
3923 
3924 STATIC void
3925 xlog_recover_dquot_ra_pass2(
3926         struct xlog                     *log,
3927         struct xlog_recover_item        *item)
3928 {
3929         struct xfs_mount        *mp = log->l_mp;
3930         struct xfs_disk_dquot   *recddq;
3931         struct xfs_dq_logformat *dq_f;
3932         uint                    type;
3933         int                     len;
3934 
3935 
3936         if (mp->m_qflags == 0)
3937                 return;
3938 
3939         recddq = item->ri_buf[1].i_addr;
3940         if (recddq == NULL)
3941                 return;
3942         if (item->ri_buf[1].i_len < sizeof(struct xfs_disk_dquot))
3943                 return;
3944 
3945         type = recddq->d_flags & (XFS_DQ_USER | XFS_DQ_PROJ | XFS_DQ_GROUP);
3946         ASSERT(type);
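             /*
              * If a quotaoff record of this type was seen in pass 1, replay
              * of this dquot will be skipped in pass 2 anyway, so don't
              * bother reading the buffer ahead.
              */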
3947         if (log->l_quotaoffs_flag & type)
3948                 return;
3949 
3950         dq_f = item->ri_buf[0].i_addr;
3951         ASSERT(dq_f);
3952         ASSERT(dq_f->qlf_len == 1);
3953 
3954         len = XFS_FSB_TO_BB(mp, dq_f->qlf_len);
3955         if (xlog_peek_buffer_cancelled(log, dq_f->qlf_blkno, len, 0))
3956                 return;
3957 
3958         xfs_buf_readahead(mp->m_ddev_targp, dq_f->qlf_blkno, len,
3959                           &xfs_dquot_buf_ra_ops);
3960 }
3961 
3962 STATIC void
3963 xlog_recover_ra_pass2(
3964         struct xlog                     *log,
3965         struct xlog_recover_item        *item)
3966 {
3967         switch (ITEM_TYPE(item)) {
3968         case XFS_LI_BUF:
3969                 xlog_recover_buffer_ra_pass2(log, item);
3970                 break;
3971         case XFS_LI_INODE:
3972                 xlog_recover_inode_ra_pass2(log, item);
3973                 break;
3974         case XFS_LI_DQUOT:
3975                 xlog_recover_dquot_ra_pass2(log, item);
3976                 break;
3977         case XFS_LI_EFI:
3978         case XFS_LI_EFD:
3979         case XFS_LI_QUOTAOFF:
3980         case XFS_LI_RUI:
3981         case XFS_LI_RUD:
3982         case XFS_LI_CUI:
3983         case XFS_LI_CUD:
3984         case XFS_LI_BUI:
3985         case XFS_LI_BUD:
3986         default:
3987                 break;
3988         }
3989 }
3990 
3991 STATIC int
3992 xlog_recover_commit_pass1(
3993         struct xlog                     *log,
3994         struct xlog_recover             *trans,
3995         struct xlog_recover_item        *item)
3996 {
3997         trace_xfs_log_recover_item_recover(log, trans, item, XLOG_RECOVER_PASS1);
3998 
3999         switch (ITEM_TYPE(item)) {
4000         case XFS_LI_BUF:
4001                 return xlog_recover_buffer_pass1(log, item);
4002         case XFS_LI_QUOTAOFF:
4003                 return xlog_recover_quotaoff_pass1(log, item);
4004         case XFS_LI_INODE:
4005         case XFS_LI_EFI:
4006         case XFS_LI_EFD:
4007         case XFS_LI_DQUOT:
4008         case XFS_LI_ICREATE:
4009         case XFS_LI_RUI:
4010         case XFS_LI_RUD:
4011         case XFS_LI_CUI:
4012         case XFS_LI_CUD:
4013         case XFS_LI_BUI:
4014         case XFS_LI_BUD:
4015                 /* nothing to do in pass 1 */
4016                 return 0;
4017         default:
4018                 xfs_warn(log->l_mp, "%s: invalid item type (%d)",
4019                         __func__, ITEM_TYPE(item));
4020                 ASSERT(0);
4021                 return -EIO;
4022         }
4023 }
4024 
4025 STATIC int
4026 xlog_recover_commit_pass2(
4027         struct xlog                     *log,
4028         struct xlog_recover             *trans,
4029         struct list_head                *buffer_list,
4030         struct xlog_recover_item        *item)
4031 {
4032         trace_xfs_log_recover_item_recover(log, trans, item, XLOG_RECOVER_PASS2);
4033 
4034         switch (ITEM_TYPE(item)) {
4035         case XFS_LI_BUF:
4036                 return xlog_recover_buffer_pass2(log, buffer_list, item,
4037                                                  trans->r_lsn);
4038         case XFS_LI_INODE:
4039                 return xlog_recover_inode_pass2(log, buffer_list, item,
4040                                                  trans->r_lsn);
4041         case XFS_LI_EFI:
4042                 return xlog_recover_efi_pass2(log, item, trans->r_lsn);
4043         case XFS_LI_EFD:
4044                 return xlog_recover_efd_pass2(log, item);
4045         case XFS_LI_RUI:
4046                 return xlog_recover_rui_pass2(log, item, trans->r_lsn);
4047         case XFS_LI_RUD:
4048                 return xlog_recover_rud_pass2(log, item);
4049         case XFS_LI_CUI:
4050                 return xlog_recover_cui_pass2(log, item, trans->r_lsn);
4051         case XFS_LI_CUD:
4052                 return xlog_recover_cud_pass2(log, item);
4053         case XFS_LI_BUI:
4054                 return xlog_recover_bui_pass2(log, item, trans->r_lsn);
4055         case XFS_LI_BUD:
4056                 return xlog_recover_bud_pass2(log, item);
4057         case XFS_LI_DQUOT:
4058                 return xlog_recover_dquot_pass2(log, buffer_list, item,
4059                                                 trans->r_lsn);
4060         case XFS_LI_ICREATE:
4061                 return xlog_recover_do_icreate_pass2(log, buffer_list, item);
4062         case XFS_LI_QUOTAOFF:
4063                 /* nothing to do in pass2 */
4064                 return 0;
4065         default:
4066                 xfs_warn(log->l_mp, "%s: invalid item type (%d)",
4067                         __func__, ITEM_TYPE(item));
4068                 ASSERT(0);
4069                 return -EIO;
4070         }
4071 }
4072 
4073 STATIC int
4074 xlog_recover_items_pass2(
4075         struct xlog                     *log,
4076         struct xlog_recover             *trans,
4077         struct list_head                *buffer_list,
4078         struct list_head                *item_list)
4079 {
4080         struct xlog_recover_item        *item;
4081         int                             error = 0;
4082 
4083         list_for_each_entry(item, item_list, ri_list) {
4084                 error = xlog_recover_commit_pass2(log, trans,
4085                                           buffer_list, item);
4086                 if (error)
4087                         return error;
4088         }
4089 
4090         return error;
4091 }
4092 
4093 /*
4094  * Perform the transaction.
4095  *
4096  * If the transaction modifies a buffer or inode, do it now.  Otherwise,
4097  * intent items (EFIs, RUIs, CUIs, BUIs) get queued up in the AIL for later.
4098  */
4099 STATIC int
4100 xlog_recover_commit_trans(
4101         struct xlog             *log,
4102         struct xlog_recover     *trans,
4103         int                     pass,
4104         struct list_head        *buffer_list)
4105 {
4106         int                             error = 0;
4107         int                             items_queued = 0;
4108         struct xlog_recover_item        *item;
4109         struct xlog_recover_item        *next;
4110         LIST_HEAD                       (ra_list);
4111         LIST_HEAD                       (done_list);
4112 
4113         #define XLOG_RECOVER_COMMIT_QUEUE_MAX 100
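             /*
              * Pass 2 items are not replayed as soon as they are seen.
              * Readahead is issued for each item as it moves onto ra_list;
              * once XLOG_RECOVER_COMMIT_QUEUE_MAX items are queued, the whole
              * batch is recovered and moved to done_list, letting readahead
              * I/O run ahead of the replay work.
              */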
4114 
4115         hlist_del_init(&trans->r_list);
4116 
4117         error = xlog_recover_reorder_trans(log, trans, pass);
4118         if (error)
4119                 return error;
4120 
4121         list_for_each_entry_safe(item, next, &trans->r_itemq, ri_list) {
4122                 switch (pass) {
4123                 case XLOG_RECOVER_PASS1:
4124                         error = xlog_recover_commit_pass1(log, trans, item);
4125                         break;
4126                 case XLOG_RECOVER_PASS2:
4127                         xlog_recover_ra_pass2(log, item);
4128                         list_move_tail(&item->ri_list, &ra_list);
4129                         items_queued++;
4130                         if (items_queued >= XLOG_RECOVER_COMMIT_QUEUE_MAX) {
4131                                 error = xlog_recover_items_pass2(log, trans,
4132                                                 buffer_list, &ra_list);
4133                                 list_splice_tail_init(&ra_list, &done_list);
4134                                 items_queued = 0;
4135                         }
4136 
4137                         break;
4138                 default:
4139                         ASSERT(0);
4140                 }
4141 
4142                 if (error)
4143                         goto out;
4144         }
4145 
4146 out:
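             /*
              * Recover anything still queued on ra_list, then splice all
              * items back onto r_itemq so that xlog_recover_free_trans() can
              * find and free them.
              */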
4147         if (!list_empty(&ra_list)) {
4148                 if (!error)
4149                         error = xlog_recover_items_pass2(log, trans,
4150                                         buffer_list, &ra_list);
4151                 list_splice_tail_init(&ra_list, &done_list);
4152         }
4153 
4154         if (!list_empty(&done_list))
4155                 list_splice_init(&done_list, &trans->r_itemq);
4156 
4157         return error;
4158 }
4159 
4160 STATIC void
4161 xlog_recover_add_item(
4162         struct list_head        *head)
4163 {
4164         xlog_recover_item_t     *item;
4165 
4166         item = kmem_zalloc(sizeof(xlog_recover_item_t), 0);
4167         INIT_LIST_HEAD(&item->ri_list);
4168         list_add_tail(&item->ri_list, head);
4169 }
4170 
4171 STATIC int
4172 xlog_recover_add_to_cont_trans(
4173         struct xlog             *log,
4174         struct xlog_recover     *trans,
4175         char                    *dp,
4176         int                     len)
4177 {
4178         xlog_recover_item_t     *item;
4179         char                    *ptr, *old_ptr;
4180         int                     old_len;
4181 
4182         /*
4183          * If the transaction is empty, the header was split across this and the
4184          * previous record. Copy the rest of the header.
4185          */
4186         if (list_empty(&trans->r_itemq)) {
4187                 ASSERT(len <= sizeof(struct xfs_trans_header));
4188                 if (len > sizeof(struct xfs_trans_header)) {
4189                         xfs_warn(log->l_mp, "%s: bad header length", __func__);
4190                         return -EIO;
4191                 }
4192 
4193                 xlog_recover_add_item(&trans->r_itemq);
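                     /*
                      * The previous record supplied the first
                      * sizeof(struct xfs_trans_header) - len bytes of the
                      * header (copied into r_theader by
                      * xlog_recover_add_to_trans()); the remaining len bytes
                      * are placed at its tail here.
                      */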
4194                 ptr = (char *)&trans->r_theader +
4195                                 sizeof(struct xfs_trans_header) - len;
4196                 memcpy(ptr, dp, len);
4197                 return 0;
4198         }
4199 
4200         /* take the tail entry */
4201         item = list_entry(trans->r_itemq.prev, xlog_recover_item_t, ri_list);
4202 
4203         old_ptr = item->ri_buf[item->ri_cnt-1].i_addr;
4204         old_len = item->ri_buf[item->ri_cnt-1].i_len;
4205 
4206         ptr = kmem_realloc(old_ptr, len + old_len, 0);
4207         memcpy(&ptr[old_len], dp, len);
4208         item->ri_buf[item->ri_cnt-1].i_len += len;
4209         item->ri_buf[item->ri_cnt-1].i_addr = ptr;
4210         trace_xfs_log_recover_item_add_cont(log, trans, item, 0);
4211         return 0;
4212 }
4213 
4214 /*
4215  * The next region to add is the start of a new region.  It could be
4216  * a whole region or it could be the first part of a new region.  Because
4217  * of this, the assumption here is that the type and size fields of all
4218  * format structures fit into the first 32 bits of the structure.
4219  *
4220  * This works because all regions must be 32 bit aligned.  Therefore, we
4221  * either have both fields or we have neither field.  In the case we have
4222  * neither field, the data part of the region is zero length.  We only have
4223  * a log_op_header and can throw away the header since a new one will appear
4224  * later.  If we have at least 4 bytes, then we can determine how many regions
4225  * will appear in the current log item.
4226  */
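     /*
      * For example, struct xfs_inode_log_format starts with 16 bit ilf_type
      * and ilf_size fields, so the first 4 bytes of a region are enough to
      * read the region count (in_f->ilf_size) below.
      */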
4227 STATIC int
4228 xlog_recover_add_to_trans(
4229         struct xlog             *log,
4230         struct xlog_recover     *trans,
4231         char                    *dp,
4232         int                     len)
4233 {
4234         struct xfs_inode_log_format     *in_f;                  /* any will do */
4235         xlog_recover_item_t     *item;
4236         char                    *ptr;
4237 
4238         if (!len)
4239                 return 0;
4240         if (list_empty(&trans->r_itemq)) {
4241                 /* we need to catch log corruptions here */
4242                 if (*(uint *)dp != XFS_TRANS_HEADER_MAGIC) {
4243                         xfs_warn(log->l_mp, "%s: bad header magic number",
4244                                 __func__);
4245                         ASSERT(0);
4246                         return -EIO;
4247                 }
4248 
4249                 if (len > sizeof(struct xfs_trans_header)) {
4250                         xfs_warn(log->l_mp, "%s: bad header length", __func__);
4251                         ASSERT(0);
4252                         return -EIO;
4253                 }
4254 
4255                 /*
4256                  * The transaction header can be arbitrarily split across op
4257                  * records. If we don't have the whole thing here, copy what we
4258                  * do have and handle the rest in the next record.
4259                  */
4260                 if (len == sizeof(struct xfs_trans_header))
4261                         xlog_recover_add_item(&trans->r_itemq);
4262                 memcpy(&trans->r_theader, dp, len);
4263                 return 0;
4264         }
4265 
4266         ptr = kmem_alloc(len, 0);
4267         memcpy(ptr, dp, len);
4268         in_f = (struct xfs_inode_log_format *)ptr;
4269 
4270         /* take the tail entry */
4271         item = list_entry(trans->r_itemq.prev, xlog_recover_item_t, ri_list);
4272         if (item->ri_total != 0 &&
4273              item->ri_total == item->ri_cnt) {
4274                 /* tail item is in use, get a new one */
4275                 xlog_recover_add_item(&trans->r_itemq);
4276                 item = list_entry(trans->r_itemq.prev,
4277                                         xlog_recover_item_t, ri_list);
4278         }
4279 
4280         if (item->ri_total == 0) {              /* first region to be added */
4281                 if (in_f->ilf_size == 0 ||
4282                     in_f->ilf_size > XLOG_MAX_REGIONS_IN_ITEM) {
4283                         xfs_warn(log->l_mp,
4284                 "bad number of regions (%d) in inode log format",
4285                                   in_f->ilf_size);
4286                         ASSERT(0);
4287                         kmem_free(ptr);
4288                         return -EIO;
4289                 }
4290 
4291                 item->ri_total = in_f->ilf_size;
4292                 item->ri_buf =
4293                         kmem_zalloc(item->ri_total * sizeof(xfs_log_iovec_t),
4294                                     0);
4295         }
4296         ASSERT(item->ri_total > item->ri_cnt);
4297         /* Description region is ri_buf[0] */
4298         item->ri_buf[item->ri_cnt].i_addr = ptr;
4299         item->ri_buf[item->ri_cnt].i_len  = len;
4300         item->ri_cnt++;
4301         trace_xfs_log_recover_item_add(log, trans, item, 0);
4302         return 0;
4303 }
4304 
4305 /*
4306  * Free up any resources allocated by the transaction
4307  *
4308  * Remember that EFIs, EFDs, and IUNLINKs are handled later.
4309  */
4310 STATIC void
4311 xlog_recover_free_trans(
4312         struct xlog_recover     *trans)
4313 {
4314         xlog_recover_item_t     *item, *n;
4315         int                     i;
4316 
4317         hlist_del_init(&trans->r_list);
4318 
4319         list_for_each_entry_safe(item, n, &trans->r_itemq, ri_list) {
4320                 /* Free the regions in the item. */
4321                 list_del(&item->ri_list);
4322                 for (i = 0; i < item->ri_cnt; i++)
4323                         kmem_free(item->ri_buf[i].i_addr);
4324                 /* Free the item itself */
4325                 kmem_free(item->ri_buf);
4326                 kmem_free(item);
4327         }
4328         /* Free the transaction recover structure */
4329         kmem_free(trans);
4330 }
4331 
4332 /*
4333  * On error or completion, trans is freed.
4334  */
4335 STATIC int
4336 xlog_recovery_process_trans(
4337         struct xlog             *log,
4338         struct xlog_recover     *trans,
4339         char                    *dp,
4340         unsigned int            len,
4341         unsigned int            flags,
4342         int                     pass,
4343         struct list_head        *buffer_list)
4344 {
4345         int                     error = 0;
4346         bool                    freeit = false;
4347 
4348         /* mask off ophdr transaction container flags */
4349         flags &= ~XLOG_END_TRANS;
4350         if (flags & XLOG_WAS_CONT_TRANS)
4351                 flags &= ~XLOG_CONTINUE_TRANS;
4352 
4353         /*
4354          * Callees must not free the trans structure. We'll decide if we need to
4355          * free it or not based on the operation being done and its result.
4356          */
4357         switch (flags) {
4358         /* expected flag values */
4359         case 0:
4360         case XLOG_CONTINUE_TRANS:
4361                 error = xlog_recover_add_to_trans(log, trans, dp, len);
4362                 break;
4363         case XLOG_WAS_CONT_TRANS:
4364                 error = xlog_recover_add_to_cont_trans(log, trans, dp, len);
4365                 break;
4366         case XLOG_COMMIT_TRANS:
4367                 error = xlog_recover_commit_trans(log, trans, pass,
4368                                                   buffer_list);
4369                 /* success or fail, we are now done with this transaction. */
4370                 freeit = true;
4371                 break;
4372 
4373         /* unexpected flag values */
4374         case XLOG_UNMOUNT_TRANS:
4375                 /* just skip trans */
4376                 xfs_warn(log->l_mp, "%s: Unmount LR", __func__);
4377                 freeit = true;
4378                 break;
4379         case XLOG_START_TRANS:
4380         default:
4381                 xfs_warn(log->l_mp, "%s: bad flag 0x%x", __func__, flags);
4382                 ASSERT(0);
4383                 error = -EIO;
4384                 break;
4385         }
4386         if (error || freeit)
4387                 xlog_recover_free_trans(trans);
4388         return error;
4389 }
4390 
4391 /*
4392  * Lookup the transaction recovery structure associated with the ID in the
4393  * current ophdr. If the transaction doesn't exist and the start flag is set in
4394  * the ophdr, then allocate a new transaction for future ID matches to find.
4395  * Either way, return what we found during the lookup - an existing transaction
4396  * or nothing.
4397  */
4398 STATIC struct xlog_recover *
4399 xlog_recover_ophdr_to_trans(
4400         struct hlist_head       rhash[],
4401         struct xlog_rec_header  *rhead,
4402         struct xlog_op_header   *ohead)
4403 {
4404         struct xlog_recover     *trans;
4405         xlog_tid_t              tid;
4406         struct hlist_head       *rhp;
4407 
4408         tid = be32_to_cpu(ohead->oh_tid);
4409         rhp = &rhash[XLOG_RHASH(tid)];
4410         hlist_for_each_entry(trans, rhp, r_list) {
4411                 if (trans->r_log_tid == tid)
4412                         return trans;
4413         }
4414 
4415         /*
4416          * skip over non-start transaction headers - we could be
4417          * processing slack space before the next transaction starts
4418          */
4419         if (!(ohead->oh_flags & XLOG_START_TRANS))
4420                 return NULL;
4421 
4422         ASSERT(be32_to_cpu(ohead->oh_len) == 0);
4423 
4424         /*
4425          * This is a new transaction so allocate a new recovery container to
4426          * hold the recovery ops that will follow.
4427          */
4428         trans = kmem_zalloc(sizeof(struct xlog_recover), 0);
4429         trans->r_log_tid = tid;
4430         trans->r_lsn = be64_to_cpu(rhead->h_lsn);
4431         INIT_LIST_HEAD(&trans->r_itemq);
4432         INIT_HLIST_NODE(&trans->r_list);
4433         hlist_add_head(&trans->r_list, rhp);
4434 
4435         /*
4436          * Nothing more to do for this ophdr. Items to be added to this new
4437          * transaction will be in subsequent ophdr containers.
4438          */
4439         return NULL;
4440 }
4441 
4442 STATIC int
4443 xlog_recover_process_ophdr(
4444         struct xlog             *log,
4445         struct hlist_head       rhash[],
4446         struct xlog_rec_header  *rhead,
4447         struct xlog_op_header   *ohead,
4448         char                    *dp,
4449         char                    *end,
4450         int                     pass,
4451         struct list_head        *buffer_list)
4452 {
4453         struct xlog_recover     *trans;
4454         unsigned int            len;
4455         int                     error;
4456 
4457         /* Do we understand who wrote this op? */
4458         if (ohead->oh_clientid != XFS_TRANSACTION &&
4459             ohead->oh_clientid != XFS_LOG) {
4460                 xfs_warn(log->l_mp, "%s: bad clientid 0x%x",
4461                         __func__, ohead->oh_clientid);
4462                 ASSERT(0);
4463                 return -EIO;
4464         }
4465 
4466         /*
4467          * Check the ophdr contains all the data it is supposed to contain.
4468          */
4469         len = be32_to_cpu(ohead->oh_len);
4470         if (dp + len > end) {
4471                 xfs_warn(log->l_mp, "%s: bad length 0x%x", __func__, len);
4472                 WARN_ON(1);
4473                 return -EIO;
4474         }
4475 
4476         trans = xlog_recover_ophdr_to_trans(rhash, rhead, ohead);
4477         if (!trans) {
4478                 /* nothing to do, so skip over this ophdr */
4479                 return 0;
4480         }
4481 
4482         /*
4483          * The recovered buffer queue is drained only once we know that all
4484          * recovery items for the current LSN have been processed. This is
4485          * required because:
4486          *
4487          * - Buffer write submission updates the metadata LSN of the buffer.
4488          * - Log recovery skips items with a metadata LSN >= the current LSN of
4489          *   the recovery item.
4490          * - Separate recovery items against the same metadata buffer can share
4491          *   a current LSN. I.e., consider that the LSN of a recovery item is
4492          *   defined as the starting LSN of the first record in which its
4493          *   transaction appears, that a record can hold multiple transactions,
4494          *   and/or that a transaction can span multiple records.
4495          *
4496          * In other words, we are allowed to submit a buffer from log recovery
4497          * once per current LSN. Otherwise, we may incorrectly skip recovery
4498          * items and cause corruption.
4499          *
4500          * We don't know up front whether buffers are updated multiple times per
4501          * LSN. Therefore, track the current LSN of each commit log record as it
4502          * is processed and drain the queue when it changes. Use commit records
4503          * because they are ordered correctly by the logging code.
4504          */
4505         if (log->l_recovery_lsn != trans->r_lsn &&
4506             ohead->oh_flags & XLOG_COMMIT_TRANS) {
4507                 error = xfs_buf_delwri_submit(buffer_list);
4508                 if (error)
4509                         return error;
4510                 log->l_recovery_lsn = trans->r_lsn;
4511         }
4512 
4513         return xlog_recovery_process_trans(log, trans, dp, len,
4514                                            ohead->oh_flags, pass, buffer_list);
4515 }
4516 
4517 /*
4518  * There are two valid states of the r_state field.  0 indicates that the
4519  * transaction structure is in a normal state.  We have either seen the
4520  * start of the transaction or the last operation we added was not a partial
4521  * operation.  If the last operation we added to the transaction was a
4522  * partial operation, we need to mark r_state with XLOG_WAS_CONT_TRANS.
4523  *
4524  * NOTE: skip LRs with 0 data length.
4525  */
4526 STATIC int
4527 xlog_recover_process_data(
4528         struct xlog             *log,
4529         struct hlist_head       rhash[],
4530         struct xlog_rec_header  *rhead,
4531         char                    *dp,
4532         int                     pass,
4533         struct list_head        *buffer_list)
4534 {
4535         struct xlog_op_header   *ohead;
4536         char                    *end;
4537         int                     num_logops;
4538         int                     error;
4539 
4540         end = dp + be32_to_cpu(rhead->h_len);
4541         num_logops = be32_to_cpu(rhead->h_num_logops);
4542 
4543         /* check the log format matches our own - else we can't recover */
4544         if (xlog_header_check_recover(log->l_mp, rhead))
4545                 return -EIO;
4546 
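             /*
              * The record body is a packed sequence of (xlog_op_header,
              * payload) pairs. Walk it, bounded by both h_len and
              * h_num_logops, handing each ophdr and its payload off to be
              * assembled into a recovery transaction.
              */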
4547         trace_xfs_log_recover_record(log, rhead, pass);
4548         while ((dp < end) && num_logops) {
4549 
4550                 ohead = (struct xlog_op_header *)dp;
4551                 dp += sizeof(*ohead);
4552                 ASSERT(dp <= end);
4553 
4554                 /* errors will abort recovery */
4555                 error = xlog_recover_process_ophdr(log, rhash, rhead, ohead,
4556                                                    dp, end, pass, buffer_list);
4557                 if (error)
4558                         return error;
4559 
4560                 dp += be32_to_cpu(ohead->oh_len);
4561                 num_logops--;
4562         }
4563         return 0;
4564 }
4565 
4566 /* Recover the EFI if necessary. */
4567 STATIC int
4568 xlog_recover_process_efi(
4569         struct xfs_mount                *mp,
4570         struct xfs_ail                  *ailp,
4571         struct xfs_log_item             *lip)
4572 {
4573         struct xfs_efi_log_item         *efip;
4574         int                             error;
4575 
4576         /*
4577          * Skip EFIs that we've already processed.
4578          */
4579         efip = container_of(lip, struct xfs_efi_log_item, efi_item);
4580         if (test_bit(XFS_EFI_RECOVERED, &efip->efi_flags))
4581                 return 0;
4582 
4583         spin_unlock(&ailp->ail_lock);
4584         error = xfs_efi_recover(mp, efip);
4585         spin_lock(&ailp->ail_lock);
4586 
4587         return error;
4588 }
4589 
4590 /* Release the EFI since we're cancelling everything. */
4591 STATIC void
4592 xlog_recover_cancel_efi(
4593         struct xfs_mount                *mp,
4594         struct xfs_ail                  *ailp,
4595         struct xfs_log_item             *lip)
4596 {
4597         struct xfs_efi_log_item         *efip;
4598 
4599         efip = container_of(lip, struct xfs_efi_log_item, efi_item);
4600 
4601         spin_unlock(&ailp->ail_lock);
4602         xfs_efi_release(efip);
4603         spin_lock(&ailp->ail_lock);
4604 }
4605 
4606 /* Recover the RUI if necessary. */
4607 STATIC int
4608 xlog_recover_process_rui(
4609         struct xfs_mount                *mp,
4610         struct xfs_ail                  *ailp,
4611         struct xfs_log_item             *lip)
4612 {
4613         struct xfs_rui_log_item         *ruip;
4614         int                             error;
4615 
4616         /*
4617          * Skip RUIs that we've already processed.
4618          */
4619         ruip = container_of(lip, struct xfs_rui_log_item, rui_item);
4620         if (test_bit(XFS_RUI_RECOVERED, &ruip->rui_flags))
4621                 return 0;
4622 
4623         spin_unlock(&ailp->ail_lock);
4624         error = xfs_rui_recover(mp, ruip);
4625         spin_lock(&ailp->ail_lock);
4626 
4627         return error;
4628 }
4629 
4630 /* Release the RUI since we're cancelling everything. */
4631 STATIC void
4632 xlog_recover_cancel_rui(
4633         struct xfs_mount                *mp,
4634         struct xfs_ail                  *ailp,
4635         struct xfs_log_item             *lip)
4636 {
4637         struct xfs_rui_log_item         *ruip;
4638 
4639         ruip = container_of(lip, struct xfs_rui_log_item, rui_item);
4640 
4641         spin_unlock(&ailp->ail_lock);
4642         xfs_rui_release(ruip);
4643         spin_lock(&ailp->ail_lock);
4644 }
4645 
4646 /* Recover the CUI if necessary. */
4647 STATIC int
4648 xlog_recover_process_cui(
4649         struct xfs_trans                *parent_tp,
4650         struct xfs_ail                  *ailp,
4651         struct xfs_log_item             *lip)
4652 {
4653         struct xfs_cui_log_item         *cuip;
4654         int                             error;
4655 
4656         /*
4657          * Skip CUIs that we've already processed.
4658          */
4659         cuip = container_of(lip, struct xfs_cui_log_item, cui_item);
4660         if (test_bit(XFS_CUI_RECOVERED, &cuip->cui_flags))
4661                 return 0;
4662 
4663         spin_unlock(&ailp->ail_lock);
4664         error = xfs_cui_recover(parent_tp, cuip);
4665         spin_lock(&ailp->ail_lock);
4666 
4667         return error;
4668 }
4669 
4670 /* Release the CUI since we're cancelling everything. */
4671 STATIC void
4672 xlog_recover_cancel_cui(
4673         struct xfs_mount                *mp,
4674         struct xfs_ail                  *ailp,
4675         struct xfs_log_item             *lip)
4676 {
4677         struct xfs_cui_log_item         *cuip;
4678 
4679         cuip = container_of(lip, struct xfs_cui_log_item, cui_item);
4680 
4681         spin_unlock(&ailp->ail_lock);
4682         xfs_cui_release(cuip);
4683         spin_lock(&ailp->ail_lock);
4684 }
4685 
4686 /* Recover the BUI if necessary. */
4687 STATIC int
4688 xlog_recover_process_bui(
4689         struct xfs_trans                *parent_tp,
4690         struct xfs_ail                  *ailp,
4691         struct xfs_log_item             *lip)
4692 {
4693         struct xfs_bui_log_item         *buip;
4694         int                             error;
4695 
4696         /*
4697          * Skip BUIs that we've already processed.
4698          */
4699         buip = container_of(lip, struct xfs_bui_log_item, bui_item);
4700         if (test_bit(XFS_BUI_RECOVERED, &buip->bui_flags))
4701                 return 0;
4702 
4703         spin_unlock(&ailp->ail_lock);
4704         error = xfs_bui_recover(parent_tp, buip);
4705         spin_lock(&ailp->ail_lock);
4706 
4707         return error;
4708 }
4709 
4710 /* Release the BUI since we're cancelling everything. */
4711 STATIC void
4712 xlog_recover_cancel_bui(
4713         struct xfs_mount                *mp,
4714         struct xfs_ail                  *ailp,
4715         struct xfs_log_item             *lip)
4716 {
4717         struct xfs_bui_log_item         *buip;
4718 
4719         buip = container_of(lip, struct xfs_bui_log_item, bui_item);
4720 
4721         spin_unlock(&ailp->ail_lock);
4722         xfs_bui_release(buip);
4723         spin_lock(&ailp->ail_lock);
4724 }
4725 
4726 /* Is this log item a deferred action intent? */
4727 static inline bool xlog_item_is_intent(struct xfs_log_item *lip)
4728 {
4729         switch (lip->li_type) {
4730         case XFS_LI_EFI:
4731         case XFS_LI_RUI:
4732         case XFS_LI_CUI:
4733         case XFS_LI_BUI:
4734                 return true;
4735         default:
4736                 return false;
4737         }
4738 }
4739 
4740 /* Take all the collected deferred ops and finish them in order. */
4741 static int
4742 xlog_finish_defer_ops(
4743         struct xfs_trans        *parent_tp)
4744 {
4745         struct xfs_mount        *mp = parent_tp->t_mountp;
4746         struct xfs_trans        *tp;
4747         int64_t                 freeblks;
4748         uint                    resblks;
4749         int                     error;
4750 
4751         /*
4752          * We're finishing the defer_ops that accumulated as a result of
4753          * recovering unfinished intent items during log recovery.  We
4754          * reserve an itruncate transaction because it is the largest
4755          * permanent transaction type.  Since we're the only user of the fs
4756          * right now, take 93% (15/16) of the available free blocks.  Use
4757          * weird math to avoid a 64-bit division.
4758          */
4759         freeblks = percpu_counter_sum(&mp->m_fdblocks);
4760         if (freeblks <= 0)
4761                 return -ENOSPC;
4762         resblks = min_t(int64_t, UINT_MAX, freeblks);
4763         resblks = (resblks * 15) >> 4;
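             /*
              * e.g. with 1,000,000 free blocks this reserves
              * (1000000 * 15) >> 4 == 937,500 blocks using only a multiply
              * and a shift.
              */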
4764         error = xfs_trans_alloc(mp, &M_RES(mp)->tr_itruncate, resblks,
4765                         0, XFS_TRANS_RESERVE, &tp);
4766         if (error)
4767                 return error;
4768         /* transfer all collected dfops to this transaction */
4769         xfs_defer_move(tp, parent_tp);
4770 
4771         return xfs_trans_commit(tp);
4772 }
4773 
4774 /*
4775  * When this is called, all of the log intent items which did not have
4776  * corresponding log done items should be in the AIL.  What we do now
4777  * is update the data structures associated with each one.
4778  *
4779  * Since we process the log intent items in normal transactions, they
4780  * will be removed at some point after the commit.  This prevents us
4781  * from just walking down the list processing each one.  We'll use a
4782  * flag in the intent item to skip those that we've already processed
4783  * and use the AIL iteration mechanism's generation count to try to
4784  * speed this up at least a bit.
4785  *
4786  * When we start, we know that the intents are the only things in the
4787  * AIL.  As we process them, however, other items are added to the
4788  * AIL.
4789  */
4790 STATIC int
4791 xlog_recover_process_intents(
4792         struct xlog             *log)
4793 {
4794         struct xfs_trans        *parent_tp;
4795         struct xfs_ail_cursor   cur;
4796         struct xfs_log_item     *lip;
4797         struct xfs_ail          *ailp;
4798         int                     error;
4799 #if defined(DEBUG) || defined(XFS_WARN)
4800         xfs_lsn_t               last_lsn;
4801 #endif
4802 
4803         /*
4804          * The intent recovery handlers commit transactions to complete recovery
4805          * for individual intents, but any new deferred operations that are
4806          * queued during that process are held off until the very end. The
4807          * purpose of this transaction is to serve as a container for deferred
4808          * operations. Each intent recovery handler must transfer dfops here
4809          * before its local transaction commits, and we'll finish the entire
4810          * list below.
4811          */
4812         error = xfs_trans_alloc_empty(log->l_mp, &parent_tp);
4813         if (error)
4814                 return error;
4815 
4816         ailp = log->l_ailp;
4817         spin_lock(&ailp->ail_lock);
4818         lip = xfs_trans_ail_cursor_first(ailp, &cur, 0);
4819 #if defined(DEBUG) || defined(XFS_WARN)
4820         last_lsn = xlog_assign_lsn(log->l_curr_cycle, log->l_curr_block);
4821 #endif
4822         while (lip != NULL) {
4823                 /*
4824                  * We're done when we see something other than an intent.
4825                  * There should be no intents left in the AIL now.
4826                  */
4827                 if (!xlog_item_is_intent(lip)) {
4828 #ifdef DEBUG
4829                         for (; lip; lip = xfs_trans_ail_cursor_next(ailp, &cur))
4830                                 ASSERT(!xlog_item_is_intent(lip));
4831 #endif
4832                         break;
4833                 }
4834 
4835                 /*
4836                  * We should never see a redo item with an LSN higher than
4837                  * the last transaction we found in the log at the start
4838                  * of recovery.
4839                  */
4840                 ASSERT(XFS_LSN_CMP(last_lsn, lip->li_lsn) >= 0);
4841 
4842                 /*
4843                  * NOTE: If your intent processing routine can create more
4844                  * deferred ops, you /must/ attach them to the dfops in this
4845                  * routine or else those subsequent intents will get
4846                  * replayed in the wrong order!
4847                  */
4848                 switch (lip->li_type) {
4849                 case XFS_LI_EFI:
4850                         error = xlog_recover_process_efi(log->l_mp, ailp, lip);
4851                         break;
4852                 case XFS_LI_RUI:
4853                         error = xlog_recover_process_rui(log->l_mp, ailp, lip);
4854                         break;
4855                 case XFS_LI_CUI:
4856                         error = xlog_recover_process_cui(parent_tp, ailp, lip);
4857                         break;
4858                 case XFS_LI_BUI:
4859                         error = xlog_recover_process_bui(parent_tp, ailp, lip);
4860                         break;
4861                 }
4862                 if (error)
4863                         goto out;
4864                 lip = xfs_trans_ail_cursor_next(ailp, &cur);
4865         }
4866 out:
4867         xfs_trans_ail_cursor_done(&cur);
4868         spin_unlock(&ailp->ail_lock);
4869         if (!error)
4870                 error = xlog_finish_defer_ops(parent_tp);
4871         xfs_trans_cancel(parent_tp);
4872 
4873         return error;
4874 }
4875 
4876 /*
4877  * A cancel occurs when the mount has failed and we're bailing out.
4878  * Release all pending log intent items so they don't pin the AIL.
4879  */
4880 STATIC void
4881 xlog_recover_cancel_intents(
4882         struct xlog             *log)
4883 {
4884         struct xfs_log_item     *lip;
4885         struct xfs_ail_cursor   cur;
4886         struct xfs_ail          *ailp;
4887 
4888         ailp = log->l_ailp;
4889         spin_lock(&ailp->ail_lock);
4890         lip = xfs_trans_ail_cursor_first(ailp, &cur, 0);
4891         while (lip != NULL) {
4892                 /*
4893                  * We're done when we see something other than an intent.
4894                  * There should be no intents left in the AIL now.
4895                  */
4896                 if (!xlog_item_is_intent(lip)) {
4897 #ifdef DEBUG
4898                         for (; lip; lip = xfs_trans_ail_cursor_next(ailp, &cur))
4899                                 ASSERT(!xlog_item_is_intent(lip));
4900 #endif
4901                         break;
4902                 }
4903 
4904                 switch (lip->li_type) {
4905                 case XFS_LI_EFI:
4906                         xlog_recover_cancel_efi(log->l_mp, ailp, lip);
4907                         break;
4908                 case XFS_LI_RUI:
4909                         xlog_recover_cancel_rui(log->l_mp, ailp, lip);
4910                         break;
4911                 case XFS_LI_CUI:
4912                         xlog_recover_cancel_cui(log->l_mp, ailp, lip);
4913                         break;
4914                 case XFS_LI_BUI:
4915                         xlog_recover_cancel_bui(log->l_mp, ailp, lip);
4916                         break;
4917                 }
4918 
4919                 lip = xfs_trans_ail_cursor_next(ailp, &cur);
4920         }
4921 
4922         xfs_trans_ail_cursor_done(&cur);
4923         spin_unlock(&ailp->ail_lock);
4924 }
4925 
4926 /*
4927  * This routine performs a transaction to null out a bad inode pointer
4928  * in an agi unlinked inode hash bucket.
4929  */
4930 STATIC void
4931 xlog_recover_clear_agi_bucket(
4932         xfs_mount_t     *mp,
4933         xfs_agnumber_t  agno,
4934         int             bucket)
4935 {
4936         xfs_trans_t     *tp;
4937         xfs_agi_t       *agi;
4938         xfs_buf_t       *agibp;
4939         int             offset;
4940         int             error;
4941 
4942         error = xfs_trans_alloc(mp, &M_RES(mp)->tr_clearagi, 0, 0, 0, &tp);
4943         if (error)
4944                 goto out_error;
4945 
4946         error = xfs_read_agi(mp, tp, agno, &agibp);
4947         if (error)
4948                 goto out_abort;
4949 
4950         agi = XFS_BUF_TO_AGI(agibp);
4951         agi->agi_unlinked[bucket] = cpu_to_be32(NULLAGINO);
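             /*
              * Log only the byte range covering this bucket's slot in the
              * agi_unlinked[] array rather than dirtying the whole AGI.
              */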
4952         offset = offsetof(xfs_agi_t, agi_unlinked) +
4953                  (sizeof(xfs_agino_t) * bucket);
4954         xfs_trans_log_buf(tp, agibp, offset,
4955                           (offset + sizeof(xfs_agino_t) - 1));
4956 
4957         error = xfs_trans_commit(tp);
4958         if (error)
4959                 goto out_error;
4960         return;
4961 
4962 out_abort:
4963         xfs_trans_cancel(tp);
4964 out_error:
4965         xfs_warn(mp, "%s: failed to clear agi %d. Continuing.", __func__, agno);
4966         return;
4967 }
4968 
4969 STATIC xfs_agino_t
4970 xlog_recover_process_one_iunlink(
4971         struct xfs_mount                *mp,
4972         xfs_agnumber_t                  agno,
4973         xfs_agino_t                     agino,
4974         int                             bucket)
4975 {
4976         struct xfs_buf                  *ibp;
4977         struct xfs_dinode               *dip;
4978         struct xfs_inode                *ip;
4979         xfs_ino_t                       ino;
4980         int                             error;
4981 
4982         ino = XFS_AGINO_TO_INO(mp, agno, agino);
4983         error = xfs_iget(mp, NULL, ino, 0, 0, &ip);
4984         if (error)
4985                 goto fail;
4986 
4987         /*
4988          * Get the on disk inode to find the next inode in the bucket.
4989          */
4990         error = xfs_imap_to_bp(mp, NULL, &ip->i_imap, &dip, &ibp, 0, 0);
4991         if (error)
4992                 goto fail_iput;
4993 
4994         xfs_iflags_clear(ip, XFS_IRECOVERY);
4995         ASSERT(VFS_I(ip)->i_nlink == 0);
4996         ASSERT(VFS_I(ip)->i_mode != 0);
4997 
4998         /* setup for the next pass */
4999         agino = be32_to_cpu(dip->di_next_unlinked);
5000         xfs_buf_relse(ibp);
5001 
5002         /*
5003          * Prevent any DMAPI event from being sent when the reference on
5004          * the inode is dropped.
5005          */
5006         ip->i_d.di_dmevmask = 0;
5007 
5008         xfs_irele(ip);
5009         return agino;
5010 
5011  fail_iput:
5012         xfs_irele(ip);
5013  fail:
5014         /*
5015          * We can't read in the inode this bucket points to, or this inode
5016          * is messed up.  Just ditch this bucket of inodes.  We will lose
5017          * some inodes and space, but at least we won't hang.
5018          *
5019          * Call xlog_recover_clear_agi_bucket() to perform a transaction to
5020          * clear the inode pointer in the bucket.
5021          */
5022         xlog_recover_clear_agi_bucket(mp, agno, bucket);
5023         return NULLAGINO;
5024 }
5025 
5026 /*
5027  * Recover AGI unlinked lists
5028  *
5029  * This is called during recovery to process any inodes which we unlinked but
5030  * had not yet freed when the system crashed.  These inodes are on lists in the
5031  * AGI blocks. What we do here is scan all the AGIs and fully truncate and free
5032  * any inodes found on the lists. Each inode is removed from the lists when it
5033  * has been fully truncated and is freed. The freeing of the inode and its
5034  * removal from the list must be atomic.
5035  *
5036  * If everything we touch in the agi processing loop is already in memory, this
5037  * loop can hold the cpu for a long time. It runs without lock contention,
5038  * memory allocation contention, the need to wait for IO, etc., and so will
5039  * run until we either run out of inodes to process, run low on memory or run
5040  * out of log space.
5041  *
5042  * This behaviour is bad for latency on single CPU and non-preemptible kernels,
5043  * and can prevent other filesystem work (such as CIL pushes) from running. This
5044  * can lead to deadlocks if the recovery process runs out of log reservation
5045  * space. Hence we need to yield the CPU when there is other kernel work
5046  * scheduled on this CPU to ensure other scheduled work can run without undue
5047  * latency.
5048  */
5049 STATIC void
5050 xlog_recover_process_iunlinks(
5051         struct xlog     *log)
5052 {
5053         xfs_mount_t     *mp;
5054         xfs_agnumber_t  agno;
5055         xfs_agi_t       *agi;
5056         xfs_buf_t       *agibp;
5057         xfs_agino_t     agino;
5058         int             bucket;
5059         int             error;
5060 
5061         mp = log->l_mp;
5062 
5063         for (agno = 0; agno < mp->m_sb.sb_agcount; agno++) {
5064                 /*
5065                  * Find the agi for this ag.
5066                  */
5067                 error = xfs_read_agi(mp, NULL, agno, &agibp);
5068                 if (error) {
5069                         /*
5070                          * AGI is b0rked. Don't process it.
5071                          *
5072                          * We should probably mark the filesystem as corrupt
5073                          * after we've recovered all the ag's we can....
5074                          */
5075                         continue;
5076                 }
5077                 /*
5078                  * Unlock the buffer so that it can be acquired in the normal
5079                  * course of the transaction to truncate and free each inode.
5080                  * Because we are not racing with anyone else here for the AGI
5081                  * buffer, we don't even need to hold it locked to read the
5082                  * initial unlinked bucket entries out of the buffer. We keep a
5083                  * buffer reference, though, so that it stays pinned in memory
5084                  * while we need it.
5085                  */
5086                 agi = XFS_BUF_TO_AGI(agibp);
5087                 xfs_buf_unlock(agibp);
5088 
5089                 for (bucket = 0; bucket < XFS_AGI_UNLINKED_BUCKETS; bucket++) {
5090                         agino = be32_to_cpu(agi->agi_unlinked[bucket]);
5091                         while (agino != NULLAGINO) {
5092                                 agino = xlog_recover_process_one_iunlink(mp,
5093                                                         agno, agino, bucket);
5094                                 cond_resched();
5095                         }
5096                 }
5097                 xfs_buf_rele(agibp);
5098         }
5099 }
5100 
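     /*
      * When a log record is written, the first 4 bytes of each 512 byte block
      * of the body are overwritten with the record's cycle number, and the
      * displaced words are saved in h_cycle_data (and, for v2 logs with
      * records larger than 32k, in the extended headers). Restore those words
      * here so the payload can be parsed.
      */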
5101 STATIC void
5102 xlog_unpack_data(
5103         struct xlog_rec_header  *rhead,
5104         char                    *dp,
5105         struct xlog             *log)
5106 {
5107         int                     i, j, k;
5108 
5109         for (i = 0; i < BTOBB(be32_to_cpu(rhead->h_len)) &&
5110                   i < (XLOG_HEADER_CYCLE_SIZE / BBSIZE); i++) {
5111                 *(__be32 *)dp = *(__be32 *)&rhead->h_cycle_data[i];
5112                 dp += BBSIZE;
5113         }
5114 
5115         if (xfs_sb_version_haslogv2(&log->l_mp->m_sb)) {
5116                 xlog_in_core_2_t *xhdr = (xlog_in_core_2_t *)rhead;
5117                 for ( ; i < BTOBB(be32_to_cpu(rhead->h_len)); i++) {
5118                         j = i / (XLOG_HEADER_CYCLE_SIZE / BBSIZE);
5119                         k = i % (XLOG_HEADER_CYCLE_SIZE / BBSIZE);
5120                         *(__be32 *)dp = xhdr[j].hic_xheader.xh_cycle_data[k];
5121                         dp += BBSIZE;
5122                 }
5123         }
5124 }
5125 
5126 /*
5127  * CRC check, unpack and process a log record.
5128  */
5129 STATIC int
5130 xlog_recover_process(
5131         struct xlog             *log,
5132         struct hlist_head       rhash[],
5133         struct xlog_rec_header  *rhead,
5134         char                    *dp,
5135         int                     pass,
5136         struct list_head        *buffer_list)
5137 {
5138         __le32                  old_crc = rhead->h_crc;
5139         __le32                  crc;
5140 
5141         crc = xlog_cksum(log, rhead, dp, be32_to_cpu(rhead->h_len));
5142 
5143         /*
5144          * Nothing else to do if this is a CRC verification pass. Just return
5145          * if this is a record with a non-zero crc. Unfortunately, mkfs always
5146          * sets old_crc to 0 so we must consider this valid even on v5 supers.
5147          * Otherwise, return EFSBADCRC on failure so the callers up the stack
5148          * know precisely what failed.
5149          */
5150         if (pass == XLOG_RECOVER_CRCPASS) {
5151                 if (old_crc && crc != old_crc)
5152                         return -EFSBADCRC;
5153                 return 0;
5154         }
5155 
5156         /*
5157          * We're in the normal recovery path. Issue a warning if the CRC in the
5158          * header is non-zero or the filesystem has CRCs enabled. The warning is
5159          * advisory: skipping it for a zero on-disk CRC avoids noise when
5160          * upgrading the kernel from one that does not add CRCs by default.
5161          */
5162         if (crc != old_crc) {
5163                 if (old_crc || xfs_sb_version_hascrc(&log->l_mp->m_sb)) {
5164                         xfs_alert(log->l_mp,
5165                 "log record CRC mismatch: found 0x%x, expected 0x%x.",
5166                                         le32_to_cpu(old_crc),
5167                                         le32_to_cpu(crc));
5168                         xfs_hex_dump(dp, 32);
5169                 }
5170 
5171                 /*
5172                  * If the filesystem is CRC enabled, this mismatch becomes a
5173                  * fatal log corruption failure.
5174                  */
5175                 if (xfs_sb_version_hascrc(&log->l_mp->m_sb))
5176                         return -EFSCORRUPTED;
5177         }
5178 
5179         xlog_unpack_data(rhead, dp, log);
5180 
5181         return xlog_recover_process_data(log, rhash, rhead, dp, pass,
5182                                          buffer_list);
5183 }
5184 
5185 STATIC int
5186 xlog_valid_rec_header(
5187         struct xlog             *log,
5188         struct xlog_rec_header  *rhead,
5189         xfs_daddr_t             blkno)
5190 {
5191         int                     hlen;
5192 
5193         if (unlikely(rhead->h_magicno != cpu_to_be32(XLOG_HEADER_MAGIC_NUM))) {
5194                 XFS_ERROR_REPORT("xlog_valid_rec_header(1)",
5195                                 XFS_ERRLEVEL_LOW, log->l_mp);
5196                 return -EFSCORRUPTED;
5197         }
5198         if (unlikely(
5199             (!rhead->h_version ||
5200             (be32_to_cpu(rhead->h_version) & (~XLOG_VERSION_OKBITS))))) {
5201                 xfs_warn(log->l_mp, "%s: unrecognised log version (%d).",
5202                         __func__, be32_to_cpu(rhead->h_version));
5203                 return -EIO;
5204         }
5205 
5206         /* LR body must have data or it wouldn't have been written */
5207         hlen = be32_to_cpu(rhead->h_len);
5208         if (unlikely(hlen <= 0 || hlen > INT_MAX)) {
5209                 XFS_ERROR_REPORT("xlog_valid_rec_header(2)",
5210                                 XFS_ERRLEVEL_LOW, log->l_mp);
5211                 return -EFSCORRUPTED;
5212         }
5213         if (unlikely(blkno > log->l_logBBsize || blkno > INT_MAX)) {
5214                 XFS_ERROR_REPORT("xlog_valid_rec_header(3)",
5215                                 XFS_ERRLEVEL_LOW, log->l_mp);
5216                 return -EFSCORRUPTED;
5217         }
5218         return 0;
5219 }
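
     /*
      * Editorial sketch, not part of the upstream file: a hypothetical
      * DEBUG-only walk through the three checks above, showing which
      * error code each class of bad header produces.
      */
     #ifdef DEBUG
     static inline void
     xlog_valid_rec_header_example(
             struct xlog             *log)
     {
             struct xlog_rec_header  rhead = {
                     .h_magicno      = cpu_to_be32(XLOG_HEADER_MAGIC_NUM),
                     .h_version      = cpu_to_be32(XLOG_VERSION_1),
                     .h_len          = cpu_to_be32(BBTOB(1)),
             };

             /* a well formed header at block zero passes */
             ASSERT(xlog_valid_rec_header(log, &rhead, 0) == 0);

             /* an empty record body is corruption, not an I/O problem */
             rhead.h_len = 0;
             ASSERT(xlog_valid_rec_header(log, &rhead, 0) == -EFSCORRUPTED);

             /* unknown version bits are reported as -EIO */
             rhead.h_len = cpu_to_be32(BBTOB(1));
             rhead.h_version = cpu_to_be32(~XLOG_VERSION_OKBITS);
             ASSERT(xlog_valid_rec_header(log, &rhead, 0) == -EIO);
     }
     #endif /* DEBUG */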
5220 
5221 /*
5222  * Read the log from tail to head and process the log records found.
5223  * Handle the two cases where the tail and head are in the same cycle
5224  * and where the active portion of the log wraps around the end of
5225  * the physical log separately.  The pass parameter is passed through
5226  * to the routines called to process the data and is not looked at
5227  * here.
5228  */
5229 STATIC int
5230 xlog_do_recovery_pass(
5231         struct xlog             *log,
5232         xfs_daddr_t             head_blk,
5233         xfs_daddr_t             tail_blk,
5234         int                     pass,
5235         xfs_daddr_t             *first_bad)     /* out: first bad log rec */
5236 {
5237         xlog_rec_header_t       *rhead;
5238         xfs_daddr_t             blk_no, rblk_no;
5239         xfs_daddr_t             rhead_blk;
5240         char                    *offset;
5241         char                    *hbp, *dbp;
5242         int                     error = 0, h_size, h_len;
5243         int                     error2 = 0;
5244         int                     bblks, split_bblks;
5245         int                     hblks, split_hblks, wrapped_hblks;
5246         int                     i;
5247         struct hlist_head       rhash[XLOG_RHASH_SIZE];
5248         LIST_HEAD               (buffer_list);
5249 
5250         ASSERT(head_blk != tail_blk);
5251         blk_no = rhead_blk = tail_blk;
5252 
5253         for (i = 0; i < XLOG_RHASH_SIZE; i++)
5254                 INIT_HLIST_HEAD(&rhash[i]);
5255 
5256         /*
5257          * Read the header of the tail block and get the iclog buffer size from
5258          * h_size.  Use this to tell how many sectors make up the log header.
5259          */
5260         if (xfs_sb_version_haslogv2(&log->l_mp->m_sb)) {
5261                 /*
5262                  * When using variable length iclogs, read first sector of
5263                  * iclog header and extract the header size from it.  Get a
5264                  * new hbp that is the correct size.
5265                  */
5266                 hbp = xlog_alloc_buffer(log, 1);
5267                 if (!hbp)
5268                         return -ENOMEM;
5269 
5270                 error = xlog_bread(log, tail_blk, 1, hbp, &offset);
5271                 if (error)
5272                         goto bread_err1;
5273 
5274                 rhead = (xlog_rec_header_t *)offset;
5275                 error = xlog_valid_rec_header(log, rhead, tail_blk);
5276                 if (error)
5277                         goto bread_err1;
5278 
5279                 /*
5280                  * xfsprogs has a bug where record length is based on lsunit but
5281                  * h_size (iclog size) is hardcoded to 32k. Now that we
5282                  * unconditionally CRC verify the unmount record, this means the
5283                  * log buffer can be too small for the record and cause an
5284                  * overrun.
5285                  *
5286                  * Detect this condition here. Use lsunit for the buffer size as
5287                  * long as this looks like the mkfs case. Otherwise, return an
5288                  * error to avoid a buffer overrun.
5289                  */
5290                 h_size = be32_to_cpu(rhead->h_size);
5291                 h_len = be32_to_cpu(rhead->h_len);
5292                 if (h_len > h_size) {
5293                         if (h_len <= log->l_mp->m_logbsize &&
5294                             be32_to_cpu(rhead->h_num_logops) == 1) {
5295                                 xfs_warn(log->l_mp,
5296                 "invalid iclog size (%d bytes), using lsunit (%d bytes)",
5297                                          h_size, log->l_mp->m_logbsize);
5298                                 h_size = log->l_mp->m_logbsize;
5299                         } else {
5300                                 /* don't leak hbp on the corrupt path */
                                     error = -EFSCORRUPTED;
                                     goto bread_err1;
                             }
5301                 }
5302 
5303                 if ((be32_to_cpu(rhead->h_version) & XLOG_VERSION_2) &&
5304                     (h_size > XLOG_HEADER_CYCLE_SIZE)) {
5305                         hblks = h_size / XLOG_HEADER_CYCLE_SIZE;
5306                         if (h_size % XLOG_HEADER_CYCLE_SIZE)
5307                                 hblks++;
5308                         kmem_free(hbp);
5309                         hbp = xlog_alloc_buffer(log, hblks);
5310                 } else {
5311                         hblks = 1;
5312                 }
5313         } else {
5314                 ASSERT(log->l_sectBBsize == 1);
5315                 hblks = 1;
5316                 hbp = xlog_alloc_buffer(log, 1);
5317                 h_size = XLOG_BIG_RECORD_BSIZE;
5318         }
5319 
5320         if (!hbp)
5321                 return -ENOMEM;
5322         dbp = xlog_alloc_buffer(log, BTOBB(h_size));
5323         if (!dbp) {
5324                 kmem_free(hbp);
5325                 return -ENOMEM;
5326         }
5327 
5329         if (tail_blk > head_blk) {
5330                 /*
5331                  * Perform recovery around the end of the physical log.
5332                  * When the head is not on the same cycle number as the tail,
5333                  * we can't do a sequential recovery.
5334                  */
5335                 while (blk_no < log->l_logBBsize) {
5336                         /*
5337                          * Check for header wrapping around physical end-of-log
5338                          */
5339                         offset = hbp;
5340                         split_hblks = 0;
5341                         wrapped_hblks = 0;
5342                         if (blk_no + hblks <= log->l_logBBsize) {
5343                                 /* Read header in one read */
5344                                 error = xlog_bread(log, blk_no, hblks, hbp,
5345                                                    &offset);
5346                                 if (error)
5347                                         goto bread_err2;
5348                         } else {
5349                                 /* This LR is split across physical log end */
5350                                 if (blk_no != log->l_logBBsize) {
5351                                         /* some data before physical log end */
5352                                         ASSERT(blk_no <= INT_MAX);
5353                                         split_hblks = log->l_logBBsize - (int)blk_no;
5354                                         ASSERT(split_hblks > 0);
5355                                         error = xlog_bread(log, blk_no,
5356                                                            split_hblks, hbp,
5357                                                            &offset);
5358                                         if (error)
5359                                                 goto bread_err2;
5360                                 }
5361 
5362                                 /*
5363                                  * Note: this black magic still works with
5364                                  * large sector sizes (non-512) only because:
5365                                  * - we increased the buffer size originally
5366                                  *   by 1 sector giving us enough extra space
5367                                  *   for the second read;
5368                                  * - the log start is guaranteed to be sector
5369                                  *   aligned;
5370                                  * - we read the log end (LR header start)
5371                                  *   _first_, then the log start (LR header end)
5372                                  *   - order is important.
5373                                  */
5374                                 wrapped_hblks = hblks - split_hblks;
5375                                 error = xlog_bread_noalign(log, 0,
5376                                                 wrapped_hblks,
5377                                                 offset + BBTOB(split_hblks));
5378                                 if (error)
5379                                         goto bread_err2;
5380                         }
5381                         rhead = (xlog_rec_header_t *)offset;
5382                         error = xlog_valid_rec_header(log, rhead,
5383                                                 split_hblks ? blk_no : 0);
5384                         if (error)
5385                                 goto bread_err2;
5386 
5387                         bblks = (int)BTOBB(be32_to_cpu(rhead->h_len));
5388                         blk_no += hblks;
5389 
5390                         /*
5391                          * Read the log record data in multiple reads if it
5392                          * wraps around the end of the log. Note that if the
5393                          * header already wrapped, blk_no could point past the
5394                          * end of the log. The record data is contiguous in
5395                          * that case.
5396                          */
5397                         if (blk_no + bblks <= log->l_logBBsize ||
5398                             blk_no >= log->l_logBBsize) {
5399                                 rblk_no = xlog_wrap_logbno(log, blk_no);
5400                                 error = xlog_bread(log, rblk_no, bblks, dbp,
5401                                                    &offset);
5402                                 if (error)
5403                                         goto bread_err2;
5404                         } else {
5405                                 /* This log record is split across the
5406                                  * physical end of log */
5407                                 offset = dbp;
5408                                 split_bblks = 0;
5409                                 if (blk_no != log->l_logBBsize) {
5410                                         /* some data is before the physical
5411                                          * end of log */
5412                                         ASSERT(!wrapped_hblks);
5413                                         ASSERT(blk_no <= INT_MAX);
5414                                         split_bblks =
5415                                                 log->l_logBBsize - (int)blk_no;
5416                                         ASSERT(split_bblks > 0);
5417                                         error = xlog_bread(log, blk_no,
5418                                                         split_bblks, dbp,
5419                                                         &offset);
5420                                         if (error)
5421                                                 goto bread_err2;
5422                                 }
5423 
5424                                 /*
5425                                  * Note: this black magic still works with
5426                                  * large sector sizes (non-512) only because:
5427                                  * - we increased the buffer size originally
5428                                  *   by 1 sector giving us enough extra space
5429                                  *   for the second read;
5430                                  * - the log start is guaranteed to be sector
5431                                  *   aligned;
5432                                  * - we read the log end (LR header start)
5433                                  *   _first_, then the log start (LR header end)
5434                                  *   - order is important.
5435                                  */
5436                                 error = xlog_bread_noalign(log, 0,
5437                                                 bblks - split_bblks,
5438                                                 offset + BBTOB(split_bblks));
5439                                 if (error)
5440                                         goto bread_err2;
5441                         }
5442 
5443                         error = xlog_recover_process(log, rhash, rhead, offset,
5444                                                      pass, &buffer_list);
5445                         if (error)
5446                                 goto bread_err2;
5447 
5448                         blk_no += bblks;
5449                         rhead_blk = blk_no;
5450                 }
5451 
5452                 ASSERT(blk_no >= log->l_logBBsize);
5453                 blk_no -= log->l_logBBsize;
5454                 rhead_blk = blk_no;
5455         }
5456 
5457         /* read first part of physical log */
5458         while (blk_no < head_blk) {
5459                 error = xlog_bread(log, blk_no, hblks, hbp, &offset);
5460                 if (error)
5461                         goto bread_err2;
5462 
5463                 rhead = (xlog_rec_header_t *)offset;
5464                 error = xlog_valid_rec_header(log, rhead, blk_no);
5465                 if (error)
5466                         goto bread_err2;
5467 
5468                 /* blocks in data section */
5469                 bblks = (int)BTOBB(be32_to_cpu(rhead->h_len));
5470                 error = xlog_bread(log, blk_no+hblks, bblks, dbp,
5471                                    &offset);
5472                 if (error)
5473                         goto bread_err2;
5474 
5475                 error = xlog_recover_process(log, rhash, rhead, offset, pass,
5476                                              &buffer_list);
5477                 if (error)
5478                         goto bread_err2;
5479 
5480                 blk_no += bblks + hblks;
5481                 rhead_blk = blk_no;
5482         }
5483 
5484  bread_err2:
5485         kmem_free(dbp);
5486  bread_err1:
5487         kmem_free(hbp);
5488 
5489         /*
5490          * Submit buffers that have been added from the last record processed,
5491          * regardless of error status.
5492          */
5493         if (!list_empty(&buffer_list))
5494                 error2 = xfs_buf_delwri_submit(&buffer_list);
5495 
5496         if (error && first_bad)
5497                 *first_bad = rhead_blk;
5498 
5499         /*
5500          * Transactions are freed at commit time but transactions without commit
5501          * records on disk are never committed. Free any that may be left in the
5502          * hash table.
5503          */
5504         for (i = 0; i < XLOG_RHASH_SIZE; i++) {
5505                 struct hlist_node       *tmp;
5506                 struct xlog_recover     *trans;
5507 
5508                 hlist_for_each_entry_safe(trans, tmp, &rhash[i], r_list)
5509                         xlog_recover_free_trans(trans);
5510         }
5511 
5512         return error ? error : error2;
5513 }
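
     /*
      * Editorial sketch, not part of the upstream file: the "black magic"
      * two-read sequences above split one logically contiguous region at
      * the physical end of the log.  This hypothetical helper restates the
      * arithmetic used for both the record header and the record body:
      * read the piece before the physical log end at blk_no first, then
      * the remainder from block zero.
      */
     static inline void
     xlog_split_wrapped_region(
             struct xlog     *log,
             xfs_daddr_t     blk_no,         /* start of the region */
             int             nbblks,         /* basic blocks in the region */
             int             *tail_bblks,    /* blocks before physical end */
             int             *wrap_bblks)    /* blocks wrapped to block 0 */
     {
             if (blk_no + nbblks <= log->l_logBBsize) {
                     *tail_bblks = nbblks;   /* fits without wrapping */
                     *wrap_bblks = 0;
                     return;
             }
             ASSERT(blk_no <= INT_MAX);
             *tail_bblks = log->l_logBBsize - (int)blk_no;
             *wrap_bblks = nbblks - *tail_bblks;
     }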
5514 
5515 /*
5516  * Do the recovery of the log.  We actually do this in two passes.
5517  * The two passes are necessary in order to implement the function
5518  * of cancelling a record written into the log.  The first pass
5519  * determines those things which have been cancelled, and the
5520  * second pass replays log items normally except for those which
5521  * have been cancelled.  The handling of the replay and cancellations
5522  * takes place in the log item type specific routines.
5523  *
5524  * The table of items which have cancel records in the log is allocated
5525  * and freed at this level, since only here do we know when all of
5526  * the log recovery has been completed.
5527  */
5528 STATIC int
5529 xlog_do_log_recovery(
5530         struct xlog     *log,
5531         xfs_daddr_t     head_blk,
5532         xfs_daddr_t     tail_blk)
5533 {
5534         int             error, i;
5535 
5536         ASSERT(head_blk != tail_blk);
5537 
5538         /*
5539          * First do a pass to find all of the cancelled buf log items.
5540          * Store them in the buf_cancel_table for use in the second pass.
5541          */
5542         log->l_buf_cancel_table = kmem_zalloc(XLOG_BC_TABLE_SIZE *
5543                                                  sizeof(struct list_head),
5544                                                  0);
5545         for (i = 0; i < XLOG_BC_TABLE_SIZE; i++)
5546                 INIT_LIST_HEAD(&log->l_buf_cancel_table[i]);
5547 
5548         error = xlog_do_recovery_pass(log, head_blk, tail_blk,
5549                                       XLOG_RECOVER_PASS1, NULL);
5550         if (error != 0) {
5551                 kmem_free(log->l_buf_cancel_table);
5552                 log->l_buf_cancel_table = NULL;
5553                 return error;
5554         }
5555         /*
5556          * Then do a second pass to actually recover the items in the log.
5557          * When it is complete free the table of buf cancel items.
5558          */
5559         error = xlog_do_recovery_pass(log, head_blk, tail_blk,
5560                                       XLOG_RECOVER_PASS2, NULL);
5561 #ifdef DEBUG
5562         if (!error) {
5565                 for (i = 0; i < XLOG_BC_TABLE_SIZE; i++)
5566                         ASSERT(list_empty(&log->l_buf_cancel_table[i]));
5567         }
5568 #endif  /* DEBUG */
5569 
5570         kmem_free(log->l_buf_cancel_table);
5571         log->l_buf_cancel_table = NULL;
5572 
5573         return error;
5574 }
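
     /*
      * Editorial sketch, not part of the upstream file: pass 1 fills
      * l_buf_cancel_table and pass 2 consults it before replaying buffers
      * (see xlog_peek_buffer_cancelled earlier in this file).  Assuming
      * the XLOG_BUF_CANCEL_BUCKET macro and struct xfs_buf_cancel defined
      * earlier, a minimal lookup over the table allocated above would
      * look like this.
      */
     static inline struct xfs_buf_cancel *
     xlog_find_buf_cancel(
             struct xlog             *log,
             xfs_daddr_t             blkno,
             uint                    len)
     {
             struct list_head        *bucket;
             struct xfs_buf_cancel   *bcp;

             bucket = XLOG_BUF_CANCEL_BUCKET(log, blkno);
             list_for_each_entry(bcp, bucket, bc_list) {
                     if (bcp->bc_blkno == blkno && bcp->bc_len == len)
                             return bcp;     /* this range was cancelled */
             }
             return NULL;
     }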
5575 
5576 /*
5577  * Do the actual recovery
5578  */
5579 STATIC int
5580 xlog_do_recover(
5581         struct xlog     *log,
5582         xfs_daddr_t     head_blk,
5583         xfs_daddr_t     tail_blk)
5584 {
5585         struct xfs_mount *mp = log->l_mp;
5586         int             error;
5587         xfs_buf_t       *bp;
5588         xfs_sb_t        *sbp;
5589 
5590         trace_xfs_log_recover(log, head_blk, tail_blk);
5591 
5592         /*
5593          * First replay the images in the log.
5594          */
5595         error = xlog_do_log_recovery(log, head_blk, tail_blk);
5596         if (error)
5597                 return error;
5598 
5599         /*
5600          * If IO errors happened during recovery, bail out.
5601          */
5602         if (XFS_FORCED_SHUTDOWN(mp))
5603                 return -EIO;
5605 
5606         /*
5607          * We now update the tail_lsn since much of the recovery has completed
5608          * and there may be space available to use.  If there were no extent
5609          * frees or iunlinks, we can free the entire log and set the tail_lsn to
5610          * be the last_sync_lsn.  This was set in xlog_find_tail to be the
5611          * lsn of the last known good LR on disk.  If there are extent frees
5612          * or iunlinks they will have some entries in the AIL; so we look at
5613          * the AIL to determine how to set the tail_lsn.
5614          */
5615         xlog_assign_tail_lsn(mp);
5616 
5617         /*
5618          * Now that we've finished replaying all buffer and inode
5619          * updates, re-read in the superblock and reverify it.
5620          */
5621         bp = xfs_getsb(mp);
5622         bp->b_flags &= ~(XBF_DONE | XBF_ASYNC);
5623         ASSERT(!(bp->b_flags & XBF_WRITE));
5624         bp->b_flags |= XBF_READ;
5625         bp->b_ops = &xfs_sb_buf_ops;
5626 
5627         error = xfs_buf_submit(bp);
5628         if (error) {
5629                 if (!XFS_FORCED_SHUTDOWN(mp)) {
5630                         xfs_buf_ioerror_alert(bp, __func__);
5631                         ASSERT(0);
5632                 }
5633                 xfs_buf_relse(bp);
5634                 return error;
5635         }
5636 
5637         /* Convert superblock from on-disk format */
5638         sbp = &mp->m_sb;
5639         xfs_sb_from_disk(sbp, XFS_BUF_TO_SBP(bp));
5640         xfs_buf_relse(bp);
5641 
5642         /* re-initialise in-core superblock and geometry structures */
5643         xfs_reinit_percpu_counters(mp);
5644         error = xfs_initialize_perag(mp, sbp->sb_agcount, &mp->m_maxagi);
5645         if (error) {
5646                 xfs_warn(mp, "Failed post-recovery per-ag init: %d", error);
5647                 return error;
5648         }
5649         mp->m_alloc_set_aside = xfs_alloc_set_aside(mp);
5650 
5651         xlog_recover_check_summary(log);
5652 
5653         /* Normal transactions can now occur */
5654         log->l_flags &= ~XLOG_ACTIVE_RECOVERY;
5655         return 0;
5656 }
5657 
5658 /*
5659  * Perform recovery and re-initialize some log variables in xlog_find_tail.
5660  *
5661  * Return error or zero.
5662  */
5663 int
5664 xlog_recover(
5665         struct xlog     *log)
5666 {
5667         xfs_daddr_t     head_blk, tail_blk;
5668         int             error;
5669 
5670         /* find the tail of the log */
5671         error = xlog_find_tail(log, &head_blk, &tail_blk);
5672         if (error)
5673                 return error;
5674 
5675         /*
5676          * The superblock was read before the log was available and thus the LSN
5677          * could not be verified. Check the superblock LSN against the current
5678          * LSN now that it's known.
5679          */
5680         if (xfs_sb_version_hascrc(&log->l_mp->m_sb) &&
5681             !xfs_log_check_lsn(log->l_mp, log->l_mp->m_sb.sb_lsn))
5682                 return -EINVAL;
5683 
5684         if (tail_blk != head_blk) {
5685                 /* There used to be a comment here:
5686                  *
5687                  * disallow recovery on read-only mounts.  note -- mount
5688                  * checks for ENOSPC and turns it into an intelligent
5689                  * error message.
5690                  * ...but this is no longer true.  Now, unless you specify
5691                  * NORECOVERY (in which case this function would never be
5692                  * called), we just go ahead and recover.  We do this all
5693                  * under the vfs layer, so we can get away with it unless
5694                  * the device itself is read-only, in which case we fail.
5695                  */
5696                 error = xfs_dev_is_read_only(log->l_mp, "recovery");
5697                 if (error)
5698                         return error;
5699 
5700                 /*
5701                  * Version 5 superblock log feature mask validation. We know the
5702                  * log is dirty so check if there are any unknown log features
5703                  * in what we need to recover. If there are unknown features
5704  * (e.g. unsupported transactions), then simply reject the
5705                  * attempt at recovery before touching anything.
5706                  */
5707                 if (XFS_SB_VERSION_NUM(&log->l_mp->m_sb) == XFS_SB_VERSION_5 &&
5708                     xfs_sb_has_incompat_log_feature(&log->l_mp->m_sb,
5709                                         XFS_SB_FEAT_INCOMPAT_LOG_UNKNOWN)) {
5710                         xfs_warn(log->l_mp,
5711 "Superblock has unknown incompatible log features (0x%x) enabled.",
5712                                 (log->l_mp->m_sb.sb_features_log_incompat &
5713                                         XFS_SB_FEAT_INCOMPAT_LOG_UNKNOWN));
5714                         xfs_warn(log->l_mp,
5715 "The log can not be fully and/or safely recovered by this kernel.");
5716                         xfs_warn(log->l_mp,
5717 "Please recover the log on a kernel that supports the unknown features.");
5718                         return -EINVAL;
5719                 }
5720 
5721                 /*
5722                  * Delay log recovery if the debug hook is set. This is debug
5723  * instrumentation to coordinate simulation of I/O failures with
5724                  * log recovery.
5725                  */
5726                 if (xfs_globals.log_recovery_delay) {
5727                         xfs_notice(log->l_mp,
5728                                 "Delaying log recovery for %d seconds.",
5729                                 xfs_globals.log_recovery_delay);
5730                         msleep(xfs_globals.log_recovery_delay * 1000);
5731                 }
5732 
5733                 xfs_notice(log->l_mp, "Starting recovery (logdev: %s)",
5734                                 log->l_mp->m_logname ? log->l_mp->m_logname
5735                                                      : "internal");
5736 
5737                 error = xlog_do_recover(log, head_blk, tail_blk);
5738                 log->l_flags |= XLOG_RECOVERY_NEEDED;
5739         }
5740         return error;
5741 }
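
     /*
      * Editorial sketch, not part of the upstream file: the incompat log
      * feature gate above is a pure mask test, restated here as a
      * hypothetical predicate.  Any bit set outside the features this
      * kernel can replay makes a dirty log unrecoverable here.
      */
     static inline bool
     xlog_dirty_log_has_unknown_features(
             struct xfs_sb   *sbp)
     {
             return XFS_SB_VERSION_NUM(sbp) == XFS_SB_VERSION_5 &&
                    (sbp->sb_features_log_incompat &
                     XFS_SB_FEAT_INCOMPAT_LOG_UNKNOWN) != 0;
     }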
5742 
5743 /*
5744  * In the first part of recovery we replay inodes and buffers and build
5745  * up the list of extent free items which need to be processed.  Here
5746  * we process the extent free items and clean up the on disk unlinked
5747  * inode lists.  This is separated from the first part of recovery so
5748  * that the root and real-time bitmap inodes can be read in from disk in
5749  * between the two stages.  This is necessary so that we can free space
5750  * in the real-time portion of the file system.
5751  */
5752 int
5753 xlog_recover_finish(
5754         struct xlog     *log)
5755 {
5756         /*
5757          * Now we're ready to do the transactions needed for the
5758          * rest of recovery.  Start with completing all the extent
5759          * free intent records and then process the unlinked inode
5760          * lists.  At this point, we essentially run in normal mode
5761          * except that we're still performing recovery actions
5762          * rather than accepting new requests.
5763          */
5764         if (log->l_flags & XLOG_RECOVERY_NEEDED) {
5765                 int     error;

5766                 error = xlog_recover_process_intents(log);
5767                 if (error) {
5768                         xfs_alert(log->l_mp, "Failed to recover intents");
5769                         return error;
5770                 }
5771 
5772                 /*
5773                  * Sync the log to get all the intents out of the AIL.
5774                  * This isn't absolutely necessary, but it helps in
5775                  * case the unlink transactions would have problems
5776                  * pushing the intents out of the way.
5777                  */
5778                 xfs_log_force(log->l_mp, XFS_LOG_SYNC);
5779 
5780                 xlog_recover_process_iunlinks(log);
5781 
5782                 xlog_recover_check_summary(log);
5783 
5784                 xfs_notice(log->l_mp, "Ending recovery (logdev: %s)",
5785                                 log->l_mp->m_logname ? log->l_mp->m_logname
5786                                                      : "internal");
5787                 log->l_flags &= ~XLOG_RECOVERY_NEEDED;
5788         } else {
5789                 xfs_info(log->l_mp, "Ending clean mount");
5790         }
5791         return 0;
5792 }
5793 
5794 void
5795 xlog_recover_cancel(
5796         struct xlog     *log)
5797 {
5798         if (log->l_flags & XLOG_RECOVERY_NEEDED)
5799                 xlog_recover_cancel_intents(log);
5800 }
5801 
5802 #if defined(DEBUG)
5803 /*
5804  * Read all of the agf and agi counters and sum them.  Note that no
5805  * consistency check against the superblock counters is performed here.
5806  */
5807 STATIC void
5808 xlog_recover_check_summary(
5809         struct xlog     *log)
5810 {
5811         xfs_mount_t     *mp;
5812         xfs_agf_t       *agfp;
5813         xfs_buf_t       *agfbp;
5814         xfs_buf_t       *agibp;
5815         xfs_agnumber_t  agno;
5816         uint64_t        freeblks;
5817         uint64_t        itotal;
5818         uint64_t        ifree;
5819         int             error;
5820 
5821         mp = log->l_mp;
5822 
5823         freeblks = 0LL;
5824         itotal = 0LL;
5825         ifree = 0LL;
5826         for (agno = 0; agno < mp->m_sb.sb_agcount; agno++) {
5827                 error = xfs_read_agf(mp, NULL, agno, 0, &agfbp);
5828                 if (error) {
5829                         xfs_alert(mp, "%s agf read failed agno %d error %d",
5830                                                 __func__, agno, error);
5831                 } else {
5832                         agfp = XFS_BUF_TO_AGF(agfbp);
5833                         freeblks += be32_to_cpu(agfp->agf_freeblks) +
5834                                     be32_to_cpu(agfp->agf_flcount);
5835                         xfs_buf_relse(agfbp);
5836                 }
5837 
5838                 error = xfs_read_agi(mp, NULL, agno, &agibp);
5839                 if (error) {
5840                         xfs_alert(mp, "%s agi read failed agno %d error %d",
5841                                                 __func__, agno, error);
5842                 } else {
5843                         struct xfs_agi  *agi = XFS_BUF_TO_AGI(agibp);
5844 
5845                         itotal += be32_to_cpu(agi->agi_count);
5846                         ifree += be32_to_cpu(agi->agi_freecount);
5847                         xfs_buf_relse(agibp);
5848                 }
5849         }
5850 }
5851 #endif /* DEBUG */
