root/fs/jbd2/journal.c


DEFINITIONS

This source file includes the following definitions.
  1. __jbd2_debug
  2. jbd2_verify_csum_type
  3. jbd2_superblock_csum
  4. commit_timeout
  5. kjournald2
  6. jbd2_journal_start_thread
  7. journal_kill_thread
  8. jbd2_journal_write_metadata_buffer
  9. __jbd2_log_start_commit
  10. jbd2_log_start_commit
  11. __jbd2_journal_force_commit
  12. jbd2_journal_force_commit_nested
  13. jbd2_journal_force_commit
  14. jbd2_journal_start_commit
  15. jbd2_trans_will_send_data_barrier
  16. jbd2_log_wait_commit
  17. jbd2_transaction_committed
  18. jbd2_complete_transaction
  19. jbd2_journal_next_log_block
  20. jbd2_journal_bmap
  21. jbd2_journal_get_descriptor_buffer
  22. jbd2_descriptor_block_csum_set
  23. jbd2_journal_get_log_tail
  24. __jbd2_update_log_tail
  25. jbd2_update_log_tail
  26. jbd2_seq_info_start
  27. jbd2_seq_info_next
  28. jbd2_seq_info_show
  29. jbd2_seq_info_stop
  30. jbd2_seq_info_open
  31. jbd2_seq_info_release
  32. jbd2_stats_proc_init
  33. jbd2_stats_proc_exit
  34. journal_init_common
  35. jbd2_journal_init_dev
  36. jbd2_journal_init_inode
  37. journal_fail_superblock
  38. journal_reset
  39. jbd2_write_superblock
  40. jbd2_journal_update_sb_log_tail
  41. jbd2_mark_journal_empty
  42. jbd2_journal_update_sb_errno
  43. journal_get_superblock
  44. load_superblock
  45. jbd2_journal_load
  46. jbd2_journal_destroy
  47. jbd2_journal_check_used_features
  48. jbd2_journal_check_available_features
  49. jbd2_journal_set_features
  50. jbd2_journal_clear_features
  51. jbd2_journal_flush
  52. jbd2_journal_wipe
  53. __jbd2_journal_abort_hard
  54. __journal_abort_soft
  55. jbd2_journal_abort
  56. jbd2_journal_errno
  57. jbd2_journal_clear_err
  58. jbd2_journal_ack_err
  59. jbd2_journal_blocks_per_page
  60. journal_tag_bytes
  61. jbd2_journal_destroy_slabs
  62. jbd2_journal_create_slab
  63. get_slab
  64. jbd2_alloc
  65. jbd2_free
  66. jbd2_journal_init_journal_head_cache
  67. jbd2_journal_destroy_journal_head_cache
  68. journal_alloc_journal_head
  69. journal_free_journal_head
  70. jbd2_journal_add_journal_head
  71. jbd2_journal_grab_journal_head
  72. __journal_remove_journal_head
  73. jbd2_journal_put_journal_head
  74. jbd2_journal_init_jbd_inode
  75. jbd2_journal_release_jbd_inode
  76. jbd2_create_jbd_stats_proc_entry
  77. jbd2_remove_jbd_stats_proc_entry
  78. jbd2_journal_init_inode_cache
  79. jbd2_journal_init_handle_cache
  80. jbd2_journal_destroy_inode_cache
  81. jbd2_journal_destroy_handle_cache
  82. journal_init_caches
  83. jbd2_journal_destroy_caches
  84. journal_init
  85. journal_exit

   1 // SPDX-License-Identifier: GPL-2.0+
   2 /*
   3  * linux/fs/jbd2/journal.c
   4  *
   5  * Written by Stephen C. Tweedie <sct@redhat.com>, 1998
   6  *
   7  * Copyright 1998 Red Hat corp --- All Rights Reserved
   8  *
   9  * Generic filesystem journal-writing code; part of the ext2fs
  10  * journaling system.
  11  *
  12  * This file manages journals: areas of disk reserved for logging
  13  * transactional updates.  This includes the kernel journaling thread
  14  * which is responsible for scheduling updates to the log.
  15  *
  16  * We do not actually manage the physical storage of the journal in this
  17  * file: that is left to a per-journal policy function, which allows us
  18  * to store the journal within a filesystem-specified area for ext2
  19  * journaling (ext2 can use a reserved inode for storing the log).
  20  */
  21 
  22 #include <linux/module.h>
  23 #include <linux/time.h>
  24 #include <linux/fs.h>
  25 #include <linux/jbd2.h>
  26 #include <linux/errno.h>
  27 #include <linux/slab.h>
  28 #include <linux/init.h>
  29 #include <linux/mm.h>
  30 #include <linux/freezer.h>
  31 #include <linux/pagemap.h>
  32 #include <linux/kthread.h>
  33 #include <linux/poison.h>
  34 #include <linux/proc_fs.h>
  35 #include <linux/seq_file.h>
  36 #include <linux/math64.h>
  37 #include <linux/hash.h>
  38 #include <linux/log2.h>
  39 #include <linux/vmalloc.h>
  40 #include <linux/backing-dev.h>
  41 #include <linux/bitops.h>
  42 #include <linux/ratelimit.h>
  43 #include <linux/sched/mm.h>
  44 
  45 #define CREATE_TRACE_POINTS
  46 #include <trace/events/jbd2.h>
  47 
  48 #include <linux/uaccess.h>
  49 #include <asm/page.h>
  50 
  51 #ifdef CONFIG_JBD2_DEBUG
  52 ushort jbd2_journal_enable_debug __read_mostly;
  53 EXPORT_SYMBOL(jbd2_journal_enable_debug);
  54 
  55 module_param_named(jbd2_debug, jbd2_journal_enable_debug, ushort, 0644);
  56 MODULE_PARM_DESC(jbd2_debug, "Debugging level for jbd2");
  57 #endif
  58 
  59 EXPORT_SYMBOL(jbd2_journal_extend);
  60 EXPORT_SYMBOL(jbd2_journal_stop);
  61 EXPORT_SYMBOL(jbd2_journal_lock_updates);
  62 EXPORT_SYMBOL(jbd2_journal_unlock_updates);
  63 EXPORT_SYMBOL(jbd2_journal_get_write_access);
  64 EXPORT_SYMBOL(jbd2_journal_get_create_access);
  65 EXPORT_SYMBOL(jbd2_journal_get_undo_access);
  66 EXPORT_SYMBOL(jbd2_journal_set_triggers);
  67 EXPORT_SYMBOL(jbd2_journal_dirty_metadata);
  68 EXPORT_SYMBOL(jbd2_journal_forget);
  69 EXPORT_SYMBOL(jbd2_journal_flush);
  70 EXPORT_SYMBOL(jbd2_journal_revoke);
  71 
  72 EXPORT_SYMBOL(jbd2_journal_init_dev);
  73 EXPORT_SYMBOL(jbd2_journal_init_inode);
  74 EXPORT_SYMBOL(jbd2_journal_check_used_features);
  75 EXPORT_SYMBOL(jbd2_journal_check_available_features);
  76 EXPORT_SYMBOL(jbd2_journal_set_features);
  77 EXPORT_SYMBOL(jbd2_journal_load);
  78 EXPORT_SYMBOL(jbd2_journal_destroy);
  79 EXPORT_SYMBOL(jbd2_journal_abort);
  80 EXPORT_SYMBOL(jbd2_journal_errno);
  81 EXPORT_SYMBOL(jbd2_journal_ack_err);
  82 EXPORT_SYMBOL(jbd2_journal_clear_err);
  83 EXPORT_SYMBOL(jbd2_log_wait_commit);
  84 EXPORT_SYMBOL(jbd2_log_start_commit);
  85 EXPORT_SYMBOL(jbd2_journal_start_commit);
  86 EXPORT_SYMBOL(jbd2_journal_force_commit_nested);
  87 EXPORT_SYMBOL(jbd2_journal_wipe);
  88 EXPORT_SYMBOL(jbd2_journal_blocks_per_page);
  89 EXPORT_SYMBOL(jbd2_journal_invalidatepage);
  90 EXPORT_SYMBOL(jbd2_journal_try_to_free_buffers);
  91 EXPORT_SYMBOL(jbd2_journal_force_commit);
  92 EXPORT_SYMBOL(jbd2_journal_inode_ranged_write);
  93 EXPORT_SYMBOL(jbd2_journal_inode_ranged_wait);
  94 EXPORT_SYMBOL(jbd2_journal_init_jbd_inode);
  95 EXPORT_SYMBOL(jbd2_journal_release_jbd_inode);
  96 EXPORT_SYMBOL(jbd2_journal_begin_ordered_truncate);
  97 EXPORT_SYMBOL(jbd2_inode_cache);
  98 
  99 static void __journal_abort_soft (journal_t *journal, int errno);
 100 static int jbd2_journal_create_slab(size_t slab_size);
 101 
 102 #ifdef CONFIG_JBD2_DEBUG
 103 void __jbd2_debug(int level, const char *file, const char *func,
 104                   unsigned int line, const char *fmt, ...)
 105 {
 106         struct va_format vaf;
 107         va_list args;
 108 
 109         if (level > jbd2_journal_enable_debug)
 110                 return;
 111         va_start(args, fmt);
 112         vaf.fmt = fmt;
 113         vaf.va = &args;
 114         printk(KERN_DEBUG "%s: (%s, %u): %pV", file, func, line, &vaf);
 115         va_end(args);
 116 }
 117 EXPORT_SYMBOL(__jbd2_debug);
 118 #endif
 119 
 120 /* Checksumming functions */
 121 static int jbd2_verify_csum_type(journal_t *j, journal_superblock_t *sb)
 122 {
 123         if (!jbd2_journal_has_csum_v2or3_feature(j))
 124                 return 1;
 125 
 126         return sb->s_checksum_type == JBD2_CRC32C_CHKSUM;
 127 }
 128 
 129 static __be32 jbd2_superblock_csum(journal_t *j, journal_superblock_t *sb)
 130 {
 131         __u32 csum;
 132         __be32 old_csum;
 133 
 134         old_csum = sb->s_checksum;
 135         sb->s_checksum = 0;
 136         csum = jbd2_chksum(j, ~0, (char *)sb, sizeof(journal_superblock_t));
 137         sb->s_checksum = old_csum;
 138 
 139         return cpu_to_be32(csum);
 140 }
 141 
 142 /*
 143  * Helper function used to manage commit timeouts
 144  */
 145 
 146 static void commit_timeout(struct timer_list *t)
 147 {
 148         journal_t *journal = from_timer(journal, t, j_commit_timer);
 149 
 150         wake_up_process(journal->j_task);
 151 }
 152 
 153 /*
 154  * kjournald2: The main thread function used to manage a logging device
 155  * journal.
 156  *
 157  * This kernel thread is responsible for two things:
 158  *
 159  * 1) COMMIT:  Every so often we need to commit the current state of the
 160  *    filesystem to disk.  The journal thread is responsible for writing
 161  *    all of the metadata buffers to disk.
 162  *
 163  * 2) CHECKPOINT: We cannot reuse a used section of the log file until all
 164  *    of the data in that part of the log has been rewritten elsewhere on
 165  *    the disk.  Flushing these old buffers to reclaim space in the log is
 166  *    known as checkpointing, and this thread is responsible for that job.
 167  */
 168 
 169 static int kjournald2(void *arg)
 170 {
 171         journal_t *journal = arg;
 172         transaction_t *transaction;
 173 
 174         /*
 175          * Set up an interval timer which can be used to trigger a commit wakeup
 176          * after the commit interval expires
 177          */
 178         timer_setup(&journal->j_commit_timer, commit_timeout, 0);
 179 
 180         set_freezable();
 181 
 182         /* Record that the journal thread is running */
 183         journal->j_task = current;
 184         wake_up(&journal->j_wait_done_commit);
 185 
 186         /*
 187          * Make sure that no allocations from this kernel thread will ever
 188          * recurse to the fs layer because we are responsible for the
 189          * transaction commit and any fs involvement might get stuck waiting for
 190          * the transaction commit.
 191          */
 192         memalloc_nofs_save();
 193 
 194         /*
 195          * And now, wait forever for commit wakeup events.
 196          */
 197         write_lock(&journal->j_state_lock);
 198 
 199 loop:
 200         if (journal->j_flags & JBD2_UNMOUNT)
 201                 goto end_loop;
 202 
 203         jbd_debug(1, "commit_sequence=%u, commit_request=%u\n",
 204                 journal->j_commit_sequence, journal->j_commit_request);
 205 
 206         if (journal->j_commit_sequence != journal->j_commit_request) {
 207                 jbd_debug(1, "OK, requests differ\n");
 208                 write_unlock(&journal->j_state_lock);
 209                 del_timer_sync(&journal->j_commit_timer);
 210                 jbd2_journal_commit_transaction(journal);
 211                 write_lock(&journal->j_state_lock);
 212                 goto loop;
 213         }
 214 
 215         wake_up(&journal->j_wait_done_commit);
 216         if (freezing(current)) {
 217                 /*
 218                  * The simpler the better. Flushing journal isn't a
 219                  * good idea, because that depends on threads that may
 220                  * be already stopped.
 221                  */
 222                 jbd_debug(1, "Now suspending kjournald2\n");
 223                 write_unlock(&journal->j_state_lock);
 224                 try_to_freeze();
 225                 write_lock(&journal->j_state_lock);
 226         } else {
 227                 /*
 228                  * We assume on resume that commits are already there,
 229                  * so we don't sleep
 230                  */
 231                 DEFINE_WAIT(wait);
 232                 int should_sleep = 1;
 233 
 234                 prepare_to_wait(&journal->j_wait_commit, &wait,
 235                                 TASK_INTERRUPTIBLE);
 236                 if (journal->j_commit_sequence != journal->j_commit_request)
 237                         should_sleep = 0;
 238                 transaction = journal->j_running_transaction;
 239                 if (transaction && time_after_eq(jiffies,
 240                                                 transaction->t_expires))
 241                         should_sleep = 0;
 242                 if (journal->j_flags & JBD2_UNMOUNT)
 243                         should_sleep = 0;
 244                 if (should_sleep) {
 245                         write_unlock(&journal->j_state_lock);
 246                         schedule();
 247                         write_lock(&journal->j_state_lock);
 248                 }
 249                 finish_wait(&journal->j_wait_commit, &wait);
 250         }
 251 
 252         jbd_debug(1, "kjournald2 wakes\n");
 253 
 254         /*
 255          * Were we woken up by a commit wakeup event?
 256          */
 257         transaction = journal->j_running_transaction;
 258         if (transaction && time_after_eq(jiffies, transaction->t_expires)) {
 259                 journal->j_commit_request = transaction->t_tid;
 260                 jbd_debug(1, "woke because of timeout\n");
 261         }
 262         goto loop;
 263 
 264 end_loop:
 265         del_timer_sync(&journal->j_commit_timer);
 266         journal->j_task = NULL;
 267         wake_up(&journal->j_wait_done_commit);
 268         jbd_debug(1, "Journal thread exiting.\n");
 269         write_unlock(&journal->j_state_lock);
 270         return 0;
 271 }
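
/*
 * Illustrative sketch (not part of this file): how a client filesystem might
 * tune the knobs that kjournald2 and the commit path honour.  The fields are
 * real journal_t members; the values below and the helper name are only
 * examples; a real filesystem derives them from its mount options.
 */
static void example_tune_journal(journal_t *journal)
{
        write_lock(&journal->j_state_lock);
        journal->j_commit_interval = 5 * HZ;    /* commit at least every 5s */
        journal->j_min_batch_time = 0;          /* microseconds */
        journal->j_max_batch_time = 15000;      /* microseconds */
        write_unlock(&journal->j_state_lock);
}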
 272 
 273 static int jbd2_journal_start_thread(journal_t *journal)
 274 {
 275         struct task_struct *t;
 276 
 277         t = kthread_run(kjournald2, journal, "jbd2/%s",
 278                         journal->j_devname);
 279         if (IS_ERR(t))
 280                 return PTR_ERR(t);
 281 
 282         wait_event(journal->j_wait_done_commit, journal->j_task != NULL);
 283         return 0;
 284 }
 285 
 286 static void journal_kill_thread(journal_t *journal)
 287 {
 288         write_lock(&journal->j_state_lock);
 289         journal->j_flags |= JBD2_UNMOUNT;
 290 
 291         while (journal->j_task) {
 292                 write_unlock(&journal->j_state_lock);
 293                 wake_up(&journal->j_wait_commit);
 294                 wait_event(journal->j_wait_done_commit, journal->j_task == NULL);
 295                 write_lock(&journal->j_state_lock);
 296         }
 297         write_unlock(&journal->j_state_lock);
 298 }
 299 
 300 /*
 301  * jbd2_journal_write_metadata_buffer: write a metadata buffer to the journal.
 302  *
 303  * Writes a metadata buffer to a given disk block.  The actual IO is not
 304  * performed but a new buffer_head is constructed which labels the data
 305  * to be written with the correct destination disk block.
 306  *
 307  * Any magic-number escaping which needs to be done will cause a
 308  * copy-out here.  If the buffer happens to start with the
 309  * JBD2_MAGIC_NUMBER, then we can't write it to the log directly: the
 310  * magic number is only written to the log for descriptor blocks.  In
 311  * this case, we copy the data and replace the first word with 0, and we
 312  * return a result code which indicates that this buffer needs to be
 313  * marked as an escaped buffer in the corresponding log descriptor
 314  * block.  The missing word can then be restored when the block is read
 315  * during recovery.
 316  *
 317  * If the source buffer has already been modified by a new transaction
 318  * since we took the last commit snapshot, we use the frozen copy of
 319  * that data for IO. If we end up using the existing buffer_head's data
 320  * for the write, then we have to make sure nobody modifies it while the
 321  * IO is in progress. do_get_write_access() handles this.
 322  *
 323  * The buffer_head to be used for the IO is returned via the bh_out parameter.
 324  *
 325  *
 326  * Return value:
 327  *  <0: Error
 328  * >=0: Finished OK
 329  *
 330  * On success:
 331  * Bit 0 set == escape performed on the data
 332  * Bit 1 set == buffer copy-out performed (kfree the data after IO)
 333  */
 334 
 335 int jbd2_journal_write_metadata_buffer(transaction_t *transaction,
 336                                   struct journal_head  *jh_in,
 337                                   struct buffer_head **bh_out,
 338                                   sector_t blocknr)
 339 {
 340         int need_copy_out = 0;
 341         int done_copy_out = 0;
 342         int do_escape = 0;
 343         char *mapped_data;
 344         struct buffer_head *new_bh;
 345         struct page *new_page;
 346         unsigned int new_offset;
 347         struct buffer_head *bh_in = jh2bh(jh_in);
 348         journal_t *journal = transaction->t_journal;
 349 
 350         /*
 351          * The buffer really shouldn't be locked: only the current committing
 352          * transaction is allowed to write it, so nobody else is allowed
 353          * to do any IO.
 354          *
 355          * akpm: except if we're journalling data, and write() output is
 356          * also part of a shared mapping, and another thread has
 357          * decided to launch a writepage() against this buffer.
 358          */
 359         J_ASSERT_BH(bh_in, buffer_jbddirty(bh_in));
 360 
 361         new_bh = alloc_buffer_head(GFP_NOFS|__GFP_NOFAIL);
 362 
 363         /* keep subsequent assertions sane */
 364         atomic_set(&new_bh->b_count, 1);
 365 
 366         jbd_lock_bh_state(bh_in);
 367 repeat:
 368         /*
 369          * If a new transaction has already done a buffer copy-out, then
 370          * we use that version of the data for the commit.
 371          */
 372         if (jh_in->b_frozen_data) {
 373                 done_copy_out = 1;
 374                 new_page = virt_to_page(jh_in->b_frozen_data);
 375                 new_offset = offset_in_page(jh_in->b_frozen_data);
 376         } else {
 377                 new_page = jh2bh(jh_in)->b_page;
 378                 new_offset = offset_in_page(jh2bh(jh_in)->b_data);
 379         }
 380 
 381         mapped_data = kmap_atomic(new_page);
 382         /*
 383          * Fire data frozen trigger if data already wasn't frozen.  Do this
 384          * before checking for escaping, as the trigger may modify the magic
 385          * offset.  If a copy-out happens afterwards, it will have the correct
 386          * data in the buffer.
 387          */
 388         if (!done_copy_out)
 389                 jbd2_buffer_frozen_trigger(jh_in, mapped_data + new_offset,
 390                                            jh_in->b_triggers);
 391 
 392         /*
 393          * Check for escaping
 394          */
 395         if (*((__be32 *)(mapped_data + new_offset)) ==
 396                                 cpu_to_be32(JBD2_MAGIC_NUMBER)) {
 397                 need_copy_out = 1;
 398                 do_escape = 1;
 399         }
 400         kunmap_atomic(mapped_data);
 401 
 402         /*
 403          * Do we need to do a data copy?
 404          */
 405         if (need_copy_out && !done_copy_out) {
 406                 char *tmp;
 407 
 408                 jbd_unlock_bh_state(bh_in);
 409                 tmp = jbd2_alloc(bh_in->b_size, GFP_NOFS);
 410                 if (!tmp) {
 411                         brelse(new_bh);
 412                         return -ENOMEM;
 413                 }
 414                 jbd_lock_bh_state(bh_in);
 415                 if (jh_in->b_frozen_data) {
 416                         jbd2_free(tmp, bh_in->b_size);
 417                         goto repeat;
 418                 }
 419 
 420                 jh_in->b_frozen_data = tmp;
 421                 mapped_data = kmap_atomic(new_page);
 422                 memcpy(tmp, mapped_data + new_offset, bh_in->b_size);
 423                 kunmap_atomic(mapped_data);
 424 
 425                 new_page = virt_to_page(tmp);
 426                 new_offset = offset_in_page(tmp);
 427                 done_copy_out = 1;
 428 
 429                 /*
 430                  * This isn't strictly necessary, as we're using frozen
 431                  * data for the escaping, but it keeps consistency with
 432                  * b_frozen_data usage.
 433                  */
 434                 jh_in->b_frozen_triggers = jh_in->b_triggers;
 435         }
 436 
 437         /*
 438          * Did we need to do escaping?  Now that we've done all the
 439          * copying, we can finally do so.
 440          */
 441         if (do_escape) {
 442                 mapped_data = kmap_atomic(new_page);
 443                 *((unsigned int *)(mapped_data + new_offset)) = 0;
 444                 kunmap_atomic(mapped_data);
 445         }
 446 
 447         set_bh_page(new_bh, new_page, new_offset);
 448         new_bh->b_size = bh_in->b_size;
 449         new_bh->b_bdev = journal->j_dev;
 450         new_bh->b_blocknr = blocknr;
 451         new_bh->b_private = bh_in;
 452         set_buffer_mapped(new_bh);
 453         set_buffer_dirty(new_bh);
 454 
 455         *bh_out = new_bh;
 456 
 457         /*
 458          * The to-be-written buffer needs to get moved to the io queue,
 459          * and the original buffer whose contents we are shadowing or
 460          * copying is moved to the transaction's shadow queue.
 461          */
 462         JBUFFER_TRACE(jh_in, "file as BJ_Shadow");
 463         spin_lock(&journal->j_list_lock);
 464         __jbd2_journal_file_buffer(jh_in, transaction, BJ_Shadow);
 465         spin_unlock(&journal->j_list_lock);
 466         set_buffer_shadow(bh_in);
 467         jbd_unlock_bh_state(bh_in);
 468 
 469         return do_escape | (done_copy_out << 1);
 470 }
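
/*
 * Illustrative sketch (not part of this file): how a commit-path caller might
 * consume the return value of jbd2_journal_write_metadata_buffer().  Bit 0
 * asks for the JBD2_FLAG_ESCAPE tag flag; bit 1 only records that a private
 * copy (b_frozen_data) was made and must be freed once the IO completes.
 * The helper and variable names are hypothetical.
 */
static int example_log_one_buffer(transaction_t *transaction,
                                  struct journal_head *jh, sector_t blocknr,
                                  struct buffer_head **wbuf, int *tag_flag)
{
        int flags = jbd2_journal_write_metadata_buffer(transaction, jh,
                                                       wbuf, blocknr);
        if (flags < 0)
                return flags;                   /* e.g. -ENOMEM */

        *tag_flag = 0;
        if (flags & 1)
                *tag_flag |= JBD2_FLAG_ESCAPE;  /* first word was zeroed */
        /* Bit 1 set: free b_frozen_data after the IO on *wbuf completes. */
        return 0;
}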
 471 
 472 /*
 473  * Allocation code for the journal file.  Manage the space left in the
 474  * journal, so that we can begin checkpointing when appropriate.
 475  */
 476 
 477 /*
 478  * Called with j_state_lock locked for writing.
 479  * Returns true if a transaction commit was started.
 480  */
 481 int __jbd2_log_start_commit(journal_t *journal, tid_t target)
 482 {
 483         /* Return if the txn has already been requested to be committed */
 484         if (journal->j_commit_request == target)
 485                 return 0;
 486 
 487         /*
 488          * The only transaction we can possibly wait upon is the
 489          * currently running transaction (if it exists).  Otherwise,
 490          * the target tid must be an old one.
 491          */
 492         if (journal->j_running_transaction &&
 493             journal->j_running_transaction->t_tid == target) {
 494                 /*
 495                  * We want a new commit: OK, mark the request and wakeup the
 496                  * commit thread.  We do _not_ do the commit ourselves.
 497                  */
 498 
 499                 journal->j_commit_request = target;
 500                 jbd_debug(1, "JBD2: requesting commit %u/%u\n",
 501                           journal->j_commit_request,
 502                           journal->j_commit_sequence);
 503                 journal->j_running_transaction->t_requested = jiffies;
 504                 wake_up(&journal->j_wait_commit);
 505                 return 1;
 506         } else if (!tid_geq(journal->j_commit_request, target))
 507                 /* This should never happen, but if it does, preserve
 508                    the evidence before kjournald2 goes into a loop and
 509                    increments j_commit_sequence beyond all recognition. */
 510                 WARN_ONCE(1, "JBD2: bad log_start_commit: %u %u %u %u\n",
 511                           journal->j_commit_request,
 512                           journal->j_commit_sequence,
 513                           target, journal->j_running_transaction ?
 514                           journal->j_running_transaction->t_tid : 0);
 515         return 0;
 516 }
 517 
 518 int jbd2_log_start_commit(journal_t *journal, tid_t tid)
 519 {
 520         int ret;
 521 
 522         write_lock(&journal->j_state_lock);
 523         ret = __jbd2_log_start_commit(journal, tid);
 524         write_unlock(&journal->j_state_lock);
 525         return ret;
 526 }
 527 
 528 /*
 529  * Force and wait any uncommitted transactions.  We can only force the running
 530  * transaction if we don't have an active handle, otherwise, we will deadlock.
 531  * Returns: <0 in case of error,
 532  *           0 if nothing to commit,
 533  *           1 if transaction was successfully committed.
 534  */
 535 static int __jbd2_journal_force_commit(journal_t *journal)
 536 {
 537         transaction_t *transaction = NULL;
 538         tid_t tid;
 539         int need_to_start = 0, ret = 0;
 540 
 541         read_lock(&journal->j_state_lock);
 542         if (journal->j_running_transaction && !current->journal_info) {
 543                 transaction = journal->j_running_transaction;
 544                 if (!tid_geq(journal->j_commit_request, transaction->t_tid))
 545                         need_to_start = 1;
 546         } else if (journal->j_committing_transaction)
 547                 transaction = journal->j_committing_transaction;
 548 
 549         if (!transaction) {
 550                 /* Nothing to commit */
 551                 read_unlock(&journal->j_state_lock);
 552                 return 0;
 553         }
 554         tid = transaction->t_tid;
 555         read_unlock(&journal->j_state_lock);
 556         if (need_to_start)
 557                 jbd2_log_start_commit(journal, tid);
 558         ret = jbd2_log_wait_commit(journal, tid);
 559         if (!ret)
 560                 ret = 1;
 561 
 562         return ret;
 563 }
 564 
 565 /**
 566  * Force and wait upon a commit if the calling process is not within a
 567  * transaction.  This is used for forcing out undo-protected data which contains
 568  * bitmaps, when the fs is running out of space.
 569  *
 570  * @journal: journal to force
 571  * Returns true if progress was made.
 572  */
 573 int jbd2_journal_force_commit_nested(journal_t *journal)
 574 {
 575         int ret;
 576 
 577         ret = __jbd2_journal_force_commit(journal);
 578         return ret > 0;
 579 }
 580 
 581 /**
 582  * int jbd2_journal_force_commit() - force any uncommitted transactions
 583  * @journal: journal to force
 584  *
 585  * Caller wants an unconditional commit. We can only force the running transaction
 586  * if we don't have an active handle, otherwise, we will deadlock.
 587  */
 588 int jbd2_journal_force_commit(journal_t *journal)
 589 {
 590         int ret;
 591 
 592         J_ASSERT(!current->journal_info);
 593         ret = __jbd2_journal_force_commit(journal);
 594         if (ret > 0)
 595                 ret = 0;
 596         return ret;
 597 }
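
/*
 * Illustrative sketch (not part of this file): choosing between the two
 * force-commit entry points.  jbd2_journal_force_commit() asserts the caller
 * holds no handle; jbd2_journal_force_commit_nested() is the variant that is
 * safe when a handle may be held, e.g. when retrying an allocation after
 * ENOSPC.  The helper name and error convention are assumptions.
 */
static int example_retry_after_enospc(journal_t *journal, int err)
{
        if (err != -ENOSPC)
                return err;
        /* Committing pending transactions may release journal/disk space. */
        if (jbd2_journal_force_commit_nested(journal))
                return -EAGAIN;         /* progress was made; caller retries */
        return -ENOSPC;
}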
 598 
 599 /*
 600  * Start a commit of the current running transaction (if any).  Returns true
 601  * if a transaction is going to be committed (or is currently already
 602  * committing), and fills in its tid at *ptid.
 603  */
 604 int jbd2_journal_start_commit(journal_t *journal, tid_t *ptid)
 605 {
 606         int ret = 0;
 607 
 608         write_lock(&journal->j_state_lock);
 609         if (journal->j_running_transaction) {
 610                 tid_t tid = journal->j_running_transaction->t_tid;
 611 
 612                 __jbd2_log_start_commit(journal, tid);
 613                 /* There's a running transaction and we've just made sure
 614                  * its commit has been scheduled. */
 615                 if (ptid)
 616                         *ptid = tid;
 617                 ret = 1;
 618         } else if (journal->j_committing_transaction) {
 619                 /*
 620                  * If commit has been started, then we have to wait for
 621                  * completion of that transaction.
 622                  */
 623                 if (ptid)
 624                         *ptid = journal->j_committing_transaction->t_tid;
 625                 ret = 1;
 626         }
 627         write_unlock(&journal->j_state_lock);
 628         return ret;
 629 }
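
/*
 * Illustrative sketch (not part of this file): the common "kick whatever is
 * running and wait for it" pattern built from jbd2_journal_start_commit()
 * and jbd2_log_wait_commit().  The helper name is hypothetical.
 */
static int example_sync_journal(journal_t *journal)
{
        tid_t tid;

        if (jbd2_journal_start_commit(journal, &tid))
                return jbd2_log_wait_commit(journal, tid);
        return 0;       /* nothing running or committing */
}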
 630 
 631 /*
 632  * Return 1 if a given transaction has not yet sent a barrier request
 633  * connected with its commit. If 0 is returned, the transaction
 634  * may or may not have sent the barrier. Used to avoid sending the barrier
 635  * twice in common cases.
 636  */
 637 int jbd2_trans_will_send_data_barrier(journal_t *journal, tid_t tid)
 638 {
 639         int ret = 0;
 640         transaction_t *commit_trans;
 641 
 642         if (!(journal->j_flags & JBD2_BARRIER))
 643                 return 0;
 644         read_lock(&journal->j_state_lock);
 645         /* Transaction already committed? */
 646         if (tid_geq(journal->j_commit_sequence, tid))
 647                 goto out;
 648         commit_trans = journal->j_committing_transaction;
 649         if (!commit_trans || commit_trans->t_tid != tid) {
 650                 ret = 1;
 651                 goto out;
 652         }
 653         /*
 654          * Transaction is being committed and we already proceeded to
 655          * submitting a flush to the fs partition?
 656          */
 657         if (journal->j_fs_dev != journal->j_dev) {
 658                 if (!commit_trans->t_need_data_flush ||
 659                     commit_trans->t_state >= T_COMMIT_DFLUSH)
 660                         goto out;
 661         } else {
 662                 if (commit_trans->t_state >= T_COMMIT_JFLUSH)
 663                         goto out;
 664         }
 665         ret = 1;
 666 out:
 667         read_unlock(&journal->j_state_lock);
 668         return ret;
 669 }
 670 EXPORT_SYMBOL(jbd2_trans_will_send_data_barrier);
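
/*
 * Illustrative sketch (not part of this file): the fsync-style decision this
 * helper enables.  If the committing transaction will not itself flush the
 * data device, the filesystem has to issue its own flush after waiting for
 * the commit.  How that flush is issued is left out here, since the
 * blkdev_issue_flush() signature varies between kernel versions.
 */
static bool example_fsync_needs_flush(journal_t *journal, tid_t commit_tid)
{
        if (!(journal->j_flags & JBD2_BARRIER))
                return false;
        return !jbd2_trans_will_send_data_barrier(journal, commit_tid);
}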
 671 
 672 /*
 673  * Wait for a specified commit to complete.
 674  * The caller may not hold the journal lock.
 675  */
 676 int jbd2_log_wait_commit(journal_t *journal, tid_t tid)
 677 {
 678         int err = 0;
 679 
 680         read_lock(&journal->j_state_lock);
 681 #ifdef CONFIG_PROVE_LOCKING
 682         /*
 683          * Some callers make sure transaction is already committing and in that
 684          * case we cannot block on open handles anymore. So don't warn in that
 685          * case.
 686          */
 687         if (tid_gt(tid, journal->j_commit_sequence) &&
 688             (!journal->j_committing_transaction ||
 689              journal->j_committing_transaction->t_tid != tid)) {
 690                 read_unlock(&journal->j_state_lock);
 691                 jbd2_might_wait_for_commit(journal);
 692                 read_lock(&journal->j_state_lock);
 693         }
 694 #endif
 695 #ifdef CONFIG_JBD2_DEBUG
 696         if (!tid_geq(journal->j_commit_request, tid)) {
 697                 printk(KERN_ERR
 698                        "%s: error: j_commit_request=%u, tid=%u\n",
 699                        __func__, journal->j_commit_request, tid);
 700         }
 701 #endif
 702         while (tid_gt(tid, journal->j_commit_sequence)) {
 703                 jbd_debug(1, "JBD2: want %u, j_commit_sequence=%u\n",
 704                                   tid, journal->j_commit_sequence);
 705                 read_unlock(&journal->j_state_lock);
 706                 wake_up(&journal->j_wait_commit);
 707                 wait_event(journal->j_wait_done_commit,
 708                                 !tid_gt(tid, journal->j_commit_sequence));
 709                 read_lock(&journal->j_state_lock);
 710         }
 711         read_unlock(&journal->j_state_lock);
 712 
 713         if (unlikely(is_journal_aborted(journal)))
 714                 err = -EIO;
 715         return err;
 716 }
 717 
 718 /* Return 1 when transaction with given tid has already committed. */
 719 int jbd2_transaction_committed(journal_t *journal, tid_t tid)
 720 {
 721         int ret = 1;
 722 
 723         read_lock(&journal->j_state_lock);
 724         if (journal->j_running_transaction &&
 725             journal->j_running_transaction->t_tid == tid)
 726                 ret = 0;
 727         if (journal->j_committing_transaction &&
 728             journal->j_committing_transaction->t_tid == tid)
 729                 ret = 0;
 730         read_unlock(&journal->j_state_lock);
 731         return ret;
 732 }
 733 EXPORT_SYMBOL(jbd2_transaction_committed);
 734 
 735 /*
 736  * When this function returns, the transaction corresponding to tid
 737  * will be completed.  If the transaction is currently running, start
 738  * committing that transaction before waiting for it to complete.  If
 739  * the transaction id is stale, it is by definition already completed,
 740  * so just return SUCCESS.
 741  */
 742 int jbd2_complete_transaction(journal_t *journal, tid_t tid)
 743 {
 744         int     need_to_wait = 1;
 745 
 746         read_lock(&journal->j_state_lock);
 747         if (journal->j_running_transaction &&
 748             journal->j_running_transaction->t_tid == tid) {
 749                 if (journal->j_commit_request != tid) {
 750                         /* transaction not yet started, so request it */
 751                         read_unlock(&journal->j_state_lock);
 752                         jbd2_log_start_commit(journal, tid);
 753                         goto wait_commit;
 754                 }
 755         } else if (!(journal->j_committing_transaction &&
 756                      journal->j_committing_transaction->t_tid == tid))
 757                 need_to_wait = 0;
 758         read_unlock(&journal->j_state_lock);
 759         if (!need_to_wait)
 760                 return 0;
 761 wait_commit:
 762         return jbd2_log_wait_commit(journal, tid);
 763 }
 764 EXPORT_SYMBOL(jbd2_complete_transaction);
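
/*
 * Illustrative sketch (not part of this file): a minimal fsync-style wait
 * using the two helpers above.  The caller is assumed to have remembered the
 * tid covering its last metadata update (as ext4 does with i_datasync_tid);
 * the helper name is hypothetical.
 */
static int example_wait_for_metadata(journal_t *journal, tid_t commit_tid)
{
        if (jbd2_transaction_committed(journal, commit_tid))
                return 0;       /* already durable, nothing to wait for */
        /* Starts the commit if it has not been requested yet, then waits. */
        return jbd2_complete_transaction(journal, commit_tid);
}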
 765 
 766 /*
 767  * Log buffer allocation routines:
 768  */
 769 
 770 int jbd2_journal_next_log_block(journal_t *journal, unsigned long long *retp)
 771 {
 772         unsigned long blocknr;
 773 
 774         write_lock(&journal->j_state_lock);
 775         J_ASSERT(journal->j_free > 1);
 776 
 777         blocknr = journal->j_head;
 778         journal->j_head++;
 779         journal->j_free--;
 780         if (journal->j_head == journal->j_last)
 781                 journal->j_head = journal->j_first;
 782         write_unlock(&journal->j_state_lock);
 783         return jbd2_journal_bmap(journal, blocknr, retp);
 784 }
 785 
 786 /*
 787  * Conversion of logical to physical block numbers for the journal
 788  *
 789  * On external journals the journal blocks are identity-mapped, so
 790  * this is a no-op.  If needed, we can use j_blk_offset - everything is
 791  * ready.
 792  */
 793 int jbd2_journal_bmap(journal_t *journal, unsigned long blocknr,
 794                  unsigned long long *retp)
 795 {
 796         int err = 0;
 797         unsigned long long ret;
 798 
 799         if (journal->j_inode) {
 800                 ret = bmap(journal->j_inode, blocknr);
 801                 if (ret)
 802                         *retp = ret;
 803                 else {
 804                         printk(KERN_ALERT "%s: journal block not found "
 805                                         "at offset %lu on %s\n",
 806                                __func__, blocknr, journal->j_devname);
 807                         err = -EIO;
 808                         __journal_abort_soft(journal, err);
 809                 }
 810         } else {
 811                 *retp = blocknr; /* +journal->j_blk_offset */
 812         }
 813         return err;
 814 }
 815 
 816 /*
 817  * We play buffer_head aliasing tricks to write data/metadata blocks to
 818  * the journal without copying their contents, but for journal
 819  * descriptor blocks we do need to generate bona fide buffers.
 820  *
 821  * After the caller of jbd2_journal_get_descriptor_buffer() has finished modifying
 822  * the buffer's contents they really should run flush_dcache_page(bh->b_page).
 823  * But we don't bother doing that, so there will be coherency problems with
 824  * mmaps of blockdevs which hold live JBD-controlled filesystems.
 825  */
 826 struct buffer_head *
 827 jbd2_journal_get_descriptor_buffer(transaction_t *transaction, int type)
 828 {
 829         journal_t *journal = transaction->t_journal;
 830         struct buffer_head *bh;
 831         unsigned long long blocknr;
 832         journal_header_t *header;
 833         int err;
 834 
 835         err = jbd2_journal_next_log_block(journal, &blocknr);
 836 
 837         if (err)
 838                 return NULL;
 839 
 840         bh = __getblk(journal->j_dev, blocknr, journal->j_blocksize);
 841         if (!bh)
 842                 return NULL;
 843         lock_buffer(bh);
 844         memset(bh->b_data, 0, journal->j_blocksize);
 845         header = (journal_header_t *)bh->b_data;
 846         header->h_magic = cpu_to_be32(JBD2_MAGIC_NUMBER);
 847         header->h_blocktype = cpu_to_be32(type);
 848         header->h_sequence = cpu_to_be32(transaction->t_tid);
 849         set_buffer_uptodate(bh);
 850         unlock_buffer(bh);
 851         BUFFER_TRACE(bh, "return this buffer");
 852         return bh;
 853 }
 854 
 855 void jbd2_descriptor_block_csum_set(journal_t *j, struct buffer_head *bh)
 856 {
 857         struct jbd2_journal_block_tail *tail;
 858         __u32 csum;
 859 
 860         if (!jbd2_journal_has_csum_v2or3(j))
 861                 return;
 862 
 863         tail = (struct jbd2_journal_block_tail *)(bh->b_data + j->j_blocksize -
 864                         sizeof(struct jbd2_journal_block_tail));
 865         tail->t_checksum = 0;
 866         csum = jbd2_chksum(j, j->j_csum_seed, bh->b_data, j->j_blocksize);
 867         tail->t_checksum = cpu_to_be32(csum);
 868 }
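
/*
 * Illustrative sketch (not part of this file): the descriptor-block flow
 * during commit.  A descriptor buffer is obtained for the transaction,
 * journal_block_tag_t entries describing the following metadata blocks are
 * written after the journal_header_t, and the tail checksum is set before
 * submission.  Tag filling and IO submission are elided; the helper name is
 * hypothetical.
 */
static struct buffer_head *example_start_descriptor(transaction_t *transaction)
{
        journal_t *journal = transaction->t_journal;
        struct buffer_head *descriptor;

        descriptor = jbd2_journal_get_descriptor_buffer(transaction,
                                                JBD2_DESCRIPTOR_BLOCK);
        if (!descriptor)
                return NULL;

        /* ... fill tags starting at b_data + sizeof(journal_header_t) ... */

        /* Seal the block once all tags are in place. */
        jbd2_descriptor_block_csum_set(journal, descriptor);
        return descriptor;      /* caller submits it ahead of the data blocks */
}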
 869 
 870 /*
 871  * Return the tid of the oldest transaction in the journal and the block in
 872  * the journal where that transaction starts.
 873  *
 874  * If the journal is now empty, return the ID of the next transaction we will
 875  * write and where that transaction will start.
 876  *
 877  * The return value is 0 if the journal tail cannot be pushed any further,
 878  * 1 if it can.
 879  */
 880 int jbd2_journal_get_log_tail(journal_t *journal, tid_t *tid,
 881                               unsigned long *block)
 882 {
 883         transaction_t *transaction;
 884         int ret;
 885 
 886         read_lock(&journal->j_state_lock);
 887         spin_lock(&journal->j_list_lock);
 888         transaction = journal->j_checkpoint_transactions;
 889         if (transaction) {
 890                 *tid = transaction->t_tid;
 891                 *block = transaction->t_log_start;
 892         } else if ((transaction = journal->j_committing_transaction) != NULL) {
 893                 *tid = transaction->t_tid;
 894                 *block = transaction->t_log_start;
 895         } else if ((transaction = journal->j_running_transaction) != NULL) {
 896                 *tid = transaction->t_tid;
 897                 *block = journal->j_head;
 898         } else {
 899                 *tid = journal->j_transaction_sequence;
 900                 *block = journal->j_head;
 901         }
 902         ret = tid_gt(*tid, journal->j_tail_sequence);
 903         spin_unlock(&journal->j_list_lock);
 904         read_unlock(&journal->j_state_lock);
 905 
 906         return ret;
 907 }
 908 
 909 /*
 910  * Update information in the journal structure and in the on-disk journal
 911  * superblock about the log tail. This function does not check whether the
 912  * information passed in really pushes the log tail further. It is the
 913  * caller's responsibility to make sure the provided log tail is valid (e.g.
 914  * by holding j_checkpoint_mutex all the time between computing the log tail
 915  * and calling this function, as is the case with jbd2_cleanup_journal_tail()).
 916  *
 917  * Requires j_checkpoint_mutex
 918  */
 919 int __jbd2_update_log_tail(journal_t *journal, tid_t tid, unsigned long block)
 920 {
 921         unsigned long freed;
 922         int ret;
 923 
 924         BUG_ON(!mutex_is_locked(&journal->j_checkpoint_mutex));
 925 
 926         /*
 927          * We cannot afford for the write to remain in the drive's caches since,
 928          * as soon as we update j_tail, the next transaction can start reusing
 929          * journal space and, if we lose the sb update during a power failure,
 930          * we'd replay an old transaction with possibly newly overwritten data.
 931          */
 932         ret = jbd2_journal_update_sb_log_tail(journal, tid, block,
 933                                               REQ_SYNC | REQ_FUA);
 934         if (ret)
 935                 goto out;
 936 
 937         write_lock(&journal->j_state_lock);
 938         freed = block - journal->j_tail;
 939         if (block < journal->j_tail)
 940                 freed += journal->j_last - journal->j_first;
 941 
 942         trace_jbd2_update_log_tail(journal, tid, block, freed);
 943         jbd_debug(1,
 944                   "Cleaning journal tail from %u to %u (offset %lu), "
 945                   "freeing %lu\n",
 946                   journal->j_tail_sequence, tid, block, freed);
 947 
 948         journal->j_free += freed;
 949         journal->j_tail_sequence = tid;
 950         journal->j_tail = block;
 951         write_unlock(&journal->j_state_lock);
 952 
 953 out:
 954         return ret;
 955 }
 956 
 957 /*
 958  * This is a variation of __jbd2_update_log_tail which checks for validity of
 959  * provided log tail and locks j_checkpoint_mutex. So it is safe against races
 960  * with other threads updating log tail.
 961  */
 962 void jbd2_update_log_tail(journal_t *journal, tid_t tid, unsigned long block)
 963 {
 964         mutex_lock_io(&journal->j_checkpoint_mutex);
 965         if (tid_gt(tid, journal->j_tail_sequence))
 966                 __jbd2_update_log_tail(journal, tid, block);
 967         mutex_unlock(&journal->j_checkpoint_mutex);
 968 }
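
/*
 * Illustrative sketch (not part of this file): how checkpointing code pushes
 * the tail, in the spirit of jbd2_cleanup_journal_tail().  j_checkpoint_mutex
 * is held across computing and applying the tail so the two stay consistent;
 * the optional cache flush of the fs device is elided, and the helper name is
 * hypothetical.
 */
static int example_push_tail(journal_t *journal)
{
        tid_t first_tid;
        unsigned long blocknr;
        int ret = 1;            /* 1: tail could not be pushed any further */

        mutex_lock_io(&journal->j_checkpoint_mutex);
        if (jbd2_journal_get_log_tail(journal, &first_tid, &blocknr))
                ret = __jbd2_update_log_tail(journal, first_tid, blocknr);
        mutex_unlock(&journal->j_checkpoint_mutex);
        return ret;
}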
 969 
 970 struct jbd2_stats_proc_session {
 971         journal_t *journal;
 972         struct transaction_stats_s *stats;
 973         int start;
 974         int max;
 975 };
 976 
 977 static void *jbd2_seq_info_start(struct seq_file *seq, loff_t *pos)
 978 {
 979         return *pos ? NULL : SEQ_START_TOKEN;
 980 }
 981 
 982 static void *jbd2_seq_info_next(struct seq_file *seq, void *v, loff_t *pos)
 983 {
 984         (*pos)++;
 985         return NULL;
 986 }
 987 
 988 static int jbd2_seq_info_show(struct seq_file *seq, void *v)
 989 {
 990         struct jbd2_stats_proc_session *s = seq->private;
 991 
 992         if (v != SEQ_START_TOKEN)
 993                 return 0;
 994         seq_printf(seq, "%lu transactions (%lu requested), "
 995                    "each up to %u blocks\n",
 996                    s->stats->ts_tid, s->stats->ts_requested,
 997                    s->journal->j_max_transaction_buffers);
 998         if (s->stats->ts_tid == 0)
 999                 return 0;
1000         seq_printf(seq, "average: \n  %ums waiting for transaction\n",
1001             jiffies_to_msecs(s->stats->run.rs_wait / s->stats->ts_tid));
1002         seq_printf(seq, "  %ums request delay\n",
1003             (s->stats->ts_requested == 0) ? 0 :
1004             jiffies_to_msecs(s->stats->run.rs_request_delay /
1005                              s->stats->ts_requested));
1006         seq_printf(seq, "  %ums running transaction\n",
1007             jiffies_to_msecs(s->stats->run.rs_running / s->stats->ts_tid));
1008         seq_printf(seq, "  %ums transaction was being locked\n",
1009             jiffies_to_msecs(s->stats->run.rs_locked / s->stats->ts_tid));
1010         seq_printf(seq, "  %ums flushing data (in ordered mode)\n",
1011             jiffies_to_msecs(s->stats->run.rs_flushing / s->stats->ts_tid));
1012         seq_printf(seq, "  %ums logging transaction\n",
1013             jiffies_to_msecs(s->stats->run.rs_logging / s->stats->ts_tid));
1014         seq_printf(seq, "  %lluus average transaction commit time\n",
1015                    div_u64(s->journal->j_average_commit_time, 1000));
1016         seq_printf(seq, "  %lu handles per transaction\n",
1017             s->stats->run.rs_handle_count / s->stats->ts_tid);
1018         seq_printf(seq, "  %lu blocks per transaction\n",
1019             s->stats->run.rs_blocks / s->stats->ts_tid);
1020         seq_printf(seq, "  %lu logged blocks per transaction\n",
1021             s->stats->run.rs_blocks_logged / s->stats->ts_tid);
1022         return 0;
1023 }
1024 
1025 static void jbd2_seq_info_stop(struct seq_file *seq, void *v)
1026 {
1027 }
1028 
1029 static const struct seq_operations jbd2_seq_info_ops = {
1030         .start  = jbd2_seq_info_start,
1031         .next   = jbd2_seq_info_next,
1032         .stop   = jbd2_seq_info_stop,
1033         .show   = jbd2_seq_info_show,
1034 };
1035 
1036 static int jbd2_seq_info_open(struct inode *inode, struct file *file)
1037 {
1038         journal_t *journal = PDE_DATA(inode);
1039         struct jbd2_stats_proc_session *s;
1040         int rc, size;
1041 
1042         s = kmalloc(sizeof(*s), GFP_KERNEL);
1043         if (s == NULL)
1044                 return -ENOMEM;
1045         size = sizeof(struct transaction_stats_s);
1046         s->stats = kmalloc(size, GFP_KERNEL);
1047         if (s->stats == NULL) {
1048                 kfree(s);
1049                 return -ENOMEM;
1050         }
1051         spin_lock(&journal->j_history_lock);
1052         memcpy(s->stats, &journal->j_stats, size);
1053         s->journal = journal;
1054         spin_unlock(&journal->j_history_lock);
1055 
1056         rc = seq_open(file, &jbd2_seq_info_ops);
1057         if (rc == 0) {
1058                 struct seq_file *m = file->private_data;
1059                 m->private = s;
1060         } else {
1061                 kfree(s->stats);
1062                 kfree(s);
1063         }
1064         return rc;
1065 
1066 }
1067 
1068 static int jbd2_seq_info_release(struct inode *inode, struct file *file)
1069 {
1070         struct seq_file *seq = file->private_data;
1071         struct jbd2_stats_proc_session *s = seq->private;
1072         kfree(s->stats);
1073         kfree(s);
1074         return seq_release(inode, file);
1075 }
1076 
1077 static const struct file_operations jbd2_seq_info_fops = {
1078         .owner          = THIS_MODULE,
1079         .open           = jbd2_seq_info_open,
1080         .read           = seq_read,
1081         .llseek         = seq_lseek,
1082         .release        = jbd2_seq_info_release,
1083 };
1084 
1085 static struct proc_dir_entry *proc_jbd2_stats;
1086 
1087 static void jbd2_stats_proc_init(journal_t *journal)
1088 {
1089         journal->j_proc_entry = proc_mkdir(journal->j_devname, proc_jbd2_stats);
1090         if (journal->j_proc_entry) {
1091                 proc_create_data("info", S_IRUGO, journal->j_proc_entry,
1092                                  &jbd2_seq_info_fops, journal);
1093         }
1094 }
1095 
1096 static void jbd2_stats_proc_exit(journal_t *journal)
1097 {
1098         remove_proc_entry("info", journal->j_proc_entry);
1099         remove_proc_entry(journal->j_devname, proc_jbd2_stats);
1100 }
1101 
1102 /*
1103  * Management for journal control blocks: functions to create and
1104  * destroy journal_t structures, and to initialise and read existing
1105  * journal blocks from disk.  */
1106 
1107 /* First: create and setup a journal_t object in memory.  We initialise
1108  * very few fields yet: that has to wait until we have created the
1109  * journal structures from scratch, or loaded them from disk. */
1110 
1111 static journal_t *journal_init_common(struct block_device *bdev,
1112                         struct block_device *fs_dev,
1113                         unsigned long long start, int len, int blocksize)
1114 {
1115         static struct lock_class_key jbd2_trans_commit_key;
1116         journal_t *journal;
1117         int err;
1118         struct buffer_head *bh;
1119         int n;
1120 
1121         journal = kzalloc(sizeof(*journal), GFP_KERNEL);
1122         if (!journal)
1123                 return NULL;
1124 
1125         init_waitqueue_head(&journal->j_wait_transaction_locked);
1126         init_waitqueue_head(&journal->j_wait_done_commit);
1127         init_waitqueue_head(&journal->j_wait_commit);
1128         init_waitqueue_head(&journal->j_wait_updates);
1129         init_waitqueue_head(&journal->j_wait_reserved);
1130         mutex_init(&journal->j_barrier);
1131         mutex_init(&journal->j_checkpoint_mutex);
1132         spin_lock_init(&journal->j_revoke_lock);
1133         spin_lock_init(&journal->j_list_lock);
1134         rwlock_init(&journal->j_state_lock);
1135 
1136         journal->j_commit_interval = (HZ * JBD2_DEFAULT_MAX_COMMIT_AGE);
1137         journal->j_min_batch_time = 0;
1138         journal->j_max_batch_time = 15000; /* 15ms */
1139         atomic_set(&journal->j_reserved_credits, 0);
1140 
1141         /* The journal is marked for error until we succeed with recovery! */
1142         journal->j_flags = JBD2_ABORT;
1143 
1144         /* Set up a default-sized revoke table for the new mount. */
1145         err = jbd2_journal_init_revoke(journal, JOURNAL_REVOKE_DEFAULT_HASH);
1146         if (err)
1147                 goto err_cleanup;
1148 
1149         spin_lock_init(&journal->j_history_lock);
1150 
1151         lockdep_init_map(&journal->j_trans_commit_map, "jbd2_handle",
1152                          &jbd2_trans_commit_key, 0);
1153 
1154         /* journal descriptor can store up to n blocks -bzzz */
1155         journal->j_blocksize = blocksize;
1156         journal->j_dev = bdev;
1157         journal->j_fs_dev = fs_dev;
1158         journal->j_blk_offset = start;
1159         journal->j_maxlen = len;
1160         n = journal->j_blocksize / sizeof(journal_block_tag_t);
1161         journal->j_wbufsize = n;
1162         journal->j_wbuf = kmalloc_array(n, sizeof(struct buffer_head *),
1163                                         GFP_KERNEL);
1164         if (!journal->j_wbuf)
1165                 goto err_cleanup;
1166 
1167         bh = getblk_unmovable(journal->j_dev, start, journal->j_blocksize);
1168         if (!bh) {
1169                 pr_err("%s: Cannot get buffer for journal superblock\n",
1170                         __func__);
1171                 goto err_cleanup;
1172         }
1173         journal->j_sb_buffer = bh;
1174         journal->j_superblock = (journal_superblock_t *)bh->b_data;
1175 
1176         return journal;
1177 
1178 err_cleanup:
1179         kfree(journal->j_wbuf);
1180         jbd2_journal_destroy_revoke(journal);
1181         kfree(journal);
1182         return NULL;
1183 }
1184 
1185 /* jbd2_journal_init_dev and jbd2_journal_init_inode:
1186  *
1187  * Create a journal structure, assigning some fixed set of disk blocks to
1188  * the journal.  We don't actually touch those disk blocks yet, but we
1189  * need to set up all of the mapping information to tell the journaling
1190  * system where the journal blocks are.
1191  *
1192  */
1193 
1194 /**
1195  *  journal_t * jbd2_journal_init_dev() - creates and initialises a journal structure
1196  *  @bdev: Block device on which to create the journal
1197  *  @fs_dev: Device which holds the journalled filesystem for this journal.
1198  *  @start: Block nr at which the journal starts.
1199  *  @len:  Length of the journal in blocks.
1200  *  @blocksize: Blocksize of the journalling device.
1201  *
1202  *  Returns: a newly created journal_t *
1203  *
1204  *  jbd2_journal_init_dev creates a journal which maps a fixed contiguous
1205  *  range of blocks on an arbitrary block device.
1206  *
1207  */
1208 journal_t *jbd2_journal_init_dev(struct block_device *bdev,
1209                         struct block_device *fs_dev,
1210                         unsigned long long start, int len, int blocksize)
1211 {
1212         journal_t *journal;
1213 
1214         journal = journal_init_common(bdev, fs_dev, start, len, blocksize);
1215         if (!journal)
1216                 return NULL;
1217 
1218         bdevname(journal->j_dev, journal->j_devname);
1219         strreplace(journal->j_devname, '/', '!');
1220         jbd2_stats_proc_init(journal);
1221 
1222         return journal;
1223 }
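
/*
 * Illustrative sketch (not part of this file): mount-time bring-up of a
 * journal on a dedicated external device.  The geometry arguments are
 * placeholders a filesystem would read from its own superblock; recovery, if
 * needed, happens inside jbd2_journal_load().  The helper name is
 * hypothetical and error handling is reduced to the minimum.
 */
static journal_t *example_load_external_journal(struct block_device *j_bdev,
                                                struct block_device *fs_bdev,
                                                unsigned long long start,
                                                int len, int blocksize)
{
        journal_t *journal;

        journal = jbd2_journal_init_dev(j_bdev, fs_bdev, start, len, blocksize);
        if (!journal)
                return NULL;
        if (jbd2_journal_load(journal)) {       /* replays the log if needed */
                jbd2_journal_destroy(journal);
                return NULL;
        }
        return journal;
}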
1224 
1225 /**
1226  *  journal_t * jbd2_journal_init_inode() - creates a journal which maps to an inode.
1227  *  @inode: An inode to create the journal in
1228  *
1229  * jbd2_journal_init_inode creates a journal which maps an on-disk inode as
1230  * the journal.  The inode must exist already, must support bmap() and
1231  * must have all data blocks preallocated.
1232  */
1233 journal_t *jbd2_journal_init_inode(struct inode *inode)
1234 {
1235         journal_t *journal;
1236         char *p;
1237         unsigned long long blocknr;
1238 
1239         blocknr = bmap(inode, 0);
1240         if (!blocknr) {
1241                 pr_err("%s: Cannot locate journal superblock\n",
1242                         __func__);
1243                 return NULL;
1244         }
1245 
1246         jbd_debug(1, "JBD2: inode %s/%ld, size %lld, bits %d, blksize %ld\n",
1247                   inode->i_sb->s_id, inode->i_ino, (long long) inode->i_size,
1248                   inode->i_sb->s_blocksize_bits, inode->i_sb->s_blocksize);
1249 
1250         journal = journal_init_common(inode->i_sb->s_bdev, inode->i_sb->s_bdev,
1251                         blocknr, inode->i_size >> inode->i_sb->s_blocksize_bits,
1252                         inode->i_sb->s_blocksize);
1253         if (!journal)
1254                 return NULL;
1255 
1256         journal->j_inode = inode;
1257         bdevname(journal->j_dev, journal->j_devname);
1258         p = strreplace(journal->j_devname, '/', '!');
1259         sprintf(p, "-%lu", journal->j_inode->i_ino);
1260         jbd2_stats_proc_init(journal);
1261 
1262         return journal;
1263 }
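
/*
 * Illustrative sketch (not part of this file): the inode-backed variant, used
 * when the journal lives in a reserved inode of the filesystem itself.  The
 * inode must already have all of its blocks allocated (see the comment
 * above); unmount tears the journal down with jbd2_journal_destroy().  The
 * helper name is hypothetical.
 */
static journal_t *example_load_inode_journal(struct inode *journal_inode)
{
        journal_t *journal = jbd2_journal_init_inode(journal_inode);

        if (journal && jbd2_journal_load(journal)) {
                jbd2_journal_destroy(journal);
                journal = NULL;
        }
        return journal;
}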
1264 
1265 /*
1266  * If the journal init or create aborts, we need to mark the journal
1267  * superblock as being NULL to prevent the journal destroy from writing
1268  * back a bogus superblock.
1269  */
1270 static void journal_fail_superblock (journal_t *journal)
1271 {
1272         struct buffer_head *bh = journal->j_sb_buffer;
1273         brelse(bh);
1274         journal->j_sb_buffer = NULL;
1275 }
1276 
1277 /*
1278  * Given a journal_t structure, initialise the various fields for
1279  * startup of a new journaling session.  We use this both when creating
1280  * a journal, and after recovering an old journal to reset it for
1281  * subsequent use.
1282  */
1283 
1284 static int journal_reset(journal_t *journal)
1285 {
1286         journal_superblock_t *sb = journal->j_superblock;
1287         unsigned long long first, last;
1288 
1289         first = be32_to_cpu(sb->s_first);
1290         last = be32_to_cpu(sb->s_maxlen);
1291         if (first + JBD2_MIN_JOURNAL_BLOCKS > last + 1) {
1292                 printk(KERN_ERR "JBD2: Journal too short (blocks %llu-%llu).\n",
1293                        first, last);
1294                 journal_fail_superblock(journal);
1295                 return -EINVAL;
1296         }
1297 
1298         journal->j_first = first;
1299         journal->j_last = last;
1300 
1301         journal->j_head = first;
1302         journal->j_tail = first;
1303         journal->j_free = last - first;
1304 
1305         journal->j_tail_sequence = journal->j_transaction_sequence;
1306         journal->j_commit_sequence = journal->j_transaction_sequence - 1;
1307         journal->j_commit_request = journal->j_commit_sequence;
1308 
1309         journal->j_max_transaction_buffers = journal->j_maxlen / 4;
1310 
1311         /*
1312          * As a special case, if the on-disk copy is already marked as needing
1313          * no recovery (s_start == 0), then we can safely defer the superblock
1314          * update until the next commit by setting JBD2_FLUSHED.  This avoids
1315          * attempting a write to a potential-readonly device.
1316          */
1317         if (sb->s_start == 0) {
1318                 jbd_debug(1, "JBD2: Skipping superblock update on recovered sb "
1319                         "(start %ld, seq %u, errno %d)\n",
1320                         journal->j_tail, journal->j_tail_sequence,
1321                         journal->j_errno);
1322                 journal->j_flags |= JBD2_FLUSHED;
1323         } else {
1324                 /* Lock here to make assertions happy... */
1325                 mutex_lock_io(&journal->j_checkpoint_mutex);
1326                 /*
1327                  * Update log tail information. We use REQ_FUA since new
1328                  * transaction will start reusing journal space and so we
1329                  * must make sure information about current log tail is on
1330                  * disk before that.
1331                  */
1332                 jbd2_journal_update_sb_log_tail(journal,
1333                                                 journal->j_tail_sequence,
1334                                                 journal->j_tail,
1335                                                 REQ_SYNC | REQ_FUA);
1336                 mutex_unlock(&journal->j_checkpoint_mutex);
1337         }
1338         return jbd2_journal_start_thread(journal);
1339 }
1340 
1341 /*
1342  * This function expects that the caller will have locked the journal
1343  * buffer head, and will return with it unlocked
1344  */
1345 static int jbd2_write_superblock(journal_t *journal, int write_flags)
1346 {
1347         struct buffer_head *bh = journal->j_sb_buffer;
1348         journal_superblock_t *sb = journal->j_superblock;
1349         int ret;
1350 
1351         /* Buffer got discarded which means block device got invalidated */
1352         if (!buffer_mapped(bh))
1353                 return -EIO;
1354 
1355         trace_jbd2_write_superblock(journal, write_flags);
1356         if (!(journal->j_flags & JBD2_BARRIER))
1357                 write_flags &= ~(REQ_FUA | REQ_PREFLUSH);
1358         if (buffer_write_io_error(bh)) {
1359                 /*
1360                  * Oh, dear.  A previous attempt to write the journal
1361                  * superblock failed.  This could happen because the
1362                  * USB device was yanked out.  Or it could happen to
1363                  * be a transient write error and maybe the block will
1364                  * be remapped.  Nothing we can do but to retry the
1365                  * write and hope for the best.
1366                  */
1367                 printk(KERN_ERR "JBD2: previous I/O error detected "
1368                        "for journal superblock update for %s.\n",
1369                        journal->j_devname);
1370                 clear_buffer_write_io_error(bh);
1371                 set_buffer_uptodate(bh);
1372         }
1373         if (jbd2_journal_has_csum_v2or3(journal))
1374                 sb->s_checksum = jbd2_superblock_csum(journal, sb);
1375         get_bh(bh);
1376         bh->b_end_io = end_buffer_write_sync;
1377         ret = submit_bh(REQ_OP_WRITE, write_flags, bh);
1378         wait_on_buffer(bh);
1379         if (buffer_write_io_error(bh)) {
1380                 clear_buffer_write_io_error(bh);
1381                 set_buffer_uptodate(bh);
1382                 ret = -EIO;
1383         }
1384         if (ret) {
1385                 printk(KERN_ERR "JBD2: Error %d detected when updating "
1386                        "journal superblock for %s.\n", ret,
1387                        journal->j_devname);
1388                 jbd2_journal_abort(journal, ret);
1389         }
1390 
1391         return ret;
1392 }
1393 
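     /*
      * Caller pattern (an illustrative sketch distilled from the helpers
      * below, not a separate API): the caller locks the superblock buffer,
      * edits the fields it wants to publish, and lets jbd2_write_superblock()
      * submit the I/O; end_buffer_write_sync() drops the buffer lock once the
      * write completes.
      *
      *      lock_buffer(journal->j_sb_buffer);
      *      sb->s_errno = cpu_to_be32(errcode);
      *      ret = jbd2_write_superblock(journal, REQ_SYNC | REQ_FUA);
      */
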
1394 /**
1395  * jbd2_journal_update_sb_log_tail() - Update log tail in journal sb on disk.
1396  * @journal: The journal to update.
1397  * @tail_tid: TID of the new transaction at the tail of the log
1398  * @tail_block: The first block of the transaction at the tail of the log
1399  * @write_op: With which operation should we write the journal sb
1400  *
1401  * Update a journal's superblock information about log tail and write it to
1402  * disk, waiting for the IO to complete.
1403  */
1404 int jbd2_journal_update_sb_log_tail(journal_t *journal, tid_t tail_tid,
1405                                      unsigned long tail_block, int write_op)
1406 {
1407         journal_superblock_t *sb = journal->j_superblock;
1408         int ret;
1409 
1410         if (is_journal_aborted(journal))
1411                 return -EIO;
1412 
1413         BUG_ON(!mutex_is_locked(&journal->j_checkpoint_mutex));
1414         jbd_debug(1, "JBD2: updating superblock (start %lu, seq %u)\n",
1415                   tail_block, tail_tid);
1416 
1417         lock_buffer(journal->j_sb_buffer);
1418         sb->s_sequence = cpu_to_be32(tail_tid);
1419         sb->s_start    = cpu_to_be32(tail_block);
1420 
1421         ret = jbd2_write_superblock(journal, write_op);
1422         if (ret)
1423                 goto out;
1424 
1425         /* Log is no longer empty */
1426         write_lock(&journal->j_state_lock);
1427         WARN_ON(!sb->s_sequence);
1428         journal->j_flags &= ~JBD2_FLUSHED;
1429         write_unlock(&journal->j_state_lock);
1430 
1431 out:
1432         return ret;
1433 }
1434 
1435 /**
1436  * jbd2_mark_journal_empty() - Mark on disk journal as empty.
1437  * @journal: The journal to update.
1438  * @write_op: With which operation should we write the journal sb
1439  *
1440  * Update a journal's dynamic superblock fields to show that the journal is empty.
1441  * Write the updated superblock to disk, waiting for the IO to complete.
1442  */
1443 static void jbd2_mark_journal_empty(journal_t *journal, int write_op)
1444 {
1445         journal_superblock_t *sb = journal->j_superblock;
1446 
1447         BUG_ON(!mutex_is_locked(&journal->j_checkpoint_mutex));
1448         lock_buffer(journal->j_sb_buffer);
1449         if (sb->s_start == 0) {         /* Is it already empty? */
1450                 unlock_buffer(journal->j_sb_buffer);
1451                 return;
1452         }
1453 
1454         jbd_debug(1, "JBD2: Marking journal as empty (seq %u)\n",
1455                   journal->j_tail_sequence);
1456 
1457         sb->s_sequence = cpu_to_be32(journal->j_tail_sequence);
1458         sb->s_start    = cpu_to_be32(0);
1459 
1460         jbd2_write_superblock(journal, write_op);
1461 
1462         /* Log is now empty */
1463         write_lock(&journal->j_state_lock);
1464         journal->j_flags |= JBD2_FLUSHED;
1465         write_unlock(&journal->j_state_lock);
1466 }
1467 
1468 
1469 /**
1470  * jbd2_journal_update_sb_errno() - Update error in the journal.
1471  * @journal: The journal to update.
1472  *
1473  * Update a journal's errno.  Write updated superblock to disk waiting for IO
1474  * to complete.
1475  */
1476 void jbd2_journal_update_sb_errno(journal_t *journal)
1477 {
1478         journal_superblock_t *sb = journal->j_superblock;
1479         int errcode;
1480 
1481         lock_buffer(journal->j_sb_buffer);
1482         errcode = journal->j_errno;
1483         if (errcode == -ESHUTDOWN)
1484                 errcode = 0;
1485         jbd_debug(1, "JBD2: updating superblock error (errno %d)\n", errcode);
1486         sb->s_errno    = cpu_to_be32(errcode);
1487 
1488         jbd2_write_superblock(journal, REQ_SYNC | REQ_FUA);
1489 }
1490 EXPORT_SYMBOL(jbd2_journal_update_sb_errno);
1491 
1492 /*
1493  * Read the superblock for a given journal, performing initial
1494  * validation of the format.
1495  */
1496 static int journal_get_superblock(journal_t *journal)
1497 {
1498         struct buffer_head *bh;
1499         journal_superblock_t *sb;
1500         int err = -EIO;
1501 
1502         bh = journal->j_sb_buffer;
1503 
1504         J_ASSERT(bh != NULL);
1505         if (!buffer_uptodate(bh)) {
1506                 ll_rw_block(REQ_OP_READ, 0, 1, &bh);
1507                 wait_on_buffer(bh);
1508                 if (!buffer_uptodate(bh)) {
1509                         printk(KERN_ERR
1510                                 "JBD2: IO error reading journal superblock\n");
1511                         goto out;
1512                 }
1513         }
1514 
1515         if (buffer_verified(bh))
1516                 return 0;
1517 
1518         sb = journal->j_superblock;
1519 
1520         err = -EINVAL;
1521 
1522         if (sb->s_header.h_magic != cpu_to_be32(JBD2_MAGIC_NUMBER) ||
1523             sb->s_blocksize != cpu_to_be32(journal->j_blocksize)) {
1524                 printk(KERN_WARNING "JBD2: no valid journal superblock found\n");
1525                 goto out;
1526         }
1527 
1528         switch(be32_to_cpu(sb->s_header.h_blocktype)) {
1529         case JBD2_SUPERBLOCK_V1:
1530                 journal->j_format_version = 1;
1531                 break;
1532         case JBD2_SUPERBLOCK_V2:
1533                 journal->j_format_version = 2;
1534                 break;
1535         default:
1536                 printk(KERN_WARNING "JBD2: unrecognised superblock format ID\n");
1537                 goto out;
1538         }
1539 
1540         if (be32_to_cpu(sb->s_maxlen) < journal->j_maxlen)
1541                 journal->j_maxlen = be32_to_cpu(sb->s_maxlen);
1542         else if (be32_to_cpu(sb->s_maxlen) > journal->j_maxlen) {
1543                 printk(KERN_WARNING "JBD2: journal file too short\n");
1544                 goto out;
1545         }
1546 
1547         if (be32_to_cpu(sb->s_first) == 0 ||
1548             be32_to_cpu(sb->s_first) >= journal->j_maxlen) {
1549                 printk(KERN_WARNING
1550                         "JBD2: Invalid start block of journal: %u\n",
1551                         be32_to_cpu(sb->s_first));
1552                 goto out;
1553         }
1554 
1555         if (jbd2_has_feature_csum2(journal) &&
1556             jbd2_has_feature_csum3(journal)) {
1557                 /* Can't have checksum v2 and v3 at the same time! */
1558                 printk(KERN_ERR "JBD2: Can't enable checksumming v2 and v3 "
1559                        "at the same time!\n");
1560                 goto out;
1561         }
1562 
1563         if (jbd2_journal_has_csum_v2or3_feature(journal) &&
1564             jbd2_has_feature_checksum(journal)) {
1565                 /* Can't have checksum v1 and v2 on at the same time! */
1566                 printk(KERN_ERR "JBD2: Can't enable checksumming v1 and v2/3 "
1567                        "at the same time!\n");
1568                 goto out;
1569         }
1570 
1571         if (!jbd2_verify_csum_type(journal, sb)) {
1572                 printk(KERN_ERR "JBD2: Unknown checksum type\n");
1573                 goto out;
1574         }
1575 
1576         /* Load the checksum driver */
1577         if (jbd2_journal_has_csum_v2or3_feature(journal)) {
1578                 journal->j_chksum_driver = crypto_alloc_shash("crc32c", 0, 0);
1579                 if (IS_ERR(journal->j_chksum_driver)) {
1580                         printk(KERN_ERR "JBD2: Cannot load crc32c driver.\n");
1581                         err = PTR_ERR(journal->j_chksum_driver);
1582                         journal->j_chksum_driver = NULL;
1583                         goto out;
1584                 }
1585         }
1586 
1587         if (jbd2_journal_has_csum_v2or3(journal)) {
1588                 /* Check superblock checksum */
1589                 if (sb->s_checksum != jbd2_superblock_csum(journal, sb)) {
1590                         printk(KERN_ERR "JBD2: journal checksum error\n");
1591                         err = -EFSBADCRC;
1592                         goto out;
1593                 }
1594 
1595                 /* Precompute checksum seed for all metadata */
1596                 journal->j_csum_seed = jbd2_chksum(journal, ~0, sb->s_uuid,
1597                                                    sizeof(sb->s_uuid));
1598         }
1599 
1600         set_buffer_verified(bh);
1601 
1602         return 0;
1603 
1604 out:
1605         journal_fail_superblock(journal);
1606         return err;
1607 }
1608 
1609 /*
1610  * Load the on-disk journal superblock and read the key fields into the
1611  * journal_t.
1612  */
1613 
1614 static int load_superblock(journal_t *journal)
1615 {
1616         int err;
1617         journal_superblock_t *sb;
1618 
1619         err = journal_get_superblock(journal);
1620         if (err)
1621                 return err;
1622 
1623         sb = journal->j_superblock;
1624 
1625         journal->j_tail_sequence = be32_to_cpu(sb->s_sequence);
1626         journal->j_tail = be32_to_cpu(sb->s_start);
1627         journal->j_first = be32_to_cpu(sb->s_first);
1628         journal->j_last = be32_to_cpu(sb->s_maxlen);
1629         journal->j_errno = be32_to_cpu(sb->s_errno);
1630 
1631         return 0;
1632 }
1633 
1634 
1635 /**
1636  * int jbd2_journal_load() - Read journal from disk.
1637  * @journal: Journal to act on.
1638  *
1639  * Given a journal_t structure which tells us which disk blocks contain
1640  * a journal, read the journal from disk to initialise the in-memory
1641  * structures.
1642  */
1643 int jbd2_journal_load(journal_t *journal)
1644 {
1645         int err;
1646         journal_superblock_t *sb;
1647 
1648         err = load_superblock(journal);
1649         if (err)
1650                 return err;
1651 
1652         sb = journal->j_superblock;
1653         /* If this is a V2 superblock, then we have to check the
1654          * feature flags on it. */
1655 
1656         if (journal->j_format_version >= 2) {
1657                 if ((sb->s_feature_ro_compat &
1658                      ~cpu_to_be32(JBD2_KNOWN_ROCOMPAT_FEATURES)) ||
1659                     (sb->s_feature_incompat &
1660                      ~cpu_to_be32(JBD2_KNOWN_INCOMPAT_FEATURES))) {
1661                         printk(KERN_WARNING
1662                                 "JBD2: Unrecognised features on journal\n");
1663                         return -EINVAL;
1664                 }
1665         }
1666 
1667         /*
1668          * Create a slab for this blocksize
1669          */
1670         err = jbd2_journal_create_slab(be32_to_cpu(sb->s_blocksize));
1671         if (err)
1672                 return err;
1673 
1674         /* Let the recovery code check whether it needs to recover any
1675          * data from the journal. */
1676         if (jbd2_journal_recover(journal))
1677                 goto recovery_error;
1678 
1679         if (journal->j_failed_commit) {
1680                 printk(KERN_ERR "JBD2: journal transaction %u on %s "
1681                        "is corrupt.\n", journal->j_failed_commit,
1682                        journal->j_devname);
1683                 return -EFSCORRUPTED;
1684         }
1685         /*
1686          * clear JBD2_ABORT flag initialized in journal_init_common
1687          * here to update log tail information with the newest seq.
1688          */
1689         journal->j_flags &= ~JBD2_ABORT;
1690 
1691         /* OK, we've finished with the dynamic journal bits:
1692          * reinitialise the dynamic contents of the superblock in memory
1693          * and reset them on disk. */
1694         if (journal_reset(journal))
1695                 goto recovery_error;
1696 
1697         journal->j_flags |= JBD2_LOADED;
1698         return 0;
1699 
1700 recovery_error:
1701         printk(KERN_WARNING "JBD2: recovery failed\n");
1702         return -EIO;
1703 }
1704 
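     /*
      * Typical mount-time flow for a client filesystem (a hedged sketch, not
      * lifted from a particular caller; "journal_inode" stands for the
      * filesystem's journal inode):
      *
      *      journal_t *journal = jbd2_journal_init_inode(journal_inode);
      *
      *      if (!journal)
      *              return -EINVAL;
      *      err = jbd2_journal_load(journal);   (replay, then journal_reset())
      *      if (err) {
      *              jbd2_journal_destroy(journal);
      *              return err;
      *      }
      */
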
1705 /**
1706  * void jbd2_journal_destroy() - Release a journal_t structure.
1707  * @journal: Journal to act on.
1708  *
1709  * Release a journal_t structure once it is no longer in use by the
1710  * journaled object.
1711  * Return <0 if we couldn't clean up the journal.
1712  */
1713 int jbd2_journal_destroy(journal_t *journal)
1714 {
1715         int err = 0;
1716 
1717         /* Wait for the commit thread to wake up and die. */
1718         journal_kill_thread(journal);
1719 
1720         /* Force a final log commit */
1721         if (journal->j_running_transaction)
1722                 jbd2_journal_commit_transaction(journal);
1723 
1724         /* Force any old transactions to disk */
1725 
1726         /* Totally anal locking here... */
1727         spin_lock(&journal->j_list_lock);
1728         while (journal->j_checkpoint_transactions != NULL) {
1729                 spin_unlock(&journal->j_list_lock);
1730                 mutex_lock_io(&journal->j_checkpoint_mutex);
1731                 err = jbd2_log_do_checkpoint(journal);
1732                 mutex_unlock(&journal->j_checkpoint_mutex);
1733                 /*
1734                  * If checkpointing failed, just free the buffers to avoid
1735                  * looping forever
1736                  */
1737                 if (err) {
1738                         jbd2_journal_destroy_checkpoint(journal);
1739                         spin_lock(&journal->j_list_lock);
1740                         break;
1741                 }
1742                 spin_lock(&journal->j_list_lock);
1743         }
1744 
1745         J_ASSERT(journal->j_running_transaction == NULL);
1746         J_ASSERT(journal->j_committing_transaction == NULL);
1747         J_ASSERT(journal->j_checkpoint_transactions == NULL);
1748         spin_unlock(&journal->j_list_lock);
1749 
1750         if (journal->j_sb_buffer) {
1751                 if (!is_journal_aborted(journal)) {
1752                         mutex_lock_io(&journal->j_checkpoint_mutex);
1753 
1754                         write_lock(&journal->j_state_lock);
1755                         journal->j_tail_sequence =
1756                                 ++journal->j_transaction_sequence;
1757                         write_unlock(&journal->j_state_lock);
1758 
1759                         jbd2_mark_journal_empty(journal,
1760                                         REQ_SYNC | REQ_PREFLUSH | REQ_FUA);
1761                         mutex_unlock(&journal->j_checkpoint_mutex);
1762                 } else
1763                         err = -EIO;
1764                 brelse(journal->j_sb_buffer);
1765         }
1766 
1767         if (journal->j_proc_entry)
1768                 jbd2_stats_proc_exit(journal);
1769         iput(journal->j_inode);
1770         if (journal->j_revoke)
1771                 jbd2_journal_destroy_revoke(journal);
1772         if (journal->j_chksum_driver)
1773                 crypto_free_shash(journal->j_chksum_driver);
1774         kfree(journal->j_wbuf);
1775         kfree(journal);
1776 
1777         return err;
1778 }
1779 
1780 
1781 /**
1782  * int jbd2_journal_check_used_features() - Check if the specified features are used.
1783  * @journal: Journal to check.
1784  * @compat: bitmask of compatible features
1785  * @ro: bitmask of features that force read-only mount
1786  * @incompat: bitmask of incompatible features
1787  *
1788  * Check whether the journal uses all of a given set of
1789  * features.  Return true (non-zero) if it does.
1790  **/
1791 
1792 int jbd2_journal_check_used_features (journal_t *journal, unsigned long compat,
1793                                  unsigned long ro, unsigned long incompat)
1794 {
1795         journal_superblock_t *sb;
1796 
1797         if (!compat && !ro && !incompat)
1798                 return 1;
1799         /* Load journal superblock if it is not loaded yet. */
1800         if (journal->j_format_version == 0 &&
1801             journal_get_superblock(journal) != 0)
1802                 return 0;
1803         if (journal->j_format_version == 1)
1804                 return 0;
1805 
1806         sb = journal->j_superblock;
1807 
1808         if (((be32_to_cpu(sb->s_feature_compat) & compat) == compat) &&
1809             ((be32_to_cpu(sb->s_feature_ro_compat) & ro) == ro) &&
1810             ((be32_to_cpu(sb->s_feature_incompat) & incompat) == incompat))
1811                 return 1;
1812 
1813         return 0;
1814 }
1815 
1816 /**
1817  * int jbd2_journal_check_available_features() - Check feature set in journalling layer
1818  * @journal: Journal to check.
1819  * @compat: bitmask of compatible features
1820  * @ro: bitmask of features that force read-only mount
1821  * @incompat: bitmask of incompatible features
1822  *
1823  * Check whether the journaling code supports the use of
1824  * all of a given set of features on this journal.  Return true
1825  * (non-zero) if it can. */
1826 
1827 int jbd2_journal_check_available_features (journal_t *journal, unsigned long compat,
1828                                       unsigned long ro, unsigned long incompat)
1829 {
1830         if (!compat && !ro && !incompat)
1831                 return 1;
1832 
1833         /* We can support any known requested features iff the
1834          * superblock is in version 2.  Otherwise we fail to support any
1835          * extended sb features. */
1836 
1837         if (journal->j_format_version != 2)
1838                 return 0;
1839 
1840         if ((compat   & JBD2_KNOWN_COMPAT_FEATURES) == compat &&
1841             (ro       & JBD2_KNOWN_ROCOMPAT_FEATURES) == ro &&
1842             (incompat & JBD2_KNOWN_INCOMPAT_FEATURES) == incompat)
1843                 return 1;
1844 
1845         return 0;
1846 }
1847 
1848 /**
1849  * int jbd2_journal_set_features () - Mark a given journal feature in the superblock
1850  * @journal: Journal to act on.
1851  * @compat: bitmask of compatible features
1852  * @ro: bitmask of features that force read-only mount
1853  * @incompat: bitmask of incompatible features
1854  *
1855  * Mark a given journal feature as present on the
1856  * superblock.  Returns true if the requested features could be set.
1857  *
1858  */
1859 
1860 int jbd2_journal_set_features (journal_t *journal, unsigned long compat,
1861                           unsigned long ro, unsigned long incompat)
1862 {
1863 #define INCOMPAT_FEATURE_ON(f) \
1864                 ((incompat & (f)) && !(sb->s_feature_incompat & cpu_to_be32(f)))
1865 #define COMPAT_FEATURE_ON(f) \
1866                 ((compat & (f)) && !(sb->s_feature_compat & cpu_to_be32(f)))
1867         journal_superblock_t *sb;
1868 
1869         if (jbd2_journal_check_used_features(journal, compat, ro, incompat))
1870                 return 1;
1871 
1872         if (!jbd2_journal_check_available_features(journal, compat, ro, incompat))
1873                 return 0;
1874 
1875         /* If enabling v2 checksums, turn on v3 instead */
1876         if (incompat & JBD2_FEATURE_INCOMPAT_CSUM_V2) {
1877                 incompat &= ~JBD2_FEATURE_INCOMPAT_CSUM_V2;
1878                 incompat |= JBD2_FEATURE_INCOMPAT_CSUM_V3;
1879         }
1880 
1881         /* Asking for checksumming v3 and v1?  Only give them v3. */
1882         if (incompat & JBD2_FEATURE_INCOMPAT_CSUM_V3 &&
1883             compat & JBD2_FEATURE_COMPAT_CHECKSUM)
1884                 compat &= ~JBD2_FEATURE_COMPAT_CHECKSUM;
1885 
1886         jbd_debug(1, "Setting new features 0x%lx/0x%lx/0x%lx\n",
1887                   compat, ro, incompat);
1888 
1889         sb = journal->j_superblock;
1890 
1891         /* Load the checksum driver if necessary */
1892         if ((journal->j_chksum_driver == NULL) &&
1893             INCOMPAT_FEATURE_ON(JBD2_FEATURE_INCOMPAT_CSUM_V3)) {
1894                 journal->j_chksum_driver = crypto_alloc_shash("crc32c", 0, 0);
1895                 if (IS_ERR(journal->j_chksum_driver)) {
1896                         printk(KERN_ERR "JBD2: Cannot load crc32c driver.\n");
1897                         journal->j_chksum_driver = NULL;
1898                         return 0;
1899                 }
1900                 /* Precompute checksum seed for all metadata */
1901                 journal->j_csum_seed = jbd2_chksum(journal, ~0, sb->s_uuid,
1902                                                    sizeof(sb->s_uuid));
1903         }
1904 
1905         lock_buffer(journal->j_sb_buffer);
1906 
1907         /* If enabling v3 checksums, update superblock */
1908         if (INCOMPAT_FEATURE_ON(JBD2_FEATURE_INCOMPAT_CSUM_V3)) {
1909                 sb->s_checksum_type = JBD2_CRC32C_CHKSUM;
1910                 sb->s_feature_compat &=
1911                         ~cpu_to_be32(JBD2_FEATURE_COMPAT_CHECKSUM);
1912         }
1913 
1914         /* If enabling v1 checksums, downgrade superblock */
1915         if (COMPAT_FEATURE_ON(JBD2_FEATURE_COMPAT_CHECKSUM))
1916                 sb->s_feature_incompat &=
1917                         ~cpu_to_be32(JBD2_FEATURE_INCOMPAT_CSUM_V2 |
1918                                      JBD2_FEATURE_INCOMPAT_CSUM_V3);
1919 
1920         sb->s_feature_compat    |= cpu_to_be32(compat);
1921         sb->s_feature_ro_compat |= cpu_to_be32(ro);
1922         sb->s_feature_incompat  |= cpu_to_be32(incompat);
1923         unlock_buffer(journal->j_sb_buffer);
1924 
1925         return 1;
1926 #undef COMPAT_FEATURE_ON
1927 #undef INCOMPAT_FEATURE_ON
1928 }
1929 
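     /*
      * Example use of the feature helpers (an illustrative sketch; the actual
      * policy lives in the client filesystem, not here):
      *
      *      (Ask for v2 checksums; as noted above this is silently
      *       upgraded to v3 in the superblock)
      *      if (!jbd2_journal_set_features(journal, 0, 0,
      *                                     JBD2_FEATURE_INCOMPAT_CSUM_V2))
      *              return -EINVAL;
      *
      *      (Later: does the journal actually carry v3 checksums?)
      *      if (jbd2_journal_check_used_features(journal, 0, 0,
      *                                           JBD2_FEATURE_INCOMPAT_CSUM_V3))
      *              ...
      */
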
1930 /*
1931  * jbd2_journal_clear_features () - Clear a given journal feature in the
1932  *                                  superblock
1933  * @journal: Journal to act on.
1934  * @compat: bitmask of compatible features
1935  * @ro: bitmask of features that force read-only mount
1936  * @incompat: bitmask of incompatible features
1937  *
1938  * Clear the given journal features from the
1939  * superblock.
1940  */
1941 void jbd2_journal_clear_features(journal_t *journal, unsigned long compat,
1942                                 unsigned long ro, unsigned long incompat)
1943 {
1944         journal_superblock_t *sb;
1945 
1946         jbd_debug(1, "Clear features 0x%lx/0x%lx/0x%lx\n",
1947                   compat, ro, incompat);
1948 
1949         sb = journal->j_superblock;
1950 
1951         sb->s_feature_compat    &= ~cpu_to_be32(compat);
1952         sb->s_feature_ro_compat &= ~cpu_to_be32(ro);
1953         sb->s_feature_incompat  &= ~cpu_to_be32(incompat);
1954 }
1955 EXPORT_SYMBOL(jbd2_journal_clear_features);
1956 
1957 /**
1958  * int jbd2_journal_flush () - Flush journal
1959  * @journal: Journal to act on.
1960  *
1961  * Flush all data for a given journal to disk and empty the journal.
1962  * Filesystems can use this when remounting readonly to ensure that
1963  * recovery does not need to happen on remount.
1964  */
1965 
1966 int jbd2_journal_flush(journal_t *journal)
1967 {
1968         int err = 0;
1969         transaction_t *transaction = NULL;
1970 
1971         write_lock(&journal->j_state_lock);
1972 
1973         /* Force everything buffered to the log... */
1974         if (journal->j_running_transaction) {
1975                 transaction = journal->j_running_transaction;
1976                 __jbd2_log_start_commit(journal, transaction->t_tid);
1977         } else if (journal->j_committing_transaction)
1978                 transaction = journal->j_committing_transaction;
1979 
1980         /* Wait for the log commit to complete... */
1981         if (transaction) {
1982                 tid_t tid = transaction->t_tid;
1983 
1984                 write_unlock(&journal->j_state_lock);
1985                 jbd2_log_wait_commit(journal, tid);
1986         } else {
1987                 write_unlock(&journal->j_state_lock);
1988         }
1989 
1990         /* ...and flush everything in the log out to disk. */
1991         spin_lock(&journal->j_list_lock);
1992         while (!err && journal->j_checkpoint_transactions != NULL) {
1993                 spin_unlock(&journal->j_list_lock);
1994                 mutex_lock_io(&journal->j_checkpoint_mutex);
1995                 err = jbd2_log_do_checkpoint(journal);
1996                 mutex_unlock(&journal->j_checkpoint_mutex);
1997                 spin_lock(&journal->j_list_lock);
1998         }
1999         spin_unlock(&journal->j_list_lock);
2000 
2001         if (is_journal_aborted(journal))
2002                 return -EIO;
2003 
2004         mutex_lock_io(&journal->j_checkpoint_mutex);
2005         if (!err) {
2006                 err = jbd2_cleanup_journal_tail(journal);
2007                 if (err < 0) {
2008                         mutex_unlock(&journal->j_checkpoint_mutex);
2009                         goto out;
2010                 }
2011                 err = 0;
2012         }
2013 
2014         /* Finally, mark the journal as really needing no recovery.
2015          * This sets s_start==0 in the underlying superblock, which is
2016          * the magic code for a fully-recovered superblock.  Any future
2017          * commits of data to the journal will restore the current
2018          * s_start value. */
2019         jbd2_mark_journal_empty(journal, REQ_SYNC | REQ_FUA);
2020         mutex_unlock(&journal->j_checkpoint_mutex);
2021         write_lock(&journal->j_state_lock);
2022         J_ASSERT(!journal->j_running_transaction);
2023         J_ASSERT(!journal->j_committing_transaction);
2024         J_ASSERT(!journal->j_checkpoint_transactions);
2025         J_ASSERT(journal->j_head == journal->j_tail);
2026         J_ASSERT(journal->j_tail_sequence == journal->j_transaction_sequence);
2027         write_unlock(&journal->j_state_lock);
2028 out:
2029         return err;
2030 }
2031 
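     /*
      * Remount-read-only sketch (illustrative only; real filesystems wrap
      * this in their own locking and error handling).  A successful flush
      * leaves s_start == 0 on disk, so the next mount needs no recovery:
      *
      *      err = jbd2_journal_flush(journal);
      *      if (err < 0)
      *              return err;
      *      (journal is now checkpointed and marked empty on disk)
      */
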
2032 /**
2033  * int jbd2_journal_wipe() - Wipe journal contents
2034  * @journal: Journal to act on.
2035  * @write: flag (see below)
2036  *
2037  * Wipe out all of the contents of a journal, safely.  This will produce
2038  * a warning if the journal contains any valid recovery information.
2039  * Must be called between journal_init_*() and jbd2_journal_load().
2040  *
2041  * If 'write' is non-zero, then we wipe out the journal on disk; otherwise
2042  * we merely suppress recovery.
2043  */
2044 
2045 int jbd2_journal_wipe(journal_t *journal, int write)
2046 {
2047         int err = 0;
2048 
2049         J_ASSERT (!(journal->j_flags & JBD2_LOADED));
2050 
2051         err = load_superblock(journal);
2052         if (err)
2053                 return err;
2054 
2055         if (!journal->j_tail)
2056                 goto no_recovery;
2057 
2058         printk(KERN_WARNING "JBD2: %s recovery information on journal\n",
2059                 write ? "Clearing" : "Ignoring");
2060 
2061         err = jbd2_journal_skip_recovery(journal);
2062         if (write) {
2063                 /* Lock to make assertions happy... */
2064                 mutex_lock_io(&journal->j_checkpoint_mutex);
2065                 jbd2_mark_journal_empty(journal, REQ_SYNC | REQ_FUA);
2066                 mutex_unlock(&journal->j_checkpoint_mutex);
2067         }
2068 
2069  no_recovery:
2070         return err;
2071 }
2072 
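     /*
      * Ordering sketch for jbd2_journal_wipe() (hedged; this restates the
      * rule in the comment above rather than any one caller): the wipe sits
      * between journal initialisation and jbd2_journal_load(), e.g. when
      * recovery is deliberately being skipped:
      *
      *      journal = jbd2_journal_init_inode(journal_inode);
      *      jbd2_journal_wipe(journal, 0);      (suppress recovery only)
      *      err = jbd2_journal_load(journal);
      */
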
2073 /*
2074  * Journal abort has very specific semantics, which are described in
2075  * the documentation for jbd2_journal_abort() below.
2076  *
2077  * The two internal functions that provide abort to the jbd2 layer
2078  * itself are here.
2079  */
2080 
2081 /*
2082  * Quick version for internal journal use (doesn't lock the journal).
2083  * Aborts hard --- we mark the abort as occurred, but do _nothing_ else,
2084  * and don't attempt to make any other journal updates.
2085  */
2086 void __jbd2_journal_abort_hard(journal_t *journal)
2087 {
2088         transaction_t *transaction;
2089 
2090         if (journal->j_flags & JBD2_ABORT)
2091                 return;
2092 
2093         printk(KERN_ERR "Aborting journal on device %s.\n",
2094                journal->j_devname);
2095 
2096         write_lock(&journal->j_state_lock);
2097         journal->j_flags |= JBD2_ABORT;
2098         transaction = journal->j_running_transaction;
2099         if (transaction)
2100                 __jbd2_log_start_commit(journal, transaction->t_tid);
2101         write_unlock(&journal->j_state_lock);
2102 }
2103 
2104 /* Soft abort: record the abort error status in the journal superblock,
2105  * but don't do any other IO. */
2106 static void __journal_abort_soft (journal_t *journal, int errno)
2107 {
2108         int old_errno;
2109 
2110         write_lock(&journal->j_state_lock);
2111         old_errno = journal->j_errno;
2112         if (!journal->j_errno || errno == -ESHUTDOWN)
2113                 journal->j_errno = errno;
2114 
2115         if (journal->j_flags & JBD2_ABORT) {
2116                 write_unlock(&journal->j_state_lock);
2117                 if (old_errno != -ESHUTDOWN && errno == -ESHUTDOWN)
2118                         jbd2_journal_update_sb_errno(journal);
2119                 return;
2120         }
2121         write_unlock(&journal->j_state_lock);
2122 
2123         __jbd2_journal_abort_hard(journal);
2124 
2125         jbd2_journal_update_sb_errno(journal);
2126         write_lock(&journal->j_state_lock);
2127         journal->j_flags |= JBD2_REC_ERR;
2128         write_unlock(&journal->j_state_lock);
2129 }
2130 
2131 /**
2132  * void jbd2_journal_abort () - Shutdown the journal immediately.
2133  * @journal: the journal to shutdown.
2134  * @errno:   an error number to record in the journal indicating
2135  *           the reason for the shutdown.
2136  *
2137  * Perform a complete, immediate shutdown of the ENTIRE
2138  * journal (not of a single transaction).  This operation cannot be
2139  * undone without closing and reopening the journal.
2140  *
2141  * The jbd2_journal_abort function is intended to support higher level error
2142  * recovery mechanisms such as the ext4 remount-readonly error
2143  * mode.
2144  *
2145  * Journal abort has very specific semantics.  Any existing dirty,
2146  * unjournaled buffers in the main filesystem will still be written to
2147  * disk by the writeback code, but the journaling mechanism will be suspended
2148  * immediately and no further transaction commits will be honoured.
2149  *
2150  * Any dirty, journaled buffers will be written back to disk without
2151  * hitting the journal.  Atomicity cannot be guaranteed on an aborted
2152  * filesystem, but we _do_ attempt to leave as much data as possible
2153  * behind for fsck to use for cleanup.
2154  *
2155  * Any attempt to get a new transaction handle on a journal which is in
2156  * ABORT state will just result in an -EROFS error return.  A
2157  * jbd2_journal_stop on an existing handle will return -EIO if we have
2158  * entered abort state during the update.
2159  *
2160  * Recursive transactions are not disturbed by journal abort until the
2161  * final jbd2_journal_stop, which will receive the -EIO error.
2162  *
2163  * Finally, the jbd2_journal_abort call allows the caller to supply an errno
2164  * which will be recorded (if possible) in the journal superblock.  This
2165  * allows a client to record failure conditions in the middle of a
2166  * transaction without having to complete the transaction to record the
2167  * failure to disk.  ext4_error(), for example, uses this
2168  * functionality.
2169  *
2170  */
2171 
2172 void jbd2_journal_abort(journal_t *journal, int errno)
2173 {
2174         __journal_abort_soft(journal, errno);
2175 }
2176 
2177 /**
2178  * int jbd2_journal_errno () - returns the journal's error state.
2179  * @journal: journal to examine.
2180  *
2181  * This is the errno number set with jbd2_journal_abort(), the last
2182  * time the journal was mounted - if the journal was stopped
2183  * without calling abort this will be 0.
2184  *
2185  * If the journal has been aborted during this mount, -EROFS will
2186  * be returned.
2187  */
2188 int jbd2_journal_errno(journal_t *journal)
2189 {
2190         int err;
2191 
2192         read_lock(&journal->j_state_lock);
2193         if (journal->j_flags & JBD2_ABORT)
2194                 err = -EROFS;
2195         else
2196                 err = journal->j_errno;
2197         read_unlock(&journal->j_state_lock);
2198         return err;
2199 }
2200 
2201 /**
2202  * int jbd2_journal_clear_err () - clears the journal's error state
2203  * @journal: journal to act on.
2204  *
2205  * An error must be cleared or acked to take a FS out of readonly
2206  * mode.
2207  */
2208 int jbd2_journal_clear_err(journal_t *journal)
2209 {
2210         int err = 0;
2211 
2212         write_lock(&journal->j_state_lock);
2213         if (journal->j_flags & JBD2_ABORT)
2214                 err = -EROFS;
2215         else
2216                 journal->j_errno = 0;
2217         write_unlock(&journal->j_state_lock);
2218         return err;
2219 }
2220 
2221 /**
2222  * void jbd2_journal_ack_err() - Ack journal err.
2223  * @journal: journal to act on.
2224  *
2225  * An error must be cleared or acked to take a FS out of readonly
2226  * mode.
2227  */
2228 void jbd2_journal_ack_err(journal_t *journal)
2229 {
2230         write_lock(&journal->j_state_lock);
2231         if (journal->j_errno)
2232                 journal->j_flags |= JBD2_ACK_ERR;
2233         write_unlock(&journal->j_state_lock);
2234 }
2235 
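     /*
      * The error helpers in combination (an illustrative sequence, not
      * lifted from a particular filesystem): once aborted, the journal
      * reports -EROFS until the error is cleared or acknowledged.
      *
      *      jbd2_journal_abort(journal, -EIO);
      *      err = jbd2_journal_errno(journal);  (-EROFS while aborted)
      *      ...
      *      (before taking the filesystem out of read-only mode again)
      *      jbd2_journal_clear_err(journal);    (or jbd2_journal_ack_err())
      */
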
2236 int jbd2_journal_blocks_per_page(struct inode *inode)
2237 {
2238         return 1 << (PAGE_SHIFT - inode->i_sb->s_blocksize_bits);
2239 }
2240 
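     /*
      * Worked example (assuming the common 4K PAGE_SIZE, i.e. PAGE_SHIFT of
      * 12): an inode on a 1K-block filesystem has s_blocksize_bits == 10, so
      * this returns 1 << (12 - 10) == 4 journal blocks per page; with 4K
      * blocks it returns 1.
      */
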
2241 /*
2242  * Helper functions to deal with 32- or 64-bit block numbers.
2243  */
2244 size_t journal_tag_bytes(journal_t *journal)
2245 {
2246         size_t sz;
2247 
2248         if (jbd2_has_feature_csum3(journal))
2249                 return sizeof(journal_block_tag3_t);
2250 
2251         sz = sizeof(journal_block_tag_t);
2252 
2253         if (jbd2_has_feature_csum2(journal))
2254                 sz += sizeof(__u16);
2255 
2256         if (jbd2_has_feature_64bit(journal))
2257                 return sz;
2258         else
2259                 return sz - sizeof(__u32);
2260 }
2261 
2262 /*
2263  * JBD memory management
2264  *
2265  * These functions are used to allocate block-sized chunks of memory
2266  * used for making copies of buffer_head data.  Very often it will be
2267  * page-sized chunks of data, but sometimes it will be in
2268  * sub-page-size chunks.  (For example, 16k pages on Power systems
2269  * with a 4k block file system.)  For blocks smaller than a page, we
2270  * use a SLAB allocator.  There are slab caches for each block size,
2271  * which are allocated at mount time, if necessary, and we only free
2272  * (all of) the slab caches when/if the jbd2 module is unloaded.  For
2273  * this reason we don't need a mutex to protect access to
2274  * jbd2_slab[] when allocating or releasing memory; we only need one in
2275  * jbd2_journal_create_slab().
2276  */
2277 #define JBD2_MAX_SLABS 8
2278 static struct kmem_cache *jbd2_slab[JBD2_MAX_SLABS];
2279 
2280 static const char *jbd2_slab_names[JBD2_MAX_SLABS] = {
2281         "jbd2_1k", "jbd2_2k", "jbd2_4k", "jbd2_8k",
2282         "jbd2_16k", "jbd2_32k", "jbd2_64k", "jbd2_128k"
2283 };
2284 
2285 
2286 static void jbd2_journal_destroy_slabs(void)
2287 {
2288         int i;
2289 
2290         for (i = 0; i < JBD2_MAX_SLABS; i++) {
2291                 kmem_cache_destroy(jbd2_slab[i]);
2292                 jbd2_slab[i] = NULL;
2293         }
2294 }
2295 
2296 static int jbd2_journal_create_slab(size_t size)
2297 {
2298         static DEFINE_MUTEX(jbd2_slab_create_mutex);
2299         int i = order_base_2(size) - 10;
2300         size_t slab_size;
2301 
2302         if (size == PAGE_SIZE)
2303                 return 0;
2304 
2305         if (i >= JBD2_MAX_SLABS)
2306                 return -EINVAL;
2307 
2308         if (unlikely(i < 0))
2309                 i = 0;
2310         mutex_lock(&jbd2_slab_create_mutex);
2311         if (jbd2_slab[i]) {
2312                 mutex_unlock(&jbd2_slab_create_mutex);
2313                 return 0;       /* Already created */
2314         }
2315 
2316         slab_size = 1 << (i+10);
2317         jbd2_slab[i] = kmem_cache_create(jbd2_slab_names[i], slab_size,
2318                                          slab_size, 0, NULL);
2319         mutex_unlock(&jbd2_slab_create_mutex);
2320         if (!jbd2_slab[i]) {
2321                 printk(KERN_EMERG "JBD2: no memory for jbd2_slab cache\n");
2322                 return -ENOMEM;
2323         }
2324         return 0;
2325 }
2326 
2327 static struct kmem_cache *get_slab(size_t size)
2328 {
2329         int i = order_base_2(size) - 10;
2330 
2331         BUG_ON(i >= JBD2_MAX_SLABS);
2332         if (unlikely(i < 0))
2333                 i = 0;
2334         BUG_ON(jbd2_slab[i] == NULL);
2335         return jbd2_slab[i];
2336 }
2337 
2338 void *jbd2_alloc(size_t size, gfp_t flags)
2339 {
2340         void *ptr;
2341 
2342         BUG_ON(size & (size-1)); /* Must be a power of 2 */
2343 
2344         if (size < PAGE_SIZE)
2345                 ptr = kmem_cache_alloc(get_slab(size), flags);
2346         else
2347                 ptr = (void *)__get_free_pages(flags, get_order(size));
2348 
2349         /* Check alignment; SLUB has gotten this wrong in the past,
2350          * and this can lead to user data corruption! */
2351         BUG_ON(((unsigned long) ptr) & (size-1));
2352 
2353         return ptr;
2354 }
2355 
2356 void jbd2_free(void *ptr, size_t size)
2357 {
2358         if (size < PAGE_SIZE)
2359                 kmem_cache_free(get_slab(size), ptr);
2360         else
2361                 free_pages((unsigned long)ptr, get_order(size));
2362 };
2363 
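     /*
      * Worked example (assuming 4K pages): jbd2_alloc(1024, GFP_NOFS) maps
      * to jbd2_slab[order_base_2(1024) - 10] == jbd2_slab[0], i.e. the
      * "jbd2_1k" cache, while a full-page request bypasses the slabs and
      * uses __get_free_pages() instead.  The buffer is returned with
      * jbd2_free(ptr, 1024).
      */
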
2364 /*
2365  * Journal_head storage management
2366  */
2367 static struct kmem_cache *jbd2_journal_head_cache;
2368 #ifdef CONFIG_JBD2_DEBUG
2369 static atomic_t nr_journal_heads = ATOMIC_INIT(0);
2370 #endif
2371 
2372 static int __init jbd2_journal_init_journal_head_cache(void)
2373 {
2374         J_ASSERT(!jbd2_journal_head_cache);
2375         jbd2_journal_head_cache = kmem_cache_create("jbd2_journal_head",
2376                                 sizeof(struct journal_head),
2377                                 0,              /* offset */
2378                                 SLAB_TEMPORARY | SLAB_TYPESAFE_BY_RCU,
2379                                 NULL);          /* ctor */
2380         if (!jbd2_journal_head_cache) {
2381                 printk(KERN_EMERG "JBD2: no memory for journal_head cache\n");
2382                 return -ENOMEM;
2383         }
2384         return 0;
2385 }
2386 
2387 static void jbd2_journal_destroy_journal_head_cache(void)
2388 {
2389         kmem_cache_destroy(jbd2_journal_head_cache);
2390         jbd2_journal_head_cache = NULL;
2391 }
2392 
2393 /*
2394  * journal_head splicing and dicing
2395  */
2396 static struct journal_head *journal_alloc_journal_head(void)
2397 {
2398         struct journal_head *ret;
2399 
2400 #ifdef CONFIG_JBD2_DEBUG
2401         atomic_inc(&nr_journal_heads);
2402 #endif
2403         ret = kmem_cache_zalloc(jbd2_journal_head_cache, GFP_NOFS);
2404         if (!ret) {
2405                 jbd_debug(1, "out of memory for journal_head\n");
2406                 pr_notice_ratelimited("ENOMEM in %s, retrying.\n", __func__);
2407                 ret = kmem_cache_zalloc(jbd2_journal_head_cache,
2408                                 GFP_NOFS | __GFP_NOFAIL);
2409         }
2410         return ret;
2411 }
2412 
2413 static void journal_free_journal_head(struct journal_head *jh)
2414 {
2415 #ifdef CONFIG_JBD2_DEBUG
2416         atomic_dec(&nr_journal_heads);
2417         memset(jh, JBD2_POISON_FREE, sizeof(*jh));
2418 #endif
2419         kmem_cache_free(jbd2_journal_head_cache, jh);
2420 }
2421 
2422 /*
2423  * A journal_head is attached to a buffer_head whenever JBD has an
2424  * interest in the buffer.
2425  *
2426  * Whenever a buffer has an attached journal_head, its ->b_state:BH_JBD bit
2427  * is set.  This bit is tested in core kernel code where we need to take
2428  * JBD-specific actions.  Testing the zeroness of ->b_private is not reliable
2429  * there.
2430  *
2431  * When a buffer has its BH_JBD bit set, its ->b_count is elevated by one.
2432  *
2433  * When a buffer has its BH_JBD bit set it is immune from being released by
2434  * core kernel code, mainly via ->b_count.
2435  *
2436  * A journal_head is detached from its buffer_head when the journal_head's
2437  * b_jcount reaches zero. Running transaction (b_transaction) and checkpoint
2438  * transaction (b_cp_transaction) hold their references to b_jcount.
2439  *
2440  * Various places in the kernel want to attach a journal_head to a buffer_head
2441  * _before_ attaching the journal_head to a transaction.  To protect the
2442  * journal_head in this situation, jbd2_journal_add_journal_head elevates the
2443  * journal_head's b_jcount refcount by one.  The caller must call
2444  * jbd2_journal_put_journal_head() to undo this.
2445  *
2446  * So the typical usage would be:
2447  *
2448  *      (Attach a journal_head if needed.  Increments b_jcount)
2449  *      struct journal_head *jh = jbd2_journal_add_journal_head(bh);
2450  *      ...
2451  *      (Get another reference for transaction)
2452  *      jbd2_journal_grab_journal_head(bh);
2453  *      jh->b_transaction = xxx;
2454  *      (Put original reference)
2455  *      jbd2_journal_put_journal_head(jh);
2456  */
2457 
2458 /*
2459  * Give a buffer_head a journal_head.
2460  *
2461  * May sleep.
2462  */
2463 struct journal_head *jbd2_journal_add_journal_head(struct buffer_head *bh)
2464 {
2465         struct journal_head *jh;
2466         struct journal_head *new_jh = NULL;
2467 
2468 repeat:
2469         if (!buffer_jbd(bh))
2470                 new_jh = journal_alloc_journal_head();
2471 
2472         jbd_lock_bh_journal_head(bh);
2473         if (buffer_jbd(bh)) {
2474                 jh = bh2jh(bh);
2475         } else {
2476                 J_ASSERT_BH(bh,
2477                         (atomic_read(&bh->b_count) > 0) ||
2478                         (bh->b_page && bh->b_page->mapping));
2479 
2480                 if (!new_jh) {
2481                         jbd_unlock_bh_journal_head(bh);
2482                         goto repeat;
2483                 }
2484 
2485                 jh = new_jh;
2486                 new_jh = NULL;          /* We consumed it */
2487                 set_buffer_jbd(bh);
2488                 bh->b_private = jh;
2489                 jh->b_bh = bh;
2490                 get_bh(bh);
2491                 BUFFER_TRACE(bh, "added journal_head");
2492         }
2493         jh->b_jcount++;
2494         jbd_unlock_bh_journal_head(bh);
2495         if (new_jh)
2496                 journal_free_journal_head(new_jh);
2497         return bh->b_private;
2498 }
2499 
2500 /*
2501  * Grab a ref against this buffer_head's journal_head.  If it ended up not
2502  * having a journal_head, return NULL.
2503  */
2504 struct journal_head *jbd2_journal_grab_journal_head(struct buffer_head *bh)
2505 {
2506         struct journal_head *jh = NULL;
2507 
2508         jbd_lock_bh_journal_head(bh);
2509         if (buffer_jbd(bh)) {
2510                 jh = bh2jh(bh);
2511                 jh->b_jcount++;
2512         }
2513         jbd_unlock_bh_journal_head(bh);
2514         return jh;
2515 }
2516 
2517 static void __journal_remove_journal_head(struct buffer_head *bh)
2518 {
2519         struct journal_head *jh = bh2jh(bh);
2520 
2521         J_ASSERT_JH(jh, jh->b_jcount >= 0);
2522         J_ASSERT_JH(jh, jh->b_transaction == NULL);
2523         J_ASSERT_JH(jh, jh->b_next_transaction == NULL);
2524         J_ASSERT_JH(jh, jh->b_cp_transaction == NULL);
2525         J_ASSERT_JH(jh, jh->b_jlist == BJ_None);
2526         J_ASSERT_BH(bh, buffer_jbd(bh));
2527         J_ASSERT_BH(bh, jh2bh(jh) == bh);
2528         BUFFER_TRACE(bh, "remove journal_head");
2529         if (jh->b_frozen_data) {
2530                 printk(KERN_WARNING "%s: freeing b_frozen_data\n", __func__);
2531                 jbd2_free(jh->b_frozen_data, bh->b_size);
2532         }
2533         if (jh->b_committed_data) {
2534                 printk(KERN_WARNING "%s: freeing b_committed_data\n", __func__);
2535                 jbd2_free(jh->b_committed_data, bh->b_size);
2536         }
2537         bh->b_private = NULL;
2538         jh->b_bh = NULL;        /* debug, really */
2539         clear_buffer_jbd(bh);
2540         journal_free_journal_head(jh);
2541 }
2542 
2543 /*
2544  * Drop a reference on the passed journal_head.  If it fell to zero then
2545  * release the journal_head from the buffer_head.
2546  */
2547 void jbd2_journal_put_journal_head(struct journal_head *jh)
2548 {
2549         struct buffer_head *bh = jh2bh(jh);
2550 
2551         jbd_lock_bh_journal_head(bh);
2552         J_ASSERT_JH(jh, jh->b_jcount > 0);
2553         --jh->b_jcount;
2554         if (!jh->b_jcount) {
2555                 __journal_remove_journal_head(bh);
2556                 jbd_unlock_bh_journal_head(bh);
2557                 __brelse(bh);
2558         } else
2559                 jbd_unlock_bh_journal_head(bh);
2560 }
2561 
2562 /*
2563  * Initialize jbd inode head
2564  */
2565 void jbd2_journal_init_jbd_inode(struct jbd2_inode *jinode, struct inode *inode)
2566 {
2567         jinode->i_transaction = NULL;
2568         jinode->i_next_transaction = NULL;
2569         jinode->i_vfs_inode = inode;
2570         jinode->i_flags = 0;
2571         jinode->i_dirty_start = 0;
2572         jinode->i_dirty_end = 0;
2573         INIT_LIST_HEAD(&jinode->i_list);
2574 }
2575 
2576 /*
2577  * Function to be called before we start removing an inode from memory (i.e.,
2578  * clear_inode() is a fine place to call it from). It removes the inode from
2579  * the transaction's lists.
2580  */
2581 void jbd2_journal_release_jbd_inode(journal_t *journal,
2582                                     struct jbd2_inode *jinode)
2583 {
2584         if (!journal)
2585                 return;
2586 restart:
2587         spin_lock(&journal->j_list_lock);
2588         /* Is commit writing out inode - we have to wait */
2589         if (jinode->i_flags & JI_COMMIT_RUNNING) {
2590                 wait_queue_head_t *wq;
2591                 DEFINE_WAIT_BIT(wait, &jinode->i_flags, __JI_COMMIT_RUNNING);
2592                 wq = bit_waitqueue(&jinode->i_flags, __JI_COMMIT_RUNNING);
2593                 prepare_to_wait(wq, &wait.wq_entry, TASK_UNINTERRUPTIBLE);
2594                 spin_unlock(&journal->j_list_lock);
2595                 schedule();
2596                 finish_wait(wq, &wait.wq_entry);
2597                 goto restart;
2598         }
2599 
2600         if (jinode->i_transaction) {
2601                 list_del(&jinode->i_list);
2602                 jinode->i_transaction = NULL;
2603         }
2604         spin_unlock(&journal->j_list_lock);
2605 }
2606 
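     /*
      * Usage sketch (hedged; "ei" is a hypothetical stand-in for a client's
      * per-inode structure): a filesystem pairs these two calls over the
      * lifetime of an inode that takes part in journaled/ordered data
      * writeout:
      *
      *      jbd2_journal_init_jbd_inode(&ei->jinode, &ei->vfs_inode);
      *      ...
      *      jbd2_journal_release_jbd_inode(journal, &ei->jinode);
      */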
2607 
2608 #ifdef CONFIG_PROC_FS
2609 
2610 #define JBD2_STATS_PROC_NAME "fs/jbd2"
2611 
2612 static void __init jbd2_create_jbd_stats_proc_entry(void)
2613 {
2614         proc_jbd2_stats = proc_mkdir(JBD2_STATS_PROC_NAME, NULL);
2615 }
2616 
2617 static void __exit jbd2_remove_jbd_stats_proc_entry(void)
2618 {
2619         if (proc_jbd2_stats)
2620                 remove_proc_entry(JBD2_STATS_PROC_NAME, NULL);
2621 }
2622 
2623 #else
2624 
2625 #define jbd2_create_jbd_stats_proc_entry() do {} while (0)
2626 #define jbd2_remove_jbd_stats_proc_entry() do {} while (0)
2627 
2628 #endif
2629 
2630 struct kmem_cache *jbd2_handle_cache, *jbd2_inode_cache;
2631 
2632 static int __init jbd2_journal_init_inode_cache(void)
2633 {
2634         J_ASSERT(!jbd2_inode_cache);
2635         jbd2_inode_cache = KMEM_CACHE(jbd2_inode, 0);
2636         if (!jbd2_inode_cache) {
2637                 pr_emerg("JBD2: failed to create inode cache\n");
2638                 return -ENOMEM;
2639         }
2640         return 0;
2641 }
2642 
2643 static int __init jbd2_journal_init_handle_cache(void)
2644 {
2645         J_ASSERT(!jbd2_handle_cache);
2646         jbd2_handle_cache = KMEM_CACHE(jbd2_journal_handle, SLAB_TEMPORARY);
2647         if (!jbd2_handle_cache) {
2648                 printk(KERN_EMERG "JBD2: failed to create handle cache\n");
2649                 return -ENOMEM;
2650         }
2651         return 0;
2652 }
2653 
2654 static void jbd2_journal_destroy_inode_cache(void)
2655 {
2656         kmem_cache_destroy(jbd2_inode_cache);
2657         jbd2_inode_cache = NULL;
2658 }
2659 
2660 static void jbd2_journal_destroy_handle_cache(void)
2661 {
2662         kmem_cache_destroy(jbd2_handle_cache);
2663         jbd2_handle_cache = NULL;
2664 }
2665 
2666 /*
2667  * Module startup and shutdown
2668  */
2669 
2670 static int __init journal_init_caches(void)
2671 {
2672         int ret;
2673 
2674         ret = jbd2_journal_init_revoke_record_cache();
2675         if (ret == 0)
2676                 ret = jbd2_journal_init_revoke_table_cache();
2677         if (ret == 0)
2678                 ret = jbd2_journal_init_journal_head_cache();
2679         if (ret == 0)
2680                 ret = jbd2_journal_init_handle_cache();
2681         if (ret == 0)
2682                 ret = jbd2_journal_init_inode_cache();
2683         if (ret == 0)
2684                 ret = jbd2_journal_init_transaction_cache();
2685         return ret;
2686 }
2687 
2688 static void jbd2_journal_destroy_caches(void)
2689 {
2690         jbd2_journal_destroy_revoke_record_cache();
2691         jbd2_journal_destroy_revoke_table_cache();
2692         jbd2_journal_destroy_journal_head_cache();
2693         jbd2_journal_destroy_handle_cache();
2694         jbd2_journal_destroy_inode_cache();
2695         jbd2_journal_destroy_transaction_cache();
2696         jbd2_journal_destroy_slabs();
2697 }
2698 
2699 static int __init journal_init(void)
2700 {
2701         int ret;
2702 
2703         BUILD_BUG_ON(sizeof(struct journal_superblock_s) != 1024);
2704 
2705         ret = journal_init_caches();
2706         if (ret == 0) {
2707                 jbd2_create_jbd_stats_proc_entry();
2708         } else {
2709                 jbd2_journal_destroy_caches();
2710         }
2711         return ret;
2712 }
2713 
2714 static void __exit journal_exit(void)
2715 {
2716 #ifdef CONFIG_JBD2_DEBUG
2717         int n = atomic_read(&nr_journal_heads);
2718         if (n)
2719                 printk(KERN_ERR "JBD2: leaked %d journal_heads!\n", n);
2720 #endif
2721         jbd2_remove_jbd_stats_proc_entry();
2722         jbd2_journal_destroy_caches();
2723 }
2724 
2725 MODULE_LICENSE("GPL");
2726 module_init(journal_init);
2727 module_exit(journal_exit);
2728 
