fs/f2fs/recovery.c

DEFINITIONS

This source file includes the following definitions:
  1. f2fs_space_for_roll_forward
  2. get_fsync_inode
  3. add_fsync_inode
  4. del_fsync_inode
  5. recover_dentry
  6. recover_quota_data
  7. recover_inline_flags
  8. recover_inode
  9. find_fsync_dnodes
  10. destroy_fsync_dnodes
  11. check_index_in_prev_nodes
  12. do_recover_data
  13. recover_data
  14. f2fs_recover_fsync_data

// SPDX-License-Identifier: GPL-2.0
/*
 * fs/f2fs/recovery.c
 *
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 *             http://www.samsung.com/
 */
#include <linux/fs.h>
#include <linux/f2fs_fs.h>
#include "f2fs.h"
#include "node.h"
#include "segment.h"

/*
 * Roll forward recovery scenarios.
 *
 * [Term] F: fsync_mark, D: dentry_mark
 *
 * 1. inode(x) | CP | inode(x) | dnode(F)
 * -> Update the latest inode(x).
 *
 * 2. inode(x) | CP | inode(F) | dnode(F)
 * -> No problem.
 *
 * 3. inode(x) | CP | dnode(F) | inode(x)
 * -> Recover to the latest dnode(F), and drop the last inode(x)
 *
 * 4. inode(x) | CP | dnode(F) | inode(F)
 * -> No problem.
 *
 * 5. CP | inode(x) | dnode(F)
 * -> The inode(DF) was missing. Should drop this dnode(F).
 *
 * 6. CP | inode(DF) | dnode(F)
 * -> No problem.
 *
 * 7. CP | dnode(F) | inode(DF)
 * -> If f2fs_iget fails, then goto next to find inode(DF).
 *
 * 8. CP | dnode(F) | inode(x)
 * -> If f2fs_iget fails, then goto next to find inode(DF).
 *    But it will fail due to no inode(DF).
 */

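/* Slab cache for the fsync_inode_entry items collected during recovery. */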
static struct kmem_cache *fsync_entry_slab;

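/*
 * Check whether the blocks allocated since the last checkpoint still fit
 * within user_block_count, i.e. whether a roll-forward replay can proceed
 * without over-committing the volume.
 */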
bool f2fs_space_for_roll_forward(struct f2fs_sb_info *sbi)
{
        s64 nalloc = percpu_counter_sum_positive(&sbi->alloc_valid_block_count);

        if (sbi->last_valid_block_count + nalloc > sbi->user_block_count)
                return false;
        return true;
}

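/* Find the fsync_inode_entry for @ino in @head, or NULL if none exists. */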
static struct fsync_inode_entry *get_fsync_inode(struct list_head *head,
                                                                nid_t ino)
{
        struct fsync_inode_entry *entry;

        list_for_each_entry(entry, head, list)
                if (entry->inode->i_ino == ino)
                        return entry;

        return NULL;
}

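/*
 * Load the inode for @ino and append a new fsync_inode_entry to @head.
 * @quota_inode is true when the inode page itself was just recovered, so
 * the inode must also be charged against quota here.
 */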
static struct fsync_inode_entry *add_fsync_inode(struct f2fs_sb_info *sbi,
                        struct list_head *head, nid_t ino, bool quota_inode)
{
        struct inode *inode;
        struct fsync_inode_entry *entry;
        int err;

        inode = f2fs_iget_retry(sbi->sb, ino);
        if (IS_ERR(inode))
                return ERR_CAST(inode);

        err = dquot_initialize(inode);
        if (err)
                goto err_out;

        if (quota_inode) {
                err = dquot_alloc_inode(inode);
                if (err)
                        goto err_out;
        }

        entry = f2fs_kmem_cache_alloc(fsync_entry_slab, GFP_F2FS_ZERO);
        entry->inode = inode;
        list_add_tail(&entry->list, head);

        return entry;
err_out:
        iput(inode);
        return ERR_PTR(err);
}

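/*
 * Remove @entry from its list and release the inode.  When @drop is set,
 * the inode should not be recovered, so mark it synced first to keep
 * iput() from writing it back.
 */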
static void del_fsync_inode(struct fsync_inode_entry *entry, int drop)
{
        if (drop) {
                /* inode should not be recovered, drop it */
                f2fs_inode_synced(entry->inode);
        }
        iput(entry->inode);
        list_del(&entry->list);
        kmem_cache_free(fsync_entry_slab, entry);
}

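/*
 * Re-add the directory entry for @inode using the parent ino and the name
 * stored in the raw inode on @ipage.  A stale entry with the same name but
 * a different ino is deleted (its inode becomes an orphan) and the lookup
 * is retried.
 */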
static int recover_dentry(struct inode *inode, struct page *ipage,
                                                struct list_head *dir_list)
{
        struct f2fs_inode *raw_inode = F2FS_INODE(ipage);
        nid_t pino = le32_to_cpu(raw_inode->i_pino);
        struct f2fs_dir_entry *de;
        struct fscrypt_name fname;
        struct page *page;
        struct inode *dir, *einode;
        struct fsync_inode_entry *entry;
        int err = 0;
        char *name;

        entry = get_fsync_inode(dir_list, pino);
        if (!entry) {
                entry = add_fsync_inode(F2FS_I_SB(inode), dir_list,
                                                        pino, false);
                if (IS_ERR(entry)) {
                        dir = ERR_CAST(entry);
                        err = PTR_ERR(entry);
                        goto out;
                }
        }

        dir = entry->inode;

        memset(&fname, 0, sizeof(struct fscrypt_name));
        fname.disk_name.len = le32_to_cpu(raw_inode->i_namelen);
        fname.disk_name.name = raw_inode->i_name;

        if (unlikely(fname.disk_name.len > F2FS_NAME_LEN)) {
                WARN_ON(1);
                err = -ENAMETOOLONG;
                goto out;
        }
retry:
        de = __f2fs_find_entry(dir, &fname, &page);
        if (de && inode->i_ino == le32_to_cpu(de->ino))
                goto out_put;

        if (de) {
                einode = f2fs_iget_retry(inode->i_sb, le32_to_cpu(de->ino));
                if (IS_ERR(einode)) {
                        WARN_ON(1);
                        err = PTR_ERR(einode);
                        if (err == -ENOENT)
                                err = -EEXIST;
                        goto out_put;
                }

                err = dquot_initialize(einode);
                if (err) {
                        iput(einode);
                        goto out_put;
                }

                err = f2fs_acquire_orphan_inode(F2FS_I_SB(inode));
                if (err) {
                        iput(einode);
                        goto out_put;
                }
                f2fs_delete_entry(de, page, dir, einode);
                iput(einode);
                goto retry;
        } else if (IS_ERR(page)) {
                err = PTR_ERR(page);
        } else {
                err = f2fs_add_dentry(dir, &fname, inode,
                                        inode->i_ino, inode->i_mode);
        }
        if (err == -ENOMEM)
                goto retry;
        goto out;

out_put:
        f2fs_put_page(page, 0);
out:
        if (file_enc_name(inode))
                name = "<encrypted>";
        else
                name = raw_inode->i_name;
        f2fs_notice(F2FS_I_SB(inode), "%s: ino = %x, name = %s, dir = %lx, err = %d",
                    __func__, ino_of_node(ipage), name,
                    IS_ERR(dir) ? 0 : dir->i_ino, err);
        return err;
}

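/*
 * If the uid/gid recorded in the recovered inode page differ from the
 * in-memory inode, transfer the quota charges to the recovered owner.
 */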
static int recover_quota_data(struct inode *inode, struct page *page)
{
        struct f2fs_inode *raw = F2FS_INODE(page);
        struct iattr attr;
        uid_t i_uid = le32_to_cpu(raw->i_uid);
        gid_t i_gid = le32_to_cpu(raw->i_gid);
        int err;

        memset(&attr, 0, sizeof(attr));

        attr.ia_uid = make_kuid(inode->i_sb->s_user_ns, i_uid);
        attr.ia_gid = make_kgid(inode->i_sb->s_user_ns, i_gid);

        if (!uid_eq(attr.ia_uid, inode->i_uid))
                attr.ia_valid |= ATTR_UID;
        if (!gid_eq(attr.ia_gid, inode->i_gid))
                attr.ia_valid |= ATTR_GID;

        if (!attr.ia_valid)
                return 0;

        err = dquot_transfer(inode, &attr);
        if (err)
                set_sbi_flag(F2FS_I_SB(inode), SBI_QUOTA_NEED_REPAIR);
        return err;
}

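/* Mirror the F2FS_PIN_FILE and F2FS_DATA_EXIST bits into inode flags. */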
static void recover_inline_flags(struct inode *inode, struct f2fs_inode *ri)
{
        if (ri->i_inline & F2FS_PIN_FILE)
                set_inode_flag(inode, FI_PIN_FILE);
        else
                clear_inode_flag(inode, FI_PIN_FILE);
        if (ri->i_inline & F2FS_DATA_EXIST)
                set_inode_flag(inode, FI_DATA_EXIST);
        else
                clear_inode_flag(inode, FI_DATA_EXIST);
}

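/*
 * Roll the in-memory inode forward to the state saved in the inode page:
 * mode, uid/gid, project id, i_size, timestamps and inline flags
 * (scenario #1 above).
 */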
static int recover_inode(struct inode *inode, struct page *page)
{
        struct f2fs_inode *raw = F2FS_INODE(page);
        char *name;
        int err;

        inode->i_mode = le16_to_cpu(raw->i_mode);

        err = recover_quota_data(inode, page);
        if (err)
                return err;

        i_uid_write(inode, le32_to_cpu(raw->i_uid));
        i_gid_write(inode, le32_to_cpu(raw->i_gid));

        if (raw->i_inline & F2FS_EXTRA_ATTR) {
                if (f2fs_sb_has_project_quota(F2FS_I_SB(inode)) &&
                        F2FS_FITS_IN_INODE(raw, le16_to_cpu(raw->i_extra_isize),
                                                                i_projid)) {
                        projid_t i_projid;
                        kprojid_t kprojid;

                        i_projid = (projid_t)le32_to_cpu(raw->i_projid);
                        kprojid = make_kprojid(&init_user_ns, i_projid);

                        if (!projid_eq(kprojid, F2FS_I(inode)->i_projid)) {
                                err = f2fs_transfer_project_quota(inode,
                                                                kprojid);
                                if (err)
                                        return err;
                                F2FS_I(inode)->i_projid = kprojid;
                        }
                }
        }

        f2fs_i_size_write(inode, le64_to_cpu(raw->i_size));
        inode->i_atime.tv_sec = le64_to_cpu(raw->i_atime);
        inode->i_ctime.tv_sec = le64_to_cpu(raw->i_ctime);
        inode->i_mtime.tv_sec = le64_to_cpu(raw->i_mtime);
        inode->i_atime.tv_nsec = le32_to_cpu(raw->i_atime_nsec);
        inode->i_ctime.tv_nsec = le32_to_cpu(raw->i_ctime_nsec);
        inode->i_mtime.tv_nsec = le32_to_cpu(raw->i_mtime_nsec);

        F2FS_I(inode)->i_advise = raw->i_advise;
        F2FS_I(inode)->i_flags = le32_to_cpu(raw->i_flags);
        f2fs_set_inode_flags(inode);
        F2FS_I(inode)->i_gc_failures[GC_FAILURE_PIN] =
                                le16_to_cpu(raw->i_gc_failures);

        recover_inline_flags(inode, raw);

        f2fs_mark_inode_dirty_sync(inode, true);

        if (file_enc_name(inode))
                name = "<encrypted>";
        else
                name = F2FS_INODE(page)->i_name;

        f2fs_notice(F2FS_I_SB(inode), "recover_inode: ino = %x, name = %s, inline = %x",
                    ino_of_node(page), name, raw->i_inline);
        return 0;
}

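/*
 * Step #1 of recovery: follow the warm node chain written after the last
 * checkpoint and collect every inode that has an fsync-marked dnode into
 * @head.  With @check_only set, no inode pages are rewritten; the walk
 * only determines whether there is anything to replay.
 */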
static int find_fsync_dnodes(struct f2fs_sb_info *sbi, struct list_head *head,
                                bool check_only)
{
        struct curseg_info *curseg;
        struct page *page = NULL;
        block_t blkaddr;
        unsigned int loop_cnt = 0;
        unsigned int free_blocks = MAIN_SEGS(sbi) * sbi->blocks_per_seg -
                                                valid_user_blocks(sbi);
        int err = 0;

        /* get node pages in the current segment */
        curseg = CURSEG_I(sbi, CURSEG_WARM_NODE);
        blkaddr = NEXT_FREE_BLKADDR(sbi, curseg);

        while (1) {
                struct fsync_inode_entry *entry;

                if (!f2fs_is_valid_blkaddr(sbi, blkaddr, META_POR))
                        return 0;

                page = f2fs_get_tmp_page(sbi, blkaddr);
                if (IS_ERR(page)) {
                        err = PTR_ERR(page);
                        break;
                }

                if (!is_recoverable_dnode(page)) {
                        f2fs_put_page(page, 1);
                        break;
                }

                if (!is_fsync_dnode(page))
                        goto next;

                entry = get_fsync_inode(head, ino_of_node(page));
                if (!entry) {
                        bool quota_inode = false;

                        if (!check_only &&
                                        IS_INODE(page) && is_dent_dnode(page)) {
                                err = f2fs_recover_inode_page(sbi, page);
                                if (err) {
                                        f2fs_put_page(page, 1);
                                        break;
                                }
                                quota_inode = true;
                        }

                        /*
                         * CP | dnode(F) | inode(DF)
                         * For this case, we should not give up now.
                         */
                        entry = add_fsync_inode(sbi, head, ino_of_node(page),
                                                                quota_inode);
                        if (IS_ERR(entry)) {
                                err = PTR_ERR(entry);
                                if (err == -ENOENT) {
                                        err = 0;
                                        goto next;
                                }
                                f2fs_put_page(page, 1);
                                break;
                        }
                }
                entry->blkaddr = blkaddr;

                if (IS_INODE(page) && is_dent_dnode(page))
                        entry->last_dentry = blkaddr;
next:
                /* sanity check in order to detect looped node chain */
                if (++loop_cnt >= free_blocks ||
                        blkaddr == next_blkaddr_of_node(page)) {
                        f2fs_notice(sbi, "%s: detect looped node chain, blkaddr:%u, next:%u",
                                    __func__, blkaddr,
                                    next_blkaddr_of_node(page));
                        f2fs_put_page(page, 1);
                        err = -EINVAL;
                        break;
                }

                /* check next segment */
                blkaddr = next_blkaddr_of_node(page);
                f2fs_put_page(page, 1);

                f2fs_ra_meta_pages_cond(sbi, blkaddr);
        }
        return err;
}

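/* Release every entry left on @head, passing @drop through to del_fsync_inode(). */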
static void destroy_fsync_dnodes(struct list_head *head, int drop)
{
        struct fsync_inode_entry *entry, *tmp;

        list_for_each_entry_safe(entry, tmp, head, list)
                del_fsync_inode(entry, drop);
}

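/*
 * @blkaddr is still marked valid in its segment, which means an older node
 * keeps a stale index to it.  Locate that node through the segment summary
 * and truncate the stale data block so @blkaddr can be reused as the
 * recovery destination.
 */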
static int check_index_in_prev_nodes(struct f2fs_sb_info *sbi,
                        block_t blkaddr, struct dnode_of_data *dn)
{
        struct seg_entry *sentry;
        unsigned int segno = GET_SEGNO(sbi, blkaddr);
        unsigned short blkoff = GET_BLKOFF_FROM_SEG0(sbi, blkaddr);
        struct f2fs_summary_block *sum_node;
        struct f2fs_summary sum;
        struct page *sum_page, *node_page;
        struct dnode_of_data tdn = *dn;
        nid_t ino, nid;
        struct inode *inode;
        unsigned int offset;
        block_t bidx;
        int i;

        sentry = get_seg_entry(sbi, segno);
        if (!f2fs_test_bit(blkoff, sentry->cur_valid_map))
                return 0;

        /* Get the previous summary */
        for (i = CURSEG_HOT_DATA; i <= CURSEG_COLD_DATA; i++) {
                struct curseg_info *curseg = CURSEG_I(sbi, i);
                if (curseg->segno == segno) {
                        sum = curseg->sum_blk->entries[blkoff];
                        goto got_it;
                }
        }

        sum_page = f2fs_get_sum_page(sbi, segno);
        if (IS_ERR(sum_page))
                return PTR_ERR(sum_page);
        sum_node = (struct f2fs_summary_block *)page_address(sum_page);
        sum = sum_node->entries[blkoff];
        f2fs_put_page(sum_page, 1);
got_it:
        /* Use the locked dnode page and inode */
        nid = le32_to_cpu(sum.nid);
        if (dn->inode->i_ino == nid) {
                tdn.nid = nid;
                if (!dn->inode_page_locked)
                        lock_page(dn->inode_page);
                tdn.node_page = dn->inode_page;
                tdn.ofs_in_node = le16_to_cpu(sum.ofs_in_node);
                goto truncate_out;
        } else if (dn->nid == nid) {
                tdn.ofs_in_node = le16_to_cpu(sum.ofs_in_node);
                goto truncate_out;
        }

        /* Get the node page */
        node_page = f2fs_get_node_page(sbi, nid);
        if (IS_ERR(node_page))
                return PTR_ERR(node_page);

        offset = ofs_of_node(node_page);
        ino = ino_of_node(node_page);
        f2fs_put_page(node_page, 1);

        if (ino != dn->inode->i_ino) {
                int ret;

                /* Deallocate previous index in the node page */
                inode = f2fs_iget_retry(sbi->sb, ino);
                if (IS_ERR(inode))
                        return PTR_ERR(inode);

                ret = dquot_initialize(inode);
                if (ret) {
                        iput(inode);
                        return ret;
                }
        } else {
                inode = dn->inode;
        }

        bidx = f2fs_start_bidx_of_node(offset, inode) +
                                le16_to_cpu(sum.ofs_in_node);

        /*
         * if inode page is locked, unlock temporarily, but its reference
         * count keeps alive.
         */
        if (ino == dn->inode->i_ino && dn->inode_page_locked)
                unlock_page(dn->inode_page);

        set_new_dnode(&tdn, inode, NULL, NULL, 0);
        if (f2fs_get_dnode_of_data(&tdn, bidx, LOOKUP_NODE))
                goto out;

        if (tdn.data_blkaddr == blkaddr)
                f2fs_truncate_data_blocks_range(&tdn, 1);

        f2fs_put_dnode(&tdn);
out:
        if (ino != dn->inode->i_ino)
                iput(inode);
        else if (dn->inode_page_locked)
                lock_page(dn->inode_page);
        return 0;

truncate_out:
        if (datablock_addr(tdn.inode, tdn.node_page,
                                        tdn.ofs_in_node) == blkaddr)
                f2fs_truncate_data_blocks_range(&tdn, 1);
        if (dn->inode->i_ino == nid && !dn->inode_page_locked)
                unlock_page(dn->inode_page);
        return 0;
}

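/*
 * Replay a single fsynced dnode page: restore xattr and inline data first,
 * then walk the block addresses it covers and make the on-disk dnode
 * match, truncating, reserving or replacing blocks as needed.
 */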
static int do_recover_data(struct f2fs_sb_info *sbi, struct inode *inode,
                                        struct page *page)
{
        struct dnode_of_data dn;
        struct node_info ni;
        unsigned int start, end;
        int err = 0, recovered = 0;

        /* step 1: recover xattr */
        if (IS_INODE(page)) {
                f2fs_recover_inline_xattr(inode, page);
        } else if (f2fs_has_xattr_block(ofs_of_node(page))) {
                err = f2fs_recover_xattr_data(inode, page);
                if (!err)
                        recovered++;
                goto out;
        }

        /* step 2: recover inline data */
        if (f2fs_recover_inline_data(inode, page))
                goto out;

        /* step 3: recover data indices */
        start = f2fs_start_bidx_of_node(ofs_of_node(page), inode);
        end = start + ADDRS_PER_PAGE(page, inode);

        set_new_dnode(&dn, inode, NULL, NULL, 0);
retry_dn:
        err = f2fs_get_dnode_of_data(&dn, start, ALLOC_NODE);
        if (err) {
                if (err == -ENOMEM) {
                        congestion_wait(BLK_RW_ASYNC, HZ/50);
                        goto retry_dn;
                }
                goto out;
        }

        f2fs_wait_on_page_writeback(dn.node_page, NODE, true, true);

        err = f2fs_get_node_info(sbi, dn.nid, &ni);
        if (err)
                goto err;

        f2fs_bug_on(sbi, ni.ino != ino_of_node(page));

        if (ofs_of_node(dn.node_page) != ofs_of_node(page)) {
                f2fs_warn(sbi, "Inconsistent ofs_of_node, ino:%lu, ofs:%u, %u",
                          inode->i_ino, ofs_of_node(dn.node_page),
                          ofs_of_node(page));
                err = -EFSCORRUPTED;
                goto err;
        }

        for (; start < end; start++, dn.ofs_in_node++) {
                block_t src, dest;

                src = datablock_addr(dn.inode, dn.node_page, dn.ofs_in_node);
                dest = datablock_addr(dn.inode, page, dn.ofs_in_node);

                if (__is_valid_data_blkaddr(src) &&
                        !f2fs_is_valid_blkaddr(sbi, src, META_POR)) {
                        err = -EFSCORRUPTED;
                        goto err;
                }

                if (__is_valid_data_blkaddr(dest) &&
                        !f2fs_is_valid_blkaddr(sbi, dest, META_POR)) {
                        err = -EFSCORRUPTED;
                        goto err;
                }

                /* skip recovering if dest is the same as src */
                if (src == dest)
                        continue;

                /* dest is invalid, just invalidate src block */
                if (dest == NULL_ADDR) {
                        f2fs_truncate_data_blocks_range(&dn, 1);
                        continue;
                }

                if (!file_keep_isize(inode) &&
                        (i_size_read(inode) <= ((loff_t)start << PAGE_SHIFT)))
                        f2fs_i_size_write(inode,
                                (loff_t)(start + 1) << PAGE_SHIFT);

                /*
                 * dest is reserved block, invalidate src block
                 * and then reserve one new block in dnode page.
                 */
                if (dest == NEW_ADDR) {
                        f2fs_truncate_data_blocks_range(&dn, 1);
                        f2fs_reserve_new_block(&dn);
                        continue;
                }

                /* dest is valid block, try to recover from src to dest */
                if (f2fs_is_valid_blkaddr(sbi, dest, META_POR)) {

                        if (src == NULL_ADDR) {
                                err = f2fs_reserve_new_block(&dn);
                                while (err &&
                                       IS_ENABLED(CONFIG_F2FS_FAULT_INJECTION))
                                        err = f2fs_reserve_new_block(&dn);
                                /* We should not get -ENOSPC */
                                f2fs_bug_on(sbi, err);
                                if (err)
                                        goto err;
                        }
retry_prev:
                        /* Check the previous node page having this index */
                        err = check_index_in_prev_nodes(sbi, dest, &dn);
                        if (err) {
                                if (err == -ENOMEM) {
                                        congestion_wait(BLK_RW_ASYNC, HZ/50);
                                        goto retry_prev;
                                }
                                goto err;
                        }

                        /* write dummy data page */
                        f2fs_replace_block(sbi, &dn, src, dest,
                                                ni.version, false, false);
                        recovered++;
                }
        }

        copy_node_footer(dn.node_page, page);
        fill_node_footer(dn.node_page, dn.nid, ni.ino,
                                        ofs_of_node(page), false);
        set_page_dirty(dn.node_page);
err:
        f2fs_put_dnode(&dn);
out:
        f2fs_notice(sbi, "recover_data: ino = %lx (i_size: %s) recovered = %d, err = %d",
                    inode->i_ino, file_keep_isize(inode) ? "keep" : "recover",
                    recovered, err);
        return err;
}

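/*
 * Step #2 of recovery: walk the node chain again and, for each page owned
 * by a collected inode, recover the inode itself, its dentry and its data
 * block addresses.  Fully replayed entries move to @tmp_inode_list.
 */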
static int recover_data(struct f2fs_sb_info *sbi, struct list_head *inode_list,
                struct list_head *tmp_inode_list, struct list_head *dir_list)
{
        struct curseg_info *curseg;
        struct page *page = NULL;
        int err = 0;
        block_t blkaddr;

        /* get node pages in the current segment */
        curseg = CURSEG_I(sbi, CURSEG_WARM_NODE);
        blkaddr = NEXT_FREE_BLKADDR(sbi, curseg);

        while (1) {
                struct fsync_inode_entry *entry;

                if (!f2fs_is_valid_blkaddr(sbi, blkaddr, META_POR))
                        break;

                f2fs_ra_meta_pages_cond(sbi, blkaddr);

                page = f2fs_get_tmp_page(sbi, blkaddr);
                if (IS_ERR(page)) {
                        err = PTR_ERR(page);
                        break;
                }

                if (!is_recoverable_dnode(page)) {
                        f2fs_put_page(page, 1);
                        break;
                }

                entry = get_fsync_inode(inode_list, ino_of_node(page));
                if (!entry)
                        goto next;
                /*
                 * inode(x) | CP | inode(x) | dnode(F)
                 * In this case, we can lose the latest inode(x).
                 * So, call recover_inode for the inode update.
                 */
                if (IS_INODE(page)) {
                        err = recover_inode(entry->inode, page);
                        if (err) {
                                f2fs_put_page(page, 1);
                                break;
                        }
                }
                if (entry->last_dentry == blkaddr) {
                        err = recover_dentry(entry->inode, page, dir_list);
                        if (err) {
                                f2fs_put_page(page, 1);
                                break;
                        }
                }
                err = do_recover_data(sbi, entry->inode, page);
                if (err) {
                        f2fs_put_page(page, 1);
                        break;
                }

                if (entry->blkaddr == blkaddr)
                        list_move_tail(&entry->list, tmp_inode_list);
next:
                /* check next segment */
                blkaddr = next_blkaddr_of_node(page);
                f2fs_put_page(page, 1);
        }
        if (!err)
                f2fs_allocate_new_segments(sbi);
        return err;
}

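/*
 * Entry point for roll-forward recovery.  With @check_only it only
 * reports (by returning 1) whether fsynced data exists; otherwise it
 * replays the node chain and writes a CP_RECOVERY checkpoint.
 *
 * A minimal sketch of the caller side, assuming the mount path in
 * f2fs_fill_super() (illustrative, not copied from that function):
 *
 *	if (f2fs_readonly(sbi->sb))
 *		err = f2fs_recover_fsync_data(sbi, true);   // probe only
 *	else
 *		err = f2fs_recover_fsync_data(sbi, false);  // full replay
 */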
int f2fs_recover_fsync_data(struct f2fs_sb_info *sbi, bool check_only)
{
        struct list_head inode_list, tmp_inode_list;
        struct list_head dir_list;
        int err;
        int ret = 0;
        unsigned long s_flags = sbi->sb->s_flags;
        bool need_writecp = false;
#ifdef CONFIG_QUOTA
        int quota_enabled;
#endif

        if (s_flags & SB_RDONLY) {
                f2fs_info(sbi, "recover fsync data on readonly fs");
                sbi->sb->s_flags &= ~SB_RDONLY;
        }

#ifdef CONFIG_QUOTA
        /* Needed for iput() to work correctly and not trash data */
        sbi->sb->s_flags |= SB_ACTIVE;
        /* Turn on quotas so that they are updated correctly */
        quota_enabled = f2fs_enable_quota_files(sbi, s_flags & SB_RDONLY);
#endif

        fsync_entry_slab = f2fs_kmem_cache_create("f2fs_fsync_inode_entry",
                        sizeof(struct fsync_inode_entry));
        if (!fsync_entry_slab) {
                err = -ENOMEM;
                goto out;
        }

        INIT_LIST_HEAD(&inode_list);
        INIT_LIST_HEAD(&tmp_inode_list);
        INIT_LIST_HEAD(&dir_list);

        /* prevent checkpoint */
        mutex_lock(&sbi->cp_mutex);

        /* step #1: find fsynced inode numbers */
        err = find_fsync_dnodes(sbi, &inode_list, check_only);
        if (err || list_empty(&inode_list))
                goto skip;

        if (check_only) {
                ret = 1;
                goto skip;
        }

        need_writecp = true;

        /* step #2: recover data */
        err = recover_data(sbi, &inode_list, &tmp_inode_list, &dir_list);
        if (!err)
                f2fs_bug_on(sbi, !list_empty(&inode_list));
        else {
                /* restore s_flags to let iput() trash data */
                sbi->sb->s_flags = s_flags;
        }
skip:
        destroy_fsync_dnodes(&inode_list, err);
        destroy_fsync_dnodes(&tmp_inode_list, err);

        /* truncate meta pages to be used by the recovery */
        truncate_inode_pages_range(META_MAPPING(sbi),
                        (loff_t)MAIN_BLKADDR(sbi) << PAGE_SHIFT, -1);

        if (err) {
                truncate_inode_pages_final(NODE_MAPPING(sbi));
                truncate_inode_pages_final(META_MAPPING(sbi));
        } else {
                clear_sbi_flag(sbi, SBI_POR_DOING);
        }
        mutex_unlock(&sbi->cp_mutex);

        /* let's drop all the directory inodes for clean checkpoint */
        destroy_fsync_dnodes(&dir_list, err);

        if (need_writecp) {
                set_sbi_flag(sbi, SBI_IS_RECOVERED);

                if (!err) {
                        struct cp_control cpc = {
                                .reason = CP_RECOVERY,
                        };
                        err = f2fs_write_checkpoint(sbi, &cpc);
                }
        }

        kmem_cache_destroy(fsync_entry_slab);
out:
#ifdef CONFIG_QUOTA
        /* Turn quotas off */
        if (quota_enabled)
                f2fs_quota_off_umount(sbi->sb);
#endif
        sbi->sb->s_flags = s_flags; /* Restore SB_RDONLY status */

        return ret ? ret : err;
}
