Lines matching refs: sbi — a cross-reference of every use of sbi in fs/f2fs/checkpoint.c. The leading number on each line is the source line number; "argument" and "local" note whether sbi enters the function as a parameter or a local variable. Short illustrative C sketches of the surrounding logic follow each group of matches.
32 struct page *grab_meta_page(struct f2fs_sb_info *sbi, pgoff_t index) in grab_meta_page() argument
34 struct address_space *mapping = META_MAPPING(sbi); in grab_meta_page()
50 struct page *get_meta_page(struct f2fs_sb_info *sbi, pgoff_t index) in get_meta_page() argument
52 struct address_space *mapping = META_MAPPING(sbi); in get_meta_page()
68 if (f2fs_submit_page_bio(sbi, page, &fio)) in get_meta_page()
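The two accessors above differ only in I/O: grab_meta_page() just allocates and locks the cached page at the given index (callers fill it in), while get_meta_page() additionally issues a read through f2fs_submit_page_bio() when the cached copy is not up to date. A minimal sketch of that split; find_or_create_locked() and read_page_sync() are invented stand-ins for the page-cache and bio helpers:

#include <stdbool.h>
#include <stddef.h>

struct page { bool uptodate; };

/* Stubs standing in for the page cache of META_MAPPING(sbi). */
struct page *find_or_create_locked(unsigned long index);
int read_page_sync(struct page *pg, unsigned long blkaddr);

/* grab_meta_page(): writer side, never touches the device. */
struct page *grab_meta_page(unsigned long index)
{
        return find_or_create_locked(index);
}

/* get_meta_page(): reader side, fetch the block if the copy is stale. */
struct page *get_meta_page(unsigned long index)
{
        struct page *pg = find_or_create_locked(index);

        if (pg && !pg->uptodate && read_page_sync(pg, index) != 0)
                return NULL;    /* the original retries (goto repeat) instead */
        return pg;
}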
80 static inline bool is_valid_blkaddr(struct f2fs_sb_info *sbi, in is_valid_blkaddr() argument
87 if (unlikely(blkaddr >= SIT_BLK_CNT(sbi))) in is_valid_blkaddr()
91 if (unlikely(blkaddr >= MAIN_BLKADDR(sbi) || in is_valid_blkaddr()
92 blkaddr < SM_I(sbi)->ssa_blkaddr)) in is_valid_blkaddr()
96 if (unlikely(blkaddr >= SIT_I(sbi)->sit_base_addr || in is_valid_blkaddr()
97 blkaddr < __start_cp_addr(sbi))) in is_valid_blkaddr()
101 if (unlikely(blkaddr >= MAX_BLKADDR(sbi) || in is_valid_blkaddr()
102 blkaddr < MAIN_BLKADDR(sbi))) in is_valid_blkaddr()
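Lines 80-102 show is_valid_blkaddr() bounds-checking a block number against the on-disk region that backs each metadata type. A self-contained model of the same range logic; struct f2fs_layout is a hypothetical flattening of the fields the original reaches through sbi (comments name the real accessors):

#include <stdbool.h>
#include <stdint.h>

typedef uint32_t block_t;
enum meta_type { META_NAT, META_SIT, META_SSA, META_CP, META_POR };

/* Hypothetical flattening of the sbi fields used at lines 87-102. */
struct f2fs_layout {
        block_t sit_blk_cnt;    /* SIT_BLK_CNT(sbi) */
        block_t ssa_blkaddr;    /* SM_I(sbi)->ssa_blkaddr */
        block_t main_blkaddr;   /* MAIN_BLKADDR(sbi) */
        block_t cp_blkaddr;     /* __start_cp_addr(sbi) */
        block_t sit_base_addr;  /* SIT_I(sbi)->sit_base_addr */
        block_t max_blkaddr;    /* MAX_BLKADDR(sbi) */
};

bool is_valid_blkaddr(const struct f2fs_layout *l, block_t blkaddr,
                      enum meta_type type)
{
        switch (type) {
        case META_NAT:  /* NAT indices get their own bound in ra_meta_pages() */
                return true;
        case META_SIT:  /* SIT block index: [0, sit_blk_cnt) */
                return blkaddr < l->sit_blk_cnt;
        case META_SSA:  /* [ssa_blkaddr, main_blkaddr) */
                return blkaddr >= l->ssa_blkaddr && blkaddr < l->main_blkaddr;
        case META_CP:   /* [cp_blkaddr, sit_base_addr) */
                return blkaddr >= l->cp_blkaddr && blkaddr < l->sit_base_addr;
        case META_POR:  /* main area: [main_blkaddr, max_blkaddr) */
                return blkaddr >= l->main_blkaddr && blkaddr < l->max_blkaddr;
        }
        return false;
}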
115 int ra_meta_pages(struct f2fs_sb_info *sbi, block_t start, int nrpages, int type) in ra_meta_pages() argument
127 if (!is_valid_blkaddr(sbi, blkno, type)) in ra_meta_pages()
133 NAT_BLOCK_OFFSET(NM_I(sbi)->max_nid))) in ra_meta_pages()
136 fio.blk_addr = current_nat_addr(sbi, in ra_meta_pages()
141 fio.blk_addr = current_sit_addr(sbi, in ra_meta_pages()
156 page = grab_cache_page(META_MAPPING(sbi), fio.blk_addr); in ra_meta_pages()
164 f2fs_submit_page_mbio(sbi, page, &fio); in ra_meta_pages()
168 f2fs_submit_merged_bio(sbi, META, READ); in ra_meta_pages()
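Lines 115-168 give the shape of the readahead loop: each block number is validated for the requested type, translated to a device address (current_nat_addr()/current_sit_addr() for NAT and SIT; used as-is for SSA/CP/POR), a page is grabbed in the meta mapping, and reads are merged into one bio that is submitted once at the end. ra_meta_pages_cond() below triggers this only when the page at index is not already cached. A sketch of the control flow, with every helper stubbed:

#include <stdbool.h>
#include <stdio.h>

typedef unsigned int block_t;
enum meta_type { META_NAT, META_SIT, META_SSA, META_CP, META_POR };

/* Stubs standing in for the helpers named at lines 127-168. */
bool blkaddr_valid(block_t b, enum meta_type t) { (void)b; (void)t; return true; }
block_t current_nat_addr(block_t nat_blk) { return 1000 + nat_blk; }
block_t current_sit_addr(block_t sit_blk) { return 2000 + sit_blk; }
bool grab_cached_page(block_t blk) { (void)blk; return true; }
void queue_read(block_t blk) { printf("queue META read, blk %u\n", blk); }
void flush_merged_reads(void) { printf("submit merged READ bio\n"); }

int ra_meta_pages(block_t start, int nrpages, enum meta_type type)
{
        int queued = 0;

        for (block_t blkno = start; blkno < start + (block_t)nrpages; blkno++) {
                if (!blkaddr_valid(blkno, type))
                        break;
                block_t blk_addr;
                switch (type) {
                case META_NAT: blk_addr = current_nat_addr(blkno); break;
                case META_SIT: blk_addr = current_sit_addr(blkno); break;
                default:       blk_addr = blkno; break; /* SSA/CP/POR: direct */
                }
                if (!grab_cached_page(blk_addr))
                        continue;       /* already cached; nothing to read */
                queue_read(blk_addr);   /* f2fs_submit_page_mbio(): merge into bio */
                queued++;
        }
        flush_merged_reads();           /* one f2fs_submit_merged_bio(META, READ) */
        return queued;
}

int main(void) { return ra_meta_pages(0, 4, META_SIT) == 4 ? 0 : 1; }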
172 void ra_meta_pages_cond(struct f2fs_sb_info *sbi, pgoff_t index) in ra_meta_pages_cond() argument
177 page = find_get_page(META_MAPPING(sbi), index); in ra_meta_pages_cond()
183 ra_meta_pages(sbi, index, MAX_BIO_BLOCKS(sbi), META_POR); in ra_meta_pages_cond()
189 struct f2fs_sb_info *sbi = F2FS_P_SB(page); in f2fs_write_meta_page() local
193 if (unlikely(is_sbi_flag_set(sbi, SBI_POR_DOING))) in f2fs_write_meta_page()
195 if (wbc->for_reclaim && page->index < GET_SUM_BLOCK(sbi, 0)) in f2fs_write_meta_page()
197 if (unlikely(f2fs_cp_error(sbi))) in f2fs_write_meta_page()
201 write_meta_page(sbi, page); in f2fs_write_meta_page()
202 dec_page_count(sbi, F2FS_DIRTY_META); in f2fs_write_meta_page()
206 f2fs_submit_merged_bio(sbi, META, WRITE); in f2fs_write_meta_page()
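Lines 189-206 are the ->writepage gatekeeping for meta pages: bail out (keeping the page dirty) while power-off recovery is running, when reclaim tries to write below the summary area, or after a checkpoint error; otherwise write the page, decrement F2FS_DIRTY_META, and flush the merged META bio on the reclaim path. The decision ladder, condensed into a standalone predicate:

#include <stdbool.h>

/* Condensed inputs; the real checks read sbi flags and wbc fields. */
struct wb_ctx {
        bool por_doing;         /* is_sbi_flag_set(sbi, SBI_POR_DOING) */
        bool cp_error;          /* f2fs_cp_error(sbi) */
        bool for_reclaim;       /* wbc->for_reclaim */
        unsigned long index;    /* page->index */
        unsigned long sum_blk0; /* GET_SUM_BLOCK(sbi, 0) */
};

bool should_write_meta_page(const struct wb_ctx *c)
{
        if (c->por_doing)
                return false;   /* recovery in flight: keep the page dirty */
        if (c->for_reclaim && c->index < c->sum_blk0)
                return false;   /* reclaim must not write CP-area meta */
        if (c->cp_error)
                return false;   /* checkpoint failed: leave it for later */
        return true;            /* write_meta_page() + dec F2FS_DIRTY_META */
}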
217 struct f2fs_sb_info *sbi = F2FS_M_SB(mapping); in f2fs_write_meta_pages() local
224 get_pages(sbi, F2FS_DIRTY_META) < nr_pages_to_skip(sbi, META)) in f2fs_write_meta_pages()
228 mutex_lock(&sbi->cp_mutex); in f2fs_write_meta_pages()
229 diff = nr_pages_to_write(sbi, META, wbc); in f2fs_write_meta_pages()
230 written = sync_meta_pages(sbi, META, wbc->nr_to_write); in f2fs_write_meta_pages()
231 mutex_unlock(&sbi->cp_mutex); in f2fs_write_meta_pages()
236 wbc->pages_skipped += get_pages(sbi, F2FS_DIRTY_META); in f2fs_write_meta_pages()
240 long sync_meta_pages(struct f2fs_sb_info *sbi, enum page_type type, in sync_meta_pages() argument
243 struct address_space *mapping = META_MAPPING(sbi); in sync_meta_pages()
292 f2fs_submit_merged_bio(sbi, type, WRITE); in sync_meta_pages()
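Lines 217-292: ->writepages defers until enough dirty meta has accumulated to be worth a bio (otherwise the pages are only counted in wbc->pages_skipped), then calls sync_meta_pages() under cp_mutex; sync_meta_pages() itself walks META_MAPPING(sbi) and finishes with one merged WRITE submit. A sketch of the quota bookkeeping only, where written and diff stand for the results of sync_meta_pages() and nr_pages_to_write() (my reading of the latter: padding added to round the quota up to a batch):

/* Quota bookkeeping of lines 224-236. */
void meta_writepages(long dirty_meta, long skip_threshold, long written,
                     long diff, long *nr_to_write, long *pages_skipped)
{
        if (dirty_meta < skip_threshold) {
                *pages_skipped += dirty_meta;   /* too few: batch up more first */
                return;
        }
        /* the write itself happens under cp_mutex in the original */
        long left = *nr_to_write - written - diff;
        *nr_to_write = left > 0 ? left : 0;     /* i.e. max(0, ...) */
}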
320 static void __add_ino_entry(struct f2fs_sb_info *sbi, nid_t ino, int type) in __add_ino_entry() argument
322 struct inode_management *im = &sbi->im[type]; in __add_ino_entry()
357 static void __remove_ino_entry(struct f2fs_sb_info *sbi, nid_t ino, int type) in __remove_ino_entry() argument
359 struct inode_management *im = &sbi->im[type]; in __remove_ino_entry()
375 void add_dirty_inode(struct f2fs_sb_info *sbi, nid_t ino, int type) in add_dirty_inode() argument
378 __add_ino_entry(sbi, ino, type); in add_dirty_inode()
381 void remove_dirty_inode(struct f2fs_sb_info *sbi, nid_t ino, int type) in remove_dirty_inode() argument
384 __remove_ino_entry(sbi, ino, type); in remove_dirty_inode()
388 bool exist_written_data(struct f2fs_sb_info *sbi, nid_t ino, int mode) in exist_written_data() argument
390 struct inode_management *im = &sbi->im[mode]; in exist_written_data()
399 void release_dirty_inode(struct f2fs_sb_info *sbi) in release_dirty_inode() argument
405 struct inode_management *im = &sbi->im[i]; in release_dirty_inode()
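Lines 320-405 are the per-type ino sets (struct inode_management): in the kernel each is a radix tree plus a list under a spinlock, used by the add/remove wrappers and by exist_written_data() as a membership test. A userspace model with a plain linked list standing in for the radix tree (note: in the kernel the ORPHAN_INO count is managed separately by acquire/release below, not by the add path):

#include <stdbool.h>
#include <stdlib.h>

typedef unsigned int nid_t;
enum { ORPHAN_INO, APPEND_INO, UPDATE_INO, MAX_INO_ENTRY };

struct ino_entry { nid_t ino; struct ino_entry *next; };

/* Stand-in for struct inode_management (radix tree + list + spinlock). */
struct inode_management { struct ino_entry *head; unsigned long ino_num; };

static struct inode_management im[MAX_INO_ENTRY];

void add_ino_entry(nid_t ino, int type)
{
        struct ino_entry *e;

        for (e = im[type].head; e; e = e->next)
                if (e->ino == ino)
                        return;                 /* already tracked */
        e = malloc(sizeof(*e));
        if (!e)
                return;
        e->ino = ino;
        e->next = im[type].head;
        im[type].head = e;
        im[type].ino_num++;
}

void remove_ino_entry(nid_t ino, int type)
{
        struct ino_entry **p;

        for (p = &im[type].head; *p; p = &(*p)->next)
                if ((*p)->ino == ino) {
                        struct ino_entry *e = *p;
                        *p = e->next;
                        free(e);
                        im[type].ino_num--;
                        return;
                }
}

/* exist_written_data() reduces to a membership test on a per-type set. */
bool exist_ino_entry(nid_t ino, int type)
{
        struct ino_entry *e;

        for (e = im[type].head; e; e = e->next)
                if (e->ino == ino)
                        return true;
        return false;
}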
418 int acquire_orphan_inode(struct f2fs_sb_info *sbi) in acquire_orphan_inode() argument
420 struct inode_management *im = &sbi->im[ORPHAN_INO]; in acquire_orphan_inode()
424 if (unlikely(im->ino_num >= sbi->max_orphans)) in acquire_orphan_inode()
433 void release_orphan_inode(struct f2fs_sb_info *sbi) in release_orphan_inode() argument
435 struct inode_management *im = &sbi->im[ORPHAN_INO]; in release_orphan_inode()
438 f2fs_bug_on(sbi, im->ino_num == 0); in release_orphan_inode()
443 void add_orphan_inode(struct f2fs_sb_info *sbi, nid_t ino) in add_orphan_inode() argument
446 __add_ino_entry(sbi, ino, ORPHAN_INO); in add_orphan_inode()
449 void remove_orphan_inode(struct f2fs_sb_info *sbi, nid_t ino) in remove_orphan_inode() argument
452 __remove_ino_entry(sbi, ino, ORPHAN_INO); in remove_orphan_inode()
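Lines 418-452: orphan handling is reservation-style. acquire_orphan_inode() claims one of the max_orphans slots before an operation that may orphan an inode (failing when the checkpoint pack's orphan blocks would overflow), release_orphan_inode() returns an unused claim, and add/remove record the actual ino in the ORPHAN_INO set. The accounting, condensed (the kernel takes im->ino_lock around each step):

#include <errno.h>

struct orphan_ctl { unsigned long ino_num, max_orphans; };

int acquire_orphan_inode(struct orphan_ctl *c)
{
        if (c->ino_num >= c->max_orphans)
                return -ENOSPC; /* the cp pack's orphan blocks would overflow */
        c->ino_num++;
        return 0;
}

void release_orphan_inode(struct orphan_ctl *c)
{
        /* original asserts: f2fs_bug_on(sbi, im->ino_num == 0) */
        c->ino_num--;
}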
455 static void recover_orphan_inode(struct f2fs_sb_info *sbi, nid_t ino) in recover_orphan_inode() argument
457 struct inode *inode = f2fs_iget(sbi->sb, ino); in recover_orphan_inode()
458 f2fs_bug_on(sbi, IS_ERR(inode)); in recover_orphan_inode()
465 void recover_orphan_inodes(struct f2fs_sb_info *sbi) in recover_orphan_inodes() argument
469 if (!is_set_ckpt_flags(F2FS_CKPT(sbi), CP_ORPHAN_PRESENT_FLAG)) in recover_orphan_inodes()
472 set_sbi_flag(sbi, SBI_POR_DOING); in recover_orphan_inodes()
474 start_blk = __start_cp_addr(sbi) + 1 + __cp_payload(sbi); in recover_orphan_inodes()
475 orphan_blocks = __start_sum_addr(sbi) - 1 - __cp_payload(sbi); in recover_orphan_inodes()
477 ra_meta_pages(sbi, start_blk, orphan_blocks, META_CP); in recover_orphan_inodes()
480 struct page *page = get_meta_page(sbi, start_blk + i); in recover_orphan_inodes()
486 recover_orphan_inode(sbi, ino); in recover_orphan_inodes()
491 clear_ckpt_flags(F2FS_CKPT(sbi), CP_ORPHAN_PRESENT_FLAG); in recover_orphan_inodes()
492 clear_sbi_flag(sbi, SBI_POR_DOING); in recover_orphan_inodes()
496 static void write_orphan_inodes(struct f2fs_sb_info *sbi, block_t start_blk) in write_orphan_inodes() argument
505 struct inode_management *im = &sbi->im[ORPHAN_INO]; in write_orphan_inodes()
510 grab_meta_page(sbi, start_blk + index); in write_orphan_inodes()
519 page = find_get_page(META_MAPPING(sbi), start_blk++); in write_orphan_inodes()
520 f2fs_bug_on(sbi, !page); in write_orphan_inodes()
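Lines 465-492 and 496-520 are the two directions of orphan persistence. On recovery, the orphan blocks of the live checkpoint pack sit right after the cp block and its payload (lines 474-475), and every recorded ino is iget/iput-ed to finish the deletion; on checkpoint, write_orphan_inodes() packs the current ORPHAN_INO set into meta pages grabbed starting at start_blk. The arithmetic, assuming 1020 orphan entries per 4KB block (my recollection of F2FS_ORPHANS_PER_BLOCK):

typedef unsigned long long block_t;

#define ORPHANS_PER_BLOCK 1020  /* assumed F2FS_ORPHANS_PER_BLOCK */

/* Where orphan blocks sit inside one cp pack (the arithmetic of lines
 * 474-475); field comments name the real helpers. */
struct cp_pack {
        block_t cp_addr;        /* __start_cp_addr(sbi) */
        unsigned cp_payload;    /* __cp_payload(sbi) */
        unsigned start_sum;     /* __start_sum_addr(sbi) */
};

void orphan_range(const struct cp_pack *p, block_t *start_blk,
                  unsigned *orphan_blocks)
{
        *start_blk = p->cp_addr + 1 + p->cp_payload;       /* skip cp block + payload */
        *orphan_blocks = p->start_sum - 1 - p->cp_payload; /* stop at the summaries */
}

/* Write side: blocks needed to persist nr_orphans inode numbers. */
unsigned orphan_blocks_needed(unsigned long nr_orphans)
{
        return (nr_orphans + ORPHANS_PER_BLOCK - 1) / ORPHANS_PER_BLOCK;
}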
557 static struct page *validate_checkpoint(struct f2fs_sb_info *sbi, in validate_checkpoint() argument
561 unsigned long blk_size = sbi->blocksize; in validate_checkpoint()
568 cp_page_1 = get_meta_page(sbi, cp_addr); in validate_checkpoint()
584 cp_page_2 = get_meta_page(sbi, cp_addr); in validate_checkpoint()
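Lines 557-584: each checkpoint pack is validated by reading its first and its last block; both carry a version and a checksum, and the pack counts only if both blocks verify and the versions agree, which is how a torn pack write is detected. Condensed, with crc_ok() as a stand-in for the real CRC check:

#include <stdbool.h>
#include <stdint.h>

/* Head and tail blocks of a cp pack both carry (version, checksum). */
struct cp_block_hdr { uint64_t version; uint32_t crc; };

bool crc_ok(const struct cp_block_hdr *b)
{
        return b->crc != 0;     /* stand-in for the real f2fs CRC check */
}

bool validate_checkpoint(const struct cp_block_hdr *first,
                         const struct cp_block_hdr *last, uint64_t *version)
{
        if (!crc_ok(first) || !crc_ok(last))
                return false;
        if (first->version != last->version)
                return false;   /* torn write: pack only partially committed */
        *version = first->version;
        return true;
}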
609 int get_valid_checkpoint(struct f2fs_sb_info *sbi) in get_valid_checkpoint() argument
612 struct f2fs_super_block *fsb = sbi->raw_super; in get_valid_checkpoint()
614 unsigned long blk_size = sbi->blocksize; in get_valid_checkpoint()
617 unsigned int cp_blks = 1 + __cp_payload(sbi); in get_valid_checkpoint()
621 sbi->ckpt = kzalloc(cp_blks * blk_size, GFP_KERNEL); in get_valid_checkpoint()
622 if (!sbi->ckpt) in get_valid_checkpoint()
629 cp1 = validate_checkpoint(sbi, cp_start_blk_no, &cp1_version); in get_valid_checkpoint()
634 cp2 = validate_checkpoint(sbi, cp_start_blk_no, &cp2_version); in get_valid_checkpoint()
650 memcpy(sbi->ckpt, cp_block, blk_size); in get_valid_checkpoint()
661 unsigned char *ckpt = (unsigned char *)sbi->ckpt; in get_valid_checkpoint()
663 cur_page = get_meta_page(sbi, cp_blk_no + i); in get_valid_checkpoint()
674 kfree(sbi->ckpt); in get_valid_checkpoint()
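Lines 609-674: get_valid_checkpoint() allocates (1 + __cp_payload(sbi)) blocks for sbi->ckpt, validates cp pack #1 at cp_blkaddr and pack #2 one segment later, keeps whichever valid pack carries the newer version, and copies its payload blocks in as well. The selection logic, with ver_after() written as the usual wraparound-safe version compare:

#include <stddef.h>
#include <stdint.h>

struct cp_copy { int valid; uint64_t version; };

/* Wraparound-safe "a is newer than b". */
int ver_after(uint64_t a, uint64_t b) { return (int64_t)(a - b) > 0; }

const struct cp_copy *pick_checkpoint(const struct cp_copy *cp1,
                                      const struct cp_copy *cp2)
{
        if (cp1->valid && cp2->valid)
                return ver_after(cp2->version, cp1->version) ? cp2 : cp1;
        if (cp1->valid)
                return cp1;
        if (cp2->valid)
                return cp2;
        return NULL;            /* no usable pack: the mount fails */
}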
680 struct f2fs_sb_info *sbi = F2FS_I_SB(inode); in __add_dirty_inode() local
687 list_add_tail(&new->list, &sbi->dir_inode_list); in __add_dirty_inode()
688 stat_inc_dirty_dir(sbi); in __add_dirty_inode()
694 struct f2fs_sb_info *sbi = F2FS_I_SB(inode); in update_dirty_page() local
710 spin_lock(&sbi->dir_inode_lock); in update_dirty_page()
713 spin_unlock(&sbi->dir_inode_lock); in update_dirty_page()
724 struct f2fs_sb_info *sbi = F2FS_I_SB(inode); in add_dirty_dir_inode() local
732 spin_lock(&sbi->dir_inode_lock); in add_dirty_dir_inode()
734 spin_unlock(&sbi->dir_inode_lock); in add_dirty_dir_inode()
742 struct f2fs_sb_info *sbi = F2FS_I_SB(inode); in remove_dirty_dir_inode() local
748 spin_lock(&sbi->dir_inode_lock); in remove_dirty_dir_inode()
751 spin_unlock(&sbi->dir_inode_lock); in remove_dirty_dir_inode()
759 stat_dec_dirty_dir(sbi); in remove_dirty_dir_inode()
760 spin_unlock(&sbi->dir_inode_lock); in remove_dirty_dir_inode()
770 void sync_dirty_dir_inodes(struct f2fs_sb_info *sbi) in sync_dirty_dir_inodes() argument
776 if (unlikely(f2fs_cp_error(sbi))) in sync_dirty_dir_inodes()
779 spin_lock(&sbi->dir_inode_lock); in sync_dirty_dir_inodes()
781 head = &sbi->dir_inode_list; in sync_dirty_dir_inodes()
783 spin_unlock(&sbi->dir_inode_lock); in sync_dirty_dir_inodes()
788 spin_unlock(&sbi->dir_inode_lock); in sync_dirty_dir_inodes()
797 f2fs_submit_merged_bio(sbi, DATA, WRITE); in sync_dirty_dir_inodes()
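Lines 680-797 track directories with dirty data on sbi->dir_inode_list under dir_inode_lock, and sync_dirty_dir_inodes() drains that list, writing each directory's pages and re-checking until it is empty, then flushes the merged DATA bio. The drain loop reduced to its shape (the original takes the lock around each list peek and igrab()s the inode before writing, since writeback can sleep):

#include <stddef.h>

struct dir_inode { struct dir_inode *next; int dirty_pages; };

void sync_dirty_dirs(struct dir_inode **list)
{
        while (*list) {
                struct dir_inode *di = *list;

                *list = di->next;
                di->dirty_pages = 0;    /* filemap_fdatawrite() stand-in */
        }
        /* then: f2fs_submit_merged_bio(sbi, DATA, WRITE) */
}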
806 static int block_operations(struct f2fs_sb_info *sbi) in block_operations() argument
819 f2fs_lock_all(sbi); in block_operations()
821 if (get_pages(sbi, F2FS_DIRTY_DENTS)) { in block_operations()
822 f2fs_unlock_all(sbi); in block_operations()
823 sync_dirty_dir_inodes(sbi); in block_operations()
824 if (unlikely(f2fs_cp_error(sbi))) { in block_operations()
836 down_write(&sbi->node_write); in block_operations()
838 if (get_pages(sbi, F2FS_DIRTY_NODES)) { in block_operations()
839 up_write(&sbi->node_write); in block_operations()
840 sync_node_pages(sbi, 0, &wbc); in block_operations()
841 if (unlikely(f2fs_cp_error(sbi))) { in block_operations()
842 f2fs_unlock_all(sbi); in block_operations()
853 static void unblock_operations(struct f2fs_sb_info *sbi) in unblock_operations() argument
855 up_write(&sbi->node_write); in unblock_operations()
856 f2fs_unlock_all(sbi); in unblock_operations()
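Lines 806-856 are the freeze protocol run before a checkpoint: take the global lock; if dirty dentries remain, drop it, flush them, and retry (flushing dentries dirties node pages, so they are settled first); then block node writers via node_write, and if dirty node pages remain, release that and retry only the node flush. unblock_operations() simply reverses the two locks. The retry structure, with the kernel primitives stubbed:

#include <errno.h>

/* Stubs for the primitives named in the listing. */
void lock_all(void); void unlock_all(void);
void down_write_node(void); void up_write_node(void);
int dirty_dents(void); int dirty_nodes(void); int cp_error(void);
void sync_dirty_dirs(void); void sync_node_pages(void);

int block_operations(void)
{
retry_flush_dents:
        lock_all();                     /* f2fs_lock_all(sbi) */
        if (dirty_dents()) {
                unlock_all();
                sync_dirty_dirs();
                if (cp_error())
                        return -EIO;
                goto retry_flush_dents;
        }
retry_flush_nodes:
        down_write_node();              /* &sbi->node_write */
        if (dirty_nodes()) {
                up_write_node();
                sync_node_pages();
                if (cp_error()) {
                        unlock_all();
                        return -EIO;
                }
                goto retry_flush_nodes;
        }
        return 0;       /* quiescent: both locks held for the checkpoint */
}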
859 static void wait_on_all_pages_writeback(struct f2fs_sb_info *sbi) in wait_on_all_pages_writeback() argument
864 prepare_to_wait(&sbi->cp_wait, &wait, TASK_UNINTERRUPTIBLE); in wait_on_all_pages_writeback()
866 if (!get_pages(sbi, F2FS_WRITEBACK)) in wait_on_all_pages_writeback()
871 finish_wait(&sbi->cp_wait, &wait); in wait_on_all_pages_writeback()
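Lines 859-871 are a standard waitqueue loop: register on sbi->cp_wait, re-check the F2FS_WRITEBACK page count, and sleep until the write-completion path wakes the waiter. Its shape, with the waitqueue calls stubbed:

/* Stubs for the waitqueue primitives used at lines 864-871. */
void prepare_to_wait_cp(void);          /* prepare_to_wait(&sbi->cp_wait, ...) */
void finish_wait_cp(void);              /* finish_wait(&sbi->cp_wait, ...) */
int pages_under_writeback(void);        /* get_pages(sbi, F2FS_WRITEBACK) */
void io_schedule_wait(void);

void wait_on_all_pages_writeback(void)
{
        for (;;) {
                prepare_to_wait_cp();
                if (!pages_under_writeback())
                        break;          /* every in-flight write has completed */
                io_schedule_wait();     /* end_io on the last page wakes us */
        }
        finish_wait_cp();
}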
874 static void do_checkpoint(struct f2fs_sb_info *sbi, struct cp_control *cpc) in do_checkpoint() argument
876 struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi); in do_checkpoint()
877 struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_WARM_NODE); in do_checkpoint()
878 struct f2fs_nm_info *nm_i = NM_I(sbi); in do_checkpoint()
879 unsigned long orphan_num = sbi->im[ORPHAN_INO].ino_num; in do_checkpoint()
887 int cp_payload_blks = __cp_payload(sbi); in do_checkpoint()
893 discard_next_dnode(sbi, NEXT_FREE_BLKADDR(sbi, curseg)); in do_checkpoint()
896 while (get_pages(sbi, F2FS_DIRTY_META)) { in do_checkpoint()
897 sync_meta_pages(sbi, META, LONG_MAX); in do_checkpoint()
898 if (unlikely(f2fs_cp_error(sbi))) in do_checkpoint()
902 next_free_nid(sbi, &last_nid); in do_checkpoint()
908 ckpt->elapsed_time = cpu_to_le64(get_mtime(sbi)); in do_checkpoint()
909 ckpt->valid_block_count = cpu_to_le64(valid_user_blocks(sbi)); in do_checkpoint()
910 ckpt->free_segment_count = cpu_to_le32(free_segments(sbi)); in do_checkpoint()
913 cpu_to_le32(curseg_segno(sbi, i + CURSEG_HOT_NODE)); in do_checkpoint()
915 cpu_to_le16(curseg_blkoff(sbi, i + CURSEG_HOT_NODE)); in do_checkpoint()
917 curseg_alloc_type(sbi, i + CURSEG_HOT_NODE); in do_checkpoint()
921 cpu_to_le32(curseg_segno(sbi, i + CURSEG_HOT_DATA)); in do_checkpoint()
923 cpu_to_le16(curseg_blkoff(sbi, i + CURSEG_HOT_DATA)); in do_checkpoint()
925 curseg_alloc_type(sbi, i + CURSEG_HOT_DATA); in do_checkpoint()
928 ckpt->valid_node_count = cpu_to_le32(valid_node_count(sbi)); in do_checkpoint()
929 ckpt->valid_inode_count = cpu_to_le32(valid_inode_count(sbi)); in do_checkpoint()
933 data_sum_blocks = npages_for_summary_flush(sbi, false); in do_checkpoint()
967 if (is_sbi_flag_set(sbi, SBI_NEED_FSCK)) in do_checkpoint()
971 get_sit_bitmap(sbi, __bitmap_ptr(sbi, SIT_BITMAP)); in do_checkpoint()
972 get_nat_bitmap(sbi, __bitmap_ptr(sbi, NAT_BITMAP)); in do_checkpoint()
979 start_blk = __start_cp_addr(sbi); in do_checkpoint()
982 cp_page = grab_meta_page(sbi, start_blk++); in do_checkpoint()
989 cp_page = grab_meta_page(sbi, start_blk++); in do_checkpoint()
997 write_orphan_inodes(sbi, start_blk); in do_checkpoint()
1001 write_data_summaries(sbi, start_blk); in do_checkpoint()
1004 write_node_summaries(sbi, start_blk); in do_checkpoint()
1009 cp_page = grab_meta_page(sbi, start_blk); in do_checkpoint()
1016 wait_on_all_pages_writeback(sbi); in do_checkpoint()
1018 if (unlikely(f2fs_cp_error(sbi))) in do_checkpoint()
1021 filemap_fdatawait_range(NODE_MAPPING(sbi), 0, LONG_MAX); in do_checkpoint()
1022 filemap_fdatawait_range(META_MAPPING(sbi), 0, LONG_MAX); in do_checkpoint()
1025 sbi->last_valid_block_count = sbi->total_valid_block_count; in do_checkpoint()
1026 sbi->alloc_valid_block_count = 0; in do_checkpoint()
1029 sync_meta_pages(sbi, META_FLUSH, LONG_MAX); in do_checkpoint()
1032 wait_on_all_pages_writeback(sbi); in do_checkpoint()
1034 release_dirty_inode(sbi); in do_checkpoint()
1036 if (unlikely(f2fs_cp_error(sbi))) in do_checkpoint()
1039 clear_prefree_segments(sbi); in do_checkpoint()
1040 clear_sbi_flag(sbi, SBI_IS_DIRTY); in do_checkpoint()
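Lines 874-1040 assemble one checkpoint pack in the meta address space and write it in order: the cp block, payload blocks, orphan blocks (if any), data summaries, node summaries on unmount, and a trailing copy of the cp block as the commit record; everything else must already be on disk (wait_on_all_pages_writeback(), filemap_fdatawait_range()) before the final META_FLUSH makes the pack valid. A sketch of the pack-size arithmetic, with made-up sample counts:

#include <stdio.h>

/* Per-checkpoint counts; sample values in main() are invented. */
struct cp_geometry {
        unsigned cp_payload;      /* __cp_payload(sbi), e.g. oversized NAT bitmap */
        unsigned orphan_blocks;   /* from the ORPHAN_INO count */
        unsigned data_sum_blocks; /* npages_for_summary_flush() */
        unsigned node_sum_blocks; /* node summaries on umount, else 0 */
};

unsigned cp_pack_total_blocks(const struct cp_geometry *g)
{
        return 1                        /* leading cp block */
             + g->cp_payload
             + g->orphan_blocks
             + g->data_sum_blocks
             + g->node_sum_blocks
             + 1;                       /* trailing cp block: the commit record */
}

int main(void)
{
        struct cp_geometry g = { 0, 1, 3, 0 };

        printf("cp pack spans %u blocks\n", cp_pack_total_blocks(&g));
        return 0;
}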
1046 void write_checkpoint(struct f2fs_sb_info *sbi, struct cp_control *cpc) in write_checkpoint() argument
1048 struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi); in write_checkpoint()
1051 mutex_lock(&sbi->cp_mutex); in write_checkpoint()
1053 if (!is_sbi_flag_set(sbi, SBI_IS_DIRTY) && in write_checkpoint()
1056 if (unlikely(f2fs_cp_error(sbi))) in write_checkpoint()
1058 if (f2fs_readonly(sbi->sb)) in write_checkpoint()
1061 trace_f2fs_write_checkpoint(sbi->sb, cpc->reason, "start block_ops"); in write_checkpoint()
1063 if (block_operations(sbi)) in write_checkpoint()
1066 trace_f2fs_write_checkpoint(sbi->sb, cpc->reason, "finish block_ops"); in write_checkpoint()
1068 f2fs_submit_merged_bio(sbi, DATA, WRITE); in write_checkpoint()
1069 f2fs_submit_merged_bio(sbi, NODE, WRITE); in write_checkpoint()
1070 f2fs_submit_merged_bio(sbi, META, WRITE); in write_checkpoint()
1081 flush_nat_entries(sbi); in write_checkpoint()
1082 flush_sit_entries(sbi, cpc); in write_checkpoint()
1085 do_checkpoint(sbi, cpc); in write_checkpoint()
1087 unblock_operations(sbi); in write_checkpoint()
1088 stat_inc_cp_count(sbi->stat_info); in write_checkpoint()
1091 f2fs_msg(sbi->sb, KERN_NOTICE, in write_checkpoint()
1094 mutex_unlock(&sbi->cp_mutex); in write_checkpoint()
1095 trace_f2fs_write_checkpoint(sbi->sb, cpc->reason, "finish checkpoint"); in write_checkpoint()
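Lines 1046-1095 give the top-level ordering under cp_mutex: early-outs (nothing dirty, checkpoint error, read-only mount), block_operations(), flushing the merged DATA/NODE/META bios, flushing NAT and SIT deltas into meta pages, do_checkpoint(), then unblocking. The sequence as a stub-driven sketch (early-outs simplified; the real code also keys off cpc->reason):

/* Stubs mirroring the calls at lines 1051-1094. */
void cp_lock(void); void cp_unlock(void);
int fs_dirty(void); int cp_error(void); int readonly(void);
int block_operations(void); void unblock_operations(void);
void flush_merged_bios(void);   /* DATA, NODE, META */
void flush_nat_entries(void); void flush_sit_entries(void);
void do_checkpoint(void);

int write_checkpoint(void)
{
        int err = 0;

        cp_lock();                      /* mutex_lock(&sbi->cp_mutex) */
        if (!fs_dirty() || cp_error() || readonly())
                goto out;               /* simplified early-outs */
        err = block_operations();       /* quiesce dentries and node pages */
        if (err)
                goto out;
        flush_merged_bios();
        flush_nat_entries();            /* push NAT deltas into dirty meta pages */
        flush_sit_entries();            /* likewise for SIT */
        do_checkpoint();                /* assemble and persist the cp pack */
        unblock_operations();
out:
        cp_unlock();
        return err;
}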
1098 void init_ino_entry_info(struct f2fs_sb_info *sbi) in init_ino_entry_info() argument
1103 struct inode_management *im = &sbi->im[i]; in init_ino_entry_info()
1111 sbi->max_orphans = (sbi->blocks_per_seg - F2FS_CP_PACKS - in init_ino_entry_info()
1112 NR_CURSEG_TYPE - __cp_payload(sbi)) * in init_ino_entry_info()
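Lines 1098-1112 size max_orphans from the space left in one cp pack: (blocks_per_seg - F2FS_CP_PACKS - NR_CURSEG_TYPE - cp_payload) orphan blocks, times entries per block (the multiplier, F2FS_ORPHANS_PER_BLOCK, is cut off in the listing). A worked example with values assumed here for illustration (512 blocks per 2MB segment, 2 cp blocks, 6 current segments, zero payload, 1020 entries per 4KB block):

        (512 - 2 - 6 - 0) * 1020 = 504 * 1020 = 514,080 orphan inodes at most.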