Lines matching refs:sbi — uses of the f2fs superblock-info pointer (struct f2fs_sb_info *sbi) across the f2fs garbage-collection code; each entry lists the source line number, the matching code line, and the enclosing function.
29 struct f2fs_sb_info *sbi = data; in gc_thread_func() local
30 struct f2fs_gc_kthread *gc_th = sbi->gc_thread; in gc_thread_func()
31 wait_queue_head_t *wq = &sbi->gc_thread->gc_wait_queue_head; in gc_thread_func()
46 if (sbi->sb->s_writers.frozen >= SB_FREEZE_WRITE) { in gc_thread_func()
64 if (!mutex_trylock(&sbi->gc_mutex)) in gc_thread_func()
67 if (!is_idle(sbi)) { in gc_thread_func()
69 mutex_unlock(&sbi->gc_mutex); in gc_thread_func()
73 if (has_enough_invalid_blocks(sbi)) in gc_thread_func()
78 stat_inc_bggc_count(sbi); in gc_thread_func()
81 if (f2fs_gc(sbi)) in gc_thread_func()
85 f2fs_balance_fs_bg(sbi); in gc_thread_func()
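
The lines above are the body of the background GC kthread: it sleeps on gc_wait_queue_head, skips a round while the superblock is write-frozen, backs off when it cannot take gc_mutex or the device is not idle, shortens its nap once enough invalid blocks have accumulated, then runs f2fs_gc() followed by f2fs_balance_fs_bg(). A minimal standalone model of the adaptive sleep policy (an illustrative sketch, not the kernel code; the constants are stand-ins for the f2fs_gc_kthread tunables):

#include <stdio.h>

/* Illustrative defaults; the real tunables live in struct f2fs_gc_kthread. */
#define MIN_SLEEP_MS 30000u
#define MAX_SLEEP_MS 60000u
#define STEP_MS      10000u

/* Shorten the nap when invalid blocks pile up, lengthen it when idle. */
static unsigned int next_sleep(unsigned int wait_ms, int has_enough_invalid)
{
	if (has_enough_invalid)
		return wait_ms > MIN_SLEEP_MS + STEP_MS ?
				wait_ms - STEP_MS : MIN_SLEEP_MS;
	return wait_ms + STEP_MS < MAX_SLEEP_MS ?
			wait_ms + STEP_MS : MAX_SLEEP_MS;
}

int main(void)
{
	unsigned int wait_ms = 45000;

	wait_ms = next_sleep(wait_ms, 1);
	printf("invalid blocks piling up -> sleep %u ms\n", wait_ms);
	wait_ms = next_sleep(wait_ms, 0);
	printf("filesystem idle          -> sleep %u ms\n", wait_ms);
	return 0;
}
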
91 int start_gc_thread(struct f2fs_sb_info *sbi) in start_gc_thread() argument
94 dev_t dev = sbi->sb->s_bdev->bd_dev; in start_gc_thread()
109 sbi->gc_thread = gc_th; in start_gc_thread()
110 init_waitqueue_head(&sbi->gc_thread->gc_wait_queue_head); in start_gc_thread()
111 sbi->gc_thread->f2fs_gc_task = kthread_run(gc_thread_func, sbi, in start_gc_thread()
116 sbi->gc_thread = NULL; in start_gc_thread()
122 void stop_gc_thread(struct f2fs_sb_info *sbi) in stop_gc_thread() argument
124 struct f2fs_gc_kthread *gc_th = sbi->gc_thread; in stop_gc_thread()
129 sbi->gc_thread = NULL; in stop_gc_thread()
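
start_gc_thread() allocates the f2fs_gc_kthread state, initializes the wait queue head and launches the thread with kthread_run(); stop_gc_thread() tears it down and clears sbi->gc_thread. A rough user-space analogue of that lifecycle, sketched with pthreads instead of kthreads (an assumption for illustration only):

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>
#include <unistd.h>

struct gc_thread {
	pthread_t task;
	atomic_int should_stop;     /* plays the role of kthread_should_stop() */
};

static void *gc_thread_func(void *data)
{
	struct gc_thread *gc = data;

	while (!atomic_load(&gc->should_stop)) {
		/* ... pick a victim and collect it here ... */
		usleep(100 * 1000);
	}
	return NULL;
}

static int start_gc_thread(struct gc_thread *gc)
{
	atomic_store(&gc->should_stop, 0);
	return pthread_create(&gc->task, NULL, gc_thread_func, gc);
}

static void stop_gc_thread(struct gc_thread *gc)
{
	atomic_store(&gc->should_stop, 1);
	pthread_join(gc->task, NULL);
}

int main(void)
{
	struct gc_thread gc;

	if (start_gc_thread(&gc) == 0) {
		usleep(300 * 1000);
		stop_gc_thread(&gc);
		puts("gc thread stopped");
	}
	return 0;
}
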
145 static void select_policy(struct f2fs_sb_info *sbi, int gc_type, in select_policy() argument
148 struct dirty_seglist_info *dirty_i = DIRTY_I(sbi); in select_policy()
156 p->gc_mode = select_gc_type(sbi->gc_thread, gc_type); in select_policy()
159 p->ofs_unit = sbi->segs_per_sec; in select_policy()
162 if (p->max_search > sbi->max_victim_search) in select_policy()
163 p->max_search = sbi->max_victim_search; in select_policy()
165 p->offset = sbi->last_victim[p->gc_mode]; in select_policy()
168 static unsigned int get_max_cost(struct f2fs_sb_info *sbi, in get_max_cost() argument
173 return 1 << sbi->log_blocks_per_seg; in get_max_cost()
175 return (1 << sbi->log_blocks_per_seg) * p->ofs_unit; in get_max_cost()
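
select_policy() fixes the scan parameters for one victim search: the GC mode (cost-benefit for background GC, greedy otherwise), the selection unit in segments, and a search cap clamped to sbi->max_victim_search, resuming from sbi->last_victim; get_max_cost() supplies the worst possible cost used to seed the minimum search. A condensed model of both helpers (field names follow the kernel's victim_sel_policy, but the code is an illustrative assumption):

#include <limits.h>
#include <stdio.h>

enum alloc_mode { LFS, SSR };
enum gc_mode    { GC_CB, GC_GREEDY };

struct victim_sel_policy {
	enum alloc_mode alloc_mode;
	enum gc_mode    gc_mode;
	unsigned int    ofs_unit;    /* segments selected as one unit */
	unsigned int    max_search;  /* candidates scanned per pass */
	unsigned int    min_cost;    /* seeded with the worst possible cost */
};

static void select_policy_model(struct victim_sel_policy *p, int bg_gc,
				unsigned int nr_dirty,
				unsigned int segs_per_sec,
				unsigned int max_victim_search,
				unsigned int blocks_per_seg)
{
	if (p->alloc_mode == SSR) {
		p->gc_mode = GC_GREEDY;      /* SSR refills partly valid segs */
		p->ofs_unit = 1;
	} else {
		p->gc_mode = bg_gc ? GC_CB : GC_GREEDY;
		p->ofs_unit = segs_per_sec;  /* whole section per victim */
	}
	p->max_search = nr_dirty < max_victim_search ?
			nr_dirty : max_victim_search;

	/* get_max_cost(): worst case is "every block valid" for greedy,
	 * UINT_MAX for cost-benefit (whose cost is UINT_MAX - benefit). */
	if (p->alloc_mode == SSR)
		p->min_cost = blocks_per_seg;
	else if (p->gc_mode == GC_GREEDY)
		p->min_cost = blocks_per_seg * p->ofs_unit;
	else
		p->min_cost = UINT_MAX;
}

int main(void)
{
	struct victim_sel_policy p = { .alloc_mode = LFS };

	select_policy_model(&p, 1 /* background */, 512, 1, 1024, 512);
	printf("bg: mode=%s ofs_unit=%u max_search=%u\n",
	       p.gc_mode == GC_CB ? "CB" : "greedy", p.ofs_unit, p.max_search);
	return 0;
}
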
182 static unsigned int check_bg_victims(struct f2fs_sb_info *sbi) in check_bg_victims() argument
184 struct dirty_seglist_info *dirty_i = DIRTY_I(sbi); in check_bg_victims()
192 for_each_set_bit(secno, dirty_i->victim_secmap, MAIN_SECS(sbi)) { in check_bg_victims()
193 if (sec_usage_check(sbi, secno)) in check_bg_victims()
196 return secno * sbi->segs_per_sec; in check_bg_victims()
201 static unsigned int get_cb_cost(struct f2fs_sb_info *sbi, unsigned int segno) in get_cb_cost() argument
203 struct sit_info *sit_i = SIT_I(sbi); in get_cb_cost()
204 unsigned int secno = GET_SECNO(sbi, segno); in get_cb_cost()
205 unsigned int start = secno * sbi->segs_per_sec; in get_cb_cost()
212 for (i = 0; i < sbi->segs_per_sec; i++) in get_cb_cost()
213 mtime += get_seg_entry(sbi, start + i)->mtime; in get_cb_cost()
214 vblocks = get_valid_blocks(sbi, segno, sbi->segs_per_sec); in get_cb_cost()
216 mtime = div_u64(mtime, sbi->segs_per_sec); in get_cb_cost()
217 vblocks = div_u64(vblocks, sbi->segs_per_sec); in get_cb_cost()
219 u = (vblocks * 100) >> sbi->log_blocks_per_seg; in get_cb_cost()
233 static inline unsigned int get_gc_cost(struct f2fs_sb_info *sbi, in get_gc_cost() argument
237 return get_seg_entry(sbi, segno)->ckpt_valid_blocks; in get_gc_cost()
241 return get_valid_blocks(sbi, segno, sbi->segs_per_sec); in get_gc_cost()
243 return get_cb_cost(sbi, segno); in get_gc_cost()
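
get_gc_cost() dispatches on the policy: SSR allocation ranks segments by blocks valid at the last checkpoint, greedy (foreground) GC by the current valid-block count, and cost-benefit (background) GC by get_cb_cost(), which weighs utilization against segment age so that old, mostly invalid sections win. A standalone sketch of that cost-benefit value (the formula mirrors the classic LFS heuristic; treat the code as a model, not the kernel source):

#include <limits.h>
#include <stdio.h>

/*
 * u   = utilization of the section in percent (valid blocks / total blocks),
 * age = normalized age in [0, 100], derived from the section's mean mtime
 *       relative to the oldest and newest mtimes tracked in the SIT.
 * Returning UINT_MAX - benefit makes "smaller is better", which is what the
 * minimum-cost scan in get_victim_by_default() expects.
 */
static unsigned int cb_cost(unsigned int u, unsigned int age)
{
	return UINT_MAX - ((100 * (100 - u) * age) / (100 + u));
}

int main(void)
{
	/* An old, nearly empty section costs less than a hot, full one. */
	printf("u=10%%, age=90 -> %u\n", cb_cost(10, 90));
	printf("u=90%%, age=10 -> %u\n", cb_cost(90, 10));
	return 0;
}
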
254 static int get_victim_by_default(struct f2fs_sb_info *sbi, in get_victim_by_default() argument
257 struct dirty_seglist_info *dirty_i = DIRTY_I(sbi); in get_victim_by_default()
265 select_policy(sbi, gc_type, type, &p); in get_victim_by_default()
268 p.min_cost = max_cost = get_max_cost(sbi, &p); in get_victim_by_default()
271 p.min_segno = check_bg_victims(sbi); in get_victim_by_default()
280 segno = find_next_bit(p.dirty_segmap, MAIN_SEGS(sbi), p.offset); in get_victim_by_default()
281 if (segno >= MAIN_SEGS(sbi)) { in get_victim_by_default()
282 if (sbi->last_victim[p.gc_mode]) { in get_victim_by_default()
283 sbi->last_victim[p.gc_mode] = 0; in get_victim_by_default()
294 secno = GET_SECNO(sbi, segno); in get_victim_by_default()
296 if (sec_usage_check(sbi, secno)) in get_victim_by_default()
301 cost = get_gc_cost(sbi, segno, &p); in get_victim_by_default()
311 sbi->last_victim[p.gc_mode] = segno; in get_victim_by_default()
318 secno = GET_SECNO(sbi, p.min_segno); in get_victim_by_default()
320 sbi->cur_victim_sec = secno; in get_victim_by_default()
326 trace_f2fs_get_victim(sbi->sb, type, gc_type, &p, in get_victim_by_default()
327 sbi->cur_victim_sec, in get_victim_by_default()
328 prefree_segments(sbi), free_segments(sbi)); in get_victim_by_default()
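
get_victim_by_default() is a bounded linear scan of the dirty-segment bitmap: resume at last_victim, skip sections that are currently in use, remember the minimum-cost candidate, and stop after max_search probes, recording where to resume next time. A compact standalone model of that scan (the bitmap and cost table are stand-ins, not the kernel data structures):

#include <limits.h>
#include <stdio.h>

#define NULL_SEGNO UINT_MAX

/* Stand-ins for the dirty-segment bitmap and the per-segment GC cost. */
static int dirty[16]         = { 0, 1, 0, 1, 1, 0, 0, 1, 0, 0, 1, 0, 0, 0, 1, 0 };
static unsigned int cost[16] = { 0, 7, 0, 3, 9, 0, 0, 2, 0, 0, 5, 0, 0, 0, 8, 0 };

static unsigned int pick_victim(unsigned int nsegs, unsigned int *last_victim,
				unsigned int max_search)
{
	unsigned int segno = *last_victim;
	unsigned int min_segno = NULL_SEGNO, min_cost = UINT_MAX;
	unsigned int searched = 0;

	while (searched < max_search) {
		/* find_next_bit() analogue: next dirty segment, with wrap. */
		while (segno < nsegs && !dirty[segno])
			segno++;
		if (segno >= nsegs) {
			if (*last_victim == 0)
				break;          /* scanned everything */
			*last_victim = 0;       /* wrap around once */
			segno = 0;
			continue;
		}
		if (cost[segno] < min_cost) {
			min_cost = cost[segno];
			min_segno = segno;
		}
		searched++;
		segno++;
	}
	*last_victim = segno % nsegs;           /* resume point for next pass */
	return min_segno;
}

int main(void)
{
	unsigned int last = 0;

	printf("victim = %u\n", pick_victim(16, &last, 4));
	printf("victim = %u\n", pick_victim(16, &last, 4));
	return 0;
}
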
375 static int check_valid_map(struct f2fs_sb_info *sbi, in check_valid_map() argument
378 struct sit_info *sit_i = SIT_I(sbi); in check_valid_map()
383 sentry = get_seg_entry(sbi, segno); in check_valid_map()
394 static void gc_node_segment(struct f2fs_sb_info *sbi, in gc_node_segment() argument
404 for (off = 0; off < sbi->blocks_per_seg; off++, entry++) { in gc_node_segment()
409 if (gc_type == BG_GC && has_not_enough_free_secs(sbi, 0)) in gc_node_segment()
412 if (check_valid_map(sbi, segno, off) == 0) in gc_node_segment()
416 ra_node_page(sbi, nid); in gc_node_segment()
419 node_page = get_node_page(sbi, nid); in gc_node_segment()
424 if (check_valid_map(sbi, segno, off) == 0) { in gc_node_segment()
438 stat_inc_node_blk_count(sbi, 1, gc_type); in gc_node_segment()
452 sync_node_pages(sbi, 0, &wbc); in gc_node_segment()
458 if (get_valid_blocks(sbi, segno, 1) != 0) in gc_node_segment()
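
gc_node_segment() walks every block of the victim segment via its summary entries in two passes: the first issues ra_node_page() read-ahead for each still-valid nid, the second re-validates and migrates the node pages; for foreground GC a final sync_node_pages() flush is followed by a get_valid_blocks() check that the segment is really empty. The two-pass shape, as a toy sketch (an assumption, not the kernel code):

#include <stdio.h>

#define BLOCKS_PER_SEG 8

/* 1 = the summary entry's block is still valid per the SIT bitmap. */
static const int valid[BLOCKS_PER_SEG] = { 1, 0, 1, 1, 0, 0, 1, 0 };

static void gc_node_segment_model(void)
{
	for (int phase = 0; phase < 2; phase++)
		for (int off = 0; off < BLOCKS_PER_SEG; off++) {
			if (!valid[off])
				continue;       /* block already obsolete */
			if (phase == 0)
				printf("readahead node block %d\n", off);
			else
				printf("migrate node block %d\n", off);
		}
}

int main(void)
{
	gc_node_segment_model();
	return 0;
}
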
490 static int check_dnode(struct f2fs_sb_info *sbi, struct f2fs_summary *sum, in check_dnode() argument
501 node_page = get_node_page(sbi, nid); in check_dnode()
505 get_node_info(sbi, nid, dni); in check_dnode()
553 static void gc_data_segment(struct f2fs_sb_info *sbi, struct f2fs_summary *sum, in gc_data_segment() argument
556 struct super_block *sb = sbi->sb; in gc_data_segment()
562 start_addr = START_BLOCK(sbi, segno); in gc_data_segment()
567 for (off = 0; off < sbi->blocks_per_seg; off++, entry++) { in gc_data_segment()
575 if (gc_type == BG_GC && has_not_enough_free_secs(sbi, 0)) in gc_data_segment()
578 if (check_valid_map(sbi, segno, off) == 0) in gc_data_segment()
582 ra_node_page(sbi, le32_to_cpu(entry->nid)); in gc_data_segment()
587 if (check_dnode(sbi, entry, &dni, start_addr + off, &nofs) == 0) in gc_data_segment()
591 ra_node_page(sbi, dni.ino); in gc_data_segment()
625 stat_inc_data_blk_count(sbi, 1, gc_type); in gc_data_segment()
633 f2fs_submit_merged_bio(sbi, DATA, WRITE); in gc_data_segment()
639 if (get_valid_blocks(sbi, segno, 1) != 0) { in gc_data_segment()
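
The data path adds one indirection: the summary entry only names the owning node and the offset inside it, so check_dnode() re-reads that node and confirms it still maps the block address being collected before gc_data_segment() looks up the inode and moves the data page, ending with a merged-bio flush and the same emptiness re-check. A small model of that validation step (structures simplified, an assumption):

#include <stdint.h>
#include <stdio.h>

struct summary { uint32_t nid; uint16_t ofs_in_node; };
struct dnode   { uint32_t addr[8]; };        /* stand-in for a node page */

static int still_points_here(const struct dnode *dn,
			     const struct summary *sum, uint32_t blkaddr)
{
	/* If the node no longer records this address, the block went stale. */
	return dn->addr[sum->ofs_in_node] == blkaddr;
}

int main(void)
{
	struct dnode dn = { .addr = { 0, 1234, 0, 0, 0, 0, 0, 0 } };
	struct summary sum = { .nid = 7, .ofs_in_node = 1 };

	printf("blk 1234 valid: %d\n", still_points_here(&dn, &sum, 1234));
	printf("blk 999  valid: %d\n", still_points_here(&dn, &sum, 999));
	return 0;
}
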
646 static int __get_victim(struct f2fs_sb_info *sbi, unsigned int *victim, in __get_victim() argument
649 struct sit_info *sit_i = SIT_I(sbi); in __get_victim()
653 ret = DIRTY_I(sbi)->v_ops->get_victim(sbi, victim, gc_type, in __get_victim()
659 static void do_garbage_collect(struct f2fs_sb_info *sbi, unsigned int segno, in do_garbage_collect() argument
667 sum_page = get_sum_page(sbi, segno); in do_garbage_collect()
675 gc_node_segment(sbi, sum->entries, segno, gc_type); in do_garbage_collect()
678 gc_data_segment(sbi, sum->entries, gc_list, segno, gc_type); in do_garbage_collect()
683 stat_inc_seg_count(sbi, GET_SUM_TYPE((&sum->footer)), gc_type); in do_garbage_collect()
684 stat_inc_call_count(sbi->stat_info); in do_garbage_collect()
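
do_garbage_collect() reads the victim segment's summary block and dispatches on its footer type: node segments go to gc_node_segment(), data segments to gc_data_segment(), with the stat counters bumped afterwards. The dispatch, reduced to a sketch (an assumption, types simplified):

#include <stdio.h>

enum sum_type { SUM_TYPE_DATA, SUM_TYPE_NODE };

struct summary_block {
	enum sum_type footer_type;   /* node or data summaries inside */
};

static void gc_node_segment_model(unsigned int segno)
{
	printf("node GC on segment %u\n", segno);
}

static void gc_data_segment_model(unsigned int segno)
{
	printf("data GC on segment %u\n", segno);
}

static void do_garbage_collect_model(unsigned int segno,
				     const struct summary_block *sum)
{
	if (sum->footer_type == SUM_TYPE_NODE)
		gc_node_segment_model(segno);
	else
		gc_data_segment_model(segno);
}

int main(void)
{
	struct summary_block node_sum = { SUM_TYPE_NODE };
	struct summary_block data_sum = { SUM_TYPE_DATA };

	do_garbage_collect_model(100, &node_sum);
	do_garbage_collect_model(101, &data_sum);
	return 0;
}
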
689 int f2fs_gc(struct f2fs_sb_info *sbi) in f2fs_gc() argument
701 cpc.reason = __get_cp_reason(sbi); in f2fs_gc()
703 if (unlikely(!(sbi->sb->s_flags & MS_ACTIVE))) in f2fs_gc()
705 if (unlikely(f2fs_cp_error(sbi))) in f2fs_gc()
708 if (gc_type == BG_GC && has_not_enough_free_secs(sbi, nfree)) { in f2fs_gc()
710 write_checkpoint(sbi, &cpc); in f2fs_gc()
713 if (!__get_victim(sbi, &segno, gc_type)) in f2fs_gc()
718 if (sbi->segs_per_sec > 1) in f2fs_gc()
719 ra_meta_pages(sbi, GET_SUM_BLOCK(sbi, segno), sbi->segs_per_sec, in f2fs_gc()
722 for (i = 0; i < sbi->segs_per_sec; i++) in f2fs_gc()
723 do_garbage_collect(sbi, segno + i, &gc_list, gc_type); in f2fs_gc()
726 sbi->cur_victim_sec = NULL_SEGNO; in f2fs_gc()
728 WARN_ON(get_valid_blocks(sbi, segno, sbi->segs_per_sec)); in f2fs_gc()
731 if (has_not_enough_free_secs(sbi, nfree)) in f2fs_gc()
735 write_checkpoint(sbi, &cpc); in f2fs_gc()
737 mutex_unlock(&sbi->gc_mutex); in f2fs_gc()
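
f2fs_gc() is the driver: if background GC already sees too few free sections it writes a checkpoint first (reclaiming prefree segments is cheaper than moving blocks) and escalates to foreground GC, then picks a victim section, collects every segment in it (reading the summary blocks ahead when a section spans several segments), and loops while free sections remain scarce, closing with another checkpoint. A condensed standalone model of that control flow (every helper here is a stand-in):

#include <stdio.h>

#define NULL_SEGNO   (~0u)
#define SEGS_PER_SEC 2

/* Toy state: one free section, three needed, five dirty candidates. */
static unsigned int free_secs = 1, reserved_secs = 3, victims = 5;

static int has_not_enough_free_secs(unsigned int nfree)
{
	return free_secs + nfree < reserved_secs;
}
static void write_checkpoint(void)      { printf("checkpoint\n"); }
static void do_garbage_collect(unsigned int segno)
{
	printf("  collect segment %u\n", segno);
}
static unsigned int get_victim(void)
{
	return victims ? victims-- * SEGS_PER_SEC : NULL_SEGNO;
}

static int f2fs_gc_model(int fg_gc)
{
	unsigned int segno, i, nfree = 0;

gc_more:
	if (!fg_gc && has_not_enough_free_secs(nfree)) {
		fg_gc = 1;               /* escalate BG_GC to FG_GC */
		write_checkpoint();      /* reclaim prefree segments first */
	}
	segno = get_victim();
	if (segno == NULL_SEGNO)
		goto stop;

	for (i = 0; i < SEGS_PER_SEC; i++)
		do_garbage_collect(segno + i);

	if (fg_gc)
		nfree++;                 /* the whole section is now reusable */
	if (has_not_enough_free_secs(nfree))
		goto gc_more;
	if (fg_gc)
		write_checkpoint();      /* make the migrated blocks durable */
stop:
	return nfree;
}

int main(void)
{
	printf("freed %d sections\n", f2fs_gc_model(0 /* background call */));
	return 0;
}
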
743 void build_gc_manager(struct f2fs_sb_info *sbi) in build_gc_manager() argument
745 DIRTY_I(sbi)->v_ops = &default_v_ops; in build_gc_manager()
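
build_gc_manager() only installs default_v_ops, so victim selection is reached through DIRTY_I(sbi)->v_ops->get_victim() (see __get_victim() above), keeping the policy replaceable behind a small ops table. A minimal sketch of that ops-table pattern (illustrative only; the real signature takes more arguments):

#include <stdio.h>

struct victim_selection {
	int (*get_victim)(unsigned int *victim, int gc_type);
};

static int get_victim_by_default_model(unsigned int *victim, int gc_type)
{
	(void)gc_type;                   /* a single policy in this sketch */
	*victim = 42;                    /* pretend segment 42 won the scan */
	return 1;
}

static const struct victim_selection default_v_ops = {
	.get_victim = get_victim_by_default_model,
};

int main(void)
{
	const struct victim_selection *v_ops = &default_v_ops;
	unsigned int victim;

	if (v_ops->get_victim(&victim, 0))
		printf("selected segment %u\n", victim);
	return 0;
}
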