nm_i             1124 fs/f2fs/checkpoint.c 	struct f2fs_nm_info *nm_i = NM_I(sbi);
nm_i             1125 fs/f2fs/checkpoint.c 	nid_t last_nid = nm_i->next_scan_nid;
nm_i             1377 fs/f2fs/checkpoint.c 	struct f2fs_nm_info *nm_i = NM_I(sbi);
nm_i             1457 fs/f2fs/checkpoint.c 		*(__le64 *)nm_i->nat_bits = cpu_to_le64(cp_ver);
nm_i             1459 fs/f2fs/checkpoint.c 		blk = start_blk + sbi->blocks_per_seg - nm_i->nat_bits_blocks;
nm_i             1460 fs/f2fs/checkpoint.c 		for (i = 0; i < nm_i->nat_bits_blocks; i++)
nm_i             1461 fs/f2fs/checkpoint.c 			f2fs_update_meta_page(sbi, nm_i->nat_bits +
nm_i               23 fs/f2fs/node.c #define on_f2fs_build_free_nids(nmi) mutex_is_locked(&(nmi)->build_lock)
nm_i               46 fs/f2fs/node.c 	struct f2fs_nm_info *nm_i = NM_I(sbi);
nm_i               61 fs/f2fs/node.c 		mem_size = (nm_i->nid_cnt[FREE_NID] *
nm_i               63 fs/f2fs/node.c 		res = mem_size < ((avail_ram * nm_i->ram_thresh / 100) >> 2);
nm_i               65 fs/f2fs/node.c 		mem_size = (nm_i->nat_cnt * sizeof(struct nat_entry)) >>
nm_i               67 fs/f2fs/node.c 		res = mem_size < ((avail_ram * nm_i->ram_thresh / 100) >> 2);
nm_i               74 fs/f2fs/node.c 		res = mem_size < ((avail_ram * nm_i->ram_thresh / 100) >> 1);
nm_i               82 fs/f2fs/node.c 		res = mem_size < ((avail_ram * nm_i->ram_thresh / 100) >> 1);
nm_i               88 fs/f2fs/node.c 		res = mem_size < ((avail_ram * nm_i->ram_thresh / 100) >> 1);
nm_i              122 fs/f2fs/node.c 	struct f2fs_nm_info *nm_i = NM_I(sbi);
nm_i              139 fs/f2fs/node.c 	set_to_next_nat(nm_i, nid);
nm_i              165 fs/f2fs/node.c static struct nat_entry *__init_nat_entry(struct f2fs_nm_info *nm_i,
nm_i              169 fs/f2fs/node.c 		f2fs_radix_tree_insert(&nm_i->nat_root, nat_get_nid(ne), ne);
nm_i              170 fs/f2fs/node.c 	else if (radix_tree_insert(&nm_i->nat_root, nat_get_nid(ne), ne))
nm_i              176 fs/f2fs/node.c 	spin_lock(&nm_i->nat_list_lock);
nm_i              177 fs/f2fs/node.c 	list_add_tail(&ne->list, &nm_i->nat_entries);
nm_i              178 fs/f2fs/node.c 	spin_unlock(&nm_i->nat_list_lock);
nm_i              180 fs/f2fs/node.c 	nm_i->nat_cnt++;
nm_i              184 fs/f2fs/node.c static struct nat_entry *__lookup_nat_cache(struct f2fs_nm_info *nm_i, nid_t n)
nm_i              188 fs/f2fs/node.c 	ne = radix_tree_lookup(&nm_i->nat_root, n);
nm_i              192 fs/f2fs/node.c 		spin_lock(&nm_i->nat_list_lock);
nm_i              194 fs/f2fs/node.c 			list_move_tail(&ne->list, &nm_i->nat_entries);
nm_i              195 fs/f2fs/node.c 		spin_unlock(&nm_i->nat_list_lock);
nm_i              201 fs/f2fs/node.c static unsigned int __gang_lookup_nat_cache(struct f2fs_nm_info *nm_i,
nm_i              204 fs/f2fs/node.c 	return radix_tree_gang_lookup(&nm_i->nat_root, (void **)ep, start, nr);
nm_i              207 fs/f2fs/node.c static void __del_from_nat_cache(struct f2fs_nm_info *nm_i, struct nat_entry *e)
nm_i              209 fs/f2fs/node.c 	radix_tree_delete(&nm_i->nat_root, nat_get_nid(e));
nm_i              210 fs/f2fs/node.c 	nm_i->nat_cnt--;
nm_i              214 fs/f2fs/node.c static struct nat_entry_set *__grab_nat_entry_set(struct f2fs_nm_info *nm_i,
nm_i              220 fs/f2fs/node.c 	head = radix_tree_lookup(&nm_i->nat_set_root, set);
nm_i              228 fs/f2fs/node.c 		f2fs_radix_tree_insert(&nm_i->nat_set_root, set, head);
nm_i              233 fs/f2fs/node.c static void __set_nat_cache_dirty(struct f2fs_nm_info *nm_i,
nm_i              240 fs/f2fs/node.c 		head = __grab_nat_entry_set(nm_i, ne);
nm_i              256 fs/f2fs/node.c 	nm_i->dirty_nat_cnt++;
nm_i              259 fs/f2fs/node.c 	spin_lock(&nm_i->nat_list_lock);
nm_i              264 fs/f2fs/node.c 	spin_unlock(&nm_i->nat_list_lock);
nm_i              267 fs/f2fs/node.c static void __clear_nat_cache_dirty(struct f2fs_nm_info *nm_i,
nm_i              270 fs/f2fs/node.c 	spin_lock(&nm_i->nat_list_lock);
nm_i              271 fs/f2fs/node.c 	list_move_tail(&ne->list, &nm_i->nat_entries);
nm_i              272 fs/f2fs/node.c 	spin_unlock(&nm_i->nat_list_lock);
nm_i              276 fs/f2fs/node.c 	nm_i->dirty_nat_cnt--;
nm_i              279 fs/f2fs/node.c static unsigned int __gang_lookup_nat_set(struct f2fs_nm_info *nm_i,
nm_i              282 fs/f2fs/node.c 	return radix_tree_gang_lookup(&nm_i->nat_set_root, (void **)ep,
nm_i              354 fs/f2fs/node.c 	struct f2fs_nm_info *nm_i = NM_I(sbi);
nm_i              358 fs/f2fs/node.c 	down_read(&nm_i->nat_tree_lock);
nm_i              359 fs/f2fs/node.c 	e = __lookup_nat_cache(nm_i, nid);
nm_i              365 fs/f2fs/node.c 	up_read(&nm_i->nat_tree_lock);
nm_i              371 fs/f2fs/node.c 	struct f2fs_nm_info *nm_i = NM_I(sbi);
nm_i              375 fs/f2fs/node.c 	down_read(&nm_i->nat_tree_lock);
nm_i              376 fs/f2fs/node.c 	e = __lookup_nat_cache(nm_i, nid);
nm_i              379 fs/f2fs/node.c 	up_read(&nm_i->nat_tree_lock);
nm_i              385 fs/f2fs/node.c 	struct f2fs_nm_info *nm_i = NM_I(sbi);
nm_i              389 fs/f2fs/node.c 	down_read(&nm_i->nat_tree_lock);
nm_i              390 fs/f2fs/node.c 	e = __lookup_nat_cache(nm_i, ino);
nm_i              395 fs/f2fs/node.c 	up_read(&nm_i->nat_tree_lock);
nm_i              403 fs/f2fs/node.c 	struct f2fs_nm_info *nm_i = NM_I(sbi);
nm_i              410 fs/f2fs/node.c 	down_write(&nm_i->nat_tree_lock);
nm_i              411 fs/f2fs/node.c 	e = __lookup_nat_cache(nm_i, nid);
nm_i              413 fs/f2fs/node.c 		e = __init_nat_entry(nm_i, new, ne, false);
nm_i              419 fs/f2fs/node.c 	up_write(&nm_i->nat_tree_lock);
nm_i              427 fs/f2fs/node.c 	struct f2fs_nm_info *nm_i = NM_I(sbi);
nm_i              431 fs/f2fs/node.c 	down_write(&nm_i->nat_tree_lock);
nm_i              432 fs/f2fs/node.c 	e = __lookup_nat_cache(nm_i, ni->nid);
nm_i              434 fs/f2fs/node.c 		e = __init_nat_entry(nm_i, new, NULL, true);
nm_i              469 fs/f2fs/node.c 	__set_nat_cache_dirty(nm_i, e);
nm_i              473 fs/f2fs/node.c 		e = __lookup_nat_cache(nm_i, ni->ino);
nm_i              479 fs/f2fs/node.c 	up_write(&nm_i->nat_tree_lock);
nm_i              484 fs/f2fs/node.c 	struct f2fs_nm_info *nm_i = NM_I(sbi);
nm_i              487 fs/f2fs/node.c 	if (!down_write_trylock(&nm_i->nat_tree_lock))
nm_i              490 fs/f2fs/node.c 	spin_lock(&nm_i->nat_list_lock);
nm_i              494 fs/f2fs/node.c 		if (list_empty(&nm_i->nat_entries))
nm_i              497 fs/f2fs/node.c 		ne = list_first_entry(&nm_i->nat_entries,
nm_i              500 fs/f2fs/node.c 		spin_unlock(&nm_i->nat_list_lock);
nm_i              502 fs/f2fs/node.c 		__del_from_nat_cache(nm_i, ne);
nm_i              505 fs/f2fs/node.c 		spin_lock(&nm_i->nat_list_lock);
nm_i              507 fs/f2fs/node.c 	spin_unlock(&nm_i->nat_list_lock);
nm_i              509 fs/f2fs/node.c 	up_write(&nm_i->nat_tree_lock);
nm_i              519 fs/f2fs/node.c 	struct f2fs_nm_info *nm_i = NM_I(sbi);
nm_i              534 fs/f2fs/node.c 	down_read(&nm_i->nat_tree_lock);
nm_i              535 fs/f2fs/node.c 	e = __lookup_nat_cache(nm_i, nid);
nm_i              540 fs/f2fs/node.c 		up_read(&nm_i->nat_tree_lock);
nm_i              555 fs/f2fs/node.c 		up_read(&nm_i->nat_tree_lock);
nm_i              561 fs/f2fs/node.c 	up_read(&nm_i->nat_tree_lock);
nm_i             2048 fs/f2fs/node.c static struct free_nid *__lookup_free_nid_list(struct f2fs_nm_info *nm_i,
nm_i             2051 fs/f2fs/node.c 	return radix_tree_lookup(&nm_i->free_nid_root, n);
nm_i             2057 fs/f2fs/node.c 	struct f2fs_nm_info *nm_i = NM_I(sbi);
nm_i             2059 fs/f2fs/node.c 	int err = radix_tree_insert(&nm_i->free_nid_root, i->nid, i);
nm_i             2064 fs/f2fs/node.c 	nm_i->nid_cnt[state]++;
nm_i             2066 fs/f2fs/node.c 		list_add_tail(&i->list, &nm_i->free_nid_list);
nm_i             2073 fs/f2fs/node.c 	struct f2fs_nm_info *nm_i = NM_I(sbi);
nm_i             2076 fs/f2fs/node.c 	nm_i->nid_cnt[state]--;
nm_i             2079 fs/f2fs/node.c 	radix_tree_delete(&nm_i->free_nid_root, i->nid);
nm_i             2085 fs/f2fs/node.c 	struct f2fs_nm_info *nm_i = NM_I(sbi);
nm_i             2089 fs/f2fs/node.c 	nm_i->nid_cnt[org_state]--;
nm_i             2090 fs/f2fs/node.c 	nm_i->nid_cnt[dst_state]++;
nm_i             2097 fs/f2fs/node.c 		list_add_tail(&i->list, &nm_i->free_nid_list);
nm_i             2107 fs/f2fs/node.c 	struct f2fs_nm_info *nm_i = NM_I(sbi);
nm_i             2111 fs/f2fs/node.c 	if (!test_bit_le(nat_ofs, nm_i->nat_block_bitmap))
nm_i             2115 fs/f2fs/node.c 		if (test_bit_le(nid_ofs, nm_i->free_nid_bitmap[nat_ofs]))
nm_i             2117 fs/f2fs/node.c 		__set_bit_le(nid_ofs, nm_i->free_nid_bitmap[nat_ofs]);
nm_i             2118 fs/f2fs/node.c 		nm_i->free_nid_count[nat_ofs]++;
nm_i             2120 fs/f2fs/node.c 		if (!test_bit_le(nid_ofs, nm_i->free_nid_bitmap[nat_ofs]))
nm_i             2122 fs/f2fs/node.c 		__clear_bit_le(nid_ofs, nm_i->free_nid_bitmap[nat_ofs]);
nm_i             2124 fs/f2fs/node.c 			nm_i->free_nid_count[nat_ofs]--;
nm_i             2132 fs/f2fs/node.c 	struct f2fs_nm_info *nm_i = NM_I(sbi);
nm_i             2151 fs/f2fs/node.c 	spin_lock(&nm_i->nid_list_lock);
nm_i             2175 fs/f2fs/node.c 		ne = __lookup_nat_cache(nm_i, nid);
nm_i             2180 fs/f2fs/node.c 		e = __lookup_free_nid_list(nm_i, nid);
nm_i             2193 fs/f2fs/node.c 			nm_i->available_nids++;
nm_i             2195 fs/f2fs/node.c 	spin_unlock(&nm_i->nid_list_lock);
nm_i             2205 fs/f2fs/node.c 	struct f2fs_nm_info *nm_i = NM_I(sbi);
nm_i             2209 fs/f2fs/node.c 	spin_lock(&nm_i->nid_list_lock);
nm_i             2210 fs/f2fs/node.c 	i = __lookup_free_nid_list(nm_i, nid);
nm_i             2215 fs/f2fs/node.c 	spin_unlock(&nm_i->nid_list_lock);
nm_i             2224 fs/f2fs/node.c 	struct f2fs_nm_info *nm_i = NM_I(sbi);
nm_i             2230 fs/f2fs/node.c 	__set_bit_le(nat_ofs, nm_i->nat_block_bitmap);
nm_i             2235 fs/f2fs/node.c 		if (unlikely(start_nid >= nm_i->max_nid))
nm_i             2278 fs/f2fs/node.c 	struct f2fs_nm_info *nm_i = NM_I(sbi);
nm_i             2282 fs/f2fs/node.c 	down_read(&nm_i->nat_tree_lock);
nm_i             2284 fs/f2fs/node.c 	for (i = 0; i < nm_i->nat_blocks; i++) {
nm_i             2285 fs/f2fs/node.c 		if (!test_bit_le(i, nm_i->nat_block_bitmap))
nm_i             2287 fs/f2fs/node.c 		if (!nm_i->free_nid_count[i])
nm_i             2290 fs/f2fs/node.c 			idx = find_next_bit_le(nm_i->free_nid_bitmap[i],
nm_i             2298 fs/f2fs/node.c 			if (nm_i->nid_cnt[FREE_NID] >= MAX_FREE_NIDS)
nm_i             2305 fs/f2fs/node.c 	up_read(&nm_i->nat_tree_lock);
nm_i             2311 fs/f2fs/node.c 	struct f2fs_nm_info *nm_i = NM_I(sbi);
nm_i             2313 fs/f2fs/node.c 	nid_t nid = nm_i->next_scan_nid;
nm_i             2315 fs/f2fs/node.c 	if (unlikely(nid >= nm_i->max_nid))
nm_i             2319 fs/f2fs/node.c 	if (nm_i->nid_cnt[FREE_NID] >= NAT_ENTRY_PER_BLOCK)
nm_i             2329 fs/f2fs/node.c 		if (nm_i->nid_cnt[FREE_NID] >= NAT_ENTRY_PER_BLOCK)
nm_i             2337 fs/f2fs/node.c 	down_read(&nm_i->nat_tree_lock);
nm_i             2341 fs/f2fs/node.c 						nm_i->nat_block_bitmap)) {
nm_i             2352 fs/f2fs/node.c 				up_read(&nm_i->nat_tree_lock);
nm_i             2360 fs/f2fs/node.c 		if (unlikely(nid >= nm_i->max_nid))
nm_i             2368 fs/f2fs/node.c 	nm_i->next_scan_nid = nid;
nm_i             2373 fs/f2fs/node.c 	up_read(&nm_i->nat_tree_lock);
nm_i             2375 fs/f2fs/node.c 	f2fs_ra_meta_pages(sbi, NAT_BLOCK_OFFSET(nm_i->next_scan_nid),
nm_i             2376 fs/f2fs/node.c 					nm_i->ra_nid_pages, META_NAT, false);
nm_i             2399 fs/f2fs/node.c 	struct f2fs_nm_info *nm_i = NM_I(sbi);
nm_i             2407 fs/f2fs/node.c 	spin_lock(&nm_i->nid_list_lock);
nm_i             2409 fs/f2fs/node.c 	if (unlikely(nm_i->available_nids == 0)) {
nm_i             2410 fs/f2fs/node.c 		spin_unlock(&nm_i->nid_list_lock);
nm_i             2415 fs/f2fs/node.c 	if (nm_i->nid_cnt[FREE_NID] && !on_f2fs_build_free_nids(nm_i)) {
nm_i             2416 fs/f2fs/node.c 		f2fs_bug_on(sbi, list_empty(&nm_i->free_nid_list));
nm_i             2417 fs/f2fs/node.c 		i = list_first_entry(&nm_i->free_nid_list,
nm_i             2422 fs/f2fs/node.c 		nm_i->available_nids--;
nm_i             2426 fs/f2fs/node.c 		spin_unlock(&nm_i->nid_list_lock);
nm_i             2429 fs/f2fs/node.c 	spin_unlock(&nm_i->nid_list_lock);
nm_i             2442 fs/f2fs/node.c 	struct f2fs_nm_info *nm_i = NM_I(sbi);
nm_i             2445 fs/f2fs/node.c 	spin_lock(&nm_i->nid_list_lock);
nm_i             2446 fs/f2fs/node.c 	i = __lookup_free_nid_list(nm_i, nid);
nm_i             2449 fs/f2fs/node.c 	spin_unlock(&nm_i->nid_list_lock);
nm_i             2459 fs/f2fs/node.c 	struct f2fs_nm_info *nm_i = NM_I(sbi);
nm_i             2466 fs/f2fs/node.c 	spin_lock(&nm_i->nid_list_lock);
nm_i             2467 fs/f2fs/node.c 	i = __lookup_free_nid_list(nm_i, nid);
nm_i             2477 fs/f2fs/node.c 	nm_i->available_nids++;
nm_i             2481 fs/f2fs/node.c 	spin_unlock(&nm_i->nid_list_lock);
nm_i             2489 fs/f2fs/node.c 	struct f2fs_nm_info *nm_i = NM_I(sbi);
nm_i             2493 fs/f2fs/node.c 	if (nm_i->nid_cnt[FREE_NID] <= MAX_FREE_NIDS)
nm_i             2496 fs/f2fs/node.c 	if (!mutex_trylock(&nm_i->build_lock))
nm_i             2499 fs/f2fs/node.c 	spin_lock(&nm_i->nid_list_lock);
nm_i             2500 fs/f2fs/node.c 	list_for_each_entry_safe(i, next, &nm_i->free_nid_list, list) {
nm_i             2502 fs/f2fs/node.c 				nm_i->nid_cnt[FREE_NID] <= MAX_FREE_NIDS)
nm_i             2509 fs/f2fs/node.c 	spin_unlock(&nm_i->nid_list_lock);
nm_i             2510 fs/f2fs/node.c 	mutex_unlock(&nm_i->build_lock);
nm_i             2702 fs/f2fs/node.c 	struct f2fs_nm_info *nm_i = NM_I(sbi);
nm_i             2715 fs/f2fs/node.c 		ne = __lookup_nat_cache(nm_i, nid);
nm_i             2718 fs/f2fs/node.c 			__init_nat_entry(nm_i, ne, &raw_ne, true);
nm_i             2728 fs/f2fs/node.c 			spin_lock(&nm_i->nid_list_lock);
nm_i             2729 fs/f2fs/node.c 			nm_i->available_nids--;
nm_i             2730 fs/f2fs/node.c 			spin_unlock(&nm_i->nid_list_lock);
nm_i             2733 fs/f2fs/node.c 		__set_nat_cache_dirty(nm_i, ne);
nm_i             2760 fs/f2fs/node.c 	struct f2fs_nm_info *nm_i = NM_I(sbi);
nm_i             2778 fs/f2fs/node.c 		__set_bit_le(nat_index, nm_i->empty_nat_bits);
nm_i             2779 fs/f2fs/node.c 		__clear_bit_le(nat_index, nm_i->full_nat_bits);
nm_i             2783 fs/f2fs/node.c 	__clear_bit_le(nat_index, nm_i->empty_nat_bits);
nm_i             2785 fs/f2fs/node.c 		__set_bit_le(nat_index, nm_i->full_nat_bits);
nm_i             2787 fs/f2fs/node.c 		__clear_bit_le(nat_index, nm_i->full_nat_bits);
nm_i             2870 fs/f2fs/node.c 	struct f2fs_nm_info *nm_i = NM_I(sbi);
nm_i             2882 fs/f2fs/node.c 		down_write(&nm_i->nat_tree_lock);
nm_i             2884 fs/f2fs/node.c 		up_write(&nm_i->nat_tree_lock);
nm_i             2887 fs/f2fs/node.c 	if (!nm_i->dirty_nat_cnt)
nm_i             2890 fs/f2fs/node.c 	down_write(&nm_i->nat_tree_lock);
nm_i             2898 fs/f2fs/node.c 		!__has_cursum_space(journal, nm_i->dirty_nat_cnt, NAT_JOURNAL))
nm_i             2901 fs/f2fs/node.c 	while ((found = __gang_lookup_nat_set(nm_i,
nm_i             2917 fs/f2fs/node.c 	up_write(&nm_i->nat_tree_lock);
nm_i             2926 fs/f2fs/node.c 	struct f2fs_nm_info *nm_i = NM_I(sbi);
nm_i             2927 fs/f2fs/node.c 	unsigned int nat_bits_bytes = nm_i->nat_blocks / BITS_PER_BYTE;
nm_i             2935 fs/f2fs/node.c 	nm_i->nat_bits_blocks = F2FS_BLK_ALIGN((nat_bits_bytes << 1) + 8);
nm_i             2936 fs/f2fs/node.c 	nm_i->nat_bits = f2fs_kzalloc(sbi,
nm_i             2937 fs/f2fs/node.c 			nm_i->nat_bits_blocks << F2FS_BLKSIZE_BITS, GFP_KERNEL);
nm_i             2938 fs/f2fs/node.c 	if (!nm_i->nat_bits)
nm_i             2942 fs/f2fs/node.c 						nm_i->nat_bits_blocks;
nm_i             2943 fs/f2fs/node.c 	for (i = 0; i < nm_i->nat_bits_blocks; i++) {
nm_i             2950 fs/f2fs/node.c 		memcpy(nm_i->nat_bits + (i << F2FS_BLKSIZE_BITS),
nm_i             2956 fs/f2fs/node.c 	if (cpu_to_le64(cp_ver) != *(__le64 *)nm_i->nat_bits) {
nm_i             2961 fs/f2fs/node.c 	nm_i->full_nat_bits = nm_i->nat_bits + 8;
nm_i             2962 fs/f2fs/node.c 	nm_i->empty_nat_bits = nm_i->full_nat_bits + nat_bits_bytes;
nm_i             2970 fs/f2fs/node.c 	struct f2fs_nm_info *nm_i = NM_I(sbi);
nm_i             2977 fs/f2fs/node.c 	for (i = 0; i < nm_i->nat_blocks; i++) {
nm_i             2978 fs/f2fs/node.c 		i = find_next_bit_le(nm_i->empty_nat_bits, nm_i->nat_blocks, i);
nm_i             2979 fs/f2fs/node.c 		if (i >= nm_i->nat_blocks)
nm_i             2982 fs/f2fs/node.c 		__set_bit_le(i, nm_i->nat_block_bitmap);
nm_i             2993 fs/f2fs/node.c 	for (i = 0; i < nm_i->nat_blocks; i++) {
nm_i             2994 fs/f2fs/node.c 		i = find_next_bit_le(nm_i->full_nat_bits, nm_i->nat_blocks, i);
nm_i             2995 fs/f2fs/node.c 		if (i >= nm_i->nat_blocks)
nm_i             2998 fs/f2fs/node.c 		__set_bit_le(i, nm_i->nat_block_bitmap);
nm_i             3005 fs/f2fs/node.c 	struct f2fs_nm_info *nm_i = NM_I(sbi);
nm_i             3010 fs/f2fs/node.c 	nm_i->nat_blkaddr = le32_to_cpu(sb_raw->nat_blkaddr);
nm_i             3014 fs/f2fs/node.c 	nm_i->nat_blocks = nat_segs << le32_to_cpu(sb_raw->log_blocks_per_seg);
nm_i             3015 fs/f2fs/node.c 	nm_i->max_nid = NAT_ENTRY_PER_BLOCK * nm_i->nat_blocks;
nm_i             3018 fs/f2fs/node.c 	nm_i->available_nids = nm_i->max_nid - sbi->total_valid_node_count -
nm_i             3020 fs/f2fs/node.c 	nm_i->nid_cnt[FREE_NID] = 0;
nm_i             3021 fs/f2fs/node.c 	nm_i->nid_cnt[PREALLOC_NID] = 0;
nm_i             3022 fs/f2fs/node.c 	nm_i->nat_cnt = 0;
nm_i             3023 fs/f2fs/node.c 	nm_i->ram_thresh = DEF_RAM_THRESHOLD;
nm_i             3024 fs/f2fs/node.c 	nm_i->ra_nid_pages = DEF_RA_NID_PAGES;
nm_i             3025 fs/f2fs/node.c 	nm_i->dirty_nats_ratio = DEF_DIRTY_NAT_RATIO_THRESHOLD;
nm_i             3027 fs/f2fs/node.c 	INIT_RADIX_TREE(&nm_i->free_nid_root, GFP_ATOMIC);
nm_i             3028 fs/f2fs/node.c 	INIT_LIST_HEAD(&nm_i->free_nid_list);
nm_i             3029 fs/f2fs/node.c 	INIT_RADIX_TREE(&nm_i->nat_root, GFP_NOIO);
nm_i             3030 fs/f2fs/node.c 	INIT_RADIX_TREE(&nm_i->nat_set_root, GFP_NOIO);
nm_i             3031 fs/f2fs/node.c 	INIT_LIST_HEAD(&nm_i->nat_entries);
nm_i             3032 fs/f2fs/node.c 	spin_lock_init(&nm_i->nat_list_lock);
nm_i             3034 fs/f2fs/node.c 	mutex_init(&nm_i->build_lock);
nm_i             3035 fs/f2fs/node.c 	spin_lock_init(&nm_i->nid_list_lock);
nm_i             3036 fs/f2fs/node.c 	init_rwsem(&nm_i->nat_tree_lock);
nm_i             3038 fs/f2fs/node.c 	nm_i->next_scan_nid = le32_to_cpu(sbi->ckpt->next_free_nid);
nm_i             3039 fs/f2fs/node.c 	nm_i->bitmap_size = __bitmap_size(sbi, NAT_BITMAP);
nm_i             3044 fs/f2fs/node.c 	nm_i->nat_bitmap = kmemdup(version_bitmap, nm_i->bitmap_size,
nm_i             3046 fs/f2fs/node.c 	if (!nm_i->nat_bitmap)
nm_i             3054 fs/f2fs/node.c 	nm_i->nat_bitmap_mir = kmemdup(version_bitmap, nm_i->bitmap_size,
nm_i             3056 fs/f2fs/node.c 	if (!nm_i->nat_bitmap_mir)
nm_i             3065 fs/f2fs/node.c 	struct f2fs_nm_info *nm_i = NM_I(sbi);
nm_i             3068 fs/f2fs/node.c 	nm_i->free_nid_bitmap =
nm_i             3070 fs/f2fs/node.c 					     nm_i->nat_blocks),
nm_i             3072 fs/f2fs/node.c 	if (!nm_i->free_nid_bitmap)
nm_i             3075 fs/f2fs/node.c 	for (i = 0; i < nm_i->nat_blocks; i++) {
nm_i             3076 fs/f2fs/node.c 		nm_i->free_nid_bitmap[i] = f2fs_kvzalloc(sbi,
nm_i             3078 fs/f2fs/node.c 		if (!nm_i->free_nid_bitmap[i])
nm_i             3082 fs/f2fs/node.c 	nm_i->nat_block_bitmap = f2fs_kvzalloc(sbi, nm_i->nat_blocks / 8,
nm_i             3084 fs/f2fs/node.c 	if (!nm_i->nat_block_bitmap)
nm_i             3087 fs/f2fs/node.c 	nm_i->free_nid_count =
nm_i             3089 fs/f2fs/node.c 					      nm_i->nat_blocks),
nm_i             3091 fs/f2fs/node.c 	if (!nm_i->free_nid_count)
nm_i             3121 fs/f2fs/node.c 	struct f2fs_nm_info *nm_i = NM_I(sbi);
nm_i             3128 fs/f2fs/node.c 	if (!nm_i)
nm_i             3132 fs/f2fs/node.c 	spin_lock(&nm_i->nid_list_lock);
nm_i             3133 fs/f2fs/node.c 	list_for_each_entry_safe(i, next_i, &nm_i->free_nid_list, list) {
nm_i             3135 fs/f2fs/node.c 		spin_unlock(&nm_i->nid_list_lock);
nm_i             3137 fs/f2fs/node.c 		spin_lock(&nm_i->nid_list_lock);
nm_i             3139 fs/f2fs/node.c 	f2fs_bug_on(sbi, nm_i->nid_cnt[FREE_NID]);
nm_i             3140 fs/f2fs/node.c 	f2fs_bug_on(sbi, nm_i->nid_cnt[PREALLOC_NID]);
nm_i             3141 fs/f2fs/node.c 	f2fs_bug_on(sbi, !list_empty(&nm_i->free_nid_list));
nm_i             3142 fs/f2fs/node.c 	spin_unlock(&nm_i->nid_list_lock);
nm_i             3145 fs/f2fs/node.c 	down_write(&nm_i->nat_tree_lock);
nm_i             3146 fs/f2fs/node.c 	while ((found = __gang_lookup_nat_cache(nm_i,
nm_i             3152 fs/f2fs/node.c 			spin_lock(&nm_i->nat_list_lock);
nm_i             3154 fs/f2fs/node.c 			spin_unlock(&nm_i->nat_list_lock);
nm_i             3156 fs/f2fs/node.c 			__del_from_nat_cache(nm_i, natvec[idx]);
nm_i             3159 fs/f2fs/node.c 	f2fs_bug_on(sbi, nm_i->nat_cnt);
nm_i             3163 fs/f2fs/node.c 	while ((found = __gang_lookup_nat_set(nm_i,
nm_i             3171 fs/f2fs/node.c 			radix_tree_delete(&nm_i->nat_set_root, setvec[idx]->set);
nm_i             3175 fs/f2fs/node.c 	up_write(&nm_i->nat_tree_lock);
nm_i             3177 fs/f2fs/node.c 	kvfree(nm_i->nat_block_bitmap);
nm_i             3178 fs/f2fs/node.c 	if (nm_i->free_nid_bitmap) {
nm_i             3181 fs/f2fs/node.c 		for (i = 0; i < nm_i->nat_blocks; i++)
nm_i             3182 fs/f2fs/node.c 			kvfree(nm_i->free_nid_bitmap[i]);
nm_i             3183 fs/f2fs/node.c 		kvfree(nm_i->free_nid_bitmap);
nm_i             3185 fs/f2fs/node.c 	kvfree(nm_i->free_nid_count);
nm_i             3187 fs/f2fs/node.c 	kvfree(nm_i->nat_bitmap);
nm_i             3188 fs/f2fs/node.c 	kvfree(nm_i->nat_bits);
nm_i             3190 fs/f2fs/node.c 	kvfree(nm_i->nat_bitmap_mir);
nm_i             3193 fs/f2fs/node.c 	kvfree(nm_i);
nm_i              165 fs/f2fs/node.h 	struct f2fs_nm_info *nm_i = NM_I(sbi);
nm_i              168 fs/f2fs/node.h 	spin_lock(&nm_i->nid_list_lock);
nm_i              169 fs/f2fs/node.h 	if (nm_i->nid_cnt[FREE_NID] <= 0) {
nm_i              170 fs/f2fs/node.h 		spin_unlock(&nm_i->nid_list_lock);
nm_i              173 fs/f2fs/node.h 	fnid = list_first_entry(&nm_i->free_nid_list, struct free_nid, list);
nm_i              175 fs/f2fs/node.h 	spin_unlock(&nm_i->nid_list_lock);
nm_i              183 fs/f2fs/node.h 	struct f2fs_nm_info *nm_i = NM_I(sbi);
nm_i              186 fs/f2fs/node.h 	if (memcmp(nm_i->nat_bitmap, nm_i->nat_bitmap_mir,
nm_i              187 fs/f2fs/node.h 						nm_i->bitmap_size))
nm_i              190 fs/f2fs/node.h 	memcpy(addr, nm_i->nat_bitmap, nm_i->bitmap_size);
nm_i              195 fs/f2fs/node.h 	struct f2fs_nm_info *nm_i = NM_I(sbi);
nm_i              206 fs/f2fs/node.h 	block_addr = (pgoff_t)(nm_i->nat_blkaddr +
nm_i              210 fs/f2fs/node.h 	if (f2fs_test_bit(block_off, nm_i->nat_bitmap))
nm_i              219 fs/f2fs/node.h 	struct f2fs_nm_info *nm_i = NM_I(sbi);
nm_i              221 fs/f2fs/node.h 	block_addr -= nm_i->nat_blkaddr;
nm_i              223 fs/f2fs/node.h 	return block_addr + nm_i->nat_blkaddr;
nm_i              226 fs/f2fs/node.h static inline void set_to_next_nat(struct f2fs_nm_info *nm_i, nid_t start_nid)
nm_i              230 fs/f2fs/node.h 	f2fs_change_bit(block_off, nm_i->nat_bitmap);
nm_i              232 fs/f2fs/node.h 	f2fs_change_bit(block_off, nm_i->nat_bitmap_mir);