/*
 * fs/f2fs/node.c
 *
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 *             http://www.samsung.com/
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/fs.h>
#include <linux/f2fs_fs.h>
#include <linux/mpage.h>
#include <linux/backing-dev.h>
#include <linux/blkdev.h>
#include <linux/pagevec.h>
#include <linux/swap.h>

#include "f2fs.h"
#include "node.h"
#include "segment.h"
#include "trace.h"
#include <trace/events/f2fs.h>

#define on_build_free_nids(nmi) mutex_is_locked(&(nmi)->build_lock)

static struct kmem_cache *nat_entry_slab;
static struct kmem_cache *free_nid_slab;
static struct kmem_cache *nat_entry_set_slab;

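/*
 * available_free_memory() throttles the in-memory caches against low
 * memory: each cache type may grow until it reaches a fixed share (25%
 * or 50%) of ram_thresh percent of low memory.  As a rough worked
 * example, assuming the default ram_thresh of 10%: with 1 GB of low
 * memory, FREE_NIDS may consume up to 25% of 100 MB, i.e. about 25 MB,
 * before new free nids stop being cached.
 */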
bool available_free_memory(struct f2fs_sb_info *sbi, int type)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct sysinfo val;
	unsigned long avail_ram;
	unsigned long mem_size = 0;
	bool res = false;

	si_meminfo(&val);

	/* only uses low memory */
	avail_ram = val.totalram - val.totalhigh;

	/*
	 * give 25%, 25%, 50%, 50% and 50% of memory to each of the
	 * components below, respectively
	 */
	if (type == FREE_NIDS) {
		mem_size = (nm_i->fcnt * sizeof(struct free_nid)) >>
							PAGE_CACHE_SHIFT;
		res = mem_size < ((avail_ram * nm_i->ram_thresh / 100) >> 2);
	} else if (type == NAT_ENTRIES) {
		mem_size = (nm_i->nat_cnt * sizeof(struct nat_entry)) >>
							PAGE_CACHE_SHIFT;
		res = mem_size < ((avail_ram * nm_i->ram_thresh / 100) >> 2);
	} else if (type == DIRTY_DENTS) {
		if (sbi->sb->s_bdi->dirty_exceeded)
			return false;
		mem_size = get_pages(sbi, F2FS_DIRTY_DENTS);
		res = mem_size < ((avail_ram * nm_i->ram_thresh / 100) >> 1);
	} else if (type == INO_ENTRIES) {
		int i;

		for (i = 0; i <= UPDATE_INO; i++)
			mem_size += (sbi->im[i].ino_num *
				sizeof(struct ino_entry)) >> PAGE_CACHE_SHIFT;
		res = mem_size < ((avail_ram * nm_i->ram_thresh / 100) >> 1);
	} else if (type == EXTENT_CACHE) {
		mem_size = (sbi->total_ext_tree * sizeof(struct extent_tree) +
				atomic_read(&sbi->total_ext_node) *
				sizeof(struct extent_node)) >> PAGE_CACHE_SHIFT;
		res = mem_size < ((avail_ram * nm_i->ram_thresh / 100) >> 1);
	} else {
		if (sbi->sb->s_bdi->dirty_exceeded)
			return false;
	}
	return res;
}

static void clear_node_page_dirty(struct page *page)
{
	struct address_space *mapping = page->mapping;
	unsigned long flags;

	if (PageDirty(page)) {
		spin_lock_irqsave(&mapping->tree_lock, flags);
		radix_tree_tag_clear(&mapping->page_tree,
				page_index(page),
				PAGECACHE_TAG_DIRTY);
		spin_unlock_irqrestore(&mapping->tree_lock, flags);

		clear_page_dirty_for_io(page);
		dec_page_count(F2FS_M_SB(mapping), F2FS_DIRTY_NODES);
	}
	ClearPageUptodate(page);
}

static struct page *get_current_nat_page(struct f2fs_sb_info *sbi, nid_t nid)
{
	pgoff_t index = current_nat_addr(sbi, nid);
	return get_meta_page(sbi, index);
}

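/*
 * F2FS keeps two copies of each NAT block (see the "pair segment" note
 * in init_node_manager()) and ping-pongs between them across
 * checkpoints.  get_next_nat_page() copies the current NAT block into
 * its shadow location and flips the nid's bit via set_to_next_nat(),
 * so the upcoming checkpoint writes out the new copy.
 */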
static struct page *get_next_nat_page(struct f2fs_sb_info *sbi, nid_t nid)
{
	struct page *src_page;
	struct page *dst_page;
	pgoff_t src_off;
	pgoff_t dst_off;
	void *src_addr;
	void *dst_addr;
	struct f2fs_nm_info *nm_i = NM_I(sbi);

	src_off = current_nat_addr(sbi, nid);
	dst_off = next_nat_addr(sbi, src_off);

	/* get current nat block page with lock */
	src_page = get_meta_page(sbi, src_off);
	dst_page = grab_meta_page(sbi, dst_off);
	f2fs_bug_on(sbi, PageDirty(src_page));

	src_addr = page_address(src_page);
	dst_addr = page_address(dst_page);
	memcpy(dst_addr, src_addr, PAGE_CACHE_SIZE);
	set_page_dirty(dst_page);
	f2fs_put_page(src_page, 1);

	set_to_next_nat(nm_i, nid);

	return dst_page;
}

static struct nat_entry *__lookup_nat_cache(struct f2fs_nm_info *nm_i, nid_t n)
{
	return radix_tree_lookup(&nm_i->nat_root, n);
}

static unsigned int __gang_lookup_nat_cache(struct f2fs_nm_info *nm_i,
		nid_t start, unsigned int nr, struct nat_entry **ep)
{
	return radix_tree_gang_lookup(&nm_i->nat_root, (void **)ep, start, nr);
}

static void __del_from_nat_cache(struct f2fs_nm_info *nm_i, struct nat_entry *e)
{
	list_del(&e->list);
	radix_tree_delete(&nm_i->nat_root, nat_get_nid(e));
	nm_i->nat_cnt--;
	kmem_cache_free(nat_entry_slab, e);
}

static void __set_nat_cache_dirty(struct f2fs_nm_info *nm_i,
						struct nat_entry *ne)
{
	nid_t set = NAT_BLOCK_OFFSET(ne->ni.nid);
	struct nat_entry_set *head;

	if (get_nat_flag(ne, IS_DIRTY))
		return;

	head = radix_tree_lookup(&nm_i->nat_set_root, set);
	if (!head) {
		head = f2fs_kmem_cache_alloc(nat_entry_set_slab, GFP_ATOMIC);

		INIT_LIST_HEAD(&head->entry_list);
		INIT_LIST_HEAD(&head->set_list);
		head->set = set;
		head->entry_cnt = 0;
		f2fs_radix_tree_insert(&nm_i->nat_set_root, set, head);
	}
	list_move_tail(&ne->list, &head->entry_list);
	nm_i->dirty_nat_cnt++;
	head->entry_cnt++;
	set_nat_flag(ne, IS_DIRTY, true);
}

static void __clear_nat_cache_dirty(struct f2fs_nm_info *nm_i,
						struct nat_entry *ne)
{
	nid_t set = NAT_BLOCK_OFFSET(ne->ni.nid);
	struct nat_entry_set *head;

	head = radix_tree_lookup(&nm_i->nat_set_root, set);
	if (head) {
		list_move_tail(&ne->list, &nm_i->nat_entries);
		set_nat_flag(ne, IS_DIRTY, false);
		head->entry_cnt--;
		nm_i->dirty_nat_cnt--;
	}
}

static unsigned int __gang_lookup_nat_set(struct f2fs_nm_info *nm_i,
		nid_t start, unsigned int nr, struct nat_entry_set **ep)
{
	return radix_tree_gang_lookup(&nm_i->nat_set_root, (void **)ep,
							start, nr);
}

bool is_checkpointed_node(struct f2fs_sb_info *sbi, nid_t nid)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct nat_entry *e;
	bool is_cp = true;

	down_read(&nm_i->nat_tree_lock);
	e = __lookup_nat_cache(nm_i, nid);
	if (e && !get_nat_flag(e, IS_CHECKPOINTED))
		is_cp = false;
	up_read(&nm_i->nat_tree_lock);
	return is_cp;
}

bool has_fsynced_inode(struct f2fs_sb_info *sbi, nid_t ino)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct nat_entry *e;
	bool fsynced = false;

	down_read(&nm_i->nat_tree_lock);
	e = __lookup_nat_cache(nm_i, ino);
	if (e && get_nat_flag(e, HAS_FSYNCED_INODE))
		fsynced = true;
	up_read(&nm_i->nat_tree_lock);
	return fsynced;
}

bool need_inode_block_update(struct f2fs_sb_info *sbi, nid_t ino)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct nat_entry *e;
	bool need_update = true;

	down_read(&nm_i->nat_tree_lock);
	e = __lookup_nat_cache(nm_i, ino);
	if (e && get_nat_flag(e, HAS_LAST_FSYNC) &&
			(get_nat_flag(e, IS_CHECKPOINTED) ||
			 get_nat_flag(e, HAS_FSYNCED_INODE)))
		need_update = false;
	up_read(&nm_i->nat_tree_lock);
	return need_update;
}

static struct nat_entry *grab_nat_entry(struct f2fs_nm_info *nm_i, nid_t nid)
{
	struct nat_entry *new;

	new = f2fs_kmem_cache_alloc(nat_entry_slab, GFP_ATOMIC);
	f2fs_radix_tree_insert(&nm_i->nat_root, nid, new);
	memset(new, 0, sizeof(struct nat_entry));
	nat_set_nid(new, nid);
	nat_reset_flag(new);
	list_add_tail(&new->list, &nm_i->nat_entries);
	nm_i->nat_cnt++;
	return new;
}

static void cache_nat_entry(struct f2fs_nm_info *nm_i, nid_t nid,
						struct f2fs_nat_entry *ne)
{
	struct nat_entry *e;

	down_write(&nm_i->nat_tree_lock);
	e = __lookup_nat_cache(nm_i, nid);
	if (!e) {
		e = grab_nat_entry(nm_i, nid);
		node_info_from_raw_nat(&e->ni, ne);
	}
	up_write(&nm_i->nat_tree_lock);
}

static void set_node_addr(struct f2fs_sb_info *sbi, struct node_info *ni,
			block_t new_blkaddr, bool fsync_done)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct nat_entry *e;

	down_write(&nm_i->nat_tree_lock);
	e = __lookup_nat_cache(nm_i, ni->nid);
	if (!e) {
		e = grab_nat_entry(nm_i, ni->nid);
		copy_node_info(&e->ni, ni);
		f2fs_bug_on(sbi, ni->blk_addr == NEW_ADDR);
	} else if (new_blkaddr == NEW_ADDR) {
		/*
		 * when a nid is reallocated, the previous nat entry can
		 * remain in the nat cache, so reinitialize it with the new
		 * information.
		 */
		copy_node_info(&e->ni, ni);
		f2fs_bug_on(sbi, ni->blk_addr != NULL_ADDR);
	}

	/* sanity check */
	f2fs_bug_on(sbi, nat_get_blkaddr(e) != ni->blk_addr);
	f2fs_bug_on(sbi, nat_get_blkaddr(e) == NULL_ADDR &&
			new_blkaddr == NULL_ADDR);
	f2fs_bug_on(sbi, nat_get_blkaddr(e) == NEW_ADDR &&
			new_blkaddr == NEW_ADDR);
	f2fs_bug_on(sbi, nat_get_blkaddr(e) != NEW_ADDR &&
			nat_get_blkaddr(e) != NULL_ADDR &&
			new_blkaddr == NEW_ADDR);

	/* increment version no as node is removed */
	if (nat_get_blkaddr(e) != NEW_ADDR && new_blkaddr == NULL_ADDR) {
		unsigned char version = nat_get_version(e);
		nat_set_version(e, inc_node_version(version));
	}

	/* change address */
	nat_set_blkaddr(e, new_blkaddr);
	if (new_blkaddr == NEW_ADDR || new_blkaddr == NULL_ADDR)
		set_nat_flag(e, IS_CHECKPOINTED, false);
	__set_nat_cache_dirty(nm_i, e);

	/* update fsync_mark if its inode nat entry is still alive */
	e = __lookup_nat_cache(nm_i, ni->ino);
	if (e) {
		if (fsync_done && ni->nid == ni->ino)
			set_nat_flag(e, HAS_FSYNCED_INODE, true);
		set_nat_flag(e, HAS_LAST_FSYNC, fsync_done);
	}
	up_write(&nm_i->nat_tree_lock);
}

int try_to_free_nats(struct f2fs_sb_info *sbi, int nr_shrink)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);

	if (available_free_memory(sbi, NAT_ENTRIES))
		return 0;

	down_write(&nm_i->nat_tree_lock);
	while (nr_shrink && !list_empty(&nm_i->nat_entries)) {
		struct nat_entry *ne;
		ne = list_first_entry(&nm_i->nat_entries,
					struct nat_entry, list);
		__del_from_nat_cache(nm_i, ne);
		nr_shrink--;
	}
	up_write(&nm_i->nat_tree_lock);
	return nr_shrink;
}

/*
 * This function always returns success
 */
void get_node_info(struct f2fs_sb_info *sbi, nid_t nid, struct node_info *ni)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_HOT_DATA);
	struct f2fs_summary_block *sum = curseg->sum_blk;
	nid_t start_nid = START_NID(nid);
	struct f2fs_nat_block *nat_blk;
	struct page *page = NULL;
	struct f2fs_nat_entry ne;
	struct nat_entry *e;
	int i;

	ni->nid = nid;

	/* Check nat cache */
	down_read(&nm_i->nat_tree_lock);
	e = __lookup_nat_cache(nm_i, nid);
	if (e) {
		ni->ino = nat_get_ino(e);
		ni->blk_addr = nat_get_blkaddr(e);
		ni->version = nat_get_version(e);
	}
	up_read(&nm_i->nat_tree_lock);
	if (e)
		return;

	memset(&ne, 0, sizeof(struct f2fs_nat_entry));

	/* Check current segment summary */
	mutex_lock(&curseg->curseg_mutex);
	i = lookup_journal_in_cursum(sum, NAT_JOURNAL, nid, 0);
	if (i >= 0) {
		ne = nat_in_journal(sum, i);
		node_info_from_raw_nat(ni, &ne);
	}
	mutex_unlock(&curseg->curseg_mutex);
	if (i >= 0)
		goto cache;

	/* Fill node_info from nat page */
	page = get_current_nat_page(sbi, start_nid);
	nat_blk = (struct f2fs_nat_block *)page_address(page);
	ne = nat_blk->entries[nid - start_nid];
	node_info_from_raw_nat(ni, &ne);
	f2fs_put_page(page, 1);
cache:
	/* cache nat entry */
	cache_nat_entry(NM_I(sbi), nid, &ne);
}

/*
 * The maximum depth is four.
 * Offset[0] will have raw inode offset.
 */
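/*
 * A rough worked example, assuming the default 4KB on-disk layout
 * (923 direct pointers in the inode, 1018 pointers per block):
 * blocks 0..922 are addressed by the inode itself (level 0), block 923
 * is the first one reached through NODE_DIR1_BLOCK (level 1), and
 * block 923 + 2 * 1018 = 2959 is the first one reached through
 * NODE_IND1_BLOCK (level 2).
 */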
static int get_node_path(struct f2fs_inode_info *fi, long block,
				int offset[4], unsigned int noffset[4])
{
	const long direct_index = ADDRS_PER_INODE(fi);
	const long direct_blks = ADDRS_PER_BLOCK;
	const long dptrs_per_blk = NIDS_PER_BLOCK;
	const long indirect_blks = ADDRS_PER_BLOCK * NIDS_PER_BLOCK;
	const long dindirect_blks = indirect_blks * NIDS_PER_BLOCK;
	int n = 0;
	int level = 0;

	noffset[0] = 0;

	if (block < direct_index) {
		offset[n] = block;
		goto got;
	}
	block -= direct_index;
	if (block < direct_blks) {
		offset[n++] = NODE_DIR1_BLOCK;
		noffset[n] = 1;
		offset[n] = block;
		level = 1;
		goto got;
	}
	block -= direct_blks;
	if (block < direct_blks) {
		offset[n++] = NODE_DIR2_BLOCK;
		noffset[n] = 2;
		offset[n] = block;
		level = 1;
		goto got;
	}
	block -= direct_blks;
	if (block < indirect_blks) {
		offset[n++] = NODE_IND1_BLOCK;
		noffset[n] = 3;
		offset[n++] = block / direct_blks;
		noffset[n] = 4 + offset[n - 1];
		offset[n] = block % direct_blks;
		level = 2;
		goto got;
	}
	block -= indirect_blks;
	if (block < indirect_blks) {
		offset[n++] = NODE_IND2_BLOCK;
		noffset[n] = 4 + dptrs_per_blk;
		offset[n++] = block / direct_blks;
		noffset[n] = 5 + dptrs_per_blk + offset[n - 1];
		offset[n] = block % direct_blks;
		level = 2;
		goto got;
	}
	block -= indirect_blks;
	if (block < dindirect_blks) {
		offset[n++] = NODE_DIND_BLOCK;
		noffset[n] = 5 + (dptrs_per_blk * 2);
		offset[n++] = block / indirect_blks;
		noffset[n] = 6 + (dptrs_per_blk * 2) +
			      offset[n - 1] * (dptrs_per_blk + 1);
		offset[n++] = (block / direct_blks) % dptrs_per_blk;
		noffset[n] = 7 + (dptrs_per_blk * 2) +
			      offset[n - 2] * (dptrs_per_blk + 1) +
			      offset[n - 1];
		offset[n] = block % direct_blks;
		level = 3;
		goto got;
	} else {
		BUG();
	}
got:
	return level;
}

/*
 * Caller should call f2fs_put_dnode(dn).
 * Also, it should grab and release a rwsem by calling f2fs_lock_op() and
 * f2fs_unlock_op() only if mode is set to ALLOC_NODE.
 * In the LOOKUP_NODE cases, the rwsem is not needed.
 */
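/*
 * A minimal usage sketch (error handling elided):
 *
 *	struct dnode_of_data dn;
 *
 *	set_new_dnode(&dn, inode, NULL, NULL, 0);
 *	err = get_dnode_of_data(&dn, index, LOOKUP_NODE);
 *	if (!err) {
 *		blkaddr = dn.data_blkaddr;
 *		f2fs_put_dnode(&dn);
 *	}
 */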
int get_dnode_of_data(struct dnode_of_data *dn, pgoff_t index, int mode)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
	struct page *npage[4];
	struct page *parent = NULL;
	int offset[4];
	unsigned int noffset[4];
	nid_t nids[4];
	int level, i;
	int err = 0;

	level = get_node_path(F2FS_I(dn->inode), index, offset, noffset);

	nids[0] = dn->inode->i_ino;
	npage[0] = dn->inode_page;

	if (!npage[0]) {
		npage[0] = get_node_page(sbi, nids[0]);
		if (IS_ERR(npage[0]))
			return PTR_ERR(npage[0]);
	}

	/* if inline_data is set, should not report any block indices */
	if (f2fs_has_inline_data(dn->inode) && index) {
		err = -ENOENT;
		f2fs_put_page(npage[0], 1);
		goto release_out;
	}

	parent = npage[0];
	if (level != 0)
		nids[1] = get_nid(parent, offset[0], true);
	dn->inode_page = npage[0];
	dn->inode_page_locked = true;

	/* get indirect or direct nodes */
	for (i = 1; i <= level; i++) {
		bool done = false;

		if (!nids[i] && mode == ALLOC_NODE) {
			/* alloc new node */
			if (!alloc_nid(sbi, &(nids[i]))) {
				err = -ENOSPC;
				goto release_pages;
			}

			dn->nid = nids[i];
			npage[i] = new_node_page(dn, noffset[i], NULL);
			if (IS_ERR(npage[i])) {
				alloc_nid_failed(sbi, nids[i]);
				err = PTR_ERR(npage[i]);
				goto release_pages;
			}

			set_nid(parent, offset[i - 1], nids[i], i == 1);
			alloc_nid_done(sbi, nids[i]);
			done = true;
		} else if (mode == LOOKUP_NODE_RA && i == level && level > 1) {
			npage[i] = get_node_page_ra(parent, offset[i - 1]);
			if (IS_ERR(npage[i])) {
				err = PTR_ERR(npage[i]);
				goto release_pages;
			}
			done = true;
		}
		if (i == 1) {
			dn->inode_page_locked = false;
			unlock_page(parent);
		} else {
			f2fs_put_page(parent, 1);
		}

		if (!done) {
			npage[i] = get_node_page(sbi, nids[i]);
			if (IS_ERR(npage[i])) {
				err = PTR_ERR(npage[i]);
				f2fs_put_page(npage[0], 0);
				goto release_out;
			}
		}
		if (i < level) {
			parent = npage[i];
			nids[i + 1] = get_nid(parent, offset[i], false);
		}
	}
	dn->nid = nids[level];
	dn->ofs_in_node = offset[level];
	dn->node_page = npage[level];
	dn->data_blkaddr = datablock_addr(dn->node_page, dn->ofs_in_node);
	return 0;

release_pages:
	f2fs_put_page(parent, 1);
	if (i > 1)
		f2fs_put_page(npage[0], 0);
release_out:
	dn->inode_page = NULL;
	dn->node_page = NULL;
	return err;
}

static void truncate_node(struct dnode_of_data *dn)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
	struct node_info ni;

	get_node_info(sbi, dn->nid, &ni);
	if (dn->inode->i_blocks == 0) {
		f2fs_bug_on(sbi, ni.blk_addr != NULL_ADDR);
		goto invalidate;
	}
	f2fs_bug_on(sbi, ni.blk_addr == NULL_ADDR);

	/* Deallocate node address */
	invalidate_blocks(sbi, ni.blk_addr);
	dec_valid_node_count(sbi, dn->inode);
	set_node_addr(sbi, &ni, NULL_ADDR, false);

	if (dn->nid == dn->inode->i_ino) {
		remove_orphan_inode(sbi, dn->nid);
		dec_valid_inode_count(sbi);
	} else {
		sync_inode_page(dn);
	}
invalidate:
	clear_node_page_dirty(dn->node_page);
	set_sbi_flag(sbi, SBI_IS_DIRTY);

	f2fs_put_page(dn->node_page, 1);

	invalidate_mapping_pages(NODE_MAPPING(sbi),
			dn->node_page->index, dn->node_page->index);

	dn->node_page = NULL;
	trace_f2fs_truncate_node(dn->inode, dn->nid, ni.blk_addr);
}

static int truncate_dnode(struct dnode_of_data *dn)
{
	struct page *page;

	if (dn->nid == 0)
		return 1;

	/* get direct node */
	page = get_node_page(F2FS_I_SB(dn->inode), dn->nid);
	if (IS_ERR(page) && PTR_ERR(page) == -ENOENT)
		return 1;
	else if (IS_ERR(page))
		return PTR_ERR(page);

	/* Make dnode_of_data for parameter */
	dn->node_page = page;
	dn->ofs_in_node = 0;
	truncate_data_blocks(dn);
	truncate_node(dn);
	return 1;
}

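/*
 * truncate_nodes() recursively frees the children of an indirect or
 * double indirect node.  It returns the number of node pages freed
 * along the way; a fully freed subtree of depth 2 reports
 * NIDS_PER_BLOCK + 1 (all direct children plus the indirect node
 * itself), which is also what a hole (dn->nid == 0) reports so that
 * the caller keeps counting correctly.
 */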
static int truncate_nodes(struct dnode_of_data *dn, unsigned int nofs,
						int ofs, int depth)
{
	struct dnode_of_data rdn = *dn;
	struct page *page;
	struct f2fs_node *rn;
	nid_t child_nid;
	unsigned int child_nofs;
	int freed = 0;
	int i, ret;

	if (dn->nid == 0)
		return NIDS_PER_BLOCK + 1;

	trace_f2fs_truncate_nodes_enter(dn->inode, dn->nid, dn->data_blkaddr);

	page = get_node_page(F2FS_I_SB(dn->inode), dn->nid);
	if (IS_ERR(page)) {
		trace_f2fs_truncate_nodes_exit(dn->inode, PTR_ERR(page));
		return PTR_ERR(page);
	}

	rn = F2FS_NODE(page);
	if (depth < 3) {
		for (i = ofs; i < NIDS_PER_BLOCK; i++, freed++) {
			child_nid = le32_to_cpu(rn->in.nid[i]);
			if (child_nid == 0)
				continue;
			rdn.nid = child_nid;
			ret = truncate_dnode(&rdn);
			if (ret < 0)
				goto out_err;
			set_nid(page, i, 0, false);
		}
	} else {
		child_nofs = nofs + ofs * (NIDS_PER_BLOCK + 1) + 1;
		for (i = ofs; i < NIDS_PER_BLOCK; i++) {
			child_nid = le32_to_cpu(rn->in.nid[i]);
			if (child_nid == 0) {
				child_nofs += NIDS_PER_BLOCK + 1;
				continue;
			}
			rdn.nid = child_nid;
			ret = truncate_nodes(&rdn, child_nofs, 0, depth - 1);
			if (ret == (NIDS_PER_BLOCK + 1)) {
				set_nid(page, i, 0, false);
				child_nofs += ret;
			} else if (ret < 0 && ret != -ENOENT) {
				goto out_err;
			}
		}
		freed = child_nofs;
	}

	if (!ofs) {
		/* remove current indirect node */
		dn->node_page = page;
		truncate_node(dn);
		freed++;
	} else {
		f2fs_put_page(page, 1);
	}
	trace_f2fs_truncate_nodes_exit(dn->inode, freed);
	return freed;

out_err:
	f2fs_put_page(page, 1);
	trace_f2fs_truncate_nodes_exit(dn->inode, ret);
	return ret;
}

static int truncate_partial_nodes(struct dnode_of_data *dn,
			struct f2fs_inode *ri, int *offset, int depth)
{
	struct page *pages[2];
	nid_t nid[3];
	nid_t child_nid;
	int err = 0;
	int i;
	int idx = depth - 2;

	nid[0] = le32_to_cpu(ri->i_nid[offset[0] - NODE_DIR1_BLOCK]);
	if (!nid[0])
		return 0;

	/* get indirect nodes in the path */
	for (i = 0; i < idx + 1; i++) {
		/* the reference count will be increased */
		pages[i] = get_node_page(F2FS_I_SB(dn->inode), nid[i]);
		if (IS_ERR(pages[i])) {
			err = PTR_ERR(pages[i]);
			idx = i - 1;
			goto fail;
		}
		nid[i + 1] = get_nid(pages[i], offset[i + 1], false);
	}

	/* free direct nodes linked to a partial indirect node */
	for (i = offset[idx + 1]; i < NIDS_PER_BLOCK; i++) {
		child_nid = get_nid(pages[idx], i, false);
		if (!child_nid)
			continue;
		dn->nid = child_nid;
		err = truncate_dnode(dn);
		if (err < 0)
			goto fail;
		set_nid(pages[idx], i, 0, false);
	}

	if (offset[idx + 1] == 0) {
		dn->node_page = pages[idx];
		dn->nid = nid[idx];
		truncate_node(dn);
	} else {
		f2fs_put_page(pages[idx], 1);
	}
	offset[idx]++;
	offset[idx + 1] = 0;
	idx--;
fail:
	for (i = idx; i >= 0; i--)
		f2fs_put_page(pages[i], 1);

	trace_f2fs_truncate_partial_nodes(dn->inode, nid, depth, err);

	return err;
}

/*
 * All the block addresses of data and nodes should be nullified.
 */
int truncate_inode_blocks(struct inode *inode, pgoff_t from)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	int err = 0, cont = 1;
	int level, offset[4], noffset[4];
	unsigned int nofs = 0;
	struct f2fs_inode *ri;
	struct dnode_of_data dn;
	struct page *page;

	trace_f2fs_truncate_inode_blocks_enter(inode, from);

	level = get_node_path(F2FS_I(inode), from, offset, noffset);
restart:
	page = get_node_page(sbi, inode->i_ino);
	if (IS_ERR(page)) {
		trace_f2fs_truncate_inode_blocks_exit(inode, PTR_ERR(page));
		return PTR_ERR(page);
	}

	set_new_dnode(&dn, inode, page, NULL, 0);
	unlock_page(page);

	ri = F2FS_INODE(page);
	switch (level) {
	case 0:
	case 1:
		nofs = noffset[1];
		break;
	case 2:
		nofs = noffset[1];
		if (!offset[level - 1])
			goto skip_partial;
		err = truncate_partial_nodes(&dn, ri, offset, level);
		if (err < 0 && err != -ENOENT)
			goto fail;
		nofs += 1 + NIDS_PER_BLOCK;
		break;
	case 3:
		nofs = 5 + 2 * NIDS_PER_BLOCK;
		if (!offset[level - 1])
			goto skip_partial;
		err = truncate_partial_nodes(&dn, ri, offset, level);
		if (err < 0 && err != -ENOENT)
			goto fail;
		break;
	default:
		BUG();
	}

skip_partial:
	while (cont) {
		dn.nid = le32_to_cpu(ri->i_nid[offset[0] - NODE_DIR1_BLOCK]);
		switch (offset[0]) {
		case NODE_DIR1_BLOCK:
		case NODE_DIR2_BLOCK:
			err = truncate_dnode(&dn);
			break;

		case NODE_IND1_BLOCK:
		case NODE_IND2_BLOCK:
			err = truncate_nodes(&dn, nofs, offset[1], 2);
			break;

		case NODE_DIND_BLOCK:
			err = truncate_nodes(&dn, nofs, offset[1], 3);
			cont = 0;
			break;

		default:
			BUG();
		}
		if (err < 0 && err != -ENOENT)
			goto fail;
		if (offset[1] == 0 &&
				ri->i_nid[offset[0] - NODE_DIR1_BLOCK]) {
			lock_page(page);
			if (unlikely(page->mapping != NODE_MAPPING(sbi))) {
				f2fs_put_page(page, 1);
				goto restart;
			}
			f2fs_wait_on_page_writeback(page, NODE);
			ri->i_nid[offset[0] - NODE_DIR1_BLOCK] = 0;
			set_page_dirty(page);
			unlock_page(page);
		}
		offset[1] = 0;
		offset[0]++;
		nofs += err;
	}
fail:
	f2fs_put_page(page, 0);
	trace_f2fs_truncate_inode_blocks_exit(inode, err);
	return err > 0 ? 0 : err;
}

int truncate_xattr_node(struct inode *inode, struct page *page)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	nid_t nid = F2FS_I(inode)->i_xattr_nid;
	struct dnode_of_data dn;
	struct page *npage;

	if (!nid)
		return 0;

	npage = get_node_page(sbi, nid);
	if (IS_ERR(npage))
		return PTR_ERR(npage);

	F2FS_I(inode)->i_xattr_nid = 0;

	/* need to do checkpoint during fsync */
	F2FS_I(inode)->xattr_ver = cur_cp_version(F2FS_CKPT(sbi));

	set_new_dnode(&dn, inode, page, npage, nid);

	if (page)
		dn.inode_page_locked = true;
	truncate_node(&dn);
	return 0;
}

/*
 * Caller should grab and release a rwsem by calling f2fs_lock_op() and
 * f2fs_unlock_op().
 */
void remove_inode_page(struct inode *inode)
{
	struct dnode_of_data dn;

	set_new_dnode(&dn, inode, NULL, NULL, inode->i_ino);
	if (get_dnode_of_data(&dn, 0, LOOKUP_NODE))
		return;

	if (truncate_xattr_node(inode, dn.inode_page)) {
		f2fs_put_dnode(&dn);
		return;
	}

	/* remove potential inline_data blocks */
	if (S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
				S_ISLNK(inode->i_mode))
		truncate_data_blocks_range(&dn, 1);

	/* 0 is possible, after f2fs_new_inode() has failed */
	f2fs_bug_on(F2FS_I_SB(inode),
			inode->i_blocks != 0 && inode->i_blocks != 1);

	/* will put inode & node pages */
	truncate_node(&dn);
}

struct page *new_inode_page(struct inode *inode)
{
	struct dnode_of_data dn;

	/* allocate inode page for new inode */
	set_new_dnode(&dn, inode, NULL, NULL, inode->i_ino);

	/* caller should f2fs_put_page(page, 1); */
	return new_node_page(&dn, 0, NULL);
}

struct page *new_node_page(struct dnode_of_data *dn,
				unsigned int ofs, struct page *ipage)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
	struct node_info old_ni, new_ni;
	struct page *page;
	int err;

	if (unlikely(is_inode_flag_set(F2FS_I(dn->inode), FI_NO_ALLOC)))
		return ERR_PTR(-EPERM);

	page = grab_cache_page(NODE_MAPPING(sbi), dn->nid);
	if (!page)
		return ERR_PTR(-ENOMEM);

	if (unlikely(!inc_valid_node_count(sbi, dn->inode))) {
		err = -ENOSPC;
		goto fail;
	}

	get_node_info(sbi, dn->nid, &old_ni);

	/* Reinitialize old_ni with new node page */
	f2fs_bug_on(sbi, old_ni.blk_addr != NULL_ADDR);
	new_ni = old_ni;
	new_ni.ino = dn->inode->i_ino;
	set_node_addr(sbi, &new_ni, NEW_ADDR, false);

	f2fs_wait_on_page_writeback(page, NODE);
	fill_node_footer(page, dn->nid, dn->inode->i_ino, ofs, true);
	set_cold_node(dn->inode, page);
	SetPageUptodate(page);
	set_page_dirty(page);

	if (f2fs_has_xattr_block(ofs))
		F2FS_I(dn->inode)->i_xattr_nid = dn->nid;

	dn->node_page = page;
	if (ipage)
		update_inode(dn->inode, ipage);
	else
		sync_inode_page(dn);
	if (ofs == 0)
		inc_valid_inode_count(sbi);

	return page;

fail:
	clear_node_page_dirty(page);
	f2fs_put_page(page, 1);
	return ERR_PTR(err);
}

/*
 * The return value tells the caller how to release the page:
 * 0: f2fs_put_page(page, 0)
 * LOCKED_PAGE: f2fs_put_page(page, 1)
 * error: nothing to do
 */
static int read_node_page(struct page *page, int rw)
{
	struct f2fs_sb_info *sbi = F2FS_P_SB(page);
	struct node_info ni;
	struct f2fs_io_info fio = {
		.type = NODE,
		.rw = rw,
	};

	get_node_info(sbi, page->index, &ni);

	if (unlikely(ni.blk_addr == NULL_ADDR)) {
		ClearPageUptodate(page);
		f2fs_put_page(page, 1);
		return -ENOENT;
	}

	if (PageUptodate(page))
		return LOCKED_PAGE;

	fio.blk_addr = ni.blk_addr;
	return f2fs_submit_page_bio(sbi, page, &fio);
}

/*
 * Readahead a node page
 */
void ra_node_page(struct f2fs_sb_info *sbi, nid_t nid)
{
	struct page *apage;
	int err;

	apage = find_get_page(NODE_MAPPING(sbi), nid);
	if (apage && PageUptodate(apage)) {
		f2fs_put_page(apage, 0);
		return;
	}
	f2fs_put_page(apage, 0);

	apage = grab_cache_page(NODE_MAPPING(sbi), nid);
	if (!apage)
		return;

	err = read_node_page(apage, READA);
	if (err == 0)
		f2fs_put_page(apage, 0);
	else if (err == LOCKED_PAGE)
		f2fs_put_page(apage, 1);
}

struct page *get_node_page(struct f2fs_sb_info *sbi, pgoff_t nid)
{
	struct page *page;
	int err;
repeat:
	page = grab_cache_page(NODE_MAPPING(sbi), nid);
	if (!page)
		return ERR_PTR(-ENOMEM);

	err = read_node_page(page, READ_SYNC);
	if (err < 0)
		return ERR_PTR(err);
	else if (err != LOCKED_PAGE)
		lock_page(page);

	if (unlikely(!PageUptodate(page) || nid != nid_of_node(page))) {
		ClearPageUptodate(page);
		f2fs_put_page(page, 1);
		return ERR_PTR(-EIO);
	}
	if (unlikely(page->mapping != NODE_MAPPING(sbi))) {
		f2fs_put_page(page, 1);
		goto repeat;
	}
	return page;
}

/*
 * Return a locked page for the desired node page, and readahead up to
 * MAX_RA_NODE of its sibling node pages.
 */
struct page *get_node_page_ra(struct page *parent, int start)
{
	struct f2fs_sb_info *sbi = F2FS_P_SB(parent);
	struct blk_plug plug;
	struct page *page;
	int err, i, end;
	nid_t nid;

	/* First, try getting the desired direct node. */
	nid = get_nid(parent, start, false);
	if (!nid)
		return ERR_PTR(-ENOENT);
repeat:
	page = grab_cache_page(NODE_MAPPING(sbi), nid);
	if (!page)
		return ERR_PTR(-ENOMEM);

	err = read_node_page(page, READ_SYNC);
	if (err < 0)
		return ERR_PTR(err);
	else if (err == LOCKED_PAGE)
		goto page_hit;

	blk_start_plug(&plug);

	/* Then, try readahead for siblings of the desired node */
	end = start + MAX_RA_NODE;
	end = min(end, NIDS_PER_BLOCK);
	for (i = start + 1; i < end; i++) {
		nid = get_nid(parent, i, false);
		if (!nid)
			continue;
		ra_node_page(sbi, nid);
	}

	blk_finish_plug(&plug);

	lock_page(page);
	if (unlikely(page->mapping != NODE_MAPPING(sbi))) {
		f2fs_put_page(page, 1);
		goto repeat;
	}
page_hit:
	if (unlikely(!PageUptodate(page))) {
		f2fs_put_page(page, 1);
		return ERR_PTR(-EIO);
	}
	return page;
}

void sync_inode_page(struct dnode_of_data *dn)
{
	if (IS_INODE(dn->node_page) || dn->inode_page == dn->node_page) {
		update_inode(dn->inode, dn->node_page);
	} else if (dn->inode_page) {
		if (!dn->inode_page_locked)
			lock_page(dn->inode_page);
		update_inode(dn->inode, dn->inode_page);
		if (!dn->inode_page_locked)
			unlock_page(dn->inode_page);
	} else {
		update_inode_page(dn->inode);
	}
}

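/*
 * sync_node_pages() writes dirty node pages in three passes: indirect
 * nodes first, then dentry dnodes, then file dnodes (see the step
 * comment below).  When called for fsync (ino != 0), it starts
 * directly at step 2 and only flushes dnodes belonging to that inode.
 */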
int sync_node_pages(struct f2fs_sb_info *sbi, nid_t ino,
					struct writeback_control *wbc)
{
	pgoff_t index, end;
	struct pagevec pvec;
	int step = ino ? 2 : 0;
	int nwritten = 0, wrote = 0;

	pagevec_init(&pvec, 0);

next_step:
	index = 0;
	end = LONG_MAX;

	while (index <= end) {
		int i, nr_pages;
		nr_pages = pagevec_lookup_tag(&pvec, NODE_MAPPING(sbi), &index,
				PAGECACHE_TAG_DIRTY,
				min(end - index, (pgoff_t)PAGEVEC_SIZE-1) + 1);
		if (nr_pages == 0)
			break;

		for (i = 0; i < nr_pages; i++) {
			struct page *page = pvec.pages[i];

			/*
			 * flushing sequence with step:
			 * 0. indirect nodes
			 * 1. dentry dnodes
			 * 2. file dnodes
			 */
			if (step == 0 && IS_DNODE(page))
				continue;
			if (step == 1 && (!IS_DNODE(page) ||
						is_cold_node(page)))
				continue;
			if (step == 2 && (!IS_DNODE(page) ||
						!is_cold_node(page)))
				continue;

			/*
			 * In fsync mode (ino is given), we must not skip
			 * writing the target inode's node pages.
			 */
			if (ino && ino_of_node(page) == ino)
				lock_page(page);
			else if (!trylock_page(page))
				continue;

			if (unlikely(page->mapping != NODE_MAPPING(sbi))) {
continue_unlock:
				unlock_page(page);
				continue;
			}
			if (ino && ino_of_node(page) != ino)
				goto continue_unlock;

			if (!PageDirty(page)) {
				/* someone wrote it for us */
				goto continue_unlock;
			}

			if (!clear_page_dirty_for_io(page))
				goto continue_unlock;

			/* called by fsync() */
			if (ino && IS_DNODE(page)) {
				set_fsync_mark(page, 1);
				if (IS_INODE(page)) {
					if (!is_checkpointed_node(sbi, ino) &&
						!has_fsynced_inode(sbi, ino))
						set_dentry_mark(page, 1);
					else
						set_dentry_mark(page, 0);
				}
				nwritten++;
			} else {
				set_fsync_mark(page, 0);
				set_dentry_mark(page, 0);
			}

			if (NODE_MAPPING(sbi)->a_ops->writepage(page, wbc))
				unlock_page(page);
			else
				wrote++;

			if (--wbc->nr_to_write == 0)
				break;
		}
		pagevec_release(&pvec);
		cond_resched();

		if (wbc->nr_to_write == 0) {
			step = 2;
			break;
		}
	}

	if (step < 2) {
		step++;
		goto next_step;
	}

	if (wrote)
		f2fs_submit_merged_bio(sbi, NODE, WRITE);
	return nwritten;
}

int wait_on_node_pages_writeback(struct f2fs_sb_info *sbi, nid_t ino)
{
	pgoff_t index = 0, end = LONG_MAX;
	struct pagevec pvec;
	int ret2 = 0, ret = 0;

	pagevec_init(&pvec, 0);

	while (index <= end) {
		int i, nr_pages;
		nr_pages = pagevec_lookup_tag(&pvec, NODE_MAPPING(sbi), &index,
				PAGECACHE_TAG_WRITEBACK,
				min(end - index, (pgoff_t)PAGEVEC_SIZE-1) + 1);
		if (nr_pages == 0)
			break;

		for (i = 0; i < nr_pages; i++) {
			struct page *page = pvec.pages[i];

			/* until radix tree lookup accepts end_index */
			if (unlikely(page->index > end))
				continue;

			if (ino && ino_of_node(page) == ino) {
				f2fs_wait_on_page_writeback(page, NODE);
				if (TestClearPageError(page))
					ret = -EIO;
			}
		}
		pagevec_release(&pvec);
		cond_resched();
	}

	if (unlikely(test_and_clear_bit(AS_ENOSPC, &NODE_MAPPING(sbi)->flags)))
		ret2 = -ENOSPC;
	if (unlikely(test_and_clear_bit(AS_EIO, &NODE_MAPPING(sbi)->flags)))
		ret2 = -EIO;
	if (!ret)
		ret = ret2;
	return ret;
}

static int f2fs_write_node_page(struct page *page,
				struct writeback_control *wbc)
{
	struct f2fs_sb_info *sbi = F2FS_P_SB(page);
	nid_t nid;
	struct node_info ni;
	struct f2fs_io_info fio = {
		.type = NODE,
		.rw = (wbc->sync_mode == WB_SYNC_ALL) ? WRITE_SYNC : WRITE,
	};

	trace_f2fs_writepage(page, NODE);

	if (unlikely(is_sbi_flag_set(sbi, SBI_POR_DOING)))
		goto redirty_out;
	if (unlikely(f2fs_cp_error(sbi)))
		goto redirty_out;

	f2fs_wait_on_page_writeback(page, NODE);

	/* get old block addr of this node page */
	nid = nid_of_node(page);
	f2fs_bug_on(sbi, page->index != nid);

	get_node_info(sbi, nid, &ni);

	/* This page is already truncated */
	if (unlikely(ni.blk_addr == NULL_ADDR)) {
		ClearPageUptodate(page);
		dec_page_count(sbi, F2FS_DIRTY_NODES);
		unlock_page(page);
		return 0;
	}

	if (wbc->for_reclaim) {
		if (!down_read_trylock(&sbi->node_write))
			goto redirty_out;
	} else {
		down_read(&sbi->node_write);
	}

	set_page_writeback(page);
	fio.blk_addr = ni.blk_addr;
	write_node_page(sbi, page, nid, &fio);
	set_node_addr(sbi, &ni, fio.blk_addr, is_fsync_dnode(page));
	dec_page_count(sbi, F2FS_DIRTY_NODES);
	up_read(&sbi->node_write);
	unlock_page(page);

	if (wbc->for_reclaim)
		f2fs_submit_merged_bio(sbi, NODE, WRITE);

	return 0;

redirty_out:
	redirty_page_for_writepage(wbc, page);
	return AOP_WRITEPAGE_ACTIVATE;
}

static int f2fs_write_node_pages(struct address_space *mapping,
			    struct writeback_control *wbc)
{
	struct f2fs_sb_info *sbi = F2FS_M_SB(mapping);
	long diff;

	trace_f2fs_writepages(mapping->host, wbc, NODE);

	/* balancing f2fs's metadata in background */
	f2fs_balance_fs_bg(sbi);

	/* collect a number of dirty node pages and write together */
	if (get_pages(sbi, F2FS_DIRTY_NODES) < nr_pages_to_skip(sbi, NODE))
		goto skip_write;

	diff = nr_pages_to_write(sbi, NODE, wbc);
	wbc->sync_mode = WB_SYNC_NONE;
	sync_node_pages(sbi, 0, wbc);
	wbc->nr_to_write = max((long)0, wbc->nr_to_write - diff);
	return 0;

skip_write:
	wbc->pages_skipped += get_pages(sbi, F2FS_DIRTY_NODES);
	return 0;
}

static int f2fs_set_node_page_dirty(struct page *page)
{
	trace_f2fs_set_page_dirty(page, NODE);

	SetPageUptodate(page);
	if (!PageDirty(page)) {
		__set_page_dirty_nobuffers(page);
		inc_page_count(F2FS_P_SB(page), F2FS_DIRTY_NODES);
		SetPagePrivate(page);
		f2fs_trace_pid(page);
		return 1;
	}
	return 0;
}

/*
 * Structure of the f2fs node operations
 */
const struct address_space_operations f2fs_node_aops = {
	.writepage	= f2fs_write_node_page,
	.writepages	= f2fs_write_node_pages,
	.set_page_dirty	= f2fs_set_node_page_dirty,
	.invalidatepage	= f2fs_invalidate_page,
	.releasepage	= f2fs_release_page,
};

static struct free_nid *__lookup_free_nid_list(struct f2fs_nm_info *nm_i,
						nid_t n)
{
	return radix_tree_lookup(&nm_i->free_nid_root, n);
}

static void __del_from_free_nid_list(struct f2fs_nm_info *nm_i,
						struct free_nid *i)
{
	list_del(&i->list);
	radix_tree_delete(&nm_i->free_nid_root, i->nid);
}

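/*
 * add_free_nid() returns 1 when the nid was added to the free list,
 * 0 when it was skipped (nid 0, an already allocated nid, or a lost
 * race), and -1 when the free nid cache is over its memory budget.
 */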
static int add_free_nid(struct f2fs_sb_info *sbi, nid_t nid, bool build)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct free_nid *i;
	struct nat_entry *ne;
	bool allocated = false;

	if (!available_free_memory(sbi, FREE_NIDS))
		return -1;

	/* 0 nid should not be used */
	if (unlikely(nid == 0))
		return 0;

	if (build) {
		/* do not add allocated nids */
		down_read(&nm_i->nat_tree_lock);
		ne = __lookup_nat_cache(nm_i, nid);
		if (ne &&
			(!get_nat_flag(ne, IS_CHECKPOINTED) ||
				nat_get_blkaddr(ne) != NULL_ADDR))
			allocated = true;
		up_read(&nm_i->nat_tree_lock);
		if (allocated)
			return 0;
	}

	i = f2fs_kmem_cache_alloc(free_nid_slab, GFP_NOFS);
	i->nid = nid;
	i->state = NID_NEW;

	if (radix_tree_preload(GFP_NOFS)) {
		kmem_cache_free(free_nid_slab, i);
		return 0;
	}

	spin_lock(&nm_i->free_nid_list_lock);
	if (radix_tree_insert(&nm_i->free_nid_root, i->nid, i)) {
		spin_unlock(&nm_i->free_nid_list_lock);
		radix_tree_preload_end();
		kmem_cache_free(free_nid_slab, i);
		return 0;
	}
	list_add_tail(&i->list, &nm_i->free_nid_list);
	nm_i->fcnt++;
	spin_unlock(&nm_i->free_nid_list_lock);
	radix_tree_preload_end();
	return 1;
}

static void remove_free_nid(struct f2fs_nm_info *nm_i, nid_t nid)
{
	struct free_nid *i;
	bool need_free = false;

	spin_lock(&nm_i->free_nid_list_lock);
	i = __lookup_free_nid_list(nm_i, nid);
	if (i && i->state == NID_NEW) {
		__del_from_free_nid_list(nm_i, i);
		nm_i->fcnt--;
		need_free = true;
	}
	spin_unlock(&nm_i->free_nid_list_lock);

	if (need_free)
		kmem_cache_free(free_nid_slab, i);
}

static void scan_nat_page(struct f2fs_sb_info *sbi,
			struct page *nat_page, nid_t start_nid)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct f2fs_nat_block *nat_blk = page_address(nat_page);
	block_t blk_addr;
	int i;

	i = start_nid % NAT_ENTRY_PER_BLOCK;

	for (; i < NAT_ENTRY_PER_BLOCK; i++, start_nid++) {

		if (unlikely(start_nid >= nm_i->max_nid))
			break;

		blk_addr = le32_to_cpu(nat_blk->entries[i].block_addr);
		f2fs_bug_on(sbi, blk_addr == NEW_ADDR);
		if (blk_addr == NULL_ADDR) {
			if (add_free_nid(sbi, start_nid, true) < 0)
				break;
		}
	}
}

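/*
 * build_free_nids() refills the free nid cache: it scans up to
 * FREE_NID_PAGES NAT pages starting at next_scan_nid (with readahead),
 * and then reconciles the result against the NAT journal in the
 * current hot data summary, which may hold newer state than the NAT
 * pages themselves.
 */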
static void build_free_nids(struct f2fs_sb_info *sbi)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_HOT_DATA);
	struct f2fs_summary_block *sum = curseg->sum_blk;
	int i = 0;
	nid_t nid = nm_i->next_scan_nid;

	/* Enough entries */
	if (nm_i->fcnt > NAT_ENTRY_PER_BLOCK)
		return;

	/* readahead nat pages to be scanned */
	ra_meta_pages(sbi, NAT_BLOCK_OFFSET(nid), FREE_NID_PAGES, META_NAT);

	while (1) {
		struct page *page = get_current_nat_page(sbi, nid);

		scan_nat_page(sbi, page, nid);
		f2fs_put_page(page, 1);

		nid += (NAT_ENTRY_PER_BLOCK - (nid % NAT_ENTRY_PER_BLOCK));
		if (unlikely(nid >= nm_i->max_nid))
			nid = 0;

		if (i++ == FREE_NID_PAGES)
			break;
	}

	/* go to the next free nat pages to find more free nids */
	nm_i->next_scan_nid = nid;

	/* find free nids from current sum_pages */
	mutex_lock(&curseg->curseg_mutex);
	for (i = 0; i < nats_in_cursum(sum); i++) {
		block_t addr = le32_to_cpu(nat_in_journal(sum, i).block_addr);
		nid = le32_to_cpu(nid_in_journal(sum, i));
		if (addr == NULL_ADDR)
			add_free_nid(sbi, nid, true);
		else
			remove_free_nid(nm_i, nid);
	}
	mutex_unlock(&curseg->curseg_mutex);
}

/*
 * If this function returns success, the caller can obtain a new nid
 * from the second parameter of this function.
 * The returned nid can be used as an ino as well as a nid when an
 * inode is created.
 */
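/*
 * A typical allocation sequence, as used by get_dnode_of_data() above:
 *
 *	if (!alloc_nid(sbi, &nid))
 *		return -ENOSPC;
 *	page = new_node_page(dn, noffset, NULL);
 *	if (IS_ERR(page))
 *		alloc_nid_failed(sbi, nid);
 *	else
 *		alloc_nid_done(sbi, nid);
 *
 * alloc_nid_failed() puts the nid back on the free list (or frees it
 * when memory is tight), while alloc_nid_done() drops it for good.
 */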
bool alloc_nid(struct f2fs_sb_info *sbi, nid_t *nid)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct free_nid *i = NULL;
retry:
	if (unlikely(sbi->total_valid_node_count + 1 > nm_i->available_nids))
		return false;

	spin_lock(&nm_i->free_nid_list_lock);

	/* We should not use stale free nids created by build_free_nids */
	if (nm_i->fcnt && !on_build_free_nids(nm_i)) {
		f2fs_bug_on(sbi, list_empty(&nm_i->free_nid_list));
		list_for_each_entry(i, &nm_i->free_nid_list, list)
			if (i->state == NID_NEW)
				break;

		f2fs_bug_on(sbi, i->state != NID_NEW);
		*nid = i->nid;
		i->state = NID_ALLOC;
		nm_i->fcnt--;
		spin_unlock(&nm_i->free_nid_list_lock);
		return true;
	}
	spin_unlock(&nm_i->free_nid_list_lock);

	/* Let's scan nat pages and their caches to get free nids */
	mutex_lock(&nm_i->build_lock);
	build_free_nids(sbi);
	mutex_unlock(&nm_i->build_lock);
	goto retry;
}

/*
 * alloc_nid() should be called prior to this function.
 */
void alloc_nid_done(struct f2fs_sb_info *sbi, nid_t nid)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct free_nid *i;

	spin_lock(&nm_i->free_nid_list_lock);
	i = __lookup_free_nid_list(nm_i, nid);
	f2fs_bug_on(sbi, !i || i->state != NID_ALLOC);
	__del_from_free_nid_list(nm_i, i);
	spin_unlock(&nm_i->free_nid_list_lock);

	kmem_cache_free(free_nid_slab, i);
}

/*
 * alloc_nid() should be called prior to this function.
 */
void alloc_nid_failed(struct f2fs_sb_info *sbi, nid_t nid)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct free_nid *i;
	bool need_free = false;

	if (!nid)
		return;

	spin_lock(&nm_i->free_nid_list_lock);
	i = __lookup_free_nid_list(nm_i, nid);
	f2fs_bug_on(sbi, !i || i->state != NID_ALLOC);
	if (!available_free_memory(sbi, FREE_NIDS)) {
		__del_from_free_nid_list(nm_i, i);
		need_free = true;
	} else {
		i->state = NID_NEW;
		nm_i->fcnt++;
	}
	spin_unlock(&nm_i->free_nid_list_lock);

	if (need_free)
		kmem_cache_free(free_nid_slab, i);
}

void recover_inline_xattr(struct inode *inode, struct page *page)
{
	void *src_addr, *dst_addr;
	size_t inline_size;
	struct page *ipage;
	struct f2fs_inode *ri;

	ipage = get_node_page(F2FS_I_SB(inode), inode->i_ino);
	f2fs_bug_on(F2FS_I_SB(inode), IS_ERR(ipage));

	ri = F2FS_INODE(page);
	if (!(ri->i_inline & F2FS_INLINE_XATTR)) {
		clear_inode_flag(F2FS_I(inode), FI_INLINE_XATTR);
		goto update_inode;
	}

	dst_addr = inline_xattr_addr(ipage);
	src_addr = inline_xattr_addr(page);
	inline_size = inline_xattr_size(inode);

	f2fs_wait_on_page_writeback(ipage, NODE);
	memcpy(dst_addr, src_addr, inline_size);
update_inode:
	update_inode(inode, ipage);
	f2fs_put_page(ipage, 1);
}

void recover_xattr_data(struct inode *inode, struct page *page, block_t blkaddr)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	nid_t prev_xnid = F2FS_I(inode)->i_xattr_nid;
	nid_t new_xnid = nid_of_node(page);
	struct node_info ni;

	/* 1: invalidate the previous xattr nid */
	if (!prev_xnid)
		goto recover_xnid;

	/* Deallocate node address */
	get_node_info(sbi, prev_xnid, &ni);
	f2fs_bug_on(sbi, ni.blk_addr == NULL_ADDR);
	invalidate_blocks(sbi, ni.blk_addr);
	dec_valid_node_count(sbi, inode);
	set_node_addr(sbi, &ni, NULL_ADDR, false);

recover_xnid:
	/* 2: allocate new xattr nid */
	if (unlikely(!inc_valid_node_count(sbi, inode)))
		f2fs_bug_on(sbi, 1);

	remove_free_nid(NM_I(sbi), new_xnid);
	get_node_info(sbi, new_xnid, &ni);
	ni.ino = inode->i_ino;
	set_node_addr(sbi, &ni, NEW_ADDR, false);
	F2FS_I(inode)->i_xattr_nid = new_xnid;

	/* 3: update xattr blkaddr */
	refresh_sit_entry(sbi, NEW_ADDR, blkaddr);
	set_node_addr(sbi, &ni, blkaddr, false);

	update_inode_page(inode);
}

int recover_inode_page(struct f2fs_sb_info *sbi, struct page *page)
{
	struct f2fs_inode *src, *dst;
	nid_t ino = ino_of_node(page);
	struct node_info old_ni, new_ni;
	struct page *ipage;

	get_node_info(sbi, ino, &old_ni);

	if (unlikely(old_ni.blk_addr != NULL_ADDR))
		return -EINVAL;

	ipage = grab_cache_page(NODE_MAPPING(sbi), ino);
	if (!ipage)
		return -ENOMEM;

	/* Should not use this inode from free nid list */
	remove_free_nid(NM_I(sbi), ino);

	SetPageUptodate(ipage);
	fill_node_footer(ipage, ino, ino, 0, true);

	src = F2FS_INODE(page);
	dst = F2FS_INODE(ipage);

	memcpy(dst, src, (unsigned long)&src->i_ext - (unsigned long)src);
	dst->i_size = 0;
	dst->i_blocks = cpu_to_le64(1);
	dst->i_links = cpu_to_le32(1);
	dst->i_xattr_nid = 0;
	dst->i_inline = src->i_inline & F2FS_INLINE_XATTR;

	new_ni = old_ni;
	new_ni.ino = ino;

	if (unlikely(!inc_valid_node_count(sbi, NULL)))
		WARN_ON(1);
	set_node_addr(sbi, &new_ni, NEW_ADDR, false);
	inc_valid_inode_count(sbi);
	set_page_dirty(ipage);
	f2fs_put_page(ipage, 1);
	return 0;
}

int restore_node_summary(struct f2fs_sb_info *sbi,
			unsigned int segno, struct f2fs_summary_block *sum)
{
	struct f2fs_node *rn;
	struct f2fs_summary *sum_entry;
	block_t addr;
	int bio_blocks = MAX_BIO_BLOCKS(sbi);
	int i, idx, last_offset, nrpages;

	/* scan the node segment */
	last_offset = sbi->blocks_per_seg;
	addr = START_BLOCK(sbi, segno);
	sum_entry = &sum->entries[0];

	for (i = 0; i < last_offset; i += nrpages, addr += nrpages) {
		nrpages = min(last_offset - i, bio_blocks);

		/* readahead node pages */
		ra_meta_pages(sbi, addr, nrpages, META_POR);

		for (idx = addr; idx < addr + nrpages; idx++) {
			struct page *page = get_meta_page(sbi, idx);

			rn = F2FS_NODE(page);
			sum_entry->nid = rn->footer.nid;
			sum_entry->version = 0;
			sum_entry->ofs_in_node = 0;
			sum_entry++;
			f2fs_put_page(page, 1);
		}

		invalidate_mapping_pages(META_MAPPING(sbi), addr,
							addr + nrpages);
	}
	return 0;
}

static void remove_nats_in_journal(struct f2fs_sb_info *sbi)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_HOT_DATA);
	struct f2fs_summary_block *sum = curseg->sum_blk;
	int i;

	mutex_lock(&curseg->curseg_mutex);
	for (i = 0; i < nats_in_cursum(sum); i++) {
		struct nat_entry *ne;
		struct f2fs_nat_entry raw_ne;
		nid_t nid = le32_to_cpu(nid_in_journal(sum, i));

		raw_ne = nat_in_journal(sum, i);

		down_write(&nm_i->nat_tree_lock);
		ne = __lookup_nat_cache(nm_i, nid);
		if (!ne) {
			ne = grab_nat_entry(nm_i, nid);
			node_info_from_raw_nat(&ne->ni, &raw_ne);
		}
		__set_nat_cache_dirty(nm_i, ne);
		up_write(&nm_i->nat_tree_lock);
	}
	update_nats_in_cursum(sum, -i);
	mutex_unlock(&curseg->curseg_mutex);
}

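/*
 * __adjust_nat_entry_set() inserts a dirty set into @head, keeping the
 * list sorted by ascending entry count.  Small sets are flushed first
 * so that they can still go to the journal; any set with at least @max
 * entries is appended to the tail and will be flushed straight to its
 * NAT page.
 */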
static void __adjust_nat_entry_set(struct nat_entry_set *nes,
						struct list_head *head, int max)
{
	struct nat_entry_set *cur;

	if (nes->entry_cnt >= max)
		goto add_out;

	list_for_each_entry(cur, head, set_list) {
		if (cur->entry_cnt >= nes->entry_cnt) {
			list_add(&nes->set_list, cur->set_list.prev);
			return;
		}
	}
add_out:
	list_add_tail(&nes->set_list, head);
}

static void __flush_nat_entry_set(struct f2fs_sb_info *sbi,
					struct nat_entry_set *set)
{
	struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_HOT_DATA);
	struct f2fs_summary_block *sum = curseg->sum_blk;
	nid_t start_nid = set->set * NAT_ENTRY_PER_BLOCK;
	bool to_journal = true;
	struct f2fs_nat_block *nat_blk;
	struct nat_entry *ne, *cur;
	struct page *page = NULL;
	struct f2fs_nm_info *nm_i = NM_I(sbi);

	/*
	 * there are two steps to flush nat entries:
	 * #1, flush nat entries to journal in current hot data summary block.
	 * #2, flush nat entries to nat page.
	 */
	if (!__has_cursum_space(sum, set->entry_cnt, NAT_JOURNAL))
		to_journal = false;

	if (to_journal) {
		mutex_lock(&curseg->curseg_mutex);
	} else {
		page = get_next_nat_page(sbi, start_nid);
		nat_blk = page_address(page);
		f2fs_bug_on(sbi, !nat_blk);
	}

	/* flush dirty nats in nat entry set */
	list_for_each_entry_safe(ne, cur, &set->entry_list, list) {
		struct f2fs_nat_entry *raw_ne;
		nid_t nid = nat_get_nid(ne);
		int offset;

		if (nat_get_blkaddr(ne) == NEW_ADDR)
			continue;

		if (to_journal) {
			offset = lookup_journal_in_cursum(sum,
							NAT_JOURNAL, nid, 1);
			f2fs_bug_on(sbi, offset < 0);
			raw_ne = &nat_in_journal(sum, offset);
			nid_in_journal(sum, offset) = cpu_to_le32(nid);
		} else {
			raw_ne = &nat_blk->entries[nid - start_nid];
		}
		raw_nat_from_node_info(raw_ne, &ne->ni);

		down_write(&NM_I(sbi)->nat_tree_lock);
		nat_reset_flag(ne);
		__clear_nat_cache_dirty(NM_I(sbi), ne);
		up_write(&NM_I(sbi)->nat_tree_lock);

		if (nat_get_blkaddr(ne) == NULL_ADDR)
			add_free_nid(sbi, nid, false);
	}

	if (to_journal)
		mutex_unlock(&curseg->curseg_mutex);
	else
		f2fs_put_page(page, 1);

	f2fs_bug_on(sbi, set->entry_cnt);

	down_write(&nm_i->nat_tree_lock);
	radix_tree_delete(&NM_I(sbi)->nat_set_root, set->set);
	up_write(&nm_i->nat_tree_lock);
	kmem_cache_free(nat_entry_set_slab, set);
}

/*
 * This function is called during the checkpointing process.
 */
void flush_nat_entries(struct f2fs_sb_info *sbi)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_HOT_DATA);
	struct f2fs_summary_block *sum = curseg->sum_blk;
	struct nat_entry_set *setvec[SETVEC_SIZE];
	struct nat_entry_set *set, *tmp;
	unsigned int found;
	nid_t set_idx = 0;
	LIST_HEAD(sets);

	if (!nm_i->dirty_nat_cnt)
		return;
	/*
	 * if there is not enough space in the journal to store dirty nat
	 * entries, remove all entries from the journal and merge them
	 * into the nat entry set.
	 */
	if (!__has_cursum_space(sum, nm_i->dirty_nat_cnt, NAT_JOURNAL))
		remove_nats_in_journal(sbi);

	down_write(&nm_i->nat_tree_lock);
	while ((found = __gang_lookup_nat_set(nm_i,
					set_idx, SETVEC_SIZE, setvec))) {
		unsigned idx;
		set_idx = setvec[found - 1]->set + 1;
		for (idx = 0; idx < found; idx++)
			__adjust_nat_entry_set(setvec[idx], &sets,
							MAX_NAT_JENTRIES(sum));
	}
	up_write(&nm_i->nat_tree_lock);

	/* flush dirty nats in nat entry set */
	list_for_each_entry_safe(set, tmp, &sets, set_list)
		__flush_nat_entry_set(sbi, set);

	f2fs_bug_on(sbi, nm_i->dirty_nat_cnt);
}

static int init_node_manager(struct f2fs_sb_info *sbi)
{
	struct f2fs_super_block *sb_raw = F2FS_RAW_SUPER(sbi);
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	unsigned char *version_bitmap;
	unsigned int nat_segs, nat_blocks;

	nm_i->nat_blkaddr = le32_to_cpu(sb_raw->nat_blkaddr);

	/* segment_count_nat includes pair segments, so divide by 2 */
	nat_segs = le32_to_cpu(sb_raw->segment_count_nat) >> 1;
	nat_blocks = nat_segs << le32_to_cpu(sb_raw->log_blocks_per_seg);

	nm_i->max_nid = NAT_ENTRY_PER_BLOCK * nat_blocks;

	/* unused nids: 0, node, meta (and root, which is counted as a valid node) */
	nm_i->available_nids = nm_i->max_nid - F2FS_RESERVED_NODE_NUM;
	nm_i->fcnt = 0;
	nm_i->nat_cnt = 0;
	nm_i->ram_thresh = DEF_RAM_THRESHOLD;

	INIT_RADIX_TREE(&nm_i->free_nid_root, GFP_ATOMIC);
	INIT_LIST_HEAD(&nm_i->free_nid_list);
	INIT_RADIX_TREE(&nm_i->nat_root, GFP_NOIO);
	INIT_RADIX_TREE(&nm_i->nat_set_root, GFP_NOIO);
	INIT_LIST_HEAD(&nm_i->nat_entries);

	mutex_init(&nm_i->build_lock);
	spin_lock_init(&nm_i->free_nid_list_lock);
	init_rwsem(&nm_i->nat_tree_lock);

	nm_i->next_scan_nid = le32_to_cpu(sbi->ckpt->next_free_nid);
	nm_i->bitmap_size = __bitmap_size(sbi, NAT_BITMAP);
	version_bitmap = __bitmap_ptr(sbi, NAT_BITMAP);
	if (!version_bitmap)
		return -EFAULT;

	nm_i->nat_bitmap = kmemdup(version_bitmap, nm_i->bitmap_size,
					GFP_KERNEL);
	if (!nm_i->nat_bitmap)
		return -ENOMEM;
	return 0;
}

int build_node_manager(struct f2fs_sb_info *sbi)
{
	int err;

	sbi->nm_info = kzalloc(sizeof(struct f2fs_nm_info), GFP_KERNEL);
	if (!sbi->nm_info)
		return -ENOMEM;

	err = init_node_manager(sbi);
	if (err)
		return err;

	build_free_nids(sbi);
	return 0;
}

void destroy_node_manager(struct f2fs_sb_info *sbi)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct free_nid *i, *next_i;
	struct nat_entry *natvec[NATVEC_SIZE];
	struct nat_entry_set *setvec[SETVEC_SIZE];
	nid_t nid = 0;
	unsigned int found;

	if (!nm_i)
		return;

	/* destroy free nid list */
	spin_lock(&nm_i->free_nid_list_lock);
	list_for_each_entry_safe(i, next_i, &nm_i->free_nid_list, list) {
		f2fs_bug_on(sbi, i->state == NID_ALLOC);
		__del_from_free_nid_list(nm_i, i);
		nm_i->fcnt--;
		spin_unlock(&nm_i->free_nid_list_lock);
		kmem_cache_free(free_nid_slab, i);
		spin_lock(&nm_i->free_nid_list_lock);
	}
	f2fs_bug_on(sbi, nm_i->fcnt);
	spin_unlock(&nm_i->free_nid_list_lock);

	/* destroy nat cache */
	down_write(&nm_i->nat_tree_lock);
	while ((found = __gang_lookup_nat_cache(nm_i,
					nid, NATVEC_SIZE, natvec))) {
		unsigned idx;

		nid = nat_get_nid(natvec[found - 1]) + 1;
		for (idx = 0; idx < found; idx++)
			__del_from_nat_cache(nm_i, natvec[idx]);
	}
	f2fs_bug_on(sbi, nm_i->nat_cnt);

	/* destroy nat set cache */
	nid = 0;
	while ((found = __gang_lookup_nat_set(nm_i,
					nid, SETVEC_SIZE, setvec))) {
		unsigned idx;

		nid = setvec[found - 1]->set + 1;
		for (idx = 0; idx < found; idx++) {
			/* entry_cnt is not zero when a cp_error has occurred */
			f2fs_bug_on(sbi, !list_empty(&setvec[idx]->entry_list));
			radix_tree_delete(&nm_i->nat_set_root, setvec[idx]->set);
			kmem_cache_free(nat_entry_set_slab, setvec[idx]);
		}
	}
	up_write(&nm_i->nat_tree_lock);

	kfree(nm_i->nat_bitmap);
	sbi->nm_info = NULL;
	kfree(nm_i);
}

int __init create_node_manager_caches(void)
{
	nat_entry_slab = f2fs_kmem_cache_create("nat_entry",
			sizeof(struct nat_entry));
	if (!nat_entry_slab)
		goto fail;

	free_nid_slab = f2fs_kmem_cache_create("free_nid",
			sizeof(struct free_nid));
	if (!free_nid_slab)
		goto destroy_nat_entry;

	nat_entry_set_slab = f2fs_kmem_cache_create("nat_entry_set",
			sizeof(struct nat_entry_set));
	if (!nat_entry_set_slab)
		goto destroy_free_nid;
	return 0;

destroy_free_nid:
	kmem_cache_destroy(free_nid_slab);
destroy_nat_entry:
	kmem_cache_destroy(nat_entry_slab);
fail:
	return -ENOMEM;
}

void destroy_node_manager_caches(void)
{
	kmem_cache_destroy(nat_entry_set_slab);
	kmem_cache_destroy(free_nid_slab);
	kmem_cache_destroy(nat_entry_slab);
}