Lines matching refs:segbuf (NILFS2 segment buffer code, fs/nilfs2/segbuf.c)
43 static int nilfs_segbuf_write(struct nilfs_segment_buffer *segbuf,
45 static int nilfs_segbuf_wait(struct nilfs_segment_buffer *segbuf);
49 struct nilfs_segment_buffer *segbuf; in nilfs_segbuf_new() local
51 segbuf = kmem_cache_alloc(nilfs_segbuf_cachep, GFP_NOFS); in nilfs_segbuf_new()
52 if (unlikely(!segbuf)) in nilfs_segbuf_new()
55 segbuf->sb_super = sb; in nilfs_segbuf_new()
56 INIT_LIST_HEAD(&segbuf->sb_list); in nilfs_segbuf_new()
57 INIT_LIST_HEAD(&segbuf->sb_segsum_buffers); in nilfs_segbuf_new()
58 INIT_LIST_HEAD(&segbuf->sb_payload_buffers); in nilfs_segbuf_new()
59 segbuf->sb_super_root = NULL; in nilfs_segbuf_new()
61 init_completion(&segbuf->sb_bio_event); in nilfs_segbuf_new()
62 atomic_set(&segbuf->sb_err, 0); in nilfs_segbuf_new()
63 segbuf->sb_nbio = 0; in nilfs_segbuf_new()
65 return segbuf; in nilfs_segbuf_new()
68 void nilfs_segbuf_free(struct nilfs_segment_buffer *segbuf) in nilfs_segbuf_free() argument
70 kmem_cache_free(nilfs_segbuf_cachep, segbuf); in nilfs_segbuf_free()
73 void nilfs_segbuf_map(struct nilfs_segment_buffer *segbuf, __u64 segnum, in nilfs_segbuf_map() argument
76 segbuf->sb_segnum = segnum; in nilfs_segbuf_map()
77 nilfs_get_segment_range(nilfs, segnum, &segbuf->sb_fseg_start, in nilfs_segbuf_map()
78 &segbuf->sb_fseg_end); in nilfs_segbuf_map()
80 segbuf->sb_pseg_start = segbuf->sb_fseg_start + offset; in nilfs_segbuf_map()
81 segbuf->sb_rest_blocks = in nilfs_segbuf_map()
82 segbuf->sb_fseg_end - segbuf->sb_pseg_start + 1; in nilfs_segbuf_map()
90 void nilfs_segbuf_map_cont(struct nilfs_segment_buffer *segbuf, in nilfs_segbuf_map_cont() argument
93 segbuf->sb_segnum = prev->sb_segnum; in nilfs_segbuf_map_cont()
94 segbuf->sb_fseg_start = prev->sb_fseg_start; in nilfs_segbuf_map_cont()
95 segbuf->sb_fseg_end = prev->sb_fseg_end; in nilfs_segbuf_map_cont()
96 segbuf->sb_pseg_start = prev->sb_pseg_start + prev->sb_sum.nblocks; in nilfs_segbuf_map_cont()
97 segbuf->sb_rest_blocks = in nilfs_segbuf_map_cont()
98 segbuf->sb_fseg_end - segbuf->sb_pseg_start + 1; in nilfs_segbuf_map_cont()
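The two mapping helpers above are pure block arithmetic: nilfs_segbuf_map() places a partial segment "offset" blocks into the full segment, and nilfs_segbuf_map_cont() continues immediately after the previous partial segment. A minimal user-space sketch of that arithmetic; the block numbers (segment at 1024, 512 blocks long, offset 8, previous partial segment of 16 blocks) are made up for illustration:

    /* Hypothetical numbers; mirrors the sb_pseg_start / sb_rest_blocks math above. */
    #include <stdio.h>

    int main(void)
    {
        unsigned long fseg_start = 1024;            /* first block of the full segment */
        unsigned long fseg_end   = 1535;            /* last block (512-block segment)  */
        unsigned long offset     = 8;               /* blocks already consumed         */

        unsigned long pseg_start = fseg_start + offset;        /* 1032            */
        unsigned long rest       = fseg_end - pseg_start + 1;  /* 504 blocks left */

        /* A continued mapping starts right after the previous partial segment: */
        unsigned long prev_nblocks = 16;
        unsigned long cont_start = pseg_start + prev_nblocks;  /* 1048 */
        unsigned long cont_rest  = fseg_end - cont_start + 1;  /* 488  */

        printf("%lu %lu %lu %lu\n", pseg_start, rest, cont_start, cont_rest);
        return 0;
    }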
101 void nilfs_segbuf_set_next_segnum(struct nilfs_segment_buffer *segbuf, in nilfs_segbuf_set_next_segnum() argument
104 segbuf->sb_nextnum = nextnum; in nilfs_segbuf_set_next_segnum()
105 segbuf->sb_sum.next = nilfs_get_segment_start_blocknr(nilfs, nextnum); in nilfs_segbuf_set_next_segnum()
108 int nilfs_segbuf_extend_segsum(struct nilfs_segment_buffer *segbuf) in nilfs_segbuf_extend_segsum() argument
112 bh = sb_getblk(segbuf->sb_super, in nilfs_segbuf_extend_segsum()
113 segbuf->sb_pseg_start + segbuf->sb_sum.nsumblk); in nilfs_segbuf_extend_segsum()
117 nilfs_segbuf_add_segsum_buffer(segbuf, bh); in nilfs_segbuf_extend_segsum()
121 int nilfs_segbuf_extend_payload(struct nilfs_segment_buffer *segbuf, in nilfs_segbuf_extend_payload() argument
126 bh = sb_getblk(segbuf->sb_super, in nilfs_segbuf_extend_payload()
127 segbuf->sb_pseg_start + segbuf->sb_sum.nblocks); in nilfs_segbuf_extend_payload()
131 nilfs_segbuf_add_payload_buffer(segbuf, bh); in nilfs_segbuf_extend_payload()
136 int nilfs_segbuf_reset(struct nilfs_segment_buffer *segbuf, unsigned flags, in nilfs_segbuf_reset() argument
141 segbuf->sb_sum.nblocks = segbuf->sb_sum.nsumblk = 0; in nilfs_segbuf_reset()
142 err = nilfs_segbuf_extend_segsum(segbuf); in nilfs_segbuf_reset()
146 segbuf->sb_sum.flags = flags; in nilfs_segbuf_reset()
147 segbuf->sb_sum.sumbytes = sizeof(struct nilfs_segment_summary); in nilfs_segbuf_reset()
148 segbuf->sb_sum.nfinfo = segbuf->sb_sum.nfileblk = 0; in nilfs_segbuf_reset()
149 segbuf->sb_sum.ctime = ctime; in nilfs_segbuf_reset()
150 segbuf->sb_sum.cno = cno; in nilfs_segbuf_reset()
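nilfs_segbuf_reset() zeroes the block counters, grabs the first summary block through nilfs_segbuf_extend_segsum(), and seeds sb_sum.sumbytes with the size of the on-disk summary header; nilfs_segbuf_extend_payload() then grows the data area one block at a time. A rough counter-only model of that bookkeeping (the exact increments performed inside the add_segsum_buffer/add_payload_buffer helpers are not part of this listing and are assumed here):

    /* Simplified counter model; not kernel code.  The ++ bumps attributed to the
     * add_segsum_buffer/add_payload_buffer helpers are assumptions. */
    #include <stdio.h>

    struct segsum_model {
        unsigned long nblocks;   /* total blocks in the partial segment  */
        unsigned long nsumblk;   /* blocks used by the segment summary   */
        unsigned long sumbytes;  /* bytes of summary information written */
    };

    static void model_reset(struct segsum_model *s, unsigned long header_bytes)
    {
        s->nblocks = s->nsumblk = 0;
        s->nsumblk++;                /* extend_segsum(): first summary block */
        s->nblocks++;
        s->sumbytes = header_bytes;  /* sizeof(struct nilfs_segment_summary) */
    }

    static void model_extend_payload(struct segsum_model *s)
    {
        s->nblocks++;                /* one more data block */
    }

    int main(void)
    {
        struct segsum_model s;

        model_reset(&s, 136);        /* header size is illustrative only */
        model_extend_payload(&s);
        model_extend_payload(&s);
        printf("nblocks=%lu nsumblk=%lu sumbytes=%lu\n",
               s.nblocks, s.nsumblk, s.sumbytes);
        return 0;
    }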
157 void nilfs_segbuf_fill_in_segsum(struct nilfs_segment_buffer *segbuf) in nilfs_segbuf_fill_in_segsum() argument
162 bh_sum = list_entry(segbuf->sb_segsum_buffers.next, in nilfs_segbuf_fill_in_segsum()
168 raw_sum->ss_flags = cpu_to_le16(segbuf->sb_sum.flags); in nilfs_segbuf_fill_in_segsum()
169 raw_sum->ss_seq = cpu_to_le64(segbuf->sb_sum.seg_seq); in nilfs_segbuf_fill_in_segsum()
170 raw_sum->ss_create = cpu_to_le64(segbuf->sb_sum.ctime); in nilfs_segbuf_fill_in_segsum()
171 raw_sum->ss_next = cpu_to_le64(segbuf->sb_sum.next); in nilfs_segbuf_fill_in_segsum()
172 raw_sum->ss_nblocks = cpu_to_le32(segbuf->sb_sum.nblocks); in nilfs_segbuf_fill_in_segsum()
173 raw_sum->ss_nfinfo = cpu_to_le32(segbuf->sb_sum.nfinfo); in nilfs_segbuf_fill_in_segsum()
174 raw_sum->ss_sumbytes = cpu_to_le32(segbuf->sb_sum.sumbytes); in nilfs_segbuf_fill_in_segsum()
176 raw_sum->ss_cno = cpu_to_le64(segbuf->sb_sum.cno); in nilfs_segbuf_fill_in_segsum()
183 nilfs_segbuf_fill_in_segsum_crc(struct nilfs_segment_buffer *segbuf, u32 seed) in nilfs_segbuf_fill_in_segsum_crc() argument
187 unsigned long size, bytes = segbuf->sb_sum.sumbytes; in nilfs_segbuf_fill_in_segsum_crc()
190 bh = list_entry(segbuf->sb_segsum_buffers.next, struct buffer_head, in nilfs_segbuf_fill_in_segsum_crc()
201 list_for_each_entry_continue(bh, &segbuf->sb_segsum_buffers, in nilfs_segbuf_fill_in_segsum_crc()
210 static void nilfs_segbuf_fill_in_data_crc(struct nilfs_segment_buffer *segbuf, in nilfs_segbuf_fill_in_data_crc() argument
218 bh = list_entry(segbuf->sb_segsum_buffers.next, struct buffer_head, in nilfs_segbuf_fill_in_data_crc()
225 list_for_each_entry_continue(bh, &segbuf->sb_segsum_buffers, in nilfs_segbuf_fill_in_data_crc()
229 list_for_each_entry(bh, &segbuf->sb_payload_buffers, b_assoc_buffers) { in nilfs_segbuf_fill_in_data_crc()
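Both checksum helpers above walk buffer lists and feed each buffer into one running CRC, with list_for_each_entry_continue() carrying the value across the remaining buffers. The kernel side uses crc32_le() and skips the checksum fields in the summary header, details not visible in this listing. A hedged user-space sketch of the same multi-buffer walk, using zlib's crc32() (not bit-identical to crc32_le()):

    /* Incremental CRC over several buffers, analogous to the list walks above. */
    #include <stdio.h>
    #include <string.h>
    #include <zlib.h>

    int main(void)
    {
        unsigned char blk0[4096], blk1[4096];
        unsigned long total = 5000;               /* cf. sb_sum.sumbytes */
        unsigned long blocksize = sizeof(blk0);
        uLong crc;

        memset(blk0, 0xaa, sizeof(blk0));
        memset(blk1, 0x55, sizeof(blk1));

        /* First buffer covers at most one block of the total... */
        crc = crc32(crc32(0L, Z_NULL, 0), blk0,
                    total < blocksize ? total : blocksize);

        /* ...remaining buffers continue the same running CRC. */
        if (total > blocksize)
            crc = crc32(crc, blk1, total - blocksize);

        printf("crc=0x%08lx\n", (unsigned long)crc);
        return 0;
    }

Link with -lz to build the sketch.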
238 nilfs_segbuf_fill_in_super_root_crc(struct nilfs_segment_buffer *segbuf, in nilfs_segbuf_fill_in_super_root_crc() argument
242 struct the_nilfs *nilfs = segbuf->sb_super->s_fs_info; in nilfs_segbuf_fill_in_super_root_crc()
246 raw_sr = (struct nilfs_super_root *)segbuf->sb_super_root->b_data; in nilfs_segbuf_fill_in_super_root_crc()
264 static void nilfs_segbuf_clear(struct nilfs_segment_buffer *segbuf) in nilfs_segbuf_clear() argument
266 nilfs_release_buffers(&segbuf->sb_segsum_buffers); in nilfs_segbuf_clear()
267 nilfs_release_buffers(&segbuf->sb_payload_buffers); in nilfs_segbuf_clear()
268 segbuf->sb_super_root = NULL; in nilfs_segbuf_clear()
276 struct nilfs_segment_buffer *segbuf; in nilfs_clear_logs() local
278 list_for_each_entry(segbuf, logs, sb_list) in nilfs_clear_logs()
279 nilfs_segbuf_clear(segbuf); in nilfs_clear_logs()
285 struct nilfs_segment_buffer *n, *segbuf; in nilfs_truncate_logs() local
287 segbuf = list_prepare_entry(last, logs, sb_list); in nilfs_truncate_logs()
288 list_for_each_entry_safe_continue(segbuf, n, logs, sb_list) { in nilfs_truncate_logs()
289 list_del_init(&segbuf->sb_list); in nilfs_truncate_logs()
290 nilfs_segbuf_clear(segbuf); in nilfs_truncate_logs()
291 nilfs_segbuf_free(segbuf); in nilfs_truncate_logs()
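nilfs_truncate_logs() pairs list_prepare_entry() with list_for_each_entry_safe_continue() to detach and free every log that follows last, leaving last itself and everything before it on the list (or emptying the list when last is NULL). A simplified stand-alone analogue of that pattern with a plain singly linked list; the kernel code of course uses <linux/list.h> list heads instead:

    /* Stand-alone analogue of "free every log after <last>". */
    #include <stdio.h>
    #include <stdlib.h>

    struct log {
        int id;
        struct log *next;
    };

    static void truncate_logs(struct log **head, struct log *last)
    {
        struct log *cur = last ? last->next : *head;

        while (cur) {
            struct log *n = cur->next;

            free(cur);
            cur = n;
        }
        if (last)
            last->next = NULL;
        else
            *head = NULL;
    }

    int main(void)
    {
        struct log *head = NULL, **pp = &head, *second = NULL, *it;
        int i;

        for (i = 0; i < 4; i++) {
            struct log *l = malloc(sizeof(*l));

            l->id = i;
            l->next = NULL;
            *pp = l;
            pp = &l->next;
            if (i == 1)
                second = l;
        }

        truncate_logs(&head, second);   /* keeps logs 0 and 1, frees 2 and 3 */

        for (it = head; it; it = it->next)
            printf("kept log %d\n", it->id);
        return 0;
    }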
297 struct nilfs_segment_buffer *segbuf; in nilfs_write_logs() local
300 list_for_each_entry(segbuf, logs, sb_list) { in nilfs_write_logs()
301 ret = nilfs_segbuf_write(segbuf, nilfs); in nilfs_write_logs()
310 struct nilfs_segment_buffer *segbuf; in nilfs_wait_on_logs() local
313 list_for_each_entry(segbuf, logs, sb_list) { in nilfs_wait_on_logs()
314 err = nilfs_segbuf_wait(segbuf); in nilfs_wait_on_logs()
328 struct nilfs_segment_buffer *segbuf; in nilfs_add_checksums_on_logs() local
330 list_for_each_entry(segbuf, logs, sb_list) { in nilfs_add_checksums_on_logs()
331 if (segbuf->sb_super_root) in nilfs_add_checksums_on_logs()
332 nilfs_segbuf_fill_in_super_root_crc(segbuf, seed); in nilfs_add_checksums_on_logs()
333 nilfs_segbuf_fill_in_segsum_crc(segbuf, seed); in nilfs_add_checksums_on_logs()
334 nilfs_segbuf_fill_in_data_crc(segbuf, seed); in nilfs_add_checksums_on_logs()
344 struct nilfs_segment_buffer *segbuf = bio->bi_private; in nilfs_end_bio_write() local
352 atomic_inc(&segbuf->sb_err); in nilfs_end_bio_write()
355 complete(&segbuf->sb_bio_event); in nilfs_end_bio_write()
358 static int nilfs_segbuf_submit_bio(struct nilfs_segment_buffer *segbuf, in nilfs_segbuf_submit_bio() argument
364 if (segbuf->sb_nbio > 0 && in nilfs_segbuf_submit_bio()
365 bdi_write_congested(segbuf->sb_super->s_bdi)) { in nilfs_segbuf_submit_bio()
366 wait_for_completion(&segbuf->sb_bio_event); in nilfs_segbuf_submit_bio()
367 segbuf->sb_nbio--; in nilfs_segbuf_submit_bio()
368 if (unlikely(atomic_read(&segbuf->sb_err))) { in nilfs_segbuf_submit_bio()
376 bio->bi_private = segbuf; in nilfs_segbuf_submit_bio()
379 segbuf->sb_nbio++; in nilfs_segbuf_submit_bio()
425 static void nilfs_segbuf_prepare_write(struct nilfs_segment_buffer *segbuf, in nilfs_segbuf_prepare_write() argument
429 wi->rest_blocks = segbuf->sb_sum.nblocks; in nilfs_segbuf_prepare_write()
433 wi->blocknr = segbuf->sb_pseg_start; in nilfs_segbuf_prepare_write()
436 static int nilfs_segbuf_submit_bh(struct nilfs_segment_buffer *segbuf, in nilfs_segbuf_submit_bh() argument
457 err = nilfs_segbuf_submit_bio(segbuf, wi, mode); in nilfs_segbuf_submit_bh()
476 static int nilfs_segbuf_write(struct nilfs_segment_buffer *segbuf, in nilfs_segbuf_write() argument
484 nilfs_segbuf_prepare_write(segbuf, &wi); in nilfs_segbuf_write()
486 list_for_each_entry(bh, &segbuf->sb_segsum_buffers, b_assoc_buffers) { in nilfs_segbuf_write()
487 res = nilfs_segbuf_submit_bh(segbuf, &wi, bh, rw); in nilfs_segbuf_write()
492 list_for_each_entry(bh, &segbuf->sb_payload_buffers, b_assoc_buffers) { in nilfs_segbuf_write()
493 res = nilfs_segbuf_submit_bh(segbuf, &wi, bh, rw); in nilfs_segbuf_write()
504 res = nilfs_segbuf_submit_bio(segbuf, &wi, rw); in nilfs_segbuf_write()
520 static int nilfs_segbuf_wait(struct nilfs_segment_buffer *segbuf) in nilfs_segbuf_wait() argument
524 if (!segbuf->sb_nbio) in nilfs_segbuf_wait()
528 wait_for_completion(&segbuf->sb_bio_event); in nilfs_segbuf_wait()
529 } while (--segbuf->sb_nbio > 0); in nilfs_segbuf_wait()
531 if (unlikely(atomic_read(&segbuf->sb_err) > 0)) { in nilfs_segbuf_wait()
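Taken together, the write/wait paths above keep three pieces of per-segbuf state: sb_nbio counts submitted bios, sb_bio_event is completed once per finished bio in nilfs_end_bio_write(), and sb_err accumulates I/O errors. nilfs_segbuf_wait() therefore waits sb_nbio times and then checks sb_err, while nilfs_segbuf_submit_bio() waits for one completion early when the backing device is congested. A toy pthread model of that accounting, with the bdi_write_congested() test reduced to a fixed in-flight cap (MAX_INFLIGHT is an invention of this sketch; no real I/O is done):

    /* Toy model of the sb_nbio / sb_bio_event / sb_err protocol. */
    #include <pthread.h>
    #include <semaphore.h>
    #include <stdatomic.h>
    #include <stdio.h>
    #include <errno.h>
    #include <unistd.h>

    #define MAX_INFLIGHT 2

    static sem_t bio_event;      /* cf. sb_bio_event */
    static atomic_int nerr;      /* cf. sb_err       */
    static int nbio;             /* cf. sb_nbio      */

    static void *fake_bio(void *arg)
    {
        (void)arg;
        usleep(1000);            /* pretend the write takes a moment          */
        /* on error we would do: atomic_fetch_add(&nerr, 1);                  */
        sem_post(&bio_event);    /* cf. complete(&segbuf->sb_bio_event)       */
        return NULL;
    }

    static void submit_one(void)
    {
        pthread_t t;

        if (nbio >= MAX_INFLIGHT) {   /* stand-in for the congested case */
            sem_wait(&bio_event);
            nbio--;
        }
        pthread_create(&t, NULL, fake_bio, NULL);
        pthread_detach(t);
        nbio++;
    }

    static int wait_all(void)
    {
        while (nbio > 0) {            /* cf. the nilfs_segbuf_wait() loop */
            sem_wait(&bio_event);
            nbio--;
        }
        return atomic_load(&nerr) ? -EIO : 0;
    }

    int main(void)
    {
        int i, ret;

        sem_init(&bio_event, 0, 0);
        for (i = 0; i < 5; i++)
            submit_one();
        ret = wait_all();
        printf("wait_all() -> %d, errors=%d\n", ret, atomic_load(&nerr));
        sem_destroy(&bio_event);
        return 0;
    }

Build with -pthread; only the main thread touches nbio, so no extra locking is needed in this model.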