Lines matching refs:bh in fs/buffer.c (Linux kernel). Each entry shows the source line number, the matching code, the enclosing function, and whether bh is an argument or a local variable there.

50 void init_buffer(struct buffer_head *bh, bh_end_io_t *handler, void *private)  in init_buffer()  argument
52 bh->b_end_io = handler; in init_buffer()
53 bh->b_private = private; in init_buffer()
57 inline void touch_buffer(struct buffer_head *bh) in touch_buffer() argument
59 trace_block_touch_buffer(bh); in touch_buffer()
60 mark_page_accessed(bh->b_page); in touch_buffer()
64 void __lock_buffer(struct buffer_head *bh) in __lock_buffer() argument
66 wait_on_bit_lock_io(&bh->b_state, BH_Lock, TASK_UNINTERRUPTIBLE); in __lock_buffer()
70 void unlock_buffer(struct buffer_head *bh) in unlock_buffer() argument
72 clear_bit_unlock(BH_Lock, &bh->b_state); in unlock_buffer()
74 wake_up_bit(&bh->b_state, BH_Lock); in unlock_buffer()
86 struct buffer_head *head, *bh; in buffer_check_dirty_writeback() local
99 bh = head; in buffer_check_dirty_writeback()
101 if (buffer_locked(bh)) in buffer_check_dirty_writeback()
104 if (buffer_dirty(bh)) in buffer_check_dirty_writeback()
107 bh = bh->b_this_page; in buffer_check_dirty_writeback()
108 } while (bh != head); in buffer_check_dirty_writeback()
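The do/while loop above (source lines 99-108) is the standard idiom for visiting every buffer attached to a page: the b_this_page pointers form a ring that closes back on the head buffer. A minimal sketch of the same walk, assuming the caller holds the page lock so the buffers cannot be torn down underneath it:

    #include <linux/buffer_head.h>

    /* Count the dirty buffers on @page; caller must hold the page lock. */
    static int count_dirty_buffers(struct page *page)
    {
            struct buffer_head *head = page_buffers(page);
            struct buffer_head *bh = head;
            int nr = 0;

            do {
                    if (buffer_dirty(bh))
                            nr++;
                    bh = bh->b_this_page;   /* circular: eventually wraps to head */
            } while (bh != head);

            return nr;
    }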
117 void __wait_on_buffer(struct buffer_head * bh) in __wait_on_buffer() argument
119 wait_on_bit_io(&bh->b_state, BH_Lock, TASK_UNINTERRUPTIBLE); in __wait_on_buffer()
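__lock_buffer() and unlock_buffer() (source lines 64-74) implement a sleeping bit lock on BH_Lock, and __wait_on_buffer() (line 117) sleeps until whoever holds it, typically in-flight I/O, releases it. A hedged sketch of the usual discipline for modifying a buffer's contents; patch_block() is a hypothetical helper:

    #include <linux/buffer_head.h>

    /* Hypothetical: overwrite one byte of a mapped buffer, safely vs. I/O. */
    static void patch_block(struct buffer_head *bh, unsigned int off, u8 byte)
    {
            lock_buffer(bh);                /* sleeps while I/O holds BH_Lock */
            ((u8 *)bh->b_data)[off] = byte;
            mark_buffer_dirty(bh);
            unlock_buffer(bh);              /* clear_bit_unlock + wake_up_bit */
    }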
131 static void buffer_io_error(struct buffer_head *bh, char *msg) in buffer_io_error() argument
135 if (!test_bit(BH_Quiet, &bh->b_state)) in buffer_io_error()
138 bdevname(bh->b_bdev, b), in buffer_io_error()
139 (unsigned long long)bh->b_blocknr, msg); in buffer_io_error()
150 static void __end_buffer_read_notouch(struct buffer_head *bh, int uptodate) in __end_buffer_read_notouch() argument
153 set_buffer_uptodate(bh); in __end_buffer_read_notouch()
156 clear_buffer_uptodate(bh); in __end_buffer_read_notouch()
158 unlock_buffer(bh); in __end_buffer_read_notouch()
165 void end_buffer_read_sync(struct buffer_head *bh, int uptodate) in end_buffer_read_sync() argument
167 __end_buffer_read_notouch(bh, uptodate); in end_buffer_read_sync()
168 put_bh(bh); in end_buffer_read_sync()
172 void end_buffer_write_sync(struct buffer_head *bh, int uptodate) in end_buffer_write_sync() argument
175 set_buffer_uptodate(bh); in end_buffer_write_sync()
177 buffer_io_error(bh, ", lost sync page write"); in end_buffer_write_sync()
178 set_buffer_write_io_error(bh); in end_buffer_write_sync()
179 clear_buffer_uptodate(bh); in end_buffer_write_sync()
181 unlock_buffer(bh); in end_buffer_write_sync()
182 put_bh(bh); in end_buffer_write_sync()
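end_buffer_read_sync() and end_buffer_write_sync() are the b_end_io completions for I/O the submitter waits on; the submit side takes an extra reference that the completion drops with put_bh(). A minimal sketch of the read side, essentially what __bread_slow() (source line 1206 below) does, returning an errno instead of a buffer:

    #include <linux/buffer_head.h>
    #include <linux/errno.h>

    /* Synchronously read @bh (must be mapped); 0 on success, -EIO on failure. */
    static int read_bh_sync(struct buffer_head *bh)
    {
            lock_buffer(bh);
            if (buffer_uptodate(bh)) {
                    unlock_buffer(bh);      /* someone read it while we waited */
                    return 0;
            }
            get_bh(bh);                     /* dropped by end_buffer_read_sync() */
            bh->b_end_io = end_buffer_read_sync;
            submit_bh(READ, bh);            /* same (rw, bh) form as this listing */
            wait_on_buffer(bh);
            return buffer_uptodate(bh) ? 0 : -EIO;
    }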
204 struct buffer_head *bh; in __find_get_block_slow() local
218 bh = head; in __find_get_block_slow()
220 if (!buffer_mapped(bh)) in __find_get_block_slow()
222 else if (bh->b_blocknr == block) { in __find_get_block_slow()
223 ret = bh; in __find_get_block_slow()
224 get_bh(bh); in __find_get_block_slow()
227 bh = bh->b_this_page; in __find_get_block_slow()
228 } while (bh != head); in __find_get_block_slow()
241 (unsigned long long)bh->b_blocknr); in __find_get_block_slow()
243 bh->b_state, bh->b_size); in __find_get_block_slow()
279 static void end_buffer_async_read(struct buffer_head *bh, int uptodate) in end_buffer_async_read() argument
287 BUG_ON(!buffer_async_read(bh)); in end_buffer_async_read()
289 page = bh->b_page; in end_buffer_async_read()
291 set_buffer_uptodate(bh); in end_buffer_async_read()
293 clear_buffer_uptodate(bh); in end_buffer_async_read()
294 buffer_io_error(bh, ", async page read"); in end_buffer_async_read()
306 clear_buffer_async_read(bh); in end_buffer_async_read()
307 unlock_buffer(bh); in end_buffer_async_read()
308 tmp = bh; in end_buffer_async_read()
317 } while (tmp != bh); in end_buffer_async_read()
340 void end_buffer_async_write(struct buffer_head *bh, int uptodate) in end_buffer_async_write() argument
347 BUG_ON(!buffer_async_write(bh)); in end_buffer_async_write()
349 page = bh->b_page; in end_buffer_async_write()
351 set_buffer_uptodate(bh); in end_buffer_async_write()
353 buffer_io_error(bh, ", lost async page write"); in end_buffer_async_write()
355 set_buffer_write_io_error(bh); in end_buffer_async_write()
356 clear_buffer_uptodate(bh); in end_buffer_async_write()
364 clear_buffer_async_write(bh); in end_buffer_async_write()
365 unlock_buffer(bh); in end_buffer_async_write()
366 tmp = bh->b_this_page; in end_buffer_async_write()
367 while (tmp != bh) { in end_buffer_async_write()
407 static void mark_buffer_async_read(struct buffer_head *bh) in mark_buffer_async_read() argument
409 bh->b_end_io = end_buffer_async_read; in mark_buffer_async_read()
410 set_buffer_async_read(bh); in mark_buffer_async_read()
413 static void mark_buffer_async_write_endio(struct buffer_head *bh, in mark_buffer_async_write_endio() argument
416 bh->b_end_io = handler; in mark_buffer_async_write_endio()
417 set_buffer_async_write(bh); in mark_buffer_async_write_endio()
420 void mark_buffer_async_write(struct buffer_head *bh) in mark_buffer_async_write() argument
422 mark_buffer_async_write_endio(bh, end_buffer_async_write); in mark_buffer_async_write()
479 static void __remove_assoc_queue(struct buffer_head *bh) in __remove_assoc_queue() argument
481 list_del_init(&bh->b_assoc_buffers); in __remove_assoc_queue()
482 WARN_ON(!bh->b_assoc_map); in __remove_assoc_queue()
483 if (buffer_write_io_error(bh)) in __remove_assoc_queue()
484 set_bit(AS_EIO, &bh->b_assoc_map->flags); in __remove_assoc_queue()
485 bh->b_assoc_map = NULL; in __remove_assoc_queue()
505 struct buffer_head *bh; in osync_buffers_list() local
512 bh = BH_ENTRY(p); in osync_buffers_list()
513 if (buffer_locked(bh)) { in osync_buffers_list()
514 get_bh(bh); in osync_buffers_list()
516 wait_on_buffer(bh); in osync_buffers_list()
517 if (!buffer_uptodate(bh)) in osync_buffers_list()
519 brelse(bh); in osync_buffers_list()
591 struct buffer_head *bh = __find_get_block(bdev, bblock + 1, blocksize); in write_boundary_block() local
592 if (bh) { in write_boundary_block()
593 if (buffer_dirty(bh)) in write_boundary_block()
594 ll_rw_block(WRITE, 1, &bh); in write_boundary_block()
595 put_bh(bh); in write_boundary_block()
599 void mark_buffer_dirty_inode(struct buffer_head *bh, struct inode *inode) in mark_buffer_dirty_inode() argument
602 struct address_space *buffer_mapping = bh->b_page->mapping; in mark_buffer_dirty_inode()
604 mark_buffer_dirty(bh); in mark_buffer_dirty_inode()
610 if (!bh->b_assoc_map) { in mark_buffer_dirty_inode()
612 list_move_tail(&bh->b_assoc_buffers, in mark_buffer_dirty_inode()
614 bh->b_assoc_map = mapping; in mark_buffer_dirty_inode()
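mark_buffer_dirty_inode() (source line 599) both dirties the buffer and links it onto the owning inode's associated-buffer list, which is what lets fsync_buffers_list() (line 716 below) find and flush metadata blocks on fsync. A hedged sketch of filesystem usage; myfs_dirty_meta_block() and its layout are hypothetical:

    #include <linux/buffer_head.h>
    #include <linux/string.h>
    #include <linux/errno.h>

    /* Hypothetical: update a metadata block so fsync(@inode) writes it too. */
    static int myfs_dirty_meta_block(struct inode *inode, sector_t blocknr,
                                     const void *data, size_t len)
    {
            struct buffer_head *bh = sb_bread(inode->i_sb, blocknr);

            if (!bh)
                    return -EIO;
            memcpy(bh->b_data, data, len);
            mark_buffer_dirty_inode(bh, inode);  /* dirty + associate with inode */
            brelse(bh);
            return 0;
    }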
679 struct buffer_head *bh = head; in __set_page_dirty_buffers() local
682 set_buffer_dirty(bh); in __set_page_dirty_buffers()
683 bh = bh->b_this_page; in __set_page_dirty_buffers()
684 } while (bh != head); in __set_page_dirty_buffers()
716 struct buffer_head *bh; in fsync_buffers_list() local
727 bh = BH_ENTRY(list->next); in fsync_buffers_list()
728 mapping = bh->b_assoc_map; in fsync_buffers_list()
729 __remove_assoc_queue(bh); in fsync_buffers_list()
733 if (buffer_dirty(bh) || buffer_locked(bh)) { in fsync_buffers_list()
734 list_add(&bh->b_assoc_buffers, &tmp); in fsync_buffers_list()
735 bh->b_assoc_map = mapping; in fsync_buffers_list()
736 if (buffer_dirty(bh)) { in fsync_buffers_list()
737 get_bh(bh); in fsync_buffers_list()
746 write_dirty_buffer(bh, WRITE_SYNC); in fsync_buffers_list()
754 brelse(bh); in fsync_buffers_list()
765 bh = BH_ENTRY(tmp.prev); in fsync_buffers_list()
766 get_bh(bh); in fsync_buffers_list()
767 mapping = bh->b_assoc_map; in fsync_buffers_list()
768 __remove_assoc_queue(bh); in fsync_buffers_list()
772 if (buffer_dirty(bh)) { in fsync_buffers_list()
773 list_add(&bh->b_assoc_buffers, in fsync_buffers_list()
775 bh->b_assoc_map = mapping; in fsync_buffers_list()
778 wait_on_buffer(bh); in fsync_buffers_list()
779 if (!buffer_uptodate(bh)) in fsync_buffers_list()
781 brelse(bh); in fsync_buffers_list()
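fsync_buffers_list() uses the classic two-pass flush: issue every write first (write_dirty_buffer() at source line 746), then come back and wait on each buffer, so the device sees the writes back to back instead of write/wait/write/wait. A minimal sketch of the same pattern over a plain array, assuming the caller already holds a reference on each buffer:

    #include <linux/buffer_head.h>
    #include <linux/errno.h>

    /* Flush @nr dirty buffers; returns -EIO if any of them failed to write. */
    static int write_and_wait(struct buffer_head **bhs, int nr)
    {
            int i, err = 0;

            for (i = 0; i < nr; i++)        /* pass 1: queue all the I/O */
                    write_dirty_buffer(bhs[i], WRITE_SYNC);

            for (i = 0; i < nr; i++) {      /* pass 2: wait and collect errors */
                    wait_on_buffer(bhs[i]);
                    if (!buffer_uptodate(bhs[i]))
                            err = -EIO;
            }
            return err;
    }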
834 struct buffer_head *bh = BH_ENTRY(list->next); in remove_inode_buffers() local
835 if (buffer_dirty(bh)) { in remove_inode_buffers()
839 __remove_assoc_queue(bh); in remove_inode_buffers()
858 struct buffer_head *bh, *head; in alloc_page_buffers() local
865 bh = alloc_buffer_head(GFP_NOFS); in alloc_page_buffers()
866 if (!bh) in alloc_page_buffers()
869 bh->b_this_page = head; in alloc_page_buffers()
870 bh->b_blocknr = -1; in alloc_page_buffers()
871 head = bh; in alloc_page_buffers()
873 bh->b_size = size; in alloc_page_buffers()
876 set_bh_page(bh, page, offset); in alloc_page_buffers()
885 bh = head; in alloc_page_buffers()
887 free_buffer_head(bh); in alloc_page_buffers()
914 struct buffer_head *bh, *tail; in link_dev_buffers() local
916 bh = head; in link_dev_buffers()
918 tail = bh; in link_dev_buffers()
919 bh = bh->b_this_page; in link_dev_buffers()
920 } while (bh); in link_dev_buffers()
945 struct buffer_head *bh = head; in init_page_buffers() local
950 if (!buffer_mapped(bh)) { in init_page_buffers()
951 init_buffer(bh, NULL, NULL); in init_page_buffers()
952 bh->b_bdev = bdev; in init_page_buffers()
953 bh->b_blocknr = block; in init_page_buffers()
955 set_buffer_uptodate(bh); in init_page_buffers()
957 set_buffer_mapped(bh); in init_page_buffers()
960 bh = bh->b_this_page; in init_page_buffers()
961 } while (bh != head); in init_page_buffers()
980 struct buffer_head *bh; in grow_dev_page() local
1002 bh = page_buffers(page); in grow_dev_page()
1003 if (bh->b_size == size) { in grow_dev_page()
1016 bh = alloc_page_buffers(page, size, 0); in grow_dev_page()
1017 if (!bh) in grow_dev_page()
1026 link_dev_buffers(page, bh); in grow_dev_page()
1090 struct buffer_head *bh; in __getblk_slow() local
1093 bh = __find_get_block(bdev, block, size); in __getblk_slow()
1094 if (bh) in __getblk_slow()
1095 return bh; in __getblk_slow()
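__getblk_slow() backs __getblk()/__getblk_gfp() (source line 1369 below): return the buffer for a device block, creating the page and buffers if need be, but without any I/O. That suits blocks the caller is about to overwrite entirely, where reading the old contents would be wasted work. A sketch, with prepare_overwrite() as a hypothetical helper:

    #include <linux/buffer_head.h>
    #include <linux/string.h>

    /* Hypothetical: get a block we will fully overwrite; no read issued. */
    static struct buffer_head *prepare_overwrite(struct block_device *bdev,
                                                 sector_t block, unsigned size)
    {
            struct buffer_head *bh = __getblk(bdev, block, size);

            if (bh) {
                    lock_buffer(bh);
                    memset(bh->b_data, 0, size);
                    set_buffer_uptodate(bh);     /* contents are now valid */
                    unlock_buffer(bh);
                    mark_buffer_dirty(bh);
            }
            return bh;                           /* caller brelse()s when done */
    }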
1141 void mark_buffer_dirty(struct buffer_head *bh) in mark_buffer_dirty() argument
1143 WARN_ON_ONCE(!buffer_uptodate(bh)); in mark_buffer_dirty()
1145 trace_block_dirty_buffer(bh); in mark_buffer_dirty()
1153 if (buffer_dirty(bh)) { in mark_buffer_dirty()
1155 if (buffer_dirty(bh)) in mark_buffer_dirty()
1159 if (!test_set_buffer_dirty(bh)) { in mark_buffer_dirty()
1160 struct page *page = bh->b_page; in mark_buffer_dirty()
1191 void __bforget(struct buffer_head *bh) in __bforget() argument
1193 clear_buffer_dirty(bh); in __bforget()
1194 if (bh->b_assoc_map) { in __bforget()
1195 struct address_space *buffer_mapping = bh->b_page->mapping; in __bforget()
1198 list_del_init(&bh->b_assoc_buffers); in __bforget()
1199 bh->b_assoc_map = NULL; in __bforget()
1202 __brelse(bh); in __bforget()
1206 static struct buffer_head *__bread_slow(struct buffer_head *bh) in __bread_slow() argument
1208 lock_buffer(bh); in __bread_slow()
1209 if (buffer_uptodate(bh)) { in __bread_slow()
1210 unlock_buffer(bh); in __bread_slow()
1211 return bh; in __bread_slow()
1213 get_bh(bh); in __bread_slow()
1214 bh->b_end_io = end_buffer_read_sync; in __bread_slow()
1215 submit_bh(READ, bh); in __bread_slow()
1216 wait_on_buffer(bh); in __bread_slow()
1217 if (buffer_uptodate(bh)) in __bread_slow()
1218 return bh; in __bread_slow()
1220 brelse(bh); in __bread_slow()
1264 static void bh_lru_install(struct buffer_head *bh) in bh_lru_install() argument
1270 if (__this_cpu_read(bh_lrus.bhs[0]) != bh) { in bh_lru_install()
1275 get_bh(bh); in bh_lru_install()
1276 bhs[out++] = bh; in bh_lru_install()
1281 if (bh2 == bh) { in bh_lru_install()
1314 struct buffer_head *bh = __this_cpu_read(bh_lrus.bhs[i]); in lookup_bh_lru() local
1316 if (bh && bh->b_blocknr == block && bh->b_bdev == bdev && in lookup_bh_lru()
1317 bh->b_size == size) { in lookup_bh_lru()
1324 __this_cpu_write(bh_lrus.bhs[0], bh); in lookup_bh_lru()
1326 get_bh(bh); in lookup_bh_lru()
1327 ret = bh; in lookup_bh_lru()
1343 struct buffer_head *bh = lookup_bh_lru(bdev, block, size); in __find_get_block() local
1345 if (bh == NULL) { in __find_get_block()
1347 bh = __find_get_block_slow(bdev, block); in __find_get_block()
1348 if (bh) in __find_get_block()
1349 bh_lru_install(bh); in __find_get_block()
1351 touch_buffer(bh); in __find_get_block()
1353 return bh; in __find_get_block()
1369 struct buffer_head *bh = __find_get_block(bdev, block, size); in __getblk_gfp() local
1372 if (bh == NULL) in __getblk_gfp()
1373 bh = __getblk_slow(bdev, block, size, gfp); in __getblk_gfp()
1374 return bh; in __getblk_gfp()
1383 struct buffer_head *bh = __getblk(bdev, block, size); in __breadahead() local
1384 if (likely(bh)) { in __breadahead()
1385 ll_rw_block(READA, 1, &bh); in __breadahead()
1386 brelse(bh); in __breadahead()
1407 struct buffer_head *bh = __getblk_gfp(bdev, block, size, gfp); in __bread_gfp() local
1409 if (likely(bh) && !buffer_uptodate(bh)) in __bread_gfp()
1410 bh = __bread_slow(bh); in __bread_gfp()
1411 return bh; in __bread_gfp()
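__bread_gfp() is the synchronous read path behind __bread() and sb_bread(): look the buffer up (or create it) and fall into __bread_slow() only when it is not already uptodate, so repeated reads of a cached block cost no I/O. Typical filesystem usage, sketched with a hypothetical myfs_read_block():

    #include <linux/buffer_head.h>
    #include <linux/string.h>
    #include <linux/errno.h>

    /* Hypothetical: read one block of @sb and copy out its first @len bytes. */
    static int myfs_read_block(struct super_block *sb, sector_t blocknr,
                               void *out, size_t len)
    {
            struct buffer_head *bh = sb_bread(sb, blocknr);

            if (!bh)
                    return -EIO;    /* covers both allocation and read failure */
            memcpy(out, bh->b_data, len);
            brelse(bh);             /* drop the reference sb_bread() took */
            return 0;
    }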
1451 void set_bh_page(struct buffer_head *bh, in set_bh_page() argument
1454 bh->b_page = page; in set_bh_page()
1460 bh->b_data = (char *)(0 + offset); in set_bh_page()
1462 bh->b_data = page_address(page) + offset; in set_bh_page()
1475 static void discard_buffer(struct buffer_head * bh) in discard_buffer() argument
1479 lock_buffer(bh); in discard_buffer()
1480 clear_buffer_dirty(bh); in discard_buffer()
1481 bh->b_bdev = NULL; in discard_buffer()
1482 b_state = bh->b_state; in discard_buffer()
1484 b_state_old = cmpxchg(&bh->b_state, b_state, in discard_buffer()
1490 unlock_buffer(bh); in discard_buffer()
1512 struct buffer_head *head, *bh, *next; in block_invalidatepage() local
1526 bh = head; in block_invalidatepage()
1528 unsigned int next_off = curr_off + bh->b_size; in block_invalidatepage()
1529 next = bh->b_this_page; in block_invalidatepage()
1541 discard_buffer(bh); in block_invalidatepage()
1543 bh = next; in block_invalidatepage()
1544 } while (bh != head); in block_invalidatepage()
1567 struct buffer_head *bh, *head, *tail; in create_empty_buffers() local
1570 bh = head; in create_empty_buffers()
1572 bh->b_state |= b_state; in create_empty_buffers()
1573 tail = bh; in create_empty_buffers()
1574 bh = bh->b_this_page; in create_empty_buffers()
1575 } while (bh); in create_empty_buffers()
1580 bh = head; in create_empty_buffers()
1583 set_buffer_dirty(bh); in create_empty_buffers()
1585 set_buffer_uptodate(bh); in create_empty_buffers()
1586 bh = bh->b_this_page; in create_empty_buffers()
1587 } while (bh != head); in create_empty_buffers()
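create_empty_buffers() attaches a freshly built ring of buffer heads to a page; filesystems typically call it lazily, on the first path (readpage, writepage, write_begin) that finds a page without buffers. The usual guard, assuming the page is locked:

    #include <linux/buffer_head.h>

    /* Ensure @page has buffers before walking them; page must be locked. */
    static struct buffer_head *get_or_create_buffers(struct page *page,
                                                     unsigned blocksize)
    {
            if (!page_has_buffers(page))
                    create_empty_buffers(page, blocksize, 0);
            return page_buffers(page);
    }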
1684 struct buffer_head *bh, *head; in __block_write_full_page() local
1703 bh = head; in __block_write_full_page()
1704 blocksize = bh->b_size; in __block_write_full_page()
1724 clear_buffer_dirty(bh); in __block_write_full_page()
1725 set_buffer_uptodate(bh); in __block_write_full_page()
1726 } else if ((!buffer_mapped(bh) || buffer_delay(bh)) && in __block_write_full_page()
1727 buffer_dirty(bh)) { in __block_write_full_page()
1728 WARN_ON(bh->b_size != blocksize); in __block_write_full_page()
1729 err = get_block(inode, block, bh, 1); in __block_write_full_page()
1732 clear_buffer_delay(bh); in __block_write_full_page()
1733 if (buffer_new(bh)) { in __block_write_full_page()
1735 clear_buffer_new(bh); in __block_write_full_page()
1736 unmap_underlying_metadata(bh->b_bdev, in __block_write_full_page()
1737 bh->b_blocknr); in __block_write_full_page()
1740 bh = bh->b_this_page; in __block_write_full_page()
1742 } while (bh != head); in __block_write_full_page()
1745 if (!buffer_mapped(bh)) in __block_write_full_page()
1755 lock_buffer(bh); in __block_write_full_page()
1756 } else if (!trylock_buffer(bh)) { in __block_write_full_page()
1760 if (test_clear_buffer_dirty(bh)) { in __block_write_full_page()
1761 mark_buffer_async_write_endio(bh, handler); in __block_write_full_page()
1763 unlock_buffer(bh); in __block_write_full_page()
1765 } while ((bh = bh->b_this_page) != head); in __block_write_full_page()
1775 struct buffer_head *next = bh->b_this_page; in __block_write_full_page()
1776 if (buffer_async_write(bh)) { in __block_write_full_page()
1777 submit_bh(write_op, bh); in __block_write_full_page()
1780 bh = next; in __block_write_full_page()
1781 } while (bh != head); in __block_write_full_page()
1808 bh = head; in __block_write_full_page()
1811 if (buffer_mapped(bh) && buffer_dirty(bh) && in __block_write_full_page()
1812 !buffer_delay(bh)) { in __block_write_full_page()
1813 lock_buffer(bh); in __block_write_full_page()
1814 mark_buffer_async_write_endio(bh, handler); in __block_write_full_page()
1820 clear_buffer_dirty(bh); in __block_write_full_page()
1822 } while ((bh = bh->b_this_page) != head); in __block_write_full_page()
1828 struct buffer_head *next = bh->b_this_page; in __block_write_full_page()
1829 if (buffer_async_write(bh)) { in __block_write_full_page()
1830 clear_buffer_dirty(bh); in __block_write_full_page()
1831 submit_bh(write_op, bh); in __block_write_full_page()
1834 bh = next; in __block_write_full_page()
1835 } while (bh != head); in __block_write_full_page()
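__block_write_full_page() is the writeback engine: map each dirty buffer with get_block, mark it for async completion (mark_buffer_async_write_endio(), source line 1761), then submit the whole batch. Filesystems normally reach it through block_write_full_page() from their .writepage method; myfs_get_block and myfs_writepage below are hypothetical:

    #include <linux/buffer_head.h>
    #include <linux/writeback.h>

    /* Hypothetical get_block: map @iblock of @inode into @bh_result. */
    extern int myfs_get_block(struct inode *inode, sector_t iblock,
                              struct buffer_head *bh_result, int create);

    static int myfs_writepage(struct page *page, struct writeback_control *wbc)
    {
            return block_write_full_page(page, myfs_get_block, wbc);
    }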
1848 struct buffer_head *head, *bh; in page_zero_new_buffers() local
1854 bh = head = page_buffers(page); in page_zero_new_buffers()
1857 block_end = block_start + bh->b_size; in page_zero_new_buffers()
1859 if (buffer_new(bh)) { in page_zero_new_buffers()
1868 set_buffer_uptodate(bh); in page_zero_new_buffers()
1871 clear_buffer_new(bh); in page_zero_new_buffers()
1872 mark_buffer_dirty(bh); in page_zero_new_buffers()
1877 bh = bh->b_this_page; in page_zero_new_buffers()
1878 } while (bh != head); in page_zero_new_buffers()
1892 struct buffer_head *bh, *head, *wait[2], **wait_bh=wait; in __block_write_begin() local
1905 for(bh = head, block_start = 0; bh != head || !block_start; in __block_write_begin()
1906 block++, block_start=block_end, bh = bh->b_this_page) { in __block_write_begin()
1910 if (!buffer_uptodate(bh)) in __block_write_begin()
1911 set_buffer_uptodate(bh); in __block_write_begin()
1915 if (buffer_new(bh)) in __block_write_begin()
1916 clear_buffer_new(bh); in __block_write_begin()
1917 if (!buffer_mapped(bh)) { in __block_write_begin()
1918 WARN_ON(bh->b_size != blocksize); in __block_write_begin()
1919 err = get_block(inode, block, bh, 1); in __block_write_begin()
1922 if (buffer_new(bh)) { in __block_write_begin()
1923 unmap_underlying_metadata(bh->b_bdev, in __block_write_begin()
1924 bh->b_blocknr); in __block_write_begin()
1926 clear_buffer_new(bh); in __block_write_begin()
1927 set_buffer_uptodate(bh); in __block_write_begin()
1928 mark_buffer_dirty(bh); in __block_write_begin()
1939 if (!buffer_uptodate(bh)) in __block_write_begin()
1940 set_buffer_uptodate(bh); in __block_write_begin()
1943 if (!buffer_uptodate(bh) && !buffer_delay(bh) && in __block_write_begin()
1944 !buffer_unwritten(bh) && in __block_write_begin()
1946 ll_rw_block(READ, 1, &bh); in __block_write_begin()
1947 *wait_bh++=bh; in __block_write_begin()
1970 struct buffer_head *bh, *head; in __block_commit_write() local
1972 bh = head = page_buffers(page); in __block_commit_write()
1973 blocksize = bh->b_size; in __block_commit_write()
1979 if (!buffer_uptodate(bh)) in __block_commit_write()
1982 set_buffer_uptodate(bh); in __block_commit_write()
1983 mark_buffer_dirty(bh); in __block_commit_write()
1985 clear_buffer_new(bh); in __block_commit_write()
1988 bh = bh->b_this_page; in __block_commit_write()
1989 } while (bh != head); in __block_commit_write()
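__block_write_begin() maps the buffers under the write range and reads in any partially overwritten, not-uptodate blocks; __block_commit_write() then marks the written range uptodate and dirty. Most buffer-head filesystems get both via the generic .write_begin/.write_end helpers; the sketch below assumes the same hypothetical myfs_get_block as above:

    #include <linux/fs.h>
    #include <linux/buffer_head.h>

    extern int myfs_get_block(struct inode *inode, sector_t iblock,
                              struct buffer_head *bh_result, int create);

    static int myfs_write_begin(struct file *file, struct address_space *mapping,
                                loff_t pos, unsigned len, unsigned flags,
                                struct page **pagep, void **fsdata)
    {
            /* block_write_begin() runs __block_write_begin() on the page */
            return block_write_begin(mapping, pos, len, flags, pagep,
                                     myfs_get_block);
    }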
2119 struct buffer_head *bh, *head; in block_is_partially_uptodate() local
2132 bh = head; in block_is_partially_uptodate()
2137 if (!buffer_uptodate(bh)) { in block_is_partially_uptodate()
2145 bh = bh->b_this_page; in block_is_partially_uptodate()
2146 } while (bh != head); in block_is_partially_uptodate()
2163 struct buffer_head *bh, *head, *arr[MAX_BUF_PER_PAGE]; in block_read_full_page() local
2174 bh = head; in block_read_full_page()
2179 if (buffer_uptodate(bh)) in block_read_full_page()
2182 if (!buffer_mapped(bh)) { in block_read_full_page()
2187 WARN_ON(bh->b_size != blocksize); in block_read_full_page()
2188 err = get_block(inode, iblock, bh, 0); in block_read_full_page()
2192 if (!buffer_mapped(bh)) { in block_read_full_page()
2195 set_buffer_uptodate(bh); in block_read_full_page()
2202 if (buffer_uptodate(bh)) in block_read_full_page()
2205 arr[nr++] = bh; in block_read_full_page()
2206 } while (i++, iblock++, (bh = bh->b_this_page) != head); in block_read_full_page()
2224 bh = arr[i]; in block_read_full_page()
2225 lock_buffer(bh); in block_read_full_page()
2226 mark_buffer_async_read(bh); in block_read_full_page()
2235 bh = arr[i]; in block_read_full_page()
2236 if (buffer_uptodate(bh)) in block_read_full_page()
2237 end_buffer_async_read(bh, 1); in block_read_full_page()
2239 submit_bh(READ, bh); in block_read_full_page()
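block_read_full_page() gathers the mapped, not-uptodate buffers into arr[], marks each one async (source line 2226), and submits them together; a wholly uptodate page completes without any I/O. It is the stock .readpage for buffer-head filesystems, again assuming the hypothetical myfs_get_block:

    #include <linux/fs.h>
    #include <linux/buffer_head.h>

    extern int myfs_get_block(struct inode *inode, sector_t iblock,
                              struct buffer_head *bh_result, int create);

    static int myfs_readpage(struct file *file, struct page *page)
    {
            return block_read_full_page(page, myfs_get_block);
    }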
2466 static void end_buffer_read_nobh(struct buffer_head *bh, int uptodate) in end_buffer_read_nobh() argument
2468 __end_buffer_read_notouch(bh, uptodate); in end_buffer_read_nobh()
2478 struct buffer_head *bh; in attach_nobh_buffers() local
2483 bh = head; in attach_nobh_buffers()
2486 set_buffer_dirty(bh); in attach_nobh_buffers()
2487 if (!bh->b_this_page) in attach_nobh_buffers()
2488 bh->b_this_page = head; in attach_nobh_buffers()
2489 bh = bh->b_this_page; in attach_nobh_buffers()
2490 } while (bh != head); in attach_nobh_buffers()
2508 struct buffer_head *head, *bh; in nobh_write_begin() local
2561 for (block_start = 0, block_in_page = 0, bh = head; in nobh_write_begin()
2563 block_in_page++, block_start += blocksize, bh = bh->b_this_page) { in nobh_write_begin()
2567 bh->b_state = 0; in nobh_write_begin()
2572 bh, create); in nobh_write_begin()
2575 if (!buffer_mapped(bh)) in nobh_write_begin()
2577 if (buffer_new(bh)) in nobh_write_begin()
2578 unmap_underlying_metadata(bh->b_bdev, bh->b_blocknr); in nobh_write_begin()
2580 set_buffer_uptodate(bh); in nobh_write_begin()
2583 if (buffer_new(bh) || !buffer_mapped(bh)) { in nobh_write_begin()
2588 if (buffer_uptodate(bh)) in nobh_write_begin()
2591 lock_buffer(bh); in nobh_write_begin()
2592 bh->b_end_io = end_buffer_read_nobh; in nobh_write_begin()
2593 submit_bh(READ, bh); in nobh_write_begin()
2604 for (bh = head; bh; bh = bh->b_this_page) { in nobh_write_begin()
2605 wait_on_buffer(bh); in nobh_write_begin()
2606 if (!buffer_uptodate(bh)) in nobh_write_begin()
2647 struct buffer_head *bh; in nobh_write_end() local
2667 bh = head; in nobh_write_end()
2669 free_buffer_head(bh); in nobh_write_end()
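The nobh_* variants let a filesystem write pages without keeping buffer heads attached afterwards: nobh_write_begin() allocates a throwaway set, reads only what the write does not cover (end_buffer_read_nobh(), source line 2466), and nobh_write_end() frees them again. A hedged usage sketch, same hypothetical myfs_get_block:

    #include <linux/fs.h>
    #include <linux/buffer_head.h>

    extern int myfs_get_block(struct inode *inode, sector_t iblock,
                              struct buffer_head *bh_result, int create);

    static int myfs_nobh_write_begin(struct file *file,
                                     struct address_space *mapping,
                                     loff_t pos, unsigned len, unsigned flags,
                                     struct page **pagep, void **fsdata)
    {
            return nobh_write_begin(mapping, pos, len, flags, pagep, fsdata,
                                    myfs_get_block);
    }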
2816 struct buffer_head *bh; in block_truncate_page() local
2838 bh = page_buffers(page); in block_truncate_page()
2841 bh = bh->b_this_page; in block_truncate_page()
2847 if (!buffer_mapped(bh)) { in block_truncate_page()
2848 WARN_ON(bh->b_size != blocksize); in block_truncate_page()
2849 err = get_block(inode, iblock, bh, 0); in block_truncate_page()
2853 if (!buffer_mapped(bh)) in block_truncate_page()
2859 set_buffer_uptodate(bh); in block_truncate_page()
2861 if (!buffer_uptodate(bh) && !buffer_delay(bh) && !buffer_unwritten(bh)) { in block_truncate_page()
2863 ll_rw_block(READ, 1, &bh); in block_truncate_page()
2864 wait_on_buffer(bh); in block_truncate_page()
2866 if (!buffer_uptodate(bh)) in block_truncate_page()
2871 mark_buffer_dirty(bh); in block_truncate_page()
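block_truncate_page() zeroes the tail of the last partial block at the new EOF, reading it first (source line 2863) when it is not uptodate, so stale bytes never reappear past i_size. A typical call from a filesystem's truncate path, same hypothetical get_block:

    #include <linux/fs.h>
    #include <linux/buffer_head.h>

    extern int myfs_get_block(struct inode *inode, sector_t iblock,
                              struct buffer_head *bh_result, int create);

    static int myfs_zero_eof_block(struct inode *inode, loff_t newsize)
    {
            return block_truncate_page(inode->i_mapping, newsize,
                                       myfs_get_block);
    }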
2939 struct buffer_head *bh = bio->bi_private; in end_bio_bh_io_sync() local
2946 set_bit(BH_Quiet, &bh->b_state); in end_bio_bh_io_sync()
2948 bh->b_end_io(bh, test_bit(BIO_UPTODATE, &bio->bi_flags)); in end_bio_bh_io_sync()
3000 int _submit_bh(int rw, struct buffer_head *bh, unsigned long bio_flags) in _submit_bh() argument
3005 BUG_ON(!buffer_locked(bh)); in _submit_bh()
3006 BUG_ON(!buffer_mapped(bh)); in _submit_bh()
3007 BUG_ON(!bh->b_end_io); in _submit_bh()
3008 BUG_ON(buffer_delay(bh)); in _submit_bh()
3009 BUG_ON(buffer_unwritten(bh)); in _submit_bh()
3014 if (test_set_buffer_req(bh) && (rw & WRITE)) in _submit_bh()
3015 clear_buffer_write_io_error(bh); in _submit_bh()
3023 bio->bi_iter.bi_sector = bh->b_blocknr * (bh->b_size >> 9); in _submit_bh()
3024 bio->bi_bdev = bh->b_bdev; in _submit_bh()
3025 bio->bi_io_vec[0].bv_page = bh->b_page; in _submit_bh()
3026 bio->bi_io_vec[0].bv_len = bh->b_size; in _submit_bh()
3027 bio->bi_io_vec[0].bv_offset = bh_offset(bh); in _submit_bh()
3030 bio->bi_iter.bi_size = bh->b_size; in _submit_bh()
3033 bio->bi_private = bh; in _submit_bh()
3039 if (buffer_meta(bh)) in _submit_bh()
3041 if (buffer_prio(bh)) in _submit_bh()
3055 int submit_bh(int rw, struct buffer_head *bh) in submit_bh() argument
3057 return _submit_bh(rw, bh, 0); in submit_bh()
3091 struct buffer_head *bh = bhs[i]; in ll_rw_block() local
3093 if (!trylock_buffer(bh)) in ll_rw_block()
3096 if (test_clear_buffer_dirty(bh)) { in ll_rw_block()
3097 bh->b_end_io = end_buffer_write_sync; in ll_rw_block()
3098 get_bh(bh); in ll_rw_block()
3099 submit_bh(WRITE, bh); in ll_rw_block()
3103 if (!buffer_uptodate(bh)) { in ll_rw_block()
3104 bh->b_end_io = end_buffer_read_sync; in ll_rw_block()
3105 get_bh(bh); in ll_rw_block()
3106 submit_bh(rw, bh); in ll_rw_block()
3110 unlock_buffer(bh); in ll_rw_block()
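ll_rw_block() is best-effort: it skips buffers it cannot trylock (source line 3093) and buffers that already look uptodate or clean, so a caller that must have the data still checks buffer_uptodate() after waiting. The defensive pattern:

    #include <linux/buffer_head.h>
    #include <linux/errno.h>

    /* Read @bh if needed, tolerating ll_rw_block()'s best-effort semantics. */
    static int read_block_checked(struct buffer_head *bh)
    {
            if (!buffer_uptodate(bh)) {
                    ll_rw_block(READ, 1, &bh);
                    wait_on_buffer(bh);
                    if (!buffer_uptodate(bh))
                            return -EIO;    /* read failed (or never ran) */
            }
            return 0;
    }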
3115 void write_dirty_buffer(struct buffer_head *bh, int rw) in write_dirty_buffer() argument
3117 lock_buffer(bh); in write_dirty_buffer()
3118 if (!test_clear_buffer_dirty(bh)) { in write_dirty_buffer()
3119 unlock_buffer(bh); in write_dirty_buffer()
3122 bh->b_end_io = end_buffer_write_sync; in write_dirty_buffer()
3123 get_bh(bh); in write_dirty_buffer()
3124 submit_bh(rw, bh); in write_dirty_buffer()
3133 int __sync_dirty_buffer(struct buffer_head *bh, int rw) in __sync_dirty_buffer() argument
3137 WARN_ON(atomic_read(&bh->b_count) < 1); in __sync_dirty_buffer()
3138 lock_buffer(bh); in __sync_dirty_buffer()
3139 if (test_clear_buffer_dirty(bh)) { in __sync_dirty_buffer()
3140 get_bh(bh); in __sync_dirty_buffer()
3141 bh->b_end_io = end_buffer_write_sync; in __sync_dirty_buffer()
3142 ret = submit_bh(rw, bh); in __sync_dirty_buffer()
3143 wait_on_buffer(bh); in __sync_dirty_buffer()
3144 if (!ret && !buffer_uptodate(bh)) in __sync_dirty_buffer()
3147 unlock_buffer(bh); in __sync_dirty_buffer()
3153 int sync_dirty_buffer(struct buffer_head *bh) in sync_dirty_buffer() argument
3155 return __sync_dirty_buffer(bh, WRITE_SYNC); in sync_dirty_buffer()
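sync_dirty_buffer() is the one-buffer synchronous flush: lock, test-and-clear the dirty bit, write with WRITE_SYNC, wait, and report failure. The common metadata-update pattern for filesystems that do not journal, with hypothetical names:

    #include <linux/buffer_head.h>
    #include <linux/string.h>

    /* Hypothetical: overwrite a cached block and flush it to disk now. */
    static int myfs_update_and_flush(struct buffer_head *bh,
                                     const void *data, size_t len)
    {
            memcpy(bh->b_data, data, len);
            mark_buffer_dirty(bh);
            return sync_dirty_buffer(bh);   /* 0 on success, -EIO on I/O error */
    }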
3179 static inline int buffer_busy(struct buffer_head *bh) in buffer_busy() argument
3181 return atomic_read(&bh->b_count) | in buffer_busy()
3182 (bh->b_state & ((1 << BH_Dirty) | (1 << BH_Lock))); in buffer_busy()
3189 struct buffer_head *bh; in drop_buffers() local
3191 bh = head; in drop_buffers()
3193 if (buffer_write_io_error(bh) && page->mapping) in drop_buffers()
3195 if (buffer_busy(bh)) in drop_buffers()
3197 bh = bh->b_this_page; in drop_buffers()
3198 } while (bh != head); in drop_buffers()
3201 struct buffer_head *next = bh->b_this_page; in drop_buffers()
3203 if (bh->b_assoc_map) in drop_buffers()
3204 __remove_assoc_queue(bh); in drop_buffers()
3205 bh = next; in drop_buffers()
3206 } while (bh != head); in drop_buffers()
3251 struct buffer_head *bh = buffers_to_free; in try_to_free_buffers() local
3254 struct buffer_head *next = bh->b_this_page; in try_to_free_buffers()
3255 free_buffer_head(bh); in try_to_free_buffers()
3256 bh = next; in try_to_free_buffers()
3257 } while (bh != buffers_to_free); in try_to_free_buffers()
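drop_buffers()/try_to_free_buffers() detach and free a page's buffers once none of them is busy (buffer_busy(), source line 3179). A filesystem with no extra per-page state can expose this directly as its .releasepage so the VM can reclaim buffer-backed pages:

    #include <linux/buffer_head.h>

    /* Minimal .releasepage: succeed iff the buffers could be freed. */
    static int myfs_releasepage(struct page *page, gfp_t gfp)
    {
            return try_to_free_buffers(page);
    }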
3337 void free_buffer_head(struct buffer_head *bh) in free_buffer_head() argument
3339 BUG_ON(!list_empty(&bh->b_assoc_buffers)); in free_buffer_head()
3340 kmem_cache_free(bh_cachep, bh); in free_buffer_head()
3376 int bh_uptodate_or_lock(struct buffer_head *bh) in bh_uptodate_or_lock() argument
3378 if (!buffer_uptodate(bh)) { in bh_uptodate_or_lock()
3379 lock_buffer(bh); in bh_uptodate_or_lock()
3380 if (!buffer_uptodate(bh)) in bh_uptodate_or_lock()
3382 unlock_buffer(bh); in bh_uptodate_or_lock()
3394 int bh_submit_read(struct buffer_head *bh) in bh_submit_read() argument
3396 BUG_ON(!buffer_locked(bh)); in bh_submit_read()
3398 if (buffer_uptodate(bh)) { in bh_submit_read()
3399 unlock_buffer(bh); in bh_submit_read()
3403 get_bh(bh); in bh_submit_read()
3404 bh->b_end_io = end_buffer_read_sync; in bh_submit_read()
3405 submit_bh(READ, bh); in bh_submit_read()
3406 wait_on_buffer(bh); in bh_submit_read()
3407 if (buffer_uptodate(bh)) in bh_submit_read()
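bh_uptodate_or_lock() and bh_submit_read() pair into a race-free lazy read: the first returns nonzero if the buffer is already uptodate, otherwise it returns with the buffer locked, and the second consumes that lock to issue and wait for the read. A sketch of the combined pattern:

    #include <linux/buffer_head.h>

    /* Ensure @bh is uptodate, reading it at most once; 0 or -EIO. */
    static int read_bh_if_needed(struct buffer_head *bh)
    {
            if (bh_uptodate_or_lock(bh))
                    return 0;               /* already uptodate; lock not held */
            return bh_submit_read(bh);      /* consumes the lock we now hold */
    }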