Lines Matching refs:bh

45 struct buffer_head *bh, *head; in xfs_count_page_state() local
49 bh = head = page_buffers(page); in xfs_count_page_state()
51 if (buffer_unwritten(bh)) in xfs_count_page_state()
53 else if (buffer_delay(bh)) in xfs_count_page_state()
55 } while ((bh = bh->b_this_page) != head); in xfs_count_page_state()
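These matches are the whole of xfs_count_page_state(): a circular walk over the page's buffer list that records whether any buffer is unwritten or delayed-allocated. A minimal sketch of how they fit together, assuming the two counter arguments (*delalloc, *unwritten) that the excerpt does not show:

void
xfs_count_page_state(
        struct page             *page,
        int                     *delalloc,
        int                     *unwritten)
{
        struct buffer_head      *bh, *head;

        *delalloc = *unwritten = 0;

        /* walk the circular list of buffers attached to the page */
        bh = head = page_buffers(page);
        do {
                if (buffer_unwritten(bh))
                        (*unwritten) = 1;
                else if (buffer_delay(bh))
                        (*delalloc) = 1;
        } while ((bh = bh->b_this_page) != head);
}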
81 struct buffer_head *bh, *next; in xfs_destroy_ioend() local
83 for (bh = ioend->io_buffer_head; bh; bh = next) { in xfs_destroy_ioend()
84 next = bh->b_private; in xfs_destroy_ioend()
85 bh->b_end_io(bh, !ioend->io_error); in xfs_destroy_ioend()
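xfs_destroy_ioend() completes every buffer attached to an ioend. The buffers form a singly linked list threaded through b_private (built by xfs_add_to_ioend(), further down), and each buffer's b_end_io callback is told whether the I/O succeeded. A sketch, assuming the ioend structure and mempool used elsewhere in this file:

static void
xfs_destroy_ioend(
        struct xfs_ioend        *ioend)
{
        struct buffer_head      *bh, *next;

        for (bh = ioend->io_buffer_head; bh; bh = next) {
                next = bh->b_private;   /* chain link, saved before completion */
                bh->b_end_io(bh, !ioend->io_error);
        }

        mempool_free(ioend, xfs_ioend_pool);    /* pool name assumed */
}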
384 struct buffer_head *bh) in xfs_alloc_ioend_bio() argument
386 int nvecs = bio_get_nr_vecs(bh->b_bdev); in xfs_alloc_ioend_bio()
390 bio->bi_iter.bi_sector = bh->b_blocknr * (bh->b_size >> 9); in xfs_alloc_ioend_bio()
391 bio->bi_bdev = bh->b_bdev; in xfs_alloc_ioend_bio()
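xfs_alloc_ioend_bio() allocates a bio sized for the buffer's block device and seeds its starting sector from the buffer. b_blocknr counts blocks of b_size bytes, so the 512-byte sector is b_blocknr * (b_size >> 9). A sketch under that assumption:

static struct bio *
xfs_alloc_ioend_bio(
        struct buffer_head      *bh)
{
        int             nvecs = bio_get_nr_vecs(bh->b_bdev);
        struct bio      *bio = bio_alloc(GFP_NOIO, nvecs);

        /* convert the filesystem block number into a 512-byte sector */
        bio->bi_iter.bi_sector = bh->b_blocknr * (bh->b_size >> 9);
        bio->bi_bdev = bh->b_bdev;
        return bio;
}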
397 struct buffer_head *bh) in xfs_start_buffer_writeback() argument
399 ASSERT(buffer_mapped(bh)); in xfs_start_buffer_writeback()
400 ASSERT(buffer_locked(bh)); in xfs_start_buffer_writeback()
401 ASSERT(!buffer_delay(bh)); in xfs_start_buffer_writeback()
402 ASSERT(!buffer_unwritten(bh)); in xfs_start_buffer_writeback()
404 mark_buffer_async_write(bh); in xfs_start_buffer_writeback()
405 set_buffer_uptodate(bh); in xfs_start_buffer_writeback()
406 clear_buffer_dirty(bh); in xfs_start_buffer_writeback()
438 static inline int xfs_bio_add_buffer(struct bio *bio, struct buffer_head *bh) in xfs_bio_add_buffer() argument
440 return bio_add_page(bio, bh->b_page, bh->b_size, bh_offset(bh)); in xfs_bio_add_buffer()
473 struct buffer_head *bh; in xfs_submit_ioend() local
480 for (bh = ioend->io_buffer_head; bh; bh = bh->b_private) in xfs_submit_ioend()
481 xfs_start_buffer_writeback(bh); in xfs_submit_ioend()
502 for (bh = ioend->io_buffer_head; bh; bh = bh->b_private) { in xfs_submit_ioend()
506 bio = xfs_alloc_ioend_bio(bh); in xfs_submit_ioend()
507 } else if (bh->b_blocknr != lastblock + 1) { in xfs_submit_ioend()
512 if (xfs_bio_add_buffer(bio, bh) != bh->b_size) { in xfs_submit_ioend()
517 lastblock = bh->b_blocknr; in xfs_submit_ioend()
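The loop at lines 502-517 batches contiguous buffers into bios: allocate a bio for the first buffer, start a new one whenever the next buffer is not physically adjacent (b_blocknr != lastblock + 1) or no longer fits, and submit the previous bio at each break. A condensed sketch of that loop; xfs_submit_ioend_bio() is the submission helper assumed from the rest of the file:

        for (bh = ioend->io_buffer_head; bh; bh = bh->b_private) {
                if (!bio) {
retry:
                        bio = xfs_alloc_ioend_bio(bh);
                } else if (bh->b_blocknr != lastblock + 1) {
                        /* discontiguous: submit what we have, start a new bio */
                        xfs_submit_ioend_bio(wbc, ioend, bio);
                        goto retry;
                }

                if (xfs_bio_add_buffer(bio, bh) != bh->b_size) {
                        /* bio is full: submit it and retry this buffer */
                        xfs_submit_ioend_bio(wbc, ioend, bio);
                        goto retry;
                }

                lastblock = bh->b_blocknr;
        }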
535 struct buffer_head *bh, *next_bh; in xfs_cancel_ioend() local
539 bh = ioend->io_buffer_head; in xfs_cancel_ioend()
541 next_bh = bh->b_private; in xfs_cancel_ioend()
542 clear_buffer_async_write(bh); in xfs_cancel_ioend()
549 set_buffer_unwritten(bh); in xfs_cancel_ioend()
550 unlock_buffer(bh); in xfs_cancel_ioend()
551 } while ((bh = next_bh) != NULL); in xfs_cancel_ioend()
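xfs_cancel_ioend() unwinds xfs_start_buffer_writeback() for a chain that will not be submitted: clear the async-write flag, restore the unwritten bit if that is what this ioend was tracking, and unlock each buffer. A sketch of the inner buffer loop; the XFS_IO_UNWRITTEN type check is assumed from how the ioend type is used elsewhere in the file:

        bh = ioend->io_buffer_head;
        do {
                next_bh = bh->b_private;
                clear_buffer_async_write(bh);
                /*
                 * The unwritten flag was cleared when the buffer joined the
                 * ioend; since it is not being written now, mark it
                 * unwritten again for the next writeback attempt.
                 */
                if (ioend->io_type == XFS_IO_UNWRITTEN)
                        set_buffer_unwritten(bh);
                unlock_buffer(bh);
        } while ((bh = next_bh) != NULL);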
566 struct buffer_head *bh, in xfs_add_to_ioend() argument
579 ioend->io_buffer_head = bh; in xfs_add_to_ioend()
580 ioend->io_buffer_tail = bh; in xfs_add_to_ioend()
585 ioend->io_buffer_tail->b_private = bh; in xfs_add_to_ioend()
586 ioend->io_buffer_tail = bh; in xfs_add_to_ioend()
589 bh->b_private = NULL; in xfs_add_to_ioend()
590 ioend->io_size += bh->b_size; in xfs_add_to_ioend()
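Lines 579-590 are the heart of the ioend chaining scheme: the first buffer becomes both io_buffer_head and io_buffer_tail, later buffers are appended through the previous tail's b_private, and the new buffer's own b_private is cleared so it terminates the list. A sketch, with the allocation path abbreviated (xfs_alloc_ioend() is assumed from the same file):

        if (!ioend || need_ioend || type != ioend->io_type) {
                /* start a new ioend with this buffer at its head */
                ioend = xfs_alloc_ioend(inode, type);
                ioend->io_offset = offset;
                ioend->io_buffer_head = bh;
                ioend->io_buffer_tail = bh;
        } else {
                /* append to the existing chain via the tail's b_private */
                ioend->io_buffer_tail->b_private = bh;
                ioend->io_buffer_tail = bh;
        }

        bh->b_private = NULL;           /* new list terminator */
        ioend->io_size += bh->b_size;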
596 struct buffer_head *bh, in xfs_map_buffer() argument
613 bh->b_blocknr = bn; in xfs_map_buffer()
614 set_buffer_mapped(bh); in xfs_map_buffer()
620 struct buffer_head *bh, in xfs_map_at_offset() argument
627 xfs_map_buffer(inode, bh, imap, offset); in xfs_map_at_offset()
628 set_buffer_mapped(bh); in xfs_map_at_offset()
629 clear_buffer_delay(bh); in xfs_map_at_offset()
630 clear_buffer_unwritten(bh); in xfs_map_at_offset()
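xfs_map_buffer() translates the cached extent (imap) plus the file offset into a disk block number for b_blocknr, and xfs_map_at_offset() then flips the buffer from delayed/unwritten to plain mapped state. A sketch of the two steps; the block-number arithmetic from the extent is elided:

        /* xfs_map_buffer(): point the buffer at its disk block */
        bh->b_blocknr = bn;             /* bn computed from imap and offset */
        set_buffer_mapped(bh);

        /* xfs_map_at_offset(): normalise buffer state for writeback */
        xfs_map_buffer(inode, bh, imap, offset);
        set_buffer_mapped(bh);
        clear_buffer_delay(bh);
        clear_buffer_unwritten(bh);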
645 struct buffer_head *bh; in xfs_check_page_type() local
655 bh = head = page_buffers(page); in xfs_check_page_type()
657 if (buffer_unwritten(bh)) { in xfs_check_page_type()
660 } else if (buffer_delay(bh)) { in xfs_check_page_type()
663 } else if (buffer_dirty(bh) && buffer_mapped(bh)) { in xfs_check_page_type()
671 } while ((bh = bh->b_this_page) != head); in xfs_check_page_type()
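xfs_check_page_type() classifies a page by walking its buffers and comparing each buffer's state against the I/O type the caller is building. A condensed sketch of the loop, assuming the XFS_IO_* constants used elsewhere in this file; the variant that checks only the first buffer is omitted:

        bh = head = page_buffers(page);
        do {
                if (buffer_unwritten(bh)) {
                        if (type == XFS_IO_UNWRITTEN)
                                return true;
                } else if (buffer_delay(bh)) {
                        if (type == XFS_IO_DELALLOC)
                                return true;
                } else if (buffer_dirty(bh) && buffer_mapped(bh)) {
                        if (type == XFS_IO_OVERWRITE)
                                return true;
                }
        } while ((bh = bh->b_this_page) != head);

        return false;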
691 struct buffer_head *bh, *head; in xfs_convert_page() local
761 bh = head = page_buffers(page); in xfs_convert_page()
765 if (!buffer_uptodate(bh)) in xfs_convert_page()
767 if (!(PageUptodate(page) || buffer_uptodate(bh))) { in xfs_convert_page()
772 if (buffer_unwritten(bh) || buffer_delay(bh) || in xfs_convert_page()
773 buffer_mapped(bh)) { in xfs_convert_page()
774 if (buffer_unwritten(bh)) in xfs_convert_page()
776 else if (buffer_delay(bh)) in xfs_convert_page()
787 lock_buffer(bh); in xfs_convert_page()
789 xfs_map_at_offset(inode, bh, imap, offset); in xfs_convert_page()
790 xfs_add_to_ioend(inode, bh, offset, type, in xfs_convert_page()
799 } while (offset += len, (bh = bh->b_this_page) != head); in xfs_convert_page()
801 if (uptodate && bh == head) in xfs_convert_page()
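xfs_convert_page() walks an additional page's buffers and queues any delayed, unwritten, or already-mapped buffer covered by the current extent, mapping it first when needed. A condensed sketch of the per-buffer loop reconstructed from the matches above; the extent-validity check and page bookkeeping of the full function are elided:

        bh = head = page_buffers(page);
        do {
                if (offset >= end_offset)
                        break;
                if (!buffer_uptodate(bh))
                        uptodate = 0;
                if (!(PageUptodate(page) || buffer_uptodate(bh))) {
                        done = 1;
                        continue;
                }

                if (buffer_unwritten(bh) || buffer_delay(bh) ||
                    buffer_mapped(bh)) {
                        if (buffer_unwritten(bh))
                                type = XFS_IO_UNWRITTEN;
                        else if (buffer_delay(bh))
                                type = XFS_IO_DELALLOC;
                        else
                                type = XFS_IO_OVERWRITE;

                        lock_buffer(bh);
                        /* delayed/unwritten buffers need a real mapping first */
                        if (type != XFS_IO_OVERWRITE)
                                xfs_map_at_offset(inode, bh, imap, offset);
                        xfs_add_to_ioend(inode, bh, offset, type,
                                         ioendp, done);
                        count++;
                } else {
                        done = 1;
                }
        } while (offset += len, (bh = bh->b_this_page) != head);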
886 struct buffer_head *bh, *head; in xfs_aops_discard_page() local
900 bh = head = page_buffers(page); in xfs_aops_discard_page()
905 if (!buffer_delay(bh)) in xfs_aops_discard_page()
921 } while ((bh = bh->b_this_page) != head); in xfs_aops_discard_page()
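xfs_aops_discard_page() is the failure path: for each buffer that still carries delalloc state it punches the corresponding delayed allocation out of the inode before the page is invalidated. A condensed sketch of the loop; xfs_bmap_punch_delalloc_range() and the byte-to-fsb conversion are recalled from the surrounding file rather than shown in the excerpt:

        bh = head = page_buffers(page);
        do {
                int             error;
                xfs_fileoff_t   start_fsb;

                if (!buffer_delay(bh))
                        goto next_buffer;

                /* punch out the delayed allocation backing this buffer */
                start_fsb = XFS_B_TO_FSBT(ip->i_mount, offset);
                error = xfs_bmap_punch_delalloc_range(ip, start_fsb, 1);
                if (error)
                        break;
next_buffer:
                offset += 1 << inode->i_blkbits;
        } while ((bh = bh->b_this_page) != head);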
943 struct buffer_head *bh, *head; in xfs_vm_writepage() local
1049 bh = head = page_buffers(page); in xfs_vm_writepage()
1061 if (!buffer_uptodate(bh)) in xfs_vm_writepage()
1070 if (!buffer_mapped(bh) && buffer_uptodate(bh)) { in xfs_vm_writepage()
1075 if (buffer_unwritten(bh)) { in xfs_vm_writepage()
1080 } else if (buffer_delay(bh)) { in xfs_vm_writepage()
1085 } else if (buffer_uptodate(bh)) { in xfs_vm_writepage()
1092 ASSERT(buffer_mapped(bh)); in xfs_vm_writepage()
1122 lock_buffer(bh); in xfs_vm_writepage()
1124 xfs_map_at_offset(inode, bh, &imap, offset); in xfs_vm_writepage()
1125 xfs_add_to_ioend(inode, bh, offset, type, &ioend, in xfs_vm_writepage()
1133 } while (offset += len, ((bh = bh->b_this_page) != head)); in xfs_vm_writepage()
1135 if (uptodate && bh == head) in xfs_vm_writepage()
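The writepage loop at lines 1049-1135 follows the same per-buffer pattern as xfs_convert_page(): skip buffers covering clean holes, classify the rest as unwritten, delalloc, or overwrite, then map and queue them on the ioend. A condensed sketch of that classification; the xfs_map_blocks() lookup and error handling are elided:

        bh = head = page_buffers(page);
        do {
                if (offset >= end_offset)
                        break;
                if (!buffer_uptodate(bh))
                        uptodate = 0;

                /* a clean hole (!mapped but uptodate): drop the cached map */
                if (!buffer_mapped(bh) && buffer_uptodate(bh)) {
                        imap_valid = 0;
                        continue;
                }

                if (buffer_unwritten(bh)) {
                        type = XFS_IO_UNWRITTEN;
                } else if (buffer_delay(bh)) {
                        type = XFS_IO_DELALLOC;
                } else if (buffer_uptodate(bh)) {
                        type = XFS_IO_OVERWRITE;
                } else {
                        /* not uptodate: nothing to write from this buffer */
                        if (PageUptodate(page))
                                ASSERT(buffer_mapped(bh));
                        imap_valid = 0;
                        continue;
                }

                /* ... xfs_map_blocks() refreshes imap when it is stale ... */
                if (imap_valid) {
                        lock_buffer(bh);
                        if (type != XFS_IO_OVERWRITE)
                                xfs_map_at_offset(inode, bh, &imap, offset);
                        xfs_add_to_ioend(inode, bh, offset, type, &ioend,
                                         new_ioend);
                        count++;
                }
        } while (offset += len, ((bh = bh->b_this_page) != head));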
1677 struct buffer_head *bh, *head; in xfs_vm_write_failed() local
1696 for (bh = head; bh != head || !block_start; in xfs_vm_write_failed()
1697 bh = bh->b_this_page, block_start = block_end, in xfs_vm_write_failed()
1698 block_offset += bh->b_size) { in xfs_vm_write_failed()
1699 block_end = block_start + bh->b_size; in xfs_vm_write_failed()
1709 if (!buffer_delay(bh)) in xfs_vm_write_failed()
1712 if (!buffer_new(bh) && block_offset < i_size_read(inode)) in xfs_vm_write_failed()
1716 block_offset + bh->b_size); in xfs_vm_write_failed()
1722 clear_buffer_delay(bh); in xfs_vm_write_failed()
1723 clear_buffer_uptodate(bh); in xfs_vm_write_failed()
1724 clear_buffer_mapped(bh); in xfs_vm_write_failed()
1725 clear_buffer_new(bh); in xfs_vm_write_failed()
1726 clear_buffer_dirty(bh); in xfs_vm_write_failed()
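xfs_vm_write_failed() walks the buffers covering a failed write and, for the delalloc buffers created inside the range, punches the delayed allocation back out and clears every state bit so the buffer looks untouched. A condensed sketch of the loop; xfs_vm_kill_delalloc_range() is the punch helper assumed from the surrounding file:

        head = page_buffers(page);
        block_start = 0;
        for (bh = head; bh != head || !block_start;
             bh = bh->b_this_page, block_start = block_end,
             block_offset += bh->b_size) {
                block_end = block_start + bh->b_size;

                /* skip buffers before the write, stop once past it */
                if (block_end <= from)
                        continue;
                if (block_start >= to)
                        break;

                if (!buffer_delay(bh))
                        continue;
                if (!buffer_new(bh) && block_offset < i_size_read(inode))
                        continue;

                xfs_vm_kill_delalloc_range(inode, block_offset,
                                           block_offset + bh->b_size);

                /* this buffer no longer contains data: reset its state */
                clear_buffer_delay(bh);
                clear_buffer_uptodate(bh);
                clear_buffer_mapped(bh);
                clear_buffer_new(bh);
                clear_buffer_dirty(bh);
        }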
1887 struct buffer_head *bh = head; in xfs_vm_set_page_dirty() local
1891 set_buffer_dirty(bh); in xfs_vm_set_page_dirty()
1892 bh = bh->b_this_page; in xfs_vm_set_page_dirty()
1894 } while (bh != head); in xfs_vm_set_page_dirty()
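xfs_vm_set_page_dirty() dirties every buffer on the page before the page itself is marked dirty, so per-buffer delalloc state is never left behind a clean buffer. A short sketch of that walk, under the mapping's private_lock which the rest of the function holds:

        spin_lock(&mapping->private_lock);
        if (page_has_buffers(page)) {
                struct buffer_head *head = page_buffers(page);
                struct buffer_head *bh = head;

                do {
                        set_buffer_dirty(bh);
                        bh = bh->b_this_page;
                } while (bh != head);
        }
        newly_dirty = !TestSetPageDirty(page);
        spin_unlock(&mapping->private_lock);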