Lines matching refs: bp (identifier cross-reference for the buffer pointer bp in fs/xfs/xfs_buf.c; the number at the start of each entry is the line in that file, and "argument"/"local" notes how bp is declared there)

48 # define XB_SET_OWNER(bp)	((bp)->b_last_holder = current->pid)  argument
49 # define XB_CLEAR_OWNER(bp) ((bp)->b_last_holder = -1) argument
50 # define XB_GET_OWNER(bp) ((bp)->b_last_holder) argument
52 # define XB_SET_OWNER(bp) do { } while (0) argument
53 # define XB_CLEAR_OWNER(bp) do { } while (0) argument
54 # define XB_GET_OWNER(bp) do { } while (0) argument
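
The three macros above are the lock-ownership tracking hooks: with XFS_BUF_LOCK_TRACKING compiled in they record the pid of the last lock holder, otherwise they expand to nothing. A minimal userspace sketch of the same compile-time switch (BUF_LOCK_TRACKING, struct buf and the main() driver are illustrative, not from xfs_buf.c):

    #include <stdio.h>
    #include <unistd.h>

    struct buf { long last_holder; };          /* b_last_holder in the real struct */

    #ifdef BUF_LOCK_TRACKING
    # define SET_OWNER(b)    ((b)->last_holder = (long)getpid())
    # define CLEAR_OWNER(b)  ((b)->last_holder = -1)
    #else
    # define SET_OWNER(b)    do { } while (0)  /* compiles away, like lines 52-54 */
    # define CLEAR_OWNER(b)  do { } while (0)
    #endif

    int main(void)
    {
            struct buf b = { .last_holder = -1 };

            SET_OWNER(&b);                     /* no-op unless BUF_LOCK_TRACKING is set */
            printf("last holder: %ld\n", b.last_holder);
            CLEAR_OWNER(&b);
            return 0;
    }
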
63 struct xfs_buf *bp) in xfs_buf_is_vmapped() argument
72 return bp->b_addr && bp->b_page_count > 1; in xfs_buf_is_vmapped()
77 struct xfs_buf *bp) in xfs_buf_vmap_len() argument
79 return (bp->b_page_count * PAGE_SIZE) - bp->b_offset; in xfs_buf_vmap_len()
92 struct xfs_buf *bp) in xfs_buf_stale() argument
94 ASSERT(xfs_buf_islocked(bp)); in xfs_buf_stale()
96 bp->b_flags |= XBF_STALE; in xfs_buf_stale()
103 bp->b_flags &= ~_XBF_DELWRI_Q; in xfs_buf_stale()
105 spin_lock(&bp->b_lock); in xfs_buf_stale()
106 atomic_set(&bp->b_lru_ref, 0); in xfs_buf_stale()
107 if (!(bp->b_state & XFS_BSTATE_DISPOSE) && in xfs_buf_stale()
108 (list_lru_del(&bp->b_target->bt_lru, &bp->b_lru))) in xfs_buf_stale()
109 atomic_dec(&bp->b_hold); in xfs_buf_stale()
111 ASSERT(atomic_read(&bp->b_hold) >= 1); in xfs_buf_stale()
112 spin_unlock(&bp->b_lock); in xfs_buf_stale()
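
xfs_buf_stale() is called with the buffer lock held (the ASSERT at line 94): it clears _XBF_DELWRI_Q so the buffer cannot be written back, zeroes b_lru_ref, and drops the hold the LRU owned. A hypothetical caller-side invalidation sequence, sketched from functions that appear in this listing (the surrounding helper is invented):

    /* Hypothetical helper: discard a buffer's contents and release it. */
    static void example_invalidate(struct xfs_buf *bp)
    {
            xfs_buf_lock(bp);       /* xfs_buf_stale() asserts the lock is held */
            xfs_buf_stale(bp);      /* clear _XBF_DELWRI_Q, drop the LRU's hold */
            xfs_buf_relse(bp);      /* unlock and drop this reference           */
    }
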
117 struct xfs_buf *bp, in xfs_buf_get_maps() argument
120 ASSERT(bp->b_maps == NULL); in xfs_buf_get_maps()
121 bp->b_map_count = map_count; in xfs_buf_get_maps()
124 bp->b_maps = &bp->__b_map; in xfs_buf_get_maps()
128 bp->b_maps = kmem_zalloc(map_count * sizeof(struct xfs_buf_map), in xfs_buf_get_maps()
130 if (!bp->b_maps) in xfs_buf_get_maps()
140 struct xfs_buf *bp) in xfs_buf_free_maps() argument
142 if (bp->b_maps != &bp->__b_map) { in xfs_buf_free_maps()
143 kmem_free(bp->b_maps); in xfs_buf_free_maps()
144 bp->b_maps = NULL; in xfs_buf_free_maps()
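
xfs_buf_get_maps()/xfs_buf_free_maps() implement a common small-array optimisation: the usual one-extent buffer points b_maps at the embedded __b_map, and only multi-extent buffers allocate a separate array. A compilable, simplified sketch of the same idea (names here are stand-ins for the xfs_buf fields):

    #include <stdlib.h>

    struct map { long bn, len; };

    struct buf {
            struct map *maps;        /* b_maps      */
            struct map  single_map;  /* __b_map     */
            int         map_count;   /* b_map_count */
    };

    static int buf_get_maps(struct buf *b, int count)
    {
            b->map_count = count;
            if (count == 1) {
                    b->maps = &b->single_map;   /* no allocation in the common case */
                    return 0;
            }
            b->maps = calloc(count, sizeof(*b->maps));
            return b->maps ? 0 : -1;
    }

    static void buf_free_maps(struct buf *b)
    {
            if (b->maps != &b->single_map)      /* only free what was allocated */
                    free(b->maps);
            b->maps = NULL;
    }
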
155 struct xfs_buf *bp; in _xfs_buf_alloc() local
159 bp = kmem_zone_zalloc(xfs_buf_zone, KM_NOFS); in _xfs_buf_alloc()
160 if (unlikely(!bp)) in _xfs_buf_alloc()
169 atomic_set(&bp->b_hold, 1); in _xfs_buf_alloc()
170 atomic_set(&bp->b_lru_ref, 1); in _xfs_buf_alloc()
171 init_completion(&bp->b_iowait); in _xfs_buf_alloc()
172 INIT_LIST_HEAD(&bp->b_lru); in _xfs_buf_alloc()
173 INIT_LIST_HEAD(&bp->b_list); in _xfs_buf_alloc()
174 RB_CLEAR_NODE(&bp->b_rbnode); in _xfs_buf_alloc()
175 sema_init(&bp->b_sema, 0); /* held, no waiters */ in _xfs_buf_alloc()
176 spin_lock_init(&bp->b_lock); in _xfs_buf_alloc()
177 XB_SET_OWNER(bp); in _xfs_buf_alloc()
178 bp->b_target = target; in _xfs_buf_alloc()
179 bp->b_flags = flags; in _xfs_buf_alloc()
186 error = xfs_buf_get_maps(bp, nmaps); in _xfs_buf_alloc()
188 kmem_zone_free(xfs_buf_zone, bp); in _xfs_buf_alloc()
192 bp->b_bn = map[0].bm_bn; in _xfs_buf_alloc()
193 bp->b_length = 0; in _xfs_buf_alloc()
195 bp->b_maps[i].bm_bn = map[i].bm_bn; in _xfs_buf_alloc()
196 bp->b_maps[i].bm_len = map[i].bm_len; in _xfs_buf_alloc()
197 bp->b_length += map[i].bm_len; in _xfs_buf_alloc()
199 bp->b_io_length = bp->b_length; in _xfs_buf_alloc()
201 atomic_set(&bp->b_pin_count, 0); in _xfs_buf_alloc()
202 init_waitqueue_head(&bp->b_waiters); in _xfs_buf_alloc()
205 trace_xfs_buf_init(bp, _RET_IP_); in _xfs_buf_alloc()
207 return bp; in _xfs_buf_alloc()
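
_xfs_buf_alloc() copies the caller's map array into b_maps, records map[0].bm_bn as b_bn, and sums the bm_len values into b_length (lines 192-199). A hypothetical caller-side description of a two-extent (discontiguous) buffer, using the bm_bn/bm_len field names from this listing; hdr_blkno and data_blkno are invented placeholders:

    struct xfs_buf_map map[2] = {
            { .bm_bn = hdr_blkno,  .bm_len = 1 },   /* e.g. a header block */
            { .bm_bn = data_blkno, .bm_len = 8 },   /* plus a data extent  */
    };
    /* For this map, _xfs_buf_alloc() would set b_bn = hdr_blkno and
     * b_length = 1 + 8 = 9 basic blocks. */
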
216 xfs_buf_t *bp, in _xfs_buf_get_pages() argument
220 if (bp->b_pages == NULL) { in _xfs_buf_get_pages()
221 bp->b_page_count = page_count; in _xfs_buf_get_pages()
223 bp->b_pages = bp->b_page_array; in _xfs_buf_get_pages()
225 bp->b_pages = kmem_alloc(sizeof(struct page *) * in _xfs_buf_get_pages()
227 if (bp->b_pages == NULL) in _xfs_buf_get_pages()
230 memset(bp->b_pages, 0, sizeof(struct page *) * page_count); in _xfs_buf_get_pages()
240 xfs_buf_t *bp) in _xfs_buf_free_pages() argument
242 if (bp->b_pages != bp->b_page_array) { in _xfs_buf_free_pages()
243 kmem_free(bp->b_pages); in _xfs_buf_free_pages()
244 bp->b_pages = NULL; in _xfs_buf_free_pages()
257 xfs_buf_t *bp) in xfs_buf_free() argument
259 trace_xfs_buf_free(bp, _RET_IP_); in xfs_buf_free()
261 ASSERT(list_empty(&bp->b_lru)); in xfs_buf_free()
263 if (bp->b_flags & _XBF_PAGES) { in xfs_buf_free()
266 if (xfs_buf_is_vmapped(bp)) in xfs_buf_free()
267 vm_unmap_ram(bp->b_addr - bp->b_offset, in xfs_buf_free()
268 bp->b_page_count); in xfs_buf_free()
270 for (i = 0; i < bp->b_page_count; i++) { in xfs_buf_free()
271 struct page *page = bp->b_pages[i]; in xfs_buf_free()
275 } else if (bp->b_flags & _XBF_KMEM) in xfs_buf_free()
276 kmem_free(bp->b_addr); in xfs_buf_free()
277 _xfs_buf_free_pages(bp); in xfs_buf_free()
278 xfs_buf_free_maps(bp); in xfs_buf_free()
279 kmem_zone_free(xfs_buf_zone, bp); in xfs_buf_free()
287 xfs_buf_t *bp, in xfs_buf_allocate_memory() argument
302 size = BBTOB(bp->b_length); in xfs_buf_allocate_memory()
304 bp->b_addr = kmem_alloc(size, KM_NOFS); in xfs_buf_allocate_memory()
305 if (!bp->b_addr) { in xfs_buf_allocate_memory()
310 if (((unsigned long)(bp->b_addr + size - 1) & PAGE_MASK) != in xfs_buf_allocate_memory()
311 ((unsigned long)bp->b_addr & PAGE_MASK)) { in xfs_buf_allocate_memory()
313 kmem_free(bp->b_addr); in xfs_buf_allocate_memory()
314 bp->b_addr = NULL; in xfs_buf_allocate_memory()
317 bp->b_offset = offset_in_page(bp->b_addr); in xfs_buf_allocate_memory()
318 bp->b_pages = bp->b_page_array; in xfs_buf_allocate_memory()
319 bp->b_pages[0] = virt_to_page(bp->b_addr); in xfs_buf_allocate_memory()
320 bp->b_page_count = 1; in xfs_buf_allocate_memory()
321 bp->b_flags |= _XBF_KMEM; in xfs_buf_allocate_memory()
326 start = BBTOB(bp->b_maps[0].bm_bn) >> PAGE_SHIFT; in xfs_buf_allocate_memory()
327 end = (BBTOB(bp->b_maps[0].bm_bn + bp->b_length) + PAGE_SIZE - 1) in xfs_buf_allocate_memory()
330 error = _xfs_buf_get_pages(bp, page_count); in xfs_buf_allocate_memory()
334 offset = bp->b_offset; in xfs_buf_allocate_memory()
335 bp->b_flags |= _XBF_PAGES; in xfs_buf_allocate_memory()
337 for (i = 0; i < bp->b_page_count; i++) { in xfs_buf_allocate_memory()
344 bp->b_page_count = i; in xfs_buf_allocate_memory()
369 bp->b_pages[i] = page; in xfs_buf_allocate_memory()
375 for (i = 0; i < bp->b_page_count; i++) in xfs_buf_allocate_memory()
376 __free_page(bp->b_pages[i]); in xfs_buf_allocate_memory()
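
xfs_buf_allocate_memory() keeps the kmem_alloc() fast path (lines 302-321) only when the allocation does not straddle a page boundary; otherwise it frees it again and falls back to per-page allocation. The boundary test compares the page of the first and last byte. A compilable restatement of that check (EX_PAGE_SIZE/EX_PAGE_MASK stand in for the kernel's PAGE_SIZE/PAGE_MASK):

    #include <stdint.h>
    #include <stddef.h>
    #include <stdbool.h>

    #define EX_PAGE_SIZE ((uintptr_t)4096)
    #define EX_PAGE_MASK (~(EX_PAGE_SIZE - 1))

    /* True if [addr, addr + size) spans more than one page, mirroring the
     * test at lines 310-311. */
    static bool crosses_page(const void *addr, size_t size)
    {
            uintptr_t first = (uintptr_t)addr;
            uintptr_t last  = first + size - 1;

            return (first & EX_PAGE_MASK) != (last & EX_PAGE_MASK);
    }
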
385 xfs_buf_t *bp, in _xfs_buf_map_pages() argument
388 ASSERT(bp->b_flags & _XBF_PAGES); in _xfs_buf_map_pages()
389 if (bp->b_page_count == 1) { in _xfs_buf_map_pages()
391 bp->b_addr = page_address(bp->b_pages[0]) + bp->b_offset; in _xfs_buf_map_pages()
393 bp->b_addr = NULL; in _xfs_buf_map_pages()
408 bp->b_addr = vm_map_ram(bp->b_pages, bp->b_page_count, in _xfs_buf_map_pages()
410 if (bp->b_addr) in _xfs_buf_map_pages()
416 if (!bp->b_addr) in _xfs_buf_map_pages()
418 bp->b_addr += bp->b_offset; in _xfs_buf_map_pages()
445 xfs_buf_t *bp; in _xfs_buf_find() local
485 bp = NULL; in _xfs_buf_find()
488 bp = rb_entry(parent, struct xfs_buf, b_rbnode); in _xfs_buf_find()
490 if (blkno < bp->b_bn) in _xfs_buf_find()
492 else if (blkno > bp->b_bn) in _xfs_buf_find()
503 if (bp->b_length != numblks) { in _xfs_buf_find()
504 ASSERT(bp->b_flags & XBF_STALE); in _xfs_buf_find()
508 atomic_inc(&bp->b_hold); in _xfs_buf_find()
531 if (!xfs_buf_trylock(bp)) { in _xfs_buf_find()
533 xfs_buf_rele(bp); in _xfs_buf_find()
537 xfs_buf_lock(bp); in _xfs_buf_find()
546 if (bp->b_flags & XBF_STALE) { in _xfs_buf_find()
547 ASSERT((bp->b_flags & _XBF_DELWRI_Q) == 0); in _xfs_buf_find()
548 ASSERT(bp->b_iodone == NULL); in _xfs_buf_find()
549 bp->b_flags &= _XBF_KMEM | _XBF_PAGES; in _xfs_buf_find()
550 bp->b_ops = NULL; in _xfs_buf_find()
553 trace_xfs_buf_find(bp, flags, _RET_IP_); in _xfs_buf_find()
555 return bp; in _xfs_buf_find()
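
_xfs_buf_find() walks the per-AG rbtree keyed on the buffer's starting block number (lines 485-508), takes a hold on a match, and then locks it, either retrying or blocking depending on the flags. A simplified, compilable stand-in for the tree walk, with a plain binary tree instead of the kernel rbtree:

    #include <stdint.h>
    #include <stddef.h>

    struct node {
            uint64_t     blkno;         /* b_bn in the real buffer */
            struct node *left, *right;
    };

    /* Descend by comparing the requested block number, as at lines 490-492. */
    static struct node *lookup(struct node *root, uint64_t blkno)
    {
            while (root) {
                    if (blkno < root->blkno)
                            root = root->left;
                    else if (blkno > root->blkno)
                            root = root->right;
                    else
                            return root;    /* hit: caller takes a hold, then locks */
            }
            return NULL;                    /* miss: caller may insert a new buffer */
    }
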
570 struct xfs_buf *bp; in xfs_buf_get_map() local
574 bp = _xfs_buf_find(target, map, nmaps, flags, NULL); in xfs_buf_get_map()
575 if (likely(bp)) in xfs_buf_get_map()
588 bp = _xfs_buf_find(target, map, nmaps, flags, new_bp); in xfs_buf_get_map()
589 if (!bp) { in xfs_buf_get_map()
594 if (bp != new_bp) in xfs_buf_get_map()
598 if (!bp->b_addr) { in xfs_buf_get_map()
599 error = _xfs_buf_map_pages(bp, flags); in xfs_buf_get_map()
603 xfs_buf_relse(bp); in xfs_buf_get_map()
613 xfs_buf_ioerror(bp, 0); in xfs_buf_get_map()
616 trace_xfs_buf_get(bp, flags, _RET_IP_); in xfs_buf_get_map()
617 return bp; in xfs_buf_get_map()
622 xfs_buf_t *bp, in _xfs_buf_read() argument
626 ASSERT(bp->b_maps[0].bm_bn != XFS_BUF_DADDR_NULL); in _xfs_buf_read()
628 bp->b_flags &= ~(XBF_WRITE | XBF_ASYNC | XBF_READ_AHEAD); in _xfs_buf_read()
629 bp->b_flags |= flags & (XBF_READ | XBF_ASYNC | XBF_READ_AHEAD); in _xfs_buf_read()
632 xfs_buf_submit(bp); in _xfs_buf_read()
635 return xfs_buf_submit_wait(bp); in _xfs_buf_read()
646 struct xfs_buf *bp; in xfs_buf_read_map() local
650 bp = xfs_buf_get_map(target, map, nmaps, flags); in xfs_buf_read_map()
651 if (bp) { in xfs_buf_read_map()
652 trace_xfs_buf_read(bp, flags, _RET_IP_); in xfs_buf_read_map()
654 if (!XFS_BUF_ISDONE(bp)) { in xfs_buf_read_map()
656 bp->b_ops = ops; in xfs_buf_read_map()
657 _xfs_buf_read(bp, flags); in xfs_buf_read_map()
663 xfs_buf_relse(bp); in xfs_buf_read_map()
667 bp->b_flags &= ~XBF_READ; in xfs_buf_read_map()
671 return bp; in xfs_buf_read_map()
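
xfs_buf_read_map() is the usual entry point for cached metadata reads: it looks the buffer up (or creates it) and only issues the read if the buffer is not already up to date. A hedged sketch of a typical caller, assuming the single-map xfs_buf_read() wrapper from xfs_buf.h of this era; the error-handling choices are the caller's, not mandated by the API:

    struct xfs_buf  *bp;

    bp = xfs_buf_read(target, blkno, numblks, 0, ops);
    if (!bp)
            return -ENOMEM;                 /* lookup/allocation failed      */
    if (bp->b_error) {                      /* verifier or I/O error         */
            int error = bp->b_error;
            xfs_buf_relse(bp);
            return error;
    }
    /* ... use bp->b_addr, which is mapped and up to date ... */
    xfs_buf_relse(bp);                      /* unlock and drop the reference */
    return 0;
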
705 struct xfs_buf *bp; in xfs_buf_read_uncached() local
709 bp = xfs_buf_get_uncached(target, numblks, flags); in xfs_buf_read_uncached()
710 if (!bp) in xfs_buf_read_uncached()
714 ASSERT(bp->b_map_count == 1); in xfs_buf_read_uncached()
715 bp->b_bn = XFS_BUF_DADDR_NULL; /* always null for uncached buffers */ in xfs_buf_read_uncached()
716 bp->b_maps[0].bm_bn = daddr; in xfs_buf_read_uncached()
717 bp->b_flags |= XBF_READ; in xfs_buf_read_uncached()
718 bp->b_ops = ops; in xfs_buf_read_uncached()
720 xfs_buf_submit_wait(bp); in xfs_buf_read_uncached()
721 if (bp->b_error) { in xfs_buf_read_uncached()
722 int error = bp->b_error; in xfs_buf_read_uncached()
723 xfs_buf_relse(bp); in xfs_buf_read_uncached()
727 *bpp = bp; in xfs_buf_read_uncached()
737 struct xfs_buf *bp, in xfs_buf_set_empty() argument
740 if (bp->b_pages) in xfs_buf_set_empty()
741 _xfs_buf_free_pages(bp); in xfs_buf_set_empty()
743 bp->b_pages = NULL; in xfs_buf_set_empty()
744 bp->b_page_count = 0; in xfs_buf_set_empty()
745 bp->b_addr = NULL; in xfs_buf_set_empty()
746 bp->b_length = numblks; in xfs_buf_set_empty()
747 bp->b_io_length = numblks; in xfs_buf_set_empty()
749 ASSERT(bp->b_map_count == 1); in xfs_buf_set_empty()
750 bp->b_bn = XFS_BUF_DADDR_NULL; in xfs_buf_set_empty()
751 bp->b_maps[0].bm_bn = XFS_BUF_DADDR_NULL; in xfs_buf_set_empty()
752 bp->b_maps[0].bm_len = bp->b_length; in xfs_buf_set_empty()
768 xfs_buf_t *bp, in xfs_buf_associate_memory() argument
785 if (bp->b_pages) in xfs_buf_associate_memory()
786 _xfs_buf_free_pages(bp); in xfs_buf_associate_memory()
788 bp->b_pages = NULL; in xfs_buf_associate_memory()
789 bp->b_addr = mem; in xfs_buf_associate_memory()
791 rval = _xfs_buf_get_pages(bp, page_count); in xfs_buf_associate_memory()
795 bp->b_offset = offset; in xfs_buf_associate_memory()
797 for (i = 0; i < bp->b_page_count; i++) { in xfs_buf_associate_memory()
798 bp->b_pages[i] = mem_to_page((void *)pageaddr); in xfs_buf_associate_memory()
802 bp->b_io_length = BTOBB(len); in xfs_buf_associate_memory()
803 bp->b_length = BTOBB(buflen); in xfs_buf_associate_memory()
816 struct xfs_buf *bp; in xfs_buf_get_uncached() local
819 bp = _xfs_buf_alloc(target, &map, 1, 0); in xfs_buf_get_uncached()
820 if (unlikely(bp == NULL)) in xfs_buf_get_uncached()
824 error = _xfs_buf_get_pages(bp, page_count); in xfs_buf_get_uncached()
829 bp->b_pages[i] = alloc_page(xb_to_gfp(flags)); in xfs_buf_get_uncached()
830 if (!bp->b_pages[i]) in xfs_buf_get_uncached()
833 bp->b_flags |= _XBF_PAGES; in xfs_buf_get_uncached()
835 error = _xfs_buf_map_pages(bp, 0); in xfs_buf_get_uncached()
842 trace_xfs_buf_get_uncached(bp, _RET_IP_); in xfs_buf_get_uncached()
843 return bp; in xfs_buf_get_uncached()
847 __free_page(bp->b_pages[i]); in xfs_buf_get_uncached()
848 _xfs_buf_free_pages(bp); in xfs_buf_get_uncached()
850 xfs_buf_free_maps(bp); in xfs_buf_get_uncached()
851 kmem_zone_free(xfs_buf_zone, bp); in xfs_buf_get_uncached()
863 xfs_buf_t *bp) in xfs_buf_hold() argument
865 trace_xfs_buf_hold(bp, _RET_IP_); in xfs_buf_hold()
866 atomic_inc(&bp->b_hold); in xfs_buf_hold()
875 xfs_buf_t *bp) in xfs_buf_rele() argument
877 struct xfs_perag *pag = bp->b_pag; in xfs_buf_rele()
879 trace_xfs_buf_rele(bp, _RET_IP_); in xfs_buf_rele()
882 ASSERT(list_empty(&bp->b_lru)); in xfs_buf_rele()
883 ASSERT(RB_EMPTY_NODE(&bp->b_rbnode)); in xfs_buf_rele()
884 if (atomic_dec_and_test(&bp->b_hold)) in xfs_buf_rele()
885 xfs_buf_free(bp); in xfs_buf_rele()
889 ASSERT(!RB_EMPTY_NODE(&bp->b_rbnode)); in xfs_buf_rele()
891 ASSERT(atomic_read(&bp->b_hold) > 0); in xfs_buf_rele()
892 if (atomic_dec_and_lock(&bp->b_hold, &pag->pag_buf_lock)) { in xfs_buf_rele()
893 spin_lock(&bp->b_lock); in xfs_buf_rele()
894 if (!(bp->b_flags & XBF_STALE) && atomic_read(&bp->b_lru_ref)) { in xfs_buf_rele()
900 if (list_lru_add(&bp->b_target->bt_lru, &bp->b_lru)) { in xfs_buf_rele()
901 bp->b_state &= ~XFS_BSTATE_DISPOSE; in xfs_buf_rele()
902 atomic_inc(&bp->b_hold); in xfs_buf_rele()
904 spin_unlock(&bp->b_lock); in xfs_buf_rele()
913 if (!(bp->b_state & XFS_BSTATE_DISPOSE)) { in xfs_buf_rele()
914 list_lru_del(&bp->b_target->bt_lru, &bp->b_lru); in xfs_buf_rele()
916 ASSERT(list_empty(&bp->b_lru)); in xfs_buf_rele()
918 spin_unlock(&bp->b_lock); in xfs_buf_rele()
920 ASSERT(!(bp->b_flags & _XBF_DELWRI_Q)); in xfs_buf_rele()
921 rb_erase(&bp->b_rbnode, &pag->pag_buf_tree); in xfs_buf_rele()
924 xfs_buf_free(bp); in xfs_buf_rele()
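
xfs_buf_rele() distinguishes uncached buffers (freed as soon as b_hold reaches zero, lines 882-885) from cached ones, where dropping the last hold normally parks the buffer on the LRU and the LRU keeps a hold of its own (lines 892-924). A compilable, heavily simplified sketch of that last-reference decision; all locking (pag_buf_lock, b_lock) is omitted:

    #include <stdbool.h>

    struct buf {
            int  hold;      /* b_hold: reference count          */
            int  lru_ref;   /* b_lru_ref: "keep me cached" hint */
            bool on_lru;
    };

    static void buf_release(struct buf *b)
    {
            if (--b->hold != 0)
                    return;                     /* other holders remain */

            if (b->lru_ref > 0) {
                    if (!b->on_lru) {
                            b->on_lru = true;   /* park on the LRU ...         */
                            b->hold++;          /* ... which owns its own hold */
                    }
            } else {
                    /* stale or shrunk: remove from the cache and free it */
            }
    }
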
943 struct xfs_buf *bp) in xfs_buf_trylock() argument
947 locked = down_trylock(&bp->b_sema) == 0; in xfs_buf_trylock()
949 XB_SET_OWNER(bp); in xfs_buf_trylock()
951 trace_xfs_buf_trylock(bp, _RET_IP_); in xfs_buf_trylock()
966 struct xfs_buf *bp) in xfs_buf_lock() argument
968 trace_xfs_buf_lock(bp, _RET_IP_); in xfs_buf_lock()
970 if (atomic_read(&bp->b_pin_count) && (bp->b_flags & XBF_STALE)) in xfs_buf_lock()
971 xfs_log_force(bp->b_target->bt_mount, 0); in xfs_buf_lock()
972 down(&bp->b_sema); in xfs_buf_lock()
973 XB_SET_OWNER(bp); in xfs_buf_lock()
975 trace_xfs_buf_lock_done(bp, _RET_IP_); in xfs_buf_lock()
980 struct xfs_buf *bp) in xfs_buf_unlock() argument
982 XB_CLEAR_OWNER(bp); in xfs_buf_unlock()
983 up(&bp->b_sema); in xfs_buf_unlock()
985 trace_xfs_buf_unlock(bp, _RET_IP_); in xfs_buf_unlock()
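
The buffer lock is a semaphore (b_sema): xfs_buf_trylock() is the non-blocking form, and xfs_buf_lock() sleeps, forcing the log first if the buffer is pinned and stale. The delwri writeback code below uses both, roughly in this shape (a hedged paraphrase of the loop around lines 1796-1805, not the verbatim source):

    list_for_each_entry_safe(bp, n, buffer_list, b_list) {
            if (!wait) {
                    if (xfs_buf_ispinned(bp))
                            continue;       /* would stall on the log; leave queued */
                    if (!xfs_buf_trylock(bp))
                            continue;       /* locked elsewhere; leave queued       */
            } else {
                    xfs_buf_lock(bp);       /* blocking flush: wait for the buffer  */
            }
            /* ... buffer is now locked; move it to the I/O list ... */
    }
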
990 xfs_buf_t *bp) in xfs_buf_wait_unpin() argument
994 if (atomic_read(&bp->b_pin_count) == 0) in xfs_buf_wait_unpin()
997 add_wait_queue(&bp->b_waiters, &wait); in xfs_buf_wait_unpin()
1000 if (atomic_read(&bp->b_pin_count) == 0) in xfs_buf_wait_unpin()
1004 remove_wait_queue(&bp->b_waiters, &wait); in xfs_buf_wait_unpin()
1014 struct xfs_buf *bp) in xfs_buf_ioend() argument
1016 bool read = bp->b_flags & XBF_READ; in xfs_buf_ioend()
1018 trace_xfs_buf_iodone(bp, _RET_IP_); in xfs_buf_ioend()
1020 bp->b_flags &= ~(XBF_READ | XBF_WRITE | XBF_READ_AHEAD); in xfs_buf_ioend()
1026 if (!bp->b_error && bp->b_io_error) in xfs_buf_ioend()
1027 xfs_buf_ioerror(bp, bp->b_io_error); in xfs_buf_ioend()
1030 if (read && !bp->b_error && bp->b_ops) { in xfs_buf_ioend()
1031 ASSERT(!bp->b_iodone); in xfs_buf_ioend()
1032 bp->b_ops->verify_read(bp); in xfs_buf_ioend()
1035 if (!bp->b_error) in xfs_buf_ioend()
1036 bp->b_flags |= XBF_DONE; in xfs_buf_ioend()
1038 if (bp->b_iodone) in xfs_buf_ioend()
1039 (*(bp->b_iodone))(bp); in xfs_buf_ioend()
1040 else if (bp->b_flags & XBF_ASYNC) in xfs_buf_ioend()
1041 xfs_buf_relse(bp); in xfs_buf_ioend()
1043 complete(&bp->b_iowait); in xfs_buf_ioend()
1050 struct xfs_buf *bp = in xfs_buf_ioend_work() local
1053 xfs_buf_ioend(bp); in xfs_buf_ioend_work()
1058 struct xfs_buf *bp) in xfs_buf_ioend_async() argument
1060 INIT_WORK(&bp->b_ioend_work, xfs_buf_ioend_work); in xfs_buf_ioend_async()
1061 queue_work(bp->b_ioend_wq, &bp->b_ioend_work); in xfs_buf_ioend_async()
1066 xfs_buf_t *bp, in xfs_buf_ioerror() argument
1070 bp->b_error = error; in xfs_buf_ioerror()
1071 trace_xfs_buf_ioerror(bp, error, _RET_IP_); in xfs_buf_ioerror()
1076 struct xfs_buf *bp, in xfs_buf_ioerror_alert() argument
1079 xfs_alert(bp->b_target->bt_mount, in xfs_buf_ioerror_alert()
1081 (__uint64_t)XFS_BUF_ADDR(bp), func, -bp->b_error, bp->b_length); in xfs_buf_ioerror_alert()
1086 struct xfs_buf *bp) in xfs_bwrite() argument
1090 ASSERT(xfs_buf_islocked(bp)); in xfs_bwrite()
1092 bp->b_flags |= XBF_WRITE; in xfs_bwrite()
1093 bp->b_flags &= ~(XBF_ASYNC | XBF_READ | _XBF_DELWRI_Q | in xfs_bwrite()
1096 error = xfs_buf_submit_wait(bp); in xfs_bwrite()
1098 xfs_force_shutdown(bp->b_target->bt_mount, in xfs_bwrite()
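
xfs_bwrite() is the synchronous write path: it requires the buffer lock (line 1090), strips the async/delwri flags, waits for the I/O, and shuts the filesystem down on error. A hypothetical caller, sketched from those constraints:

    int error;

    xfs_buf_lock(bp);           /* xfs_bwrite() asserts the lock is held */
    error = xfs_bwrite(bp);     /* synchronous; returns the I/O error    */
    xfs_buf_relse(bp);          /* unlock and drop the reference         */
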
1109 xfs_buf_t *bp = (xfs_buf_t *)bio->bi_private; in xfs_buf_bio_end_io() local
1116 spin_lock(&bp->b_lock); in xfs_buf_bio_end_io()
1117 if (!bp->b_io_error) in xfs_buf_bio_end_io()
1118 bp->b_io_error = error; in xfs_buf_bio_end_io()
1119 spin_unlock(&bp->b_lock); in xfs_buf_bio_end_io()
1122 if (!bp->b_error && xfs_buf_is_vmapped(bp) && (bp->b_flags & XBF_READ)) in xfs_buf_bio_end_io()
1123 invalidate_kernel_vmap_range(bp->b_addr, xfs_buf_vmap_len(bp)); in xfs_buf_bio_end_io()
1125 if (atomic_dec_and_test(&bp->b_io_remaining) == 1) in xfs_buf_bio_end_io()
1126 xfs_buf_ioend_async(bp); in xfs_buf_bio_end_io()
1132 struct xfs_buf *bp, in xfs_buf_ioapply_map() argument
1139 int total_nr_pages = bp->b_page_count; in xfs_buf_ioapply_map()
1142 sector_t sector = bp->b_maps[map].bm_bn; in xfs_buf_ioapply_map()
1146 total_nr_pages = bp->b_page_count; in xfs_buf_ioapply_map()
1160 size = min_t(int, BBTOB(bp->b_maps[map].bm_len), *count); in xfs_buf_ioapply_map()
1165 atomic_inc(&bp->b_io_remaining); in xfs_buf_ioapply_map()
1171 bio->bi_bdev = bp->b_target->bt_bdev; in xfs_buf_ioapply_map()
1174 bio->bi_private = bp; in xfs_buf_ioapply_map()
1183 rbytes = bio_add_page(bio, bp->b_pages[page_index], nbytes, in xfs_buf_ioapply_map()
1195 if (xfs_buf_is_vmapped(bp)) { in xfs_buf_ioapply_map()
1196 flush_kernel_vmap_range(bp->b_addr, in xfs_buf_ioapply_map()
1197 xfs_buf_vmap_len(bp)); in xfs_buf_ioapply_map()
1207 atomic_dec(&bp->b_io_remaining); in xfs_buf_ioapply_map()
1208 xfs_buf_ioerror(bp, -EIO); in xfs_buf_ioapply_map()
1216 struct xfs_buf *bp) in _xfs_buf_ioapply() argument
1228 bp->b_error = 0; in _xfs_buf_ioapply()
1234 if (!bp->b_ioend_wq) in _xfs_buf_ioapply()
1235 bp->b_ioend_wq = bp->b_target->bt_mount->m_buf_workqueue; in _xfs_buf_ioapply()
1237 if (bp->b_flags & XBF_WRITE) { in _xfs_buf_ioapply()
1238 if (bp->b_flags & XBF_SYNCIO) in _xfs_buf_ioapply()
1242 if (bp->b_flags & XBF_FUA) in _xfs_buf_ioapply()
1244 if (bp->b_flags & XBF_FLUSH) in _xfs_buf_ioapply()
1252 if (bp->b_ops) { in _xfs_buf_ioapply()
1253 bp->b_ops->verify_write(bp); in _xfs_buf_ioapply()
1254 if (bp->b_error) { in _xfs_buf_ioapply()
1255 xfs_force_shutdown(bp->b_target->bt_mount, in _xfs_buf_ioapply()
1259 } else if (bp->b_bn != XFS_BUF_DADDR_NULL) { in _xfs_buf_ioapply()
1260 struct xfs_mount *mp = bp->b_target->bt_mount; in _xfs_buf_ioapply()
1269 __func__, bp->b_bn, bp->b_length); in _xfs_buf_ioapply()
1270 xfs_hex_dump(bp->b_addr, 64); in _xfs_buf_ioapply()
1274 } else if (bp->b_flags & XBF_READ_AHEAD) { in _xfs_buf_ioapply()
1289 offset = bp->b_offset; in _xfs_buf_ioapply()
1290 size = BBTOB(bp->b_io_length); in _xfs_buf_ioapply()
1292 for (i = 0; i < bp->b_map_count; i++) { in _xfs_buf_ioapply()
1293 xfs_buf_ioapply_map(bp, i, &offset, &size, rw); in _xfs_buf_ioapply()
1294 if (bp->b_error) in _xfs_buf_ioapply()
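
_xfs_buf_ioapply() runs the write verifier before a write is issued (line 1253) and forces a shutdown if the in-memory copy is corrupt; the matching read verifier runs at I/O completion in xfs_buf_ioend() (line 1032). A hedged sketch of how a verifier pair is wired up through b_ops; the function bodies and the example_ prefix are invented:

    /* Hypothetical verifier pair attached via bp->b_ops. */
    static void example_verify_read(struct xfs_buf *bp)
    {
            /* check magic/CRC in bp->b_addr; on failure:
             * xfs_buf_ioerror(bp, -EFSCORRUPTED); */
    }

    static void example_verify_write(struct xfs_buf *bp)
    {
            /* recompute the CRC just before the buffer goes to disk */
    }

    static const struct xfs_buf_ops example_buf_ops = {
            .verify_read  = example_verify_read,    /* called from xfs_buf_ioend()    */
            .verify_write = example_verify_write,   /* called from _xfs_buf_ioapply() */
    };
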
1310 struct xfs_buf *bp) in xfs_buf_submit() argument
1312 trace_xfs_buf_submit(bp, _RET_IP_); in xfs_buf_submit()
1314 ASSERT(!(bp->b_flags & _XBF_DELWRI_Q)); in xfs_buf_submit()
1315 ASSERT(bp->b_flags & XBF_ASYNC); in xfs_buf_submit()
1318 if (XFS_FORCED_SHUTDOWN(bp->b_target->bt_mount)) { in xfs_buf_submit()
1319 xfs_buf_ioerror(bp, -EIO); in xfs_buf_submit()
1320 bp->b_flags &= ~XBF_DONE; in xfs_buf_submit()
1321 xfs_buf_stale(bp); in xfs_buf_submit()
1322 xfs_buf_ioend(bp); in xfs_buf_submit()
1326 if (bp->b_flags & XBF_WRITE) in xfs_buf_submit()
1327 xfs_buf_wait_unpin(bp); in xfs_buf_submit()
1330 bp->b_io_error = 0; in xfs_buf_submit()
1340 xfs_buf_hold(bp); in xfs_buf_submit()
1347 atomic_set(&bp->b_io_remaining, 1); in xfs_buf_submit()
1348 _xfs_buf_ioapply(bp); in xfs_buf_submit()
1355 if (atomic_dec_and_test(&bp->b_io_remaining) == 1) { in xfs_buf_submit()
1356 if (bp->b_error) in xfs_buf_submit()
1357 xfs_buf_ioend(bp); in xfs_buf_submit()
1359 xfs_buf_ioend_async(bp); in xfs_buf_submit()
1362 xfs_buf_rele(bp); in xfs_buf_submit()
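
xfs_buf_submit() holds b_io_remaining at 1 for the whole of _xfs_buf_ioapply() so completions that arrive while bios are still being built cannot finish the buffer early; the submitter's own decrement completes the I/O only if every bio has already come back (lines 1347-1359). A compilable sketch of that counting scheme using C11 atomics (names are illustrative):

    #include <stdatomic.h>
    #include <stdbool.h>

    static atomic_int io_remaining;     /* b_io_remaining */
    static bool       io_done;

    /* Called once per completed bio, and once by the submitter itself. */
    static void drop_io_count(void)
    {
            if (atomic_fetch_sub(&io_remaining, 1) == 1)
                    io_done = true;     /* last drop finishes the buffer */
    }

    static void submit(int nr_bios)
    {
            atomic_store(&io_remaining, 1);         /* submitter's own count */
            for (int i = 0; i < nr_bios; i++) {
                    atomic_fetch_add(&io_remaining, 1);
                    /* ... build and submit one bio; its completion calls
                     *     drop_io_count() later ... */
            }
            drop_io_count();                        /* drop the submitter's count */
    }
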
1371 struct xfs_buf *bp) in xfs_buf_submit_wait() argument
1375 trace_xfs_buf_submit_wait(bp, _RET_IP_); in xfs_buf_submit_wait()
1377 ASSERT(!(bp->b_flags & (_XBF_DELWRI_Q | XBF_ASYNC))); in xfs_buf_submit_wait()
1379 if (XFS_FORCED_SHUTDOWN(bp->b_target->bt_mount)) { in xfs_buf_submit_wait()
1380 xfs_buf_ioerror(bp, -EIO); in xfs_buf_submit_wait()
1381 xfs_buf_stale(bp); in xfs_buf_submit_wait()
1382 bp->b_flags &= ~XBF_DONE; in xfs_buf_submit_wait()
1386 if (bp->b_flags & XBF_WRITE) in xfs_buf_submit_wait()
1387 xfs_buf_wait_unpin(bp); in xfs_buf_submit_wait()
1390 bp->b_io_error = 0; in xfs_buf_submit_wait()
1398 xfs_buf_hold(bp); in xfs_buf_submit_wait()
1405 atomic_set(&bp->b_io_remaining, 1); in xfs_buf_submit_wait()
1406 _xfs_buf_ioapply(bp); in xfs_buf_submit_wait()
1412 if (atomic_dec_and_test(&bp->b_io_remaining) == 1) in xfs_buf_submit_wait()
1413 xfs_buf_ioend(bp); in xfs_buf_submit_wait()
1416 trace_xfs_buf_iowait(bp, _RET_IP_); in xfs_buf_submit_wait()
1417 wait_for_completion(&bp->b_iowait); in xfs_buf_submit_wait()
1418 trace_xfs_buf_iowait_done(bp, _RET_IP_); in xfs_buf_submit_wait()
1419 error = bp->b_error; in xfs_buf_submit_wait()
1425 xfs_buf_rele(bp); in xfs_buf_submit_wait()
1431 xfs_buf_t *bp, in xfs_buf_offset() argument
1436 if (bp->b_addr) in xfs_buf_offset()
1437 return bp->b_addr + offset; in xfs_buf_offset()
1439 offset += bp->b_offset; in xfs_buf_offset()
1440 page = bp->b_pages[offset >> PAGE_SHIFT]; in xfs_buf_offset()
1449 xfs_buf_t *bp, /* buffer to process */ in xfs_buf_iomove() argument
1462 page_index = (boff + bp->b_offset) >> PAGE_SHIFT; in xfs_buf_iomove()
1463 page_offset = (boff + bp->b_offset) & ~PAGE_MASK; in xfs_buf_iomove()
1464 page = bp->b_pages[page_index]; in xfs_buf_iomove()
1466 BBTOB(bp->b_io_length) - boff); in xfs_buf_iomove()
1503 struct xfs_buf *bp = container_of(item, struct xfs_buf, b_lru); in xfs_buftarg_wait_rele() local
1506 if (atomic_read(&bp->b_hold) > 1) { in xfs_buftarg_wait_rele()
1508 trace_xfs_buf_wait_buftarg(bp, _RET_IP_); in xfs_buftarg_wait_rele()
1511 if (!spin_trylock(&bp->b_lock)) in xfs_buftarg_wait_rele()
1518 atomic_set(&bp->b_lru_ref, 0); in xfs_buftarg_wait_rele()
1519 bp->b_state |= XFS_BSTATE_DISPOSE; in xfs_buftarg_wait_rele()
1521 spin_unlock(&bp->b_lock); in xfs_buftarg_wait_rele()
1548 struct xfs_buf *bp; in xfs_wait_buftarg() local
1549 bp = list_first_entry(&dispose, struct xfs_buf, b_lru); in xfs_wait_buftarg()
1550 list_del_init(&bp->b_lru); in xfs_wait_buftarg()
1551 if (bp->b_flags & XBF_WRITE_FAIL) { in xfs_wait_buftarg()
1555 (long long)bp->b_bn); in xfs_wait_buftarg()
1557 xfs_buf_rele(bp); in xfs_wait_buftarg()
1571 struct xfs_buf *bp = container_of(item, struct xfs_buf, b_lru); in xfs_buftarg_isolate() local
1578 if (!spin_trylock(&bp->b_lock)) in xfs_buftarg_isolate()
1585 if (!atomic_add_unless(&bp->b_lru_ref, -1, 0)) { in xfs_buftarg_isolate()
1586 spin_unlock(&bp->b_lock); in xfs_buftarg_isolate()
1590 bp->b_state |= XFS_BSTATE_DISPOSE; in xfs_buftarg_isolate()
1592 spin_unlock(&bp->b_lock); in xfs_buftarg_isolate()
1610 struct xfs_buf *bp; in xfs_buftarg_shrink_scan() local
1611 bp = list_first_entry(&dispose, struct xfs_buf, b_lru); in xfs_buftarg_shrink_scan()
1612 list_del_init(&bp->b_lru); in xfs_buftarg_shrink_scan()
1613 xfs_buf_rele(bp); in xfs_buftarg_shrink_scan()
1728 struct xfs_buf *bp, in xfs_buf_delwri_queue() argument
1731 ASSERT(xfs_buf_islocked(bp)); in xfs_buf_delwri_queue()
1732 ASSERT(!(bp->b_flags & XBF_READ)); in xfs_buf_delwri_queue()
1739 if (bp->b_flags & _XBF_DELWRI_Q) { in xfs_buf_delwri_queue()
1740 trace_xfs_buf_delwri_queued(bp, _RET_IP_); in xfs_buf_delwri_queue()
1744 trace_xfs_buf_delwri_queue(bp, _RET_IP_); in xfs_buf_delwri_queue()
1754 bp->b_flags |= _XBF_DELWRI_Q; in xfs_buf_delwri_queue()
1755 if (list_empty(&bp->b_list)) { in xfs_buf_delwri_queue()
1756 atomic_inc(&bp->b_hold); in xfs_buf_delwri_queue()
1757 list_add_tail(&bp->b_list, list); in xfs_buf_delwri_queue()
1775 struct xfs_buf *bp = container_of(b, struct xfs_buf, b_list); in xfs_buf_cmp() local
1778 diff = ap->b_maps[0].bm_bn - bp->b_maps[0].bm_bn; in xfs_buf_cmp()
1793 struct xfs_buf *bp, *n; in __xfs_buf_delwri_submit() local
1796 list_for_each_entry_safe(bp, n, buffer_list, b_list) { in __xfs_buf_delwri_submit()
1798 if (xfs_buf_ispinned(bp)) { in __xfs_buf_delwri_submit()
1802 if (!xfs_buf_trylock(bp)) in __xfs_buf_delwri_submit()
1805 xfs_buf_lock(bp); in __xfs_buf_delwri_submit()
1814 if (!(bp->b_flags & _XBF_DELWRI_Q)) { in __xfs_buf_delwri_submit()
1815 list_del_init(&bp->b_list); in __xfs_buf_delwri_submit()
1816 xfs_buf_relse(bp); in __xfs_buf_delwri_submit()
1820 list_move_tail(&bp->b_list, io_list); in __xfs_buf_delwri_submit()
1821 trace_xfs_buf_delwri_split(bp, _RET_IP_); in __xfs_buf_delwri_submit()
1827 list_for_each_entry_safe(bp, n, io_list, b_list) { in __xfs_buf_delwri_submit()
1828 bp->b_flags &= ~(_XBF_DELWRI_Q | XBF_ASYNC | XBF_WRITE_FAIL); in __xfs_buf_delwri_submit()
1829 bp->b_flags |= XBF_WRITE | XBF_ASYNC; in __xfs_buf_delwri_submit()
1837 xfs_buf_hold(bp); in __xfs_buf_delwri_submit()
1839 list_del_init(&bp->b_list); in __xfs_buf_delwri_submit()
1841 xfs_buf_submit(bp); in __xfs_buf_delwri_submit()
1879 struct xfs_buf *bp; in xfs_buf_delwri_submit() local
1885 bp = list_first_entry(&io_list, struct xfs_buf, b_list); in xfs_buf_delwri_submit()
1887 list_del_init(&bp->b_list); in xfs_buf_delwri_submit()
1890 xfs_buf_lock(bp); in xfs_buf_delwri_submit()
1891 error2 = bp->b_error; in xfs_buf_delwri_submit()
1892 xfs_buf_relse(bp); in xfs_buf_delwri_submit()
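
Taken together, xfs_buf_delwri_queue() and xfs_buf_delwri_submit() give callers batched writeback: buffers are collected on a caller-owned list (each queue call takes its own hold, line 1756), then written in block-number order and waited on. A hedged usage sketch built only from the calls in this listing; bp1/bp2 and the surrounding context are hypothetical:

    LIST_HEAD(buffer_list);
    int error;

    xfs_buf_lock(bp1);                              /* queueing asserts the lock (line 1731) */
    xfs_buf_delwri_queue(bp1, &buffer_list);        /* sets _XBF_DELWRI_Q, takes a hold      */
    xfs_buf_relse(bp1);                             /* the list's hold keeps it alive        */

    xfs_buf_lock(bp2);
    xfs_buf_delwri_queue(bp2, &buffer_list);
    xfs_buf_relse(bp2);

    error = xfs_buf_delwri_submit(&buffer_list);    /* sort, write, wait; returns first error */
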