Lines matching refs:bp — cross-reference hits for the XFS buffer pointer bp; each entry shows the source line number, the matching code, the enclosing function, and whether bp is an argument or a local there.
48 # define XB_SET_OWNER(bp) ((bp)->b_last_holder = current->pid) argument
49 # define XB_CLEAR_OWNER(bp) ((bp)->b_last_holder = -1) argument
50 # define XB_GET_OWNER(bp) ((bp)->b_last_holder) argument
52 # define XB_SET_OWNER(bp) do { } while (0) argument
53 # define XB_CLEAR_OWNER(bp) do { } while (0) argument
54 # define XB_GET_OWNER(bp) do { } while (0) argument
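These three macros compile to a store, a clear, and a read of b_last_holder when buffer-lock owner tracking is built in, and to empty statements otherwise, so the bookkeeping costs nothing in non-debug builds. Below is a minimal user-space sketch of the same compile-time switch; the names DEBUG_OWNER, struct buf, and the getpid()-based owner are illustrative rather than taken from the source, and the no-op GET variant is made an expression here so the example still compiles.

    #include <stdio.h>
    #include <unistd.h>
    #include <sys/types.h>

    #define DEBUG_OWNER 1   /* comment this out to get the no-op variants */

    struct buf {
        pid_t last_holder;              /* mirrors bp->b_last_holder */
    };

    #ifdef DEBUG_OWNER
    # define SET_OWNER(b)   ((b)->last_holder = getpid())
    # define CLEAR_OWNER(b) ((b)->last_holder = -1)
    # define GET_OWNER(b)   ((b)->last_holder)
    #else
    # define SET_OWNER(b)   do { } while (0)
    # define CLEAR_OWNER(b) do { } while (0)
    # define GET_OWNER(b)   (-1)        /* expression so the printf below compiles */
    #endif

    int main(void)
    {
        struct buf b = { .last_holder = -1 };

        SET_OWNER(&b);                  /* record who holds the lock */
        printf("owner: %ld\n", (long)GET_OWNER(&b));
        CLEAR_OWNER(&b);                /* forget it again on unlock */
        return 0;
    }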
63 struct xfs_buf *bp) in xfs_buf_is_vmapped() argument
72 return bp->b_addr && bp->b_page_count > 1; in xfs_buf_is_vmapped()
77 struct xfs_buf *bp) in xfs_buf_vmap_len() argument
79 return (bp->b_page_count * PAGE_SIZE) - bp->b_offset; in xfs_buf_vmap_len()
92 struct xfs_buf *bp) in xfs_buf_stale() argument
94 ASSERT(xfs_buf_islocked(bp)); in xfs_buf_stale()
96 bp->b_flags |= XBF_STALE; in xfs_buf_stale()
103 bp->b_flags &= ~_XBF_DELWRI_Q; in xfs_buf_stale()
105 spin_lock(&bp->b_lock); in xfs_buf_stale()
106 atomic_set(&bp->b_lru_ref, 0); in xfs_buf_stale()
107 if (!(bp->b_state & XFS_BSTATE_DISPOSE) && in xfs_buf_stale()
108 (list_lru_del(&bp->b_target->bt_lru, &bp->b_lru))) in xfs_buf_stale()
109 atomic_dec(&bp->b_hold); in xfs_buf_stale()
111 ASSERT(atomic_read(&bp->b_hold) >= 1); in xfs_buf_stale()
112 spin_unlock(&bp->b_lock); in xfs_buf_stale()
117 struct xfs_buf *bp, in xfs_buf_get_maps() argument
120 ASSERT(bp->b_maps == NULL); in xfs_buf_get_maps()
121 bp->b_map_count = map_count; in xfs_buf_get_maps()
124 bp->b_maps = &bp->__b_map; in xfs_buf_get_maps()
128 bp->b_maps = kmem_zalloc(map_count * sizeof(struct xfs_buf_map), in xfs_buf_get_maps()
130 if (!bp->b_maps) in xfs_buf_get_maps()
140 struct xfs_buf *bp) in xfs_buf_free_maps() argument
142 if (bp->b_maps != &bp->__b_map) { in xfs_buf_free_maps()
143 kmem_free(bp->b_maps); in xfs_buf_free_maps()
144 bp->b_maps = NULL; in xfs_buf_free_maps()
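xfs_buf_get_maps() points b_maps at the buffer's embedded __b_map for the common single-map case and only allocates an array for multi-map buffers; xfs_buf_free_maps() frees the array only when it is not the embedded slot. A compilable user-space sketch of that small-inline-or-allocate pattern, with illustrative names (struct buf, buf_get_maps, buf_free_maps):

    #include <stdlib.h>

    struct map { long bn; int len; };

    struct buf {
        struct map *maps;          /* mirrors bp->b_maps   */
        struct map  inline_map;    /* mirrors bp->__b_map  */
        int         map_count;
    };

    /* One map (the common case) uses the embedded slot; more maps allocate. */
    static int buf_get_maps(struct buf *b, int count)
    {
        b->map_count = count;
        if (count == 1) {
            b->maps = &b->inline_map;
            return 0;
        }
        b->maps = calloc(count, sizeof(struct map));
        return b->maps ? 0 : -1;
    }

    /* Free only what was actually allocated, never the embedded slot. */
    static void buf_free_maps(struct buf *b)
    {
        if (b->maps != &b->inline_map) {
            free(b->maps);
            b->maps = NULL;
        }
    }

    int main(void)
    {
        struct buf b = { 0 };

        buf_get_maps(&b, 1);       /* points at b.inline_map, no allocation */
        buf_free_maps(&b);         /* no-op for the embedded slot */

        buf_get_maps(&b, 4);       /* heap-allocates an array of four maps  */
        buf_free_maps(&b);
        return 0;
    }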
155 struct xfs_buf *bp; in _xfs_buf_alloc() local
159 bp = kmem_zone_zalloc(xfs_buf_zone, KM_NOFS); in _xfs_buf_alloc()
160 if (unlikely(!bp)) in _xfs_buf_alloc()
169 atomic_set(&bp->b_hold, 1); in _xfs_buf_alloc()
170 atomic_set(&bp->b_lru_ref, 1); in _xfs_buf_alloc()
171 init_completion(&bp->b_iowait); in _xfs_buf_alloc()
172 INIT_LIST_HEAD(&bp->b_lru); in _xfs_buf_alloc()
173 INIT_LIST_HEAD(&bp->b_list); in _xfs_buf_alloc()
174 RB_CLEAR_NODE(&bp->b_rbnode); in _xfs_buf_alloc()
175 sema_init(&bp->b_sema, 0); /* held, no waiters */ in _xfs_buf_alloc()
176 spin_lock_init(&bp->b_lock); in _xfs_buf_alloc()
177 XB_SET_OWNER(bp); in _xfs_buf_alloc()
178 bp->b_target = target; in _xfs_buf_alloc()
179 bp->b_flags = flags; in _xfs_buf_alloc()
186 error = xfs_buf_get_maps(bp, nmaps); in _xfs_buf_alloc()
188 kmem_zone_free(xfs_buf_zone, bp); in _xfs_buf_alloc()
192 bp->b_bn = map[0].bm_bn; in _xfs_buf_alloc()
193 bp->b_length = 0; in _xfs_buf_alloc()
195 bp->b_maps[i].bm_bn = map[i].bm_bn; in _xfs_buf_alloc()
196 bp->b_maps[i].bm_len = map[i].bm_len; in _xfs_buf_alloc()
197 bp->b_length += map[i].bm_len; in _xfs_buf_alloc()
199 bp->b_io_length = bp->b_length; in _xfs_buf_alloc()
201 atomic_set(&bp->b_pin_count, 0); in _xfs_buf_alloc()
202 init_waitqueue_head(&bp->b_waiters); in _xfs_buf_alloc()
205 trace_xfs_buf_init(bp, _RET_IP_); in _xfs_buf_alloc()
207 return bp; in _xfs_buf_alloc()
216 xfs_buf_t *bp, in _xfs_buf_get_pages() argument
220 if (bp->b_pages == NULL) { in _xfs_buf_get_pages()
221 bp->b_page_count = page_count; in _xfs_buf_get_pages()
223 bp->b_pages = bp->b_page_array; in _xfs_buf_get_pages()
225 bp->b_pages = kmem_alloc(sizeof(struct page *) * in _xfs_buf_get_pages()
227 if (bp->b_pages == NULL) in _xfs_buf_get_pages()
230 memset(bp->b_pages, 0, sizeof(struct page *) * page_count); in _xfs_buf_get_pages()
240 xfs_buf_t *bp) in _xfs_buf_free_pages() argument
242 if (bp->b_pages != bp->b_page_array) { in _xfs_buf_free_pages()
243 kmem_free(bp->b_pages); in _xfs_buf_free_pages()
244 bp->b_pages = NULL; in _xfs_buf_free_pages()
257 xfs_buf_t *bp) in xfs_buf_free() argument
259 trace_xfs_buf_free(bp, _RET_IP_); in xfs_buf_free()
261 ASSERT(list_empty(&bp->b_lru)); in xfs_buf_free()
263 if (bp->b_flags & _XBF_PAGES) { in xfs_buf_free()
266 if (xfs_buf_is_vmapped(bp)) in xfs_buf_free()
267 vm_unmap_ram(bp->b_addr - bp->b_offset, in xfs_buf_free()
268 bp->b_page_count); in xfs_buf_free()
270 for (i = 0; i < bp->b_page_count; i++) { in xfs_buf_free()
271 struct page *page = bp->b_pages[i]; in xfs_buf_free()
275 } else if (bp->b_flags & _XBF_KMEM) in xfs_buf_free()
276 kmem_free(bp->b_addr); in xfs_buf_free()
277 _xfs_buf_free_pages(bp); in xfs_buf_free()
278 xfs_buf_free_maps(bp); in xfs_buf_free()
279 kmem_zone_free(xfs_buf_zone, bp); in xfs_buf_free()
287 xfs_buf_t *bp, in xfs_buf_allocate_memory() argument
302 size = BBTOB(bp->b_length); in xfs_buf_allocate_memory()
304 bp->b_addr = kmem_alloc(size, KM_NOFS); in xfs_buf_allocate_memory()
305 if (!bp->b_addr) { in xfs_buf_allocate_memory()
310 if (((unsigned long)(bp->b_addr + size - 1) & PAGE_MASK) != in xfs_buf_allocate_memory()
311 ((unsigned long)bp->b_addr & PAGE_MASK)) { in xfs_buf_allocate_memory()
313 kmem_free(bp->b_addr); in xfs_buf_allocate_memory()
314 bp->b_addr = NULL; in xfs_buf_allocate_memory()
317 bp->b_offset = offset_in_page(bp->b_addr); in xfs_buf_allocate_memory()
318 bp->b_pages = bp->b_page_array; in xfs_buf_allocate_memory()
319 bp->b_pages[0] = virt_to_page(bp->b_addr); in xfs_buf_allocate_memory()
320 bp->b_page_count = 1; in xfs_buf_allocate_memory()
321 bp->b_flags |= _XBF_KMEM; in xfs_buf_allocate_memory()
326 start = BBTOB(bp->b_maps[0].bm_bn) >> PAGE_SHIFT; in xfs_buf_allocate_memory()
327 end = (BBTOB(bp->b_maps[0].bm_bn + bp->b_length) + PAGE_SIZE - 1) in xfs_buf_allocate_memory()
330 error = _xfs_buf_get_pages(bp, page_count); in xfs_buf_allocate_memory()
334 offset = bp->b_offset; in xfs_buf_allocate_memory()
335 bp->b_flags |= _XBF_PAGES; in xfs_buf_allocate_memory()
337 for (i = 0; i < bp->b_page_count; i++) { in xfs_buf_allocate_memory()
344 bp->b_page_count = i; in xfs_buf_allocate_memory()
361 XFS_STATS_INC(bp->b_target->bt_mount, xb_page_retries); in xfs_buf_allocate_memory()
366 XFS_STATS_INC(bp->b_target->bt_mount, xb_page_found); in xfs_buf_allocate_memory()
370 bp->b_pages[i] = page; in xfs_buf_allocate_memory()
376 for (i = 0; i < bp->b_page_count; i++) in xfs_buf_allocate_memory()
377 __free_page(bp->b_pages[i]); in xfs_buf_allocate_memory()
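The kmem path of xfs_buf_allocate_memory() only keeps a heap allocation if it fits entirely inside one page: the check compares the page of the first byte with the page of the last byte and falls back to page allocation when they differ. A small user-space sketch of that page-crossing test, assuming 4 KiB pages (the addresses and sizes are made up for illustration):

    #include <stdint.h>
    #include <stdio.h>

    #define PAGE_SIZE 4096UL
    #define PAGE_MASK (~(PAGE_SIZE - 1))

    /* True if [addr, addr + size) spans more than one page. */
    static int crosses_page(uintptr_t addr, size_t size)
    {
        return ((addr + size - 1) & PAGE_MASK) != (addr & PAGE_MASK);
    }

    int main(void)
    {
        /* in-page offset 0xf00 plus 512 bytes runs past the 4 KiB boundary */
        printf("%d\n", crosses_page(0x1000f00, 512));   /* prints 1 */
        /* in-page offset 0x100 plus 512 bytes stays inside one page */
        printf("%d\n", crosses_page(0x1000100, 512));   /* prints 0 */
        return 0;
    }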
386 xfs_buf_t *bp, in _xfs_buf_map_pages() argument
389 ASSERT(bp->b_flags & _XBF_PAGES); in _xfs_buf_map_pages()
390 if (bp->b_page_count == 1) { in _xfs_buf_map_pages()
392 bp->b_addr = page_address(bp->b_pages[0]) + bp->b_offset; in _xfs_buf_map_pages()
394 bp->b_addr = NULL; in _xfs_buf_map_pages()
409 bp->b_addr = vm_map_ram(bp->b_pages, bp->b_page_count, in _xfs_buf_map_pages()
411 if (bp->b_addr) in _xfs_buf_map_pages()
417 if (!bp->b_addr) in _xfs_buf_map_pages()
419 bp->b_addr += bp->b_offset; in _xfs_buf_map_pages()
445 xfs_buf_t *bp; in _xfs_buf_find() local
484 bp = NULL; in _xfs_buf_find()
487 bp = rb_entry(parent, struct xfs_buf, b_rbnode); in _xfs_buf_find()
489 if (blkno < bp->b_bn) in _xfs_buf_find()
491 else if (blkno > bp->b_bn) in _xfs_buf_find()
502 if (bp->b_length != numblks) { in _xfs_buf_find()
503 ASSERT(bp->b_flags & XBF_STALE); in _xfs_buf_find()
507 atomic_inc(&bp->b_hold); in _xfs_buf_find()
530 if (!xfs_buf_trylock(bp)) { in _xfs_buf_find()
532 xfs_buf_rele(bp); in _xfs_buf_find()
536 xfs_buf_lock(bp); in _xfs_buf_find()
545 if (bp->b_flags & XBF_STALE) { in _xfs_buf_find()
546 ASSERT((bp->b_flags & _XBF_DELWRI_Q) == 0); in _xfs_buf_find()
547 ASSERT(bp->b_iodone == NULL); in _xfs_buf_find()
548 bp->b_flags &= _XBF_KMEM | _XBF_PAGES; in _xfs_buf_find()
549 bp->b_ops = NULL; in _xfs_buf_find()
552 trace_xfs_buf_find(bp, flags, _RET_IP_); in _xfs_buf_find()
554 return bp; in _xfs_buf_find()
569 struct xfs_buf *bp; in xfs_buf_get_map() local
573 bp = _xfs_buf_find(target, map, nmaps, flags, NULL); in xfs_buf_get_map()
574 if (likely(bp)) in xfs_buf_get_map()
587 bp = _xfs_buf_find(target, map, nmaps, flags, new_bp); in xfs_buf_get_map()
588 if (!bp) { in xfs_buf_get_map()
593 if (bp != new_bp) in xfs_buf_get_map()
597 if (!bp->b_addr) { in xfs_buf_get_map()
598 error = _xfs_buf_map_pages(bp, flags); in xfs_buf_get_map()
602 xfs_buf_relse(bp); in xfs_buf_get_map()
612 xfs_buf_ioerror(bp, 0); in xfs_buf_get_map()
615 trace_xfs_buf_get(bp, flags, _RET_IP_); in xfs_buf_get_map()
616 return bp; in xfs_buf_get_map()
621 xfs_buf_t *bp, in _xfs_buf_read() argument
625 ASSERT(bp->b_maps[0].bm_bn != XFS_BUF_DADDR_NULL); in _xfs_buf_read()
627 bp->b_flags &= ~(XBF_WRITE | XBF_ASYNC | XBF_READ_AHEAD); in _xfs_buf_read()
628 bp->b_flags |= flags & (XBF_READ | XBF_ASYNC | XBF_READ_AHEAD); in _xfs_buf_read()
631 xfs_buf_submit(bp); in _xfs_buf_read()
634 return xfs_buf_submit_wait(bp); in _xfs_buf_read()
645 struct xfs_buf *bp; in xfs_buf_read_map() local
649 bp = xfs_buf_get_map(target, map, nmaps, flags); in xfs_buf_read_map()
650 if (bp) { in xfs_buf_read_map()
651 trace_xfs_buf_read(bp, flags, _RET_IP_); in xfs_buf_read_map()
653 if (!XFS_BUF_ISDONE(bp)) { in xfs_buf_read_map()
655 bp->b_ops = ops; in xfs_buf_read_map()
656 _xfs_buf_read(bp, flags); in xfs_buf_read_map()
662 xfs_buf_relse(bp); in xfs_buf_read_map()
666 bp->b_flags &= ~XBF_READ; in xfs_buf_read_map()
670 return bp; in xfs_buf_read_map()
704 struct xfs_buf *bp; in xfs_buf_read_uncached() local
708 bp = xfs_buf_get_uncached(target, numblks, flags); in xfs_buf_read_uncached()
709 if (!bp) in xfs_buf_read_uncached()
713 ASSERT(bp->b_map_count == 1); in xfs_buf_read_uncached()
714 bp->b_bn = XFS_BUF_DADDR_NULL; /* always null for uncached buffers */ in xfs_buf_read_uncached()
715 bp->b_maps[0].bm_bn = daddr; in xfs_buf_read_uncached()
716 bp->b_flags |= XBF_READ; in xfs_buf_read_uncached()
717 bp->b_ops = ops; in xfs_buf_read_uncached()
719 xfs_buf_submit_wait(bp); in xfs_buf_read_uncached()
720 if (bp->b_error) { in xfs_buf_read_uncached()
721 int error = bp->b_error; in xfs_buf_read_uncached()
722 xfs_buf_relse(bp); in xfs_buf_read_uncached()
726 *bpp = bp; in xfs_buf_read_uncached()
736 struct xfs_buf *bp, in xfs_buf_set_empty() argument
739 if (bp->b_pages) in xfs_buf_set_empty()
740 _xfs_buf_free_pages(bp); in xfs_buf_set_empty()
742 bp->b_pages = NULL; in xfs_buf_set_empty()
743 bp->b_page_count = 0; in xfs_buf_set_empty()
744 bp->b_addr = NULL; in xfs_buf_set_empty()
745 bp->b_length = numblks; in xfs_buf_set_empty()
746 bp->b_io_length = numblks; in xfs_buf_set_empty()
748 ASSERT(bp->b_map_count == 1); in xfs_buf_set_empty()
749 bp->b_bn = XFS_BUF_DADDR_NULL; in xfs_buf_set_empty()
750 bp->b_maps[0].bm_bn = XFS_BUF_DADDR_NULL; in xfs_buf_set_empty()
751 bp->b_maps[0].bm_len = bp->b_length; in xfs_buf_set_empty()
767 xfs_buf_t *bp, in xfs_buf_associate_memory() argument
784 if (bp->b_pages) in xfs_buf_associate_memory()
785 _xfs_buf_free_pages(bp); in xfs_buf_associate_memory()
787 bp->b_pages = NULL; in xfs_buf_associate_memory()
788 bp->b_addr = mem; in xfs_buf_associate_memory()
790 rval = _xfs_buf_get_pages(bp, page_count); in xfs_buf_associate_memory()
794 bp->b_offset = offset; in xfs_buf_associate_memory()
796 for (i = 0; i < bp->b_page_count; i++) { in xfs_buf_associate_memory()
797 bp->b_pages[i] = mem_to_page((void *)pageaddr); in xfs_buf_associate_memory()
801 bp->b_io_length = BTOBB(len); in xfs_buf_associate_memory()
802 bp->b_length = BTOBB(buflen); in xfs_buf_associate_memory()
815 struct xfs_buf *bp; in xfs_buf_get_uncached() local
818 bp = _xfs_buf_alloc(target, &map, 1, 0); in xfs_buf_get_uncached()
819 if (unlikely(bp == NULL)) in xfs_buf_get_uncached()
823 error = _xfs_buf_get_pages(bp, page_count); in xfs_buf_get_uncached()
828 bp->b_pages[i] = alloc_page(xb_to_gfp(flags)); in xfs_buf_get_uncached()
829 if (!bp->b_pages[i]) in xfs_buf_get_uncached()
832 bp->b_flags |= _XBF_PAGES; in xfs_buf_get_uncached()
834 error = _xfs_buf_map_pages(bp, 0); in xfs_buf_get_uncached()
841 trace_xfs_buf_get_uncached(bp, _RET_IP_); in xfs_buf_get_uncached()
842 return bp; in xfs_buf_get_uncached()
846 __free_page(bp->b_pages[i]); in xfs_buf_get_uncached()
847 _xfs_buf_free_pages(bp); in xfs_buf_get_uncached()
849 xfs_buf_free_maps(bp); in xfs_buf_get_uncached()
850 kmem_zone_free(xfs_buf_zone, bp); in xfs_buf_get_uncached()
862 xfs_buf_t *bp) in xfs_buf_hold() argument
864 trace_xfs_buf_hold(bp, _RET_IP_); in xfs_buf_hold()
865 atomic_inc(&bp->b_hold); in xfs_buf_hold()
874 xfs_buf_t *bp) in xfs_buf_rele() argument
876 struct xfs_perag *pag = bp->b_pag; in xfs_buf_rele()
878 trace_xfs_buf_rele(bp, _RET_IP_); in xfs_buf_rele()
881 ASSERT(list_empty(&bp->b_lru)); in xfs_buf_rele()
882 ASSERT(RB_EMPTY_NODE(&bp->b_rbnode)); in xfs_buf_rele()
883 if (atomic_dec_and_test(&bp->b_hold)) in xfs_buf_rele()
884 xfs_buf_free(bp); in xfs_buf_rele()
888 ASSERT(!RB_EMPTY_NODE(&bp->b_rbnode)); in xfs_buf_rele()
890 ASSERT(atomic_read(&bp->b_hold) > 0); in xfs_buf_rele()
891 if (atomic_dec_and_lock(&bp->b_hold, &pag->pag_buf_lock)) { in xfs_buf_rele()
892 spin_lock(&bp->b_lock); in xfs_buf_rele()
893 if (!(bp->b_flags & XBF_STALE) && atomic_read(&bp->b_lru_ref)) { in xfs_buf_rele()
899 if (list_lru_add(&bp->b_target->bt_lru, &bp->b_lru)) { in xfs_buf_rele()
900 bp->b_state &= ~XFS_BSTATE_DISPOSE; in xfs_buf_rele()
901 atomic_inc(&bp->b_hold); in xfs_buf_rele()
903 spin_unlock(&bp->b_lock); in xfs_buf_rele()
912 if (!(bp->b_state & XFS_BSTATE_DISPOSE)) { in xfs_buf_rele()
913 list_lru_del(&bp->b_target->bt_lru, &bp->b_lru); in xfs_buf_rele()
915 ASSERT(list_empty(&bp->b_lru)); in xfs_buf_rele()
917 spin_unlock(&bp->b_lock); in xfs_buf_rele()
919 ASSERT(!(bp->b_flags & _XBF_DELWRI_Q)); in xfs_buf_rele()
920 rb_erase(&bp->b_rbnode, &pag->pag_buf_tree); in xfs_buf_rele()
923 xfs_buf_free(bp); in xfs_buf_rele()
942 struct xfs_buf *bp) in xfs_buf_trylock() argument
946 locked = down_trylock(&bp->b_sema) == 0; in xfs_buf_trylock()
948 XB_SET_OWNER(bp); in xfs_buf_trylock()
950 trace_xfs_buf_trylock(bp, _RET_IP_); in xfs_buf_trylock()
965 struct xfs_buf *bp) in xfs_buf_lock() argument
967 trace_xfs_buf_lock(bp, _RET_IP_); in xfs_buf_lock()
969 if (atomic_read(&bp->b_pin_count) && (bp->b_flags & XBF_STALE)) in xfs_buf_lock()
970 xfs_log_force(bp->b_target->bt_mount, 0); in xfs_buf_lock()
971 down(&bp->b_sema); in xfs_buf_lock()
972 XB_SET_OWNER(bp); in xfs_buf_lock()
974 trace_xfs_buf_lock_done(bp, _RET_IP_); in xfs_buf_lock()
979 struct xfs_buf *bp) in xfs_buf_unlock() argument
981 XB_CLEAR_OWNER(bp); in xfs_buf_unlock()
982 up(&bp->b_sema); in xfs_buf_unlock()
984 trace_xfs_buf_unlock(bp, _RET_IP_); in xfs_buf_unlock()
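The buffer lock is a counting semaphore used as a mutex: _xfs_buf_alloc() initialises b_sema to 0, so a freshly created buffer is already held; xfs_buf_trylock() succeeds when down_trylock() returns 0, xfs_buf_lock() sleeps in down(), and xfs_buf_unlock() is an up(). A POSIX-semaphore sketch of the same usage (a user-space analogue, not the kernel API; link with -pthread):

    #include <semaphore.h>
    #include <stdio.h>

    int main(void)
    {
        sem_t lock;

        sem_init(&lock, 0, 0);            /* count 0: born locked, like _xfs_buf_alloc() */

        if (sem_trywait(&lock) != 0)      /* trylock: fails without blocking */
            printf("trylock failed: buffer already held\n");

        sem_post(&lock);                  /* unlock */
        sem_wait(&lock);                  /* lock: would sleep if someone held it */
        printf("lock acquired\n");

        sem_post(&lock);                  /* unlock again */
        sem_destroy(&lock);
        return 0;
    }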
989 xfs_buf_t *bp) in xfs_buf_wait_unpin() argument
993 if (atomic_read(&bp->b_pin_count) == 0) in xfs_buf_wait_unpin()
996 add_wait_queue(&bp->b_waiters, &wait); in xfs_buf_wait_unpin()
999 if (atomic_read(&bp->b_pin_count) == 0) in xfs_buf_wait_unpin()
1003 remove_wait_queue(&bp->b_waiters, &wait); in xfs_buf_wait_unpin()
1013 struct xfs_buf *bp) in xfs_buf_ioend() argument
1015 bool read = bp->b_flags & XBF_READ; in xfs_buf_ioend()
1017 trace_xfs_buf_iodone(bp, _RET_IP_); in xfs_buf_ioend()
1019 bp->b_flags &= ~(XBF_READ | XBF_WRITE | XBF_READ_AHEAD); in xfs_buf_ioend()
1025 if (!bp->b_error && bp->b_io_error) in xfs_buf_ioend()
1026 xfs_buf_ioerror(bp, bp->b_io_error); in xfs_buf_ioend()
1029 if (read && !bp->b_error && bp->b_ops) { in xfs_buf_ioend()
1030 ASSERT(!bp->b_iodone); in xfs_buf_ioend()
1031 bp->b_ops->verify_read(bp); in xfs_buf_ioend()
1034 if (!bp->b_error) in xfs_buf_ioend()
1035 bp->b_flags |= XBF_DONE; in xfs_buf_ioend()
1037 if (bp->b_iodone) in xfs_buf_ioend()
1038 (*(bp->b_iodone))(bp); in xfs_buf_ioend()
1039 else if (bp->b_flags & XBF_ASYNC) in xfs_buf_ioend()
1040 xfs_buf_relse(bp); in xfs_buf_ioend()
1042 complete(&bp->b_iowait); in xfs_buf_ioend()
1049 struct xfs_buf *bp = in xfs_buf_ioend_work() local
1052 xfs_buf_ioend(bp); in xfs_buf_ioend_work()
1057 struct xfs_buf *bp) in xfs_buf_ioend_async() argument
1059 INIT_WORK(&bp->b_ioend_work, xfs_buf_ioend_work); in xfs_buf_ioend_async()
1060 queue_work(bp->b_ioend_wq, &bp->b_ioend_work); in xfs_buf_ioend_async()
1065 xfs_buf_t *bp, in xfs_buf_ioerror() argument
1069 bp->b_error = error; in xfs_buf_ioerror()
1070 trace_xfs_buf_ioerror(bp, error, _RET_IP_); in xfs_buf_ioerror()
1075 struct xfs_buf *bp, in xfs_buf_ioerror_alert() argument
1078 xfs_alert(bp->b_target->bt_mount, in xfs_buf_ioerror_alert()
1080 (__uint64_t)XFS_BUF_ADDR(bp), func, -bp->b_error, bp->b_length); in xfs_buf_ioerror_alert()
1085 struct xfs_buf *bp) in xfs_bwrite() argument
1089 ASSERT(xfs_buf_islocked(bp)); in xfs_bwrite()
1091 bp->b_flags |= XBF_WRITE; in xfs_bwrite()
1092 bp->b_flags &= ~(XBF_ASYNC | XBF_READ | _XBF_DELWRI_Q | in xfs_bwrite()
1095 error = xfs_buf_submit_wait(bp); in xfs_bwrite()
1097 xfs_force_shutdown(bp->b_target->bt_mount, in xfs_bwrite()
1107 xfs_buf_t *bp = (xfs_buf_t *)bio->bi_private; in xfs_buf_bio_end_io() local
1114 spin_lock(&bp->b_lock); in xfs_buf_bio_end_io()
1115 if (!bp->b_io_error) in xfs_buf_bio_end_io()
1116 bp->b_io_error = bio->bi_error; in xfs_buf_bio_end_io()
1117 spin_unlock(&bp->b_lock); in xfs_buf_bio_end_io()
1120 if (!bp->b_error && xfs_buf_is_vmapped(bp) && (bp->b_flags & XBF_READ)) in xfs_buf_bio_end_io()
1121 invalidate_kernel_vmap_range(bp->b_addr, xfs_buf_vmap_len(bp)); in xfs_buf_bio_end_io()
1123 if (atomic_dec_and_test(&bp->b_io_remaining) == 1) in xfs_buf_bio_end_io()
1124 xfs_buf_ioend_async(bp); in xfs_buf_bio_end_io()
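xfs_buf_bio_end_io() records only the first error seen across the buffer's bios: it takes b_lock and stores bi_error into b_io_error only if nothing has been stored yet, so later completions cannot overwrite the original failure. A user-space sketch of that first-error-wins pattern using a pthread mutex (record_error() and the error values are illustrative; build with -pthread):

    #include <pthread.h>
    #include <stdio.h>

    /* Keep only the first error reported by concurrent I/O completions. */
    static pthread_mutex_t err_lock = PTHREAD_MUTEX_INITIALIZER;
    static int first_error;

    static void record_error(int err)
    {
        pthread_mutex_lock(&err_lock);
        if (!first_error)                 /* later errors never overwrite the first */
            first_error = err;
        pthread_mutex_unlock(&err_lock);
    }

    int main(void)
    {
        record_error(-5);                 /* say, -EIO from the first failing bio */
        record_error(-12);                /* ignored: an error is already recorded */
        printf("first error: %d\n", first_error);   /* prints -5 */
        return 0;
    }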
1130 struct xfs_buf *bp, in xfs_buf_ioapply_map() argument
1137 int total_nr_pages = bp->b_page_count; in xfs_buf_ioapply_map()
1140 sector_t sector = bp->b_maps[map].bm_bn; in xfs_buf_ioapply_map()
1144 total_nr_pages = bp->b_page_count; in xfs_buf_ioapply_map()
1158 size = min_t(int, BBTOB(bp->b_maps[map].bm_len), *count); in xfs_buf_ioapply_map()
1163 atomic_inc(&bp->b_io_remaining); in xfs_buf_ioapply_map()
1169 bio->bi_bdev = bp->b_target->bt_bdev; in xfs_buf_ioapply_map()
1172 bio->bi_private = bp; in xfs_buf_ioapply_map()
1181 rbytes = bio_add_page(bio, bp->b_pages[page_index], nbytes, in xfs_buf_ioapply_map()
1193 if (xfs_buf_is_vmapped(bp)) { in xfs_buf_ioapply_map()
1194 flush_kernel_vmap_range(bp->b_addr, in xfs_buf_ioapply_map()
1195 xfs_buf_vmap_len(bp)); in xfs_buf_ioapply_map()
1205 atomic_dec(&bp->b_io_remaining); in xfs_buf_ioapply_map()
1206 xfs_buf_ioerror(bp, -EIO); in xfs_buf_ioapply_map()
1214 struct xfs_buf *bp) in _xfs_buf_ioapply() argument
1226 bp->b_error = 0; in _xfs_buf_ioapply()
1232 if (!bp->b_ioend_wq) in _xfs_buf_ioapply()
1233 bp->b_ioend_wq = bp->b_target->bt_mount->m_buf_workqueue; in _xfs_buf_ioapply()
1235 if (bp->b_flags & XBF_WRITE) { in _xfs_buf_ioapply()
1236 if (bp->b_flags & XBF_SYNCIO) in _xfs_buf_ioapply()
1240 if (bp->b_flags & XBF_FUA) in _xfs_buf_ioapply()
1242 if (bp->b_flags & XBF_FLUSH) in _xfs_buf_ioapply()
1250 if (bp->b_ops) { in _xfs_buf_ioapply()
1251 bp->b_ops->verify_write(bp); in _xfs_buf_ioapply()
1252 if (bp->b_error) { in _xfs_buf_ioapply()
1253 xfs_force_shutdown(bp->b_target->bt_mount, in _xfs_buf_ioapply()
1257 } else if (bp->b_bn != XFS_BUF_DADDR_NULL) { in _xfs_buf_ioapply()
1258 struct xfs_mount *mp = bp->b_target->bt_mount; in _xfs_buf_ioapply()
1267 __func__, bp->b_bn, bp->b_length); in _xfs_buf_ioapply()
1268 xfs_hex_dump(bp->b_addr, 64); in _xfs_buf_ioapply()
1272 } else if (bp->b_flags & XBF_READ_AHEAD) { in _xfs_buf_ioapply()
1287 offset = bp->b_offset; in _xfs_buf_ioapply()
1288 size = BBTOB(bp->b_io_length); in _xfs_buf_ioapply()
1290 for (i = 0; i < bp->b_map_count; i++) { in _xfs_buf_ioapply()
1291 xfs_buf_ioapply_map(bp, i, &offset, &size, rw); in _xfs_buf_ioapply()
1292 if (bp->b_error) in _xfs_buf_ioapply()
1308 struct xfs_buf *bp) in xfs_buf_submit() argument
1310 trace_xfs_buf_submit(bp, _RET_IP_); in xfs_buf_submit()
1312 ASSERT(!(bp->b_flags & _XBF_DELWRI_Q)); in xfs_buf_submit()
1313 ASSERT(bp->b_flags & XBF_ASYNC); in xfs_buf_submit()
1316 if (XFS_FORCED_SHUTDOWN(bp->b_target->bt_mount)) { in xfs_buf_submit()
1317 xfs_buf_ioerror(bp, -EIO); in xfs_buf_submit()
1318 bp->b_flags &= ~XBF_DONE; in xfs_buf_submit()
1319 xfs_buf_stale(bp); in xfs_buf_submit()
1320 xfs_buf_ioend(bp); in xfs_buf_submit()
1324 if (bp->b_flags & XBF_WRITE) in xfs_buf_submit()
1325 xfs_buf_wait_unpin(bp); in xfs_buf_submit()
1328 bp->b_io_error = 0; in xfs_buf_submit()
1338 xfs_buf_hold(bp); in xfs_buf_submit()
1345 atomic_set(&bp->b_io_remaining, 1); in xfs_buf_submit()
1346 _xfs_buf_ioapply(bp); in xfs_buf_submit()
1353 if (atomic_dec_and_test(&bp->b_io_remaining) == 1) { in xfs_buf_submit()
1354 if (bp->b_error) in xfs_buf_submit()
1355 xfs_buf_ioend(bp); in xfs_buf_submit()
1357 xfs_buf_ioend_async(bp); in xfs_buf_submit()
1360 xfs_buf_rele(bp); in xfs_buf_submit()
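xfs_buf_submit() sets b_io_remaining to 1 before building any bios; each issued bio takes another count and drops it on completion, and the submitter drops the initial count last, so ioend processing cannot run while bios are still being prepared. A single-threaded sketch of that biased-count pattern (struct io_ctx and io_complete() are illustrative stand-ins for the atomic counter):

    #include <stdio.h>

    struct io_ctx { int remaining; };     /* stands in for atomic b_io_remaining */

    static void io_complete(struct io_ctx *c)
    {
        if (--c->remaining == 0)
            printf("last reference dropped: run ioend processing\n");
    }

    int main(void)
    {
        struct io_ctx c = { .remaining = 1 };   /* bias held by the submitter */

        for (int i = 0; i < 3; i++)
            c.remaining++;                      /* one count per issued bio */
        for (int i = 0; i < 3; i++)
            io_complete(&c);                    /* bios complete; count never hits 0 here */

        io_complete(&c);                        /* submitter drops its bias last */
        return 0;
    }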
1369 struct xfs_buf *bp) in xfs_buf_submit_wait() argument
1373 trace_xfs_buf_submit_wait(bp, _RET_IP_); in xfs_buf_submit_wait()
1375 ASSERT(!(bp->b_flags & (_XBF_DELWRI_Q | XBF_ASYNC))); in xfs_buf_submit_wait()
1377 if (XFS_FORCED_SHUTDOWN(bp->b_target->bt_mount)) { in xfs_buf_submit_wait()
1378 xfs_buf_ioerror(bp, -EIO); in xfs_buf_submit_wait()
1379 xfs_buf_stale(bp); in xfs_buf_submit_wait()
1380 bp->b_flags &= ~XBF_DONE; in xfs_buf_submit_wait()
1384 if (bp->b_flags & XBF_WRITE) in xfs_buf_submit_wait()
1385 xfs_buf_wait_unpin(bp); in xfs_buf_submit_wait()
1388 bp->b_io_error = 0; in xfs_buf_submit_wait()
1396 xfs_buf_hold(bp); in xfs_buf_submit_wait()
1403 atomic_set(&bp->b_io_remaining, 1); in xfs_buf_submit_wait()
1404 _xfs_buf_ioapply(bp); in xfs_buf_submit_wait()
1410 if (atomic_dec_and_test(&bp->b_io_remaining) == 1) in xfs_buf_submit_wait()
1411 xfs_buf_ioend(bp); in xfs_buf_submit_wait()
1414 trace_xfs_buf_iowait(bp, _RET_IP_); in xfs_buf_submit_wait()
1415 wait_for_completion(&bp->b_iowait); in xfs_buf_submit_wait()
1416 trace_xfs_buf_iowait_done(bp, _RET_IP_); in xfs_buf_submit_wait()
1417 error = bp->b_error; in xfs_buf_submit_wait()
1423 xfs_buf_rele(bp); in xfs_buf_submit_wait()
1429 struct xfs_buf *bp, in xfs_buf_offset() argument
1434 if (bp->b_addr) in xfs_buf_offset()
1435 return bp->b_addr + offset; in xfs_buf_offset()
1437 offset += bp->b_offset; in xfs_buf_offset()
1438 page = bp->b_pages[offset >> PAGE_SHIFT]; in xfs_buf_offset()
1447 xfs_buf_t *bp, /* buffer to process */ in xfs_buf_iomove() argument
1460 page_index = (boff + bp->b_offset) >> PAGE_SHIFT; in xfs_buf_iomove()
1461 page_offset = (boff + bp->b_offset) & ~PAGE_MASK; in xfs_buf_iomove()
1462 page = bp->b_pages[page_index]; in xfs_buf_iomove()
1464 BBTOB(bp->b_io_length) - boff); in xfs_buf_iomove()
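xfs_buf_offset() and xfs_buf_iomove() translate a byte offset within the buffer into a page index and an offset inside that page: the index is the offset shifted down by PAGE_SHIFT, and the in-page offset is the offset masked with ~PAGE_MASK (PAGE_MASK keeps the page-aligned bits, so its complement keeps the low bits). A small worked example, assuming 4 KiB pages:

    #include <stdio.h>

    #define PAGE_SHIFT 12
    #define PAGE_SIZE  (1UL << PAGE_SHIFT)
    #define PAGE_MASK  (~(PAGE_SIZE - 1))

    int main(void)
    {
        unsigned long off = 9000;                      /* byte offset into the buffer  */

        unsigned long page_index  = off >> PAGE_SHIFT; /* which page: 2                */
        unsigned long page_offset = off & ~PAGE_MASK;  /* offset inside that page: 808 */

        printf("page %lu, offset %lu\n", page_index, page_offset);
        return 0;
    }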
1501 struct xfs_buf *bp = container_of(item, struct xfs_buf, b_lru); in xfs_buftarg_wait_rele() local
1504 if (atomic_read(&bp->b_hold) > 1) { in xfs_buftarg_wait_rele()
1506 trace_xfs_buf_wait_buftarg(bp, _RET_IP_); in xfs_buftarg_wait_rele()
1509 if (!spin_trylock(&bp->b_lock)) in xfs_buftarg_wait_rele()
1516 atomic_set(&bp->b_lru_ref, 0); in xfs_buftarg_wait_rele()
1517 bp->b_state |= XFS_BSTATE_DISPOSE; in xfs_buftarg_wait_rele()
1519 spin_unlock(&bp->b_lock); in xfs_buftarg_wait_rele()
1546 struct xfs_buf *bp; in xfs_wait_buftarg() local
1547 bp = list_first_entry(&dispose, struct xfs_buf, b_lru); in xfs_wait_buftarg()
1548 list_del_init(&bp->b_lru); in xfs_wait_buftarg()
1549 if (bp->b_flags & XBF_WRITE_FAIL) { in xfs_wait_buftarg()
1552 (long long)bp->b_bn); in xfs_wait_buftarg()
1556 xfs_buf_rele(bp); in xfs_wait_buftarg()
1570 struct xfs_buf *bp = container_of(item, struct xfs_buf, b_lru); in xfs_buftarg_isolate() local
1577 if (!spin_trylock(&bp->b_lock)) in xfs_buftarg_isolate()
1584 if (!atomic_add_unless(&bp->b_lru_ref, -1, 0)) { in xfs_buftarg_isolate()
1585 spin_unlock(&bp->b_lock); in xfs_buftarg_isolate()
1589 bp->b_state |= XFS_BSTATE_DISPOSE; in xfs_buftarg_isolate()
1591 spin_unlock(&bp->b_lock); in xfs_buftarg_isolate()
1609 struct xfs_buf *bp; in xfs_buftarg_shrink_scan() local
1610 bp = list_first_entry(&dispose, struct xfs_buf, b_lru); in xfs_buftarg_shrink_scan()
1611 list_del_init(&bp->b_lru); in xfs_buftarg_shrink_scan()
1612 xfs_buf_rele(bp); in xfs_buftarg_shrink_scan()
1727 struct xfs_buf *bp, in xfs_buf_delwri_queue() argument
1730 ASSERT(xfs_buf_islocked(bp)); in xfs_buf_delwri_queue()
1731 ASSERT(!(bp->b_flags & XBF_READ)); in xfs_buf_delwri_queue()
1738 if (bp->b_flags & _XBF_DELWRI_Q) { in xfs_buf_delwri_queue()
1739 trace_xfs_buf_delwri_queued(bp, _RET_IP_); in xfs_buf_delwri_queue()
1743 trace_xfs_buf_delwri_queue(bp, _RET_IP_); in xfs_buf_delwri_queue()
1753 bp->b_flags |= _XBF_DELWRI_Q; in xfs_buf_delwri_queue()
1754 if (list_empty(&bp->b_list)) { in xfs_buf_delwri_queue()
1755 atomic_inc(&bp->b_hold); in xfs_buf_delwri_queue()
1756 list_add_tail(&bp->b_list, list); in xfs_buf_delwri_queue()
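xfs_buf_delwri_queue() is idempotent: if _XBF_DELWRI_Q is already set it just traces the event and returns, otherwise it sets the flag and, if the buffer is not already on a list, takes a hold reference and adds it to the tail of the caller's list. A user-space sketch of that queue-once pattern (struct buf, delwri_queue(), and the boolean fields are simplified stand-ins, not the kernel structures):

    #include <stdbool.h>
    #include <stdio.h>

    #define FLAG_DELWRI_Q 0x1              /* mirrors _XBF_DELWRI_Q */

    struct buf {
        unsigned flags;
        int      hold;                     /* mirrors the atomic b_hold count        */
        bool     on_list;                  /* stands in for !list_empty(&bp->b_list) */
    };

    /* Queue for delayed write unless it is already queued. */
    static bool delwri_queue(struct buf *b)
    {
        if (b->flags & FLAG_DELWRI_Q)
            return false;                  /* already queued: nothing to do */

        b->flags |= FLAG_DELWRI_Q;
        if (!b->on_list) {
            b->hold++;                     /* the list owns a reference to the buffer */
            b->on_list = true;
        }
        return true;
    }

    int main(void)
    {
        struct buf b = { 0 };

        printf("queued %d, hold %d\n", delwri_queue(&b), b.hold);   /* queued 1, hold 1 */
        printf("queued %d, hold %d\n", delwri_queue(&b), b.hold);   /* queued 0, hold 1 */
        return 0;
    }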
1774 struct xfs_buf *bp = container_of(b, struct xfs_buf, b_list); in xfs_buf_cmp() local
1777 diff = ap->b_maps[0].bm_bn - bp->b_maps[0].bm_bn; in xfs_buf_cmp()
1792 struct xfs_buf *bp, *n; in __xfs_buf_delwri_submit() local
1795 list_for_each_entry_safe(bp, n, buffer_list, b_list) { in __xfs_buf_delwri_submit()
1797 if (xfs_buf_ispinned(bp)) { in __xfs_buf_delwri_submit()
1801 if (!xfs_buf_trylock(bp)) in __xfs_buf_delwri_submit()
1804 xfs_buf_lock(bp); in __xfs_buf_delwri_submit()
1813 if (!(bp->b_flags & _XBF_DELWRI_Q)) { in __xfs_buf_delwri_submit()
1814 list_del_init(&bp->b_list); in __xfs_buf_delwri_submit()
1815 xfs_buf_relse(bp); in __xfs_buf_delwri_submit()
1819 list_move_tail(&bp->b_list, io_list); in __xfs_buf_delwri_submit()
1820 trace_xfs_buf_delwri_split(bp, _RET_IP_); in __xfs_buf_delwri_submit()
1826 list_for_each_entry_safe(bp, n, io_list, b_list) { in __xfs_buf_delwri_submit()
1827 bp->b_flags &= ~(_XBF_DELWRI_Q | XBF_ASYNC | XBF_WRITE_FAIL); in __xfs_buf_delwri_submit()
1828 bp->b_flags |= XBF_WRITE | XBF_ASYNC; in __xfs_buf_delwri_submit()
1836 xfs_buf_hold(bp); in __xfs_buf_delwri_submit()
1838 list_del_init(&bp->b_list); in __xfs_buf_delwri_submit()
1840 xfs_buf_submit(bp); in __xfs_buf_delwri_submit()
1878 struct xfs_buf *bp; in xfs_buf_delwri_submit() local
1884 bp = list_first_entry(&io_list, struct xfs_buf, b_list); in xfs_buf_delwri_submit()
1886 list_del_init(&bp->b_list); in xfs_buf_delwri_submit()
1889 xfs_buf_lock(bp); in xfs_buf_delwri_submit()
1890 error2 = bp->b_error; in xfs_buf_delwri_submit()
1891 xfs_buf_relse(bp); in xfs_buf_delwri_submit()