Lines matching refs:bp in fs/xfs/xfs_buf.c. Each entry shows the source line number, the matching code line, and the enclosing function; bp is annotated there as an argument or a local.
52 static int __xfs_buf_submit(struct xfs_buf *bp, bool wait);
56 struct xfs_buf *bp) in xfs_buf_submit() argument
58 return __xfs_buf_submit(bp, !(bp->b_flags & XBF_ASYNC)); in xfs_buf_submit()
63 struct xfs_buf *bp) in xfs_buf_is_vmapped() argument
72 return bp->b_addr && bp->b_page_count > 1; in xfs_buf_is_vmapped()
77 struct xfs_buf *bp) in xfs_buf_vmap_len() argument
79 return (bp->b_page_count * PAGE_SIZE); in xfs_buf_vmap_len()
97 struct xfs_buf *bp) in xfs_buf_ioacct_inc() argument
99 if (bp->b_flags & XBF_NO_IOACCT) in xfs_buf_ioacct_inc()
102 ASSERT(bp->b_flags & XBF_ASYNC); in xfs_buf_ioacct_inc()
103 spin_lock(&bp->b_lock); in xfs_buf_ioacct_inc()
104 if (!(bp->b_state & XFS_BSTATE_IN_FLIGHT)) { in xfs_buf_ioacct_inc()
105 bp->b_state |= XFS_BSTATE_IN_FLIGHT; in xfs_buf_ioacct_inc()
106 percpu_counter_inc(&bp->b_target->bt_io_count); in xfs_buf_ioacct_inc()
108 spin_unlock(&bp->b_lock); in xfs_buf_ioacct_inc()
117 struct xfs_buf *bp) in __xfs_buf_ioacct_dec() argument
119 lockdep_assert_held(&bp->b_lock); in __xfs_buf_ioacct_dec()
121 if (bp->b_state & XFS_BSTATE_IN_FLIGHT) { in __xfs_buf_ioacct_dec()
122 bp->b_state &= ~XFS_BSTATE_IN_FLIGHT; in __xfs_buf_ioacct_dec()
123 percpu_counter_dec(&bp->b_target->bt_io_count); in __xfs_buf_ioacct_dec()
129 struct xfs_buf *bp) in xfs_buf_ioacct_dec() argument
131 spin_lock(&bp->b_lock); in xfs_buf_ioacct_dec()
132 __xfs_buf_ioacct_dec(bp); in xfs_buf_ioacct_dec()
133 spin_unlock(&bp->b_lock); in xfs_buf_ioacct_dec()
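The xfs_buf_ioacct_inc()/__xfs_buf_ioacct_dec() lines above implement in-flight buffer accounting: a per-buffer state bit guarantees the buftarg-wide counter moves at most once per buffer, no matter how often accounting is requested. A minimal userspace analogue of that pattern, with a pthread mutex standing in for the b_lock spinlock and a plain locked counter for the percpu bt_io_count (all names below are invented for the sketch):

#include <pthread.h>

#define IN_FLIGHT 0x1                   /* stands in for XFS_BSTATE_IN_FLIGHT */

struct buf {
        pthread_mutex_t lock;           /* stands in for bp->b_lock */
        unsigned int state;
};

static long io_count;                   /* stands in for bt_io_count */
static pthread_mutex_t io_count_lock = PTHREAD_MUTEX_INITIALIZER;

static void ioacct_inc(struct buf *b)
{
        pthread_mutex_lock(&b->lock);
        if (!(b->state & IN_FLIGHT)) {  /* count each buffer only once */
                b->state |= IN_FLIGHT;
                pthread_mutex_lock(&io_count_lock);
                io_count++;
                pthread_mutex_unlock(&io_count_lock);
        }
        pthread_mutex_unlock(&b->lock);
}

static void ioacct_dec(struct buf *b)
{
        pthread_mutex_lock(&b->lock);
        if (b->state & IN_FLIGHT) {
                b->state &= ~IN_FLIGHT;
                pthread_mutex_lock(&io_count_lock);
                io_count--;
                pthread_mutex_unlock(&io_count_lock);
        }
        pthread_mutex_unlock(&b->lock);
}

int main(void)
{
        struct buf b = { PTHREAD_MUTEX_INITIALIZER, 0 };

        ioacct_inc(&b);
        ioacct_inc(&b);                 /* no-op: already in flight */
        ioacct_dec(&b);
        return (int)io_count;           /* 0: the counter is balanced */
}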
146 struct xfs_buf *bp) in xfs_buf_stale() argument
148 ASSERT(xfs_buf_islocked(bp)); in xfs_buf_stale()
150 bp->b_flags |= XBF_STALE; in xfs_buf_stale()
157 bp->b_flags &= ~_XBF_DELWRI_Q; in xfs_buf_stale()
165 spin_lock(&bp->b_lock); in xfs_buf_stale()
166 __xfs_buf_ioacct_dec(bp); in xfs_buf_stale()
168 atomic_set(&bp->b_lru_ref, 0); in xfs_buf_stale()
169 if (!(bp->b_state & XFS_BSTATE_DISPOSE) && in xfs_buf_stale()
170 (list_lru_del(&bp->b_target->bt_lru, &bp->b_lru))) in xfs_buf_stale()
171 atomic_dec(&bp->b_hold); in xfs_buf_stale()
173 ASSERT(atomic_read(&bp->b_hold) >= 1); in xfs_buf_stale()
174 spin_unlock(&bp->b_lock); in xfs_buf_stale()
179 struct xfs_buf *bp, in xfs_buf_get_maps() argument
182 ASSERT(bp->b_maps == NULL); in xfs_buf_get_maps()
183 bp->b_map_count = map_count; in xfs_buf_get_maps()
186 bp->b_maps = &bp->__b_map; in xfs_buf_get_maps()
190 bp->b_maps = kmem_zalloc(map_count * sizeof(struct xfs_buf_map), in xfs_buf_get_maps()
192 if (!bp->b_maps) in xfs_buf_get_maps()
202 struct xfs_buf *bp) in xfs_buf_free_maps() argument
204 if (bp->b_maps != &bp->__b_map) { in xfs_buf_free_maps()
205 kmem_free(bp->b_maps); in xfs_buf_free_maps()
206 bp->b_maps = NULL; in xfs_buf_free_maps()
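xfs_buf_get_maps()/xfs_buf_free_maps() above use a small-array optimization: the common single-map buffer points b_maps at the embedded __b_map instead of allocating, and the free path only releases heap-allocated arrays. A compilable sketch of the same idea with simplified stand-in types:

#include <stdlib.h>

struct map { long bn; int len; };       /* stand-in for struct xfs_buf_map */

struct buf {
        struct map *maps;               /* usually points at __map */
        struct map __map;               /* inline storage, single-map case */
        int map_count;
};

static int buf_get_maps(struct buf *b, int map_count)
{
        b->map_count = map_count;
        if (map_count == 1) {           /* common case: no allocation */
                b->maps = &b->__map;
                return 0;
        }
        b->maps = calloc(map_count, sizeof(struct map));
        return b->maps ? 0 : -1;
}

static void buf_free_maps(struct buf *b)
{
        if (b->maps != &b->__map)       /* only free what calloc() gave us */
                free(b->maps);
        b->maps = NULL;
}

int main(void)
{
        struct buf b = { 0 };

        if (buf_get_maps(&b, 1))
                return 1;
        buf_free_maps(&b);              /* embedded map: nothing to free */
        if (buf_get_maps(&b, 4))
                return 1;
        buf_free_maps(&b);              /* heap array: freed */
        return 0;
}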
218 struct xfs_buf *bp; in _xfs_buf_alloc() local
223 bp = kmem_cache_zalloc(xfs_buf_zone, GFP_NOFS | __GFP_NOFAIL); in _xfs_buf_alloc()
231 atomic_set(&bp->b_hold, 1); in _xfs_buf_alloc()
232 atomic_set(&bp->b_lru_ref, 1); in _xfs_buf_alloc()
233 init_completion(&bp->b_iowait); in _xfs_buf_alloc()
234 INIT_LIST_HEAD(&bp->b_lru); in _xfs_buf_alloc()
235 INIT_LIST_HEAD(&bp->b_list); in _xfs_buf_alloc()
236 INIT_LIST_HEAD(&bp->b_li_list); in _xfs_buf_alloc()
237 sema_init(&bp->b_sema, 0); /* held, no waiters */ in _xfs_buf_alloc()
238 spin_lock_init(&bp->b_lock); in _xfs_buf_alloc()
239 bp->b_target = target; in _xfs_buf_alloc()
240 bp->b_mount = target->bt_mount; in _xfs_buf_alloc()
241 bp->b_flags = flags; in _xfs_buf_alloc()
248 error = xfs_buf_get_maps(bp, nmaps); in _xfs_buf_alloc()
250 kmem_cache_free(xfs_buf_zone, bp); in _xfs_buf_alloc()
254 bp->b_rhash_key = map[0].bm_bn; in _xfs_buf_alloc()
255 bp->b_length = 0; in _xfs_buf_alloc()
257 bp->b_maps[i].bm_bn = map[i].bm_bn; in _xfs_buf_alloc()
258 bp->b_maps[i].bm_len = map[i].bm_len; in _xfs_buf_alloc()
259 bp->b_length += map[i].bm_len; in _xfs_buf_alloc()
262 atomic_set(&bp->b_pin_count, 0); in _xfs_buf_alloc()
263 init_waitqueue_head(&bp->b_waiters); in _xfs_buf_alloc()
265 XFS_STATS_INC(bp->b_mount, xb_create); in _xfs_buf_alloc()
266 trace_xfs_buf_init(bp, _RET_IP_); in _xfs_buf_alloc()
268 *bpp = bp; in _xfs_buf_alloc()
274 struct xfs_buf *bp) in xfs_buf_free_pages() argument
278 ASSERT(bp->b_flags & _XBF_PAGES); in xfs_buf_free_pages()
280 if (xfs_buf_is_vmapped(bp)) in xfs_buf_free_pages()
281 vm_unmap_ram(bp->b_addr, bp->b_page_count); in xfs_buf_free_pages()
283 for (i = 0; i < bp->b_page_count; i++) { in xfs_buf_free_pages()
284 if (bp->b_pages[i]) in xfs_buf_free_pages()
285 __free_page(bp->b_pages[i]); in xfs_buf_free_pages()
288 current->reclaim_state->reclaimed_slab += bp->b_page_count; in xfs_buf_free_pages()
290 if (bp->b_pages != bp->b_page_array) in xfs_buf_free_pages()
291 kmem_free(bp->b_pages); in xfs_buf_free_pages()
292 bp->b_pages = NULL; in xfs_buf_free_pages()
293 bp->b_flags &= ~_XBF_PAGES; in xfs_buf_free_pages()
298 struct xfs_buf *bp) in xfs_buf_free() argument
300 trace_xfs_buf_free(bp, _RET_IP_); in xfs_buf_free()
302 ASSERT(list_empty(&bp->b_lru)); in xfs_buf_free()
304 if (bp->b_flags & _XBF_PAGES) in xfs_buf_free()
305 xfs_buf_free_pages(bp); in xfs_buf_free()
306 else if (bp->b_flags & _XBF_KMEM) in xfs_buf_free()
307 kmem_free(bp->b_addr); in xfs_buf_free()
309 xfs_buf_free_maps(bp); in xfs_buf_free()
310 kmem_cache_free(xfs_buf_zone, bp); in xfs_buf_free()
315 struct xfs_buf *bp, in xfs_buf_alloc_kmem() argument
319 size_t size = BBTOB(bp->b_length); in xfs_buf_alloc_kmem()
325 bp->b_addr = kmem_alloc(size, kmflag_mask); in xfs_buf_alloc_kmem()
326 if (!bp->b_addr) in xfs_buf_alloc_kmem()
329 if (((unsigned long)(bp->b_addr + size - 1) & PAGE_MASK) != in xfs_buf_alloc_kmem()
330 ((unsigned long)bp->b_addr & PAGE_MASK)) { in xfs_buf_alloc_kmem()
332 kmem_free(bp->b_addr); in xfs_buf_alloc_kmem()
333 bp->b_addr = NULL; in xfs_buf_alloc_kmem()
336 bp->b_offset = offset_in_page(bp->b_addr); in xfs_buf_alloc_kmem()
337 bp->b_pages = bp->b_page_array; in xfs_buf_alloc_kmem()
338 bp->b_pages[0] = kmem_to_page(bp->b_addr); in xfs_buf_alloc_kmem()
339 bp->b_page_count = 1; in xfs_buf_alloc_kmem()
340 bp->b_flags |= _XBF_KMEM; in xfs_buf_alloc_kmem()
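The xfs_buf_alloc_kmem() lines above reject any heap allocation whose bytes straddle a page boundary, since the buffer is then treated as a single page. A small sketch of that boundary test (PAGE_SIZE is fixed at 4096 here purely for illustration):

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <stdlib.h>

#define PAGE_SIZE 4096UL
#define PAGE_MASK (~(PAGE_SIZE - 1))

/* true if [addr, addr + size) touches more than one page */
static bool crosses_page(const void *addr, size_t size)
{
        uintptr_t first = (uintptr_t)addr & PAGE_MASK;
        uintptr_t last = ((uintptr_t)addr + size - 1) & PAGE_MASK;

        return first != last;
}

int main(void)
{
        char *page = aligned_alloc(PAGE_SIZE, PAGE_SIZE);
        int bad;

        if (!page)
                return 1;
        /* an aligned page fits; shifting it by one byte straddles */
        bad = crosses_page(page, PAGE_SIZE) ||
              !crosses_page(page + 1, PAGE_SIZE);
        free(page);
        return bad;                     /* 0 on the expected path */
}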
346 struct xfs_buf *bp, in xfs_buf_alloc_pages() argument
358 bp->b_page_count = DIV_ROUND_UP(BBTOB(bp->b_length), PAGE_SIZE); in xfs_buf_alloc_pages()
359 if (bp->b_page_count <= XB_PAGES) { in xfs_buf_alloc_pages()
360 bp->b_pages = bp->b_page_array; in xfs_buf_alloc_pages()
362 bp->b_pages = kzalloc(sizeof(struct page *) * bp->b_page_count, in xfs_buf_alloc_pages()
364 if (!bp->b_pages) in xfs_buf_alloc_pages()
367 bp->b_flags |= _XBF_PAGES; in xfs_buf_alloc_pages()
381 filled = alloc_pages_bulk_array(gfp_mask, bp->b_page_count, in xfs_buf_alloc_pages()
382 bp->b_pages); in xfs_buf_alloc_pages()
383 if (filled == bp->b_page_count) { in xfs_buf_alloc_pages()
384 XFS_STATS_INC(bp->b_mount, xb_page_found); in xfs_buf_alloc_pages()
392 xfs_buf_free_pages(bp); in xfs_buf_alloc_pages()
396 XFS_STATS_INC(bp->b_mount, xb_page_retries); in xfs_buf_alloc_pages()
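xfs_buf_alloc_pages() above loops on the bulk page allocator until every slot of b_pages is filled, giving up early only for readahead, which is best-effort. A self-contained userspace approximation of that retry loop; try_alloc_bulk() and the usleep() backoff are invented stand-ins for alloc_pages_bulk_array() and the kernel's backoff, not real kernel calls:

#include <errno.h>
#include <stdbool.h>
#include <stdlib.h>
#include <unistd.h>

/* stand-in bulk allocator: fills as many slots as it can, returns the count */
static int try_alloc_bulk(void **pages, int n)
{
        int got = 0;

        while (got < n && (pages[got] = malloc(4096)))
                got++;
        return got;
}

static int alloc_all(void **pages, int want, bool readahead)
{
        int filled = 0;

        for (;;) {
                filled += try_alloc_bulk(pages + filled, want - filled);
                if (filled == want)
                        return 0;       /* every slot populated */
                if (readahead)
                        return -ENOMEM; /* opportunistic caller: fail fast */
                usleep(1000);           /* back off before retrying */
        }
}

int main(void)
{
        void *pages[8] = { 0 };
        int error = alloc_all(pages, 8, false);

        for (int i = 0; i < 8 && pages[i]; i++)
                free(pages[i]);
        return error;
}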
407 struct xfs_buf *bp, in _xfs_buf_map_pages() argument
410 ASSERT(bp->b_flags & _XBF_PAGES); in _xfs_buf_map_pages()
411 if (bp->b_page_count == 1) { in _xfs_buf_map_pages()
413 bp->b_addr = page_address(bp->b_pages[0]); in _xfs_buf_map_pages()
415 bp->b_addr = NULL; in _xfs_buf_map_pages()
430 bp->b_addr = vm_map_ram(bp->b_pages, bp->b_page_count, in _xfs_buf_map_pages()
432 if (bp->b_addr) in _xfs_buf_map_pages()
438 if (!bp->b_addr) in _xfs_buf_map_pages()
454 const struct xfs_buf *bp = obj; in _xfs_buf_obj_cmp() local
462 if (bp->b_rhash_key != map->bm_bn) in _xfs_buf_obj_cmp()
465 if (unlikely(bp->b_length != map->bm_len)) { in _xfs_buf_obj_cmp()
474 ASSERT(bp->b_flags & XBF_STALE); in _xfs_buf_obj_cmp()
533 struct xfs_buf *bp; in xfs_buf_find() local
564 bp = rhashtable_lookup_fast(&pag->pag_buf_hash, &cmap, in xfs_buf_find()
566 if (bp) { in xfs_buf_find()
567 atomic_inc(&bp->b_hold); in xfs_buf_find()
591 if (!xfs_buf_trylock(bp)) { in xfs_buf_find()
593 xfs_buf_rele(bp); in xfs_buf_find()
597 xfs_buf_lock(bp); in xfs_buf_find()
606 if (bp->b_flags & XBF_STALE) { in xfs_buf_find()
607 ASSERT((bp->b_flags & _XBF_DELWRI_Q) == 0); in xfs_buf_find()
608 bp->b_flags &= _XBF_KMEM | _XBF_PAGES; in xfs_buf_find()
609 bp->b_ops = NULL; in xfs_buf_find()
612 trace_xfs_buf_find(bp, flags, _RET_IP_); in xfs_buf_find()
614 *found_bp = bp; in xfs_buf_find()
625 struct xfs_buf *bp; in xfs_buf_incore() local
629 error = xfs_buf_find(target, &map, 1, flags, NULL, &bp); in xfs_buf_incore()
632 return bp; in xfs_buf_incore()
648 struct xfs_buf *bp; in xfs_buf_get_map() local
653 error = xfs_buf_find(target, map, nmaps, flags, NULL, &bp); in xfs_buf_get_map()
676 error = xfs_buf_find(target, map, nmaps, flags, new_bp, &bp); in xfs_buf_get_map()
680 if (bp != new_bp) in xfs_buf_get_map()
684 if (!bp->b_addr) { in xfs_buf_get_map()
685 error = _xfs_buf_map_pages(bp, flags); in xfs_buf_get_map()
689 bp->b_page_count); in xfs_buf_get_map()
690 xfs_buf_relse(bp); in xfs_buf_get_map()
700 xfs_buf_ioerror(bp, 0); in xfs_buf_get_map()
703 trace_xfs_buf_get(bp, flags, _RET_IP_); in xfs_buf_get_map()
704 *bpp = bp; in xfs_buf_get_map()
713 struct xfs_buf *bp, in _xfs_buf_read() argument
717 ASSERT(bp->b_maps[0].bm_bn != XFS_BUF_DADDR_NULL); in _xfs_buf_read()
719 bp->b_flags &= ~(XBF_WRITE | XBF_ASYNC | XBF_READ_AHEAD | XBF_DONE); in _xfs_buf_read()
720 bp->b_flags |= flags & (XBF_READ | XBF_ASYNC | XBF_READ_AHEAD); in _xfs_buf_read()
722 return xfs_buf_submit(bp); in _xfs_buf_read()
744 struct xfs_buf *bp, in xfs_buf_reverify() argument
747 ASSERT(bp->b_flags & XBF_DONE); in xfs_buf_reverify()
748 ASSERT(bp->b_error == 0); in xfs_buf_reverify()
750 if (!ops || bp->b_ops) in xfs_buf_reverify()
753 bp->b_ops = ops; in xfs_buf_reverify()
754 bp->b_ops->verify_read(bp); in xfs_buf_reverify()
755 if (bp->b_error) in xfs_buf_reverify()
756 bp->b_flags &= ~XBF_DONE; in xfs_buf_reverify()
757 return bp->b_error; in xfs_buf_reverify()
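xfs_buf_reverify() above attaches a verifier lazily: a buffer first read without b_ops gets them attached on its next typed access, and a verification failure clears the done flag so the contents are no longer trusted. A minimal sketch of that state machine with invented stand-in types:

#include <stddef.h>

#define DONE 0x1                        /* stands in for XBF_DONE */

struct buf;
struct ops { int (*verify_read)(struct buf *); };

struct buf {
        unsigned int flags;
        int error;
        const struct ops *ops;
};

static int reverify(struct buf *b, const struct ops *ops)
{
        if (!ops || b->ops)             /* untyped access, or already typed */
                return 0;

        b->ops = ops;
        b->error = ops->verify_read(b);
        if (b->error)
                b->flags &= ~DONE;      /* contents no longer trusted */
        return b->error;
}

static int always_ok(struct buf *b) { (void)b; return 0; }

int main(void)
{
        const struct ops t = { .verify_read = always_ok };
        struct buf b = { .flags = DONE };

        return reverify(&b, &t);        /* attaches t, verifies: 0 */
}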
770 struct xfs_buf *bp; in xfs_buf_read_map() local
776 error = xfs_buf_get_map(target, map, nmaps, flags, &bp); in xfs_buf_read_map()
780 trace_xfs_buf_read(bp, flags, _RET_IP_); in xfs_buf_read_map()
782 if (!(bp->b_flags & XBF_DONE)) { in xfs_buf_read_map()
785 bp->b_ops = ops; in xfs_buf_read_map()
786 error = _xfs_buf_read(bp, flags); in xfs_buf_read_map()
793 error = xfs_buf_reverify(bp, ops); in xfs_buf_read_map()
797 xfs_buf_relse(bp); in xfs_buf_read_map()
802 bp->b_flags &= ~XBF_READ; in xfs_buf_read_map()
803 ASSERT(bp->b_ops != NULL || ops == NULL); in xfs_buf_read_map()
817 xfs_buf_ioerror_alert(bp, fa); in xfs_buf_read_map()
819 bp->b_flags &= ~XBF_DONE; in xfs_buf_read_map()
820 xfs_buf_stale(bp); in xfs_buf_read_map()
821 xfs_buf_relse(bp); in xfs_buf_read_map()
829 *bpp = bp; in xfs_buf_read_map()
844 struct xfs_buf *bp; in xfs_buf_readahead_map() local
850 XBF_TRYLOCK | XBF_ASYNC | XBF_READ_AHEAD, &bp, ops, in xfs_buf_readahead_map()
869 struct xfs_buf *bp; in xfs_buf_read_uncached() local
874 error = xfs_buf_get_uncached(target, numblks, flags, &bp); in xfs_buf_read_uncached()
879 ASSERT(bp->b_map_count == 1); in xfs_buf_read_uncached()
880 bp->b_rhash_key = XFS_BUF_DADDR_NULL; in xfs_buf_read_uncached()
881 bp->b_maps[0].bm_bn = daddr; in xfs_buf_read_uncached()
882 bp->b_flags |= XBF_READ; in xfs_buf_read_uncached()
883 bp->b_ops = ops; in xfs_buf_read_uncached()
885 xfs_buf_submit(bp); in xfs_buf_read_uncached()
886 if (bp->b_error) { in xfs_buf_read_uncached()
887 error = bp->b_error; in xfs_buf_read_uncached()
888 xfs_buf_relse(bp); in xfs_buf_read_uncached()
892 *bpp = bp; in xfs_buf_read_uncached()
904 struct xfs_buf *bp; in xfs_buf_get_uncached() local
910 error = _xfs_buf_alloc(target, &map, 1, flags & XBF_NO_IOACCT, &bp); in xfs_buf_get_uncached()
914 error = xfs_buf_alloc_pages(bp, flags); in xfs_buf_get_uncached()
918 error = _xfs_buf_map_pages(bp, 0); in xfs_buf_get_uncached()
925 trace_xfs_buf_get_uncached(bp, _RET_IP_); in xfs_buf_get_uncached()
926 *bpp = bp; in xfs_buf_get_uncached()
930 xfs_buf_free(bp); in xfs_buf_get_uncached()
941 struct xfs_buf *bp) in xfs_buf_hold() argument
943 trace_xfs_buf_hold(bp, _RET_IP_); in xfs_buf_hold()
944 atomic_inc(&bp->b_hold); in xfs_buf_hold()
953 struct xfs_buf *bp) in xfs_buf_rele() argument
955 struct xfs_perag *pag = bp->b_pag; in xfs_buf_rele()
959 trace_xfs_buf_rele(bp, _RET_IP_); in xfs_buf_rele()
962 ASSERT(list_empty(&bp->b_lru)); in xfs_buf_rele()
963 if (atomic_dec_and_test(&bp->b_hold)) { in xfs_buf_rele()
964 xfs_buf_ioacct_dec(bp); in xfs_buf_rele()
965 xfs_buf_free(bp); in xfs_buf_rele()
970 ASSERT(atomic_read(&bp->b_hold) > 0); in xfs_buf_rele()
982 spin_lock(&bp->b_lock); in xfs_buf_rele()
983 release = atomic_dec_and_lock(&bp->b_hold, &pag->pag_buf_lock); in xfs_buf_rele()
991 if ((atomic_read(&bp->b_hold) == 1) && !list_empty(&bp->b_lru)) in xfs_buf_rele()
992 __xfs_buf_ioacct_dec(bp); in xfs_buf_rele()
997 __xfs_buf_ioacct_dec(bp); in xfs_buf_rele()
998 if (!(bp->b_flags & XBF_STALE) && atomic_read(&bp->b_lru_ref)) { in xfs_buf_rele()
1004 if (list_lru_add(&bp->b_target->bt_lru, &bp->b_lru)) { in xfs_buf_rele()
1005 bp->b_state &= ~XFS_BSTATE_DISPOSE; in xfs_buf_rele()
1006 atomic_inc(&bp->b_hold); in xfs_buf_rele()
1016 if (!(bp->b_state & XFS_BSTATE_DISPOSE)) { in xfs_buf_rele()
1017 list_lru_del(&bp->b_target->bt_lru, &bp->b_lru); in xfs_buf_rele()
1019 ASSERT(list_empty(&bp->b_lru)); in xfs_buf_rele()
1022 ASSERT(!(bp->b_flags & _XBF_DELWRI_Q)); in xfs_buf_rele()
1023 rhashtable_remove_fast(&pag->pag_buf_hash, &bp->b_rhash_head, in xfs_buf_rele()
1031 spin_unlock(&bp->b_lock); in xfs_buf_rele()
1034 xfs_buf_free(bp); in xfs_buf_rele()
1051 struct xfs_buf *bp) in xfs_buf_trylock() argument
1055 locked = down_trylock(&bp->b_sema) == 0; in xfs_buf_trylock()
1057 trace_xfs_buf_trylock(bp, _RET_IP_); in xfs_buf_trylock()
1059 trace_xfs_buf_trylock_fail(bp, _RET_IP_); in xfs_buf_trylock()
1074 struct xfs_buf *bp) in xfs_buf_lock() argument
1076 trace_xfs_buf_lock(bp, _RET_IP_); in xfs_buf_lock()
1078 if (atomic_read(&bp->b_pin_count) && (bp->b_flags & XBF_STALE)) in xfs_buf_lock()
1079 xfs_log_force(bp->b_mount, 0); in xfs_buf_lock()
1080 down(&bp->b_sema); in xfs_buf_lock()
1082 trace_xfs_buf_lock_done(bp, _RET_IP_); in xfs_buf_lock()
1087 struct xfs_buf *bp) in xfs_buf_unlock() argument
1089 ASSERT(xfs_buf_islocked(bp)); in xfs_buf_unlock()
1091 up(&bp->b_sema); in xfs_buf_unlock()
1092 trace_xfs_buf_unlock(bp, _RET_IP_); in xfs_buf_unlock()
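The trylock/lock/unlock lines above show that the buffer lock is a counting semaphore, and _xfs_buf_alloc() initializes it to 0 ("held, no waiters"), so a buffer is born locked by its creator. A runnable POSIX-semaphore analogue of that lifecycle (plain userspace code, not the kernel API):

#include <semaphore.h>
#include <stdbool.h>
#include <stdio.h>

int main(void)
{
        sem_t sema;
        bool locked;

        sem_init(&sema, 0, 0);          /* born held, like sema_init(.., 0) */

        locked = sem_trywait(&sema) == 0;
        printf("trylock while held: %s\n", locked ? "got it" : "busy");

        sem_post(&sema);                /* creator unlocks */
        locked = sem_trywait(&sema) == 0;
        printf("trylock when free: %s\n", locked ? "got it" : "busy");

        if (locked)
                sem_post(&sema);        /* unlock again */
        sem_destroy(&sema);
        return 0;
}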
1097 struct xfs_buf *bp) in xfs_buf_wait_unpin() argument
1101 if (atomic_read(&bp->b_pin_count) == 0) in xfs_buf_wait_unpin()
1104 add_wait_queue(&bp->b_waiters, &wait); in xfs_buf_wait_unpin()
1107 if (atomic_read(&bp->b_pin_count) == 0) in xfs_buf_wait_unpin()
1111 remove_wait_queue(&bp->b_waiters, &wait); in xfs_buf_wait_unpin()
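xfs_buf_wait_unpin() above open-codes a waitqueue sleep: register as a waiter, recheck b_pin_count, and block until it reaches zero. The same discipline expressed with a condition variable, a userspace stand-in for the kernel waitqueue:

#include <pthread.h>

struct buf {
        pthread_mutex_t lock;
        pthread_cond_t unpinned;
        int pin_count;
};

static void wait_unpin(struct buf *b)
{
        pthread_mutex_lock(&b->lock);
        while (b->pin_count != 0)       /* recheck after every wakeup */
                pthread_cond_wait(&b->unpinned, &b->lock);
        pthread_mutex_unlock(&b->lock);
}

static void unpin(struct buf *b)
{
        pthread_mutex_lock(&b->lock);
        if (--b->pin_count == 0)
                pthread_cond_broadcast(&b->unpinned);
        pthread_mutex_unlock(&b->lock);
}

int main(void)
{
        struct buf b = {
                PTHREAD_MUTEX_INITIALIZER, PTHREAD_COND_INITIALIZER, 1
        };

        unpin(&b);              /* drop the only pin, wake any waiters */
        wait_unpin(&b);         /* returns at once: pin_count == 0 */
        return 0;
}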
1117 struct xfs_buf *bp) in xfs_buf_ioerror_alert_ratelimited() argument
1122 if (bp->b_target != lasttarg || in xfs_buf_ioerror_alert_ratelimited()
1125 xfs_buf_ioerror_alert(bp, __this_address); in xfs_buf_ioerror_alert_ratelimited()
1127 lasttarg = bp->b_target; in xfs_buf_ioerror_alert_ratelimited()
1136 struct xfs_buf *bp, in xfs_buf_ioerror_permanent() argument
1139 struct xfs_mount *mp = bp->b_mount; in xfs_buf_ioerror_permanent()
1142 ++bp->b_retries > cfg->max_retries) in xfs_buf_ioerror_permanent()
1145 time_after(jiffies, cfg->retry_timeout + bp->b_first_retry_time)) in xfs_buf_ioerror_permanent()
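xfs_buf_ioerror_permanent() above declares a repeated write error permanent once either the configured retry budget or the timeout measured from the first failure is exhausted. A simplified sketch of that policy; here a negative max_retries and a zero retry_timeout stand in for the kernel's "retry forever" sentinel, an assumption made for the sketch:

#include <stdbool.h>
#include <time.h>

struct err_cfg {
        int max_retries;                /* < 0: retry forever */
        time_t retry_timeout;           /* 0: no time limit */
};

static bool error_permanent(const struct err_cfg *cfg, int *retries,
                            time_t first_fail)
{
        if (cfg->max_retries >= 0 && ++(*retries) > cfg->max_retries)
                return true;            /* budget spent */
        if (cfg->retry_timeout &&
            time(NULL) > first_fail + cfg->retry_timeout)
                return true;            /* first failure too long ago */
        return false;
}

int main(void)
{
        struct err_cfg cfg = { .max_retries = 2, .retry_timeout = 0 };
        int retries = 0;

        while (!error_permanent(&cfg, &retries, time(NULL)))
                ;               /* two retries pass, the third trips */
        return retries - 3;     /* 0 on the expected path */
}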
1174 struct xfs_buf *bp) in xfs_buf_ioend_handle_error() argument
1176 struct xfs_mount *mp = bp->b_mount; in xfs_buf_ioend_handle_error()
1186 xfs_buf_ioerror_alert_ratelimited(bp); in xfs_buf_ioend_handle_error()
1192 if (bp->b_flags & _XBF_LOGRECOVERY) { in xfs_buf_ioend_handle_error()
1200 if (!(bp->b_flags & XBF_ASYNC)) in xfs_buf_ioend_handle_error()
1203 trace_xfs_buf_iodone_async(bp, _RET_IP_); in xfs_buf_ioend_handle_error()
1205 cfg = xfs_error_get_cfg(mp, XFS_ERR_METADATA, bp->b_error); in xfs_buf_ioend_handle_error()
1206 if (bp->b_last_error != bp->b_error || in xfs_buf_ioend_handle_error()
1207 !(bp->b_flags & (XBF_STALE | XBF_WRITE_FAIL))) { in xfs_buf_ioend_handle_error()
1208 bp->b_last_error = bp->b_error; in xfs_buf_ioend_handle_error()
1210 !bp->b_first_retry_time) in xfs_buf_ioend_handle_error()
1211 bp->b_first_retry_time = jiffies; in xfs_buf_ioend_handle_error()
1219 if (xfs_buf_ioerror_permanent(bp, cfg)) { in xfs_buf_ioend_handle_error()
1225 if (bp->b_flags & _XBF_INODES) in xfs_buf_ioend_handle_error()
1226 xfs_buf_inode_io_fail(bp); in xfs_buf_ioend_handle_error()
1227 else if (bp->b_flags & _XBF_DQUOTS) in xfs_buf_ioend_handle_error()
1228 xfs_buf_dquot_io_fail(bp); in xfs_buf_ioend_handle_error()
1230 ASSERT(list_empty(&bp->b_li_list)); in xfs_buf_ioend_handle_error()
1231 xfs_buf_ioerror(bp, 0); in xfs_buf_ioend_handle_error()
1232 xfs_buf_relse(bp); in xfs_buf_ioend_handle_error()
1236 xfs_buf_ioerror(bp, 0); in xfs_buf_ioend_handle_error()
1237 bp->b_flags |= (XBF_DONE | XBF_WRITE_FAIL); in xfs_buf_ioend_handle_error()
1238 xfs_buf_submit(bp); in xfs_buf_ioend_handle_error()
1241 xfs_buf_stale(bp); in xfs_buf_ioend_handle_error()
1242 bp->b_flags |= XBF_DONE; in xfs_buf_ioend_handle_error()
1243 bp->b_flags &= ~XBF_WRITE; in xfs_buf_ioend_handle_error()
1244 trace_xfs_buf_error_relse(bp, _RET_IP_); in xfs_buf_ioend_handle_error()
1250 struct xfs_buf *bp) in xfs_buf_ioend() argument
1252 trace_xfs_buf_iodone(bp, _RET_IP_); in xfs_buf_ioend()
1258 if (!bp->b_error && bp->b_io_error) in xfs_buf_ioend()
1259 xfs_buf_ioerror(bp, bp->b_io_error); in xfs_buf_ioend()
1261 if (bp->b_flags & XBF_READ) { in xfs_buf_ioend()
1262 if (!bp->b_error && bp->b_ops) in xfs_buf_ioend()
1263 bp->b_ops->verify_read(bp); in xfs_buf_ioend()
1264 if (!bp->b_error) in xfs_buf_ioend()
1265 bp->b_flags |= XBF_DONE; in xfs_buf_ioend()
1267 if (!bp->b_error) { in xfs_buf_ioend()
1268 bp->b_flags &= ~XBF_WRITE_FAIL; in xfs_buf_ioend()
1269 bp->b_flags |= XBF_DONE; in xfs_buf_ioend()
1272 if (unlikely(bp->b_error) && xfs_buf_ioend_handle_error(bp)) in xfs_buf_ioend()
1276 bp->b_last_error = 0; in xfs_buf_ioend()
1277 bp->b_retries = 0; in xfs_buf_ioend()
1278 bp->b_first_retry_time = 0; in xfs_buf_ioend()
1285 if (bp->b_log_item) in xfs_buf_ioend()
1286 xfs_buf_item_done(bp); in xfs_buf_ioend()
1288 if (bp->b_flags & _XBF_INODES) in xfs_buf_ioend()
1289 xfs_buf_inode_iodone(bp); in xfs_buf_ioend()
1290 else if (bp->b_flags & _XBF_DQUOTS) in xfs_buf_ioend()
1291 xfs_buf_dquot_iodone(bp); in xfs_buf_ioend()
1295 bp->b_flags &= ~(XBF_READ | XBF_WRITE | XBF_READ_AHEAD | in xfs_buf_ioend()
1298 if (bp->b_flags & XBF_ASYNC) in xfs_buf_ioend()
1299 xfs_buf_relse(bp); in xfs_buf_ioend()
1301 complete(&bp->b_iowait); in xfs_buf_ioend()
1308 struct xfs_buf *bp = in xfs_buf_ioend_work() local
1311 xfs_buf_ioend(bp); in xfs_buf_ioend_work()
1316 struct xfs_buf *bp) in xfs_buf_ioend_async() argument
1318 INIT_WORK(&bp->b_ioend_work, xfs_buf_ioend_work); in xfs_buf_ioend_async()
1319 queue_work(bp->b_mount->m_buf_workqueue, &bp->b_ioend_work); in xfs_buf_ioend_async()
1324 struct xfs_buf *bp, in __xfs_buf_ioerror() argument
1329 bp->b_error = error; in __xfs_buf_ioerror()
1330 trace_xfs_buf_ioerror(bp, error, failaddr); in __xfs_buf_ioerror()
1335 struct xfs_buf *bp, in xfs_buf_ioerror_alert() argument
1338 xfs_buf_alert_ratelimited(bp, "XFS: metadata IO error", in xfs_buf_ioerror_alert()
1340 func, (uint64_t)xfs_buf_daddr(bp), in xfs_buf_ioerror_alert()
1341 bp->b_length, -bp->b_error); in xfs_buf_ioerror_alert()
1352 struct xfs_buf *bp) in xfs_buf_ioend_fail() argument
1354 bp->b_flags &= ~XBF_DONE; in xfs_buf_ioend_fail()
1355 xfs_buf_stale(bp); in xfs_buf_ioend_fail()
1356 xfs_buf_ioerror(bp, -EIO); in xfs_buf_ioend_fail()
1357 xfs_buf_ioend(bp); in xfs_buf_ioend_fail()
1362 struct xfs_buf *bp) in xfs_bwrite() argument
1366 ASSERT(xfs_buf_islocked(bp)); in xfs_bwrite()
1368 bp->b_flags |= XBF_WRITE; in xfs_bwrite()
1369 bp->b_flags &= ~(XBF_ASYNC | XBF_READ | _XBF_DELWRI_Q | in xfs_bwrite()
1372 error = xfs_buf_submit(bp); in xfs_bwrite()
1374 xfs_force_shutdown(bp->b_mount, SHUTDOWN_META_IO_ERROR); in xfs_bwrite()
1382 struct xfs_buf *bp = (struct xfs_buf *)bio->bi_private; in xfs_buf_bio_end_io() local
1385 (bp->b_flags & XBF_WRITE) && (bp->b_flags & XBF_ASYNC) && in xfs_buf_bio_end_io()
1386 XFS_TEST_ERROR(false, bp->b_mount, XFS_ERRTAG_BUF_IOERROR)) in xfs_buf_bio_end_io()
1396 cmpxchg(&bp->b_io_error, 0, error); in xfs_buf_bio_end_io()
1399 if (!bp->b_error && xfs_buf_is_vmapped(bp) && (bp->b_flags & XBF_READ)) in xfs_buf_bio_end_io()
1400 invalidate_kernel_vmap_range(bp->b_addr, xfs_buf_vmap_len(bp)); in xfs_buf_bio_end_io()
1402 if (atomic_dec_and_test(&bp->b_io_remaining) == 1) in xfs_buf_bio_end_io()
1403 xfs_buf_ioend_async(bp); in xfs_buf_bio_end_io()
1409 struct xfs_buf *bp, in xfs_buf_ioapply_map() argument
1416 unsigned int total_nr_pages = bp->b_page_count; in xfs_buf_ioapply_map()
1419 sector_t sector = bp->b_maps[map].bm_bn; in xfs_buf_ioapply_map()
1435 size = min_t(int, BBTOB(bp->b_maps[map].bm_len), *count); in xfs_buf_ioapply_map()
1440 atomic_inc(&bp->b_io_remaining); in xfs_buf_ioapply_map()
1444 bio_set_dev(bio, bp->b_target->bt_bdev); in xfs_buf_ioapply_map()
1447 bio->bi_private = bp; in xfs_buf_ioapply_map()
1456 rbytes = bio_add_page(bio, bp->b_pages[page_index], nbytes, in xfs_buf_ioapply_map()
1468 if (xfs_buf_is_vmapped(bp)) { in xfs_buf_ioapply_map()
1469 flush_kernel_vmap_range(bp->b_addr, in xfs_buf_ioapply_map()
1470 xfs_buf_vmap_len(bp)); in xfs_buf_ioapply_map()
1480 atomic_dec(&bp->b_io_remaining); in xfs_buf_ioapply_map()
1481 xfs_buf_ioerror(bp, -EIO); in xfs_buf_ioapply_map()
1489 struct xfs_buf *bp) in _xfs_buf_ioapply() argument
1501 bp->b_error = 0; in _xfs_buf_ioapply()
1503 if (bp->b_flags & XBF_WRITE) { in _xfs_buf_ioapply()
1511 if (bp->b_ops) { in _xfs_buf_ioapply()
1512 bp->b_ops->verify_write(bp); in _xfs_buf_ioapply()
1513 if (bp->b_error) { in _xfs_buf_ioapply()
1514 xfs_force_shutdown(bp->b_mount, in _xfs_buf_ioapply()
1518 } else if (bp->b_rhash_key != XFS_BUF_DADDR_NULL) { in _xfs_buf_ioapply()
1519 struct xfs_mount *mp = bp->b_mount; in _xfs_buf_ioapply()
1528 __func__, xfs_buf_daddr(bp), in _xfs_buf_ioapply()
1529 bp->b_length); in _xfs_buf_ioapply()
1530 xfs_hex_dump(bp->b_addr, in _xfs_buf_ioapply()
1537 if (bp->b_flags & XBF_READ_AHEAD) in _xfs_buf_ioapply()
1550 offset = bp->b_offset; in _xfs_buf_ioapply()
1551 size = BBTOB(bp->b_length); in _xfs_buf_ioapply()
1553 for (i = 0; i < bp->b_map_count; i++) { in _xfs_buf_ioapply()
1554 xfs_buf_ioapply_map(bp, i, &offset, &size, op); in _xfs_buf_ioapply()
1555 if (bp->b_error) in _xfs_buf_ioapply()
1568 struct xfs_buf *bp) in xfs_buf_iowait() argument
1570 ASSERT(!(bp->b_flags & XBF_ASYNC)); in xfs_buf_iowait()
1572 trace_xfs_buf_iowait(bp, _RET_IP_); in xfs_buf_iowait()
1573 wait_for_completion(&bp->b_iowait); in xfs_buf_iowait()
1574 trace_xfs_buf_iowait_done(bp, _RET_IP_); in xfs_buf_iowait()
1576 return bp->b_error; in xfs_buf_iowait()
1587 struct xfs_buf *bp, in __xfs_buf_submit() argument
1592 trace_xfs_buf_submit(bp, _RET_IP_); in __xfs_buf_submit()
1594 ASSERT(!(bp->b_flags & _XBF_DELWRI_Q)); in __xfs_buf_submit()
1597 if (xfs_is_shutdown(bp->b_mount)) { in __xfs_buf_submit()
1598 xfs_buf_ioend_fail(bp); in __xfs_buf_submit()
1607 xfs_buf_hold(bp); in __xfs_buf_submit()
1609 if (bp->b_flags & XBF_WRITE) in __xfs_buf_submit()
1610 xfs_buf_wait_unpin(bp); in __xfs_buf_submit()
1613 bp->b_io_error = 0; in __xfs_buf_submit()
1620 atomic_set(&bp->b_io_remaining, 1); in __xfs_buf_submit()
1621 if (bp->b_flags & XBF_ASYNC) in __xfs_buf_submit()
1622 xfs_buf_ioacct_inc(bp); in __xfs_buf_submit()
1623 _xfs_buf_ioapply(bp); in __xfs_buf_submit()
1630 if (atomic_dec_and_test(&bp->b_io_remaining) == 1) { in __xfs_buf_submit()
1631 if (bp->b_error || !(bp->b_flags & XBF_ASYNC)) in __xfs_buf_submit()
1632 xfs_buf_ioend(bp); in __xfs_buf_submit()
1634 xfs_buf_ioend_async(bp); in __xfs_buf_submit()
1638 error = xfs_buf_iowait(bp); in __xfs_buf_submit()
1645 xfs_buf_rele(bp); in __xfs_buf_submit()
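__xfs_buf_submit() above primes b_io_remaining to 1 before any bio is issued: that submitter-held "bias" count keeps completion from running until every bio and the submitter itself have dropped their reference, even if the bios finish before submission returns. A runnable sketch of the pattern using C11 atomics:

#include <stdatomic.h>
#include <stdio.h>

static atomic_int io_remaining;

static void ioend(void)
{
        puts("last reference dropped: run completion once");
}

static void drop_io_ref(void)
{
        if (atomic_fetch_sub(&io_remaining, 1) == 1)
                ioend();                /* we were the final holder */
}

int main(void)
{
        atomic_store(&io_remaining, 1);         /* the submitter's bias */

        for (int i = 0; i < 3; i++) {
                atomic_fetch_add(&io_remaining, 1);     /* one per sub-I/O */
                drop_io_ref();                  /* that sub-I/O completes */
        }
        drop_io_ref();          /* submitter drops the bias: ioend() fires */
        return 0;
}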
1651 struct xfs_buf *bp, in xfs_buf_offset() argument
1656 if (bp->b_addr) in xfs_buf_offset()
1657 return bp->b_addr + offset; in xfs_buf_offset()
1659 page = bp->b_pages[offset >> PAGE_SHIFT]; in xfs_buf_offset()
1665 struct xfs_buf *bp, in xfs_buf_zero() argument
1676 page_index = (boff + bp->b_offset) >> PAGE_SHIFT; in xfs_buf_zero()
1677 page_offset = (boff + bp->b_offset) & ~PAGE_MASK; in xfs_buf_zero()
1678 page = bp->b_pages[page_index]; in xfs_buf_zero()
1680 BBTOB(bp->b_length) - boff); in xfs_buf_zero()
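xfs_buf_zero() above turns a byte offset into a page index plus an in-page offset before touching b_pages. The arithmetic in isolation, with a 4K page hard-coded for the sketch:

#include <stddef.h>

#define PAGE_SIZE  4096UL
#define PAGE_SHIFT 12
#define PAGE_MASK  (~(PAGE_SIZE - 1))

static void split_offset(size_t boff, size_t *page_index, size_t *page_off)
{
        *page_index = boff >> PAGE_SHIFT;       /* which page */
        *page_off = boff & ~PAGE_MASK;          /* where within it */
}

int main(void)
{
        size_t idx, off;

        split_offset(PAGE_SIZE + 10, &idx, &off);
        return !(idx == 1 && off == 10);        /* 0 on success */
}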
1703 struct xfs_buf *bp, in __xfs_buf_mark_corrupt() argument
1706 ASSERT(bp->b_flags & XBF_DONE); in __xfs_buf_mark_corrupt()
1708 xfs_buf_corruption_error(bp, fa); in __xfs_buf_mark_corrupt()
1709 xfs_buf_stale(bp); in __xfs_buf_mark_corrupt()
1729 struct xfs_buf *bp = container_of(item, struct xfs_buf, b_lru); in xfs_buftarg_drain_rele() local
1732 if (atomic_read(&bp->b_hold) > 1) { in xfs_buftarg_drain_rele()
1734 trace_xfs_buf_drain_buftarg(bp, _RET_IP_); in xfs_buftarg_drain_rele()
1737 if (!spin_trylock(&bp->b_lock)) in xfs_buftarg_drain_rele()
1744 atomic_set(&bp->b_lru_ref, 0); in xfs_buftarg_drain_rele()
1745 bp->b_state |= XFS_BSTATE_DISPOSE; in xfs_buftarg_drain_rele()
1747 spin_unlock(&bp->b_lock); in xfs_buftarg_drain_rele()
1791 struct xfs_buf *bp; in xfs_buftarg_drain() local
1792 bp = list_first_entry(&dispose, struct xfs_buf, b_lru); in xfs_buftarg_drain()
1793 list_del_init(&bp->b_lru); in xfs_buftarg_drain()
1794 if (bp->b_flags & XBF_WRITE_FAIL) { in xfs_buftarg_drain()
1796 xfs_buf_alert_ratelimited(bp, in xfs_buftarg_drain()
1799 (long long)xfs_buf_daddr(bp)); in xfs_buftarg_drain()
1801 xfs_buf_rele(bp); in xfs_buftarg_drain()
1827 struct xfs_buf *bp = container_of(item, struct xfs_buf, b_lru); in xfs_buftarg_isolate() local
1834 if (!spin_trylock(&bp->b_lock)) in xfs_buftarg_isolate()
1841 if (atomic_add_unless(&bp->b_lru_ref, -1, 0)) { in xfs_buftarg_isolate()
1842 spin_unlock(&bp->b_lock); in xfs_buftarg_isolate()
1846 bp->b_state |= XFS_BSTATE_DISPOSE; in xfs_buftarg_isolate()
1848 spin_unlock(&bp->b_lock); in xfs_buftarg_isolate()
1866 struct xfs_buf *bp; in xfs_buftarg_shrink_scan() local
1867 bp = list_first_entry(&dispose, struct xfs_buf, b_lru); in xfs_buftarg_shrink_scan()
1868 list_del_init(&bp->b_lru); in xfs_buftarg_shrink_scan()
1869 xfs_buf_rele(bp); in xfs_buftarg_shrink_scan()
1993 struct xfs_buf *bp; in xfs_buf_delwri_cancel() local
1996 bp = list_first_entry(list, struct xfs_buf, b_list); in xfs_buf_delwri_cancel()
1998 xfs_buf_lock(bp); in xfs_buf_delwri_cancel()
1999 bp->b_flags &= ~_XBF_DELWRI_Q; in xfs_buf_delwri_cancel()
2000 list_del_init(&bp->b_list); in xfs_buf_delwri_cancel()
2001 xfs_buf_relse(bp); in xfs_buf_delwri_cancel()
2018 struct xfs_buf *bp, in xfs_buf_delwri_queue() argument
2021 ASSERT(xfs_buf_islocked(bp)); in xfs_buf_delwri_queue()
2022 ASSERT(!(bp->b_flags & XBF_READ)); in xfs_buf_delwri_queue()
2029 if (bp->b_flags & _XBF_DELWRI_Q) { in xfs_buf_delwri_queue()
2030 trace_xfs_buf_delwri_queued(bp, _RET_IP_); in xfs_buf_delwri_queue()
2034 trace_xfs_buf_delwri_queue(bp, _RET_IP_); in xfs_buf_delwri_queue()
2044 bp->b_flags |= _XBF_DELWRI_Q; in xfs_buf_delwri_queue()
2045 if (list_empty(&bp->b_list)) { in xfs_buf_delwri_queue()
2046 atomic_inc(&bp->b_hold); in xfs_buf_delwri_queue()
2047 list_add_tail(&bp->b_list, list); in xfs_buf_delwri_queue()
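xfs_buf_delwri_queue() above is idempotent: the _XBF_DELWRI_Q flag stops a buffer from being queued twice (or onto two lists), and the submit path later clears it and writes everything out. A toy userspace rendering of that queue-then-flush discipline, with a singly linked list in place of b_list; note the real submit path first sorts the list with xfs_buf_cmp(), shown next below:

#include <stdio.h>

#define DELWRI_Q 0x1                    /* stands in for _XBF_DELWRI_Q */

struct buf {
        unsigned int flags;
        struct buf *next;
        int id;
};

static void delwri_queue(struct buf *b, struct buf **list)
{
        if (b->flags & DELWRI_Q)        /* already queued: nothing to do */
                return;
        b->flags |= DELWRI_Q;
        b->next = *list;
        *list = b;
}

static void delwri_submit(struct buf **list)
{
        for (struct buf *b = *list; b; b = b->next) {
                b->flags &= ~DELWRI_Q;
                printf("writing buf %d\n", b->id);
        }
        *list = NULL;
}

int main(void)
{
        struct buf a = { .id = 1 }, b = { .id = 2 };
        struct buf *list = NULL;

        delwri_queue(&a, &list);
        delwri_queue(&b, &list);
        delwri_queue(&a, &list);        /* second queue is a no-op */
        delwri_submit(&list);           /* writes 2, then 1 */
        return 0;
}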
2065 struct xfs_buf *bp = container_of(b, struct xfs_buf, b_list); in xfs_buf_cmp() local
2068 diff = ap->b_maps[0].bm_bn - bp->b_maps[0].bm_bn; in xfs_buf_cmp()
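xfs_buf_cmp() above, used to sort a delwri list into block-number order, clamps the 64-bit difference to -1/0/1 rather than returning it directly: truncating a wide difference into the comparator's int return value could overflow and mis-sort. The same guard in isolation:

#include <assert.h>
#include <stdint.h>

static int daddr_cmp(int64_t a, int64_t b)
{
        int64_t diff = a - b;

        if (diff < 0)
                return -1;
        if (diff > 0)
                return 1;
        return 0;
}

int main(void)
{
        assert(daddr_cmp(1, 2) < 0);
        assert(daddr_cmp(2, 2) == 0);
        /* the raw difference would truncate to 0 in a 32-bit int */
        assert(daddr_cmp((int64_t)1 << 32, 0) > 0);
        return 0;
}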
2088 struct xfs_buf *bp, *n; in xfs_buf_delwri_submit_buffers() local
2095 list_for_each_entry_safe(bp, n, buffer_list, b_list) { in xfs_buf_delwri_submit_buffers()
2097 if (xfs_buf_ispinned(bp)) { in xfs_buf_delwri_submit_buffers()
2101 if (!xfs_buf_trylock(bp)) in xfs_buf_delwri_submit_buffers()
2104 xfs_buf_lock(bp); in xfs_buf_delwri_submit_buffers()
2113 if (!(bp->b_flags & _XBF_DELWRI_Q)) { in xfs_buf_delwri_submit_buffers()
2114 list_del_init(&bp->b_list); in xfs_buf_delwri_submit_buffers()
2115 xfs_buf_relse(bp); in xfs_buf_delwri_submit_buffers()
2119 trace_xfs_buf_delwri_split(bp, _RET_IP_); in xfs_buf_delwri_submit_buffers()
2127 bp->b_flags &= ~_XBF_DELWRI_Q; in xfs_buf_delwri_submit_buffers()
2128 bp->b_flags |= XBF_WRITE; in xfs_buf_delwri_submit_buffers()
2130 bp->b_flags &= ~XBF_ASYNC; in xfs_buf_delwri_submit_buffers()
2131 list_move_tail(&bp->b_list, wait_list); in xfs_buf_delwri_submit_buffers()
2133 bp->b_flags |= XBF_ASYNC; in xfs_buf_delwri_submit_buffers()
2134 list_del_init(&bp->b_list); in xfs_buf_delwri_submit_buffers()
2136 __xfs_buf_submit(bp, false); in xfs_buf_delwri_submit_buffers()
2180 struct xfs_buf *bp; in xfs_buf_delwri_submit() local
2186 bp = list_first_entry(&wait_list, struct xfs_buf, b_list); in xfs_buf_delwri_submit()
2188 list_del_init(&bp->b_list); in xfs_buf_delwri_submit()
2194 error2 = xfs_buf_iowait(bp); in xfs_buf_delwri_submit()
2195 xfs_buf_relse(bp); in xfs_buf_delwri_submit()
2220 struct xfs_buf *bp, in xfs_buf_delwri_pushbuf() argument
2226 ASSERT(bp->b_flags & _XBF_DELWRI_Q); in xfs_buf_delwri_pushbuf()
2228 trace_xfs_buf_delwri_pushbuf(bp, _RET_IP_); in xfs_buf_delwri_pushbuf()
2234 xfs_buf_lock(bp); in xfs_buf_delwri_pushbuf()
2235 list_move(&bp->b_list, &submit_list); in xfs_buf_delwri_pushbuf()
2236 xfs_buf_unlock(bp); in xfs_buf_delwri_pushbuf()
2251 error = xfs_buf_iowait(bp); in xfs_buf_delwri_pushbuf()
2252 bp->b_flags |= _XBF_DELWRI_Q; in xfs_buf_delwri_pushbuf()
2253 xfs_buf_unlock(bp); in xfs_buf_delwri_pushbuf()
2281 void xfs_buf_set_ref(struct xfs_buf *bp, int lru_ref) in xfs_buf_set_ref() argument
2288 if (XFS_TEST_ERROR(false, bp->b_mount, XFS_ERRTAG_BUF_LRU_REF)) in xfs_buf_set_ref()
2291 atomic_set(&bp->b_lru_ref, lru_ref); in xfs_buf_set_ref()
2301 struct xfs_buf *bp, in xfs_verify_magic() argument
2304 struct xfs_mount *mp = bp->b_mount; in xfs_verify_magic()
2308 if (WARN_ON(!bp->b_ops || !bp->b_ops->magic[idx])) in xfs_verify_magic()
2310 return dmagic == bp->b_ops->magic[idx]; in xfs_verify_magic()
2319 struct xfs_buf *bp, in xfs_verify_magic16() argument
2322 struct xfs_mount *mp = bp->b_mount; in xfs_verify_magic16()
2326 if (WARN_ON(!bp->b_ops || !bp->b_ops->magic16[idx])) in xfs_verify_magic16()
2328 return dmagic == bp->b_ops->magic16[idx]; in xfs_verify_magic16()
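xfs_verify_magic()/xfs_verify_magic16() above pick a slot in a two-entry per-ops magic table, indexed by whether the mounted filesystem has CRCs enabled, and compare it with the big-endian on-disk value. A sketch of the 32-bit variant; the magic numbers below are arbitrary placeholders, not values any real verifier registers:

#include <arpa/inet.h>          /* htonl() */
#include <stdbool.h>
#include <stdint.h>

struct buf_ops {
        uint32_t magic[2];      /* big-endian; [0] pre-CRC, [1] CRC-enabled */
};

static bool verify_magic(const struct buf_ops *ops, uint32_t dmagic,
                         bool has_crc)
{
        int idx = has_crc ? 1 : 0;

        if (!ops->magic[idx])
                return false;   /* no magic registered for this format */
        return dmagic == ops->magic[idx];
}

int main(void)
{
        struct buf_ops ops = { { htonl(0x11111111), htonl(0x22222222) } };

        /* on-disk value matches the CRC-enabled slot */
        return !verify_magic(&ops, htonl(0x22222222), true);
}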