
Lines Matching refs:bp (fs/xfs/xfs_buf.c, the Linux kernel XFS buffer cache)

Each entry reads: <source line number> <matching source text> in <enclosing function>(), with declarations tagged "argument" or "local". Source lines that do not reference bp are omitted, so function bodies appear with gaps.

55 static int __xfs_buf_submit(struct xfs_buf *bp, bool wait);
59 struct xfs_buf *bp) in xfs_buf_submit() argument
61 return __xfs_buf_submit(bp, !(bp->b_flags & XBF_ASYNC)); in xfs_buf_submit()
66 struct xfs_buf *bp) in xfs_buf_is_vmapped() argument
75 return bp->b_addr && bp->b_page_count > 1; in xfs_buf_is_vmapped()
80 struct xfs_buf *bp) in xfs_buf_vmap_len() argument
82 return (bp->b_page_count * PAGE_SIZE) - bp->b_offset; in xfs_buf_vmap_len()
100 struct xfs_buf *bp) in xfs_buf_ioacct_inc() argument
102 if (bp->b_flags & XBF_NO_IOACCT) in xfs_buf_ioacct_inc()
105 ASSERT(bp->b_flags & XBF_ASYNC); in xfs_buf_ioacct_inc()
106 spin_lock(&bp->b_lock); in xfs_buf_ioacct_inc()
107 if (!(bp->b_state & XFS_BSTATE_IN_FLIGHT)) { in xfs_buf_ioacct_inc()
108 bp->b_state |= XFS_BSTATE_IN_FLIGHT; in xfs_buf_ioacct_inc()
109 percpu_counter_inc(&bp->b_target->bt_io_count); in xfs_buf_ioacct_inc()
111 spin_unlock(&bp->b_lock); in xfs_buf_ioacct_inc()
120 struct xfs_buf *bp) in __xfs_buf_ioacct_dec() argument
122 lockdep_assert_held(&bp->b_lock); in __xfs_buf_ioacct_dec()
124 if (bp->b_state & XFS_BSTATE_IN_FLIGHT) { in __xfs_buf_ioacct_dec()
125 bp->b_state &= ~XFS_BSTATE_IN_FLIGHT; in __xfs_buf_ioacct_dec()
126 percpu_counter_dec(&bp->b_target->bt_io_count); in __xfs_buf_ioacct_dec()
132 struct xfs_buf *bp) in xfs_buf_ioacct_dec() argument
134 spin_lock(&bp->b_lock); in xfs_buf_ioacct_dec()
135 __xfs_buf_ioacct_dec(bp); in xfs_buf_ioacct_dec()
136 spin_unlock(&bp->b_lock); in xfs_buf_ioacct_dec()
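
In these helpers, XFS_BSTATE_IN_FLIGHT is flipped under b_lock so the
inc/dec pair is idempotent per buffer: however often a buffer is
resubmitted, it holds at most one count in the per-target bt_io_count
that quiescing code drains. A minimal sketch of where the pair fires,
assuming the call sites listed further down (lines 1688 and 1059-1064):

    /* Sketch: the increment fires on async submission ... */
    bp->b_flags |= XBF_ASYNC;
    xfs_buf_submit(bp);     /* __xfs_buf_submit() calls xfs_buf_ioacct_inc() */

    /* ... and the decrement on the final release or on staling. */
    xfs_buf_rele(bp);       /* may call __xfs_buf_ioacct_dec() under b_lock */
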
149 struct xfs_buf *bp) in xfs_buf_stale() argument
151 ASSERT(xfs_buf_islocked(bp)); in xfs_buf_stale()
153 bp->b_flags |= XBF_STALE; in xfs_buf_stale()
160 bp->b_flags &= ~_XBF_DELWRI_Q; in xfs_buf_stale()
168 spin_lock(&bp->b_lock); in xfs_buf_stale()
169 __xfs_buf_ioacct_dec(bp); in xfs_buf_stale()
171 atomic_set(&bp->b_lru_ref, 0); in xfs_buf_stale()
172 if (!(bp->b_state & XFS_BSTATE_DISPOSE) && in xfs_buf_stale()
173 (list_lru_del(&bp->b_target->bt_lru, &bp->b_lru))) in xfs_buf_stale()
174 atomic_dec(&bp->b_hold); in xfs_buf_stale()
176 ASSERT(atomic_read(&bp->b_hold) >= 1); in xfs_buf_stale()
177 spin_unlock(&bp->b_lock); in xfs_buf_stale()
182 struct xfs_buf *bp, in xfs_buf_get_maps() argument
185 ASSERT(bp->b_maps == NULL); in xfs_buf_get_maps()
186 bp->b_map_count = map_count; in xfs_buf_get_maps()
189 bp->b_maps = &bp->__b_map; in xfs_buf_get_maps()
193 bp->b_maps = kmem_zalloc(map_count * sizeof(struct xfs_buf_map), in xfs_buf_get_maps()
195 if (!bp->b_maps) in xfs_buf_get_maps()
205 struct xfs_buf *bp) in xfs_buf_free_maps() argument
207 if (bp->b_maps != &bp->__b_map) { in xfs_buf_free_maps()
208 kmem_free(bp->b_maps); in xfs_buf_free_maps()
209 bp->b_maps = NULL; in xfs_buf_free_maps()
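
xfs_buf_get_maps()/xfs_buf_free_maps() are a small-array optimization:
a single-extent buffer points b_maps at the embedded __b_map and never
allocates, while discontiguous buffers get a heap array. Callers hand a
map array to the lookup/read entry points; a hedged sketch of a
two-extent map (variable names are illustrative):

    /* Sketch: a discontiguous, two-extent buffer map as a caller
     * builds it; _xfs_buf_alloc() copies it into bp->b_maps. */
    struct xfs_buf_map map[2] = {
            { .bm_bn = blkno1, .bm_len = len1 },
            { .bm_bn = blkno2, .bm_len = len2 },
    };

    error = xfs_buf_read_map(target, map, 2, 0, &bp, ops, __this_address);
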
221 struct xfs_buf *bp; in _xfs_buf_alloc() local
226 bp = kmem_cache_zalloc(xfs_buf_zone, GFP_NOFS | __GFP_NOFAIL); in _xfs_buf_alloc()
234 atomic_set(&bp->b_hold, 1); in _xfs_buf_alloc()
235 atomic_set(&bp->b_lru_ref, 1); in _xfs_buf_alloc()
236 init_completion(&bp->b_iowait); in _xfs_buf_alloc()
237 INIT_LIST_HEAD(&bp->b_lru); in _xfs_buf_alloc()
238 INIT_LIST_HEAD(&bp->b_list); in _xfs_buf_alloc()
239 INIT_LIST_HEAD(&bp->b_li_list); in _xfs_buf_alloc()
240 sema_init(&bp->b_sema, 0); /* held, no waiters */ in _xfs_buf_alloc()
241 spin_lock_init(&bp->b_lock); in _xfs_buf_alloc()
242 bp->b_target = target; in _xfs_buf_alloc()
243 bp->b_mount = target->bt_mount; in _xfs_buf_alloc()
244 bp->b_flags = flags; in _xfs_buf_alloc()
251 error = xfs_buf_get_maps(bp, nmaps); in _xfs_buf_alloc()
253 kmem_cache_free(xfs_buf_zone, bp); in _xfs_buf_alloc()
257 bp->b_bn = map[0].bm_bn; in _xfs_buf_alloc()
258 bp->b_length = 0; in _xfs_buf_alloc()
260 bp->b_maps[i].bm_bn = map[i].bm_bn; in _xfs_buf_alloc()
261 bp->b_maps[i].bm_len = map[i].bm_len; in _xfs_buf_alloc()
262 bp->b_length += map[i].bm_len; in _xfs_buf_alloc()
265 atomic_set(&bp->b_pin_count, 0); in _xfs_buf_alloc()
266 init_waitqueue_head(&bp->b_waiters); in _xfs_buf_alloc()
268 XFS_STATS_INC(bp->b_mount, xb_create); in _xfs_buf_alloc()
269 trace_xfs_buf_init(bp, _RET_IP_); in _xfs_buf_alloc()
271 *bpp = bp; in _xfs_buf_alloc()
281 xfs_buf_t *bp, in _xfs_buf_get_pages() argument
285 if (bp->b_pages == NULL) { in _xfs_buf_get_pages()
286 bp->b_page_count = page_count; in _xfs_buf_get_pages()
288 bp->b_pages = bp->b_page_array; in _xfs_buf_get_pages()
290 bp->b_pages = kmem_alloc(sizeof(struct page *) * in _xfs_buf_get_pages()
292 if (bp->b_pages == NULL) in _xfs_buf_get_pages()
295 memset(bp->b_pages, 0, sizeof(struct page *) * page_count); in _xfs_buf_get_pages()
305 xfs_buf_t *bp) in _xfs_buf_free_pages() argument
307 if (bp->b_pages != bp->b_page_array) { in _xfs_buf_free_pages()
308 kmem_free(bp->b_pages); in _xfs_buf_free_pages()
309 bp->b_pages = NULL; in _xfs_buf_free_pages()
322 xfs_buf_t *bp) in xfs_buf_free() argument
324 trace_xfs_buf_free(bp, _RET_IP_); in xfs_buf_free()
326 ASSERT(list_empty(&bp->b_lru)); in xfs_buf_free()
328 if (bp->b_flags & _XBF_PAGES) { in xfs_buf_free()
331 if (xfs_buf_is_vmapped(bp)) in xfs_buf_free()
332 vm_unmap_ram(bp->b_addr - bp->b_offset, in xfs_buf_free()
333 bp->b_page_count); in xfs_buf_free()
335 for (i = 0; i < bp->b_page_count; i++) { in xfs_buf_free()
336 struct page *page = bp->b_pages[i]; in xfs_buf_free()
342 bp->b_page_count; in xfs_buf_free()
343 } else if (bp->b_flags & _XBF_KMEM) in xfs_buf_free()
344 kmem_free(bp->b_addr); in xfs_buf_free()
345 _xfs_buf_free_pages(bp); in xfs_buf_free()
346 xfs_buf_free_maps(bp); in xfs_buf_free()
347 kmem_cache_free(xfs_buf_zone, bp); in xfs_buf_free()
355 xfs_buf_t *bp, in xfs_buf_allocate_memory() argument
379 size = BBTOB(bp->b_length); in xfs_buf_allocate_memory()
381 int align_mask = xfs_buftarg_dma_alignment(bp->b_target); in xfs_buf_allocate_memory()
382 bp->b_addr = kmem_alloc_io(size, align_mask, in xfs_buf_allocate_memory()
384 if (!bp->b_addr) { in xfs_buf_allocate_memory()
389 if (((unsigned long)(bp->b_addr + size - 1) & PAGE_MASK) != in xfs_buf_allocate_memory()
390 ((unsigned long)bp->b_addr & PAGE_MASK)) { in xfs_buf_allocate_memory()
392 kmem_free(bp->b_addr); in xfs_buf_allocate_memory()
393 bp->b_addr = NULL; in xfs_buf_allocate_memory()
396 bp->b_offset = offset_in_page(bp->b_addr); in xfs_buf_allocate_memory()
397 bp->b_pages = bp->b_page_array; in xfs_buf_allocate_memory()
398 bp->b_pages[0] = kmem_to_page(bp->b_addr); in xfs_buf_allocate_memory()
399 bp->b_page_count = 1; in xfs_buf_allocate_memory()
400 bp->b_flags |= _XBF_KMEM; in xfs_buf_allocate_memory()
405 start = BBTOB(bp->b_maps[0].bm_bn) >> PAGE_SHIFT; in xfs_buf_allocate_memory()
406 end = (BBTOB(bp->b_maps[0].bm_bn + bp->b_length) + PAGE_SIZE - 1) in xfs_buf_allocate_memory()
409 error = _xfs_buf_get_pages(bp, page_count); in xfs_buf_allocate_memory()
413 offset = bp->b_offset; in xfs_buf_allocate_memory()
414 bp->b_flags |= _XBF_PAGES; in xfs_buf_allocate_memory()
416 for (i = 0; i < bp->b_page_count; i++) { in xfs_buf_allocate_memory()
423 bp->b_page_count = i; in xfs_buf_allocate_memory()
440 XFS_STATS_INC(bp->b_mount, xb_page_retries); in xfs_buf_allocate_memory()
445 XFS_STATS_INC(bp->b_mount, xb_page_found); in xfs_buf_allocate_memory()
449 bp->b_pages[i] = page; in xfs_buf_allocate_memory()
455 for (i = 0; i < bp->b_page_count; i++) in xfs_buf_allocate_memory()
456 __free_page(bp->b_pages[i]); in xfs_buf_allocate_memory()
457 bp->b_flags &= ~_XBF_PAGES; in xfs_buf_allocate_memory()
466 xfs_buf_t *bp, in _xfs_buf_map_pages() argument
469 ASSERT(bp->b_flags & _XBF_PAGES); in _xfs_buf_map_pages()
470 if (bp->b_page_count == 1) { in _xfs_buf_map_pages()
472 bp->b_addr = page_address(bp->b_pages[0]) + bp->b_offset; in _xfs_buf_map_pages()
474 bp->b_addr = NULL; in _xfs_buf_map_pages()
489 bp->b_addr = vm_map_ram(bp->b_pages, bp->b_page_count, in _xfs_buf_map_pages()
491 if (bp->b_addr) in _xfs_buf_map_pages()
497 if (!bp->b_addr) in _xfs_buf_map_pages()
499 bp->b_addr += bp->b_offset; in _xfs_buf_map_pages()
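
_xfs_buf_map_pages() branches three ways: a one-page buffer borrows
page_address() directly (line 472), XBF_UNMAPPED buffers leave b_addr
NULL (line 474; callers then go through xfs_buf_offset()), and
everything else uses vm_map_ram() (line 489). The retry elided between
lines 489 and 497 follows the usual pattern; a hedged reconstruction:

    /* Sketch of the elided retry: map under NOFS, flushing stale
     * vmap aliases once before giving up. */
    nofs_flag = memalloc_nofs_save();
    do {
            bp->b_addr = vm_map_ram(bp->b_pages, bp->b_page_count, -1);
            if (bp->b_addr)
                    break;
            vm_unmap_aliases();
    } while (retried++ <= 1);
    memalloc_nofs_restore(nofs_flag);
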
514 const struct xfs_buf *bp = obj; in _xfs_buf_obj_cmp() local
522 if (bp->b_bn != map->bm_bn) in _xfs_buf_obj_cmp()
525 if (unlikely(bp->b_length != map->bm_len)) { in _xfs_buf_obj_cmp()
534 ASSERT(bp->b_flags & XBF_STALE); in _xfs_buf_obj_cmp()
593 xfs_buf_t *bp; in xfs_buf_find() local
624 bp = rhashtable_lookup_fast(&pag->pag_buf_hash, &cmap, in xfs_buf_find()
626 if (bp) { in xfs_buf_find()
627 atomic_inc(&bp->b_hold); in xfs_buf_find()
651 if (!xfs_buf_trylock(bp)) { in xfs_buf_find()
653 xfs_buf_rele(bp); in xfs_buf_find()
657 xfs_buf_lock(bp); in xfs_buf_find()
666 if (bp->b_flags & XBF_STALE) { in xfs_buf_find()
667 ASSERT((bp->b_flags & _XBF_DELWRI_Q) == 0); in xfs_buf_find()
668 bp->b_flags &= _XBF_KMEM | _XBF_PAGES; in xfs_buf_find()
669 bp->b_ops = NULL; in xfs_buf_find()
672 trace_xfs_buf_find(bp, flags, _RET_IP_); in xfs_buf_find()
674 *found_bp = bp; in xfs_buf_find()
685 struct xfs_buf *bp; in xfs_buf_incore() local
689 error = xfs_buf_find(target, &map, 1, flags, NULL, &bp); in xfs_buf_incore()
692 return bp; in xfs_buf_incore()
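
xfs_buf_incore() is the cache-probe wrapper around xfs_buf_find(): it
never allocates or reads, it only returns an already-cached (and, with
XBF_TRYLOCK, locked) buffer, or NULL. Typical use, sketched:

    /* Sketch: peek at the cache without touching the disk. */
    bp = xfs_buf_incore(mp->m_ddev_targp, blkno, numblks, XBF_TRYLOCK);
    if (bp) {
            /* cached and locked; inspect, then drop lock and hold */
            xfs_buf_relse(bp);
    }
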
708 struct xfs_buf *bp; in xfs_buf_get_map() local
713 error = xfs_buf_find(target, map, nmaps, flags, NULL, &bp); in xfs_buf_get_map()
729 error = xfs_buf_find(target, map, nmaps, flags, new_bp, &bp); in xfs_buf_get_map()
735 if (bp != new_bp) in xfs_buf_get_map()
739 if (!bp->b_addr) { in xfs_buf_get_map()
740 error = _xfs_buf_map_pages(bp, flags); in xfs_buf_get_map()
744 bp->b_page_count); in xfs_buf_get_map()
745 xfs_buf_relse(bp); in xfs_buf_get_map()
755 xfs_buf_ioerror(bp, 0); in xfs_buf_get_map()
758 trace_xfs_buf_get(bp, flags, _RET_IP_); in xfs_buf_get_map()
759 *bpp = bp; in xfs_buf_get_map()
765 xfs_buf_t *bp, in _xfs_buf_read() argument
769 ASSERT(bp->b_maps[0].bm_bn != XFS_BUF_DADDR_NULL); in _xfs_buf_read()
771 bp->b_flags &= ~(XBF_WRITE | XBF_ASYNC | XBF_READ_AHEAD | XBF_DONE); in _xfs_buf_read()
772 bp->b_flags |= flags & (XBF_READ | XBF_ASYNC | XBF_READ_AHEAD); in _xfs_buf_read()
774 return xfs_buf_submit(bp); in _xfs_buf_read()
796 struct xfs_buf *bp, in xfs_buf_reverify() argument
799 ASSERT(bp->b_flags & XBF_DONE); in xfs_buf_reverify()
800 ASSERT(bp->b_error == 0); in xfs_buf_reverify()
802 if (!ops || bp->b_ops) in xfs_buf_reverify()
805 bp->b_ops = ops; in xfs_buf_reverify()
806 bp->b_ops->verify_read(bp); in xfs_buf_reverify()
807 if (bp->b_error) in xfs_buf_reverify()
808 bp->b_flags &= ~XBF_DONE; in xfs_buf_reverify()
809 return bp->b_error; in xfs_buf_reverify()
822 struct xfs_buf *bp; in xfs_buf_read_map() local
828 error = xfs_buf_get_map(target, map, nmaps, flags, &bp); in xfs_buf_read_map()
832 trace_xfs_buf_read(bp, flags, _RET_IP_); in xfs_buf_read_map()
834 if (!(bp->b_flags & XBF_DONE)) { in xfs_buf_read_map()
837 bp->b_ops = ops; in xfs_buf_read_map()
838 error = _xfs_buf_read(bp, flags); in xfs_buf_read_map()
845 error = xfs_buf_reverify(bp, ops); in xfs_buf_read_map()
849 xfs_buf_relse(bp); in xfs_buf_read_map()
854 bp->b_flags &= ~XBF_READ; in xfs_buf_read_map()
855 ASSERT(bp->b_ops != NULL || ops == NULL); in xfs_buf_read_map()
869 xfs_buf_ioerror_alert(bp, fa); in xfs_buf_read_map()
871 bp->b_flags &= ~XBF_DONE; in xfs_buf_read_map()
872 xfs_buf_stale(bp); in xfs_buf_read_map()
873 xfs_buf_relse(bp); in xfs_buf_read_map()
881 *bpp = bp; in xfs_buf_read_map()
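
xfs_buf_read_map() is normally reached through the single-extent
xfs_buf_read() wrapper in xfs_buf.h, which supplies the map and the
caller's return address. A hedged sketch of the usual
read-verify-release cycle (the superblock verifier is just an example):

    /* Sketch: single-extent read with a verifier, then release. */
    struct xfs_buf *bp;
    int error;

    error = xfs_buf_read(mp->m_ddev_targp, blkno, numblks, 0, &bp,
                         &xfs_sb_buf_ops);
    if (error)
            return error;
    /* ... read fields via bp->b_addr or xfs_buf_offset() ... */
    xfs_buf_relse(bp);
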
896 struct xfs_buf *bp; in xfs_buf_readahead_map() local
902 XBF_TRYLOCK | XBF_ASYNC | XBF_READ_AHEAD, &bp, ops, in xfs_buf_readahead_map()
919 struct xfs_buf *bp; in xfs_buf_read_uncached() local
924 error = xfs_buf_get_uncached(target, numblks, flags, &bp); in xfs_buf_read_uncached()
929 ASSERT(bp->b_map_count == 1); in xfs_buf_read_uncached()
930 bp->b_bn = XFS_BUF_DADDR_NULL; /* always null for uncached buffers */ in xfs_buf_read_uncached()
931 bp->b_maps[0].bm_bn = daddr; in xfs_buf_read_uncached()
932 bp->b_flags |= XBF_READ; in xfs_buf_read_uncached()
933 bp->b_ops = ops; in xfs_buf_read_uncached()
935 xfs_buf_submit(bp); in xfs_buf_read_uncached()
936 if (bp->b_error) { in xfs_buf_read_uncached()
937 error = bp->b_error; in xfs_buf_read_uncached()
938 xfs_buf_relse(bp); in xfs_buf_read_uncached()
942 *bpp = bp; in xfs_buf_read_uncached()
955 struct xfs_buf *bp; in xfs_buf_get_uncached() local
961 error = _xfs_buf_alloc(target, &map, 1, flags & XBF_NO_IOACCT, &bp); in xfs_buf_get_uncached()
966 error = _xfs_buf_get_pages(bp, page_count); in xfs_buf_get_uncached()
971 bp->b_pages[i] = alloc_page(xb_to_gfp(flags)); in xfs_buf_get_uncached()
972 if (!bp->b_pages[i]) { in xfs_buf_get_uncached()
977 bp->b_flags |= _XBF_PAGES; in xfs_buf_get_uncached()
979 error = _xfs_buf_map_pages(bp, 0); in xfs_buf_get_uncached()
986 trace_xfs_buf_get_uncached(bp, _RET_IP_); in xfs_buf_get_uncached()
987 *bpp = bp; in xfs_buf_get_uncached()
992 __free_page(bp->b_pages[i]); in xfs_buf_get_uncached()
993 _xfs_buf_free_pages(bp); in xfs_buf_get_uncached()
995 xfs_buf_free_maps(bp); in xfs_buf_get_uncached()
996 kmem_cache_free(xfs_buf_zone, bp); in xfs_buf_get_uncached()
1008 xfs_buf_t *bp) in xfs_buf_hold() argument
1010 trace_xfs_buf_hold(bp, _RET_IP_); in xfs_buf_hold()
1011 atomic_inc(&bp->b_hold); in xfs_buf_hold()
1020 xfs_buf_t *bp) in xfs_buf_rele() argument
1022 struct xfs_perag *pag = bp->b_pag; in xfs_buf_rele()
1026 trace_xfs_buf_rele(bp, _RET_IP_); in xfs_buf_rele()
1029 ASSERT(list_empty(&bp->b_lru)); in xfs_buf_rele()
1030 if (atomic_dec_and_test(&bp->b_hold)) { in xfs_buf_rele()
1031 xfs_buf_ioacct_dec(bp); in xfs_buf_rele()
1032 xfs_buf_free(bp); in xfs_buf_rele()
1037 ASSERT(atomic_read(&bp->b_hold) > 0); in xfs_buf_rele()
1049 spin_lock(&bp->b_lock); in xfs_buf_rele()
1050 release = atomic_dec_and_lock(&bp->b_hold, &pag->pag_buf_lock); in xfs_buf_rele()
1058 if ((atomic_read(&bp->b_hold) == 1) && !list_empty(&bp->b_lru)) in xfs_buf_rele()
1059 __xfs_buf_ioacct_dec(bp); in xfs_buf_rele()
1064 __xfs_buf_ioacct_dec(bp); in xfs_buf_rele()
1065 if (!(bp->b_flags & XBF_STALE) && atomic_read(&bp->b_lru_ref)) { in xfs_buf_rele()
1071 if (list_lru_add(&bp->b_target->bt_lru, &bp->b_lru)) { in xfs_buf_rele()
1072 bp->b_state &= ~XFS_BSTATE_DISPOSE; in xfs_buf_rele()
1073 atomic_inc(&bp->b_hold); in xfs_buf_rele()
1083 if (!(bp->b_state & XFS_BSTATE_DISPOSE)) { in xfs_buf_rele()
1084 list_lru_del(&bp->b_target->bt_lru, &bp->b_lru); in xfs_buf_rele()
1086 ASSERT(list_empty(&bp->b_lru)); in xfs_buf_rele()
1089 ASSERT(!(bp->b_flags & _XBF_DELWRI_Q)); in xfs_buf_rele()
1090 rhashtable_remove_fast(&pag->pag_buf_hash, &bp->b_rhash_head, in xfs_buf_rele()
1098 spin_unlock(&bp->b_lock); in xfs_buf_rele()
1101 xfs_buf_free(bp); in xfs_buf_rele()
1118 struct xfs_buf *bp) in xfs_buf_trylock() argument
1122 locked = down_trylock(&bp->b_sema) == 0; in xfs_buf_trylock()
1124 trace_xfs_buf_trylock(bp, _RET_IP_); in xfs_buf_trylock()
1126 trace_xfs_buf_trylock_fail(bp, _RET_IP_); in xfs_buf_trylock()
1141 struct xfs_buf *bp) in xfs_buf_lock() argument
1143 trace_xfs_buf_lock(bp, _RET_IP_); in xfs_buf_lock()
1145 if (atomic_read(&bp->b_pin_count) && (bp->b_flags & XBF_STALE)) in xfs_buf_lock()
1146 xfs_log_force(bp->b_mount, 0); in xfs_buf_lock()
1147 down(&bp->b_sema); in xfs_buf_lock()
1149 trace_xfs_buf_lock_done(bp, _RET_IP_); in xfs_buf_lock()
1154 struct xfs_buf *bp) in xfs_buf_unlock() argument
1156 ASSERT(xfs_buf_islocked(bp)); in xfs_buf_unlock()
1158 up(&bp->b_sema); in xfs_buf_unlock()
1159 trace_xfs_buf_unlock(bp, _RET_IP_); in xfs_buf_unlock()
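
Lines 1145-1146 encode a deadlock-avoidance detail: taking the lock on
a pinned, stale buffer forces the log first, since the buffer cannot be
unpinned until its log item commits. The everyday discipline around
these primitives, sketched:

    /* Sketch: nonblocking contexts try-lock and back off. */
    if (!xfs_buf_trylock(bp))
            return -EAGAIN;         /* illustrative error choice */
    /* ... modify the buffer under the lock ... */
    xfs_buf_unlock(bp);
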
1164 xfs_buf_t *bp) in xfs_buf_wait_unpin() argument
1168 if (atomic_read(&bp->b_pin_count) == 0) in xfs_buf_wait_unpin()
1171 add_wait_queue(&bp->b_waiters, &wait); in xfs_buf_wait_unpin()
1174 if (atomic_read(&bp->b_pin_count) == 0) in xfs_buf_wait_unpin()
1178 remove_wait_queue(&bp->b_waiters, &wait); in xfs_buf_wait_unpin()
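
Only the b_waiters bookkeeping of xfs_buf_wait_unpin() matches the
search; the loop elided between lines 1171 and 1178 is the classic
open-coded wait on b_pin_count. A hedged reconstruction of the body:

    /* Sketch of the elided wait loop. */
    DECLARE_WAITQUEUE(wait, current);

    add_wait_queue(&bp->b_waiters, &wait);
    for (;;) {
            set_current_state(TASK_UNINTERRUPTIBLE);
            if (atomic_read(&bp->b_pin_count) == 0)
                    break;
            io_schedule();
    }
    remove_wait_queue(&bp->b_waiters, &wait);
    set_current_state(TASK_RUNNING);
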
1184 struct xfs_buf *bp) in xfs_buf_ioerror_alert_ratelimited() argument
1189 if (bp->b_target != lasttarg || in xfs_buf_ioerror_alert_ratelimited()
1192 xfs_buf_ioerror_alert(bp, __this_address); in xfs_buf_ioerror_alert_ratelimited()
1194 lasttarg = bp->b_target; in xfs_buf_ioerror_alert_ratelimited()
1203 struct xfs_buf *bp, in xfs_buf_ioerror_permanent() argument
1206 struct xfs_mount *mp = bp->b_mount; in xfs_buf_ioerror_permanent()
1209 ++bp->b_retries > cfg->max_retries) in xfs_buf_ioerror_permanent()
1212 time_after(jiffies, cfg->retry_timeout + bp->b_first_retry_time)) in xfs_buf_ioerror_permanent()
1241 struct xfs_buf *bp) in xfs_buf_ioend_handle_error() argument
1243 struct xfs_mount *mp = bp->b_mount; in xfs_buf_ioend_handle_error()
1253 xfs_buf_ioerror_alert_ratelimited(bp); in xfs_buf_ioend_handle_error()
1259 if (bp->b_flags & _XBF_LOGRECOVERY) { in xfs_buf_ioend_handle_error()
1267 if (!(bp->b_flags & XBF_ASYNC)) in xfs_buf_ioend_handle_error()
1270 trace_xfs_buf_iodone_async(bp, _RET_IP_); in xfs_buf_ioend_handle_error()
1272 cfg = xfs_error_get_cfg(mp, XFS_ERR_METADATA, bp->b_error); in xfs_buf_ioend_handle_error()
1273 if (bp->b_last_error != bp->b_error || in xfs_buf_ioend_handle_error()
1274 !(bp->b_flags & (XBF_STALE | XBF_WRITE_FAIL))) { in xfs_buf_ioend_handle_error()
1275 bp->b_last_error = bp->b_error; in xfs_buf_ioend_handle_error()
1277 !bp->b_first_retry_time) in xfs_buf_ioend_handle_error()
1278 bp->b_first_retry_time = jiffies; in xfs_buf_ioend_handle_error()
1286 if (xfs_buf_ioerror_permanent(bp, cfg)) { in xfs_buf_ioend_handle_error()
1292 if (bp->b_flags & _XBF_INODES) in xfs_buf_ioend_handle_error()
1293 xfs_buf_inode_io_fail(bp); in xfs_buf_ioend_handle_error()
1294 else if (bp->b_flags & _XBF_DQUOTS) in xfs_buf_ioend_handle_error()
1295 xfs_buf_dquot_io_fail(bp); in xfs_buf_ioend_handle_error()
1297 ASSERT(list_empty(&bp->b_li_list)); in xfs_buf_ioend_handle_error()
1298 xfs_buf_ioerror(bp, 0); in xfs_buf_ioend_handle_error()
1299 xfs_buf_relse(bp); in xfs_buf_ioend_handle_error()
1303 xfs_buf_ioerror(bp, 0); in xfs_buf_ioend_handle_error()
1304 bp->b_flags |= (XBF_DONE | XBF_WRITE_FAIL); in xfs_buf_ioend_handle_error()
1305 xfs_buf_submit(bp); in xfs_buf_ioend_handle_error()
1308 xfs_buf_stale(bp); in xfs_buf_ioend_handle_error()
1309 bp->b_flags |= XBF_DONE; in xfs_buf_ioend_handle_error()
1310 bp->b_flags &= ~XBF_WRITE; in xfs_buf_ioend_handle_error()
1311 trace_xfs_buf_error_relse(bp, _RET_IP_); in xfs_buf_ioend_handle_error()
1317 struct xfs_buf *bp) in xfs_buf_ioend() argument
1319 trace_xfs_buf_iodone(bp, _RET_IP_); in xfs_buf_ioend()
1325 if (!bp->b_error && bp->b_io_error) in xfs_buf_ioend()
1326 xfs_buf_ioerror(bp, bp->b_io_error); in xfs_buf_ioend()
1328 if (bp->b_flags & XBF_READ) { in xfs_buf_ioend()
1329 if (!bp->b_error && bp->b_ops) in xfs_buf_ioend()
1330 bp->b_ops->verify_read(bp); in xfs_buf_ioend()
1331 if (!bp->b_error) in xfs_buf_ioend()
1332 bp->b_flags |= XBF_DONE; in xfs_buf_ioend()
1334 if (!bp->b_error) { in xfs_buf_ioend()
1335 bp->b_flags &= ~XBF_WRITE_FAIL; in xfs_buf_ioend()
1336 bp->b_flags |= XBF_DONE; in xfs_buf_ioend()
1339 if (unlikely(bp->b_error) && xfs_buf_ioend_handle_error(bp)) in xfs_buf_ioend()
1343 bp->b_last_error = 0; in xfs_buf_ioend()
1344 bp->b_retries = 0; in xfs_buf_ioend()
1345 bp->b_first_retry_time = 0; in xfs_buf_ioend()
1352 if (bp->b_log_item) in xfs_buf_ioend()
1353 xfs_buf_item_done(bp); in xfs_buf_ioend()
1355 if (bp->b_flags & _XBF_INODES) in xfs_buf_ioend()
1356 xfs_buf_inode_iodone(bp); in xfs_buf_ioend()
1357 else if (bp->b_flags & _XBF_DQUOTS) in xfs_buf_ioend()
1358 xfs_buf_dquot_iodone(bp); in xfs_buf_ioend()
1362 bp->b_flags &= ~(XBF_READ | XBF_WRITE | XBF_READ_AHEAD | in xfs_buf_ioend()
1365 if (bp->b_flags & XBF_ASYNC) in xfs_buf_ioend()
1366 xfs_buf_relse(bp); in xfs_buf_ioend()
1368 complete(&bp->b_iowait); in xfs_buf_ioend()
1375 struct xfs_buf *bp = in xfs_buf_ioend_work() local
1378 xfs_buf_ioend(bp); in xfs_buf_ioend_work()
1383 struct xfs_buf *bp) in xfs_buf_ioend_async() argument
1385 INIT_WORK(&bp->b_ioend_work, xfs_buf_ioend_work); in xfs_buf_ioend_async()
1386 queue_work(bp->b_mount->m_buf_workqueue, &bp->b_ioend_work); in xfs_buf_ioend_async()
1391 xfs_buf_t *bp, in __xfs_buf_ioerror() argument
1396 bp->b_error = error; in __xfs_buf_ioerror()
1397 trace_xfs_buf_ioerror(bp, error, failaddr); in __xfs_buf_ioerror()
1402 struct xfs_buf *bp, in xfs_buf_ioerror_alert() argument
1405 xfs_buf_alert_ratelimited(bp, "XFS: metadata IO error", in xfs_buf_ioerror_alert()
1407 func, (uint64_t)XFS_BUF_ADDR(bp), in xfs_buf_ioerror_alert()
1408 bp->b_length, -bp->b_error); in xfs_buf_ioerror_alert()
1419 struct xfs_buf *bp) in xfs_buf_ioend_fail() argument
1421 bp->b_flags &= ~XBF_DONE; in xfs_buf_ioend_fail()
1422 xfs_buf_stale(bp); in xfs_buf_ioend_fail()
1423 xfs_buf_ioerror(bp, -EIO); in xfs_buf_ioend_fail()
1424 xfs_buf_ioend(bp); in xfs_buf_ioend_fail()
1429 struct xfs_buf *bp) in xfs_bwrite() argument
1433 ASSERT(xfs_buf_islocked(bp)); in xfs_bwrite()
1435 bp->b_flags |= XBF_WRITE; in xfs_bwrite()
1436 bp->b_flags &= ~(XBF_ASYNC | XBF_READ | _XBF_DELWRI_Q | in xfs_bwrite()
1439 error = xfs_buf_submit(bp); in xfs_bwrite()
1441 xfs_force_shutdown(bp->b_mount, SHUTDOWN_META_IO_ERROR); in xfs_bwrite()
1449 struct xfs_buf *bp = (struct xfs_buf *)bio->bi_private; in xfs_buf_bio_end_io() local
1452 (bp->b_flags & XBF_WRITE) && (bp->b_flags & XBF_ASYNC) && in xfs_buf_bio_end_io()
1453 XFS_TEST_ERROR(false, bp->b_mount, XFS_ERRTAG_BUF_IOERROR)) in xfs_buf_bio_end_io()
1463 cmpxchg(&bp->b_io_error, 0, error); in xfs_buf_bio_end_io()
1466 if (!bp->b_error && xfs_buf_is_vmapped(bp) && (bp->b_flags & XBF_READ)) in xfs_buf_bio_end_io()
1467 invalidate_kernel_vmap_range(bp->b_addr, xfs_buf_vmap_len(bp)); in xfs_buf_bio_end_io()
1469 if (atomic_dec_and_test(&bp->b_io_remaining) == 1) in xfs_buf_bio_end_io()
1470 xfs_buf_ioend_async(bp); in xfs_buf_bio_end_io()
1476 struct xfs_buf *bp, in xfs_buf_ioapply_map() argument
1483 int total_nr_pages = bp->b_page_count; in xfs_buf_ioapply_map()
1486 sector_t sector = bp->b_maps[map].bm_bn; in xfs_buf_ioapply_map()
1502 size = min_t(int, BBTOB(bp->b_maps[map].bm_len), *count); in xfs_buf_ioapply_map()
1507 atomic_inc(&bp->b_io_remaining); in xfs_buf_ioapply_map()
1511 bio_set_dev(bio, bp->b_target->bt_bdev); in xfs_buf_ioapply_map()
1514 bio->bi_private = bp; in xfs_buf_ioapply_map()
1523 rbytes = bio_add_page(bio, bp->b_pages[page_index], nbytes, in xfs_buf_ioapply_map()
1535 if (xfs_buf_is_vmapped(bp)) { in xfs_buf_ioapply_map()
1536 flush_kernel_vmap_range(bp->b_addr, in xfs_buf_ioapply_map()
1537 xfs_buf_vmap_len(bp)); in xfs_buf_ioapply_map()
1547 atomic_dec(&bp->b_io_remaining); in xfs_buf_ioapply_map()
1548 xfs_buf_ioerror(bp, -EIO); in xfs_buf_ioapply_map()
1556 struct xfs_buf *bp) in _xfs_buf_ioapply() argument
1568 bp->b_error = 0; in _xfs_buf_ioapply()
1570 if (bp->b_flags & XBF_WRITE) { in _xfs_buf_ioapply()
1578 if (bp->b_ops) { in _xfs_buf_ioapply()
1579 bp->b_ops->verify_write(bp); in _xfs_buf_ioapply()
1580 if (bp->b_error) { in _xfs_buf_ioapply()
1581 xfs_force_shutdown(bp->b_mount, in _xfs_buf_ioapply()
1585 } else if (bp->b_bn != XFS_BUF_DADDR_NULL) { in _xfs_buf_ioapply()
1586 struct xfs_mount *mp = bp->b_mount; in _xfs_buf_ioapply()
1595 __func__, bp->b_bn, bp->b_length); in _xfs_buf_ioapply()
1596 xfs_hex_dump(bp->b_addr, in _xfs_buf_ioapply()
1603 if (bp->b_flags & XBF_READ_AHEAD) in _xfs_buf_ioapply()
1616 offset = bp->b_offset; in _xfs_buf_ioapply()
1617 size = BBTOB(bp->b_length); in _xfs_buf_ioapply()
1619 for (i = 0; i < bp->b_map_count; i++) { in _xfs_buf_ioapply()
1620 xfs_buf_ioapply_map(bp, i, &offset, &size, op); in _xfs_buf_ioapply()
1621 if (bp->b_error) in _xfs_buf_ioapply()
1634 struct xfs_buf *bp) in xfs_buf_iowait() argument
1636 ASSERT(!(bp->b_flags & XBF_ASYNC)); in xfs_buf_iowait()
1638 trace_xfs_buf_iowait(bp, _RET_IP_); in xfs_buf_iowait()
1639 wait_for_completion(&bp->b_iowait); in xfs_buf_iowait()
1640 trace_xfs_buf_iowait_done(bp, _RET_IP_); in xfs_buf_iowait()
1642 return bp->b_error; in xfs_buf_iowait()
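
xfs_buf_iowait() is only legal for synchronous I/O (note the
!XBF_ASYNC assertion on line 1636); async buffers complete through
xfs_buf_ioend_async() instead. Sketch of the synchronous path that
__xfs_buf_submit(bp, true) wires up:

    /* Sketch: synchronous write, waiting for completion inline. */
    bp->b_flags |= XBF_WRITE;
    bp->b_flags &= ~XBF_ASYNC;
    error = xfs_buf_submit(bp);     /* waits internally via xfs_buf_iowait() */
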
1653 struct xfs_buf *bp, in __xfs_buf_submit() argument
1658 trace_xfs_buf_submit(bp, _RET_IP_); in __xfs_buf_submit()
1660 ASSERT(!(bp->b_flags & _XBF_DELWRI_Q)); in __xfs_buf_submit()
1663 if (XFS_FORCED_SHUTDOWN(bp->b_mount)) { in __xfs_buf_submit()
1664 xfs_buf_ioend_fail(bp); in __xfs_buf_submit()
1673 xfs_buf_hold(bp); in __xfs_buf_submit()
1675 if (bp->b_flags & XBF_WRITE) in __xfs_buf_submit()
1676 xfs_buf_wait_unpin(bp); in __xfs_buf_submit()
1679 bp->b_io_error = 0; in __xfs_buf_submit()
1686 atomic_set(&bp->b_io_remaining, 1); in __xfs_buf_submit()
1687 if (bp->b_flags & XBF_ASYNC) in __xfs_buf_submit()
1688 xfs_buf_ioacct_inc(bp); in __xfs_buf_submit()
1689 _xfs_buf_ioapply(bp); in __xfs_buf_submit()
1696 if (atomic_dec_and_test(&bp->b_io_remaining) == 1) { in __xfs_buf_submit()
1697 if (bp->b_error || !(bp->b_flags & XBF_ASYNC)) in __xfs_buf_submit()
1698 xfs_buf_ioend(bp); in __xfs_buf_submit()
1700 xfs_buf_ioend_async(bp); in __xfs_buf_submit()
1704 error = xfs_buf_iowait(bp); in __xfs_buf_submit()
1711 xfs_buf_rele(bp); in __xfs_buf_submit()
1717 struct xfs_buf *bp, in xfs_buf_offset() argument
1722 if (bp->b_addr) in xfs_buf_offset()
1723 return bp->b_addr + offset; in xfs_buf_offset()
1725 offset += bp->b_offset; in xfs_buf_offset()
1726 page = bp->b_pages[offset >> PAGE_SHIFT]; in xfs_buf_offset()
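
The last line of xfs_buf_offset() contains no "bp" and is filtered out
of this listing; it resolves the page and returns an address inside it.
The complete helper, with that line reconstructed:

    void *xfs_buf_offset(struct xfs_buf *bp, size_t offset)
    {
            struct page *page;

            if (bp->b_addr)
                    return bp->b_addr + offset;

            offset += bp->b_offset;
            page = bp->b_pages[offset >> PAGE_SHIFT];
            return page_address(page) + (offset & (PAGE_SIZE - 1));
    }
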
1732 struct xfs_buf *bp, in xfs_buf_zero() argument
1743 page_index = (boff + bp->b_offset) >> PAGE_SHIFT; in xfs_buf_zero()
1744 page_offset = (boff + bp->b_offset) & ~PAGE_MASK; in xfs_buf_zero()
1745 page = bp->b_pages[page_index]; in xfs_buf_zero()
1747 BBTOB(bp->b_length) - boff); in xfs_buf_zero()
1770 struct xfs_buf *bp, in __xfs_buf_mark_corrupt() argument
1773 ASSERT(bp->b_flags & XBF_DONE); in __xfs_buf_mark_corrupt()
1775 xfs_buf_corruption_error(bp, fa); in __xfs_buf_mark_corrupt()
1776 xfs_buf_stale(bp); in __xfs_buf_mark_corrupt()
1796 struct xfs_buf *bp = container_of(item, struct xfs_buf, b_lru); in xfs_buftarg_wait_rele() local
1799 if (atomic_read(&bp->b_hold) > 1) { in xfs_buftarg_wait_rele()
1801 trace_xfs_buf_wait_buftarg(bp, _RET_IP_); in xfs_buftarg_wait_rele()
1804 if (!spin_trylock(&bp->b_lock)) in xfs_buftarg_wait_rele()
1811 atomic_set(&bp->b_lru_ref, 0); in xfs_buftarg_wait_rele()
1812 bp->b_state |= XFS_BSTATE_DISPOSE; in xfs_buftarg_wait_rele()
1814 spin_unlock(&bp->b_lock); in xfs_buftarg_wait_rele()
1848 struct xfs_buf *bp; in xfs_wait_buftarg() local
1849 bp = list_first_entry(&dispose, struct xfs_buf, b_lru); in xfs_wait_buftarg()
1850 list_del_init(&bp->b_lru); in xfs_wait_buftarg()
1851 if (bp->b_flags & XBF_WRITE_FAIL) { in xfs_wait_buftarg()
1853 xfs_buf_alert_ratelimited(bp, in xfs_wait_buftarg()
1856 (long long)bp->b_bn); in xfs_wait_buftarg()
1858 xfs_buf_rele(bp); in xfs_wait_buftarg()
1884 struct xfs_buf *bp = container_of(item, struct xfs_buf, b_lru); in xfs_buftarg_isolate() local
1891 if (!spin_trylock(&bp->b_lock)) in xfs_buftarg_isolate()
1898 if (atomic_add_unless(&bp->b_lru_ref, -1, 0)) { in xfs_buftarg_isolate()
1899 spin_unlock(&bp->b_lock); in xfs_buftarg_isolate()
1903 bp->b_state |= XFS_BSTATE_DISPOSE; in xfs_buftarg_isolate()
1905 spin_unlock(&bp->b_lock); in xfs_buftarg_isolate()
1923 struct xfs_buf *bp; in xfs_buftarg_shrink_scan() local
1924 bp = list_first_entry(&dispose, struct xfs_buf, b_lru); in xfs_buftarg_shrink_scan()
1925 list_del_init(&bp->b_lru); in xfs_buftarg_shrink_scan()
1926 xfs_buf_rele(bp); in xfs_buftarg_shrink_scan()
2050 struct xfs_buf *bp; in xfs_buf_delwri_cancel() local
2053 bp = list_first_entry(list, struct xfs_buf, b_list); in xfs_buf_delwri_cancel()
2055 xfs_buf_lock(bp); in xfs_buf_delwri_cancel()
2056 bp->b_flags &= ~_XBF_DELWRI_Q; in xfs_buf_delwri_cancel()
2057 list_del_init(&bp->b_list); in xfs_buf_delwri_cancel()
2058 xfs_buf_relse(bp); in xfs_buf_delwri_cancel()
2075 struct xfs_buf *bp, in xfs_buf_delwri_queue() argument
2078 ASSERT(xfs_buf_islocked(bp)); in xfs_buf_delwri_queue()
2079 ASSERT(!(bp->b_flags & XBF_READ)); in xfs_buf_delwri_queue()
2086 if (bp->b_flags & _XBF_DELWRI_Q) { in xfs_buf_delwri_queue()
2087 trace_xfs_buf_delwri_queued(bp, _RET_IP_); in xfs_buf_delwri_queue()
2091 trace_xfs_buf_delwri_queue(bp, _RET_IP_); in xfs_buf_delwri_queue()
2101 bp->b_flags |= _XBF_DELWRI_Q; in xfs_buf_delwri_queue()
2102 if (list_empty(&bp->b_list)) { in xfs_buf_delwri_queue()
2103 atomic_inc(&bp->b_hold); in xfs_buf_delwri_queue()
2104 list_add_tail(&bp->b_list, list); in xfs_buf_delwri_queue()
2122 struct xfs_buf *bp = container_of(b, struct xfs_buf, b_list); in xfs_buf_cmp() local
2125 diff = ap->b_maps[0].bm_bn - bp->b_maps[0].bm_bn; in xfs_buf_cmp()
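
xfs_buf_cmp() keys on the first map's disk address (the lines clamping
diff to -1/0/1 carry no "bp" and are omitted). The delwri code feeds it
to list_sort() so delayed writes go out in ascending block order, which
keeps the I/O roughly sequential:

    /* Sketch: sort the delwri list into disk order before submitting. */
    list_sort(NULL, buffer_list, xfs_buf_cmp);
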
2145 struct xfs_buf *bp, *n; in xfs_buf_delwri_submit_buffers() local
2152 list_for_each_entry_safe(bp, n, buffer_list, b_list) { in xfs_buf_delwri_submit_buffers()
2154 if (xfs_buf_ispinned(bp)) { in xfs_buf_delwri_submit_buffers()
2158 if (!xfs_buf_trylock(bp)) in xfs_buf_delwri_submit_buffers()
2161 xfs_buf_lock(bp); in xfs_buf_delwri_submit_buffers()
2170 if (!(bp->b_flags & _XBF_DELWRI_Q)) { in xfs_buf_delwri_submit_buffers()
2171 list_del_init(&bp->b_list); in xfs_buf_delwri_submit_buffers()
2172 xfs_buf_relse(bp); in xfs_buf_delwri_submit_buffers()
2176 trace_xfs_buf_delwri_split(bp, _RET_IP_); in xfs_buf_delwri_submit_buffers()
2184 bp->b_flags &= ~_XBF_DELWRI_Q; in xfs_buf_delwri_submit_buffers()
2185 bp->b_flags |= XBF_WRITE; in xfs_buf_delwri_submit_buffers()
2187 bp->b_flags &= ~XBF_ASYNC; in xfs_buf_delwri_submit_buffers()
2188 list_move_tail(&bp->b_list, wait_list); in xfs_buf_delwri_submit_buffers()
2190 bp->b_flags |= XBF_ASYNC; in xfs_buf_delwri_submit_buffers()
2191 list_del_init(&bp->b_list); in xfs_buf_delwri_submit_buffers()
2193 __xfs_buf_submit(bp, false); in xfs_buf_delwri_submit_buffers()
2237 struct xfs_buf *bp; in xfs_buf_delwri_submit() local
2243 bp = list_first_entry(&wait_list, struct xfs_buf, b_list); in xfs_buf_delwri_submit()
2245 list_del_init(&bp->b_list); in xfs_buf_delwri_submit()
2251 error2 = xfs_buf_iowait(bp); in xfs_buf_delwri_submit()
2252 xfs_buf_relse(bp); in xfs_buf_delwri_submit()
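
Taken together, lines 2075-2252 are the delayed-write lifecycle: queue
takes a hold and sets _XBF_DELWRI_Q, submit sorts the list, issues the
writes, and (in this synchronous variant) waits on each buffer in turn.
A minimal usage sketch:

    /* Sketch: batch locked buffers, then flush them in disk order. */
    LIST_HEAD(buffer_list);

    xfs_buf_delwri_queue(bp, &buffer_list); /* needs bp locked; takes a hold */
    xfs_buf_unlock(bp);                     /* the queue keeps its reference */

    error = xfs_buf_delwri_submit(&buffer_list);    /* sort, write, wait */
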
2277 struct xfs_buf *bp, in xfs_buf_delwri_pushbuf() argument
2283 ASSERT(bp->b_flags & _XBF_DELWRI_Q); in xfs_buf_delwri_pushbuf()
2285 trace_xfs_buf_delwri_pushbuf(bp, _RET_IP_); in xfs_buf_delwri_pushbuf()
2291 xfs_buf_lock(bp); in xfs_buf_delwri_pushbuf()
2292 list_move(&bp->b_list, &submit_list); in xfs_buf_delwri_pushbuf()
2293 xfs_buf_unlock(bp); in xfs_buf_delwri_pushbuf()
2308 error = xfs_buf_iowait(bp); in xfs_buf_delwri_pushbuf()
2309 bp->b_flags |= _XBF_DELWRI_Q; in xfs_buf_delwri_pushbuf()
2310 xfs_buf_unlock(bp); in xfs_buf_delwri_pushbuf()
2338 void xfs_buf_set_ref(struct xfs_buf *bp, int lru_ref) in xfs_buf_set_ref() argument
2345 if (XFS_TEST_ERROR(false, bp->b_mount, XFS_ERRTAG_BUF_LRU_REF)) in xfs_buf_set_ref()
2348 atomic_set(&bp->b_lru_ref, lru_ref); in xfs_buf_set_ref()
2358 struct xfs_buf *bp, in xfs_verify_magic() argument
2361 struct xfs_mount *mp = bp->b_mount; in xfs_verify_magic()
2365 if (WARN_ON(!bp->b_ops || !bp->b_ops->magic[idx])) in xfs_verify_magic()
2367 return dmagic == bp->b_ops->magic[idx]; in xfs_verify_magic()
2376 struct xfs_buf *bp, in xfs_verify_magic16() argument
2379 struct xfs_mount *mp = bp->b_mount; in xfs_verify_magic16()
2383 if (WARN_ON(!bp->b_ops || !bp->b_ops->magic16[idx])) in xfs_verify_magic16()
2385 return dmagic == bp->b_ops->magic16[idx]; in xfs_verify_magic16()
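
Both magic helpers select the v4 or v5 on-disk magic by feature-bit
index and warn if the verifier table has no entry. They are meant to be
called from a buffer's read/write verifiers; a hedged sketch with
illustrative structure and function names:

    /* Sketch: a read verifier using the helper (names illustrative). */
    static void sketch_verify_read(struct xfs_buf *bp)
    {
            struct sketch_hdr *hdr = bp->b_addr;    /* sh_magic is __be32 */

            if (!xfs_verify_magic(bp, hdr->sh_magic))
                    xfs_verifier_error(bp, -EFSCORRUPTED, __this_address);
    }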