1 /*
2 * Copyright (c) 2000-2006 Silicon Graphics, Inc.
3 * All Rights Reserved.
4 *
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it would be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write the Free Software Foundation,
16 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
17 */
18 #include "xfs.h"
19 #include <linux/stddef.h>
20 #include <linux/errno.h>
21 #include <linux/gfp.h>
22 #include <linux/pagemap.h>
23 #include <linux/init.h>
24 #include <linux/vmalloc.h>
25 #include <linux/bio.h>
26 #include <linux/sysctl.h>
27 #include <linux/proc_fs.h>
28 #include <linux/workqueue.h>
29 #include <linux/percpu.h>
30 #include <linux/blkdev.h>
31 #include <linux/hash.h>
32 #include <linux/kthread.h>
33 #include <linux/migrate.h>
34 #include <linux/backing-dev.h>
35 #include <linux/freezer.h>
36
37 #include "xfs_log_format.h"
38 #include "xfs_trans_resv.h"
39 #include "xfs_sb.h"
40 #include "xfs_ag.h"
41 #include "xfs_mount.h"
42 #include "xfs_trace.h"
43 #include "xfs_log.h"
44
45 static kmem_zone_t *xfs_buf_zone;
46
47 static struct workqueue_struct *xfslogd_workqueue;
48
49 #ifdef XFS_BUF_LOCK_TRACKING
50 # define XB_SET_OWNER(bp) ((bp)->b_last_holder = current->pid)
51 # define XB_CLEAR_OWNER(bp) ((bp)->b_last_holder = -1)
52 # define XB_GET_OWNER(bp) ((bp)->b_last_holder)
53 #else
54 # define XB_SET_OWNER(bp) do { } while (0)
55 # define XB_CLEAR_OWNER(bp) do { } while (0)
56 # define XB_GET_OWNER(bp) do { } while (0)
57 #endif
58
59 #define xb_to_gfp(flags) \
60 ((((flags) & XBF_READ_AHEAD) ? __GFP_NORETRY : GFP_NOFS) | __GFP_NOWARN)
61
62
63 static inline int
64 xfs_buf_is_vmapped(
65 struct xfs_buf *bp)
66 {
67 /*
68 * Return true if the buffer is vmapped.
69 *
70 * b_addr is null if the buffer is not mapped, but the code is clever
71 * enough to know it doesn't have to map a single page, so the check has
72 * to be both for b_addr and bp->b_page_count > 1.
73 */
74 return bp->b_addr && bp->b_page_count > 1;
75 }
76
77 static inline int
78 xfs_buf_vmap_len(
79 struct xfs_buf *bp)
80 {
81 return (bp->b_page_count * PAGE_SIZE) - bp->b_offset;
82 }
83
84 /*
85 * When we mark a buffer stale, we remove the buffer from the LRU and clear the
86 * b_lru_ref count so that the buffer is freed immediately when the buffer
87 * reference count falls to zero. If the buffer is already on the LRU, we need
88 * to remove the reference that LRU holds on the buffer.
89 *
90 * This prevents build-up of stale buffers on the LRU.
91 */
92 void
93 xfs_buf_stale(
94 struct xfs_buf *bp)
95 {
96 ASSERT(xfs_buf_islocked(bp));
97
98 bp->b_flags |= XBF_STALE;
99
100 /*
101 * Clear the delwri status so that a delwri queue walker will not
102 * flush this buffer to disk now that it is stale. The delwri queue has
103 * a reference to the buffer, so this is safe to do.
104 */
105 bp->b_flags &= ~_XBF_DELWRI_Q;
106
107 spin_lock(&bp->b_lock);
108 atomic_set(&bp->b_lru_ref, 0);
109 if (!(bp->b_state & XFS_BSTATE_DISPOSE) &&
110 (list_lru_del(&bp->b_target->bt_lru, &bp->b_lru)))
111 atomic_dec(&bp->b_hold);
112
113 ASSERT(atomic_read(&bp->b_hold) >= 1);
114 spin_unlock(&bp->b_lock);
115 }
116
117 static int
118 xfs_buf_get_maps(
119 struct xfs_buf *bp,
120 int map_count)
121 {
122 ASSERT(bp->b_maps == NULL);
123 bp->b_map_count = map_count;
124
125 if (map_count == 1) {
126 bp->b_maps = &bp->__b_map;
127 return 0;
128 }
129
130 bp->b_maps = kmem_zalloc(map_count * sizeof(struct xfs_buf_map),
131 KM_NOFS);
132 if (!bp->b_maps)
133 return -ENOMEM;
134 return 0;
135 }
136
137 /*
138 * Frees b_maps if it was allocated.
139 */
140 static void
141 xfs_buf_free_maps(
142 struct xfs_buf *bp)
143 {
144 if (bp->b_maps != &bp->__b_map) {
145 kmem_free(bp->b_maps);
146 bp->b_maps = NULL;
147 }
148 }
149
150 struct xfs_buf *
151 _xfs_buf_alloc(
152 struct xfs_buftarg *target,
153 struct xfs_buf_map *map,
154 int nmaps,
155 xfs_buf_flags_t flags)
156 {
157 struct xfs_buf *bp;
158 int error;
159 int i;
160
161 bp = kmem_zone_zalloc(xfs_buf_zone, KM_NOFS);
162 if (unlikely(!bp))
163 return NULL;
164
165 /*
166 * We don't want certain flags to appear in b_flags unless they are
167 * specifically set by later operations on the buffer.
168 */
169 flags &= ~(XBF_UNMAPPED | XBF_TRYLOCK | XBF_ASYNC | XBF_READ_AHEAD);
170
171 atomic_set(&bp->b_hold, 1);
172 atomic_set(&bp->b_lru_ref, 1);
173 init_completion(&bp->b_iowait);
174 INIT_LIST_HEAD(&bp->b_lru);
175 INIT_LIST_HEAD(&bp->b_list);
176 RB_CLEAR_NODE(&bp->b_rbnode);
177 sema_init(&bp->b_sema, 0); /* held, no waiters */
178 spin_lock_init(&bp->b_lock);
179 XB_SET_OWNER(bp);
180 bp->b_target = target;
181 bp->b_flags = flags;
182
183 /*
184 * Set length and io_length to the same value initially.
185 * I/O routines should use io_length, which will be the same in
186 * most cases but may be reset (e.g. XFS recovery).
187 */
188 error = xfs_buf_get_maps(bp, nmaps);
189 if (error) {
190 kmem_zone_free(xfs_buf_zone, bp);
191 return NULL;
192 }
193
194 bp->b_bn = map[0].bm_bn;
195 bp->b_length = 0;
196 for (i = 0; i < nmaps; i++) {
197 bp->b_maps[i].bm_bn = map[i].bm_bn;
198 bp->b_maps[i].bm_len = map[i].bm_len;
199 bp->b_length += map[i].bm_len;
200 }
201 bp->b_io_length = bp->b_length;
202
203 atomic_set(&bp->b_pin_count, 0);
204 init_waitqueue_head(&bp->b_waiters);
205
206 XFS_STATS_INC(xb_create);
207 trace_xfs_buf_init(bp, _RET_IP_);
208
209 return bp;
210 }
211
212 /*
213 * Allocate a page array capable of holding a specified number
214 * of pages, and point the page buf at it.
215 */
216 STATIC int
217 _xfs_buf_get_pages(
218 xfs_buf_t *bp,
219 int page_count)
220 {
221 /* Make sure that we have a page list */
222 if (bp->b_pages == NULL) {
223 bp->b_page_count = page_count;
224 if (page_count <= XB_PAGES) {
225 bp->b_pages = bp->b_page_array;
226 } else {
227 bp->b_pages = kmem_alloc(sizeof(struct page *) *
228 page_count, KM_NOFS);
229 if (bp->b_pages == NULL)
230 return -ENOMEM;
231 }
232 memset(bp->b_pages, 0, sizeof(struct page *) * page_count);
233 }
234 return 0;
235 }
236
237 /*
238 * Frees b_pages if it was allocated.
239 */
240 STATIC void
241 _xfs_buf_free_pages(
242 xfs_buf_t *bp)
243 {
244 if (bp->b_pages != bp->b_page_array) {
245 kmem_free(bp->b_pages);
246 bp->b_pages = NULL;
247 }
248 }
249
250 /*
251 * Releases the specified buffer.
252 *
253 * The modification state of any associated pages is left unchanged.
254 * The buffer must not be on any hash - use xfs_buf_rele instead for
255 * hashed and refcounted buffers
256 */
257 void
258 xfs_buf_free(
259 xfs_buf_t *bp)
260 {
261 trace_xfs_buf_free(bp, _RET_IP_);
262
263 ASSERT(list_empty(&bp->b_lru));
264
265 if (bp->b_flags & _XBF_PAGES) {
266 uint i;
267
268 if (xfs_buf_is_vmapped(bp))
269 vm_unmap_ram(bp->b_addr - bp->b_offset,
270 bp->b_page_count);
271
272 for (i = 0; i < bp->b_page_count; i++) {
273 struct page *page = bp->b_pages[i];
274
275 __free_page(page);
276 }
277 } else if (bp->b_flags & _XBF_KMEM)
278 kmem_free(bp->b_addr);
279 _xfs_buf_free_pages(bp);
280 xfs_buf_free_maps(bp);
281 kmem_zone_free(xfs_buf_zone, bp);
282 }
283
284 /*
285 * Allocates all the pages for the buffer in question and builds its page list.
286 */
287 STATIC int
288 xfs_buf_allocate_memory(
289 xfs_buf_t *bp,
290 uint flags)
291 {
292 size_t size;
293 size_t nbytes, offset;
294 gfp_t gfp_mask = xb_to_gfp(flags);
295 unsigned short page_count, i;
296 xfs_off_t start, end;
297 int error;
298
299 /*
300 * for buffers that are contained within a single page, just allocate
301 * the memory from the heap - there's no need for the complexity of
302 * page arrays to keep allocation down to order 0.
303 */
304 size = BBTOB(bp->b_length);
305 if (size < PAGE_SIZE) {
306 bp->b_addr = kmem_alloc(size, KM_NOFS);
307 if (!bp->b_addr) {
308 /* low memory - use alloc_page loop instead */
309 goto use_alloc_page;
310 }
311
312 if (((unsigned long)(bp->b_addr + size - 1) & PAGE_MASK) !=
313 ((unsigned long)bp->b_addr & PAGE_MASK)) {
314 /* b_addr spans two pages - use alloc_page instead */
315 kmem_free(bp->b_addr);
316 bp->b_addr = NULL;
317 goto use_alloc_page;
318 }
319 bp->b_offset = offset_in_page(bp->b_addr);
320 bp->b_pages = bp->b_page_array;
321 bp->b_pages[0] = virt_to_page(bp->b_addr);
322 bp->b_page_count = 1;
323 bp->b_flags |= _XBF_KMEM;
324 return 0;
325 }
326
327 use_alloc_page:
328 start = BBTOB(bp->b_maps[0].bm_bn) >> PAGE_SHIFT;
329 end = (BBTOB(bp->b_maps[0].bm_bn + bp->b_length) + PAGE_SIZE - 1)
330 >> PAGE_SHIFT;
331 page_count = end - start;
332 error = _xfs_buf_get_pages(bp, page_count);
333 if (unlikely(error))
334 return error;
335
336 offset = bp->b_offset;
337 bp->b_flags |= _XBF_PAGES;
338
339 for (i = 0; i < bp->b_page_count; i++) {
340 struct page *page;
341 uint retries = 0;
342 retry:
343 page = alloc_page(gfp_mask);
344 if (unlikely(page == NULL)) {
345 if (flags & XBF_READ_AHEAD) {
346 bp->b_page_count = i;
347 error = -ENOMEM;
348 goto out_free_pages;
349 }
350
351 /*
352 * This could deadlock.
353 *
354 * But until all the XFS lowlevel code is revamped to
355 * handle buffer allocation failures we can't do much.
356 */
357 if (!(++retries % 100))
358 xfs_err(NULL,
359 "possible memory allocation deadlock in %s (mode:0x%x)",
360 __func__, gfp_mask);
361
362 XFS_STATS_INC(xb_page_retries);
363 congestion_wait(BLK_RW_ASYNC, HZ/50);
364 goto retry;
365 }
366
367 XFS_STATS_INC(xb_page_found);
368
369 nbytes = min_t(size_t, size, PAGE_SIZE - offset);
370 size -= nbytes;
371 bp->b_pages[i] = page;
372 offset = 0;
373 }
374 return 0;
375
376 out_free_pages:
377 for (i = 0; i < bp->b_page_count; i++)
378 __free_page(bp->b_pages[i]);
379 bp->b_flags &= ~_XBF_PAGES;
380 return error;
381 }
382
383 /*
384 * Map buffer into kernel address-space if necessary.
385 */
386 STATIC int
387 _xfs_buf_map_pages(
388 xfs_buf_t *bp,
389 uint flags)
390 {
391 ASSERT(bp->b_flags & _XBF_PAGES);
392 if (bp->b_page_count == 1) {
393 /* A single page buffer is always mappable */
394 bp->b_addr = page_address(bp->b_pages[0]) + bp->b_offset;
395 } else if (flags & XBF_UNMAPPED) {
396 bp->b_addr = NULL;
397 } else {
398 int retried = 0;
399 unsigned noio_flag;
400
401 /*
402 * vm_map_ram() will allocate auxiliary structures (e.g.
403 * pagetables) with GFP_KERNEL, yet we are likely to be under
404 * GFP_NOFS context here. Hence we need to tell memory reclaim
405 * that we are in such a context via PF_MEMALLOC_NOIO to prevent
406 * memory reclaim re-entering the filesystem here and
407 * potentially deadlocking.
408 */
409 noio_flag = memalloc_noio_save();
410 do {
411 bp->b_addr = vm_map_ram(bp->b_pages, bp->b_page_count,
412 -1, PAGE_KERNEL);
413 if (bp->b_addr)
414 break;
415 vm_unmap_aliases();
416 } while (retried++ <= 1);
417 memalloc_noio_restore(noio_flag);
418
419 if (!bp->b_addr)
420 return -ENOMEM;
421 bp->b_addr += bp->b_offset;
422 }
423
424 return 0;
425 }
426
427 /*
428 * Finding and Reading Buffers
429 */
430
431 /*
432 * Look up, and create if absent, a lockable buffer for
433 * a given disk address range of the buffer target. The
434 * buffer is returned locked. No I/O is implied by this call.
435 */
436 xfs_buf_t *
437 _xfs_buf_find(
438 struct xfs_buftarg *btp,
439 struct xfs_buf_map *map,
440 int nmaps,
441 xfs_buf_flags_t flags,
442 xfs_buf_t *new_bp)
443 {
444 size_t numbytes;
445 struct xfs_perag *pag;
446 struct rb_node **rbp;
447 struct rb_node *parent;
448 xfs_buf_t *bp;
449 xfs_daddr_t blkno = map[0].bm_bn;
450 xfs_daddr_t eofs;
451 int numblks = 0;
452 int i;
453
454 for (i = 0; i < nmaps; i++)
455 numblks += map[i].bm_len;
456 numbytes = BBTOB(numblks);
457
458 /* Check for IOs smaller than the sector size / not sector aligned */
459 ASSERT(!(numbytes < btp->bt_meta_sectorsize));
460 ASSERT(!(BBTOB(blkno) & (xfs_off_t)btp->bt_meta_sectormask));
461
462 /*
463 * Corrupted block numbers can get through to here, unfortunately, so we
464 * have to check that the buffer falls within the filesystem bounds.
465 */
466 eofs = XFS_FSB_TO_BB(btp->bt_mount, btp->bt_mount->m_sb.sb_dblocks);
467 if (blkno >= eofs) {
468 /*
469 * XXX (dgc): we should really be returning -EFSCORRUPTED here,
470 * but none of the higher level infrastructure supports
471 * returning a specific error on buffer lookup failures.
472 */
473 xfs_alert(btp->bt_mount,
474 "%s: Block out of range: block 0x%llx, EOFS 0x%llx ",
475 __func__, blkno, eofs);
476 WARN_ON(1);
477 return NULL;
478 }
479
480 /* get tree root */
481 pag = xfs_perag_get(btp->bt_mount,
482 xfs_daddr_to_agno(btp->bt_mount, blkno));
483
484 /* walk tree */
485 spin_lock(&pag->pag_buf_lock);
486 rbp = &pag->pag_buf_tree.rb_node;
487 parent = NULL;
488 bp = NULL;
489 while (*rbp) {
490 parent = *rbp;
491 bp = rb_entry(parent, struct xfs_buf, b_rbnode);
492
493 if (blkno < bp->b_bn)
494 rbp = &(*rbp)->rb_left;
495 else if (blkno > bp->b_bn)
496 rbp = &(*rbp)->rb_right;
497 else {
498 /*
499 * found a block number match. If the range doesn't
500 * match, the only way this is allowed is if the buffer
501 * in the cache is stale and the transaction that made
502 * it stale has not yet committed. i.e. we are
503 * reallocating a busy extent. Skip this buffer and
504 * continue searching to the right for an exact match.
505 */
506 if (bp->b_length != numblks) {
507 ASSERT(bp->b_flags & XBF_STALE);
508 rbp = &(*rbp)->rb_right;
509 continue;
510 }
511 atomic_inc(&bp->b_hold);
512 goto found;
513 }
514 }
515
516 /* No match found */
517 if (new_bp) {
518 rb_link_node(&new_bp->b_rbnode, parent, rbp);
519 rb_insert_color(&new_bp->b_rbnode, &pag->pag_buf_tree);
520 /* the buffer keeps the perag reference until it is freed */
521 new_bp->b_pag = pag;
522 spin_unlock(&pag->pag_buf_lock);
523 } else {
524 XFS_STATS_INC(xb_miss_locked);
525 spin_unlock(&pag->pag_buf_lock);
526 xfs_perag_put(pag);
527 }
528 return new_bp;
529
530 found:
531 spin_unlock(&pag->pag_buf_lock);
532 xfs_perag_put(pag);
533
534 if (!xfs_buf_trylock(bp)) {
535 if (flags & XBF_TRYLOCK) {
536 xfs_buf_rele(bp);
537 XFS_STATS_INC(xb_busy_locked);
538 return NULL;
539 }
540 xfs_buf_lock(bp);
541 XFS_STATS_INC(xb_get_locked_waited);
542 }
543
544 /*
545 * if the buffer is stale, clear all the external state associated with
546 * it. We need to keep flags such as how we allocated the buffer memory
547 * intact here.
548 */
549 if (bp->b_flags & XBF_STALE) {
550 ASSERT((bp->b_flags & _XBF_DELWRI_Q) == 0);
551 ASSERT(bp->b_iodone == NULL);
552 bp->b_flags &= _XBF_KMEM | _XBF_PAGES;
553 bp->b_ops = NULL;
554 }
555
556 trace_xfs_buf_find(bp, flags, _RET_IP_);
557 XFS_STATS_INC(xb_get_locked);
558 return bp;
559 }
560
561 /*
562 * Assembles a buffer covering the specified range. The code is optimised for
563 * cache hits, as metadata intensive workloads will see 3 orders of magnitude
564 * more hits than misses.
565 */
566 struct xfs_buf *
567 xfs_buf_get_map(
568 struct xfs_buftarg *target,
569 struct xfs_buf_map *map,
570 int nmaps,
571 xfs_buf_flags_t flags)
572 {
573 struct xfs_buf *bp;
574 struct xfs_buf *new_bp;
575 int error = 0;
576
577 bp = _xfs_buf_find(target, map, nmaps, flags, NULL);
578 if (likely(bp))
579 goto found;
580
581 new_bp = _xfs_buf_alloc(target, map, nmaps, flags);
582 if (unlikely(!new_bp))
583 return NULL;
584
585 error = xfs_buf_allocate_memory(new_bp, flags);
586 if (error) {
587 xfs_buf_free(new_bp);
588 return NULL;
589 }
590
591 bp = _xfs_buf_find(target, map, nmaps, flags, new_bp);
592 if (!bp) {
593 xfs_buf_free(new_bp);
594 return NULL;
595 }
596
597 if (bp != new_bp)
598 xfs_buf_free(new_bp);
599
600 found:
601 if (!bp->b_addr) {
602 error = _xfs_buf_map_pages(bp, flags);
603 if (unlikely(error)) {
604 xfs_warn(target->bt_mount,
605 "%s: failed to map pages", __func__);
606 xfs_buf_relse(bp);
607 return NULL;
608 }
609 }
610
611 /*
612 * Clear b_error if this is a lookup from a caller that doesn't expect
613 * valid data to be found in the buffer.
614 */
615 if (!(flags & XBF_READ))
616 xfs_buf_ioerror(bp, 0);
617
618 XFS_STATS_INC(xb_get);
619 trace_xfs_buf_get(bp, flags, _RET_IP_);
620 return bp;
621 }
622
623 STATIC int
624 _xfs_buf_read(
625 xfs_buf_t *bp,
626 xfs_buf_flags_t flags)
627 {
628 ASSERT(!(flags & XBF_WRITE));
629 ASSERT(bp->b_maps[0].bm_bn != XFS_BUF_DADDR_NULL);
630
631 bp->b_flags &= ~(XBF_WRITE | XBF_ASYNC | XBF_READ_AHEAD);
632 bp->b_flags |= flags & (XBF_READ | XBF_ASYNC | XBF_READ_AHEAD);
633
634 if (flags & XBF_ASYNC) {
635 xfs_buf_submit(bp);
636 return 0;
637 }
638 return xfs_buf_submit_wait(bp);
639 }
640
641 xfs_buf_t *
642 xfs_buf_read_map(
643 struct xfs_buftarg *target,
644 struct xfs_buf_map *map,
645 int nmaps,
646 xfs_buf_flags_t flags,
647 const struct xfs_buf_ops *ops)
648 {
649 struct xfs_buf *bp;
650
651 flags |= XBF_READ;
652
653 bp = xfs_buf_get_map(target, map, nmaps, flags);
654 if (bp) {
655 trace_xfs_buf_read(bp, flags, _RET_IP_);
656
657 if (!XFS_BUF_ISDONE(bp)) {
658 XFS_STATS_INC(xb_get_read);
659 bp->b_ops = ops;
660 _xfs_buf_read(bp, flags);
661 } else if (flags & XBF_ASYNC) {
662 /*
663 * Read ahead call which is already satisfied,
664 * drop the buffer
665 */
666 xfs_buf_relse(bp);
667 return NULL;
668 } else {
669 /* We do not want read in the flags */
670 bp->b_flags &= ~XBF_READ;
671 }
672 }
673
674 return bp;
675 }
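
/*
 * Illustrative usage sketch (editor's addition, not part of the original
 * source): a typical single-extent metadata read through
 * xfs_buf_read_map(). The names "btp", "daddr", "numblks" and "ops"
 * stand for hypothetical caller-supplied values; error handling is
 * abbreviated.
 *
 *	DEFINE_SINGLE_BUF_MAP(map, daddr, numblks);
 *	struct xfs_buf	*bp;
 *
 *	bp = xfs_buf_read_map(btp, &map, 1, 0, ops);
 *	if (!bp)
 *		return -ENOMEM;
 *	if (bp->b_error) {
 *		error = bp->b_error;
 *		xfs_buf_relse(bp);
 *		return error;
 *	}
 *	... use bp->b_addr or xfs_buf_offset(bp, ...) ...
 *	xfs_buf_relse(bp);	(unlocks and drops the reference)
 */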
676
677 /*
678 * If we are not low on memory then do the readahead in a deadlock
679 * safe manner.
680 */
681 void
682 xfs_buf_readahead_map(
683 struct xfs_buftarg *target,
684 struct xfs_buf_map *map,
685 int nmaps,
686 const struct xfs_buf_ops *ops)
687 {
688 if (bdi_read_congested(target->bt_bdi))
689 return;
690
691 xfs_buf_read_map(target, map, nmaps,
692 XBF_TRYLOCK|XBF_ASYNC|XBF_READ_AHEAD, ops);
693 }
694
695 /*
696 * Read an uncached buffer from disk. Allocates and returns a locked
697 * buffer containing the disk contents or nothing.
698 */
699 int
700 xfs_buf_read_uncached(
701 struct xfs_buftarg *target,
702 xfs_daddr_t daddr,
703 size_t numblks,
704 int flags,
705 struct xfs_buf **bpp,
706 const struct xfs_buf_ops *ops)
707 {
708 struct xfs_buf *bp;
709
710 *bpp = NULL;
711
712 bp = xfs_buf_get_uncached(target, numblks, flags);
713 if (!bp)
714 return -ENOMEM;
715
716 /* set up the buffer for a read IO */
717 ASSERT(bp->b_map_count == 1);
718 bp->b_bn = XFS_BUF_DADDR_NULL; /* always null for uncached buffers */
719 bp->b_maps[0].bm_bn = daddr;
720 bp->b_flags |= XBF_READ;
721 bp->b_ops = ops;
722
723 xfs_buf_submit_wait(bp);
724 if (bp->b_error) {
725 int error = bp->b_error;
726 xfs_buf_relse(bp);
727 return error;
728 }
729
730 *bpp = bp;
731 return 0;
732 }
733
734 /*
735 * Return a buffer allocated as an empty buffer and associated with external
736 * memory via xfs_buf_associate_memory() back to its empty state.
737 */
738 void
739 xfs_buf_set_empty(
740 struct xfs_buf *bp,
741 size_t numblks)
742 {
743 if (bp->b_pages)
744 _xfs_buf_free_pages(bp);
745
746 bp->b_pages = NULL;
747 bp->b_page_count = 0;
748 bp->b_addr = NULL;
749 bp->b_length = numblks;
750 bp->b_io_length = numblks;
751
752 ASSERT(bp->b_map_count == 1);
753 bp->b_bn = XFS_BUF_DADDR_NULL;
754 bp->b_maps[0].bm_bn = XFS_BUF_DADDR_NULL;
755 bp->b_maps[0].bm_len = bp->b_length;
756 }
757
758 static inline struct page *
759 mem_to_page(
760 void *addr)
761 {
762 if ((!is_vmalloc_addr(addr))) {
763 return virt_to_page(addr);
764 } else {
765 return vmalloc_to_page(addr);
766 }
767 }
768
769 int
770 xfs_buf_associate_memory(
771 xfs_buf_t *bp,
772 void *mem,
773 size_t len)
774 {
775 int rval;
776 int i = 0;
777 unsigned long pageaddr;
778 unsigned long offset;
779 size_t buflen;
780 int page_count;
781
782 pageaddr = (unsigned long)mem & PAGE_MASK;
783 offset = (unsigned long)mem - pageaddr;
784 buflen = PAGE_ALIGN(len + offset);
785 page_count = buflen >> PAGE_SHIFT;
786
787 /* Free any previous set of page pointers */
788 if (bp->b_pages)
789 _xfs_buf_free_pages(bp);
790
791 bp->b_pages = NULL;
792 bp->b_addr = mem;
793
794 rval = _xfs_buf_get_pages(bp, page_count);
795 if (rval)
796 return rval;
797
798 bp->b_offset = offset;
799
800 for (i = 0; i < bp->b_page_count; i++) {
801 bp->b_pages[i] = mem_to_page((void *)pageaddr);
802 pageaddr += PAGE_SIZE;
803 }
804
805 bp->b_io_length = BTOBB(len);
806 bp->b_length = BTOBB(buflen);
807
808 return 0;
809 }
810
811 xfs_buf_t *
812 xfs_buf_get_uncached(
813 struct xfs_buftarg *target,
814 size_t numblks,
815 int flags)
816 {
817 unsigned long page_count;
818 int error, i;
819 struct xfs_buf *bp;
820 DEFINE_SINGLE_BUF_MAP(map, XFS_BUF_DADDR_NULL, numblks);
821
822 bp = _xfs_buf_alloc(target, &map, 1, 0);
823 if (unlikely(bp == NULL))
824 goto fail;
825
826 page_count = PAGE_ALIGN(numblks << BBSHIFT) >> PAGE_SHIFT;
827 error = _xfs_buf_get_pages(bp, page_count);
828 if (error)
829 goto fail_free_buf;
830
831 for (i = 0; i < page_count; i++) {
832 bp->b_pages[i] = alloc_page(xb_to_gfp(flags));
833 if (!bp->b_pages[i])
834 goto fail_free_mem;
835 }
836 bp->b_flags |= _XBF_PAGES;
837
838 error = _xfs_buf_map_pages(bp, 0);
839 if (unlikely(error)) {
840 xfs_warn(target->bt_mount,
841 "%s: failed to map pages", __func__);
842 goto fail_free_mem;
843 }
844
845 trace_xfs_buf_get_uncached(bp, _RET_IP_);
846 return bp;
847
848 fail_free_mem:
849 while (--i >= 0)
850 __free_page(bp->b_pages[i]);
851 _xfs_buf_free_pages(bp);
852 fail_free_buf:
853 xfs_buf_free_maps(bp);
854 kmem_zone_free(xfs_buf_zone, bp);
855 fail:
856 return NULL;
857 }
858
859 /*
860 * Increment reference count on buffer, to hold the buffer concurrently
861 * with another thread which may release (free) the buffer asynchronously.
862 * Must hold the buffer already to call this function.
863 */
864 void
865 xfs_buf_hold(
866 xfs_buf_t *bp)
867 {
868 trace_xfs_buf_hold(bp, _RET_IP_);
869 atomic_inc(&bp->b_hold);
870 }
871
872 /*
873 * Releases a hold on the specified buffer. If the
874 * hold count is 1, calls xfs_buf_free.
875 */
876 void
877 xfs_buf_rele(
878 xfs_buf_t *bp)
879 {
880 struct xfs_perag *pag = bp->b_pag;
881
882 trace_xfs_buf_rele(bp, _RET_IP_);
883
884 if (!pag) {
885 ASSERT(list_empty(&bp->b_lru));
886 ASSERT(RB_EMPTY_NODE(&bp->b_rbnode));
887 if (atomic_dec_and_test(&bp->b_hold))
888 xfs_buf_free(bp);
889 return;
890 }
891
892 ASSERT(!RB_EMPTY_NODE(&bp->b_rbnode));
893
894 ASSERT(atomic_read(&bp->b_hold) > 0);
895 if (atomic_dec_and_lock(&bp->b_hold, &pag->pag_buf_lock)) {
896 spin_lock(&bp->b_lock);
897 if (!(bp->b_flags & XBF_STALE) && atomic_read(&bp->b_lru_ref)) {
898 /*
899 * If the buffer is added to the LRU take a new
900 * reference to the buffer for the LRU and clear the
901 * (now stale) dispose list state flag
902 */
903 if (list_lru_add(&bp->b_target->bt_lru, &bp->b_lru)) {
904 bp->b_state &= ~XFS_BSTATE_DISPOSE;
905 atomic_inc(&bp->b_hold);
906 }
907 spin_unlock(&bp->b_lock);
908 spin_unlock(&pag->pag_buf_lock);
909 } else {
910 /*
911 * most of the time buffers will already be removed from
912 * the LRU, so optimise that case by checking for the
913 * XFS_BSTATE_DISPOSE flag indicating the last list the
914 * buffer was on was the disposal list
915 */
916 if (!(bp->b_state & XFS_BSTATE_DISPOSE)) {
917 list_lru_del(&bp->b_target->bt_lru, &bp->b_lru);
918 } else {
919 ASSERT(list_empty(&bp->b_lru));
920 }
921 spin_unlock(&bp->b_lock);
922
923 ASSERT(!(bp->b_flags & _XBF_DELWRI_Q));
924 rb_erase(&bp->b_rbnode, &pag->pag_buf_tree);
925 spin_unlock(&pag->pag_buf_lock);
926 xfs_perag_put(pag);
927 xfs_buf_free(bp);
928 }
929 }
930 }
931
932
933 /*
934 * Lock a buffer object, if it is not already locked.
935 *
936 * If we come across a stale, pinned, locked buffer, we know that we are
937 * being asked to lock a buffer that has been reallocated. Because it is
938 * pinned, we know that the log has not been pushed to disk and hence it
939 * will still be locked. Rather than continuing to have trylock attempts
940 * fail until someone else pushes the log, push it ourselves before
941 * returning. This means that the xfsaild will not get stuck trying
942 * to push on stale inode buffers.
943 */
944 int
945 xfs_buf_trylock(
946 struct xfs_buf *bp)
947 {
948 int locked;
949
950 locked = down_trylock(&bp->b_sema) == 0;
951 if (locked)
952 XB_SET_OWNER(bp);
953
954 trace_xfs_buf_trylock(bp, _RET_IP_);
955 return locked;
956 }
957
958 /*
959 * Lock a buffer object.
960 *
961 * If we come across a stale, pinned, locked buffer, we know that we
962 * are being asked to lock a buffer that has been reallocated. Because
963 * it is pinned, we know that the log has not been pushed to disk and
964 * hence it will still be locked. Rather than sleeping until someone
965 * else pushes the log, push it ourselves before trying to get the lock.
966 */
967 void
968 xfs_buf_lock(
969 struct xfs_buf *bp)
970 {
971 trace_xfs_buf_lock(bp, _RET_IP_);
972
973 if (atomic_read(&bp->b_pin_count) && (bp->b_flags & XBF_STALE))
974 xfs_log_force(bp->b_target->bt_mount, 0);
975 down(&bp->b_sema);
976 XB_SET_OWNER(bp);
977
978 trace_xfs_buf_lock_done(bp, _RET_IP_);
979 }
980
981 void
982 xfs_buf_unlock(
983 struct xfs_buf *bp)
984 {
985 ASSERT(xfs_buf_islocked(bp));
986
987 XB_CLEAR_OWNER(bp);
988 up(&bp->b_sema);
989
990 trace_xfs_buf_unlock(bp, _RET_IP_);
991 }
992
993 STATIC void
994 xfs_buf_wait_unpin(
995 xfs_buf_t *bp)
996 {
997 DECLARE_WAITQUEUE (wait, current);
998
999 if (atomic_read(&bp->b_pin_count) == 0)
1000 return;
1001
1002 add_wait_queue(&bp->b_waiters, &wait);
1003 for (;;) {
1004 set_current_state(TASK_UNINTERRUPTIBLE);
1005 if (atomic_read(&bp->b_pin_count) == 0)
1006 break;
1007 io_schedule();
1008 }
1009 remove_wait_queue(&bp->b_waiters, &wait);
1010 set_current_state(TASK_RUNNING);
1011 }
1012
1013 /*
1014 * Buffer Utility Routines
1015 */
1016
1017 void
1018 xfs_buf_ioend(
1019 struct xfs_buf *bp)
1020 {
1021 bool read = bp->b_flags & XBF_READ;
1022
1023 trace_xfs_buf_iodone(bp, _RET_IP_);
1024
1025 bp->b_flags &= ~(XBF_READ | XBF_WRITE | XBF_READ_AHEAD);
1026
1027 /*
1028 * Pull in IO completion errors now. We are guaranteed to be running
1029 * single threaded, so we don't need the lock to read b_io_error.
1030 */
1031 if (!bp->b_error && bp->b_io_error)
1032 xfs_buf_ioerror(bp, bp->b_io_error);
1033
1034 /* Only validate buffers that were read without errors */
1035 if (read && !bp->b_error && bp->b_ops) {
1036 ASSERT(!bp->b_iodone);
1037 bp->b_ops->verify_read(bp);
1038 }
1039
1040 if (!bp->b_error)
1041 bp->b_flags |= XBF_DONE;
1042
1043 if (bp->b_iodone)
1044 (*(bp->b_iodone))(bp);
1045 else if (bp->b_flags & XBF_ASYNC)
1046 xfs_buf_relse(bp);
1047 else
1048 complete(&bp->b_iowait);
1049 }
1050
1051 static void
1052 xfs_buf_ioend_work(
1053 struct work_struct *work)
1054 {
1055 struct xfs_buf *bp =
1056 container_of(work, xfs_buf_t, b_iodone_work);
1057
1058 xfs_buf_ioend(bp);
1059 }
1060
1061 void
1062 xfs_buf_ioend_async(
1063 struct xfs_buf *bp)
1064 {
1065 INIT_WORK(&bp->b_iodone_work, xfs_buf_ioend_work);
1066 queue_work(xfslogd_workqueue, &bp->b_iodone_work);
1067 }
1068
1069 void
1070 xfs_buf_ioerror(
1071 xfs_buf_t *bp,
1072 int error)
1073 {
1074 ASSERT(error <= 0 && error >= -1000);
1075 bp->b_error = error;
1076 trace_xfs_buf_ioerror(bp, error, _RET_IP_);
1077 }
1078
1079 void
1080 xfs_buf_ioerror_alert(
1081 struct xfs_buf *bp,
1082 const char *func)
1083 {
1084 xfs_alert(bp->b_target->bt_mount,
1085 "metadata I/O error: block 0x%llx (\"%s\") error %d numblks %d",
1086 (__uint64_t)XFS_BUF_ADDR(bp), func, -bp->b_error, bp->b_length);
1087 }
1088
1089 int
1090 xfs_bwrite(
1091 struct xfs_buf *bp)
1092 {
1093 int error;
1094
1095 ASSERT(xfs_buf_islocked(bp));
1096
1097 bp->b_flags |= XBF_WRITE;
1098 bp->b_flags &= ~(XBF_ASYNC | XBF_READ | _XBF_DELWRI_Q |
1099 XBF_WRITE_FAIL | XBF_DONE);
1100
1101 error = xfs_buf_submit_wait(bp);
1102 if (error) {
1103 xfs_force_shutdown(bp->b_target->bt_mount,
1104 SHUTDOWN_META_IO_ERROR);
1105 }
1106 return error;
1107 }
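
/*
 * Illustrative usage sketch (editor's addition, not part of the original
 * source): xfs_bwrite() expects a locked, referenced buffer and returns
 * with the buffer still locked, so the caller releases it afterwards.
 * "bp" is a hypothetical buffer obtained and dirtied elsewhere.
 *
 *	xfs_buf_lock(bp);		(if not already locked)
 *	error = xfs_bwrite(bp);
 *	xfs_buf_relse(bp);		(unlock and drop the reference)
 */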
1108
1109 STATIC void
1110 xfs_buf_bio_end_io(
1111 struct bio *bio,
1112 int error)
1113 {
1114 xfs_buf_t *bp = (xfs_buf_t *)bio->bi_private;
1115
1116 /*
1117 * don't overwrite existing errors - otherwise we can lose errors on
1118 * buffers that require multiple bios to complete.
1119 */
1120 if (error) {
1121 spin_lock(&bp->b_lock);
1122 if (!bp->b_io_error)
1123 bp->b_io_error = error;
1124 spin_unlock(&bp->b_lock);
1125 }
1126
1127 if (!bp->b_error && xfs_buf_is_vmapped(bp) && (bp->b_flags & XBF_READ))
1128 invalidate_kernel_vmap_range(bp->b_addr, xfs_buf_vmap_len(bp));
1129
1130 if (atomic_dec_and_test(&bp->b_io_remaining) == 1)
1131 xfs_buf_ioend_async(bp);
1132 bio_put(bio);
1133 }
1134
1135 static void
1136 xfs_buf_ioapply_map(
1137 struct xfs_buf *bp,
1138 int map,
1139 int *buf_offset,
1140 int *count,
1141 int rw)
1142 {
1143 int page_index;
1144 int total_nr_pages = bp->b_page_count;
1145 int nr_pages;
1146 struct bio *bio;
1147 sector_t sector = bp->b_maps[map].bm_bn;
1148 int size;
1149 int offset;
1150
1151 total_nr_pages = bp->b_page_count;
1152
1153 /* skip the pages in the buffer before the start offset */
1154 page_index = 0;
1155 offset = *buf_offset;
1156 while (offset >= PAGE_SIZE) {
1157 page_index++;
1158 offset -= PAGE_SIZE;
1159 }
1160
1161 /*
1162 * Limit the IO size to the length of the current vector, and update the
1163 * remaining IO count for the next time around.
1164 */
1165 size = min_t(int, BBTOB(bp->b_maps[map].bm_len), *count);
1166 *count -= size;
1167 *buf_offset += size;
1168
1169 next_chunk:
1170 atomic_inc(&bp->b_io_remaining);
1171 nr_pages = BIO_MAX_SECTORS >> (PAGE_SHIFT - BBSHIFT);
1172 if (nr_pages > total_nr_pages)
1173 nr_pages = total_nr_pages;
1174
1175 bio = bio_alloc(GFP_NOIO, nr_pages);
1176 bio->bi_bdev = bp->b_target->bt_bdev;
1177 bio->bi_iter.bi_sector = sector;
1178 bio->bi_end_io = xfs_buf_bio_end_io;
1179 bio->bi_private = bp;
1180
1181
1182 for (; size && nr_pages; nr_pages--, page_index++) {
1183 int rbytes, nbytes = PAGE_SIZE - offset;
1184
1185 if (nbytes > size)
1186 nbytes = size;
1187
1188 rbytes = bio_add_page(bio, bp->b_pages[page_index], nbytes,
1189 offset);
1190 if (rbytes < nbytes)
1191 break;
1192
1193 offset = 0;
1194 sector += BTOBB(nbytes);
1195 size -= nbytes;
1196 total_nr_pages--;
1197 }
1198
1199 if (likely(bio->bi_iter.bi_size)) {
1200 if (xfs_buf_is_vmapped(bp)) {
1201 flush_kernel_vmap_range(bp->b_addr,
1202 xfs_buf_vmap_len(bp));
1203 }
1204 submit_bio(rw, bio);
1205 if (size)
1206 goto next_chunk;
1207 } else {
1208 /*
1209 * This is guaranteed not to be the last io reference count
1210 * because the caller (xfs_buf_submit) holds a count itself.
1211 */
1212 atomic_dec(&bp->b_io_remaining);
1213 xfs_buf_ioerror(bp, -EIO);
1214 bio_put(bio);
1215 }
1216
1217 }
1218
1219 STATIC void
1220 _xfs_buf_ioapply(
1221 struct xfs_buf *bp)
1222 {
1223 struct blk_plug plug;
1224 int rw;
1225 int offset;
1226 int size;
1227 int i;
1228
1229 /*
1230 * Make sure we capture only current IO errors rather than stale errors
1231 * left over from previous use of the buffer (e.g. failed readahead).
1232 */
1233 bp->b_error = 0;
1234
1235 if (bp->b_flags & XBF_WRITE) {
1236 if (bp->b_flags & XBF_SYNCIO)
1237 rw = WRITE_SYNC;
1238 else
1239 rw = WRITE;
1240 if (bp->b_flags & XBF_FUA)
1241 rw |= REQ_FUA;
1242 if (bp->b_flags & XBF_FLUSH)
1243 rw |= REQ_FLUSH;
1244
1245 /*
1246 * Run the write verifier callback function if it exists. If
1247 * this function fails it will mark the buffer with an error and
1248 * the IO should not be dispatched.
1249 */
1250 if (bp->b_ops) {
1251 bp->b_ops->verify_write(bp);
1252 if (bp->b_error) {
1253 xfs_force_shutdown(bp->b_target->bt_mount,
1254 SHUTDOWN_CORRUPT_INCORE);
1255 return;
1256 }
1257 } else if (bp->b_bn != XFS_BUF_DADDR_NULL) {
1258 struct xfs_mount *mp = bp->b_target->bt_mount;
1259
1260 /*
1261 * non-crc filesystems don't attach verifiers during
1262 * log recovery, so don't warn for such filesystems.
1263 */
1264 if (xfs_sb_version_hascrc(&mp->m_sb)) {
1265 xfs_warn(mp,
1266 "%s: no ops on block 0x%llx/0x%x",
1267 __func__, bp->b_bn, bp->b_length);
1268 xfs_hex_dump(bp->b_addr, 64);
1269 dump_stack();
1270 }
1271 }
1272 } else if (bp->b_flags & XBF_READ_AHEAD) {
1273 rw = READA;
1274 } else {
1275 rw = READ;
1276 }
1277
1278 /* we only use the buffer cache for meta-data */
1279 rw |= REQ_META;
1280
1281 /*
1282 * Walk all the vectors issuing IO on them. Set up the initial offset
1283 * into the buffer and the desired IO size before we start -
1284 * xfs_buf_ioapply_map() will modify them appropriately for each
1285 * subsequent call.
1286 */
1287 offset = bp->b_offset;
1288 size = BBTOB(bp->b_io_length);
1289 blk_start_plug(&plug);
1290 for (i = 0; i < bp->b_map_count; i++) {
1291 xfs_buf_ioapply_map(bp, i, &offset, &size, rw);
1292 if (bp->b_error)
1293 break;
1294 if (size <= 0)
1295 break; /* all done */
1296 }
1297 blk_finish_plug(&plug);
1298 }
1299
1300 /*
1301 * Asynchronous IO submission path. This transfers the buffer lock ownership and
1302 * the current reference to the IO. It is not safe to reference the buffer after
1303 * a call to this function unless the caller holds an additional reference
1304 * itself.
1305 */
1306 void
1307 xfs_buf_submit(
1308 struct xfs_buf *bp)
1309 {
1310 trace_xfs_buf_submit(bp, _RET_IP_);
1311
1312 ASSERT(!(bp->b_flags & _XBF_DELWRI_Q));
1313 ASSERT(bp->b_flags & XBF_ASYNC);
1314
1315 /* on shutdown we stale and complete the buffer immediately */
1316 if (XFS_FORCED_SHUTDOWN(bp->b_target->bt_mount)) {
1317 xfs_buf_ioerror(bp, -EIO);
1318 bp->b_flags &= ~XBF_DONE;
1319 xfs_buf_stale(bp);
1320 xfs_buf_ioend(bp);
1321 return;
1322 }
1323
1324 if (bp->b_flags & XBF_WRITE)
1325 xfs_buf_wait_unpin(bp);
1326
1327 /* clear the internal error state to avoid spurious errors */
1328 bp->b_io_error = 0;
1329
1330 /*
1331 * The caller's reference is released during I/O completion.
1332 * This occurs some time after the last b_io_remaining reference is
1333 * released, so after we drop our IO reference we have to have some
1334 * other reference to ensure the buffer doesn't go away from underneath
1335 * us. Take a direct reference to ensure we have safe access to the
1336 * buffer until we are finished with it.
1337 */
1338 xfs_buf_hold(bp);
1339
1340 /*
1341 * Set the count to 1 initially, this will stop an I/O completion
1342 * callout which happens before we have started all the I/O from calling
1343 * xfs_buf_ioend too early.
1344 */
1345 atomic_set(&bp->b_io_remaining, 1);
1346 _xfs_buf_ioapply(bp);
1347
1348 /*
1349 * If _xfs_buf_ioapply failed, we can get back here with only the IO
1350 * reference we took above. If we drop it to zero, run completion so
1351 * that we don't return to the caller with completion still pending.
1352 */
1353 if (atomic_dec_and_test(&bp->b_io_remaining) == 1) {
1354 if (bp->b_error)
1355 xfs_buf_ioend(bp);
1356 else
1357 xfs_buf_ioend_async(bp);
1358 }
1359
1360 xfs_buf_rele(bp);
1361 /* Note: it is not safe to reference bp now we've dropped our ref */
1362 }
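
/*
 * Illustrative usage sketch (editor's addition, not part of the original
 * source): asynchronous submission transfers both the buffer lock and the
 * caller's reference to the IO, so a caller that still needs the buffer
 * afterwards must take an extra hold first, as the delwri submission code
 * below does. "bp" is a hypothetical locked, referenced buffer.
 *
 *	bp->b_flags |= XBF_WRITE | XBF_ASYNC;
 *	xfs_buf_hold(bp);		(only if bp is used after submission)
 *	xfs_buf_submit(bp);
 *	...				(wait for or detect completion)
 *	xfs_buf_rele(bp);		(drop the extra hold taken above)
 */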
1363
1364 /*
1365 * Synchronous buffer IO submission path, read or write.
1366 */
1367 int
1368 xfs_buf_submit_wait(
1369 struct xfs_buf *bp)
1370 {
1371 int error;
1372
1373 trace_xfs_buf_submit_wait(bp, _RET_IP_);
1374
1375 ASSERT(!(bp->b_flags & (_XBF_DELWRI_Q | XBF_ASYNC)));
1376
1377 if (XFS_FORCED_SHUTDOWN(bp->b_target->bt_mount)) {
1378 xfs_buf_ioerror(bp, -EIO);
1379 xfs_buf_stale(bp);
1380 bp->b_flags &= ~XBF_DONE;
1381 return -EIO;
1382 }
1383
1384 if (bp->b_flags & XBF_WRITE)
1385 xfs_buf_wait_unpin(bp);
1386
1387 /* clear the internal error state to avoid spurious errors */
1388 bp->b_io_error = 0;
1389
1390 /*
1391 * For synchronous IO, the IO does not inherit the submitter's reference
1392 * count, nor the buffer lock. Hence we cannot release the reference we
1393 * are about to take until we've waited for all IO completion to occur,
1394 * including any xfs_buf_ioend_async() work that may be pending.
1395 */
1396 xfs_buf_hold(bp);
1397
1398 /*
1399 * Set the count to 1 initially, this will stop an I/O completion
1400 * callout which happens before we have started all the I/O from calling
1401 * xfs_buf_ioend too early.
1402 */
1403 atomic_set(&bp->b_io_remaining, 1);
1404 _xfs_buf_ioapply(bp);
1405
1406 /*
1407 * make sure we run completion synchronously if it raced with us and is
1408 * already complete.
1409 */
1410 if (atomic_dec_and_test(&bp->b_io_remaining) == 1)
1411 xfs_buf_ioend(bp);
1412
1413 /* wait for completion before gathering the error from the buffer */
1414 trace_xfs_buf_iowait(bp, _RET_IP_);
1415 wait_for_completion(&bp->b_iowait);
1416 trace_xfs_buf_iowait_done(bp, _RET_IP_);
1417 error = bp->b_error;
1418
1419 /*
1420 * all done now, we can release the hold that keeps the buffer
1421 * referenced for the entire IO.
1422 */
1423 xfs_buf_rele(bp);
1424 return error;
1425 }
1426
1427 xfs_caddr_t
1428 xfs_buf_offset(
1429 xfs_buf_t *bp,
1430 size_t offset)
1431 {
1432 struct page *page;
1433
1434 if (bp->b_addr)
1435 return bp->b_addr + offset;
1436
1437 offset += bp->b_offset;
1438 page = bp->b_pages[offset >> PAGE_SHIFT];
1439 return (xfs_caddr_t)page_address(page) + (offset & (PAGE_SIZE-1));
1440 }
1441
1442 /*
1443 * Move data into or out of a buffer.
1444 */
1445 void
1446 xfs_buf_iomove(
1447 xfs_buf_t *bp, /* buffer to process */
1448 size_t boff, /* starting buffer offset */
1449 size_t bsize, /* length to copy */
1450 void *data, /* data address */
1451 xfs_buf_rw_t mode) /* read/write/zero flag */
1452 {
1453 size_t bend;
1454
1455 bend = boff + bsize;
1456 while (boff < bend) {
1457 struct page *page;
1458 int page_index, page_offset, csize;
1459
1460 page_index = (boff + bp->b_offset) >> PAGE_SHIFT;
1461 page_offset = (boff + bp->b_offset) & ~PAGE_MASK;
1462 page = bp->b_pages[page_index];
1463 csize = min_t(size_t, PAGE_SIZE - page_offset,
1464 BBTOB(bp->b_io_length) - boff);
1465
1466 ASSERT((csize + page_offset) <= PAGE_SIZE);
1467
1468 switch (mode) {
1469 case XBRW_ZERO:
1470 memset(page_address(page) + page_offset, 0, csize);
1471 break;
1472 case XBRW_READ:
1473 memcpy(data, page_address(page) + page_offset, csize);
1474 break;
1475 case XBRW_WRITE:
1476 memcpy(page_address(page) + page_offset, data, csize);
1477 }
1478
1479 boff += csize;
1480 data += csize;
1481 }
1482 }
1483
1484 /*
1485 * Handling of buffer targets (buftargs).
1486 */
1487
1488 /*
1489 * Wait for any bufs with callbacks that have been submitted but have not yet
1490 * returned. These buffers will have an elevated hold count, so wait on those
1491 * while freeing all the buffers only held by the LRU.
1492 */
1493 static enum lru_status
1494 xfs_buftarg_wait_rele(
1495 struct list_head *item,
1496 spinlock_t *lru_lock,
1497 void *arg)
1498
1499 {
1500 struct xfs_buf *bp = container_of(item, struct xfs_buf, b_lru);
1501 struct list_head *dispose = arg;
1502
1503 if (atomic_read(&bp->b_hold) > 1) {
1504 /* need to wait, so skip it this pass */
1505 trace_xfs_buf_wait_buftarg(bp, _RET_IP_);
1506 return LRU_SKIP;
1507 }
1508 if (!spin_trylock(&bp->b_lock))
1509 return LRU_SKIP;
1510
1511 /*
1512 * clear the LRU reference count so the buffer doesn't get
1513 * ignored in xfs_buf_rele().
1514 */
1515 atomic_set(&bp->b_lru_ref, 0);
1516 bp->b_state |= XFS_BSTATE_DISPOSE;
1517 list_move(item, dispose);
1518 spin_unlock(&bp->b_lock);
1519 return LRU_REMOVED;
1520 }
1521
1522 void
1523 xfs_wait_buftarg(
1524 struct xfs_buftarg *btp)
1525 {
1526 LIST_HEAD(dispose);
1527 int loop = 0;
1528
1529 /* loop until there is nothing left on the lru list. */
1530 while (list_lru_count(&btp->bt_lru)) {
1531 list_lru_walk(&btp->bt_lru, xfs_buftarg_wait_rele,
1532 &dispose, LONG_MAX);
1533
1534 while (!list_empty(&dispose)) {
1535 struct xfs_buf *bp;
1536 bp = list_first_entry(&dispose, struct xfs_buf, b_lru);
1537 list_del_init(&bp->b_lru);
1538 if (bp->b_flags & XBF_WRITE_FAIL) {
1539 xfs_alert(btp->bt_mount,
1540 "Corruption Alert: Buffer at block 0x%llx had permanent write failures!\n"
1541 "Please run xfs_repair to determine the extent of the problem.",
1542 (long long)bp->b_bn);
1543 }
1544 xfs_buf_rele(bp);
1545 }
1546 if (loop++ != 0)
1547 delay(100);
1548 }
1549 }
1550
1551 static enum lru_status
1552 xfs_buftarg_isolate(
1553 struct list_head *item,
1554 spinlock_t *lru_lock,
1555 void *arg)
1556 {
1557 struct xfs_buf *bp = container_of(item, struct xfs_buf, b_lru);
1558 struct list_head *dispose = arg;
1559
1560 /*
1561 * we are inverting the lru lock/bp->b_lock here, so use a trylock.
1562 * If we fail to get the lock, just skip it.
1563 */
1564 if (!spin_trylock(&bp->b_lock))
1565 return LRU_SKIP;
1566 /*
1567 * Decrement the b_lru_ref count unless the value is already
1568 * zero. If the value is already zero, we need to reclaim the
1569 * buffer, otherwise it gets another trip through the LRU.
1570 */
1571 if (!atomic_add_unless(&bp->b_lru_ref, -1, 0)) {
1572 spin_unlock(&bp->b_lock);
1573 return LRU_ROTATE;
1574 }
1575
1576 bp->b_state |= XFS_BSTATE_DISPOSE;
1577 list_move(item, dispose);
1578 spin_unlock(&bp->b_lock);
1579 return LRU_REMOVED;
1580 }
1581
1582 static unsigned long
1583 xfs_buftarg_shrink_scan(
1584 struct shrinker *shrink,
1585 struct shrink_control *sc)
1586 {
1587 struct xfs_buftarg *btp = container_of(shrink,
1588 struct xfs_buftarg, bt_shrinker);
1589 LIST_HEAD(dispose);
1590 unsigned long freed;
1591 unsigned long nr_to_scan = sc->nr_to_scan;
1592
1593 freed = list_lru_walk_node(&btp->bt_lru, sc->nid, xfs_buftarg_isolate,
1594 &dispose, &nr_to_scan);
1595
1596 while (!list_empty(&dispose)) {
1597 struct xfs_buf *bp;
1598 bp = list_first_entry(&dispose, struct xfs_buf, b_lru);
1599 list_del_init(&bp->b_lru);
1600 xfs_buf_rele(bp);
1601 }
1602
1603 return freed;
1604 }
1605
1606 static unsigned long
1607 xfs_buftarg_shrink_count(
1608 struct shrinker *shrink,
1609 struct shrink_control *sc)
1610 {
1611 struct xfs_buftarg *btp = container_of(shrink,
1612 struct xfs_buftarg, bt_shrinker);
1613 return list_lru_count_node(&btp->bt_lru, sc->nid);
1614 }
1615
1616 void
1617 xfs_free_buftarg(
1618 struct xfs_mount *mp,
1619 struct xfs_buftarg *btp)
1620 {
1621 unregister_shrinker(&btp->bt_shrinker);
1622 list_lru_destroy(&btp->bt_lru);
1623
1624 if (mp->m_flags & XFS_MOUNT_BARRIER)
1625 xfs_blkdev_issue_flush(btp);
1626
1627 kmem_free(btp);
1628 }
1629
1630 int
1631 xfs_setsize_buftarg(
1632 xfs_buftarg_t *btp,
1633 unsigned int sectorsize)
1634 {
1635 /* Set up metadata sector size info */
1636 btp->bt_meta_sectorsize = sectorsize;
1637 btp->bt_meta_sectormask = sectorsize - 1;
1638
1639 if (set_blocksize(btp->bt_bdev, sectorsize)) {
1640 char name[BDEVNAME_SIZE];
1641
1642 bdevname(btp->bt_bdev, name);
1643
1644 xfs_warn(btp->bt_mount,
1645 "Cannot set_blocksize to %u on device %s",
1646 sectorsize, name);
1647 return -EINVAL;
1648 }
1649
1650 /* Set up device logical sector size mask */
1651 btp->bt_logical_sectorsize = bdev_logical_block_size(btp->bt_bdev);
1652 btp->bt_logical_sectormask = bdev_logical_block_size(btp->bt_bdev) - 1;
1653
1654 return 0;
1655 }
1656
1657 /*
1658 * When allocating the initial buffer target we have not yet
1659 * read in the superblock, so don't know what sized sectors
1660 * are being used at this early stage. Play safe.
1661 */
1662 STATIC int
1663 xfs_setsize_buftarg_early(
1664 xfs_buftarg_t *btp,
1665 struct block_device *bdev)
1666 {
1667 return xfs_setsize_buftarg(btp, bdev_logical_block_size(bdev));
1668 }
1669
1670 xfs_buftarg_t *
1671 xfs_alloc_buftarg(
1672 struct xfs_mount *mp,
1673 struct block_device *bdev)
1674 {
1675 xfs_buftarg_t *btp;
1676
1677 btp = kmem_zalloc(sizeof(*btp), KM_SLEEP | KM_NOFS);
1678
1679 btp->bt_mount = mp;
1680 btp->bt_dev = bdev->bd_dev;
1681 btp->bt_bdev = bdev;
1682 btp->bt_bdi = blk_get_backing_dev_info(bdev);
1683
1684 if (xfs_setsize_buftarg_early(btp, bdev))
1685 goto error;
1686
1687 if (list_lru_init(&btp->bt_lru))
1688 goto error;
1689
1690 btp->bt_shrinker.count_objects = xfs_buftarg_shrink_count;
1691 btp->bt_shrinker.scan_objects = xfs_buftarg_shrink_scan;
1692 btp->bt_shrinker.seeks = DEFAULT_SEEKS;
1693 btp->bt_shrinker.flags = SHRINKER_NUMA_AWARE;
1694 register_shrinker(&btp->bt_shrinker);
1695 return btp;
1696
1697 error:
1698 kmem_free(btp);
1699 return NULL;
1700 }
1701
1702 /*
1703 * Cancel a delayed write list.
1704 *
1705 * Remove each buffer from the list, clear the delwri queue flag and drop the
1706 * associated buffer reference.
1707 */
1708 void
1709 xfs_buf_delwri_cancel(
1710 struct list_head *list)
1711 {
1712 struct xfs_buf *bp;
1713
1714 while (!list_empty(list)) {
1715 bp = list_first_entry(list, struct xfs_buf, b_list);
1716
1717 xfs_buf_lock(bp);
1718 bp->b_flags &= ~_XBF_DELWRI_Q;
1719 list_del_init(&bp->b_list);
1720 xfs_buf_relse(bp);
1721 }
1722 }
1723
1724 /*
1725 * Add a buffer to the delayed write list.
1726 *
1727 * This queues a buffer for writeout if it hasn't already been queued. Note that
1728 * neither this routine nor the buffer list submission functions perform
1729 * any internal synchronization. It is expected that the lists are thread-local
1730 * to the callers.
1731 *
1732 * Returns true if we queued up the buffer, or false if it already had
1733 * been on the buffer list.
1734 */
1735 bool
1736 xfs_buf_delwri_queue(
1737 struct xfs_buf *bp,
1738 struct list_head *list)
1739 {
1740 ASSERT(xfs_buf_islocked(bp));
1741 ASSERT(!(bp->b_flags & XBF_READ));
1742
1743 /*
1744 * If the buffer is already marked delwri it already is queued up
1745 * by someone else for imediate writeout. Just ignore it in that
1746 * case.
1747 */
1748 if (bp->b_flags & _XBF_DELWRI_Q) {
1749 trace_xfs_buf_delwri_queued(bp, _RET_IP_);
1750 return false;
1751 }
1752
1753 trace_xfs_buf_delwri_queue(bp, _RET_IP_);
1754
1755 /*
1756 * If a buffer gets written out synchronously or marked stale while it
1757 * is on a delwri list we lazily remove it. To do this, the other party
1758 * clears the _XBF_DELWRI_Q flag but otherwise leaves the buffer alone.
1759 * It remains referenced and on the list. In a rare corner case it
1760 * might get re-added to a delwri list after the synchronous writeout, in
1761 * which case we just need to re-add the flag here.
1762 */
1763 bp->b_flags |= _XBF_DELWRI_Q;
1764 if (list_empty(&bp->b_list)) {
1765 atomic_inc(&bp->b_hold);
1766 list_add_tail(&bp->b_list, list);
1767 }
1768
1769 return true;
1770 }
1771
1772 /*
1773 * Compare function is more complex than it needs to be because
1774 * the return value is only 32 bits and we are doing comparisons
1775 * on 64 bit values
1776 */
1777 static int
1778 xfs_buf_cmp(
1779 void *priv,
1780 struct list_head *a,
1781 struct list_head *b)
1782 {
1783 struct xfs_buf *ap = container_of(a, struct xfs_buf, b_list);
1784 struct xfs_buf *bp = container_of(b, struct xfs_buf, b_list);
1785 xfs_daddr_t diff;
1786
1787 diff = ap->b_maps[0].bm_bn - bp->b_maps[0].bm_bn;
1788 if (diff < 0)
1789 return -1;
1790 if (diff > 0)
1791 return 1;
1792 return 0;
1793 }
1794
1795 static int
1796 __xfs_buf_delwri_submit(
1797 struct list_head *buffer_list,
1798 struct list_head *io_list,
1799 bool wait)
1800 {
1801 struct blk_plug plug;
1802 struct xfs_buf *bp, *n;
1803 int pinned = 0;
1804
1805 list_for_each_entry_safe(bp, n, buffer_list, b_list) {
1806 if (!wait) {
1807 if (xfs_buf_ispinned(bp)) {
1808 pinned++;
1809 continue;
1810 }
1811 if (!xfs_buf_trylock(bp))
1812 continue;
1813 } else {
1814 xfs_buf_lock(bp);
1815 }
1816
1817 /*
1818 * Someone else might have written the buffer synchronously or
1819 * marked it stale in the meantime. In that case only the
1820 * _XBF_DELWRI_Q flag got cleared, and we have to drop the
1821 * reference and remove it from the list here.
1822 */
1823 if (!(bp->b_flags & _XBF_DELWRI_Q)) {
1824 list_del_init(&bp->b_list);
1825 xfs_buf_relse(bp);
1826 continue;
1827 }
1828
1829 list_move_tail(&bp->b_list, io_list);
1830 trace_xfs_buf_delwri_split(bp, _RET_IP_);
1831 }
1832
1833 list_sort(NULL, io_list, xfs_buf_cmp);
1834
1835 blk_start_plug(&plug);
1836 list_for_each_entry_safe(bp, n, io_list, b_list) {
1837 bp->b_flags &= ~(_XBF_DELWRI_Q | XBF_ASYNC | XBF_WRITE_FAIL);
1838 bp->b_flags |= XBF_WRITE | XBF_ASYNC;
1839
1840 /*
1841 * we do all IO submission async. This means if we need to wait
1842 * for IO completion we need to take an extra reference so the
1843 * buffer is still valid on the other side.
1844 */
1845 if (wait)
1846 xfs_buf_hold(bp);
1847 else
1848 list_del_init(&bp->b_list);
1849
1850 xfs_buf_submit(bp);
1851 }
1852 blk_finish_plug(&plug);
1853
1854 return pinned;
1855 }
1856
1857 /*
1858 * Write out a buffer list asynchronously.
1859 *
1860 * This will take the @buffer_list, write all non-locked and non-pinned buffers
1861 * out and not wait for I/O completion on any of the buffers. This interface
1862 * is only safely useable for callers that can track I/O completion by higher
1863 * level means, e.g. AIL pushing as the @buffer_list is consumed in this
1864 * function.
1865 */
1866 int
1867 xfs_buf_delwri_submit_nowait(
1868 struct list_head *buffer_list)
1869 {
1870 LIST_HEAD (io_list);
1871 return __xfs_buf_delwri_submit(buffer_list, &io_list, false);
1872 }
1873
1874 /*
1875 * Write out a buffer list synchronously.
1876 *
1877 * This will take the @buffer_list, write all buffers out and wait for I/O
1878 * completion on all of the buffers. @buffer_list is consumed by the function,
1879 * so callers must have some other way of tracking buffers if they require such
1880 * functionality.
1881 */
1882 int
1883 xfs_buf_delwri_submit(
1884 struct list_head *buffer_list)
1885 {
1886 LIST_HEAD (io_list);
1887 int error = 0, error2;
1888 struct xfs_buf *bp;
1889
1890 __xfs_buf_delwri_submit(buffer_list, &io_list, true);
1891
1892 /* Wait for IO to complete. */
1893 while (!list_empty(&io_list)) {
1894 bp = list_first_entry(&io_list, struct xfs_buf, b_list);
1895
1896 list_del_init(&bp->b_list);
1897
1898 /* locking the buffer will wait for async IO completion. */
1899 xfs_buf_lock(bp);
1900 error2 = bp->b_error;
1901 xfs_buf_relse(bp);
1902 if (!error)
1903 error = error2;
1904 }
1905
1906 return error;
1907 }
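
/*
 * Illustrative usage sketch (editor's addition, not part of the original
 * source): the delwri list API above is used by queueing locked buffers
 * onto a caller-local list, releasing them, and then submitting the whole
 * list. "bp" is a hypothetical locked, referenced buffer.
 *
 *	LIST_HEAD(buffer_list);
 *
 *	xfs_buf_delwri_queue(bp, &buffer_list);
 *	xfs_buf_relse(bp);		(queueing took its own hold)
 *	...				(queue further buffers)
 *	error = xfs_buf_delwri_submit(&buffer_list);
 */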
1908
1909 int __init
1910 xfs_buf_init(void)
1911 {
1912 xfs_buf_zone = kmem_zone_init_flags(sizeof(xfs_buf_t), "xfs_buf",
1913 KM_ZONE_HWALIGN, NULL);
1914 if (!xfs_buf_zone)
1915 goto out;
1916
1917 xfslogd_workqueue = alloc_workqueue("xfslogd",
1918 WQ_MEM_RECLAIM | WQ_HIGHPRI | WQ_FREEZABLE, 1);
1919 if (!xfslogd_workqueue)
1920 goto out_free_buf_zone;
1921
1922 return 0;
1923
1924 out_free_buf_zone:
1925 kmem_zone_destroy(xfs_buf_zone);
1926 out:
1927 return -ENOMEM;
1928 }
1929
1930 void
1931 xfs_buf_terminate(void)
1932 {
1933 destroy_workqueue(xfslogd_workqueue);
1934 kmem_zone_destroy(xfs_buf_zone);
1935 }
1936