/*
 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
 */
#ifndef __XFS_BUF_H__
#define __XFS_BUF_H__

#include <linux/list.h>
#include <linux/types.h>
#include <linux/spinlock.h>
#include <linux/mm.h>
#include <linux/fs.h>
#include <linux/dax.h>
#include <linux/buffer_head.h>
#include <linux/uio.h>
#include <linux/list_lru.h>

/*
 * Base types
 */

#define XFS_BUF_DADDR_NULL	((xfs_daddr_t) (-1LL))

typedef enum {
	XBRW_READ = 1,			/* transfer into target memory */
	XBRW_WRITE = 2,			/* transfer from target memory */
	XBRW_ZERO = 3,			/* Zero target memory */
} xfs_buf_rw_t;

#define XBF_READ	 (1 << 0) /* buffer intended for reading from device */
#define XBF_WRITE	 (1 << 1) /* buffer intended for writing to device */
#define XBF_READ_AHEAD	 (1 << 2) /* asynchronous read-ahead */
#define XBF_NO_IOACCT	 (1 << 3) /* bypass I/O accounting (non-LRU bufs) */
#define XBF_ASYNC	 (1 << 4) /* initiator will not wait for completion */
#define XBF_DONE	 (1 << 5) /* all pages in the buffer uptodate */
#define XBF_STALE	 (1 << 6) /* buffer has been staled, do not find it */
#define XBF_WRITE_FAIL	 (1 << 24)/* async writes have failed on this buffer */

/* I/O hints for the BIO layer */
#define XBF_SYNCIO	 (1 << 10)/* treat this buffer as synchronous I/O */
#define XBF_FUA		 (1 << 11)/* force cache write through mode */
#define XBF_FLUSH	 (1 << 12)/* flush the disk cache before a write */

/* flags used only as arguments to access routines */
#define XBF_TRYLOCK	 (1 << 16)/* lock requested, but do not wait */
#define XBF_UNMAPPED	 (1 << 17)/* do not map the buffer */

/* flags used only internally */
#define _XBF_PAGES	 (1 << 20)/* backed by refcounted pages */
#define _XBF_KMEM	 (1 << 21)/* backed by heap memory */
#define _XBF_DELWRI_Q	 (1 << 22)/* buffer on a delwri queue */
#define _XBF_COMPOUND	 (1 << 23)/* compound buffer */

typedef unsigned int xfs_buf_flags_t;

#define XFS_BUF_FLAGS \
	{ XBF_READ,		"READ" }, \
	{ XBF_WRITE,		"WRITE" }, \
	{ XBF_READ_AHEAD,	"READ_AHEAD" }, \
	{ XBF_NO_IOACCT,	"NO_IOACCT" }, \
	{ XBF_ASYNC,		"ASYNC" }, \
	{ XBF_DONE,		"DONE" }, \
	{ XBF_STALE,		"STALE" }, \
	{ XBF_WRITE_FAIL,	"WRITE_FAIL" }, \
	{ XBF_SYNCIO,		"SYNCIO" }, \
	{ XBF_FUA,		"FUA" }, \
	{ XBF_FLUSH,		"FLUSH" }, \
	{ XBF_TRYLOCK,		"TRYLOCK" },	/* should never be set */\
	{ XBF_UNMAPPED,		"UNMAPPED" },	/* ditto */\
	{ _XBF_PAGES,		"PAGES" }, \
	{ _XBF_KMEM,		"KMEM" }, \
	{ _XBF_DELWRI_Q,	"DELWRI_Q" }, \
	{ _XBF_COMPOUND,	"COMPOUND" }
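
/*
 * The flag/name pairs above exist to decode b_flags in tracepoints. A
 * sketch of the intended use with __print_flags() (illustrative; the
 * actual trace events live in xfs_trace.h):
 *
 *	TP_printk("bp %p flags %s", __entry->bp,
 *		  __print_flags(__entry->flags, "|", XFS_BUF_FLAGS))
 */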


/*
 * Internal state flags.
 */
#define XFS_BSTATE_DISPOSE	 (1 << 0)	/* buffer being discarded */
#define XFS_BSTATE_IN_FLIGHT	 (1 << 1)	/* I/O in flight */

/*
 * The xfs_buftarg contains 2 notions of "sector size" -
 *
 * 1) The metadata sector size, which is the minimum unit and
 *    alignment of IO which will be performed by metadata operations.
 * 2) The device logical sector size
 *
 * The first is specified at mkfs time, and is stored on-disk in the
 * superblock's sb_sectsize.
 *
 * The latter is derived from the underlying device, and controls direct IO
 * alignment constraints.
 */
typedef struct xfs_buftarg {
	dev_t			bt_dev;
	struct block_device	*bt_bdev;
	struct backing_dev_info	*bt_bdi;
	struct xfs_mount	*bt_mount;
	unsigned int		bt_meta_sectorsize;
	size_t			bt_meta_sectormask;
	size_t			bt_logical_sectorsize;
	size_t			bt_logical_sectormask;

	/* LRU control structures */
	struct shrinker		bt_shrinker;
	struct list_lru		bt_lru;

	struct percpu_counter	bt_io_count;
} xfs_buftarg_t;
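
/*
 * Example: the logical sector masks above are typically (sectorsize - 1)
 * and allow cheap alignment checks against the device's logical sector
 * size. A sketch (not the actual file IO path), rejecting an unaligned
 * direct IO request:
 *
 *	if ((pos | count) & target->bt_logical_sectormask)
 *		return -EINVAL;
 */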

struct xfs_buf;
typedef void (*xfs_buf_iodone_t)(struct xfs_buf *);


#define XB_PAGES	2

struct xfs_buf_map {
	xfs_daddr_t		bm_bn;	/* block number for I/O */
	int			bm_len;	/* size of I/O */
};

#define DEFINE_SINGLE_BUF_MAP(map, blkno, numblk) \
	struct xfs_buf_map (map) = { .bm_bn = (blkno), .bm_len = (numblk) };
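
/*
 * Discontiguous (compound) buffers are described by an array of maps
 * passed to the *_map() variants below. A sketch with hypothetical
 * block numbers and lengths, mapping two extents into one buffer:
 *
 *	struct xfs_buf_map map[] = {
 *		{ .bm_bn = blkno1, .bm_len = len1 },
 *		{ .bm_bn = blkno2, .bm_len = len2 },
 *	};
 *
 *	bp = xfs_buf_read_map(target, map, 2, 0, ops);
 */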

struct xfs_buf_ops {
	char *name;
	void (*verify_read)(struct xfs_buf *);
	void (*verify_write)(struct xfs_buf *);
};
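
/*
 * A minimal verifier sketch; the "foo" structure, magic value and
 * function names are hypothetical. Real verifiers do more work (CRC,
 * owner and fsid checks) and flag failures with xfs_buf_ioerror():
 *
 *	static void
 *	xfs_foo_verify_read(
 *		struct xfs_buf	*bp)
 *	{
 *		struct xfs_foo	*foo = bp->b_addr;
 *
 *		if (foo->foo_magic != cpu_to_be32(XFS_FOO_MAGIC))
 *			xfs_buf_ioerror(bp, -EFSCORRUPTED);
 *	}
 *
 *	const struct xfs_buf_ops xfs_foo_buf_ops = {
 *		.name = "xfs_foo",
 *		.verify_read = xfs_foo_verify_read,
 *		.verify_write = xfs_foo_verify_write,
 *	};
 */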

typedef struct xfs_buf {
	/*
	 * first cacheline holds all the fields needed for an uncontended cache
	 * hit to be fully processed. The semaphore straddles the cacheline
	 * boundary, but the counter and lock sit on the first cacheline,
	 * which is the only bit that is touched if we hit the semaphore
	 * fast-path on locking.
	 */
	struct rb_node		b_rbnode;	/* rbtree node */
	xfs_daddr_t		b_bn;		/* block number of buffer */
	int			b_length;	/* size of buffer in BBs */
	atomic_t		b_hold;		/* reference count */
	atomic_t		b_lru_ref;	/* lru reclaim ref count */
	xfs_buf_flags_t		b_flags;	/* status flags */
	struct semaphore	b_sema;		/* semaphore for lockables */

	/*
	 * concurrent access to b_lru and b_state is protected by b_lock
	 * and not by b_sema
	 */
	struct list_head	b_lru;		/* lru list */
	spinlock_t		b_lock;		/* internal state lock */
	unsigned int		b_state;	/* internal state flags */
	int			b_io_error;	/* internal IO error state */
	wait_queue_head_t	b_waiters;	/* unpin waiters */
	struct list_head	b_list;
	struct xfs_perag	*b_pag;		/* contains rbtree root */
	xfs_buftarg_t		*b_target;	/* buffer target (device) */
	void			*b_addr;	/* virtual address of buffer */
	struct work_struct	b_ioend_work;
	struct workqueue_struct	*b_ioend_wq;	/* I/O completion wq */
	xfs_buf_iodone_t	b_iodone;	/* I/O completion function */
	struct completion	b_iowait;	/* queue for I/O waiters */
	void			*b_fspriv;
	struct xfs_trans	*b_transp;
	struct page		**b_pages;	/* array of page pointers */
	struct page		*b_page_array[XB_PAGES]; /* inline pages */
	struct xfs_buf_map	*b_maps;	/* compound buffer map */
	struct xfs_buf_map	__b_map;	/* inline compound buffer map */
	int			b_map_count;
	int			b_io_length;	/* IO size in BBs */
	atomic_t		b_pin_count;	/* pin count */
	atomic_t		b_io_remaining;	/* #outstanding I/O requests */
	unsigned int		b_page_count;	/* size of page array */
	unsigned int		b_offset;	/* page offset in first page */
	int			b_error;	/* error code on I/O */

	/*
	 * async write failure retry count. Initialised to zero on the first
	 * failure; when it exceeds the configured maximum without an
	 * intervening success, the write is considered to have failed
	 * permanently and the iodone handler will take appropriate action.
	 *
	 * For retry timeouts, we record the jiffie of the first failure. This
	 * means that we can change the retry timeout for buffers already under
	 * I/O and thus avoid getting stuck in a retry loop with a long timeout.
	 *
	 * last_error is used to ensure that we are getting repeated errors, not
	 * different errors. e.g. a block device might change ENOSPC to EIO when
	 * a failure timeout occurs, so we want to re-initialise the error
	 * retry behaviour appropriately when that happens.
	 */
	int			b_retries;
	unsigned long		b_first_retry_time; /* in jiffies */
	int			b_last_error;

	const struct xfs_buf_ops	*b_ops;

#ifdef XFS_BUF_LOCK_TRACKING
	int			b_last_holder;
#endif
} xfs_buf_t;

/* Finding and Reading Buffers */
struct xfs_buf *_xfs_buf_find(struct xfs_buftarg *target,
			      struct xfs_buf_map *map, int nmaps,
			      xfs_buf_flags_t flags, struct xfs_buf *new_bp);

static inline struct xfs_buf *
xfs_incore(
	struct xfs_buftarg	*target,
	xfs_daddr_t		blkno,
	size_t			numblks,
	xfs_buf_flags_t		flags)
{
	DEFINE_SINGLE_BUF_MAP(map, blkno, numblks);
	return _xfs_buf_find(target, &map, 1, flags, NULL);
}
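
/*
 * xfs_incore() only finds buffers that are already cached; it never
 * allocates. With XBF_TRYLOCK it returns NULL rather than sleeping on
 * a locked buffer, so it can be used to peek at the cache (a sketch):
 *
 *	bp = xfs_incore(target, blkno, numblks, XBF_TRYLOCK);
 *	if (!bp)
 *		return;
 *	... inspect bp ...
 *	xfs_buf_relse(bp);
 */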

struct xfs_buf *_xfs_buf_alloc(struct xfs_buftarg *target,
			       struct xfs_buf_map *map, int nmaps,
			       xfs_buf_flags_t flags);

static inline struct xfs_buf *
xfs_buf_alloc(
	struct xfs_buftarg	*target,
	xfs_daddr_t		blkno,
	size_t			numblks,
	xfs_buf_flags_t		flags)
{
	DEFINE_SINGLE_BUF_MAP(map, blkno, numblks);
	return _xfs_buf_alloc(target, &map, 1, flags);
}

struct xfs_buf *xfs_buf_get_map(struct xfs_buftarg *target,
			       struct xfs_buf_map *map, int nmaps,
			       xfs_buf_flags_t flags);
struct xfs_buf *xfs_buf_read_map(struct xfs_buftarg *target,
			       struct xfs_buf_map *map, int nmaps,
			       xfs_buf_flags_t flags,
			       const struct xfs_buf_ops *ops);
void xfs_buf_readahead_map(struct xfs_buftarg *target,
			       struct xfs_buf_map *map, int nmaps,
			       const struct xfs_buf_ops *ops);

static inline struct xfs_buf *
xfs_buf_get(
	struct xfs_buftarg	*target,
	xfs_daddr_t		blkno,
	size_t			numblks,
	xfs_buf_flags_t		flags)
{
	DEFINE_SINGLE_BUF_MAP(map, blkno, numblks);
	return xfs_buf_get_map(target, &map, 1, flags);
}

static inline struct xfs_buf *
xfs_buf_read(
	struct xfs_buftarg	*target,
	xfs_daddr_t		blkno,
	size_t			numblks,
	xfs_buf_flags_t		flags,
	const struct xfs_buf_ops *ops)
{
	DEFINE_SINGLE_BUF_MAP(map, blkno, numblks);
	return xfs_buf_read_map(target, &map, 1, flags, ops);
}
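
/*
 * Typical synchronous metadata read (a sketch; xfs_foo_buf_ops is the
 * hypothetical verifier from above). The buffer is returned locked and
 * held, and read/verifier failures are reported through b_error:
 *
 *	bp = xfs_buf_read(target, blkno, numblks, 0, &xfs_foo_buf_ops);
 *	if (!bp)
 *		return -ENOMEM;
 *	if (bp->b_error) {
 *		error = bp->b_error;
 *		xfs_buf_relse(bp);
 *		return error;
 *	}
 *	... use bp->b_addr ...
 *	xfs_buf_relse(bp);
 */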

static inline void
xfs_buf_readahead(
	struct xfs_buftarg	*target,
	xfs_daddr_t		blkno,
	size_t			numblks,
	const struct xfs_buf_ops *ops)
{
	DEFINE_SINGLE_BUF_MAP(map, blkno, numblks);
	xfs_buf_readahead_map(target, &map, 1, ops);
}

struct xfs_buf *xfs_buf_get_empty(struct xfs_buftarg *target, size_t numblks);
void xfs_buf_set_empty(struct xfs_buf *bp, size_t numblks);
int xfs_buf_associate_memory(struct xfs_buf *bp, void *mem, size_t length);

struct xfs_buf *xfs_buf_get_uncached(struct xfs_buftarg *target, size_t numblks,
				int flags);
int xfs_buf_read_uncached(struct xfs_buftarg *target, xfs_daddr_t daddr,
			  size_t numblks, int flags, struct xfs_buf **bpp,
			  const struct xfs_buf_ops *ops);
void xfs_buf_hold(struct xfs_buf *bp);

/* Releasing Buffers */
extern void xfs_buf_free(xfs_buf_t *);
extern void xfs_buf_rele(xfs_buf_t *);

/* Locking and Unlocking Buffers */
extern int xfs_buf_trylock(xfs_buf_t *);
extern void xfs_buf_lock(xfs_buf_t *);
extern void xfs_buf_unlock(xfs_buf_t *);
#define xfs_buf_islocked(bp) \
	((bp)->b_sema.count <= 0)
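
/*
 * Sketch of the usual non-blocking-first locking pattern (illustrative;
 * "can_sleep" is a stand-in for whatever the caller's context allows):
 *
 *	if (!xfs_buf_trylock(bp)) {
 *		if (!can_sleep)
 *			return -EAGAIN;
 *		xfs_buf_lock(bp);
 *	}
 *	ASSERT(xfs_buf_islocked(bp));
 *	...
 *	xfs_buf_unlock(bp);
 */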

/* Buffer Read and Write Routines */
extern int xfs_bwrite(struct xfs_buf *bp);
extern void xfs_buf_ioend(struct xfs_buf *bp);
extern void xfs_buf_ioerror(xfs_buf_t *, int);
extern void xfs_buf_ioerror_alert(struct xfs_buf *, const char *func);
extern void xfs_buf_submit(struct xfs_buf *bp);
extern int xfs_buf_submit_wait(struct xfs_buf *bp);
extern void xfs_buf_iomove(xfs_buf_t *, size_t, size_t, void *,
				xfs_buf_rw_t);
#define xfs_buf_zero(bp, off, len) \
	xfs_buf_iomove((bp), (off), (len), NULL, XBRW_ZERO)
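
/*
 * xfs_buf_iomove() copies between a caller's memory and the (possibly
 * unmapped, multi-page) buffer contents; xfs_buf_zero() is the same
 * walk with a NULL data pointer and XBRW_ZERO. For example, zeroing
 * "len" bytes of the buffer starting at byte offset "off" (a sketch):
 *
 *	xfs_buf_zero(bp, off, len);
 */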

/* Buffer Utility Routines */
extern void *xfs_buf_offset(struct xfs_buf *, size_t);
extern void xfs_buf_stale(struct xfs_buf *bp);

/* Delayed Write Buffer Routines */
extern void xfs_buf_delwri_cancel(struct list_head *);
extern bool xfs_buf_delwri_queue(struct xfs_buf *, struct list_head *);
extern int xfs_buf_delwri_submit(struct list_head *);
extern int xfs_buf_delwri_submit_nowait(struct list_head *);
extern int xfs_buf_delwri_pushbuf(struct xfs_buf *, struct list_head *);
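
/*
 * Delayed write buffers are batched on a caller-private list and then
 * submitted in one go. A sketch of the common pattern
 * (xfs_buf_delwri_queue() returns false if the buffer is already queued):
 *
 *	LIST_HEAD(buffer_list);
 *
 *	xfs_buf_delwri_queue(bp, &buffer_list);
 *	xfs_buf_relse(bp);
 *	...
 *	error = xfs_buf_delwri_submit(&buffer_list);
 */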

/* Buffer Daemon Setup Routines */
extern int xfs_buf_init(void);
extern void xfs_buf_terminate(void);

/*
 * These macros use the IO block map rather than b_bn. b_bn is now really
 * just the buffer cache index for cached buffers. As IO does not use b_bn
 * anymore, uncached buffers do not use b_bn at all and hence must modify the
 * IO map directly. Uncached buffers are not allowed to be discontiguous, so
 * this is safe to do.
 *
 * In the future, uncached buffers will pass the block number directly to the
 * IO request function and hence these macros will go away at that point.
 */
#define XFS_BUF_ADDR(bp)		((bp)->b_maps[0].bm_bn)
#define XFS_BUF_SET_ADDR(bp, bno)	((bp)->b_maps[0].bm_bn = (xfs_daddr_t)(bno))
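
/*
 * Uncached buffer IO sketch: the IO address is set directly in the map
 * before submission (illustrative; mirrors what the uncached read
 * helpers do internally):
 *
 *	bp = xfs_buf_get_uncached(target, numblks, 0);
 *	if (!bp)
 *		return -ENOMEM;
 *	XFS_BUF_SET_ADDR(bp, daddr);
 *	bp->b_flags |= XBF_READ;
 *	bp->b_ops = ops;
 *	error = xfs_buf_submit_wait(bp);
 */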

static inline void xfs_buf_set_ref(struct xfs_buf *bp, int lru_ref)
{
	atomic_set(&bp->b_lru_ref, lru_ref);
}

static inline int xfs_buf_ispinned(struct xfs_buf *bp)
{
	return atomic_read(&bp->b_pin_count);
}

static inline void xfs_buf_relse(xfs_buf_t *bp)
{
	xfs_buf_unlock(bp);
	xfs_buf_rele(bp);
}

static inline int
xfs_buf_verify_cksum(struct xfs_buf *bp, unsigned long cksum_offset)
{
	return xfs_verify_cksum(bp->b_addr, BBTOB(bp->b_length),
				cksum_offset);
}

static inline void
xfs_buf_update_cksum(struct xfs_buf *bp, unsigned long cksum_offset)
{
	xfs_update_cksum(bp->b_addr, BBTOB(bp->b_length),
			 cksum_offset);
}
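
/*
 * These helpers pair up inside verifiers: verify_read checks the CRC
 * before trusting the contents, verify_write stamps it just before the
 * buffer goes to disk. A sketch (XFS_FOO_CRC_OFF is a hypothetical
 * offsetof() of the on-disk CRC field):
 *
 *	static void
 *	xfs_foo_verify_write(
 *		struct xfs_buf	*bp)
 *	{
 *		... content checks, xfs_buf_ioerror() on failure ...
 *		xfs_buf_update_cksum(bp, XFS_FOO_CRC_OFF);
 *	}
 */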

/*
 * Handling of buftargs.
 */
extern xfs_buftarg_t *xfs_alloc_buftarg(struct xfs_mount *,
			struct block_device *);
extern void xfs_free_buftarg(struct xfs_mount *, struct xfs_buftarg *);
extern void xfs_wait_buftarg(xfs_buftarg_t *);
extern int xfs_setsize_buftarg(xfs_buftarg_t *, unsigned int);

#define xfs_getsize_buftarg(buftarg)	block_size((buftarg)->bt_bdev)
#define xfs_readonly_buftarg(buftarg)	bdev_read_only((buftarg)->bt_bdev)
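
/*
 * Buftarg lifecycle sketch (mount and unmount sides; illustrative only,
 * with sbp standing in for the mount's superblock):
 *
 *	btp = xfs_alloc_buftarg(mp, bdev);
 *	if (!btp)
 *		goto out_free;
 *	error = xfs_setsize_buftarg(btp, sbp->sb_sectsize);
 *	...
 *	(at unmount: drain all cached buffers, then tear down)
 *	xfs_wait_buftarg(btp);
 *	xfs_free_buftarg(mp, btp);
 */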

#endif	/* __XFS_BUF_H__ */