// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 */
#ifndef __XFS_BUF_H__
#define __XFS_BUF_H__

#include <linux/list.h>
#include <linux/types.h>
#include <linux/spinlock.h>
#include <linux/mm.h>
#include <linux/fs.h>
#include <linux/dax.h>
#include <linux/uio.h>
#include <linux/list_lru.h>

/*
 *	Base types
 */

#define XFS_BUF_DADDR_NULL	((xfs_daddr_t) (-1LL))

#define XBF_READ	 (1 << 0) /* buffer intended for reading from device */
#define XBF_WRITE	 (1 << 1) /* buffer intended for writing to device */
#define XBF_READ_AHEAD	 (1 << 2) /* asynchronous read-ahead */
#define XBF_NO_IOACCT	 (1 << 3) /* bypass I/O accounting (non-LRU bufs) */
#define XBF_ASYNC	 (1 << 4) /* initiator will not wait for completion */
#define XBF_DONE	 (1 << 5) /* all pages in the buffer uptodate */
#define XBF_STALE	 (1 << 6) /* buffer has been staled, do not find it */
#define XBF_WRITE_FAIL	 (1 << 7) /* async writes have failed on this buffer */

/* flags used only as arguments to access routines */
#define XBF_TRYLOCK	 (1 << 16) /* lock requested, but do not wait */
#define XBF_UNMAPPED	 (1 << 17) /* do not map the buffer */

/* flags used only internally */
#define _XBF_PAGES	 (1 << 20) /* backed by refcounted pages */
#define _XBF_KMEM	 (1 << 21) /* backed by heap memory */
#define _XBF_DELWRI_Q	 (1 << 22) /* buffer on a delwri queue */

typedef unsigned int xfs_buf_flags_t;

#define XFS_BUF_FLAGS \
	{ XBF_READ,		"READ" }, \
	{ XBF_WRITE,		"WRITE" }, \
	{ XBF_READ_AHEAD,	"READ_AHEAD" }, \
	{ XBF_NO_IOACCT,	"NO_IOACCT" }, \
	{ XBF_ASYNC,		"ASYNC" }, \
	{ XBF_DONE,		"DONE" }, \
	{ XBF_STALE,		"STALE" }, \
	{ XBF_WRITE_FAIL,	"WRITE_FAIL" }, \
	{ XBF_TRYLOCK,		"TRYLOCK" },	/* should never be set */\
	{ XBF_UNMAPPED,		"UNMAPPED" },	/* ditto */\
	{ _XBF_PAGES,		"PAGES" }, \
	{ _XBF_KMEM,		"KMEM" }, \
	{ _XBF_DELWRI_Q,	"DELWRI_Q" }
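
/*
 * Usage sketch (added for illustration, not in the original header): the
 * XFS_BUF_FLAGS table is a list of { value, name } pairs so trace code can
 * decode b_flags into a readable string, e.g. with the kernel's
 * __print_flags() helper inside a tracepoint's TP_printk():
 *
 *	__print_flags(bp->b_flags, "|", XFS_BUF_FLAGS)
 *
 * Callers combine the public flags as ordinary bitmasks, e.g. an
 * asynchronous read that must not block on the buffer lock:
 *
 *	xfs_buf_flags_t	flags = XBF_READ | XBF_ASYNC | XBF_TRYLOCK;
 */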

/*
 * Internal state flags.
 */
#define XFS_BSTATE_DISPOSE	 (1 << 0)	/* buffer being discarded */
#define XFS_BSTATE_IN_FLIGHT	 (1 << 1)	/* I/O in flight */

/*
 * The xfs_buftarg contains 2 notions of "sector size" -
 *
 * 1) The metadata sector size, which is the minimum unit and
 *    alignment of IO which will be performed by metadata operations.
 * 2) The device logical sector size
 *
 * The first is specified at mkfs time, and is stored on-disk in the
 * superblock's sb_sectsize.
 *
 * The latter is derived from the underlying device, and controls direct IO
 * alignment constraints.
 */
typedef struct xfs_buftarg {
	dev_t			bt_dev;
	struct block_device	*bt_bdev;
	struct dax_device	*bt_daxdev;
	struct xfs_mount	*bt_mount;
	unsigned int		bt_meta_sectorsize;
	size_t			bt_meta_sectormask;
	size_t			bt_logical_sectorsize;
	size_t			bt_logical_sectormask;

	/* LRU control structures */
	struct shrinker		bt_shrinker;
	struct list_lru		bt_lru;

	struct percpu_counter	bt_io_count;
} xfs_buftarg_t;
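
/*
 * Illustrative sketch (an assumption, not from the original header): the
 * *_sectormask fields follow the usual "size - 1" convention, so an
 * alignment check reduces to a single AND. The helper name below is
 * hypothetical:
 *
 *	static inline bool
 *	xfs_offset_meta_aligned(struct xfs_buftarg *btp, loff_t offset)
 *	{
 *		return (offset & btp->bt_meta_sectormask) == 0;
 *	}
 */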

struct xfs_buf;
typedef void (*xfs_buf_iodone_t)(struct xfs_buf *);

#define XB_PAGES	2

struct xfs_buf_map {
	xfs_daddr_t		bm_bn;	/* block number for I/O */
	int			bm_len;	/* size of I/O */
};

#define DEFINE_SINGLE_BUF_MAP(map, blkno, numblk) \
	struct xfs_buf_map (map) = { .bm_bn = (blkno), .bm_len = (numblk) };
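
/*
 * Usage sketch: a contiguous buffer needs a single map, which the macro
 * above declares in place; a discontiguous (compound) buffer instead
 * passes an array of maps to the *_map() routines declared below. Block
 * numbers and lengths here are hypothetical:
 *
 *	DEFINE_SINGLE_BUF_MAP(map, blkno, numblks);
 *
 *	struct xfs_buf_map multi[2] = {
 *		{ .bm_bn = blkno1, .bm_len = len1 },
 *		{ .bm_bn = blkno2, .bm_len = len2 },
 *	};
 *	bp = xfs_buf_get_map(target, multi, 2, flags);
 */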

struct xfs_buf_ops {
	char *name;
	union {
		__be32 magic[2];	/* v4 and v5 on disk magic values */
		__be16 magic16[2];	/* v4 and v5 on disk magic values */
	};
	void (*verify_read)(struct xfs_buf *);
	void (*verify_write)(struct xfs_buf *);
	xfs_failaddr_t (*verify_struct)(struct xfs_buf *bp);
};
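
/*
 * Sketch of how a buffer type wires up these callbacks; the "foo" names
 * and magic values are hypothetical, but the shape follows the verifier
 * tables used throughout XFS:
 *
 *	static void
 *	xfs_foo_read_verify(struct xfs_buf *bp)
 *	{
 *		struct xfs_foo_hdr *hdr = bp->b_addr;
 *
 *		if (!xfs_verify_magic(bp, hdr->magic))
 *			xfs_buf_ioerror(bp, -EFSCORRUPTED);
 *	}
 *
 *	const struct xfs_buf_ops xfs_foo_buf_ops = {
 *		.name = "xfs_foo",
 *		.magic = { cpu_to_be32(XFS_FOO_MAGIC),
 *			   cpu_to_be32(XFS_FOO_CRC_MAGIC) },
 *		.verify_read = xfs_foo_read_verify,
 *		.verify_write = xfs_foo_write_verify,
 *	};
 */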

typedef struct xfs_buf {
	/*
	 * first cacheline holds all the fields needed for an uncontended cache
	 * hit to be fully processed. The semaphore straddles the cacheline
	 * boundary, but the counter and lock sit on the first cacheline,
	 * which is the only part that is touched if we hit the semaphore
	 * fast-path on locking.
	 */
	struct rhash_head	b_rhash_head;	/* pag buffer hash node */
	xfs_daddr_t		b_bn;		/* block number of buffer */
	int			b_length;	/* size of buffer in BBs */
	atomic_t		b_hold;		/* reference count */
	atomic_t		b_lru_ref;	/* lru reclaim ref count */
	xfs_buf_flags_t		b_flags;	/* status flags */
	struct semaphore	b_sema;		/* semaphore for lockables */

	/*
	 * concurrent access to b_lru and b_lru_flags is protected by
	 * bt_lru_lock and not by b_sema
	 */
	struct list_head	b_lru;		/* lru list */
	spinlock_t		b_lock;		/* internal state lock */
	unsigned int		b_state;	/* internal state flags */
	int			b_io_error;	/* internal IO error state */
	wait_queue_head_t	b_waiters;	/* unpin waiters */
	struct list_head	b_list;
	struct xfs_perag	*b_pag;		/* contains rbtree root */
	struct xfs_mount	*b_mount;
	xfs_buftarg_t		*b_target;	/* buffer target (device) */
	void			*b_addr;	/* virtual address of buffer */
	struct work_struct	b_ioend_work;
	xfs_buf_iodone_t	b_iodone;	/* I/O completion function */
	struct completion	b_iowait;	/* queue for I/O waiters */
	struct xfs_buf_log_item	*b_log_item;
	struct list_head	b_li_list;	/* Log items list head */
	struct xfs_trans	*b_transp;
	struct page		**b_pages;	/* array of page pointers */
	struct page		*b_page_array[XB_PAGES]; /* inline pages */
	struct xfs_buf_map	*b_maps;	/* compound buffer map */
	struct xfs_buf_map	__b_map;	/* inline compound buffer map */
	int			b_map_count;
	atomic_t		b_pin_count;	/* pin count */
	atomic_t		b_io_remaining;	/* #outstanding I/O requests */
	unsigned int		b_page_count;	/* size of page array */
	unsigned int		b_offset;	/* page offset in first page */
	int			b_error;	/* error code on I/O */

	/*
	 * async write failure retry count. Initialised to zero on the first
	 * failure, then when it exceeds the maximum configured without a
	 * success the write is considered to be failed permanently and the
	 * iodone handler will take appropriate action.
	 *
	 * For retry timeouts, we record the jiffie of the first failure. This
	 * means that we can change the retry timeout for buffers already under
	 * I/O and thus avoid getting stuck in a retry loop with a long timeout.
	 *
	 * last_error is used to ensure that we are getting repeated errors, not
	 * different errors. e.g. a block device might change ENOSPC to EIO when
	 * a failure timeout occurs, so we want to re-initialise the error
	 * retry behaviour appropriately when that happens.
	 */
	int			b_retries;
	unsigned long		b_first_retry_time; /* in jiffies */
	int			b_last_error;

	const struct xfs_buf_ops	*b_ops;
} xfs_buf_t;
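
/*
 * Rough sketch (an assumption about the error handler, not the literal
 * implementation) of how the three retry fields combine; max_retries and
 * retry_timeout stand in for the configurable limits:
 *
 *	if (bp->b_error != bp->b_last_error) {
 *		bp->b_retries = 0;		// new error, restart accounting
 *		bp->b_first_retry_time = jiffies;
 *	}
 *	bp->b_last_error = bp->b_error;
 *	if (++bp->b_retries > max_retries ||
 *	    time_after(jiffies, bp->b_first_retry_time + retry_timeout))
 *		permanent_failure = true;	// let the iodone handler act
 */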

/* Finding and Reading Buffers */
struct xfs_buf *xfs_buf_incore(struct xfs_buftarg *target,
			   xfs_daddr_t blkno, size_t numblks,
			   xfs_buf_flags_t flags);

struct xfs_buf *xfs_buf_get_map(struct xfs_buftarg *target,
			       struct xfs_buf_map *map, int nmaps,
			       xfs_buf_flags_t flags);
struct xfs_buf *xfs_buf_read_map(struct xfs_buftarg *target,
			       struct xfs_buf_map *map, int nmaps,
			       xfs_buf_flags_t flags,
			       const struct xfs_buf_ops *ops);
void xfs_buf_readahead_map(struct xfs_buftarg *target,
			       struct xfs_buf_map *map, int nmaps,
			       const struct xfs_buf_ops *ops);

static inline struct xfs_buf *
xfs_buf_get(
	struct xfs_buftarg	*target,
	xfs_daddr_t		blkno,
	size_t			numblks)
{
	DEFINE_SINGLE_BUF_MAP(map, blkno, numblks);
	return xfs_buf_get_map(target, &map, 1, 0);
}

static inline struct xfs_buf *
xfs_buf_read(
	struct xfs_buftarg	*target,
	xfs_daddr_t		blkno,
	size_t			numblks,
	xfs_buf_flags_t		flags,
	const struct xfs_buf_ops *ops)
{
	DEFINE_SINGLE_BUF_MAP(map, blkno, numblks);
	return xfs_buf_read_map(target, &map, 1, flags, ops);
}

static inline void
xfs_buf_readahead(
	struct xfs_buftarg	*target,
	xfs_daddr_t		blkno,
	size_t			numblks,
	const struct xfs_buf_ops *ops)
{
	DEFINE_SINGLE_BUF_MAP(map, blkno, numblks);
	xfs_buf_readahead_map(target, &map, 1, ops);
}

struct xfs_buf *xfs_buf_get_uncached(struct xfs_buftarg *target, size_t numblks,
				int flags);
int xfs_buf_read_uncached(struct xfs_buftarg *target, xfs_daddr_t daddr,
			  size_t numblks, int flags, struct xfs_buf **bpp,
			  const struct xfs_buf_ops *ops);
void xfs_buf_hold(struct xfs_buf *bp);
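
/*
 * Typical cached-read pattern (sketch; "ops" is the caller's verifier
 * table and error handling is reduced to the essentials). The buffer is
 * returned locked and held, so xfs_buf_relse() both unlocks and releases:
 *
 *	struct xfs_buf	*bp;
 *
 *	bp = xfs_buf_read(btp, blkno, numblks, 0, ops);
 *	if (!bp)
 *		return -ENOMEM;
 *	if (bp->b_error) {
 *		int error = bp->b_error;
 *
 *		xfs_buf_relse(bp);
 *		return error;
 *	}
 *	// ... use bp->b_addr ...
 *	xfs_buf_relse(bp);
 */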

/* Releasing Buffers */
extern void xfs_buf_free(xfs_buf_t *);
extern void xfs_buf_rele(xfs_buf_t *);

/* Locking and Unlocking Buffers */
extern int xfs_buf_trylock(xfs_buf_t *);
extern void xfs_buf_lock(xfs_buf_t *);
extern void xfs_buf_unlock(xfs_buf_t *);
#define xfs_buf_islocked(bp) \
	((bp)->b_sema.count <= 0)
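
/*
 * Sketch of the non-blocking pattern behind XBF_TRYLOCK: take the lock
 * only if it is immediately available, otherwise skip the buffer rather
 * than sleeping on b_sema:
 *
 *	if (!xfs_buf_trylock(bp))
 *		return;			// contended, try again later
 *	// ... inspect or modify the buffer ...
 *	xfs_buf_unlock(bp);
 */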

/* Buffer Read and Write Routines */
extern int xfs_bwrite(struct xfs_buf *bp);
extern void xfs_buf_ioend(struct xfs_buf *bp);
extern void __xfs_buf_ioerror(struct xfs_buf *bp, int error,
		xfs_failaddr_t failaddr);
#define xfs_buf_ioerror(bp, err) __xfs_buf_ioerror((bp), (err), __this_address)
extern void xfs_buf_ioerror_alert(struct xfs_buf *, const char *func);

extern int __xfs_buf_submit(struct xfs_buf *bp, bool wait);
static inline int xfs_buf_submit(struct xfs_buf *bp)
{
	bool wait = !(bp->b_flags & XBF_ASYNC);
	return __xfs_buf_submit(bp, wait);
}

void xfs_buf_zero(struct xfs_buf *bp, size_t boff, size_t bsize);
void __xfs_buf_mark_corrupt(struct xfs_buf *bp, xfs_failaddr_t fa);
#define xfs_buf_mark_corrupt(bp) __xfs_buf_mark_corrupt((bp), __this_address)

/* Buffer Utility Routines */
extern void *xfs_buf_offset(struct xfs_buf *, size_t);
extern void xfs_buf_stale(struct xfs_buf *bp);

/* Delayed Write Buffer Routines */
extern void xfs_buf_delwri_cancel(struct list_head *);
extern bool xfs_buf_delwri_queue(struct xfs_buf *, struct list_head *);
extern int xfs_buf_delwri_submit(struct list_head *);
extern int xfs_buf_delwri_submit_nowait(struct list_head *);
extern int xfs_buf_delwri_pushbuf(struct xfs_buf *, struct list_head *);
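
/*
 * Usage sketch: delayed writes are batched on a caller-owned list and
 * pushed in one pass; xfs_buf_delwri_submit() waits for the IO, the
 * _nowait() variant does not:
 *
 *	LIST_HEAD(buffer_list);
 *	int error;
 *
 *	xfs_buf_delwri_queue(bp1, &buffer_list);
 *	xfs_buf_delwri_queue(bp2, &buffer_list);
 *	error = xfs_buf_delwri_submit(&buffer_list);
 */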

/* Buffer Daemon Setup Routines */
extern int xfs_buf_init(void);
extern void xfs_buf_terminate(void);

/*
 * These macros use the IO block map rather than b_bn. b_bn is now really
 * just the buffer cache index for cached buffers. As IO does not use b_bn
 * anymore, uncached buffers do not use b_bn at all and hence must modify the IO
 * map directly. Uncached buffers are not allowed to be discontiguous, so this
 * is safe to do.
 *
 * In the future, uncached buffers will pass the block number directly to the io
 * request function and hence these macros will go away at that point.
 */
#define XFS_BUF_ADDR(bp)		((bp)->b_maps[0].bm_bn)
#define XFS_BUF_SET_ADDR(bp, bno)	((bp)->b_maps[0].bm_bn = (xfs_daddr_t)(bno))
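
/*
 * Sketch of the uncached-buffer pattern the comment above describes
 * (hypothetical variables): the caller stamps the IO address through the
 * map rather than through b_bn before submitting:
 *
 *	bp = xfs_buf_get_uncached(btp, numblks, 0);
 *	XFS_BUF_SET_ADDR(bp, daddr);
 *	// ... fill bp->b_addr, set XBF_WRITE in b_flags, submit ...
 */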

void xfs_buf_set_ref(struct xfs_buf *bp, int lru_ref);

/*
 * If the buffer is already on the LRU, do nothing. Otherwise set the buffer
 * up with a reference count of 0 so it will be tossed from the cache when
 * released.
 */
static inline void xfs_buf_oneshot(struct xfs_buf *bp)
{
	if (!list_empty(&bp->b_lru) || atomic_read(&bp->b_lru_ref) > 1)
		return;
	atomic_set(&bp->b_lru_ref, 0);
}
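
/*
 * Typical use (sketch): mark a buffer read for a one-off scan so it is
 * reclaimed as soon as it is released instead of aging through the LRU:
 *
 *	xfs_buf_oneshot(bp);
 *	xfs_buf_relse(bp);
 */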

static inline int xfs_buf_ispinned(struct xfs_buf *bp)
{
	return atomic_read(&bp->b_pin_count);
}

static inline void xfs_buf_relse(xfs_buf_t *bp)
{
	xfs_buf_unlock(bp);
	xfs_buf_rele(bp);
}

static inline int
xfs_buf_verify_cksum(struct xfs_buf *bp, unsigned long cksum_offset)
{
	return xfs_verify_cksum(bp->b_addr, BBTOB(bp->b_length),
				cksum_offset);
}

static inline void
xfs_buf_update_cksum(struct xfs_buf *bp, unsigned long cksum_offset)
{
	xfs_update_cksum(bp->b_addr, BBTOB(bp->b_length),
			 cksum_offset);
}
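
/*
 * Sketch of how the two checksum helpers pair up inside verifiers;
 * XFS_FOO_CRC_OFF is a hypothetical offset of the on-disk CRC field:
 *
 *	// read side: reject a buffer whose stored CRC does not match
 *	if (!xfs_buf_verify_cksum(bp, XFS_FOO_CRC_OFF))
 *		xfs_buf_ioerror(bp, -EFSBADCRC);
 *
 *	// write side: recompute the CRC after the buffer was modified
 *	xfs_buf_update_cksum(bp, XFS_FOO_CRC_OFF);
 */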

/*
 *	Handling of buftargs.
 */
extern xfs_buftarg_t *xfs_alloc_buftarg(struct xfs_mount *,
			struct block_device *, struct dax_device *);
extern void xfs_free_buftarg(struct xfs_buftarg *);
extern void xfs_wait_buftarg(xfs_buftarg_t *);
extern int xfs_setsize_buftarg(xfs_buftarg_t *, unsigned int);

#define xfs_getsize_buftarg(buftarg)	block_size((buftarg)->bt_bdev)
#define xfs_readonly_buftarg(buftarg)	bdev_read_only((buftarg)->bt_bdev)

static inline int
xfs_buftarg_dma_alignment(struct xfs_buftarg *bt)
{
	return queue_dma_alignment(bt->bt_bdev->bd_disk->queue);
}

int xfs_buf_reverify(struct xfs_buf *bp, const struct xfs_buf_ops *ops);
bool xfs_verify_magic(struct xfs_buf *bp, __be32 dmagic);
bool xfs_verify_magic16(struct xfs_buf *bp, __be16 dmagic);

#endif	/* __XFS_BUF_H__ */