/*
 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
 */
#ifndef __XFS_BUF_H__
#define __XFS_BUF_H__

#include <linux/list.h>
#include <linux/types.h>
#include <linux/spinlock.h>
#include <asm/system.h>
#include <linux/mm.h>
#include <linux/fs.h>
#include <linux/buffer_head.h>
#include <linux/uio.h>

/*
 *	Base types
 */

#define XFS_BUF_DADDR_NULL	((xfs_daddr_t) (-1LL))

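/*
 * Conversions between byte counts and pagecache pages: xfs_buf_ctob()
 * turns a page count into bytes, xfs_buf_btoc() rounds a byte count up
 * to whole pages, xfs_buf_btoct() truncates it, and xfs_buf_poff()
 * yields the offset within a page.
 */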
#define xfs_buf_ctob(pp)	((pp) * PAGE_CACHE_SIZE)
#define xfs_buf_btoc(dd)	(((dd) + PAGE_CACHE_SIZE-1) >> PAGE_CACHE_SHIFT)
#define xfs_buf_btoct(dd)	((dd) >> PAGE_CACHE_SHIFT)
#define xfs_buf_poff(aa)	((aa) & ~PAGE_CACHE_MASK)

typedef enum {
	XBRW_READ = 1,			/* transfer into target memory */
	XBRW_WRITE = 2,			/* transfer from target memory */
	XBRW_ZERO = 3,			/* Zero target memory */
} xfs_buf_rw_t;

typedef enum {
	XBF_READ = (1 << 0),	/* buffer intended for reading from device */
	XBF_WRITE = (1 << 1),	/* buffer intended for writing to device   */
	XBF_MAPPED = (1 << 2),  /* buffer mapped (b_addr valid)            */
	XBF_ASYNC = (1 << 4),   /* initiator will not wait for completion  */
	XBF_DONE = (1 << 5),    /* all pages in the buffer uptodate	   */
	XBF_DELWRI = (1 << 6),  /* buffer has dirty pages                  */
	XBF_STALE = (1 << 7),	/* buffer has been staled, do not find it  */
	XBF_FS_MANAGED = (1 << 8),  /* filesystem controls freeing memory  */
	XBF_ORDERED = (1 << 11),    /* use ordered writes		   */
	XBF_READ_AHEAD = (1 << 12), /* asynchronous read-ahead		   */

	/* flags used only as arguments to access routines */
	XBF_LOCK = (1 << 14),       /* lock requested			   */
	XBF_TRYLOCK = (1 << 15),    /* lock requested, but do not wait	   */
	XBF_DONT_BLOCK = (1 << 16), /* do not block in current thread	   */

	/* flags used only internally */
	_XBF_PAGE_CACHE = (1 << 17),/* backed by pagecache		   */
	_XBF_PAGES = (1 << 18),	    /* backed by refcounted pages	   */
	_XBF_RUN_QUEUES = (1 << 19),/* run block device task queue	   */
	_XBF_DELWRI_Q = (1 << 21),   /* buffer on delwri queue		   */

	/*
	 * Special flag for supporting metadata blocks smaller than a FSB.
	 *
	 * In this case we can have multiple xfs_buf_t on a single page and
	 * need to lock out concurrent xfs_buf_t readers as they only
	 * serialise access to the buffer.
	 *
	 * In the FSB size >= PAGE_CACHE_SIZE case, we have no serialisation
	 * between reads of the page. Hence we can have one thread read the
	 * page and modify it, but then race with another thread that thinks
	 * the page is not up-to-date and hence reads it again.
	 *
	 * The result is that the first modification to the page is lost.
	 * This sort of AGF/AGI reading race can happen when unlinking inodes
	 * that require truncation and results in the AGI unlinked list
	 * modifications being lost.
	 */
	_XBF_PAGE_LOCKED = (1 << 22),

	/*
	 * If we try a barrier write, but it fails, we have to communicate
	 * this to the upper layers.  Unfortunately b_error gets overwritten
	 * when the buffer is re-issued so we have to add another flag to
	 * keep this information.
	 */
	_XFS_BARRIER_FAILED = (1 << 23),
} xfs_buf_flags_t;
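
/*
 * Illustrative note: callers combine the XBF_* flags above; xfs_buf_read(),
 * for instance, requests XBF_LOCK | XBF_MAPPED so the buffer comes back
 * locked with a valid b_addr.
 */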

typedef enum {
	XBT_FORCE_SLEEP = 0,
	XBT_FORCE_FLUSH = 1,
} xfs_buftarg_flags_t;

typedef struct xfs_bufhash {
	struct list_head	bh_list;
	spinlock_t		bh_lock;
} xfs_bufhash_t;

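/*
 * Per-device buffer target: one of these describes each block device the
 * buffer cache does I/O against, and it owns that device's buffer hash
 * table and delayed write queue (serviced by bt_task).
 */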
typedef struct xfs_buftarg {
	dev_t			bt_dev;
	struct block_device	*bt_bdev;
	struct address_space	*bt_mapping;
	unsigned int		bt_bsize;
	unsigned int		bt_sshift;
	size_t			bt_smask;

	/* per device buffer hash table */
	uint			bt_hashmask;
	uint			bt_hashshift;
	xfs_bufhash_t		*bt_hash;

	/* per device delwri queue */
	struct task_struct	*bt_task;
	struct list_head	bt_list;
	struct list_head	bt_delwrite_queue;
	spinlock_t		bt_delwrite_lock;
	unsigned long		bt_flags;
} xfs_buftarg_t;

/*
 *	xfs_buf_t:  Buffer structure for pagecache-based buffers
 *
 * This buffer structure is used by the pagecache buffer management routines
 * to refer to an assembly of pages forming a logical buffer.
 *
 * The buffer structure is used on a temporary basis only, and discarded when
 * released.  The real data storage is recorded in the pagecache. Buffers are
 * hashed to the block device on which the file system resides.
 */

struct xfs_buf;
typedef void (*xfs_buf_iodone_t)(struct xfs_buf *);
typedef void (*xfs_buf_relse_t)(struct xfs_buf *);
typedef int (*xfs_buf_bdstrat_t)(struct xfs_buf *);

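/*
 * Number of page pointers embedded in the buffer itself; buffers spanning
 * no more than XB_PAGES pages can use b_page_array and avoid a separate
 * allocation for b_pages.
 */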
#define XB_PAGES	2

typedef struct xfs_buf {
	struct semaphore	b_sema;		/* semaphore for lockables */
	unsigned long		b_queuetime;	/* time buffer was queued */
	atomic_t		b_pin_count;	/* pin count */
	wait_queue_head_t	b_waiters;	/* unpin waiters */
	struct list_head	b_list;
	xfs_buf_flags_t		b_flags;	/* status flags */
	struct list_head	b_hash_list;	/* hash table list */
	xfs_bufhash_t		*b_hash;	/* hash table list start */
	xfs_buftarg_t		*b_target;	/* buffer target (device) */
	atomic_t		b_hold;		/* reference count */
	xfs_daddr_t		b_bn;		/* block number for I/O */
	xfs_off_t		b_file_offset;	/* offset in file */
	size_t			b_buffer_length;/* size of buffer in bytes */
	size_t			b_count_desired;/* desired transfer size */
	void			*b_addr;	/* virtual address of buffer */
	struct work_struct	b_iodone_work;
	atomic_t		b_io_remaining;	/* #outstanding I/O requests */
	xfs_buf_iodone_t	b_iodone;	/* I/O completion function */
	xfs_buf_relse_t		b_relse;	/* releasing function */
	xfs_buf_bdstrat_t	b_strat;	/* pre-write function */
	struct completion	b_iowait;	/* queue for I/O waiters */
	void			*b_fspriv;
	void			*b_fspriv2;
	struct xfs_mount	*b_mount;
	unsigned short		b_error;	/* error code on I/O */
	unsigned int		b_page_count;	/* size of page array */
	unsigned int		b_offset;	/* page offset in first page */
	struct page		**b_pages;	/* array of page pointers */
	struct page		*b_page_array[XB_PAGES]; /* inline pages */
#ifdef XFS_BUF_LOCK_TRACKING
	int			b_last_holder;
#endif
} xfs_buf_t;


/* Finding and Reading Buffers */
extern xfs_buf_t *_xfs_buf_find(xfs_buftarg_t *, xfs_off_t, size_t,
				xfs_buf_flags_t, xfs_buf_t *);
#define xfs_incore(buftarg,blkno,len,lockit) \
	_xfs_buf_find(buftarg, blkno, len, lockit, NULL)

extern xfs_buf_t *xfs_buf_get_flags(xfs_buftarg_t *, xfs_off_t, size_t,
				xfs_buf_flags_t);
#define xfs_buf_get(target, blkno, len, flags) \
	xfs_buf_get_flags((target), (blkno), (len), XBF_LOCK | XBF_MAPPED)

extern xfs_buf_t *xfs_buf_read_flags(xfs_buftarg_t *, xfs_off_t, size_t,
				xfs_buf_flags_t);
#define xfs_buf_read(target, blkno, len, flags) \
	xfs_buf_read_flags((target), (blkno), (len), XBF_LOCK | XBF_MAPPED)

extern xfs_buf_t *xfs_buf_get_empty(size_t, xfs_buftarg_t *);
extern xfs_buf_t *xfs_buf_get_noaddr(size_t, xfs_buftarg_t *);
extern int xfs_buf_associate_memory(xfs_buf_t *, void *, size_t);
extern void xfs_buf_hold(xfs_buf_t *);
extern void xfs_buf_readahead(xfs_buftarg_t *, xfs_off_t, size_t,
				xfs_buf_flags_t);
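
/*
 * Illustrative sketch of the common lookup/read pattern (error handling
 * elided); note that the flags argument of the xfs_buf_get()/xfs_buf_read()
 * wrappers above is currently ignored and XBF_LOCK | XBF_MAPPED is always
 * used:
 *
 *	bp = xfs_buf_read(target, blkno, len, XBF_LOCK | XBF_MAPPED);
 *	if (!bp || XFS_BUF_GETERROR(bp))
 *		... handle the error, releasing bp if it was returned ...
 *	... operate on XFS_BUF_PTR(bp) ...
 *	xfs_buf_relse(bp);
 */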

/* Releasing Buffers */
extern void xfs_buf_free(xfs_buf_t *);
extern void xfs_buf_rele(xfs_buf_t *);

/* Locking and Unlocking Buffers */
extern int xfs_buf_cond_lock(xfs_buf_t *);
extern int xfs_buf_lock_value(xfs_buf_t *);
extern void xfs_buf_lock(xfs_buf_t *);
extern void xfs_buf_unlock(xfs_buf_t *);

/* Buffer Read and Write Routines */
extern int xfs_bawrite(void *mp, xfs_buf_t *bp);
extern void xfs_bdwrite(void *mp, xfs_buf_t *bp);
extern void xfs_buf_ioend(xfs_buf_t *, int);
extern void xfs_buf_ioerror(xfs_buf_t *, int);
extern int xfs_buf_iorequest(xfs_buf_t *);
extern int xfs_buf_iowait(xfs_buf_t *);
extern void xfs_buf_iomove(xfs_buf_t *, size_t, size_t, xfs_caddr_t,
				xfs_buf_rw_t);

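/*
 * Issue I/O on a buffer: call the buffer's pre-write strategy routine if
 * one has been set, otherwise send the request to xfs_buf_iorequest().
 */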
static inline int xfs_buf_iostrategy(xfs_buf_t *bp)
{
	return bp->b_strat ? bp->b_strat(bp) : xfs_buf_iorequest(bp);
}

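/*
 * Return the buffer's I/O error; a NULL buffer (failed allocation) is
 * reported as ENOMEM.
 */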
static inline int xfs_buf_geterror(xfs_buf_t *bp)
{
	return bp ? bp->b_error : ENOMEM;
}

/* Buffer Utility Routines */
extern xfs_caddr_t xfs_buf_offset(xfs_buf_t *, size_t);

/* Pinning Buffer Storage in Memory */
extern void xfs_buf_pin(xfs_buf_t *);
extern void xfs_buf_unpin(xfs_buf_t *);
extern int xfs_buf_ispin(xfs_buf_t *);

/* Delayed Write Buffer Routines */
extern void xfs_buf_delwri_dequeue(xfs_buf_t *);

/* Buffer Daemon Setup Routines */
extern int xfs_buf_init(void);
extern void xfs_buf_terminate(void);

#ifdef XFS_BUF_TRACE
extern ktrace_t *xfs_buf_trace_buf;
extern void xfs_buf_trace(xfs_buf_t *, char *, void *, void *);
#else
#define xfs_buf_trace(bp,id,ptr,ra)	do { } while (0)
#endif

#define xfs_buf_target_name(target)	\
	({ char __b[BDEVNAME_SIZE]; bdevname((target)->bt_bdev, __b); __b; })

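/*
 * Compatibility wrappers mapping the older XFS buffer macro names onto the
 * xfs_buf flags and routines declared above.
 */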
#define XFS_B_ASYNC		XBF_ASYNC
#define XFS_B_DELWRI		XBF_DELWRI
#define XFS_B_READ		XBF_READ
#define XFS_B_WRITE		XBF_WRITE
#define XFS_B_STALE		XBF_STALE

#define XFS_BUF_TRYLOCK		XBF_TRYLOCK
#define XFS_INCORE_TRYLOCK	XBF_TRYLOCK
#define XFS_BUF_LOCK		XBF_LOCK
#define XFS_BUF_MAPPED		XBF_MAPPED

#define BUF_BUSY		XBF_DONT_BLOCK

#define XFS_BUF_BFLAGS(bp)	((bp)->b_flags)
#define XFS_BUF_ZEROFLAGS(bp)	((bp)->b_flags &= \
		~(XBF_READ|XBF_WRITE|XBF_ASYNC|XBF_DELWRI|XBF_ORDERED))

#define XFS_BUF_STALE(bp)	((bp)->b_flags |= XFS_B_STALE)
#define XFS_BUF_UNSTALE(bp)	((bp)->b_flags &= ~XFS_B_STALE)
#define XFS_BUF_ISSTALE(bp)	((bp)->b_flags & XFS_B_STALE)
#define XFS_BUF_SUPER_STALE(bp)	do {				\
					XFS_BUF_STALE(bp);	\
					xfs_buf_delwri_dequeue(bp);	\
					XFS_BUF_DONE(bp);	\
				} while (0)

#define XFS_BUF_MANAGE		XBF_FS_MANAGED
#define XFS_BUF_UNMANAGE(bp)	((bp)->b_flags &= ~XBF_FS_MANAGED)

#define XFS_BUF_DELAYWRITE(bp)		((bp)->b_flags |= XBF_DELWRI)
#define XFS_BUF_UNDELAYWRITE(bp)	xfs_buf_delwri_dequeue(bp)
#define XFS_BUF_ISDELAYWRITE(bp)	((bp)->b_flags & XBF_DELWRI)

#define XFS_BUF_ERROR(bp,no)	xfs_buf_ioerror(bp,no)
#define XFS_BUF_GETERROR(bp)	xfs_buf_geterror(bp)
#define XFS_BUF_ISERROR(bp)	(xfs_buf_geterror(bp) ? 1 : 0)

#define XFS_BUF_DONE(bp)	((bp)->b_flags |= XBF_DONE)
#define XFS_BUF_UNDONE(bp)	((bp)->b_flags &= ~XBF_DONE)
#define XFS_BUF_ISDONE(bp)	((bp)->b_flags & XBF_DONE)

#define XFS_BUF_BUSY(bp)	do { } while (0)
#define XFS_BUF_UNBUSY(bp)	do { } while (0)
#define XFS_BUF_ISBUSY(bp)	(1)

#define XFS_BUF_ASYNC(bp)	((bp)->b_flags |= XBF_ASYNC)
#define XFS_BUF_UNASYNC(bp)	((bp)->b_flags &= ~XBF_ASYNC)
#define XFS_BUF_ISASYNC(bp)	((bp)->b_flags & XBF_ASYNC)

#define XFS_BUF_ORDERED(bp)	((bp)->b_flags |= XBF_ORDERED)
#define XFS_BUF_UNORDERED(bp)	((bp)->b_flags &= ~XBF_ORDERED)
#define XFS_BUF_ISORDERED(bp)	((bp)->b_flags & XBF_ORDERED)

#define XFS_BUF_HOLD(bp)	xfs_buf_hold(bp)
#define XFS_BUF_READ(bp)	((bp)->b_flags |= XBF_READ)
#define XFS_BUF_UNREAD(bp)	((bp)->b_flags &= ~XBF_READ)
#define XFS_BUF_ISREAD(bp)	((bp)->b_flags & XBF_READ)

#define XFS_BUF_WRITE(bp)	((bp)->b_flags |= XBF_WRITE)
#define XFS_BUF_UNWRITE(bp)	((bp)->b_flags &= ~XBF_WRITE)
#define XFS_BUF_ISWRITE(bp)	((bp)->b_flags & XBF_WRITE)

#define XFS_BUF_IODONE_FUNC(bp)			((bp)->b_iodone)
#define XFS_BUF_SET_IODONE_FUNC(bp, func)	((bp)->b_iodone = (func))
#define XFS_BUF_CLR_IODONE_FUNC(bp)		((bp)->b_iodone = NULL)
#define XFS_BUF_SET_BDSTRAT_FUNC(bp, func)	((bp)->b_strat = (func))
#define XFS_BUF_CLR_BDSTRAT_FUNC(bp)		((bp)->b_strat = NULL)

#define XFS_BUF_FSPRIVATE(bp, type)		((type)(bp)->b_fspriv)
#define XFS_BUF_SET_FSPRIVATE(bp, val)		((bp)->b_fspriv = (void*)(val))
#define XFS_BUF_FSPRIVATE2(bp, type)		((type)(bp)->b_fspriv2)
#define XFS_BUF_SET_FSPRIVATE2(bp, val)		((bp)->b_fspriv2 = (void*)(val))
#define XFS_BUF_SET_START(bp)			do { } while (0)
#define XFS_BUF_SET_BRELSE_FUNC(bp, func)	((bp)->b_relse = (func))

#define XFS_BUF_PTR(bp)			(xfs_caddr_t)((bp)->b_addr)
#define XFS_BUF_SET_PTR(bp, val, cnt)	xfs_buf_associate_memory(bp, val, cnt)
#define XFS_BUF_ADDR(bp)		((bp)->b_bn)
#define XFS_BUF_SET_ADDR(bp, bno)	((bp)->b_bn = (xfs_daddr_t)(bno))
#define XFS_BUF_OFFSET(bp)		((bp)->b_file_offset)
#define XFS_BUF_SET_OFFSET(bp, off)	((bp)->b_file_offset = (off))
#define XFS_BUF_COUNT(bp)		((bp)->b_count_desired)
#define XFS_BUF_SET_COUNT(bp, cnt)	((bp)->b_count_desired = (cnt))
#define XFS_BUF_SIZE(bp)		((bp)->b_buffer_length)
#define XFS_BUF_SET_SIZE(bp, cnt)	((bp)->b_buffer_length = (cnt))

#define XFS_BUF_SET_VTYPE_REF(bp, type, ref)	do { } while (0)
#define XFS_BUF_SET_VTYPE(bp, type)		do { } while (0)
#define XFS_BUF_SET_REF(bp, ref)		do { } while (0)

#define XFS_BUF_ISPINNED(bp)	xfs_buf_ispin(bp)

#define XFS_BUF_VALUSEMA(bp)	xfs_buf_lock_value(bp)
#define XFS_BUF_CPSEMA(bp)	(xfs_buf_cond_lock(bp) == 0)
#define XFS_BUF_VSEMA(bp)	xfs_buf_unlock(bp)
#define XFS_BUF_PSEMA(bp,x)	xfs_buf_lock(bp)
#define XFS_BUF_FINISH_IOWAIT(bp)	complete(&bp->b_iowait);

#define XFS_BUF_SET_TARGET(bp, target)	((bp)->b_target = (target))
#define XFS_BUF_TARGET(bp)		((bp)->b_target)
#define XFS_BUFTARG_NAME(target)	xfs_buf_target_name(target)

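/*
 * Unlock and release a buffer: the lock is dropped here only when no
 * release callback (b_relse) is registered, as a registered callback is
 * expected to handle the unlock itself; the hold is then dropped via
 * xfs_buf_rele().
 */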
static inline void xfs_buf_relse(xfs_buf_t *bp)
{
	if (!bp->b_relse)
		xfs_buf_unlock(bp);
	xfs_buf_rele(bp);
}

#define xfs_bpin(bp)		xfs_buf_pin(bp)
#define xfs_bunpin(bp)		xfs_buf_unpin(bp)

#define xfs_buftrace(id, bp)	\
	    xfs_buf_trace(bp, id, NULL, (void *)__builtin_return_address(0))

#define xfs_biodone(bp)		xfs_buf_ioend(bp, 0)

#define xfs_biomove(bp, off, len, data, rw) \
	    xfs_buf_iomove((bp), (off), (len), (data), \
		((rw) == XFS_B_WRITE) ? XBRW_WRITE : XBRW_READ)

#define xfs_biozero(bp, off, len) \
	    xfs_buf_iomove((bp), (off), (len), NULL, XBRW_ZERO)

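/*
 * Synchronous/asynchronous buffer write helper: pull the buffer off the
 * delayed write queue and issue the I/O.  Asynchronous writes are flagged
 * to kick the block device task queue; synchronous writes wait for
 * completion, release the buffer and return any I/O error.
 */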
static inline int XFS_bwrite(xfs_buf_t *bp)
{
	int	iowait = (bp->b_flags & XBF_ASYNC) == 0;
	int	error = 0;

	if (!iowait)
		bp->b_flags |= _XBF_RUN_QUEUES;

	xfs_buf_delwri_dequeue(bp);
	xfs_buf_iostrategy(bp);
	if (iowait) {
		error = xfs_buf_iowait(bp);
		xfs_buf_relse(bp);
	}
	return error;
}

#define XFS_bdstrat(bp) xfs_buf_iorequest(bp)

#define xfs_iowait(bp)	xfs_buf_iowait(bp)

#define xfs_baread(target, rablkno, ralen)  \
	xfs_buf_readahead((target), (rablkno), (ralen), XBF_DONT_BLOCK)


/*
 *	Handling of buftargs.
 */
extern xfs_buftarg_t *xfs_alloc_buftarg(struct block_device *, int);
extern void xfs_free_buftarg(struct xfs_mount *, struct xfs_buftarg *);
extern void xfs_wait_buftarg(xfs_buftarg_t *);
extern int xfs_setsize_buftarg(xfs_buftarg_t *, unsigned int, unsigned int);
extern int xfs_flush_buftarg(xfs_buftarg_t *, int);
#ifdef CONFIG_KDB_MODULES
extern struct list_head *xfs_get_buftarg_list(void);
#endif

#define xfs_getsize_buftarg(buftarg)	block_size((buftarg)->bt_bdev)
#define xfs_readonly_buftarg(buftarg)	bdev_read_only((buftarg)->bt_bdev)

#define xfs_binval(buftarg)		xfs_flush_buftarg(buftarg, 1)
#define XFS_bflush(buftarg)		xfs_flush_buftarg(buftarg, 1)

#endif	/* __XFS_BUF_H__ */