/*
 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
 */
#ifndef __XFS_BUF_H__
#define __XFS_BUF_H__

#include <linux/list.h>
#include <linux/types.h>
#include <linux/spinlock.h>
#include <linux/mm.h>
#include <linux/fs.h>
#include <linux/buffer_head.h>
#include <linux/uio.h>

/*
 *	Base types
 */

#define XFS_BUF_DADDR_NULL	((xfs_daddr_t) (-1LL))

#define xfs_buf_ctob(pp)	((pp) * PAGE_CACHE_SIZE)
#define xfs_buf_btoc(dd)	(((dd) + PAGE_CACHE_SIZE-1) >> PAGE_CACHE_SHIFT)
#define xfs_buf_btoct(dd)	((dd) >> PAGE_CACHE_SHIFT)
#define xfs_buf_poff(aa)	((aa) & ~PAGE_CACHE_MASK)
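
/*
 * Illustrative values only, assuming a 4 KiB PAGE_CACHE_SIZE
 * (PAGE_CACHE_SHIFT == 12):
 *
 *	xfs_buf_ctob(3)		== 12288	pages to bytes
 *	xfs_buf_btoc(6000)	== 2		bytes to pages, rounded up
 *	xfs_buf_btoct(6000)	== 1		bytes to pages, truncated
 *	xfs_buf_poff(6000)	== 1904		byte offset within the page
 */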

typedef enum {
	XBRW_READ = 1,			/* transfer into target memory */
	XBRW_WRITE = 2,			/* transfer from target memory */
	XBRW_ZERO = 3,			/* Zero target memory */
} xfs_buf_rw_t;

#define XBF_READ	(1 << 0) /* buffer intended for reading from device */
#define XBF_WRITE	(1 << 1) /* buffer intended for writing to device */
#define XBF_READ_AHEAD	(1 << 2) /* asynchronous read-ahead */
#define XBF_MAPPED	(1 << 3) /* buffer mapped (b_addr valid) */
#define XBF_ASYNC	(1 << 4) /* initiator will not wait for completion */
#define XBF_DONE	(1 << 5) /* all pages in the buffer uptodate */
#define XBF_DELWRI	(1 << 6) /* buffer has dirty pages */
#define XBF_STALE	(1 << 7) /* buffer has been staled, do not find it */

/* I/O hints for the BIO layer */
#define XBF_SYNCIO	(1 << 10)/* treat this buffer as synchronous I/O */
#define XBF_FUA		(1 << 11)/* force cache write through mode */
#define XBF_FLUSH	(1 << 12)/* flush the disk cache before a write */

/* flags used only as arguments to access routines */
#define XBF_LOCK	(1 << 15)/* lock requested */
#define XBF_TRYLOCK	(1 << 16)/* lock requested, but do not wait */
#define XBF_DONT_BLOCK	(1 << 17)/* do not block in current thread */

/* flags used only internally */
#define _XBF_PAGES	(1 << 20)/* backed by refcounted pages */
#define _XBF_KMEM	(1 << 21)/* backed by heap memory */
#define _XBF_DELWRI_Q	(1 << 22)/* buffer on delwri queue */

typedef unsigned int xfs_buf_flags_t;

#define XFS_BUF_FLAGS \
	{ XBF_READ,		"READ" }, \
	{ XBF_WRITE,		"WRITE" }, \
	{ XBF_READ_AHEAD,	"READ_AHEAD" }, \
	{ XBF_MAPPED,		"MAPPED" }, \
	{ XBF_ASYNC,		"ASYNC" }, \
	{ XBF_DONE,		"DONE" }, \
	{ XBF_DELWRI,		"DELWRI" }, \
	{ XBF_STALE,		"STALE" }, \
	{ XBF_SYNCIO,		"SYNCIO" }, \
	{ XBF_FUA,		"FUA" }, \
	{ XBF_FLUSH,		"FLUSH" }, \
	{ XBF_LOCK,		"LOCK" },	/* should never be set */\
	{ XBF_TRYLOCK,		"TRYLOCK" },	/* ditto */\
	{ XBF_DONT_BLOCK,	"DONT_BLOCK" },	/* ditto */\
	{ _XBF_PAGES,		"PAGES" }, \
	{ _XBF_KMEM,		"KMEM" }, \
	{ _XBF_DELWRI_Q,	"DELWRI_Q" }
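
/*
 * The flag/name pairs above exist so trace output can decode b_flags
 * symbolically.  A sketch of the intended use with the generic tracepoint
 * helper __print_flags() (the event layout here is hypothetical):
 *
 *	TP_printk("flags %s",
 *		  __print_flags(__entry->flags, "|", XFS_BUF_FLAGS))
 */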

typedef enum {
	XBT_FORCE_FLUSH = 0,
} xfs_buftarg_flags_t;

typedef struct xfs_buftarg {
	dev_t			bt_dev;
	struct block_device	*bt_bdev;
	struct backing_dev_info	*bt_bdi;
	struct xfs_mount	*bt_mount;
	unsigned int		bt_bsize;
	unsigned int		bt_sshift;
	size_t			bt_smask;

	/* per device delwri queue */
	struct task_struct	*bt_task;
	struct list_head	bt_delwri_queue;
	spinlock_t		bt_delwri_lock;
	unsigned long		bt_flags;

	/* LRU control structures */
	struct shrinker		bt_shrinker;
	struct list_head	bt_lru;
	spinlock_t		bt_lru_lock;
	unsigned int		bt_lru_nr;
} xfs_buftarg_t;

struct xfs_buf;
typedef void (*xfs_buf_iodone_t)(struct xfs_buf *);

#define XB_PAGES	2
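
/*
 * XB_PAGES sizes the small b_page_array embedded in struct xfs_buf below;
 * buffers spanning no more than this many pages can point b_pages at the
 * inline array rather than allocating a separate page pointer array.
 */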

typedef struct xfs_buf {
	/*
	 * The first cacheline holds all the fields needed for an uncontended
	 * cache hit to be fully processed.  The semaphore straddles the
	 * cacheline boundary, but the counter and lock sit on the first
	 * cacheline, which is the only part that is touched when we hit the
	 * semaphore fast-path on locking.
	 */
	struct rb_node		b_rbnode;	/* rbtree node */
	xfs_off_t		b_file_offset;	/* offset in file */
	size_t			b_buffer_length;/* size of buffer in bytes */
	atomic_t		b_hold;		/* reference count */
	atomic_t		b_lru_ref;	/* lru reclaim ref count */
	xfs_buf_flags_t		b_flags;	/* status flags */
	struct semaphore	b_sema;		/* semaphore for lockables */

	struct list_head	b_lru;		/* lru list */
	wait_queue_head_t	b_waiters;	/* unpin waiters */
	struct list_head	b_list;
	struct xfs_perag	*b_pag;		/* contains rbtree root */
	xfs_buftarg_t		*b_target;	/* buffer target (device) */
	xfs_daddr_t		b_bn;		/* block number for I/O */
	size_t			b_count_desired;/* desired transfer size */
	void			*b_addr;	/* virtual address of buffer */
	struct work_struct	b_iodone_work;
	xfs_buf_iodone_t	b_iodone;	/* I/O completion function */
	struct completion	b_iowait;	/* queue for I/O waiters */
	void			*b_fspriv;
	struct xfs_trans	*b_transp;
	struct page		**b_pages;	/* array of page pointers */
	struct page		*b_page_array[XB_PAGES]; /* inline pages */
	unsigned long		b_queuetime;	/* time buffer was queued */
	atomic_t		b_pin_count;	/* pin count */
	atomic_t		b_io_remaining;	/* #outstanding I/O requests */
	unsigned int		b_page_count;	/* size of page array */
	unsigned int		b_offset;	/* page offset in first page */
	unsigned short		b_error;	/* error code on I/O */
#ifdef XFS_BUF_LOCK_TRACKING
	int			b_last_holder;
#endif
} xfs_buf_t;


/* Finding and Reading Buffers */
extern xfs_buf_t *_xfs_buf_find(xfs_buftarg_t *, xfs_off_t, size_t,
				xfs_buf_flags_t, xfs_buf_t *);
#define xfs_incore(buftarg,blkno,len,lockit) \
	_xfs_buf_find(buftarg, blkno, len, lockit, NULL)

extern xfs_buf_t *xfs_buf_get(xfs_buftarg_t *, xfs_off_t, size_t,
				xfs_buf_flags_t);
extern xfs_buf_t *xfs_buf_read(xfs_buftarg_t *, xfs_off_t, size_t,
				xfs_buf_flags_t);

struct xfs_buf *xfs_buf_alloc(struct xfs_buftarg *, xfs_off_t, size_t,
			      xfs_buf_flags_t);
extern void xfs_buf_set_empty(struct xfs_buf *bp, size_t len);
extern xfs_buf_t *xfs_buf_get_uncached(struct xfs_buftarg *, size_t, int);
extern int xfs_buf_associate_memory(xfs_buf_t *, void *, size_t);
extern void xfs_buf_hold(xfs_buf_t *);
extern void xfs_buf_readahead(xfs_buftarg_t *, xfs_off_t, size_t);
struct xfs_buf *xfs_buf_read_uncached(struct xfs_mount *mp,
				struct xfs_buftarg *target,
				xfs_daddr_t daddr, size_t length, int flags);

/* Releasing Buffers */
extern void xfs_buf_free(xfs_buf_t *);
extern void xfs_buf_rele(xfs_buf_t *);

/* Locking and Unlocking Buffers */
extern int xfs_buf_trylock(xfs_buf_t *);
extern void xfs_buf_lock(xfs_buf_t *);
extern void xfs_buf_unlock(xfs_buf_t *);
#define xfs_buf_islocked(bp) \
	((bp)->b_sema.count <= 0)

/* Buffer Read and Write Routines */
extern int xfs_bwrite(struct xfs_buf *bp);

extern void xfsbdstrat(struct xfs_mount *, struct xfs_buf *);
extern int xfs_bdstrat_cb(struct xfs_buf *);

extern void xfs_buf_ioend(xfs_buf_t *, int);
extern void xfs_buf_ioerror(xfs_buf_t *, int);
extern void xfs_buf_ioerror_alert(struct xfs_buf *, const char *func);
extern int xfs_buf_iorequest(xfs_buf_t *);
extern int xfs_buf_iowait(xfs_buf_t *);
extern void xfs_buf_iomove(xfs_buf_t *, size_t, size_t, void *,
				xfs_buf_rw_t);
#define xfs_buf_zero(bp, off, len) \
	    xfs_buf_iomove((bp), (off), (len), NULL, XBRW_ZERO)

static inline int xfs_buf_geterror(xfs_buf_t *bp)
{
	return bp ? bp->b_error : ENOMEM;
}

/* Buffer Utility Routines */
extern xfs_caddr_t xfs_buf_offset(xfs_buf_t *, size_t);

/* Delayed Write Buffer Routines */
extern void xfs_buf_delwri_queue(struct xfs_buf *);
extern void xfs_buf_delwri_dequeue(struct xfs_buf *);
extern void xfs_buf_delwri_promote(struct xfs_buf *);

/* Buffer Daemon Setup Routines */
extern int xfs_buf_init(void);
extern void xfs_buf_terminate(void);

#define XFS_BUF_ZEROFLAGS(bp) \
	((bp)->b_flags &= ~(XBF_READ|XBF_WRITE|XBF_ASYNC|XBF_DELWRI| \
			    XBF_SYNCIO|XBF_FUA|XBF_FLUSH))

void xfs_buf_stale(struct xfs_buf *bp);
#define XFS_BUF_UNSTALE(bp)	((bp)->b_flags &= ~XBF_STALE)
#define XFS_BUF_ISSTALE(bp)	((bp)->b_flags & XBF_STALE)

#define XFS_BUF_ISDELAYWRITE(bp)	((bp)->b_flags & XBF_DELWRI)

#define XFS_BUF_DONE(bp)	((bp)->b_flags |= XBF_DONE)
#define XFS_BUF_UNDONE(bp)	((bp)->b_flags &= ~XBF_DONE)
#define XFS_BUF_ISDONE(bp)	((bp)->b_flags & XBF_DONE)

#define XFS_BUF_ASYNC(bp)	((bp)->b_flags |= XBF_ASYNC)
#define XFS_BUF_UNASYNC(bp)	((bp)->b_flags &= ~XBF_ASYNC)
#define XFS_BUF_ISASYNC(bp)	((bp)->b_flags & XBF_ASYNC)

#define XFS_BUF_READ(bp)	((bp)->b_flags |= XBF_READ)
#define XFS_BUF_UNREAD(bp)	((bp)->b_flags &= ~XBF_READ)
#define XFS_BUF_ISREAD(bp)	((bp)->b_flags & XBF_READ)

#define XFS_BUF_WRITE(bp)	((bp)->b_flags |= XBF_WRITE)
#define XFS_BUF_UNWRITE(bp)	((bp)->b_flags &= ~XBF_WRITE)
#define XFS_BUF_ISWRITE(bp)	((bp)->b_flags & XBF_WRITE)

#define XFS_BUF_ADDR(bp)		((bp)->b_bn)
#define XFS_BUF_SET_ADDR(bp, bno)	((bp)->b_bn = (xfs_daddr_t)(bno))
#define XFS_BUF_OFFSET(bp)		((bp)->b_file_offset)
#define XFS_BUF_SET_OFFSET(bp, off)	((bp)->b_file_offset = (off))
#define XFS_BUF_COUNT(bp)		((bp)->b_count_desired)
#define XFS_BUF_SET_COUNT(bp, cnt)	((bp)->b_count_desired = (cnt))
#define XFS_BUF_SIZE(bp)		((bp)->b_buffer_length)
#define XFS_BUF_SET_SIZE(bp, cnt)	((bp)->b_buffer_length = (cnt))

static inline void xfs_buf_set_ref(struct xfs_buf *bp, int lru_ref)
{
	atomic_set(&bp->b_lru_ref, lru_ref);
}

static inline int xfs_buf_ispinned(struct xfs_buf *bp)
{
	return atomic_read(&bp->b_pin_count);
}

static inline void xfs_buf_relse(xfs_buf_t *bp)
{
	xfs_buf_unlock(bp);
	xfs_buf_rele(bp);
}
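
/*
 * A sketch of typical synchronous use of the routines above; target, blkno,
 * numblks and flags stand in for real values and are not defined in this
 * header:
 *
 *	bp = xfs_buf_read(target, blkno, numblks, flags);
 *	if (!bp)
 *		return ENOMEM;
 *	error = xfs_buf_geterror(bp);
 *	if (!error) {
 *		(operate on bp->b_addr, valid while XBF_MAPPED is set)
 *	}
 *	xfs_buf_relse(bp);	(drops the lock and the hold reference)
 */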

/*
 *	Handling of buftargs.
 */
extern xfs_buftarg_t *xfs_alloc_buftarg(struct xfs_mount *,
			struct block_device *, int, const char *);
extern void xfs_free_buftarg(struct xfs_mount *, struct xfs_buftarg *);
extern void xfs_wait_buftarg(xfs_buftarg_t *);
extern int xfs_setsize_buftarg(xfs_buftarg_t *, unsigned int, unsigned int);
extern int xfs_flush_buftarg(xfs_buftarg_t *, int);

#define xfs_getsize_buftarg(buftarg)	block_size((buftarg)->bt_bdev)
#define xfs_readonly_buftarg(buftarg)	bdev_read_only((buftarg)->bt_bdev)

#endif	/* __XFS_BUF_H__ */