/*
 * Copyright (c) 2000-2006 Silicon Graphics, Inc.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
 */
#ifndef __XFS_BMAP_H__
#define	__XFS_BMAP_H__

struct getbmap;
struct xfs_bmbt_irec;
struct xfs_ifork;
struct xfs_inode;
struct xfs_mount;
struct xfs_trans;

extern kmem_zone_t	*xfs_bmap_free_item_zone;

/*
 * List of extents to be freed "later".
 * The list is kept sorted on xbf_startblock.
 */
typedef struct xfs_bmap_free_item
{
	xfs_fsblock_t		xbfi_startblock;/* starting fs block number */
	xfs_extlen_t		xbfi_blockcount;/* number of blocks in extent */
	struct xfs_bmap_free_item *xbfi_next;	/* link to next entry */
} xfs_bmap_free_item_t;

/*
 * Header for free extent list.
 *
 * xbf_low is used by the allocator to activate the lowspace algorithm -
 * when free space is running low the extent allocator may choose to
 * allocate an extent from an AG without leaving sufficient space for
 * a btree split when inserting the new extent.  In this case the allocator
 * will enable the lowspace algorithm which is supposed to allow further
 * allocations (such as btree splits and newroots) to allocate from
 * sequential AGs.  In order to avoid locking AGs out of order the lowspace
 * algorithm will start searching for free space from AG 0.  If the correct
 * transaction reservations have been made then this algorithm will eventually
 * find all the space it needs.
 */
typedef	struct xfs_bmap_free
{
	xfs_bmap_free_item_t	*xbf_first;	/* list of to-be-freed extents */
	int			xbf_count;	/* count of items on list */
	int			xbf_low;	/* alloc in low mode */
} xfs_bmap_free_t;
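
/*
 * Illustrative sketch (not part of the original header): how an item might be
 * inserted so that the list stays sorted on xbfi_startblock.  The real
 * insertion is done by xfs_bmap_add_free() (declared below), which also takes
 * care of allocating the item from xfs_bmap_free_item_zone; the hypothetical
 * helper here only shows the list discipline.
 *
 *	static void example_insert_sorted(xfs_bmap_free_t *flist,
 *					  xfs_bmap_free_item_t *new)
 *	{
 *		xfs_bmap_free_item_t	**prevp = &flist->xbf_first;
 *		xfs_bmap_free_item_t	*cur = flist->xbf_first;
 *
 *		// walk forward until the next item starts at or beyond the
 *		// new extent, then link the new item in before it
 *		while (cur && cur->xbfi_startblock < new->xbfi_startblock) {
 *			prevp = &cur->xbfi_next;
 *			cur = cur->xbfi_next;
 *		}
 *		new->xbfi_next = cur;
 *		*prevp = new;
 *		flist->xbf_count++;
 *	}
 */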

#define	XFS_BMAP_MAX_NMAP	4

/*
 * Flags for xfs_bmapi_*
 */
#define XFS_BMAPI_ENTIRE	0x001	/* return entire extent, not trimmed */
#define XFS_BMAPI_METADATA	0x002	/* mapping metadata not user data */
#define XFS_BMAPI_ATTRFORK	0x004	/* use attribute fork not data */
#define XFS_BMAPI_PREALLOC	0x008	/* preallocation op: unwritten space */
#define XFS_BMAPI_IGSTATE	0x010	/* Ignore state - */
					/* combine contig. space */
#define XFS_BMAPI_CONTIG	0x020	/* must allocate only one extent */
/*
 * unwritten extent conversion - this needs write cache flushing and no
 * additional allocation alignments.  When specified with XFS_BMAPI_PREALLOC
 * it converts from written to unwritten, otherwise it converts from
 * unwritten to written.
 */
#define XFS_BMAPI_CONVERT	0x040

#define XFS_BMAPI_FLAGS \
	{ XFS_BMAPI_ENTIRE,	"ENTIRE" }, \
	{ XFS_BMAPI_METADATA,	"METADATA" }, \
	{ XFS_BMAPI_ATTRFORK,	"ATTRFORK" }, \
	{ XFS_BMAPI_PREALLOC,	"PREALLOC" }, \
	{ XFS_BMAPI_IGSTATE,	"IGSTATE" }, \
	{ XFS_BMAPI_CONTIG,	"CONTIG" }, \
	{ XFS_BMAPI_CONVERT,	"CONVERT" }


static inline int xfs_bmapi_aflag(int w)
{
	return (w == XFS_ATTR_FORK ? XFS_BMAPI_ATTRFORK : 0);
}
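
/*
 * Illustrative sketch (not part of the original header): building a flags
 * word for an xfs_bmapi_* call.  xfs_bmapi_aflag() selects the attribute
 * fork when the caller works on XFS_ATTR_FORK; further XFS_BMAPI_* bits are
 * OR-ed in as needed.  The XFS_BMAPI_FLAGS table above pairs each bit with a
 * name so tracing code can print the flags symbolically.  "whichfork" is
 * assumed to be XFS_DATA_FORK or XFS_ATTR_FORK, as in the prototypes below.
 *
 *	int	flags;
 *
 *	flags = xfs_bmapi_aflag(whichfork);	// 0 or XFS_BMAPI_ATTRFORK
 *	flags |= XFS_BMAPI_METADATA;		// this mapping is metadata
 *	flags |= XFS_BMAPI_PREALLOC;		// allocate as unwritten space
 */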

/*
 * Special values for xfs_bmbt_irec_t br_startblock field.
 */
#define	DELAYSTARTBLOCK		((xfs_fsblock_t)-1LL)
#define	HOLESTARTBLOCK		((xfs_fsblock_t)-2LL)

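/*
 * Illustrative sketch (not part of the original header): interpreting
 * br_startblock in a mapping returned by xfs_bmapi_read() (declared below).
 * Holes and delayed allocations are reported with the special values above;
 * anything else is a real filesystem block number.  "map" is an assumed
 * struct xfs_bmbt_irec filled in by the caller.
 *
 *	if (map.br_startblock == HOLESTARTBLOCK) {
 *		// no blocks allocated for this range
 *	} else if (map.br_startblock == DELAYSTARTBLOCK) {
 *		// delayed allocation, blocks not yet assigned
 *	} else {
 *		// real extent starting at map.br_startblock
 *	}
 */
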
static inline void xfs_bmap_init(xfs_bmap_free_t *flp, xfs_fsblock_t *fbp)
{
	flp->xbf_first = NULL;
	flp->xbf_count = 0;
	flp->xbf_low = 0;
	*fbp = NULLFSBLOCK;
}
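
/*
 * Illustrative sketch (not part of the original header): typical lifecycle of
 * an xfs_bmap_free list around a block mapping operation.  The list is set up
 * with xfs_bmap_init(), extents queued for freeing are added with
 * xfs_bmap_add_free(), and on error the pending frees are discarded with
 * xfs_bmap_cancel() (both declared below).  Committing the queued frees
 * happens in the transaction completion path, which is not declared in this
 * header.  "mp", "bno" and "len" are assumed caller-provided values.
 *
 *	xfs_bmap_free_t	free_list;
 *	xfs_fsblock_t	firstblock;
 *
 *	xfs_bmap_init(&free_list, &firstblock);
 *	xfs_bmap_add_free(bno, len, &free_list, mp);
 *	// ... on failure, drop the queued extents:
 *	xfs_bmap_cancel(&free_list);
 */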

/*
 * Flags for xfs_bmap_add_extent*.
 */
#define BMAP_LEFT_CONTIG	(1 << 0)
#define BMAP_RIGHT_CONTIG	(1 << 1)
#define BMAP_LEFT_FILLING	(1 << 2)
#define BMAP_RIGHT_FILLING	(1 << 3)
#define BMAP_LEFT_DELAY		(1 << 4)
#define BMAP_RIGHT_DELAY	(1 << 5)
#define BMAP_LEFT_VALID		(1 << 6)
#define BMAP_RIGHT_VALID	(1 << 7)
#define BMAP_ATTRFORK		(1 << 8)

#define XFS_BMAP_EXT_FLAGS \
	{ BMAP_LEFT_CONTIG,	"LC" }, \
	{ BMAP_RIGHT_CONTIG,	"RC" }, \
	{ BMAP_LEFT_FILLING,	"LF" }, \
	{ BMAP_RIGHT_FILLING,	"RF" }, \
	{ BMAP_ATTRFORK,	"ATTR" }

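/*
 * Illustrative sketch (not part of the original header): the BMAP_LEFT_* and
 * BMAP_RIGHT_* bits describe the neighbours of the extent being added and are
 * typically accumulated into a single state word and tested together.  A
 * hypothetical test for "merge with both neighbours" might look like:
 *
 *	int	state = 0;
 *
 *	// set while examining the neighbouring extents ...
 *	state |= BMAP_LEFT_VALID | BMAP_LEFT_CONTIG;
 *	state |= BMAP_RIGHT_VALID | BMAP_RIGHT_CONTIG;
 *
 *	if ((state & (BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG)) ==
 *	    (BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG)) {
 *		// new extent is contiguous with both neighbours:
 *		// the three extents can be merged into one
 *	}
 */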

/*
 * This macro determines how many extents are shifted in one write
 * transaction.  A shift could require two splits, an extent move on the
 * first and an extent merge on the second, so it is safest to shift only
 * one extent per write transaction at a time.
 */
#define XFS_BMAP_MAX_SHIFT_EXTENTS	1

#ifdef DEBUG
void	xfs_bmap_trace_exlist(struct xfs_inode *ip, xfs_extnum_t cnt,
		int whichfork, unsigned long caller_ip);
#define	XFS_BMAP_TRACE_EXLIST(ip,c,w)	\
	xfs_bmap_trace_exlist(ip,c,w, _THIS_IP_)
#else
#define	XFS_BMAP_TRACE_EXLIST(ip,c,w)
#endif

int	xfs_bmap_add_attrfork(struct xfs_inode *ip, int size, int rsvd);
void	xfs_bmap_local_to_extents_empty(struct xfs_inode *ip, int whichfork);
void	xfs_bmap_add_free(xfs_fsblock_t bno, xfs_filblks_t len,
		struct xfs_bmap_free *flist, struct xfs_mount *mp);
void	xfs_bmap_cancel(struct xfs_bmap_free *flist);
void	xfs_bmap_compute_maxlevels(struct xfs_mount *mp, int whichfork);
int	xfs_bmap_first_unused(struct xfs_trans *tp, struct xfs_inode *ip,
		xfs_extlen_t len, xfs_fileoff_t *unused, int whichfork);
int	xfs_bmap_last_before(struct xfs_trans *tp, struct xfs_inode *ip,
		xfs_fileoff_t *last_block, int whichfork);
int	xfs_bmap_last_offset(struct xfs_inode *ip, xfs_fileoff_t *unused,
		int whichfork);
int	xfs_bmap_one_block(struct xfs_inode *ip, int whichfork);
int	xfs_bmap_read_extents(struct xfs_trans *tp, struct xfs_inode *ip,
		int whichfork);
int	xfs_bmapi_read(struct xfs_inode *ip, xfs_fileoff_t bno,
		xfs_filblks_t len, struct xfs_bmbt_irec *mval,
		int *nmap, int flags);
int	xfs_bmapi_delay(struct xfs_inode *ip, xfs_fileoff_t bno,
		xfs_filblks_t len, struct xfs_bmbt_irec *mval,
		int *nmap, int flags);
int	xfs_bmapi_write(struct xfs_trans *tp, struct xfs_inode *ip,
		xfs_fileoff_t bno, xfs_filblks_t len, int flags,
		xfs_fsblock_t *firstblock, xfs_extlen_t total,
		struct xfs_bmbt_irec *mval, int *nmap,
		struct xfs_bmap_free *flist);
int	xfs_bunmapi(struct xfs_trans *tp, struct xfs_inode *ip,
		xfs_fileoff_t bno, xfs_filblks_t len, int flags,
		xfs_extnum_t nexts, xfs_fsblock_t *firstblock,
		struct xfs_bmap_free *flist, int *done);
int	xfs_check_nostate_extents(struct xfs_ifork *ifp, xfs_extnum_t idx,
		xfs_extnum_t num);
uint	xfs_default_attroffset(struct xfs_inode *ip);
int	xfs_bmap_shift_extents(struct xfs_trans *tp, struct xfs_inode *ip,
		xfs_fileoff_t start_fsb, xfs_fileoff_t offset_shift_fsb,
		int *done, xfs_fileoff_t *next_fsb, xfs_fsblock_t *firstblock,
		struct xfs_bmap_free *flist, int num_exts);
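
/*
 * Illustrative sketch (not part of the original header): a typical read of
 * the block mappings covering a file range.  "ip", "offset_fsb" and
 * "count_fsb" are assumed to be set up by the caller; XFS_BMAP_MAX_NMAP
 * bounds how many mappings are returned per call.
 *
 *	struct xfs_bmbt_irec	map[XFS_BMAP_MAX_NMAP];
 *	int			nmap = XFS_BMAP_MAX_NMAP;
 *	int			error;
 *
 *	error = xfs_bmapi_read(ip, offset_fsb, count_fsb, map, &nmap,
 *			       XFS_BMAPI_ENTIRE);
 *	if (!error) {
 *		// nmap now holds the number of mappings actually returned;
 *		// inspect map[0..nmap-1].br_startblock as described above
 *	}
 */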

#endif	/* __XFS_BMAP_H__ */