/*
 * ext4_jbd2.h
 *
 * Written by Stephen C. Tweedie <sct@redhat.com>, 1999
 *
 * Copyright 1998--1999 Red Hat corp --- All Rights Reserved
 *
 * This file is part of the Linux kernel and is made available under
 * the terms of the GNU General Public License, version 2, or at your
 * option, any later version, incorporated herein by reference.
 *
 * Ext4-specific journaling extensions.
 */

#ifndef _EXT4_JBD2_H
#define _EXT4_JBD2_H

#include <linux/fs.h>
#include <linux/jbd2.h>
#include "ext4.h"

#define EXT4_JOURNAL(inode)	(EXT4_SB((inode)->i_sb)->s_journal)

/* Define the number of blocks we need to account to a transaction to
 * modify one block of data.
 *
 * We may have to touch one inode, one bitmap buffer, up to three
 * indirection blocks, the group and superblock summaries, and the data
 * block to complete the transaction.
 *
 * For an extents-enabled fs we may have to allocate and modify up to
 * 5 levels of tree plus the data block (for each of these we need a
 * bitmap block and a group summary), plus the root, which is stored in
 * the inode, and the superblock.
 */

#define EXT4_SINGLEDATA_TRANS_BLOCKS(sb)				\
	(EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_EXTENTS)   \
	 ? 20U : 8U)

/* Extended attribute operations touch at most two data buffers,
 * two bitmap buffers, and two group summaries, in addition to the inode
 * and the superblock, which are already accounted for. */

#define EXT4_XATTR_TRANS_BLOCKS		6U

/* Define the minimum size for a transaction which modifies data.  This
 * needs to take into account the fact that we may end up modifying two
 * quota files too (one for the group, one for the user quota).  The
 * superblock only gets updated once, of course, so don't bother
 * counting that again for the quota updates. */

#define EXT4_DATA_TRANS_BLOCKS(sb)	(EXT4_SINGLEDATA_TRANS_BLOCKS(sb) + \
					 EXT4_XATTR_TRANS_BLOCKS - 2 + \
					 EXT4_MAXQUOTAS_TRANS_BLOCKS(sb))

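/*
 * Worked example (illustrative only): on an extents-enabled filesystem
 * with quota support compiled out, EXT4_DATA_TRANS_BLOCKS(sb) evaluates
 * to 20 + 6 - 2 + 0 = 24 credits per modified data block.
 */
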
/*
 * Define the number of metadata blocks we need to account to modify data.
 *
 * This includes the superblock, inode block, quota blocks and xattr blocks.
 */
#define EXT4_META_TRANS_BLOCKS(sb)	(EXT4_XATTR_TRANS_BLOCKS + \
					EXT4_MAXQUOTAS_TRANS_BLOCKS(sb))

/* Define an arbitrary limit for the amount of data we will anticipate
 * writing to any given transaction.  For unbounded transactions such as
 * write(2) and truncate(2) we can write more than this, but we always
 * start off at the maximum transaction size and grow the transaction
 * optimistically as we go. */

#define EXT4_MAX_TRANS_DATA		64U

/* We break up a large truncate or write transaction once the handle's
 * buffer credits get this low; at that point we need either to extend
 * the transaction or to start a new one.  Reserve enough space here for
 * inode, bitmap, superblock, group and indirection updates for at least
 * one block, plus two quota updates.  Quota allocations are not
 * needed. */

#define EXT4_RESERVE_TRANS_BLOCKS	12U

#define EXT4_INDEX_EXTRA_TRANS_BLOCKS	8

#ifdef CONFIG_QUOTA
/* Amount of blocks needed for quota update - we know that the structure was
 * allocated so we need to update only the data block */
#define EXT4_QUOTA_TRANS_BLOCKS(sb) ((test_opt(sb, QUOTA) ||\
		EXT4_HAS_RO_COMPAT_FEATURE(sb, EXT4_FEATURE_RO_COMPAT_QUOTA)) ?\
		1 : 0)
/* Amount of blocks needed for quota insert/delete - we do some block writes
 * but inode, sb and group updates are done only once */
#define EXT4_QUOTA_INIT_BLOCKS(sb) ((test_opt(sb, QUOTA) ||\
		EXT4_HAS_RO_COMPAT_FEATURE(sb, EXT4_FEATURE_RO_COMPAT_QUOTA)) ?\
		(DQUOT_INIT_ALLOC*(EXT4_SINGLEDATA_TRANS_BLOCKS(sb)-3)\
		 +3+DQUOT_INIT_REWRITE) : 0)

#define EXT4_QUOTA_DEL_BLOCKS(sb) ((test_opt(sb, QUOTA) ||\
		EXT4_HAS_RO_COMPAT_FEATURE(sb, EXT4_FEATURE_RO_COMPAT_QUOTA)) ?\
		(DQUOT_DEL_ALLOC*(EXT4_SINGLEDATA_TRANS_BLOCKS(sb)-3)\
		 +3+DQUOT_DEL_REWRITE) : 0)
#else
#define EXT4_QUOTA_TRANS_BLOCKS(sb) 0
#define EXT4_QUOTA_INIT_BLOCKS(sb) 0
#define EXT4_QUOTA_DEL_BLOCKS(sb) 0
#endif
#define EXT4_MAXQUOTAS_TRANS_BLOCKS(sb) (EXT4_MAXQUOTAS*EXT4_QUOTA_TRANS_BLOCKS(sb))
#define EXT4_MAXQUOTAS_INIT_BLOCKS(sb) (EXT4_MAXQUOTAS*EXT4_QUOTA_INIT_BLOCKS(sb))
#define EXT4_MAXQUOTAS_DEL_BLOCKS(sb) (EXT4_MAXQUOTAS*EXT4_QUOTA_DEL_BLOCKS(sb))

static inline int ext4_jbd2_credits_xattr(struct inode *inode)
{
	int credits = EXT4_DATA_TRANS_BLOCKS(inode->i_sb);

	/*
	 * In case of inline data, we may push out the data to a block,
	 * so we need to reserve credits for this eventuality
	 */
	if (ext4_has_inline_data(inode))
		credits += ext4_writepage_trans_blocks(inode) + 1;
	return credits;
}


/*
 * Ext4 handle operation types -- for logging purposes
 */
#define EXT4_HT_MISC             0
#define EXT4_HT_INODE            1
#define EXT4_HT_WRITE_PAGE       2
#define EXT4_HT_MAP_BLOCKS       3
#define EXT4_HT_DIR              4
#define EXT4_HT_TRUNCATE         5
#define EXT4_HT_QUOTA            6
#define EXT4_HT_RESIZE           7
#define EXT4_HT_MIGRATE          8
#define EXT4_HT_MOVE_EXTENTS     9
#define EXT4_HT_XATTR           10
#define EXT4_HT_EXT_CONVERT     11
#define EXT4_HT_MAX             12

/**
 *   struct ext4_journal_cb_entry - Base structure for callback information.
 *
 *   This struct is a 'seed' structure for use with your own callback
 *   structs. If you are using callbacks you must allocate one of these
 *   or another struct of your own definition which has this struct
 *   as its first element and pass it to ext4_journal_callback_add().
 */
struct ext4_journal_cb_entry {
	/* list information for other callbacks attached to the same handle */
	struct list_head jce_list;

	/*  Function to call with this callback structure */
	void (*jce_func)(struct super_block *sb,
			 struct ext4_journal_cb_entry *jce, int error);

	/* user data goes here */
};

/**
 * ext4_journal_callback_add: add a function to call after transaction commit
 * @handle: active journal transaction handle to register callback on
 * @func: callback function to call after the transaction has committed:
 *        @sb: superblock of current filesystem for transaction
 *        @jce: returned journal callback data
 *        @rc: journal state at commit (0 = transaction committed properly)
 * @jce: journal callback data (internal and function private data struct)
 *
 * The registered function will be called in the context of the journal thread
 * after the transaction for which the handle was created has completed.
 *
 * No locks are held when the callback function is called, so it is safe to
 * call blocking functions from within the callback, but the callback should
 * not block or run for too long, or the filesystem will be blocked waiting for
 * the next transaction to commit. No journaling functions can be used, or
 * there is a risk of deadlock.
 *
 * There is no guaranteed calling order of multiple registered callbacks on
 * the same transaction.
 */
static inline void ext4_journal_callback_add(handle_t *handle,
			void (*func)(struct super_block *sb,
				     struct ext4_journal_cb_entry *jce,
				     int rc),
			struct ext4_journal_cb_entry *jce)
{
	struct ext4_sb_info *sbi =
			EXT4_SB(handle->h_transaction->t_journal->j_private);

	/* Add the jce to transaction's private list */
	jce->jce_func = func;
	spin_lock(&sbi->s_md_lock);
	list_add_tail(&jce->jce_list, &handle->h_transaction->t_private_list);
	spin_unlock(&sbi->s_md_lock);
}

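/*
 * Illustrative sketch of a caller-defined callback (hypothetical names,
 * error handling elided): embed the entry as the first member of your own
 * struct and recover the private data with container_of() in the callback.
 *
 *	struct my_commit_cb {
 *		struct ext4_journal_cb_entry jce;	(must be first)
 *		ext4_group_t group;			(caller's private data)
 *	};
 *
 *	static void my_commit_func(struct super_block *sb,
 *				   struct ext4_journal_cb_entry *jce, int rc)
 *	{
 *		struct my_commit_cb *cb = container_of(jce,
 *						struct my_commit_cb, jce);
 *		... act on cb->group now that the transaction has committed ...
 *	}
 *
 *	ext4_journal_callback_add(handle, my_commit_func, &cb->jce);
 */
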
/**
 * ext4_journal_callback_try_del: delete a registered callback
 * @handle: active journal transaction handle on which callback was registered
 * @jce: registered journal callback entry to unregister
 * Return true if object was successfully removed
 */
static inline bool ext4_journal_callback_try_del(handle_t *handle,
					     struct ext4_journal_cb_entry *jce)
{
	bool deleted;
	struct ext4_sb_info *sbi =
			EXT4_SB(handle->h_transaction->t_journal->j_private);

	spin_lock(&sbi->s_md_lock);
	deleted = !list_empty(&jce->jce_list);
	list_del_init(&jce->jce_list);
	spin_unlock(&sbi->s_md_lock);
	return deleted;
}

int
ext4_mark_iloc_dirty(handle_t *handle,
		     struct inode *inode,
		     struct ext4_iloc *iloc);

/*
 * On success, we end up with an outstanding reference count against
 * iloc->bh.  This _must_ be cleaned up later.
 */

int ext4_reserve_inode_write(handle_t *handle, struct inode *inode,
			struct ext4_iloc *iloc);

int ext4_mark_inode_dirty(handle_t *handle, struct inode *inode);

/*
 * Wrapper functions with which ext4 calls into JBD.
 */
int __ext4_journal_get_write_access(const char *where, unsigned int line,
				    handle_t *handle, struct buffer_head *bh);

int __ext4_forget(const char *where, unsigned int line, handle_t *handle,
		  int is_metadata, struct inode *inode,
		  struct buffer_head *bh, ext4_fsblk_t blocknr);

int __ext4_journal_get_create_access(const char *where, unsigned int line,
				handle_t *handle, struct buffer_head *bh);

int __ext4_handle_dirty_metadata(const char *where, unsigned int line,
				 handle_t *handle, struct inode *inode,
				 struct buffer_head *bh);

int __ext4_handle_dirty_super(const char *where, unsigned int line,
			      handle_t *handle, struct super_block *sb);

#define ext4_journal_get_write_access(handle, bh) \
	__ext4_journal_get_write_access(__func__, __LINE__, (handle), (bh))
#define ext4_forget(handle, is_metadata, inode, bh, block_nr) \
	__ext4_forget(__func__, __LINE__, (handle), (is_metadata), (inode), \
		      (bh), (block_nr))
#define ext4_journal_get_create_access(handle, bh) \
	__ext4_journal_get_create_access(__func__, __LINE__, (handle), (bh))
#define ext4_handle_dirty_metadata(handle, inode, bh) \
	__ext4_handle_dirty_metadata(__func__, __LINE__, (handle), (inode), \
				     (bh))
#define ext4_handle_dirty_super(handle, sb) \
	__ext4_handle_dirty_super(__func__, __LINE__, (handle), (sb))

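/*
 * Illustrative sketch (hypothetical caller code, error handling abbreviated)
 * of the usual sequence for modifying a journaled metadata buffer under a
 * handle: get write access first, modify, then mark the buffer dirty.
 *
 *	err = ext4_journal_get_write_access(handle, bh);
 *	if (err)
 *		goto out;
 *	... modify bh->b_data ...
 *	err = ext4_handle_dirty_metadata(handle, inode, bh);
 */
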
handle_t *__ext4_journal_start_sb(struct super_block *sb, unsigned int line,
				  int type, int blocks, int rsv_blocks);
int __ext4_journal_stop(const char *where, unsigned int line, handle_t *handle);

#define EXT4_NOJOURNAL_MAX_REF_COUNT ((unsigned long) 4096)

/* Note:  Do not use this for NULL handles.  This is only to determine if
 * a properly allocated handle is using a journal or not. */
static inline int ext4_handle_valid(handle_t *handle)
{
	if ((unsigned long)handle < EXT4_NOJOURNAL_MAX_REF_COUNT)
		return 0;
	return 1;
}

static inline void ext4_handle_sync(handle_t *handle)
{
	if (ext4_handle_valid(handle))
		handle->h_sync = 1;
}

static inline int ext4_handle_is_aborted(handle_t *handle)
{
	if (ext4_handle_valid(handle))
		return is_handle_aborted(handle);
	return 0;
}

static inline int ext4_handle_has_enough_credits(handle_t *handle, int needed)
{
	if (ext4_handle_valid(handle) && handle->h_buffer_credits < needed)
		return 0;
	return 1;
}

#define ext4_journal_start_sb(sb, type, nblocks)			\
	__ext4_journal_start_sb((sb), __LINE__, (type), (nblocks), 0)

#define ext4_journal_start(inode, type, nblocks)			\
	__ext4_journal_start((inode), __LINE__, (type), (nblocks), 0)

#define ext4_journal_start_with_reserve(inode, type, blocks, rsv_blocks) \
	__ext4_journal_start((inode), __LINE__, (type), (blocks), (rsv_blocks))

static inline handle_t *__ext4_journal_start(struct inode *inode,
					     unsigned int line, int type,
					     int blocks, int rsv_blocks)
{
	return __ext4_journal_start_sb(inode->i_sb, line, type, blocks,
				       rsv_blocks);
}

#define ext4_journal_stop(handle) \
	__ext4_journal_stop(__func__, __LINE__, (handle))

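/*
 * Illustrative usage sketch (hypothetical caller code, error handling
 * abbreviated): start a handle with enough credits for one data block,
 * do the metadata updates, then stop the handle.
 *
 *	handle = ext4_journal_start(inode, EXT4_HT_INODE,
 *				    EXT4_DATA_TRANS_BLOCKS(inode->i_sb));
 *	if (IS_ERR(handle))
 *		return PTR_ERR(handle);
 *	... modify metadata under the handle ...
 *	ext4_journal_stop(handle);
 */
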
#define ext4_journal_start_reserved(handle, type) \
	__ext4_journal_start_reserved((handle), __LINE__, (type))

handle_t *__ext4_journal_start_reserved(handle_t *handle, unsigned int line,
					int type);

static inline void ext4_journal_free_reserved(handle_t *handle)
{
	if (ext4_handle_valid(handle))
		jbd2_journal_free_reserved(handle);
}

static inline handle_t *ext4_journal_current_handle(void)
{
	return journal_current_handle();
}

static inline int ext4_journal_extend(handle_t *handle, int nblocks)
{
	if (ext4_handle_valid(handle))
		return jbd2_journal_extend(handle, nblocks);
	return 0;
}

static inline int ext4_journal_restart(handle_t *handle, int nblocks)
{
	if (ext4_handle_valid(handle))
		return jbd2_journal_restart(handle, nblocks);
	return 0;
}

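/*
 * Illustrative sketch of the common credit top-up pattern used by long
 * operations such as truncate (hypothetical caller code, error handling
 * abbreviated): try to extend the running handle, and if the journal
 * cannot grant more credits (extend returns > 0), restart the handle in
 * a new transaction.
 *
 *	if (!ext4_handle_has_enough_credits(handle, needed)) {
 *		err = ext4_journal_extend(handle, needed);
 *		if (err > 0)
 *			err = ext4_journal_restart(handle, needed);
 *		if (err)
 *			goto out;
 *	}
 */
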
static inline int ext4_journal_blocks_per_page(struct inode *inode)
{
	if (EXT4_JOURNAL(inode) != NULL)
		return jbd2_journal_blocks_per_page(inode);
	return 0;
}

static inline int ext4_journal_force_commit(journal_t *journal)
{
	if (journal)
		return jbd2_journal_force_commit(journal);
	return 0;
}

static inline int ext4_jbd2_file_inode(handle_t *handle, struct inode *inode)
{
	if (ext4_handle_valid(handle))
		return jbd2_journal_file_inode(handle, EXT4_I(inode)->jinode);
	return 0;
}

static inline void ext4_update_inode_fsync_trans(handle_t *handle,
						 struct inode *inode,
						 int datasync)
{
	struct ext4_inode_info *ei = EXT4_I(inode);

	if (ext4_handle_valid(handle)) {
		ei->i_sync_tid = handle->h_transaction->t_tid;
		if (datasync)
			ei->i_datasync_tid = handle->h_transaction->t_tid;
	}
}

/* super.c */
int ext4_force_commit(struct super_block *sb);

/*
 * Ext4 inode journal modes
 */
#define EXT4_INODE_JOURNAL_DATA_MODE	0x01 /* journal data mode */
#define EXT4_INODE_ORDERED_DATA_MODE	0x02 /* ordered data mode */
#define EXT4_INODE_WRITEBACK_DATA_MODE	0x04 /* writeback data mode */

static inline int ext4_inode_journal_mode(struct inode *inode)
{
	if (EXT4_JOURNAL(inode) == NULL)
		return EXT4_INODE_WRITEBACK_DATA_MODE;	/* writeback */
	/* We do not support data journalling with delayed allocation */
	if (!S_ISREG(inode->i_mode) ||
	    test_opt(inode->i_sb, DATA_FLAGS) == EXT4_MOUNT_JOURNAL_DATA)
		return EXT4_INODE_JOURNAL_DATA_MODE;	/* journal data */
	if (ext4_test_inode_flag(inode, EXT4_INODE_JOURNAL_DATA) &&
	    !test_opt(inode->i_sb, DELALLOC))
		return EXT4_INODE_JOURNAL_DATA_MODE;	/* journal data */
	if (test_opt(inode->i_sb, DATA_FLAGS) == EXT4_MOUNT_ORDERED_DATA)
		return EXT4_INODE_ORDERED_DATA_MODE;	/* ordered */
	if (test_opt(inode->i_sb, DATA_FLAGS) == EXT4_MOUNT_WRITEBACK_DATA)
		return EXT4_INODE_WRITEBACK_DATA_MODE;	/* writeback */
	else
		BUG();
}

static inline int ext4_should_journal_data(struct inode *inode)
{
	return ext4_inode_journal_mode(inode) & EXT4_INODE_JOURNAL_DATA_MODE;
}

static inline int ext4_should_order_data(struct inode *inode)
{
	return ext4_inode_journal_mode(inode) & EXT4_INODE_ORDERED_DATA_MODE;
}

static inline int ext4_should_writeback_data(struct inode *inode)
{
	return ext4_inode_journal_mode(inode) & EXT4_INODE_WRITEBACK_DATA_MODE;
}

/*
 * This function controls whether or not we should try to go down the
 * dioread_nolock code paths, which make it safe to avoid taking
 * i_mutex for direct I/O reads.  This only works for extent-based
 * files, and it doesn't work if data journaling is enabled, since the
 * dioread_nolock code uses b_private to pass information back to the
 * I/O completion handler, and this conflicts with the jbd's use of
 * b_private.
 */
static inline int ext4_should_dioread_nolock(struct inode *inode)
{
	if (!test_opt(inode->i_sb, DIOREAD_NOLOCK))
		return 0;
	if (!S_ISREG(inode->i_mode))
		return 0;
	if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)))
		return 0;
	if (ext4_should_journal_data(inode))
		return 0;
	return 1;
}

#endif	/* _EXT4_JBD2_H */