// SPDX-License-Identifier: GPL-2.0+
/*
 * ext4_jbd2.h
 *
 * Written by Stephen C. Tweedie <sct@redhat.com>, 1999
 *
 * Copyright 1998--1999 Red Hat corp --- All Rights Reserved
 *
 * Ext4-specific journaling extensions.
 */

#ifndef _EXT4_JBD2_H
#define _EXT4_JBD2_H

#include <linux/fs.h>
#include <linux/jbd2.h>
#include "ext4.h"

#define EXT4_JOURNAL(inode)	(EXT4_SB((inode)->i_sb)->s_journal)

/* Define the number of blocks we need to account to a transaction to
 * modify one block of data.
 *
 * We may have to touch one inode, one bitmap buffer, up to three
 * indirection blocks, the group and superblock summaries, and the data
 * block to complete the transaction.
 *
 * For an extents-enabled filesystem we may have to allocate and modify up
 * to 5 levels of the extent tree plus the data block (for each of these we
 * need a bitmap buffer and group summary), the root stored in the inode,
 * and the superblock.
 */

#define EXT4_SINGLEDATA_TRANS_BLOCKS(sb)				\
	(ext4_has_feature_extents(sb) ? 20U : 8U)

/* Extended attribute operations touch at most two data buffers,
 * two bitmap buffers, and two group summaries, in addition to the inode
 * and the superblock, which are already accounted for. */

#define EXT4_XATTR_TRANS_BLOCKS		6U

/* Define the minimum size for a transaction which modifies data.  This
 * needs to take into account the fact that we may end up modifying two
 * quota files too (one for the group, one for the user quota).  The
 * superblock only gets updated once, of course, so don't bother
 * counting that again for the quota updates. */

#define EXT4_DATA_TRANS_BLOCKS(sb)	(EXT4_SINGLEDATA_TRANS_BLOCKS(sb) + \
					 EXT4_XATTR_TRANS_BLOCKS - 2 + \
					 EXT4_MAXQUOTAS_TRANS_BLOCKS(sb))

/*
 * Define the number of metadata blocks we need to account to modify data.
 *
 * This includes the superblock, inode block, quota blocks and xattr blocks.
 */
#define EXT4_META_TRANS_BLOCKS(sb)	(EXT4_XATTR_TRANS_BLOCKS + \
					EXT4_MAXQUOTAS_TRANS_BLOCKS(sb))

/* Define an arbitrary limit for the amount of data we will anticipate
 * writing to any given transaction.  For unbounded transactions such as
 * write(2) and truncate(2) we can write more than this, but we always
 * start off at the maximum transaction size and grow the transaction
 * optimistically as we go. */

#define EXT4_MAX_TRANS_DATA		64U

/* We break up a large truncate or write transaction once the handle's
 * buffer credits get this low; we then need either to extend the
 * transaction or to start a new one.  Reserve enough space here for
 * inode, bitmap, superblock, group and indirection updates for at least
 * one block, plus two quota updates.  Quota allocations are not
 * needed. */

#define EXT4_RESERVE_TRANS_BLOCKS	12U

/*
 * Number of credits needed if we need to insert an entry into a
 * directory.  For each new index block, we need 4 blocks (old index
 * block, new index block, bitmap block, bg summary).  For normal
 * htree directories there are 2 levels; if the largedir feature is
 * enabled, it's 3 levels.
 */
#define EXT4_INDEX_EXTRA_TRANS_BLOCKS	12U

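/*
 * Illustrative sketch (not part of the kernel sources; "sb" and "credits"
 * are hypothetical locals): callers typically size a handle by summing the
 * macros above, e.g. for inserting a directory entry that may also allocate
 * data blocks:
 *
 *	credits = EXT4_DATA_TRANS_BLOCKS(sb) +
 *		  EXT4_INDEX_EXTRA_TRANS_BLOCKS;
 */
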
#ifdef CONFIG_QUOTA
/* Number of blocks needed for a quota update - we know that the structure
 * was allocated, so we only need to update the data block */
#define EXT4_QUOTA_TRANS_BLOCKS(sb) ((test_opt(sb, QUOTA) ||\
		ext4_has_feature_quota(sb)) ? 1 : 0)
/* Number of blocks needed for a quota insert/delete - we do some block
 * writes, but the inode, sb and group updates are done only once */
#define EXT4_QUOTA_INIT_BLOCKS(sb) ((test_opt(sb, QUOTA) ||\
		ext4_has_feature_quota(sb)) ?\
		(DQUOT_INIT_ALLOC*(EXT4_SINGLEDATA_TRANS_BLOCKS(sb)-3)\
		 +3+DQUOT_INIT_REWRITE) : 0)

#define EXT4_QUOTA_DEL_BLOCKS(sb) ((test_opt(sb, QUOTA) ||\
		ext4_has_feature_quota(sb)) ?\
		(DQUOT_DEL_ALLOC*(EXT4_SINGLEDATA_TRANS_BLOCKS(sb)-3)\
		 +3+DQUOT_DEL_REWRITE) : 0)
#else
#define EXT4_QUOTA_TRANS_BLOCKS(sb) 0
#define EXT4_QUOTA_INIT_BLOCKS(sb) 0
#define EXT4_QUOTA_DEL_BLOCKS(sb) 0
#endif
#define EXT4_MAXQUOTAS_TRANS_BLOCKS(sb) (EXT4_MAXQUOTAS*EXT4_QUOTA_TRANS_BLOCKS(sb))
#define EXT4_MAXQUOTAS_INIT_BLOCKS(sb) (EXT4_MAXQUOTAS*EXT4_QUOTA_INIT_BLOCKS(sb))
#define EXT4_MAXQUOTAS_DEL_BLOCKS(sb) (EXT4_MAXQUOTAS*EXT4_QUOTA_DEL_BLOCKS(sb))
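
/*
 * Note that EXT4_DATA_TRANS_BLOCKS() above already folds in
 * EXT4_MAXQUOTAS_TRANS_BLOCKS(), so the INIT/DEL variants are intended for
 * paths that create or tear down the on-disk quota structures themselves.
 * A rough sketch (hypothetical names, not a definitive formula):
 *
 *	credits = EXT4_DATA_TRANS_BLOCKS(sb) +
 *		  EXT4_MAXQUOTAS_DEL_BLOCKS(sb);
 */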

/*
 * Ext4 handle operation types -- for logging purposes
 */
#define EXT4_HT_MISC             0
#define EXT4_HT_INODE            1
#define EXT4_HT_WRITE_PAGE       2
#define EXT4_HT_MAP_BLOCKS       3
#define EXT4_HT_DIR              4
#define EXT4_HT_TRUNCATE         5
#define EXT4_HT_QUOTA            6
#define EXT4_HT_RESIZE           7
#define EXT4_HT_MIGRATE          8
#define EXT4_HT_MOVE_EXTENTS     9
#define EXT4_HT_XATTR           10
#define EXT4_HT_EXT_CONVERT     11
#define EXT4_HT_MAX             12

/**
 *   struct ext4_journal_cb_entry - Base structure for callback information.
 *
 *   This struct is a 'seed' structure for use with your own callback
 *   structs. If you are using callbacks you must allocate one of these
 *   or another struct of your own definition which has this struct
 *   as its first element and pass it to ext4_journal_callback_add().
 */
struct ext4_journal_cb_entry {
	/* list information for other callbacks attached to the same handle */
	struct list_head jce_list;

	/*  Function to call with this callback structure */
	void (*jce_func)(struct super_block *sb,
			 struct ext4_journal_cb_entry *jce, int error);

	/* user data goes here */
};

/**
 * ext4_journal_callback_add: add a function to call after transaction commit
 * @handle: active journal transaction handle to register callback on
 * @func: callback function to call after the transaction has committed:
 *        @sb: superblock of current filesystem for transaction
 *        @jce: returned journal callback data
 *        @rc: journal state at commit (0 = transaction committed properly)
 * @jce: journal callback data (internal and function private data struct)
 *
 * The registered function will be called in the context of the journal thread
 * after the transaction for which the handle was created has completed.
 *
 * No locks are held when the callback function is called, so it is safe to
 * call blocking functions from within the callback, but the callback should
 * not block or run for too long, or the filesystem will be blocked waiting for
 * the next transaction to commit. No journaling functions can be used, or
 * there is a risk of deadlock.
 *
 * There is no guaranteed calling order of multiple registered callbacks on
 * the same transaction.
 */
static inline void _ext4_journal_callback_add(handle_t *handle,
			struct ext4_journal_cb_entry *jce)
{
	/* Add the jce to transaction's private list */
	list_add_tail(&jce->jce_list, &handle->h_transaction->t_private_list);
}

static inline void ext4_journal_callback_add(handle_t *handle,
			void (*func)(struct super_block *sb,
				     struct ext4_journal_cb_entry *jce,
				     int rc),
			struct ext4_journal_cb_entry *jce)
{
	struct ext4_sb_info *sbi =
			EXT4_SB(handle->h_transaction->t_journal->j_private);

	/* Add the jce to transaction's private list */
	jce->jce_func = func;
	spin_lock(&sbi->s_md_lock);
	_ext4_journal_callback_add(handle, jce);
	spin_unlock(&sbi->s_md_lock);
}
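
/*
 * Illustrative sketch (hypothetical struct and function names): a user of
 * the callback mechanism embeds the jce as the first member of its own
 * structure, registers it while the handle is active, and recovers the
 * container when the journal thread invokes it after commit:
 *
 *	struct my_commit_cb {
 *		struct ext4_journal_cb_entry jce;	(must be first)
 *		ext4_fsblk_t block;
 *	};
 *
 *	static void my_commit_done(struct super_block *sb,
 *				   struct ext4_journal_cb_entry *jce, int rc)
 *	{
 *		struct my_commit_cb *cb =
 *			container_of(jce, struct my_commit_cb, jce);
 *		(rc is 0 if the transaction committed properly)
 *	}
 *
 *	ext4_journal_callback_add(handle, my_commit_done, &cb->jce);
 */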

/**
 * ext4_journal_callback_try_del: delete a registered callback
 * @handle: active journal transaction handle on which callback was registered
 * @jce: registered journal callback entry to unregister
 * Return true if the object was successfully removed
 */
static inline bool ext4_journal_callback_try_del(handle_t *handle,
					     struct ext4_journal_cb_entry *jce)
{
	bool deleted;
	struct ext4_sb_info *sbi =
			EXT4_SB(handle->h_transaction->t_journal->j_private);

	spin_lock(&sbi->s_md_lock);
	deleted = !list_empty(&jce->jce_list);
	list_del_init(&jce->jce_list);
	spin_unlock(&sbi->s_md_lock);
	return deleted;
}

int
ext4_mark_iloc_dirty(handle_t *handle,
		     struct inode *inode,
		     struct ext4_iloc *iloc);

/*
 * On success, we end up with an outstanding reference count against
 * iloc->bh.  This _must_ be cleaned up later.
 */

int ext4_reserve_inode_write(handle_t *handle, struct inode *inode,
			struct ext4_iloc *iloc);

#define ext4_mark_inode_dirty(__h, __i)					\
		__ext4_mark_inode_dirty((__h), (__i), __func__, __LINE__)
int __ext4_mark_inode_dirty(handle_t *handle, struct inode *inode,
				const char *func, unsigned int line);

int ext4_expand_extra_isize(struct inode *inode,
			    unsigned int new_extra_isize,
			    struct ext4_iloc *iloc);
/*
 * Wrapper functions with which ext4 calls into JBD.
 */
int __ext4_journal_get_write_access(const char *where, unsigned int line,
				    handle_t *handle, struct buffer_head *bh);

int __ext4_forget(const char *where, unsigned int line, handle_t *handle,
		  int is_metadata, struct inode *inode,
		  struct buffer_head *bh, ext4_fsblk_t blocknr);

int __ext4_journal_get_create_access(const char *where, unsigned int line,
				handle_t *handle, struct buffer_head *bh);

int __ext4_handle_dirty_metadata(const char *where, unsigned int line,
				 handle_t *handle, struct inode *inode,
				 struct buffer_head *bh);

int __ext4_handle_dirty_super(const char *where, unsigned int line,
			      handle_t *handle, struct super_block *sb);

#define ext4_journal_get_write_access(handle, bh) \
	__ext4_journal_get_write_access(__func__, __LINE__, (handle), (bh))
#define ext4_forget(handle, is_metadata, inode, bh, block_nr) \
	__ext4_forget(__func__, __LINE__, (handle), (is_metadata), (inode), \
		      (bh), (block_nr))
#define ext4_journal_get_create_access(handle, bh) \
	__ext4_journal_get_create_access(__func__, __LINE__, (handle), (bh))
#define ext4_handle_dirty_metadata(handle, inode, bh) \
	__ext4_handle_dirty_metadata(__func__, __LINE__, (handle), (inode), \
				     (bh))
#define ext4_handle_dirty_super(handle, sb) \
	__ext4_handle_dirty_super(__func__, __LINE__, (handle), (sb))
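
/*
 * Illustrative sketch of the usual pattern for modifying a metadata buffer
 * under a handle (hypothetical variable names):
 *
 *	err = ext4_journal_get_write_access(handle, bh);
 *	if (err)
 *		goto out;
 *	(modify the buffer contents)
 *	err = ext4_handle_dirty_metadata(handle, inode, bh);
 */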

handle_t *__ext4_journal_start_sb(struct super_block *sb, unsigned int line,
				  int type, int blocks, int rsv_blocks,
				  int revoke_creds);
int __ext4_journal_stop(const char *where, unsigned int line, handle_t *handle);

#define EXT4_NOJOURNAL_MAX_REF_COUNT ((unsigned long) 4096)

/* Note:  Do not use this for NULL handles.  This is only to determine if
 * a properly allocated handle is using a journal or not. */
static inline int ext4_handle_valid(handle_t *handle)
{
	if ((unsigned long)handle < EXT4_NOJOURNAL_MAX_REF_COUNT)
		return 0;
	return 1;
}

static inline void ext4_handle_sync(handle_t *handle)
{
	if (ext4_handle_valid(handle))
		handle->h_sync = 1;
}

static inline int ext4_handle_is_aborted(handle_t *handle)
{
	if (ext4_handle_valid(handle))
		return is_handle_aborted(handle);
	return 0;
}

static inline int ext4_free_metadata_revoke_credits(struct super_block *sb,
						    int blocks)
{
	/* Freeing each metadata block can result in freeing one cluster */
	return blocks * EXT4_SB(sb)->s_cluster_ratio;
}

static inline int ext4_trans_default_revoke_credits(struct super_block *sb)
{
	return ext4_free_metadata_revoke_credits(sb, 8);
}

#define ext4_journal_start_sb(sb, type, nblocks)			\
	__ext4_journal_start_sb((sb), __LINE__, (type), (nblocks), 0,	\
				ext4_trans_default_revoke_credits(sb))

#define ext4_journal_start(inode, type, nblocks)			\
	__ext4_journal_start((inode), __LINE__, (type), (nblocks), 0,	\
			     ext4_trans_default_revoke_credits((inode)->i_sb))

#define ext4_journal_start_with_reserve(inode, type, blocks, rsv_blocks)\
	__ext4_journal_start((inode), __LINE__, (type), (blocks), (rsv_blocks),\
			     ext4_trans_default_revoke_credits((inode)->i_sb))

#define ext4_journal_start_with_revoke(inode, type, blocks, revoke_creds) \
	__ext4_journal_start((inode), __LINE__, (type), (blocks), 0,	\
			     (revoke_creds))

static inline handle_t *__ext4_journal_start(struct inode *inode,
					     unsigned int line, int type,
					     int blocks, int rsv_blocks,
					     int revoke_creds)
{
	return __ext4_journal_start_sb(inode->i_sb, line, type, blocks,
				       rsv_blocks, revoke_creds);
}

#define ext4_journal_stop(handle) \
	__ext4_journal_stop(__func__, __LINE__, (handle))
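
/*
 * Illustrative sketch of the start/stop pattern (hypothetical names):
 *
 *	handle = ext4_journal_start(inode, EXT4_HT_INODE, credits);
 *	if (IS_ERR(handle))
 *		return PTR_ERR(handle);
 *	(journaled modifications go here)
 *	err = ext4_journal_stop(handle);
 */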

#define ext4_journal_start_reserved(handle, type) \
	__ext4_journal_start_reserved((handle), __LINE__, (type))

handle_t *__ext4_journal_start_reserved(handle_t *handle, unsigned int line,
					int type);

static inline handle_t *ext4_journal_current_handle(void)
{
	return journal_current_handle();
}

static inline int ext4_journal_extend(handle_t *handle, int nblocks, int revoke)
{
	if (ext4_handle_valid(handle))
		return jbd2_journal_extend(handle, nblocks, revoke);
	return 0;
}

static inline int ext4_journal_restart(handle_t *handle, int nblocks,
				       int revoke)
{
	if (ext4_handle_valid(handle))
		return jbd2__journal_restart(handle, nblocks, revoke, GFP_NOFS);
	return 0;
}

int __ext4_journal_ensure_credits(handle_t *handle, int check_cred,
				  int extend_cred, int revoke_cred);


/*
 * Ensure @handle has at least @check_cred credits available. If not, the
 * transaction will be extended or restarted to contain at least @extend_cred
 * credits. Before the transaction is restarted, @fn is executed to allow for
 * any necessary cleanup.
 *
 * The return value is < 0 in case of error, 0 in case the handle has enough
 * credits or the transaction extension succeeded, and 1 in case the
 * transaction had to be restarted.
 */
#define ext4_journal_ensure_credits_fn(handle, check_cred, extend_cred,	\
				       revoke_cred, fn) \
({									\
	__label__ __ensure_end;						\
	int err = __ext4_journal_ensure_credits((handle), (check_cred),	\
					(extend_cred), (revoke_cred));	\
									\
	if (err <= 0)							\
		goto __ensure_end;					\
	err = (fn);							\
	if (err < 0)							\
		goto __ensure_end;					\
	err = ext4_journal_restart((handle), (extend_cred), (revoke_cred)); \
	if (err == 0)							\
		err = 1;						\
__ensure_end:								\
	err;								\
})

/*
 * Ensure the given handle has at least the requested number of credits
 * available, possibly restarting the transaction if needed. We also make sure
 * the transaction has space for at least ext4_trans_default_revoke_credits(sb)
 * revoke records, as freeing one or two blocks is a very common pattern and
 * requesting this is very cheap.
 */
static inline int ext4_journal_ensure_credits(handle_t *handle, int credits,
					      int revoke_creds)
{
	return ext4_journal_ensure_credits_fn(handle, credits, credits,
				revoke_creds, 0);
}
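
/*
 * Illustrative sketch (hypothetical names): a loop that keeps consuming
 * credits can top up the handle and redo per-transaction state only when
 * the transaction was actually restarted:
 *
 *	err = ext4_journal_ensure_credits(handle, needed, revoke);
 *	if (err < 0)
 *		goto out;
 *	if (err > 0)
 *		(transaction restarted: re-acquire write access as needed)
 */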

static inline int ext4_journal_blocks_per_page(struct inode *inode)
{
	if (EXT4_JOURNAL(inode) != NULL)
		return jbd2_journal_blocks_per_page(inode);
	return 0;
}

static inline int ext4_journal_force_commit(journal_t *journal)
{
	if (journal)
		return jbd2_journal_force_commit(journal);
	return 0;
}

static inline int ext4_jbd2_inode_add_write(handle_t *handle,
		struct inode *inode, loff_t start_byte, loff_t length)
{
	if (ext4_handle_valid(handle))
		return jbd2_journal_inode_ranged_write(handle,
				EXT4_I(inode)->jinode, start_byte, length);
	return 0;
}

static inline int ext4_jbd2_inode_add_wait(handle_t *handle,
		struct inode *inode, loff_t start_byte, loff_t length)
{
	if (ext4_handle_valid(handle))
		return jbd2_journal_inode_ranged_wait(handle,
				EXT4_I(inode)->jinode, start_byte, length);
	return 0;
}

static inline void ext4_update_inode_fsync_trans(handle_t *handle,
						 struct inode *inode,
						 int datasync)
{
	struct ext4_inode_info *ei = EXT4_I(inode);

	if (ext4_handle_valid(handle) && !is_handle_aborted(handle)) {
		ei->i_sync_tid = handle->h_transaction->t_tid;
		if (datasync)
			ei->i_datasync_tid = handle->h_transaction->t_tid;
	}
}

/* super.c */
int ext4_force_commit(struct super_block *sb);

/*
 * Ext4 inode journal modes
 */
#define EXT4_INODE_JOURNAL_DATA_MODE	0x01 /* journal data mode */
#define EXT4_INODE_ORDERED_DATA_MODE	0x02 /* ordered data mode */
#define EXT4_INODE_WRITEBACK_DATA_MODE	0x04 /* writeback data mode */

int ext4_inode_journal_mode(struct inode *inode);

static inline int ext4_should_journal_data(struct inode *inode)
{
	return ext4_inode_journal_mode(inode) & EXT4_INODE_JOURNAL_DATA_MODE;
}

static inline int ext4_should_order_data(struct inode *inode)
{
	return ext4_inode_journal_mode(inode) & EXT4_INODE_ORDERED_DATA_MODE;
}

static inline int ext4_should_writeback_data(struct inode *inode)
{
	return ext4_inode_journal_mode(inode) & EXT4_INODE_WRITEBACK_DATA_MODE;
}

static inline int ext4_free_data_revoke_credits(struct inode *inode, int blocks)
{
	if (test_opt(inode->i_sb, DATA_FLAGS) == EXT4_MOUNT_JOURNAL_DATA)
		return 0;
	if (!ext4_should_journal_data(inode))
		return 0;
	/*
	 * Data blocks in one extent are contiguous, just account for partial
	 * clusters at extent boundaries
	 */
	return blocks + 2*(EXT4_SB(inode->i_sb)->s_cluster_ratio - 1);
}

/*
 * This function controls whether or not we should try to go down the
 * dioread_nolock code paths, which makes it safe to avoid taking
 * i_mutex for direct I/O reads.  This only works for extent-based
 * files, and it doesn't work if data journaling is enabled, since the
 * dioread_nolock code uses b_private to pass information back to the
 * I/O completion handler, and this conflicts with the jbd's use of
 * b_private.
 */
static inline int ext4_should_dioread_nolock(struct inode *inode)
{
	if (!test_opt(inode->i_sb, DIOREAD_NOLOCK))
		return 0;
	if (!S_ISREG(inode->i_mode))
		return 0;
	if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)))
		return 0;
	if (ext4_should_journal_data(inode))
		return 0;
	/* temporary fix to prevent generic/422 test failures */
	if (!test_opt(inode->i_sb, DELALLOC))
		return 0;
	return 1;
}

#endif	/* _EXT4_JBD2_H */