// SPDX-License-Identifier: GPL-2.0
/*
 * Write ahead logging implementation copyright Chris Mason 2000
 *
 * The background commits make this code very interrelated, and
 * overly complex.  I need to rethink things a bit.  The major players:
 *
 * journal_begin -- call with the number of blocks you expect to log.
 *                  If the current transaction is too
 *                  old, it will block until the current transaction is
 *                  finished, and then start a new one.
 *                  Usually, your transaction will get joined in with
 *                  previous ones for speed.
 *
 * journal_join  -- same as journal_begin, but won't block on the current
 *                  transaction regardless of age.  Don't ever call
 *                  this.  Ever.  There are only two places it should be
 *                  called from, and they are both inside this file.
 *
 * journal_mark_dirty -- adds blocks into this transaction.  Clears any flags
 *                       that might make them get sent to disk
 *                       and then marks them BH_JDirty.  Puts the buffer head
 *                       into the current transaction hash.
 *
 * journal_end -- if the current transaction is batchable, it does nothing.
 *                Otherwise, it could do an async/synchronous commit, or
 *                a full flush of all log and real blocks in the
 *                transaction.
 *
 * flush_old_commits -- if the current transaction is too old, it is ended and
 *                      commit blocks are sent to disk.  Forces commit blocks
 *                      to disk for all backgrounded commits that have been
 *                      around too long.
 *                      -- Note, if you call this as an immediate flush from
 *                      within kupdate, it will ignore the immediate flag
 */

#include <linux/time.h>
#include <linux/semaphore.h>
#include <linux/vmalloc.h>
#include "reiserfs.h"
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/fcntl.h>
#include <linux/stat.h>
#include <linux/string.h>
#include <linux/buffer_head.h>
#include <linux/workqueue.h>
#include <linux/writeback.h>
#include <linux/blkdev.h>
#include <linux/backing-dev.h>
#include <linux/uaccess.h>
#include <linux/slab.h>

/* gets a struct reiserfs_journal_list * from a list head */
#define JOURNAL_LIST_ENTRY(h) (list_entry((h), struct reiserfs_journal_list, \
                               j_list))

/* must be correct to keep the desc and commit structs at 4k */
#define JOURNAL_TRANS_HALF 1018
#define BUFNR 64		/* read ahead */

/* cnode stat bits.  Move these into reiserfs_fs.h */

/* this block was freed, and can't be written. */
#define BLOCK_FREED 2
/* this block was freed during this transaction, and can't be written */
#define BLOCK_FREED_HOLDER 3

/* used in flush_journal_list */
#define BLOCK_NEEDS_FLUSH 4
#define BLOCK_DIRTIED 5

/* journal list state bits */
#define LIST_TOUCHED 1
#define LIST_DIRTY 2
#define LIST_COMMIT_PENDING 4	/* someone will commit this list */

/* flags for do_journal_end */
#define FLUSH_ALL 1		/* flush commit and real blocks */
#define COMMIT_NOW 2		/* end and commit this transaction */
#define WAIT 4			/* wait for the log blocks to hit the disk */

static int do_journal_end(struct reiserfs_transaction_handle *, int flags);
static int flush_journal_list(struct super_block *s,
			      struct reiserfs_journal_list *jl, int flushall);
static int flush_commit_list(struct super_block *s,
			     struct reiserfs_journal_list *jl, int flushall);
static int can_dirty(struct reiserfs_journal_cnode *cn);
static int journal_join(struct reiserfs_transaction_handle *th,
			struct super_block *sb);
static void release_journal_dev(struct super_block *super,
				struct reiserfs_journal *journal);
static void dirty_one_transaction(struct super_block *s,
				  struct reiserfs_journal_list *jl);
static void flush_async_commits(struct work_struct *work);
static void queue_log_writer(struct super_block *s);

/* values for join in do_journal_begin_r */
enum {
	JBEGIN_REG = 0,		/* regular journal begin */
	/* join the running transaction if at all possible */
	JBEGIN_JOIN = 1,
	/* called from cleanup code, ignores aborted flag */
	JBEGIN_ABORT = 2,
};

static int do_journal_begin_r(struct reiserfs_transaction_handle *th,
			      struct super_block *sb,
			      unsigned long nblocks, int join);

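/* reset the hash table of blocks belonging to the current transaction */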
static void init_journal_hash(struct super_block *sb)
{
	struct reiserfs_journal *journal = SB_JOURNAL(sb);
	memset(journal->j_hash_table, 0,
	       JOURNAL_HASH_SIZE * sizeof(struct reiserfs_journal_cnode *));
}

/*
 * clears BH_Dirty and sticks the buffer on the clean list.  Called because
 * I can't allow refile_buffer to make schedule happen after I've freed a
 * block.  Look at remove_from_transaction and journal_mark_freed for
 * more details.
 */
static int reiserfs_clean_and_file_buffer(struct buffer_head *bh)
{
	if (bh) {
		clear_buffer_dirty(bh);
		clear_buffer_journal_test(bh);
	}
	return 0;
}

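/* allocate one bitmap node: a block-sized, zeroed bitmap plus its list head */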
static struct reiserfs_bitmap_node *allocate_bitmap_node(struct super_block
							 *sb)
{
	struct reiserfs_bitmap_node *bn;
	static int id;

	bn = kmalloc(sizeof(struct reiserfs_bitmap_node), GFP_NOFS);
	if (!bn) {
		return NULL;
	}
	bn->data = kzalloc(sb->s_blocksize, GFP_NOFS);
	if (!bn->data) {
		kfree(bn);
		return NULL;
	}
	bn->id = id++;
	INIT_LIST_HEAD(&bn->list);
	return bn;
}

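/*
 * grab a bitmap node from the journal's free list, or allocate a fresh
 * one.  Loops (yielding) until an allocation succeeds.
 */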
static struct reiserfs_bitmap_node *get_bitmap_node(struct super_block *sb)
{
	struct reiserfs_journal *journal = SB_JOURNAL(sb);
	struct reiserfs_bitmap_node *bn = NULL;
	struct list_head *entry = journal->j_bitmap_nodes.next;

	journal->j_used_bitmap_nodes++;
repeat:

	if (entry != &journal->j_bitmap_nodes) {
		bn = list_entry(entry, struct reiserfs_bitmap_node, list);
		list_del(entry);
		memset(bn->data, 0, sb->s_blocksize);
		journal->j_free_bitmap_nodes--;
		return bn;
	}
	bn = allocate_bitmap_node(sb);
	if (!bn) {
		yield();
		goto repeat;
	}
	return bn;
}
static inline void free_bitmap_node(struct super_block *sb,
				    struct reiserfs_bitmap_node *bn)
{
	struct reiserfs_journal *journal = SB_JOURNAL(sb);
	journal->j_used_bitmap_nodes--;
	if (journal->j_free_bitmap_nodes > REISERFS_MAX_BITMAP_NODES) {
		kfree(bn->data);
		kfree(bn);
	} else {
		list_add(&bn->list, &journal->j_bitmap_nodes);
		journal->j_free_bitmap_nodes++;
	}
}

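/* pre-fill the free list with REISERFS_MIN_BITMAP_NODES bitmap nodes */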
static void allocate_bitmap_nodes(struct super_block *sb)
{
	int i;
	struct reiserfs_journal *journal = SB_JOURNAL(sb);
	struct reiserfs_bitmap_node *bn = NULL;
	for (i = 0; i < REISERFS_MIN_BITMAP_NODES; i++) {
		bn = allocate_bitmap_node(sb);
		if (bn) {
			list_add(&bn->list, &journal->j_bitmap_nodes);
			journal->j_free_bitmap_nodes++;
		} else {
			/* this is ok, we'll try again when more are needed */
			break;
		}
	}
}

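/*
 * mark a block in the journal list's bitmap, allocating the bitmap
 * node on demand
 */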
static int set_bit_in_list_bitmap(struct super_block *sb,
				  b_blocknr_t block,
				  struct reiserfs_list_bitmap *jb)
{
	unsigned int bmap_nr = block / (sb->s_blocksize << 3);
	unsigned int bit_nr = block % (sb->s_blocksize << 3);

	if (!jb->bitmaps[bmap_nr]) {
		jb->bitmaps[bmap_nr] = get_bitmap_node(sb);
	}
	set_bit(bit_nr, (unsigned long *)jb->bitmaps[bmap_nr]->data);
	return 0;
}

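/* return every allocated bitmap node in this list bitmap to the free pool */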
static void cleanup_bitmap_list(struct super_block *sb,
				struct reiserfs_list_bitmap *jb)
{
	int i;
	if (jb->bitmaps == NULL)
		return;

	for (i = 0; i < reiserfs_bmap_count(sb); i++) {
		if (jb->bitmaps[i]) {
			free_bitmap_node(sb, jb->bitmaps[i]);
			jb->bitmaps[i] = NULL;
		}
	}
}

/*
 * only call this on FS unmount.
 */
static int free_list_bitmaps(struct super_block *sb,
			     struct reiserfs_list_bitmap *jb_array)
{
	int i;
	struct reiserfs_list_bitmap *jb;
	for (i = 0; i < JOURNAL_NUM_BITMAPS; i++) {
		jb = jb_array + i;
		jb->journal_list = NULL;
		cleanup_bitmap_list(sb, jb);
		vfree(jb->bitmaps);
		jb->bitmaps = NULL;
	}
	return 0;
}

static int free_bitmap_nodes(struct super_block *sb)
{
	struct reiserfs_journal *journal = SB_JOURNAL(sb);
	struct list_head *next = journal->j_bitmap_nodes.next;
	struct reiserfs_bitmap_node *bn;

	while (next != &journal->j_bitmap_nodes) {
		bn = list_entry(next, struct reiserfs_bitmap_node, list);
		list_del(next);
		kfree(bn->data);
		kfree(bn);
		next = journal->j_bitmap_nodes.next;
		journal->j_free_bitmap_nodes--;
	}

	return 0;
}

/*
 * get memory for JOURNAL_NUM_BITMAPS worth of bitmaps.
 * jb_array is the array to be filled in.
 */
int reiserfs_allocate_list_bitmaps(struct super_block *sb,
				   struct reiserfs_list_bitmap *jb_array,
				   unsigned int bmap_nr)
{
	int i;
	int failed = 0;
	struct reiserfs_list_bitmap *jb;
	int mem = bmap_nr * sizeof(struct reiserfs_bitmap_node *);

	for (i = 0; i < JOURNAL_NUM_BITMAPS; i++) {
		jb = jb_array + i;
		jb->journal_list = NULL;
		jb->bitmaps = vzalloc(mem);
		if (!jb->bitmaps) {
			reiserfs_warning(sb, "clm-2000", "unable to "
					 "allocate bitmaps for journal lists");
			failed = 1;
			break;
		}
	}
	if (failed) {
		free_list_bitmaps(sb, jb_array);
		return -1;
	}
	return 0;
}

/*
 * find an available list bitmap.  If you can't find one, flush a commit list
 * and try again
 */
static struct reiserfs_list_bitmap *get_list_bitmap(struct super_block *sb,
						    struct reiserfs_journal_list
						    *jl)
{
	int i, j;
	struct reiserfs_journal *journal = SB_JOURNAL(sb);
	struct reiserfs_list_bitmap *jb = NULL;

	for (j = 0; j < (JOURNAL_NUM_BITMAPS * 3); j++) {
		i = journal->j_list_bitmap_index;
		journal->j_list_bitmap_index = (i + 1) % JOURNAL_NUM_BITMAPS;
		jb = journal->j_list_bitmap + i;
		if (journal->j_list_bitmap[i].journal_list) {
			flush_commit_list(sb,
					  journal->j_list_bitmap[i].
					  journal_list, 1);
			if (!journal->j_list_bitmap[i].journal_list) {
				break;
			}
		} else {
			break;
		}
	}
	/* double check to make sure the list was flushed correctly */
	if (jb->journal_list)
		return NULL;
	jb->journal_list = jl;
	return jb;
}

/*
 * allocates a new chunk of X nodes, and links them all together as a list.
 * Uses the cnode->next and cnode->prev pointers
 * returns NULL on failure
 */
static struct reiserfs_journal_cnode *allocate_cnodes(int num_cnodes)
{
	struct reiserfs_journal_cnode *head;
	int i;
	if (num_cnodes <= 0) {
		return NULL;
	}
	head = vzalloc(array_size(num_cnodes,
				  sizeof(struct reiserfs_journal_cnode)));
	if (!head) {
		return NULL;
	}
	head[0].prev = NULL;
	head[0].next = head + 1;
	for (i = 1; i < num_cnodes; i++) {
		head[i].prev = head + (i - 1);
		/* the last one is fixed up after the loop */
		head[i].next = head + (i + 1);
	}
	head[num_cnodes - 1].next = NULL;
	return head;
}

/* pulls a cnode off the free list, or returns NULL on failure */
static struct reiserfs_journal_cnode *get_cnode(struct super_block *sb)
{
	struct reiserfs_journal_cnode *cn;
	struct reiserfs_journal *journal = SB_JOURNAL(sb);

	reiserfs_check_lock_depth(sb, "get_cnode");

	if (journal->j_cnode_free <= 0) {
		return NULL;
	}
	journal->j_cnode_used++;
	journal->j_cnode_free--;
	cn = journal->j_cnode_free_list;
	if (!cn) {
		return cn;
	}
	if (cn->next) {
		cn->next->prev = NULL;
	}
	journal->j_cnode_free_list = cn->next;
	memset(cn, 0, sizeof(struct reiserfs_journal_cnode));
	return cn;
}

/*
 * returns a cnode to the free list
 */
static void free_cnode(struct super_block *sb,
		       struct reiserfs_journal_cnode *cn)
{
	struct reiserfs_journal *journal = SB_JOURNAL(sb);

	reiserfs_check_lock_depth(sb, "free_cnode");

	journal->j_cnode_used--;
	journal->j_cnode_free++;
	/* memset(cn, 0, sizeof(struct reiserfs_journal_cnode)); */
	cn->next = journal->j_cnode_free_list;
	if (journal->j_cnode_free_list) {
		journal->j_cnode_free_list->prev = cn;
	}
	/* not needed with the memset, but I might kill the memset, and forget to do this */
	cn->prev = NULL;
	journal->j_cnode_free_list = cn;
}

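/* clear the journal-prepared and restore-dirty state bits on a buffer */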
static void clear_prepared_bits(struct buffer_head *bh)
{
	clear_buffer_journal_prepared(bh);
	clear_buffer_journal_restore_dirty(bh);
}

/*
 * return a cnode with same dev, block number and size in table,
 * or null if not found
 */
static inline struct reiserfs_journal_cnode *get_journal_hash_dev(struct
								  super_block
								  *sb,
								  struct
								  reiserfs_journal_cnode
								  **table,
								  long bl)
{
	struct reiserfs_journal_cnode *cn;
	cn = journal_hash(table, sb, bl);
	while (cn) {
		if (cn->blocknr == bl && cn->sb == sb)
			return cn;
		cn = cn->hnext;
	}
	return (struct reiserfs_journal_cnode *)0;
}

/*
 * this actually means 'can this block be reallocated yet?'.  If you set
 * search_all, a block can only be allocated if it is not in the current
 * transaction, was not freed by the current transaction, and has no chance
 * of ever being overwritten by a replay after crashing.
 *
 * If you don't set search_all, a block can only be allocated if it is not
 * in the current transaction.  Since deleting a block removes it from the
 * current transaction, this case should never happen.  If you don't set
 * search_all, make sure you never write the block without logging it.
 *
 * next_zero_bit is a suggestion about the next block to try for find_forward.
 * when bl is rejected because it is set in a journal list bitmap, we search
 * for the next zero bit in the bitmap that rejected bl.  Then, we return
 * that through next_zero_bit for find_forward to try.
 *
 * Just because we return something in next_zero_bit does not mean we won't
 * reject it on the next call to reiserfs_in_journal
 */
int reiserfs_in_journal(struct super_block *sb,
			unsigned int bmap_nr, int bit_nr, int search_all,
			b_blocknr_t * next_zero_bit)
{
	struct reiserfs_journal *journal = SB_JOURNAL(sb);
	struct reiserfs_journal_cnode *cn;
	struct reiserfs_list_bitmap *jb;
	int i;
	unsigned long bl;

	*next_zero_bit = 0;	/* always start this at zero. */

	PROC_INFO_INC(sb, journal.in_journal);
	/*
	 * If we aren't doing a search_all, this is a metablock, and it
	 * will be logged before use.  If we crash before the transaction
	 * that freed it commits, this transaction won't have committed
	 * either, and the block will never be written
	 */
	if (search_all) {
		for (i = 0; i < JOURNAL_NUM_BITMAPS; i++) {
			PROC_INFO_INC(sb, journal.in_journal_bitmap);
			jb = journal->j_list_bitmap + i;
			if (jb->journal_list && jb->bitmaps[bmap_nr] &&
			    test_bit(bit_nr,
				     (unsigned long *)jb->bitmaps[bmap_nr]->
				     data)) {
				*next_zero_bit =
				    find_next_zero_bit((unsigned long *)
						       (jb->bitmaps[bmap_nr]->
							data),
						       sb->s_blocksize << 3,
						       bit_nr + 1);
				return 1;
			}
		}
	}

	bl = bmap_nr * (sb->s_blocksize << 3) + bit_nr;
	/* is it in any old transactions? */
	if (search_all
	    && (cn =
		get_journal_hash_dev(sb, journal->j_list_hash_table, bl))) {
		return 1;
	}

	/* is it in the current transaction?  This should never happen */
	if ((cn = get_journal_hash_dev(sb, journal->j_hash_table, bl))) {
		BUG();
		return 1;
	}

	PROC_INFO_INC(sb, journal.in_journal_reusable);
	/* safe for reuse */
	return 0;
}

/* insert cn into table */
static inline void insert_journal_hash(struct reiserfs_journal_cnode **table,
				       struct reiserfs_journal_cnode *cn)
{
	struct reiserfs_journal_cnode *cn_orig;

	cn_orig = journal_hash(table, cn->sb, cn->blocknr);
	cn->hnext = cn_orig;
	cn->hprev = NULL;
	if (cn_orig) {
		cn_orig->hprev = cn;
	}
	journal_hash(table, cn->sb, cn->blocknr) = cn;
}

/* lock the current transaction */
static inline void lock_journal(struct super_block *sb)
{
	PROC_INFO_INC(sb, journal.lock_journal);

	reiserfs_mutex_lock_safe(&SB_JOURNAL(sb)->j_mutex, sb);
}

/* unlock the current transaction */
static inline void unlock_journal(struct super_block *sb)
{
	mutex_unlock(&SB_JOURNAL(sb)->j_mutex);
}

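/*
 * journal lists are refcounted; put_journal_list frees the list once
 * the count drops to zero
 */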
static inline void get_journal_list(struct reiserfs_journal_list *jl)
{
	jl->j_refcount++;
}

static inline void put_journal_list(struct super_block *s,
				    struct reiserfs_journal_list *jl)
{
	if (jl->j_refcount < 1) {
		reiserfs_panic(s, "journal-2", "trans id %u, refcount at %d",
			       jl->j_trans_id, jl->j_refcount);
	}
	if (--jl->j_refcount == 0)
		kfree(jl);
}

/*
 * this used to be much more involved, and I'm keeping it just in case
 * things get ugly again.  it gets called by flush_commit_list, and
 * cleans up any data stored about blocks freed during a transaction.
 */
static void cleanup_freed_for_journal_list(struct super_block *sb,
					   struct reiserfs_journal_list *jl)
{
	struct reiserfs_list_bitmap *jb = jl->j_list_bitmap;
	if (jb) {
		cleanup_bitmap_list(sb, jb);
	}
	jl->j_list_bitmap->journal_list = NULL;
	jl->j_list_bitmap = NULL;
}

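/*
 * journal lists are kept on j_journal_list in ascending trans_id order,
 * so comparing against the oldest list tells us whether the list for
 * trans_id has already been committed and freed
 */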
static int journal_list_still_alive(struct super_block *s,
				    unsigned int trans_id)
{
	struct reiserfs_journal *journal = SB_JOURNAL(s);
	struct list_head *entry = &journal->j_journal_list;
	struct reiserfs_journal_list *jl;

	if (!list_empty(entry)) {
		jl = JOURNAL_LIST_ENTRY(entry->next);
		if (jl->j_trans_id <= trans_id) {
			return 1;
		}
	}
	return 0;
}

/*
 * If page->mapping was null, we failed to truncate this page for
 * some reason.  Most likely because it was truncated after being
 * logged via data=journal.
 *
 * This does a check to see if the buffer belongs to one of these
 * lost pages before doing the final put_bh.  If page->mapping was
 * null, it tries to free buffers on the page, which should make the
 * final put_page drop the page from the lru.
 */
static void release_buffer_page(struct buffer_head *bh)
{
	struct page *page = bh->b_page;
	if (!page->mapping && trylock_page(page)) {
		get_page(page);
		put_bh(bh);
		if (!page->mapping)
			try_to_free_buffers(page);
		unlock_page(page);
		put_page(page);
	} else {
		put_bh(bh);
	}
}

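/*
 * b_end_io handler for log block writes.  A buffer that is still
 * journaled (pinned) should never have been submitted, so warn if one
 * reaches disk.
 */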
static void reiserfs_end_buffer_io_sync(struct buffer_head *bh, int uptodate)
{
	if (buffer_journaled(bh)) {
		reiserfs_warning(NULL, "clm-2084",
				 "pinned buffer %lu:%pg sent to disk",
				 bh->b_blocknr, bh->b_bdev);
	}
	if (uptodate)
		set_buffer_uptodate(bh);
	else
		clear_buffer_uptodate(bh);

	unlock_buffer(bh);
	release_buffer_page(bh);
}

static void reiserfs_end_ordered_io(struct buffer_head *bh, int uptodate)
{
	if (uptodate)
		set_buffer_uptodate(bh);
	else
		clear_buffer_uptodate(bh);
	unlock_buffer(bh);
	put_bh(bh);
}

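/*
 * start write-out of a logged buffer; completion is handled by
 * reiserfs_end_buffer_io_sync
 */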
static void submit_logged_buffer(struct buffer_head *bh)
{
	get_bh(bh);
	bh->b_end_io = reiserfs_end_buffer_io_sync;
	clear_buffer_journal_new(bh);
	clear_buffer_dirty(bh);
	if (!test_clear_buffer_journal_test(bh))
		BUG();
	if (!buffer_uptodate(bh))
		BUG();
	submit_bh(REQ_OP_WRITE, 0, bh);
}

static void submit_ordered_buffer(struct buffer_head *bh)
{
	get_bh(bh);
	bh->b_end_io = reiserfs_end_ordered_io;
	clear_buffer_dirty(bh);
	if (!buffer_uptodate(bh))
		BUG();
	submit_bh(REQ_OP_WRITE, 0, bh);
}

#define CHUNK_SIZE 32
struct buffer_chunk {
	struct buffer_head *bh[CHUNK_SIZE];
	int nr;
};

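/* submit every buffer collected in the chunk, then reset it */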
static void write_chunk(struct buffer_chunk *chunk)
{
	int i;
	for (i = 0; i < chunk->nr; i++) {
		submit_logged_buffer(chunk->bh[i]);
	}
	chunk->nr = 0;
}

static void write_ordered_chunk(struct buffer_chunk *chunk)
{
	int i;
	for (i = 0; i < chunk->nr; i++) {
		submit_ordered_buffer(chunk->bh[i]);
	}
	chunk->nr = 0;
}

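/*
 * add a buffer to the chunk; once the chunk fills up, write it out via
 * fn (dropping the spinlock, if one was given, around the writes).
 * Returns 1 if the chunk was flushed.
 */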
static int add_to_chunk(struct buffer_chunk *chunk, struct buffer_head *bh,
			spinlock_t * lock, void (fn) (struct buffer_chunk *))
{
	int ret = 0;
	BUG_ON(chunk->nr >= CHUNK_SIZE);
	chunk->bh[chunk->nr++] = bh;
	if (chunk->nr >= CHUNK_SIZE) {
		ret = 1;
		if (lock) {
			spin_unlock(lock);
			fn(chunk);
			spin_lock(lock);
		} else {
			fn(chunk);
		}
	}
	return ret;
}

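/*
 * journal head allocation.  nr_reiserfs_jh counts the live journal
 * heads; alloc_jh loops (yielding) until kmalloc succeeds.
 */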
static atomic_t nr_reiserfs_jh = ATOMIC_INIT(0);
static struct reiserfs_jh *alloc_jh(void)
{
	struct reiserfs_jh *jh;
	while (1) {
		jh = kmalloc(sizeof(*jh), GFP_NOFS);
		if (jh) {
			atomic_inc(&nr_reiserfs_jh);
			return jh;
		}
		yield();
	}
}

/*
 * we want to free the jh when the buffer has been written
 * and waited on
 */
void reiserfs_free_jh(struct buffer_head *bh)
{
	struct reiserfs_jh *jh;

	jh = bh->b_private;
	if (jh) {
		bh->b_private = NULL;
		jh->bh = NULL;
		list_del_init(&jh->list);
		kfree(jh);
		if (atomic_read(&nr_reiserfs_jh) <= 0)
			BUG();
		atomic_dec(&nr_reiserfs_jh);
		put_bh(bh);
	}
}

static inline int __add_jh(struct reiserfs_journal *j, struct buffer_head *bh,
			   int tail)
{
	struct reiserfs_jh *jh;

	if (bh->b_private) {
		spin_lock(&j->j_dirty_buffers_lock);
		if (!bh->b_private) {
			spin_unlock(&j->j_dirty_buffers_lock);
			goto no_jh;
		}
		jh = bh->b_private;
		list_del_init(&jh->list);
	} else {
no_jh:
		get_bh(bh);
		jh = alloc_jh();
		spin_lock(&j->j_dirty_buffers_lock);
		/*
		 * buffer must be locked for __add_jh, should be able to have
		 * two adds at the same time
		 */
		BUG_ON(bh->b_private);
		jh->bh = bh;
		bh->b_private = jh;
	}
	jh->jl = j->j_current_jl;
	if (tail)
		list_add_tail(&jh->list, &jh->jl->j_tail_bh_list);
	else {
		list_add_tail(&jh->list, &jh->jl->j_bh_list);
	}
	spin_unlock(&j->j_dirty_buffers_lock);
	return 0;
}

int reiserfs_add_tail_list(struct inode *inode, struct buffer_head *bh)
{
	return __add_jh(SB_JOURNAL(inode->i_sb), bh, 1);
}
int reiserfs_add_ordered_list(struct inode *inode, struct buffer_head *bh)
{
	return __add_jh(SB_JOURNAL(inode->i_sb), bh, 0);
}

#define JH_ENTRY(l) list_entry((l), struct reiserfs_jh, list)
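/*
 * write the dirty ordered-data buffers on this list in CHUNK_SIZE
 * batches, then wait for them all; returns -EIO if any write failed
 */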
static int write_ordered_buffers(spinlock_t * lock,
				 struct reiserfs_journal *j,
				 struct reiserfs_journal_list *jl,
				 struct list_head *list)
{
	struct buffer_head *bh;
	struct reiserfs_jh *jh;
	int ret = j->j_errno;
	struct buffer_chunk chunk;
	struct list_head tmp;
	INIT_LIST_HEAD(&tmp);

	chunk.nr = 0;
	spin_lock(lock);
	while (!list_empty(list)) {
		jh = JH_ENTRY(list->next);
		bh = jh->bh;
		get_bh(bh);
		if (!trylock_buffer(bh)) {
			if (!buffer_dirty(bh)) {
				list_move(&jh->list, &tmp);
				goto loop_next;
			}
			spin_unlock(lock);
			if (chunk.nr)
				write_ordered_chunk(&chunk);
			wait_on_buffer(bh);
			cond_resched();
			spin_lock(lock);
			goto loop_next;
		}
		/*
		 * in theory, dirty non-uptodate buffers should never get here,
		 * but the upper layer io error paths still have a few quirks.
		 * Handle them here as gracefully as we can
		 */
		if (!buffer_uptodate(bh) && buffer_dirty(bh)) {
			clear_buffer_dirty(bh);
			ret = -EIO;
		}
		if (buffer_dirty(bh)) {
			list_move(&jh->list, &tmp);
			add_to_chunk(&chunk, bh, lock, write_ordered_chunk);
		} else {
			reiserfs_free_jh(bh);
			unlock_buffer(bh);
		}
loop_next:
		put_bh(bh);
		cond_resched_lock(lock);
	}
	if (chunk.nr) {
		spin_unlock(lock);
		write_ordered_chunk(&chunk);
		spin_lock(lock);
	}
	while (!list_empty(&tmp)) {
		jh = JH_ENTRY(tmp.prev);
		bh = jh->bh;
		get_bh(bh);
		reiserfs_free_jh(bh);

		if (buffer_locked(bh)) {
			spin_unlock(lock);
			wait_on_buffer(bh);
			spin_lock(lock);
		}
		if (!buffer_uptodate(bh)) {
			ret = -EIO;
		}
		/*
		 * ugly interaction with invalidatepage here.
		 * reiserfs_invalidate_page will pin any buffer that has a
		 * valid journal head from an older transaction.  If someone
		 * else sets our buffer dirty after we write it in the first
		 * loop, and then someone truncates the page away, nobody
		 * will ever write the buffer.  We're safe if we write the
		 * page one last time after freeing the journal header.
		 */
		if (buffer_dirty(bh) && unlikely(bh->b_page->mapping == NULL)) {
			spin_unlock(lock);
			ll_rw_block(REQ_OP_WRITE, 0, 1, &bh);
			spin_lock(lock);
		}
		put_bh(bh);
		cond_resched_lock(lock);
	}
	spin_unlock(lock);
	return ret;
}

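/*
 * commit every transaction older than jl.  Returns 1 if jl itself went
 * away while we were flushing, 0 otherwise.
 */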
static int flush_older_commits(struct super_block *s,
			       struct reiserfs_journal_list *jl)
{
	struct reiserfs_journal *journal = SB_JOURNAL(s);
	struct reiserfs_journal_list *other_jl;
	struct reiserfs_journal_list *first_jl;
	struct list_head *entry;
	unsigned int trans_id = jl->j_trans_id;
	unsigned int other_trans_id;

find_first:
	/*
	 * first we walk backwards to find the oldest uncommitted transaction
	 */
	first_jl = jl;
	entry = jl->j_list.prev;
	while (1) {
		other_jl = JOURNAL_LIST_ENTRY(entry);
		if (entry == &journal->j_journal_list ||
		    atomic_read(&other_jl->j_older_commits_done))
			break;

		first_jl = other_jl;
		entry = other_jl->j_list.prev;
	}

	/* if we didn't find any older uncommitted transactions, return now */
	if (first_jl == jl) {
		return 0;
	}

	entry = &first_jl->j_list;
	while (1) {
		other_jl = JOURNAL_LIST_ENTRY(entry);
		other_trans_id = other_jl->j_trans_id;

		if (other_trans_id < trans_id) {
			if (atomic_read(&other_jl->j_commit_left) != 0) {
				flush_commit_list(s, other_jl, 0);

				/* list we were called with is gone, return */
				if (!journal_list_still_alive(s, trans_id))
					return 1;

				/*
				 * the one we just flushed is gone, this means
				 * all older lists are also gone, so first_jl
				 * is no longer valid either.  Go back to the
				 * beginning.
				 */
				if (!journal_list_still_alive
				    (s, other_trans_id)) {
					goto find_first;
				}
			}
			entry = entry->next;
			if (entry == &journal->j_journal_list)
				return 0;
		} else {
			return 0;
		}
	}
	return 0;
}

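/*
 * throttle callers while async commit writes are in flight by waiting
 * for block device congestion to ease
 */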
static int reiserfs_async_progress_wait(struct super_block *s)
{
	struct reiserfs_journal *j = SB_JOURNAL(s);

	if (atomic_read(&j->j_async_throttle)) {
		int depth;

		depth = reiserfs_write_unlock_nested(s);
		congestion_wait(BLK_RW_ASYNC, HZ / 10);
		reiserfs_write_lock_nested(s, depth);
	}

	return 0;
}

/*
 * if this journal list still has commit blocks unflushed, send them to disk.
 *
 * log areas must be flushed in order (transaction 2 can't commit before
 * transaction 1).  Before the commit block can be written, every other log
 * block must be safely on disk.
 */
static int flush_commit_list(struct super_block *s,
			     struct reiserfs_journal_list *jl, int flushall)
{
	int i;
	b_blocknr_t bn;
	struct buffer_head *tbh = NULL;
	unsigned int trans_id = jl->j_trans_id;
	struct reiserfs_journal *journal = SB_JOURNAL(s);
	int retval = 0;
	int write_len;
	int depth;

	reiserfs_check_lock_depth(s, "flush_commit_list");

	if (atomic_read(&jl->j_older_commits_done)) {
		return 0;
	}

	/*
	 * before we can put our commit blocks on disk, we have to make
	 * sure everyone older than us is on disk too
	 */
	BUG_ON(jl->j_len <= 0);
	BUG_ON(trans_id == journal->j_trans_id);

	get_journal_list(jl);
	if (flushall) {
		if (flush_older_commits(s, jl) == 1) {
			/*
			 * list disappeared during flush_older_commits.
			 * return
			 */
			goto put_jl;
		}
	}

	/* make sure nobody is trying to flush this one at the same time */
	reiserfs_mutex_lock_safe(&jl->j_commit_mutex, s);

	if (!journal_list_still_alive(s, trans_id)) {
		mutex_unlock(&jl->j_commit_mutex);
		goto put_jl;
	}
	BUG_ON(jl->j_trans_id == 0);

	/* this commit is done, exit */
	if (atomic_read(&jl->j_commit_left) <= 0) {
		if (flushall) {
			atomic_set(&jl->j_older_commits_done, 1);
		}
		mutex_unlock(&jl->j_commit_mutex);
		goto put_jl;
	}

	if (!list_empty(&jl->j_bh_list)) {
		int ret;

		/*
		 * We might sleep in numerous places inside
		 * write_ordered_buffers.  Relax the write lock.
		 */
		depth = reiserfs_write_unlock_nested(s);
		ret = write_ordered_buffers(&journal->j_dirty_buffers_lock,
					    journal, jl, &jl->j_bh_list);
		if (ret < 0 && retval == 0)
			retval = ret;
		reiserfs_write_lock_nested(s, depth);
	}
	BUG_ON(!list_empty(&jl->j_bh_list));
	/*
	 * for the description block and all the log blocks, submit any buffers
	 * that haven't already reached the disk.  Try to write at least 256
	 * log blocks.  later on, we will only wait on blocks that correspond
	 * to this transaction, but while we're unplugging we might as well
	 * get a chunk of data on there.
	 */
	atomic_inc(&journal->j_async_throttle);
	write_len = jl->j_len + 1;
	if (write_len < 256)
		write_len = 256;
	for (i = 0; i < write_len; i++) {
		bn = SB_ONDISK_JOURNAL_1st_BLOCK(s) + (jl->j_start + i) %
		    SB_ONDISK_JOURNAL_SIZE(s);
		tbh = journal_find_get_block(s, bn);
		if (tbh) {
			if (buffer_dirty(tbh)) {
				depth = reiserfs_write_unlock_nested(s);
				ll_rw_block(REQ_OP_WRITE, 0, 1, &tbh);
				reiserfs_write_lock_nested(s, depth);
			}
			put_bh(tbh);
		}
	}
	atomic_dec(&journal->j_async_throttle);

	for (i = 0; i < (jl->j_len + 1); i++) {
		bn = SB_ONDISK_JOURNAL_1st_BLOCK(s) +
		    (jl->j_start + i) % SB_ONDISK_JOURNAL_SIZE(s);
		tbh = journal_find_get_block(s, bn);

		depth = reiserfs_write_unlock_nested(s);
		__wait_on_buffer(tbh);
		reiserfs_write_lock_nested(s, depth);
		/*
		 * since we're using ll_rw_block above, it might have skipped
		 * over a locked buffer.  Double check here
		 */
		/* redundant, sync_dirty_buffer() checks */
		if (buffer_dirty(tbh)) {
			depth = reiserfs_write_unlock_nested(s);
			sync_dirty_buffer(tbh);
			reiserfs_write_lock_nested(s, depth);
		}
		if (unlikely(!buffer_uptodate(tbh))) {
#ifdef CONFIG_REISERFS_CHECK
			reiserfs_warning(s, "journal-601",
					 "buffer write failed");
#endif
			retval = -EIO;
		}
		/* once for journal_find_get_block */
		put_bh(tbh);
		/* once due to original getblk in do_journal_end */
		put_bh(tbh);
		atomic_dec(&jl->j_commit_left);
	}

	BUG_ON(atomic_read(&jl->j_commit_left) != 1);

	/*
	 * If there was a write error in the journal - we can't commit
	 * this transaction - it will be invalid and, if successful,
	 * will just end up propagating the write error out to
	 * the file system.
	 */
	if (likely(!retval && !reiserfs_is_journal_aborted(journal))) {
		if (buffer_dirty(jl->j_commit_bh))
			BUG();
		mark_buffer_dirty(jl->j_commit_bh);
		depth = reiserfs_write_unlock_nested(s);
		if (reiserfs_barrier_flush(s))
			__sync_dirty_buffer(jl->j_commit_bh,
					REQ_SYNC | REQ_PREFLUSH | REQ_FUA);
		else
			sync_dirty_buffer(jl->j_commit_bh);
		reiserfs_write_lock_nested(s, depth);
	}

	/*
	 * If there was a write error in the journal - we can't commit this
	 * transaction - it will be invalid and, if successful, will just end
	 * up propagating the write error out to the filesystem.
	 */
	if (unlikely(!buffer_uptodate(jl->j_commit_bh))) {
#ifdef CONFIG_REISERFS_CHECK
		reiserfs_warning(s, "journal-615", "buffer write failed");
#endif
		retval = -EIO;
	}
	bforget(jl->j_commit_bh);
	if (journal->j_last_commit_id != 0 &&
	    (jl->j_trans_id - journal->j_last_commit_id) != 1) {
		reiserfs_warning(s, "clm-2200", "last commit %lu, current %lu",
				 journal->j_last_commit_id, jl->j_trans_id);
	}
	journal->j_last_commit_id = jl->j_trans_id;

	/*
	 * now, every commit block is on the disk.  It is safe to allow
	 * blocks freed during this transaction to be reallocated
	 */
	cleanup_freed_for_journal_list(s, jl);

	retval = retval ? retval : journal->j_errno;

	/* mark the metadata dirty */
	if (!retval)
		dirty_one_transaction(s, jl);
	atomic_dec(&jl->j_commit_left);

	if (flushall) {
		atomic_set(&jl->j_older_commits_done, 1);
	}
	mutex_unlock(&jl->j_commit_mutex);
put_jl:
	put_journal_list(s, jl);

	if (retval)
		reiserfs_abort(s, retval, "Journal write error in %s",
			       __func__);
	return retval;
}

1163*4882a593Smuzhiyun /*
1164*4882a593Smuzhiyun * flush_journal_list frequently needs to find a newer transaction for a
1165*4882a593Smuzhiyun * given block. This does that, or returns NULL if it can't find anything
1166*4882a593Smuzhiyun */
1167*4882a593Smuzhiyun static struct reiserfs_journal_list *find_newer_jl_for_cn(struct
1168*4882a593Smuzhiyun reiserfs_journal_cnode
1169*4882a593Smuzhiyun *cn)
1170*4882a593Smuzhiyun {
1171*4882a593Smuzhiyun struct super_block *sb = cn->sb;
1172*4882a593Smuzhiyun b_blocknr_t blocknr = cn->blocknr;
1173*4882a593Smuzhiyun
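/*
 * cnodes appear to be inserted at the head of each hash chain, so
 * walking hprev moves toward newer transactions; that is the
 * invariant this lookup relies on.
 */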
1174*4882a593Smuzhiyun cn = cn->hprev;
1175*4882a593Smuzhiyun while (cn) {
1176*4882a593Smuzhiyun if (cn->sb == sb && cn->blocknr == blocknr && cn->jlist) {
1177*4882a593Smuzhiyun return cn->jlist;
1178*4882a593Smuzhiyun }
1179*4882a593Smuzhiyun cn = cn->hprev;
1180*4882a593Smuzhiyun }
1181*4882a593Smuzhiyun return NULL;
1182*4882a593Smuzhiyun }
1183*4882a593Smuzhiyun
1184*4882a593Smuzhiyun static void remove_journal_hash(struct super_block *,
1185*4882a593Smuzhiyun struct reiserfs_journal_cnode **,
1186*4882a593Smuzhiyun struct reiserfs_journal_list *, unsigned long,
1187*4882a593Smuzhiyun int);
1188*4882a593Smuzhiyun
1189*4882a593Smuzhiyun /*
1190*4882a593Smuzhiyun * once all the real blocks have been flushed, it is safe to remove them
1191*4882a593Smuzhiyun * from the journal list for this transaction. Aside from freeing the
1192*4882a593Smuzhiyun * cnode, this also allows the block to be reallocated for data blocks
1193*4882a593Smuzhiyun * if it had been deleted.
1194*4882a593Smuzhiyun */
1195*4882a593Smuzhiyun static void remove_all_from_journal_list(struct super_block *sb,
1196*4882a593Smuzhiyun struct reiserfs_journal_list *jl,
1197*4882a593Smuzhiyun int debug)
1198*4882a593Smuzhiyun {
1199*4882a593Smuzhiyun struct reiserfs_journal *journal = SB_JOURNAL(sb);
1200*4882a593Smuzhiyun struct reiserfs_journal_cnode *cn, *last;
1201*4882a593Smuzhiyun cn = jl->j_realblock;
1202*4882a593Smuzhiyun
1203*4882a593Smuzhiyun /*
1204*4882a593Smuzhiyun * which is better, to lock once around the whole loop, or
1205*4882a593Smuzhiyun * to lock for each call to remove_journal_hash?
1206*4882a593Smuzhiyun */
1207*4882a593Smuzhiyun while (cn) {
1208*4882a593Smuzhiyun if (cn->blocknr != 0) {
1209*4882a593Smuzhiyun if (debug) {
1210*4882a593Smuzhiyun reiserfs_warning(sb, "reiserfs-2201",
1211*4882a593Smuzhiyun "block %u, bh is %d, state %ld",
1212*4882a593Smuzhiyun cn->blocknr, cn->bh ? 1 : 0,
1213*4882a593Smuzhiyun cn->state);
1214*4882a593Smuzhiyun }
1215*4882a593Smuzhiyun cn->state = 0;
1216*4882a593Smuzhiyun remove_journal_hash(sb, journal->j_list_hash_table,
1217*4882a593Smuzhiyun jl, cn->blocknr, 1);
1218*4882a593Smuzhiyun }
1219*4882a593Smuzhiyun last = cn;
1220*4882a593Smuzhiyun cn = cn->next;
1221*4882a593Smuzhiyun free_cnode(sb, last);
1222*4882a593Smuzhiyun }
1223*4882a593Smuzhiyun jl->j_realblock = NULL;
1224*4882a593Smuzhiyun }
1225*4882a593Smuzhiyun
1226*4882a593Smuzhiyun /*
1227*4882a593Smuzhiyun * if this timestamp is greater than the timestamp we wrote last to the
1228*4882a593Smuzhiyun * header block, write it to the header block. once this is done, I can
1229*4882a593Smuzhiyun * safely say the log area for this transaction won't ever be replayed,
1230*4882a593Smuzhiyun * and I can start releasing blocks in this transaction for reuse as data
1231*4882a593Smuzhiyun * blocks. called by flush_journal_list, before it calls
1232*4882a593Smuzhiyun * remove_all_from_journal_list
1233*4882a593Smuzhiyun */
1234*4882a593Smuzhiyun static int _update_journal_header_block(struct super_block *sb,
1235*4882a593Smuzhiyun unsigned long offset,
1236*4882a593Smuzhiyun unsigned int trans_id)
1237*4882a593Smuzhiyun {
1238*4882a593Smuzhiyun struct reiserfs_journal_header *jh;
1239*4882a593Smuzhiyun struct reiserfs_journal *journal = SB_JOURNAL(sb);
1240*4882a593Smuzhiyun int depth;
1241*4882a593Smuzhiyun
1242*4882a593Smuzhiyun if (reiserfs_is_journal_aborted(journal))
1243*4882a593Smuzhiyun return -EIO;
1244*4882a593Smuzhiyun
1245*4882a593Smuzhiyun if (trans_id >= journal->j_last_flush_trans_id) {
1246*4882a593Smuzhiyun if (buffer_locked((journal->j_header_bh))) {
1247*4882a593Smuzhiyun depth = reiserfs_write_unlock_nested(sb);
1248*4882a593Smuzhiyun __wait_on_buffer(journal->j_header_bh);
1249*4882a593Smuzhiyun reiserfs_write_lock_nested(sb, depth);
1250*4882a593Smuzhiyun if (unlikely(!buffer_uptodate(journal->j_header_bh))) {
1251*4882a593Smuzhiyun #ifdef CONFIG_REISERFS_CHECK
1252*4882a593Smuzhiyun reiserfs_warning(sb, "journal-699",
1253*4882a593Smuzhiyun "buffer write failed");
1254*4882a593Smuzhiyun #endif
1255*4882a593Smuzhiyun return -EIO;
1256*4882a593Smuzhiyun }
1257*4882a593Smuzhiyun }
1258*4882a593Smuzhiyun journal->j_last_flush_trans_id = trans_id;
1259*4882a593Smuzhiyun journal->j_first_unflushed_offset = offset;
1260*4882a593Smuzhiyun jh = (struct reiserfs_journal_header *)(journal->j_header_bh->
1261*4882a593Smuzhiyun b_data);
1262*4882a593Smuzhiyun jh->j_last_flush_trans_id = cpu_to_le32(trans_id);
1263*4882a593Smuzhiyun jh->j_first_unflushed_offset = cpu_to_le32(offset);
1264*4882a593Smuzhiyun jh->j_mount_id = cpu_to_le32(journal->j_mount_id);
1265*4882a593Smuzhiyun
1266*4882a593Smuzhiyun set_buffer_dirty(journal->j_header_bh);
1267*4882a593Smuzhiyun depth = reiserfs_write_unlock_nested(sb);
1268*4882a593Smuzhiyun
1269*4882a593Smuzhiyun if (reiserfs_barrier_flush(sb))
1270*4882a593Smuzhiyun __sync_dirty_buffer(journal->j_header_bh,
1271*4882a593Smuzhiyun REQ_SYNC | REQ_PREFLUSH | REQ_FUA);
1272*4882a593Smuzhiyun else
1273*4882a593Smuzhiyun sync_dirty_buffer(journal->j_header_bh);
1274*4882a593Smuzhiyun
1275*4882a593Smuzhiyun reiserfs_write_lock_nested(sb, depth);
1276*4882a593Smuzhiyun if (!buffer_uptodate(journal->j_header_bh)) {
1277*4882a593Smuzhiyun reiserfs_warning(sb, "journal-837",
1278*4882a593Smuzhiyun "IO error during journal replay");
1279*4882a593Smuzhiyun return -EIO;
1280*4882a593Smuzhiyun }
1281*4882a593Smuzhiyun }
1282*4882a593Smuzhiyun return 0;
1283*4882a593Smuzhiyun }
1284*4882a593Smuzhiyun
1285*4882a593Smuzhiyun static int update_journal_header_block(struct super_block *sb,
1286*4882a593Smuzhiyun unsigned long offset,
1287*4882a593Smuzhiyun unsigned int trans_id)
1288*4882a593Smuzhiyun {
1289*4882a593Smuzhiyun return _update_journal_header_block(sb, offset, trans_id);
1290*4882a593Smuzhiyun }
1291*4882a593Smuzhiyun
1292*4882a593Smuzhiyun /*
1293*4882a593Smuzhiyun * flush any and all journal lists older than you are
1294*4882a593Smuzhiyun * can only be called from flush_journal_list
1295*4882a593Smuzhiyun */
1296*4882a593Smuzhiyun static int flush_older_journal_lists(struct super_block *sb,
1297*4882a593Smuzhiyun struct reiserfs_journal_list *jl)
1298*4882a593Smuzhiyun {
1299*4882a593Smuzhiyun struct list_head *entry;
1300*4882a593Smuzhiyun struct reiserfs_journal_list *other_jl;
1301*4882a593Smuzhiyun struct reiserfs_journal *journal = SB_JOURNAL(sb);
1302*4882a593Smuzhiyun unsigned int trans_id = jl->j_trans_id;
1303*4882a593Smuzhiyun
1304*4882a593Smuzhiyun /*
1305*4882a593Smuzhiyun * we know we are the only ones flushing things, no extra race
1306*4882a593Smuzhiyun * protection is required.
1307*4882a593Smuzhiyun */
1308*4882a593Smuzhiyun restart:
1309*4882a593Smuzhiyun entry = journal->j_journal_list.next;
1310*4882a593Smuzhiyun /* Did we wrap? */
1311*4882a593Smuzhiyun if (entry == &journal->j_journal_list)
1312*4882a593Smuzhiyun return 0;
1313*4882a593Smuzhiyun other_jl = JOURNAL_LIST_ENTRY(entry);
1314*4882a593Smuzhiyun if (other_jl->j_trans_id < trans_id) {
1315*4882a593Smuzhiyun BUG_ON(other_jl->j_refcount <= 0);
1316*4882a593Smuzhiyun /* do not flush all */
1317*4882a593Smuzhiyun flush_journal_list(sb, other_jl, 0);
1318*4882a593Smuzhiyun
1319*4882a593Smuzhiyun /* other_jl is now deleted from the list */
1320*4882a593Smuzhiyun goto restart;
1321*4882a593Smuzhiyun }
1322*4882a593Smuzhiyun return 0;
1323*4882a593Smuzhiyun }
1324*4882a593Smuzhiyun
1325*4882a593Smuzhiyun static void del_from_work_list(struct super_block *s,
1326*4882a593Smuzhiyun struct reiserfs_journal_list *jl)
1327*4882a593Smuzhiyun {
1328*4882a593Smuzhiyun struct reiserfs_journal *journal = SB_JOURNAL(s);
1329*4882a593Smuzhiyun if (!list_empty(&jl->j_working_list)) {
1330*4882a593Smuzhiyun list_del_init(&jl->j_working_list);
1331*4882a593Smuzhiyun journal->j_num_work_lists--;
1332*4882a593Smuzhiyun }
1333*4882a593Smuzhiyun }
1334*4882a593Smuzhiyun
1335*4882a593Smuzhiyun /*
1336*4882a593Smuzhiyun * flush a journal list, both commit and real blocks
1337*4882a593Smuzhiyun *
1338*4882a593Smuzhiyun * always set flushall to 1, unless you are calling from inside
1339*4882a593Smuzhiyun * flush_journal_list
1340*4882a593Smuzhiyun *
1341*4882a593Smuzhiyun * IMPORTANT. This can only be called while there are no journal writers,
1342*4882a593Smuzhiyun * and the journal is locked. That means it can only be called from
1343*4882a593Smuzhiyun * do_journal_end, or by journal_release
1344*4882a593Smuzhiyun */
1345*4882a593Smuzhiyun static int flush_journal_list(struct super_block *s,
1346*4882a593Smuzhiyun struct reiserfs_journal_list *jl, int flushall)
1347*4882a593Smuzhiyun {
1348*4882a593Smuzhiyun struct reiserfs_journal_list *pjl;
1349*4882a593Smuzhiyun struct reiserfs_journal_cnode *cn;
1350*4882a593Smuzhiyun int count;
1351*4882a593Smuzhiyun int was_jwait = 0;
1352*4882a593Smuzhiyun int was_dirty = 0;
1353*4882a593Smuzhiyun struct buffer_head *saved_bh;
1354*4882a593Smuzhiyun unsigned long j_len_saved = jl->j_len;
1355*4882a593Smuzhiyun struct reiserfs_journal *journal = SB_JOURNAL(s);
1356*4882a593Smuzhiyun int err = 0;
1357*4882a593Smuzhiyun int depth;
1358*4882a593Smuzhiyun
1359*4882a593Smuzhiyun BUG_ON(j_len_saved <= 0);
1360*4882a593Smuzhiyun
1361*4882a593Smuzhiyun if (atomic_read(&journal->j_wcount) != 0) {
1362*4882a593Smuzhiyun reiserfs_warning(s, "clm-2048", "called with wcount %d",
1363*4882a593Smuzhiyun atomic_read(&journal->j_wcount));
1364*4882a593Smuzhiyun }
1365*4882a593Smuzhiyun
1366*4882a593Smuzhiyun /* if flushall == 0, the lock is already held */
1367*4882a593Smuzhiyun if (flushall) {
1368*4882a593Smuzhiyun reiserfs_mutex_lock_safe(&journal->j_flush_mutex, s);
1369*4882a593Smuzhiyun } else if (mutex_trylock(&journal->j_flush_mutex)) {
1370*4882a593Smuzhiyun BUG();
1371*4882a593Smuzhiyun }
1372*4882a593Smuzhiyun
1373*4882a593Smuzhiyun count = 0;
1374*4882a593Smuzhiyun if (j_len_saved > journal->j_trans_max) {
1375*4882a593Smuzhiyun reiserfs_panic(s, "journal-715", "length is %lu, trans id %lu",
1376*4882a593Smuzhiyun j_len_saved, jl->j_trans_id);
1377*4882a593Smuzhiyun return 0;
1378*4882a593Smuzhiyun }
1379*4882a593Smuzhiyun
1380*4882a593Smuzhiyun /* if all the work is already done, get out of here */
1381*4882a593Smuzhiyun if (atomic_read(&jl->j_nonzerolen) <= 0 &&
1382*4882a593Smuzhiyun atomic_read(&jl->j_commit_left) <= 0) {
1383*4882a593Smuzhiyun goto flush_older_and_return;
1384*4882a593Smuzhiyun }
1385*4882a593Smuzhiyun
1386*4882a593Smuzhiyun /*
1387*4882a593Smuzhiyun * start by putting the commit list on disk. This will also flush
1388*4882a593Smuzhiyun * the commit lists of any older transactions
1389*4882a593Smuzhiyun */
1390*4882a593Smuzhiyun flush_commit_list(s, jl, 1);
1391*4882a593Smuzhiyun
1392*4882a593Smuzhiyun if (!(jl->j_state & LIST_DIRTY)
1393*4882a593Smuzhiyun && !reiserfs_is_journal_aborted(journal))
1394*4882a593Smuzhiyun BUG();
1395*4882a593Smuzhiyun
1396*4882a593Smuzhiyun /* are we done now? */
1397*4882a593Smuzhiyun if (atomic_read(&jl->j_nonzerolen) <= 0 &&
1398*4882a593Smuzhiyun atomic_read(&jl->j_commit_left) <= 0) {
1399*4882a593Smuzhiyun goto flush_older_and_return;
1400*4882a593Smuzhiyun }
1401*4882a593Smuzhiyun
1402*4882a593Smuzhiyun /*
1403*4882a593Smuzhiyun * loop through each cnode, see if we need to write it,
1404*4882a593Smuzhiyun * or wait on a more recent transaction, or just ignore it
1405*4882a593Smuzhiyun */
1406*4882a593Smuzhiyun if (atomic_read(&journal->j_wcount) != 0) {
1407*4882a593Smuzhiyun reiserfs_panic(s, "journal-844", "journal list is flushing, "
1408*4882a593Smuzhiyun "wcount is not 0");
1409*4882a593Smuzhiyun }
1410*4882a593Smuzhiyun cn = jl->j_realblock;
1411*4882a593Smuzhiyun while (cn) {
1412*4882a593Smuzhiyun was_jwait = 0;
1413*4882a593Smuzhiyun was_dirty = 0;
1414*4882a593Smuzhiyun saved_bh = NULL;
1415*4882a593Smuzhiyun /* blocknr of 0 is no longer in the hash, ignore it */
1416*4882a593Smuzhiyun if (cn->blocknr == 0) {
1417*4882a593Smuzhiyun goto free_cnode;
1418*4882a593Smuzhiyun }
1419*4882a593Smuzhiyun
1420*4882a593Smuzhiyun /*
1421*4882a593Smuzhiyun * This transaction failed commit.
1422*4882a593Smuzhiyun * Don't write out to the disk
1423*4882a593Smuzhiyun */
1424*4882a593Smuzhiyun if (!(jl->j_state & LIST_DIRTY))
1425*4882a593Smuzhiyun goto free_cnode;
1426*4882a593Smuzhiyun
1427*4882a593Smuzhiyun pjl = find_newer_jl_for_cn(cn);
1428*4882a593Smuzhiyun /*
1429*4882a593Smuzhiyun * the order is important here. We check pjl to make sure we
1430*4882a593Smuzhiyun * don't clear BH_JDirty_wait if we aren't the one writing this
1431*4882a593Smuzhiyun * block to disk
1432*4882a593Smuzhiyun */
1433*4882a593Smuzhiyun if (!pjl && cn->bh) {
1434*4882a593Smuzhiyun saved_bh = cn->bh;
1435*4882a593Smuzhiyun
1436*4882a593Smuzhiyun /*
1437*4882a593Smuzhiyun * we do this to make sure nobody releases the
1438*4882a593Smuzhiyun * buffer while we are working with it
1439*4882a593Smuzhiyun */
1440*4882a593Smuzhiyun get_bh(saved_bh);
1441*4882a593Smuzhiyun
1442*4882a593Smuzhiyun if (buffer_journal_dirty(saved_bh)) {
1443*4882a593Smuzhiyun BUG_ON(!can_dirty(cn));
1444*4882a593Smuzhiyun was_jwait = 1;
1445*4882a593Smuzhiyun was_dirty = 1;
1446*4882a593Smuzhiyun } else if (can_dirty(cn)) {
1447*4882a593Smuzhiyun /*
1448*4882a593Smuzhiyun * everything with !pjl && jwait
1449*4882a593Smuzhiyun * should be writable
1450*4882a593Smuzhiyun */
1451*4882a593Smuzhiyun BUG();
1452*4882a593Smuzhiyun }
1453*4882a593Smuzhiyun }
1454*4882a593Smuzhiyun
1455*4882a593Smuzhiyun /*
1456*4882a593Smuzhiyun * if someone has this block in a newer transaction, just make
1457*4882a593Smuzhiyun * sure they are committed, and don't try writing it to disk
1458*4882a593Smuzhiyun */
1459*4882a593Smuzhiyun if (pjl) {
1460*4882a593Smuzhiyun if (atomic_read(&pjl->j_commit_left))
1461*4882a593Smuzhiyun flush_commit_list(s, pjl, 1);
1462*4882a593Smuzhiyun goto free_cnode;
1463*4882a593Smuzhiyun }
1464*4882a593Smuzhiyun
1465*4882a593Smuzhiyun /*
1466*4882a593Smuzhiyun * bh == NULL when the block got to disk on its own, OR,
1467*4882a593Smuzhiyun * the block got freed in a future transaction
1468*4882a593Smuzhiyun */
1469*4882a593Smuzhiyun if (saved_bh == NULL) {
1470*4882a593Smuzhiyun goto free_cnode;
1471*4882a593Smuzhiyun }
1472*4882a593Smuzhiyun
1473*4882a593Smuzhiyun /*
1474*4882a593Smuzhiyun * this should never happen. kupdate_one_transaction has
1475*4882a593Smuzhiyun * this list locked while it works, so we should never see a
1476*4882a593Smuzhiyun * buffer here that is not marked JDirty_wait
1477*4882a593Smuzhiyun */
1478*4882a593Smuzhiyun if ((!was_jwait) && !buffer_locked(saved_bh)) {
1479*4882a593Smuzhiyun reiserfs_warning(s, "journal-813",
1480*4882a593Smuzhiyun "BAD! buffer %llu %cdirty %cjwait, "
1481*4882a593Smuzhiyun "not in a newer transaction",
1482*4882a593Smuzhiyun (unsigned long long)saved_bh->
1483*4882a593Smuzhiyun b_blocknr, was_dirty ? ' ' : '!',
1484*4882a593Smuzhiyun was_jwait ? ' ' : '!');
1485*4882a593Smuzhiyun }
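/*
 * dirty buffers are submitted here and tagged BLOCK_NEEDS_FLUSH; the
 * second pass below waits for that I/O to complete and drops the
 * extra reference taken just below.
 */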
1486*4882a593Smuzhiyun if (was_dirty) {
1487*4882a593Smuzhiyun /*
1488*4882a593Smuzhiyun * we inc again because saved_bh gets decremented
1489*4882a593Smuzhiyun * at free_cnode
1490*4882a593Smuzhiyun */
1491*4882a593Smuzhiyun get_bh(saved_bh);
1492*4882a593Smuzhiyun set_bit(BLOCK_NEEDS_FLUSH, &cn->state);
1493*4882a593Smuzhiyun lock_buffer(saved_bh);
1494*4882a593Smuzhiyun BUG_ON(cn->blocknr != saved_bh->b_blocknr);
1495*4882a593Smuzhiyun if (buffer_dirty(saved_bh))
1496*4882a593Smuzhiyun submit_logged_buffer(saved_bh);
1497*4882a593Smuzhiyun else
1498*4882a593Smuzhiyun unlock_buffer(saved_bh);
1499*4882a593Smuzhiyun count++;
1500*4882a593Smuzhiyun } else {
1501*4882a593Smuzhiyun reiserfs_warning(s, "clm-2082",
1502*4882a593Smuzhiyun "Unable to flush buffer %llu in %s",
1503*4882a593Smuzhiyun (unsigned long long)saved_bh->
1504*4882a593Smuzhiyun b_blocknr, __func__);
1505*4882a593Smuzhiyun }
1506*4882a593Smuzhiyun free_cnode:
1507*4882a593Smuzhiyun cn = cn->next;
1508*4882a593Smuzhiyun if (saved_bh) {
1509*4882a593Smuzhiyun /*
1510*4882a593Smuzhiyun * we incremented this to keep others from
1511*4882a593Smuzhiyun * taking the buffer head away
1512*4882a593Smuzhiyun */
1513*4882a593Smuzhiyun put_bh(saved_bh);
1514*4882a593Smuzhiyun if (atomic_read(&saved_bh->b_count) < 0) {
1515*4882a593Smuzhiyun reiserfs_warning(s, "journal-945",
1516*4882a593Smuzhiyun "saved_bh->b_count < 0");
1517*4882a593Smuzhiyun }
1518*4882a593Smuzhiyun }
1519*4882a593Smuzhiyun }
1520*4882a593Smuzhiyun if (count > 0) {
1521*4882a593Smuzhiyun cn = jl->j_realblock;
1522*4882a593Smuzhiyun while (cn) {
1523*4882a593Smuzhiyun if (test_bit(BLOCK_NEEDS_FLUSH, &cn->state)) {
1524*4882a593Smuzhiyun if (!cn->bh) {
1525*4882a593Smuzhiyun reiserfs_panic(s, "journal-1011",
1526*4882a593Smuzhiyun "cn->bh is NULL");
1527*4882a593Smuzhiyun }
1528*4882a593Smuzhiyun
1529*4882a593Smuzhiyun depth = reiserfs_write_unlock_nested(s);
1530*4882a593Smuzhiyun __wait_on_buffer(cn->bh);
1531*4882a593Smuzhiyun reiserfs_write_lock_nested(s, depth);
1532*4882a593Smuzhiyun
1533*4882a593Smuzhiyun if (!cn->bh) {
1534*4882a593Smuzhiyun reiserfs_panic(s, "journal-1012",
1535*4882a593Smuzhiyun "cn->bh is NULL");
1536*4882a593Smuzhiyun }
1537*4882a593Smuzhiyun if (unlikely(!buffer_uptodate(cn->bh))) {
1538*4882a593Smuzhiyun #ifdef CONFIG_REISERFS_CHECK
1539*4882a593Smuzhiyun reiserfs_warning(s, "journal-949",
1540*4882a593Smuzhiyun "buffer write failed");
1541*4882a593Smuzhiyun #endif
1542*4882a593Smuzhiyun err = -EIO;
1543*4882a593Smuzhiyun }
1544*4882a593Smuzhiyun /*
1545*4882a593Smuzhiyun * note, we must clear the JDirty_wait bit
1546*4882a593Smuzhiyun * after the up to date check, otherwise we
1547*4882a593Smuzhiyun * race against our flushpage routine
1548*4882a593Smuzhiyun */
1549*4882a593Smuzhiyun BUG_ON(!test_clear_buffer_journal_dirty
1550*4882a593Smuzhiyun (cn->bh));
1551*4882a593Smuzhiyun
1552*4882a593Smuzhiyun /* drop one ref for us */
1553*4882a593Smuzhiyun put_bh(cn->bh);
1554*4882a593Smuzhiyun /* drop one ref for journal_mark_dirty */
1555*4882a593Smuzhiyun release_buffer_page(cn->bh);
1556*4882a593Smuzhiyun }
1557*4882a593Smuzhiyun cn = cn->next;
1558*4882a593Smuzhiyun }
1559*4882a593Smuzhiyun }
1560*4882a593Smuzhiyun
1561*4882a593Smuzhiyun if (err)
1562*4882a593Smuzhiyun reiserfs_abort(s, -EIO,
1563*4882a593Smuzhiyun "Write error while pushing transaction to disk in %s",
1564*4882a593Smuzhiyun __func__);
1565*4882a593Smuzhiyun flush_older_and_return:
1566*4882a593Smuzhiyun
1567*4882a593Smuzhiyun /*
1568*4882a593Smuzhiyun * before we can update the journal header block, we _must_ flush all
1569*4882a593Smuzhiyun * real blocks from all older transactions to disk. This is because
1570*4882a593Smuzhiyun * once the header block is updated, this transaction will not be
1571*4882a593Smuzhiyun * replayed after a crash
1572*4882a593Smuzhiyun */
1573*4882a593Smuzhiyun if (flushall) {
1574*4882a593Smuzhiyun flush_older_journal_lists(s, jl);
1575*4882a593Smuzhiyun }
1576*4882a593Smuzhiyun
1577*4882a593Smuzhiyun err = journal->j_errno;
1578*4882a593Smuzhiyun /*
1579*4882a593Smuzhiyun * before we can remove everything from the hash tables for this
1580*4882a593Smuzhiyun * transaction, we must make sure it can never be replayed
1581*4882a593Smuzhiyun *
1582*4882a593Smuzhiyun * since we are only called from do_journal_end, we know for sure there
1583*4882a593Smuzhiyun * are no allocations going on while we are flushing journal lists. So,
1584*4882a593Smuzhiyun * we only need to update the journal header block for the last list
1585*4882a593Smuzhiyun * being flushed
1586*4882a593Smuzhiyun */
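/*
 * the new first-unflushed offset skips the whole transaction: j_len
 * data blocks plus the description and commit blocks that bracket
 * them (hence the +2), modulo the on-disk journal size to handle wrap.
 */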
1587*4882a593Smuzhiyun if (!err && flushall) {
1588*4882a593Smuzhiyun err =
1589*4882a593Smuzhiyun update_journal_header_block(s,
1590*4882a593Smuzhiyun (jl->j_start + jl->j_len +
1591*4882a593Smuzhiyun 2) % SB_ONDISK_JOURNAL_SIZE(s),
1592*4882a593Smuzhiyun jl->j_trans_id);
1593*4882a593Smuzhiyun if (err)
1594*4882a593Smuzhiyun reiserfs_abort(s, -EIO,
1595*4882a593Smuzhiyun "Write error while updating journal header in %s",
1596*4882a593Smuzhiyun __func__);
1597*4882a593Smuzhiyun }
1598*4882a593Smuzhiyun remove_all_from_journal_list(s, jl, 0);
1599*4882a593Smuzhiyun list_del_init(&jl->j_list);
1600*4882a593Smuzhiyun journal->j_num_lists--;
1601*4882a593Smuzhiyun del_from_work_list(s, jl);
1602*4882a593Smuzhiyun
1603*4882a593Smuzhiyun if (journal->j_last_flush_id != 0 &&
1604*4882a593Smuzhiyun (jl->j_trans_id - journal->j_last_flush_id) != 1) {
1605*4882a593Smuzhiyun reiserfs_warning(s, "clm-2201", "last flush %lu, current %lu",
1606*4882a593Smuzhiyun journal->j_last_flush_id, jl->j_trans_id);
1607*4882a593Smuzhiyun }
1608*4882a593Smuzhiyun journal->j_last_flush_id = jl->j_trans_id;
1609*4882a593Smuzhiyun
1610*4882a593Smuzhiyun /*
1611*4882a593Smuzhiyun * not strictly required since we are freeing the list, but it should
1612*4882a593Smuzhiyun * help find code using dead lists later on
1613*4882a593Smuzhiyun */
1614*4882a593Smuzhiyun jl->j_len = 0;
1615*4882a593Smuzhiyun atomic_set(&jl->j_nonzerolen, 0);
1616*4882a593Smuzhiyun jl->j_start = 0;
1617*4882a593Smuzhiyun jl->j_realblock = NULL;
1618*4882a593Smuzhiyun jl->j_commit_bh = NULL;
1619*4882a593Smuzhiyun jl->j_trans_id = 0;
1620*4882a593Smuzhiyun jl->j_state = 0;
1621*4882a593Smuzhiyun put_journal_list(s, jl);
1622*4882a593Smuzhiyun if (flushall)
1623*4882a593Smuzhiyun mutex_unlock(&journal->j_flush_mutex);
1624*4882a593Smuzhiyun return err;
1625*4882a593Smuzhiyun }
1626*4882a593Smuzhiyun
1627*4882a593Smuzhiyun static int write_one_transaction(struct super_block *s,
1628*4882a593Smuzhiyun struct reiserfs_journal_list *jl,
1629*4882a593Smuzhiyun struct buffer_chunk *chunk)
1630*4882a593Smuzhiyun {
1631*4882a593Smuzhiyun struct reiserfs_journal_cnode *cn;
1632*4882a593Smuzhiyun int ret = 0;
1633*4882a593Smuzhiyun
1634*4882a593Smuzhiyun jl->j_state |= LIST_TOUCHED;
1635*4882a593Smuzhiyun del_from_work_list(s, jl);
1636*4882a593Smuzhiyun if (jl->j_len == 0 || atomic_read(&jl->j_nonzerolen) == 0) {
1637*4882a593Smuzhiyun return 0;
1638*4882a593Smuzhiyun }
1639*4882a593Smuzhiyun
1640*4882a593Smuzhiyun cn = jl->j_realblock;
1641*4882a593Smuzhiyun while (cn) {
1642*4882a593Smuzhiyun /*
1643*4882a593Smuzhiyun * if the blocknr == 0, this has been cleared from the hash,
1644*4882a593Smuzhiyun * skip it
1645*4882a593Smuzhiyun */
1646*4882a593Smuzhiyun if (cn->blocknr == 0) {
1647*4882a593Smuzhiyun goto next;
1648*4882a593Smuzhiyun }
1649*4882a593Smuzhiyun if (cn->bh && can_dirty(cn) && buffer_dirty(cn->bh)) {
1650*4882a593Smuzhiyun struct buffer_head *tmp_bh;
1651*4882a593Smuzhiyun /*
1652*4882a593Smuzhiyun * we can race against journal_mark_freed when we try
1653*4882a593Smuzhiyun * to lock_buffer(cn->bh), so we have to inc the buffer
1654*4882a593Smuzhiyun * count, and recheck things after locking
1655*4882a593Smuzhiyun */
1656*4882a593Smuzhiyun tmp_bh = cn->bh;
1657*4882a593Smuzhiyun get_bh(tmp_bh);
1658*4882a593Smuzhiyun lock_buffer(tmp_bh);
1659*4882a593Smuzhiyun if (cn->bh && can_dirty(cn) && buffer_dirty(tmp_bh)) {
1660*4882a593Smuzhiyun if (!buffer_journal_dirty(tmp_bh) ||
1661*4882a593Smuzhiyun buffer_journal_prepared(tmp_bh))
1662*4882a593Smuzhiyun BUG();
1663*4882a593Smuzhiyun add_to_chunk(chunk, tmp_bh, NULL, write_chunk);
1664*4882a593Smuzhiyun ret++;
1665*4882a593Smuzhiyun } else {
1666*4882a593Smuzhiyun /* note, cn->bh might be null now */
1667*4882a593Smuzhiyun unlock_buffer(tmp_bh);
1668*4882a593Smuzhiyun }
1669*4882a593Smuzhiyun put_bh(tmp_bh);
1670*4882a593Smuzhiyun }
1671*4882a593Smuzhiyun next:
1672*4882a593Smuzhiyun cn = cn->next;
1673*4882a593Smuzhiyun cond_resched();
1674*4882a593Smuzhiyun }
1675*4882a593Smuzhiyun return ret;
1676*4882a593Smuzhiyun }
1677*4882a593Smuzhiyun
1678*4882a593Smuzhiyun /* used by flush_commit_list */
1679*4882a593Smuzhiyun static void dirty_one_transaction(struct super_block *s,
1680*4882a593Smuzhiyun struct reiserfs_journal_list *jl)
1681*4882a593Smuzhiyun {
1682*4882a593Smuzhiyun struct reiserfs_journal_cnode *cn;
1683*4882a593Smuzhiyun struct reiserfs_journal_list *pjl;
1684*4882a593Smuzhiyun
1685*4882a593Smuzhiyun jl->j_state |= LIST_DIRTY;
1686*4882a593Smuzhiyun cn = jl->j_realblock;
1687*4882a593Smuzhiyun while (cn) {
1688*4882a593Smuzhiyun /*
1689*4882a593Smuzhiyun * look for a more recent transaction that logged this
1690*4882a593Smuzhiyun * buffer. Only the most recent transaction with a buffer in
1691*4882a593Smuzhiyun * it is allowed to send that buffer to disk
1692*4882a593Smuzhiyun */
1693*4882a593Smuzhiyun pjl = find_newer_jl_for_cn(cn);
1694*4882a593Smuzhiyun if (!pjl && cn->blocknr && cn->bh
1695*4882a593Smuzhiyun && buffer_journal_dirty(cn->bh)) {
1696*4882a593Smuzhiyun BUG_ON(!can_dirty(cn));
1697*4882a593Smuzhiyun /*
1698*4882a593Smuzhiyun * if the buffer is prepared, it will either be logged
1699*4882a593Smuzhiyun * or restored. If restored, we need to make sure
1700*4882a593Smuzhiyun * it actually gets marked dirty
1701*4882a593Smuzhiyun */
1702*4882a593Smuzhiyun clear_buffer_journal_new(cn->bh);
1703*4882a593Smuzhiyun if (buffer_journal_prepared(cn->bh)) {
1704*4882a593Smuzhiyun set_buffer_journal_restore_dirty(cn->bh);
1705*4882a593Smuzhiyun } else {
1706*4882a593Smuzhiyun set_buffer_journal_test(cn->bh);
1707*4882a593Smuzhiyun mark_buffer_dirty(cn->bh);
1708*4882a593Smuzhiyun }
1709*4882a593Smuzhiyun }
1710*4882a593Smuzhiyun cn = cn->next;
1711*4882a593Smuzhiyun }
1712*4882a593Smuzhiyun }
1713*4882a593Smuzhiyun
1714*4882a593Smuzhiyun static int kupdate_transactions(struct super_block *s,
1715*4882a593Smuzhiyun struct reiserfs_journal_list *jl,
1716*4882a593Smuzhiyun struct reiserfs_journal_list **next_jl,
1717*4882a593Smuzhiyun unsigned int *next_trans_id,
1718*4882a593Smuzhiyun int num_blocks, int num_trans)
1719*4882a593Smuzhiyun {
1720*4882a593Smuzhiyun int ret = 0;
1721*4882a593Smuzhiyun int written = 0;
1722*4882a593Smuzhiyun int transactions_flushed = 0;
1723*4882a593Smuzhiyun unsigned int orig_trans_id = jl->j_trans_id;
1724*4882a593Smuzhiyun struct buffer_chunk chunk;
1725*4882a593Smuzhiyun struct list_head *entry;
1726*4882a593Smuzhiyun struct reiserfs_journal *journal = SB_JOURNAL(s);
1727*4882a593Smuzhiyun chunk.nr = 0;
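/*
 * write_one_transaction() batches dirty buffers into this chunk;
 * write_chunk() submits each batch as it fills, and any remainder is
 * submitted after the loop below.
 */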
1728*4882a593Smuzhiyun
1729*4882a593Smuzhiyun reiserfs_mutex_lock_safe(&journal->j_flush_mutex, s);
1730*4882a593Smuzhiyun if (!journal_list_still_alive(s, orig_trans_id)) {
1731*4882a593Smuzhiyun goto done;
1732*4882a593Smuzhiyun }
1733*4882a593Smuzhiyun
1734*4882a593Smuzhiyun /*
1735*4882a593Smuzhiyun * we've got j_flush_mutex held, nobody is going to delete any
1736*4882a593Smuzhiyun * of these lists out from underneath us
1737*4882a593Smuzhiyun */
1738*4882a593Smuzhiyun while ((num_trans && transactions_flushed < num_trans) ||
1739*4882a593Smuzhiyun (!num_trans && written < num_blocks)) {
1740*4882a593Smuzhiyun
1741*4882a593Smuzhiyun if (jl->j_len == 0 || (jl->j_state & LIST_TOUCHED) ||
1742*4882a593Smuzhiyun atomic_read(&jl->j_commit_left)
1743*4882a593Smuzhiyun || !(jl->j_state & LIST_DIRTY)) {
1744*4882a593Smuzhiyun del_from_work_list(s, jl);
1745*4882a593Smuzhiyun break;
1746*4882a593Smuzhiyun }
1747*4882a593Smuzhiyun ret = write_one_transaction(s, jl, &chunk);
1748*4882a593Smuzhiyun
1749*4882a593Smuzhiyun if (ret < 0)
1750*4882a593Smuzhiyun goto done;
1751*4882a593Smuzhiyun transactions_flushed++;
1752*4882a593Smuzhiyun written += ret;
1753*4882a593Smuzhiyun entry = jl->j_list.next;
1754*4882a593Smuzhiyun
1755*4882a593Smuzhiyun /* did we wrap? */
1756*4882a593Smuzhiyun if (entry == &journal->j_journal_list) {
1757*4882a593Smuzhiyun break;
1758*4882a593Smuzhiyun }
1759*4882a593Smuzhiyun jl = JOURNAL_LIST_ENTRY(entry);
1760*4882a593Smuzhiyun
1761*4882a593Smuzhiyun /* don't bother with older transactions */
1762*4882a593Smuzhiyun if (jl->j_trans_id <= orig_trans_id)
1763*4882a593Smuzhiyun break;
1764*4882a593Smuzhiyun }
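/* submit whatever is left in a partially filled chunk */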
1765*4882a593Smuzhiyun if (chunk.nr) {
1766*4882a593Smuzhiyun write_chunk(&chunk);
1767*4882a593Smuzhiyun }
1768*4882a593Smuzhiyun
1769*4882a593Smuzhiyun done:
1770*4882a593Smuzhiyun mutex_unlock(&journal->j_flush_mutex);
1771*4882a593Smuzhiyun return ret;
1772*4882a593Smuzhiyun }
1773*4882a593Smuzhiyun
1774*4882a593Smuzhiyun /*
1775*4882a593Smuzhiyun * o_sync- and fsync-heavy applications tend to use up
1776*4882a593Smuzhiyun * all the journal list slots with tiny transactions. These
1777*4882a593Smuzhiyun * trigger lots and lots of calls to update the header block, which
1778*4882a593Smuzhiyun * adds seeks and slows things down.
1779*4882a593Smuzhiyun *
1780*4882a593Smuzhiyun * This function tries to clear out a large chunk of the journal lists
1781*4882a593Smuzhiyun * at once, which makes everything faster since only the newest journal
1782*4882a593Smuzhiyun * list updates the header block
1783*4882a593Smuzhiyun */
1784*4882a593Smuzhiyun static int flush_used_journal_lists(struct super_block *s,
1785*4882a593Smuzhiyun struct reiserfs_journal_list *jl)
1786*4882a593Smuzhiyun {
1787*4882a593Smuzhiyun unsigned long len = 0;
1788*4882a593Smuzhiyun unsigned long cur_len;
1789*4882a593Smuzhiyun int i;
1790*4882a593Smuzhiyun int limit = 256;
1791*4882a593Smuzhiyun struct reiserfs_journal_list *tjl;
1792*4882a593Smuzhiyun struct reiserfs_journal_list *flush_jl;
1793*4882a593Smuzhiyun unsigned int trans_id;
1794*4882a593Smuzhiyun struct reiserfs_journal *journal = SB_JOURNAL(s);
1795*4882a593Smuzhiyun
1796*4882a593Smuzhiyun flush_jl = tjl = jl;
1797*4882a593Smuzhiyun
1798*4882a593Smuzhiyun /* in data logging mode, try harder to flush a lot of blocks */
1799*4882a593Smuzhiyun if (reiserfs_data_log(s))
1800*4882a593Smuzhiyun limit = 1024;
1801*4882a593Smuzhiyun /* flush for 256 transactions or limit blocks, whichever comes first */
1802*4882a593Smuzhiyun for (i = 0; i < 256 && len < limit; i++) {
1803*4882a593Smuzhiyun if (atomic_read(&tjl->j_commit_left) ||
1804*4882a593Smuzhiyun tjl->j_trans_id < jl->j_trans_id) {
1805*4882a593Smuzhiyun break;
1806*4882a593Smuzhiyun }
1807*4882a593Smuzhiyun cur_len = atomic_read(&tjl->j_nonzerolen);
1808*4882a593Smuzhiyun if (cur_len > 0) {
1809*4882a593Smuzhiyun tjl->j_state &= ~LIST_TOUCHED;
1810*4882a593Smuzhiyun }
1811*4882a593Smuzhiyun len += cur_len;
1812*4882a593Smuzhiyun flush_jl = tjl;
1813*4882a593Smuzhiyun if (tjl->j_list.next == &journal->j_journal_list)
1814*4882a593Smuzhiyun break;
1815*4882a593Smuzhiyun tjl = JOURNAL_LIST_ENTRY(tjl->j_list.next);
1816*4882a593Smuzhiyun }
1817*4882a593Smuzhiyun get_journal_list(jl);
1818*4882a593Smuzhiyun get_journal_list(flush_jl);
1819*4882a593Smuzhiyun
1820*4882a593Smuzhiyun /*
1821*4882a593Smuzhiyun * try to find a group of blocks we can flush across all the
1822*4882a593Smuzhiyun * transactions, but only bother if we've actually spanned
1823*4882a593Smuzhiyun * across multiple lists
1824*4882a593Smuzhiyun */
1825*4882a593Smuzhiyun if (flush_jl != jl)
1826*4882a593Smuzhiyun kupdate_transactions(s, jl, &tjl, &trans_id, len, i);
1827*4882a593Smuzhiyun
1828*4882a593Smuzhiyun flush_journal_list(s, flush_jl, 1);
1829*4882a593Smuzhiyun put_journal_list(s, flush_jl);
1830*4882a593Smuzhiyun put_journal_list(s, jl);
1831*4882a593Smuzhiyun return 0;
1832*4882a593Smuzhiyun }
1833*4882a593Smuzhiyun
1834*4882a593Smuzhiyun /*
1835*4882a593Smuzhiyun * removes any nodes in table matching the given block number and device.
1836*4882a593Smuzhiyun * only touches the hnext and hprev pointers.
1837*4882a593Smuzhiyun */
1838*4882a593Smuzhiyun static void remove_journal_hash(struct super_block *sb,
1839*4882a593Smuzhiyun struct reiserfs_journal_cnode **table,
1840*4882a593Smuzhiyun struct reiserfs_journal_list *jl,
1841*4882a593Smuzhiyun unsigned long block, int remove_freed)
1842*4882a593Smuzhiyun {
1843*4882a593Smuzhiyun struct reiserfs_journal_cnode *cur;
1844*4882a593Smuzhiyun struct reiserfs_journal_cnode **head;
1845*4882a593Smuzhiyun
1846*4882a593Smuzhiyun head = &(journal_hash(table, sb, block));
1847*4882a593Smuzhiyun if (!head) {
1848*4882a593Smuzhiyun return;
1849*4882a593Smuzhiyun }
1850*4882a593Smuzhiyun cur = *head;
1851*4882a593Smuzhiyun while (cur) {
1852*4882a593Smuzhiyun if (cur->blocknr == block && cur->sb == sb
1853*4882a593Smuzhiyun && (jl == NULL || jl == cur->jlist)
1854*4882a593Smuzhiyun && (!test_bit(BLOCK_FREED, &cur->state) || remove_freed)) {
1855*4882a593Smuzhiyun if (cur->hnext) {
1856*4882a593Smuzhiyun cur->hnext->hprev = cur->hprev;
1857*4882a593Smuzhiyun }
1858*4882a593Smuzhiyun if (cur->hprev) {
1859*4882a593Smuzhiyun cur->hprev->hnext = cur->hnext;
1860*4882a593Smuzhiyun } else {
1861*4882a593Smuzhiyun *head = cur->hnext;
1862*4882a593Smuzhiyun }
1863*4882a593Smuzhiyun cur->blocknr = 0;
1864*4882a593Smuzhiyun cur->sb = NULL;
1865*4882a593Smuzhiyun cur->state = 0;
1866*4882a593Smuzhiyun /*
1867*4882a593Smuzhiyun * anybody who clears the cur->bh will also
1868*4882a593Smuzhiyun * dec the nonzerolen
1869*4882a593Smuzhiyun */
1870*4882a593Smuzhiyun if (cur->bh && cur->jlist)
1871*4882a593Smuzhiyun atomic_dec(&cur->jlist->j_nonzerolen);
1872*4882a593Smuzhiyun cur->bh = NULL;
1873*4882a593Smuzhiyun cur->jlist = NULL;
1874*4882a593Smuzhiyun }
1875*4882a593Smuzhiyun cur = cur->hnext;
1876*4882a593Smuzhiyun }
1877*4882a593Smuzhiyun }
1878*4882a593Smuzhiyun
1879*4882a593Smuzhiyun static void free_journal_ram(struct super_block *sb)
1880*4882a593Smuzhiyun {
1881*4882a593Smuzhiyun struct reiserfs_journal *journal = SB_JOURNAL(sb);
1882*4882a593Smuzhiyun kfree(journal->j_current_jl);
1883*4882a593Smuzhiyun journal->j_num_lists--;
1884*4882a593Smuzhiyun
1885*4882a593Smuzhiyun vfree(journal->j_cnode_free_orig);
1886*4882a593Smuzhiyun free_list_bitmaps(sb, journal->j_list_bitmap);
1887*4882a593Smuzhiyun free_bitmap_nodes(sb); /* must be after free_list_bitmaps */
1888*4882a593Smuzhiyun if (journal->j_header_bh) {
1889*4882a593Smuzhiyun brelse(journal->j_header_bh);
1890*4882a593Smuzhiyun }
1891*4882a593Smuzhiyun /*
1892*4882a593Smuzhiyun * j_header_bh is on the journal dev, make sure
1893*4882a593Smuzhiyun * not to release the journal dev until we brelse j_header_bh
1894*4882a593Smuzhiyun */
1895*4882a593Smuzhiyun release_journal_dev(sb, journal);
1896*4882a593Smuzhiyun vfree(journal);
1897*4882a593Smuzhiyun }
1898*4882a593Smuzhiyun
1899*4882a593Smuzhiyun /*
1900*4882a593Smuzhiyun * call on unmount. Only set error to 1 if you haven't made your way out
1901*4882a593Smuzhiyun * of read_super() yet. Any other caller must keep error at 0.
1902*4882a593Smuzhiyun */
1903*4882a593Smuzhiyun static int do_journal_release(struct reiserfs_transaction_handle *th,
1904*4882a593Smuzhiyun struct super_block *sb, int error)
1905*4882a593Smuzhiyun {
1906*4882a593Smuzhiyun struct reiserfs_transaction_handle myth;
1907*4882a593Smuzhiyun struct reiserfs_journal *journal = SB_JOURNAL(sb);
1908*4882a593Smuzhiyun
1909*4882a593Smuzhiyun /*
1910*4882a593Smuzhiyun * we only want to flush out transactions if we were
1911*4882a593Smuzhiyun * called with error == 0
1912*4882a593Smuzhiyun */
1913*4882a593Smuzhiyun if (!error && !sb_rdonly(sb)) {
1914*4882a593Smuzhiyun /* end the current trans */
1915*4882a593Smuzhiyun BUG_ON(!th->t_trans_id);
1916*4882a593Smuzhiyun do_journal_end(th, FLUSH_ALL);
1917*4882a593Smuzhiyun
1918*4882a593Smuzhiyun /*
1919*4882a593Smuzhiyun * make sure something gets logged to force
1920*4882a593Smuzhiyun * our way into the flush code
1921*4882a593Smuzhiyun */
1922*4882a593Smuzhiyun if (!journal_join(&myth, sb)) {
1923*4882a593Smuzhiyun reiserfs_prepare_for_journal(sb,
1924*4882a593Smuzhiyun SB_BUFFER_WITH_SB(sb),
1925*4882a593Smuzhiyun 1);
1926*4882a593Smuzhiyun journal_mark_dirty(&myth, SB_BUFFER_WITH_SB(sb));
1927*4882a593Smuzhiyun do_journal_end(&myth, FLUSH_ALL);
1928*4882a593Smuzhiyun }
1929*4882a593Smuzhiyun }
1930*4882a593Smuzhiyun
1931*4882a593Smuzhiyun /* this also catches errors during the do_journal_end above */
1932*4882a593Smuzhiyun if (!error && reiserfs_is_journal_aborted(journal)) {
1933*4882a593Smuzhiyun memset(&myth, 0, sizeof(myth));
1934*4882a593Smuzhiyun if (!journal_join_abort(&myth, sb)) {
1935*4882a593Smuzhiyun reiserfs_prepare_for_journal(sb,
1936*4882a593Smuzhiyun SB_BUFFER_WITH_SB(sb),
1937*4882a593Smuzhiyun 1);
1938*4882a593Smuzhiyun journal_mark_dirty(&myth, SB_BUFFER_WITH_SB(sb));
1939*4882a593Smuzhiyun do_journal_end(&myth, FLUSH_ALL);
1940*4882a593Smuzhiyun }
1941*4882a593Smuzhiyun }
1942*4882a593Smuzhiyun
1943*4882a593Smuzhiyun
1944*4882a593Smuzhiyun /*
1945*4882a593Smuzhiyun * We must release the write lock here because
1946*4882a593Smuzhiyun * the workqueue job (flush_async_commit) needs this lock
1947*4882a593Smuzhiyun */
1948*4882a593Smuzhiyun reiserfs_write_unlock(sb);
1949*4882a593Smuzhiyun
1950*4882a593Smuzhiyun /*
1951*4882a593Smuzhiyun * Cancel flushing of old commits. Note that neither of these works
1952*4882a593Smuzhiyun * will be requeued because superblock is being shutdown and doesn't
1953*4882a593Smuzhiyun * have SB_ACTIVE set.
1954*4882a593Smuzhiyun */
1955*4882a593Smuzhiyun reiserfs_cancel_old_flush(sb);
1956*4882a593Smuzhiyun /* wait for all commits to finish */
1957*4882a593Smuzhiyun cancel_delayed_work_sync(&SB_JOURNAL(sb)->j_work);
1958*4882a593Smuzhiyun
1959*4882a593Smuzhiyun free_journal_ram(sb);
1960*4882a593Smuzhiyun
1961*4882a593Smuzhiyun reiserfs_write_lock(sb);
1962*4882a593Smuzhiyun
1963*4882a593Smuzhiyun return 0;
1964*4882a593Smuzhiyun }
1965*4882a593Smuzhiyun
1966*4882a593Smuzhiyun /* call on unmount. flush all journal trans, release all alloc'd ram */
1967*4882a593Smuzhiyun int journal_release(struct reiserfs_transaction_handle *th,
1968*4882a593Smuzhiyun struct super_block *sb)
1969*4882a593Smuzhiyun {
1970*4882a593Smuzhiyun return do_journal_release(th, sb, 0);
1971*4882a593Smuzhiyun }
1972*4882a593Smuzhiyun
1973*4882a593Smuzhiyun /* only call from an error condition inside reiserfs_read_super! */
1974*4882a593Smuzhiyun int journal_release_error(struct reiserfs_transaction_handle *th,
1975*4882a593Smuzhiyun struct super_block *sb)
1976*4882a593Smuzhiyun {
1977*4882a593Smuzhiyun return do_journal_release(th, sb, 1);
1978*4882a593Smuzhiyun }
1979*4882a593Smuzhiyun
1980*4882a593Smuzhiyun /*
1981*4882a593Smuzhiyun * compares description block with commit block.
1982*4882a593Smuzhiyun * returns 1 if they differ, 0 if they are the same
1983*4882a593Smuzhiyun */
1984*4882a593Smuzhiyun static int journal_compare_desc_commit(struct super_block *sb,
1985*4882a593Smuzhiyun struct reiserfs_journal_desc *desc,
1986*4882a593Smuzhiyun struct reiserfs_journal_commit *commit)
1987*4882a593Smuzhiyun {
1988*4882a593Smuzhiyun if (get_commit_trans_id(commit) != get_desc_trans_id(desc) ||
1989*4882a593Smuzhiyun get_commit_trans_len(commit) != get_desc_trans_len(desc) ||
1990*4882a593Smuzhiyun get_commit_trans_len(commit) > SB_JOURNAL(sb)->j_trans_max ||
1991*4882a593Smuzhiyun get_commit_trans_len(commit) <= 0) {
1992*4882a593Smuzhiyun return 1;
1993*4882a593Smuzhiyun }
1994*4882a593Smuzhiyun return 0;
1995*4882a593Smuzhiyun }
1996*4882a593Smuzhiyun
1997*4882a593Smuzhiyun /*
1998*4882a593Smuzhiyun * returns 0 if it did not find a description block
1999*4882a593Smuzhiyun * returns -1 if it found a corrupt commit block
2000*4882a593Smuzhiyun * returns 1 if both desc and commit were valid
2001*4882a593Smuzhiyun * NOTE: only called during fs mount
2002*4882a593Smuzhiyun */
2003*4882a593Smuzhiyun static int journal_transaction_is_valid(struct super_block *sb,
2004*4882a593Smuzhiyun struct buffer_head *d_bh,
2005*4882a593Smuzhiyun unsigned int *oldest_invalid_trans_id,
2006*4882a593Smuzhiyun unsigned long *newest_mount_id)
2007*4882a593Smuzhiyun {
2008*4882a593Smuzhiyun struct reiserfs_journal_desc *desc;
2009*4882a593Smuzhiyun struct reiserfs_journal_commit *commit;
2010*4882a593Smuzhiyun struct buffer_head *c_bh;
2011*4882a593Smuzhiyun unsigned long offset;
2012*4882a593Smuzhiyun
2013*4882a593Smuzhiyun if (!d_bh)
2014*4882a593Smuzhiyun return 0;
2015*4882a593Smuzhiyun
2016*4882a593Smuzhiyun desc = (struct reiserfs_journal_desc *)d_bh->b_data;
2017*4882a593Smuzhiyun if (get_desc_trans_len(desc) > 0
2018*4882a593Smuzhiyun && !memcmp(get_journal_desc_magic(d_bh), JOURNAL_DESC_MAGIC, 8)) {
2019*4882a593Smuzhiyun if (oldest_invalid_trans_id && *oldest_invalid_trans_id
2020*4882a593Smuzhiyun && get_desc_trans_id(desc) > *oldest_invalid_trans_id) {
2021*4882a593Smuzhiyun reiserfs_debug(sb, REISERFS_DEBUG_CODE,
2022*4882a593Smuzhiyun "journal-986: transaction "
2023*4882a593Smuzhiyun "is valid returning because trans_id %d is greater than "
2024*4882a593Smuzhiyun "oldest_invalid %lu",
2025*4882a593Smuzhiyun get_desc_trans_id(desc),
2026*4882a593Smuzhiyun *oldest_invalid_trans_id);
2027*4882a593Smuzhiyun return 0;
2028*4882a593Smuzhiyun }
2029*4882a593Smuzhiyun if (newest_mount_id
2030*4882a593Smuzhiyun && *newest_mount_id > get_desc_mount_id(desc)) {
2031*4882a593Smuzhiyun reiserfs_debug(sb, REISERFS_DEBUG_CODE,
2032*4882a593Smuzhiyun "journal-1087: transaction "
2033*4882a593Smuzhiyun "is valid returning because mount_id %d is less than "
2034*4882a593Smuzhiyun "newest_mount_id %lu",
2035*4882a593Smuzhiyun get_desc_mount_id(desc),
2036*4882a593Smuzhiyun *newest_mount_id);
2037*4882a593Smuzhiyun return -1;
2038*4882a593Smuzhiyun }
2039*4882a593Smuzhiyun if (get_desc_trans_len(desc) > SB_JOURNAL(sb)->j_trans_max) {
2040*4882a593Smuzhiyun reiserfs_warning(sb, "journal-2018",
2041*4882a593Smuzhiyun "Bad transaction length %d "
2042*4882a593Smuzhiyun "encountered, ignoring transaction",
2043*4882a593Smuzhiyun get_desc_trans_len(desc));
2044*4882a593Smuzhiyun return -1;
2045*4882a593Smuzhiyun }
2046*4882a593Smuzhiyun offset = d_bh->b_blocknr - SB_ONDISK_JOURNAL_1st_BLOCK(sb);
2047*4882a593Smuzhiyun
2048*4882a593Smuzhiyun /*
2049*4882a593Smuzhiyun * ok, we have a journal description block,
2050*4882a593Smuzhiyun * let's see if the transaction was valid
2051*4882a593Smuzhiyun */
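/*
 * the commit block sits just past the transaction's log blocks, at
 * desc offset + trans_len + 1, wrapped modulo the on-disk journal size.
 */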
2052*4882a593Smuzhiyun c_bh =
2053*4882a593Smuzhiyun journal_bread(sb,
2054*4882a593Smuzhiyun SB_ONDISK_JOURNAL_1st_BLOCK(sb) +
2055*4882a593Smuzhiyun ((offset + get_desc_trans_len(desc) +
2056*4882a593Smuzhiyun 1) % SB_ONDISK_JOURNAL_SIZE(sb)));
2057*4882a593Smuzhiyun if (!c_bh)
2058*4882a593Smuzhiyun return 0;
2059*4882a593Smuzhiyun commit = (struct reiserfs_journal_commit *)c_bh->b_data;
2060*4882a593Smuzhiyun if (journal_compare_desc_commit(sb, desc, commit)) {
2061*4882a593Smuzhiyun reiserfs_debug(sb, REISERFS_DEBUG_CODE,
2062*4882a593Smuzhiyun "journal_transaction_is_valid, commit offset %ld had bad "
2063*4882a593Smuzhiyun "time %d or length %d",
2064*4882a593Smuzhiyun c_bh->b_blocknr -
2065*4882a593Smuzhiyun SB_ONDISK_JOURNAL_1st_BLOCK(sb),
2066*4882a593Smuzhiyun get_commit_trans_id(commit),
2067*4882a593Smuzhiyun get_commit_trans_len(commit));
2068*4882a593Smuzhiyun brelse(c_bh);
2069*4882a593Smuzhiyun if (oldest_invalid_trans_id) {
2070*4882a593Smuzhiyun *oldest_invalid_trans_id =
2071*4882a593Smuzhiyun get_desc_trans_id(desc);
2072*4882a593Smuzhiyun reiserfs_debug(sb, REISERFS_DEBUG_CODE,
2073*4882a593Smuzhiyun "journal-1004: "
2074*4882a593Smuzhiyun "transaction_is_valid setting oldest invalid trans_id "
2075*4882a593Smuzhiyun "to %d",
2076*4882a593Smuzhiyun get_desc_trans_id(desc));
2077*4882a593Smuzhiyun }
2078*4882a593Smuzhiyun return -1;
2079*4882a593Smuzhiyun }
2080*4882a593Smuzhiyun brelse(c_bh);
2081*4882a593Smuzhiyun reiserfs_debug(sb, REISERFS_DEBUG_CODE,
2082*4882a593Smuzhiyun "journal-1006: found valid "
2083*4882a593Smuzhiyun "transaction start offset %llu, len %d id %d",
2084*4882a593Smuzhiyun d_bh->b_blocknr -
2085*4882a593Smuzhiyun SB_ONDISK_JOURNAL_1st_BLOCK(sb),
2086*4882a593Smuzhiyun get_desc_trans_len(desc),
2087*4882a593Smuzhiyun get_desc_trans_id(desc));
2088*4882a593Smuzhiyun return 1;
2089*4882a593Smuzhiyun } else {
2090*4882a593Smuzhiyun return 0;
2091*4882a593Smuzhiyun }
2092*4882a593Smuzhiyun }
2093*4882a593Smuzhiyun
2094*4882a593Smuzhiyun static void brelse_array(struct buffer_head **heads, int num)
2095*4882a593Smuzhiyun {
2096*4882a593Smuzhiyun int i;
2097*4882a593Smuzhiyun for (i = 0; i < num; i++) {
2098*4882a593Smuzhiyun brelse(heads[i]);
2099*4882a593Smuzhiyun }
2100*4882a593Smuzhiyun }
2101*4882a593Smuzhiyun
2102*4882a593Smuzhiyun /*
2103*4882a593Smuzhiyun * given the start block and values for the oldest acceptable transactions,
2104*4882a593Smuzhiyun * this either reads in and replays a transaction, or returns because the
2105*4882a593Smuzhiyun * transaction is invalid or too old.
2106*4882a593Smuzhiyun * NOTE: only called during fs mount
2107*4882a593Smuzhiyun */
2108*4882a593Smuzhiyun static int journal_read_transaction(struct super_block *sb,
2109*4882a593Smuzhiyun unsigned long cur_dblock,
2110*4882a593Smuzhiyun unsigned long oldest_start,
2111*4882a593Smuzhiyun unsigned int oldest_trans_id,
2112*4882a593Smuzhiyun unsigned long newest_mount_id)
2113*4882a593Smuzhiyun {
2114*4882a593Smuzhiyun struct reiserfs_journal *journal = SB_JOURNAL(sb);
2115*4882a593Smuzhiyun struct reiserfs_journal_desc *desc;
2116*4882a593Smuzhiyun struct reiserfs_journal_commit *commit;
2117*4882a593Smuzhiyun unsigned int trans_id = 0;
2118*4882a593Smuzhiyun struct buffer_head *c_bh;
2119*4882a593Smuzhiyun struct buffer_head *d_bh;
2120*4882a593Smuzhiyun struct buffer_head **log_blocks = NULL;
2121*4882a593Smuzhiyun struct buffer_head **real_blocks = NULL;
2122*4882a593Smuzhiyun unsigned int trans_offset;
2123*4882a593Smuzhiyun int i;
2124*4882a593Smuzhiyun int trans_half;
2125*4882a593Smuzhiyun
2126*4882a593Smuzhiyun d_bh = journal_bread(sb, cur_dblock);
2127*4882a593Smuzhiyun if (!d_bh)
2128*4882a593Smuzhiyun return 1;
2129*4882a593Smuzhiyun desc = (struct reiserfs_journal_desc *)d_bh->b_data;
2130*4882a593Smuzhiyun trans_offset = d_bh->b_blocknr - SB_ONDISK_JOURNAL_1st_BLOCK(sb);
2131*4882a593Smuzhiyun reiserfs_debug(sb, REISERFS_DEBUG_CODE, "journal-1037: "
2132*4882a593Smuzhiyun "journal_read_transaction, offset %llu, len %d mount_id %d",
2133*4882a593Smuzhiyun d_bh->b_blocknr - SB_ONDISK_JOURNAL_1st_BLOCK(sb),
2134*4882a593Smuzhiyun get_desc_trans_len(desc), get_desc_mount_id(desc));
2135*4882a593Smuzhiyun if (get_desc_trans_id(desc) < oldest_trans_id) {
2136*4882a593Smuzhiyun reiserfs_debug(sb, REISERFS_DEBUG_CODE, "journal-1039: "
2137*4882a593Smuzhiyun "journal_read_trans skipping because %lu is too old",
2138*4882a593Smuzhiyun cur_dblock -
2139*4882a593Smuzhiyun SB_ONDISK_JOURNAL_1st_BLOCK(sb));
2140*4882a593Smuzhiyun brelse(d_bh);
2141*4882a593Smuzhiyun return 1;
2142*4882a593Smuzhiyun }
2143*4882a593Smuzhiyun if (get_desc_mount_id(desc) != newest_mount_id) {
2144*4882a593Smuzhiyun reiserfs_debug(sb, REISERFS_DEBUG_CODE, "journal-1146: "
2145*4882a593Smuzhiyun "journal_read_trans skipping because %d is != "
2146*4882a593Smuzhiyun "newest_mount_id %lu", get_desc_mount_id(desc),
2147*4882a593Smuzhiyun newest_mount_id);
2148*4882a593Smuzhiyun brelse(d_bh);
2149*4882a593Smuzhiyun return 1;
2150*4882a593Smuzhiyun }
2151*4882a593Smuzhiyun c_bh = journal_bread(sb, SB_ONDISK_JOURNAL_1st_BLOCK(sb) +
2152*4882a593Smuzhiyun ((trans_offset + get_desc_trans_len(desc) + 1) %
2153*4882a593Smuzhiyun SB_ONDISK_JOURNAL_SIZE(sb)));
2154*4882a593Smuzhiyun if (!c_bh) {
2155*4882a593Smuzhiyun brelse(d_bh);
2156*4882a593Smuzhiyun return 1;
2157*4882a593Smuzhiyun }
2158*4882a593Smuzhiyun commit = (struct reiserfs_journal_commit *)c_bh->b_data;
2159*4882a593Smuzhiyun if (journal_compare_desc_commit(sb, desc, commit)) {
2160*4882a593Smuzhiyun reiserfs_debug(sb, REISERFS_DEBUG_CODE,
2161*4882a593Smuzhiyun "journal_read_transaction, "
2162*4882a593Smuzhiyun "commit offset %llu had bad time %d or length %d",
2163*4882a593Smuzhiyun c_bh->b_blocknr -
2164*4882a593Smuzhiyun SB_ONDISK_JOURNAL_1st_BLOCK(sb),
2165*4882a593Smuzhiyun get_commit_trans_id(commit),
2166*4882a593Smuzhiyun get_commit_trans_len(commit));
2167*4882a593Smuzhiyun brelse(c_bh);
2168*4882a593Smuzhiyun brelse(d_bh);
2169*4882a593Smuzhiyun return 1;
2170*4882a593Smuzhiyun }
2171*4882a593Smuzhiyun
2172*4882a593Smuzhiyun if (bdev_read_only(sb->s_bdev)) {
2173*4882a593Smuzhiyun reiserfs_warning(sb, "clm-2076",
2174*4882a593Smuzhiyun "device is readonly, unable to replay log");
2175*4882a593Smuzhiyun brelse(c_bh);
2176*4882a593Smuzhiyun brelse(d_bh);
2177*4882a593Smuzhiyun return -EROFS;
2178*4882a593Smuzhiyun }
2179*4882a593Smuzhiyun
2180*4882a593Smuzhiyun trans_id = get_desc_trans_id(desc);
2181*4882a593Smuzhiyun /*
2182*4882a593Smuzhiyun * now we know we've got a good transaction, and it was
2183*4882a593Smuzhiyun * inside the valid time ranges
2184*4882a593Smuzhiyun */
2185*4882a593Smuzhiyun log_blocks = kmalloc_array(get_desc_trans_len(desc),
2186*4882a593Smuzhiyun sizeof(struct buffer_head *),
2187*4882a593Smuzhiyun GFP_NOFS);
2188*4882a593Smuzhiyun real_blocks = kmalloc_array(get_desc_trans_len(desc),
2189*4882a593Smuzhiyun sizeof(struct buffer_head *),
2190*4882a593Smuzhiyun GFP_NOFS);
2191*4882a593Smuzhiyun if (!log_blocks || !real_blocks) {
2192*4882a593Smuzhiyun brelse(c_bh);
2193*4882a593Smuzhiyun brelse(d_bh);
2194*4882a593Smuzhiyun kfree(log_blocks);
2195*4882a593Smuzhiyun kfree(real_blocks);
2196*4882a593Smuzhiyun reiserfs_warning(sb, "journal-1169",
2197*4882a593Smuzhiyun "kmalloc failed, unable to mount FS");
2198*4882a593Smuzhiyun return -1;
2199*4882a593Smuzhiyun }
2200*4882a593Smuzhiyun /* get all the buffer heads */
2201*4882a593Smuzhiyun trans_half = journal_trans_half(sb->s_blocksize);
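/*
 * a transaction's real block numbers are split between the two framing
 * blocks: the first trans_half entries live in desc->j_realblock, the
 * remainder in commit->j_realblock.
 */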
2202*4882a593Smuzhiyun for (i = 0; i < get_desc_trans_len(desc); i++) {
2203*4882a593Smuzhiyun log_blocks[i] =
2204*4882a593Smuzhiyun journal_getblk(sb,
2205*4882a593Smuzhiyun SB_ONDISK_JOURNAL_1st_BLOCK(sb) +
2206*4882a593Smuzhiyun (trans_offset + 1 +
2207*4882a593Smuzhiyun i) % SB_ONDISK_JOURNAL_SIZE(sb));
2208*4882a593Smuzhiyun if (i < trans_half) {
2209*4882a593Smuzhiyun real_blocks[i] =
2210*4882a593Smuzhiyun sb_getblk(sb,
2211*4882a593Smuzhiyun le32_to_cpu(desc->j_realblock[i]));
2212*4882a593Smuzhiyun } else {
2213*4882a593Smuzhiyun real_blocks[i] =
2214*4882a593Smuzhiyun sb_getblk(sb,
2215*4882a593Smuzhiyun le32_to_cpu(commit->
2216*4882a593Smuzhiyun j_realblock[i - trans_half]));
2217*4882a593Smuzhiyun }
2218*4882a593Smuzhiyun if (real_blocks[i]->b_blocknr > SB_BLOCK_COUNT(sb)) {
2219*4882a593Smuzhiyun reiserfs_warning(sb, "journal-1207",
2220*4882a593Smuzhiyun "REPLAY FAILURE fsck required! "
2221*4882a593Smuzhiyun "Block to replay is outside of "
2222*4882a593Smuzhiyun "filesystem");
2223*4882a593Smuzhiyun goto abort_replay;
2224*4882a593Smuzhiyun }
2225*4882a593Smuzhiyun /* make sure we don't try to replay onto log or reserved area */
2226*4882a593Smuzhiyun if (is_block_in_log_or_reserved_area
2227*4882a593Smuzhiyun (sb, real_blocks[i]->b_blocknr)) {
2228*4882a593Smuzhiyun reiserfs_warning(sb, "journal-1204",
2229*4882a593Smuzhiyun "REPLAY FAILURE fsck required! "
2230*4882a593Smuzhiyun "Trying to replay onto a log block");
2231*4882a593Smuzhiyun abort_replay:
2232*4882a593Smuzhiyun brelse_array(log_blocks, i);
2233*4882a593Smuzhiyun brelse_array(real_blocks, i);
2234*4882a593Smuzhiyun brelse(c_bh);
2235*4882a593Smuzhiyun brelse(d_bh);
2236*4882a593Smuzhiyun kfree(log_blocks);
2237*4882a593Smuzhiyun kfree(real_blocks);
2238*4882a593Smuzhiyun return -1;
2239*4882a593Smuzhiyun }
2240*4882a593Smuzhiyun }
2241*4882a593Smuzhiyun /* read in the log blocks, memcpy to the corresponding real block */
2242*4882a593Smuzhiyun ll_rw_block(REQ_OP_READ, 0, get_desc_trans_len(desc), log_blocks);
2243*4882a593Smuzhiyun for (i = 0; i < get_desc_trans_len(desc); i++) {
2244*4882a593Smuzhiyun
2245*4882a593Smuzhiyun wait_on_buffer(log_blocks[i]);
2246*4882a593Smuzhiyun if (!buffer_uptodate(log_blocks[i])) {
2247*4882a593Smuzhiyun reiserfs_warning(sb, "journal-1212",
2248*4882a593Smuzhiyun "REPLAY FAILURE fsck required! "
2249*4882a593Smuzhiyun "buffer write failed");
2250*4882a593Smuzhiyun brelse_array(log_blocks + i,
2251*4882a593Smuzhiyun get_desc_trans_len(desc) - i);
2252*4882a593Smuzhiyun brelse_array(real_blocks, get_desc_trans_len(desc));
2253*4882a593Smuzhiyun brelse(c_bh);
2254*4882a593Smuzhiyun brelse(d_bh);
2255*4882a593Smuzhiyun kfree(log_blocks);
2256*4882a593Smuzhiyun kfree(real_blocks);
2257*4882a593Smuzhiyun return -1;
2258*4882a593Smuzhiyun }
2259*4882a593Smuzhiyun memcpy(real_blocks[i]->b_data, log_blocks[i]->b_data,
2260*4882a593Smuzhiyun real_blocks[i]->b_size);
2261*4882a593Smuzhiyun set_buffer_uptodate(real_blocks[i]);
2262*4882a593Smuzhiyun brelse(log_blocks[i]);
2263*4882a593Smuzhiyun }
2264*4882a593Smuzhiyun /* flush out the real blocks */
2265*4882a593Smuzhiyun for (i = 0; i < get_desc_trans_len(desc); i++) {
2266*4882a593Smuzhiyun set_buffer_dirty(real_blocks[i]);
2267*4882a593Smuzhiyun write_dirty_buffer(real_blocks[i], 0);
2268*4882a593Smuzhiyun }
2269*4882a593Smuzhiyun for (i = 0; i < get_desc_trans_len(desc); i++) {
2270*4882a593Smuzhiyun wait_on_buffer(real_blocks[i]);
2271*4882a593Smuzhiyun if (!buffer_uptodate(real_blocks[i])) {
2272*4882a593Smuzhiyun reiserfs_warning(sb, "journal-1226",
2273*4882a593Smuzhiyun "REPLAY FAILURE, fsck required! "
2274*4882a593Smuzhiyun "buffer write failed");
2275*4882a593Smuzhiyun brelse_array(real_blocks + i,
2276*4882a593Smuzhiyun get_desc_trans_len(desc) - i);
2277*4882a593Smuzhiyun brelse(c_bh);
2278*4882a593Smuzhiyun brelse(d_bh);
2279*4882a593Smuzhiyun kfree(log_blocks);
2280*4882a593Smuzhiyun kfree(real_blocks);
2281*4882a593Smuzhiyun return -1;
2282*4882a593Smuzhiyun }
2283*4882a593Smuzhiyun brelse(real_blocks[i]);
2284*4882a593Smuzhiyun }
2285*4882a593Smuzhiyun cur_dblock =
2286*4882a593Smuzhiyun SB_ONDISK_JOURNAL_1st_BLOCK(sb) +
2287*4882a593Smuzhiyun ((trans_offset + get_desc_trans_len(desc) +
2288*4882a593Smuzhiyun 2) % SB_ONDISK_JOURNAL_SIZE(sb));
2289*4882a593Smuzhiyun reiserfs_debug(sb, REISERFS_DEBUG_CODE,
2290*4882a593Smuzhiyun "journal-1095: setting journal " "start to offset %ld",
2291*4882a593Smuzhiyun cur_dblock - SB_ONDISK_JOURNAL_1st_BLOCK(sb));
2292*4882a593Smuzhiyun
2293*4882a593Smuzhiyun /*
2294*4882a593Smuzhiyun * init starting values for the first transaction, in case
2295*4882a593Smuzhiyun * this is the last transaction to be replayed.
2296*4882a593Smuzhiyun */
2297*4882a593Smuzhiyun journal->j_start = cur_dblock - SB_ONDISK_JOURNAL_1st_BLOCK(sb);
2298*4882a593Smuzhiyun journal->j_last_flush_trans_id = trans_id;
2299*4882a593Smuzhiyun journal->j_trans_id = trans_id + 1;
2300*4882a593Smuzhiyun /* check for trans_id overflow; 0 means "no transaction", so restart at a small nonzero id */
2301*4882a593Smuzhiyun if (journal->j_trans_id == 0)
2302*4882a593Smuzhiyun journal->j_trans_id = 10;
2303*4882a593Smuzhiyun brelse(c_bh);
2304*4882a593Smuzhiyun brelse(d_bh);
2305*4882a593Smuzhiyun kfree(log_blocks);
2306*4882a593Smuzhiyun kfree(real_blocks);
2307*4882a593Smuzhiyun return 0;
2308*4882a593Smuzhiyun }
2309*4882a593Smuzhiyun
2310*4882a593Smuzhiyun /*
2311*4882a593Smuzhiyun * This function reads blocks of bufsize size, starting at block and going
2312*4882a593Smuzhiyun * up to max_block (but no more than BUFNR blocks at a time). This has been
2313*4882a593Smuzhiyun * shown to improve mount speed, at least on self-rebuilding raid5 arrays.
2314*4882a593Smuzhiyun * Right now it is only used from journal code. But later we might use it
2315*4882a593Smuzhiyun * from other places.
2316*4882a593Smuzhiyun * Note: Do not use journal_getblk/sb_getblk functions here!
2317*4882a593Smuzhiyun */
2318*4882a593Smuzhiyun static struct buffer_head *reiserfs_breada(struct block_device *dev,
2319*4882a593Smuzhiyun b_blocknr_t block, int bufsize,
2320*4882a593Smuzhiyun b_blocknr_t max_block)
2321*4882a593Smuzhiyun {
2322*4882a593Smuzhiyun struct buffer_head *bhlist[BUFNR];
2323*4882a593Smuzhiyun unsigned int blocks = BUFNR;
2324*4882a593Smuzhiyun struct buffer_head *bh;
2325*4882a593Smuzhiyun int i, j;
2326*4882a593Smuzhiyun
2327*4882a593Smuzhiyun bh = __getblk(dev, block, bufsize);
2328*4882a593Smuzhiyun if (buffer_uptodate(bh))
2329*4882a593Smuzhiyun return (bh);
2330*4882a593Smuzhiyun
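/* clamp the read-ahead window so it never runs past max_block */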
2331*4882a593Smuzhiyun if (block + BUFNR > max_block) {
2332*4882a593Smuzhiyun blocks = max_block - block;
2333*4882a593Smuzhiyun }
2334*4882a593Smuzhiyun bhlist[0] = bh;
2335*4882a593Smuzhiyun j = 1;
2336*4882a593Smuzhiyun for (i = 1; i < blocks; i++) {
2337*4882a593Smuzhiyun bh = __getblk(dev, block + i, bufsize);
2338*4882a593Smuzhiyun if (buffer_uptodate(bh)) {
2339*4882a593Smuzhiyun brelse(bh);
2340*4882a593Smuzhiyun break;
2341*4882a593Smuzhiyun } else
2342*4882a593Smuzhiyun bhlist[j++] = bh;
2343*4882a593Smuzhiyun }
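/*
 * submit all j reads in one batch; only bhlist[0] is waited on below,
 * the rest are pure read-ahead and are released right away
 */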
2344*4882a593Smuzhiyun ll_rw_block(REQ_OP_READ, 0, j, bhlist);
2345*4882a593Smuzhiyun for (i = 1; i < j; i++)
2346*4882a593Smuzhiyun brelse(bhlist[i]);
2347*4882a593Smuzhiyun bh = bhlist[0];
2348*4882a593Smuzhiyun wait_on_buffer(bh);
2349*4882a593Smuzhiyun if (buffer_uptodate(bh))
2350*4882a593Smuzhiyun return bh;
2351*4882a593Smuzhiyun brelse(bh);
2352*4882a593Smuzhiyun return NULL;
2353*4882a593Smuzhiyun }
2354*4882a593Smuzhiyun
2355*4882a593Smuzhiyun /*
2356*4882a593Smuzhiyun * read and replay the log
2357*4882a593Smuzhiyun * on a clean unmount, the journal header's next unflushed pointer will
2358*4882a593Smuzhiyun * point to an invalid transaction. This tests that before finding all the
2359*4882a593Smuzhiyun * transactions in the log, which makes normal mount times fast.
2360*4882a593Smuzhiyun *
2361*4882a593Smuzhiyun * After a crash, this starts with the next unflushed transaction, and
2362*4882a593Smuzhiyun * replays until it finds one too old, or invalid.
2363*4882a593Smuzhiyun *
2364*4882a593Smuzhiyun * On exit, it sets things up so the first transaction will work correctly.
2365*4882a593Smuzhiyun * NOTE: only called during fs mount
2366*4882a593Smuzhiyun */
2367*4882a593Smuzhiyun static int journal_read(struct super_block *sb)
2368*4882a593Smuzhiyun {
2369*4882a593Smuzhiyun struct reiserfs_journal *journal = SB_JOURNAL(sb);
2370*4882a593Smuzhiyun struct reiserfs_journal_desc *desc;
2371*4882a593Smuzhiyun unsigned int oldest_trans_id = 0;
2372*4882a593Smuzhiyun unsigned int oldest_invalid_trans_id = 0;
2373*4882a593Smuzhiyun time64_t start;
2374*4882a593Smuzhiyun unsigned long oldest_start = 0;
2375*4882a593Smuzhiyun unsigned long cur_dblock = 0;
2376*4882a593Smuzhiyun unsigned long newest_mount_id = 9;
2377*4882a593Smuzhiyun struct buffer_head *d_bh;
2378*4882a593Smuzhiyun struct reiserfs_journal_header *jh;
2379*4882a593Smuzhiyun int valid_journal_header = 0;
2380*4882a593Smuzhiyun int replay_count = 0;
2381*4882a593Smuzhiyun int continue_replay = 1;
2382*4882a593Smuzhiyun int ret;
2383*4882a593Smuzhiyun
2384*4882a593Smuzhiyun cur_dblock = SB_ONDISK_JOURNAL_1st_BLOCK(sb);
2385*4882a593Smuzhiyun reiserfs_info(sb, "checking transaction log (%pg)\n",
2386*4882a593Smuzhiyun journal->j_dev_bd);
2387*4882a593Smuzhiyun start = ktime_get_seconds();
2388*4882a593Smuzhiyun
2389*4882a593Smuzhiyun /*
2390*4882a593Smuzhiyun * step 1, read in the journal header block. Check the transaction
2391*4882a593Smuzhiyun * it says is the first unflushed, and if that transaction is not
2392*4882a593Smuzhiyun * valid, replay is done
2393*4882a593Smuzhiyun */
2394*4882a593Smuzhiyun journal->j_header_bh = journal_bread(sb,
2395*4882a593Smuzhiyun SB_ONDISK_JOURNAL_1st_BLOCK(sb)
2396*4882a593Smuzhiyun + SB_ONDISK_JOURNAL_SIZE(sb));
2397*4882a593Smuzhiyun if (!journal->j_header_bh) {
2398*4882a593Smuzhiyun return 1;
2399*4882a593Smuzhiyun }
2400*4882a593Smuzhiyun jh = (struct reiserfs_journal_header *)(journal->j_header_bh->b_data);
2401*4882a593Smuzhiyun if (le32_to_cpu(jh->j_first_unflushed_offset) <
2402*4882a593Smuzhiyun SB_ONDISK_JOURNAL_SIZE(sb)
2403*4882a593Smuzhiyun && le32_to_cpu(jh->j_last_flush_trans_id) > 0) {
2404*4882a593Smuzhiyun oldest_start =
2405*4882a593Smuzhiyun SB_ONDISK_JOURNAL_1st_BLOCK(sb) +
2406*4882a593Smuzhiyun le32_to_cpu(jh->j_first_unflushed_offset);
2407*4882a593Smuzhiyun oldest_trans_id = le32_to_cpu(jh->j_last_flush_trans_id) + 1;
2408*4882a593Smuzhiyun newest_mount_id = le32_to_cpu(jh->j_mount_id);
2409*4882a593Smuzhiyun reiserfs_debug(sb, REISERFS_DEBUG_CODE,
2410*4882a593Smuzhiyun "journal-1153: found in "
2411*4882a593Smuzhiyun "header: first_unflushed_offset %d, last_flushed_trans_id "
2412*4882a593Smuzhiyun "%lu", le32_to_cpu(jh->j_first_unflushed_offset),
2413*4882a593Smuzhiyun le32_to_cpu(jh->j_last_flush_trans_id));
2414*4882a593Smuzhiyun valid_journal_header = 1;
2415*4882a593Smuzhiyun
2416*4882a593Smuzhiyun /*
2417*4882a593Smuzhiyun * now, we try to read the first unflushed offset. If it
2418*4882a593Smuzhiyun * is not valid, there is nothing more we can do, and it
2419*4882a593Smuzhiyun * makes no sense to read through the whole log.
2420*4882a593Smuzhiyun */
2421*4882a593Smuzhiyun d_bh =
2422*4882a593Smuzhiyun journal_bread(sb,
2423*4882a593Smuzhiyun SB_ONDISK_JOURNAL_1st_BLOCK(sb) +
2424*4882a593Smuzhiyun le32_to_cpu(jh->j_first_unflushed_offset));
2425*4882a593Smuzhiyun ret = journal_transaction_is_valid(sb, d_bh, NULL, NULL);
2426*4882a593Smuzhiyun if (!ret) {
2427*4882a593Smuzhiyun continue_replay = 0;
2428*4882a593Smuzhiyun }
2429*4882a593Smuzhiyun brelse(d_bh);
2430*4882a593Smuzhiyun goto start_log_replay;
2431*4882a593Smuzhiyun }
2432*4882a593Smuzhiyun
2433*4882a593Smuzhiyun /*
2434*4882a593Smuzhiyun * ok, there are transactions that need to be replayed. start
2435*4882a593Smuzhiyun * with the first log block, find all the valid transactions, and
2436*4882a593Smuzhiyun * pick out the oldest.
2437*4882a593Smuzhiyun */
2438*4882a593Smuzhiyun while (continue_replay
2439*4882a593Smuzhiyun && cur_dblock <
2440*4882a593Smuzhiyun (SB_ONDISK_JOURNAL_1st_BLOCK(sb) +
2441*4882a593Smuzhiyun SB_ONDISK_JOURNAL_SIZE(sb))) {
2442*4882a593Smuzhiyun /*
2443*4882a593Smuzhiyun * Note that the blocksize of the primary fs device and the
2444*4882a593Smuzhiyun * journal device are required to be the same
2445*4882a593Smuzhiyun */
2446*4882a593Smuzhiyun d_bh =
2447*4882a593Smuzhiyun reiserfs_breada(journal->j_dev_bd, cur_dblock,
2448*4882a593Smuzhiyun sb->s_blocksize,
2449*4882a593Smuzhiyun SB_ONDISK_JOURNAL_1st_BLOCK(sb) +
2450*4882a593Smuzhiyun SB_ONDISK_JOURNAL_SIZE(sb));
2451*4882a593Smuzhiyun ret =
2452*4882a593Smuzhiyun journal_transaction_is_valid(sb, d_bh,
2453*4882a593Smuzhiyun &oldest_invalid_trans_id,
2454*4882a593Smuzhiyun &newest_mount_id);
2455*4882a593Smuzhiyun if (ret == 1) {
2456*4882a593Smuzhiyun desc = (struct reiserfs_journal_desc *)d_bh->b_data;
2457*4882a593Smuzhiyun if (oldest_start == 0) { /* init all oldest_ values */
2458*4882a593Smuzhiyun oldest_trans_id = get_desc_trans_id(desc);
2459*4882a593Smuzhiyun oldest_start = d_bh->b_blocknr;
2460*4882a593Smuzhiyun newest_mount_id = get_desc_mount_id(desc);
2461*4882a593Smuzhiyun reiserfs_debug(sb, REISERFS_DEBUG_CODE,
2462*4882a593Smuzhiyun "journal-1179: Setting "
2463*4882a593Smuzhiyun "oldest_start to offset %llu, trans_id %lu",
2464*4882a593Smuzhiyun oldest_start -
2465*4882a593Smuzhiyun SB_ONDISK_JOURNAL_1st_BLOCK
2466*4882a593Smuzhiyun (sb), oldest_trans_id);
2467*4882a593Smuzhiyun } else if (oldest_trans_id > get_desc_trans_id(desc)) {
2468*4882a593Smuzhiyun /* one we just read was older */
2469*4882a593Smuzhiyun oldest_trans_id = get_desc_trans_id(desc);
2470*4882a593Smuzhiyun oldest_start = d_bh->b_blocknr;
2471*4882a593Smuzhiyun reiserfs_debug(sb, REISERFS_DEBUG_CODE,
2472*4882a593Smuzhiyun "journal-1180: Resetting "
2473*4882a593Smuzhiyun "oldest_start to offset %lu, trans_id %lu",
2474*4882a593Smuzhiyun oldest_start -
2475*4882a593Smuzhiyun SB_ONDISK_JOURNAL_1st_BLOCK
2476*4882a593Smuzhiyun (sb), oldest_trans_id);
2477*4882a593Smuzhiyun }
2478*4882a593Smuzhiyun if (newest_mount_id < get_desc_mount_id(desc)) {
2479*4882a593Smuzhiyun newest_mount_id = get_desc_mount_id(desc);
2480*4882a593Smuzhiyun reiserfs_debug(sb, REISERFS_DEBUG_CODE,
2481*4882a593Smuzhiyun "journal-1299: Setting "
2482*4882a593Smuzhiyun "newest_mount_id to %d",
2483*4882a593Smuzhiyun get_desc_mount_id(desc));
2484*4882a593Smuzhiyun }
2485*4882a593Smuzhiyun cur_dblock += get_desc_trans_len(desc) + 2;
2486*4882a593Smuzhiyun } else {
2487*4882a593Smuzhiyun cur_dblock++;
2488*4882a593Smuzhiyun }
2489*4882a593Smuzhiyun brelse(d_bh);
2490*4882a593Smuzhiyun }
2491*4882a593Smuzhiyun
2492*4882a593Smuzhiyun start_log_replay:
2493*4882a593Smuzhiyun cur_dblock = oldest_start;
2494*4882a593Smuzhiyun if (oldest_trans_id) {
2495*4882a593Smuzhiyun reiserfs_debug(sb, REISERFS_DEBUG_CODE,
2496*4882a593Smuzhiyun "journal-1206: Starting replay "
2497*4882a593Smuzhiyun "from offset %llu, trans_id %lu",
2498*4882a593Smuzhiyun cur_dblock - SB_ONDISK_JOURNAL_1st_BLOCK(sb),
2499*4882a593Smuzhiyun oldest_trans_id);
2500*4882a593Smuzhiyun
2501*4882a593Smuzhiyun }
2502*4882a593Smuzhiyun replay_count = 0;
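/*
 * replay one transaction at a time; journal_read_transaction advances
 * journal->j_start, so stop once we wrap back around to oldest_start
 * or hit a transaction that is too old or invalid
 */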
2503*4882a593Smuzhiyun while (continue_replay && oldest_trans_id > 0) {
2504*4882a593Smuzhiyun ret =
2505*4882a593Smuzhiyun journal_read_transaction(sb, cur_dblock, oldest_start,
2506*4882a593Smuzhiyun oldest_trans_id, newest_mount_id);
2507*4882a593Smuzhiyun if (ret < 0) {
2508*4882a593Smuzhiyun return ret;
2509*4882a593Smuzhiyun } else if (ret != 0) {
2510*4882a593Smuzhiyun break;
2511*4882a593Smuzhiyun }
2512*4882a593Smuzhiyun cur_dblock =
2513*4882a593Smuzhiyun SB_ONDISK_JOURNAL_1st_BLOCK(sb) + journal->j_start;
2514*4882a593Smuzhiyun replay_count++;
2515*4882a593Smuzhiyun if (cur_dblock == oldest_start)
2516*4882a593Smuzhiyun break;
2517*4882a593Smuzhiyun }
2518*4882a593Smuzhiyun
2519*4882a593Smuzhiyun if (oldest_trans_id == 0) {
2520*4882a593Smuzhiyun reiserfs_debug(sb, REISERFS_DEBUG_CODE,
2521*4882a593Smuzhiyun "journal-1225: No valid " "transactions found");
2522*4882a593Smuzhiyun }
2523*4882a593Smuzhiyun /*
2524*4882a593Smuzhiyun * j_start does not get set correctly if we don't replay any
2525*4882a593Smuzhiyun * transactions. If we had a valid journal header, set j_start
2526*4882a593Smuzhiyun * to the first unflushed transaction value and copy the trans_id
2527*4882a593Smuzhiyun * from the header
2528*4882a593Smuzhiyun */
2529*4882a593Smuzhiyun if (valid_journal_header && replay_count == 0) {
2530*4882a593Smuzhiyun journal->j_start = le32_to_cpu(jh->j_first_unflushed_offset);
2531*4882a593Smuzhiyun journal->j_trans_id =
2532*4882a593Smuzhiyun le32_to_cpu(jh->j_last_flush_trans_id) + 1;
2533*4882a593Smuzhiyun /* check for trans_id overflow */
2534*4882a593Smuzhiyun if (journal->j_trans_id == 0)
2535*4882a593Smuzhiyun journal->j_trans_id = 10;
2536*4882a593Smuzhiyun journal->j_last_flush_trans_id =
2537*4882a593Smuzhiyun le32_to_cpu(jh->j_last_flush_trans_id);
2538*4882a593Smuzhiyun journal->j_mount_id = le32_to_cpu(jh->j_mount_id) + 1;
2539*4882a593Smuzhiyun } else {
2540*4882a593Smuzhiyun journal->j_mount_id = newest_mount_id + 1;
2541*4882a593Smuzhiyun }
2542*4882a593Smuzhiyun reiserfs_debug(sb, REISERFS_DEBUG_CODE, "journal-1299: Setting "
2543*4882a593Smuzhiyun "newest_mount_id to %lu", journal->j_mount_id);
2544*4882a593Smuzhiyun journal->j_first_unflushed_offset = journal->j_start;
2545*4882a593Smuzhiyun if (replay_count > 0) {
2546*4882a593Smuzhiyun reiserfs_info(sb,
2547*4882a593Smuzhiyun "replayed %d transactions in %lu seconds\n",
2548*4882a593Smuzhiyun replay_count, ktime_get_seconds() - start);
2549*4882a593Smuzhiyun }
2550*4882a593Smuzhiyun /* needed to satisfy the locking in _update_journal_header_block */
2551*4882a593Smuzhiyun reiserfs_write_lock(sb);
2552*4882a593Smuzhiyun if (!bdev_read_only(sb->s_bdev) &&
2553*4882a593Smuzhiyun _update_journal_header_block(sb, journal->j_start,
2554*4882a593Smuzhiyun journal->j_last_flush_trans_id)) {
2555*4882a593Smuzhiyun reiserfs_write_unlock(sb);
2556*4882a593Smuzhiyun /*
2557*4882a593Smuzhiyun * replay failed, caller must call free_journal_ram and abort
2558*4882a593Smuzhiyun * the mount
2559*4882a593Smuzhiyun */
2560*4882a593Smuzhiyun return -1;
2561*4882a593Smuzhiyun }
2562*4882a593Smuzhiyun reiserfs_write_unlock(sb);
2563*4882a593Smuzhiyun return 0;
2564*4882a593Smuzhiyun }
2565*4882a593Smuzhiyun
2566*4882a593Smuzhiyun static struct reiserfs_journal_list *alloc_journal_list(struct super_block *s)
2567*4882a593Smuzhiyun {
2568*4882a593Smuzhiyun struct reiserfs_journal_list *jl;
2569*4882a593Smuzhiyun jl = kzalloc(sizeof(struct reiserfs_journal_list),
2570*4882a593Smuzhiyun GFP_NOFS | __GFP_NOFAIL);
2571*4882a593Smuzhiyun INIT_LIST_HEAD(&jl->j_list);
2572*4882a593Smuzhiyun INIT_LIST_HEAD(&jl->j_working_list);
2573*4882a593Smuzhiyun INIT_LIST_HEAD(&jl->j_tail_bh_list);
2574*4882a593Smuzhiyun INIT_LIST_HEAD(&jl->j_bh_list);
2575*4882a593Smuzhiyun mutex_init(&jl->j_commit_mutex);
2576*4882a593Smuzhiyun SB_JOURNAL(s)->j_num_lists++;
2577*4882a593Smuzhiyun get_journal_list(jl);
2578*4882a593Smuzhiyun return jl;
2579*4882a593Smuzhiyun }
2580*4882a593Smuzhiyun
2581*4882a593Smuzhiyun static void journal_list_init(struct super_block *sb)
2582*4882a593Smuzhiyun {
2583*4882a593Smuzhiyun SB_JOURNAL(sb)->j_current_jl = alloc_journal_list(sb);
2584*4882a593Smuzhiyun }
2585*4882a593Smuzhiyun
2586*4882a593Smuzhiyun static void release_journal_dev(struct super_block *super,
2587*4882a593Smuzhiyun struct reiserfs_journal *journal)
2588*4882a593Smuzhiyun {
2589*4882a593Smuzhiyun if (journal->j_dev_bd != NULL) {
2590*4882a593Smuzhiyun blkdev_put(journal->j_dev_bd, journal->j_dev_mode);
2591*4882a593Smuzhiyun journal->j_dev_bd = NULL;
2592*4882a593Smuzhiyun }
2593*4882a593Smuzhiyun }
2594*4882a593Smuzhiyun
2595*4882a593Smuzhiyun static int journal_init_dev(struct super_block *super,
2596*4882a593Smuzhiyun struct reiserfs_journal *journal,
2597*4882a593Smuzhiyun const char *jdev_name)
2598*4882a593Smuzhiyun {
2599*4882a593Smuzhiyun int result;
2600*4882a593Smuzhiyun dev_t jdev;
2601*4882a593Smuzhiyun fmode_t blkdev_mode = FMODE_READ | FMODE_WRITE | FMODE_EXCL;
2602*4882a593Smuzhiyun
2603*4882a593Smuzhiyun result = 0;
2604*4882a593Smuzhiyun
2605*4882a593Smuzhiyun journal->j_dev_bd = NULL;
2606*4882a593Smuzhiyun jdev = SB_ONDISK_JOURNAL_DEVICE(super) ?
2607*4882a593Smuzhiyun new_decode_dev(SB_ONDISK_JOURNAL_DEVICE(super)) : super->s_dev;
2608*4882a593Smuzhiyun
2609*4882a593Smuzhiyun if (bdev_read_only(super->s_bdev))
2610*4882a593Smuzhiyun blkdev_mode = FMODE_READ;
2611*4882a593Smuzhiyun
2612*4882a593Smuzhiyun /* no "jdev" mount option given: open the device recorded in the super block */
2613*4882a593Smuzhiyun if ((!jdev_name || !jdev_name[0])) {
2614*4882a593Smuzhiyun if (jdev == super->s_dev)
2615*4882a593Smuzhiyun blkdev_mode &= ~FMODE_EXCL;
2616*4882a593Smuzhiyun journal->j_dev_bd = blkdev_get_by_dev(jdev, blkdev_mode,
2617*4882a593Smuzhiyun journal);
2618*4882a593Smuzhiyun journal->j_dev_mode = blkdev_mode;
2619*4882a593Smuzhiyun if (IS_ERR(journal->j_dev_bd)) {
2620*4882a593Smuzhiyun result = PTR_ERR(journal->j_dev_bd);
2621*4882a593Smuzhiyun journal->j_dev_bd = NULL;
2622*4882a593Smuzhiyun reiserfs_warning(super, "sh-458",
2623*4882a593Smuzhiyun "cannot init journal device unknown-block(%u,%u): %i",
2624*4882a593Smuzhiyun MAJOR(jdev), MINOR(jdev), result);
2625*4882a593Smuzhiyun return result;
2626*4882a593Smuzhiyun } else if (jdev != super->s_dev)
2627*4882a593Smuzhiyun set_blocksize(journal->j_dev_bd, super->s_blocksize);
2628*4882a593Smuzhiyun
2629*4882a593Smuzhiyun return 0;
2630*4882a593Smuzhiyun }
2631*4882a593Smuzhiyun
2632*4882a593Smuzhiyun journal->j_dev_mode = blkdev_mode;
2633*4882a593Smuzhiyun journal->j_dev_bd = blkdev_get_by_path(jdev_name, blkdev_mode, journal);
2634*4882a593Smuzhiyun if (IS_ERR(journal->j_dev_bd)) {
2635*4882a593Smuzhiyun result = PTR_ERR(journal->j_dev_bd);
2636*4882a593Smuzhiyun journal->j_dev_bd = NULL;
2637*4882a593Smuzhiyun reiserfs_warning(super, "sh-457",
2638*4882a593Smuzhiyun "journal_init_dev: Cannot open '%s': %i",
2639*4882a593Smuzhiyun jdev_name, result);
2640*4882a593Smuzhiyun return result;
2641*4882a593Smuzhiyun }
2642*4882a593Smuzhiyun
2643*4882a593Smuzhiyun set_blocksize(journal->j_dev_bd, super->s_blocksize);
2644*4882a593Smuzhiyun reiserfs_info(super,
2645*4882a593Smuzhiyun "journal_init_dev: journal device: %pg\n",
2646*4882a593Smuzhiyun journal->j_dev_bd);
2647*4882a593Smuzhiyun return 0;
2648*4882a593Smuzhiyun }
2649*4882a593Smuzhiyun
2650*4882a593Smuzhiyun /*
2651*4882a593Smuzhiyun * When creating/tuning a file system user can assign some
2652*4882a593Smuzhiyun * journal params within boundaries which depend on the ratio
2653*4882a593Smuzhiyun * blocksize/standard_blocksize.
2654*4882a593Smuzhiyun *
2655*4882a593Smuzhiyun * For blocks >= standard_blocksize the transaction size should
2656*4882a593Smuzhiyun * be no less than JOURNAL_TRANS_MIN_DEFAULT, and no more
2657*4882a593Smuzhiyun * than JOURNAL_TRANS_MAX_DEFAULT.
2658*4882a593Smuzhiyun *
2659*4882a593Smuzhiyun * For blocks < standard_blocksize these boundaries should be
2660*4882a593Smuzhiyun * decreased proportionally.
2661*4882a593Smuzhiyun */
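/*
 * Example: with a 1024-byte block size the ratio is 4096/1024 = 4, so
 * j_trans_max must lie within [JOURNAL_TRANS_MIN_DEFAULT / 4,
 * JOURNAL_TRANS_MAX_DEFAULT / 4].
 */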
2662*4882a593Smuzhiyun #define REISERFS_STANDARD_BLKSIZE (4096)
2663*4882a593Smuzhiyun
2664*4882a593Smuzhiyun static int check_advise_trans_params(struct super_block *sb,
2665*4882a593Smuzhiyun struct reiserfs_journal *journal)
2666*4882a593Smuzhiyun {
2667*4882a593Smuzhiyun if (journal->j_trans_max) {
2668*4882a593Smuzhiyun /* Non-default journal params. Do sanity check for them. */
2669*4882a593Smuzhiyun int ratio = 1;
2670*4882a593Smuzhiyun if (sb->s_blocksize < REISERFS_STANDARD_BLKSIZE)
2671*4882a593Smuzhiyun ratio = REISERFS_STANDARD_BLKSIZE / sb->s_blocksize;
2672*4882a593Smuzhiyun
2673*4882a593Smuzhiyun if (journal->j_trans_max > JOURNAL_TRANS_MAX_DEFAULT / ratio ||
2674*4882a593Smuzhiyun journal->j_trans_max < JOURNAL_TRANS_MIN_DEFAULT / ratio ||
2675*4882a593Smuzhiyun SB_ONDISK_JOURNAL_SIZE(sb) / journal->j_trans_max <
2676*4882a593Smuzhiyun JOURNAL_MIN_RATIO) {
2677*4882a593Smuzhiyun reiserfs_warning(sb, "sh-462",
2678*4882a593Smuzhiyun "bad transaction max size (%u). "
2679*4882a593Smuzhiyun "FSCK?", journal->j_trans_max);
2680*4882a593Smuzhiyun return 1;
2681*4882a593Smuzhiyun }
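/*
 * j_max_batch must scale with j_trans_max in the same proportion
 * as the defaults do
 */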
2682*4882a593Smuzhiyun if (journal->j_max_batch != (journal->j_trans_max) *
2683*4882a593Smuzhiyun JOURNAL_MAX_BATCH_DEFAULT/JOURNAL_TRANS_MAX_DEFAULT) {
2684*4882a593Smuzhiyun reiserfs_warning(sb, "sh-463",
2685*4882a593Smuzhiyun "bad transaction max batch (%u). "
2686*4882a593Smuzhiyun "FSCK?", journal->j_max_batch);
2687*4882a593Smuzhiyun return 1;
2688*4882a593Smuzhiyun }
2689*4882a593Smuzhiyun } else {
2690*4882a593Smuzhiyun /*
2691*4882a593Smuzhiyun * Default journal params.
2692*4882a593Smuzhiyun * The file system was created by old version
2693*4882a593Smuzhiyun * of mkreiserfs, so some fields contain zeros,
2694*4882a593Smuzhiyun * and we need to advise proper values for them
2695*4882a593Smuzhiyun */
2696*4882a593Smuzhiyun if (sb->s_blocksize != REISERFS_STANDARD_BLKSIZE) {
2697*4882a593Smuzhiyun reiserfs_warning(sb, "sh-464", "bad blocksize (%u)",
2698*4882a593Smuzhiyun sb->s_blocksize);
2699*4882a593Smuzhiyun return 1;
2700*4882a593Smuzhiyun }
2701*4882a593Smuzhiyun journal->j_trans_max = JOURNAL_TRANS_MAX_DEFAULT;
2702*4882a593Smuzhiyun journal->j_max_batch = JOURNAL_MAX_BATCH_DEFAULT;
2703*4882a593Smuzhiyun journal->j_max_commit_age = JOURNAL_MAX_COMMIT_AGE;
2704*4882a593Smuzhiyun }
2705*4882a593Smuzhiyun return 0;
2706*4882a593Smuzhiyun }
2707*4882a593Smuzhiyun
2708*4882a593Smuzhiyun /* must be called once on fs mount. calls journal_read for you */
2709*4882a593Smuzhiyun int journal_init(struct super_block *sb, const char *j_dev_name,
2710*4882a593Smuzhiyun int old_format, unsigned int commit_max_age)
2711*4882a593Smuzhiyun {
2712*4882a593Smuzhiyun int num_cnodes = SB_ONDISK_JOURNAL_SIZE(sb) * 2;
2713*4882a593Smuzhiyun struct buffer_head *bhjh;
2714*4882a593Smuzhiyun struct reiserfs_super_block *rs;
2715*4882a593Smuzhiyun struct reiserfs_journal_header *jh;
2716*4882a593Smuzhiyun struct reiserfs_journal *journal;
2717*4882a593Smuzhiyun struct reiserfs_journal_list *jl;
2718*4882a593Smuzhiyun int ret;
2719*4882a593Smuzhiyun
2720*4882a593Smuzhiyun journal = SB_JOURNAL(sb) = vzalloc(sizeof(struct reiserfs_journal));
2721*4882a593Smuzhiyun if (!journal) {
2722*4882a593Smuzhiyun reiserfs_warning(sb, "journal-1256",
2723*4882a593Smuzhiyun "unable to get memory for journal structure");
2724*4882a593Smuzhiyun return 1;
2725*4882a593Smuzhiyun }
2726*4882a593Smuzhiyun INIT_LIST_HEAD(&journal->j_bitmap_nodes);
2727*4882a593Smuzhiyun INIT_LIST_HEAD(&journal->j_prealloc_list);
2728*4882a593Smuzhiyun INIT_LIST_HEAD(&journal->j_working_list);
2729*4882a593Smuzhiyun INIT_LIST_HEAD(&journal->j_journal_list);
2730*4882a593Smuzhiyun journal->j_persistent_trans = 0;
2731*4882a593Smuzhiyun if (reiserfs_allocate_list_bitmaps(sb, journal->j_list_bitmap,
2732*4882a593Smuzhiyun reiserfs_bmap_count(sb)))
2733*4882a593Smuzhiyun goto free_and_return;
2734*4882a593Smuzhiyun
2735*4882a593Smuzhiyun allocate_bitmap_nodes(sb);
2736*4882a593Smuzhiyun
2737*4882a593Smuzhiyun /* reserved for journal area support */
2738*4882a593Smuzhiyun SB_JOURNAL_1st_RESERVED_BLOCK(sb) = (old_format ?
2739*4882a593Smuzhiyun REISERFS_OLD_DISK_OFFSET_IN_BYTES
2740*4882a593Smuzhiyun / sb->s_blocksize +
2741*4882a593Smuzhiyun reiserfs_bmap_count(sb) +
2742*4882a593Smuzhiyun 1 :
2743*4882a593Smuzhiyun REISERFS_DISK_OFFSET_IN_BYTES /
2744*4882a593Smuzhiyun sb->s_blocksize + 2);
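/*
 * e.g. with 4k blocks and the usual 64k REISERFS_DISK_OFFSET_IN_BYTES,
 * the new-format case works out to 64k / 4096 + 2 = 18 blocks
 */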
2745*4882a593Smuzhiyun
2746*4882a593Smuzhiyun /*
2747*4882a593Smuzhiyun * Sanity check to see if the standard journal fits
2748*4882a593Smuzhiyun * within the first bitmap (relevant for small blocksizes)
2749*4882a593Smuzhiyun */
2750*4882a593Smuzhiyun if (!SB_ONDISK_JOURNAL_DEVICE(sb) &&
2751*4882a593Smuzhiyun (SB_JOURNAL_1st_RESERVED_BLOCK(sb) +
2752*4882a593Smuzhiyun SB_ONDISK_JOURNAL_SIZE(sb) > sb->s_blocksize * 8)) {
2753*4882a593Smuzhiyun reiserfs_warning(sb, "journal-1393",
2754*4882a593Smuzhiyun "journal does not fit for area addressed "
2755*4882a593Smuzhiyun "by first of bitmap blocks. It starts at "
2756*4882a593Smuzhiyun "%u and its size is %u. Block size %ld",
2757*4882a593Smuzhiyun SB_JOURNAL_1st_RESERVED_BLOCK(sb),
2758*4882a593Smuzhiyun SB_ONDISK_JOURNAL_SIZE(sb),
2759*4882a593Smuzhiyun sb->s_blocksize);
2760*4882a593Smuzhiyun goto free_and_return;
2761*4882a593Smuzhiyun }
2762*4882a593Smuzhiyun
2763*4882a593Smuzhiyun /*
2764*4882a593Smuzhiyun * Sanity check to see if journal first block is correct.
2765*4882a593Smuzhiyun * If journal first block is invalid it can cause
2766*4882a593Smuzhiyun * zeroing important superblock members.
2767*4882a593Smuzhiyun */
2768*4882a593Smuzhiyun if (!SB_ONDISK_JOURNAL_DEVICE(sb) &&
2769*4882a593Smuzhiyun SB_ONDISK_JOURNAL_1st_BLOCK(sb) < SB_JOURNAL_1st_RESERVED_BLOCK(sb)) {
2770*4882a593Smuzhiyun reiserfs_warning(sb, "journal-1393",
2771*4882a593Smuzhiyun "journal 1st block is invalid: 1st reserved block %d, but actual 1st block is %d",
2772*4882a593Smuzhiyun SB_JOURNAL_1st_RESERVED_BLOCK(sb),
2773*4882a593Smuzhiyun SB_ONDISK_JOURNAL_1st_BLOCK(sb));
2774*4882a593Smuzhiyun goto free_and_return;
2775*4882a593Smuzhiyun }
2776*4882a593Smuzhiyun
2777*4882a593Smuzhiyun if (journal_init_dev(sb, journal, j_dev_name) != 0) {
2778*4882a593Smuzhiyun reiserfs_warning(sb, "sh-462",
2779*4882a593Smuzhiyun "unable to initialize journal device");
2780*4882a593Smuzhiyun goto free_and_return;
2781*4882a593Smuzhiyun }
2782*4882a593Smuzhiyun
2783*4882a593Smuzhiyun rs = SB_DISK_SUPER_BLOCK(sb);
2784*4882a593Smuzhiyun
2785*4882a593Smuzhiyun /* read journal header */
2786*4882a593Smuzhiyun bhjh = journal_bread(sb,
2787*4882a593Smuzhiyun SB_ONDISK_JOURNAL_1st_BLOCK(sb) +
2788*4882a593Smuzhiyun SB_ONDISK_JOURNAL_SIZE(sb));
2789*4882a593Smuzhiyun if (!bhjh) {
2790*4882a593Smuzhiyun reiserfs_warning(sb, "sh-459",
2791*4882a593Smuzhiyun "unable to read journal header");
2792*4882a593Smuzhiyun goto free_and_return;
2793*4882a593Smuzhiyun }
2794*4882a593Smuzhiyun jh = (struct reiserfs_journal_header *)(bhjh->b_data);
2795*4882a593Smuzhiyun
2796*4882a593Smuzhiyun /* make sure that journal matches to the super block */
2797*4882a593Smuzhiyun if (is_reiserfs_jr(rs)
2798*4882a593Smuzhiyun && (le32_to_cpu(jh->jh_journal.jp_journal_magic) !=
2799*4882a593Smuzhiyun sb_jp_journal_magic(rs))) {
2800*4882a593Smuzhiyun reiserfs_warning(sb, "sh-460",
2801*4882a593Smuzhiyun "journal header magic %x (device %pg) does "
2802*4882a593Smuzhiyun "not match to magic found in super block %x",
2803*4882a593Smuzhiyun jh->jh_journal.jp_journal_magic,
2804*4882a593Smuzhiyun journal->j_dev_bd,
2805*4882a593Smuzhiyun sb_jp_journal_magic(rs));
2806*4882a593Smuzhiyun brelse(bhjh);
2807*4882a593Smuzhiyun goto free_and_return;
2808*4882a593Smuzhiyun }
2809*4882a593Smuzhiyun
2810*4882a593Smuzhiyun journal->j_trans_max = le32_to_cpu(jh->jh_journal.jp_journal_trans_max);
2811*4882a593Smuzhiyun journal->j_max_batch = le32_to_cpu(jh->jh_journal.jp_journal_max_batch);
2812*4882a593Smuzhiyun journal->j_max_commit_age =
2813*4882a593Smuzhiyun le32_to_cpu(jh->jh_journal.jp_journal_max_commit_age);
2814*4882a593Smuzhiyun journal->j_max_trans_age = JOURNAL_MAX_TRANS_AGE;
2815*4882a593Smuzhiyun
2816*4882a593Smuzhiyun if (check_advise_trans_params(sb, journal) != 0)
2817*4882a593Smuzhiyun goto free_and_return;
2818*4882a593Smuzhiyun journal->j_default_max_commit_age = journal->j_max_commit_age;
2819*4882a593Smuzhiyun
2820*4882a593Smuzhiyun if (commit_max_age != 0) {
2821*4882a593Smuzhiyun journal->j_max_commit_age = commit_max_age;
2822*4882a593Smuzhiyun journal->j_max_trans_age = commit_max_age;
2823*4882a593Smuzhiyun }
2824*4882a593Smuzhiyun
2825*4882a593Smuzhiyun reiserfs_info(sb, "journal params: device %pg, size %u, "
2826*4882a593Smuzhiyun "journal first block %u, max trans len %u, max batch %u, "
2827*4882a593Smuzhiyun "max commit age %u, max trans age %u\n",
2828*4882a593Smuzhiyun journal->j_dev_bd,
2829*4882a593Smuzhiyun SB_ONDISK_JOURNAL_SIZE(sb),
2830*4882a593Smuzhiyun SB_ONDISK_JOURNAL_1st_BLOCK(sb),
2831*4882a593Smuzhiyun journal->j_trans_max,
2832*4882a593Smuzhiyun journal->j_max_batch,
2833*4882a593Smuzhiyun journal->j_max_commit_age, journal->j_max_trans_age);
2834*4882a593Smuzhiyun
2835*4882a593Smuzhiyun brelse(bhjh);
2836*4882a593Smuzhiyun
2837*4882a593Smuzhiyun journal->j_list_bitmap_index = 0;
2838*4882a593Smuzhiyun journal_list_init(sb);
2839*4882a593Smuzhiyun
2840*4882a593Smuzhiyun memset(journal->j_list_hash_table, 0,
2841*4882a593Smuzhiyun JOURNAL_HASH_SIZE * sizeof(struct reiserfs_journal_cnode *));
2842*4882a593Smuzhiyun
2843*4882a593Smuzhiyun INIT_LIST_HEAD(&journal->j_dirty_buffers);
2844*4882a593Smuzhiyun spin_lock_init(&journal->j_dirty_buffers_lock);
2845*4882a593Smuzhiyun
2846*4882a593Smuzhiyun journal->j_start = 0;
2847*4882a593Smuzhiyun journal->j_len = 0;
2848*4882a593Smuzhiyun journal->j_len_alloc = 0;
2849*4882a593Smuzhiyun atomic_set(&journal->j_wcount, 0);
2850*4882a593Smuzhiyun atomic_set(&journal->j_async_throttle, 0);
2851*4882a593Smuzhiyun journal->j_bcount = 0;
2852*4882a593Smuzhiyun journal->j_trans_start_time = 0;
2853*4882a593Smuzhiyun journal->j_last = NULL;
2854*4882a593Smuzhiyun journal->j_first = NULL;
2855*4882a593Smuzhiyun init_waitqueue_head(&journal->j_join_wait);
2856*4882a593Smuzhiyun mutex_init(&journal->j_mutex);
2857*4882a593Smuzhiyun mutex_init(&journal->j_flush_mutex);
2858*4882a593Smuzhiyun
2859*4882a593Smuzhiyun journal->j_trans_id = 10;
2860*4882a593Smuzhiyun journal->j_mount_id = 10;
2861*4882a593Smuzhiyun journal->j_state = 0;
2862*4882a593Smuzhiyun atomic_set(&journal->j_jlock, 0);
2863*4882a593Smuzhiyun journal->j_cnode_free_list = allocate_cnodes(num_cnodes);
2864*4882a593Smuzhiyun journal->j_cnode_free_orig = journal->j_cnode_free_list;
2865*4882a593Smuzhiyun journal->j_cnode_free = journal->j_cnode_free_list ? num_cnodes : 0;
2866*4882a593Smuzhiyun journal->j_cnode_used = 0;
2867*4882a593Smuzhiyun journal->j_must_wait = 0;
2868*4882a593Smuzhiyun
2869*4882a593Smuzhiyun if (journal->j_cnode_free == 0) {
2870*4882a593Smuzhiyun reiserfs_warning(sb, "journal-2004", "Journal cnode memory "
2871*4882a593Smuzhiyun "allocation failed (%ld bytes). Journal is "
2872*4882a593Smuzhiyun "too large for available memory.",
2874*4882a593Smuzhiyun sizeof (struct reiserfs_journal_cnode) * num_cnodes);
2875*4882a593Smuzhiyun goto free_and_return;
2876*4882a593Smuzhiyun }
2877*4882a593Smuzhiyun
2878*4882a593Smuzhiyun init_journal_hash(sb);
2879*4882a593Smuzhiyun jl = journal->j_current_jl;
2880*4882a593Smuzhiyun
2881*4882a593Smuzhiyun /*
2882*4882a593Smuzhiyun * get_list_bitmap() may call flush_commit_list() which
2883*4882a593Smuzhiyun * requires the lock. Calling flush_commit_list() shouldn't happen
2884*4882a593Smuzhiyun * this early but I like to be paranoid.
2885*4882a593Smuzhiyun */
2886*4882a593Smuzhiyun reiserfs_write_lock(sb);
2887*4882a593Smuzhiyun jl->j_list_bitmap = get_list_bitmap(sb, jl);
2888*4882a593Smuzhiyun reiserfs_write_unlock(sb);
2889*4882a593Smuzhiyun if (!jl->j_list_bitmap) {
2890*4882a593Smuzhiyun reiserfs_warning(sb, "journal-2005",
2891*4882a593Smuzhiyun "get_list_bitmap failed for journal list 0");
2892*4882a593Smuzhiyun goto free_and_return;
2893*4882a593Smuzhiyun }
2894*4882a593Smuzhiyun
2895*4882a593Smuzhiyun ret = journal_read(sb);
2896*4882a593Smuzhiyun if (ret < 0) {
2897*4882a593Smuzhiyun reiserfs_warning(sb, "reiserfs-2006",
2898*4882a593Smuzhiyun "Replay Failure, unable to mount");
2899*4882a593Smuzhiyun goto free_and_return;
2900*4882a593Smuzhiyun }
2901*4882a593Smuzhiyun
2902*4882a593Smuzhiyun INIT_DELAYED_WORK(&journal->j_work, flush_async_commits);
2903*4882a593Smuzhiyun journal->j_work_sb = sb;
2904*4882a593Smuzhiyun return 0;
2905*4882a593Smuzhiyun free_and_return:
2906*4882a593Smuzhiyun free_journal_ram(sb);
2907*4882a593Smuzhiyun return 1;
2908*4882a593Smuzhiyun }
2909*4882a593Smuzhiyun
2910*4882a593Smuzhiyun /*
2911*4882a593Smuzhiyun * test for a polite end of the current transaction. Used by file_write,
2912*4882a593Smuzhiyun * and should be used by delete to make sure they don't write more than
2913*4882a593Smuzhiyun * can fit inside a single transaction
2914*4882a593Smuzhiyun */
2915*4882a593Smuzhiyun int journal_transaction_should_end(struct reiserfs_transaction_handle *th,
2916*4882a593Smuzhiyun int new_alloc)
2917*4882a593Smuzhiyun {
2918*4882a593Smuzhiyun struct reiserfs_journal *journal = SB_JOURNAL(th->t_super);
2919*4882a593Smuzhiyun time64_t now = ktime_get_seconds();
2920*4882a593Smuzhiyun /* cannot restart while nested */
2921*4882a593Smuzhiyun BUG_ON(!th->t_trans_id);
2922*4882a593Smuzhiyun if (th->t_refcount > 1)
2923*4882a593Smuzhiyun return 0;
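/*
 * politely end if the journal must wait, the batch would overflow,
 * the journal is locked, the transaction is too old, or free cnodes
 * are running low
 */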
2924*4882a593Smuzhiyun if (journal->j_must_wait > 0 ||
2925*4882a593Smuzhiyun (journal->j_len_alloc + new_alloc) >= journal->j_max_batch ||
2926*4882a593Smuzhiyun atomic_read(&journal->j_jlock) ||
2927*4882a593Smuzhiyun (now - journal->j_trans_start_time) > journal->j_max_trans_age ||
2928*4882a593Smuzhiyun journal->j_cnode_free < (journal->j_trans_max * 3)) {
2929*4882a593Smuzhiyun return 1;
2930*4882a593Smuzhiyun }
2931*4882a593Smuzhiyun
2932*4882a593Smuzhiyun journal->j_len_alloc += new_alloc;
2933*4882a593Smuzhiyun th->t_blocks_allocated += new_alloc;
2934*4882a593Smuzhiyun return 0;
2935*4882a593Smuzhiyun }
2936*4882a593Smuzhiyun
2937*4882a593Smuzhiyun /* this must be called inside a transaction */
2938*4882a593Smuzhiyun void reiserfs_block_writes(struct reiserfs_transaction_handle *th)
2939*4882a593Smuzhiyun {
2940*4882a593Smuzhiyun struct reiserfs_journal *journal = SB_JOURNAL(th->t_super);
2941*4882a593Smuzhiyun BUG_ON(!th->t_trans_id);
2942*4882a593Smuzhiyun journal->j_must_wait = 1;
2943*4882a593Smuzhiyun set_bit(J_WRITERS_BLOCKED, &journal->j_state);
2944*4882a593Smuzhiyun return;
2945*4882a593Smuzhiyun }
2946*4882a593Smuzhiyun
2947*4882a593Smuzhiyun /* this must be called without a transaction started */
2948*4882a593Smuzhiyun void reiserfs_allow_writes(struct super_block *s)
2949*4882a593Smuzhiyun {
2950*4882a593Smuzhiyun struct reiserfs_journal *journal = SB_JOURNAL(s);
2951*4882a593Smuzhiyun clear_bit(J_WRITERS_BLOCKED, &journal->j_state);
2952*4882a593Smuzhiyun wake_up(&journal->j_join_wait);
2953*4882a593Smuzhiyun }
2954*4882a593Smuzhiyun
2955*4882a593Smuzhiyun /* this must be called without a transaction started */
2956*4882a593Smuzhiyun void reiserfs_wait_on_write_block(struct super_block *s)
2957*4882a593Smuzhiyun {
2958*4882a593Smuzhiyun struct reiserfs_journal *journal = SB_JOURNAL(s);
2959*4882a593Smuzhiyun wait_event(journal->j_join_wait,
2960*4882a593Smuzhiyun !test_bit(J_WRITERS_BLOCKED, &journal->j_state));
2961*4882a593Smuzhiyun }
2962*4882a593Smuzhiyun
2963*4882a593Smuzhiyun static void queue_log_writer(struct super_block *s)
2964*4882a593Smuzhiyun {
2965*4882a593Smuzhiyun wait_queue_entry_t wait;
2966*4882a593Smuzhiyun struct reiserfs_journal *journal = SB_JOURNAL(s);
2967*4882a593Smuzhiyun set_bit(J_WRITERS_QUEUED, &journal->j_state);
2968*4882a593Smuzhiyun
2969*4882a593Smuzhiyun /*
2970*4882a593Smuzhiyun * we don't want to use wait_event here because
2971*4882a593Smuzhiyun * we only want to wait once.
2972*4882a593Smuzhiyun */
2973*4882a593Smuzhiyun init_waitqueue_entry(&wait, current);
2974*4882a593Smuzhiyun add_wait_queue(&journal->j_join_wait, &wait);
2975*4882a593Smuzhiyun set_current_state(TASK_UNINTERRUPTIBLE);
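/*
 * re-check the bit after queueing ourselves so a wakeup that raced
 * with the add_wait_queue above is not lost
 */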
2976*4882a593Smuzhiyun if (test_bit(J_WRITERS_QUEUED, &journal->j_state)) {
2977*4882a593Smuzhiyun int depth = reiserfs_write_unlock_nested(s);
2978*4882a593Smuzhiyun schedule();
2979*4882a593Smuzhiyun reiserfs_write_lock_nested(s, depth);
2980*4882a593Smuzhiyun }
2981*4882a593Smuzhiyun __set_current_state(TASK_RUNNING);
2982*4882a593Smuzhiyun remove_wait_queue(&journal->j_join_wait, &wait);
2983*4882a593Smuzhiyun }
2984*4882a593Smuzhiyun
2985*4882a593Smuzhiyun static void wake_queued_writers(struct super_block *s)
2986*4882a593Smuzhiyun {
2987*4882a593Smuzhiyun struct reiserfs_journal *journal = SB_JOURNAL(s);
2988*4882a593Smuzhiyun if (test_and_clear_bit(J_WRITERS_QUEUED, &journal->j_state))
2989*4882a593Smuzhiyun wake_up(&journal->j_join_wait);
2990*4882a593Smuzhiyun }
2991*4882a593Smuzhiyun
2992*4882a593Smuzhiyun static void let_transaction_grow(struct super_block *sb, unsigned int trans_id)
2993*4882a593Smuzhiyun {
2994*4882a593Smuzhiyun struct reiserfs_journal *journal = SB_JOURNAL(sb);
2995*4882a593Smuzhiyun unsigned long bcount = journal->j_bcount;
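/*
 * keep nudging until either the target transaction has ended or no
 * new writers joined during the last pass (j_bcount unchanged)
 */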
2996*4882a593Smuzhiyun while (1) {
2997*4882a593Smuzhiyun int depth;
2998*4882a593Smuzhiyun
2999*4882a593Smuzhiyun depth = reiserfs_write_unlock_nested(sb);
3000*4882a593Smuzhiyun schedule_timeout_uninterruptible(1);
3001*4882a593Smuzhiyun reiserfs_write_lock_nested(sb, depth);
3002*4882a593Smuzhiyun
3003*4882a593Smuzhiyun journal->j_current_jl->j_state |= LIST_COMMIT_PENDING;
3004*4882a593Smuzhiyun while ((atomic_read(&journal->j_wcount) > 0 ||
3005*4882a593Smuzhiyun atomic_read(&journal->j_jlock)) &&
3006*4882a593Smuzhiyun journal->j_trans_id == trans_id) {
3007*4882a593Smuzhiyun queue_log_writer(sb);
3008*4882a593Smuzhiyun }
3009*4882a593Smuzhiyun if (journal->j_trans_id != trans_id)
3010*4882a593Smuzhiyun break;
3011*4882a593Smuzhiyun if (bcount == journal->j_bcount)
3012*4882a593Smuzhiyun break;
3013*4882a593Smuzhiyun bcount = journal->j_bcount;
3014*4882a593Smuzhiyun }
3015*4882a593Smuzhiyun }
3016*4882a593Smuzhiyun
3017*4882a593Smuzhiyun /*
3018*4882a593Smuzhiyun * join == true if you must join an existing transaction.
3019*4882a593Smuzhiyun * join == false if you can deal with waiting for others to finish
3020*4882a593Smuzhiyun *
3021*4882a593Smuzhiyun * this will block until the transaction is joinable. send the number of
3022*4882a593Smuzhiyun * blocks you expect to use in nblocks.
3023*4882a593Smuzhiyun */
3024*4882a593Smuzhiyun static int do_journal_begin_r(struct reiserfs_transaction_handle *th,
3025*4882a593Smuzhiyun struct super_block *sb, unsigned long nblocks,
3026*4882a593Smuzhiyun int join)
3027*4882a593Smuzhiyun {
3028*4882a593Smuzhiyun time64_t now = ktime_get_seconds();
3029*4882a593Smuzhiyun unsigned int old_trans_id;
3030*4882a593Smuzhiyun struct reiserfs_journal *journal = SB_JOURNAL(sb);
3031*4882a593Smuzhiyun struct reiserfs_transaction_handle myth;
3032*4882a593Smuzhiyun int sched_count = 0;
3033*4882a593Smuzhiyun int retval;
3034*4882a593Smuzhiyun int depth;
3035*4882a593Smuzhiyun
3036*4882a593Smuzhiyun reiserfs_check_lock_depth(sb, "journal_begin");
3037*4882a593Smuzhiyun BUG_ON(nblocks > journal->j_trans_max);
3038*4882a593Smuzhiyun
3039*4882a593Smuzhiyun PROC_INFO_INC(sb, journal.journal_being);
3040*4882a593Smuzhiyun /* set here for journal_join */
3041*4882a593Smuzhiyun th->t_refcount = 1;
3042*4882a593Smuzhiyun th->t_super = sb;
3043*4882a593Smuzhiyun
3044*4882a593Smuzhiyun relock:
3045*4882a593Smuzhiyun lock_journal(sb);
3046*4882a593Smuzhiyun if (join != JBEGIN_ABORT && reiserfs_is_journal_aborted(journal)) {
3047*4882a593Smuzhiyun unlock_journal(sb);
3048*4882a593Smuzhiyun retval = journal->j_errno;
3049*4882a593Smuzhiyun goto out_fail;
3050*4882a593Smuzhiyun }
3051*4882a593Smuzhiyun journal->j_bcount++;
3052*4882a593Smuzhiyun
3053*4882a593Smuzhiyun if (test_bit(J_WRITERS_BLOCKED, &journal->j_state)) {
3054*4882a593Smuzhiyun unlock_journal(sb);
3055*4882a593Smuzhiyun depth = reiserfs_write_unlock_nested(sb);
3056*4882a593Smuzhiyun reiserfs_wait_on_write_block(sb);
3057*4882a593Smuzhiyun reiserfs_write_lock_nested(sb, depth);
3058*4882a593Smuzhiyun PROC_INFO_INC(sb, journal.journal_relock_writers);
3059*4882a593Smuzhiyun goto relock;
3060*4882a593Smuzhiyun }
3061*4882a593Smuzhiyun now = ktime_get_seconds();
3062*4882a593Smuzhiyun
3063*4882a593Smuzhiyun /*
3064*4882a593Smuzhiyun * if there is no room in the journal OR if this transaction is
3065*4882a593Smuzhiyun * too old and we weren't called joinable, wait for it to finish
3066*4882a593Smuzhiyun * before beginning. We don't sleep if there are no other writers.
3068*4882a593Smuzhiyun */
3069*4882a593Smuzhiyun
3070*4882a593Smuzhiyun if ((!join && journal->j_must_wait > 0) ||
3071*4882a593Smuzhiyun (!join
3072*4882a593Smuzhiyun && (journal->j_len_alloc + nblocks + 2) >= journal->j_max_batch)
3073*4882a593Smuzhiyun || (!join && atomic_read(&journal->j_wcount) > 0
3074*4882a593Smuzhiyun && journal->j_trans_start_time > 0
3075*4882a593Smuzhiyun && (now - journal->j_trans_start_time) >
3076*4882a593Smuzhiyun journal->j_max_trans_age) || (!join
3077*4882a593Smuzhiyun && atomic_read(&journal->j_jlock))
3078*4882a593Smuzhiyun || (!join && journal->j_cnode_free < (journal->j_trans_max * 3))) {
3079*4882a593Smuzhiyun
3080*4882a593Smuzhiyun old_trans_id = journal->j_trans_id;
3081*4882a593Smuzhiyun /* allow others to finish this transaction */
3082*4882a593Smuzhiyun unlock_journal(sb);
3083*4882a593Smuzhiyun
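/*
 * the batch is full of reservations but less than 75% of it is
 * actually logged; with many writers it is better to queue up and
 * let the transaction fill out than to force an early end
 */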
3084*4882a593Smuzhiyun if (!join && (journal->j_len_alloc + nblocks + 2) >=
3085*4882a593Smuzhiyun journal->j_max_batch &&
3086*4882a593Smuzhiyun ((journal->j_len + nblocks + 2) * 100) <
3087*4882a593Smuzhiyun (journal->j_len_alloc * 75)) {
3088*4882a593Smuzhiyun if (atomic_read(&journal->j_wcount) > 10) {
3089*4882a593Smuzhiyun sched_count++;
3090*4882a593Smuzhiyun queue_log_writer(sb);
3091*4882a593Smuzhiyun goto relock;
3092*4882a593Smuzhiyun }
3093*4882a593Smuzhiyun }
3094*4882a593Smuzhiyun /*
3095*4882a593Smuzhiyun * don't mess with joining the transaction if all we
3096*4882a593Smuzhiyun * have to do is wait for someone else to do a commit
3097*4882a593Smuzhiyun */
3098*4882a593Smuzhiyun if (atomic_read(&journal->j_jlock)) {
3099*4882a593Smuzhiyun while (journal->j_trans_id == old_trans_id &&
3100*4882a593Smuzhiyun atomic_read(&journal->j_jlock)) {
3101*4882a593Smuzhiyun queue_log_writer(sb);
3102*4882a593Smuzhiyun }
3103*4882a593Smuzhiyun goto relock;
3104*4882a593Smuzhiyun }
3105*4882a593Smuzhiyun retval = journal_join(&myth, sb);
3106*4882a593Smuzhiyun if (retval)
3107*4882a593Smuzhiyun goto out_fail;
3108*4882a593Smuzhiyun
3109*4882a593Smuzhiyun /* someone might have ended the transaction while we joined */
3110*4882a593Smuzhiyun if (old_trans_id != journal->j_trans_id) {
3111*4882a593Smuzhiyun retval = do_journal_end(&myth, 0);
3112*4882a593Smuzhiyun } else {
3113*4882a593Smuzhiyun retval = do_journal_end(&myth, COMMIT_NOW);
3114*4882a593Smuzhiyun }
3115*4882a593Smuzhiyun
3116*4882a593Smuzhiyun if (retval)
3117*4882a593Smuzhiyun goto out_fail;
3118*4882a593Smuzhiyun
3119*4882a593Smuzhiyun PROC_INFO_INC(sb, journal.journal_relock_wcount);
3120*4882a593Smuzhiyun goto relock;
3121*4882a593Smuzhiyun }
3122*4882a593Smuzhiyun /* we are the first writer, set trans_id */
3123*4882a593Smuzhiyun if (journal->j_trans_start_time == 0) {
3124*4882a593Smuzhiyun journal->j_trans_start_time = ktime_get_seconds();
3125*4882a593Smuzhiyun }
3126*4882a593Smuzhiyun atomic_inc(&journal->j_wcount);
3127*4882a593Smuzhiyun journal->j_len_alloc += nblocks;
3128*4882a593Smuzhiyun th->t_blocks_logged = 0;
3129*4882a593Smuzhiyun th->t_blocks_allocated = nblocks;
3130*4882a593Smuzhiyun th->t_trans_id = journal->j_trans_id;
3131*4882a593Smuzhiyun unlock_journal(sb);
3132*4882a593Smuzhiyun INIT_LIST_HEAD(&th->t_list);
3133*4882a593Smuzhiyun return 0;
3134*4882a593Smuzhiyun
3135*4882a593Smuzhiyun out_fail:
3136*4882a593Smuzhiyun memset(th, 0, sizeof(*th));
3137*4882a593Smuzhiyun /*
3138*4882a593Smuzhiyun * Re-set th->t_super, so we can properly keep track of how many
3139*4882a593Smuzhiyun * persistent transactions there are. We need to do this so if this
3140*4882a593Smuzhiyun * call is part of a failed restart_transaction, we can free it later
3141*4882a593Smuzhiyun */
3142*4882a593Smuzhiyun th->t_super = sb;
3143*4882a593Smuzhiyun return retval;
3144*4882a593Smuzhiyun }
3145*4882a593Smuzhiyun
3146*4882a593Smuzhiyun struct reiserfs_transaction_handle *reiserfs_persistent_transaction(
3147*4882a593Smuzhiyun struct super_block *s, int nblocks)
3150*4882a593Smuzhiyun {
3151*4882a593Smuzhiyun int ret;
3152*4882a593Smuzhiyun struct reiserfs_transaction_handle *th;
3153*4882a593Smuzhiyun
3154*4882a593Smuzhiyun /*
3155*4882a593Smuzhiyun * if we're nesting into an existing transaction, it will be
3156*4882a593Smuzhiyun * persistent on its own
3157*4882a593Smuzhiyun */
3158*4882a593Smuzhiyun if (reiserfs_transaction_running(s)) {
3159*4882a593Smuzhiyun th = current->journal_info;
3160*4882a593Smuzhiyun th->t_refcount++;
3161*4882a593Smuzhiyun BUG_ON(th->t_refcount < 2);
3162*4882a593Smuzhiyun
3163*4882a593Smuzhiyun return th;
3164*4882a593Smuzhiyun }
3165*4882a593Smuzhiyun th = kmalloc(sizeof(struct reiserfs_transaction_handle), GFP_NOFS);
3166*4882a593Smuzhiyun if (!th)
3167*4882a593Smuzhiyun return NULL;
3168*4882a593Smuzhiyun ret = journal_begin(th, s, nblocks);
3169*4882a593Smuzhiyun if (ret) {
3170*4882a593Smuzhiyun kfree(th);
3171*4882a593Smuzhiyun return NULL;
3172*4882a593Smuzhiyun }
3173*4882a593Smuzhiyun
3174*4882a593Smuzhiyun SB_JOURNAL(s)->j_persistent_trans++;
3175*4882a593Smuzhiyun return th;
3176*4882a593Smuzhiyun }
3177*4882a593Smuzhiyun
3178*4882a593Smuzhiyun int reiserfs_end_persistent_transaction(struct reiserfs_transaction_handle *th)
3179*4882a593Smuzhiyun {
3180*4882a593Smuzhiyun struct super_block *s = th->t_super;
3181*4882a593Smuzhiyun int ret = 0;
3182*4882a593Smuzhiyun if (th->t_trans_id)
3183*4882a593Smuzhiyun ret = journal_end(th);
3184*4882a593Smuzhiyun else
3185*4882a593Smuzhiyun ret = -EIO;
3186*4882a593Smuzhiyun if (th->t_refcount == 0) {
3187*4882a593Smuzhiyun SB_JOURNAL(s)->j_persistent_trans--;
3188*4882a593Smuzhiyun kfree(th);
3189*4882a593Smuzhiyun }
3190*4882a593Smuzhiyun return ret;
3191*4882a593Smuzhiyun }
3192*4882a593Smuzhiyun
3193*4882a593Smuzhiyun static int journal_join(struct reiserfs_transaction_handle *th,
3194*4882a593Smuzhiyun struct super_block *sb)
3195*4882a593Smuzhiyun {
3196*4882a593Smuzhiyun struct reiserfs_transaction_handle *cur_th = current->journal_info;
3197*4882a593Smuzhiyun
3198*4882a593Smuzhiyun /*
3199*4882a593Smuzhiyun * this keeps do_journal_end from NULLing out the
3200*4882a593Smuzhiyun * current->journal_info pointer
3201*4882a593Smuzhiyun */
3202*4882a593Smuzhiyun th->t_handle_save = cur_th;
3203*4882a593Smuzhiyun BUG_ON(cur_th && cur_th->t_refcount > 1);
3204*4882a593Smuzhiyun return do_journal_begin_r(th, sb, 1, JBEGIN_JOIN);
3205*4882a593Smuzhiyun }
3206*4882a593Smuzhiyun
3207*4882a593Smuzhiyun int journal_join_abort(struct reiserfs_transaction_handle *th,
3208*4882a593Smuzhiyun struct super_block *sb)
3209*4882a593Smuzhiyun {
3210*4882a593Smuzhiyun struct reiserfs_transaction_handle *cur_th = current->journal_info;
3211*4882a593Smuzhiyun
3212*4882a593Smuzhiyun /*
3213*4882a593Smuzhiyun * this keeps do_journal_end from NULLing out the
3214*4882a593Smuzhiyun * current->journal_info pointer
3215*4882a593Smuzhiyun */
3216*4882a593Smuzhiyun th->t_handle_save = cur_th;
3217*4882a593Smuzhiyun BUG_ON(cur_th && cur_th->t_refcount > 1);
3218*4882a593Smuzhiyun return do_journal_begin_r(th, sb, 1, JBEGIN_ABORT);
3219*4882a593Smuzhiyun }
3220*4882a593Smuzhiyun
3221*4882a593Smuzhiyun int journal_begin(struct reiserfs_transaction_handle *th,
3222*4882a593Smuzhiyun struct super_block *sb, unsigned long nblocks)
3223*4882a593Smuzhiyun {
3224*4882a593Smuzhiyun struct reiserfs_transaction_handle *cur_th = current->journal_info;
3225*4882a593Smuzhiyun int ret;
3226*4882a593Smuzhiyun
3227*4882a593Smuzhiyun th->t_handle_save = NULL;
3228*4882a593Smuzhiyun if (cur_th) {
3229*4882a593Smuzhiyun /* we are nesting into the current transaction */
3230*4882a593Smuzhiyun if (cur_th->t_super == sb) {
3231*4882a593Smuzhiyun BUG_ON(!cur_th->t_refcount);
3232*4882a593Smuzhiyun cur_th->t_refcount++;
3233*4882a593Smuzhiyun memcpy(th, cur_th, sizeof(*th));
3234*4882a593Smuzhiyun if (th->t_refcount <= 1)
3235*4882a593Smuzhiyun reiserfs_warning(sb, "reiserfs-2005",
3236*4882a593Smuzhiyun "BAD: refcount <= 1, but "
3237*4882a593Smuzhiyun "journal_info != 0");
3238*4882a593Smuzhiyun return 0;
3239*4882a593Smuzhiyun } else {
3240*4882a593Smuzhiyun /*
3241*4882a593Smuzhiyun * we've ended up with a handle from a different
3242*4882a593Smuzhiyun * filesystem. save it and restore on journal_end.
3243*4882a593Smuzhiyun * This should never really happen...
3244*4882a593Smuzhiyun */
3245*4882a593Smuzhiyun reiserfs_warning(sb, "clm-2100",
3246*4882a593Smuzhiyun "nesting into a different FS");
3247*4882a593Smuzhiyun th->t_handle_save = current->journal_info;
3248*4882a593Smuzhiyun current->journal_info = th;
3249*4882a593Smuzhiyun }
3250*4882a593Smuzhiyun } else {
3251*4882a593Smuzhiyun current->journal_info = th;
3252*4882a593Smuzhiyun }
3253*4882a593Smuzhiyun ret = do_journal_begin_r(th, sb, nblocks, JBEGIN_REG);
3254*4882a593Smuzhiyun BUG_ON(current->journal_info != th);
3255*4882a593Smuzhiyun
3256*4882a593Smuzhiyun /*
3257*4882a593Smuzhiyun * I guess this boils down to being the reciprocal of clm-2100 above.
3258*4882a593Smuzhiyun * If do_journal_begin_r fails, we need to put it back, since
3259*4882a593Smuzhiyun * journal_end won't be called to do it. */
3260*4882a593Smuzhiyun if (ret)
3261*4882a593Smuzhiyun current->journal_info = th->t_handle_save;
3262*4882a593Smuzhiyun else
3263*4882a593Smuzhiyun BUG_ON(!th->t_refcount);
3264*4882a593Smuzhiyun
3265*4882a593Smuzhiyun return ret;
3266*4882a593Smuzhiyun }
3267*4882a593Smuzhiyun
3268*4882a593Smuzhiyun /*
3269*4882a593Smuzhiyun * puts bh into the current transaction. If it was already there,
3270*4882a593Smuzhiyun * removes the old pointers from the hash and puts new ones in (to
3271*4882a593Smuzhiyun * make sure replay happens in the right order).
3272*4882a593Smuzhiyun *
3273*4882a593Smuzhiyun * if it was dirty, cleans and files onto the clean list. I can't let it
3274*4882a593Smuzhiyun * be dirty again until the transaction is committed.
3275*4882a593Smuzhiyun *
3276*4882a593Smuzhiyun * if j_len is bigger than j_len_alloc, it pushes j_len_alloc to j_len + JOURNAL_PER_BALANCE_CNT.
3277*4882a593Smuzhiyun */
3278*4882a593Smuzhiyun int journal_mark_dirty(struct reiserfs_transaction_handle *th,
3279*4882a593Smuzhiyun struct buffer_head *bh)
3280*4882a593Smuzhiyun {
3281*4882a593Smuzhiyun struct super_block *sb = th->t_super;
3282*4882a593Smuzhiyun struct reiserfs_journal *journal = SB_JOURNAL(sb);
3283*4882a593Smuzhiyun struct reiserfs_journal_cnode *cn = NULL;
3284*4882a593Smuzhiyun int count_already_incd = 0;
3285*4882a593Smuzhiyun int prepared = 0;
3286*4882a593Smuzhiyun BUG_ON(!th->t_trans_id);
3287*4882a593Smuzhiyun
3288*4882a593Smuzhiyun PROC_INFO_INC(sb, journal.mark_dirty);
3289*4882a593Smuzhiyun if (th->t_trans_id != journal->j_trans_id) {
3290*4882a593Smuzhiyun reiserfs_panic(th->t_super, "journal-1577",
3291*4882a593Smuzhiyun "handle trans id %ld != current trans id %ld",
3292*4882a593Smuzhiyun th->t_trans_id, journal->j_trans_id);
3293*4882a593Smuzhiyun }
3294*4882a593Smuzhiyun
3295*4882a593Smuzhiyun prepared = test_clear_buffer_journal_prepared(bh);
3296*4882a593Smuzhiyun clear_buffer_journal_restore_dirty(bh);
3297*4882a593Smuzhiyun /* already in this transaction, we are done */
3298*4882a593Smuzhiyun if (buffer_journaled(bh)) {
3299*4882a593Smuzhiyun PROC_INFO_INC(sb, journal.mark_dirty_already);
3300*4882a593Smuzhiyun return 0;
3301*4882a593Smuzhiyun }
3302*4882a593Smuzhiyun
3303*4882a593Smuzhiyun /*
3304*4882a593Smuzhiyun * this must be turned into a panic instead of a warning. We can't
3305*4882a593Smuzhiyun * allow a dirty or journal_dirty or locked buffer to be logged, as
3306*4882a593Smuzhiyun * some changes could get to disk too early. NOT GOOD.
3307*4882a593Smuzhiyun */
3308*4882a593Smuzhiyun if (!prepared || buffer_dirty(bh)) {
3309*4882a593Smuzhiyun reiserfs_warning(sb, "journal-1777",
3310*4882a593Smuzhiyun "buffer %llu bad state "
3311*4882a593Smuzhiyun "%cPREPARED %cLOCKED %cDIRTY %cJDIRTY_WAIT",
3312*4882a593Smuzhiyun (unsigned long long)bh->b_blocknr,
3313*4882a593Smuzhiyun prepared ? ' ' : '!',
3314*4882a593Smuzhiyun buffer_locked(bh) ? ' ' : '!',
3315*4882a593Smuzhiyun buffer_dirty(bh) ? ' ' : '!',
3316*4882a593Smuzhiyun buffer_journal_dirty(bh) ? ' ' : '!');
3317*4882a593Smuzhiyun }
3318*4882a593Smuzhiyun
3319*4882a593Smuzhiyun if (atomic_read(&journal->j_wcount) <= 0) {
3320*4882a593Smuzhiyun reiserfs_warning(sb, "journal-1409",
3321*4882a593Smuzhiyun "returning because j_wcount was %d",
3322*4882a593Smuzhiyun atomic_read(&journal->j_wcount));
3323*4882a593Smuzhiyun return 1;
3324*4882a593Smuzhiyun }
3325*4882a593Smuzhiyun /*
3326*4882a593Smuzhiyun * this error means I've screwed up, and we've overflowed
3327*4882a593Smuzhiyun * the transaction. Nothing can be done here, except make the
3328*4882a593Smuzhiyun * FS readonly or panic.
3329*4882a593Smuzhiyun */
3330*4882a593Smuzhiyun if (journal->j_len >= journal->j_trans_max) {
3331*4882a593Smuzhiyun reiserfs_panic(th->t_super, "journal-1413",
3332*4882a593Smuzhiyun "j_len (%lu) is too big",
3333*4882a593Smuzhiyun journal->j_len);
3334*4882a593Smuzhiyun }
3335*4882a593Smuzhiyun
3336*4882a593Smuzhiyun if (buffer_journal_dirty(bh)) {
3337*4882a593Smuzhiyun count_already_incd = 1;
3338*4882a593Smuzhiyun PROC_INFO_INC(sb, journal.mark_dirty_notjournal);
3339*4882a593Smuzhiyun clear_buffer_journal_dirty(bh);
3340*4882a593Smuzhiyun }
3341*4882a593Smuzhiyun
3342*4882a593Smuzhiyun if (journal->j_len > journal->j_len_alloc) {
3343*4882a593Smuzhiyun journal->j_len_alloc = journal->j_len + JOURNAL_PER_BALANCE_CNT;
3344*4882a593Smuzhiyun }
3345*4882a593Smuzhiyun
3346*4882a593Smuzhiyun set_buffer_journaled(bh);
3347*4882a593Smuzhiyun
3348*4882a593Smuzhiyun /* now put this guy on the end */
3349*4882a593Smuzhiyun if (!cn) {
3350*4882a593Smuzhiyun cn = get_cnode(sb);
3351*4882a593Smuzhiyun if (!cn) {
3352*4882a593Smuzhiyun reiserfs_panic(sb, "journal-4", "get_cnode failed!");
3353*4882a593Smuzhiyun }
3354*4882a593Smuzhiyun
3355*4882a593Smuzhiyun if (th->t_blocks_logged == th->t_blocks_allocated) {
3356*4882a593Smuzhiyun th->t_blocks_allocated += JOURNAL_PER_BALANCE_CNT;
3357*4882a593Smuzhiyun journal->j_len_alloc += JOURNAL_PER_BALANCE_CNT;
3358*4882a593Smuzhiyun }
3359*4882a593Smuzhiyun th->t_blocks_logged++;
3360*4882a593Smuzhiyun journal->j_len++;
3361*4882a593Smuzhiyun
3362*4882a593Smuzhiyun cn->bh = bh;
3363*4882a593Smuzhiyun cn->blocknr = bh->b_blocknr;
3364*4882a593Smuzhiyun cn->sb = sb;
3365*4882a593Smuzhiyun cn->jlist = NULL;
3366*4882a593Smuzhiyun insert_journal_hash(journal->j_hash_table, cn);
3367*4882a593Smuzhiyun if (!count_already_incd) {
3368*4882a593Smuzhiyun get_bh(bh);
3369*4882a593Smuzhiyun }
3370*4882a593Smuzhiyun }
3371*4882a593Smuzhiyun cn->next = NULL;
3372*4882a593Smuzhiyun cn->prev = journal->j_last;
3373*4882a593Smuzhiyun cn->bh = bh;
3374*4882a593Smuzhiyun if (journal->j_last) {
3375*4882a593Smuzhiyun journal->j_last->next = cn;
3376*4882a593Smuzhiyun journal->j_last = cn;
3377*4882a593Smuzhiyun } else {
3378*4882a593Smuzhiyun journal->j_first = cn;
3379*4882a593Smuzhiyun journal->j_last = cn;
3380*4882a593Smuzhiyun }
3381*4882a593Smuzhiyun reiserfs_schedule_old_flush(sb);
3382*4882a593Smuzhiyun return 0;
3383*4882a593Smuzhiyun }
3384*4882a593Smuzhiyun
3385*4882a593Smuzhiyun int journal_end(struct reiserfs_transaction_handle *th)
3386*4882a593Smuzhiyun {
3387*4882a593Smuzhiyun struct super_block *sb = th->t_super;
3388*4882a593Smuzhiyun if (!current->journal_info && th->t_refcount > 1)
3389*4882a593Smuzhiyun reiserfs_warning(sb, "REISER-NESTING",
3390*4882a593Smuzhiyun "th NULL, refcount %d", th->t_refcount);
3391*4882a593Smuzhiyun
3392*4882a593Smuzhiyun if (!th->t_trans_id) {
3393*4882a593Smuzhiyun WARN_ON(1);
3394*4882a593Smuzhiyun return -EIO;
3395*4882a593Smuzhiyun }
3396*4882a593Smuzhiyun
3397*4882a593Smuzhiyun th->t_refcount--;
3398*4882a593Smuzhiyun if (th->t_refcount > 0) {
3399*4882a593Smuzhiyun struct reiserfs_transaction_handle *cur_th =
3400*4882a593Smuzhiyun current->journal_info;
3401*4882a593Smuzhiyun
3402*4882a593Smuzhiyun /*
3403*4882a593Smuzhiyun * we aren't allowed to close a nested transaction on a
3404*4882a593Smuzhiyun * different filesystem from the one in the task struct
3405*4882a593Smuzhiyun */
3406*4882a593Smuzhiyun BUG_ON(cur_th->t_super != th->t_super);
3407*4882a593Smuzhiyun
3408*4882a593Smuzhiyun if (th != cur_th) {
3409*4882a593Smuzhiyun memcpy(current->journal_info, th, sizeof(*th));
3410*4882a593Smuzhiyun th->t_trans_id = 0;
3411*4882a593Smuzhiyun }
3412*4882a593Smuzhiyun return 0;
3413*4882a593Smuzhiyun } else {
3414*4882a593Smuzhiyun return do_journal_end(th, 0);
3415*4882a593Smuzhiyun }
3416*4882a593Smuzhiyun }
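
/*
 * A minimal caller sketch (illustrative, not from the original source;
 * 'bh' and the error handling are assumptions): the usual cycle is to
 * begin a transaction, prepare and modify a buffer, log it, then end:
 *
 *        struct reiserfs_transaction_handle th;
 *        int err;
 *
 *        err = journal_begin(&th, sb, JOURNAL_PER_BALANCE_CNT);
 *        if (err)
 *                return err;
 *        reiserfs_prepare_for_journal(sb, bh, 1);
 *        ... modify bh->b_data here ...
 *        journal_mark_dirty(&th, bh);
 *        err = journal_end(&th);
 */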

/*
 * removes from the current transaction, releasing and decrementing any
 * counters. also files the removed buffer directly onto the clean list
 *
 * called by journal_mark_freed when a block has been deleted
 *
 * returns 1 if it cleaned and released the buffer. 0 otherwise
 */
static int remove_from_transaction(struct super_block *sb,
                                   b_blocknr_t blocknr, int already_cleaned)
{
        struct buffer_head *bh;
        struct reiserfs_journal_cnode *cn;
        struct reiserfs_journal *journal = SB_JOURNAL(sb);
        int ret = 0;

        cn = get_journal_hash_dev(sb, journal->j_hash_table, blocknr);
        if (!cn || !cn->bh) {
                return ret;
        }
        bh = cn->bh;
        if (cn->prev) {
                cn->prev->next = cn->next;
        }
        if (cn->next) {
                cn->next->prev = cn->prev;
        }
        if (cn == journal->j_first) {
                journal->j_first = cn->next;
        }
        if (cn == journal->j_last) {
                journal->j_last = cn->prev;
        }
        remove_journal_hash(sb, journal->j_hash_table, NULL,
                            bh->b_blocknr, 0);
        clear_buffer_journaled(bh);     /* don't log this one */

        if (!already_cleaned) {
                clear_buffer_journal_dirty(bh);
                clear_buffer_dirty(bh);
                clear_buffer_journal_test(bh);
                put_bh(bh);
                if (atomic_read(&bh->b_count) < 0) {
                        reiserfs_warning(sb, "journal-1752",
                                         "b_count < 0");
                }
                ret = 1;
        }
        journal->j_len--;
        journal->j_len_alloc--;
        free_cnode(sb, cn);
        return ret;
}

/*
 * for any cnode in a journal list, it can only be dirtied if all the
 * transactions that include it are committed to disk.
 * this checks through each transaction, and returns 1 if you are allowed
 * to dirty, and 0 if you aren't
 *
 * it is called by dirty_journal_list, which is called after
 * flush_commit_list has gotten all the log blocks for a given
 * transaction on disk
 */
static int can_dirty(struct reiserfs_journal_cnode *cn)
{
        struct super_block *sb = cn->sb;
        b_blocknr_t blocknr = cn->blocknr;
        struct reiserfs_journal_cnode *cur = cn->hprev;
        int can_dirty = 1;

        /*
         * first test hprev. These are all newer than cn, so any node here
         * with the same block number and dev means this node can't be sent
         * to disk right now.
         */
        while (cur && can_dirty) {
                if (cur->jlist && cur->bh && cur->blocknr && cur->sb == sb &&
                    cur->blocknr == blocknr) {
                        can_dirty = 0;
                }
                cur = cur->hprev;
        }
        /*
         * then test hnext. These are all older than cn. As long as they
         * are committed to the log, it is safe to write cn to disk
         */
        cur = cn->hnext;
        while (cur && can_dirty) {
                if (cur->jlist && cur->jlist->j_len > 0 &&
                    atomic_read(&cur->jlist->j_commit_left) > 0 && cur->bh &&
                    cur->blocknr && cur->sb == sb && cur->blocknr == blocknr) {
                        can_dirty = 0;
                }
                cur = cur->hnext;
        }
        return can_dirty;
}
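
/*
 * Worked example (illustrative): suppose block B was logged by
 * transactions T1, T2 and T3, oldest first, and cn belongs to T2. The
 * hprev scan sees T3's cnode; while T3 still holds a bh for B, cn may
 * not be dirtied, since the newer copy supersedes it. The hnext scan
 * sees T1's cnode; while T1's commit is still in flight
 * (j_commit_left > 0), writing B to its real location could land before
 * T1's log copy, so cn must wait.
 */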

/*
 * syncs the commit blocks, but does not force the real buffers to disk
 * will wait until the current transaction is done/committed before returning
 */
int journal_end_sync(struct reiserfs_transaction_handle *th)
{
        struct super_block *sb = th->t_super;
        struct reiserfs_journal *journal = SB_JOURNAL(sb);

        BUG_ON(!th->t_trans_id);
        /* you can't sync while nested, very, very bad */
        BUG_ON(th->t_refcount > 1);
        if (journal->j_len == 0) {
                reiserfs_prepare_for_journal(sb, SB_BUFFER_WITH_SB(sb),
                                             1);
                journal_mark_dirty(th, SB_BUFFER_WITH_SB(sb));
        }
        return do_journal_end(th, COMMIT_NOW | WAIT);
}
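
/*
 * Synchronous-commit sketch (illustrative, the buffer loop is an
 * assumption): same cycle as journal_end, but the caller does not
 * return until the commit block is on disk:
 *
 *        err = journal_begin(&th, sb, 1);
 *        if (err)
 *                return err;
 *        ... journal_mark_dirty(&th, bh) for each logged buffer ...
 *        err = journal_end_sync(&th);
 */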

/* writeback the pending async commits to disk */
static void flush_async_commits(struct work_struct *work)
{
        struct reiserfs_journal *journal =
            container_of(work, struct reiserfs_journal, j_work.work);
        struct super_block *sb = journal->j_work_sb;
        struct reiserfs_journal_list *jl;
        struct list_head *entry;

        reiserfs_write_lock(sb);
        if (!list_empty(&journal->j_journal_list)) {
                /* last entry is the youngest, commit it and you get everything */
                entry = journal->j_journal_list.prev;
                jl = JOURNAL_LIST_ENTRY(entry);
                flush_commit_list(sb, jl, 1);
        }
        reiserfs_write_unlock(sb);
}

/*
 * flushes any old transactions to disk
 * ends the current transaction if it is too old
 */
void reiserfs_flush_old_commits(struct super_block *sb)
{
        time64_t now;
        struct reiserfs_transaction_handle th;
        struct reiserfs_journal *journal = SB_JOURNAL(sb);

        now = ktime_get_seconds();
        /*
         * safety check so we don't flush while we are replaying the log during
         * mount
         */
        if (list_empty(&journal->j_journal_list))
                return;

        /*
         * check the current transaction. If there are no writers, and it is
         * too old, finish it, and force the commit blocks to disk
         */
        if (atomic_read(&journal->j_wcount) <= 0 &&
            journal->j_trans_start_time > 0 &&
            journal->j_len > 0 &&
            (now - journal->j_trans_start_time) > journal->j_max_trans_age) {
                if (!journal_join(&th, sb)) {
                        reiserfs_prepare_for_journal(sb,
                                                     SB_BUFFER_WITH_SB(sb),
                                                     1);
                        journal_mark_dirty(&th, SB_BUFFER_WITH_SB(sb));

                        /*
                         * we're only being called from kreiserfsd, it makes
                         * no sense to do an async commit so that kreiserfsd
                         * can do it later
                         */
                        do_journal_end(&th, COMMIT_NOW | WAIT);
                }
        }
}

/*
 * returns 0 if do_journal_end should return right away, returns 1 if
 * do_journal_end should finish the commit
 *
 * if the current transaction is too old, but still has writers, this will
 * wait on j_join_wait until all the writers are done. By the time it
 * wakes up, the transaction it was called with has already ended, so it just
 * flushes the commit list and returns 0.
 *
 * Won't batch when flush or commit_now is set. Also won't batch when
 * others are waiting on j_join_wait.
 *
 * Note, we can't allow the journal_end to proceed while there are still
 * writers in the log.
 */
static int check_journal_end(struct reiserfs_transaction_handle *th, int flags)
{

        time64_t now;
        int flush = flags & FLUSH_ALL;
        int commit_now = flags & COMMIT_NOW;
        int wait_on_commit = flags & WAIT;
        struct reiserfs_journal_list *jl;
        struct super_block *sb = th->t_super;
        struct reiserfs_journal *journal = SB_JOURNAL(sb);

        BUG_ON(!th->t_trans_id);

        if (th->t_trans_id != journal->j_trans_id) {
                reiserfs_panic(th->t_super, "journal-1577",
                               "handle trans id %ld != current trans id %ld",
                               th->t_trans_id, journal->j_trans_id);
        }

        journal->j_len_alloc -= (th->t_blocks_allocated - th->t_blocks_logged);
        /* <= 0 is allowed. unmounting might not call begin */
        if (atomic_read(&journal->j_wcount) > 0)
                atomic_dec(&journal->j_wcount);

        /*
         * BUG: the case where j_len is 0 but previously freed blocks still
         * need to be released will be dealt with by the next transaction
         * that actually writes something, but should really be taken care
         * of in this trans
         */
        BUG_ON(journal->j_len == 0);

        /*
         * if wcount > 0, and we are called with flush or commit_now,
         * we wait on j_join_wait. We will wake up when the last writer has
         * finished the transaction, and started it on its way to the disk.
         * Then, we flush the commit or journal list, and just return 0
         * because the rest of journal end was already done for this
         * transaction.
         */
        if (atomic_read(&journal->j_wcount) > 0) {
                if (flush || commit_now) {
                        unsigned trans_id;

                        jl = journal->j_current_jl;
                        trans_id = jl->j_trans_id;
                        if (wait_on_commit)
                                jl->j_state |= LIST_COMMIT_PENDING;
                        atomic_set(&journal->j_jlock, 1);
                        if (flush) {
                                journal->j_next_full_flush = 1;
                        }
                        unlock_journal(sb);

                        /*
                         * sleep while the current transaction is
                         * still j_jlocked
                         */
                        while (journal->j_trans_id == trans_id) {
                                if (atomic_read(&journal->j_jlock)) {
                                        queue_log_writer(sb);
                                } else {
                                        lock_journal(sb);
                                        if (journal->j_trans_id == trans_id) {
                                                atomic_set(&journal->j_jlock,
                                                           1);
                                        }
                                        unlock_journal(sb);
                                }
                        }
                        BUG_ON(journal->j_trans_id == trans_id);

                        if (commit_now
                            && journal_list_still_alive(sb, trans_id)
                            && wait_on_commit) {
                                flush_commit_list(sb, jl, 1);
                        }
                        return 0;
                }
                unlock_journal(sb);
                return 0;
        }

        /* deal with old transactions where we are the last writers */
        now = ktime_get_seconds();
        if ((now - journal->j_trans_start_time) > journal->j_max_trans_age) {
                commit_now = 1;
                journal->j_next_async_flush = 1;
        }
        /* don't batch when someone is waiting on j_join_wait */
        /* don't batch when syncing the commit or flushing the whole trans */
        if (!(journal->j_must_wait > 0) && !(atomic_read(&journal->j_jlock))
            && !flush && !commit_now && (journal->j_len < journal->j_max_batch)
            && journal->j_len_alloc < journal->j_max_batch
            && journal->j_cnode_free > (journal->j_trans_max * 3)) {
                journal->j_bcount++;
                unlock_journal(sb);
                return 0;
        }

        if (journal->j_start > SB_ONDISK_JOURNAL_SIZE(sb)) {
                reiserfs_panic(sb, "journal-003",
                               "j_start (%ld) is too high",
                               journal->j_start);
        }
        return 1;
}

/*
 * Does all the work that makes deleting blocks safe.
 * when deleting a block mark BH_JNew, just remove it from the current
 * transaction, clean its buffer_head and move on.
 *
 * otherwise:
 * set a bit for the block in the journal bitmap. That will prevent it from
 * being allocated for unformatted nodes before this transaction has finished.
 *
 * mark any cnodes for this block as BLOCK_FREED, and clear their bh pointers.
 * That will prevent any old transactions with this block from trying to flush
 * to the real location. Since we aren't removing the cnode from the
 * journal_list_hash, the block can't be reallocated yet.
 *
 * Then remove it from the current transaction, decrementing any counters and
 * filing it on the clean list.
 */
int journal_mark_freed(struct reiserfs_transaction_handle *th,
                       struct super_block *sb, b_blocknr_t blocknr)
{
        struct reiserfs_journal *journal = SB_JOURNAL(sb);
        struct reiserfs_journal_cnode *cn = NULL;
        struct buffer_head *bh = NULL;
        struct reiserfs_list_bitmap *jb = NULL;
        int cleaned = 0;
        BUG_ON(!th->t_trans_id);

        cn = get_journal_hash_dev(sb, journal->j_hash_table, blocknr);
        if (cn && cn->bh) {
                bh = cn->bh;
                get_bh(bh);
        }
        /* if it is journal new, we just remove it from this transaction */
        if (bh && buffer_journal_new(bh)) {
                clear_buffer_journal_new(bh);
                clear_prepared_bits(bh);
                reiserfs_clean_and_file_buffer(bh);
                cleaned = remove_from_transaction(sb, blocknr, cleaned);
        } else {
                /*
                 * set the bit for this block in the journal bitmap
                 * for this transaction
                 */
                jb = journal->j_current_jl->j_list_bitmap;
                if (!jb) {
                        reiserfs_panic(sb, "journal-1702",
                                       "journal_list_bitmap is NULL");
                }
                set_bit_in_list_bitmap(sb, blocknr, jb);

                /* Note, the entire while loop is not allowed to schedule. */

                if (bh) {
                        clear_prepared_bits(bh);
                        reiserfs_clean_and_file_buffer(bh);
                }
                cleaned = remove_from_transaction(sb, blocknr, cleaned);

                /*
                 * find all older transactions with this block,
                 * make sure they don't try to write it out
                 */
                cn = get_journal_hash_dev(sb, journal->j_list_hash_table,
                                          blocknr);
                while (cn) {
                        if (sb == cn->sb && blocknr == cn->blocknr) {
                                set_bit(BLOCK_FREED, &cn->state);
                                if (cn->bh) {
                                        /*
                                         * remove_from_transaction will brelse
                                         * the buffer if it was in the current
                                         * trans
                                         */
                                        if (!cleaned) {
                                                clear_buffer_journal_dirty(cn->bh);
                                                clear_buffer_dirty(cn->bh);
                                                clear_buffer_journal_test(cn->bh);
                                                cleaned = 1;
                                                put_bh(cn->bh);
                                                if (atomic_read(&cn->bh->b_count) < 0) {
                                                        reiserfs_warning(sb,
                                                                 "journal-2138",
                                                                 "cn->bh->b_count < 0");
                                                }
                                        }
                                        /*
                                         * since we are clearing the bh,
                                         * we MUST dec nonzerolen
                                         */
                                        if (cn->jlist) {
                                                atomic_dec(&cn->jlist->j_nonzerolen);
                                        }
                                        cn->bh = NULL;
                                }
                        }
                        cn = cn->hnext;
                }
        }

        if (bh)
                release_buffer_page(bh);        /* get_hash grabs the buffer */
        return 0;
}
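
/*
 * Usage sketch (illustrative; the surrounding bitmap steps are an
 * assumption about the caller): block-freeing paths call this instead
 * of writing the freed buffer out, roughly:
 *
 *        reiserfs_prepare_for_journal(sb, bitmap_bh, 1);
 *        ... clear the block's bit in the on-disk bitmap ...
 *        journal_mark_dirty(th, bitmap_bh);
 *        journal_mark_freed(th, sb, blocknr);
 *
 * A block freshly allocated in this transaction (BH_JNew) simply
 * vanishes; an older logged copy stays pinned until the transactions
 * that logged it are done.
 */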

void reiserfs_update_inode_transaction(struct inode *inode)
{
        struct reiserfs_journal *journal = SB_JOURNAL(inode->i_sb);
        REISERFS_I(inode)->i_jl = journal->j_current_jl;
        REISERFS_I(inode)->i_trans_id = journal->j_trans_id;
}
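
/*
 * Tagging sketch (illustrative): code that dirties an inode's metadata
 * inside a transaction records which transaction did it, so a later
 * fsync can commit exactly that transaction:
 *
 *        journal_mark_dirty(&th, bh);
 *        reiserfs_update_inode_transaction(inode);
 */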

/*
 * returns -1 on error, 0 if no commits/barriers were done and 1
 * if a transaction was actually committed and the barrier was done
 */
static int __commit_trans_jl(struct inode *inode, unsigned long id,
                             struct reiserfs_journal_list *jl)
{
        struct reiserfs_transaction_handle th;
        struct super_block *sb = inode->i_sb;
        struct reiserfs_journal *journal = SB_JOURNAL(sb);
        int ret = 0;

        /*
         * is it from the current transaction,
         * or from an unknown transaction?
         */
        if (id == journal->j_trans_id) {
                jl = journal->j_current_jl;
                /*
                 * try to let other writers come in and
                 * grow this transaction
                 */
                let_transaction_grow(sb, id);
                if (journal->j_trans_id != id) {
                        goto flush_commit_only;
                }

                ret = journal_begin(&th, sb, 1);
                if (ret)
                        return ret;

                /* someone might have ended this transaction while we joined */
                if (journal->j_trans_id != id) {
                        reiserfs_prepare_for_journal(sb, SB_BUFFER_WITH_SB(sb),
                                                     1);
                        journal_mark_dirty(&th, SB_BUFFER_WITH_SB(sb));
                        ret = journal_end(&th);
                        goto flush_commit_only;
                }

                ret = journal_end_sync(&th);
                if (!ret)
                        ret = 1;

        } else {
                /*
                 * this gets tricky, we have to make sure the journal list in
                 * the inode still exists. We know the list is still around
                 * if we've got a larger transaction id than the oldest list
                 */
flush_commit_only:
                if (journal_list_still_alive(inode->i_sb, id)) {
                        /*
                         * we only set ret to 1 when we know for sure
                         * the barrier hasn't been started yet on the commit
                         * block.
                         */
                        if (atomic_read(&jl->j_commit_left) > 1)
                                ret = 1;
                        flush_commit_list(sb, jl, 1);
                        if (journal->j_errno)
                                ret = journal->j_errno;
                }
        }
        /* otherwise the list is gone, and long since committed */
        return ret;
}

int reiserfs_commit_for_inode(struct inode *inode)
{
        unsigned int id = REISERFS_I(inode)->i_trans_id;
        struct reiserfs_journal_list *jl = REISERFS_I(inode)->i_jl;

        /*
         * for the whole inode, assume unset id means it was
         * changed in the current transaction. More conservative
         */
        if (!id || !jl) {
                reiserfs_update_inode_transaction(inode);
                id = REISERFS_I(inode)->i_trans_id;
                /* jl will be updated in __commit_trans_jl */
        }

        return __commit_trans_jl(inode, id, jl);
}
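
/*
 * fsync-style sketch (illustrative): to make an inode's metadata
 * changes durable, commit the transaction recorded in the inode:
 *
 *        int err = reiserfs_commit_for_inode(inode);
 *        if (err < 0)
 *                return err;
 *
 * A positive return means a commit (and barrier) was actually
 * performed; 0 means the relevant transaction was already on disk.
 */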

void reiserfs_restore_prepared_buffer(struct super_block *sb,
                                      struct buffer_head *bh)
{
        struct reiserfs_journal *journal = SB_JOURNAL(sb);
        PROC_INFO_INC(sb, journal.restore_prepared);
        if (!bh) {
                return;
        }
        if (test_clear_buffer_journal_restore_dirty(bh) &&
            buffer_journal_dirty(bh)) {
                struct reiserfs_journal_cnode *cn;
                reiserfs_write_lock(sb);
                cn = get_journal_hash_dev(sb,
                                          journal->j_list_hash_table,
                                          bh->b_blocknr);
                if (cn && can_dirty(cn)) {
                        set_buffer_journal_test(bh);
                        mark_buffer_dirty(bh);
                }
                reiserfs_write_unlock(sb);
        }
        clear_buffer_journal_prepared(bh);
}

extern struct tree_balance *cur_tb;
/*
 * before we can change a metadata block, we have to make sure it won't
 * be written to disk while we are altering it. So, we must:
 * clean it
 * wait on it.
 */
int reiserfs_prepare_for_journal(struct super_block *sb,
                                 struct buffer_head *bh, int wait)
{
        PROC_INFO_INC(sb, journal.prepare);

        if (!trylock_buffer(bh)) {
                if (!wait)
                        return 0;
                lock_buffer(bh);
        }
        set_buffer_journal_prepared(bh);
        if (test_clear_buffer_dirty(bh) && buffer_journal_dirty(bh)) {
                clear_buffer_journal_test(bh);
                set_buffer_journal_restore_dirty(bh);
        }
        unlock_buffer(bh);
        return 1;
}
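
/*
 * Prepare/restore sketch (illustrative; the bail-out condition is a
 * hypothetical example): metadata updates follow prepare -> modify ->
 * mark_dirty, with reiserfs_restore_prepared_buffer() as the undo on a
 * path that decides not to log after all:
 *
 *        if (!reiserfs_prepare_for_journal(sb, bh, 0))
 *                return 0;       non-blocking attempt failed, retry later
 *        if (must_back_out) {
 *                reiserfs_restore_prepared_buffer(sb, bh);
 *                return -EAGAIN;
 *        }
 *        ... modify bh->b_data here ...
 *        journal_mark_dirty(th, bh);
 */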

/*
 * long and ugly. If flush, will not return until all commit
 * blocks and all real buffers in the trans are on disk.
 * If no_async, won't return until all commit blocks are on disk.
 *
 * keep reading, there are comments as you go along
 *
 * If the journal is aborted, we just clean up. Things like flushing
 * journal lists, etc just won't happen.
 */
static int do_journal_end(struct reiserfs_transaction_handle *th, int flags)
{
        struct super_block *sb = th->t_super;
        struct reiserfs_journal *journal = SB_JOURNAL(sb);
        struct reiserfs_journal_cnode *cn, *next, *jl_cn;
        struct reiserfs_journal_cnode *last_cn = NULL;
        struct reiserfs_journal_desc *desc;
        struct reiserfs_journal_commit *commit;
        struct buffer_head *c_bh;       /* commit bh */
        struct buffer_head *d_bh;       /* desc bh */
        int cur_write_start = 0;        /* start index of current log write */
        int i;
        int flush;
        int wait_on_commit;
        struct reiserfs_journal_list *jl, *temp_jl;
        struct list_head *entry, *safe;
        unsigned long jindex;
        unsigned int commit_trans_id;
        int trans_half;
        int depth;

        BUG_ON(th->t_refcount > 1);
        BUG_ON(!th->t_trans_id);
        BUG_ON(!th->t_super);

        /*
         * protect flush_older_commits from doing mistakes if the
         * transaction ID counter gets overflowed.
         */
        if (th->t_trans_id == ~0U)
                flags |= FLUSH_ALL | COMMIT_NOW | WAIT;
        flush = flags & FLUSH_ALL;
        wait_on_commit = flags & WAIT;

        current->journal_info = th->t_handle_save;
        reiserfs_check_lock_depth(sb, "journal end");
        if (journal->j_len == 0) {
                reiserfs_prepare_for_journal(sb, SB_BUFFER_WITH_SB(sb),
                                             1);
                journal_mark_dirty(th, SB_BUFFER_WITH_SB(sb));
        }

        lock_journal(sb);
        if (journal->j_next_full_flush) {
                flags |= FLUSH_ALL;
                flush = 1;
        }
        if (journal->j_next_async_flush) {
                flags |= COMMIT_NOW | WAIT;
                wait_on_commit = 1;
        }

        /*
         * check_journal_end locks the journal and unlocks if it does
         * not return 1. It tells us if we should continue with the
         * journal_end, or just return.
         */
        if (!check_journal_end(th, flags)) {
                reiserfs_schedule_old_flush(sb);
                wake_queued_writers(sb);
                reiserfs_async_progress_wait(sb);
                goto out;
        }

        /* check_journal_end might set these, check again */
        if (journal->j_next_full_flush) {
                flush = 1;
        }

        /*
         * j must wait means we have to flush the log blocks, and the
         * real blocks for this transaction
         */
        if (journal->j_must_wait > 0) {
                flush = 1;
        }
#ifdef REISERFS_PREALLOCATE
        /*
         * quota ops might need to nest, setup the journal_info pointer
         * for them and raise the refcount so that it is > 0.
         */
        current->journal_info = th;
        th->t_refcount++;

        /* it should not involve new blocks into the transaction */
        reiserfs_discard_all_prealloc(th);

        th->t_refcount--;
        current->journal_info = th->t_handle_save;
#endif

        /* setup description block */
        d_bh =
            journal_getblk(sb,
                           SB_ONDISK_JOURNAL_1st_BLOCK(sb) +
                           journal->j_start);
        set_buffer_uptodate(d_bh);
        desc = (struct reiserfs_journal_desc *)(d_bh)->b_data;
        memset(d_bh->b_data, 0, d_bh->b_size);
        memcpy(get_journal_desc_magic(d_bh), JOURNAL_DESC_MAGIC, 8);
        set_desc_trans_id(desc, journal->j_trans_id);

        /*
         * setup commit block. Don't write (keep it clean too) this one
         * until after everyone else is written
         */
        c_bh = journal_getblk(sb, SB_ONDISK_JOURNAL_1st_BLOCK(sb) +
                              ((journal->j_start + journal->j_len +
                                1) % SB_ONDISK_JOURNAL_SIZE(sb)));
        commit = (struct reiserfs_journal_commit *)c_bh->b_data;
        memset(c_bh->b_data, 0, c_bh->b_size);
        set_commit_trans_id(commit, journal->j_trans_id);
        set_buffer_uptodate(c_bh);

        /* init this journal list */
        jl = journal->j_current_jl;

        /*
         * we lock the commit before doing anything because
         * we want to make sure nobody tries to run flush_commit_list until
         * the new transaction is fully setup, and we've already flushed the
         * ordered bh list
         */
        reiserfs_mutex_lock_safe(&jl->j_commit_mutex, sb);

        /* save the transaction id in case we need to commit it later */
        commit_trans_id = jl->j_trans_id;

        atomic_set(&jl->j_older_commits_done, 0);
        jl->j_trans_id = journal->j_trans_id;
        jl->j_timestamp = journal->j_trans_start_time;
        jl->j_commit_bh = c_bh;
        jl->j_start = journal->j_start;
        jl->j_len = journal->j_len;
        atomic_set(&jl->j_nonzerolen, journal->j_len);
        atomic_set(&jl->j_commit_left, journal->j_len + 2);
        jl->j_realblock = NULL;

        /*
         * The ENTIRE FOR LOOP MUST not cause schedule to occur.
         * for each real block, add it to the journal list hash,
         * copy into real block index array in the commit or desc block
         */
        trans_half = journal_trans_half(sb->s_blocksize);
        for (i = 0, cn = journal->j_first; cn; cn = cn->next, i++) {
                if (buffer_journaled(cn->bh)) {
                        jl_cn = get_cnode(sb);
                        if (!jl_cn) {
                                reiserfs_panic(sb, "journal-1676",
                                               "get_cnode returned NULL");
                        }
                        if (i == 0) {
                                jl->j_realblock = jl_cn;
                        }
                        jl_cn->prev = last_cn;
                        jl_cn->next = NULL;
                        if (last_cn) {
                                last_cn->next = jl_cn;
                        }
                        last_cn = jl_cn;
                        /*
                         * make sure the block we are trying to log
                         * is not a block of journal or reserved area
                         */
                        if (is_block_in_log_or_reserved_area
                            (sb, cn->bh->b_blocknr)) {
                                reiserfs_panic(sb, "journal-2332",
                                               "Trying to log block %lu, "
                                               "which is a log block",
                                               cn->bh->b_blocknr);
                        }
                        jl_cn->blocknr = cn->bh->b_blocknr;
                        jl_cn->state = 0;
                        jl_cn->sb = sb;
                        jl_cn->bh = cn->bh;
                        jl_cn->jlist = jl;
                        insert_journal_hash(journal->j_list_hash_table, jl_cn);
                        if (i < trans_half) {
                                desc->j_realblock[i] =
                                    cpu_to_le32(cn->bh->b_blocknr);
                        } else {
                                commit->j_realblock[i - trans_half] =
                                    cpu_to_le32(cn->bh->b_blocknr);
                        }
                } else {
                        i--;
                }
        }
        set_desc_trans_len(desc, journal->j_len);
        set_desc_mount_id(desc, journal->j_mount_id);
        set_desc_trans_id(desc, journal->j_trans_id);
        set_commit_trans_len(commit, journal->j_len);

        /*
         * special check in case all buffers in the journal
         * were marked for not logging
         */
        BUG_ON(journal->j_len == 0);

        /*
         * we're about to dirty all the log blocks, mark the description block
         * dirty now too. Don't mark the commit block dirty until all the
         * others are on disk
         */
        mark_buffer_dirty(d_bh);

        /*
         * first data block is j_start + 1, so add one to
         * cur_write_start wherever you use it
         */
        cur_write_start = journal->j_start;
        cn = journal->j_first;
        jindex = 1;     /* start at one so we don't get the desc again */
        while (cn) {
                clear_buffer_journal_new(cn->bh);
                /* copy all the real blocks into log area. dirty log blocks */
                if (buffer_journaled(cn->bh)) {
                        struct buffer_head *tmp_bh;
                        char *addr;
                        struct page *page;
                        tmp_bh =
                            journal_getblk(sb,
                                           SB_ONDISK_JOURNAL_1st_BLOCK(sb) +
                                           ((cur_write_start +
                                             jindex) %
                                            SB_ONDISK_JOURNAL_SIZE(sb)));
                        set_buffer_uptodate(tmp_bh);
                        page = cn->bh->b_page;
                        addr = kmap(page);
                        memcpy(tmp_bh->b_data,
                               addr + offset_in_page(cn->bh->b_data),
                               cn->bh->b_size);
                        kunmap(page);
                        mark_buffer_dirty(tmp_bh);
                        jindex++;
                        set_buffer_journal_dirty(cn->bh);
                        clear_buffer_journaled(cn->bh);
                } else {
                        /*
                         * JDirty cleared sometime during transaction.
                         * don't log this one
                         */
                        reiserfs_warning(sb, "journal-2048",
                                         "BAD, buffer in journal hash, "
                                         "but not JDirty!");
                        brelse(cn->bh);
                }
                next = cn->next;
                free_cnode(sb, cn);
                cn = next;
                reiserfs_cond_resched(sb);
        }

        /*
         * we are done with both the c_bh and d_bh, but
         * c_bh must be written after all other commit blocks,
         * so we dirty/release c_bh in flush_commit_list, with commit_left <= 1.
         */

        journal->j_current_jl = alloc_journal_list(sb);

        /* now it is safe to insert this transaction on the main list */
        list_add_tail(&jl->j_list, &journal->j_journal_list);
        list_add_tail(&jl->j_working_list, &journal->j_working_list);
        journal->j_num_work_lists++;

        /* reset journal values for the next transaction */
        journal->j_start =
            (journal->j_start + journal->j_len +
             2) % SB_ONDISK_JOURNAL_SIZE(sb);
        atomic_set(&journal->j_wcount, 0);
        journal->j_bcount = 0;
        journal->j_last = NULL;
        journal->j_first = NULL;
        journal->j_len = 0;
        journal->j_trans_start_time = 0;
        /* check for trans_id overflow */
        if (++journal->j_trans_id == 0)
                journal->j_trans_id = 10;
        journal->j_current_jl->j_trans_id = journal->j_trans_id;
        journal->j_must_wait = 0;
        journal->j_len_alloc = 0;
        journal->j_next_full_flush = 0;
        journal->j_next_async_flush = 0;
        init_journal_hash(sb);

        /*
         * make sure reiserfs_add_jh sees the new current_jl before we
         * write out the tails
         */
        smp_mb();

        /*
         * tail conversion targets have to hit the disk before we end the
         * transaction. Otherwise a later transaction might repack the tail
         * before this transaction commits, leaving the data block unflushed
         * and clean. If we crash before the later transaction commits, the
         * data block is lost.
         */
        if (!list_empty(&jl->j_tail_bh_list)) {
                depth = reiserfs_write_unlock_nested(sb);
                write_ordered_buffers(&journal->j_dirty_buffers_lock,
                                      journal, jl, &jl->j_tail_bh_list);
                reiserfs_write_lock_nested(sb, depth);
        }
        BUG_ON(!list_empty(&jl->j_tail_bh_list));
        mutex_unlock(&jl->j_commit_mutex);

        /*
         * honor the flush wishes from the caller, simple commits can
         * be done outside the journal lock, they are done below
         *
         * if we don't flush the commit list right now, we put it into
         * the work queue so the people waiting on the async progress work
         * queue don't wait for this proc to flush journal lists and such.
         */
        if (flush) {
                flush_commit_list(sb, jl, 1);
                flush_journal_list(sb, jl, 1);
        } else if (!(jl->j_state & LIST_COMMIT_PENDING)) {
                /*
                 * Avoid queueing work when sb is being shut down. Transaction
                 * will be flushed on journal shutdown.
                 */
                if (sb->s_flags & SB_ACTIVE)
                        queue_delayed_work(REISERFS_SB(sb)->commit_wq,
                                           &journal->j_work, HZ / 10);
        }

        /*
         * if the next transaction has any chance of wrapping, flush
         * transactions that might get overwritten. If any journal lists
         * are very old flush them as well.
         */
first_jl:
        list_for_each_safe(entry, safe, &journal->j_journal_list) {
                temp_jl = JOURNAL_LIST_ENTRY(entry);
                if (journal->j_start <= temp_jl->j_start) {
                        if ((journal->j_start + journal->j_trans_max + 1) >=
                            temp_jl->j_start) {
                                flush_used_journal_lists(sb, temp_jl);
                                goto first_jl;
                        } else if ((journal->j_start +
                                    journal->j_trans_max + 1) <
                                   SB_ONDISK_JOURNAL_SIZE(sb)) {
                                /*
                                 * if we don't cross into the next
                                 * transaction and we don't wrap, there is
                                 * no way we can overlap any later transactions
                                 * break now
                                 */
                                break;
                        }
                } else if ((journal->j_start +
                            journal->j_trans_max + 1) >
                           SB_ONDISK_JOURNAL_SIZE(sb)) {
                        if (((journal->j_start + journal->j_trans_max + 1) %
                             SB_ONDISK_JOURNAL_SIZE(sb)) >=
                            temp_jl->j_start) {
                                flush_used_journal_lists(sb, temp_jl);
                                goto first_jl;
                        } else {
                                /*
                                 * we don't overlap anything from our start
                                 * to the end of the log, and our wrapped
                                 * portion doesn't overlap anything at
                                 * the start of the log. We can break
                                 */
                                break;
                        }
                }
        }

        journal->j_current_jl->j_list_bitmap =
            get_list_bitmap(sb, journal->j_current_jl);

        if (!(journal->j_current_jl->j_list_bitmap)) {
                reiserfs_panic(sb, "journal-1996",
                               "could not get a list bitmap");
        }

        atomic_set(&journal->j_jlock, 0);
        unlock_journal(sb);
        /* wake up anybody waiting to join. */
        clear_bit(J_WRITERS_QUEUED, &journal->j_state);
        wake_up(&journal->j_join_wait);

        if (!flush && wait_on_commit &&
            journal_list_still_alive(sb, commit_trans_id)) {
                flush_commit_list(sb, jl, 1);
        }
out:
        reiserfs_check_lock_depth(sb, "journal end2");

        memset(th, 0, sizeof(*th));
        /*
         * Re-set th->t_super, so we can properly keep track of how many
         * persistent transactions there are. We need to do this so if this
         * call is part of a failed restart_transaction, we can free it later
         */
        th->t_super = sb;

        return journal->j_errno;
}

/* Set the file system read-only and refuse new transactions */
void reiserfs_abort_journal(struct super_block *sb, int errno)
{
        struct reiserfs_journal *journal = SB_JOURNAL(sb);
        if (test_bit(J_ABORTED, &journal->j_state))
                return;

        if (!journal->j_errno)
                journal->j_errno = errno;

        sb->s_flags |= SB_RDONLY;
        set_bit(J_ABORTED, &journal->j_state);

#ifdef CONFIG_REISERFS_CHECK
        dump_stack();
#endif
}
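
/*
 * Abort sketch (illustrative; the triggering check is an example, not a
 * call site from this file): paths that detect an unrecoverable journal
 * I/O error do something like:
 *
 *        if (buffer_write_io_error(bh)) {
 *                reiserfs_abort_journal(sb, -EIO);
 *                ... bail out ...
 *        }
 *
 * after which the superblock is read-only and new transactions are
 * refused.
 */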