// SPDX-License-Identifier: GPL-2.0+
/*
 * page.c - buffer/page management specific to NILFS
 *
 * Copyright (C) 2005-2008 Nippon Telegraph and Telephone Corporation.
 *
 * Written by Ryusuke Konishi and Seiji Kihara.
 */

#include <linux/pagemap.h>
#include <linux/writeback.h>
#include <linux/swap.h>
#include <linux/bitops.h>
#include <linux/page-flags.h>
#include <linux/list.h>
#include <linux/highmem.h>
#include <linux/pagevec.h>
#include <linux/gfp.h>
#include "nilfs.h"
#include "page.h"
#include "mdt.h"

#define NILFS_BUFFER_INHERENT_BITS					\
	(BIT(BH_Uptodate) | BIT(BH_Mapped) | BIT(BH_NILFS_Node) |	\
	 BIT(BH_NILFS_Volatile) | BIT(BH_NILFS_Checked))

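/*
 * __nilfs_get_page_block - get the buffer head for a block within a page
 * @page: locked page holding (or to be given) the buffers
 * @block: file block number of the buffer to look up
 * @index: index of @page in its page cache
 * @blkbits: block size shift of the inode
 * @b_state: initial state bits for buffers created on @page
 *
 * Creates empty buffers on @page if it has none, then picks the buffer
 * head covering @block and waits until any I/O in flight on it has
 * completed before returning it.
 */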
static struct buffer_head *
__nilfs_get_page_block(struct page *page, unsigned long block, pgoff_t index,
		       int blkbits, unsigned long b_state)
{
	unsigned long first_block;
	struct buffer_head *bh;

	if (!page_has_buffers(page))
		create_empty_buffers(page, 1 << blkbits, b_state);

	first_block = (unsigned long)index << (PAGE_SHIFT - blkbits);
	bh = nilfs_page_get_nth_block(page, block - first_block);

	touch_buffer(bh);
	wait_on_buffer(bh);
	return bh;
}

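/**
 * nilfs_grab_buffer - get or create a buffer on a page cache page
 * @inode: inode that owns the block
 * @mapping: page cache to look the block up in
 * @blkoff: block offset within the file
 * @b_state: initial state bits for buffers created on the page
 *
 * nilfs_grab_buffer() returns the buffer head covering @blkoff,
 * creating the page and its buffers if they do not exist yet.  On
 * success, the page containing the buffer is left locked and
 * referenced.  Returns NULL if the page could not be obtained.
 */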
struct buffer_head *nilfs_grab_buffer(struct inode *inode,
				      struct address_space *mapping,
				      unsigned long blkoff,
				      unsigned long b_state)
{
	int blkbits = inode->i_blkbits;
	pgoff_t index = blkoff >> (PAGE_SHIFT - blkbits);
	struct page *page;
	struct buffer_head *bh;

	page = grab_cache_page(mapping, index);
	if (unlikely(!page))
		return NULL;

	bh = __nilfs_get_page_block(page, blkoff, index, blkbits, b_state);
	if (unlikely(!bh)) {
		unlock_page(page);
		put_page(page);
		return NULL;
	}
	return bh;
}

/**
 * nilfs_forget_buffer - discard dirty state
 * @bh: buffer head of the buffer to be discarded
 */
void nilfs_forget_buffer(struct buffer_head *bh)
{
	struct page *page = bh->b_page;
	const unsigned long clear_bits =
		(BIT(BH_Uptodate) | BIT(BH_Dirty) | BIT(BH_Mapped) |
		 BIT(BH_Async_Write) | BIT(BH_NILFS_Volatile) |
		 BIT(BH_NILFS_Checked) | BIT(BH_NILFS_Redirected));

	lock_buffer(bh);
	set_mask_bits(&bh->b_state, clear_bits, 0);
	if (nilfs_page_buffers_clean(page))
		__nilfs_clear_page_dirty(page);

	bh->b_blocknr = -1;
	ClearPageUptodate(page);
	ClearPageMappedToDisk(page);
	unlock_buffer(bh);
	brelse(bh);
}

/**
 * nilfs_copy_buffer -- copy buffer data and flags
 * @dbh: destination buffer
 * @sbh: source buffer
 */
void nilfs_copy_buffer(struct buffer_head *dbh, struct buffer_head *sbh)
{
	void *kaddr0, *kaddr1;
	unsigned long bits;
	struct page *spage = sbh->b_page, *dpage = dbh->b_page;
	struct buffer_head *bh;

	kaddr0 = kmap_atomic(spage);
	kaddr1 = kmap_atomic(dpage);
	memcpy(kaddr1 + bh_offset(dbh), kaddr0 + bh_offset(sbh), sbh->b_size);
	kunmap_atomic(kaddr1);
	kunmap_atomic(kaddr0);

	dbh->b_state = sbh->b_state & NILFS_BUFFER_INHERENT_BITS;
	dbh->b_blocknr = sbh->b_blocknr;
	dbh->b_bdev = sbh->b_bdev;

	bh = dbh;
	bits = sbh->b_state & (BIT(BH_Uptodate) | BIT(BH_Mapped));
	while ((bh = bh->b_this_page) != dbh) {
		lock_buffer(bh);
		bits &= bh->b_state;
		unlock_buffer(bh);
	}
	if (bits & BIT(BH_Uptodate))
		SetPageUptodate(dpage);
	else
		ClearPageUptodate(dpage);
	if (bits & BIT(BH_Mapped))
		SetPageMappedToDisk(dpage);
	else
		ClearPageMappedToDisk(dpage);
}

/**
 * nilfs_page_buffers_clean - check if a page has dirty buffers or not.
 * @page: page to be checked
 *
 * nilfs_page_buffers_clean() returns zero if the page has dirty buffers.
 * Otherwise, it returns a non-zero value.
 */
int nilfs_page_buffers_clean(struct page *page)
{
	struct buffer_head *bh, *head;

	bh = head = page_buffers(page);
	do {
		if (buffer_dirty(bh))
			return 0;
		bh = bh->b_this_page;
	} while (bh != head);
	return 1;
}

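/*
 * nilfs_page_bug - dump diagnostic information about a broken page
 * @page: page to be reported
 *
 * Prints the state of @page (reference count, index, flags, owner
 * inode) and of each of its buffer heads to the kernel log.  This is
 * the helper behind the NILFS_PAGE_BUG() checks in this file.
 */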
void nilfs_page_bug(struct page *page)
{
	struct address_space *m;
	unsigned long ino;

	if (unlikely(!page)) {
		printk(KERN_CRIT "NILFS_PAGE_BUG(NULL)\n");
		return;
	}

	m = page->mapping;
	ino = m ? m->host->i_ino : 0;

	printk(KERN_CRIT "NILFS_PAGE_BUG(%p): cnt=%d index#=%llu flags=0x%lx "
	       "mapping=%p ino=%lu\n",
	       page, page_ref_count(page),
	       (unsigned long long)page->index, page->flags, m, ino);

	if (page_has_buffers(page)) {
		struct buffer_head *bh, *head;
		int i = 0;

		bh = head = page_buffers(page);
		do {
			printk(KERN_CRIT
			       " BH[%d] %p: cnt=%d block#=%llu state=0x%lx\n",
			       i++, bh, atomic_read(&bh->b_count),
			       (unsigned long long)bh->b_blocknr, bh->b_state);
			bh = bh->b_this_page;
		} while (bh != head);
	}
}

/**
 * nilfs_copy_page -- copy the page with buffers
 * @dst: destination page
 * @src: source page
 * @copy_dirty: flag whether to copy dirty states on the page's buffer heads.
 *
 * This function is for both data pages and btnode pages.  The dirty flag
 * must be handled by the caller.  The page must not be under I/O.
 * Both src and dst pages must be locked.
 */
static void nilfs_copy_page(struct page *dst, struct page *src, int copy_dirty)
{
	struct buffer_head *dbh, *dbufs, *sbh, *sbufs;
	unsigned long mask = NILFS_BUFFER_INHERENT_BITS;

	BUG_ON(PageWriteback(dst));

	sbh = sbufs = page_buffers(src);
	if (!page_has_buffers(dst))
		create_empty_buffers(dst, sbh->b_size, 0);

	if (copy_dirty)
		mask |= BIT(BH_Dirty);

	dbh = dbufs = page_buffers(dst);
	do {
		lock_buffer(sbh);
		lock_buffer(dbh);
		dbh->b_state = sbh->b_state & mask;
		dbh->b_blocknr = sbh->b_blocknr;
		dbh->b_bdev = sbh->b_bdev;
		sbh = sbh->b_this_page;
		dbh = dbh->b_this_page;
	} while (dbh != dbufs);

	copy_highpage(dst, src);

	if (PageUptodate(src) && !PageUptodate(dst))
		SetPageUptodate(dst);
	else if (!PageUptodate(src) && PageUptodate(dst))
		ClearPageUptodate(dst);
	if (PageMappedToDisk(src) && !PageMappedToDisk(dst))
		SetPageMappedToDisk(dst);
	else if (!PageMappedToDisk(src) && PageMappedToDisk(dst))
		ClearPageMappedToDisk(dst);

	do {
		unlock_buffer(sbh);
		unlock_buffer(dbh);
		sbh = sbh->b_this_page;
		dbh = dbh->b_this_page;
	} while (dbh != dbufs);
}

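/**
 * nilfs_copy_dirty_pages - copy dirty pages between two page caches
 * @dmap: destination page cache
 * @smap: source page cache
 *
 * Copies every page tagged dirty in @smap, together with its buffers
 * and their dirty state, into @dmap, marking each copy dirty.  The
 * source pages are left unchanged.
 *
 * Returns 0 on success, or -ENOMEM if a destination page could not be
 * allocated.
 */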
int nilfs_copy_dirty_pages(struct address_space *dmap,
			   struct address_space *smap)
{
	struct pagevec pvec;
	unsigned int i;
	pgoff_t index = 0;
	int err = 0;

	pagevec_init(&pvec);
repeat:
	if (!pagevec_lookup_tag(&pvec, smap, &index, PAGECACHE_TAG_DIRTY))
		return 0;

	for (i = 0; i < pagevec_count(&pvec); i++) {
		struct page *page = pvec.pages[i], *dpage;

		lock_page(page);
		if (unlikely(!PageDirty(page)))
			NILFS_PAGE_BUG(page, "inconsistent dirty state");

		dpage = grab_cache_page(dmap, page->index);
		if (unlikely(!dpage)) {
			/* No empty page is added to the page cache */
			err = -ENOMEM;
			unlock_page(page);
			break;
		}
		if (unlikely(!page_has_buffers(page)))
			NILFS_PAGE_BUG(page,
				       "found empty page in dat page cache");

		nilfs_copy_page(dpage, page, 1);
		__set_page_dirty_nobuffers(dpage);

		unlock_page(dpage);
		put_page(dpage);
		unlock_page(page);
	}
	pagevec_release(&pvec);
	cond_resched();

	if (likely(!err))
		goto repeat;
	return err;
}

/**
 * nilfs_copy_back_pages -- copy back pages to original cache from shadow cache
 * @dmap: destination page cache
 * @smap: source page cache
 *
 * No pages must be added to the cache during this process.
 * This must be ensured by the caller.
 */
void nilfs_copy_back_pages(struct address_space *dmap,
			   struct address_space *smap)
{
	struct pagevec pvec;
	unsigned int i, n;
	pgoff_t index = 0;

	pagevec_init(&pvec);
repeat:
	n = pagevec_lookup(&pvec, smap, &index);
	if (!n)
		return;

	for (i = 0; i < pagevec_count(&pvec); i++) {
		struct page *page = pvec.pages[i], *dpage;
		pgoff_t offset = page->index;

		lock_page(page);
		dpage = find_lock_page(dmap, offset);
		if (dpage) {
			/* overwrite existing page in the destination cache */
			WARN_ON(PageDirty(dpage));
			nilfs_copy_page(dpage, page, 0);
			unlock_page(dpage);
			put_page(dpage);
			/* Do we not need to remove page from smap here? */
		} else {
			struct page *p;

			/* move the page to the destination cache */
			xa_lock_irq(&smap->i_pages);
			p = __xa_erase(&smap->i_pages, offset);
			WARN_ON(page != p);
			smap->nrpages--;
			xa_unlock_irq(&smap->i_pages);

			xa_lock_irq(&dmap->i_pages);
			p = __xa_store(&dmap->i_pages, offset, page, GFP_NOFS);
			if (unlikely(p)) {
				/* Probably -ENOMEM */
				page->mapping = NULL;
				put_page(page);
			} else {
				page->mapping = dmap;
				dmap->nrpages++;
				if (PageDirty(page))
					__xa_set_mark(&dmap->i_pages, offset,
						      PAGECACHE_TAG_DIRTY);
			}
			xa_unlock_irq(&dmap->i_pages);
		}
		unlock_page(page);
	}
	pagevec_release(&pvec);
	cond_resched();

	goto repeat;
}

/**
 * nilfs_clear_dirty_pages - discard dirty pages in address space
 * @mapping: address space with dirty pages for discarding
 * @silent: suppress [true] or print [false] warning messages
 */
void nilfs_clear_dirty_pages(struct address_space *mapping, bool silent)
{
	struct pagevec pvec;
	unsigned int i;
	pgoff_t index = 0;

	pagevec_init(&pvec);

	while (pagevec_lookup_tag(&pvec, mapping, &index,
				  PAGECACHE_TAG_DIRTY)) {
		for (i = 0; i < pagevec_count(&pvec); i++) {
			struct page *page = pvec.pages[i];

			lock_page(page);
			nilfs_clear_dirty_page(page, silent);
			unlock_page(page);
		}
		pagevec_release(&pvec);
		cond_resched();
	}
}

/**
 * nilfs_clear_dirty_page - discard dirty page
 * @page: dirty page that will be discarded
 * @silent: suppress [true] or print [false] warning messages
 */
void nilfs_clear_dirty_page(struct page *page, bool silent)
{
	struct inode *inode = page->mapping->host;
	struct super_block *sb = inode->i_sb;

	BUG_ON(!PageLocked(page));

	if (!silent)
		nilfs_warn(sb, "discard dirty page: offset=%lld, ino=%lu",
			   page_offset(page), inode->i_ino);

	ClearPageUptodate(page);
	ClearPageMappedToDisk(page);

	if (page_has_buffers(page)) {
		struct buffer_head *bh, *head;
		const unsigned long clear_bits =
			(BIT(BH_Uptodate) | BIT(BH_Dirty) | BIT(BH_Mapped) |
			 BIT(BH_Async_Write) | BIT(BH_NILFS_Volatile) |
			 BIT(BH_NILFS_Checked) | BIT(BH_NILFS_Redirected));

		bh = head = page_buffers(page);
		do {
			lock_buffer(bh);
			if (!silent)
				nilfs_warn(sb,
					   "discard dirty block: blocknr=%llu, size=%zu",
					   (u64)bh->b_blocknr, bh->b_size);

			set_mask_bits(&bh->b_state, clear_bits, 0);
			unlock_buffer(bh);
		} while (bh = bh->b_this_page, bh != head);
	}

	__nilfs_clear_page_dirty(page);
}

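/**
 * nilfs_page_count_clean_buffers - count clean buffers in a byte range
 * @page: page whose buffers are examined
 * @from: start offset of the range within the page (inclusive)
 * @to: end offset of the range within the page (exclusive)
 *
 * Returns the number of buffers overlapping [@from, @to) whose dirty
 * flag is not set.
 */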
unsigned int nilfs_page_count_clean_buffers(struct page *page,
					    unsigned int from, unsigned int to)
{
	unsigned int block_start, block_end;
	struct buffer_head *bh, *head;
	unsigned int nc = 0;

	for (bh = head = page_buffers(page), block_start = 0;
	     bh != head || !block_start;
	     block_start = block_end, bh = bh->b_this_page) {
		block_end = block_start + bh->b_size;
		if (block_end > from && block_start < to && !buffer_dirty(bh))
			nc++;
	}
	return nc;
}

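/*
 * nilfs_mapping_init - initialize an address space for NILFS metadata
 * @mapping: address space to be initialized
 * @inode: inode the address space belongs to
 *
 * Sets up @mapping with GFP_NOFS allocations and empty address space
 * operations; used for page caches that NILFS drives by itself, such
 * as B-tree node caches and shadow caches.
 */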
void nilfs_mapping_init(struct address_space *mapping, struct inode *inode)
{
	mapping->host = inode;
	mapping->flags = 0;
	mapping_set_gfp_mask(mapping, GFP_NOFS);
	mapping->private_data = NULL;
	mapping->a_ops = &empty_aops;
}

/*
 * NILFS2 needs clear_page_dirty() in the following two cases:
 *
 * 1) For B-tree node pages and data pages of the DAT file, NILFS2 clears
 *    the dirty flag of pages when it copies back pages from the shadow
 *    cache to the original cache.
 *
 * 2) Some B-tree operations like insertion or deletion may dispose buffers
 *    in dirty state, and this needs to cancel the dirty state of their pages.
 */
int __nilfs_clear_page_dirty(struct page *page)
{
	struct address_space *mapping = page->mapping;

	if (mapping) {
		xa_lock_irq(&mapping->i_pages);
		if (test_bit(PG_dirty, &page->flags)) {
			__xa_clear_mark(&mapping->i_pages, page_index(page),
					PAGECACHE_TAG_DIRTY);
			xa_unlock_irq(&mapping->i_pages);
			return clear_page_dirty_for_io(page);
		}
		xa_unlock_irq(&mapping->i_pages);
		return 0;
	}
	return TestClearPageDirty(page);
}

/**
 * nilfs_find_uncommitted_extent - find extent of uncommitted data
 * @inode: inode
 * @start_blk: start block offset (in)
 * @blkoff: start offset of the found extent (out)
 *
 * This function searches for an extent of buffers marked "delayed"
 * which starts from a block offset equal to or larger than @start_blk.
 * If such an extent is found, this function stores the start offset in
 * @blkoff and returns its length in blocks.  Otherwise, zero is
 * returned.
 */
unsigned long nilfs_find_uncommitted_extent(struct inode *inode,
					    sector_t start_blk,
					    sector_t *blkoff)
{
	unsigned int i;
	pgoff_t index;
	unsigned int nblocks_in_page;
	unsigned long length = 0;
	sector_t b;
	struct pagevec pvec;
	struct page *page;

	if (inode->i_mapping->nrpages == 0)
		return 0;

	index = start_blk >> (PAGE_SHIFT - inode->i_blkbits);
	nblocks_in_page = 1U << (PAGE_SHIFT - inode->i_blkbits);

	pagevec_init(&pvec);

repeat:
	pvec.nr = find_get_pages_contig(inode->i_mapping, index, PAGEVEC_SIZE,
					pvec.pages);
	if (pvec.nr == 0)
		return length;

	if (length > 0 && pvec.pages[0]->index > index)
		goto out;

	b = pvec.pages[0]->index << (PAGE_SHIFT - inode->i_blkbits);
	i = 0;
	do {
		page = pvec.pages[i];

		lock_page(page);
		if (page_has_buffers(page)) {
			struct buffer_head *bh, *head;

			bh = head = page_buffers(page);
			do {
				if (b < start_blk)
					continue;
				if (buffer_delay(bh)) {
					if (length == 0)
						*blkoff = b;
					length++;
				} else if (length > 0) {
					goto out_locked;
				}
			} while (++b, bh = bh->b_this_page, bh != head);
		} else {
			if (length > 0)
				goto out_locked;

			b += nblocks_in_page;
		}
		unlock_page(page);

	} while (++i < pagevec_count(&pvec));

	index = page->index + 1;
	pagevec_release(&pvec);
	cond_resched();
	goto repeat;

out_locked:
	unlock_page(page);
out:
	pagevec_release(&pvec);
	return length;
}