// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) Sistina Software, Inc.  1997-2003 All rights reserved.
 * Copyright (C) 2004-2008 Red Hat, Inc.  All rights reserved.
 */

#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/completion.h>
#include <linux/buffer_head.h>
#include <linux/pagemap.h>
#include <linux/pagevec.h>
#include <linux/mpage.h>
#include <linux/fs.h>
#include <linux/writeback.h>
#include <linux/swap.h>
#include <linux/gfs2_ondisk.h>
#include <linux/backing-dev.h>
#include <linux/uio.h>
#include <trace/events/writeback.h>
#include <linux/sched/signal.h>

#include "gfs2.h"
#include "incore.h"
#include "bmap.h"
#include "glock.h"
#include "inode.h"
#include "log.h"
#include "meta_io.h"
#include "quota.h"
#include "trans.h"
#include "rgrp.h"
#include "super.h"
#include "util.h"
#include "glops.h"
#include "aops.h"

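/**
 * gfs2_page_add_databufs - Add journaled data buffers for a page range
 * @ip: The inode
 * @page: The page containing the buffers
 * @from: Byte offset of the start of the range within the page
 * @len: Length of the range in bytes
 *
 * Marks each buffer head that overlaps the range [@from, @from + @len)
 * uptodate and adds it to the current transaction as journaled data.
 */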
void gfs2_page_add_databufs(struct gfs2_inode *ip, struct page *page,
			    unsigned int from, unsigned int len)
{
	struct buffer_head *head = page_buffers(page);
	unsigned int bsize = head->b_size;
	struct buffer_head *bh;
	unsigned int to = from + len;
	unsigned int start, end;

	for (bh = head, start = 0; bh != head || !start;
	     bh = bh->b_this_page, start = end) {
		end = start + bsize;
		if (end <= from)
			continue;
		if (start >= to)
			break;
		set_buffer_uptodate(bh);
		gfs2_trans_add_data(ip->i_gl, bh);
	}
}

/**
 * gfs2_get_block_noalloc - Fills in a buffer head with details about a block
 * @inode: The inode
 * @lblock: The block number to look up
 * @bh_result: The buffer head to return the result in
 * @create: Non-zero if we may add block to the file
 *
 * Returns: errno
 */

static int gfs2_get_block_noalloc(struct inode *inode, sector_t lblock,
				  struct buffer_head *bh_result, int create)
{
	int error;

	error = gfs2_block_map(inode, lblock, bh_result, 0);
	if (error)
		return error;
	if (!buffer_mapped(bh_result))
		return -ENODATA;
	return 0;
}

/**
 * gfs2_writepage - Write page for writeback mappings
 * @page: The page
 * @wbc: The writeback control
 */
static int gfs2_writepage(struct page *page, struct writeback_control *wbc)
{
	struct inode *inode = page->mapping->host;
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_sbd *sdp = GFS2_SB(inode);
	struct iomap_writepage_ctx wpc = { };

	if (gfs2_assert_withdraw(sdp, gfs2_glock_is_held_excl(ip->i_gl)))
		goto out;
	if (current->journal_info)
		goto redirty;
	return iomap_writepage(page, wbc, &wpc, &gfs2_writeback_ops);

redirty:
	redirty_page_for_writepage(wbc, page);
out:
	unlock_page(page);
	return 0;
}

/**
 * gfs2_write_jdata_page - gfs2 jdata-specific version of block_write_full_page
 * @page: The page to write
 * @wbc: The writeback control
 *
 * This is the same as calling block_write_full_page, but it also
 * writes pages outside of i_size
 */
static int gfs2_write_jdata_page(struct page *page,
				 struct writeback_control *wbc)
{
	struct inode * const inode = page->mapping->host;
	loff_t i_size = i_size_read(inode);
	const pgoff_t end_index = i_size >> PAGE_SHIFT;
	unsigned offset;

	/*
	 * The page straddles i_size.  It must be zeroed out on each and every
	 * writepage invocation because it may be mmapped.  "A file is mapped
	 * in multiples of the page size.  For a file that is not a multiple of
	 * the page size, the remaining memory is zeroed when mapped, and
	 * writes to that region are not written out to the file."
	 */
	offset = i_size & (PAGE_SIZE - 1);
	if (page->index == end_index && offset)
		zero_user_segment(page, offset, PAGE_SIZE);

	return __block_write_full_page(inode, page, gfs2_get_block_noalloc, wbc,
				       end_buffer_async_write);
}

/**
 * __gfs2_jdata_writepage - The core of jdata writepage
 * @page: The page to write
 * @wbc: The writeback control
 *
 * This is shared between writepage and writepages and implements the
 * core of the writepage operation. If a transaction is required then
 * PageChecked will have been set and the transaction will have
 * already been started before this is called.
 */

static int __gfs2_jdata_writepage(struct page *page, struct writeback_control *wbc)
{
	struct inode *inode = page->mapping->host;
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_sbd *sdp = GFS2_SB(inode);

	if (PageChecked(page)) {
		ClearPageChecked(page);
		if (!page_has_buffers(page)) {
			create_empty_buffers(page, inode->i_sb->s_blocksize,
					     BIT(BH_Dirty)|BIT(BH_Uptodate));
		}
		gfs2_page_add_databufs(ip, page, 0, sdp->sd_vfs->s_blocksize);
	}
	return gfs2_write_jdata_page(page, wbc);
}

/**
 * gfs2_jdata_writepage - Write complete page
 * @page: Page to write
 * @wbc: The writeback control
 *
 * Returns: errno
 *
 */

static int gfs2_jdata_writepage(struct page *page, struct writeback_control *wbc)
{
	struct inode *inode = page->mapping->host;
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_sbd *sdp = GFS2_SB(inode);

	if (gfs2_assert_withdraw(sdp, gfs2_glock_is_held_excl(ip->i_gl)))
		goto out;
	if (PageChecked(page) || current->journal_info)
		goto out_ignore;
	return __gfs2_jdata_writepage(page, wbc);

out_ignore:
	redirty_page_for_writepage(wbc, page);
out:
	unlock_page(page);
	return 0;
}

/**
 * gfs2_writepages - Write a bunch of dirty pages back to disk
 * @mapping: The mapping to write
 * @wbc: Write-back control
 *
 * Used for both ordered and writeback modes.
 */
static int gfs2_writepages(struct address_space *mapping,
			   struct writeback_control *wbc)
{
	struct gfs2_sbd *sdp = gfs2_mapping2sbd(mapping);
	struct iomap_writepage_ctx wpc = { };
	int ret;

	/*
	 * Even if we didn't write any pages here, we might still be holding
	 * dirty pages in the ail. We forcibly flush the ail because we don't
	 * want balance_dirty_pages() to loop indefinitely trying to write out
	 * pages held in the ail that it can't find.
	 */
	ret = iomap_writepages(mapping, wbc, &wpc, &gfs2_writeback_ops);
	if (ret == 0)
		set_bit(SDF_FORCE_AIL_FLUSH, &sdp->sd_flags);
	return ret;
}

/**
 * gfs2_write_jdata_pagevec - Write back a pagevec's worth of pages
 * @mapping: The mapping
 * @wbc: The writeback control
 * @pvec: The vector of pages
 * @nr_pages: The number of pages to write
 * @done_index: Updated to the index of the last page processed
 *
 * Returns: non-zero if loop should terminate, zero otherwise
 */

static int gfs2_write_jdata_pagevec(struct address_space *mapping,
				    struct writeback_control *wbc,
				    struct pagevec *pvec,
				    int nr_pages,
				    pgoff_t *done_index)
{
	struct inode *inode = mapping->host;
	struct gfs2_sbd *sdp = GFS2_SB(inode);
	unsigned nrblocks = nr_pages * (PAGE_SIZE >> inode->i_blkbits);
	int i;
	int ret;

	ret = gfs2_trans_begin(sdp, nrblocks, nrblocks);
	if (ret < 0)
		return ret;

	for(i = 0; i < nr_pages; i++) {
		struct page *page = pvec->pages[i];

		*done_index = page->index;

		lock_page(page);

		if (unlikely(page->mapping != mapping)) {
continue_unlock:
			unlock_page(page);
			continue;
		}

		if (!PageDirty(page)) {
			/* someone wrote it for us */
			goto continue_unlock;
		}

		if (PageWriteback(page)) {
			if (wbc->sync_mode != WB_SYNC_NONE)
				wait_on_page_writeback(page);
			else
				goto continue_unlock;
		}

		BUG_ON(PageWriteback(page));
		if (!clear_page_dirty_for_io(page))
			goto continue_unlock;

		trace_wbc_writepage(wbc, inode_to_bdi(inode));

		ret = __gfs2_jdata_writepage(page, wbc);
		if (unlikely(ret)) {
			if (ret == AOP_WRITEPAGE_ACTIVATE) {
				unlock_page(page);
				ret = 0;
			} else {

				/*
				 * done_index is set past this page,
				 * so media errors will not choke
				 * background writeout for the entire
				 * file. This has consequences for
				 * range_cyclic semantics (ie. it may
				 * not be suitable for data integrity
				 * writeout).
				 */
				*done_index = page->index + 1;
				ret = 1;
				break;
			}
		}

		/*
		 * We stop writing back only if we are not doing
		 * integrity sync. In case of integrity sync we have to
		 * keep going until we have written all the pages
		 * we tagged for writeback prior to entering this loop.
		 */
		if (--wbc->nr_to_write <= 0 && wbc->sync_mode == WB_SYNC_NONE) {
			ret = 1;
			break;
		}

	}
	gfs2_trans_end(sdp);
	return ret;
}

/**
 * gfs2_write_cache_jdata - Like write_cache_pages but different
 * @mapping: The mapping to write
 * @wbc: The writeback control
 *
 * The reason that we use our own function here is that we need to
 * start transactions before we grab page locks. This allows us
 * to get the ordering right.
 */

static int gfs2_write_cache_jdata(struct address_space *mapping,
				  struct writeback_control *wbc)
{
	int ret = 0;
	int done = 0;
	struct pagevec pvec;
	int nr_pages;
	pgoff_t writeback_index;
	pgoff_t index;
	pgoff_t end;
	pgoff_t done_index;
	int cycled;
	int range_whole = 0;
	xa_mark_t tag;

	pagevec_init(&pvec);
	if (wbc->range_cyclic) {
		writeback_index = mapping->writeback_index; /* prev offset */
		index = writeback_index;
		if (index == 0)
			cycled = 1;
		else
			cycled = 0;
		end = -1;
	} else {
		index = wbc->range_start >> PAGE_SHIFT;
		end = wbc->range_end >> PAGE_SHIFT;
		if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
			range_whole = 1;
		cycled = 1; /* ignore range_cyclic tests */
	}
	if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages)
		tag = PAGECACHE_TAG_TOWRITE;
	else
		tag = PAGECACHE_TAG_DIRTY;

retry:
	if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages)
		tag_pages_for_writeback(mapping, index, end);
	done_index = index;
	while (!done && (index <= end)) {
		nr_pages = pagevec_lookup_range_tag(&pvec, mapping, &index, end,
				tag);
		if (nr_pages == 0)
			break;

		ret = gfs2_write_jdata_pagevec(mapping, wbc, &pvec, nr_pages, &done_index);
		if (ret)
			done = 1;
		if (ret > 0)
			ret = 0;
		pagevec_release(&pvec);
		cond_resched();
	}

	if (!cycled && !done) {
		/*
		 * range_cyclic:
		 * We hit the last page and there is more work to be done: wrap
		 * back to the start of the file
		 */
		cycled = 1;
		index = 0;
		end = writeback_index - 1;
		goto retry;
	}

	if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0))
		mapping->writeback_index = done_index;

	return ret;
}


/**
 * gfs2_jdata_writepages - Write a bunch of dirty pages back to disk
 * @mapping: The mapping to write
 * @wbc: The writeback control
 *
 */

static int gfs2_jdata_writepages(struct address_space *mapping,
				 struct writeback_control *wbc)
{
	struct gfs2_inode *ip = GFS2_I(mapping->host);
	struct gfs2_sbd *sdp = GFS2_SB(mapping->host);
	int ret;

	ret = gfs2_write_cache_jdata(mapping, wbc);
	if (ret == 0 && wbc->sync_mode == WB_SYNC_ALL) {
		gfs2_log_flush(sdp, ip->i_gl, GFS2_LOG_HEAD_FLUSH_NORMAL |
			       GFS2_LFC_JDATA_WPAGES);
		ret = gfs2_write_cache_jdata(mapping, wbc);
	}
	return ret;
}

/**
 * stuffed_readpage - Fill in a Linux page with stuffed file data
 * @ip: the inode
 * @page: the page
 *
 * Returns: errno
 */
static int stuffed_readpage(struct gfs2_inode *ip, struct page *page)
{
	struct buffer_head *dibh;
	u64 dsize = i_size_read(&ip->i_inode);
	void *kaddr;
	int error;

	/*
	 * Due to the order of unstuffing files and ->fault(), we can be
	 * asked for a zero page in the case of a stuffed file being extended,
	 * so we need to supply one here. It doesn't happen often.
	 */
	if (unlikely(page->index)) {
		zero_user(page, 0, PAGE_SIZE);
		SetPageUptodate(page);
		return 0;
	}

	error = gfs2_meta_inode_buffer(ip, &dibh);
	if (error)
		return error;

	kaddr = kmap_atomic(page);
	if (dsize > gfs2_max_stuffed_size(ip))
		dsize = gfs2_max_stuffed_size(ip);
	memcpy(kaddr, dibh->b_data + sizeof(struct gfs2_dinode), dsize);
	memset(kaddr + dsize, 0, PAGE_SIZE - dsize);
	kunmap_atomic(kaddr);
	flush_dcache_page(page);
	brelse(dibh);
	SetPageUptodate(page);

	return 0;
}

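/**
 * __gfs2_readpage - Core readpage worker
 * @file: The file being read (unused; present to match read_cache_page())
 * @page: The page to read
 *
 * Non-jdata pages, and jdata pages without buffers when the block size
 * equals the page size, are read via iomap; otherwise, stuffed inodes
 * are copied straight out of the on-disk dinode, and everything else
 * falls back to mpage_readpage() with gfs2_block_map().
 */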
static int __gfs2_readpage(void *file, struct page *page)
{
	struct inode *inode = page->mapping->host;
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_sbd *sdp = GFS2_SB(inode);
	int error;

	if (!gfs2_is_jdata(ip) ||
	    (i_blocksize(inode) == PAGE_SIZE && !page_has_buffers(page))) {
		error = iomap_readpage(page, &gfs2_iomap_ops);
	} else if (gfs2_is_stuffed(ip)) {
		error = stuffed_readpage(ip, page);
		unlock_page(page);
	} else {
		error = mpage_readpage(page, gfs2_block_map);
	}

	if (unlikely(gfs2_withdrawn(sdp)))
		return -EIO;

	return error;
}

/**
 * gfs2_readpage - read a page of a file
 * @file: The file to read
 * @page: The page of the file
 */

static int gfs2_readpage(struct file *file, struct page *page)
{
	return __gfs2_readpage(file, page);
}

/**
 * gfs2_internal_read - read an internal file
 * @ip: The gfs2 inode
 * @buf: The buffer to fill
 * @pos: The file position
 * @size: The amount to read
 *
 */

int gfs2_internal_read(struct gfs2_inode *ip, char *buf, loff_t *pos,
                       unsigned size)
{
	struct address_space *mapping = ip->i_inode.i_mapping;
	unsigned long index = *pos >> PAGE_SHIFT;
	unsigned offset = *pos & (PAGE_SIZE - 1);
	unsigned copied = 0;
	unsigned amt;
	struct page *page;
	void *p;

	do {
		amt = size - copied;
		if (offset + size > PAGE_SIZE)
			amt = PAGE_SIZE - offset;
		page = read_cache_page(mapping, index, __gfs2_readpage, NULL);
		if (IS_ERR(page))
			return PTR_ERR(page);
		p = kmap_atomic(page);
		memcpy(buf + copied, p + offset, amt);
		kunmap_atomic(p);
		put_page(page);
		copied += amt;
		index++;
		offset = 0;
	} while(copied < size);
	(*pos) += size;
	return size;
}

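/*
 * Illustrative sketch (not part of this file): internal callers such as
 * the resource group index code read on-disk structures through the
 * page cache with gfs2_internal_read(), roughly like:
 *
 *	loff_t pos = 0;
 *	struct gfs2_rindex ri;
 *	int error = gfs2_internal_read(ip, (char *)&ri, &pos, sizeof(ri));
 *
 * On success the full size is returned and *pos is advanced by that
 * amount, so repeated calls walk through the file sequentially.
 */
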
/**
 * gfs2_readahead - Read a bunch of pages at once
 * @rac: Read-ahead control structure
 *
 * Some notes:
 * 1. This is only for readahead, so we can simply ignore any things
 *    which are slightly inconvenient (such as locking conflicts between
 *    the page lock and the glock) and return having done no I/O. It's
 *    obviously not something we'd want to do on too regular a basis.
 *    Any I/O we ignore at this time will be done via readpage later.
 * 2. We don't handle stuffed files here; we let readpage do the honours.
 * 3. mpage_readahead() does the heavy lifting for jdata files, and
 *    iomap_readahead() for everything else.
 * 4. gfs2_block_map() is relied upon to set BH_Boundary in the right places.
 */

static void gfs2_readahead(struct readahead_control *rac)
{
	struct inode *inode = rac->mapping->host;
	struct gfs2_inode *ip = GFS2_I(inode);

	if (gfs2_is_stuffed(ip))
		;
	else if (gfs2_is_jdata(ip))
		mpage_readahead(rac, gfs2_block_map);
	else
		iomap_readahead(rac, &gfs2_iomap_ops);
}

/**
 * adjust_fs_space - Adjusts the free space available due to gfs2_grow
 * @inode: the rindex inode
 */
void adjust_fs_space(struct inode *inode)
{
	struct gfs2_sbd *sdp = GFS2_SB(inode);
	struct gfs2_inode *m_ip = GFS2_I(sdp->sd_statfs_inode);
	struct gfs2_inode *l_ip = GFS2_I(sdp->sd_sc_inode);
	struct gfs2_statfs_change_host *m_sc = &sdp->sd_statfs_master;
	struct gfs2_statfs_change_host *l_sc = &sdp->sd_statfs_local;
	struct buffer_head *m_bh, *l_bh;
	u64 fs_total, new_free;

	if (gfs2_trans_begin(sdp, 2 * RES_STATFS, 0) != 0)
		return;

	/* Total up the file system space, according to the latest rindex. */
	fs_total = gfs2_ri_total(sdp);
	if (gfs2_meta_inode_buffer(m_ip, &m_bh) != 0)
		goto out;

	spin_lock(&sdp->sd_statfs_spin);
	gfs2_statfs_change_in(m_sc, m_bh->b_data +
			      sizeof(struct gfs2_dinode));
	if (fs_total > (m_sc->sc_total + l_sc->sc_total))
		new_free = fs_total - (m_sc->sc_total + l_sc->sc_total);
	else
		new_free = 0;
	spin_unlock(&sdp->sd_statfs_spin);
	fs_warn(sdp, "File system extended by %llu blocks.\n",
		(unsigned long long)new_free);
	gfs2_statfs_change(sdp, new_free, new_free, 0);

	if (gfs2_meta_inode_buffer(l_ip, &l_bh) != 0)
		goto out2;
	update_statfs(sdp, m_bh, l_bh);
	brelse(l_bh);
out2:
	brelse(m_bh);
out:
	sdp->sd_rindex_uptodate = 0;
	gfs2_trans_end(sdp);
}

/**
 * jdata_set_page_dirty - Page dirtying function
 * @page: The page to dirty
 *
 * Returns: 1 if it dirtied the page, or 0 otherwise
 */

static int jdata_set_page_dirty(struct page *page)
{
	if (current->journal_info)
		SetPageChecked(page);
	return __set_page_dirty_buffers(page);
}

/**
 * gfs2_bmap - Block map function
 * @mapping: Address space info
 * @lblock: The block to map
 *
 * Returns: The disk address for the block or 0 on hole or error
 */

static sector_t gfs2_bmap(struct address_space *mapping, sector_t lblock)
{
	struct gfs2_inode *ip = GFS2_I(mapping->host);
	struct gfs2_holder i_gh;
	sector_t dblock = 0;
	int error;

	error = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, LM_FLAG_ANY, &i_gh);
	if (error)
		return 0;

	if (!gfs2_is_stuffed(ip))
		dblock = iomap_bmap(mapping, lblock, &gfs2_iomap_ops);

	gfs2_glock_dq_uninit(&i_gh);

	return dblock;
}

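/*
 * gfs2_discard - Drop a buffer from the journal before invalidating it
 * @sdp: The superblock
 * @bh: The buffer to discard
 *
 * Under the log lock, clears the buffer's dirty state and detaches it
 * from the journal: an unpinned buffer is simply unlinked from its log
 * list, anything else goes through gfs2_remove_from_journal().  The
 * mapped, req and new bits are then cleared so the buffer can no longer
 * be written back.
 */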
static void gfs2_discard(struct gfs2_sbd *sdp, struct buffer_head *bh)
{
	struct gfs2_bufdata *bd;

	lock_buffer(bh);
	gfs2_log_lock(sdp);
	clear_buffer_dirty(bh);
	bd = bh->b_private;
	if (bd) {
		if (!list_empty(&bd->bd_list) && !buffer_pinned(bh))
			list_del_init(&bd->bd_list);
		else {
			spin_lock(&sdp->sd_ail_lock);
			gfs2_remove_from_journal(bh, REMOVE_JDATA);
			spin_unlock(&sdp->sd_ail_lock);
		}
	}
	bh->b_bdev = NULL;
	clear_buffer_mapped(bh);
	clear_buffer_req(bh);
	clear_buffer_new(bh);
	gfs2_log_unlock(sdp);
	unlock_buffer(bh);
}

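/*
 * gfs2_invalidatepage - Invalidate (part of) a jdata page
 * @page: The page being invalidated
 * @offset: Byte offset of the start of the invalidated range
 * @length: Length of the range in bytes
 *
 * Discards every buffer that lies entirely within the range.  For a
 * full-page invalidation, PageChecked is cleared first and an attempt
 * is made to release the page's buffers afterwards.
 */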
static void gfs2_invalidatepage(struct page *page, unsigned int offset,
				unsigned int length)
{
	struct gfs2_sbd *sdp = GFS2_SB(page->mapping->host);
	unsigned int stop = offset + length;
	int partial_page = (offset || length < PAGE_SIZE);
	struct buffer_head *bh, *head;
	unsigned long pos = 0;

	BUG_ON(!PageLocked(page));
	if (!partial_page)
		ClearPageChecked(page);
	if (!page_has_buffers(page))
		goto out;

	bh = head = page_buffers(page);
	do {
		if (pos + bh->b_size > stop)
			return;

		if (offset <= pos)
			gfs2_discard(sdp, bh);
		pos += bh->b_size;
		bh = bh->b_this_page;
	} while (bh != head);
out:
	if (!partial_page)
		try_to_release_page(page, 0);
}

/**
 * gfs2_releasepage - free the metadata associated with a page
 * @page: the page that's being released
 * @gfp_mask: passed from Linux VFS, ignored by us
 *
 * Calls try_to_free_buffers() to free the buffers and put the page if the
 * buffers can be released.
 *
 * Returns: 1 if the page was put or else 0
 */

int gfs2_releasepage(struct page *page, gfp_t gfp_mask)
{
	struct address_space *mapping = page->mapping;
	struct gfs2_sbd *sdp = gfs2_mapping2sbd(mapping);
	struct buffer_head *bh, *head;
	struct gfs2_bufdata *bd;

	if (!page_has_buffers(page))
		return 0;

	/*
	 * From xfs_vm_releasepage: mm accommodates an old ext3 case where
	 * clean pages might not have had the dirty bit cleared.  Thus, it can
	 * send actual dirty pages to ->releasepage() via shrink_active_list().
	 *
	 * As a workaround, we skip pages that contain dirty buffers below.
	 * Once ->releasepage isn't called on dirty pages anymore, we can warn
	 * on dirty buffers like we used to here again.
	 */

	gfs2_log_lock(sdp);
	head = bh = page_buffers(page);
	do {
		if (atomic_read(&bh->b_count))
			goto cannot_release;
		bd = bh->b_private;
		if (bd && bd->bd_tr)
			goto cannot_release;
		if (buffer_dirty(bh) || WARN_ON(buffer_pinned(bh)))
			goto cannot_release;
		bh = bh->b_this_page;
	} while(bh != head);

	head = bh = page_buffers(page);
	do {
		bd = bh->b_private;
		if (bd) {
			gfs2_assert_warn(sdp, bd->bd_bh == bh);
			bd->bd_bh = NULL;
			bh->b_private = NULL;
			/*
			 * The bd may still be queued as a revoke, in which
			 * case we must not dequeue nor free it.
			 */
			if (!bd->bd_blkno && !list_empty(&bd->bd_list))
				list_del_init(&bd->bd_list);
			if (list_empty(&bd->bd_list))
				kmem_cache_free(gfs2_bufdata_cachep, bd);
		}

		bh = bh->b_this_page;
	} while (bh != head);
	gfs2_log_unlock(sdp);

	return try_to_free_buffers(page);

cannot_release:
	gfs2_log_unlock(sdp);
	return 0;
}

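/*
 * Two sets of address space operations: ordered and writeback mode
 * inodes use the iomap-based gfs2_aops, while jdata inodes use the
 * buffer-head based gfs2_jdata_aops so that data pages can be added to
 * the journal via the transaction machinery above.
 */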
static const struct address_space_operations gfs2_aops = {
	.writepage = gfs2_writepage,
	.writepages = gfs2_writepages,
	.readpage = gfs2_readpage,
	.readahead = gfs2_readahead,
	.set_page_dirty = iomap_set_page_dirty,
	.releasepage = iomap_releasepage,
	.invalidatepage = iomap_invalidatepage,
	.bmap = gfs2_bmap,
	.direct_IO = noop_direct_IO,
	.migratepage = iomap_migrate_page,
	.is_partially_uptodate = iomap_is_partially_uptodate,
	.error_remove_page = generic_error_remove_page,
};

static const struct address_space_operations gfs2_jdata_aops = {
	.writepage = gfs2_jdata_writepage,
	.writepages = gfs2_jdata_writepages,
	.readpage = gfs2_readpage,
	.readahead = gfs2_readahead,
	.set_page_dirty = jdata_set_page_dirty,
	.bmap = gfs2_bmap,
	.invalidatepage = gfs2_invalidatepage,
	.releasepage = gfs2_releasepage,
	.is_partially_uptodate = block_is_partially_uptodate,
	.error_remove_page = generic_error_remove_page,
};

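/**
 * gfs2_set_aops - Select the address space operations for an inode
 * @inode: The inode
 *
 * Jdata inodes get gfs2_jdata_aops; everything else gets gfs2_aops.
 */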
void gfs2_set_aops(struct inode *inode)
{
	if (gfs2_is_jdata(GFS2_I(inode)))
		inode->i_mapping->a_ops = &gfs2_jdata_aops;
	else
		inode->i_mapping->a_ops = &gfs2_aops;
}