xref: /OK3568_Linux_fs/kernel/fs/ocfs2/aops.c (revision 4882a59341e53eb6f0b4789bf948001014eff981)
1*4882a593Smuzhiyun // SPDX-License-Identifier: GPL-2.0-or-later
2*4882a593Smuzhiyun /* -*- mode: c; c-basic-offset: 8; -*-
3*4882a593Smuzhiyun  * vim: noexpandtab sw=8 ts=8 sts=0:
4*4882a593Smuzhiyun  *
5*4882a593Smuzhiyun  * Copyright (C) 2002, 2004 Oracle.  All rights reserved.
6*4882a593Smuzhiyun  */
7*4882a593Smuzhiyun 
8*4882a593Smuzhiyun #include <linux/fs.h>
9*4882a593Smuzhiyun #include <linux/slab.h>
10*4882a593Smuzhiyun #include <linux/highmem.h>
11*4882a593Smuzhiyun #include <linux/pagemap.h>
12*4882a593Smuzhiyun #include <asm/byteorder.h>
13*4882a593Smuzhiyun #include <linux/swap.h>
14*4882a593Smuzhiyun #include <linux/mpage.h>
15*4882a593Smuzhiyun #include <linux/quotaops.h>
16*4882a593Smuzhiyun #include <linux/blkdev.h>
17*4882a593Smuzhiyun #include <linux/uio.h>
18*4882a593Smuzhiyun #include <linux/mm.h>
19*4882a593Smuzhiyun 
20*4882a593Smuzhiyun #include <cluster/masklog.h>
21*4882a593Smuzhiyun 
22*4882a593Smuzhiyun #include "ocfs2.h"
23*4882a593Smuzhiyun 
24*4882a593Smuzhiyun #include "alloc.h"
25*4882a593Smuzhiyun #include "aops.h"
26*4882a593Smuzhiyun #include "dlmglue.h"
27*4882a593Smuzhiyun #include "extent_map.h"
28*4882a593Smuzhiyun #include "file.h"
29*4882a593Smuzhiyun #include "inode.h"
30*4882a593Smuzhiyun #include "journal.h"
31*4882a593Smuzhiyun #include "suballoc.h"
32*4882a593Smuzhiyun #include "super.h"
33*4882a593Smuzhiyun #include "symlink.h"
34*4882a593Smuzhiyun #include "refcounttree.h"
35*4882a593Smuzhiyun #include "ocfs2_trace.h"
36*4882a593Smuzhiyun 
37*4882a593Smuzhiyun #include "buffer_head_io.h"
38*4882a593Smuzhiyun #include "dir.h"
39*4882a593Smuzhiyun #include "namei.h"
40*4882a593Smuzhiyun #include "sysfile.h"
41*4882a593Smuzhiyun 
42*4882a593Smuzhiyun static int ocfs2_symlink_get_block(struct inode *inode, sector_t iblock,
43*4882a593Smuzhiyun 				   struct buffer_head *bh_result, int create)
44*4882a593Smuzhiyun {
45*4882a593Smuzhiyun 	int err = -EIO;
46*4882a593Smuzhiyun 	int status;
47*4882a593Smuzhiyun 	struct ocfs2_dinode *fe = NULL;
48*4882a593Smuzhiyun 	struct buffer_head *bh = NULL;
49*4882a593Smuzhiyun 	struct buffer_head *buffer_cache_bh = NULL;
50*4882a593Smuzhiyun 	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
51*4882a593Smuzhiyun 	void *kaddr;
52*4882a593Smuzhiyun 
53*4882a593Smuzhiyun 	trace_ocfs2_symlink_get_block(
54*4882a593Smuzhiyun 			(unsigned long long)OCFS2_I(inode)->ip_blkno,
55*4882a593Smuzhiyun 			(unsigned long long)iblock, bh_result, create);
56*4882a593Smuzhiyun 
57*4882a593Smuzhiyun 	BUG_ON(ocfs2_inode_is_fast_symlink(inode));
58*4882a593Smuzhiyun 
59*4882a593Smuzhiyun 	if ((iblock << inode->i_sb->s_blocksize_bits) > PATH_MAX + 1) {
60*4882a593Smuzhiyun 		mlog(ML_ERROR, "block offset > PATH_MAX: %llu",
61*4882a593Smuzhiyun 		     (unsigned long long)iblock);
62*4882a593Smuzhiyun 		goto bail;
63*4882a593Smuzhiyun 	}
64*4882a593Smuzhiyun 
65*4882a593Smuzhiyun 	status = ocfs2_read_inode_block(inode, &bh);
66*4882a593Smuzhiyun 	if (status < 0) {
67*4882a593Smuzhiyun 		mlog_errno(status);
68*4882a593Smuzhiyun 		goto bail;
69*4882a593Smuzhiyun 	}
70*4882a593Smuzhiyun 	fe = (struct ocfs2_dinode *) bh->b_data;
71*4882a593Smuzhiyun 
72*4882a593Smuzhiyun 	if ((u64)iblock >= ocfs2_clusters_to_blocks(inode->i_sb,
73*4882a593Smuzhiyun 						    le32_to_cpu(fe->i_clusters))) {
74*4882a593Smuzhiyun 		err = -ENOMEM;
75*4882a593Smuzhiyun 		mlog(ML_ERROR, "block offset is outside the allocated size: "
76*4882a593Smuzhiyun 		     "%llu\n", (unsigned long long)iblock);
77*4882a593Smuzhiyun 		goto bail;
78*4882a593Smuzhiyun 	}
79*4882a593Smuzhiyun 
80*4882a593Smuzhiyun 	/* We don't use the page cache to create symlink data, so if
81*4882a593Smuzhiyun 	 * need be, copy it over from the buffer cache. */
82*4882a593Smuzhiyun 	if (!buffer_uptodate(bh_result) && ocfs2_inode_is_new(inode)) {
83*4882a593Smuzhiyun 		u64 blkno = le64_to_cpu(fe->id2.i_list.l_recs[0].e_blkno) +
84*4882a593Smuzhiyun 			    iblock;
85*4882a593Smuzhiyun 		buffer_cache_bh = sb_getblk(osb->sb, blkno);
86*4882a593Smuzhiyun 		if (!buffer_cache_bh) {
87*4882a593Smuzhiyun 			err = -ENOMEM;
88*4882a593Smuzhiyun 			mlog(ML_ERROR, "couldn't getblock for symlink!\n");
89*4882a593Smuzhiyun 			goto bail;
90*4882a593Smuzhiyun 		}
91*4882a593Smuzhiyun 
92*4882a593Smuzhiyun 		/* we haven't locked out transactions, so a commit
93*4882a593Smuzhiyun 		 * could've happened. Since we've got a reference on
94*4882a593Smuzhiyun 		 * the bh, even if it commits while we're doing the
95*4882a593Smuzhiyun 		 * copy, the data is still good. */
96*4882a593Smuzhiyun 		if (buffer_jbd(buffer_cache_bh)
97*4882a593Smuzhiyun 		    && ocfs2_inode_is_new(inode)) {
98*4882a593Smuzhiyun 			kaddr = kmap_atomic(bh_result->b_page);
99*4882a593Smuzhiyun 			if (!kaddr) {
100*4882a593Smuzhiyun 				mlog(ML_ERROR, "couldn't kmap!\n");
101*4882a593Smuzhiyun 				goto bail;
102*4882a593Smuzhiyun 			}
103*4882a593Smuzhiyun 			memcpy(kaddr + (bh_result->b_size * iblock),
104*4882a593Smuzhiyun 			       buffer_cache_bh->b_data,
105*4882a593Smuzhiyun 			       bh_result->b_size);
106*4882a593Smuzhiyun 			kunmap_atomic(kaddr);
107*4882a593Smuzhiyun 			set_buffer_uptodate(bh_result);
108*4882a593Smuzhiyun 		}
109*4882a593Smuzhiyun 		brelse(buffer_cache_bh);
110*4882a593Smuzhiyun 	}
111*4882a593Smuzhiyun 
112*4882a593Smuzhiyun 	map_bh(bh_result, inode->i_sb,
113*4882a593Smuzhiyun 	       le64_to_cpu(fe->id2.i_list.l_recs[0].e_blkno) + iblock);
114*4882a593Smuzhiyun 
115*4882a593Smuzhiyun 	err = 0;
116*4882a593Smuzhiyun 
117*4882a593Smuzhiyun bail:
118*4882a593Smuzhiyun 	brelse(bh);
119*4882a593Smuzhiyun 
120*4882a593Smuzhiyun 	return err;
121*4882a593Smuzhiyun }
122*4882a593Smuzhiyun 
123*4882a593Smuzhiyun static int ocfs2_lock_get_block(struct inode *inode, sector_t iblock,
124*4882a593Smuzhiyun 		    struct buffer_head *bh_result, int create)
125*4882a593Smuzhiyun {
126*4882a593Smuzhiyun 	int ret = 0;
127*4882a593Smuzhiyun 	struct ocfs2_inode_info *oi = OCFS2_I(inode);
128*4882a593Smuzhiyun 
129*4882a593Smuzhiyun 	down_read(&oi->ip_alloc_sem);
130*4882a593Smuzhiyun 	ret = ocfs2_get_block(inode, iblock, bh_result, create);
131*4882a593Smuzhiyun 	up_read(&oi->ip_alloc_sem);
132*4882a593Smuzhiyun 
133*4882a593Smuzhiyun 	return ret;
134*4882a593Smuzhiyun }
135*4882a593Smuzhiyun 
136*4882a593Smuzhiyun int ocfs2_get_block(struct inode *inode, sector_t iblock,
137*4882a593Smuzhiyun 		    struct buffer_head *bh_result, int create)
138*4882a593Smuzhiyun {
139*4882a593Smuzhiyun 	int err = 0;
140*4882a593Smuzhiyun 	unsigned int ext_flags;
141*4882a593Smuzhiyun 	u64 max_blocks = bh_result->b_size >> inode->i_blkbits;
142*4882a593Smuzhiyun 	u64 p_blkno, count, past_eof;
143*4882a593Smuzhiyun 	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
144*4882a593Smuzhiyun 
145*4882a593Smuzhiyun 	trace_ocfs2_get_block((unsigned long long)OCFS2_I(inode)->ip_blkno,
146*4882a593Smuzhiyun 			      (unsigned long long)iblock, bh_result, create);
147*4882a593Smuzhiyun 
148*4882a593Smuzhiyun 	if (OCFS2_I(inode)->ip_flags & OCFS2_INODE_SYSTEM_FILE)
149*4882a593Smuzhiyun 		mlog(ML_NOTICE, "get_block on system inode 0x%p (%lu)\n",
150*4882a593Smuzhiyun 		     inode, inode->i_ino);
151*4882a593Smuzhiyun 
152*4882a593Smuzhiyun 	if (S_ISLNK(inode->i_mode)) {
153*4882a593Smuzhiyun 		/* this always does I/O for some reason. */
154*4882a593Smuzhiyun 		err = ocfs2_symlink_get_block(inode, iblock, bh_result, create);
155*4882a593Smuzhiyun 		goto bail;
156*4882a593Smuzhiyun 	}
157*4882a593Smuzhiyun 
158*4882a593Smuzhiyun 	err = ocfs2_extent_map_get_blocks(inode, iblock, &p_blkno, &count,
159*4882a593Smuzhiyun 					  &ext_flags);
160*4882a593Smuzhiyun 	if (err) {
161*4882a593Smuzhiyun 		mlog(ML_ERROR, "Error %d from get_blocks(0x%p, %llu, 1, "
162*4882a593Smuzhiyun 		     "%llu, NULL)\n", err, inode, (unsigned long long)iblock,
163*4882a593Smuzhiyun 		     (unsigned long long)p_blkno);
164*4882a593Smuzhiyun 		goto bail;
165*4882a593Smuzhiyun 	}
166*4882a593Smuzhiyun 
167*4882a593Smuzhiyun 	if (max_blocks < count)
168*4882a593Smuzhiyun 		count = max_blocks;
169*4882a593Smuzhiyun 
170*4882a593Smuzhiyun 	/*
171*4882a593Smuzhiyun 	 * ocfs2 never allocates in this function - the only time we
172*4882a593Smuzhiyun 	 * need to use BH_New is when we're extending i_size on a file
173*4882a593Smuzhiyun 	 * system which doesn't support holes, in which case BH_New
174*4882a593Smuzhiyun 	 * allows __block_write_begin() to zero.
175*4882a593Smuzhiyun 	 *
176*4882a593Smuzhiyun 	 * If we see this on a sparse file system, then a truncate has
177*4882a593Smuzhiyun 	 * raced us and removed the cluster. In this case, we clear
178*4882a593Smuzhiyun 	 * the buffer's dirty and uptodate bits and let the buffer code
179*4882a593Smuzhiyun 	 * ignore it as a hole.
180*4882a593Smuzhiyun 	 */
181*4882a593Smuzhiyun 	if (create && p_blkno == 0 && ocfs2_sparse_alloc(osb)) {
182*4882a593Smuzhiyun 		clear_buffer_dirty(bh_result);
183*4882a593Smuzhiyun 		clear_buffer_uptodate(bh_result);
184*4882a593Smuzhiyun 		goto bail;
185*4882a593Smuzhiyun 	}
186*4882a593Smuzhiyun 
187*4882a593Smuzhiyun 	/* Treat the unwritten extent as a hole for zeroing purposes. */
188*4882a593Smuzhiyun 	if (p_blkno && !(ext_flags & OCFS2_EXT_UNWRITTEN))
189*4882a593Smuzhiyun 		map_bh(bh_result, inode->i_sb, p_blkno);
190*4882a593Smuzhiyun 
191*4882a593Smuzhiyun 	bh_result->b_size = count << inode->i_blkbits;
192*4882a593Smuzhiyun 
193*4882a593Smuzhiyun 	if (!ocfs2_sparse_alloc(osb)) {
194*4882a593Smuzhiyun 		if (p_blkno == 0) {
195*4882a593Smuzhiyun 			err = -EIO;
196*4882a593Smuzhiyun 			mlog(ML_ERROR,
197*4882a593Smuzhiyun 			     "iblock = %llu p_blkno = %llu blkno=(%llu)\n",
198*4882a593Smuzhiyun 			     (unsigned long long)iblock,
199*4882a593Smuzhiyun 			     (unsigned long long)p_blkno,
200*4882a593Smuzhiyun 			     (unsigned long long)OCFS2_I(inode)->ip_blkno);
201*4882a593Smuzhiyun 			mlog(ML_ERROR, "Size %llu, clusters %u\n", (unsigned long long)i_size_read(inode), OCFS2_I(inode)->ip_clusters);
202*4882a593Smuzhiyun 			dump_stack();
203*4882a593Smuzhiyun 			goto bail;
204*4882a593Smuzhiyun 		}
205*4882a593Smuzhiyun 	}
206*4882a593Smuzhiyun 
207*4882a593Smuzhiyun 	past_eof = ocfs2_blocks_for_bytes(inode->i_sb, i_size_read(inode));
208*4882a593Smuzhiyun 
209*4882a593Smuzhiyun 	trace_ocfs2_get_block_end((unsigned long long)OCFS2_I(inode)->ip_blkno,
210*4882a593Smuzhiyun 				  (unsigned long long)past_eof);
211*4882a593Smuzhiyun 	if (create && (iblock >= past_eof))
212*4882a593Smuzhiyun 		set_buffer_new(bh_result);
213*4882a593Smuzhiyun 
214*4882a593Smuzhiyun bail:
215*4882a593Smuzhiyun 	if (err < 0)
216*4882a593Smuzhiyun 		err = -EIO;
217*4882a593Smuzhiyun 
218*4882a593Smuzhiyun 	return err;
219*4882a593Smuzhiyun }
220*4882a593Smuzhiyun 
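/*
 * A worked example of the mapping contract above (illustrative, not part of
 * the original file), assuming a 4 KiB block size (i_blkbits = 12): a caller
 * such as the direct I/O path may pass in bh_result->b_size = 1 MiB, giving
 * max_blocks = (1 MiB >> 12) = 256.  If ocfs2_extent_map_get_blocks() finds
 * a 300-block contiguous run, count is clamped to 256 and bh_result->b_size
 * is rewritten to 256 << 12 = 1 MiB, so the caller never receives a mapping
 * longer than it asked for.
 */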
221*4882a593Smuzhiyun int ocfs2_read_inline_data(struct inode *inode, struct page *page,
222*4882a593Smuzhiyun 			   struct buffer_head *di_bh)
223*4882a593Smuzhiyun {
224*4882a593Smuzhiyun 	void *kaddr;
225*4882a593Smuzhiyun 	loff_t size;
226*4882a593Smuzhiyun 	struct ocfs2_dinode *di = (struct ocfs2_dinode *)di_bh->b_data;
227*4882a593Smuzhiyun 
228*4882a593Smuzhiyun 	if (!(le16_to_cpu(di->i_dyn_features) & OCFS2_INLINE_DATA_FL)) {
229*4882a593Smuzhiyun 		ocfs2_error(inode->i_sb, "Inode %llu lost inline data flag\n",
230*4882a593Smuzhiyun 			    (unsigned long long)OCFS2_I(inode)->ip_blkno);
231*4882a593Smuzhiyun 		return -EROFS;
232*4882a593Smuzhiyun 	}
233*4882a593Smuzhiyun 
234*4882a593Smuzhiyun 	size = i_size_read(inode);
235*4882a593Smuzhiyun 
236*4882a593Smuzhiyun 	if (size > PAGE_SIZE ||
237*4882a593Smuzhiyun 	    size > ocfs2_max_inline_data_with_xattr(inode->i_sb, di)) {
238*4882a593Smuzhiyun 		ocfs2_error(inode->i_sb,
239*4882a593Smuzhiyun 			    "Inode %llu with inline data has bad size: %Lu\n",
240*4882a593Smuzhiyun 			    (unsigned long long)OCFS2_I(inode)->ip_blkno,
241*4882a593Smuzhiyun 			    (unsigned long long)size);
242*4882a593Smuzhiyun 		return -EROFS;
243*4882a593Smuzhiyun 	}
244*4882a593Smuzhiyun 
245*4882a593Smuzhiyun 	kaddr = kmap_atomic(page);
246*4882a593Smuzhiyun 	if (size)
247*4882a593Smuzhiyun 		memcpy(kaddr, di->id2.i_data.id_data, size);
248*4882a593Smuzhiyun 	/* Clear the remaining part of the page */
249*4882a593Smuzhiyun 	memset(kaddr + size, 0, PAGE_SIZE - size);
250*4882a593Smuzhiyun 	flush_dcache_page(page);
251*4882a593Smuzhiyun 	kunmap_atomic(kaddr);
252*4882a593Smuzhiyun 
253*4882a593Smuzhiyun 	SetPageUptodate(page);
254*4882a593Smuzhiyun 
255*4882a593Smuzhiyun 	return 0;
256*4882a593Smuzhiyun }
257*4882a593Smuzhiyun 
258*4882a593Smuzhiyun static int ocfs2_readpage_inline(struct inode *inode, struct page *page)
259*4882a593Smuzhiyun {
260*4882a593Smuzhiyun 	int ret;
261*4882a593Smuzhiyun 	struct buffer_head *di_bh = NULL;
262*4882a593Smuzhiyun 
263*4882a593Smuzhiyun 	BUG_ON(!PageLocked(page));
264*4882a593Smuzhiyun 	BUG_ON(!(OCFS2_I(inode)->ip_dyn_features & OCFS2_INLINE_DATA_FL));
265*4882a593Smuzhiyun 
266*4882a593Smuzhiyun 	ret = ocfs2_read_inode_block(inode, &di_bh);
267*4882a593Smuzhiyun 	if (ret) {
268*4882a593Smuzhiyun 		mlog_errno(ret);
269*4882a593Smuzhiyun 		goto out;
270*4882a593Smuzhiyun 	}
271*4882a593Smuzhiyun 
272*4882a593Smuzhiyun 	ret = ocfs2_read_inline_data(inode, page, di_bh);
273*4882a593Smuzhiyun out:
274*4882a593Smuzhiyun 	unlock_page(page);
275*4882a593Smuzhiyun 
276*4882a593Smuzhiyun 	brelse(di_bh);
277*4882a593Smuzhiyun 	return ret;
278*4882a593Smuzhiyun }
279*4882a593Smuzhiyun 
280*4882a593Smuzhiyun static int ocfs2_readpage(struct file *file, struct page *page)
281*4882a593Smuzhiyun {
282*4882a593Smuzhiyun 	struct inode *inode = page->mapping->host;
283*4882a593Smuzhiyun 	struct ocfs2_inode_info *oi = OCFS2_I(inode);
284*4882a593Smuzhiyun 	loff_t start = (loff_t)page->index << PAGE_SHIFT;
285*4882a593Smuzhiyun 	int ret, unlock = 1;
286*4882a593Smuzhiyun 
287*4882a593Smuzhiyun 	trace_ocfs2_readpage((unsigned long long)oi->ip_blkno,
288*4882a593Smuzhiyun 			     (page ? page->index : 0));
289*4882a593Smuzhiyun 
290*4882a593Smuzhiyun 	ret = ocfs2_inode_lock_with_page(inode, NULL, 0, page);
291*4882a593Smuzhiyun 	if (ret != 0) {
292*4882a593Smuzhiyun 		if (ret == AOP_TRUNCATED_PAGE)
293*4882a593Smuzhiyun 			unlock = 0;
294*4882a593Smuzhiyun 		mlog_errno(ret);
295*4882a593Smuzhiyun 		goto out;
296*4882a593Smuzhiyun 	}
297*4882a593Smuzhiyun 
298*4882a593Smuzhiyun 	if (down_read_trylock(&oi->ip_alloc_sem) == 0) {
299*4882a593Smuzhiyun 		/*
300*4882a593Smuzhiyun 		 * Unlock the page and cycle ip_alloc_sem so that we don't
301*4882a593Smuzhiyun 		 * busyloop waiting for ip_alloc_sem to unlock
302*4882a593Smuzhiyun 		 */
303*4882a593Smuzhiyun 		ret = AOP_TRUNCATED_PAGE;
304*4882a593Smuzhiyun 		unlock_page(page);
305*4882a593Smuzhiyun 		unlock = 0;
306*4882a593Smuzhiyun 		down_read(&oi->ip_alloc_sem);
307*4882a593Smuzhiyun 		up_read(&oi->ip_alloc_sem);
308*4882a593Smuzhiyun 		goto out_inode_unlock;
309*4882a593Smuzhiyun 	}
310*4882a593Smuzhiyun 
311*4882a593Smuzhiyun 	/*
312*4882a593Smuzhiyun 	 * i_size might have just been updated as we grabbed the meta lock.  We
313*4882a593Smuzhiyun 	 * might now be discovering a truncate that hit on another node.
314*4882a593Smuzhiyun 	 * block_read_full_page->get_block freaks out if it is asked to read
315*4882a593Smuzhiyun 	 * beyond the end of a file, so we check here.  Callers
316*4882a593Smuzhiyun 	 * (generic_file_read, vm_ops->fault) are clever enough to check i_size
317*4882a593Smuzhiyun 	 * and notice that the page they just read isn't needed.
318*4882a593Smuzhiyun 	 *
319*4882a593Smuzhiyun 	 * XXX sys_readahead() seems to get that wrong?
320*4882a593Smuzhiyun 	 */
321*4882a593Smuzhiyun 	if (start >= i_size_read(inode)) {
322*4882a593Smuzhiyun 		zero_user(page, 0, PAGE_SIZE);
323*4882a593Smuzhiyun 		SetPageUptodate(page);
324*4882a593Smuzhiyun 		ret = 0;
325*4882a593Smuzhiyun 		goto out_alloc;
326*4882a593Smuzhiyun 	}
327*4882a593Smuzhiyun 
328*4882a593Smuzhiyun 	if (oi->ip_dyn_features & OCFS2_INLINE_DATA_FL)
329*4882a593Smuzhiyun 		ret = ocfs2_readpage_inline(inode, page);
330*4882a593Smuzhiyun 	else
331*4882a593Smuzhiyun 		ret = block_read_full_page(page, ocfs2_get_block);
332*4882a593Smuzhiyun 	unlock = 0;
333*4882a593Smuzhiyun 
334*4882a593Smuzhiyun out_alloc:
335*4882a593Smuzhiyun 	up_read(&oi->ip_alloc_sem);
336*4882a593Smuzhiyun out_inode_unlock:
337*4882a593Smuzhiyun 	ocfs2_inode_unlock(inode, 0);
338*4882a593Smuzhiyun out:
339*4882a593Smuzhiyun 	if (unlock)
340*4882a593Smuzhiyun 		unlock_page(page);
341*4882a593Smuzhiyun 	return ret;
342*4882a593Smuzhiyun }
343*4882a593Smuzhiyun 
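/*
 * Illustrative note (not part of the original file): AOP_TRUNCATED_PAGE is
 * the address_space_operations convention for "this op had to drop the page
 * lock; look the page up again and retry".  A hypothetical caller-side
 * sketch:
 *
 *	err = mapping->a_ops->readpage(file, page);
 *	if (err == AOP_TRUNCATED_PAGE)
 *		goto repeat;
 *
 * where "repeat" re-finds the page in the mapping before calling readpage
 * again.  That is why ocfs2_readpage() clears 'unlock' before returning this
 * code: the page lock has already been released on those paths.
 */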
344*4882a593Smuzhiyun /*
345*4882a593Smuzhiyun  * This is used only for read-ahead. Failures or difficult to handle
346*4882a593Smuzhiyun  * situations are safe to ignore.
347*4882a593Smuzhiyun  *
348*4882a593Smuzhiyun  * Right now, we don't bother with BH_Boundary - in-inode extent lists
349*4882a593Smuzhiyun  * are quite large (243 extents on 4k blocks), so most inodes don't
350*4882a593Smuzhiyun  * grow out to a tree. If need be, detecting boundary extents could
351*4882a593Smuzhiyun  * trivially be added in a future version of ocfs2_get_block().
352*4882a593Smuzhiyun  */
353*4882a593Smuzhiyun static void ocfs2_readahead(struct readahead_control *rac)
354*4882a593Smuzhiyun {
355*4882a593Smuzhiyun 	int ret;
356*4882a593Smuzhiyun 	struct inode *inode = rac->mapping->host;
357*4882a593Smuzhiyun 	struct ocfs2_inode_info *oi = OCFS2_I(inode);
358*4882a593Smuzhiyun 
359*4882a593Smuzhiyun 	/*
360*4882a593Smuzhiyun 	 * Use the nonblocking flag for the dlm code to avoid page
361*4882a593Smuzhiyun 	 * lock inversion, but don't bother with retrying.
362*4882a593Smuzhiyun 	 */
363*4882a593Smuzhiyun 	ret = ocfs2_inode_lock_full(inode, NULL, 0, OCFS2_LOCK_NONBLOCK);
364*4882a593Smuzhiyun 	if (ret)
365*4882a593Smuzhiyun 		return;
366*4882a593Smuzhiyun 
367*4882a593Smuzhiyun 	if (down_read_trylock(&oi->ip_alloc_sem) == 0)
368*4882a593Smuzhiyun 		goto out_unlock;
369*4882a593Smuzhiyun 
370*4882a593Smuzhiyun 	/*
371*4882a593Smuzhiyun 	 * Don't bother with inline-data. There isn't anything
372*4882a593Smuzhiyun 	 * to read-ahead in that case anyway...
373*4882a593Smuzhiyun 	 */
374*4882a593Smuzhiyun 	if (oi->ip_dyn_features & OCFS2_INLINE_DATA_FL)
375*4882a593Smuzhiyun 		goto out_up;
376*4882a593Smuzhiyun 
377*4882a593Smuzhiyun 	/*
378*4882a593Smuzhiyun 	 * Check whether a remote node truncated this file - we just
379*4882a593Smuzhiyun 	 * drop out in that case as it's not worth handling here.
380*4882a593Smuzhiyun 	 */
381*4882a593Smuzhiyun 	if (readahead_pos(rac) >= i_size_read(inode))
382*4882a593Smuzhiyun 		goto out_up;
383*4882a593Smuzhiyun 
384*4882a593Smuzhiyun 	mpage_readahead(rac, ocfs2_get_block);
385*4882a593Smuzhiyun 
386*4882a593Smuzhiyun out_up:
387*4882a593Smuzhiyun 	up_read(&oi->ip_alloc_sem);
388*4882a593Smuzhiyun out_unlock:
389*4882a593Smuzhiyun 	ocfs2_inode_unlock(inode, 0);
390*4882a593Smuzhiyun }
391*4882a593Smuzhiyun 
392*4882a593Smuzhiyun /* Note: Because we don't support holes, our allocation has
393*4882a593Smuzhiyun  * already happened (allocation writes zeros to the file data)
394*4882a593Smuzhiyun  * so we don't have to worry about ordered writes in
395*4882a593Smuzhiyun  * ocfs2_writepage.
396*4882a593Smuzhiyun  *
397*4882a593Smuzhiyun  * ->writepage is called during the process of invalidating the page cache
398*4882a593Smuzhiyun  * during blocked lock processing.  It can't block on any cluster locks
399*4882a593Smuzhiyun  * during block mapping.  It's relying on the fact that the block
400*4882a593Smuzhiyun  * mapping can't have disappeared under the dirty pages that it is
401*4882a593Smuzhiyun  * being asked to write back.
402*4882a593Smuzhiyun  */
403*4882a593Smuzhiyun static int ocfs2_writepage(struct page *page, struct writeback_control *wbc)
404*4882a593Smuzhiyun {
405*4882a593Smuzhiyun 	trace_ocfs2_writepage(
406*4882a593Smuzhiyun 		(unsigned long long)OCFS2_I(page->mapping->host)->ip_blkno,
407*4882a593Smuzhiyun 		page->index);
408*4882a593Smuzhiyun 
409*4882a593Smuzhiyun 	return block_write_full_page(page, ocfs2_get_block, wbc);
410*4882a593Smuzhiyun }
411*4882a593Smuzhiyun 
412*4882a593Smuzhiyun /* Taken from ext3. We don't necessarily need the full blown
413*4882a593Smuzhiyun  * functionality yet, but IMHO it's better to cut and paste the whole
414*4882a593Smuzhiyun  * thing so we can avoid introducing our own bugs (and easily pick up
415*4882a593Smuzhiyun  * their fixes when they happen) --Mark */
416*4882a593Smuzhiyun int walk_page_buffers(	handle_t *handle,
417*4882a593Smuzhiyun 			struct buffer_head *head,
418*4882a593Smuzhiyun 			unsigned from,
419*4882a593Smuzhiyun 			unsigned to,
420*4882a593Smuzhiyun 			int *partial,
421*4882a593Smuzhiyun 			int (*fn)(	handle_t *handle,
422*4882a593Smuzhiyun 					struct buffer_head *bh))
423*4882a593Smuzhiyun {
424*4882a593Smuzhiyun 	struct buffer_head *bh;
425*4882a593Smuzhiyun 	unsigned block_start, block_end;
426*4882a593Smuzhiyun 	unsigned blocksize = head->b_size;
427*4882a593Smuzhiyun 	int err, ret = 0;
428*4882a593Smuzhiyun 	struct buffer_head *next;
429*4882a593Smuzhiyun 
430*4882a593Smuzhiyun 	for (	bh = head, block_start = 0;
431*4882a593Smuzhiyun 		ret == 0 && (bh != head || !block_start);
432*4882a593Smuzhiyun 	    	block_start = block_end, bh = next)
433*4882a593Smuzhiyun 	{
434*4882a593Smuzhiyun 		next = bh->b_this_page;
435*4882a593Smuzhiyun 		block_end = block_start + blocksize;
436*4882a593Smuzhiyun 		if (block_end <= from || block_start >= to) {
437*4882a593Smuzhiyun 			if (partial && !buffer_uptodate(bh))
438*4882a593Smuzhiyun 				*partial = 1;
439*4882a593Smuzhiyun 			continue;
440*4882a593Smuzhiyun 		}
441*4882a593Smuzhiyun 		err = (*fn)(handle, bh);
442*4882a593Smuzhiyun 		if (!ret)
443*4882a593Smuzhiyun 			ret = err;
444*4882a593Smuzhiyun 	}
445*4882a593Smuzhiyun 	return ret;
446*4882a593Smuzhiyun }
447*4882a593Smuzhiyun 
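/*
 * A minimal usage sketch for walk_page_buffers() (illustrative, not part of
 * the original file), assuming a hypothetical callback.  The callback must
 * match int (*fn)(handle_t *, struct buffer_head *); a nonzero return stops
 * the walk and is propagated to the caller:
 *
 *	static int example_dirty_one_bh(handle_t *handle, struct buffer_head *bh)
 *	{
 *		mark_buffer_dirty(bh);
 *		return 0;
 *	}
 *
 *	ret = walk_page_buffers(handle, page_buffers(page), from, to,
 *				NULL, example_dirty_one_bh);
 *
 * 'partial' may be NULL when the caller does not care whether buffers
 * outside [from, to) were left non-uptodate.
 */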
448*4882a593Smuzhiyun static sector_t ocfs2_bmap(struct address_space *mapping, sector_t block)
449*4882a593Smuzhiyun {
450*4882a593Smuzhiyun 	sector_t status;
451*4882a593Smuzhiyun 	u64 p_blkno = 0;
452*4882a593Smuzhiyun 	int err = 0;
453*4882a593Smuzhiyun 	struct inode *inode = mapping->host;
454*4882a593Smuzhiyun 
455*4882a593Smuzhiyun 	trace_ocfs2_bmap((unsigned long long)OCFS2_I(inode)->ip_blkno,
456*4882a593Smuzhiyun 			 (unsigned long long)block);
457*4882a593Smuzhiyun 
458*4882a593Smuzhiyun 	/*
459*4882a593Smuzhiyun 	 * The swap code (ab-)uses ->bmap to get a block mapping and then
460*4882a593Smuzhiyun 	 * bypasses the file system for actual I/O.  We really can't allow
461*4882a593Smuzhiyun 	 * that on refcounted inodes, so we have to skip out here.  And yes,
462*4882a593Smuzhiyun 	 * 0 is the magic code for a bmap error..
463*4882a593Smuzhiyun 	 */
464*4882a593Smuzhiyun 	if (ocfs2_is_refcount_inode(inode))
465*4882a593Smuzhiyun 		return 0;
466*4882a593Smuzhiyun 
467*4882a593Smuzhiyun 	/* We don't need to lock journal system files, since they aren't
468*4882a593Smuzhiyun 	 * accessed concurrently from multiple nodes.
469*4882a593Smuzhiyun 	 */
470*4882a593Smuzhiyun 	if (!INODE_JOURNAL(inode)) {
471*4882a593Smuzhiyun 		err = ocfs2_inode_lock(inode, NULL, 0);
472*4882a593Smuzhiyun 		if (err) {
473*4882a593Smuzhiyun 			if (err != -ENOENT)
474*4882a593Smuzhiyun 				mlog_errno(err);
475*4882a593Smuzhiyun 			goto bail;
476*4882a593Smuzhiyun 		}
477*4882a593Smuzhiyun 		down_read(&OCFS2_I(inode)->ip_alloc_sem);
478*4882a593Smuzhiyun 	}
479*4882a593Smuzhiyun 
480*4882a593Smuzhiyun 	if (!(OCFS2_I(inode)->ip_dyn_features & OCFS2_INLINE_DATA_FL))
481*4882a593Smuzhiyun 		err = ocfs2_extent_map_get_blocks(inode, block, &p_blkno, NULL,
482*4882a593Smuzhiyun 						  NULL);
483*4882a593Smuzhiyun 
484*4882a593Smuzhiyun 	if (!INODE_JOURNAL(inode)) {
485*4882a593Smuzhiyun 		up_read(&OCFS2_I(inode)->ip_alloc_sem);
486*4882a593Smuzhiyun 		ocfs2_inode_unlock(inode, 0);
487*4882a593Smuzhiyun 	}
488*4882a593Smuzhiyun 
489*4882a593Smuzhiyun 	if (err) {
490*4882a593Smuzhiyun 		mlog(ML_ERROR, "get_blocks() failed, block = %llu\n",
491*4882a593Smuzhiyun 		     (unsigned long long)block);
492*4882a593Smuzhiyun 		mlog_errno(err);
493*4882a593Smuzhiyun 		goto bail;
494*4882a593Smuzhiyun 	}
495*4882a593Smuzhiyun 
496*4882a593Smuzhiyun bail:
497*4882a593Smuzhiyun 	status = err ? 0 : p_blkno;
498*4882a593Smuzhiyun 
499*4882a593Smuzhiyun 	return status;
500*4882a593Smuzhiyun }
501*4882a593Smuzhiyun 
502*4882a593Smuzhiyun static int ocfs2_releasepage(struct page *page, gfp_t wait)
503*4882a593Smuzhiyun {
504*4882a593Smuzhiyun 	if (!page_has_buffers(page))
505*4882a593Smuzhiyun 		return 0;
506*4882a593Smuzhiyun 	return try_to_free_buffers(page);
507*4882a593Smuzhiyun }
508*4882a593Smuzhiyun 
509*4882a593Smuzhiyun static void ocfs2_figure_cluster_boundaries(struct ocfs2_super *osb,
510*4882a593Smuzhiyun 					    u32 cpos,
511*4882a593Smuzhiyun 					    unsigned int *start,
512*4882a593Smuzhiyun 					    unsigned int *end)
513*4882a593Smuzhiyun {
514*4882a593Smuzhiyun 	unsigned int cluster_start = 0, cluster_end = PAGE_SIZE;
515*4882a593Smuzhiyun 
516*4882a593Smuzhiyun 	if (unlikely(PAGE_SHIFT > osb->s_clustersize_bits)) {
517*4882a593Smuzhiyun 		unsigned int cpp;
518*4882a593Smuzhiyun 
519*4882a593Smuzhiyun 		cpp = 1 << (PAGE_SHIFT - osb->s_clustersize_bits);
520*4882a593Smuzhiyun 
521*4882a593Smuzhiyun 		cluster_start = cpos % cpp;
522*4882a593Smuzhiyun 		cluster_start = cluster_start << osb->s_clustersize_bits;
523*4882a593Smuzhiyun 
524*4882a593Smuzhiyun 		cluster_end = cluster_start + osb->s_clustersize;
525*4882a593Smuzhiyun 	}
526*4882a593Smuzhiyun 
527*4882a593Smuzhiyun 	BUG_ON(cluster_start > PAGE_SIZE);
528*4882a593Smuzhiyun 	BUG_ON(cluster_end > PAGE_SIZE);
529*4882a593Smuzhiyun 
530*4882a593Smuzhiyun 	if (start)
531*4882a593Smuzhiyun 		*start = cluster_start;
532*4882a593Smuzhiyun 	if (end)
533*4882a593Smuzhiyun 		*end = cluster_end;
534*4882a593Smuzhiyun }
535*4882a593Smuzhiyun 
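/*
 * A worked example of the boundary math above (illustrative, not part of the
 * original file), assuming 64 KiB pages (PAGE_SHIFT = 16) and 4 KiB clusters
 * (s_clustersize_bits = 12), i.e. cpp = 1 << (16 - 12) = 16 clusters per
 * page.  For cpos = 37:
 *
 *	cluster_start = (37 % 16) << 12 = 5 << 12 = 20480
 *	cluster_end   = 20480 + 4096    = 24576
 *
 * so only bytes [20480, 24576) of the page belong to that cluster.  When
 * PAGE_SHIFT <= s_clustersize_bits, the defaults (0, PAGE_SIZE) are kept and
 * the whole page is treated as a single region.
 */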
536*4882a593Smuzhiyun /*
537*4882a593Smuzhiyun  * 'from' and 'to' are the region in the page to avoid zeroing.
538*4882a593Smuzhiyun  *
539*4882a593Smuzhiyun  * If pagesize > clustersize, this function will avoid zeroing outside
540*4882a593Smuzhiyun  * of the cluster boundary.
541*4882a593Smuzhiyun  *
542*4882a593Smuzhiyun  * from == to == 0 is code for "zero the entire cluster region"
543*4882a593Smuzhiyun  */
544*4882a593Smuzhiyun static void ocfs2_clear_page_regions(struct page *page,
545*4882a593Smuzhiyun 				     struct ocfs2_super *osb, u32 cpos,
546*4882a593Smuzhiyun 				     unsigned from, unsigned to)
547*4882a593Smuzhiyun {
548*4882a593Smuzhiyun 	void *kaddr;
549*4882a593Smuzhiyun 	unsigned int cluster_start, cluster_end;
550*4882a593Smuzhiyun 
551*4882a593Smuzhiyun 	ocfs2_figure_cluster_boundaries(osb, cpos, &cluster_start, &cluster_end);
552*4882a593Smuzhiyun 
553*4882a593Smuzhiyun 	kaddr = kmap_atomic(page);
554*4882a593Smuzhiyun 
555*4882a593Smuzhiyun 	if (from || to) {
556*4882a593Smuzhiyun 		if (from > cluster_start)
557*4882a593Smuzhiyun 			memset(kaddr + cluster_start, 0, from - cluster_start);
558*4882a593Smuzhiyun 		if (to < cluster_end)
559*4882a593Smuzhiyun 			memset(kaddr + to, 0, cluster_end - to);
560*4882a593Smuzhiyun 	} else {
561*4882a593Smuzhiyun 		memset(kaddr + cluster_start, 0, cluster_end - cluster_start);
562*4882a593Smuzhiyun 	}
563*4882a593Smuzhiyun 
564*4882a593Smuzhiyun 	kunmap_atomic(kaddr);
565*4882a593Smuzhiyun }
566*4882a593Smuzhiyun 
567*4882a593Smuzhiyun /*
568*4882a593Smuzhiyun  * Nonsparse file systems fully allocate before we get to the write
569*4882a593Smuzhiyun  * code. This prevents ocfs2_write() from tagging the write as an
570*4882a593Smuzhiyun  * allocating one, which means ocfs2_map_page_blocks() might try to
571*4882a593Smuzhiyun  * read-in the blocks at the tail of our file. Avoid reading them by
572*4882a593Smuzhiyun  * testing i_size against each block offset.
573*4882a593Smuzhiyun  */
574*4882a593Smuzhiyun static int ocfs2_should_read_blk(struct inode *inode, struct page *page,
575*4882a593Smuzhiyun 				 unsigned int block_start)
576*4882a593Smuzhiyun {
577*4882a593Smuzhiyun 	u64 offset = page_offset(page) + block_start;
578*4882a593Smuzhiyun 
579*4882a593Smuzhiyun 	if (ocfs2_sparse_alloc(OCFS2_SB(inode->i_sb)))
580*4882a593Smuzhiyun 		return 1;
581*4882a593Smuzhiyun 
582*4882a593Smuzhiyun 	if (i_size_read(inode) > offset)
583*4882a593Smuzhiyun 		return 1;
584*4882a593Smuzhiyun 
585*4882a593Smuzhiyun 	return 0;
586*4882a593Smuzhiyun }
587*4882a593Smuzhiyun 
588*4882a593Smuzhiyun /*
589*4882a593Smuzhiyun  * Some of this taken from __block_write_begin(). We already have our
590*4882a593Smuzhiyun  * mapping by now though, and the entire write will be allocating or
591*4882a593Smuzhiyun  * it won't, so not much need to use BH_New.
592*4882a593Smuzhiyun  *
593*4882a593Smuzhiyun  * This will also skip zeroing, which is handled externally.
594*4882a593Smuzhiyun  */
595*4882a593Smuzhiyun int ocfs2_map_page_blocks(struct page *page, u64 *p_blkno,
596*4882a593Smuzhiyun 			  struct inode *inode, unsigned int from,
597*4882a593Smuzhiyun 			  unsigned int to, int new)
598*4882a593Smuzhiyun {
599*4882a593Smuzhiyun 	int ret = 0;
600*4882a593Smuzhiyun 	struct buffer_head *head, *bh, *wait[2], **wait_bh = wait;
601*4882a593Smuzhiyun 	unsigned int block_end, block_start;
602*4882a593Smuzhiyun 	unsigned int bsize = i_blocksize(inode);
603*4882a593Smuzhiyun 
604*4882a593Smuzhiyun 	if (!page_has_buffers(page))
605*4882a593Smuzhiyun 		create_empty_buffers(page, bsize, 0);
606*4882a593Smuzhiyun 
607*4882a593Smuzhiyun 	head = page_buffers(page);
608*4882a593Smuzhiyun 	for (bh = head, block_start = 0; bh != head || !block_start;
609*4882a593Smuzhiyun 	     bh = bh->b_this_page, block_start += bsize) {
610*4882a593Smuzhiyun 		block_end = block_start + bsize;
611*4882a593Smuzhiyun 
612*4882a593Smuzhiyun 		clear_buffer_new(bh);
613*4882a593Smuzhiyun 
614*4882a593Smuzhiyun 		/*
615*4882a593Smuzhiyun 		 * Ignore blocks outside of our i/o range -
616*4882a593Smuzhiyun 		 * they may belong to unallocated clusters.
617*4882a593Smuzhiyun 		 */
618*4882a593Smuzhiyun 		if (block_start >= to || block_end <= from) {
619*4882a593Smuzhiyun 			if (PageUptodate(page))
620*4882a593Smuzhiyun 				set_buffer_uptodate(bh);
621*4882a593Smuzhiyun 			continue;
622*4882a593Smuzhiyun 		}
623*4882a593Smuzhiyun 
624*4882a593Smuzhiyun 		/*
625*4882a593Smuzhiyun 		 * For an allocating write with cluster size >= page
626*4882a593Smuzhiyun 		 * size, we always write the entire page.
627*4882a593Smuzhiyun 		 */
628*4882a593Smuzhiyun 		if (new)
629*4882a593Smuzhiyun 			set_buffer_new(bh);
630*4882a593Smuzhiyun 
631*4882a593Smuzhiyun 		if (!buffer_mapped(bh)) {
632*4882a593Smuzhiyun 			map_bh(bh, inode->i_sb, *p_blkno);
633*4882a593Smuzhiyun 			clean_bdev_bh_alias(bh);
634*4882a593Smuzhiyun 		}
635*4882a593Smuzhiyun 
636*4882a593Smuzhiyun 		if (PageUptodate(page)) {
637*4882a593Smuzhiyun 			if (!buffer_uptodate(bh))
638*4882a593Smuzhiyun 				set_buffer_uptodate(bh);
639*4882a593Smuzhiyun 		} else if (!buffer_uptodate(bh) && !buffer_delay(bh) &&
640*4882a593Smuzhiyun 			   !buffer_new(bh) &&
641*4882a593Smuzhiyun 			   ocfs2_should_read_blk(inode, page, block_start) &&
642*4882a593Smuzhiyun 			   (block_start < from || block_end > to)) {
643*4882a593Smuzhiyun 			ll_rw_block(REQ_OP_READ, 0, 1, &bh);
644*4882a593Smuzhiyun 			*wait_bh++=bh;
645*4882a593Smuzhiyun 		}
646*4882a593Smuzhiyun 
647*4882a593Smuzhiyun 		*p_blkno = *p_blkno + 1;
648*4882a593Smuzhiyun 	}
649*4882a593Smuzhiyun 
650*4882a593Smuzhiyun 	/*
651*4882a593Smuzhiyun 	 * If we issued read requests - let them complete.
652*4882a593Smuzhiyun 	 */
653*4882a593Smuzhiyun 	while(wait_bh > wait) {
654*4882a593Smuzhiyun 		wait_on_buffer(*--wait_bh);
655*4882a593Smuzhiyun 		if (!buffer_uptodate(*wait_bh))
656*4882a593Smuzhiyun 			ret = -EIO;
657*4882a593Smuzhiyun 	}
658*4882a593Smuzhiyun 
659*4882a593Smuzhiyun 	if (ret == 0 || !new)
660*4882a593Smuzhiyun 		return ret;
661*4882a593Smuzhiyun 
662*4882a593Smuzhiyun 	/*
663*4882a593Smuzhiyun 	 * If we get -EIO above, zero out any newly allocated blocks
664*4882a593Smuzhiyun 	 * to avoid exposing stale data.
665*4882a593Smuzhiyun 	 */
666*4882a593Smuzhiyun 	bh = head;
667*4882a593Smuzhiyun 	block_start = 0;
668*4882a593Smuzhiyun 	do {
669*4882a593Smuzhiyun 		block_end = block_start + bsize;
670*4882a593Smuzhiyun 		if (block_end <= from)
671*4882a593Smuzhiyun 			goto next_bh;
672*4882a593Smuzhiyun 		if (block_start >= to)
673*4882a593Smuzhiyun 			break;
674*4882a593Smuzhiyun 
675*4882a593Smuzhiyun 		zero_user(page, block_start, bh->b_size);
676*4882a593Smuzhiyun 		set_buffer_uptodate(bh);
677*4882a593Smuzhiyun 		mark_buffer_dirty(bh);
678*4882a593Smuzhiyun 
679*4882a593Smuzhiyun next_bh:
680*4882a593Smuzhiyun 		block_start = block_end;
681*4882a593Smuzhiyun 		bh = bh->b_this_page;
682*4882a593Smuzhiyun 	} while (bh != head);
683*4882a593Smuzhiyun 
684*4882a593Smuzhiyun 	return ret;
685*4882a593Smuzhiyun }
686*4882a593Smuzhiyun 
687*4882a593Smuzhiyun #if (PAGE_SIZE >= OCFS2_MAX_CLUSTERSIZE)
688*4882a593Smuzhiyun #define OCFS2_MAX_CTXT_PAGES	1
689*4882a593Smuzhiyun #else
690*4882a593Smuzhiyun #define OCFS2_MAX_CTXT_PAGES	(OCFS2_MAX_CLUSTERSIZE / PAGE_SIZE)
691*4882a593Smuzhiyun #endif
692*4882a593Smuzhiyun 
693*4882a593Smuzhiyun #define OCFS2_MAX_CLUSTERS_PER_PAGE	(PAGE_SIZE / OCFS2_MIN_CLUSTERSIZE)
694*4882a593Smuzhiyun 
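/*
 * Worked numbers for the two limits above (illustrative, not part of the
 * original file), assuming the usual OCFS2_MIN_CLUSTERSIZE of 4 KiB and
 * OCFS2_MAX_CLUSTERSIZE of 1 MiB: with 4 KiB pages, OCFS2_MAX_CTXT_PAGES =
 * 1 MiB / 4 KiB = 256 and OCFS2_MAX_CLUSTERS_PER_PAGE = 4 KiB / 4 KiB = 1;
 * with 64 KiB pages, OCFS2_MAX_CTXT_PAGES = 16 and
 * OCFS2_MAX_CLUSTERS_PER_PAGE = 16.  The "#if" branch that pins
 * OCFS2_MAX_CTXT_PAGES to 1 only applies when a single page can hold the
 * largest possible cluster.
 */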
695*4882a593Smuzhiyun struct ocfs2_unwritten_extent {
696*4882a593Smuzhiyun 	struct list_head	ue_node;
697*4882a593Smuzhiyun 	struct list_head	ue_ip_node;
698*4882a593Smuzhiyun 	u32			ue_cpos;
699*4882a593Smuzhiyun 	u32			ue_phys;
700*4882a593Smuzhiyun };
701*4882a593Smuzhiyun 
702*4882a593Smuzhiyun /*
703*4882a593Smuzhiyun  * Describe the state of a single cluster to be written to.
704*4882a593Smuzhiyun  */
705*4882a593Smuzhiyun struct ocfs2_write_cluster_desc {
706*4882a593Smuzhiyun 	u32		c_cpos;
707*4882a593Smuzhiyun 	u32		c_phys;
708*4882a593Smuzhiyun 	/*
709*4882a593Smuzhiyun 	 * Give this a unique field because c_phys eventually gets
710*4882a593Smuzhiyun 	 * filled.
711*4882a593Smuzhiyun 	 */
712*4882a593Smuzhiyun 	unsigned	c_new;
713*4882a593Smuzhiyun 	unsigned	c_clear_unwritten;
714*4882a593Smuzhiyun 	unsigned	c_needs_zero;
715*4882a593Smuzhiyun };
716*4882a593Smuzhiyun 
717*4882a593Smuzhiyun struct ocfs2_write_ctxt {
718*4882a593Smuzhiyun 	/* Logical cluster position / len of write */
719*4882a593Smuzhiyun 	u32				w_cpos;
720*4882a593Smuzhiyun 	u32				w_clen;
721*4882a593Smuzhiyun 
722*4882a593Smuzhiyun 	/* First cluster allocated in a nonsparse extend */
723*4882a593Smuzhiyun 	u32				w_first_new_cpos;
724*4882a593Smuzhiyun 
725*4882a593Smuzhiyun 	/* Type of caller. Must be one of buffer, mmap, direct.  */
726*4882a593Smuzhiyun 	ocfs2_write_type_t		w_type;
727*4882a593Smuzhiyun 
728*4882a593Smuzhiyun 	struct ocfs2_write_cluster_desc	w_desc[OCFS2_MAX_CLUSTERS_PER_PAGE];
729*4882a593Smuzhiyun 
730*4882a593Smuzhiyun 	/*
731*4882a593Smuzhiyun 	 * This is true if page_size > cluster_size.
732*4882a593Smuzhiyun 	 *
733*4882a593Smuzhiyun 	 * It triggers a set of special cases during write which might
734*4882a593Smuzhiyun 	 * have to deal with allocating writes to partial pages.
735*4882a593Smuzhiyun 	 */
736*4882a593Smuzhiyun 	unsigned int			w_large_pages;
737*4882a593Smuzhiyun 
738*4882a593Smuzhiyun 	/*
739*4882a593Smuzhiyun 	 * Pages involved in this write.
740*4882a593Smuzhiyun 	 *
741*4882a593Smuzhiyun 	 * w_target_page is the page being written to by the user.
742*4882a593Smuzhiyun 	 *
743*4882a593Smuzhiyun 	 * w_pages is an array of pages which always contains
744*4882a593Smuzhiyun 	 * w_target_page, and in the case of an allocating write with
745*4882a593Smuzhiyun 	 * page_size < cluster size, it will contain zero'd and mapped
746*4882a593Smuzhiyun 	 * pages adjacent to w_target_page which need to be written
747*4882a593Smuzhiyun 	 * out in so that future reads from that region will get
748*4882a593Smuzhiyun 	 * out so that future reads from that region will get
749*4882a593Smuzhiyun 	 * zeros.
750*4882a593Smuzhiyun 	unsigned int			w_num_pages;
751*4882a593Smuzhiyun 	struct page			*w_pages[OCFS2_MAX_CTXT_PAGES];
752*4882a593Smuzhiyun 	struct page			*w_target_page;
753*4882a593Smuzhiyun 
754*4882a593Smuzhiyun 	/*
755*4882a593Smuzhiyun 	 * w_target_locked is used for page_mkwrite path indicating no unlocking
756*4882a593Smuzhiyun 	 * against w_target_page in ocfs2_write_end_nolock.
757*4882a593Smuzhiyun 	 */
758*4882a593Smuzhiyun 	unsigned int			w_target_locked:1;
759*4882a593Smuzhiyun 
760*4882a593Smuzhiyun 	/*
761*4882a593Smuzhiyun 	 * ocfs2_write_end() uses this to know what the real range to
762*4882a593Smuzhiyun 	 * write in the target should be.
763*4882a593Smuzhiyun 	 */
764*4882a593Smuzhiyun 	unsigned int			w_target_from;
765*4882a593Smuzhiyun 	unsigned int			w_target_to;
766*4882a593Smuzhiyun 
767*4882a593Smuzhiyun 	/*
768*4882a593Smuzhiyun 	 * We could use journal_current_handle() but this is cleaner,
769*4882a593Smuzhiyun 	 * IMHO -Mark
770*4882a593Smuzhiyun 	 */
771*4882a593Smuzhiyun 	handle_t			*w_handle;
772*4882a593Smuzhiyun 
773*4882a593Smuzhiyun 	struct buffer_head		*w_di_bh;
774*4882a593Smuzhiyun 
775*4882a593Smuzhiyun 	struct ocfs2_cached_dealloc_ctxt w_dealloc;
776*4882a593Smuzhiyun 
777*4882a593Smuzhiyun 	struct list_head		w_unwritten_list;
778*4882a593Smuzhiyun 	unsigned int			w_unwritten_count;
779*4882a593Smuzhiyun };
780*4882a593Smuzhiyun 
781*4882a593Smuzhiyun void ocfs2_unlock_and_free_pages(struct page **pages, int num_pages)
782*4882a593Smuzhiyun {
783*4882a593Smuzhiyun 	int i;
784*4882a593Smuzhiyun 
785*4882a593Smuzhiyun 	for(i = 0; i < num_pages; i++) {
786*4882a593Smuzhiyun 		if (pages[i]) {
787*4882a593Smuzhiyun 			unlock_page(pages[i]);
788*4882a593Smuzhiyun 			mark_page_accessed(pages[i]);
789*4882a593Smuzhiyun 			put_page(pages[i]);
790*4882a593Smuzhiyun 		}
791*4882a593Smuzhiyun 	}
792*4882a593Smuzhiyun }
793*4882a593Smuzhiyun 
794*4882a593Smuzhiyun static void ocfs2_unlock_pages(struct ocfs2_write_ctxt *wc)
795*4882a593Smuzhiyun {
796*4882a593Smuzhiyun 	int i;
797*4882a593Smuzhiyun 
798*4882a593Smuzhiyun 	/*
799*4882a593Smuzhiyun 	 * w_target_locked is only set to true in the page_mkwrite() case.
800*4882a593Smuzhiyun 	 * The intent is to allow us to lock the target page from write_begin()
801*4882a593Smuzhiyun 	 * to write_end(). The caller must hold a ref on w_target_page.
802*4882a593Smuzhiyun 	 */
803*4882a593Smuzhiyun 	if (wc->w_target_locked) {
804*4882a593Smuzhiyun 		BUG_ON(!wc->w_target_page);
805*4882a593Smuzhiyun 		for (i = 0; i < wc->w_num_pages; i++) {
806*4882a593Smuzhiyun 			if (wc->w_target_page == wc->w_pages[i]) {
807*4882a593Smuzhiyun 				wc->w_pages[i] = NULL;
808*4882a593Smuzhiyun 				break;
809*4882a593Smuzhiyun 			}
810*4882a593Smuzhiyun 		}
811*4882a593Smuzhiyun 		mark_page_accessed(wc->w_target_page);
812*4882a593Smuzhiyun 		put_page(wc->w_target_page);
813*4882a593Smuzhiyun 	}
814*4882a593Smuzhiyun 	ocfs2_unlock_and_free_pages(wc->w_pages, wc->w_num_pages);
815*4882a593Smuzhiyun }
816*4882a593Smuzhiyun 
817*4882a593Smuzhiyun static void ocfs2_free_unwritten_list(struct inode *inode,
818*4882a593Smuzhiyun 				 struct list_head *head)
819*4882a593Smuzhiyun {
820*4882a593Smuzhiyun 	struct ocfs2_inode_info *oi = OCFS2_I(inode);
821*4882a593Smuzhiyun 	struct ocfs2_unwritten_extent *ue = NULL, *tmp = NULL;
822*4882a593Smuzhiyun 
823*4882a593Smuzhiyun 	list_for_each_entry_safe(ue, tmp, head, ue_node) {
824*4882a593Smuzhiyun 		list_del(&ue->ue_node);
825*4882a593Smuzhiyun 		spin_lock(&oi->ip_lock);
826*4882a593Smuzhiyun 		list_del(&ue->ue_ip_node);
827*4882a593Smuzhiyun 		spin_unlock(&oi->ip_lock);
828*4882a593Smuzhiyun 		kfree(ue);
829*4882a593Smuzhiyun 	}
830*4882a593Smuzhiyun }
831*4882a593Smuzhiyun 
832*4882a593Smuzhiyun static void ocfs2_free_write_ctxt(struct inode *inode,
833*4882a593Smuzhiyun 				  struct ocfs2_write_ctxt *wc)
834*4882a593Smuzhiyun {
835*4882a593Smuzhiyun 	ocfs2_free_unwritten_list(inode, &wc->w_unwritten_list);
836*4882a593Smuzhiyun 	ocfs2_unlock_pages(wc);
837*4882a593Smuzhiyun 	brelse(wc->w_di_bh);
838*4882a593Smuzhiyun 	kfree(wc);
839*4882a593Smuzhiyun }
840*4882a593Smuzhiyun 
841*4882a593Smuzhiyun static int ocfs2_alloc_write_ctxt(struct ocfs2_write_ctxt **wcp,
842*4882a593Smuzhiyun 				  struct ocfs2_super *osb, loff_t pos,
843*4882a593Smuzhiyun 				  unsigned len, ocfs2_write_type_t type,
844*4882a593Smuzhiyun 				  struct buffer_head *di_bh)
845*4882a593Smuzhiyun {
846*4882a593Smuzhiyun 	u32 cend;
847*4882a593Smuzhiyun 	struct ocfs2_write_ctxt *wc;
848*4882a593Smuzhiyun 
849*4882a593Smuzhiyun 	wc = kzalloc(sizeof(struct ocfs2_write_ctxt), GFP_NOFS);
850*4882a593Smuzhiyun 	if (!wc)
851*4882a593Smuzhiyun 		return -ENOMEM;
852*4882a593Smuzhiyun 
853*4882a593Smuzhiyun 	wc->w_cpos = pos >> osb->s_clustersize_bits;
854*4882a593Smuzhiyun 	wc->w_first_new_cpos = UINT_MAX;
855*4882a593Smuzhiyun 	cend = (pos + len - 1) >> osb->s_clustersize_bits;
856*4882a593Smuzhiyun 	wc->w_clen = cend - wc->w_cpos + 1;
857*4882a593Smuzhiyun 	get_bh(di_bh);
858*4882a593Smuzhiyun 	wc->w_di_bh = di_bh;
859*4882a593Smuzhiyun 	wc->w_type = type;
860*4882a593Smuzhiyun 
861*4882a593Smuzhiyun 	if (unlikely(PAGE_SHIFT > osb->s_clustersize_bits))
862*4882a593Smuzhiyun 		wc->w_large_pages = 1;
863*4882a593Smuzhiyun 	else
864*4882a593Smuzhiyun 		wc->w_large_pages = 0;
865*4882a593Smuzhiyun 
866*4882a593Smuzhiyun 	ocfs2_init_dealloc_ctxt(&wc->w_dealloc);
867*4882a593Smuzhiyun 	INIT_LIST_HEAD(&wc->w_unwritten_list);
868*4882a593Smuzhiyun 
869*4882a593Smuzhiyun 	*wcp = wc;
870*4882a593Smuzhiyun 
871*4882a593Smuzhiyun 	return 0;
872*4882a593Smuzhiyun }
873*4882a593Smuzhiyun 
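/*
 * A worked example of the cluster span computed above (illustrative, not
 * part of the original file), assuming a 32 KiB cluster size
 * (s_clustersize_bits = 15).  For pos = 100000 and len = 70000:
 *
 *	w_cpos = 100000 >> 15                = 3
 *	cend   = (100000 + 70000 - 1) >> 15  = 169999 >> 15 = 5
 *	w_clen = 5 - 3 + 1                   = 3
 *
 * so the write spans logical clusters 3, 4 and 5 even though it starts and
 * ends in the middle of a cluster.
 */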
874*4882a593Smuzhiyun /*
875*4882a593Smuzhiyun  * If a page has any new buffers, zero them out here, and mark them uptodate
876*4882a593Smuzhiyun  * and dirty so they'll be written out (in order to prevent uninitialised
877*4882a593Smuzhiyun  * block data from leaking). And clear the new bit.
878*4882a593Smuzhiyun  */
879*4882a593Smuzhiyun static void ocfs2_zero_new_buffers(struct page *page, unsigned from, unsigned to)
880*4882a593Smuzhiyun {
881*4882a593Smuzhiyun 	unsigned int block_start, block_end;
882*4882a593Smuzhiyun 	struct buffer_head *head, *bh;
883*4882a593Smuzhiyun 
884*4882a593Smuzhiyun 	BUG_ON(!PageLocked(page));
885*4882a593Smuzhiyun 	if (!page_has_buffers(page))
886*4882a593Smuzhiyun 		return;
887*4882a593Smuzhiyun 
888*4882a593Smuzhiyun 	bh = head = page_buffers(page);
889*4882a593Smuzhiyun 	block_start = 0;
890*4882a593Smuzhiyun 	do {
891*4882a593Smuzhiyun 		block_end = block_start + bh->b_size;
892*4882a593Smuzhiyun 
893*4882a593Smuzhiyun 		if (buffer_new(bh)) {
894*4882a593Smuzhiyun 			if (block_end > from && block_start < to) {
895*4882a593Smuzhiyun 				if (!PageUptodate(page)) {
896*4882a593Smuzhiyun 					unsigned start, end;
897*4882a593Smuzhiyun 
898*4882a593Smuzhiyun 					start = max(from, block_start);
899*4882a593Smuzhiyun 					end = min(to, block_end);
900*4882a593Smuzhiyun 
901*4882a593Smuzhiyun 					zero_user_segment(page, start, end);
902*4882a593Smuzhiyun 					set_buffer_uptodate(bh);
903*4882a593Smuzhiyun 				}
904*4882a593Smuzhiyun 
905*4882a593Smuzhiyun 				clear_buffer_new(bh);
906*4882a593Smuzhiyun 				mark_buffer_dirty(bh);
907*4882a593Smuzhiyun 			}
908*4882a593Smuzhiyun 		}
909*4882a593Smuzhiyun 
910*4882a593Smuzhiyun 		block_start = block_end;
911*4882a593Smuzhiyun 		bh = bh->b_this_page;
912*4882a593Smuzhiyun 	} while (bh != head);
913*4882a593Smuzhiyun }
914*4882a593Smuzhiyun 
915*4882a593Smuzhiyun /*
916*4882a593Smuzhiyun  * Only called when we have a failure during allocating write to write
917*4882a593Smuzhiyun  * zero's to the newly allocated region.
918*4882a593Smuzhiyun  */
919*4882a593Smuzhiyun static void ocfs2_write_failure(struct inode *inode,
920*4882a593Smuzhiyun 				struct ocfs2_write_ctxt *wc,
921*4882a593Smuzhiyun 				loff_t user_pos, unsigned user_len)
922*4882a593Smuzhiyun {
923*4882a593Smuzhiyun 	int i;
924*4882a593Smuzhiyun 	unsigned from = user_pos & (PAGE_SIZE - 1),
925*4882a593Smuzhiyun 		to = user_pos + user_len;
926*4882a593Smuzhiyun 	struct page *tmppage;
927*4882a593Smuzhiyun 
928*4882a593Smuzhiyun 	if (wc->w_target_page)
929*4882a593Smuzhiyun 		ocfs2_zero_new_buffers(wc->w_target_page, from, to);
930*4882a593Smuzhiyun 
931*4882a593Smuzhiyun 	for(i = 0; i < wc->w_num_pages; i++) {
932*4882a593Smuzhiyun 		tmppage = wc->w_pages[i];
933*4882a593Smuzhiyun 
934*4882a593Smuzhiyun 		if (tmppage && page_has_buffers(tmppage)) {
935*4882a593Smuzhiyun 			if (ocfs2_should_order_data(inode))
936*4882a593Smuzhiyun 				ocfs2_jbd2_inode_add_write(wc->w_handle, inode,
937*4882a593Smuzhiyun 							   user_pos, user_len);
938*4882a593Smuzhiyun 
939*4882a593Smuzhiyun 			block_commit_write(tmppage, from, to);
940*4882a593Smuzhiyun 		}
941*4882a593Smuzhiyun 	}
942*4882a593Smuzhiyun }
943*4882a593Smuzhiyun 
944*4882a593Smuzhiyun static int ocfs2_prepare_page_for_write(struct inode *inode, u64 *p_blkno,
945*4882a593Smuzhiyun 					struct ocfs2_write_ctxt *wc,
946*4882a593Smuzhiyun 					struct page *page, u32 cpos,
947*4882a593Smuzhiyun 					loff_t user_pos, unsigned user_len,
948*4882a593Smuzhiyun 					int new)
949*4882a593Smuzhiyun {
950*4882a593Smuzhiyun 	int ret;
951*4882a593Smuzhiyun 	unsigned int map_from = 0, map_to = 0;
952*4882a593Smuzhiyun 	unsigned int cluster_start, cluster_end;
953*4882a593Smuzhiyun 	unsigned int user_data_from = 0, user_data_to = 0;
954*4882a593Smuzhiyun 
955*4882a593Smuzhiyun 	ocfs2_figure_cluster_boundaries(OCFS2_SB(inode->i_sb), cpos,
956*4882a593Smuzhiyun 					&cluster_start, &cluster_end);
957*4882a593Smuzhiyun 
958*4882a593Smuzhiyun 	/* treat the write as new if a hole/lseek spanned across
959*4882a593Smuzhiyun 	 * the page boundary.
960*4882a593Smuzhiyun 	 */
961*4882a593Smuzhiyun 	new = new | ((i_size_read(inode) <= page_offset(page)) &&
962*4882a593Smuzhiyun 			(page_offset(page) <= user_pos));
963*4882a593Smuzhiyun 
964*4882a593Smuzhiyun 	if (page == wc->w_target_page) {
965*4882a593Smuzhiyun 		map_from = user_pos & (PAGE_SIZE - 1);
966*4882a593Smuzhiyun 		map_to = map_from + user_len;
967*4882a593Smuzhiyun 
968*4882a593Smuzhiyun 		if (new)
969*4882a593Smuzhiyun 			ret = ocfs2_map_page_blocks(page, p_blkno, inode,
970*4882a593Smuzhiyun 						    cluster_start, cluster_end,
971*4882a593Smuzhiyun 						    new);
972*4882a593Smuzhiyun 		else
973*4882a593Smuzhiyun 			ret = ocfs2_map_page_blocks(page, p_blkno, inode,
974*4882a593Smuzhiyun 						    map_from, map_to, new);
975*4882a593Smuzhiyun 		if (ret) {
976*4882a593Smuzhiyun 			mlog_errno(ret);
977*4882a593Smuzhiyun 			goto out;
978*4882a593Smuzhiyun 		}
979*4882a593Smuzhiyun 
980*4882a593Smuzhiyun 		user_data_from = map_from;
981*4882a593Smuzhiyun 		user_data_to = map_to;
982*4882a593Smuzhiyun 		if (new) {
983*4882a593Smuzhiyun 			map_from = cluster_start;
984*4882a593Smuzhiyun 			map_to = cluster_end;
985*4882a593Smuzhiyun 		}
986*4882a593Smuzhiyun 	} else {
987*4882a593Smuzhiyun 		/*
988*4882a593Smuzhiyun 		 * If we haven't allocated the new page yet, we
989*4882a593Smuzhiyun 		 * shouldn't be writing it out without copying user
990*4882a593Smuzhiyun 		 * data. This is likely a math error from the caller.
991*4882a593Smuzhiyun 		 */
992*4882a593Smuzhiyun 		BUG_ON(!new);
993*4882a593Smuzhiyun 
994*4882a593Smuzhiyun 		map_from = cluster_start;
995*4882a593Smuzhiyun 		map_to = cluster_end;
996*4882a593Smuzhiyun 
997*4882a593Smuzhiyun 		ret = ocfs2_map_page_blocks(page, p_blkno, inode,
998*4882a593Smuzhiyun 					    cluster_start, cluster_end, new);
999*4882a593Smuzhiyun 		if (ret) {
1000*4882a593Smuzhiyun 			mlog_errno(ret);
1001*4882a593Smuzhiyun 			goto out;
1002*4882a593Smuzhiyun 		}
1003*4882a593Smuzhiyun 	}
1004*4882a593Smuzhiyun 
1005*4882a593Smuzhiyun 	/*
1006*4882a593Smuzhiyun 	 * Parts of newly allocated pages need to be zero'd.
1007*4882a593Smuzhiyun 	 *
1008*4882a593Smuzhiyun 	 * Above, we have also rewritten 'to' and 'from' - as far as
1009*4882a593Smuzhiyun 	 * the rest of the function is concerned, the entire cluster
1010*4882a593Smuzhiyun 	 * range inside of a page needs to be written.
1011*4882a593Smuzhiyun 	 *
1012*4882a593Smuzhiyun 	 * We can skip this if the page is up to date - it's already
1013*4882a593Smuzhiyun 	 * been zero'd from being read in as a hole.
1014*4882a593Smuzhiyun 	 */
1015*4882a593Smuzhiyun 	if (new && !PageUptodate(page))
1016*4882a593Smuzhiyun 		ocfs2_clear_page_regions(page, OCFS2_SB(inode->i_sb),
1017*4882a593Smuzhiyun 					 cpos, user_data_from, user_data_to);
1018*4882a593Smuzhiyun 
1019*4882a593Smuzhiyun 	flush_dcache_page(page);
1020*4882a593Smuzhiyun 
1021*4882a593Smuzhiyun out:
1022*4882a593Smuzhiyun 	return ret;
1023*4882a593Smuzhiyun }
1024*4882a593Smuzhiyun 
1025*4882a593Smuzhiyun /*
1026*4882a593Smuzhiyun  * This function will only grab one clusters worth of pages.
1027*4882a593Smuzhiyun  */
1028*4882a593Smuzhiyun static int ocfs2_grab_pages_for_write(struct address_space *mapping,
1029*4882a593Smuzhiyun 				      struct ocfs2_write_ctxt *wc,
1030*4882a593Smuzhiyun 				      u32 cpos, loff_t user_pos,
1031*4882a593Smuzhiyun 				      unsigned user_len, int new,
1032*4882a593Smuzhiyun 				      struct page *mmap_page)
1033*4882a593Smuzhiyun {
1034*4882a593Smuzhiyun 	int ret = 0, i;
1035*4882a593Smuzhiyun 	unsigned long start, target_index, end_index, index;
1036*4882a593Smuzhiyun 	struct inode *inode = mapping->host;
1037*4882a593Smuzhiyun 	loff_t last_byte;
1038*4882a593Smuzhiyun 
1039*4882a593Smuzhiyun 	target_index = user_pos >> PAGE_SHIFT;
1040*4882a593Smuzhiyun 
1041*4882a593Smuzhiyun 	/*
1042*4882a593Smuzhiyun 	 * Figure out how many pages we'll be manipulating here. For
1043*4882a593Smuzhiyun 	 * a non-allocating write, we just change the one
1044*4882a593Smuzhiyun 	 * page. Otherwise, we'll need a whole cluster's worth.  If we're
1045*4882a593Smuzhiyun 	 * writing past i_size, we only need enough pages to cover the
1046*4882a593Smuzhiyun 	 * last page of the write.
1047*4882a593Smuzhiyun 	 */
1048*4882a593Smuzhiyun 	if (new) {
1049*4882a593Smuzhiyun 		wc->w_num_pages = ocfs2_pages_per_cluster(inode->i_sb);
1050*4882a593Smuzhiyun 		start = ocfs2_align_clusters_to_page_index(inode->i_sb, cpos);
1051*4882a593Smuzhiyun 		/*
1052*4882a593Smuzhiyun 		 * We need the index *past* the last page we could possibly
1053*4882a593Smuzhiyun 		 * touch.  This is the page past the end of the write or
1054*4882a593Smuzhiyun 		 * i_size, whichever is greater.
1055*4882a593Smuzhiyun 		 */
1056*4882a593Smuzhiyun 		last_byte = max(user_pos + user_len, i_size_read(inode));
1057*4882a593Smuzhiyun 		BUG_ON(last_byte < 1);
1058*4882a593Smuzhiyun 		end_index = ((last_byte - 1) >> PAGE_SHIFT) + 1;
1059*4882a593Smuzhiyun 		if ((start + wc->w_num_pages) > end_index)
1060*4882a593Smuzhiyun 			wc->w_num_pages = end_index - start;
1061*4882a593Smuzhiyun 	} else {
1062*4882a593Smuzhiyun 		wc->w_num_pages = 1;
1063*4882a593Smuzhiyun 		start = target_index;
1064*4882a593Smuzhiyun 	}
1065*4882a593Smuzhiyun 	end_index = (user_pos + user_len - 1) >> PAGE_SHIFT;
1066*4882a593Smuzhiyun 
1067*4882a593Smuzhiyun 	for(i = 0; i < wc->w_num_pages; i++) {
1068*4882a593Smuzhiyun 		index = start + i;
1069*4882a593Smuzhiyun 
1070*4882a593Smuzhiyun 		if (index >= target_index && index <= end_index &&
1071*4882a593Smuzhiyun 		    wc->w_type == OCFS2_WRITE_MMAP) {
1072*4882a593Smuzhiyun 			/*
1073*4882a593Smuzhiyun 			 * ocfs2_pagemkwrite() is a little different
1074*4882a593Smuzhiyun 			 * and wants us to directly use the page
1075*4882a593Smuzhiyun 			 * passed in.
1076*4882a593Smuzhiyun 			 */
1077*4882a593Smuzhiyun 			lock_page(mmap_page);
1078*4882a593Smuzhiyun 
1079*4882a593Smuzhiyun 			/* Exit and let the caller retry */
1080*4882a593Smuzhiyun 			if (mmap_page->mapping != mapping) {
1081*4882a593Smuzhiyun 				WARN_ON(mmap_page->mapping);
1082*4882a593Smuzhiyun 				unlock_page(mmap_page);
1083*4882a593Smuzhiyun 				ret = -EAGAIN;
1084*4882a593Smuzhiyun 				goto out;
1085*4882a593Smuzhiyun 			}
1086*4882a593Smuzhiyun 
1087*4882a593Smuzhiyun 			get_page(mmap_page);
1088*4882a593Smuzhiyun 			wc->w_pages[i] = mmap_page;
1089*4882a593Smuzhiyun 			wc->w_target_locked = true;
1090*4882a593Smuzhiyun 		} else if (index >= target_index && index <= end_index &&
1091*4882a593Smuzhiyun 			   wc->w_type == OCFS2_WRITE_DIRECT) {
1092*4882a593Smuzhiyun 			/* Direct write has no mapping page. */
1093*4882a593Smuzhiyun 			wc->w_pages[i] = NULL;
1094*4882a593Smuzhiyun 			continue;
1095*4882a593Smuzhiyun 		} else {
1096*4882a593Smuzhiyun 			wc->w_pages[i] = find_or_create_page(mapping, index,
1097*4882a593Smuzhiyun 							     GFP_NOFS);
1098*4882a593Smuzhiyun 			if (!wc->w_pages[i]) {
1099*4882a593Smuzhiyun 				ret = -ENOMEM;
1100*4882a593Smuzhiyun 				mlog_errno(ret);
1101*4882a593Smuzhiyun 				goto out;
1102*4882a593Smuzhiyun 			}
1103*4882a593Smuzhiyun 		}
1104*4882a593Smuzhiyun 		wait_for_stable_page(wc->w_pages[i]);
1105*4882a593Smuzhiyun 
1106*4882a593Smuzhiyun 		if (index == target_index)
1107*4882a593Smuzhiyun 			wc->w_target_page = wc->w_pages[i];
1108*4882a593Smuzhiyun 	}
1109*4882a593Smuzhiyun out:
1110*4882a593Smuzhiyun 	if (ret)
1111*4882a593Smuzhiyun 		wc->w_target_locked = false;
1112*4882a593Smuzhiyun 	return ret;
1113*4882a593Smuzhiyun }
1114*4882a593Smuzhiyun 
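/*
 * A worked example of the page-count logic above (illustrative, not part of
 * the original file), assuming 4 KiB pages and 32 KiB clusters
 * (ocfs2_pages_per_cluster() = 8).  For an allocating write with
 * user_pos = 40960, user_len = 100 and i_size = 0:
 *
 *	start     = page index of the cluster start    = 8
 *	last_byte = max(40960 + 100, 0)                 = 41060
 *	end_index = ((41060 - 1) >> PAGE_SHIFT) + 1     = 11
 *
 * Since start + 8 = 16 exceeds 11, w_num_pages is trimmed to 11 - 8 = 3, so
 * pages past the end of the write (and past i_size) are never locked,
 * read in, or zeroed.
 */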
1115*4882a593Smuzhiyun /*
1116*4882a593Smuzhiyun  * Prepare a single cluster for writing into the file.
1117*4882a593Smuzhiyun  */
1118*4882a593Smuzhiyun static int ocfs2_write_cluster(struct address_space *mapping,
1119*4882a593Smuzhiyun 			       u32 *phys, unsigned int new,
1120*4882a593Smuzhiyun 			       unsigned int clear_unwritten,
1121*4882a593Smuzhiyun 			       unsigned int should_zero,
1122*4882a593Smuzhiyun 			       struct ocfs2_alloc_context *data_ac,
1123*4882a593Smuzhiyun 			       struct ocfs2_alloc_context *meta_ac,
1124*4882a593Smuzhiyun 			       struct ocfs2_write_ctxt *wc, u32 cpos,
1125*4882a593Smuzhiyun 			       loff_t user_pos, unsigned user_len)
1126*4882a593Smuzhiyun {
1127*4882a593Smuzhiyun 	int ret, i;
1128*4882a593Smuzhiyun 	u64 p_blkno;
1129*4882a593Smuzhiyun 	struct inode *inode = mapping->host;
1130*4882a593Smuzhiyun 	struct ocfs2_extent_tree et;
1131*4882a593Smuzhiyun 	int bpc = ocfs2_clusters_to_blocks(inode->i_sb, 1);
1132*4882a593Smuzhiyun 
1133*4882a593Smuzhiyun 	if (new) {
1134*4882a593Smuzhiyun 		u32 tmp_pos;
1135*4882a593Smuzhiyun 
1136*4882a593Smuzhiyun 		/*
1137*4882a593Smuzhiyun 		 * This is safe to call with the page locks - it won't take
1138*4882a593Smuzhiyun 		 * any additional semaphores or cluster locks.
1139*4882a593Smuzhiyun 		 */
1140*4882a593Smuzhiyun 		tmp_pos = cpos;
1141*4882a593Smuzhiyun 		ret = ocfs2_add_inode_data(OCFS2_SB(inode->i_sb), inode,
1142*4882a593Smuzhiyun 					   &tmp_pos, 1, !clear_unwritten,
1143*4882a593Smuzhiyun 					   wc->w_di_bh, wc->w_handle,
1144*4882a593Smuzhiyun 					   data_ac, meta_ac, NULL);
1145*4882a593Smuzhiyun 		/*
1146*4882a593Smuzhiyun 		 * This shouldn't happen because we must have already
1147*4882a593Smuzhiyun 		 * calculated the correct meta data allocation required. The
1148*4882a593Smuzhiyun 		 * internal tree allocation code should know how to increase
1149*4882a593Smuzhiyun 		 * transaction credits itself.
1150*4882a593Smuzhiyun 		 *
1151*4882a593Smuzhiyun 		 * If need be, we could handle -EAGAIN for a
1152*4882a593Smuzhiyun 		 * RESTART_TRANS here.
1153*4882a593Smuzhiyun 		 */
1154*4882a593Smuzhiyun 		mlog_bug_on_msg(ret == -EAGAIN,
1155*4882a593Smuzhiyun 				"Inode %llu: EAGAIN return during allocation.\n",
1156*4882a593Smuzhiyun 				(unsigned long long)OCFS2_I(inode)->ip_blkno);
1157*4882a593Smuzhiyun 		if (ret < 0) {
1158*4882a593Smuzhiyun 			mlog_errno(ret);
1159*4882a593Smuzhiyun 			goto out;
1160*4882a593Smuzhiyun 		}
1161*4882a593Smuzhiyun 	} else if (clear_unwritten) {
1162*4882a593Smuzhiyun 		ocfs2_init_dinode_extent_tree(&et, INODE_CACHE(inode),
1163*4882a593Smuzhiyun 					      wc->w_di_bh);
1164*4882a593Smuzhiyun 		ret = ocfs2_mark_extent_written(inode, &et,
1165*4882a593Smuzhiyun 						wc->w_handle, cpos, 1, *phys,
1166*4882a593Smuzhiyun 						meta_ac, &wc->w_dealloc);
1167*4882a593Smuzhiyun 		if (ret < 0) {
1168*4882a593Smuzhiyun 			mlog_errno(ret);
1169*4882a593Smuzhiyun 			goto out;
1170*4882a593Smuzhiyun 		}
1171*4882a593Smuzhiyun 	}
1172*4882a593Smuzhiyun 
1173*4882a593Smuzhiyun 	/*
1174*4882a593Smuzhiyun 	 * The only reason this should fail is due to an inability to
1175*4882a593Smuzhiyun 	 * find the extent added.
1176*4882a593Smuzhiyun 	 */
1177*4882a593Smuzhiyun 	ret = ocfs2_get_clusters(inode, cpos, phys, NULL, NULL);
1178*4882a593Smuzhiyun 	if (ret < 0) {
1179*4882a593Smuzhiyun 		mlog(ML_ERROR, "Get physical blkno failed for inode %llu, "
1180*4882a593Smuzhiyun 			    "at logical cluster %u",
1181*4882a593Smuzhiyun 			    (unsigned long long)OCFS2_I(inode)->ip_blkno, cpos);
1182*4882a593Smuzhiyun 		goto out;
1183*4882a593Smuzhiyun 	}
1184*4882a593Smuzhiyun 
1185*4882a593Smuzhiyun 	BUG_ON(*phys == 0);
1186*4882a593Smuzhiyun 
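	/*
	 * Translate the cluster number to a block number.  When we are not
	 * zeroing the whole cluster, advance p_blkno to the block within the
	 * cluster that backs user_pos; (bpc - 1) masks off the block offset
	 * inside a single cluster.
	 */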
1187*4882a593Smuzhiyun 	p_blkno = ocfs2_clusters_to_blocks(inode->i_sb, *phys);
1188*4882a593Smuzhiyun 	if (!should_zero)
1189*4882a593Smuzhiyun 		p_blkno += (user_pos >> inode->i_sb->s_blocksize_bits) & (u64)(bpc - 1);
1190*4882a593Smuzhiyun 
1191*4882a593Smuzhiyun 	for(i = 0; i < wc->w_num_pages; i++) {
1192*4882a593Smuzhiyun 		int tmpret;
1193*4882a593Smuzhiyun 
1194*4882a593Smuzhiyun 		/* This is the direct io target page. */
1195*4882a593Smuzhiyun 		if (wc->w_pages[i] == NULL) {
1196*4882a593Smuzhiyun 			p_blkno++;
1197*4882a593Smuzhiyun 			continue;
1198*4882a593Smuzhiyun 		}
1199*4882a593Smuzhiyun 
1200*4882a593Smuzhiyun 		tmpret = ocfs2_prepare_page_for_write(inode, &p_blkno, wc,
1201*4882a593Smuzhiyun 						      wc->w_pages[i], cpos,
1202*4882a593Smuzhiyun 						      user_pos, user_len,
1203*4882a593Smuzhiyun 						      should_zero);
1204*4882a593Smuzhiyun 		if (tmpret) {
1205*4882a593Smuzhiyun 			mlog_errno(tmpret);
1206*4882a593Smuzhiyun 			if (ret == 0)
1207*4882a593Smuzhiyun 				ret = tmpret;
1208*4882a593Smuzhiyun 		}
1209*4882a593Smuzhiyun 	}
1210*4882a593Smuzhiyun 
1211*4882a593Smuzhiyun 	/*
1212*4882a593Smuzhiyun 	 * We only have cleanup to do in the case of an allocating write.
1213*4882a593Smuzhiyun 	 */
1214*4882a593Smuzhiyun 	if (ret && new)
1215*4882a593Smuzhiyun 		ocfs2_write_failure(inode, wc, user_pos, user_len);
1216*4882a593Smuzhiyun 
1217*4882a593Smuzhiyun out:
1218*4882a593Smuzhiyun 
1219*4882a593Smuzhiyun 	return ret;
1220*4882a593Smuzhiyun }
1221*4882a593Smuzhiyun 
1222*4882a593Smuzhiyun static int ocfs2_write_cluster_by_desc(struct address_space *mapping,
1223*4882a593Smuzhiyun 				       struct ocfs2_alloc_context *data_ac,
1224*4882a593Smuzhiyun 				       struct ocfs2_alloc_context *meta_ac,
1225*4882a593Smuzhiyun 				       struct ocfs2_write_ctxt *wc,
1226*4882a593Smuzhiyun 				       loff_t pos, unsigned len)
1227*4882a593Smuzhiyun {
1228*4882a593Smuzhiyun 	int ret, i;
1229*4882a593Smuzhiyun 	loff_t cluster_off;
1230*4882a593Smuzhiyun 	unsigned int local_len = len;
1231*4882a593Smuzhiyun 	struct ocfs2_write_cluster_desc *desc;
1232*4882a593Smuzhiyun 	struct ocfs2_super *osb = OCFS2_SB(mapping->host->i_sb);
1233*4882a593Smuzhiyun 
1234*4882a593Smuzhiyun 	for (i = 0; i < wc->w_clen; i++) {
1235*4882a593Smuzhiyun 		desc = &wc->w_desc[i];
1236*4882a593Smuzhiyun 
1237*4882a593Smuzhiyun 		/*
1238*4882a593Smuzhiyun 		 * We have to make sure that the total write passed in
1239*4882a593Smuzhiyun 		 * doesn't extend past a single cluster.
1240*4882a593Smuzhiyun 		 */
1241*4882a593Smuzhiyun 		local_len = len;
1242*4882a593Smuzhiyun 		cluster_off = pos & (osb->s_clustersize - 1);
1243*4882a593Smuzhiyun 		if ((cluster_off + local_len) > osb->s_clustersize)
1244*4882a593Smuzhiyun 			local_len = osb->s_clustersize - cluster_off;
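		/*
		 * Example (hypothetical sizes): with 64K clusters, a 16K
		 * write starting 60K into a cluster is clamped to 4K here;
		 * the remaining 12K is handled by the next descriptor.
		 */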
1245*4882a593Smuzhiyun 
1246*4882a593Smuzhiyun 		ret = ocfs2_write_cluster(mapping, &desc->c_phys,
1247*4882a593Smuzhiyun 					  desc->c_new,
1248*4882a593Smuzhiyun 					  desc->c_clear_unwritten,
1249*4882a593Smuzhiyun 					  desc->c_needs_zero,
1250*4882a593Smuzhiyun 					  data_ac, meta_ac,
1251*4882a593Smuzhiyun 					  wc, desc->c_cpos, pos, local_len);
1252*4882a593Smuzhiyun 		if (ret) {
1253*4882a593Smuzhiyun 			mlog_errno(ret);
1254*4882a593Smuzhiyun 			goto out;
1255*4882a593Smuzhiyun 		}
1256*4882a593Smuzhiyun 
1257*4882a593Smuzhiyun 		len -= local_len;
1258*4882a593Smuzhiyun 		pos += local_len;
1259*4882a593Smuzhiyun 	}
1260*4882a593Smuzhiyun 
1261*4882a593Smuzhiyun 	ret = 0;
1262*4882a593Smuzhiyun out:
1263*4882a593Smuzhiyun 	return ret;
1264*4882a593Smuzhiyun }
1265*4882a593Smuzhiyun 
1266*4882a593Smuzhiyun /*
1267*4882a593Smuzhiyun  * ocfs2_write_end() wants to know which parts of the target page it
1268*4882a593Smuzhiyun  * should complete the write on. It's easiest to compute them ahead of
1269*4882a593Smuzhiyun  * time when a more complete view of the write is available.
1270*4882a593Smuzhiyun  */
1271*4882a593Smuzhiyun static void ocfs2_set_target_boundaries(struct ocfs2_super *osb,
1272*4882a593Smuzhiyun 					struct ocfs2_write_ctxt *wc,
1273*4882a593Smuzhiyun 					loff_t pos, unsigned len, int alloc)
1274*4882a593Smuzhiyun {
1275*4882a593Smuzhiyun 	struct ocfs2_write_cluster_desc *desc;
1276*4882a593Smuzhiyun 
1277*4882a593Smuzhiyun 	wc->w_target_from = pos & (PAGE_SIZE - 1);
1278*4882a593Smuzhiyun 	wc->w_target_to = wc->w_target_from + len;
1279*4882a593Smuzhiyun 
1280*4882a593Smuzhiyun 	if (alloc == 0)
1281*4882a593Smuzhiyun 		return;
1282*4882a593Smuzhiyun 
1283*4882a593Smuzhiyun 	/*
1284*4882a593Smuzhiyun 	 * Allocating write - we may have different boundaries based
1285*4882a593Smuzhiyun 	 * on page size and cluster size.
1286*4882a593Smuzhiyun 	 *
1287*4882a593Smuzhiyun 	 * NOTE: We can no longer compute one value from the other as
1288*4882a593Smuzhiyun 	 * the actual write length and user provided length may be
1289*4882a593Smuzhiyun 	 * different.
1290*4882a593Smuzhiyun 	 */
1291*4882a593Smuzhiyun 
1292*4882a593Smuzhiyun 	if (wc->w_large_pages) {
1293*4882a593Smuzhiyun 		/*
1294*4882a593Smuzhiyun 		 * We only care about the 1st and last cluster within
1295*4882a593Smuzhiyun 		 * our range and whether they should be zero'd or not. Either
1296*4882a593Smuzhiyun 		 * value may be extended out to the start/end of a
1297*4882a593Smuzhiyun 		 * newly allocated cluster.
1298*4882a593Smuzhiyun 		 */
1299*4882a593Smuzhiyun 		desc = &wc->w_desc[0];
1300*4882a593Smuzhiyun 		if (desc->c_needs_zero)
1301*4882a593Smuzhiyun 			ocfs2_figure_cluster_boundaries(osb,
1302*4882a593Smuzhiyun 							desc->c_cpos,
1303*4882a593Smuzhiyun 							&wc->w_target_from,
1304*4882a593Smuzhiyun 							NULL);
1305*4882a593Smuzhiyun 
1306*4882a593Smuzhiyun 		desc = &wc->w_desc[wc->w_clen - 1];
1307*4882a593Smuzhiyun 		if (desc->c_needs_zero)
1308*4882a593Smuzhiyun 			ocfs2_figure_cluster_boundaries(osb,
1309*4882a593Smuzhiyun 							desc->c_cpos,
1310*4882a593Smuzhiyun 							NULL,
1311*4882a593Smuzhiyun 							&wc->w_target_to);
1312*4882a593Smuzhiyun 	} else {
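		/*
		 * No "large pages" here, i.e. PAGE_SIZE is assumed not to
		 * exceed the cluster size, so an allocating write must write
		 * out the entire target page.
		 */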
1313*4882a593Smuzhiyun 		wc->w_target_from = 0;
1314*4882a593Smuzhiyun 		wc->w_target_to = PAGE_SIZE;
1315*4882a593Smuzhiyun 	}
1316*4882a593Smuzhiyun }
1317*4882a593Smuzhiyun 
1318*4882a593Smuzhiyun /*
1319*4882a593Smuzhiyun  * Check if this extent is marked UNWRITTEN by direct io. If so, we don't need
1320*4882a593Smuzhiyun  * to do the zeroing work, and we must not clear UNWRITTEN since that will be
1321*4882a593Smuzhiyun  * done by the direct io procedure itself.
1322*4882a593Smuzhiyun  * If this is a new extent allocated by direct io, record it in the
1323*4882a593Smuzhiyun  * ip_unwritten_list.
1324*4882a593Smuzhiyun  */
1325*4882a593Smuzhiyun static int ocfs2_unwritten_check(struct inode *inode,
1326*4882a593Smuzhiyun 				 struct ocfs2_write_ctxt *wc,
1327*4882a593Smuzhiyun 				 struct ocfs2_write_cluster_desc *desc)
1328*4882a593Smuzhiyun {
1329*4882a593Smuzhiyun 	struct ocfs2_inode_info *oi = OCFS2_I(inode);
1330*4882a593Smuzhiyun 	struct ocfs2_unwritten_extent *ue = NULL, *new = NULL;
1331*4882a593Smuzhiyun 	int ret = 0;
1332*4882a593Smuzhiyun 
1333*4882a593Smuzhiyun 	if (!desc->c_needs_zero)
1334*4882a593Smuzhiyun 		return 0;
1335*4882a593Smuzhiyun 
1336*4882a593Smuzhiyun retry:
1337*4882a593Smuzhiyun 	spin_lock(&oi->ip_lock);
1338*4882a593Smuzhiyun 	/* No need to zero here, no matter whether this is a buffered or a
1339*4882a593Smuzhiyun 	 * direct write: whoever owns the cluster is doing the zeroing and
1340*4882a593Smuzhiyun 	 * will clear UNWRITTEN once all cluster io has finished. */
1341*4882a593Smuzhiyun 	list_for_each_entry(ue, &oi->ip_unwritten_list, ue_ip_node) {
1342*4882a593Smuzhiyun 		if (desc->c_cpos == ue->ue_cpos) {
1343*4882a593Smuzhiyun 			BUG_ON(desc->c_new);
1344*4882a593Smuzhiyun 			desc->c_needs_zero = 0;
1345*4882a593Smuzhiyun 			desc->c_clear_unwritten = 0;
1346*4882a593Smuzhiyun 			goto unlock;
1347*4882a593Smuzhiyun 		}
1348*4882a593Smuzhiyun 	}
1349*4882a593Smuzhiyun 
1350*4882a593Smuzhiyun 	if (wc->w_type != OCFS2_WRITE_DIRECT)
1351*4882a593Smuzhiyun 		goto unlock;
1352*4882a593Smuzhiyun 
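	/*
	 * We cannot allocate while holding the ip_lock spinlock (GFP_NOFS
	 * may sleep), so drop it, allocate, and retry the list scan from
	 * the top in case someone else claimed this cluster meanwhile.
	 */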
1353*4882a593Smuzhiyun 	if (new == NULL) {
1354*4882a593Smuzhiyun 		spin_unlock(&oi->ip_lock);
1355*4882a593Smuzhiyun 		new = kmalloc(sizeof(struct ocfs2_unwritten_extent),
1356*4882a593Smuzhiyun 			     GFP_NOFS);
1357*4882a593Smuzhiyun 		if (new == NULL) {
1358*4882a593Smuzhiyun 			ret = -ENOMEM;
1359*4882a593Smuzhiyun 			goto out;
1360*4882a593Smuzhiyun 		}
1361*4882a593Smuzhiyun 		goto retry;
1362*4882a593Smuzhiyun 	}
1363*4882a593Smuzhiyun 	/* This direct write will do the zeroing. */
1364*4882a593Smuzhiyun 	new->ue_cpos = desc->c_cpos;
1365*4882a593Smuzhiyun 	new->ue_phys = desc->c_phys;
1366*4882a593Smuzhiyun 	desc->c_clear_unwritten = 0;
1367*4882a593Smuzhiyun 	list_add_tail(&new->ue_ip_node, &oi->ip_unwritten_list);
1368*4882a593Smuzhiyun 	list_add_tail(&new->ue_node, &wc->w_unwritten_list);
1369*4882a593Smuzhiyun 	wc->w_unwritten_count++;
1370*4882a593Smuzhiyun 	new = NULL;
1371*4882a593Smuzhiyun unlock:
1372*4882a593Smuzhiyun 	spin_unlock(&oi->ip_lock);
1373*4882a593Smuzhiyun out:
1374*4882a593Smuzhiyun 	kfree(new);
1375*4882a593Smuzhiyun 	return ret;
1376*4882a593Smuzhiyun }
1377*4882a593Smuzhiyun 
1378*4882a593Smuzhiyun /*
1379*4882a593Smuzhiyun  * Populate each single-cluster write descriptor in the write context
1380*4882a593Smuzhiyun  * with information about the i/o to be done.
1381*4882a593Smuzhiyun  *
1382*4882a593Smuzhiyun  * Returns the number of clusters that will have to be allocated, as
1383*4882a593Smuzhiyun  * well as a worst case estimate of the number of extent records that
1384*4882a593Smuzhiyun  * would have to be created during a write to an unwritten region.
1385*4882a593Smuzhiyun  */
1386*4882a593Smuzhiyun static int ocfs2_populate_write_desc(struct inode *inode,
1387*4882a593Smuzhiyun 				     struct ocfs2_write_ctxt *wc,
1388*4882a593Smuzhiyun 				     unsigned int *clusters_to_alloc,
1389*4882a593Smuzhiyun 				     unsigned int *extents_to_split)
1390*4882a593Smuzhiyun {
1391*4882a593Smuzhiyun 	int ret;
1392*4882a593Smuzhiyun 	struct ocfs2_write_cluster_desc *desc;
1393*4882a593Smuzhiyun 	unsigned int num_clusters = 0;
1394*4882a593Smuzhiyun 	unsigned int ext_flags = 0;
1395*4882a593Smuzhiyun 	u32 phys = 0;
1396*4882a593Smuzhiyun 	int i;
1397*4882a593Smuzhiyun 
1398*4882a593Smuzhiyun 	*clusters_to_alloc = 0;
1399*4882a593Smuzhiyun 	*extents_to_split = 0;
1400*4882a593Smuzhiyun 
1401*4882a593Smuzhiyun 	for (i = 0; i < wc->w_clen; i++) {
1402*4882a593Smuzhiyun 		desc = &wc->w_desc[i];
1403*4882a593Smuzhiyun 		desc->c_cpos = wc->w_cpos + i;
1404*4882a593Smuzhiyun 
1405*4882a593Smuzhiyun 		if (num_clusters == 0) {
1406*4882a593Smuzhiyun 			/*
1407*4882a593Smuzhiyun 			 * Need to look up the next extent record.
1408*4882a593Smuzhiyun 			 */
1409*4882a593Smuzhiyun 			ret = ocfs2_get_clusters(inode, desc->c_cpos, &phys,
1410*4882a593Smuzhiyun 						 &num_clusters, &ext_flags);
1411*4882a593Smuzhiyun 			if (ret) {
1412*4882a593Smuzhiyun 				mlog_errno(ret);
1413*4882a593Smuzhiyun 				goto out;
1414*4882a593Smuzhiyun 			}
1415*4882a593Smuzhiyun 
1416*4882a593Smuzhiyun 			/* We should have already CoWed the refcounted extent. */
1417*4882a593Smuzhiyun 			BUG_ON(ext_flags & OCFS2_EXT_REFCOUNTED);
1418*4882a593Smuzhiyun 
1419*4882a593Smuzhiyun 			/*
1420*4882a593Smuzhiyun 			 * Assume worst case - that we're writing in
1421*4882a593Smuzhiyun 			 * the middle of the extent.
1422*4882a593Smuzhiyun 			 *
1423*4882a593Smuzhiyun 			 * We can assume that the write proceeds from
1424*4882a593Smuzhiyun 			 * left to right, in which case the extent
1425*4882a593Smuzhiyun 			 * insert code is smart enough to coalesce
1426*4882a593Smuzhiyun 			 * subsequent splits into the records already created.
1427*4882a593Smuzhiyun 			 */
1428*4882a593Smuzhiyun 			if (ext_flags & OCFS2_EXT_UNWRITTEN)
1429*4882a593Smuzhiyun 				*extents_to_split = *extents_to_split + 2;
1430*4882a593Smuzhiyun 		} else if (phys) {
1431*4882a593Smuzhiyun 			/*
1432*4882a593Smuzhiyun 			 * Only increment phys if it doesn't describe
1433*4882a593Smuzhiyun 			 * a hole.
1434*4882a593Smuzhiyun 			 */
1435*4882a593Smuzhiyun 			phys++;
1436*4882a593Smuzhiyun 		}
1437*4882a593Smuzhiyun 
1438*4882a593Smuzhiyun 		/*
1439*4882a593Smuzhiyun 		 * If w_first_new_cpos is < UINT_MAX, we have a non-sparse
1440*4882a593Smuzhiyun 		 * file that got extended.  w_first_new_cpos tells us
1441*4882a593Smuzhiyun 		 * where the newly allocated clusters are so we can
1442*4882a593Smuzhiyun 		 * zero them.
1443*4882a593Smuzhiyun 		 */
1444*4882a593Smuzhiyun 		if (desc->c_cpos >= wc->w_first_new_cpos) {
1445*4882a593Smuzhiyun 			BUG_ON(phys == 0);
1446*4882a593Smuzhiyun 			desc->c_needs_zero = 1;
1447*4882a593Smuzhiyun 		}
1448*4882a593Smuzhiyun 
1449*4882a593Smuzhiyun 		desc->c_phys = phys;
1450*4882a593Smuzhiyun 		if (phys == 0) {
1451*4882a593Smuzhiyun 			desc->c_new = 1;
1452*4882a593Smuzhiyun 			desc->c_needs_zero = 1;
1453*4882a593Smuzhiyun 			desc->c_clear_unwritten = 1;
1454*4882a593Smuzhiyun 			*clusters_to_alloc = *clusters_to_alloc + 1;
1455*4882a593Smuzhiyun 		}
1456*4882a593Smuzhiyun 
1457*4882a593Smuzhiyun 		if (ext_flags & OCFS2_EXT_UNWRITTEN) {
1458*4882a593Smuzhiyun 			desc->c_clear_unwritten = 1;
1459*4882a593Smuzhiyun 			desc->c_needs_zero = 1;
1460*4882a593Smuzhiyun 		}
1461*4882a593Smuzhiyun 
1462*4882a593Smuzhiyun 		ret = ocfs2_unwritten_check(inode, wc, desc);
1463*4882a593Smuzhiyun 		if (ret) {
1464*4882a593Smuzhiyun 			mlog_errno(ret);
1465*4882a593Smuzhiyun 			goto out;
1466*4882a593Smuzhiyun 		}
1467*4882a593Smuzhiyun 
1468*4882a593Smuzhiyun 		num_clusters--;
1469*4882a593Smuzhiyun 	}
1470*4882a593Smuzhiyun 
1471*4882a593Smuzhiyun 	ret = 0;
1472*4882a593Smuzhiyun out:
1473*4882a593Smuzhiyun 	return ret;
1474*4882a593Smuzhiyun }
1475*4882a593Smuzhiyun 
1476*4882a593Smuzhiyun static int ocfs2_write_begin_inline(struct address_space *mapping,
1477*4882a593Smuzhiyun 				    struct inode *inode,
1478*4882a593Smuzhiyun 				    struct ocfs2_write_ctxt *wc)
1479*4882a593Smuzhiyun {
1480*4882a593Smuzhiyun 	int ret;
1481*4882a593Smuzhiyun 	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
1482*4882a593Smuzhiyun 	struct page *page;
1483*4882a593Smuzhiyun 	handle_t *handle;
1484*4882a593Smuzhiyun 	struct ocfs2_dinode *di = (struct ocfs2_dinode *)wc->w_di_bh->b_data;
1485*4882a593Smuzhiyun 
1486*4882a593Smuzhiyun 	handle = ocfs2_start_trans(osb, OCFS2_INODE_UPDATE_CREDITS);
1487*4882a593Smuzhiyun 	if (IS_ERR(handle)) {
1488*4882a593Smuzhiyun 		ret = PTR_ERR(handle);
1489*4882a593Smuzhiyun 		mlog_errno(ret);
1490*4882a593Smuzhiyun 		goto out;
1491*4882a593Smuzhiyun 	}
1492*4882a593Smuzhiyun 
1493*4882a593Smuzhiyun 	page = find_or_create_page(mapping, 0, GFP_NOFS);
1494*4882a593Smuzhiyun 	if (!page) {
1495*4882a593Smuzhiyun 		ocfs2_commit_trans(osb, handle);
1496*4882a593Smuzhiyun 		ret = -ENOMEM;
1497*4882a593Smuzhiyun 		mlog_errno(ret);
1498*4882a593Smuzhiyun 		goto out;
1499*4882a593Smuzhiyun 	}
1500*4882a593Smuzhiyun 	/*
1501*4882a593Smuzhiyun 	 * If we don't set w_num_pages then this page won't get unlocked
1502*4882a593Smuzhiyun 	 * and freed on cleanup of the write context.
1503*4882a593Smuzhiyun 	 */
1504*4882a593Smuzhiyun 	wc->w_pages[0] = wc->w_target_page = page;
1505*4882a593Smuzhiyun 	wc->w_num_pages = 1;
1506*4882a593Smuzhiyun 
1507*4882a593Smuzhiyun 	ret = ocfs2_journal_access_di(handle, INODE_CACHE(inode), wc->w_di_bh,
1508*4882a593Smuzhiyun 				      OCFS2_JOURNAL_ACCESS_WRITE);
1509*4882a593Smuzhiyun 	if (ret) {
1510*4882a593Smuzhiyun 		ocfs2_commit_trans(osb, handle);
1511*4882a593Smuzhiyun 
1512*4882a593Smuzhiyun 		mlog_errno(ret);
1513*4882a593Smuzhiyun 		goto out;
1514*4882a593Smuzhiyun 	}
1515*4882a593Smuzhiyun 
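	/*
	 * If the inode does not carry the inline-data feature yet (e.g. a
	 * write into a brand new, empty file), turn it on now so the data
	 * copied in ocfs2_write_end_inline() lands in the dinode itself.
	 */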
1516*4882a593Smuzhiyun 	if (!(OCFS2_I(inode)->ip_dyn_features & OCFS2_INLINE_DATA_FL))
1517*4882a593Smuzhiyun 		ocfs2_set_inode_data_inline(inode, di);
1518*4882a593Smuzhiyun 
1519*4882a593Smuzhiyun 	if (!PageUptodate(page)) {
1520*4882a593Smuzhiyun 		ret = ocfs2_read_inline_data(inode, page, wc->w_di_bh);
1521*4882a593Smuzhiyun 		if (ret) {
1522*4882a593Smuzhiyun 			ocfs2_commit_trans(osb, handle);
1523*4882a593Smuzhiyun 
1524*4882a593Smuzhiyun 			goto out;
1525*4882a593Smuzhiyun 		}
1526*4882a593Smuzhiyun 	}
1527*4882a593Smuzhiyun 
1528*4882a593Smuzhiyun 	wc->w_handle = handle;
1529*4882a593Smuzhiyun out:
1530*4882a593Smuzhiyun 	return ret;
1531*4882a593Smuzhiyun }
1532*4882a593Smuzhiyun 
1533*4882a593Smuzhiyun int ocfs2_size_fits_inline_data(struct buffer_head *di_bh, u64 new_size)
1534*4882a593Smuzhiyun {
1535*4882a593Smuzhiyun 	struct ocfs2_dinode *di = (struct ocfs2_dinode *)di_bh->b_data;
1536*4882a593Smuzhiyun 
1537*4882a593Smuzhiyun 	if (new_size <= le16_to_cpu(di->id2.i_data.id_count))
1538*4882a593Smuzhiyun 		return 1;
1539*4882a593Smuzhiyun 	return 0;
1540*4882a593Smuzhiyun }
1541*4882a593Smuzhiyun 
1542*4882a593Smuzhiyun static int ocfs2_try_to_write_inline_data(struct address_space *mapping,
1543*4882a593Smuzhiyun 					  struct inode *inode, loff_t pos,
1544*4882a593Smuzhiyun 					  unsigned len, struct page *mmap_page,
1545*4882a593Smuzhiyun 					  struct ocfs2_write_ctxt *wc)
1546*4882a593Smuzhiyun {
1547*4882a593Smuzhiyun 	int ret, written = 0;
1548*4882a593Smuzhiyun 	loff_t end = pos + len;
1549*4882a593Smuzhiyun 	struct ocfs2_inode_info *oi = OCFS2_I(inode);
1550*4882a593Smuzhiyun 	struct ocfs2_dinode *di = NULL;
1551*4882a593Smuzhiyun 
1552*4882a593Smuzhiyun 	trace_ocfs2_try_to_write_inline_data((unsigned long long)oi->ip_blkno,
1553*4882a593Smuzhiyun 					     len, (unsigned long long)pos,
1554*4882a593Smuzhiyun 					     oi->ip_dyn_features);
1555*4882a593Smuzhiyun 
1556*4882a593Smuzhiyun 	/*
1557*4882a593Smuzhiyun 	 * Handle inodes which already have inline data first.
1558*4882a593Smuzhiyun 	 */
1559*4882a593Smuzhiyun 	if (oi->ip_dyn_features & OCFS2_INLINE_DATA_FL) {
1560*4882a593Smuzhiyun 		if (mmap_page == NULL &&
1561*4882a593Smuzhiyun 		    ocfs2_size_fits_inline_data(wc->w_di_bh, end))
1562*4882a593Smuzhiyun 			goto do_inline_write;
1563*4882a593Smuzhiyun 
1564*4882a593Smuzhiyun 		/*
1565*4882a593Smuzhiyun 		 * The write won't fit - we have to convert this inode to
1566*4882a593Smuzhiyun 		 * a regular extent list now.
1567*4882a593Smuzhiyun 		 */
1568*4882a593Smuzhiyun 		ret = ocfs2_convert_inline_data_to_extents(inode, wc->w_di_bh);
1569*4882a593Smuzhiyun 		if (ret)
1570*4882a593Smuzhiyun 			mlog_errno(ret);
1571*4882a593Smuzhiyun 		goto out;
1572*4882a593Smuzhiyun 	}
1573*4882a593Smuzhiyun 
1574*4882a593Smuzhiyun 	/*
1575*4882a593Smuzhiyun 	 * Check whether the inode can accept inline data.
1576*4882a593Smuzhiyun 	 */
1577*4882a593Smuzhiyun 	if (oi->ip_clusters != 0 || i_size_read(inode) != 0)
1578*4882a593Smuzhiyun 		return 0;
1579*4882a593Smuzhiyun 
1580*4882a593Smuzhiyun 	/*
1581*4882a593Smuzhiyun 	 * Check whether the write can fit.
1582*4882a593Smuzhiyun 	 */
1583*4882a593Smuzhiyun 	di = (struct ocfs2_dinode *)wc->w_di_bh->b_data;
1584*4882a593Smuzhiyun 	if (mmap_page ||
1585*4882a593Smuzhiyun 	    end > ocfs2_max_inline_data_with_xattr(inode->i_sb, di))
1586*4882a593Smuzhiyun 		return 0;
1587*4882a593Smuzhiyun 
1588*4882a593Smuzhiyun do_inline_write:
1589*4882a593Smuzhiyun 	ret = ocfs2_write_begin_inline(mapping, inode, wc);
1590*4882a593Smuzhiyun 	if (ret) {
1591*4882a593Smuzhiyun 		mlog_errno(ret);
1592*4882a593Smuzhiyun 		goto out;
1593*4882a593Smuzhiyun 	}
1594*4882a593Smuzhiyun 
1595*4882a593Smuzhiyun 	/*
1596*4882a593Smuzhiyun 	 * This signals to the caller that the data can be written
1597*4882a593Smuzhiyun 	 * inline.
1598*4882a593Smuzhiyun 	 */
1599*4882a593Smuzhiyun 	written = 1;
1600*4882a593Smuzhiyun out:
1601*4882a593Smuzhiyun 	return written ? written : ret;
1602*4882a593Smuzhiyun }
1603*4882a593Smuzhiyun 
1604*4882a593Smuzhiyun /*
1605*4882a593Smuzhiyun  * This function only does anything for file systems which can't
1606*4882a593Smuzhiyun  * handle sparse files.
1607*4882a593Smuzhiyun  *
1608*4882a593Smuzhiyun  * What we want to do here is fill in any hole between the current end
1609*4882a593Smuzhiyun  * of allocation and the end of our write. That way the rest of the
1610*4882a593Smuzhiyun  * write path can treat it as a non-allocating write, which has no
1611*4882a593Smuzhiyun  * special case code for sparse/nonsparse files.
1612*4882a593Smuzhiyun  */
1613*4882a593Smuzhiyun static int ocfs2_expand_nonsparse_inode(struct inode *inode,
1614*4882a593Smuzhiyun 					struct buffer_head *di_bh,
1615*4882a593Smuzhiyun 					loff_t pos, unsigned len,
1616*4882a593Smuzhiyun 					struct ocfs2_write_ctxt *wc)
1617*4882a593Smuzhiyun {
1618*4882a593Smuzhiyun 	int ret;
1619*4882a593Smuzhiyun 	loff_t newsize = pos + len;
1620*4882a593Smuzhiyun 
1621*4882a593Smuzhiyun 	BUG_ON(ocfs2_sparse_alloc(OCFS2_SB(inode->i_sb)));
1622*4882a593Smuzhiyun 
1623*4882a593Smuzhiyun 	if (newsize <= i_size_read(inode))
1624*4882a593Smuzhiyun 		return 0;
1625*4882a593Smuzhiyun 
1626*4882a593Smuzhiyun 	ret = ocfs2_extend_no_holes(inode, di_bh, newsize, pos);
1627*4882a593Smuzhiyun 	if (ret)
1628*4882a593Smuzhiyun 		mlog_errno(ret);
1629*4882a593Smuzhiyun 
1630*4882a593Smuzhiyun 	/* There is no wc if this is called from the direct io path. */
1631*4882a593Smuzhiyun 	if (wc)
1632*4882a593Smuzhiyun 		wc->w_first_new_cpos =
1633*4882a593Smuzhiyun 			ocfs2_clusters_for_bytes(inode->i_sb, i_size_read(inode));
1634*4882a593Smuzhiyun 
1635*4882a593Smuzhiyun 	return ret;
1636*4882a593Smuzhiyun }
1637*4882a593Smuzhiyun 
1638*4882a593Smuzhiyun static int ocfs2_zero_tail(struct inode *inode, struct buffer_head *di_bh,
1639*4882a593Smuzhiyun 			   loff_t pos)
1640*4882a593Smuzhiyun {
1641*4882a593Smuzhiyun 	int ret = 0;
1642*4882a593Smuzhiyun 
1643*4882a593Smuzhiyun 	BUG_ON(!ocfs2_sparse_alloc(OCFS2_SB(inode->i_sb)));
1644*4882a593Smuzhiyun 	if (pos > i_size_read(inode))
1645*4882a593Smuzhiyun 		ret = ocfs2_zero_extend(inode, di_bh, pos);
1646*4882a593Smuzhiyun 
1647*4882a593Smuzhiyun 	return ret;
1648*4882a593Smuzhiyun }
1649*4882a593Smuzhiyun 
1650*4882a593Smuzhiyun int ocfs2_write_begin_nolock(struct address_space *mapping,
1651*4882a593Smuzhiyun 			     loff_t pos, unsigned len, ocfs2_write_type_t type,
1652*4882a593Smuzhiyun 			     struct page **pagep, void **fsdata,
1653*4882a593Smuzhiyun 			     struct buffer_head *di_bh, struct page *mmap_page)
1654*4882a593Smuzhiyun {
1655*4882a593Smuzhiyun 	int ret, cluster_of_pages, credits = OCFS2_INODE_UPDATE_CREDITS;
1656*4882a593Smuzhiyun 	unsigned int clusters_to_alloc, extents_to_split, clusters_need = 0;
1657*4882a593Smuzhiyun 	struct ocfs2_write_ctxt *wc;
1658*4882a593Smuzhiyun 	struct inode *inode = mapping->host;
1659*4882a593Smuzhiyun 	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
1660*4882a593Smuzhiyun 	struct ocfs2_dinode *di;
1661*4882a593Smuzhiyun 	struct ocfs2_alloc_context *data_ac = NULL;
1662*4882a593Smuzhiyun 	struct ocfs2_alloc_context *meta_ac = NULL;
1663*4882a593Smuzhiyun 	handle_t *handle;
1664*4882a593Smuzhiyun 	struct ocfs2_extent_tree et;
1665*4882a593Smuzhiyun 	int try_free = 1, ret1;
1666*4882a593Smuzhiyun 
1667*4882a593Smuzhiyun try_again:
1668*4882a593Smuzhiyun 	ret = ocfs2_alloc_write_ctxt(&wc, osb, pos, len, type, di_bh);
1669*4882a593Smuzhiyun 	if (ret) {
1670*4882a593Smuzhiyun 		mlog_errno(ret);
1671*4882a593Smuzhiyun 		return ret;
1672*4882a593Smuzhiyun 	}
1673*4882a593Smuzhiyun 
1674*4882a593Smuzhiyun 	if (ocfs2_supports_inline_data(osb)) {
1675*4882a593Smuzhiyun 		ret = ocfs2_try_to_write_inline_data(mapping, inode, pos, len,
1676*4882a593Smuzhiyun 						     mmap_page, wc);
1677*4882a593Smuzhiyun 		if (ret == 1) {
1678*4882a593Smuzhiyun 			ret = 0;
1679*4882a593Smuzhiyun 			goto success;
1680*4882a593Smuzhiyun 		}
1681*4882a593Smuzhiyun 		if (ret < 0) {
1682*4882a593Smuzhiyun 			mlog_errno(ret);
1683*4882a593Smuzhiyun 			goto out;
1684*4882a593Smuzhiyun 		}
1685*4882a593Smuzhiyun 	}
1686*4882a593Smuzhiyun 
1687*4882a593Smuzhiyun 	/* Direct io changes i_size late, so don't zero the tail here. */
1688*4882a593Smuzhiyun 	if (type != OCFS2_WRITE_DIRECT) {
1689*4882a593Smuzhiyun 		if (ocfs2_sparse_alloc(osb))
1690*4882a593Smuzhiyun 			ret = ocfs2_zero_tail(inode, di_bh, pos);
1691*4882a593Smuzhiyun 		else
1692*4882a593Smuzhiyun 			ret = ocfs2_expand_nonsparse_inode(inode, di_bh, pos,
1693*4882a593Smuzhiyun 							   len, wc);
1694*4882a593Smuzhiyun 		if (ret) {
1695*4882a593Smuzhiyun 			mlog_errno(ret);
1696*4882a593Smuzhiyun 			goto out;
1697*4882a593Smuzhiyun 		}
1698*4882a593Smuzhiyun 	}
1699*4882a593Smuzhiyun 
1700*4882a593Smuzhiyun 	ret = ocfs2_check_range_for_refcount(inode, pos, len);
1701*4882a593Smuzhiyun 	if (ret < 0) {
1702*4882a593Smuzhiyun 		mlog_errno(ret);
1703*4882a593Smuzhiyun 		goto out;
1704*4882a593Smuzhiyun 	} else if (ret == 1) {
1705*4882a593Smuzhiyun 		clusters_need = wc->w_clen;
1706*4882a593Smuzhiyun 		ret = ocfs2_refcount_cow(inode, di_bh,
1707*4882a593Smuzhiyun 					 wc->w_cpos, wc->w_clen, UINT_MAX);
1708*4882a593Smuzhiyun 		if (ret) {
1709*4882a593Smuzhiyun 			mlog_errno(ret);
1710*4882a593Smuzhiyun 			goto out;
1711*4882a593Smuzhiyun 		}
1712*4882a593Smuzhiyun 	}
1713*4882a593Smuzhiyun 
1714*4882a593Smuzhiyun 	ret = ocfs2_populate_write_desc(inode, wc, &clusters_to_alloc,
1715*4882a593Smuzhiyun 					&extents_to_split);
1716*4882a593Smuzhiyun 	if (ret) {
1717*4882a593Smuzhiyun 		mlog_errno(ret);
1718*4882a593Smuzhiyun 		goto out;
1719*4882a593Smuzhiyun 	}
1720*4882a593Smuzhiyun 	clusters_need += clusters_to_alloc;
1721*4882a593Smuzhiyun 
1722*4882a593Smuzhiyun 	di = (struct ocfs2_dinode *)wc->w_di_bh->b_data;
1723*4882a593Smuzhiyun 
1724*4882a593Smuzhiyun 	trace_ocfs2_write_begin_nolock(
1725*4882a593Smuzhiyun 			(unsigned long long)OCFS2_I(inode)->ip_blkno,
1726*4882a593Smuzhiyun 			(long long)i_size_read(inode),
1727*4882a593Smuzhiyun 			le32_to_cpu(di->i_clusters),
1728*4882a593Smuzhiyun 			pos, len, type, mmap_page,
1729*4882a593Smuzhiyun 			clusters_to_alloc, extents_to_split);
1730*4882a593Smuzhiyun 
1731*4882a593Smuzhiyun 	/*
1732*4882a593Smuzhiyun 	 * We set w_target_from, w_target_to here so that
1733*4882a593Smuzhiyun 	 * ocfs2_write_end() knows which range in the target page to
1734*4882a593Smuzhiyun 	 * write out. An allocation requires that we write the entire
1735*4882a593Smuzhiyun 	 * cluster range.
1736*4882a593Smuzhiyun 	 */
1737*4882a593Smuzhiyun 	if (clusters_to_alloc || extents_to_split) {
1738*4882a593Smuzhiyun 		/*
1739*4882a593Smuzhiyun 		 * XXX: We are stretching the limits of
1740*4882a593Smuzhiyun 		 * ocfs2_lock_allocators(). It greatly over-estimates
1741*4882a593Smuzhiyun 		 * the work to be done.
1742*4882a593Smuzhiyun 		 */
1743*4882a593Smuzhiyun 		ocfs2_init_dinode_extent_tree(&et, INODE_CACHE(inode),
1744*4882a593Smuzhiyun 					      wc->w_di_bh);
1745*4882a593Smuzhiyun 		ret = ocfs2_lock_allocators(inode, &et,
1746*4882a593Smuzhiyun 					    clusters_to_alloc, extents_to_split,
1747*4882a593Smuzhiyun 					    &data_ac, &meta_ac);
1748*4882a593Smuzhiyun 		if (ret) {
1749*4882a593Smuzhiyun 			mlog_errno(ret);
1750*4882a593Smuzhiyun 			goto out;
1751*4882a593Smuzhiyun 		}
1752*4882a593Smuzhiyun 
1753*4882a593Smuzhiyun 		if (data_ac)
1754*4882a593Smuzhiyun 			data_ac->ac_resv = &OCFS2_I(inode)->ip_la_data_resv;
1755*4882a593Smuzhiyun 
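		/*
		 * Size the journal transaction for the worst-case extent
		 * tree growth implied by this allocation.
		 */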
1756*4882a593Smuzhiyun 		credits = ocfs2_calc_extend_credits(inode->i_sb,
1757*4882a593Smuzhiyun 						    &di->id2.i_list);
1758*4882a593Smuzhiyun 	} else if (type == OCFS2_WRITE_DIRECT)
1759*4882a593Smuzhiyun 		/* A direct write need not start a transaction if no extents are allocated. */
1760*4882a593Smuzhiyun 		goto success;
1761*4882a593Smuzhiyun 
1762*4882a593Smuzhiyun 	/*
1763*4882a593Smuzhiyun 	 * We have to zero sparse allocated clusters, unwritten extent clusters,
1764*4882a593Smuzhiyun 	 * and non-sparse clusters we just extended.  For non-sparse writes,
1765*4882a593Smuzhiyun 	 * we know zeros will only be needed in the first and/or last cluster.
1766*4882a593Smuzhiyun 	 */
1767*4882a593Smuzhiyun 	if (wc->w_clen && (wc->w_desc[0].c_needs_zero ||
1768*4882a593Smuzhiyun 			   wc->w_desc[wc->w_clen - 1].c_needs_zero))
1769*4882a593Smuzhiyun 		cluster_of_pages = 1;
1770*4882a593Smuzhiyun 	else
1771*4882a593Smuzhiyun 		cluster_of_pages = 0;
1772*4882a593Smuzhiyun 
1773*4882a593Smuzhiyun 	ocfs2_set_target_boundaries(osb, wc, pos, len, cluster_of_pages);
1774*4882a593Smuzhiyun 
1775*4882a593Smuzhiyun 	handle = ocfs2_start_trans(osb, credits);
1776*4882a593Smuzhiyun 	if (IS_ERR(handle)) {
1777*4882a593Smuzhiyun 		ret = PTR_ERR(handle);
1778*4882a593Smuzhiyun 		mlog_errno(ret);
1779*4882a593Smuzhiyun 		goto out;
1780*4882a593Smuzhiyun 	}
1781*4882a593Smuzhiyun 
1782*4882a593Smuzhiyun 	wc->w_handle = handle;
1783*4882a593Smuzhiyun 
1784*4882a593Smuzhiyun 	if (clusters_to_alloc) {
1785*4882a593Smuzhiyun 		ret = dquot_alloc_space_nodirty(inode,
1786*4882a593Smuzhiyun 			ocfs2_clusters_to_bytes(osb->sb, clusters_to_alloc));
1787*4882a593Smuzhiyun 		if (ret)
1788*4882a593Smuzhiyun 			goto out_commit;
1789*4882a593Smuzhiyun 	}
1790*4882a593Smuzhiyun 
1791*4882a593Smuzhiyun 	ret = ocfs2_journal_access_di(handle, INODE_CACHE(inode), wc->w_di_bh,
1792*4882a593Smuzhiyun 				      OCFS2_JOURNAL_ACCESS_WRITE);
1793*4882a593Smuzhiyun 	if (ret) {
1794*4882a593Smuzhiyun 		mlog_errno(ret);
1795*4882a593Smuzhiyun 		goto out_quota;
1796*4882a593Smuzhiyun 	}
1797*4882a593Smuzhiyun 
1798*4882a593Smuzhiyun 	/*
1799*4882a593Smuzhiyun 	 * Fill our page array first. That way we've grabbed enough so
1800*4882a593Smuzhiyun 	 * that we can zero and flush if we error after adding the
1801*4882a593Smuzhiyun 	 * extent.
1802*4882a593Smuzhiyun 	 */
1803*4882a593Smuzhiyun 	ret = ocfs2_grab_pages_for_write(mapping, wc, wc->w_cpos, pos, len,
1804*4882a593Smuzhiyun 					 cluster_of_pages, mmap_page);
1805*4882a593Smuzhiyun 	if (ret && ret != -EAGAIN) {
1806*4882a593Smuzhiyun 		mlog_errno(ret);
1807*4882a593Smuzhiyun 		goto out_quota;
1808*4882a593Smuzhiyun 	}
1809*4882a593Smuzhiyun 
1810*4882a593Smuzhiyun 	/*
1811*4882a593Smuzhiyun 	 * ocfs2_grab_pages_for_write() returns -EAGAIN if it could not lock
1812*4882a593Smuzhiyun 	 * the target page. In this case, we exit with no error and no target
1813*4882a593Smuzhiyun 	 * page. This will trigger the caller, page_mkwrite(), to re-try
1814*4882a593Smuzhiyun 	 * the operation.
1815*4882a593Smuzhiyun 	 */
1816*4882a593Smuzhiyun 	if (ret == -EAGAIN) {
1817*4882a593Smuzhiyun 		BUG_ON(wc->w_target_page);
1818*4882a593Smuzhiyun 		ret = 0;
1819*4882a593Smuzhiyun 		goto out_quota;
1820*4882a593Smuzhiyun 	}
1821*4882a593Smuzhiyun 
1822*4882a593Smuzhiyun 	ret = ocfs2_write_cluster_by_desc(mapping, data_ac, meta_ac, wc, pos,
1823*4882a593Smuzhiyun 					  len);
1824*4882a593Smuzhiyun 	if (ret) {
1825*4882a593Smuzhiyun 		mlog_errno(ret);
1826*4882a593Smuzhiyun 		goto out_quota;
1827*4882a593Smuzhiyun 	}
1828*4882a593Smuzhiyun 
1829*4882a593Smuzhiyun 	if (data_ac)
1830*4882a593Smuzhiyun 		ocfs2_free_alloc_context(data_ac);
1831*4882a593Smuzhiyun 	if (meta_ac)
1832*4882a593Smuzhiyun 		ocfs2_free_alloc_context(meta_ac);
1833*4882a593Smuzhiyun 
1834*4882a593Smuzhiyun success:
1835*4882a593Smuzhiyun 	if (pagep)
1836*4882a593Smuzhiyun 		*pagep = wc->w_target_page;
1837*4882a593Smuzhiyun 	*fsdata = wc;
1838*4882a593Smuzhiyun 	return 0;
1839*4882a593Smuzhiyun out_quota:
1840*4882a593Smuzhiyun 	if (clusters_to_alloc)
1841*4882a593Smuzhiyun 		dquot_free_space(inode,
1842*4882a593Smuzhiyun 			  ocfs2_clusters_to_bytes(osb->sb, clusters_to_alloc));
1843*4882a593Smuzhiyun out_commit:
1844*4882a593Smuzhiyun 	ocfs2_commit_trans(osb, handle);
1845*4882a593Smuzhiyun 
1846*4882a593Smuzhiyun out:
1847*4882a593Smuzhiyun 	/*
1848*4882a593Smuzhiyun 	 * The mmapped page won't be unlocked in ocfs2_free_write_ctxt(),
1849*4882a593Smuzhiyun 	 * even in case of error here like ENOSPC and ENOMEM. So, we need
1850*4882a593Smuzhiyun 	 * to unlock the target page manually to prevent deadlocks when
1851*4882a593Smuzhiyun 	 * retrying on ENOSPC, or when returning non-VM_FAULT_LOCKED
1852*4882a593Smuzhiyun 	 * to VM code.
1853*4882a593Smuzhiyun 	 */
1854*4882a593Smuzhiyun 	if (wc->w_target_locked)
1855*4882a593Smuzhiyun 		unlock_page(mmap_page);
1856*4882a593Smuzhiyun 
1857*4882a593Smuzhiyun 	ocfs2_free_write_ctxt(inode, wc);
1858*4882a593Smuzhiyun 
1859*4882a593Smuzhiyun 	if (data_ac) {
1860*4882a593Smuzhiyun 		ocfs2_free_alloc_context(data_ac);
1861*4882a593Smuzhiyun 		data_ac = NULL;
1862*4882a593Smuzhiyun 	}
1863*4882a593Smuzhiyun 	if (meta_ac) {
1864*4882a593Smuzhiyun 		ocfs2_free_alloc_context(meta_ac);
1865*4882a593Smuzhiyun 		meta_ac = NULL;
1866*4882a593Smuzhiyun 	}
1867*4882a593Smuzhiyun 
1868*4882a593Smuzhiyun 	if (ret == -ENOSPC && try_free) {
1869*4882a593Smuzhiyun 		/*
1870*4882a593Smuzhiyun 		 * Try to flush the truncate log so that enough clusters
1871*4882a593Smuzhiyun 		 * become free for the allocation.
1872*4882a593Smuzhiyun 		 */
1873*4882a593Smuzhiyun 		try_free = 0;
1874*4882a593Smuzhiyun 
1875*4882a593Smuzhiyun 		ret1 = ocfs2_try_to_free_truncate_log(osb, clusters_need);
1876*4882a593Smuzhiyun 		if (ret1 == 1)
1877*4882a593Smuzhiyun 			goto try_again;
1878*4882a593Smuzhiyun 
1879*4882a593Smuzhiyun 		if (ret1 < 0)
1880*4882a593Smuzhiyun 			mlog_errno(ret1);
1881*4882a593Smuzhiyun 	}
1882*4882a593Smuzhiyun 
1883*4882a593Smuzhiyun 	return ret;
1884*4882a593Smuzhiyun }
1885*4882a593Smuzhiyun 
1886*4882a593Smuzhiyun static int ocfs2_write_begin(struct file *file, struct address_space *mapping,
1887*4882a593Smuzhiyun 			     loff_t pos, unsigned len, unsigned flags,
1888*4882a593Smuzhiyun 			     struct page **pagep, void **fsdata)
1889*4882a593Smuzhiyun {
1890*4882a593Smuzhiyun 	int ret;
1891*4882a593Smuzhiyun 	struct buffer_head *di_bh = NULL;
1892*4882a593Smuzhiyun 	struct inode *inode = mapping->host;
1893*4882a593Smuzhiyun 
1894*4882a593Smuzhiyun 	ret = ocfs2_inode_lock(inode, &di_bh, 1);
1895*4882a593Smuzhiyun 	if (ret) {
1896*4882a593Smuzhiyun 		mlog_errno(ret);
1897*4882a593Smuzhiyun 		return ret;
1898*4882a593Smuzhiyun 	}
1899*4882a593Smuzhiyun 
1900*4882a593Smuzhiyun 	/*
1901*4882a593Smuzhiyun 	 * Take alloc sem here to prevent concurrent lookups. That way
1902*4882a593Smuzhiyun 	 * the mapping, zeroing and tree manipulation within
1903*4882a593Smuzhiyun 	 * ocfs2_write() will be safe against ->readpage(). This
1904*4882a593Smuzhiyun 	 * should also serve to lock out allocation from a shared
1905*4882a593Smuzhiyun 	 * writeable region.
1906*4882a593Smuzhiyun 	 */
1907*4882a593Smuzhiyun 	down_write(&OCFS2_I(inode)->ip_alloc_sem);
1908*4882a593Smuzhiyun 
1909*4882a593Smuzhiyun 	ret = ocfs2_write_begin_nolock(mapping, pos, len, OCFS2_WRITE_BUFFER,
1910*4882a593Smuzhiyun 				       pagep, fsdata, di_bh, NULL);
1911*4882a593Smuzhiyun 	if (ret) {
1912*4882a593Smuzhiyun 		mlog_errno(ret);
1913*4882a593Smuzhiyun 		goto out_fail;
1914*4882a593Smuzhiyun 	}
1915*4882a593Smuzhiyun 
1916*4882a593Smuzhiyun 	brelse(di_bh);
1917*4882a593Smuzhiyun 
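	/*
	 * On success both ip_alloc_sem and the cluster lock remain held;
	 * they are dropped in ocfs2_write_end() once the copy has finished.
	 */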
1918*4882a593Smuzhiyun 	return 0;
1919*4882a593Smuzhiyun 
1920*4882a593Smuzhiyun out_fail:
1921*4882a593Smuzhiyun 	up_write(&OCFS2_I(inode)->ip_alloc_sem);
1922*4882a593Smuzhiyun 
1923*4882a593Smuzhiyun 	brelse(di_bh);
1924*4882a593Smuzhiyun 	ocfs2_inode_unlock(inode, 1);
1925*4882a593Smuzhiyun 
1926*4882a593Smuzhiyun 	return ret;
1927*4882a593Smuzhiyun }
1928*4882a593Smuzhiyun 
1929*4882a593Smuzhiyun static void ocfs2_write_end_inline(struct inode *inode, loff_t pos,
1930*4882a593Smuzhiyun 				   unsigned len, unsigned *copied,
1931*4882a593Smuzhiyun 				   struct ocfs2_dinode *di,
1932*4882a593Smuzhiyun 				   struct ocfs2_write_ctxt *wc)
1933*4882a593Smuzhiyun {
1934*4882a593Smuzhiyun 	void *kaddr;
1935*4882a593Smuzhiyun 
1936*4882a593Smuzhiyun 	if (unlikely(*copied < len)) {
1937*4882a593Smuzhiyun 		if (!PageUptodate(wc->w_target_page)) {
1938*4882a593Smuzhiyun 			*copied = 0;
1939*4882a593Smuzhiyun 			return;
1940*4882a593Smuzhiyun 		}
1941*4882a593Smuzhiyun 	}
1942*4882a593Smuzhiyun 
1943*4882a593Smuzhiyun 	kaddr = kmap_atomic(wc->w_target_page);
1944*4882a593Smuzhiyun 	memcpy(di->id2.i_data.id_data + pos, kaddr + pos, *copied);
1945*4882a593Smuzhiyun 	kunmap_atomic(kaddr);
1946*4882a593Smuzhiyun 
1947*4882a593Smuzhiyun 	trace_ocfs2_write_end_inline(
1948*4882a593Smuzhiyun 	     (unsigned long long)OCFS2_I(inode)->ip_blkno,
1949*4882a593Smuzhiyun 	     (unsigned long long)pos, *copied,
1950*4882a593Smuzhiyun 	     le16_to_cpu(di->id2.i_data.id_count),
1951*4882a593Smuzhiyun 	     le16_to_cpu(di->i_dyn_features));
1952*4882a593Smuzhiyun }
1953*4882a593Smuzhiyun 
1954*4882a593Smuzhiyun int ocfs2_write_end_nolock(struct address_space *mapping,
1955*4882a593Smuzhiyun 			   loff_t pos, unsigned len, unsigned copied, void *fsdata)
1956*4882a593Smuzhiyun {
1957*4882a593Smuzhiyun 	int i, ret;
1958*4882a593Smuzhiyun 	unsigned from, to, start = pos & (PAGE_SIZE - 1);
1959*4882a593Smuzhiyun 	struct inode *inode = mapping->host;
1960*4882a593Smuzhiyun 	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
1961*4882a593Smuzhiyun 	struct ocfs2_write_ctxt *wc = fsdata;
1962*4882a593Smuzhiyun 	struct ocfs2_dinode *di = (struct ocfs2_dinode *)wc->w_di_bh->b_data;
1963*4882a593Smuzhiyun 	handle_t *handle = wc->w_handle;
1964*4882a593Smuzhiyun 	struct page *tmppage;
1965*4882a593Smuzhiyun 
1966*4882a593Smuzhiyun 	BUG_ON(!list_empty(&wc->w_unwritten_list));
1967*4882a593Smuzhiyun 
1968*4882a593Smuzhiyun 	if (handle) {
1969*4882a593Smuzhiyun 		ret = ocfs2_journal_access_di(handle, INODE_CACHE(inode),
1970*4882a593Smuzhiyun 				wc->w_di_bh, OCFS2_JOURNAL_ACCESS_WRITE);
1971*4882a593Smuzhiyun 		if (ret) {
1972*4882a593Smuzhiyun 			copied = ret;
1973*4882a593Smuzhiyun 			mlog_errno(ret);
1974*4882a593Smuzhiyun 			goto out;
1975*4882a593Smuzhiyun 		}
1976*4882a593Smuzhiyun 	}
1977*4882a593Smuzhiyun 
1978*4882a593Smuzhiyun 	if (OCFS2_I(inode)->ip_dyn_features & OCFS2_INLINE_DATA_FL) {
1979*4882a593Smuzhiyun 		ocfs2_write_end_inline(inode, pos, len, &copied, di, wc);
1980*4882a593Smuzhiyun 		goto out_write_size;
1981*4882a593Smuzhiyun 	}
1982*4882a593Smuzhiyun 
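	/*
	 * Handle a short copy from userspace: if the target page never
	 * became uptodate, treat the copy as failed entirely; any newly
	 * instantiated buffers in the uncopied range are zeroed below so
	 * stale data is not exposed.
	 */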
1983*4882a593Smuzhiyun 	if (unlikely(copied < len) && wc->w_target_page) {
1984*4882a593Smuzhiyun 		if (!PageUptodate(wc->w_target_page))
1985*4882a593Smuzhiyun 			copied = 0;
1986*4882a593Smuzhiyun 
1987*4882a593Smuzhiyun 		ocfs2_zero_new_buffers(wc->w_target_page, start+copied,
1988*4882a593Smuzhiyun 				       start+len);
1989*4882a593Smuzhiyun 	}
1990*4882a593Smuzhiyun 	if (wc->w_target_page)
1991*4882a593Smuzhiyun 		flush_dcache_page(wc->w_target_page);
1992*4882a593Smuzhiyun 
1993*4882a593Smuzhiyun 	for(i = 0; i < wc->w_num_pages; i++) {
1994*4882a593Smuzhiyun 		tmppage = wc->w_pages[i];
1995*4882a593Smuzhiyun 
1996*4882a593Smuzhiyun 		/* This is the direct io target page. */
1997*4882a593Smuzhiyun 		if (tmppage == NULL)
1998*4882a593Smuzhiyun 			continue;
1999*4882a593Smuzhiyun 
2000*4882a593Smuzhiyun 		if (tmppage == wc->w_target_page) {
2001*4882a593Smuzhiyun 			from = wc->w_target_from;
2002*4882a593Smuzhiyun 			to = wc->w_target_to;
2003*4882a593Smuzhiyun 
2004*4882a593Smuzhiyun 			BUG_ON(from > PAGE_SIZE ||
2005*4882a593Smuzhiyun 			       to > PAGE_SIZE ||
2006*4882a593Smuzhiyun 			       to < from);
2007*4882a593Smuzhiyun 		} else {
2008*4882a593Smuzhiyun 			/*
2009*4882a593Smuzhiyun 			 * Pages adjacent to the target (if any) imply
2010*4882a593Smuzhiyun 			 * a hole-filling write in which case we want
2011*4882a593Smuzhiyun 			 * to flush their entire range.
2012*4882a593Smuzhiyun 			 */
2013*4882a593Smuzhiyun 			from = 0;
2014*4882a593Smuzhiyun 			to = PAGE_SIZE;
2015*4882a593Smuzhiyun 		}
2016*4882a593Smuzhiyun 
2017*4882a593Smuzhiyun 		if (page_has_buffers(tmppage)) {
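			/*
			 * In ordered data mode the written range must reach
			 * disk before the transaction updating the inode
			 * commits, so register the range with the journal.
			 */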
2018*4882a593Smuzhiyun 			if (handle && ocfs2_should_order_data(inode)) {
2019*4882a593Smuzhiyun 				loff_t start_byte =
2020*4882a593Smuzhiyun 					((loff_t)tmppage->index << PAGE_SHIFT) +
2021*4882a593Smuzhiyun 					from;
2022*4882a593Smuzhiyun 				loff_t length = to - from;
2023*4882a593Smuzhiyun 				ocfs2_jbd2_inode_add_write(handle, inode,
2024*4882a593Smuzhiyun 							   start_byte, length);
2025*4882a593Smuzhiyun 			}
2026*4882a593Smuzhiyun 			block_commit_write(tmppage, from, to);
2027*4882a593Smuzhiyun 		}
2028*4882a593Smuzhiyun 	}
2029*4882a593Smuzhiyun 
2030*4882a593Smuzhiyun out_write_size:
2031*4882a593Smuzhiyun 	/* Direct io does not update i_size here. */
2032*4882a593Smuzhiyun 	if (wc->w_type != OCFS2_WRITE_DIRECT) {
2033*4882a593Smuzhiyun 		pos += copied;
2034*4882a593Smuzhiyun 		if (pos > i_size_read(inode)) {
2035*4882a593Smuzhiyun 			i_size_write(inode, pos);
2036*4882a593Smuzhiyun 			mark_inode_dirty(inode);
2037*4882a593Smuzhiyun 		}
2038*4882a593Smuzhiyun 		inode->i_blocks = ocfs2_inode_sector_count(inode);
2039*4882a593Smuzhiyun 		di->i_size = cpu_to_le64((u64)i_size_read(inode));
2040*4882a593Smuzhiyun 		inode->i_mtime = inode->i_ctime = current_time(inode);
2041*4882a593Smuzhiyun 		di->i_mtime = di->i_ctime = cpu_to_le64(inode->i_mtime.tv_sec);
2042*4882a593Smuzhiyun 		di->i_mtime_nsec = di->i_ctime_nsec = cpu_to_le32(inode->i_mtime.tv_nsec);
2043*4882a593Smuzhiyun 		if (handle)
2044*4882a593Smuzhiyun 			ocfs2_update_inode_fsync_trans(handle, inode, 1);
2045*4882a593Smuzhiyun 	}
2046*4882a593Smuzhiyun 	if (handle)
2047*4882a593Smuzhiyun 		ocfs2_journal_dirty(handle, wc->w_di_bh);
2048*4882a593Smuzhiyun 
2049*4882a593Smuzhiyun out:
2050*4882a593Smuzhiyun 	/* Unlock the pages before running deallocs, since dealloc needs to
2051*4882a593Smuzhiyun 	 * take the j_trans_barrier lock; otherwise we can deadlock, because
2052*4882a593Smuzhiyun 	 * the journal commit thread holds that lock and asks for the page
2053*4882a593Smuzhiyun 	 * lock when flushing the data. Do it here to preserve the unlock order.
2054*4882a593Smuzhiyun 	 */
2055*4882a593Smuzhiyun 	ocfs2_unlock_pages(wc);
2056*4882a593Smuzhiyun 
2057*4882a593Smuzhiyun 	if (handle)
2058*4882a593Smuzhiyun 		ocfs2_commit_trans(osb, handle);
2059*4882a593Smuzhiyun 
2060*4882a593Smuzhiyun 	ocfs2_run_deallocs(osb, &wc->w_dealloc);
2061*4882a593Smuzhiyun 
2062*4882a593Smuzhiyun 	brelse(wc->w_di_bh);
2063*4882a593Smuzhiyun 	kfree(wc);
2064*4882a593Smuzhiyun 
2065*4882a593Smuzhiyun 	return copied;
2066*4882a593Smuzhiyun }
2067*4882a593Smuzhiyun 
2068*4882a593Smuzhiyun static int ocfs2_write_end(struct file *file, struct address_space *mapping,
2069*4882a593Smuzhiyun 			   loff_t pos, unsigned len, unsigned copied,
2070*4882a593Smuzhiyun 			   struct page *page, void *fsdata)
2071*4882a593Smuzhiyun {
2072*4882a593Smuzhiyun 	int ret;
2073*4882a593Smuzhiyun 	struct inode *inode = mapping->host;
2074*4882a593Smuzhiyun 
2075*4882a593Smuzhiyun 	ret = ocfs2_write_end_nolock(mapping, pos, len, copied, fsdata);
2076*4882a593Smuzhiyun 
2077*4882a593Smuzhiyun 	up_write(&OCFS2_I(inode)->ip_alloc_sem);
2078*4882a593Smuzhiyun 	ocfs2_inode_unlock(inode, 1);
2079*4882a593Smuzhiyun 
2080*4882a593Smuzhiyun 	return ret;
2081*4882a593Smuzhiyun }
2082*4882a593Smuzhiyun 
2083*4882a593Smuzhiyun struct ocfs2_dio_write_ctxt {
2084*4882a593Smuzhiyun 	struct list_head	dw_zero_list;
2085*4882a593Smuzhiyun 	unsigned		dw_zero_count;
2086*4882a593Smuzhiyun 	int			dw_orphaned;
2087*4882a593Smuzhiyun 	pid_t			dw_writer_pid;
2088*4882a593Smuzhiyun };
2089*4882a593Smuzhiyun 
2090*4882a593Smuzhiyun static struct ocfs2_dio_write_ctxt *
2091*4882a593Smuzhiyun ocfs2_dio_alloc_write_ctx(struct buffer_head *bh, int *alloc)
2092*4882a593Smuzhiyun {
2093*4882a593Smuzhiyun 	struct ocfs2_dio_write_ctxt *dwc = NULL;
2094*4882a593Smuzhiyun 
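	/*
	 * A multi-cluster dio write calls the get_block callback repeatedly
	 * with the same buffer_head, so a context stashed in b_private by an
	 * earlier call is simply reused.
	 */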
2095*4882a593Smuzhiyun 	if (bh->b_private)
2096*4882a593Smuzhiyun 		return bh->b_private;
2097*4882a593Smuzhiyun 
2098*4882a593Smuzhiyun 	dwc = kmalloc(sizeof(struct ocfs2_dio_write_ctxt), GFP_NOFS);
2099*4882a593Smuzhiyun 	if (dwc == NULL)
2100*4882a593Smuzhiyun 		return NULL;
2101*4882a593Smuzhiyun 	INIT_LIST_HEAD(&dwc->dw_zero_list);
2102*4882a593Smuzhiyun 	dwc->dw_zero_count = 0;
2103*4882a593Smuzhiyun 	dwc->dw_orphaned = 0;
2104*4882a593Smuzhiyun 	dwc->dw_writer_pid = task_pid_nr(current);
2105*4882a593Smuzhiyun 	bh->b_private = dwc;
2106*4882a593Smuzhiyun 	*alloc = 1;
2107*4882a593Smuzhiyun 
2108*4882a593Smuzhiyun 	return dwc;
2109*4882a593Smuzhiyun }
2110*4882a593Smuzhiyun 
2111*4882a593Smuzhiyun static void ocfs2_dio_free_write_ctx(struct inode *inode,
2112*4882a593Smuzhiyun 				     struct ocfs2_dio_write_ctxt *dwc)
2113*4882a593Smuzhiyun {
2114*4882a593Smuzhiyun 	ocfs2_free_unwritten_list(inode, &dwc->dw_zero_list);
2115*4882a593Smuzhiyun 	kfree(dwc);
2116*4882a593Smuzhiyun }
2117*4882a593Smuzhiyun 
2118*4882a593Smuzhiyun /*
2119*4882a593Smuzhiyun  * TODO: Make this into a generic get_blocks function.
2120*4882a593Smuzhiyun  *
2121*4882a593Smuzhiyun  * From do_direct_io in direct-io.c:
2122*4882a593Smuzhiyun  *  "So what we do is to permit the ->get_blocks function to populate
2123*4882a593Smuzhiyun  *   bh.b_size with the size of IO which is permitted at this offset and
2124*4882a593Smuzhiyun  *   this i_blkbits."
2125*4882a593Smuzhiyun  *
2126*4882a593Smuzhiyun  * This function is called directly from get_more_blocks in direct-io.c.
2127*4882a593Smuzhiyun  *
2128*4882a593Smuzhiyun  * called like this: dio->get_blocks(dio->inode, fs_startblk,
2129*4882a593Smuzhiyun  * 					fs_count, map_bh, dio->rw == WRITE);
2130*4882a593Smuzhiyun  */
2131*4882a593Smuzhiyun static int ocfs2_dio_wr_get_block(struct inode *inode, sector_t iblock,
2132*4882a593Smuzhiyun 			       struct buffer_head *bh_result, int create)
2133*4882a593Smuzhiyun {
2134*4882a593Smuzhiyun 	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
2135*4882a593Smuzhiyun 	struct ocfs2_inode_info *oi = OCFS2_I(inode);
2136*4882a593Smuzhiyun 	struct ocfs2_write_ctxt *wc;
2137*4882a593Smuzhiyun 	struct ocfs2_write_cluster_desc *desc = NULL;
2138*4882a593Smuzhiyun 	struct ocfs2_dio_write_ctxt *dwc = NULL;
2139*4882a593Smuzhiyun 	struct buffer_head *di_bh = NULL;
2140*4882a593Smuzhiyun 	u64 p_blkno;
2141*4882a593Smuzhiyun 	unsigned int i_blkbits = inode->i_sb->s_blocksize_bits;
2142*4882a593Smuzhiyun 	loff_t pos = iblock << i_blkbits;
2143*4882a593Smuzhiyun 	sector_t endblk = (i_size_read(inode) - 1) >> i_blkbits;
2144*4882a593Smuzhiyun 	unsigned len, total_len = bh_result->b_size;
2145*4882a593Smuzhiyun 	int ret = 0, first_get_block = 0;
2146*4882a593Smuzhiyun 
2147*4882a593Smuzhiyun 	len = osb->s_clustersize - (pos & (osb->s_clustersize - 1));
2148*4882a593Smuzhiyun 	len = min(total_len, len);
2149*4882a593Smuzhiyun 
2150*4882a593Smuzhiyun 	/*
2151*4882a593Smuzhiyun 	 * bh_result->b_size is the count computed in get_more_blocks from the
2152*4882a593Smuzhiyun 	 * write's "pos" and "end"; we may need to map twice to return different
2153*4882a593Smuzhiyun 	 * buffer state: 1. the area within the file size, NEW not set;
2154*4882a593Smuzhiyun 	 * 2. the area beyond the file size, NEW set.
2155*4882a593Smuzhiyun 	 *
2156*4882a593Smuzhiyun 	 *		   iblock    endblk
2157*4882a593Smuzhiyun 	 * |--------|---------|---------|---------
2158*4882a593Smuzhiyun 	 * |<-------area in file------->|
2159*4882a593Smuzhiyun 	 */
2160*4882a593Smuzhiyun 
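	/*
	 * If the request straddles i_size, clamp this mapping at the last
	 * block inside the file; the part beyond i_size is mapped by a
	 * later call with the NEW bit set.
	 */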
2161*4882a593Smuzhiyun 	if ((iblock <= endblk) &&
2162*4882a593Smuzhiyun 	    ((iblock + ((len - 1) >> i_blkbits)) > endblk))
2163*4882a593Smuzhiyun 		len = (endblk - iblock + 1) << i_blkbits;
2164*4882a593Smuzhiyun 
2165*4882a593Smuzhiyun 	mlog(0, "get block of %lu at %llu:%u req %u\n",
2166*4882a593Smuzhiyun 			inode->i_ino, pos, len, total_len);
2167*4882a593Smuzhiyun 
2168*4882a593Smuzhiyun 	/*
2169*4882a593Smuzhiyun 	 * Because we may need to change the file size in
2170*4882a593Smuzhiyun 	 * ocfs2_dio_end_io_write(), or add the inode to the orphan dir, we
2171*4882a593Smuzhiyun 	 * cannot take the fast path when the file size will be changed.
2172*4882a593Smuzhiyun 	 */
2173*4882a593Smuzhiyun 	if (pos + total_len <= i_size_read(inode)) {
2174*4882a593Smuzhiyun 
2175*4882a593Smuzhiyun 		/* This is the fast path for re-write. */
2176*4882a593Smuzhiyun 		ret = ocfs2_lock_get_block(inode, iblock, bh_result, create);
2177*4882a593Smuzhiyun 		if (buffer_mapped(bh_result) &&
2178*4882a593Smuzhiyun 		    !buffer_new(bh_result) &&
2179*4882a593Smuzhiyun 		    ret == 0)
2180*4882a593Smuzhiyun 			goto out;
2181*4882a593Smuzhiyun 
2182*4882a593Smuzhiyun 		/* Clear state set by ocfs2_get_block. */
2183*4882a593Smuzhiyun 		bh_result->b_state = 0;
2184*4882a593Smuzhiyun 	}
2185*4882a593Smuzhiyun 
2186*4882a593Smuzhiyun 	dwc = ocfs2_dio_alloc_write_ctx(bh_result, &first_get_block);
2187*4882a593Smuzhiyun 	if (unlikely(dwc == NULL)) {
2188*4882a593Smuzhiyun 		ret = -ENOMEM;
2189*4882a593Smuzhiyun 		mlog_errno(ret);
2190*4882a593Smuzhiyun 		goto out;
2191*4882a593Smuzhiyun 	}
2192*4882a593Smuzhiyun 
2193*4882a593Smuzhiyun 	if (ocfs2_clusters_for_bytes(inode->i_sb, pos + total_len) >
2194*4882a593Smuzhiyun 	    ocfs2_clusters_for_bytes(inode->i_sb, i_size_read(inode)) &&
2195*4882a593Smuzhiyun 	    !dwc->dw_orphaned) {
2196*4882a593Smuzhiyun 		/*
2197*4882a593Smuzhiyun 		 * When we are going to allocate extents beyond the file size,
2198*4882a593Smuzhiyun 		 * add the inode to the orphan dir so that we can reclaim that
2199*4882a593Smuzhiyun 		 * space if the system crashes during the write.
2200*4882a593Smuzhiyun 		 */
2201*4882a593Smuzhiyun 		ret = ocfs2_add_inode_to_orphan(osb, inode);
2202*4882a593Smuzhiyun 		if (ret < 0) {
2203*4882a593Smuzhiyun 			mlog_errno(ret);
2204*4882a593Smuzhiyun 			goto out;
2205*4882a593Smuzhiyun 		}
2206*4882a593Smuzhiyun 		dwc->dw_orphaned = 1;
2207*4882a593Smuzhiyun 	}
2208*4882a593Smuzhiyun 
2209*4882a593Smuzhiyun 	ret = ocfs2_inode_lock(inode, &di_bh, 1);
2210*4882a593Smuzhiyun 	if (ret) {
2211*4882a593Smuzhiyun 		mlog_errno(ret);
2212*4882a593Smuzhiyun 		goto out;
2213*4882a593Smuzhiyun 	}
2214*4882a593Smuzhiyun 
2215*4882a593Smuzhiyun 	down_write(&oi->ip_alloc_sem);
2216*4882a593Smuzhiyun 
2217*4882a593Smuzhiyun 	if (first_get_block) {
2218*4882a593Smuzhiyun 		if (ocfs2_sparse_alloc(osb))
2219*4882a593Smuzhiyun 			ret = ocfs2_zero_tail(inode, di_bh, pos);
2220*4882a593Smuzhiyun 		else
2221*4882a593Smuzhiyun 			ret = ocfs2_expand_nonsparse_inode(inode, di_bh, pos,
2222*4882a593Smuzhiyun 							   total_len, NULL);
2223*4882a593Smuzhiyun 		if (ret < 0) {
2224*4882a593Smuzhiyun 			mlog_errno(ret);
2225*4882a593Smuzhiyun 			goto unlock;
2226*4882a593Smuzhiyun 		}
2227*4882a593Smuzhiyun 	}
2228*4882a593Smuzhiyun 
2229*4882a593Smuzhiyun 	ret = ocfs2_write_begin_nolock(inode->i_mapping, pos, len,
2230*4882a593Smuzhiyun 				       OCFS2_WRITE_DIRECT, NULL,
2231*4882a593Smuzhiyun 				       (void **)&wc, di_bh, NULL);
2232*4882a593Smuzhiyun 	if (ret) {
2233*4882a593Smuzhiyun 		mlog_errno(ret);
2234*4882a593Smuzhiyun 		goto unlock;
2235*4882a593Smuzhiyun 	}
2236*4882a593Smuzhiyun 
2237*4882a593Smuzhiyun 	desc = &wc->w_desc[0];
2238*4882a593Smuzhiyun 
2239*4882a593Smuzhiyun 	p_blkno = ocfs2_clusters_to_blocks(inode->i_sb, desc->c_phys);
2240*4882a593Smuzhiyun 	BUG_ON(p_blkno == 0);
2241*4882a593Smuzhiyun 	p_blkno += iblock & (u64)(ocfs2_clusters_to_blocks(inode->i_sb, 1) - 1);
2242*4882a593Smuzhiyun 
2243*4882a593Smuzhiyun 	map_bh(bh_result, inode->i_sb, p_blkno);
2244*4882a593Smuzhiyun 	bh_result->b_size = len;
2245*4882a593Smuzhiyun 	if (desc->c_needs_zero)
2246*4882a593Smuzhiyun 		set_buffer_new(bh_result);
2247*4882a593Smuzhiyun 
2248*4882a593Smuzhiyun 	if (iblock > endblk)
2249*4882a593Smuzhiyun 		set_buffer_new(bh_result);
2250*4882a593Smuzhiyun 
2251*4882a593Smuzhiyun 	/* end_io may sleep, which must not happen in irq context, so defer
2252*4882a593Smuzhiyun 	 * completion to the dio work queue. */
2253*4882a593Smuzhiyun 	set_buffer_defer_completion(bh_result);
2254*4882a593Smuzhiyun 
2255*4882a593Smuzhiyun 	if (!list_empty(&wc->w_unwritten_list)) {
2256*4882a593Smuzhiyun 		struct ocfs2_unwritten_extent *ue = NULL;
2257*4882a593Smuzhiyun 
2258*4882a593Smuzhiyun 		ue = list_first_entry(&wc->w_unwritten_list,
2259*4882a593Smuzhiyun 				      struct ocfs2_unwritten_extent,
2260*4882a593Smuzhiyun 				      ue_node);
2261*4882a593Smuzhiyun 		BUG_ON(ue->ue_cpos != desc->c_cpos);
2262*4882a593Smuzhiyun 		/* The physical address may still be 0; fill it in now. */
2263*4882a593Smuzhiyun 		ue->ue_phys = desc->c_phys;
2264*4882a593Smuzhiyun 
2265*4882a593Smuzhiyun 		list_splice_tail_init(&wc->w_unwritten_list, &dwc->dw_zero_list);
2266*4882a593Smuzhiyun 		dwc->dw_zero_count += wc->w_unwritten_count;
2267*4882a593Smuzhiyun 	}
2268*4882a593Smuzhiyun 
2269*4882a593Smuzhiyun 	ret = ocfs2_write_end_nolock(inode->i_mapping, pos, len, len, wc);
2270*4882a593Smuzhiyun 	BUG_ON(ret != len);
2271*4882a593Smuzhiyun 	ret = 0;
2272*4882a593Smuzhiyun unlock:
2273*4882a593Smuzhiyun 	up_write(&oi->ip_alloc_sem);
2274*4882a593Smuzhiyun 	ocfs2_inode_unlock(inode, 1);
2275*4882a593Smuzhiyun 	brelse(di_bh);
2276*4882a593Smuzhiyun out:
2277*4882a593Smuzhiyun 	if (ret < 0)
2278*4882a593Smuzhiyun 		ret = -EIO;
2279*4882a593Smuzhiyun 	return ret;
2280*4882a593Smuzhiyun }
2281*4882a593Smuzhiyun 
2282*4882a593Smuzhiyun static int ocfs2_dio_end_io_write(struct inode *inode,
2283*4882a593Smuzhiyun 				  struct ocfs2_dio_write_ctxt *dwc,
2284*4882a593Smuzhiyun 				  loff_t offset,
2285*4882a593Smuzhiyun 				  ssize_t bytes)
2286*4882a593Smuzhiyun {
2287*4882a593Smuzhiyun 	struct ocfs2_cached_dealloc_ctxt dealloc;
2288*4882a593Smuzhiyun 	struct ocfs2_extent_tree et;
2289*4882a593Smuzhiyun 	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
2290*4882a593Smuzhiyun 	struct ocfs2_inode_info *oi = OCFS2_I(inode);
2291*4882a593Smuzhiyun 	struct ocfs2_unwritten_extent *ue = NULL;
2292*4882a593Smuzhiyun 	struct buffer_head *di_bh = NULL;
2293*4882a593Smuzhiyun 	struct ocfs2_dinode *di;
2294*4882a593Smuzhiyun 	struct ocfs2_alloc_context *data_ac = NULL;
2295*4882a593Smuzhiyun 	struct ocfs2_alloc_context *meta_ac = NULL;
2296*4882a593Smuzhiyun 	handle_t *handle = NULL;
2297*4882a593Smuzhiyun 	loff_t end = offset + bytes;
2298*4882a593Smuzhiyun 	int ret = 0, credits = 0;
2299*4882a593Smuzhiyun 
2300*4882a593Smuzhiyun 	ocfs2_init_dealloc_ctxt(&dealloc);
2301*4882a593Smuzhiyun 
2302*4882a593Smuzhiyun 	/* We clear unwritten extents, delete the orphan entry and change
2303*4882a593Smuzhiyun 	 * i_size here.  If none of these is needed, we can skip all this. */
2304*4882a593Smuzhiyun 	if (list_empty(&dwc->dw_zero_list) &&
2305*4882a593Smuzhiyun 	    end <= i_size_read(inode) &&
2306*4882a593Smuzhiyun 	    !dwc->dw_orphaned)
2307*4882a593Smuzhiyun 		goto out;
2308*4882a593Smuzhiyun 
2309*4882a593Smuzhiyun 	ret = ocfs2_inode_lock(inode, &di_bh, 1);
2310*4882a593Smuzhiyun 	if (ret < 0) {
2311*4882a593Smuzhiyun 		mlog_errno(ret);
2312*4882a593Smuzhiyun 		goto out;
2313*4882a593Smuzhiyun 	}
2314*4882a593Smuzhiyun 
2315*4882a593Smuzhiyun 	down_write(&oi->ip_alloc_sem);
2316*4882a593Smuzhiyun 
2317*4882a593Smuzhiyun 	/* Delete the orphan entry before acquiring i_mutex. */
2318*4882a593Smuzhiyun 	if (dwc->dw_orphaned) {
2319*4882a593Smuzhiyun 		BUG_ON(dwc->dw_writer_pid != task_pid_nr(current));
2320*4882a593Smuzhiyun 
2321*4882a593Smuzhiyun 		end = end > i_size_read(inode) ? end : 0;
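		/*
		 * Keep 'end' only when this write grew the file past i_size;
		 * a zero 'end' makes the !!end argument below false, i.e.
		 * ocfs2_del_inode_from_orphan() is asked not to update the
		 * inode size.
		 */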
2322*4882a593Smuzhiyun 
2323*4882a593Smuzhiyun 		ret = ocfs2_del_inode_from_orphan(osb, inode, di_bh,
2324*4882a593Smuzhiyun 				!!end, end);
2325*4882a593Smuzhiyun 		if (ret < 0)
2326*4882a593Smuzhiyun 			mlog_errno(ret);
2327*4882a593Smuzhiyun 	}
2328*4882a593Smuzhiyun 
2329*4882a593Smuzhiyun 	di = (struct ocfs2_dinode *)di_bh->b_data;
2330*4882a593Smuzhiyun 
2331*4882a593Smuzhiyun 	ocfs2_init_dinode_extent_tree(&et, INODE_CACHE(inode), di_bh);
2332*4882a593Smuzhiyun 
2333*4882a593Smuzhiyun 	/* Attach the dealloc context to the extent tree in case we reuse
2334*4882a593Smuzhiyun 	 * extents that have already been unlinked from the current extent
2335*4882a593Smuzhiyun 	 * tree by extent rotation and merging.
2336*4882a593Smuzhiyun 	 */
2337*4882a593Smuzhiyun 	et.et_dealloc = &dealloc;
2338*4882a593Smuzhiyun 
2339*4882a593Smuzhiyun 	ret = ocfs2_lock_allocators(inode, &et, 0, dwc->dw_zero_count*2,
2340*4882a593Smuzhiyun 				    &data_ac, &meta_ac);
2341*4882a593Smuzhiyun 	if (ret) {
2342*4882a593Smuzhiyun 		mlog_errno(ret);
2343*4882a593Smuzhiyun 		goto unlock;
2344*4882a593Smuzhiyun 	}
2345*4882a593Smuzhiyun 
2346*4882a593Smuzhiyun 	credits = ocfs2_calc_extend_credits(inode->i_sb, &di->id2.i_list);
2347*4882a593Smuzhiyun 
2348*4882a593Smuzhiyun 	handle = ocfs2_start_trans(osb, credits);
2349*4882a593Smuzhiyun 	if (IS_ERR(handle)) {
2350*4882a593Smuzhiyun 		ret = PTR_ERR(handle);
2351*4882a593Smuzhiyun 		mlog_errno(ret);
2352*4882a593Smuzhiyun 		goto unlock;
2353*4882a593Smuzhiyun 	}
2354*4882a593Smuzhiyun 	ret = ocfs2_journal_access_di(handle, INODE_CACHE(inode), di_bh,
2355*4882a593Smuzhiyun 				      OCFS2_JOURNAL_ACCESS_WRITE);
2356*4882a593Smuzhiyun 	if (ret) {
2357*4882a593Smuzhiyun 		mlog_errno(ret);
2358*4882a593Smuzhiyun 		goto commit;
2359*4882a593Smuzhiyun 	}
2360*4882a593Smuzhiyun 
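	/*
	 * Each entry on dw_zero_list describes one cluster that was written
	 * through an unwritten extent; clear its unwritten flag one cluster
	 * at a time via ocfs2_mark_extent_written().
	 */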
2361*4882a593Smuzhiyun 	list_for_each_entry(ue, &dwc->dw_zero_list, ue_node) {
2362*4882a593Smuzhiyun 		ret = ocfs2_mark_extent_written(inode, &et, handle,
2363*4882a593Smuzhiyun 						ue->ue_cpos, 1,
2364*4882a593Smuzhiyun 						ue->ue_phys,
2365*4882a593Smuzhiyun 						meta_ac, &dealloc);
2366*4882a593Smuzhiyun 		if (ret < 0) {
2367*4882a593Smuzhiyun 			mlog_errno(ret);
2368*4882a593Smuzhiyun 			break;
2369*4882a593Smuzhiyun 		}
2370*4882a593Smuzhiyun 	}
2371*4882a593Smuzhiyun 
2372*4882a593Smuzhiyun 	if (end > i_size_read(inode)) {
2373*4882a593Smuzhiyun 		ret = ocfs2_set_inode_size(handle, inode, di_bh, end);
2374*4882a593Smuzhiyun 		if (ret < 0)
2375*4882a593Smuzhiyun 			mlog_errno(ret);
2376*4882a593Smuzhiyun 	}
2377*4882a593Smuzhiyun commit:
2378*4882a593Smuzhiyun 	ocfs2_commit_trans(osb, handle);
2379*4882a593Smuzhiyun unlock:
2380*4882a593Smuzhiyun 	up_write(&oi->ip_alloc_sem);
2381*4882a593Smuzhiyun 	ocfs2_inode_unlock(inode, 1);
2382*4882a593Smuzhiyun 	brelse(di_bh);
2383*4882a593Smuzhiyun out:
2384*4882a593Smuzhiyun 	if (data_ac)
2385*4882a593Smuzhiyun 		ocfs2_free_alloc_context(data_ac);
2386*4882a593Smuzhiyun 	if (meta_ac)
2387*4882a593Smuzhiyun 		ocfs2_free_alloc_context(meta_ac);
2388*4882a593Smuzhiyun 	ocfs2_run_deallocs(osb, &dealloc);
2389*4882a593Smuzhiyun 	ocfs2_dio_free_write_ctx(inode, dwc);
2390*4882a593Smuzhiyun 
2391*4882a593Smuzhiyun 	return ret;
2392*4882a593Smuzhiyun }
2393*4882a593Smuzhiyun 
2394*4882a593Smuzhiyun /*
2395*4882a593Smuzhiyun  * ocfs2_dio_end_io is called by the dio core when a dio is finished.  We're
2396*4882a593Smuzhiyun  * particularly interested in the aio/dio case.  We use the rw_lock DLM lock
2397*4882a593Smuzhiyun  * to protect I/O on one node from truncation on another.
2398*4882a593Smuzhiyun  */
2399*4882a593Smuzhiyun static int ocfs2_dio_end_io(struct kiocb *iocb,
2400*4882a593Smuzhiyun 			    loff_t offset,
2401*4882a593Smuzhiyun 			    ssize_t bytes,
2402*4882a593Smuzhiyun 			    void *private)
2403*4882a593Smuzhiyun {
2404*4882a593Smuzhiyun 	struct inode *inode = file_inode(iocb->ki_filp);
2405*4882a593Smuzhiyun 	int level;
2406*4882a593Smuzhiyun 	int ret = 0;
2407*4882a593Smuzhiyun 
2408*4882a593Smuzhiyun 	/* the submitter of this I/O should not have unlocked it before we get here */
2409*4882a593Smuzhiyun 	BUG_ON(!ocfs2_iocb_is_rw_locked(iocb));
2410*4882a593Smuzhiyun 
2411*4882a593Smuzhiyun 	if (bytes <= 0)
2412*4882a593Smuzhiyun 		mlog_ratelimited(ML_ERROR, "Direct IO failed, bytes = %lld",
2413*4882a593Smuzhiyun 				 (long long)bytes);
2414*4882a593Smuzhiyun 	if (private) {
2415*4882a593Smuzhiyun 		if (bytes > 0)
2416*4882a593Smuzhiyun 			ret = ocfs2_dio_end_io_write(inode, private, offset,
2417*4882a593Smuzhiyun 						     bytes);
2418*4882a593Smuzhiyun 		else
2419*4882a593Smuzhiyun 			ocfs2_dio_free_write_ctx(inode, private);
2420*4882a593Smuzhiyun 	}
2421*4882a593Smuzhiyun 
2422*4882a593Smuzhiyun 	ocfs2_iocb_clear_rw_locked(iocb);
2423*4882a593Smuzhiyun 
2424*4882a593Smuzhiyun 	level = ocfs2_iocb_rw_locked_level(iocb);
2425*4882a593Smuzhiyun 	ocfs2_rw_unlock(inode, level);
2426*4882a593Smuzhiyun 	return ret;
2427*4882a593Smuzhiyun }
2428*4882a593Smuzhiyun 
2429*4882a593Smuzhiyun static ssize_t ocfs2_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
2430*4882a593Smuzhiyun {
2431*4882a593Smuzhiyun 	struct file *file = iocb->ki_filp;
2432*4882a593Smuzhiyun 	struct inode *inode = file->f_mapping->host;
2433*4882a593Smuzhiyun 	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
2434*4882a593Smuzhiyun 	get_block_t *get_block;
2435*4882a593Smuzhiyun 
2436*4882a593Smuzhiyun 	/*
2437*4882a593Smuzhiyun 	 * Fall back to buffered I/O if we see an inode without
2438*4882a593Smuzhiyun 	 * extents (inline data).
2439*4882a593Smuzhiyun 	 */
2440*4882a593Smuzhiyun 	if (OCFS2_I(inode)->ip_dyn_features & OCFS2_INLINE_DATA_FL)
2441*4882a593Smuzhiyun 		return 0;
2442*4882a593Smuzhiyun 
2443*4882a593Smuzhiyun 	/* Fall back to buffered I/O if we do not support append dio. */
2444*4882a593Smuzhiyun 	if (iocb->ki_pos + iter->count > i_size_read(inode) &&
2445*4882a593Smuzhiyun 	    !ocfs2_supports_append_dio(osb))
2446*4882a593Smuzhiyun 		return 0;
2447*4882a593Smuzhiyun 
2448*4882a593Smuzhiyun 	if (iov_iter_rw(iter) == READ)
2449*4882a593Smuzhiyun 		get_block = ocfs2_lock_get_block;
2450*4882a593Smuzhiyun 	else
2451*4882a593Smuzhiyun 		get_block = ocfs2_dio_wr_get_block;
2452*4882a593Smuzhiyun 
2453*4882a593Smuzhiyun 	return __blockdev_direct_IO(iocb, inode, inode->i_sb->s_bdev,
2454*4882a593Smuzhiyun 				    iter, get_block,
2455*4882a593Smuzhiyun 				    ocfs2_dio_end_io, NULL, 0);
2456*4882a593Smuzhiyun }
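/*
 * A minimal, illustrative sketch (not part of ocfs2; simplified, and assuming
 * a 5.10-era generic write path) of why returning 0 from ->direct_IO above
 * acts as a "fall back to buffered I/O" signal: the generic write path simply
 * pushes whatever was not written directly through the page cache.
 */
static ssize_t example_dio_with_fallback(struct kiocb *iocb,
					 struct iov_iter *from)
{
	ssize_t direct, buffered;

	/* Try the direct path first; ->direct_IO() may return 0. */
	direct = generic_file_direct_write(iocb, from);
	if (direct < 0 || !iov_iter_count(from))
		return direct;

	/* Anything left over (or everything, if direct was 0) goes buffered. */
	buffered = generic_perform_write(iocb->ki_filp, from, iocb->ki_pos);
	if (buffered < 0)
		return direct ? direct : buffered;

	iocb->ki_pos += buffered;
	return direct + buffered;
}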
2457*4882a593Smuzhiyun 
2458*4882a593Smuzhiyun const struct address_space_operations ocfs2_aops = {
2459*4882a593Smuzhiyun 	.readpage		= ocfs2_readpage,
2460*4882a593Smuzhiyun 	.readahead		= ocfs2_readahead,
2461*4882a593Smuzhiyun 	.writepage		= ocfs2_writepage,
2462*4882a593Smuzhiyun 	.write_begin		= ocfs2_write_begin,
2463*4882a593Smuzhiyun 	.write_end		= ocfs2_write_end,
2464*4882a593Smuzhiyun 	.bmap			= ocfs2_bmap,
2465*4882a593Smuzhiyun 	.direct_IO		= ocfs2_direct_IO,
2466*4882a593Smuzhiyun 	.invalidatepage		= block_invalidatepage,
2467*4882a593Smuzhiyun 	.releasepage		= ocfs2_releasepage,
2468*4882a593Smuzhiyun 	.migratepage		= buffer_migrate_page,
2469*4882a593Smuzhiyun 	.is_partially_uptodate	= block_is_partially_uptodate,
2470*4882a593Smuzhiyun 	.error_remove_page	= generic_error_remove_page,
2471*4882a593Smuzhiyun };
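/*
 * For context, an address_space_operations table like the one above takes
 * effect once it is assigned to an inode's mapping during inode setup; in
 * ocfs2 that assignment (roughly) looks like:
 *
 *	inode->i_mapping->a_ops = &ocfs2_aops;
 */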
2472