// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *   Copyright (C) International Business Machines Corp., 2000-2004
 *   Portions Copyright (C) Christoph Hellwig, 2001-2002
 */

#include <linux/fs.h>
#include <linux/mpage.h>
#include <linux/buffer_head.h>
#include <linux/pagemap.h>
#include <linux/quotaops.h>
#include <linux/uio.h>
#include <linux/writeback.h>
#include "jfs_incore.h"
#include "jfs_inode.h"
#include "jfs_filsys.h"
#include "jfs_imap.h"
#include "jfs_extent.h"
#include "jfs_unicode.h"
#include "jfs_debug.h"
#include "jfs_dmap.h"


struct inode *jfs_iget(struct super_block *sb, unsigned long ino)
{
	struct inode *inode;
	int ret;

	inode = iget_locked(sb, ino);
	if (!inode)
		return ERR_PTR(-ENOMEM);
	if (!(inode->i_state & I_NEW))
		return inode;

	ret = diRead(inode);
	if (ret < 0) {
		iget_failed(inode);
		return ERR_PTR(ret);
	}

	if (S_ISREG(inode->i_mode)) {
		inode->i_op = &jfs_file_inode_operations;
		inode->i_fop = &jfs_file_operations;
		inode->i_mapping->a_ops = &jfs_aops;
	} else if (S_ISDIR(inode->i_mode)) {
		inode->i_op = &jfs_dir_inode_operations;
		inode->i_fop = &jfs_dir_operations;
	} else if (S_ISLNK(inode->i_mode)) {
		if (inode->i_size >= IDATASIZE) {
			inode->i_op = &page_symlink_inode_operations;
			inode_nohighmem(inode);
			inode->i_mapping->a_ops = &jfs_aops;
		} else {
			inode->i_op = &jfs_fast_symlink_inode_operations;
			inode->i_link = JFS_IP(inode)->i_inline;
			/*
			 * The inline data should be null-terminated, but
			 * don't let on-disk corruption crash the kernel
			 */
			inode->i_link[inode->i_size] = '\0';
		}
	} else {
		inode->i_op = &jfs_file_inode_operations;
		init_special_inode(inode, inode->i_mode, inode->i_rdev);
	}
	unlock_new_inode(inode);
	return inode;
}
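
/*
 * Illustrative sketch (not part of the original file): a typical caller,
 * such as a directory lookup, resolves an inode number to an in-core
 * inode through jfs_iget() and must handle its ERR_PTR convention.
 * example_lookup() and the pre-filled "inum" are assumptions, loosely
 * modeled on jfs_lookup() in namei.c rather than copied from it.
 */
#if 0
static struct dentry *example_lookup(struct inode *dip, struct dentry *dentry)
{
	ino_t inum = 0;		/* assume a directory search filled this in */
	struct inode *ip;

	ip = jfs_iget(dip->i_sb, inum);
	if (IS_ERR(ip))
		return ERR_CAST(ip);	/* propagate -ENOMEM, -EIO, ... */

	return d_splice_alias(ip, dentry);
}
#endif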

/*
 * Workhorse of both fsync & write_inode
 */
int jfs_commit_inode(struct inode *inode, int wait)
{
	int rc = 0;
	tid_t tid;
	static int noisy = 5;

	jfs_info("In jfs_commit_inode, inode = 0x%p", inode);

	/*
	 * Don't commit if inode has been committed since last being
	 * marked dirty, or if it has been deleted.
	 */
	if (inode->i_nlink == 0 || !test_cflag(COMMIT_Dirty, inode))
		return 0;

	if (isReadOnly(inode)) {
		/* kernel allows writes to devices on read-only
		 * partitions and may think inode is dirty
		 */
		if (!special_file(inode->i_mode) && noisy) {
			jfs_err("jfs_commit_inode(0x%p) called on read-only volume",
				inode);
			jfs_err("Is remount racy?");
			noisy--;
		}
		return 0;
	}

	tid = txBegin(inode->i_sb, COMMIT_INODE);
	mutex_lock(&JFS_IP(inode)->commit_mutex);

	/*
	 * Retest inode state after taking commit_mutex
	 */
	if (inode->i_nlink && test_cflag(COMMIT_Dirty, inode))
		rc = txCommit(tid, 1, &inode, wait ? COMMIT_SYNC : 0);

	txEnd(tid);
	mutex_unlock(&JFS_IP(inode)->commit_mutex);
	return rc;
}
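
/*
 * Illustrative sketch (not part of the original file): the fsync half of
 * the "workhorse" comment above. Loosely modeled on jfs_fsync() in
 * file.c; the locking and I_DIRTY shortcuts of the real function are
 * simplified away, so treat the body as an assumption.
 */
#if 0
static int example_fsync(struct file *file, loff_t start, loff_t end,
			 int datasync)
{
	struct inode *inode = file->f_mapping->host;
	int rc;

	/* flush dirty data pages first */
	rc = file_write_and_wait_range(file, start, end);
	if (rc)
		return rc;

	/* wait == 1: the commit must reach the journal before we return */
	rc = jfs_commit_inode(inode, 1);
	return rc ? -EIO : 0;
}
#endif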

int jfs_write_inode(struct inode *inode, struct writeback_control *wbc)
{
	int wait = wbc->sync_mode == WB_SYNC_ALL;

	if (inode->i_nlink == 0)
		return 0;
	/*
	 * If COMMIT_DIRTY is not set, the inode isn't really dirty.
	 * It has been committed since the last change, but was still
	 * on the dirty inode list.
	 */
	if (!test_cflag(COMMIT_Dirty, inode)) {
		/* Make sure committed changes hit the disk */
		jfs_flush_journal(JFS_SBI(inode->i_sb)->log, wait);
		return 0;
	}

	if (jfs_commit_inode(inode, wait)) {
		jfs_err("jfs_write_inode: jfs_commit_inode failed!");
		return -EIO;
	} else
		return 0;
}

void jfs_evict_inode(struct inode *inode)
{
	struct jfs_inode_info *ji = JFS_IP(inode);

	jfs_info("In jfs_evict_inode, inode = 0x%p", inode);

	if (!inode->i_nlink && !is_bad_inode(inode)) {
		dquot_initialize(inode);

		if (JFS_IP(inode)->fileset == FILESYSTEM_I) {
			struct inode *ipimap = JFS_SBI(inode->i_sb)->ipimap;
			truncate_inode_pages_final(&inode->i_data);

			if (test_cflag(COMMIT_Freewmap, inode))
				jfs_free_zero_link(inode);

			if (ipimap && JFS_IP(ipimap)->i_imap)
				diFree(inode);

			/*
			 * Free the inode from the quota allocation.
			 */
			dquot_free_inode(inode);
		}
	} else {
		truncate_inode_pages_final(&inode->i_data);
	}
	clear_inode(inode);
	dquot_drop(inode);

	BUG_ON(!list_empty(&ji->anon_inode_list));

	spin_lock_irq(&ji->ag_lock);
	if (ji->active_ag != -1) {
		struct bmap *bmap = JFS_SBI(inode->i_sb)->bmap;
		atomic_dec(&bmap->db_active[ji->active_ag]);
		ji->active_ag = -1;
	}
	spin_unlock_irq(&ji->ag_lock);
}
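
/*
 * Illustrative sketch (not part of the original file): the db_active
 * decrement above pairs with an increment performed on the block
 * allocation side (jfs_dmap.c). Roughly, when an allocation lands in
 * allocation group "agno", the tracking takes the shape below; this is
 * an assumption about that code's structure, not a verbatim excerpt.
 */
#if 0
	spin_lock_irq(&ji->ag_lock);
	if (ji->active_ag == -1) {
		atomic_inc(&bmap->db_active[agno]);
		ji->active_ag = agno;
	} else if (ji->active_ag != agno) {
		atomic_dec(&bmap->db_active[ji->active_ag]);
		atomic_inc(&bmap->db_active[agno]);
		ji->active_ag = agno;
	}
	spin_unlock_irq(&ji->ag_lock);
#endif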

void jfs_dirty_inode(struct inode *inode, int flags)
{
	static int noisy = 5;

	if (isReadOnly(inode)) {
		if (!special_file(inode->i_mode) && noisy) {
			/* kernel allows writes to devices on read-only
			 * partitions and may try to mark inode dirty
			 */
			jfs_err("jfs_dirty_inode called on read-only volume");
			jfs_err("Is remount racy?");
			noisy--;
		}
		return;
	}

	set_cflag(COMMIT_Dirty, inode);
}
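
/*
 * Illustrative sketch (not part of the original file): jfs_dirty_inode(),
 * jfs_write_inode() and jfs_evict_inode() are called by the VFS through
 * the superblock operations table. A partial wiring, modeled on
 * jfs_super_operations in super.c (the table itself lives outside this
 * file, so the entries shown are assumptions):
 */
#if 0
static const struct super_operations example_super_ops = {
	.dirty_inode	= jfs_dirty_inode,
	.write_inode	= jfs_write_inode,
	.evict_inode	= jfs_evict_inode,
	/* ... alloc_inode, put_super, sync_fs, statfs, ... */
};
#endif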

int jfs_get_block(struct inode *ip, sector_t lblock,
		  struct buffer_head *bh_result, int create)
{
	s64 lblock64 = lblock;
	int rc = 0;
	xad_t xad;
	s64 xaddr;
	int xflag;
	s32 xlen = bh_result->b_size >> ip->i_blkbits;

	/*
	 * Take appropriate lock on inode
	 */
	if (create)
		IWRITE_LOCK(ip, RDWRLOCK_NORMAL);
	else
		IREAD_LOCK(ip, RDWRLOCK_NORMAL);

	if (((lblock64 << ip->i_sb->s_blocksize_bits) < ip->i_size) &&
	    (!xtLookup(ip, lblock64, xlen, &xflag, &xaddr, &xlen, 0)) &&
	    xaddr) {
		if (xflag & XAD_NOTRECORDED) {
			if (!create)
				/*
				 * Allocated but not recorded, read treats
				 * this as a hole
				 */
				goto unlock;
#ifdef _JFS_4K
			XADoffset(&xad, lblock64);
			XADlength(&xad, xlen);
			XADaddress(&xad, xaddr);
#else				/* _JFS_4K */
			/*
			 * As long as block size = 4K, this isn't a problem.
			 * We should mark the whole page not ABNR, but how
			 * will we know to mark the other blocks BH_New?
			 */
			BUG();
#endif				/* _JFS_4K */
			rc = extRecord(ip, &xad);
			if (rc)
				goto unlock;
			set_buffer_new(bh_result);
		}

		map_bh(bh_result, ip->i_sb, xaddr);
		bh_result->b_size = xlen << ip->i_blkbits;
		goto unlock;
	}
	if (!create)
		goto unlock;

	/*
	 * Allocate a new block
	 */
#ifdef _JFS_4K
	if ((rc = extHint(ip, lblock64 << ip->i_sb->s_blocksize_bits, &xad)))
		goto unlock;
	rc = extAlloc(ip, xlen, lblock64, &xad, false);
	if (rc)
		goto unlock;

	set_buffer_new(bh_result);
	map_bh(bh_result, ip->i_sb, addressXAD(&xad));
	bh_result->b_size = lengthXAD(&xad) << ip->i_blkbits;

#else				/* _JFS_4K */
	/*
	 * We need to do whatever it takes to keep all but the last buffers
	 * in 4K pages - see jfs_write.c
	 */
	BUG();
#endif				/* _JFS_4K */

      unlock:
	/*
	 * Release lock on inode
	 */
	if (create)
		IWRITE_UNLOCK(ip);
	else
		IREAD_UNLOCK(ip);
	return rc;
}
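
/*
 * Illustrative sketch (not part of the original file): jfs_get_block()
 * follows the standard get_block_t contract, so a caller can map one
 * logical block by passing a buffer_head whose b_size gives the
 * requested span. This fragment mirrors what helpers such as
 * generic_block_bmap() do internally; "ip" and "lblock" are assumed
 * inputs and the snippet is a statement fragment, not a function.
 */
#if 0
	struct buffer_head tmp = { .b_size = i_blocksize(ip) };

	/* create == 0: probe only, never allocate */
	if (!jfs_get_block(ip, lblock, &tmp, 0) && buffer_mapped(&tmp))
		/* tmp.b_blocknr now holds the on-disk block for lblock */;
#endif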

static int jfs_writepage(struct page *page, struct writeback_control *wbc)
{
	return block_write_full_page(page, jfs_get_block, wbc);
}

static int jfs_writepages(struct address_space *mapping,
			struct writeback_control *wbc)
{
	return mpage_writepages(mapping, wbc, jfs_get_block);
}

static int jfs_readpage(struct file *file, struct page *page)
{
	return mpage_readpage(page, jfs_get_block);
}

static void jfs_readahead(struct readahead_control *rac)
{
	mpage_readahead(rac, jfs_get_block);
}

static void jfs_write_failed(struct address_space *mapping, loff_t to)
{
	struct inode *inode = mapping->host;

	if (to > inode->i_size) {
		truncate_pagecache(inode, inode->i_size);
		jfs_truncate(inode);
	}
}

static int jfs_write_begin(struct file *file, struct address_space *mapping,
				loff_t pos, unsigned len, unsigned flags,
				struct page **pagep, void **fsdata)
{
	int ret;

	ret = nobh_write_begin(mapping, pos, len, flags, pagep, fsdata,
				jfs_get_block);
	if (unlikely(ret))
		jfs_write_failed(mapping, pos + len);

	return ret;
}

static sector_t jfs_bmap(struct address_space *mapping, sector_t block)
{
	return generic_block_bmap(mapping, block, jfs_get_block);
}

static ssize_t jfs_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
{
	struct file *file = iocb->ki_filp;
	struct address_space *mapping = file->f_mapping;
	struct inode *inode = file->f_mapping->host;
	size_t count = iov_iter_count(iter);
	ssize_t ret;

	ret = blockdev_direct_IO(iocb, inode, iter, jfs_get_block);

	/*
	 * In case of error extending write may have instantiated a few
	 * blocks outside i_size. Trim these off again.
	 */
	if (unlikely(iov_iter_rw(iter) == WRITE && ret < 0)) {
		loff_t isize = i_size_read(inode);
		loff_t end = iocb->ki_pos + count;

		if (end > isize)
			jfs_write_failed(mapping, end);
	}

	return ret;
}

const struct address_space_operations jfs_aops = {
	.readpage	= jfs_readpage,
	.readahead	= jfs_readahead,
	.writepage	= jfs_writepage,
	.writepages	= jfs_writepages,
	.write_begin	= jfs_write_begin,
	.write_end	= nobh_write_end,
	.bmap		= jfs_bmap,
	.direct_IO	= jfs_direct_IO,
};

/*
 * Guts of jfs_truncate.  Called with locks already held.  Can be called
 * with directory for truncating directory index table.
 */
void jfs_truncate_nolock(struct inode *ip, loff_t length)
{
	loff_t newsize;
	tid_t tid;

	ASSERT(length >= 0);

	if (test_cflag(COMMIT_Nolink, ip)) {
		xtTruncate(0, ip, length, COMMIT_WMAP);
		return;
	}

	do {
		tid = txBegin(ip->i_sb, 0);

		/*
		 * The commit_mutex cannot be taken before txBegin.
		 * txBegin may block and there is a chance the inode
		 * could be marked dirty and need to be committed
		 * before txBegin unblocks
		 */
		mutex_lock(&JFS_IP(ip)->commit_mutex);

		newsize = xtTruncate(tid, ip, length,
				     COMMIT_TRUNCATE | COMMIT_PWMAP);
		if (newsize < 0) {
			txEnd(tid);
			mutex_unlock(&JFS_IP(ip)->commit_mutex);
			break;
		}

		ip->i_mtime = ip->i_ctime = current_time(ip);
		mark_inode_dirty(ip);

		txCommit(tid, 1, &ip, 0);
		txEnd(tid);
		mutex_unlock(&JFS_IP(ip)->commit_mutex);
	} while (newsize > length);	/* Truncate isn't always atomic */
}

void jfs_truncate(struct inode *ip)
{
	jfs_info("jfs_truncate: size = 0x%lx", (ulong) ip->i_size);

	nobh_truncate_page(ip->i_mapping, ip->i_size, jfs_get_block);

	IWRITE_LOCK(ip, RDWRLOCK_NORMAL);
	jfs_truncate_nolock(ip, ip->i_size);
	IWRITE_UNLOCK(ip);
}
425