// SPDX-License-Identifier: GPL-2.0+
/*
 * inode.c - NILFS inode operations.
 *
 * Copyright (C) 2005-2008 Nippon Telegraph and Telephone Corporation.
 *
 * Written by Ryusuke Konishi.
 */

#include <linux/buffer_head.h>
#include <linux/gfp.h>
#include <linux/mpage.h>
#include <linux/pagemap.h>
#include <linux/writeback.h>
#include <linux/uio.h>
#include <linux/fiemap.h>
#include "nilfs.h"
#include "btnode.h"
#include "segment.h"
#include "page.h"
#include "mdt.h"
#include "cpfile.h"
#include "ifile.h"

/**
 * struct nilfs_iget_args - arguments used during comparison between inodes
 * @ino: inode number
 * @cno: checkpoint number
 * @root: pointer to NILFS root object (mounted checkpoint)
 * @for_gc: inode for GC flag
 * @for_btnc: inode for B-tree node cache flag
 * @for_shadow: inode for shadowed page cache flag
 */
struct nilfs_iget_args {
	u64 ino;
	__u64 cno;
	struct nilfs_root *root;
	bool for_gc;
	bool for_btnc;
	bool for_shadow;
};
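
/*
 * Several in-core inodes may share one inode number: the regular file
 * inode, its B-tree node cache holder, a GC work inode, and a shadow
 * copy.  nilfs_iget_test() therefore compares all of the fields above,
 * not just @ino.  A minimal lookup keyed this way (a sketch mirroring
 * nilfs_ilookup() below) looks like:
 *
 *	struct nilfs_iget_args args = {
 *		.ino = ino, .root = root, .cno = 0, .for_gc = false,
 *		.for_btnc = false, .for_shadow = false
 *	};
 *	inode = ilookup5(sb, ino, nilfs_iget_test, &args);
 */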

static int nilfs_iget_test(struct inode *inode, void *opaque);

void nilfs_inode_add_blocks(struct inode *inode, int n)
{
	struct nilfs_root *root = NILFS_I(inode)->i_root;

	inode_add_bytes(inode, i_blocksize(inode) * n);
	if (root)
		atomic64_add(n, &root->blocks_count);
}

void nilfs_inode_sub_blocks(struct inode *inode, int n)
{
	struct nilfs_root *root = NILFS_I(inode)->i_root;

	inode_sub_bytes(inode, i_blocksize(inode) * n);
	if (root)
		atomic64_sub(n, &root->blocks_count);
}

/**
 * nilfs_get_block() - get a file block on the filesystem (callback function)
 * @inode: inode struct of the target file
 * @blkoff: file block number
 * @bh_result: buffer head to be mapped on
 * @create: flag indicating whether to allocate the block if it has not
 *	been allocated yet.
 *
 * This function does not issue the actual read request for the specified
 * data block.  That is done by the VFS.
 */
int nilfs_get_block(struct inode *inode, sector_t blkoff,
		    struct buffer_head *bh_result, int create)
{
	struct nilfs_inode_info *ii = NILFS_I(inode);
	struct the_nilfs *nilfs = inode->i_sb->s_fs_info;
	__u64 blknum = 0;
	int err = 0, ret;
	unsigned int maxblocks = bh_result->b_size >> inode->i_blkbits;

	down_read(&NILFS_MDT(nilfs->ns_dat)->mi_sem);
	ret = nilfs_bmap_lookup_contig(ii->i_bmap, blkoff, &blknum, maxblocks);
	up_read(&NILFS_MDT(nilfs->ns_dat)->mi_sem);
	if (ret >= 0) {	/* found */
		map_bh(bh_result, inode->i_sb, blknum);
		if (ret > 0)
			bh_result->b_size = (ret << inode->i_blkbits);
		goto out;
	}
	/* data block was not found */
	if (ret == -ENOENT && create) {
		struct nilfs_transaction_info ti;

		bh_result->b_blocknr = 0;
		err = nilfs_transaction_begin(inode->i_sb, &ti, 1);
		if (unlikely(err))
			goto out;
		err = nilfs_bmap_insert(ii->i_bmap, blkoff,
					(unsigned long)bh_result);
		if (unlikely(err != 0)) {
			if (err == -EEXIST) {
				/*
				 * The get_block() function could be called
				 * from multiple callers for an inode.
				 * However, the page having this block must
				 * be locked in this case.
				 */
				nilfs_warn(inode->i_sb,
					   "%s (ino=%lu): a race condition while inserting a data block at offset=%llu",
					   __func__, inode->i_ino,
					   (unsigned long long)blkoff);
				err = 0;
			}
			nilfs_transaction_abort(inode->i_sb);
			goto out;
		}
		nilfs_mark_inode_dirty_sync(inode);
		nilfs_transaction_commit(inode->i_sb); /* never fails */
		/* Error handling should be detailed */
		set_buffer_new(bh_result);
		set_buffer_delay(bh_result);
		map_bh(bh_result, inode->i_sb, 0);
		/* Disk block number must be changed to proper value */

	} else if (ret == -ENOENT) {
		/*
		 * not found is not an error (e.g. hole); must return without
		 * the mapped state flag.
		 */
		;
	} else {
		err = ret;
	}

 out:
	return err;
}
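
/*
 * How callers consume the result above (descriptive note): on a read
 * path (create=0), a mapped bh_result carries the disk block number in
 * b_blocknr and the byte length of the contiguous extent in b_size; on
 * a write path (create=1), a freshly inserted block comes back with the
 * "new" and "delay" buffer flags set and b_blocknr still 0, to be
 * resolved later at segment construction time.
 */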

/**
 * nilfs_readpage() - implement readpage() method of nilfs_aops {}
 * address_space_operations.
 * @file: file struct of the file to be read
 * @page: the page to be read
 */
static int nilfs_readpage(struct file *file, struct page *page)
{
	return mpage_readpage(page, nilfs_get_block);
}

static void nilfs_readahead(struct readahead_control *rac)
{
	mpage_readahead(rac, nilfs_get_block);
}

static int nilfs_writepages(struct address_space *mapping,
			    struct writeback_control *wbc)
{
	struct inode *inode = mapping->host;
	int err = 0;

	if (sb_rdonly(inode->i_sb)) {
		nilfs_clear_dirty_pages(mapping, false);
		return -EROFS;
	}

	if (wbc->sync_mode == WB_SYNC_ALL)
		err = nilfs_construct_dsync_segment(inode->i_sb, inode,
						    wbc->range_start,
						    wbc->range_end);
	return err;
}
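
/*
 * Note: only WB_SYNC_ALL writeback triggers segment construction here.
 * Plain background writeback falls through as a no-op, since the log
 * writer flushes dirty pages itself when it builds segments.
 */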

static int nilfs_writepage(struct page *page, struct writeback_control *wbc)
{
	struct inode *inode = page->mapping->host;
	int err;

	if (sb_rdonly(inode->i_sb)) {
		/*
		 * This means the filesystem was remounted read-only
		 * because of an error or metadata corruption, but dirty
		 * pages are still being flushed in the background.  So,
		 * simply discard this dirty page here.
		 */
		nilfs_clear_dirty_page(page, false);
		unlock_page(page);
		return -EROFS;
	}

	redirty_page_for_writepage(wbc, page);
	unlock_page(page);

	if (wbc->sync_mode == WB_SYNC_ALL) {
		err = nilfs_construct_segment(inode->i_sb);
		if (unlikely(err))
			return err;
	} else if (wbc->for_reclaim)
		nilfs_flush_segment(inode->i_sb, inode->i_ino);

	return 0;
}

static int nilfs_set_page_dirty(struct page *page)
{
	struct inode *inode = page->mapping->host;
	int ret = __set_page_dirty_nobuffers(page);

	if (page_has_buffers(page)) {
		unsigned int nr_dirty = 0;
		struct buffer_head *bh, *head;

		/*
		 * This page is locked by callers, and no other thread
		 * concurrently marks its buffers dirty since they are
		 * only dirtied through routines in fs/buffer.c in
		 * which call sites of mark_buffer_dirty are protected
		 * by page lock.
		 */
		bh = head = page_buffers(page);
		do {
			/* Do not mark hole blocks dirty */
			if (buffer_dirty(bh) || !buffer_mapped(bh))
				continue;

			set_buffer_dirty(bh);
			nr_dirty++;
		} while (bh = bh->b_this_page, bh != head);

		if (nr_dirty)
			nilfs_set_file_dirty(inode, nr_dirty);
	} else if (ret) {
		unsigned int nr_dirty = 1 << (PAGE_SHIFT - inode->i_blkbits);

		nilfs_set_file_dirty(inode, nr_dirty);
	}
	return ret;
}

void nilfs_write_failed(struct address_space *mapping, loff_t to)
{
	struct inode *inode = mapping->host;

	if (to > inode->i_size) {
		truncate_pagecache(inode, inode->i_size);
		nilfs_truncate(inode);
	}
}

static int nilfs_write_begin(struct file *file, struct address_space *mapping,
			     loff_t pos, unsigned len, unsigned flags,
			     struct page **pagep, void **fsdata)
{
	struct inode *inode = mapping->host;
	int err = nilfs_transaction_begin(inode->i_sb, NULL, 1);

	if (unlikely(err))
		return err;

	err = block_write_begin(mapping, pos, len, flags, pagep,
				nilfs_get_block);
	if (unlikely(err)) {
		nilfs_write_failed(mapping, pos + len);
		nilfs_transaction_abort(inode->i_sb);
	}
	return err;
}

static int nilfs_write_end(struct file *file, struct address_space *mapping,
			   loff_t pos, unsigned len, unsigned copied,
			   struct page *page, void *fsdata)
{
	struct inode *inode = mapping->host;
	unsigned int start = pos & (PAGE_SIZE - 1);
	unsigned int nr_dirty;
	int err;

	nr_dirty = nilfs_page_count_clean_buffers(page, start,
						  start + copied);
	copied = generic_write_end(file, mapping, pos, len, copied, page,
				   fsdata);
	nilfs_set_file_dirty(inode, nr_dirty);
	err = nilfs_transaction_commit(inode->i_sb);
	return err ? : copied;
}
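
/*
 * nilfs_write_begin() and nilfs_write_end() bracket each buffered write
 * in a transaction: begin opens it, end commits it, and the error path
 * in write_begin aborts it, so the count of newly dirtied buffers is
 * always registered via nilfs_set_file_dirty() before the commit.
 */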

static ssize_t
nilfs_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
{
	struct inode *inode = file_inode(iocb->ki_filp);

	if (iov_iter_rw(iter) == WRITE)
		return 0;

	/* Needs synchronization with the cleaner */
	return blockdev_direct_IO(iocb, inode, iter, nilfs_get_block);
}

const struct address_space_operations nilfs_aops = {
	.writepage		= nilfs_writepage,
	.readpage		= nilfs_readpage,
	.writepages		= nilfs_writepages,
	.set_page_dirty		= nilfs_set_page_dirty,
	.readahead		= nilfs_readahead,
	.write_begin		= nilfs_write_begin,
	.write_end		= nilfs_write_end,
	/* .releasepage		= nilfs_releasepage, */
	.invalidatepage		= block_invalidatepage,
	.direct_IO		= nilfs_direct_IO,
	.is_partially_uptodate	= block_is_partially_uptodate,
};

static int nilfs_insert_inode_locked(struct inode *inode,
				     struct nilfs_root *root,
				     unsigned long ino)
{
	struct nilfs_iget_args args = {
		.ino = ino, .root = root, .cno = 0, .for_gc = false,
		.for_btnc = false, .for_shadow = false
	};

	return insert_inode_locked4(inode, ino, nilfs_iget_test, &args);
}

struct inode *nilfs_new_inode(struct inode *dir, umode_t mode)
{
	struct super_block *sb = dir->i_sb;
	struct the_nilfs *nilfs = sb->s_fs_info;
	struct inode *inode;
	struct nilfs_inode_info *ii;
	struct nilfs_root *root;
	struct buffer_head *bh;
	int err = -ENOMEM;
	ino_t ino;

	inode = new_inode(sb);
	if (unlikely(!inode))
		goto failed;

	mapping_set_gfp_mask(inode->i_mapping,
			     mapping_gfp_constraint(inode->i_mapping,
						    ~__GFP_FS));

	root = NILFS_I(dir)->i_root;
	ii = NILFS_I(inode);
	ii->i_state = BIT(NILFS_I_NEW);
	ii->i_root = root;

	err = nilfs_ifile_create_inode(root->ifile, &ino, &bh);
	if (unlikely(err))
		goto failed_ifile_create_inode;
	/* reference count of i_bh inherits from nilfs_mdt_read_block() */

	if (unlikely(ino < NILFS_USER_INO)) {
		nilfs_warn(sb,
			   "inode bitmap is inconsistent for reserved inodes");
		do {
			brelse(bh);
			err = nilfs_ifile_create_inode(root->ifile, &ino, &bh);
			if (unlikely(err))
				goto failed_ifile_create_inode;
		} while (ino < NILFS_USER_INO);

		nilfs_info(sb, "repaired inode bitmap for reserved inodes");
	}
	ii->i_bh = bh;

	atomic64_inc(&root->inodes_count);
	inode_init_owner(inode, dir, mode);
	inode->i_ino = ino;
	inode->i_mtime = inode->i_atime = inode->i_ctime = current_time(inode);

	if (S_ISREG(mode) || S_ISDIR(mode) || S_ISLNK(mode)) {
		err = nilfs_bmap_read(ii->i_bmap, NULL);
		if (err < 0)
			goto failed_after_creation;

		set_bit(NILFS_I_BMAP, &ii->i_state);
		/* No lock is needed; iget() ensures it. */
	}

	ii->i_flags = nilfs_mask_flags(
		mode, NILFS_I(dir)->i_flags & NILFS_FL_INHERITED);

	/* ii->i_file_acl = 0; */
	/* ii->i_dir_acl = 0; */
	ii->i_dir_start_lookup = 0;
	nilfs_set_inode_flags(inode);
	spin_lock(&nilfs->ns_next_gen_lock);
	inode->i_generation = nilfs->ns_next_generation++;
	spin_unlock(&nilfs->ns_next_gen_lock);
	if (nilfs_insert_inode_locked(inode, root, ino) < 0) {
		err = -EIO;
		goto failed_after_creation;
	}

	err = nilfs_init_acl(inode, dir);
	if (unlikely(err))
		/*
		 * Never occurs.  When supporting nilfs_init_acl(),
		 * proper cancellation of the above jobs should be considered.
		 */
		goto failed_after_creation;

	return inode;

 failed_after_creation:
	clear_nlink(inode);
	if (inode->i_state & I_NEW)
		unlock_new_inode(inode);
	iput(inode);  /*
		       * raw_inode will be deleted through
		       * nilfs_evict_inode().
		       */
	goto failed;

 failed_ifile_create_inode:
	make_bad_inode(inode);
	iput(inode);
 failed:
	return ERR_PTR(err);
}

void nilfs_set_inode_flags(struct inode *inode)
{
	unsigned int flags = NILFS_I(inode)->i_flags;
	unsigned int new_fl = 0;

	if (flags & FS_SYNC_FL)
		new_fl |= S_SYNC;
	if (flags & FS_APPEND_FL)
		new_fl |= S_APPEND;
	if (flags & FS_IMMUTABLE_FL)
		new_fl |= S_IMMUTABLE;
	if (flags & FS_NOATIME_FL)
		new_fl |= S_NOATIME;
	if (flags & FS_DIRSYNC_FL)
		new_fl |= S_DIRSYNC;
	inode_set_flags(inode, new_fl, S_SYNC | S_APPEND | S_IMMUTABLE |
			S_NOATIME | S_DIRSYNC);
}

int nilfs_read_inode_common(struct inode *inode,
			    struct nilfs_inode *raw_inode)
{
	struct nilfs_inode_info *ii = NILFS_I(inode);
	int err;

	inode->i_mode = le16_to_cpu(raw_inode->i_mode);
	i_uid_write(inode, le32_to_cpu(raw_inode->i_uid));
	i_gid_write(inode, le32_to_cpu(raw_inode->i_gid));
	set_nlink(inode, le16_to_cpu(raw_inode->i_links_count));
	inode->i_size = le64_to_cpu(raw_inode->i_size);
	inode->i_atime.tv_sec = le64_to_cpu(raw_inode->i_mtime);
	inode->i_ctime.tv_sec = le64_to_cpu(raw_inode->i_ctime);
	inode->i_mtime.tv_sec = le64_to_cpu(raw_inode->i_mtime);
	inode->i_atime.tv_nsec = le32_to_cpu(raw_inode->i_mtime_nsec);
	inode->i_ctime.tv_nsec = le32_to_cpu(raw_inode->i_ctime_nsec);
	inode->i_mtime.tv_nsec = le32_to_cpu(raw_inode->i_mtime_nsec);
	if (nilfs_is_metadata_file_inode(inode) && !S_ISREG(inode->i_mode))
		return -EIO; /* this inode is for metadata and corrupted */
	if (inode->i_nlink == 0)
		return -ESTALE; /* this inode is deleted */

	inode->i_blocks = le64_to_cpu(raw_inode->i_blocks);
	ii->i_flags = le32_to_cpu(raw_inode->i_flags);
#if 0
	ii->i_file_acl = le32_to_cpu(raw_inode->i_file_acl);
	ii->i_dir_acl = S_ISREG(inode->i_mode) ?
		0 : le32_to_cpu(raw_inode->i_dir_acl);
#endif
	ii->i_dir_start_lookup = 0;
	inode->i_generation = le32_to_cpu(raw_inode->i_generation);

	if (S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
	    S_ISLNK(inode->i_mode)) {
		err = nilfs_bmap_read(ii->i_bmap, raw_inode);
		if (err < 0)
			return err;
		set_bit(NILFS_I_BMAP, &ii->i_state);
		/* No lock is needed; iget() ensures it. */
	}
	return 0;
}

static int __nilfs_read_inode(struct super_block *sb,
			      struct nilfs_root *root, unsigned long ino,
			      struct inode *inode)
{
	struct the_nilfs *nilfs = sb->s_fs_info;
	struct buffer_head *bh;
	struct nilfs_inode *raw_inode;
	int err;

	down_read(&NILFS_MDT(nilfs->ns_dat)->mi_sem);
	err = nilfs_ifile_get_inode_block(root->ifile, ino, &bh);
	if (unlikely(err))
		goto bad_inode;

	raw_inode = nilfs_ifile_map_inode(root->ifile, ino, bh);

	err = nilfs_read_inode_common(inode, raw_inode);
	if (err)
		goto failed_unmap;

	if (S_ISREG(inode->i_mode)) {
		inode->i_op = &nilfs_file_inode_operations;
		inode->i_fop = &nilfs_file_operations;
		inode->i_mapping->a_ops = &nilfs_aops;
	} else if (S_ISDIR(inode->i_mode)) {
		inode->i_op = &nilfs_dir_inode_operations;
		inode->i_fop = &nilfs_dir_operations;
		inode->i_mapping->a_ops = &nilfs_aops;
	} else if (S_ISLNK(inode->i_mode)) {
		inode->i_op = &nilfs_symlink_inode_operations;
		inode_nohighmem(inode);
		inode->i_mapping->a_ops = &nilfs_aops;
	} else {
		inode->i_op = &nilfs_special_inode_operations;
		init_special_inode(
			inode, inode->i_mode,
			huge_decode_dev(le64_to_cpu(raw_inode->i_device_code)));
	}
	nilfs_ifile_unmap_inode(root->ifile, ino, bh);
	brelse(bh);
	up_read(&NILFS_MDT(nilfs->ns_dat)->mi_sem);
	nilfs_set_inode_flags(inode);
	mapping_set_gfp_mask(inode->i_mapping,
			     mapping_gfp_constraint(inode->i_mapping,
						    ~__GFP_FS));
	return 0;

 failed_unmap:
	nilfs_ifile_unmap_inode(root->ifile, ino, bh);
	brelse(bh);

 bad_inode:
	up_read(&NILFS_MDT(nilfs->ns_dat)->mi_sem);
	return err;
}

static int nilfs_iget_test(struct inode *inode, void *opaque)
{
	struct nilfs_iget_args *args = opaque;
	struct nilfs_inode_info *ii;

	if (args->ino != inode->i_ino || args->root != NILFS_I(inode)->i_root)
		return 0;

	ii = NILFS_I(inode);
	if (test_bit(NILFS_I_BTNC, &ii->i_state)) {
		if (!args->for_btnc)
			return 0;
	} else if (args->for_btnc) {
		return 0;
	}
	if (test_bit(NILFS_I_SHADOW, &ii->i_state)) {
		if (!args->for_shadow)
			return 0;
	} else if (args->for_shadow) {
		return 0;
	}

	if (!test_bit(NILFS_I_GCINODE, &ii->i_state))
		return !args->for_gc;

	return args->for_gc && args->cno == ii->i_cno;
}

static int nilfs_iget_set(struct inode *inode, void *opaque)
{
	struct nilfs_iget_args *args = opaque;

	inode->i_ino = args->ino;
	NILFS_I(inode)->i_cno = args->cno;
	NILFS_I(inode)->i_root = args->root;
	if (args->root && args->ino == NILFS_ROOT_INO)
		nilfs_get_root(args->root);

	if (args->for_gc)
		NILFS_I(inode)->i_state = BIT(NILFS_I_GCINODE);
	if (args->for_btnc)
		NILFS_I(inode)->i_state |= BIT(NILFS_I_BTNC);
	if (args->for_shadow)
		NILFS_I(inode)->i_state |= BIT(NILFS_I_SHADOW);
	return 0;
}

struct inode *nilfs_ilookup(struct super_block *sb, struct nilfs_root *root,
			    unsigned long ino)
{
	struct nilfs_iget_args args = {
		.ino = ino, .root = root, .cno = 0, .for_gc = false,
		.for_btnc = false, .for_shadow = false
	};

	return ilookup5(sb, ino, nilfs_iget_test, &args);
}

struct inode *nilfs_iget_locked(struct super_block *sb, struct nilfs_root *root,
				unsigned long ino)
{
	struct nilfs_iget_args args = {
		.ino = ino, .root = root, .cno = 0, .for_gc = false,
		.for_btnc = false, .for_shadow = false
	};

	return iget5_locked(sb, ino, nilfs_iget_test, nilfs_iget_set, &args);
}

struct inode *nilfs_iget(struct super_block *sb, struct nilfs_root *root,
			 unsigned long ino)
{
	struct inode *inode;
	int err;

	inode = nilfs_iget_locked(sb, root, ino);
	if (unlikely(!inode))
		return ERR_PTR(-ENOMEM);
	if (!(inode->i_state & I_NEW))
		return inode;

	err = __nilfs_read_inode(sb, root, ino, inode);
	if (unlikely(err)) {
		iget_failed(inode);
		return ERR_PTR(err);
	}
	unlock_new_inode(inode);
	return inode;
}
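
/*
 * A sketch of the typical caller pattern for nilfs_iget() (illustrative
 * only; directory lookup in namei.c follows this shape):
 *
 *	inode = nilfs_iget(sb, NILFS_I(dir)->i_root, ino);
 *	if (IS_ERR(inode))
 *		return ERR_CAST(inode);
 */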

struct inode *nilfs_iget_for_gc(struct super_block *sb, unsigned long ino,
				__u64 cno)
{
	struct nilfs_iget_args args = {
		.ino = ino, .root = NULL, .cno = cno, .for_gc = true,
		.for_btnc = false, .for_shadow = false
	};
	struct inode *inode;
	int err;

	inode = iget5_locked(sb, ino, nilfs_iget_test, nilfs_iget_set, &args);
	if (unlikely(!inode))
		return ERR_PTR(-ENOMEM);
	if (!(inode->i_state & I_NEW))
		return inode;

	err = nilfs_init_gcinode(inode);
	if (unlikely(err)) {
		iget_failed(inode);
		return ERR_PTR(err);
	}
	unlock_new_inode(inode);
	return inode;
}

/**
 * nilfs_attach_btree_node_cache - attach a B-tree node cache to the inode
 * @inode: inode object
 *
 * nilfs_attach_btree_node_cache() attaches a B-tree node cache to @inode,
 * or does nothing if the inode already has it.  This function allocates
 * an additional inode to maintain a one-on-one page cache of B-tree nodes.
 *
 * Return Value: On success, 0 is returned.  On errors, one of the following
 * negative error codes is returned.
 *
 * %-ENOMEM - Insufficient memory available.
 */
int nilfs_attach_btree_node_cache(struct inode *inode)
{
	struct nilfs_inode_info *ii = NILFS_I(inode);
	struct inode *btnc_inode;
	struct nilfs_iget_args args;

	if (ii->i_assoc_inode)
		return 0;

	args.ino = inode->i_ino;
	args.root = ii->i_root;
	args.cno = ii->i_cno;
	args.for_gc = test_bit(NILFS_I_GCINODE, &ii->i_state) != 0;
	args.for_btnc = true;
	args.for_shadow = test_bit(NILFS_I_SHADOW, &ii->i_state) != 0;

	btnc_inode = iget5_locked(inode->i_sb, inode->i_ino, nilfs_iget_test,
				  nilfs_iget_set, &args);
	if (unlikely(!btnc_inode))
		return -ENOMEM;
	if (btnc_inode->i_state & I_NEW) {
		nilfs_init_btnc_inode(btnc_inode);
		unlock_new_inode(btnc_inode);
	}
	NILFS_I(btnc_inode)->i_assoc_inode = inode;
	NILFS_I(btnc_inode)->i_bmap = ii->i_bmap;
	ii->i_assoc_inode = btnc_inode;

	return 0;
}
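
/*
 * The B-tree node cache inode attached above is a pure page-cache
 * holder: it shares i_bmap with its owner and is found again through
 * nilfs_iget_test() by the for_btnc flag, so repeated attach calls are
 * idempotent thanks to the i_assoc_inode check.
 */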

/**
 * nilfs_detach_btree_node_cache - detach the B-tree node cache from the inode
 * @inode: inode object
 *
 * nilfs_detach_btree_node_cache() detaches the B-tree node cache and its
 * holder inode bound to @inode, or does nothing if @inode doesn't have it.
 */
void nilfs_detach_btree_node_cache(struct inode *inode)
{
	struct nilfs_inode_info *ii = NILFS_I(inode);
	struct inode *btnc_inode = ii->i_assoc_inode;

	if (btnc_inode) {
		NILFS_I(btnc_inode)->i_assoc_inode = NULL;
		ii->i_assoc_inode = NULL;
		iput(btnc_inode);
	}
}

/**
 * nilfs_iget_for_shadow - obtain inode for shadow mapping
 * @inode: inode object that uses shadow mapping
 *
 * nilfs_iget_for_shadow() allocates a pair of inodes that holds page
 * caches for shadow mapping.  The page cache for data pages is set up
 * in one inode and the one for b-tree node pages is set up in the
 * other inode, which is attached to the former inode.
 *
 * Return Value: On success, a pointer to the inode for data pages is
 * returned.  On errors, one of the following negative error codes is
 * returned in a pointer type.
 *
 * %-ENOMEM - Insufficient memory available.
 */
struct inode *nilfs_iget_for_shadow(struct inode *inode)
{
	struct nilfs_iget_args args = {
		.ino = inode->i_ino, .root = NULL, .cno = 0, .for_gc = false,
		.for_btnc = false, .for_shadow = true
	};
	struct inode *s_inode;
	int err;

	s_inode = iget5_locked(inode->i_sb, inode->i_ino, nilfs_iget_test,
			       nilfs_iget_set, &args);
	if (unlikely(!s_inode))
		return ERR_PTR(-ENOMEM);
	if (!(s_inode->i_state & I_NEW))
		return inode;

	NILFS_I(s_inode)->i_flags = 0;
	memset(NILFS_I(s_inode)->i_bmap, 0, sizeof(struct nilfs_bmap));
	mapping_set_gfp_mask(s_inode->i_mapping, GFP_NOFS);

	err = nilfs_attach_btree_node_cache(s_inode);
	if (unlikely(err)) {
		iget_failed(s_inode);
		return ERR_PTR(err);
	}
	unlock_new_inode(s_inode);
	return s_inode;
}

void nilfs_write_inode_common(struct inode *inode,
			      struct nilfs_inode *raw_inode, int has_bmap)
{
	struct nilfs_inode_info *ii = NILFS_I(inode);

	raw_inode->i_mode = cpu_to_le16(inode->i_mode);
	raw_inode->i_uid = cpu_to_le32(i_uid_read(inode));
	raw_inode->i_gid = cpu_to_le32(i_gid_read(inode));
	raw_inode->i_links_count = cpu_to_le16(inode->i_nlink);
	raw_inode->i_size = cpu_to_le64(inode->i_size);
	raw_inode->i_ctime = cpu_to_le64(inode->i_ctime.tv_sec);
	raw_inode->i_mtime = cpu_to_le64(inode->i_mtime.tv_sec);
	raw_inode->i_ctime_nsec = cpu_to_le32(inode->i_ctime.tv_nsec);
	raw_inode->i_mtime_nsec = cpu_to_le32(inode->i_mtime.tv_nsec);
	raw_inode->i_blocks = cpu_to_le64(inode->i_blocks);

	raw_inode->i_flags = cpu_to_le32(ii->i_flags);
	raw_inode->i_generation = cpu_to_le32(inode->i_generation);

	if (NILFS_ROOT_METADATA_FILE(inode->i_ino)) {
		struct the_nilfs *nilfs = inode->i_sb->s_fs_info;

		/* zero-fill unused portion in the case of super root block */
		raw_inode->i_xattr = 0;
		raw_inode->i_pad = 0;
		memset((void *)raw_inode + sizeof(*raw_inode), 0,
		       nilfs->ns_inode_size - sizeof(*raw_inode));
	}

	if (has_bmap)
		nilfs_bmap_write(ii->i_bmap, raw_inode);
	else if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode))
		raw_inode->i_device_code =
			cpu_to_le64(huge_encode_dev(inode->i_rdev));
	/*
	 * When extending the inode, nilfs->ns_inode_size should be checked
	 * for substitutions of appended fields.
	 */
}

void nilfs_update_inode(struct inode *inode, struct buffer_head *ibh, int flags)
{
	ino_t ino = inode->i_ino;
	struct nilfs_inode_info *ii = NILFS_I(inode);
	struct inode *ifile = ii->i_root->ifile;
	struct nilfs_inode *raw_inode;

	raw_inode = nilfs_ifile_map_inode(ifile, ino, ibh);

	if (test_and_clear_bit(NILFS_I_NEW, &ii->i_state))
		memset(raw_inode, 0, NILFS_MDT(ifile)->mi_entry_size);
	if (flags & I_DIRTY_DATASYNC)
		set_bit(NILFS_I_INODE_SYNC, &ii->i_state);

	nilfs_write_inode_common(inode, raw_inode, 0);
	/*
	 * XXX: call with has_bmap = 0 is a workaround to avoid
	 * deadlock of bmap.  This delays update of i_bmap to just
	 * before writing.
	 */

	nilfs_ifile_unmap_inode(ifile, ino, ibh);
}

#define NILFS_MAX_TRUNCATE_BLOCKS	16384  /* 64MB for 4KB block */

static void nilfs_truncate_bmap(struct nilfs_inode_info *ii,
				unsigned long from)
{
	__u64 b;
	int ret;

	if (!test_bit(NILFS_I_BMAP, &ii->i_state))
		return;
 repeat:
	ret = nilfs_bmap_last_key(ii->i_bmap, &b);
	if (ret == -ENOENT)
		return;
	else if (ret < 0)
		goto failed;

	if (b < from)
		return;

	b -= min_t(__u64, NILFS_MAX_TRUNCATE_BLOCKS, b - from);
	ret = nilfs_bmap_truncate(ii->i_bmap, b);
	nilfs_relax_pressure_in_lock(ii->vfs_inode.i_sb);
	if (!ret || (ret == -ENOMEM &&
		     nilfs_bmap_truncate(ii->i_bmap, b) == 0))
		goto repeat;

 failed:
	nilfs_warn(ii->vfs_inode.i_sb, "error %d truncating bmap (ino=%lu)",
		   ret, ii->vfs_inode.i_ino);
}
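
/*
 * Worked example of the chunking above, assuming 4KiB blocks: each pass
 * shortens the bmap by at most NILFS_MAX_TRUNCATE_BLOCKS (16384) blocks,
 * i.e. 64MiB, and calls nilfs_relax_pressure_in_lock() between passes so
 * the segment constructor can reclaim memory during a huge truncate.
 */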

void nilfs_truncate(struct inode *inode)
{
	unsigned long blkoff;
	unsigned int blocksize;
	struct nilfs_transaction_info ti;
	struct super_block *sb = inode->i_sb;
	struct nilfs_inode_info *ii = NILFS_I(inode);

	if (!test_bit(NILFS_I_BMAP, &ii->i_state))
		return;
	if (IS_APPEND(inode) || IS_IMMUTABLE(inode))
		return;

	blocksize = sb->s_blocksize;
	blkoff = (inode->i_size + blocksize - 1) >> sb->s_blocksize_bits;
	nilfs_transaction_begin(sb, &ti, 0); /* never fails */

	block_truncate_page(inode->i_mapping, inode->i_size, nilfs_get_block);

	nilfs_truncate_bmap(ii, blkoff);

	inode->i_mtime = inode->i_ctime = current_time(inode);
	if (IS_SYNC(inode))
		nilfs_set_transaction_flag(NILFS_TI_SYNC);

	nilfs_mark_inode_dirty(inode);
	nilfs_set_file_dirty(inode, 0);
	nilfs_transaction_commit(sb);
	/*
	 * May construct a logical segment and may fail in sync mode.
	 * But truncate has no return value.
	 */
}

static void nilfs_clear_inode(struct inode *inode)
{
	struct nilfs_inode_info *ii = NILFS_I(inode);

	/*
	 * Free resources allocated in nilfs_read_inode(), here.
	 */
	BUG_ON(!list_empty(&ii->i_dirty));
	brelse(ii->i_bh);
	ii->i_bh = NULL;

	if (nilfs_is_metadata_file_inode(inode))
		nilfs_mdt_clear(inode);

	if (test_bit(NILFS_I_BMAP, &ii->i_state))
		nilfs_bmap_clear(ii->i_bmap);

	if (!test_bit(NILFS_I_BTNC, &ii->i_state))
		nilfs_detach_btree_node_cache(inode);

	if (ii->i_root && inode->i_ino == NILFS_ROOT_INO)
		nilfs_put_root(ii->i_root);
}

void nilfs_evict_inode(struct inode *inode)
{
	struct nilfs_transaction_info ti;
	struct super_block *sb = inode->i_sb;
	struct nilfs_inode_info *ii = NILFS_I(inode);
	int ret;

	if (inode->i_nlink || !ii->i_root || unlikely(is_bad_inode(inode))) {
		truncate_inode_pages_final(&inode->i_data);
		clear_inode(inode);
		nilfs_clear_inode(inode);
		return;
	}
	nilfs_transaction_begin(sb, &ti, 0); /* never fails */

	truncate_inode_pages_final(&inode->i_data);

	/* TODO: some of the following operations may fail.  */
	nilfs_truncate_bmap(ii, 0);
	nilfs_mark_inode_dirty(inode);
	clear_inode(inode);

	ret = nilfs_ifile_delete_inode(ii->i_root->ifile, inode->i_ino);
	if (!ret)
		atomic64_dec(&ii->i_root->inodes_count);

	nilfs_clear_inode(inode);

	if (IS_SYNC(inode))
		nilfs_set_transaction_flag(NILFS_TI_SYNC);
	nilfs_transaction_commit(sb);
	/*
	 * May construct a logical segment and may fail in sync mode.
	 * But delete_inode has no return value.
	 */
}

int nilfs_setattr(struct dentry *dentry, struct iattr *iattr)
{
	struct nilfs_transaction_info ti;
	struct inode *inode = d_inode(dentry);
	struct super_block *sb = inode->i_sb;
	int err;

	err = setattr_prepare(dentry, iattr);
	if (err)
		return err;

	err = nilfs_transaction_begin(sb, &ti, 0);
	if (unlikely(err))
		return err;

	if ((iattr->ia_valid & ATTR_SIZE) &&
	    iattr->ia_size != i_size_read(inode)) {
		inode_dio_wait(inode);
		truncate_setsize(inode, iattr->ia_size);
		nilfs_truncate(inode);
	}

	setattr_copy(inode, iattr);
	mark_inode_dirty(inode);

	if (iattr->ia_valid & ATTR_MODE) {
		err = nilfs_acl_chmod(inode);
		if (unlikely(err))
			goto out_err;
	}

	return nilfs_transaction_commit(sb);

 out_err:
	nilfs_transaction_abort(sb);
	return err;
}

int nilfs_permission(struct inode *inode, int mask)
{
	struct nilfs_root *root = NILFS_I(inode)->i_root;

	if ((mask & MAY_WRITE) && root &&
	    root->cno != NILFS_CPTREE_CURRENT_CNO)
		return -EROFS; /* snapshot is not writable */

	return generic_permission(inode, mask);
}

int nilfs_load_inode_block(struct inode *inode, struct buffer_head **pbh)
{
	struct the_nilfs *nilfs = inode->i_sb->s_fs_info;
	struct nilfs_inode_info *ii = NILFS_I(inode);
	int err;

	spin_lock(&nilfs->ns_inode_lock);
	if (ii->i_bh == NULL) {
		spin_unlock(&nilfs->ns_inode_lock);
		err = nilfs_ifile_get_inode_block(ii->i_root->ifile,
						  inode->i_ino, pbh);
		if (unlikely(err))
			return err;
		spin_lock(&nilfs->ns_inode_lock);
		if (ii->i_bh == NULL)
			ii->i_bh = *pbh;
		else {
			brelse(*pbh);
			*pbh = ii->i_bh;
		}
	} else
		*pbh = ii->i_bh;

	get_bh(*pbh);
	spin_unlock(&nilfs->ns_inode_lock);
	return 0;
}
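
/*
 * The NULL re-check after re-taking ns_inode_lock above is a
 * double-checked locking pattern: another task may have loaded the
 * inode block while the lock was dropped for the (possibly sleeping)
 * ifile read, in which case the freshly read buffer is released and
 * the cached ii->i_bh wins.
 */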

int nilfs_inode_dirty(struct inode *inode)
{
	struct nilfs_inode_info *ii = NILFS_I(inode);
	struct the_nilfs *nilfs = inode->i_sb->s_fs_info;
	int ret = 0;

	if (!list_empty(&ii->i_dirty)) {
		spin_lock(&nilfs->ns_inode_lock);
		ret = test_bit(NILFS_I_DIRTY, &ii->i_state) ||
			test_bit(NILFS_I_BUSY, &ii->i_state);
		spin_unlock(&nilfs->ns_inode_lock);
	}
	return ret;
}

int nilfs_set_file_dirty(struct inode *inode, unsigned int nr_dirty)
{
	struct nilfs_inode_info *ii = NILFS_I(inode);
	struct the_nilfs *nilfs = inode->i_sb->s_fs_info;

	atomic_add(nr_dirty, &nilfs->ns_ndirtyblks);

	if (test_and_set_bit(NILFS_I_DIRTY, &ii->i_state))
		return 0;

	spin_lock(&nilfs->ns_inode_lock);
	if (!test_bit(NILFS_I_QUEUED, &ii->i_state) &&
	    !test_bit(NILFS_I_BUSY, &ii->i_state)) {
		/*
		 * Because this routine may race with nilfs_dispose_list(),
		 * we have to check NILFS_I_QUEUED here, too.
		 */
		if (list_empty(&ii->i_dirty) && igrab(inode) == NULL) {
			/*
			 * This will happen when somebody is freeing
			 * this inode.
			 */
			nilfs_warn(inode->i_sb,
				   "cannot set file dirty (ino=%lu): the file is being freed",
				   inode->i_ino);
			spin_unlock(&nilfs->ns_inode_lock);
			return -EINVAL; /*
					 * NILFS_I_DIRTY may remain for
					 * freeing inode.
					 */
		}
		list_move_tail(&ii->i_dirty, &nilfs->ns_dirty_files);
		set_bit(NILFS_I_QUEUED, &ii->i_state);
	}
	spin_unlock(&nilfs->ns_inode_lock);
	return 0;
}

int __nilfs_mark_inode_dirty(struct inode *inode, int flags)
{
	struct buffer_head *ibh;
	int err;

	err = nilfs_load_inode_block(inode, &ibh);
	if (unlikely(err)) {
		nilfs_warn(inode->i_sb,
			   "cannot mark inode dirty (ino=%lu): error %d loading inode block",
			   inode->i_ino, err);
		return err;
	}
	nilfs_update_inode(inode, ibh, flags);
	mark_buffer_dirty(ibh);
	nilfs_mdt_mark_dirty(NILFS_I(inode)->i_root->ifile);
	brelse(ibh);
	return 0;
}

/**
 * nilfs_dirty_inode - reflect changes on given inode to an inode block.
 * @inode: inode of the file to be registered.
 * @flags: flags to tell the dirty state (e.g. I_DIRTY_DATASYNC)
 *
 * nilfs_dirty_inode() loads an inode block containing the specified
 * @inode and copies data from a nilfs_inode to a corresponding inode
 * entry in the inode block.  This operation is excluded from the segment
 * construction.  This function can be called both as a single operation
 * and as a part of indivisible file operations.
 */
void nilfs_dirty_inode(struct inode *inode, int flags)
{
	struct nilfs_transaction_info ti;
	struct nilfs_mdt_info *mdi = NILFS_MDT(inode);

	if (is_bad_inode(inode)) {
		nilfs_warn(inode->i_sb,
			   "tried to mark bad_inode dirty. ignored.");
		dump_stack();
		return;
	}
	if (mdi) {
		nilfs_mdt_mark_dirty(inode);
		return;
	}
	nilfs_transaction_begin(inode->i_sb, &ti, 0);
	__nilfs_mark_inode_dirty(inode, flags);
	nilfs_transaction_commit(inode->i_sb); /* never fails */
}
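
/*
 * Note the split above: metadata files mark themselves dirty through
 * nilfs_mdt_mark_dirty() without opening a transaction, while ordinary
 * inodes get a one-shot transaction around __nilfs_mark_inode_dirty()
 * so that the ifile block update is excluded from segment construction,
 * as the comment above notes.
 */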

int nilfs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
		 __u64 start, __u64 len)
{
	struct the_nilfs *nilfs = inode->i_sb->s_fs_info;
	__u64 logical = 0, phys = 0, size = 0;
	__u32 flags = 0;
	loff_t isize;
	sector_t blkoff, end_blkoff;
	sector_t delalloc_blkoff;
	unsigned long delalloc_blklen;
	unsigned int blkbits = inode->i_blkbits;
	int ret, n;

	ret = fiemap_prep(inode, fieinfo, start, &len, 0);
	if (ret)
		return ret;

	inode_lock(inode);

	isize = i_size_read(inode);

	blkoff = start >> blkbits;
	end_blkoff = (start + len - 1) >> blkbits;

	delalloc_blklen = nilfs_find_uncommitted_extent(inode, blkoff,
							&delalloc_blkoff);

	do {
		__u64 blkphy;
		unsigned int maxblocks;

		if (delalloc_blklen && blkoff == delalloc_blkoff) {
			if (size) {
				/* End of the current extent */
				ret = fiemap_fill_next_extent(
					fieinfo, logical, phys, size, flags);
				if (ret)
					break;
			}
			if (blkoff > end_blkoff)
				break;

			flags = FIEMAP_EXTENT_MERGED | FIEMAP_EXTENT_DELALLOC;
			logical = blkoff << blkbits;
			phys = 0;
			size = delalloc_blklen << blkbits;

			blkoff = delalloc_blkoff + delalloc_blklen;
			delalloc_blklen = nilfs_find_uncommitted_extent(
				inode, blkoff, &delalloc_blkoff);
			continue;
		}

		/*
		 * Limit the number of blocks that we look up so as
		 * not to get into the next delayed allocation extent.
		 */
		maxblocks = INT_MAX;
		if (delalloc_blklen)
			maxblocks = min_t(sector_t, delalloc_blkoff - blkoff,
					  maxblocks);
		blkphy = 0;

		down_read(&NILFS_MDT(nilfs->ns_dat)->mi_sem);
		n = nilfs_bmap_lookup_contig(
			NILFS_I(inode)->i_bmap, blkoff, &blkphy, maxblocks);
		up_read(&NILFS_MDT(nilfs->ns_dat)->mi_sem);

		if (n < 0) {
			int past_eof;

			if (unlikely(n != -ENOENT))
				break; /* error */

			/* HOLE */
			blkoff++;
			past_eof = ((blkoff << blkbits) >= isize);

			if (size) {
				/* End of the current extent */

				if (past_eof)
					flags |= FIEMAP_EXTENT_LAST;

				ret = fiemap_fill_next_extent(
					fieinfo, logical, phys, size, flags);
				if (ret)
					break;
				size = 0;
			}
			if (blkoff > end_blkoff || past_eof)
				break;
		} else {
			if (size) {
				if (phys && blkphy << blkbits == phys + size) {
					/* The current extent goes on */
					size += n << blkbits;
				} else {
					/* Terminate the current extent */
					ret = fiemap_fill_next_extent(
						fieinfo, logical, phys, size,
						flags);
					if (ret || blkoff > end_blkoff)
						break;

					/* Start another extent */
					flags = FIEMAP_EXTENT_MERGED;
					logical = blkoff << blkbits;
					phys = blkphy << blkbits;
					size = n << blkbits;
				}
			} else {
				/* Start a new extent */
				flags = FIEMAP_EXTENT_MERGED;
				logical = blkoff << blkbits;
				phys = blkphy << blkbits;
				size = n << blkbits;
			}
			blkoff += n;
		}
		cond_resched();
	} while (true);

	/* If ret is 1 then we just hit the end of the extent array */
	if (ret == 1)
		ret = 0;

	inode_unlock(inode);
	return ret;
}