// SPDX-License-Identifier: GPL-2.0
/*
 * fs/f2fs/file.c
 *
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 *             http://www.samsung.com/
 */
#include <linux/fs.h>
#include <linux/f2fs_fs.h>
#include <linux/stat.h>
#include <linux/buffer_head.h>
#include <linux/writeback.h>
#include <linux/blkdev.h>
#include <linux/falloc.h>
#include <linux/types.h>
#include <linux/compat.h>
#include <linux/uaccess.h>
#include <linux/mount.h>
#include <linux/pagevec.h>
#include <linux/uio.h>
#include <linux/uuid.h>
#include <linux/file.h>
#include <linux/nls.h>
#include <linux/sched/signal.h>

#include "f2fs.h"
#include "node.h"
#include "segment.h"
#include "xattr.h"
#include "acl.h"
#include "gc.h"
#include <trace/events/f2fs.h>
#include <uapi/linux/f2fs.h>

static vm_fault_t f2fs_filemap_fault(struct vm_fault *vmf)
{
        struct inode *inode = file_inode(vmf->vma->vm_file);
        vm_fault_t ret;

        f2fs_down_read(&F2FS_I(inode)->i_mmap_sem);
        ret = filemap_fault(vmf);
        f2fs_up_read(&F2FS_I(inode)->i_mmap_sem);

        if (!ret)
                f2fs_update_iostat(F2FS_I_SB(inode), APP_MAPPED_READ_IO,
                                                        F2FS_BLKSIZE);

        trace_f2fs_filemap_fault(inode, vmf->pgoff, (unsigned long)ret);

        return ret;
}

static vm_fault_t f2fs_vm_page_mkwrite(struct vm_fault *vmf)
{
        struct page *page = vmf->page;
        struct inode *inode = file_inode(vmf->vma->vm_file);
        struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
        struct dnode_of_data dn;
        bool need_alloc = true;
        int err = 0;

        if (unlikely(IS_IMMUTABLE(inode)))
                return VM_FAULT_SIGBUS;

        if (is_inode_flag_set(inode, FI_COMPRESS_RELEASED))
                return VM_FAULT_SIGBUS;

        if (unlikely(f2fs_cp_error(sbi))) {
                err = -EIO;
                goto err;
        }

        if (!f2fs_is_checkpoint_ready(sbi)) {
                err = -ENOSPC;
                goto err;
        }

        err = f2fs_convert_inline_inode(inode);
        if (err)
                goto err;

#ifdef CONFIG_F2FS_FS_COMPRESSION
        if (f2fs_compressed_file(inode)) {
                int ret = f2fs_is_compressed_cluster(inode, page->index);

                if (ret < 0) {
                        err = ret;
                        goto err;
                } else if (ret) {
                        need_alloc = false;
                }
        }
#endif
        /* this should be done outside of any locked page */
        if (need_alloc)
                f2fs_balance_fs(sbi, true);

        sb_start_pagefault(inode->i_sb);

        f2fs_bug_on(sbi, f2fs_has_inline_data(inode));

        file_update_time(vmf->vma->vm_file);
        f2fs_down_read(&F2FS_I(inode)->i_mmap_sem);
        lock_page(page);
        if (unlikely(page->mapping != inode->i_mapping ||
                        page_offset(page) > i_size_read(inode) ||
                        !PageUptodate(page))) {
                unlock_page(page);
                err = -EFAULT;
                goto out_sem;
        }

        if (need_alloc) {
                /* block allocation */
                f2fs_do_map_lock(sbi, F2FS_GET_BLOCK_PRE_AIO, true);
                set_new_dnode(&dn, inode, NULL, NULL, 0);
                err = f2fs_get_block(&dn, page->index);
                f2fs_do_map_lock(sbi, F2FS_GET_BLOCK_PRE_AIO, false);
        }

#ifdef CONFIG_F2FS_FS_COMPRESSION
        if (!need_alloc) {
                set_new_dnode(&dn, inode, NULL, NULL, 0);
                err = f2fs_get_dnode_of_data(&dn, page->index, LOOKUP_NODE);
                f2fs_put_dnode(&dn);
        }
#endif
        if (err) {
                unlock_page(page);
                goto out_sem;
        }

        f2fs_wait_on_page_writeback(page, DATA, false, true);

        /* wait for GCed page writeback via META_MAPPING */
        f2fs_wait_on_block_writeback(inode, dn.data_blkaddr);

        /*
         * check to see if the page is mapped already (no holes)
         */
        if (PageMappedToDisk(page))
                goto out_sem;

        /* page is wholly or partially inside EOF */
        if (((loff_t)(page->index + 1) << PAGE_SHIFT) >
                                                i_size_read(inode)) {
                loff_t offset;

                offset = i_size_read(inode) & ~PAGE_MASK;
                zero_user_segment(page, offset, PAGE_SIZE);
        }
        set_page_dirty(page);
        if (!PageUptodate(page))
                SetPageUptodate(page);

        f2fs_update_iostat(sbi, APP_MAPPED_IO, F2FS_BLKSIZE);
        f2fs_update_time(sbi, REQ_TIME);

        trace_f2fs_vm_page_mkwrite(page, DATA);
out_sem:
        f2fs_up_read(&F2FS_I(inode)->i_mmap_sem);

        sb_end_pagefault(inode->i_sb);
err:
        return block_page_mkwrite_return(err);
}

static const struct vm_operations_struct f2fs_file_vm_ops = {
        .fault          = f2fs_filemap_fault,
        .map_pages      = filemap_map_pages,
        .page_mkwrite   = f2fs_vm_page_mkwrite,
#ifdef CONFIG_SPECULATIVE_PAGE_FAULT
        .allow_speculation = filemap_allow_speculation,
#endif
};

static int get_parent_ino(struct inode *inode, nid_t *pino)
{
        struct dentry *dentry;

        /*
         * Make sure to get the non-deleted alias.  The alias associated with
         * the open file descriptor being fsync()'ed may be deleted already.
         */
        dentry = d_find_alias(inode);
        if (!dentry)
                return 0;

        *pino = parent_ino(dentry);
        dput(dentry);
        return 1;
}

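/*
 * Decide whether this fsync can rely on roll-forward recovery alone or
 * whether a full checkpoint is required, and return the reason for the
 * checkpoint (CP_NO_NEEDED when a node-only fsync is sufficient).
 */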
static inline enum cp_reason_type need_do_checkpoint(struct inode *inode)
{
        struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
        enum cp_reason_type cp_reason = CP_NO_NEEDED;

        if (!S_ISREG(inode->i_mode))
                cp_reason = CP_NON_REGULAR;
        else if (f2fs_compressed_file(inode))
                cp_reason = CP_COMPRESSED;
        else if (inode->i_nlink != 1)
                cp_reason = CP_HARDLINK;
        else if (is_sbi_flag_set(sbi, SBI_NEED_CP))
                cp_reason = CP_SB_NEED_CP;
        else if (file_wrong_pino(inode))
                cp_reason = CP_WRONG_PINO;
        else if (!f2fs_space_for_roll_forward(sbi))
                cp_reason = CP_NO_SPC_ROLL;
        else if (!f2fs_is_checkpointed_node(sbi, F2FS_I(inode)->i_pino))
                cp_reason = CP_NODE_NEED_CP;
        else if (test_opt(sbi, FASTBOOT))
                cp_reason = CP_FASTBOOT_MODE;
        else if (F2FS_OPTION(sbi).active_logs == 2)
                cp_reason = CP_SPEC_LOG_NUM;
        else if (F2FS_OPTION(sbi).fsync_mode == FSYNC_MODE_STRICT &&
                f2fs_need_dentry_mark(sbi, inode->i_ino) &&
                f2fs_exist_written_data(sbi, F2FS_I(inode)->i_pino,
                                        TRANS_DIR_INO))
                cp_reason = CP_RECOVER_DIR;

        return cp_reason;
}

static bool need_inode_page_update(struct f2fs_sb_info *sbi, nid_t ino)
{
        struct page *i = find_get_page(NODE_MAPPING(sbi), ino);
        bool ret = false;
        /* but we still need to detect pending inode page updates */
        if ((i && PageDirty(i)) || f2fs_need_inode_block_update(sbi, ino))
                ret = true;
        f2fs_put_page(i, 0);
        return ret;
}

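/*
 * If this inode recorded a wrong parent ino and is not a hardlink, refresh
 * i_pino from the current (non-deleted) dentry so that later fsyncs can
 * trust it for roll-forward recovery.
 */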
static void try_to_fix_pino(struct inode *inode)
{
        struct f2fs_inode_info *fi = F2FS_I(inode);
        nid_t pino;

        f2fs_down_write(&fi->i_sem);
        if (file_wrong_pino(inode) && inode->i_nlink == 1 &&
                        get_parent_ino(inode, &pino)) {
                f2fs_i_pino_write(inode, pino);
                file_got_pino(inode);
        }
        f2fs_up_write(&fi->i_sem);
}

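/*
 * Account pages written by checkpoint-less fsyncs.  If adding @npages to the
 * running no_cp_fsync_pages counter would exceed DEF_RAM_THRESHOLD percent
 * of low memory, return false so that the caller falls back to a full
 * checkpoint; otherwise charge the pages and return true.
 */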
static bool f2fs_update_fsync_count(struct f2fs_sb_info *sbi,
                                        unsigned int npages)
{
        struct sysinfo val;
        unsigned long avail_ram;

        si_meminfo(&val);

        /* only uses low memory */
        avail_ram = val.totalram - val.totalhigh;
        avail_ram = (avail_ram * DEF_RAM_THRESHOLD) / 100;

        if ((atomic_read(&sbi->no_cp_fsync_pages) + npages) > avail_ram)
                return false;

        atomic_add(npages, &sbi->no_cp_fsync_pages);
        return true;
}

static int f2fs_do_sync_file(struct file *file, loff_t start, loff_t end,
                                                int datasync, bool atomic)
{
        struct inode *inode = file->f_mapping->host;
        struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
        nid_t ino = inode->i_ino;
        int ret = 0;
        unsigned int npages = 0;
        enum cp_reason_type cp_reason = 0;
        struct writeback_control wbc = {
                .sync_mode = WB_SYNC_ALL,
                .nr_to_write = LONG_MAX,
                .for_reclaim = 0,
        };
        unsigned int seq_id = 0;

        if (unlikely(f2fs_readonly(inode->i_sb)))
                return 0;

        trace_f2fs_sync_file_enter(inode);

        if (S_ISDIR(inode->i_mode))
                goto go_write;

        /* if fdatasync is triggered, let's do in-place-update */
        npages = get_dirty_pages(inode);
        if (datasync || get_dirty_pages(inode) <= SM_I(sbi)->min_fsync_blocks)
                set_inode_flag(inode, FI_NEED_IPU);
        ret = file_write_and_wait_range(file, start, end);
        clear_inode_flag(inode, FI_NEED_IPU);

        if (ret || is_sbi_flag_set(sbi, SBI_CP_DISABLED)) {
                trace_f2fs_sync_file_exit(inode, cp_reason, datasync, ret);
                return ret;
        }

        /* if the inode is dirty, let's recover all the time */
        if (!f2fs_skip_inode_update(inode, datasync)) {
                f2fs_write_inode(inode, NULL);
                goto go_write;
        }

        /*
         * if there is no written data, don't waste time writing recovery info.
         */
        if (!is_inode_flag_set(inode, FI_APPEND_WRITE) &&
                        !f2fs_exist_written_data(sbi, ino, APPEND_INO)) {

                /* it may call write_inode just prior to fsync */
                if (need_inode_page_update(sbi, ino))
                        goto go_write;

                if (is_inode_flag_set(inode, FI_UPDATE_WRITE) ||
                                f2fs_exist_written_data(sbi, ino, UPDATE_INO))
                        goto flush_out;
                goto out;
        }
go_write:
        /*
         * Both fdatasync() and fsync() are able to be recovered from
         * a sudden power-off.
         */
        f2fs_down_read(&F2FS_I(inode)->i_sem);
        cp_reason = need_do_checkpoint(inode);
        f2fs_up_read(&F2FS_I(inode)->i_sem);

        if (cp_reason || !f2fs_update_fsync_count(sbi, npages)) {
                /* all the dirty node pages should be flushed for POR */
                ret = f2fs_sync_fs(inode->i_sb, 1);

                /*
                 * We've secured consistency through sync_fs. The following
                 * pino will be used only for fsynced inodes after checkpoint.
                 */
                try_to_fix_pino(inode);
                clear_inode_flag(inode, FI_APPEND_WRITE);
                clear_inode_flag(inode, FI_UPDATE_WRITE);
                goto out;
        }
sync_nodes:
        atomic_inc(&sbi->wb_sync_req[NODE]);
        ret = f2fs_fsync_node_pages(sbi, inode, &wbc, atomic, &seq_id);
        atomic_dec(&sbi->wb_sync_req[NODE]);
        if (ret)
                goto out;

        /* if cp_error was enabled, we should avoid an infinite loop */
        if (unlikely(f2fs_cp_error(sbi))) {
                ret = -EIO;
                goto out;
        }

        if (f2fs_need_inode_block_update(sbi, ino)) {
                f2fs_mark_inode_dirty_sync(inode, true);
                f2fs_write_inode(inode, NULL);
                goto sync_nodes;
        }

        /*
         * If it's an atomic_write, it's fine to keep the write ordering. So
         * here we don't need to wait for node write completion, since we use
         * the node chain which serializes node blocks. If one of the node
         * writes is reordered, we can simply see a broken chain, which stops
         * roll-forward recovery. It means we'll recover all or none of the
         * node blocks given the fsync mark.
         */
        if (!atomic) {
                ret = f2fs_wait_on_node_pages_writeback(sbi, seq_id);
                if (ret)
                        goto out;
        }

        /* once recovery info is written, we don't need to track this */
        f2fs_remove_ino_entry(sbi, ino, APPEND_INO);
        clear_inode_flag(inode, FI_APPEND_WRITE);
flush_out:
        if (!atomic && F2FS_OPTION(sbi).fsync_mode != FSYNC_MODE_NOBARRIER)
                ret = f2fs_issue_flush(sbi, inode->i_ino);
        if (!ret) {
                f2fs_remove_ino_entry(sbi, ino, UPDATE_INO);
                clear_inode_flag(inode, FI_UPDATE_WRITE);
                f2fs_remove_ino_entry(sbi, ino, FLUSH_INO);
        }
        f2fs_update_time(sbi, REQ_TIME);
out:
        trace_f2fs_sync_file_exit(inode, cp_reason, datasync, ret);
        return ret;
}

int f2fs_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
{
        if (unlikely(f2fs_cp_error(F2FS_I_SB(file_inode(file)))))
                return -EIO;
        return f2fs_do_sync_file(file, start, end, datasync, false);
}

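/*
 * For SEEK_DATA, a block counts as data if it has a valid on-disk address,
 * or if it is NEW_ADDR backed by a dirty page in the page cache.  For
 * SEEK_HOLE, an unallocated (NULL_ADDR) block counts as a hole.
 */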
static bool __found_offset(struct address_space *mapping, block_t blkaddr,
                                        pgoff_t index, int whence)
{
        switch (whence) {
        case SEEK_DATA:
                if (__is_valid_data_blkaddr(blkaddr))
                        return true;
                if (blkaddr == NEW_ADDR &&
                    xa_get_mark(&mapping->i_pages, index, PAGECACHE_TAG_DIRTY))
                        return true;
                break;
        case SEEK_HOLE:
                if (blkaddr == NULL_ADDR)
                        return true;
                break;
        }
        return false;
}

static loff_t f2fs_seek_block(struct file *file, loff_t offset, int whence)
{
        struct inode *inode = file->f_mapping->host;
        loff_t maxbytes = inode->i_sb->s_maxbytes;
        struct dnode_of_data dn;
        pgoff_t pgofs, end_offset;
        loff_t data_ofs = offset;
        loff_t isize;
        int err = 0;

        inode_lock(inode);

        isize = i_size_read(inode);
        if (offset >= isize)
                goto fail;

        /* handle inline data case */
        if (f2fs_has_inline_data(inode)) {
                if (whence == SEEK_HOLE) {
                        data_ofs = isize;
                        goto found;
                } else if (whence == SEEK_DATA) {
                        data_ofs = offset;
                        goto found;
                }
        }

        pgofs = (pgoff_t)(offset >> PAGE_SHIFT);

        for (; data_ofs < isize; data_ofs = (loff_t)pgofs << PAGE_SHIFT) {
                set_new_dnode(&dn, inode, NULL, NULL, 0);
                err = f2fs_get_dnode_of_data(&dn, pgofs, LOOKUP_NODE);
                if (err && err != -ENOENT) {
                        goto fail;
                } else if (err == -ENOENT) {
                        /* direct node does not exist */
                        if (whence == SEEK_DATA) {
                                pgofs = f2fs_get_next_page_offset(&dn, pgofs);
                                continue;
                        } else {
                                goto found;
                        }
                }

                end_offset = ADDRS_PER_PAGE(dn.node_page, inode);

                /* find data/hole in dnode block */
                for (; dn.ofs_in_node < end_offset;
                                dn.ofs_in_node++, pgofs++,
                                data_ofs = (loff_t)pgofs << PAGE_SHIFT) {
                        block_t blkaddr;

                        blkaddr = f2fs_data_blkaddr(&dn);

                        if (__is_valid_data_blkaddr(blkaddr) &&
                                !f2fs_is_valid_blkaddr(F2FS_I_SB(inode),
                                        blkaddr, DATA_GENERIC_ENHANCE)) {
                                f2fs_put_dnode(&dn);
                                goto fail;
                        }

                        if (__found_offset(file->f_mapping, blkaddr,
                                                        pgofs, whence)) {
                                f2fs_put_dnode(&dn);
                                goto found;
                        }
                }
                f2fs_put_dnode(&dn);
        }

        if (whence == SEEK_DATA)
                goto fail;
found:
        if (whence == SEEK_HOLE && data_ofs > isize)
                data_ofs = isize;
        inode_unlock(inode);
        return vfs_setpos(file, data_ofs, maxbytes);
fail:
        inode_unlock(inode);
        return -ENXIO;
}

static loff_t f2fs_llseek(struct file *file, loff_t offset, int whence)
{
        struct inode *inode = file->f_mapping->host;
        loff_t maxbytes = inode->i_sb->s_maxbytes;

        if (f2fs_compressed_file(inode))
                maxbytes = max_file_blocks(inode) << F2FS_BLKSIZE_BITS;

        switch (whence) {
        case SEEK_SET:
        case SEEK_CUR:
        case SEEK_END:
                return generic_file_llseek_size(file, offset, whence,
                                                maxbytes, i_size_read(inode));
        case SEEK_DATA:
        case SEEK_HOLE:
                if (offset < 0)
                        return -ENXIO;
                return f2fs_seek_block(file, offset, whence);
        }

        return -EINVAL;
}

static int f2fs_file_mmap(struct file *file, struct vm_area_struct *vma)
{
        struct inode *inode = file_inode(file);

        if (unlikely(f2fs_cp_error(F2FS_I_SB(inode))))
                return -EIO;

        if (!f2fs_is_compress_backend_ready(inode))
                return -EOPNOTSUPP;

        file_accessed(file);
        vma->vm_ops = &f2fs_file_vm_ops;
        set_inode_flag(inode, FI_MMAP_FILE);
        return 0;
}

static int f2fs_file_open(struct inode *inode, struct file *filp)
{
        int err = fscrypt_file_open(inode, filp);

        if (err)
                return err;

        if (!f2fs_is_compress_backend_ready(inode))
                return -EOPNOTSUPP;

        err = fsverity_file_open(inode, filp);
        if (err)
                return err;

        filp->f_mode |= FMODE_NOWAIT;

        return dquot_file_open(inode, filp);
}

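/*
 * Invalidate up to @count block addresses in the node page pointed to by
 * @dn, starting at dn->ofs_in_node: clear each on-disk address, release the
 * blocks, update compressed-cluster accounting, and shrink the extent cache
 * and valid block count accordingly.
 */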
void f2fs_truncate_data_blocks_range(struct dnode_of_data *dn, int count)
{
        struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
        struct f2fs_node *raw_node;
        int nr_free = 0, ofs = dn->ofs_in_node, len = count;
        __le32 *addr;
        int base = 0;
        bool compressed_cluster = false;
        int cluster_index = 0, valid_blocks = 0;
        int cluster_size = F2FS_I(dn->inode)->i_cluster_size;
        bool released = !atomic_read(&F2FS_I(dn->inode)->i_compr_blocks);

        if (IS_INODE(dn->node_page) && f2fs_has_extra_attr(dn->inode))
                base = get_extra_isize(dn->inode);

        raw_node = F2FS_NODE(dn->node_page);
        addr = blkaddr_in_node(raw_node) + base + ofs;

        /* Assumption: truncation starts with cluster */
        for (; count > 0; count--, addr++, dn->ofs_in_node++, cluster_index++) {
                block_t blkaddr = le32_to_cpu(*addr);

                if (f2fs_compressed_file(dn->inode) &&
                                        !(cluster_index & (cluster_size - 1))) {
                        if (compressed_cluster)
                                f2fs_i_compr_blocks_update(dn->inode,
                                                        valid_blocks, false);
                        compressed_cluster = (blkaddr == COMPRESS_ADDR);
                        valid_blocks = 0;
                }

                if (blkaddr == NULL_ADDR)
                        continue;

                dn->data_blkaddr = NULL_ADDR;
                f2fs_set_data_blkaddr(dn);

                if (__is_valid_data_blkaddr(blkaddr)) {
                        if (!f2fs_is_valid_blkaddr(sbi, blkaddr,
                                                DATA_GENERIC_ENHANCE))
                                continue;
                        if (compressed_cluster)
                                valid_blocks++;
                }

                if (dn->ofs_in_node == 0 && IS_INODE(dn->node_page))
                        clear_inode_flag(dn->inode, FI_FIRST_BLOCK_WRITTEN);

                f2fs_invalidate_blocks(sbi, blkaddr);

                if (!released || blkaddr != COMPRESS_ADDR)
                        nr_free++;
        }

        if (compressed_cluster)
                f2fs_i_compr_blocks_update(dn->inode, valid_blocks, false);

        if (nr_free) {
                pgoff_t fofs;
                /*
                 * once we invalidate valid blkaddr in range [ofs, ofs + count],
                 * we will invalidate all blkaddr in the whole range.
                 */
                fofs = f2fs_start_bidx_of_node(ofs_of_node(dn->node_page),
                                                        dn->inode) + ofs;
                f2fs_update_read_extent_cache_range(dn, fofs, 0, len);
                f2fs_update_age_extent_cache_range(dn, fofs, nr_free);
                dec_valid_block_count(sbi, dn->inode, nr_free);
        }
        dn->ofs_in_node = ofs;

        f2fs_update_time(sbi, REQ_TIME);
        trace_f2fs_truncate_data_blocks_range(dn->inode, dn->nid,
                                        dn->ofs_in_node, nr_free);
}

void f2fs_truncate_data_blocks(struct dnode_of_data *dn)
{
        f2fs_truncate_data_blocks_range(dn, ADDRS_PER_BLOCK(dn->inode));
}

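/*
 * Zero out the part of the last page that lies beyond the new EOF given by
 * @from.  With @cache_only set, only touch a page that is already cached and
 * uptodate; otherwise read the page in and mark it dirty.
 */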
static int truncate_partial_data_page(struct inode *inode, u64 from,
                                                                bool cache_only)
{
        loff_t offset = from & (PAGE_SIZE - 1);
        pgoff_t index = from >> PAGE_SHIFT;
        struct address_space *mapping = inode->i_mapping;
        struct page *page;

        if (!offset && !cache_only)
                return 0;

        if (cache_only) {
                page = find_lock_page(mapping, index);
                if (page && PageUptodate(page))
                        goto truncate_out;
                f2fs_put_page(page, 1);
                return 0;
        }

        page = f2fs_get_lock_data_page(inode, index, true);
        if (IS_ERR(page))
                return PTR_ERR(page) == -ENOENT ? 0 : PTR_ERR(page);
truncate_out:
        f2fs_wait_on_page_writeback(page, DATA, true, true);
        zero_user(page, offset, PAGE_SIZE - offset);

        /* An encrypted inode should have a key and truncate the last page. */
        f2fs_bug_on(F2FS_I_SB(inode), cache_only && IS_ENCRYPTED(inode));
        if (!cache_only)
                set_page_dirty(page);
        f2fs_put_page(page, 1);
        return 0;
}

int f2fs_do_truncate_blocks(struct inode *inode, u64 from, bool lock)
{
        struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
        struct dnode_of_data dn;
        pgoff_t free_from;
        int count = 0, err = 0;
        struct page *ipage;
        bool truncate_page = false;

        trace_f2fs_truncate_blocks_enter(inode, from);

        free_from = (pgoff_t)F2FS_BLK_ALIGN(from);

        if (free_from >= max_file_blocks(inode))
                goto free_partial;

        if (lock)
                f2fs_lock_op(sbi);

        ipage = f2fs_get_node_page(sbi, inode->i_ino);
        if (IS_ERR(ipage)) {
                err = PTR_ERR(ipage);
                goto out;
        }

        if (f2fs_has_inline_data(inode)) {
                f2fs_truncate_inline_inode(inode, ipage, from);
                f2fs_put_page(ipage, 1);
                truncate_page = true;
                goto out;
        }

        set_new_dnode(&dn, inode, ipage, NULL, 0);
        err = f2fs_get_dnode_of_data(&dn, free_from, LOOKUP_NODE_RA);
        if (err) {
                if (err == -ENOENT)
                        goto free_next;
                goto out;
        }

        count = ADDRS_PER_PAGE(dn.node_page, inode);

        count -= dn.ofs_in_node;
        f2fs_bug_on(sbi, count < 0);

        if (dn.ofs_in_node || IS_INODE(dn.node_page)) {
                f2fs_truncate_data_blocks_range(&dn, count);
                free_from += count;
        }

        f2fs_put_dnode(&dn);
free_next:
        err = f2fs_truncate_inode_blocks(inode, free_from);
out:
        if (lock)
                f2fs_unlock_op(sbi);
free_partial:
        /* lastly zero out the first data page */
        if (!err)
                err = truncate_partial_data_page(inode, from, truncate_page);

        trace_f2fs_truncate_blocks_exit(inode, err);
        return err;
}

int f2fs_truncate_blocks(struct inode *inode, u64 from, bool lock)
{
        u64 free_from = from;
        int err;

#ifdef CONFIG_F2FS_FS_COMPRESSION
        /*
         * for compressed file, only support cluster size
         * aligned truncation.
         */
        if (f2fs_compressed_file(inode))
                free_from = round_up(from,
                                F2FS_I(inode)->i_cluster_size << PAGE_SHIFT);
#endif

        err = f2fs_do_truncate_blocks(inode, free_from, lock);
        if (err)
                return err;

#ifdef CONFIG_F2FS_FS_COMPRESSION
        if (from != free_from) {
                err = f2fs_truncate_partial_cluster(inode, from, lock);
                if (err)
                        return err;
        }
#endif

        return 0;
}

int f2fs_truncate(struct inode *inode)
{
        int err;

        if (unlikely(f2fs_cp_error(F2FS_I_SB(inode))))
                return -EIO;

        if (!(S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
                                S_ISLNK(inode->i_mode)))
                return 0;

        trace_f2fs_truncate(inode);

        if (time_to_inject(F2FS_I_SB(inode), FAULT_TRUNCATE)) {
                f2fs_show_injection_info(F2FS_I_SB(inode), FAULT_TRUNCATE);
                return -EIO;
        }

        err = dquot_initialize(inode);
        if (err)
                return err;

        /* we should check inline_data size */
        if (!f2fs_may_inline_data(inode)) {
                err = f2fs_convert_inline_inode(inode);
                if (err)
                        return err;
        }

        err = f2fs_truncate_blocks(inode, i_size_read(inode), true);
        if (err)
                return err;

        inode->i_mtime = inode->i_ctime = current_time(inode);
        f2fs_mark_inode_dirty_sync(inode, false);
        return 0;
}

int f2fs_getattr(const struct path *path, struct kstat *stat,
                 u32 request_mask, unsigned int query_flags)
{
        struct inode *inode = d_inode(path->dentry);
        struct f2fs_inode_info *fi = F2FS_I(inode);
        struct f2fs_inode *ri;
        unsigned int flags;

        if (f2fs_has_extra_attr(inode) &&
                        f2fs_sb_has_inode_crtime(F2FS_I_SB(inode)) &&
                        F2FS_FITS_IN_INODE(ri, fi->i_extra_isize, i_crtime)) {
                stat->result_mask |= STATX_BTIME;
                stat->btime.tv_sec = fi->i_crtime.tv_sec;
                stat->btime.tv_nsec = fi->i_crtime.tv_nsec;
        }

        flags = fi->i_flags;
        if (flags & F2FS_COMPR_FL)
                stat->attributes |= STATX_ATTR_COMPRESSED;
        if (flags & F2FS_APPEND_FL)
                stat->attributes |= STATX_ATTR_APPEND;
        if (IS_ENCRYPTED(inode))
                stat->attributes |= STATX_ATTR_ENCRYPTED;
        if (flags & F2FS_IMMUTABLE_FL)
                stat->attributes |= STATX_ATTR_IMMUTABLE;
        if (flags & F2FS_NODUMP_FL)
                stat->attributes |= STATX_ATTR_NODUMP;
        if (IS_VERITY(inode))
                stat->attributes |= STATX_ATTR_VERITY;

        stat->attributes_mask |= (STATX_ATTR_COMPRESSED |
                                  STATX_ATTR_APPEND |
                                  STATX_ATTR_ENCRYPTED |
                                  STATX_ATTR_IMMUTABLE |
                                  STATX_ATTR_NODUMP |
                                  STATX_ATTR_VERITY);

        generic_fillattr(inode, stat);

        /* we need to show initial sectors used for inline_data/dentries */
        if ((S_ISREG(inode->i_mode) && f2fs_has_inline_data(inode)) ||
                                        f2fs_has_inline_dentry(inode))
                stat->blocks += (stat->size + 511) >> 9;

        return 0;
}

#ifdef CONFIG_F2FS_FS_POSIX_ACL
static void __setattr_copy(struct inode *inode, const struct iattr *attr)
{
        unsigned int ia_valid = attr->ia_valid;

        if (ia_valid & ATTR_UID)
                inode->i_uid = attr->ia_uid;
        if (ia_valid & ATTR_GID)
                inode->i_gid = attr->ia_gid;
        if (ia_valid & ATTR_ATIME)
                inode->i_atime = attr->ia_atime;
        if (ia_valid & ATTR_MTIME)
                inode->i_mtime = attr->ia_mtime;
        if (ia_valid & ATTR_CTIME)
                inode->i_ctime = attr->ia_ctime;
        if (ia_valid & ATTR_MODE) {
                umode_t mode = attr->ia_mode;

                if (!in_group_p(inode->i_gid) &&
                        !capable_wrt_inode_uidgid(inode, CAP_FSETID))
                        mode &= ~S_ISGID;
                set_acl_inode(inode, mode);
        }
}
#else
#define __setattr_copy setattr_copy
#endif

int f2fs_setattr(struct dentry *dentry, struct iattr *attr)
{
        struct inode *inode = d_inode(dentry);
        int err;

        if (unlikely(f2fs_cp_error(F2FS_I_SB(inode))))
                return -EIO;

        if (unlikely(IS_IMMUTABLE(inode)))
                return -EPERM;

        if (unlikely(IS_APPEND(inode) &&
                        (attr->ia_valid & (ATTR_MODE | ATTR_UID |
                                  ATTR_GID | ATTR_TIMES_SET))))
                return -EPERM;

        if ((attr->ia_valid & ATTR_SIZE) &&
                !f2fs_is_compress_backend_ready(inode))
                return -EOPNOTSUPP;

        err = setattr_prepare(dentry, attr);
        if (err)
                return err;

        err = fscrypt_prepare_setattr(dentry, attr);
        if (err)
                return err;

        err = fsverity_prepare_setattr(dentry, attr);
        if (err)
                return err;

        if (is_quota_modification(inode, attr)) {
                err = dquot_initialize(inode);
                if (err)
                        return err;
        }
        if ((attr->ia_valid & ATTR_UID &&
                !uid_eq(attr->ia_uid, inode->i_uid)) ||
                (attr->ia_valid & ATTR_GID &&
                !gid_eq(attr->ia_gid, inode->i_gid))) {
                f2fs_lock_op(F2FS_I_SB(inode));
                err = dquot_transfer(inode, attr);
                if (err) {
                        set_sbi_flag(F2FS_I_SB(inode),
                                        SBI_QUOTA_NEED_REPAIR);
                        f2fs_unlock_op(F2FS_I_SB(inode));
                        return err;
                }
                /*
                 * update uid/gid under lock_op(), so that dquot and inode can
                 * be updated atomically.
                 */
                if (attr->ia_valid & ATTR_UID)
                        inode->i_uid = attr->ia_uid;
                if (attr->ia_valid & ATTR_GID)
                        inode->i_gid = attr->ia_gid;
                f2fs_mark_inode_dirty_sync(inode, true);
                f2fs_unlock_op(F2FS_I_SB(inode));
        }

        if (attr->ia_valid & ATTR_SIZE) {
                loff_t old_size = i_size_read(inode);

                if (attr->ia_size > MAX_INLINE_DATA(inode)) {
                        /*
                         * convert the inline inode before i_size_write, so the
                         * inode never exceeds inline_data size while the
                         * inline flag is still set.
                         */
                        err = f2fs_convert_inline_inode(inode);
                        if (err)
                                return err;
                }

                f2fs_down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
                f2fs_down_write(&F2FS_I(inode)->i_mmap_sem);

                truncate_setsize(inode, attr->ia_size);

                if (attr->ia_size <= old_size)
                        err = f2fs_truncate(inode);
                /*
                 * do not trim all blocks after i_size if target size is
                 * larger than i_size.
                 */
                f2fs_up_write(&F2FS_I(inode)->i_mmap_sem);
                f2fs_up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
                if (err)
                        return err;

                spin_lock(&F2FS_I(inode)->i_size_lock);
                inode->i_mtime = inode->i_ctime = current_time(inode);
                F2FS_I(inode)->last_disk_size = i_size_read(inode);
                spin_unlock(&F2FS_I(inode)->i_size_lock);
        }

        __setattr_copy(inode, attr);

        if (attr->ia_valid & ATTR_MODE) {
                err = posix_acl_chmod(inode, f2fs_get_inode_mode(inode));

                if (is_inode_flag_set(inode, FI_ACL_MODE)) {
                        if (!err)
                                inode->i_mode = F2FS_I(inode)->i_acl_mode;
                        clear_inode_flag(inode, FI_ACL_MODE);
                }
        }

        /* file size may have changed here */
        f2fs_mark_inode_dirty_sync(inode, true);

        /* inode change will produce dirty node pages flushed by checkpoint */
        f2fs_balance_fs(F2FS_I_SB(inode), true);

        return err;
}

const struct inode_operations f2fs_file_inode_operations = {
        .getattr        = f2fs_getattr,
        .setattr        = f2fs_setattr,
        .get_acl        = f2fs_get_acl,
        .set_acl        = f2fs_set_acl,
        .listxattr      = f2fs_listxattr,
        .fiemap         = f2fs_fiemap,
};

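/*
 * Zero @len bytes at @start within the data page at @index, allocating the
 * page if necessary and marking it dirty afterwards.
 */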
static int fill_zero(struct inode *inode, pgoff_t index,
                                        loff_t start, loff_t len)
{
        struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
        struct page *page;

        if (!len)
                return 0;

        f2fs_balance_fs(sbi, true);

        f2fs_lock_op(sbi);
        page = f2fs_get_new_data_page(inode, NULL, index, false);
        f2fs_unlock_op(sbi);

        if (IS_ERR(page))
                return PTR_ERR(page);

        f2fs_wait_on_page_writeback(page, DATA, true, true);
        zero_user(page, start, len);
        set_page_dirty(page);
        f2fs_put_page(page, 1);
        return 0;
}

int f2fs_truncate_hole(struct inode *inode, pgoff_t pg_start, pgoff_t pg_end)
{
        int err;

        while (pg_start < pg_end) {
                struct dnode_of_data dn;
                pgoff_t end_offset, count;

                set_new_dnode(&dn, inode, NULL, NULL, 0);
                err = f2fs_get_dnode_of_data(&dn, pg_start, LOOKUP_NODE);
                if (err) {
                        if (err == -ENOENT) {
                                pg_start = f2fs_get_next_page_offset(&dn,
                                                                pg_start);
                                continue;
                        }
                        return err;
                }

                end_offset = ADDRS_PER_PAGE(dn.node_page, inode);
                count = min(end_offset - dn.ofs_in_node, pg_end - pg_start);

                f2fs_bug_on(F2FS_I_SB(inode), count == 0 || count > end_offset);

                f2fs_truncate_data_blocks_range(&dn, count);
                f2fs_put_dnode(&dn);

                pg_start += count;
        }
        return 0;
}

static int punch_hole(struct inode *inode, loff_t offset, loff_t len)
{
        pgoff_t pg_start, pg_end;
        loff_t off_start, off_end;
        int ret;

        ret = f2fs_convert_inline_inode(inode);
        if (ret)
                return ret;

        pg_start = ((unsigned long long) offset) >> PAGE_SHIFT;
        pg_end = ((unsigned long long) offset + len) >> PAGE_SHIFT;

        off_start = offset & (PAGE_SIZE - 1);
        off_end = (offset + len) & (PAGE_SIZE - 1);

        if (pg_start == pg_end) {
                ret = fill_zero(inode, pg_start, off_start,
                                                off_end - off_start);
                if (ret)
                        return ret;
        } else {
                if (off_start) {
                        ret = fill_zero(inode, pg_start++, off_start,
                                                PAGE_SIZE - off_start);
                        if (ret)
                                return ret;
                }
                if (off_end) {
                        ret = fill_zero(inode, pg_end, 0, off_end);
                        if (ret)
                                return ret;
                }

                if (pg_start < pg_end) {
                        loff_t blk_start, blk_end;
                        struct f2fs_sb_info *sbi = F2FS_I_SB(inode);

                        f2fs_balance_fs(sbi, true);

                        blk_start = (loff_t)pg_start << PAGE_SHIFT;
                        blk_end = (loff_t)pg_end << PAGE_SHIFT;

                        f2fs_down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
                        f2fs_down_write(&F2FS_I(inode)->i_mmap_sem);

                        truncate_pagecache_range(inode, blk_start, blk_end - 1);

                        f2fs_lock_op(sbi);
                        ret = f2fs_truncate_hole(inode, pg_start, pg_end);
                        f2fs_unlock_op(sbi);

                        f2fs_up_write(&F2FS_I(inode)->i_mmap_sem);
                        f2fs_up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
                }
        }

        return ret;
}

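/*
 * Record the block addresses of the range [off, off + len) into @blkaddr.
 * Blocks that are not yet checkpointed are detached from the dnode (set to
 * NULL_ADDR without being invalidated) and flagged in @do_replace so they
 * can be moved rather than copied by __clone_blkaddrs().
 */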
static int __read_out_blkaddrs(struct inode *inode, block_t *blkaddr,
                                int *do_replace, pgoff_t off, pgoff_t len)
{
        struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
        struct dnode_of_data dn;
        int ret, done, i;

next_dnode:
        set_new_dnode(&dn, inode, NULL, NULL, 0);
        ret = f2fs_get_dnode_of_data(&dn, off, LOOKUP_NODE_RA);
        if (ret && ret != -ENOENT) {
                return ret;
        } else if (ret == -ENOENT) {
                if (dn.max_level == 0)
                        return -ENOENT;
                done = min((pgoff_t)ADDRS_PER_BLOCK(inode) -
                                                dn.ofs_in_node, len);
                blkaddr += done;
                do_replace += done;
                goto next;
        }

        done = min((pgoff_t)ADDRS_PER_PAGE(dn.node_page, inode) -
                                                        dn.ofs_in_node, len);
        for (i = 0; i < done; i++, blkaddr++, do_replace++, dn.ofs_in_node++) {
                *blkaddr = f2fs_data_blkaddr(&dn);

                if (__is_valid_data_blkaddr(*blkaddr) &&
                        !f2fs_is_valid_blkaddr(sbi, *blkaddr,
                                        DATA_GENERIC_ENHANCE)) {
                        f2fs_put_dnode(&dn);
                        return -EFSCORRUPTED;
                }

                if (!f2fs_is_checkpointed_data(sbi, *blkaddr)) {

                        if (f2fs_lfs_mode(sbi)) {
                                f2fs_put_dnode(&dn);
                                return -EOPNOTSUPP;
                        }

                        /* do not invalidate this block address */
                        f2fs_update_data_blkaddr(&dn, NULL_ADDR);
                        *do_replace = 1;
                }
        }
        f2fs_put_dnode(&dn);
next:
        len -= done;
        off += done;
        if (len)
                goto next_dnode;
        return 0;
}

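/*
 * Undo __read_out_blkaddrs() after a failure: re-attach every block address
 * that was detached (do_replace set), or release it if its dnode can no
 * longer be looked up.
 */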
static int __roll_back_blkaddrs(struct inode *inode, block_t *blkaddr,
                                int *do_replace, pgoff_t off, int len)
{
        struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
        struct dnode_of_data dn;
        int ret, i;

        for (i = 0; i < len; i++, do_replace++, blkaddr++) {
                if (*do_replace == 0)
                        continue;

                set_new_dnode(&dn, inode, NULL, NULL, 0);
                ret = f2fs_get_dnode_of_data(&dn, off + i, LOOKUP_NODE_RA);
                if (ret) {
                        dec_valid_block_count(sbi, inode, 1);
                        f2fs_invalidate_blocks(sbi, *blkaddr);
                } else {
                        f2fs_update_data_blkaddr(&dn, *blkaddr);
                }
                f2fs_put_dnode(&dn);
        }
        return 0;
}

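/*
 * Move or copy @len blocks from @src in @src_inode to @dst in @dst_inode.
 * Detached blocks (do_replace set) are re-linked in place via
 * f2fs_replace_block(); other valid blocks are copied page by page, and
 * dst_inode's i_size is extended as needed.
 */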
__clone_blkaddrs(struct inode * src_inode,struct inode * dst_inode,block_t * blkaddr,int * do_replace,pgoff_t src,pgoff_t dst,pgoff_t len,bool full)1211*4882a593Smuzhiyun static int __clone_blkaddrs(struct inode *src_inode, struct inode *dst_inode,
1212*4882a593Smuzhiyun block_t *blkaddr, int *do_replace,
1213*4882a593Smuzhiyun pgoff_t src, pgoff_t dst, pgoff_t len, bool full)
1214*4882a593Smuzhiyun {
1215*4882a593Smuzhiyun struct f2fs_sb_info *sbi = F2FS_I_SB(src_inode);
1216*4882a593Smuzhiyun pgoff_t i = 0;
1217*4882a593Smuzhiyun int ret;
1218*4882a593Smuzhiyun
1219*4882a593Smuzhiyun while (i < len) {
1220*4882a593Smuzhiyun if (blkaddr[i] == NULL_ADDR && !full) {
1221*4882a593Smuzhiyun i++;
1222*4882a593Smuzhiyun continue;
1223*4882a593Smuzhiyun }
1224*4882a593Smuzhiyun
1225*4882a593Smuzhiyun if (do_replace[i] || blkaddr[i] == NULL_ADDR) {
1226*4882a593Smuzhiyun struct dnode_of_data dn;
1227*4882a593Smuzhiyun struct node_info ni;
1228*4882a593Smuzhiyun size_t new_size;
1229*4882a593Smuzhiyun pgoff_t ilen;
1230*4882a593Smuzhiyun
1231*4882a593Smuzhiyun set_new_dnode(&dn, dst_inode, NULL, NULL, 0);
1232*4882a593Smuzhiyun ret = f2fs_get_dnode_of_data(&dn, dst + i, ALLOC_NODE);
1233*4882a593Smuzhiyun if (ret)
1234*4882a593Smuzhiyun return ret;
1235*4882a593Smuzhiyun
1236*4882a593Smuzhiyun ret = f2fs_get_node_info(sbi, dn.nid, &ni, false);
1237*4882a593Smuzhiyun if (ret) {
1238*4882a593Smuzhiyun f2fs_put_dnode(&dn);
1239*4882a593Smuzhiyun return ret;
1240*4882a593Smuzhiyun }
1241*4882a593Smuzhiyun
1242*4882a593Smuzhiyun ilen = min((pgoff_t)
1243*4882a593Smuzhiyun ADDRS_PER_PAGE(dn.node_page, dst_inode) -
1244*4882a593Smuzhiyun dn.ofs_in_node, len - i);
1245*4882a593Smuzhiyun do {
1246*4882a593Smuzhiyun dn.data_blkaddr = f2fs_data_blkaddr(&dn);
1247*4882a593Smuzhiyun f2fs_truncate_data_blocks_range(&dn, 1);
1248*4882a593Smuzhiyun
1249*4882a593Smuzhiyun if (do_replace[i]) {
1250*4882a593Smuzhiyun f2fs_i_blocks_write(src_inode,
1251*4882a593Smuzhiyun 1, false, false);
1252*4882a593Smuzhiyun f2fs_i_blocks_write(dst_inode,
1253*4882a593Smuzhiyun 1, true, false);
1254*4882a593Smuzhiyun f2fs_replace_block(sbi, &dn, dn.data_blkaddr,
1255*4882a593Smuzhiyun blkaddr[i], ni.version, true, false);
1256*4882a593Smuzhiyun
1257*4882a593Smuzhiyun do_replace[i] = 0;
1258*4882a593Smuzhiyun }
1259*4882a593Smuzhiyun dn.ofs_in_node++;
1260*4882a593Smuzhiyun i++;
1261*4882a593Smuzhiyun new_size = (loff_t)(dst + i) << PAGE_SHIFT;
1262*4882a593Smuzhiyun if (dst_inode->i_size < new_size)
1263*4882a593Smuzhiyun f2fs_i_size_write(dst_inode, new_size);
1264*4882a593Smuzhiyun } while (--ilen && (do_replace[i] || blkaddr[i] == NULL_ADDR));
1265*4882a593Smuzhiyun
1266*4882a593Smuzhiyun f2fs_put_dnode(&dn);
1267*4882a593Smuzhiyun } else {
1268*4882a593Smuzhiyun struct page *psrc, *pdst;
1269*4882a593Smuzhiyun
1270*4882a593Smuzhiyun psrc = f2fs_get_lock_data_page(src_inode,
1271*4882a593Smuzhiyun src + i, true);
1272*4882a593Smuzhiyun if (IS_ERR(psrc))
1273*4882a593Smuzhiyun return PTR_ERR(psrc);
1274*4882a593Smuzhiyun pdst = f2fs_get_new_data_page(dst_inode, NULL, dst + i,
1275*4882a593Smuzhiyun true);
1276*4882a593Smuzhiyun if (IS_ERR(pdst)) {
1277*4882a593Smuzhiyun f2fs_put_page(psrc, 1);
1278*4882a593Smuzhiyun return PTR_ERR(pdst);
1279*4882a593Smuzhiyun }
1280*4882a593Smuzhiyun f2fs_copy_page(psrc, pdst);
1281*4882a593Smuzhiyun set_page_dirty(pdst);
1282*4882a593Smuzhiyun f2fs_put_page(pdst, 1);
1283*4882a593Smuzhiyun f2fs_put_page(psrc, 1);
1284*4882a593Smuzhiyun
1285*4882a593Smuzhiyun ret = f2fs_truncate_hole(src_inode,
1286*4882a593Smuzhiyun src + i, src + i + 1);
1287*4882a593Smuzhiyun if (ret)
1288*4882a593Smuzhiyun return ret;
1289*4882a593Smuzhiyun i++;
1290*4882a593Smuzhiyun }
1291*4882a593Smuzhiyun }
1292*4882a593Smuzhiyun return 0;
1293*4882a593Smuzhiyun }
1294*4882a593Smuzhiyun
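/*
 * Move @len blocks from @src to @dst, working in chunks of up to four
 * direct-node blocks. Each chunk records the source block addresses,
 * clones/moves them into the destination, and is rolled back with
 * __roll_back_blkaddrs() if either step fails.
 */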
1295*4882a593Smuzhiyun static int __exchange_data_block(struct inode *src_inode,
1296*4882a593Smuzhiyun struct inode *dst_inode, pgoff_t src, pgoff_t dst,
1297*4882a593Smuzhiyun pgoff_t len, bool full)
1298*4882a593Smuzhiyun {
1299*4882a593Smuzhiyun block_t *src_blkaddr;
1300*4882a593Smuzhiyun int *do_replace;
1301*4882a593Smuzhiyun pgoff_t olen;
1302*4882a593Smuzhiyun int ret;
1303*4882a593Smuzhiyun
1304*4882a593Smuzhiyun while (len) {
1305*4882a593Smuzhiyun olen = min((pgoff_t)4 * ADDRS_PER_BLOCK(src_inode), len);
1306*4882a593Smuzhiyun
1307*4882a593Smuzhiyun src_blkaddr = f2fs_kvzalloc(F2FS_I_SB(src_inode),
1308*4882a593Smuzhiyun array_size(olen, sizeof(block_t)),
1309*4882a593Smuzhiyun GFP_NOFS);
1310*4882a593Smuzhiyun if (!src_blkaddr)
1311*4882a593Smuzhiyun return -ENOMEM;
1312*4882a593Smuzhiyun
1313*4882a593Smuzhiyun do_replace = f2fs_kvzalloc(F2FS_I_SB(src_inode),
1314*4882a593Smuzhiyun array_size(olen, sizeof(int)),
1315*4882a593Smuzhiyun GFP_NOFS);
1316*4882a593Smuzhiyun if (!do_replace) {
1317*4882a593Smuzhiyun kvfree(src_blkaddr);
1318*4882a593Smuzhiyun return -ENOMEM;
1319*4882a593Smuzhiyun }
1320*4882a593Smuzhiyun
1321*4882a593Smuzhiyun ret = __read_out_blkaddrs(src_inode, src_blkaddr,
1322*4882a593Smuzhiyun do_replace, src, olen);
1323*4882a593Smuzhiyun if (ret)
1324*4882a593Smuzhiyun goto roll_back;
1325*4882a593Smuzhiyun
1326*4882a593Smuzhiyun ret = __clone_blkaddrs(src_inode, dst_inode, src_blkaddr,
1327*4882a593Smuzhiyun do_replace, src, dst, olen, full);
1328*4882a593Smuzhiyun if (ret)
1329*4882a593Smuzhiyun goto roll_back;
1330*4882a593Smuzhiyun
1331*4882a593Smuzhiyun src += olen;
1332*4882a593Smuzhiyun dst += olen;
1333*4882a593Smuzhiyun len -= olen;
1334*4882a593Smuzhiyun
1335*4882a593Smuzhiyun kvfree(src_blkaddr);
1336*4882a593Smuzhiyun kvfree(do_replace);
1337*4882a593Smuzhiyun }
1338*4882a593Smuzhiyun return 0;
1339*4882a593Smuzhiyun
1340*4882a593Smuzhiyun roll_back:
1341*4882a593Smuzhiyun __roll_back_blkaddrs(src_inode, src_blkaddr, do_replace, src, olen);
1342*4882a593Smuzhiyun kvfree(src_blkaddr);
1343*4882a593Smuzhiyun kvfree(do_replace);
1344*4882a593Smuzhiyun return ret;
1345*4882a593Smuzhiyun }
1346*4882a593Smuzhiyun
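/*
 * Core of FALLOC_FL_COLLAPSE_RANGE: with GC and mmap writers excluded,
 * drop the extent cache, truncate the page cache from @offset and shift
 * every block after the collapsed range down to @offset.
 */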
1347*4882a593Smuzhiyun static int f2fs_do_collapse(struct inode *inode, loff_t offset, loff_t len)
1348*4882a593Smuzhiyun {
1349*4882a593Smuzhiyun struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
1350*4882a593Smuzhiyun pgoff_t nrpages = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);
1351*4882a593Smuzhiyun pgoff_t start = offset >> PAGE_SHIFT;
1352*4882a593Smuzhiyun pgoff_t end = (offset + len) >> PAGE_SHIFT;
1353*4882a593Smuzhiyun int ret;
1354*4882a593Smuzhiyun
1355*4882a593Smuzhiyun f2fs_balance_fs(sbi, true);
1356*4882a593Smuzhiyun
1357*4882a593Smuzhiyun /* avoid gc operation during block exchange */
1358*4882a593Smuzhiyun f2fs_down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
1359*4882a593Smuzhiyun f2fs_down_write(&F2FS_I(inode)->i_mmap_sem);
1360*4882a593Smuzhiyun
1361*4882a593Smuzhiyun f2fs_lock_op(sbi);
1362*4882a593Smuzhiyun f2fs_drop_extent_tree(inode);
1363*4882a593Smuzhiyun truncate_pagecache(inode, offset);
1364*4882a593Smuzhiyun ret = __exchange_data_block(inode, inode, end, start, nrpages - end, true);
1365*4882a593Smuzhiyun f2fs_unlock_op(sbi);
1366*4882a593Smuzhiyun
1367*4882a593Smuzhiyun f2fs_up_write(&F2FS_I(inode)->i_mmap_sem);
1368*4882a593Smuzhiyun f2fs_up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
1369*4882a593Smuzhiyun return ret;
1370*4882a593Smuzhiyun }
1371*4882a593Smuzhiyun
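/*
 * FALLOC_FL_COLLAPSE_RANGE handler: remove a block-aligned range from the
 * middle of a file and shrink i_size accordingly. A userspace caller would
 * look roughly like this (illustrative sketch only):
 *
 *	fallocate(fd, FALLOC_FL_COLLAPSE_RANGE, offset, len);
 */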
1372*4882a593Smuzhiyun static int f2fs_collapse_range(struct inode *inode, loff_t offset, loff_t len)
1373*4882a593Smuzhiyun {
1374*4882a593Smuzhiyun loff_t new_size;
1375*4882a593Smuzhiyun int ret;
1376*4882a593Smuzhiyun
1377*4882a593Smuzhiyun if (offset + len >= i_size_read(inode))
1378*4882a593Smuzhiyun return -EINVAL;
1379*4882a593Smuzhiyun
1380*4882a593Smuzhiyun /* the collapse range must be aligned to the f2fs block size */
1381*4882a593Smuzhiyun if (offset & (F2FS_BLKSIZE - 1) || len & (F2FS_BLKSIZE - 1))
1382*4882a593Smuzhiyun return -EINVAL;
1383*4882a593Smuzhiyun
1384*4882a593Smuzhiyun ret = f2fs_convert_inline_inode(inode);
1385*4882a593Smuzhiyun if (ret)
1386*4882a593Smuzhiyun return ret;
1387*4882a593Smuzhiyun
1388*4882a593Smuzhiyun /* write out all dirty pages from offset */
1389*4882a593Smuzhiyun ret = filemap_write_and_wait_range(inode->i_mapping, offset, LLONG_MAX);
1390*4882a593Smuzhiyun if (ret)
1391*4882a593Smuzhiyun return ret;
1392*4882a593Smuzhiyun
1393*4882a593Smuzhiyun ret = f2fs_do_collapse(inode, offset, len);
1394*4882a593Smuzhiyun if (ret)
1395*4882a593Smuzhiyun return ret;
1396*4882a593Smuzhiyun
1397*4882a593Smuzhiyun /* write out all moved pages, if possible */
1398*4882a593Smuzhiyun f2fs_down_write(&F2FS_I(inode)->i_mmap_sem);
1399*4882a593Smuzhiyun filemap_write_and_wait_range(inode->i_mapping, offset, LLONG_MAX);
1400*4882a593Smuzhiyun truncate_pagecache(inode, offset);
1401*4882a593Smuzhiyun
1402*4882a593Smuzhiyun new_size = i_size_read(inode) - len;
1403*4882a593Smuzhiyun ret = f2fs_truncate_blocks(inode, new_size, true);
1404*4882a593Smuzhiyun f2fs_up_write(&F2FS_I(inode)->i_mmap_sem);
1405*4882a593Smuzhiyun if (!ret)
1406*4882a593Smuzhiyun f2fs_i_size_write(inode, new_size);
1407*4882a593Smuzhiyun return ret;
1408*4882a593Smuzhiyun }
1409*4882a593Smuzhiyun
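/*
 * Convert the blocks in [start, end) of the locked dnode to preallocated
 * zeroed blocks: reserve new blocks for the holes, invalidate any existing
 * on-disk blocks and mark every entry NEW_ADDR, then refresh the read
 * extent cache for the range.
 */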
1410*4882a593Smuzhiyun static int f2fs_do_zero_range(struct dnode_of_data *dn, pgoff_t start,
1411*4882a593Smuzhiyun pgoff_t end)
1412*4882a593Smuzhiyun {
1413*4882a593Smuzhiyun struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
1414*4882a593Smuzhiyun pgoff_t index = start;
1415*4882a593Smuzhiyun unsigned int ofs_in_node = dn->ofs_in_node;
1416*4882a593Smuzhiyun blkcnt_t count = 0;
1417*4882a593Smuzhiyun int ret;
1418*4882a593Smuzhiyun
1419*4882a593Smuzhiyun for (; index < end; index++, dn->ofs_in_node++) {
1420*4882a593Smuzhiyun if (f2fs_data_blkaddr(dn) == NULL_ADDR)
1421*4882a593Smuzhiyun count++;
1422*4882a593Smuzhiyun }
1423*4882a593Smuzhiyun
1424*4882a593Smuzhiyun dn->ofs_in_node = ofs_in_node;
1425*4882a593Smuzhiyun ret = f2fs_reserve_new_blocks(dn, count);
1426*4882a593Smuzhiyun if (ret)
1427*4882a593Smuzhiyun return ret;
1428*4882a593Smuzhiyun
1429*4882a593Smuzhiyun dn->ofs_in_node = ofs_in_node;
1430*4882a593Smuzhiyun for (index = start; index < end; index++, dn->ofs_in_node++) {
1431*4882a593Smuzhiyun dn->data_blkaddr = f2fs_data_blkaddr(dn);
1432*4882a593Smuzhiyun /*
1433*4882a593Smuzhiyun * f2fs_reserve_new_blocks() does not guarantee that every
1434*4882a593Smuzhiyun * requested block was actually allocated.
1435*4882a593Smuzhiyun */
1436*4882a593Smuzhiyun if (dn->data_blkaddr == NULL_ADDR) {
1437*4882a593Smuzhiyun ret = -ENOSPC;
1438*4882a593Smuzhiyun break;
1439*4882a593Smuzhiyun }
1440*4882a593Smuzhiyun
1441*4882a593Smuzhiyun if (dn->data_blkaddr == NEW_ADDR)
1442*4882a593Smuzhiyun continue;
1443*4882a593Smuzhiyun
1444*4882a593Smuzhiyun if (!f2fs_is_valid_blkaddr(sbi, dn->data_blkaddr,
1445*4882a593Smuzhiyun DATA_GENERIC_ENHANCE)) {
1446*4882a593Smuzhiyun ret = -EFSCORRUPTED;
1447*4882a593Smuzhiyun break;
1448*4882a593Smuzhiyun }
1449*4882a593Smuzhiyun
1450*4882a593Smuzhiyun f2fs_invalidate_blocks(sbi, dn->data_blkaddr);
1451*4882a593Smuzhiyun dn->data_blkaddr = NEW_ADDR;
1452*4882a593Smuzhiyun f2fs_set_data_blkaddr(dn);
1453*4882a593Smuzhiyun }
1454*4882a593Smuzhiyun
1455*4882a593Smuzhiyun f2fs_update_read_extent_cache_range(dn, start, 0, index - start);
1456*4882a593Smuzhiyun
1457*4882a593Smuzhiyun return ret;
1458*4882a593Smuzhiyun }
1459*4882a593Smuzhiyun
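/*
 * FALLOC_FL_ZERO_RANGE handler: zero the partial pages at either end with
 * fill_zero() and turn the fully covered blocks in between into NEW_ADDR
 * preallocations via f2fs_do_zero_range(), extending i_size unless
 * FALLOC_FL_KEEP_SIZE was requested.
 */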
1460*4882a593Smuzhiyun static int f2fs_zero_range(struct inode *inode, loff_t offset, loff_t len,
1461*4882a593Smuzhiyun int mode)
1462*4882a593Smuzhiyun {
1463*4882a593Smuzhiyun struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
1464*4882a593Smuzhiyun struct address_space *mapping = inode->i_mapping;
1465*4882a593Smuzhiyun pgoff_t index, pg_start, pg_end;
1466*4882a593Smuzhiyun loff_t new_size = i_size_read(inode);
1467*4882a593Smuzhiyun loff_t off_start, off_end;
1468*4882a593Smuzhiyun int ret = 0;
1469*4882a593Smuzhiyun
1470*4882a593Smuzhiyun ret = inode_newsize_ok(inode, (len + offset));
1471*4882a593Smuzhiyun if (ret)
1472*4882a593Smuzhiyun return ret;
1473*4882a593Smuzhiyun
1474*4882a593Smuzhiyun ret = f2fs_convert_inline_inode(inode);
1475*4882a593Smuzhiyun if (ret)
1476*4882a593Smuzhiyun return ret;
1477*4882a593Smuzhiyun
1478*4882a593Smuzhiyun ret = filemap_write_and_wait_range(mapping, offset, offset + len - 1);
1479*4882a593Smuzhiyun if (ret)
1480*4882a593Smuzhiyun return ret;
1481*4882a593Smuzhiyun
1482*4882a593Smuzhiyun pg_start = ((unsigned long long) offset) >> PAGE_SHIFT;
1483*4882a593Smuzhiyun pg_end = ((unsigned long long) offset + len) >> PAGE_SHIFT;
1484*4882a593Smuzhiyun
1485*4882a593Smuzhiyun off_start = offset & (PAGE_SIZE - 1);
1486*4882a593Smuzhiyun off_end = (offset + len) & (PAGE_SIZE - 1);
1487*4882a593Smuzhiyun
1488*4882a593Smuzhiyun if (pg_start == pg_end) {
1489*4882a593Smuzhiyun ret = fill_zero(inode, pg_start, off_start,
1490*4882a593Smuzhiyun off_end - off_start);
1491*4882a593Smuzhiyun if (ret)
1492*4882a593Smuzhiyun return ret;
1493*4882a593Smuzhiyun
1494*4882a593Smuzhiyun new_size = max_t(loff_t, new_size, offset + len);
1495*4882a593Smuzhiyun } else {
1496*4882a593Smuzhiyun if (off_start) {
1497*4882a593Smuzhiyun ret = fill_zero(inode, pg_start++, off_start,
1498*4882a593Smuzhiyun PAGE_SIZE - off_start);
1499*4882a593Smuzhiyun if (ret)
1500*4882a593Smuzhiyun return ret;
1501*4882a593Smuzhiyun
1502*4882a593Smuzhiyun new_size = max_t(loff_t, new_size,
1503*4882a593Smuzhiyun (loff_t)pg_start << PAGE_SHIFT);
1504*4882a593Smuzhiyun }
1505*4882a593Smuzhiyun
1506*4882a593Smuzhiyun for (index = pg_start; index < pg_end;) {
1507*4882a593Smuzhiyun struct dnode_of_data dn;
1508*4882a593Smuzhiyun unsigned int end_offset;
1509*4882a593Smuzhiyun pgoff_t end;
1510*4882a593Smuzhiyun
1511*4882a593Smuzhiyun f2fs_down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
1512*4882a593Smuzhiyun f2fs_down_write(&F2FS_I(inode)->i_mmap_sem);
1513*4882a593Smuzhiyun
1514*4882a593Smuzhiyun truncate_pagecache_range(inode,
1515*4882a593Smuzhiyun (loff_t)index << PAGE_SHIFT,
1516*4882a593Smuzhiyun ((loff_t)pg_end << PAGE_SHIFT) - 1);
1517*4882a593Smuzhiyun
1518*4882a593Smuzhiyun f2fs_lock_op(sbi);
1519*4882a593Smuzhiyun
1520*4882a593Smuzhiyun set_new_dnode(&dn, inode, NULL, NULL, 0);
1521*4882a593Smuzhiyun ret = f2fs_get_dnode_of_data(&dn, index, ALLOC_NODE);
1522*4882a593Smuzhiyun if (ret) {
1523*4882a593Smuzhiyun f2fs_unlock_op(sbi);
1524*4882a593Smuzhiyun f2fs_up_write(&F2FS_I(inode)->i_mmap_sem);
1525*4882a593Smuzhiyun f2fs_up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
1526*4882a593Smuzhiyun goto out;
1527*4882a593Smuzhiyun }
1528*4882a593Smuzhiyun
1529*4882a593Smuzhiyun end_offset = ADDRS_PER_PAGE(dn.node_page, inode);
1530*4882a593Smuzhiyun end = min(pg_end, end_offset - dn.ofs_in_node + index);
1531*4882a593Smuzhiyun
1532*4882a593Smuzhiyun ret = f2fs_do_zero_range(&dn, index, end);
1533*4882a593Smuzhiyun f2fs_put_dnode(&dn);
1534*4882a593Smuzhiyun
1535*4882a593Smuzhiyun f2fs_unlock_op(sbi);
1536*4882a593Smuzhiyun f2fs_up_write(&F2FS_I(inode)->i_mmap_sem);
1537*4882a593Smuzhiyun f2fs_up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
1538*4882a593Smuzhiyun
1539*4882a593Smuzhiyun f2fs_balance_fs(sbi, dn.node_changed);
1540*4882a593Smuzhiyun
1541*4882a593Smuzhiyun if (ret)
1542*4882a593Smuzhiyun goto out;
1543*4882a593Smuzhiyun
1544*4882a593Smuzhiyun index = end;
1545*4882a593Smuzhiyun new_size = max_t(loff_t, new_size,
1546*4882a593Smuzhiyun (loff_t)index << PAGE_SHIFT);
1547*4882a593Smuzhiyun }
1548*4882a593Smuzhiyun
1549*4882a593Smuzhiyun if (off_end) {
1550*4882a593Smuzhiyun ret = fill_zero(inode, pg_end, 0, off_end);
1551*4882a593Smuzhiyun if (ret)
1552*4882a593Smuzhiyun goto out;
1553*4882a593Smuzhiyun
1554*4882a593Smuzhiyun new_size = max_t(loff_t, new_size, offset + len);
1555*4882a593Smuzhiyun }
1556*4882a593Smuzhiyun }
1557*4882a593Smuzhiyun
1558*4882a593Smuzhiyun out:
1559*4882a593Smuzhiyun if (new_size > i_size_read(inode)) {
1560*4882a593Smuzhiyun if (mode & FALLOC_FL_KEEP_SIZE)
1561*4882a593Smuzhiyun file_set_keep_isize(inode);
1562*4882a593Smuzhiyun else
1563*4882a593Smuzhiyun f2fs_i_size_write(inode, new_size);
1564*4882a593Smuzhiyun }
1565*4882a593Smuzhiyun return ret;
1566*4882a593Smuzhiyun }
1567*4882a593Smuzhiyun
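/*
 * FALLOC_FL_INSERT_RANGE handler: make room inside the file by shifting
 * every block at or after @offset up by @len bytes (walking backwards in
 * chunks so blocks are never overwritten), then grow i_size by @len.
 */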
1568*4882a593Smuzhiyun static int f2fs_insert_range(struct inode *inode, loff_t offset, loff_t len)
1569*4882a593Smuzhiyun {
1570*4882a593Smuzhiyun struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
1571*4882a593Smuzhiyun pgoff_t nr, pg_start, pg_end, delta, idx;
1572*4882a593Smuzhiyun loff_t new_size;
1573*4882a593Smuzhiyun int ret = 0;
1574*4882a593Smuzhiyun
1575*4882a593Smuzhiyun new_size = i_size_read(inode) + len;
1576*4882a593Smuzhiyun ret = inode_newsize_ok(inode, new_size);
1577*4882a593Smuzhiyun if (ret)
1578*4882a593Smuzhiyun return ret;
1579*4882a593Smuzhiyun
1580*4882a593Smuzhiyun if (offset >= i_size_read(inode))
1581*4882a593Smuzhiyun return -EINVAL;
1582*4882a593Smuzhiyun
1583*4882a593Smuzhiyun /* the insert range must be aligned to the f2fs block size */
1584*4882a593Smuzhiyun if (offset & (F2FS_BLKSIZE - 1) || len & (F2FS_BLKSIZE - 1))
1585*4882a593Smuzhiyun return -EINVAL;
1586*4882a593Smuzhiyun
1587*4882a593Smuzhiyun ret = f2fs_convert_inline_inode(inode);
1588*4882a593Smuzhiyun if (ret)
1589*4882a593Smuzhiyun return ret;
1590*4882a593Smuzhiyun
1591*4882a593Smuzhiyun f2fs_balance_fs(sbi, true);
1592*4882a593Smuzhiyun
1593*4882a593Smuzhiyun f2fs_down_write(&F2FS_I(inode)->i_mmap_sem);
1594*4882a593Smuzhiyun ret = f2fs_truncate_blocks(inode, i_size_read(inode), true);
1595*4882a593Smuzhiyun f2fs_up_write(&F2FS_I(inode)->i_mmap_sem);
1596*4882a593Smuzhiyun if (ret)
1597*4882a593Smuzhiyun return ret;
1598*4882a593Smuzhiyun
1599*4882a593Smuzhiyun /* write out all dirty pages from offset */
1600*4882a593Smuzhiyun ret = filemap_write_and_wait_range(inode->i_mapping, offset, LLONG_MAX);
1601*4882a593Smuzhiyun if (ret)
1602*4882a593Smuzhiyun return ret;
1603*4882a593Smuzhiyun
1604*4882a593Smuzhiyun pg_start = offset >> PAGE_SHIFT;
1605*4882a593Smuzhiyun pg_end = (offset + len) >> PAGE_SHIFT;
1606*4882a593Smuzhiyun delta = pg_end - pg_start;
1607*4882a593Smuzhiyun idx = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);
1608*4882a593Smuzhiyun
1609*4882a593Smuzhiyun /* avoid gc operation during block exchange */
1610*4882a593Smuzhiyun f2fs_down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
1611*4882a593Smuzhiyun f2fs_down_write(&F2FS_I(inode)->i_mmap_sem);
1612*4882a593Smuzhiyun truncate_pagecache(inode, offset);
1613*4882a593Smuzhiyun
1614*4882a593Smuzhiyun while (!ret && idx > pg_start) {
1615*4882a593Smuzhiyun nr = idx - pg_start;
1616*4882a593Smuzhiyun if (nr > delta)
1617*4882a593Smuzhiyun nr = delta;
1618*4882a593Smuzhiyun idx -= nr;
1619*4882a593Smuzhiyun
1620*4882a593Smuzhiyun f2fs_lock_op(sbi);
1621*4882a593Smuzhiyun f2fs_drop_extent_tree(inode);
1622*4882a593Smuzhiyun
1623*4882a593Smuzhiyun ret = __exchange_data_block(inode, inode, idx,
1624*4882a593Smuzhiyun idx + delta, nr, false);
1625*4882a593Smuzhiyun f2fs_unlock_op(sbi);
1626*4882a593Smuzhiyun }
1627*4882a593Smuzhiyun f2fs_up_write(&F2FS_I(inode)->i_mmap_sem);
1628*4882a593Smuzhiyun f2fs_up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
1629*4882a593Smuzhiyun
1630*4882a593Smuzhiyun /* write out all moved pages, if possible */
1631*4882a593Smuzhiyun f2fs_down_write(&F2FS_I(inode)->i_mmap_sem);
1632*4882a593Smuzhiyun filemap_write_and_wait_range(inode->i_mapping, offset, LLONG_MAX);
1633*4882a593Smuzhiyun truncate_pagecache(inode, offset);
1634*4882a593Smuzhiyun f2fs_up_write(&F2FS_I(inode)->i_mmap_sem);
1635*4882a593Smuzhiyun
1636*4882a593Smuzhiyun if (!ret)
1637*4882a593Smuzhiyun f2fs_i_size_write(inode, new_size);
1638*4882a593Smuzhiyun return ret;
1639*4882a593Smuzhiyun }
1640*4882a593Smuzhiyun
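/*
 * Plain fallocate() preallocation. Regular files are handled with a single
 * f2fs_map_blocks() pass; pinned files are expanded one section at a time
 * from CURSEG_COLD_DATA_PINNED, running foreground GC first when free
 * sections run low.
 */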
1641*4882a593Smuzhiyun static int expand_inode_data(struct inode *inode, loff_t offset,
1642*4882a593Smuzhiyun loff_t len, int mode)
1643*4882a593Smuzhiyun {
1644*4882a593Smuzhiyun struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
1645*4882a593Smuzhiyun struct f2fs_map_blocks map = { .m_next_pgofs = NULL,
1646*4882a593Smuzhiyun .m_next_extent = NULL, .m_seg_type = NO_CHECK_TYPE,
1647*4882a593Smuzhiyun .m_may_create = true };
1648*4882a593Smuzhiyun pgoff_t pg_start, pg_end;
1649*4882a593Smuzhiyun loff_t new_size = i_size_read(inode);
1650*4882a593Smuzhiyun loff_t off_end;
1651*4882a593Smuzhiyun block_t expanded = 0;
1652*4882a593Smuzhiyun int err;
1653*4882a593Smuzhiyun
1654*4882a593Smuzhiyun err = inode_newsize_ok(inode, (len + offset));
1655*4882a593Smuzhiyun if (err)
1656*4882a593Smuzhiyun return err;
1657*4882a593Smuzhiyun
1658*4882a593Smuzhiyun err = f2fs_convert_inline_inode(inode);
1659*4882a593Smuzhiyun if (err)
1660*4882a593Smuzhiyun return err;
1661*4882a593Smuzhiyun
1662*4882a593Smuzhiyun f2fs_balance_fs(sbi, true);
1663*4882a593Smuzhiyun
1664*4882a593Smuzhiyun pg_start = ((unsigned long long)offset) >> PAGE_SHIFT;
1665*4882a593Smuzhiyun pg_end = ((unsigned long long)offset + len) >> PAGE_SHIFT;
1666*4882a593Smuzhiyun off_end = (offset + len) & (PAGE_SIZE - 1);
1667*4882a593Smuzhiyun
1668*4882a593Smuzhiyun map.m_lblk = pg_start;
1669*4882a593Smuzhiyun map.m_len = pg_end - pg_start;
1670*4882a593Smuzhiyun if (off_end)
1671*4882a593Smuzhiyun map.m_len++;
1672*4882a593Smuzhiyun
1673*4882a593Smuzhiyun if (!map.m_len)
1674*4882a593Smuzhiyun return 0;
1675*4882a593Smuzhiyun
1676*4882a593Smuzhiyun if (f2fs_is_pinned_file(inode)) {
1677*4882a593Smuzhiyun block_t sec_blks = BLKS_PER_SEC(sbi);
1678*4882a593Smuzhiyun block_t sec_len = roundup(map.m_len, sec_blks);
1679*4882a593Smuzhiyun
1680*4882a593Smuzhiyun map.m_len = sec_blks;
1681*4882a593Smuzhiyun next_alloc:
1682*4882a593Smuzhiyun if (has_not_enough_free_secs(sbi, 0,
1683*4882a593Smuzhiyun GET_SEC_FROM_SEG(sbi, overprovision_segments(sbi)))) {
1684*4882a593Smuzhiyun f2fs_down_write(&sbi->gc_lock);
1685*4882a593Smuzhiyun err = f2fs_gc(sbi, true, false, false, NULL_SEGNO);
1686*4882a593Smuzhiyun if (err && err != -ENODATA && err != -EAGAIN)
1687*4882a593Smuzhiyun goto out_err;
1688*4882a593Smuzhiyun }
1689*4882a593Smuzhiyun
1690*4882a593Smuzhiyun f2fs_down_write(&sbi->pin_sem);
1691*4882a593Smuzhiyun
1692*4882a593Smuzhiyun f2fs_lock_op(sbi);
1693*4882a593Smuzhiyun f2fs_allocate_new_section(sbi, CURSEG_COLD_DATA_PINNED, false);
1694*4882a593Smuzhiyun f2fs_unlock_op(sbi);
1695*4882a593Smuzhiyun
1696*4882a593Smuzhiyun map.m_seg_type = CURSEG_COLD_DATA_PINNED;
1697*4882a593Smuzhiyun err = f2fs_map_blocks(inode, &map, 1, F2FS_GET_BLOCK_PRE_DIO);
1698*4882a593Smuzhiyun
1699*4882a593Smuzhiyun f2fs_up_write(&sbi->pin_sem);
1700*4882a593Smuzhiyun
1701*4882a593Smuzhiyun expanded += map.m_len;
1702*4882a593Smuzhiyun sec_len -= map.m_len;
1703*4882a593Smuzhiyun map.m_lblk += map.m_len;
1704*4882a593Smuzhiyun if (!err && sec_len)
1705*4882a593Smuzhiyun goto next_alloc;
1706*4882a593Smuzhiyun
1707*4882a593Smuzhiyun map.m_len = expanded;
1708*4882a593Smuzhiyun } else {
1709*4882a593Smuzhiyun err = f2fs_map_blocks(inode, &map, 1, F2FS_GET_BLOCK_PRE_AIO);
1710*4882a593Smuzhiyun expanded = map.m_len;
1711*4882a593Smuzhiyun }
1712*4882a593Smuzhiyun out_err:
1713*4882a593Smuzhiyun if (err) {
1714*4882a593Smuzhiyun pgoff_t last_off;
1715*4882a593Smuzhiyun
1716*4882a593Smuzhiyun if (!expanded)
1717*4882a593Smuzhiyun return err;
1718*4882a593Smuzhiyun
1719*4882a593Smuzhiyun last_off = pg_start + expanded - 1;
1720*4882a593Smuzhiyun
1721*4882a593Smuzhiyun /* cap the new size at the last successfully allocated block */
1722*4882a593Smuzhiyun new_size = (last_off == pg_end) ? offset + len :
1723*4882a593Smuzhiyun (loff_t)(last_off + 1) << PAGE_SHIFT;
1724*4882a593Smuzhiyun } else {
1725*4882a593Smuzhiyun new_size = ((loff_t)pg_end << PAGE_SHIFT) + off_end;
1726*4882a593Smuzhiyun }
1727*4882a593Smuzhiyun
1728*4882a593Smuzhiyun if (new_size > i_size_read(inode)) {
1729*4882a593Smuzhiyun if (mode & FALLOC_FL_KEEP_SIZE)
1730*4882a593Smuzhiyun file_set_keep_isize(inode);
1731*4882a593Smuzhiyun else
1732*4882a593Smuzhiyun f2fs_i_size_write(inode, new_size);
1733*4882a593Smuzhiyun }
1734*4882a593Smuzhiyun
1735*4882a593Smuzhiyun return err;
1736*4882a593Smuzhiyun }
1737*4882a593Smuzhiyun
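/*
 * Entry point for fallocate(2) on f2fs; dispatches to the handlers above
 * based on @mode. Userspace reaches it with calls of roughly this shape
 * (illustrative sketch only):
 *
 *	fallocate(fd, 0, 0, size);                          // preallocate
 *	fallocate(fd, FALLOC_FL_PUNCH_HOLE |
 *		      FALLOC_FL_KEEP_SIZE, offset, len);    // punch a hole
 */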
1738*4882a593Smuzhiyun static long f2fs_fallocate(struct file *file, int mode,
1739*4882a593Smuzhiyun loff_t offset, loff_t len)
1740*4882a593Smuzhiyun {
1741*4882a593Smuzhiyun struct inode *inode = file_inode(file);
1742*4882a593Smuzhiyun long ret = 0;
1743*4882a593Smuzhiyun
1744*4882a593Smuzhiyun if (unlikely(f2fs_cp_error(F2FS_I_SB(inode))))
1745*4882a593Smuzhiyun return -EIO;
1746*4882a593Smuzhiyun if (!f2fs_is_checkpoint_ready(F2FS_I_SB(inode)))
1747*4882a593Smuzhiyun return -ENOSPC;
1748*4882a593Smuzhiyun if (!f2fs_is_compress_backend_ready(inode))
1749*4882a593Smuzhiyun return -EOPNOTSUPP;
1750*4882a593Smuzhiyun
1751*4882a593Smuzhiyun /* f2fs only supports ->fallocate for regular files */
1752*4882a593Smuzhiyun if (!S_ISREG(inode->i_mode))
1753*4882a593Smuzhiyun return -EINVAL;
1754*4882a593Smuzhiyun
1755*4882a593Smuzhiyun if (IS_ENCRYPTED(inode) &&
1756*4882a593Smuzhiyun (mode & (FALLOC_FL_COLLAPSE_RANGE | FALLOC_FL_INSERT_RANGE)))
1757*4882a593Smuzhiyun return -EOPNOTSUPP;
1758*4882a593Smuzhiyun
1759*4882a593Smuzhiyun if (f2fs_compressed_file(inode) &&
1760*4882a593Smuzhiyun (mode & (FALLOC_FL_PUNCH_HOLE | FALLOC_FL_COLLAPSE_RANGE |
1761*4882a593Smuzhiyun FALLOC_FL_ZERO_RANGE | FALLOC_FL_INSERT_RANGE)))
1762*4882a593Smuzhiyun return -EOPNOTSUPP;
1763*4882a593Smuzhiyun
1764*4882a593Smuzhiyun if (mode & ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE |
1765*4882a593Smuzhiyun FALLOC_FL_COLLAPSE_RANGE | FALLOC_FL_ZERO_RANGE |
1766*4882a593Smuzhiyun FALLOC_FL_INSERT_RANGE))
1767*4882a593Smuzhiyun return -EOPNOTSUPP;
1768*4882a593Smuzhiyun
1769*4882a593Smuzhiyun inode_lock(inode);
1770*4882a593Smuzhiyun
1771*4882a593Smuzhiyun ret = file_modified(file);
1772*4882a593Smuzhiyun if (ret)
1773*4882a593Smuzhiyun goto out;
1774*4882a593Smuzhiyun
1775*4882a593Smuzhiyun if (mode & FALLOC_FL_PUNCH_HOLE) {
1776*4882a593Smuzhiyun if (offset >= inode->i_size)
1777*4882a593Smuzhiyun goto out;
1778*4882a593Smuzhiyun
1779*4882a593Smuzhiyun ret = punch_hole(inode, offset, len);
1780*4882a593Smuzhiyun } else if (mode & FALLOC_FL_COLLAPSE_RANGE) {
1781*4882a593Smuzhiyun ret = f2fs_collapse_range(inode, offset, len);
1782*4882a593Smuzhiyun } else if (mode & FALLOC_FL_ZERO_RANGE) {
1783*4882a593Smuzhiyun ret = f2fs_zero_range(inode, offset, len, mode);
1784*4882a593Smuzhiyun } else if (mode & FALLOC_FL_INSERT_RANGE) {
1785*4882a593Smuzhiyun ret = f2fs_insert_range(inode, offset, len);
1786*4882a593Smuzhiyun } else {
1787*4882a593Smuzhiyun ret = expand_inode_data(inode, offset, len, mode);
1788*4882a593Smuzhiyun }
1789*4882a593Smuzhiyun
1790*4882a593Smuzhiyun if (!ret) {
1791*4882a593Smuzhiyun inode->i_mtime = inode->i_ctime = current_time(inode);
1792*4882a593Smuzhiyun f2fs_mark_inode_dirty_sync(inode, false);
1793*4882a593Smuzhiyun f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
1794*4882a593Smuzhiyun }
1795*4882a593Smuzhiyun
1796*4882a593Smuzhiyun out:
1797*4882a593Smuzhiyun inode_unlock(inode);
1798*4882a593Smuzhiyun
1799*4882a593Smuzhiyun trace_f2fs_fallocate(inode, mode, offset, len, ret);
1800*4882a593Smuzhiyun return ret;
1801*4882a593Smuzhiyun }
1802*4882a593Smuzhiyun
1803*4882a593Smuzhiyun static int f2fs_release_file(struct inode *inode, struct file *filp)
1804*4882a593Smuzhiyun {
1805*4882a593Smuzhiyun /*
1806*4882a593Smuzhiyun * f2fs_release_file() is called on every close. So we should not
1807*4882a593Smuzhiyun * drop in-memory pages just because another process closed the file.
1808*4882a593Smuzhiyun */
1809*4882a593Smuzhiyun if (!(filp->f_mode & FMODE_WRITE) ||
1810*4882a593Smuzhiyun atomic_read(&inode->i_writecount) != 1)
1811*4882a593Smuzhiyun return 0;
1812*4882a593Smuzhiyun
1813*4882a593Smuzhiyun /* any remaining atomic pages should be discarded */
1814*4882a593Smuzhiyun if (f2fs_is_atomic_file(inode))
1815*4882a593Smuzhiyun f2fs_drop_inmem_pages(inode);
1816*4882a593Smuzhiyun if (f2fs_is_volatile_file(inode)) {
1817*4882a593Smuzhiyun set_inode_flag(inode, FI_DROP_CACHE);
1818*4882a593Smuzhiyun filemap_fdatawrite(inode->i_mapping);
1819*4882a593Smuzhiyun clear_inode_flag(inode, FI_DROP_CACHE);
1820*4882a593Smuzhiyun clear_inode_flag(inode, FI_VOLATILE_FILE);
1821*4882a593Smuzhiyun stat_dec_volatile_write(inode);
1822*4882a593Smuzhiyun }
1823*4882a593Smuzhiyun return 0;
1824*4882a593Smuzhiyun }
1825*4882a593Smuzhiyun
1826*4882a593Smuzhiyun static int f2fs_file_flush(struct file *file, fl_owner_t id)
1827*4882a593Smuzhiyun {
1828*4882a593Smuzhiyun struct inode *inode = file_inode(file);
1829*4882a593Smuzhiyun
1830*4882a593Smuzhiyun /*
1831*4882a593Smuzhiyun * If the process doing a transaction crashes, we should roll back.
1832*4882a593Smuzhiyun * Otherwise, other readers/writers can see a corrupted database until
1833*4882a593Smuzhiyun * all writers close the file. Since this must happen before the file
1834*4882a593Smuzhiyun * lock is dropped, it has to be done in ->flush.
1835*4882a593Smuzhiyun */
1836*4882a593Smuzhiyun if (f2fs_is_atomic_file(inode) &&
1837*4882a593Smuzhiyun F2FS_I(inode)->inmem_task == current)
1838*4882a593Smuzhiyun f2fs_drop_inmem_pages(inode);
1839*4882a593Smuzhiyun return 0;
1840*4882a593Smuzhiyun }
1841*4882a593Smuzhiyun
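/*
 * Apply a new set of on-disk i_flags, limited to @mask. Transitions that
 * need extra state (casefold on a non-empty directory, enabling or
 * disabling compression) are validated here before fi->i_flags is updated
 * and the inode is marked dirty.
 */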
1842*4882a593Smuzhiyun static int f2fs_setflags_common(struct inode *inode, u32 iflags, u32 mask)
1843*4882a593Smuzhiyun {
1844*4882a593Smuzhiyun struct f2fs_inode_info *fi = F2FS_I(inode);
1845*4882a593Smuzhiyun u32 masked_flags = fi->i_flags & mask;
1846*4882a593Smuzhiyun
1847*4882a593Smuzhiyun /* mask can be shrunk by flags_valid selector */
1848*4882a593Smuzhiyun iflags &= mask;
1849*4882a593Smuzhiyun
1850*4882a593Smuzhiyun /* Is it a quota file? Do not allow the user to mess with it */
1851*4882a593Smuzhiyun if (IS_NOQUOTA(inode))
1852*4882a593Smuzhiyun return -EPERM;
1853*4882a593Smuzhiyun
1854*4882a593Smuzhiyun if ((iflags ^ masked_flags) & F2FS_CASEFOLD_FL) {
1855*4882a593Smuzhiyun if (!f2fs_sb_has_casefold(F2FS_I_SB(inode)))
1856*4882a593Smuzhiyun return -EOPNOTSUPP;
1857*4882a593Smuzhiyun if (!f2fs_empty_dir(inode))
1858*4882a593Smuzhiyun return -ENOTEMPTY;
1859*4882a593Smuzhiyun }
1860*4882a593Smuzhiyun
1861*4882a593Smuzhiyun if (iflags & (F2FS_COMPR_FL | F2FS_NOCOMP_FL)) {
1862*4882a593Smuzhiyun if (!f2fs_sb_has_compression(F2FS_I_SB(inode)))
1863*4882a593Smuzhiyun return -EOPNOTSUPP;
1864*4882a593Smuzhiyun if ((iflags & F2FS_COMPR_FL) && (iflags & F2FS_NOCOMP_FL))
1865*4882a593Smuzhiyun return -EINVAL;
1866*4882a593Smuzhiyun }
1867*4882a593Smuzhiyun
1868*4882a593Smuzhiyun if ((iflags ^ masked_flags) & F2FS_COMPR_FL) {
1869*4882a593Smuzhiyun if (masked_flags & F2FS_COMPR_FL) {
1870*4882a593Smuzhiyun if (!f2fs_disable_compressed_file(inode))
1871*4882a593Smuzhiyun return -EINVAL;
1872*4882a593Smuzhiyun } else {
1873*4882a593Smuzhiyun if (!f2fs_may_compress(inode))
1874*4882a593Smuzhiyun return -EINVAL;
1875*4882a593Smuzhiyun if (S_ISREG(inode->i_mode) && inode->i_size)
1876*4882a593Smuzhiyun return -EINVAL;
1877*4882a593Smuzhiyun if (set_compress_context(inode))
1878*4882a593Smuzhiyun return -EOPNOTSUPP;
1879*4882a593Smuzhiyun }
1880*4882a593Smuzhiyun }
1881*4882a593Smuzhiyun
1882*4882a593Smuzhiyun fi->i_flags = iflags | (fi->i_flags & ~mask);
1883*4882a593Smuzhiyun f2fs_bug_on(F2FS_I_SB(inode), (fi->i_flags & F2FS_COMPR_FL) &&
1884*4882a593Smuzhiyun (fi->i_flags & F2FS_NOCOMP_FL));
1885*4882a593Smuzhiyun
1886*4882a593Smuzhiyun if (fi->i_flags & F2FS_PROJINHERIT_FL)
1887*4882a593Smuzhiyun set_inode_flag(inode, FI_PROJ_INHERIT);
1888*4882a593Smuzhiyun else
1889*4882a593Smuzhiyun clear_inode_flag(inode, FI_PROJ_INHERIT);
1890*4882a593Smuzhiyun
1891*4882a593Smuzhiyun inode->i_ctime = current_time(inode);
1892*4882a593Smuzhiyun f2fs_set_inode_flags(inode);
1893*4882a593Smuzhiyun f2fs_mark_inode_dirty_sync(inode, true);
1894*4882a593Smuzhiyun return 0;
1895*4882a593Smuzhiyun }
1896*4882a593Smuzhiyun
1897*4882a593Smuzhiyun /* FS_IOC_GETFLAGS and FS_IOC_SETFLAGS support */
1898*4882a593Smuzhiyun
1899*4882a593Smuzhiyun /*
1900*4882a593Smuzhiyun * To make a new on-disk f2fs i_flag gettable via FS_IOC_GETFLAGS, add an entry
1901*4882a593Smuzhiyun * for it to f2fs_fsflags_map[], and add its FS_*_FL equivalent to
1902*4882a593Smuzhiyun * F2FS_GETTABLE_FS_FL. To also make it settable via FS_IOC_SETFLAGS, also add
1903*4882a593Smuzhiyun * its FS_*_FL equivalent to F2FS_SETTABLE_FS_FL.
1904*4882a593Smuzhiyun */
1905*4882a593Smuzhiyun
1906*4882a593Smuzhiyun static const struct {
1907*4882a593Smuzhiyun u32 iflag;
1908*4882a593Smuzhiyun u32 fsflag;
1909*4882a593Smuzhiyun } f2fs_fsflags_map[] = {
1910*4882a593Smuzhiyun { F2FS_COMPR_FL, FS_COMPR_FL },
1911*4882a593Smuzhiyun { F2FS_SYNC_FL, FS_SYNC_FL },
1912*4882a593Smuzhiyun { F2FS_IMMUTABLE_FL, FS_IMMUTABLE_FL },
1913*4882a593Smuzhiyun { F2FS_APPEND_FL, FS_APPEND_FL },
1914*4882a593Smuzhiyun { F2FS_NODUMP_FL, FS_NODUMP_FL },
1915*4882a593Smuzhiyun { F2FS_NOATIME_FL, FS_NOATIME_FL },
1916*4882a593Smuzhiyun { F2FS_NOCOMP_FL, FS_NOCOMP_FL },
1917*4882a593Smuzhiyun { F2FS_INDEX_FL, FS_INDEX_FL },
1918*4882a593Smuzhiyun { F2FS_DIRSYNC_FL, FS_DIRSYNC_FL },
1919*4882a593Smuzhiyun { F2FS_PROJINHERIT_FL, FS_PROJINHERIT_FL },
1920*4882a593Smuzhiyun { F2FS_CASEFOLD_FL, FS_CASEFOLD_FL },
1921*4882a593Smuzhiyun };
1922*4882a593Smuzhiyun
1923*4882a593Smuzhiyun #define F2FS_GETTABLE_FS_FL ( \
1924*4882a593Smuzhiyun FS_COMPR_FL | \
1925*4882a593Smuzhiyun FS_SYNC_FL | \
1926*4882a593Smuzhiyun FS_IMMUTABLE_FL | \
1927*4882a593Smuzhiyun FS_APPEND_FL | \
1928*4882a593Smuzhiyun FS_NODUMP_FL | \
1929*4882a593Smuzhiyun FS_NOATIME_FL | \
1930*4882a593Smuzhiyun FS_NOCOMP_FL | \
1931*4882a593Smuzhiyun FS_INDEX_FL | \
1932*4882a593Smuzhiyun FS_DIRSYNC_FL | \
1933*4882a593Smuzhiyun FS_PROJINHERIT_FL | \
1934*4882a593Smuzhiyun FS_ENCRYPT_FL | \
1935*4882a593Smuzhiyun FS_INLINE_DATA_FL | \
1936*4882a593Smuzhiyun FS_NOCOW_FL | \
1937*4882a593Smuzhiyun FS_VERITY_FL | \
1938*4882a593Smuzhiyun FS_CASEFOLD_FL)
1939*4882a593Smuzhiyun
1940*4882a593Smuzhiyun #define F2FS_SETTABLE_FS_FL ( \
1941*4882a593Smuzhiyun FS_COMPR_FL | \
1942*4882a593Smuzhiyun FS_SYNC_FL | \
1943*4882a593Smuzhiyun FS_IMMUTABLE_FL | \
1944*4882a593Smuzhiyun FS_APPEND_FL | \
1945*4882a593Smuzhiyun FS_NODUMP_FL | \
1946*4882a593Smuzhiyun FS_NOATIME_FL | \
1947*4882a593Smuzhiyun FS_NOCOMP_FL | \
1948*4882a593Smuzhiyun FS_DIRSYNC_FL | \
1949*4882a593Smuzhiyun FS_PROJINHERIT_FL | \
1950*4882a593Smuzhiyun FS_CASEFOLD_FL)
1951*4882a593Smuzhiyun
1952*4882a593Smuzhiyun /* Convert f2fs on-disk i_flags to FS_IOC_{GET,SET}FLAGS flags */
1953*4882a593Smuzhiyun static inline u32 f2fs_iflags_to_fsflags(u32 iflags)
1954*4882a593Smuzhiyun {
1955*4882a593Smuzhiyun u32 fsflags = 0;
1956*4882a593Smuzhiyun int i;
1957*4882a593Smuzhiyun
1958*4882a593Smuzhiyun for (i = 0; i < ARRAY_SIZE(f2fs_fsflags_map); i++)
1959*4882a593Smuzhiyun if (iflags & f2fs_fsflags_map[i].iflag)
1960*4882a593Smuzhiyun fsflags |= f2fs_fsflags_map[i].fsflag;
1961*4882a593Smuzhiyun
1962*4882a593Smuzhiyun return fsflags;
1963*4882a593Smuzhiyun }
1964*4882a593Smuzhiyun
1965*4882a593Smuzhiyun /* Convert FS_IOC_{GET,SET}FLAGS flags to f2fs on-disk i_flags */
1966*4882a593Smuzhiyun static inline u32 f2fs_fsflags_to_iflags(u32 fsflags)
1967*4882a593Smuzhiyun {
1968*4882a593Smuzhiyun u32 iflags = 0;
1969*4882a593Smuzhiyun int i;
1970*4882a593Smuzhiyun
1971*4882a593Smuzhiyun for (i = 0; i < ARRAY_SIZE(f2fs_fsflags_map); i++)
1972*4882a593Smuzhiyun if (fsflags & f2fs_fsflags_map[i].fsflag)
1973*4882a593Smuzhiyun iflags |= f2fs_fsflags_map[i].iflag;
1974*4882a593Smuzhiyun
1975*4882a593Smuzhiyun return iflags;
1976*4882a593Smuzhiyun }
1977*4882a593Smuzhiyun
1978*4882a593Smuzhiyun static int f2fs_ioc_getflags(struct file *filp, unsigned long arg)
1979*4882a593Smuzhiyun {
1980*4882a593Smuzhiyun struct inode *inode = file_inode(filp);
1981*4882a593Smuzhiyun struct f2fs_inode_info *fi = F2FS_I(inode);
1982*4882a593Smuzhiyun u32 fsflags = f2fs_iflags_to_fsflags(fi->i_flags);
1983*4882a593Smuzhiyun
1984*4882a593Smuzhiyun if (IS_ENCRYPTED(inode))
1985*4882a593Smuzhiyun fsflags |= FS_ENCRYPT_FL;
1986*4882a593Smuzhiyun if (IS_VERITY(inode))
1987*4882a593Smuzhiyun fsflags |= FS_VERITY_FL;
1988*4882a593Smuzhiyun if (f2fs_has_inline_data(inode) || f2fs_has_inline_dentry(inode))
1989*4882a593Smuzhiyun fsflags |= FS_INLINE_DATA_FL;
1990*4882a593Smuzhiyun if (is_inode_flag_set(inode, FI_PIN_FILE))
1991*4882a593Smuzhiyun fsflags |= FS_NOCOW_FL;
1992*4882a593Smuzhiyun
1993*4882a593Smuzhiyun fsflags &= F2FS_GETTABLE_FS_FL;
1994*4882a593Smuzhiyun
1995*4882a593Smuzhiyun return put_user(fsflags, (int __user *)arg);
1996*4882a593Smuzhiyun }
1997*4882a593Smuzhiyun
1998*4882a593Smuzhiyun static int f2fs_ioc_setflags(struct file *filp, unsigned long arg)
1999*4882a593Smuzhiyun {
2000*4882a593Smuzhiyun struct inode *inode = file_inode(filp);
2001*4882a593Smuzhiyun struct f2fs_inode_info *fi = F2FS_I(inode);
2002*4882a593Smuzhiyun u32 fsflags, old_fsflags;
2003*4882a593Smuzhiyun u32 iflags;
2004*4882a593Smuzhiyun int ret;
2005*4882a593Smuzhiyun
2006*4882a593Smuzhiyun if (!inode_owner_or_capable(inode))
2007*4882a593Smuzhiyun return -EACCES;
2008*4882a593Smuzhiyun
2009*4882a593Smuzhiyun if (get_user(fsflags, (int __user *)arg))
2010*4882a593Smuzhiyun return -EFAULT;
2011*4882a593Smuzhiyun
2012*4882a593Smuzhiyun if (fsflags & ~F2FS_GETTABLE_FS_FL)
2013*4882a593Smuzhiyun return -EOPNOTSUPP;
2014*4882a593Smuzhiyun fsflags &= F2FS_SETTABLE_FS_FL;
2015*4882a593Smuzhiyun
2016*4882a593Smuzhiyun iflags = f2fs_fsflags_to_iflags(fsflags);
2017*4882a593Smuzhiyun if (f2fs_mask_flags(inode->i_mode, iflags) != iflags)
2018*4882a593Smuzhiyun return -EOPNOTSUPP;
2019*4882a593Smuzhiyun
2020*4882a593Smuzhiyun ret = mnt_want_write_file(filp);
2021*4882a593Smuzhiyun if (ret)
2022*4882a593Smuzhiyun return ret;
2023*4882a593Smuzhiyun
2024*4882a593Smuzhiyun inode_lock(inode);
2025*4882a593Smuzhiyun
2026*4882a593Smuzhiyun old_fsflags = f2fs_iflags_to_fsflags(fi->i_flags);
2027*4882a593Smuzhiyun ret = vfs_ioc_setflags_prepare(inode, old_fsflags, fsflags);
2028*4882a593Smuzhiyun if (ret)
2029*4882a593Smuzhiyun goto out;
2030*4882a593Smuzhiyun
2031*4882a593Smuzhiyun ret = f2fs_setflags_common(inode, iflags,
2032*4882a593Smuzhiyun f2fs_fsflags_to_iflags(F2FS_SETTABLE_FS_FL));
2033*4882a593Smuzhiyun out:
2034*4882a593Smuzhiyun inode_unlock(inode);
2035*4882a593Smuzhiyun mnt_drop_write_file(filp);
2036*4882a593Smuzhiyun return ret;
2037*4882a593Smuzhiyun }
2038*4882a593Smuzhiyun
2039*4882a593Smuzhiyun static int f2fs_ioc_getversion(struct file *filp, unsigned long arg)
2040*4882a593Smuzhiyun {
2041*4882a593Smuzhiyun struct inode *inode = file_inode(filp);
2042*4882a593Smuzhiyun
2043*4882a593Smuzhiyun return put_user(inode->i_generation, (int __user *)arg);
2044*4882a593Smuzhiyun }
2045*4882a593Smuzhiyun
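/*
 * F2FS_IOC_START_ATOMIC_WRITE: mark the file as an atomic-write target so
 * that subsequent buffered writes are staged in memory until they are
 * committed or dropped. A typical caller (illustrative sketch, error
 * handling omitted):
 *
 *	ioctl(fd, F2FS_IOC_START_ATOMIC_WRITE);
 *	write(fd, buf, len);
 *	ioctl(fd, F2FS_IOC_COMMIT_ATOMIC_WRITE);
 */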
2046*4882a593Smuzhiyun static int f2fs_ioc_start_atomic_write(struct file *filp)
2047*4882a593Smuzhiyun {
2048*4882a593Smuzhiyun struct inode *inode = file_inode(filp);
2049*4882a593Smuzhiyun struct f2fs_inode_info *fi = F2FS_I(inode);
2050*4882a593Smuzhiyun struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
2051*4882a593Smuzhiyun int ret;
2052*4882a593Smuzhiyun
2053*4882a593Smuzhiyun if (!inode_owner_or_capable(inode))
2054*4882a593Smuzhiyun return -EACCES;
2055*4882a593Smuzhiyun
2056*4882a593Smuzhiyun if (!S_ISREG(inode->i_mode))
2057*4882a593Smuzhiyun return -EINVAL;
2058*4882a593Smuzhiyun
2059*4882a593Smuzhiyun if (filp->f_flags & O_DIRECT)
2060*4882a593Smuzhiyun return -EINVAL;
2061*4882a593Smuzhiyun
2062*4882a593Smuzhiyun ret = mnt_want_write_file(filp);
2063*4882a593Smuzhiyun if (ret)
2064*4882a593Smuzhiyun return ret;
2065*4882a593Smuzhiyun
2066*4882a593Smuzhiyun inode_lock(inode);
2067*4882a593Smuzhiyun
2068*4882a593Smuzhiyun if (!f2fs_disable_compressed_file(inode)) {
2069*4882a593Smuzhiyun ret = -EINVAL;
2070*4882a593Smuzhiyun goto out;
2071*4882a593Smuzhiyun }
2072*4882a593Smuzhiyun
2073*4882a593Smuzhiyun if (f2fs_is_atomic_file(inode)) {
2074*4882a593Smuzhiyun if (is_inode_flag_set(inode, FI_ATOMIC_REVOKE_REQUEST))
2075*4882a593Smuzhiyun ret = -EINVAL;
2076*4882a593Smuzhiyun goto out;
2077*4882a593Smuzhiyun }
2078*4882a593Smuzhiyun
2079*4882a593Smuzhiyun ret = f2fs_convert_inline_inode(inode);
2080*4882a593Smuzhiyun if (ret)
2081*4882a593Smuzhiyun goto out;
2082*4882a593Smuzhiyun
2083*4882a593Smuzhiyun f2fs_down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
2084*4882a593Smuzhiyun
2085*4882a593Smuzhiyun /*
2086*4882a593Smuzhiyun * Wait for end_io so that F2FS_WB_CP_DATA is counted correctly
2087*4882a593Smuzhiyun * against f2fs_is_atomic_file().
2088*4882a593Smuzhiyun */
2089*4882a593Smuzhiyun if (get_dirty_pages(inode))
2090*4882a593Smuzhiyun f2fs_warn(F2FS_I_SB(inode), "Unexpected flush for atomic writes: ino=%lu, npages=%u",
2091*4882a593Smuzhiyun inode->i_ino, get_dirty_pages(inode));
2092*4882a593Smuzhiyun ret = filemap_write_and_wait_range(inode->i_mapping, 0, LLONG_MAX);
2093*4882a593Smuzhiyun if (ret) {
2094*4882a593Smuzhiyun f2fs_up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
2095*4882a593Smuzhiyun goto out;
2096*4882a593Smuzhiyun }
2097*4882a593Smuzhiyun
2098*4882a593Smuzhiyun spin_lock(&sbi->inode_lock[ATOMIC_FILE]);
2099*4882a593Smuzhiyun if (list_empty(&fi->inmem_ilist))
2100*4882a593Smuzhiyun list_add_tail(&fi->inmem_ilist, &sbi->inode_list[ATOMIC_FILE]);
2101*4882a593Smuzhiyun sbi->atomic_files++;
2102*4882a593Smuzhiyun spin_unlock(&sbi->inode_lock[ATOMIC_FILE]);
2103*4882a593Smuzhiyun
2104*4882a593Smuzhiyun /* add the inode to inmem_list first, then set FI_ATOMIC_FILE */
2105*4882a593Smuzhiyun set_inode_flag(inode, FI_ATOMIC_FILE);
2106*4882a593Smuzhiyun clear_inode_flag(inode, FI_ATOMIC_REVOKE_REQUEST);
2107*4882a593Smuzhiyun f2fs_up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
2108*4882a593Smuzhiyun
2109*4882a593Smuzhiyun f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
2110*4882a593Smuzhiyun F2FS_I(inode)->inmem_task = current;
2111*4882a593Smuzhiyun stat_update_max_atomic_write(inode);
2112*4882a593Smuzhiyun out:
2113*4882a593Smuzhiyun inode_unlock(inode);
2114*4882a593Smuzhiyun mnt_drop_write_file(filp);
2115*4882a593Smuzhiyun return ret;
2116*4882a593Smuzhiyun }
2117*4882a593Smuzhiyun
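/*
 * F2FS_IOC_COMMIT_ATOMIC_WRITE: flush the staged in-memory pages to disk
 * and fsync the file; on success the atomic-write state is dropped. If a
 * revoke was requested in the meantime, the commit fails with -EINVAL.
 */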
2118*4882a593Smuzhiyun static int f2fs_ioc_commit_atomic_write(struct file *filp)
2119*4882a593Smuzhiyun {
2120*4882a593Smuzhiyun struct inode *inode = file_inode(filp);
2121*4882a593Smuzhiyun int ret;
2122*4882a593Smuzhiyun
2123*4882a593Smuzhiyun if (!inode_owner_or_capable(inode))
2124*4882a593Smuzhiyun return -EACCES;
2125*4882a593Smuzhiyun
2126*4882a593Smuzhiyun ret = mnt_want_write_file(filp);
2127*4882a593Smuzhiyun if (ret)
2128*4882a593Smuzhiyun return ret;
2129*4882a593Smuzhiyun
2130*4882a593Smuzhiyun f2fs_balance_fs(F2FS_I_SB(inode), true);
2131*4882a593Smuzhiyun
2132*4882a593Smuzhiyun inode_lock(inode);
2133*4882a593Smuzhiyun
2134*4882a593Smuzhiyun if (f2fs_is_volatile_file(inode)) {
2135*4882a593Smuzhiyun ret = -EINVAL;
2136*4882a593Smuzhiyun goto err_out;
2137*4882a593Smuzhiyun }
2138*4882a593Smuzhiyun
2139*4882a593Smuzhiyun if (f2fs_is_atomic_file(inode)) {
2140*4882a593Smuzhiyun ret = f2fs_commit_inmem_pages(inode);
2141*4882a593Smuzhiyun if (ret)
2142*4882a593Smuzhiyun goto err_out;
2143*4882a593Smuzhiyun
2144*4882a593Smuzhiyun ret = f2fs_do_sync_file(filp, 0, LLONG_MAX, 0, true);
2145*4882a593Smuzhiyun if (!ret)
2146*4882a593Smuzhiyun f2fs_drop_inmem_pages(inode);
2147*4882a593Smuzhiyun } else {
2148*4882a593Smuzhiyun ret = f2fs_do_sync_file(filp, 0, LLONG_MAX, 1, false);
2149*4882a593Smuzhiyun }
2150*4882a593Smuzhiyun err_out:
2151*4882a593Smuzhiyun if (is_inode_flag_set(inode, FI_ATOMIC_REVOKE_REQUEST)) {
2152*4882a593Smuzhiyun clear_inode_flag(inode, FI_ATOMIC_REVOKE_REQUEST);
2153*4882a593Smuzhiyun ret = -EINVAL;
2154*4882a593Smuzhiyun }
2155*4882a593Smuzhiyun inode_unlock(inode);
2156*4882a593Smuzhiyun mnt_drop_write_file(filp);
2157*4882a593Smuzhiyun return ret;
2158*4882a593Smuzhiyun }
2159*4882a593Smuzhiyun
2160*4882a593Smuzhiyun static int f2fs_ioc_start_volatile_write(struct file *filp)
2161*4882a593Smuzhiyun {
2162*4882a593Smuzhiyun struct inode *inode = file_inode(filp);
2163*4882a593Smuzhiyun int ret;
2164*4882a593Smuzhiyun
2165*4882a593Smuzhiyun if (!inode_owner_or_capable(inode))
2166*4882a593Smuzhiyun return -EACCES;
2167*4882a593Smuzhiyun
2168*4882a593Smuzhiyun if (!S_ISREG(inode->i_mode))
2169*4882a593Smuzhiyun return -EINVAL;
2170*4882a593Smuzhiyun
2171*4882a593Smuzhiyun ret = mnt_want_write_file(filp);
2172*4882a593Smuzhiyun if (ret)
2173*4882a593Smuzhiyun return ret;
2174*4882a593Smuzhiyun
2175*4882a593Smuzhiyun inode_lock(inode);
2176*4882a593Smuzhiyun
2177*4882a593Smuzhiyun if (f2fs_is_volatile_file(inode))
2178*4882a593Smuzhiyun goto out;
2179*4882a593Smuzhiyun
2180*4882a593Smuzhiyun ret = f2fs_convert_inline_inode(inode);
2181*4882a593Smuzhiyun if (ret)
2182*4882a593Smuzhiyun goto out;
2183*4882a593Smuzhiyun
2184*4882a593Smuzhiyun stat_inc_volatile_write(inode);
2185*4882a593Smuzhiyun stat_update_max_volatile_write(inode);
2186*4882a593Smuzhiyun
2187*4882a593Smuzhiyun set_inode_flag(inode, FI_VOLATILE_FILE);
2188*4882a593Smuzhiyun f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
2189*4882a593Smuzhiyun out:
2190*4882a593Smuzhiyun inode_unlock(inode);
2191*4882a593Smuzhiyun mnt_drop_write_file(filp);
2192*4882a593Smuzhiyun return ret;
2193*4882a593Smuzhiyun }
2194*4882a593Smuzhiyun
2195*4882a593Smuzhiyun static int f2fs_ioc_release_volatile_write(struct file *filp)
2196*4882a593Smuzhiyun {
2197*4882a593Smuzhiyun struct inode *inode = file_inode(filp);
2198*4882a593Smuzhiyun int ret;
2199*4882a593Smuzhiyun
2200*4882a593Smuzhiyun if (!inode_owner_or_capable(inode))
2201*4882a593Smuzhiyun return -EACCES;
2202*4882a593Smuzhiyun
2203*4882a593Smuzhiyun ret = mnt_want_write_file(filp);
2204*4882a593Smuzhiyun if (ret)
2205*4882a593Smuzhiyun return ret;
2206*4882a593Smuzhiyun
2207*4882a593Smuzhiyun inode_lock(inode);
2208*4882a593Smuzhiyun
2209*4882a593Smuzhiyun if (!f2fs_is_volatile_file(inode))
2210*4882a593Smuzhiyun goto out;
2211*4882a593Smuzhiyun
2212*4882a593Smuzhiyun if (!f2fs_is_first_block_written(inode)) {
2213*4882a593Smuzhiyun ret = truncate_partial_data_page(inode, 0, true);
2214*4882a593Smuzhiyun goto out;
2215*4882a593Smuzhiyun }
2216*4882a593Smuzhiyun
2217*4882a593Smuzhiyun ret = punch_hole(inode, 0, F2FS_BLKSIZE);
2218*4882a593Smuzhiyun out:
2219*4882a593Smuzhiyun inode_unlock(inode);
2220*4882a593Smuzhiyun mnt_drop_write_file(filp);
2221*4882a593Smuzhiyun return ret;
2222*4882a593Smuzhiyun }
2223*4882a593Smuzhiyun
2224*4882a593Smuzhiyun static int f2fs_ioc_abort_volatile_write(struct file *filp)
2225*4882a593Smuzhiyun {
2226*4882a593Smuzhiyun struct inode *inode = file_inode(filp);
2227*4882a593Smuzhiyun int ret;
2228*4882a593Smuzhiyun
2229*4882a593Smuzhiyun if (!inode_owner_or_capable(inode))
2230*4882a593Smuzhiyun return -EACCES;
2231*4882a593Smuzhiyun
2232*4882a593Smuzhiyun ret = mnt_want_write_file(filp);
2233*4882a593Smuzhiyun if (ret)
2234*4882a593Smuzhiyun return ret;
2235*4882a593Smuzhiyun
2236*4882a593Smuzhiyun inode_lock(inode);
2237*4882a593Smuzhiyun
2238*4882a593Smuzhiyun if (f2fs_is_atomic_file(inode))
2239*4882a593Smuzhiyun f2fs_drop_inmem_pages(inode);
2240*4882a593Smuzhiyun if (f2fs_is_volatile_file(inode)) {
2241*4882a593Smuzhiyun clear_inode_flag(inode, FI_VOLATILE_FILE);
2242*4882a593Smuzhiyun stat_dec_volatile_write(inode);
2243*4882a593Smuzhiyun ret = f2fs_do_sync_file(filp, 0, LLONG_MAX, 0, true);
2244*4882a593Smuzhiyun }
2245*4882a593Smuzhiyun
2246*4882a593Smuzhiyun clear_inode_flag(inode, FI_ATOMIC_REVOKE_REQUEST);
2247*4882a593Smuzhiyun
2248*4882a593Smuzhiyun inode_unlock(inode);
2249*4882a593Smuzhiyun
2250*4882a593Smuzhiyun mnt_drop_write_file(filp);
2251*4882a593Smuzhiyun f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
2252*4882a593Smuzhiyun return ret;
2253*4882a593Smuzhiyun }
2254*4882a593Smuzhiyun
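/*
 * F2FS_IOC_SHUTDOWN: take the filesystem down in one of several modes,
 * from a full freeze-and-sync (FULLSYNC) to an immediate stop without any
 * sync (NOSYNC). Apart from NEED_FSCK, every mode stops checkpointing,
 * marks the filesystem as shut down and kills the GC and discard threads.
 */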
2255*4882a593Smuzhiyun static int f2fs_ioc_shutdown(struct file *filp, unsigned long arg)
2256*4882a593Smuzhiyun {
2257*4882a593Smuzhiyun struct inode *inode = file_inode(filp);
2258*4882a593Smuzhiyun struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
2259*4882a593Smuzhiyun struct super_block *sb = sbi->sb;
2260*4882a593Smuzhiyun __u32 in;
2261*4882a593Smuzhiyun int ret = 0;
2262*4882a593Smuzhiyun
2263*4882a593Smuzhiyun if (!capable(CAP_SYS_ADMIN))
2264*4882a593Smuzhiyun return -EPERM;
2265*4882a593Smuzhiyun
2266*4882a593Smuzhiyun if (get_user(in, (__u32 __user *)arg))
2267*4882a593Smuzhiyun return -EFAULT;
2268*4882a593Smuzhiyun
2269*4882a593Smuzhiyun if (in != F2FS_GOING_DOWN_FULLSYNC) {
2270*4882a593Smuzhiyun ret = mnt_want_write_file(filp);
2271*4882a593Smuzhiyun if (ret) {
2272*4882a593Smuzhiyun if (ret == -EROFS) {
2273*4882a593Smuzhiyun ret = 0;
2274*4882a593Smuzhiyun f2fs_stop_checkpoint(sbi, false,
2275*4882a593Smuzhiyun STOP_CP_REASON_SHUTDOWN);
2276*4882a593Smuzhiyun set_sbi_flag(sbi, SBI_IS_SHUTDOWN);
2277*4882a593Smuzhiyun trace_f2fs_shutdown(sbi, in, ret);
2278*4882a593Smuzhiyun }
2279*4882a593Smuzhiyun return ret;
2280*4882a593Smuzhiyun }
2281*4882a593Smuzhiyun }
2282*4882a593Smuzhiyun
2283*4882a593Smuzhiyun switch (in) {
2284*4882a593Smuzhiyun case F2FS_GOING_DOWN_FULLSYNC:
2285*4882a593Smuzhiyun ret = freeze_bdev(sb->s_bdev);
2286*4882a593Smuzhiyun if (ret)
2287*4882a593Smuzhiyun goto out;
2288*4882a593Smuzhiyun f2fs_stop_checkpoint(sbi, false, STOP_CP_REASON_SHUTDOWN);
2289*4882a593Smuzhiyun set_sbi_flag(sbi, SBI_IS_SHUTDOWN);
2290*4882a593Smuzhiyun thaw_bdev(sb->s_bdev);
2291*4882a593Smuzhiyun break;
2292*4882a593Smuzhiyun case F2FS_GOING_DOWN_METASYNC:
2293*4882a593Smuzhiyun /* do checkpoint only */
2294*4882a593Smuzhiyun ret = f2fs_sync_fs(sb, 1);
2295*4882a593Smuzhiyun if (ret)
2296*4882a593Smuzhiyun goto out;
2297*4882a593Smuzhiyun f2fs_stop_checkpoint(sbi, false, STOP_CP_REASON_SHUTDOWN);
2298*4882a593Smuzhiyun set_sbi_flag(sbi, SBI_IS_SHUTDOWN);
2299*4882a593Smuzhiyun break;
2300*4882a593Smuzhiyun case F2FS_GOING_DOWN_NOSYNC:
2301*4882a593Smuzhiyun f2fs_stop_checkpoint(sbi, false, STOP_CP_REASON_SHUTDOWN);
2302*4882a593Smuzhiyun set_sbi_flag(sbi, SBI_IS_SHUTDOWN);
2303*4882a593Smuzhiyun break;
2304*4882a593Smuzhiyun case F2FS_GOING_DOWN_METAFLUSH:
2305*4882a593Smuzhiyun f2fs_sync_meta_pages(sbi, META, LONG_MAX, FS_META_IO);
2306*4882a593Smuzhiyun f2fs_stop_checkpoint(sbi, false, STOP_CP_REASON_SHUTDOWN);
2307*4882a593Smuzhiyun set_sbi_flag(sbi, SBI_IS_SHUTDOWN);
2308*4882a593Smuzhiyun break;
2309*4882a593Smuzhiyun case F2FS_GOING_DOWN_NEED_FSCK:
2310*4882a593Smuzhiyun set_sbi_flag(sbi, SBI_NEED_FSCK);
2311*4882a593Smuzhiyun set_sbi_flag(sbi, SBI_CP_DISABLED_QUICK);
2312*4882a593Smuzhiyun set_sbi_flag(sbi, SBI_IS_DIRTY);
2313*4882a593Smuzhiyun /* do checkpoint only */
2314*4882a593Smuzhiyun ret = f2fs_sync_fs(sb, 1);
2315*4882a593Smuzhiyun goto out;
2316*4882a593Smuzhiyun default:
2317*4882a593Smuzhiyun ret = -EINVAL;
2318*4882a593Smuzhiyun goto out;
2319*4882a593Smuzhiyun }
2320*4882a593Smuzhiyun
2321*4882a593Smuzhiyun f2fs_stop_gc_thread(sbi);
2322*4882a593Smuzhiyun f2fs_stop_discard_thread(sbi);
2323*4882a593Smuzhiyun
2324*4882a593Smuzhiyun f2fs_drop_discard_cmd(sbi);
2325*4882a593Smuzhiyun clear_opt(sbi, DISCARD);
2326*4882a593Smuzhiyun
2327*4882a593Smuzhiyun f2fs_update_time(sbi, REQ_TIME);
2328*4882a593Smuzhiyun out:
2329*4882a593Smuzhiyun if (in != F2FS_GOING_DOWN_FULLSYNC)
2330*4882a593Smuzhiyun mnt_drop_write_file(filp);
2331*4882a593Smuzhiyun
2332*4882a593Smuzhiyun trace_f2fs_shutdown(sbi, in, ret);
2333*4882a593Smuzhiyun
2334*4882a593Smuzhiyun return ret;
2335*4882a593Smuzhiyun }
2336*4882a593Smuzhiyun
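/*
 * FITRIM: discard free space in the requested range on devices that
 * support it. The generic struct fstrim_range is copied in from
 * userspace, e.g. (illustrative sketch):
 *
 *	struct fstrim_range range = { .start = 0, .len = ULLONG_MAX };
 *	ioctl(fd, FITRIM, &range);
 */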
2337*4882a593Smuzhiyun static int f2fs_ioc_fitrim(struct file *filp, unsigned long arg)
2338*4882a593Smuzhiyun {
2339*4882a593Smuzhiyun struct inode *inode = file_inode(filp);
2340*4882a593Smuzhiyun struct super_block *sb = inode->i_sb;
2341*4882a593Smuzhiyun struct request_queue *q = bdev_get_queue(sb->s_bdev);
2342*4882a593Smuzhiyun struct fstrim_range range;
2343*4882a593Smuzhiyun int ret;
2344*4882a593Smuzhiyun
2345*4882a593Smuzhiyun if (!capable(CAP_SYS_ADMIN))
2346*4882a593Smuzhiyun return -EPERM;
2347*4882a593Smuzhiyun
2348*4882a593Smuzhiyun if (!f2fs_hw_support_discard(F2FS_SB(sb)))
2349*4882a593Smuzhiyun return -EOPNOTSUPP;
2350*4882a593Smuzhiyun
2351*4882a593Smuzhiyun if (copy_from_user(&range, (struct fstrim_range __user *)arg,
2352*4882a593Smuzhiyun sizeof(range)))
2353*4882a593Smuzhiyun return -EFAULT;
2354*4882a593Smuzhiyun
2355*4882a593Smuzhiyun ret = mnt_want_write_file(filp);
2356*4882a593Smuzhiyun if (ret)
2357*4882a593Smuzhiyun return ret;
2358*4882a593Smuzhiyun
2359*4882a593Smuzhiyun range.minlen = max((unsigned int)range.minlen,
2360*4882a593Smuzhiyun q->limits.discard_granularity);
2361*4882a593Smuzhiyun ret = f2fs_trim_fs(F2FS_SB(sb), &range);
2362*4882a593Smuzhiyun mnt_drop_write_file(filp);
2363*4882a593Smuzhiyun if (ret < 0)
2364*4882a593Smuzhiyun return ret;
2365*4882a593Smuzhiyun
2366*4882a593Smuzhiyun if (copy_to_user((struct fstrim_range __user *)arg, &range,
2367*4882a593Smuzhiyun sizeof(range)))
2368*4882a593Smuzhiyun return -EFAULT;
2369*4882a593Smuzhiyun f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
2370*4882a593Smuzhiyun return 0;
2371*4882a593Smuzhiyun }
2372*4882a593Smuzhiyun
2373*4882a593Smuzhiyun static bool uuid_is_nonzero(__u8 u[16])
2374*4882a593Smuzhiyun {
2375*4882a593Smuzhiyun int i;
2376*4882a593Smuzhiyun
2377*4882a593Smuzhiyun for (i = 0; i < 16; i++)
2378*4882a593Smuzhiyun if (u[i])
2379*4882a593Smuzhiyun return true;
2380*4882a593Smuzhiyun return false;
2381*4882a593Smuzhiyun }
2382*4882a593Smuzhiyun
2383*4882a593Smuzhiyun static int f2fs_ioc_set_encryption_policy(struct file *filp, unsigned long arg)
2384*4882a593Smuzhiyun {
2385*4882a593Smuzhiyun struct inode *inode = file_inode(filp);
2386*4882a593Smuzhiyun
2387*4882a593Smuzhiyun if (!f2fs_sb_has_encrypt(F2FS_I_SB(inode)))
2388*4882a593Smuzhiyun return -EOPNOTSUPP;
2389*4882a593Smuzhiyun
2390*4882a593Smuzhiyun f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
2391*4882a593Smuzhiyun
2392*4882a593Smuzhiyun return fscrypt_ioctl_set_policy(filp, (const void __user *)arg);
2393*4882a593Smuzhiyun }
2394*4882a593Smuzhiyun
2395*4882a593Smuzhiyun static int f2fs_ioc_get_encryption_policy(struct file *filp, unsigned long arg)
2396*4882a593Smuzhiyun {
2397*4882a593Smuzhiyun if (!f2fs_sb_has_encrypt(F2FS_I_SB(file_inode(filp))))
2398*4882a593Smuzhiyun return -EOPNOTSUPP;
2399*4882a593Smuzhiyun return fscrypt_ioctl_get_policy(filp, (void __user *)arg);
2400*4882a593Smuzhiyun }
2401*4882a593Smuzhiyun
2402*4882a593Smuzhiyun static int f2fs_ioc_get_encryption_pwsalt(struct file *filp, unsigned long arg)
2403*4882a593Smuzhiyun {
2404*4882a593Smuzhiyun struct inode *inode = file_inode(filp);
2405*4882a593Smuzhiyun struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
2406*4882a593Smuzhiyun int err;
2407*4882a593Smuzhiyun
2408*4882a593Smuzhiyun if (!f2fs_sb_has_encrypt(sbi))
2409*4882a593Smuzhiyun return -EOPNOTSUPP;
2410*4882a593Smuzhiyun
2411*4882a593Smuzhiyun err = mnt_want_write_file(filp);
2412*4882a593Smuzhiyun if (err)
2413*4882a593Smuzhiyun return err;
2414*4882a593Smuzhiyun
2415*4882a593Smuzhiyun f2fs_down_write(&sbi->sb_lock);
2416*4882a593Smuzhiyun
2417*4882a593Smuzhiyun if (uuid_is_nonzero(sbi->raw_super->encrypt_pw_salt))
2418*4882a593Smuzhiyun goto got_it;
2419*4882a593Smuzhiyun
2420*4882a593Smuzhiyun /* update superblock with uuid */
2421*4882a593Smuzhiyun generate_random_uuid(sbi->raw_super->encrypt_pw_salt);
2422*4882a593Smuzhiyun
2423*4882a593Smuzhiyun err = f2fs_commit_super(sbi, false);
2424*4882a593Smuzhiyun if (err) {
2425*4882a593Smuzhiyun /* undo new data */
2426*4882a593Smuzhiyun memset(sbi->raw_super->encrypt_pw_salt, 0, 16);
2427*4882a593Smuzhiyun goto out_err;
2428*4882a593Smuzhiyun }
2429*4882a593Smuzhiyun got_it:
2430*4882a593Smuzhiyun if (copy_to_user((__u8 __user *)arg, sbi->raw_super->encrypt_pw_salt,
2431*4882a593Smuzhiyun 16))
2432*4882a593Smuzhiyun err = -EFAULT;
2433*4882a593Smuzhiyun out_err:
2434*4882a593Smuzhiyun f2fs_up_write(&sbi->sb_lock);
2435*4882a593Smuzhiyun mnt_drop_write_file(filp);
2436*4882a593Smuzhiyun return err;
2437*4882a593Smuzhiyun }
2438*4882a593Smuzhiyun
2439*4882a593Smuzhiyun static int f2fs_ioc_get_encryption_policy_ex(struct file *filp,
2440*4882a593Smuzhiyun unsigned long arg)
2441*4882a593Smuzhiyun {
2442*4882a593Smuzhiyun if (!f2fs_sb_has_encrypt(F2FS_I_SB(file_inode(filp))))
2443*4882a593Smuzhiyun return -EOPNOTSUPP;
2444*4882a593Smuzhiyun
2445*4882a593Smuzhiyun return fscrypt_ioctl_get_policy_ex(filp, (void __user *)arg);
2446*4882a593Smuzhiyun }
2447*4882a593Smuzhiyun
2448*4882a593Smuzhiyun static int f2fs_ioc_add_encryption_key(struct file *filp, unsigned long arg)
2449*4882a593Smuzhiyun {
2450*4882a593Smuzhiyun if (!f2fs_sb_has_encrypt(F2FS_I_SB(file_inode(filp))))
2451*4882a593Smuzhiyun return -EOPNOTSUPP;
2452*4882a593Smuzhiyun
2453*4882a593Smuzhiyun return fscrypt_ioctl_add_key(filp, (void __user *)arg);
2454*4882a593Smuzhiyun }
2455*4882a593Smuzhiyun
2456*4882a593Smuzhiyun static int f2fs_ioc_remove_encryption_key(struct file *filp, unsigned long arg)
2457*4882a593Smuzhiyun {
2458*4882a593Smuzhiyun if (!f2fs_sb_has_encrypt(F2FS_I_SB(file_inode(filp))))
2459*4882a593Smuzhiyun return -EOPNOTSUPP;
2460*4882a593Smuzhiyun
2461*4882a593Smuzhiyun return fscrypt_ioctl_remove_key(filp, (void __user *)arg);
2462*4882a593Smuzhiyun }
2463*4882a593Smuzhiyun
2464*4882a593Smuzhiyun static int f2fs_ioc_remove_encryption_key_all_users(struct file *filp,
2465*4882a593Smuzhiyun unsigned long arg)
2466*4882a593Smuzhiyun {
2467*4882a593Smuzhiyun if (!f2fs_sb_has_encrypt(F2FS_I_SB(file_inode(filp))))
2468*4882a593Smuzhiyun return -EOPNOTSUPP;
2469*4882a593Smuzhiyun
2470*4882a593Smuzhiyun return fscrypt_ioctl_remove_key_all_users(filp, (void __user *)arg);
2471*4882a593Smuzhiyun }
2472*4882a593Smuzhiyun
2473*4882a593Smuzhiyun static int f2fs_ioc_get_encryption_key_status(struct file *filp,
2474*4882a593Smuzhiyun unsigned long arg)
2475*4882a593Smuzhiyun {
2476*4882a593Smuzhiyun if (!f2fs_sb_has_encrypt(F2FS_I_SB(file_inode(filp))))
2477*4882a593Smuzhiyun return -EOPNOTSUPP;
2478*4882a593Smuzhiyun
2479*4882a593Smuzhiyun return fscrypt_ioctl_get_key_status(filp, (void __user *)arg);
2480*4882a593Smuzhiyun }
2481*4882a593Smuzhiyun
2482*4882a593Smuzhiyun static int f2fs_ioc_get_encryption_nonce(struct file *filp, unsigned long arg)
2483*4882a593Smuzhiyun {
2484*4882a593Smuzhiyun if (!f2fs_sb_has_encrypt(F2FS_I_SB(file_inode(filp))))
2485*4882a593Smuzhiyun return -EOPNOTSUPP;
2486*4882a593Smuzhiyun
2487*4882a593Smuzhiyun return fscrypt_ioctl_get_nonce(filp, (void __user *)arg);
2488*4882a593Smuzhiyun }
2489*4882a593Smuzhiyun
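/*
 * Trigger one foreground GC pass (CAP_SYS_ADMIN only). The __u32
 * argument selects the blocking behaviour: non-zero waits for gc_lock
 * and runs synchronous GC, zero only trylocks and fails with -EBUSY if
 * GC is already running. A minimal userspace sketch, assuming the UAPI
 * definitions from <linux/f2fs.h>:
 *
 *	__u32 sync = 1;
 *	int err = ioctl(fd, F2FS_IOC_GARBAGE_COLLECT, &sync);
 */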
2490*4882a593Smuzhiyun static int f2fs_ioc_gc(struct file *filp, unsigned long arg)
2491*4882a593Smuzhiyun {
2492*4882a593Smuzhiyun struct inode *inode = file_inode(filp);
2493*4882a593Smuzhiyun struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
2494*4882a593Smuzhiyun __u32 sync;
2495*4882a593Smuzhiyun int ret;
2496*4882a593Smuzhiyun
2497*4882a593Smuzhiyun if (!capable(CAP_SYS_ADMIN))
2498*4882a593Smuzhiyun return -EPERM;
2499*4882a593Smuzhiyun
2500*4882a593Smuzhiyun if (get_user(sync, (__u32 __user *)arg))
2501*4882a593Smuzhiyun return -EFAULT;
2502*4882a593Smuzhiyun
2503*4882a593Smuzhiyun if (f2fs_readonly(sbi->sb))
2504*4882a593Smuzhiyun return -EROFS;
2505*4882a593Smuzhiyun
2506*4882a593Smuzhiyun ret = mnt_want_write_file(filp);
2507*4882a593Smuzhiyun if (ret)
2508*4882a593Smuzhiyun return ret;
2509*4882a593Smuzhiyun
2510*4882a593Smuzhiyun if (!sync) {
2511*4882a593Smuzhiyun if (!f2fs_down_write_trylock(&sbi->gc_lock)) {
2512*4882a593Smuzhiyun ret = -EBUSY;
2513*4882a593Smuzhiyun goto out;
2514*4882a593Smuzhiyun }
2515*4882a593Smuzhiyun } else {
2516*4882a593Smuzhiyun f2fs_down_write(&sbi->gc_lock);
2517*4882a593Smuzhiyun }
2518*4882a593Smuzhiyun
2519*4882a593Smuzhiyun ret = f2fs_gc(sbi, sync, true, false, NULL_SEGNO);
2520*4882a593Smuzhiyun out:
2521*4882a593Smuzhiyun mnt_drop_write_file(filp);
2522*4882a593Smuzhiyun return ret;
2523*4882a593Smuzhiyun }
2524*4882a593Smuzhiyun
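/*
 * Garbage collect a caller-supplied block range, one section at a time.
 * The range is validated against [MAIN_BLKADDR, MAX_BLKADDR) and the
 * loop advances range->start by BLKS_PER_SEC() after each f2fs_gc()
 * call until the whole range has been visited; -EBUSY from GC is
 * reported to the caller as -EAGAIN.
 */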
2525*4882a593Smuzhiyun static int __f2fs_ioc_gc_range(struct file *filp, struct f2fs_gc_range *range)
2526*4882a593Smuzhiyun {
2527*4882a593Smuzhiyun struct f2fs_sb_info *sbi = F2FS_I_SB(file_inode(filp));
2528*4882a593Smuzhiyun u64 end;
2529*4882a593Smuzhiyun int ret;
2530*4882a593Smuzhiyun
2531*4882a593Smuzhiyun if (!capable(CAP_SYS_ADMIN))
2532*4882a593Smuzhiyun return -EPERM;
2533*4882a593Smuzhiyun if (f2fs_readonly(sbi->sb))
2534*4882a593Smuzhiyun return -EROFS;
2535*4882a593Smuzhiyun
2536*4882a593Smuzhiyun end = range->start + range->len;
2537*4882a593Smuzhiyun if (end < range->start || range->start < MAIN_BLKADDR(sbi) ||
2538*4882a593Smuzhiyun end >= MAX_BLKADDR(sbi))
2539*4882a593Smuzhiyun return -EINVAL;
2540*4882a593Smuzhiyun
2541*4882a593Smuzhiyun ret = mnt_want_write_file(filp);
2542*4882a593Smuzhiyun if (ret)
2543*4882a593Smuzhiyun return ret;
2544*4882a593Smuzhiyun
2545*4882a593Smuzhiyun do_more:
2546*4882a593Smuzhiyun if (!range->sync) {
2547*4882a593Smuzhiyun if (!f2fs_down_write_trylock(&sbi->gc_lock)) {
2548*4882a593Smuzhiyun ret = -EBUSY;
2549*4882a593Smuzhiyun goto out;
2550*4882a593Smuzhiyun }
2551*4882a593Smuzhiyun } else {
2552*4882a593Smuzhiyun f2fs_down_write(&sbi->gc_lock);
2553*4882a593Smuzhiyun }
2554*4882a593Smuzhiyun
2555*4882a593Smuzhiyun ret = f2fs_gc(sbi, range->sync, true, false,
2556*4882a593Smuzhiyun GET_SEGNO(sbi, range->start));
2557*4882a593Smuzhiyun if (ret) {
2558*4882a593Smuzhiyun if (ret == -EBUSY)
2559*4882a593Smuzhiyun ret = -EAGAIN;
2560*4882a593Smuzhiyun goto out;
2561*4882a593Smuzhiyun }
2562*4882a593Smuzhiyun range->start += BLKS_PER_SEC(sbi);
2563*4882a593Smuzhiyun if (range->start <= end)
2564*4882a593Smuzhiyun goto do_more;
2565*4882a593Smuzhiyun out:
2566*4882a593Smuzhiyun mnt_drop_write_file(filp);
2567*4882a593Smuzhiyun return ret;
2568*4882a593Smuzhiyun }
2569*4882a593Smuzhiyun
2570*4882a593Smuzhiyun static int f2fs_ioc_gc_range(struct file *filp, unsigned long arg)
2571*4882a593Smuzhiyun {
2572*4882a593Smuzhiyun struct f2fs_gc_range range;
2573*4882a593Smuzhiyun
2574*4882a593Smuzhiyun if (copy_from_user(&range, (struct f2fs_gc_range __user *)arg,
2575*4882a593Smuzhiyun sizeof(range)))
2576*4882a593Smuzhiyun return -EFAULT;
2577*4882a593Smuzhiyun return __f2fs_ioc_gc_range(filp, &range);
2578*4882a593Smuzhiyun }
2579*4882a593Smuzhiyun
2580*4882a593Smuzhiyun static int f2fs_ioc_write_checkpoint(struct file *filp, unsigned long arg)
2581*4882a593Smuzhiyun {
2582*4882a593Smuzhiyun struct inode *inode = file_inode(filp);
2583*4882a593Smuzhiyun struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
2584*4882a593Smuzhiyun int ret;
2585*4882a593Smuzhiyun
2586*4882a593Smuzhiyun if (!capable(CAP_SYS_ADMIN))
2587*4882a593Smuzhiyun return -EPERM;
2588*4882a593Smuzhiyun
2589*4882a593Smuzhiyun if (f2fs_readonly(sbi->sb))
2590*4882a593Smuzhiyun return -EROFS;
2591*4882a593Smuzhiyun
2592*4882a593Smuzhiyun if (unlikely(is_sbi_flag_set(sbi, SBI_CP_DISABLED))) {
2593*4882a593Smuzhiyun f2fs_info(sbi, "Skipping Checkpoint. Checkpoints currently disabled.");
2594*4882a593Smuzhiyun return -EINVAL;
2595*4882a593Smuzhiyun }
2596*4882a593Smuzhiyun
2597*4882a593Smuzhiyun ret = mnt_want_write_file(filp);
2598*4882a593Smuzhiyun if (ret)
2599*4882a593Smuzhiyun return ret;
2600*4882a593Smuzhiyun
2601*4882a593Smuzhiyun ret = f2fs_sync_fs(sbi->sb, 1);
2602*4882a593Smuzhiyun
2603*4882a593Smuzhiyun mnt_drop_write_file(filp);
2604*4882a593Smuzhiyun return ret;
2605*4882a593Smuzhiyun }
2606*4882a593Smuzhiyun
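/*
 * Defragment one file range in two passes: first walk the block map to
 * decide whether the range is fragmented at all (fully contiguous
 * physical addresses are skipped), then mark the mapped pages dirty
 * with FI_SKIP_WRITES held and flush them with FI_OPU_WRITE set, so
 * writeback reallocates the blocks out-of-place roughly one segment at
 * a time.
 */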
2607*4882a593Smuzhiyun static int f2fs_defragment_range(struct f2fs_sb_info *sbi,
2608*4882a593Smuzhiyun struct file *filp,
2609*4882a593Smuzhiyun struct f2fs_defragment *range)
2610*4882a593Smuzhiyun {
2611*4882a593Smuzhiyun struct inode *inode = file_inode(filp);
2612*4882a593Smuzhiyun struct f2fs_map_blocks map = { .m_next_extent = NULL,
2613*4882a593Smuzhiyun .m_seg_type = NO_CHECK_TYPE,
2614*4882a593Smuzhiyun .m_may_create = false };
2615*4882a593Smuzhiyun struct extent_info ei = {};
2616*4882a593Smuzhiyun pgoff_t pg_start, pg_end, next_pgofs;
2617*4882a593Smuzhiyun unsigned int blk_per_seg = sbi->blocks_per_seg;
2618*4882a593Smuzhiyun unsigned int total = 0, sec_num;
2619*4882a593Smuzhiyun block_t blk_end = 0;
2620*4882a593Smuzhiyun bool fragmented = false;
2621*4882a593Smuzhiyun int err;
2622*4882a593Smuzhiyun
2623*4882a593Smuzhiyun pg_start = range->start >> PAGE_SHIFT;
2624*4882a593Smuzhiyun pg_end = (range->start + range->len) >> PAGE_SHIFT;
2625*4882a593Smuzhiyun
2626*4882a593Smuzhiyun f2fs_balance_fs(sbi, true);
2627*4882a593Smuzhiyun
2628*4882a593Smuzhiyun inode_lock(inode);
2629*4882a593Smuzhiyun
2630*4882a593Smuzhiyun /* if in-place-update policy is enabled, don't waste time here */
2631*4882a593Smuzhiyun set_inode_flag(inode, FI_OPU_WRITE);
2632*4882a593Smuzhiyun if (f2fs_should_update_inplace(inode, NULL)) {
2633*4882a593Smuzhiyun err = -EINVAL;
2634*4882a593Smuzhiyun goto out;
2635*4882a593Smuzhiyun }
2636*4882a593Smuzhiyun
2637*4882a593Smuzhiyun /* writeback all dirty pages in the range */
2638*4882a593Smuzhiyun err = filemap_write_and_wait_range(inode->i_mapping, range->start,
2639*4882a593Smuzhiyun range->start + range->len - 1);
2640*4882a593Smuzhiyun if (err)
2641*4882a593Smuzhiyun goto out;
2642*4882a593Smuzhiyun
2643*4882a593Smuzhiyun /*
2644*4882a593Smuzhiyun * lookup mapping info in extent cache, skip defragmenting if physical
2645*4882a593Smuzhiyun * block addresses are continuous.
2646*4882a593Smuzhiyun */
2647*4882a593Smuzhiyun if (f2fs_lookup_read_extent_cache(inode, pg_start, &ei)) {
2648*4882a593Smuzhiyun if (ei.fofs + ei.len >= pg_end)
2649*4882a593Smuzhiyun goto out;
2650*4882a593Smuzhiyun }
2651*4882a593Smuzhiyun
2652*4882a593Smuzhiyun map.m_lblk = pg_start;
2653*4882a593Smuzhiyun map.m_next_pgofs = &next_pgofs;
2654*4882a593Smuzhiyun
2655*4882a593Smuzhiyun /*
2656*4882a593Smuzhiyun * lookup mapping info in dnode page cache, skip defragmenting if all
2657*4882a593Smuzhiyun * physical block addresses are continuous even if there are hole(s)
2658*4882a593Smuzhiyun * in logical blocks.
2659*4882a593Smuzhiyun */
2660*4882a593Smuzhiyun while (map.m_lblk < pg_end) {
2661*4882a593Smuzhiyun map.m_len = pg_end - map.m_lblk;
2662*4882a593Smuzhiyun err = f2fs_map_blocks(inode, &map, 0, F2FS_GET_BLOCK_DEFAULT);
2663*4882a593Smuzhiyun if (err)
2664*4882a593Smuzhiyun goto out;
2665*4882a593Smuzhiyun
2666*4882a593Smuzhiyun if (!(map.m_flags & F2FS_MAP_FLAGS)) {
2667*4882a593Smuzhiyun map.m_lblk = next_pgofs;
2668*4882a593Smuzhiyun continue;
2669*4882a593Smuzhiyun }
2670*4882a593Smuzhiyun
2671*4882a593Smuzhiyun if (blk_end && blk_end != map.m_pblk)
2672*4882a593Smuzhiyun fragmented = true;
2673*4882a593Smuzhiyun
2674*4882a593Smuzhiyun /* record total count of blocks that we're going to move */
2675*4882a593Smuzhiyun total += map.m_len;
2676*4882a593Smuzhiyun
2677*4882a593Smuzhiyun blk_end = map.m_pblk + map.m_len;
2678*4882a593Smuzhiyun
2679*4882a593Smuzhiyun map.m_lblk += map.m_len;
2680*4882a593Smuzhiyun }
2681*4882a593Smuzhiyun
2682*4882a593Smuzhiyun if (!fragmented) {
2683*4882a593Smuzhiyun total = 0;
2684*4882a593Smuzhiyun goto out;
2685*4882a593Smuzhiyun }
2686*4882a593Smuzhiyun
2687*4882a593Smuzhiyun sec_num = DIV_ROUND_UP(total, BLKS_PER_SEC(sbi));
2688*4882a593Smuzhiyun
2689*4882a593Smuzhiyun /*
2690*4882a593Smuzhiyun * make sure there are enough free sections for LFS allocation; this
2691*4882a593Smuzhiyun * avoids running defragment in SSR mode when free sections are being
2692*4882a593Smuzhiyun * allocated intensively
2693*4882a593Smuzhiyun */
2694*4882a593Smuzhiyun if (has_not_enough_free_secs(sbi, 0, sec_num)) {
2695*4882a593Smuzhiyun err = -EAGAIN;
2696*4882a593Smuzhiyun goto out;
2697*4882a593Smuzhiyun }
2698*4882a593Smuzhiyun
2699*4882a593Smuzhiyun map.m_lblk = pg_start;
2700*4882a593Smuzhiyun map.m_len = pg_end - pg_start;
2701*4882a593Smuzhiyun total = 0;
2702*4882a593Smuzhiyun
2703*4882a593Smuzhiyun while (map.m_lblk < pg_end) {
2704*4882a593Smuzhiyun pgoff_t idx;
2705*4882a593Smuzhiyun int cnt = 0;
2706*4882a593Smuzhiyun
2707*4882a593Smuzhiyun do_map:
2708*4882a593Smuzhiyun map.m_len = pg_end - map.m_lblk;
2709*4882a593Smuzhiyun err = f2fs_map_blocks(inode, &map, 0, F2FS_GET_BLOCK_DEFAULT);
2710*4882a593Smuzhiyun if (err)
2711*4882a593Smuzhiyun goto clear_out;
2712*4882a593Smuzhiyun
2713*4882a593Smuzhiyun if (!(map.m_flags & F2FS_MAP_FLAGS)) {
2714*4882a593Smuzhiyun map.m_lblk = next_pgofs;
2715*4882a593Smuzhiyun goto check;
2716*4882a593Smuzhiyun }
2717*4882a593Smuzhiyun
2718*4882a593Smuzhiyun set_inode_flag(inode, FI_SKIP_WRITES);
2719*4882a593Smuzhiyun
2720*4882a593Smuzhiyun idx = map.m_lblk;
2721*4882a593Smuzhiyun while (idx < map.m_lblk + map.m_len && cnt < blk_per_seg) {
2722*4882a593Smuzhiyun struct page *page;
2723*4882a593Smuzhiyun
2724*4882a593Smuzhiyun page = f2fs_get_lock_data_page(inode, idx, true);
2725*4882a593Smuzhiyun if (IS_ERR(page)) {
2726*4882a593Smuzhiyun err = PTR_ERR(page);
2727*4882a593Smuzhiyun goto clear_out;
2728*4882a593Smuzhiyun }
2729*4882a593Smuzhiyun
2730*4882a593Smuzhiyun set_page_dirty(page);
2731*4882a593Smuzhiyun f2fs_put_page(page, 1);
2732*4882a593Smuzhiyun
2733*4882a593Smuzhiyun idx++;
2734*4882a593Smuzhiyun cnt++;
2735*4882a593Smuzhiyun total++;
2736*4882a593Smuzhiyun }
2737*4882a593Smuzhiyun
2738*4882a593Smuzhiyun map.m_lblk = idx;
2739*4882a593Smuzhiyun check:
2740*4882a593Smuzhiyun if (map.m_lblk < pg_end && cnt < blk_per_seg)
2741*4882a593Smuzhiyun goto do_map;
2742*4882a593Smuzhiyun
2743*4882a593Smuzhiyun clear_inode_flag(inode, FI_SKIP_WRITES);
2744*4882a593Smuzhiyun
2745*4882a593Smuzhiyun err = filemap_fdatawrite(inode->i_mapping);
2746*4882a593Smuzhiyun if (err)
2747*4882a593Smuzhiyun goto out;
2748*4882a593Smuzhiyun }
2749*4882a593Smuzhiyun clear_out:
2750*4882a593Smuzhiyun clear_inode_flag(inode, FI_SKIP_WRITES);
2751*4882a593Smuzhiyun out:
2752*4882a593Smuzhiyun clear_inode_flag(inode, FI_OPU_WRITE);
2753*4882a593Smuzhiyun inode_unlock(inode);
2754*4882a593Smuzhiyun if (!err)
2755*4882a593Smuzhiyun range->len = (u64)total << PAGE_SHIFT;
2756*4882a593Smuzhiyun return err;
2757*4882a593Smuzhiyun }
2758*4882a593Smuzhiyun
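/*
 * ioctl entry point for defragmentation. A userspace sketch, assuming
 * struct f2fs_defragment from <linux/f2fs.h> (start/len are byte
 * offsets and must be block aligned; on success len is rewritten with
 * the number of bytes that were relocated):
 *
 *	struct f2fs_defragment df = { .start = 0, .len = length };
 *	int err = ioctl(fd, F2FS_IOC_DEFRAGMENT, &df);
 */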
2759*4882a593Smuzhiyun static int f2fs_ioc_defragment(struct file *filp, unsigned long arg)
2760*4882a593Smuzhiyun {
2761*4882a593Smuzhiyun struct inode *inode = file_inode(filp);
2762*4882a593Smuzhiyun struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
2763*4882a593Smuzhiyun struct f2fs_defragment range;
2764*4882a593Smuzhiyun int err;
2765*4882a593Smuzhiyun
2766*4882a593Smuzhiyun if (!capable(CAP_SYS_ADMIN))
2767*4882a593Smuzhiyun return -EPERM;
2768*4882a593Smuzhiyun
2769*4882a593Smuzhiyun if (!S_ISREG(inode->i_mode) || f2fs_is_atomic_file(inode))
2770*4882a593Smuzhiyun return -EINVAL;
2771*4882a593Smuzhiyun
2772*4882a593Smuzhiyun if (f2fs_readonly(sbi->sb))
2773*4882a593Smuzhiyun return -EROFS;
2774*4882a593Smuzhiyun
2775*4882a593Smuzhiyun if (copy_from_user(&range, (struct f2fs_defragment __user *)arg,
2776*4882a593Smuzhiyun sizeof(range)))
2777*4882a593Smuzhiyun return -EFAULT;
2778*4882a593Smuzhiyun
2779*4882a593Smuzhiyun /* verify alignment of offset & size */
2780*4882a593Smuzhiyun if (range.start & (F2FS_BLKSIZE - 1) || range.len & (F2FS_BLKSIZE - 1))
2781*4882a593Smuzhiyun return -EINVAL;
2782*4882a593Smuzhiyun
2783*4882a593Smuzhiyun if (unlikely((range.start + range.len) >> PAGE_SHIFT >
2784*4882a593Smuzhiyun max_file_blocks(inode)))
2785*4882a593Smuzhiyun return -EINVAL;
2786*4882a593Smuzhiyun
2787*4882a593Smuzhiyun err = mnt_want_write_file(filp);
2788*4882a593Smuzhiyun if (err)
2789*4882a593Smuzhiyun return err;
2790*4882a593Smuzhiyun
2791*4882a593Smuzhiyun err = f2fs_defragment_range(sbi, filp, &range);
2792*4882a593Smuzhiyun mnt_drop_write_file(filp);
2793*4882a593Smuzhiyun
2794*4882a593Smuzhiyun f2fs_update_time(sbi, REQ_TIME);
2795*4882a593Smuzhiyun if (err < 0)
2796*4882a593Smuzhiyun return err;
2797*4882a593Smuzhiyun
2798*4882a593Smuzhiyun if (copy_to_user((struct f2fs_defragment __user *)arg, &range,
2799*4882a593Smuzhiyun sizeof(range)))
2800*4882a593Smuzhiyun return -EFAULT;
2801*4882a593Smuzhiyun
2802*4882a593Smuzhiyun return 0;
2803*4882a593Smuzhiyun }
2804*4882a593Smuzhiyun
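/*
 * Move (exchange) a block-aligned range of data from file_in into
 * file_out. Both files must be regular, unencrypted and on the same
 * f2fs instance, and overlapping ranges within one inode are rejected.
 * Dirty pages are flushed first, then __exchange_data_block() swaps
 * the block pointers under f2fs_lock_op(), growing the destination's
 * i_size when the copied range extends past it.
 */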
2805*4882a593Smuzhiyun static int f2fs_move_file_range(struct file *file_in, loff_t pos_in,
2806*4882a593Smuzhiyun struct file *file_out, loff_t pos_out, size_t len)
2807*4882a593Smuzhiyun {
2808*4882a593Smuzhiyun struct inode *src = file_inode(file_in);
2809*4882a593Smuzhiyun struct inode *dst = file_inode(file_out);
2810*4882a593Smuzhiyun struct f2fs_sb_info *sbi = F2FS_I_SB(src);
2811*4882a593Smuzhiyun size_t olen = len, dst_max_i_size = 0;
2812*4882a593Smuzhiyun size_t dst_osize;
2813*4882a593Smuzhiyun int ret;
2814*4882a593Smuzhiyun
2815*4882a593Smuzhiyun if (file_in->f_path.mnt != file_out->f_path.mnt ||
2816*4882a593Smuzhiyun src->i_sb != dst->i_sb)
2817*4882a593Smuzhiyun return -EXDEV;
2818*4882a593Smuzhiyun
2819*4882a593Smuzhiyun if (unlikely(f2fs_readonly(src->i_sb)))
2820*4882a593Smuzhiyun return -EROFS;
2821*4882a593Smuzhiyun
2822*4882a593Smuzhiyun if (!S_ISREG(src->i_mode) || !S_ISREG(dst->i_mode))
2823*4882a593Smuzhiyun return -EINVAL;
2824*4882a593Smuzhiyun
2825*4882a593Smuzhiyun if (IS_ENCRYPTED(src) || IS_ENCRYPTED(dst))
2826*4882a593Smuzhiyun return -EOPNOTSUPP;
2827*4882a593Smuzhiyun
2828*4882a593Smuzhiyun if (pos_out < 0 || pos_in < 0)
2829*4882a593Smuzhiyun return -EINVAL;
2830*4882a593Smuzhiyun
2831*4882a593Smuzhiyun if (src == dst) {
2832*4882a593Smuzhiyun if (pos_in == pos_out)
2833*4882a593Smuzhiyun return 0;
2834*4882a593Smuzhiyun if (pos_out > pos_in && pos_out < pos_in + len)
2835*4882a593Smuzhiyun return -EINVAL;
2836*4882a593Smuzhiyun }
2837*4882a593Smuzhiyun
2838*4882a593Smuzhiyun inode_lock(src);
2839*4882a593Smuzhiyun if (src != dst) {
2840*4882a593Smuzhiyun ret = -EBUSY;
2841*4882a593Smuzhiyun if (!inode_trylock(dst))
2842*4882a593Smuzhiyun goto out;
2843*4882a593Smuzhiyun }
2844*4882a593Smuzhiyun
2845*4882a593Smuzhiyun ret = -EINVAL;
2846*4882a593Smuzhiyun if (pos_in + len > src->i_size || pos_in + len < pos_in)
2847*4882a593Smuzhiyun goto out_unlock;
2848*4882a593Smuzhiyun if (len == 0)
2849*4882a593Smuzhiyun olen = len = src->i_size - pos_in;
2850*4882a593Smuzhiyun if (pos_in + len == src->i_size)
2851*4882a593Smuzhiyun len = ALIGN(src->i_size, F2FS_BLKSIZE) - pos_in;
2852*4882a593Smuzhiyun if (len == 0) {
2853*4882a593Smuzhiyun ret = 0;
2854*4882a593Smuzhiyun goto out_unlock;
2855*4882a593Smuzhiyun }
2856*4882a593Smuzhiyun
2857*4882a593Smuzhiyun dst_osize = dst->i_size;
2858*4882a593Smuzhiyun if (pos_out + olen > dst->i_size)
2859*4882a593Smuzhiyun dst_max_i_size = pos_out + olen;
2860*4882a593Smuzhiyun
2861*4882a593Smuzhiyun /* verify the end result is block aligned */
2862*4882a593Smuzhiyun if (!IS_ALIGNED(pos_in, F2FS_BLKSIZE) ||
2863*4882a593Smuzhiyun !IS_ALIGNED(pos_in + len, F2FS_BLKSIZE) ||
2864*4882a593Smuzhiyun !IS_ALIGNED(pos_out, F2FS_BLKSIZE))
2865*4882a593Smuzhiyun goto out_unlock;
2866*4882a593Smuzhiyun
2867*4882a593Smuzhiyun ret = f2fs_convert_inline_inode(src);
2868*4882a593Smuzhiyun if (ret)
2869*4882a593Smuzhiyun goto out_unlock;
2870*4882a593Smuzhiyun
2871*4882a593Smuzhiyun ret = f2fs_convert_inline_inode(dst);
2872*4882a593Smuzhiyun if (ret)
2873*4882a593Smuzhiyun goto out_unlock;
2874*4882a593Smuzhiyun
2875*4882a593Smuzhiyun /* write out all dirty pages from offset */
2876*4882a593Smuzhiyun ret = filemap_write_and_wait_range(src->i_mapping,
2877*4882a593Smuzhiyun pos_in, pos_in + len);
2878*4882a593Smuzhiyun if (ret)
2879*4882a593Smuzhiyun goto out_unlock;
2880*4882a593Smuzhiyun
2881*4882a593Smuzhiyun ret = filemap_write_and_wait_range(dst->i_mapping,
2882*4882a593Smuzhiyun pos_out, pos_out + len);
2883*4882a593Smuzhiyun if (ret)
2884*4882a593Smuzhiyun goto out_unlock;
2885*4882a593Smuzhiyun
2886*4882a593Smuzhiyun f2fs_balance_fs(sbi, true);
2887*4882a593Smuzhiyun
2888*4882a593Smuzhiyun f2fs_down_write(&F2FS_I(src)->i_gc_rwsem[WRITE]);
2889*4882a593Smuzhiyun if (src != dst) {
2890*4882a593Smuzhiyun ret = -EBUSY;
2891*4882a593Smuzhiyun if (!f2fs_down_write_trylock(&F2FS_I(dst)->i_gc_rwsem[WRITE]))
2892*4882a593Smuzhiyun goto out_src;
2893*4882a593Smuzhiyun }
2894*4882a593Smuzhiyun
2895*4882a593Smuzhiyun f2fs_lock_op(sbi);
2896*4882a593Smuzhiyun ret = __exchange_data_block(src, dst, pos_in >> F2FS_BLKSIZE_BITS,
2897*4882a593Smuzhiyun pos_out >> F2FS_BLKSIZE_BITS,
2898*4882a593Smuzhiyun len >> F2FS_BLKSIZE_BITS, false);
2899*4882a593Smuzhiyun
2900*4882a593Smuzhiyun if (!ret) {
2901*4882a593Smuzhiyun if (dst_max_i_size)
2902*4882a593Smuzhiyun f2fs_i_size_write(dst, dst_max_i_size);
2903*4882a593Smuzhiyun else if (dst_osize != dst->i_size)
2904*4882a593Smuzhiyun f2fs_i_size_write(dst, dst_osize);
2905*4882a593Smuzhiyun }
2906*4882a593Smuzhiyun f2fs_unlock_op(sbi);
2907*4882a593Smuzhiyun
2908*4882a593Smuzhiyun if (src != dst)
2909*4882a593Smuzhiyun f2fs_up_write(&F2FS_I(dst)->i_gc_rwsem[WRITE]);
2910*4882a593Smuzhiyun out_src:
2911*4882a593Smuzhiyun f2fs_up_write(&F2FS_I(src)->i_gc_rwsem[WRITE]);
2912*4882a593Smuzhiyun out_unlock:
2913*4882a593Smuzhiyun if (src != dst)
2914*4882a593Smuzhiyun inode_unlock(dst);
2915*4882a593Smuzhiyun out:
2916*4882a593Smuzhiyun inode_unlock(src);
2917*4882a593Smuzhiyun return ret;
2918*4882a593Smuzhiyun }
2919*4882a593Smuzhiyun
2920*4882a593Smuzhiyun static int __f2fs_ioc_move_range(struct file *filp,
2921*4882a593Smuzhiyun struct f2fs_move_range *range)
2922*4882a593Smuzhiyun {
2923*4882a593Smuzhiyun struct fd dst;
2924*4882a593Smuzhiyun int err;
2925*4882a593Smuzhiyun
2926*4882a593Smuzhiyun if (!(filp->f_mode & FMODE_READ) ||
2927*4882a593Smuzhiyun !(filp->f_mode & FMODE_WRITE))
2928*4882a593Smuzhiyun return -EBADF;
2929*4882a593Smuzhiyun
2930*4882a593Smuzhiyun dst = fdget(range->dst_fd);
2931*4882a593Smuzhiyun if (!dst.file)
2932*4882a593Smuzhiyun return -EBADF;
2933*4882a593Smuzhiyun
2934*4882a593Smuzhiyun if (!(dst.file->f_mode & FMODE_WRITE)) {
2935*4882a593Smuzhiyun err = -EBADF;
2936*4882a593Smuzhiyun goto err_out;
2937*4882a593Smuzhiyun }
2938*4882a593Smuzhiyun
2939*4882a593Smuzhiyun err = mnt_want_write_file(filp);
2940*4882a593Smuzhiyun if (err)
2941*4882a593Smuzhiyun goto err_out;
2942*4882a593Smuzhiyun
2943*4882a593Smuzhiyun err = f2fs_move_file_range(filp, range->pos_in, dst.file,
2944*4882a593Smuzhiyun range->pos_out, range->len);
2945*4882a593Smuzhiyun
2946*4882a593Smuzhiyun mnt_drop_write_file(filp);
2947*4882a593Smuzhiyun err_out:
2948*4882a593Smuzhiyun fdput(dst);
2949*4882a593Smuzhiyun return err;
2950*4882a593Smuzhiyun }
2951*4882a593Smuzhiyun
2952*4882a593Smuzhiyun static int f2fs_ioc_move_range(struct file *filp, unsigned long arg)
2953*4882a593Smuzhiyun {
2954*4882a593Smuzhiyun struct f2fs_move_range range;
2955*4882a593Smuzhiyun
2956*4882a593Smuzhiyun if (copy_from_user(&range, (struct f2fs_move_range __user *)arg,
2957*4882a593Smuzhiyun sizeof(range)))
2958*4882a593Smuzhiyun return -EFAULT;
2959*4882a593Smuzhiyun return __f2fs_ioc_move_range(filp, &range);
2960*4882a593Smuzhiyun }
2961*4882a593Smuzhiyun
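/*
 * Migrate data off one device of a multi-device f2fs by force-running
 * foreground GC over up to range.segments segments of that device,
 * resuming from the last FLUSH_DEVICE victim. Only supported when each
 * section holds a single segment and checkpointing is enabled.
 */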
2962*4882a593Smuzhiyun static int f2fs_ioc_flush_device(struct file *filp, unsigned long arg)
2963*4882a593Smuzhiyun {
2964*4882a593Smuzhiyun struct inode *inode = file_inode(filp);
2965*4882a593Smuzhiyun struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
2966*4882a593Smuzhiyun struct sit_info *sm = SIT_I(sbi);
2967*4882a593Smuzhiyun unsigned int start_segno = 0, end_segno = 0;
2968*4882a593Smuzhiyun unsigned int dev_start_segno = 0, dev_end_segno = 0;
2969*4882a593Smuzhiyun struct f2fs_flush_device range;
2970*4882a593Smuzhiyun int ret;
2971*4882a593Smuzhiyun
2972*4882a593Smuzhiyun if (!capable(CAP_SYS_ADMIN))
2973*4882a593Smuzhiyun return -EPERM;
2974*4882a593Smuzhiyun
2975*4882a593Smuzhiyun if (f2fs_readonly(sbi->sb))
2976*4882a593Smuzhiyun return -EROFS;
2977*4882a593Smuzhiyun
2978*4882a593Smuzhiyun if (unlikely(is_sbi_flag_set(sbi, SBI_CP_DISABLED)))
2979*4882a593Smuzhiyun return -EINVAL;
2980*4882a593Smuzhiyun
2981*4882a593Smuzhiyun if (copy_from_user(&range, (struct f2fs_flush_device __user *)arg,
2982*4882a593Smuzhiyun sizeof(range)))
2983*4882a593Smuzhiyun return -EFAULT;
2984*4882a593Smuzhiyun
2985*4882a593Smuzhiyun if (!f2fs_is_multi_device(sbi) || sbi->s_ndevs - 1 <= range.dev_num ||
2986*4882a593Smuzhiyun __is_large_section(sbi)) {
2987*4882a593Smuzhiyun f2fs_warn(sbi, "Can't flush %u in %d for segs_per_sec %u != 1",
2988*4882a593Smuzhiyun range.dev_num, sbi->s_ndevs, sbi->segs_per_sec);
2989*4882a593Smuzhiyun return -EINVAL;
2990*4882a593Smuzhiyun }
2991*4882a593Smuzhiyun
2992*4882a593Smuzhiyun ret = mnt_want_write_file(filp);
2993*4882a593Smuzhiyun if (ret)
2994*4882a593Smuzhiyun return ret;
2995*4882a593Smuzhiyun
2996*4882a593Smuzhiyun if (range.dev_num != 0)
2997*4882a593Smuzhiyun dev_start_segno = GET_SEGNO(sbi, FDEV(range.dev_num).start_blk);
2998*4882a593Smuzhiyun dev_end_segno = GET_SEGNO(sbi, FDEV(range.dev_num).end_blk);
2999*4882a593Smuzhiyun
3000*4882a593Smuzhiyun start_segno = sm->last_victim[FLUSH_DEVICE];
3001*4882a593Smuzhiyun if (start_segno < dev_start_segno || start_segno >= dev_end_segno)
3002*4882a593Smuzhiyun start_segno = dev_start_segno;
3003*4882a593Smuzhiyun end_segno = min(start_segno + range.segments, dev_end_segno);
3004*4882a593Smuzhiyun
3005*4882a593Smuzhiyun while (start_segno < end_segno) {
3006*4882a593Smuzhiyun if (!f2fs_down_write_trylock(&sbi->gc_lock)) {
3007*4882a593Smuzhiyun ret = -EBUSY;
3008*4882a593Smuzhiyun goto out;
3009*4882a593Smuzhiyun }
3010*4882a593Smuzhiyun sm->last_victim[GC_CB] = end_segno + 1;
3011*4882a593Smuzhiyun sm->last_victim[GC_GREEDY] = end_segno + 1;
3012*4882a593Smuzhiyun sm->last_victim[ALLOC_NEXT] = end_segno + 1;
3013*4882a593Smuzhiyun ret = f2fs_gc(sbi, true, true, true, start_segno);
3014*4882a593Smuzhiyun if (ret == -EAGAIN)
3015*4882a593Smuzhiyun ret = 0;
3016*4882a593Smuzhiyun else if (ret < 0)
3017*4882a593Smuzhiyun break;
3018*4882a593Smuzhiyun start_segno++;
3019*4882a593Smuzhiyun }
3020*4882a593Smuzhiyun out:
3021*4882a593Smuzhiyun mnt_drop_write_file(filp);
3022*4882a593Smuzhiyun return ret;
3023*4882a593Smuzhiyun }
3024*4882a593Smuzhiyun
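/*
 * Report the superblock feature bitmap to userspace. The atomic write
 * bit is always advertised on top of the on-disk feature flags (see the
 * comment below about SQLite behavior on Android). Sketch, assuming the
 * UAPI ioctl from <linux/f2fs.h>; the individual feature bit values
 * mirror the kernel's F2FS_FEATURE_* definitions:
 *
 *	__u32 feat;
 *	int err = ioctl(fd, F2FS_IOC_GET_FEATURES, &feat);
 */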
3025*4882a593Smuzhiyun static int f2fs_ioc_get_features(struct file *filp, unsigned long arg)
3026*4882a593Smuzhiyun {
3027*4882a593Smuzhiyun struct inode *inode = file_inode(filp);
3028*4882a593Smuzhiyun u32 sb_feature = le32_to_cpu(F2FS_I_SB(inode)->raw_super->feature);
3029*4882a593Smuzhiyun
3030*4882a593Smuzhiyun /* Must validate to set it with SQLite behavior in Android. */
3031*4882a593Smuzhiyun sb_feature |= F2FS_FEATURE_ATOMIC_WRITE;
3032*4882a593Smuzhiyun
3033*4882a593Smuzhiyun return put_user(sb_feature, (u32 __user *)arg);
3034*4882a593Smuzhiyun }
3035*4882a593Smuzhiyun
3036*4882a593Smuzhiyun #ifdef CONFIG_QUOTA
3037*4882a593Smuzhiyun int f2fs_transfer_project_quota(struct inode *inode, kprojid_t kprojid)
3038*4882a593Smuzhiyun {
3039*4882a593Smuzhiyun struct dquot *transfer_to[MAXQUOTAS] = {};
3040*4882a593Smuzhiyun struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
3041*4882a593Smuzhiyun struct super_block *sb = sbi->sb;
3042*4882a593Smuzhiyun int err = 0;
3043*4882a593Smuzhiyun
3044*4882a593Smuzhiyun transfer_to[PRJQUOTA] = dqget(sb, make_kqid_projid(kprojid));
3045*4882a593Smuzhiyun if (!IS_ERR(transfer_to[PRJQUOTA])) {
3046*4882a593Smuzhiyun err = __dquot_transfer(inode, transfer_to);
3047*4882a593Smuzhiyun if (err)
3048*4882a593Smuzhiyun set_sbi_flag(sbi, SBI_QUOTA_NEED_REPAIR);
3049*4882a593Smuzhiyun dqput(transfer_to[PRJQUOTA]);
3050*4882a593Smuzhiyun }
3051*4882a593Smuzhiyun return err;
3052*4882a593Smuzhiyun }
3053*4882a593Smuzhiyun
3054*4882a593Smuzhiyun static int f2fs_ioc_setproject(struct file *filp, __u32 projid)
3055*4882a593Smuzhiyun {
3056*4882a593Smuzhiyun struct inode *inode = file_inode(filp);
3057*4882a593Smuzhiyun struct f2fs_inode_info *fi = F2FS_I(inode);
3058*4882a593Smuzhiyun struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
3059*4882a593Smuzhiyun struct page *ipage;
3060*4882a593Smuzhiyun kprojid_t kprojid;
3061*4882a593Smuzhiyun int err;
3062*4882a593Smuzhiyun
3063*4882a593Smuzhiyun if (!f2fs_sb_has_project_quota(sbi)) {
3064*4882a593Smuzhiyun if (projid != F2FS_DEF_PROJID)
3065*4882a593Smuzhiyun return -EOPNOTSUPP;
3066*4882a593Smuzhiyun else
3067*4882a593Smuzhiyun return 0;
3068*4882a593Smuzhiyun }
3069*4882a593Smuzhiyun
3070*4882a593Smuzhiyun if (!f2fs_has_extra_attr(inode))
3071*4882a593Smuzhiyun return -EOPNOTSUPP;
3072*4882a593Smuzhiyun
3073*4882a593Smuzhiyun kprojid = make_kprojid(&init_user_ns, (projid_t)projid);
3074*4882a593Smuzhiyun
3075*4882a593Smuzhiyun if (projid_eq(kprojid, F2FS_I(inode)->i_projid))
3076*4882a593Smuzhiyun return 0;
3077*4882a593Smuzhiyun
3078*4882a593Smuzhiyun err = -EPERM;
3079*4882a593Smuzhiyun /* Is it a quota file? Do not allow the user to mess with it */
3080*4882a593Smuzhiyun if (IS_NOQUOTA(inode))
3081*4882a593Smuzhiyun return err;
3082*4882a593Smuzhiyun
3083*4882a593Smuzhiyun ipage = f2fs_get_node_page(sbi, inode->i_ino);
3084*4882a593Smuzhiyun if (IS_ERR(ipage))
3085*4882a593Smuzhiyun return PTR_ERR(ipage);
3086*4882a593Smuzhiyun
3087*4882a593Smuzhiyun if (!F2FS_FITS_IN_INODE(F2FS_INODE(ipage), fi->i_extra_isize,
3088*4882a593Smuzhiyun i_projid)) {
3089*4882a593Smuzhiyun err = -EOVERFLOW;
3090*4882a593Smuzhiyun f2fs_put_page(ipage, 1);
3091*4882a593Smuzhiyun return err;
3092*4882a593Smuzhiyun }
3093*4882a593Smuzhiyun f2fs_put_page(ipage, 1);
3094*4882a593Smuzhiyun
3095*4882a593Smuzhiyun err = dquot_initialize(inode);
3096*4882a593Smuzhiyun if (err)
3097*4882a593Smuzhiyun return err;
3098*4882a593Smuzhiyun
3099*4882a593Smuzhiyun f2fs_lock_op(sbi);
3100*4882a593Smuzhiyun err = f2fs_transfer_project_quota(inode, kprojid);
3101*4882a593Smuzhiyun if (err)
3102*4882a593Smuzhiyun goto out_unlock;
3103*4882a593Smuzhiyun
3104*4882a593Smuzhiyun F2FS_I(inode)->i_projid = kprojid;
3105*4882a593Smuzhiyun inode->i_ctime = current_time(inode);
3106*4882a593Smuzhiyun f2fs_mark_inode_dirty_sync(inode, true);
3107*4882a593Smuzhiyun out_unlock:
3108*4882a593Smuzhiyun f2fs_unlock_op(sbi);
3109*4882a593Smuzhiyun return err;
3110*4882a593Smuzhiyun }
3111*4882a593Smuzhiyun #else
3112*4882a593Smuzhiyun int f2fs_transfer_project_quota(struct inode *inode, kprojid_t kprojid)
3113*4882a593Smuzhiyun {
3114*4882a593Smuzhiyun return 0;
3115*4882a593Smuzhiyun }
3116*4882a593Smuzhiyun
3117*4882a593Smuzhiyun static int f2fs_ioc_setproject(struct file *filp, __u32 projid)
3118*4882a593Smuzhiyun {
3119*4882a593Smuzhiyun if (projid != F2FS_DEF_PROJID)
3120*4882a593Smuzhiyun return -EOPNOTSUPP;
3121*4882a593Smuzhiyun return 0;
3122*4882a593Smuzhiyun }
3123*4882a593Smuzhiyun #endif
3124*4882a593Smuzhiyun
3125*4882a593Smuzhiyun /* FS_IOC_FSGETXATTR and FS_IOC_FSSETXATTR support */
3126*4882a593Smuzhiyun
3127*4882a593Smuzhiyun /*
3128*4882a593Smuzhiyun * To make a new on-disk f2fs i_flag gettable via FS_IOC_FSGETXATTR and settable
3129*4882a593Smuzhiyun * via FS_IOC_FSSETXATTR, add an entry for it to f2fs_xflags_map[], and add its
3130*4882a593Smuzhiyun * FS_XFLAG_* equivalent to F2FS_SUPPORTED_XFLAGS.
3131*4882a593Smuzhiyun */
3132*4882a593Smuzhiyun
3133*4882a593Smuzhiyun static const struct {
3134*4882a593Smuzhiyun u32 iflag;
3135*4882a593Smuzhiyun u32 xflag;
3136*4882a593Smuzhiyun } f2fs_xflags_map[] = {
3137*4882a593Smuzhiyun { F2FS_SYNC_FL, FS_XFLAG_SYNC },
3138*4882a593Smuzhiyun { F2FS_IMMUTABLE_FL, FS_XFLAG_IMMUTABLE },
3139*4882a593Smuzhiyun { F2FS_APPEND_FL, FS_XFLAG_APPEND },
3140*4882a593Smuzhiyun { F2FS_NODUMP_FL, FS_XFLAG_NODUMP },
3141*4882a593Smuzhiyun { F2FS_NOATIME_FL, FS_XFLAG_NOATIME },
3142*4882a593Smuzhiyun { F2FS_PROJINHERIT_FL, FS_XFLAG_PROJINHERIT },
3143*4882a593Smuzhiyun };
3144*4882a593Smuzhiyun
3145*4882a593Smuzhiyun #define F2FS_SUPPORTED_XFLAGS ( \
3146*4882a593Smuzhiyun FS_XFLAG_SYNC | \
3147*4882a593Smuzhiyun FS_XFLAG_IMMUTABLE | \
3148*4882a593Smuzhiyun FS_XFLAG_APPEND | \
3149*4882a593Smuzhiyun FS_XFLAG_NODUMP | \
3150*4882a593Smuzhiyun FS_XFLAG_NOATIME | \
3151*4882a593Smuzhiyun FS_XFLAG_PROJINHERIT)
3152*4882a593Smuzhiyun
3153*4882a593Smuzhiyun /* Convert f2fs on-disk i_flags to FS_IOC_FS{GET,SET}XATTR flags */
3154*4882a593Smuzhiyun static inline u32 f2fs_iflags_to_xflags(u32 iflags)
3155*4882a593Smuzhiyun {
3156*4882a593Smuzhiyun u32 xflags = 0;
3157*4882a593Smuzhiyun int i;
3158*4882a593Smuzhiyun
3159*4882a593Smuzhiyun for (i = 0; i < ARRAY_SIZE(f2fs_xflags_map); i++)
3160*4882a593Smuzhiyun if (iflags & f2fs_xflags_map[i].iflag)
3161*4882a593Smuzhiyun xflags |= f2fs_xflags_map[i].xflag;
3162*4882a593Smuzhiyun
3163*4882a593Smuzhiyun return xflags;
3164*4882a593Smuzhiyun }
3165*4882a593Smuzhiyun
3166*4882a593Smuzhiyun /* Convert FS_IOC_FS{GET,SET}XATTR flags to f2fs on-disk i_flags */
3167*4882a593Smuzhiyun static inline u32 f2fs_xflags_to_iflags(u32 xflags)
3168*4882a593Smuzhiyun {
3169*4882a593Smuzhiyun u32 iflags = 0;
3170*4882a593Smuzhiyun int i;
3171*4882a593Smuzhiyun
3172*4882a593Smuzhiyun for (i = 0; i < ARRAY_SIZE(f2fs_xflags_map); i++)
3173*4882a593Smuzhiyun if (xflags & f2fs_xflags_map[i].xflag)
3174*4882a593Smuzhiyun iflags |= f2fs_xflags_map[i].iflag;
3175*4882a593Smuzhiyun
3176*4882a593Smuzhiyun return iflags;
3177*4882a593Smuzhiyun }
3178*4882a593Smuzhiyun
3179*4882a593Smuzhiyun static void f2fs_fill_fsxattr(struct inode *inode, struct fsxattr *fa)
3180*4882a593Smuzhiyun {
3181*4882a593Smuzhiyun struct f2fs_inode_info *fi = F2FS_I(inode);
3182*4882a593Smuzhiyun
3183*4882a593Smuzhiyun simple_fill_fsxattr(fa, f2fs_iflags_to_xflags(fi->i_flags));
3184*4882a593Smuzhiyun
3185*4882a593Smuzhiyun if (f2fs_sb_has_project_quota(F2FS_I_SB(inode)))
3186*4882a593Smuzhiyun fa->fsx_projid = from_kprojid(&init_user_ns, fi->i_projid);
3187*4882a593Smuzhiyun }
3188*4882a593Smuzhiyun
3189*4882a593Smuzhiyun static int f2fs_ioc_fsgetxattr(struct file *filp, unsigned long arg)
3190*4882a593Smuzhiyun {
3191*4882a593Smuzhiyun struct inode *inode = file_inode(filp);
3192*4882a593Smuzhiyun struct fsxattr fa;
3193*4882a593Smuzhiyun
3194*4882a593Smuzhiyun f2fs_fill_fsxattr(inode, &fa);
3195*4882a593Smuzhiyun
3196*4882a593Smuzhiyun if (copy_to_user((struct fsxattr __user *)arg, &fa, sizeof(fa)))
3197*4882a593Smuzhiyun return -EFAULT;
3198*4882a593Smuzhiyun return 0;
3199*4882a593Smuzhiyun }
3200*4882a593Smuzhiyun
3201*4882a593Smuzhiyun static int f2fs_ioc_fssetxattr(struct file *filp, unsigned long arg)
3202*4882a593Smuzhiyun {
3203*4882a593Smuzhiyun struct inode *inode = file_inode(filp);
3204*4882a593Smuzhiyun struct fsxattr fa, old_fa;
3205*4882a593Smuzhiyun u32 iflags;
3206*4882a593Smuzhiyun int err;
3207*4882a593Smuzhiyun
3208*4882a593Smuzhiyun if (copy_from_user(&fa, (struct fsxattr __user *)arg, sizeof(fa)))
3209*4882a593Smuzhiyun return -EFAULT;
3210*4882a593Smuzhiyun
3211*4882a593Smuzhiyun /* Make sure caller has proper permission */
3212*4882a593Smuzhiyun if (!inode_owner_or_capable(inode))
3213*4882a593Smuzhiyun return -EACCES;
3214*4882a593Smuzhiyun
3215*4882a593Smuzhiyun if (fa.fsx_xflags & ~F2FS_SUPPORTED_XFLAGS)
3216*4882a593Smuzhiyun return -EOPNOTSUPP;
3217*4882a593Smuzhiyun
3218*4882a593Smuzhiyun iflags = f2fs_xflags_to_iflags(fa.fsx_xflags);
3219*4882a593Smuzhiyun if (f2fs_mask_flags(inode->i_mode, iflags) != iflags)
3220*4882a593Smuzhiyun return -EOPNOTSUPP;
3221*4882a593Smuzhiyun
3222*4882a593Smuzhiyun err = mnt_want_write_file(filp);
3223*4882a593Smuzhiyun if (err)
3224*4882a593Smuzhiyun return err;
3225*4882a593Smuzhiyun
3226*4882a593Smuzhiyun inode_lock(inode);
3227*4882a593Smuzhiyun
3228*4882a593Smuzhiyun f2fs_fill_fsxattr(inode, &old_fa);
3229*4882a593Smuzhiyun err = vfs_ioc_fssetxattr_check(inode, &old_fa, &fa);
3230*4882a593Smuzhiyun if (err)
3231*4882a593Smuzhiyun goto out;
3232*4882a593Smuzhiyun
3233*4882a593Smuzhiyun err = f2fs_setflags_common(inode, iflags,
3234*4882a593Smuzhiyun f2fs_xflags_to_iflags(F2FS_SUPPORTED_XFLAGS));
3235*4882a593Smuzhiyun if (err)
3236*4882a593Smuzhiyun goto out;
3237*4882a593Smuzhiyun
3238*4882a593Smuzhiyun err = f2fs_ioc_setproject(filp, fa.fsx_projid);
3239*4882a593Smuzhiyun out:
3240*4882a593Smuzhiyun inode_unlock(inode);
3241*4882a593Smuzhiyun mnt_drop_write_file(filp);
3242*4882a593Smuzhiyun return err;
3243*4882a593Smuzhiyun }
3244*4882a593Smuzhiyun
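/*
 * Bookkeeping for pinned files: each failed GC attempt on a pinned
 * file bumps i_gc_failures[GC_FAILURE_PIN]; once the count exceeds
 * gc_pin_file_threshold the pin is dropped (FI_PIN_FILE cleared) and
 * -EAGAIN is returned so GC may move the file's blocks again.
 */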
3245*4882a593Smuzhiyun int f2fs_pin_file_control(struct inode *inode, bool inc)
3246*4882a593Smuzhiyun {
3247*4882a593Smuzhiyun struct f2fs_inode_info *fi = F2FS_I(inode);
3248*4882a593Smuzhiyun struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
3249*4882a593Smuzhiyun
3250*4882a593Smuzhiyun /* Use i_gc_failures for normal file as a risk signal. */
3251*4882a593Smuzhiyun if (inc)
3252*4882a593Smuzhiyun f2fs_i_gc_failures_write(inode,
3253*4882a593Smuzhiyun fi->i_gc_failures[GC_FAILURE_PIN] + 1);
3254*4882a593Smuzhiyun
3255*4882a593Smuzhiyun if (fi->i_gc_failures[GC_FAILURE_PIN] > sbi->gc_pin_file_threshold) {
3256*4882a593Smuzhiyun f2fs_warn(sbi, "%s: Enable GC = ino %lx after %x GC trials",
3257*4882a593Smuzhiyun __func__, inode->i_ino,
3258*4882a593Smuzhiyun fi->i_gc_failures[GC_FAILURE_PIN]);
3259*4882a593Smuzhiyun clear_inode_flag(inode, FI_PIN_FILE);
3260*4882a593Smuzhiyun return -EAGAIN;
3261*4882a593Smuzhiyun }
3262*4882a593Smuzhiyun return 0;
3263*4882a593Smuzhiyun }
3264*4882a593Smuzhiyun
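/*
 * Pin or unpin a regular file so GC will not migrate its blocks. Since
 * pinning relies on in-place updates, inline data is converted and the
 * file must not stay compressed. A userspace sketch, assuming the UAPI
 * definitions from <linux/f2fs.h>:
 *
 *	__u32 pin = 1;
 *	int err = ioctl(fd, F2FS_IOC_SET_PIN_FILE, &pin);
 */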
3265*4882a593Smuzhiyun static int f2fs_ioc_set_pin_file(struct file *filp, unsigned long arg)
3266*4882a593Smuzhiyun {
3267*4882a593Smuzhiyun struct inode *inode = file_inode(filp);
3268*4882a593Smuzhiyun __u32 pin;
3269*4882a593Smuzhiyun int ret = 0;
3270*4882a593Smuzhiyun
3271*4882a593Smuzhiyun if (get_user(pin, (__u32 __user *)arg))
3272*4882a593Smuzhiyun return -EFAULT;
3273*4882a593Smuzhiyun
3274*4882a593Smuzhiyun if (!S_ISREG(inode->i_mode))
3275*4882a593Smuzhiyun return -EINVAL;
3276*4882a593Smuzhiyun
3277*4882a593Smuzhiyun if (f2fs_readonly(F2FS_I_SB(inode)->sb))
3278*4882a593Smuzhiyun return -EROFS;
3279*4882a593Smuzhiyun
3280*4882a593Smuzhiyun ret = mnt_want_write_file(filp);
3281*4882a593Smuzhiyun if (ret)
3282*4882a593Smuzhiyun return ret;
3283*4882a593Smuzhiyun
3284*4882a593Smuzhiyun inode_lock(inode);
3285*4882a593Smuzhiyun
3286*4882a593Smuzhiyun if (!pin) {
3287*4882a593Smuzhiyun clear_inode_flag(inode, FI_PIN_FILE);
3288*4882a593Smuzhiyun f2fs_i_gc_failures_write(inode, 0);
3289*4882a593Smuzhiyun goto done;
3290*4882a593Smuzhiyun }
3291*4882a593Smuzhiyun
3292*4882a593Smuzhiyun if (f2fs_should_update_outplace(inode, NULL)) {
3293*4882a593Smuzhiyun ret = -EINVAL;
3294*4882a593Smuzhiyun goto out;
3295*4882a593Smuzhiyun }
3296*4882a593Smuzhiyun
3297*4882a593Smuzhiyun if (f2fs_pin_file_control(inode, false)) {
3298*4882a593Smuzhiyun ret = -EAGAIN;
3299*4882a593Smuzhiyun goto out;
3300*4882a593Smuzhiyun }
3301*4882a593Smuzhiyun
3302*4882a593Smuzhiyun ret = f2fs_convert_inline_inode(inode);
3303*4882a593Smuzhiyun if (ret)
3304*4882a593Smuzhiyun goto out;
3305*4882a593Smuzhiyun
3306*4882a593Smuzhiyun if (!f2fs_disable_compressed_file(inode)) {
3307*4882a593Smuzhiyun ret = -EOPNOTSUPP;
3308*4882a593Smuzhiyun goto out;
3309*4882a593Smuzhiyun }
3310*4882a593Smuzhiyun
3311*4882a593Smuzhiyun set_inode_flag(inode, FI_PIN_FILE);
3312*4882a593Smuzhiyun ret = F2FS_I(inode)->i_gc_failures[GC_FAILURE_PIN];
3313*4882a593Smuzhiyun done:
3314*4882a593Smuzhiyun f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
3315*4882a593Smuzhiyun out:
3316*4882a593Smuzhiyun inode_unlock(inode);
3317*4882a593Smuzhiyun mnt_drop_write_file(filp);
3318*4882a593Smuzhiyun return ret;
3319*4882a593Smuzhiyun }
3320*4882a593Smuzhiyun
3321*4882a593Smuzhiyun static int f2fs_ioc_get_pin_file(struct file *filp, unsigned long arg)
3322*4882a593Smuzhiyun {
3323*4882a593Smuzhiyun struct inode *inode = file_inode(filp);
3324*4882a593Smuzhiyun __u32 pin = 0;
3325*4882a593Smuzhiyun
3326*4882a593Smuzhiyun if (is_inode_flag_set(inode, FI_PIN_FILE))
3327*4882a593Smuzhiyun pin = F2FS_I(inode)->i_gc_failures[GC_FAILURE_PIN];
3328*4882a593Smuzhiyun return put_user(pin, (u32 __user *)arg);
3329*4882a593Smuzhiyun }
3330*4882a593Smuzhiyun
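/*
 * Walk the whole file with F2FS_GET_BLOCK_PRECACHE so block mapping
 * information is populated ahead of time, letting later reads resolve
 * block addresses without additional node page lookups.
 */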
3331*4882a593Smuzhiyun int f2fs_precache_extents(struct inode *inode)
3332*4882a593Smuzhiyun {
3333*4882a593Smuzhiyun struct f2fs_inode_info *fi = F2FS_I(inode);
3334*4882a593Smuzhiyun struct f2fs_map_blocks map;
3335*4882a593Smuzhiyun pgoff_t m_next_extent;
3336*4882a593Smuzhiyun loff_t end;
3337*4882a593Smuzhiyun int err;
3338*4882a593Smuzhiyun
3339*4882a593Smuzhiyun if (is_inode_flag_set(inode, FI_NO_EXTENT))
3340*4882a593Smuzhiyun return -EOPNOTSUPP;
3341*4882a593Smuzhiyun
3342*4882a593Smuzhiyun map.m_lblk = 0;
3343*4882a593Smuzhiyun map.m_next_pgofs = NULL;
3344*4882a593Smuzhiyun map.m_next_extent = &m_next_extent;
3345*4882a593Smuzhiyun map.m_seg_type = NO_CHECK_TYPE;
3346*4882a593Smuzhiyun map.m_may_create = false;
3347*4882a593Smuzhiyun end = max_file_blocks(inode);
3348*4882a593Smuzhiyun
3349*4882a593Smuzhiyun while (map.m_lblk < end) {
3350*4882a593Smuzhiyun map.m_len = end - map.m_lblk;
3351*4882a593Smuzhiyun
3352*4882a593Smuzhiyun f2fs_down_write(&fi->i_gc_rwsem[WRITE]);
3353*4882a593Smuzhiyun err = f2fs_map_blocks(inode, &map, 0, F2FS_GET_BLOCK_PRECACHE);
3354*4882a593Smuzhiyun f2fs_up_write(&fi->i_gc_rwsem[WRITE]);
3355*4882a593Smuzhiyun if (err)
3356*4882a593Smuzhiyun return err;
3357*4882a593Smuzhiyun
3358*4882a593Smuzhiyun map.m_lblk = m_next_extent;
3359*4882a593Smuzhiyun }
3360*4882a593Smuzhiyun
3361*4882a593Smuzhiyun return 0;
3362*4882a593Smuzhiyun }
3363*4882a593Smuzhiyun
3364*4882a593Smuzhiyun static int f2fs_ioc_precache_extents(struct file *filp, unsigned long arg)
3365*4882a593Smuzhiyun {
3366*4882a593Smuzhiyun return f2fs_precache_extents(file_inode(filp));
3367*4882a593Smuzhiyun }
3368*4882a593Smuzhiyun
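/*
 * Online resize to the requested total block count (CAP_SYS_ADMIN,
 * read-write mount only). A userspace sketch, assuming the UAPI ioctl
 * from <linux/f2fs.h>:
 *
 *	__u64 blocks = new_block_count;
 *	int err = ioctl(fd, F2FS_IOC_RESIZE_FS, &blocks);
 */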
3369*4882a593Smuzhiyun static int f2fs_ioc_resize_fs(struct file *filp, unsigned long arg)
3370*4882a593Smuzhiyun {
3371*4882a593Smuzhiyun struct f2fs_sb_info *sbi = F2FS_I_SB(file_inode(filp));
3372*4882a593Smuzhiyun __u64 block_count;
3373*4882a593Smuzhiyun
3374*4882a593Smuzhiyun if (!capable(CAP_SYS_ADMIN))
3375*4882a593Smuzhiyun return -EPERM;
3376*4882a593Smuzhiyun
3377*4882a593Smuzhiyun if (f2fs_readonly(sbi->sb))
3378*4882a593Smuzhiyun return -EROFS;
3379*4882a593Smuzhiyun
3380*4882a593Smuzhiyun if (copy_from_user(&block_count, (void __user *)arg,
3381*4882a593Smuzhiyun sizeof(block_count)))
3382*4882a593Smuzhiyun return -EFAULT;
3383*4882a593Smuzhiyun
3384*4882a593Smuzhiyun return f2fs_resize_fs(sbi, block_count);
3385*4882a593Smuzhiyun }
3386*4882a593Smuzhiyun
3387*4882a593Smuzhiyun static int f2fs_ioc_enable_verity(struct file *filp, unsigned long arg)
3388*4882a593Smuzhiyun {
3389*4882a593Smuzhiyun struct inode *inode = file_inode(filp);
3390*4882a593Smuzhiyun
3391*4882a593Smuzhiyun f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
3392*4882a593Smuzhiyun
3393*4882a593Smuzhiyun if (!f2fs_sb_has_verity(F2FS_I_SB(inode))) {
3394*4882a593Smuzhiyun f2fs_warn(F2FS_I_SB(inode),
3395*4882a593Smuzhiyun "Can't enable fs-verity on inode %lu: the verity feature is not enabled on this filesystem",
3396*4882a593Smuzhiyun inode->i_ino);
3397*4882a593Smuzhiyun return -EOPNOTSUPP;
3398*4882a593Smuzhiyun }
3399*4882a593Smuzhiyun
3400*4882a593Smuzhiyun return fsverity_ioctl_enable(filp, (const void __user *)arg);
3401*4882a593Smuzhiyun }
3402*4882a593Smuzhiyun
3403*4882a593Smuzhiyun static int f2fs_ioc_measure_verity(struct file *filp, unsigned long arg)
3404*4882a593Smuzhiyun {
3405*4882a593Smuzhiyun if (!f2fs_sb_has_verity(F2FS_I_SB(file_inode(filp))))
3406*4882a593Smuzhiyun return -EOPNOTSUPP;
3407*4882a593Smuzhiyun
3408*4882a593Smuzhiyun return fsverity_ioctl_measure(filp, (void __user *)arg);
3409*4882a593Smuzhiyun }
3410*4882a593Smuzhiyun
3411*4882a593Smuzhiyun static int f2fs_ioc_read_verity_metadata(struct file *filp, unsigned long arg)
3412*4882a593Smuzhiyun {
3413*4882a593Smuzhiyun if (!f2fs_sb_has_verity(F2FS_I_SB(file_inode(filp))))
3414*4882a593Smuzhiyun return -EOPNOTSUPP;
3415*4882a593Smuzhiyun
3416*4882a593Smuzhiyun return fsverity_ioctl_read_metadata(filp, (const void __user *)arg);
3417*4882a593Smuzhiyun }
3418*4882a593Smuzhiyun
3419*4882a593Smuzhiyun static int f2fs_ioc_getfslabel(struct file *filp, unsigned long arg)
3420*4882a593Smuzhiyun {
3421*4882a593Smuzhiyun struct inode *inode = file_inode(filp);
3422*4882a593Smuzhiyun struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
3423*4882a593Smuzhiyun char *vbuf;
3424*4882a593Smuzhiyun int count;
3425*4882a593Smuzhiyun int err = 0;
3426*4882a593Smuzhiyun
3427*4882a593Smuzhiyun vbuf = f2fs_kzalloc(sbi, MAX_VOLUME_NAME, GFP_KERNEL);
3428*4882a593Smuzhiyun if (!vbuf)
3429*4882a593Smuzhiyun return -ENOMEM;
3430*4882a593Smuzhiyun
3431*4882a593Smuzhiyun f2fs_down_read(&sbi->sb_lock);
3432*4882a593Smuzhiyun count = utf16s_to_utf8s(sbi->raw_super->volume_name,
3433*4882a593Smuzhiyun ARRAY_SIZE(sbi->raw_super->volume_name),
3434*4882a593Smuzhiyun UTF16_LITTLE_ENDIAN, vbuf, MAX_VOLUME_NAME);
3435*4882a593Smuzhiyun f2fs_up_read(&sbi->sb_lock);
3436*4882a593Smuzhiyun
3437*4882a593Smuzhiyun if (copy_to_user((char __user *)arg, vbuf,
3438*4882a593Smuzhiyun min(FSLABEL_MAX, count)))
3439*4882a593Smuzhiyun err = -EFAULT;
3440*4882a593Smuzhiyun
3441*4882a593Smuzhiyun kfree(vbuf);
3442*4882a593Smuzhiyun return err;
3443*4882a593Smuzhiyun }
3444*4882a593Smuzhiyun
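/*
 * Set the volume label: the user-supplied UTF-8 string is converted to
 * the UTF-16LE volume_name field of the raw superblock and persisted
 * via f2fs_commit_super(), all under sb_lock.
 */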
3445*4882a593Smuzhiyun static int f2fs_ioc_setfslabel(struct file *filp, unsigned long arg)
3446*4882a593Smuzhiyun {
3447*4882a593Smuzhiyun struct inode *inode = file_inode(filp);
3448*4882a593Smuzhiyun struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
3449*4882a593Smuzhiyun char *vbuf;
3450*4882a593Smuzhiyun int err = 0;
3451*4882a593Smuzhiyun
3452*4882a593Smuzhiyun if (!capable(CAP_SYS_ADMIN))
3453*4882a593Smuzhiyun return -EPERM;
3454*4882a593Smuzhiyun
3455*4882a593Smuzhiyun vbuf = strndup_user((const char __user *)arg, FSLABEL_MAX);
3456*4882a593Smuzhiyun if (IS_ERR(vbuf))
3457*4882a593Smuzhiyun return PTR_ERR(vbuf);
3458*4882a593Smuzhiyun
3459*4882a593Smuzhiyun err = mnt_want_write_file(filp);
3460*4882a593Smuzhiyun if (err)
3461*4882a593Smuzhiyun goto out;
3462*4882a593Smuzhiyun
3463*4882a593Smuzhiyun f2fs_down_write(&sbi->sb_lock);
3464*4882a593Smuzhiyun
3465*4882a593Smuzhiyun memset(sbi->raw_super->volume_name, 0,
3466*4882a593Smuzhiyun sizeof(sbi->raw_super->volume_name));
3467*4882a593Smuzhiyun utf8s_to_utf16s(vbuf, strlen(vbuf), UTF16_LITTLE_ENDIAN,
3468*4882a593Smuzhiyun sbi->raw_super->volume_name,
3469*4882a593Smuzhiyun ARRAY_SIZE(sbi->raw_super->volume_name));
3470*4882a593Smuzhiyun
3471*4882a593Smuzhiyun err = f2fs_commit_super(sbi, false);
3472*4882a593Smuzhiyun
3473*4882a593Smuzhiyun f2fs_up_write(&sbi->sb_lock);
3474*4882a593Smuzhiyun
3475*4882a593Smuzhiyun mnt_drop_write_file(filp);
3476*4882a593Smuzhiyun out:
3477*4882a593Smuzhiyun kfree(vbuf);
3478*4882a593Smuzhiyun return err;
3479*4882a593Smuzhiyun }
3480*4882a593Smuzhiyun
3481*4882a593Smuzhiyun static int f2fs_get_compress_blocks(struct file *filp, unsigned long arg)
3482*4882a593Smuzhiyun {
3483*4882a593Smuzhiyun struct inode *inode = file_inode(filp);
3484*4882a593Smuzhiyun __u64 blocks;
3485*4882a593Smuzhiyun
3486*4882a593Smuzhiyun if (!f2fs_sb_has_compression(F2FS_I_SB(inode)))
3487*4882a593Smuzhiyun return -EOPNOTSUPP;
3488*4882a593Smuzhiyun
3489*4882a593Smuzhiyun if (!f2fs_compressed_file(inode))
3490*4882a593Smuzhiyun return -EINVAL;
3491*4882a593Smuzhiyun
3492*4882a593Smuzhiyun blocks = atomic_read(&F2FS_I(inode)->i_compr_blocks);
3493*4882a593Smuzhiyun return put_user(blocks, (u64 __user *)arg);
3494*4882a593Smuzhiyun }
3495*4882a593Smuzhiyun
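/*
 * Walk @count block addresses cluster by cluster and release the slots
 * that were only reserved for compression: NEW_ADDR entries behind a
 * COMPRESS_ADDR marker are reset to NULL_ADDR and the inode's valid
 * block count is reduced accordingly. Returns the number of released
 * blocks, or a negative errno if a corrupted address is found.
 */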
3496*4882a593Smuzhiyun static int release_compress_blocks(struct dnode_of_data *dn, pgoff_t count)
3497*4882a593Smuzhiyun {
3498*4882a593Smuzhiyun struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
3499*4882a593Smuzhiyun unsigned int released_blocks = 0;
3500*4882a593Smuzhiyun int cluster_size = F2FS_I(dn->inode)->i_cluster_size;
3501*4882a593Smuzhiyun block_t blkaddr;
3502*4882a593Smuzhiyun int i;
3503*4882a593Smuzhiyun
3504*4882a593Smuzhiyun for (i = 0; i < count; i++) {
3505*4882a593Smuzhiyun blkaddr = data_blkaddr(dn->inode, dn->node_page,
3506*4882a593Smuzhiyun dn->ofs_in_node + i);
3507*4882a593Smuzhiyun
3508*4882a593Smuzhiyun if (!__is_valid_data_blkaddr(blkaddr))
3509*4882a593Smuzhiyun continue;
3510*4882a593Smuzhiyun if (unlikely(!f2fs_is_valid_blkaddr(sbi, blkaddr,
3511*4882a593Smuzhiyun DATA_GENERIC_ENHANCE)))
3512*4882a593Smuzhiyun return -EFSCORRUPTED;
3513*4882a593Smuzhiyun }
3514*4882a593Smuzhiyun
3515*4882a593Smuzhiyun while (count) {
3516*4882a593Smuzhiyun int compr_blocks = 0;
3517*4882a593Smuzhiyun
3518*4882a593Smuzhiyun for (i = 0; i < cluster_size; i++, dn->ofs_in_node++) {
3519*4882a593Smuzhiyun blkaddr = f2fs_data_blkaddr(dn);
3520*4882a593Smuzhiyun
3521*4882a593Smuzhiyun if (i == 0) {
3522*4882a593Smuzhiyun if (blkaddr == COMPRESS_ADDR)
3523*4882a593Smuzhiyun continue;
3524*4882a593Smuzhiyun dn->ofs_in_node += cluster_size;
3525*4882a593Smuzhiyun goto next;
3526*4882a593Smuzhiyun }
3527*4882a593Smuzhiyun
3528*4882a593Smuzhiyun if (__is_valid_data_blkaddr(blkaddr))
3529*4882a593Smuzhiyun compr_blocks++;
3530*4882a593Smuzhiyun
3531*4882a593Smuzhiyun if (blkaddr != NEW_ADDR)
3532*4882a593Smuzhiyun continue;
3533*4882a593Smuzhiyun
3534*4882a593Smuzhiyun dn->data_blkaddr = NULL_ADDR;
3535*4882a593Smuzhiyun f2fs_set_data_blkaddr(dn);
3536*4882a593Smuzhiyun }
3537*4882a593Smuzhiyun
3538*4882a593Smuzhiyun f2fs_i_compr_blocks_update(dn->inode, compr_blocks, false);
3539*4882a593Smuzhiyun dec_valid_block_count(sbi, dn->inode,
3540*4882a593Smuzhiyun cluster_size - compr_blocks);
3541*4882a593Smuzhiyun
3542*4882a593Smuzhiyun released_blocks += cluster_size - compr_blocks;
3543*4882a593Smuzhiyun next:
3544*4882a593Smuzhiyun count -= cluster_size;
3545*4882a593Smuzhiyun }
3546*4882a593Smuzhiyun
3547*4882a593Smuzhiyun return released_blocks;
3548*4882a593Smuzhiyun }
3549*4882a593Smuzhiyun
3550*4882a593Smuzhiyun static int f2fs_release_compress_blocks(struct file *filp, unsigned long arg)
3551*4882a593Smuzhiyun {
3552*4882a593Smuzhiyun struct inode *inode = file_inode(filp);
3553*4882a593Smuzhiyun struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
3554*4882a593Smuzhiyun pgoff_t page_idx = 0, last_idx;
3555*4882a593Smuzhiyun unsigned int released_blocks = 0;
3556*4882a593Smuzhiyun int ret;
3557*4882a593Smuzhiyun int writecount;
3558*4882a593Smuzhiyun
3559*4882a593Smuzhiyun if (!f2fs_sb_has_compression(F2FS_I_SB(inode)))
3560*4882a593Smuzhiyun return -EOPNOTSUPP;
3561*4882a593Smuzhiyun
3562*4882a593Smuzhiyun if (!f2fs_compressed_file(inode))
3563*4882a593Smuzhiyun return -EINVAL;
3564*4882a593Smuzhiyun
3565*4882a593Smuzhiyun if (f2fs_readonly(sbi->sb))
3566*4882a593Smuzhiyun return -EROFS;
3567*4882a593Smuzhiyun
3568*4882a593Smuzhiyun ret = mnt_want_write_file(filp);
3569*4882a593Smuzhiyun if (ret)
3570*4882a593Smuzhiyun return ret;
3571*4882a593Smuzhiyun
3572*4882a593Smuzhiyun f2fs_balance_fs(F2FS_I_SB(inode), true);
3573*4882a593Smuzhiyun
3574*4882a593Smuzhiyun inode_lock(inode);
3575*4882a593Smuzhiyun
3576*4882a593Smuzhiyun writecount = atomic_read(&inode->i_writecount);
3577*4882a593Smuzhiyun if ((filp->f_mode & FMODE_WRITE && writecount != 1) ||
3578*4882a593Smuzhiyun (!(filp->f_mode & FMODE_WRITE) && writecount)) {
3579*4882a593Smuzhiyun ret = -EBUSY;
3580*4882a593Smuzhiyun goto out;
3581*4882a593Smuzhiyun }
3582*4882a593Smuzhiyun
3583*4882a593Smuzhiyun if (is_inode_flag_set(inode, FI_COMPRESS_RELEASED)) {
3584*4882a593Smuzhiyun ret = -EINVAL;
3585*4882a593Smuzhiyun goto out;
3586*4882a593Smuzhiyun }
3587*4882a593Smuzhiyun
3588*4882a593Smuzhiyun ret = filemap_write_and_wait_range(inode->i_mapping, 0, LLONG_MAX);
3589*4882a593Smuzhiyun if (ret)
3590*4882a593Smuzhiyun goto out;
3591*4882a593Smuzhiyun
3592*4882a593Smuzhiyun set_inode_flag(inode, FI_COMPRESS_RELEASED);
3593*4882a593Smuzhiyun inode->i_ctime = current_time(inode);
3594*4882a593Smuzhiyun f2fs_mark_inode_dirty_sync(inode, true);
3595*4882a593Smuzhiyun
3596*4882a593Smuzhiyun if (!atomic_read(&F2FS_I(inode)->i_compr_blocks))
3597*4882a593Smuzhiyun goto out;
3598*4882a593Smuzhiyun
3599*4882a593Smuzhiyun f2fs_down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
3600*4882a593Smuzhiyun f2fs_down_write(&F2FS_I(inode)->i_mmap_sem);
3601*4882a593Smuzhiyun
3602*4882a593Smuzhiyun last_idx = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);
3603*4882a593Smuzhiyun
3604*4882a593Smuzhiyun while (page_idx < last_idx) {
3605*4882a593Smuzhiyun struct dnode_of_data dn;
3606*4882a593Smuzhiyun pgoff_t end_offset, count;
3607*4882a593Smuzhiyun
3608*4882a593Smuzhiyun set_new_dnode(&dn, inode, NULL, NULL, 0);
3609*4882a593Smuzhiyun ret = f2fs_get_dnode_of_data(&dn, page_idx, LOOKUP_NODE);
3610*4882a593Smuzhiyun if (ret) {
3611*4882a593Smuzhiyun if (ret == -ENOENT) {
3612*4882a593Smuzhiyun page_idx = f2fs_get_next_page_offset(&dn,
3613*4882a593Smuzhiyun page_idx);
3614*4882a593Smuzhiyun ret = 0;
3615*4882a593Smuzhiyun continue;
3616*4882a593Smuzhiyun }
3617*4882a593Smuzhiyun break;
3618*4882a593Smuzhiyun }
3619*4882a593Smuzhiyun
3620*4882a593Smuzhiyun end_offset = ADDRS_PER_PAGE(dn.node_page, inode);
3621*4882a593Smuzhiyun count = min(end_offset - dn.ofs_in_node, last_idx - page_idx);
3622*4882a593Smuzhiyun count = round_up(count, F2FS_I(inode)->i_cluster_size);
3623*4882a593Smuzhiyun
3624*4882a593Smuzhiyun ret = release_compress_blocks(&dn, count);
3625*4882a593Smuzhiyun
3626*4882a593Smuzhiyun f2fs_put_dnode(&dn);
3627*4882a593Smuzhiyun
3628*4882a593Smuzhiyun if (ret < 0)
3629*4882a593Smuzhiyun break;
3630*4882a593Smuzhiyun
3631*4882a593Smuzhiyun page_idx += count;
3632*4882a593Smuzhiyun released_blocks += ret;
3633*4882a593Smuzhiyun }
3634*4882a593Smuzhiyun
3635*4882a593Smuzhiyun f2fs_up_write(&F2FS_I(inode)->i_mmap_sem);
3636*4882a593Smuzhiyun f2fs_up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
3637*4882a593Smuzhiyun out:
3638*4882a593Smuzhiyun inode_unlock(inode);
3639*4882a593Smuzhiyun
3640*4882a593Smuzhiyun mnt_drop_write_file(filp);
3641*4882a593Smuzhiyun
3642*4882a593Smuzhiyun if (ret >= 0) {
3643*4882a593Smuzhiyun ret = put_user(released_blocks, (u64 __user *)arg);
3644*4882a593Smuzhiyun } else if (released_blocks &&
3645*4882a593Smuzhiyun atomic_read(&F2FS_I(inode)->i_compr_blocks)) {
3646*4882a593Smuzhiyun set_sbi_flag(sbi, SBI_NEED_FSCK);
3647*4882a593Smuzhiyun f2fs_warn(sbi, "%s: partial blocks were released i_ino=%lx "
3648*4882a593Smuzhiyun "iblocks=%llu, released=%u, compr_blocks=%u, "
3649*4882a593Smuzhiyun "run fsck to fix.",
3650*4882a593Smuzhiyun __func__, inode->i_ino, inode->i_blocks,
3651*4882a593Smuzhiyun released_blocks,
3652*4882a593Smuzhiyun atomic_read(&F2FS_I(inode)->i_compr_blocks));
3653*4882a593Smuzhiyun }
3654*4882a593Smuzhiyun
3655*4882a593Smuzhiyun return ret;
3656*4882a593Smuzhiyun }
3657*4882a593Smuzhiyun
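/*
 * Re-reserve the block addresses of released compressed clusters within
 * one dnode. For every cluster that still starts with COMPRESS_ADDR,
 * turn the punched-out slots back into NEW_ADDR, charge them through
 * inc_valid_block_count() and restore the inode's compressed block
 * count. Returns the number of newly reserved blocks or a negative errno.
 */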
3658*4882a593Smuzhiyun static int reserve_compress_blocks(struct dnode_of_data *dn, pgoff_t count)
3659*4882a593Smuzhiyun {
3660*4882a593Smuzhiyun struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
3661*4882a593Smuzhiyun unsigned int reserved_blocks = 0;
3662*4882a593Smuzhiyun int cluster_size = F2FS_I(dn->inode)->i_cluster_size;
3663*4882a593Smuzhiyun block_t blkaddr;
3664*4882a593Smuzhiyun int i;
3665*4882a593Smuzhiyun
3666*4882a593Smuzhiyun for (i = 0; i < count; i++) {
3667*4882a593Smuzhiyun blkaddr = data_blkaddr(dn->inode, dn->node_page,
3668*4882a593Smuzhiyun dn->ofs_in_node + i);
3669*4882a593Smuzhiyun
3670*4882a593Smuzhiyun if (!__is_valid_data_blkaddr(blkaddr))
3671*4882a593Smuzhiyun continue;
3672*4882a593Smuzhiyun if (unlikely(!f2fs_is_valid_blkaddr(sbi, blkaddr,
3673*4882a593Smuzhiyun DATA_GENERIC_ENHANCE)))
3674*4882a593Smuzhiyun return -EFSCORRUPTED;
3675*4882a593Smuzhiyun }
3676*4882a593Smuzhiyun
3677*4882a593Smuzhiyun while (count) {
3678*4882a593Smuzhiyun int compr_blocks = 0;
3679*4882a593Smuzhiyun blkcnt_t reserved;
3680*4882a593Smuzhiyun int ret;
3681*4882a593Smuzhiyun
3682*4882a593Smuzhiyun for (i = 0; i < cluster_size; i++, dn->ofs_in_node++) {
3683*4882a593Smuzhiyun blkaddr = f2fs_data_blkaddr(dn);
3684*4882a593Smuzhiyun
3685*4882a593Smuzhiyun if (i == 0) {
3686*4882a593Smuzhiyun if (blkaddr == COMPRESS_ADDR)
3687*4882a593Smuzhiyun continue;
3688*4882a593Smuzhiyun dn->ofs_in_node += cluster_size;
3689*4882a593Smuzhiyun goto next;
3690*4882a593Smuzhiyun }
3691*4882a593Smuzhiyun
3692*4882a593Smuzhiyun if (__is_valid_data_blkaddr(blkaddr)) {
3693*4882a593Smuzhiyun compr_blocks++;
3694*4882a593Smuzhiyun continue;
3695*4882a593Smuzhiyun }
3696*4882a593Smuzhiyun
3697*4882a593Smuzhiyun dn->data_blkaddr = NEW_ADDR;
3698*4882a593Smuzhiyun f2fs_set_data_blkaddr(dn);
3699*4882a593Smuzhiyun }
3700*4882a593Smuzhiyun
3701*4882a593Smuzhiyun reserved = cluster_size - compr_blocks;
3702*4882a593Smuzhiyun ret = inc_valid_block_count(sbi, dn->inode, &reserved);
3703*4882a593Smuzhiyun if (ret)
3704*4882a593Smuzhiyun return ret;
3705*4882a593Smuzhiyun
3706*4882a593Smuzhiyun if (reserved != cluster_size - compr_blocks)
3707*4882a593Smuzhiyun return -ENOSPC;
3708*4882a593Smuzhiyun
3709*4882a593Smuzhiyun f2fs_i_compr_blocks_update(dn->inode, compr_blocks, true);
3710*4882a593Smuzhiyun
3711*4882a593Smuzhiyun reserved_blocks += reserved;
3712*4882a593Smuzhiyun next:
3713*4882a593Smuzhiyun count -= cluster_size;
3714*4882a593Smuzhiyun }
3715*4882a593Smuzhiyun
3716*4882a593Smuzhiyun return reserved_blocks;
3717*4882a593Smuzhiyun }
3718*4882a593Smuzhiyun
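/*
 * F2FS_IOC_RESERVE_COMPRESS_BLOCKS: undo F2FS_IOC_RELEASE_COMPRESS_BLOCKS.
 * Walk the whole file, re-reserve the blocks that were released, clear
 * FI_COMPRESS_RELEASED on success and return the number of reserved
 * blocks through the u64 pointed to by @arg.
 *
 * A minimal user-space sketch (assuming the uapi definitions are
 * available as <linux/f2fs.h>):
 *
 *	__u64 blocks;
 *	if (!ioctl(fd, F2FS_IOC_RESERVE_COMPRESS_BLOCKS, &blocks))
 *		printf("reserved %llu blocks\n", (unsigned long long)blocks);
 */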
3719*4882a593Smuzhiyun static int f2fs_reserve_compress_blocks(struct file *filp, unsigned long arg)
3720*4882a593Smuzhiyun {
3721*4882a593Smuzhiyun struct inode *inode = file_inode(filp);
3722*4882a593Smuzhiyun struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
3723*4882a593Smuzhiyun pgoff_t page_idx = 0, last_idx;
3724*4882a593Smuzhiyun unsigned int reserved_blocks = 0;
3725*4882a593Smuzhiyun int ret;
3726*4882a593Smuzhiyun
3727*4882a593Smuzhiyun if (!f2fs_sb_has_compression(F2FS_I_SB(inode)))
3728*4882a593Smuzhiyun return -EOPNOTSUPP;
3729*4882a593Smuzhiyun
3730*4882a593Smuzhiyun if (!f2fs_compressed_file(inode))
3731*4882a593Smuzhiyun return -EINVAL;
3732*4882a593Smuzhiyun
3733*4882a593Smuzhiyun if (f2fs_readonly(sbi->sb))
3734*4882a593Smuzhiyun return -EROFS;
3735*4882a593Smuzhiyun
3736*4882a593Smuzhiyun ret = mnt_want_write_file(filp);
3737*4882a593Smuzhiyun if (ret)
3738*4882a593Smuzhiyun return ret;
3739*4882a593Smuzhiyun
3740*4882a593Smuzhiyun if (atomic_read(&F2FS_I(inode)->i_compr_blocks))
3741*4882a593Smuzhiyun goto out;
3742*4882a593Smuzhiyun
3743*4882a593Smuzhiyun f2fs_balance_fs(F2FS_I_SB(inode), true);
3744*4882a593Smuzhiyun
3745*4882a593Smuzhiyun inode_lock(inode);
3746*4882a593Smuzhiyun
3747*4882a593Smuzhiyun if (!is_inode_flag_set(inode, FI_COMPRESS_RELEASED)) {
3748*4882a593Smuzhiyun ret = -EINVAL;
3749*4882a593Smuzhiyun goto unlock_inode;
3750*4882a593Smuzhiyun }
3751*4882a593Smuzhiyun
3752*4882a593Smuzhiyun f2fs_down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
3753*4882a593Smuzhiyun f2fs_down_write(&F2FS_I(inode)->i_mmap_sem);
3754*4882a593Smuzhiyun
3755*4882a593Smuzhiyun last_idx = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);
3756*4882a593Smuzhiyun
3757*4882a593Smuzhiyun while (page_idx < last_idx) {
3758*4882a593Smuzhiyun struct dnode_of_data dn;
3759*4882a593Smuzhiyun pgoff_t end_offset, count;
3760*4882a593Smuzhiyun
3761*4882a593Smuzhiyun set_new_dnode(&dn, inode, NULL, NULL, 0);
3762*4882a593Smuzhiyun ret = f2fs_get_dnode_of_data(&dn, page_idx, LOOKUP_NODE);
3763*4882a593Smuzhiyun if (ret) {
3764*4882a593Smuzhiyun if (ret == -ENOENT) {
3765*4882a593Smuzhiyun page_idx = f2fs_get_next_page_offset(&dn,
3766*4882a593Smuzhiyun page_idx);
3767*4882a593Smuzhiyun ret = 0;
3768*4882a593Smuzhiyun continue;
3769*4882a593Smuzhiyun }
3770*4882a593Smuzhiyun break;
3771*4882a593Smuzhiyun }
3772*4882a593Smuzhiyun
3773*4882a593Smuzhiyun end_offset = ADDRS_PER_PAGE(dn.node_page, inode);
3774*4882a593Smuzhiyun count = min(end_offset - dn.ofs_in_node, last_idx - page_idx);
3775*4882a593Smuzhiyun count = round_up(count, F2FS_I(inode)->i_cluster_size);
3776*4882a593Smuzhiyun
3777*4882a593Smuzhiyun ret = reserve_compress_blocks(&dn, count);
3778*4882a593Smuzhiyun
3779*4882a593Smuzhiyun f2fs_put_dnode(&dn);
3780*4882a593Smuzhiyun
3781*4882a593Smuzhiyun if (ret < 0)
3782*4882a593Smuzhiyun break;
3783*4882a593Smuzhiyun
3784*4882a593Smuzhiyun page_idx += count;
3785*4882a593Smuzhiyun reserved_blocks += ret;
3786*4882a593Smuzhiyun }
3787*4882a593Smuzhiyun
3788*4882a593Smuzhiyun f2fs_up_write(&F2FS_I(inode)->i_mmap_sem);
3789*4882a593Smuzhiyun f2fs_up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
3790*4882a593Smuzhiyun
3791*4882a593Smuzhiyun if (ret >= 0) {
3792*4882a593Smuzhiyun clear_inode_flag(inode, FI_COMPRESS_RELEASED);
3793*4882a593Smuzhiyun inode->i_ctime = current_time(inode);
3794*4882a593Smuzhiyun f2fs_mark_inode_dirty_sync(inode, true);
3795*4882a593Smuzhiyun }
3796*4882a593Smuzhiyun unlock_inode:
3797*4882a593Smuzhiyun inode_unlock(inode);
3798*4882a593Smuzhiyun out:
3799*4882a593Smuzhiyun mnt_drop_write_file(filp);
3800*4882a593Smuzhiyun
3801*4882a593Smuzhiyun if (ret >= 0) {
3802*4882a593Smuzhiyun ret = put_user(reserved_blocks, (u64 __user *)arg);
3803*4882a593Smuzhiyun } else if (reserved_blocks &&
3804*4882a593Smuzhiyun atomic_read(&F2FS_I(inode)->i_compr_blocks)) {
3805*4882a593Smuzhiyun set_sbi_flag(sbi, SBI_NEED_FSCK);
3806*4882a593Smuzhiyun f2fs_warn(sbi, "%s: partial blocks were reserved i_ino=%lx "
3807*4882a593Smuzhiyun "iblocks=%llu, reserved=%u, compr_blocks=%u, "
3808*4882a593Smuzhiyun "run fsck to fix.",
3809*4882a593Smuzhiyun __func__, inode->i_ino, inode->i_blocks,
3810*4882a593Smuzhiyun reserved_blocks,
3811*4882a593Smuzhiyun atomic_read(&F2FS_I(inode)->i_compr_blocks));
3812*4882a593Smuzhiyun }
3813*4882a593Smuzhiyun
3814*4882a593Smuzhiyun return ret;
3815*4882a593Smuzhiyun }
3816*4882a593Smuzhiyun
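/*
 * Discard and/or zero out one physically contiguous extent on @bdev.
 * F2FS_TRIM_FILE_DISCARD issues a discard (secure discard when the
 * queue supports it); F2FS_TRIM_FILE_ZEROOUT zeroes the range, going
 * through fscrypt_zeroout_range() for encrypted inodes.
 */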
3817*4882a593Smuzhiyun static int f2fs_secure_erase(struct block_device *bdev, struct inode *inode,
3818*4882a593Smuzhiyun pgoff_t off, block_t block, block_t len, u32 flags)
3819*4882a593Smuzhiyun {
3820*4882a593Smuzhiyun struct request_queue *q = bdev_get_queue(bdev);
3821*4882a593Smuzhiyun sector_t sector = SECTOR_FROM_BLOCK(block);
3822*4882a593Smuzhiyun sector_t nr_sects = SECTOR_FROM_BLOCK(len);
3823*4882a593Smuzhiyun int ret = 0;
3824*4882a593Smuzhiyun
3825*4882a593Smuzhiyun if (!q)
3826*4882a593Smuzhiyun return -ENXIO;
3827*4882a593Smuzhiyun
3828*4882a593Smuzhiyun if (flags & F2FS_TRIM_FILE_DISCARD)
3829*4882a593Smuzhiyun ret = blkdev_issue_discard(bdev, sector, nr_sects, GFP_NOFS,
3830*4882a593Smuzhiyun blk_queue_secure_erase(q) ?
3831*4882a593Smuzhiyun BLKDEV_DISCARD_SECURE : 0);
3832*4882a593Smuzhiyun
3833*4882a593Smuzhiyun if (!ret && (flags & F2FS_TRIM_FILE_ZEROOUT)) {
3834*4882a593Smuzhiyun if (IS_ENCRYPTED(inode))
3835*4882a593Smuzhiyun ret = fscrypt_zeroout_range(inode, off, block, len);
3836*4882a593Smuzhiyun else
3837*4882a593Smuzhiyun ret = blkdev_issue_zeroout(bdev, sector, nr_sects,
3838*4882a593Smuzhiyun GFP_NOFS, 0);
3839*4882a593Smuzhiyun }
3840*4882a593Smuzhiyun
3841*4882a593Smuzhiyun return ret;
3842*4882a593Smuzhiyun }
3843*4882a593Smuzhiyun
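/*
 * F2FS_IOC_SEC_TRIM_FILE: securely erase the data blocks backing a byte
 * range of a regular file. The range is written back and dropped from
 * the page cache, then the block mappings are walked and physically
 * contiguous runs on the same device are handed to f2fs_secure_erase()
 * according to the flags in struct f2fs_sectrim_range.
 *
 * A minimal user-space sketch (assuming the uapi definitions are
 * available as <linux/f2fs.h>; len == (__u64)-1 means "to end of file"):
 *
 *	struct f2fs_sectrim_range r = {
 *		.start = 0,
 *		.len = (__u64)-1,
 *		.flags = F2FS_TRIM_FILE_DISCARD | F2FS_TRIM_FILE_ZEROOUT,
 *	};
 *	ioctl(fd, F2FS_IOC_SEC_TRIM_FILE, &r);
 */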
3844*4882a593Smuzhiyun static int f2fs_sec_trim_file(struct file *filp, unsigned long arg)
3845*4882a593Smuzhiyun {
3846*4882a593Smuzhiyun struct inode *inode = file_inode(filp);
3847*4882a593Smuzhiyun struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
3848*4882a593Smuzhiyun struct address_space *mapping = inode->i_mapping;
3849*4882a593Smuzhiyun struct block_device *prev_bdev = NULL;
3850*4882a593Smuzhiyun struct f2fs_sectrim_range range;
3851*4882a593Smuzhiyun pgoff_t index, pg_end, prev_index = 0;
3852*4882a593Smuzhiyun block_t prev_block = 0, len = 0;
3853*4882a593Smuzhiyun loff_t end_addr;
3854*4882a593Smuzhiyun bool to_end = false;
3855*4882a593Smuzhiyun int ret = 0;
3856*4882a593Smuzhiyun
3857*4882a593Smuzhiyun if (!(filp->f_mode & FMODE_WRITE))
3858*4882a593Smuzhiyun return -EBADF;
3859*4882a593Smuzhiyun
3860*4882a593Smuzhiyun if (copy_from_user(&range, (struct f2fs_sectrim_range __user *)arg,
3861*4882a593Smuzhiyun sizeof(range)))
3862*4882a593Smuzhiyun return -EFAULT;
3863*4882a593Smuzhiyun
3864*4882a593Smuzhiyun if (range.flags == 0 || (range.flags & ~F2FS_TRIM_FILE_MASK) ||
3865*4882a593Smuzhiyun !S_ISREG(inode->i_mode))
3866*4882a593Smuzhiyun return -EINVAL;
3867*4882a593Smuzhiyun
3868*4882a593Smuzhiyun if (((range.flags & F2FS_TRIM_FILE_DISCARD) &&
3869*4882a593Smuzhiyun !f2fs_hw_support_discard(sbi)) ||
3870*4882a593Smuzhiyun ((range.flags & F2FS_TRIM_FILE_ZEROOUT) &&
3871*4882a593Smuzhiyun IS_ENCRYPTED(inode) && f2fs_is_multi_device(sbi)))
3872*4882a593Smuzhiyun return -EOPNOTSUPP;
3873*4882a593Smuzhiyun
3874*4882a593Smuzhiyun file_start_write(filp);
3875*4882a593Smuzhiyun inode_lock(inode);
3876*4882a593Smuzhiyun
3877*4882a593Smuzhiyun if (f2fs_is_atomic_file(inode) || f2fs_compressed_file(inode) ||
3878*4882a593Smuzhiyun range.start >= inode->i_size) {
3879*4882a593Smuzhiyun ret = -EINVAL;
3880*4882a593Smuzhiyun goto err;
3881*4882a593Smuzhiyun }
3882*4882a593Smuzhiyun
3883*4882a593Smuzhiyun if (range.len == 0)
3884*4882a593Smuzhiyun goto err;
3885*4882a593Smuzhiyun
3886*4882a593Smuzhiyun if (inode->i_size - range.start > range.len) {
3887*4882a593Smuzhiyun end_addr = range.start + range.len;
3888*4882a593Smuzhiyun } else {
3889*4882a593Smuzhiyun end_addr = range.len == (u64)-1 ?
3890*4882a593Smuzhiyun sbi->sb->s_maxbytes : inode->i_size;
3891*4882a593Smuzhiyun to_end = true;
3892*4882a593Smuzhiyun }
3893*4882a593Smuzhiyun
3894*4882a593Smuzhiyun if (!IS_ALIGNED(range.start, F2FS_BLKSIZE) ||
3895*4882a593Smuzhiyun (!to_end && !IS_ALIGNED(end_addr, F2FS_BLKSIZE))) {
3896*4882a593Smuzhiyun ret = -EINVAL;
3897*4882a593Smuzhiyun goto err;
3898*4882a593Smuzhiyun }
3899*4882a593Smuzhiyun
3900*4882a593Smuzhiyun index = F2FS_BYTES_TO_BLK(range.start);
3901*4882a593Smuzhiyun pg_end = DIV_ROUND_UP(end_addr, F2FS_BLKSIZE);
3902*4882a593Smuzhiyun
3903*4882a593Smuzhiyun ret = f2fs_convert_inline_inode(inode);
3904*4882a593Smuzhiyun if (ret)
3905*4882a593Smuzhiyun goto err;
3906*4882a593Smuzhiyun
3907*4882a593Smuzhiyun f2fs_down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
3908*4882a593Smuzhiyun f2fs_down_write(&F2FS_I(inode)->i_mmap_sem);
3909*4882a593Smuzhiyun
3910*4882a593Smuzhiyun ret = filemap_write_and_wait_range(mapping, range.start,
3911*4882a593Smuzhiyun to_end ? LLONG_MAX : end_addr - 1);
3912*4882a593Smuzhiyun if (ret)
3913*4882a593Smuzhiyun goto out;
3914*4882a593Smuzhiyun
3915*4882a593Smuzhiyun truncate_inode_pages_range(mapping, range.start,
3916*4882a593Smuzhiyun to_end ? -1 : end_addr - 1);
3917*4882a593Smuzhiyun
3918*4882a593Smuzhiyun while (index < pg_end) {
3919*4882a593Smuzhiyun struct dnode_of_data dn;
3920*4882a593Smuzhiyun pgoff_t end_offset, count;
3921*4882a593Smuzhiyun int i;
3922*4882a593Smuzhiyun
3923*4882a593Smuzhiyun set_new_dnode(&dn, inode, NULL, NULL, 0);
3924*4882a593Smuzhiyun ret = f2fs_get_dnode_of_data(&dn, index, LOOKUP_NODE);
3925*4882a593Smuzhiyun if (ret) {
3926*4882a593Smuzhiyun if (ret == -ENOENT) {
3927*4882a593Smuzhiyun index = f2fs_get_next_page_offset(&dn, index);
3928*4882a593Smuzhiyun continue;
3929*4882a593Smuzhiyun }
3930*4882a593Smuzhiyun goto out;
3931*4882a593Smuzhiyun }
3932*4882a593Smuzhiyun
3933*4882a593Smuzhiyun end_offset = ADDRS_PER_PAGE(dn.node_page, inode);
3934*4882a593Smuzhiyun count = min(end_offset - dn.ofs_in_node, pg_end - index);
3935*4882a593Smuzhiyun for (i = 0; i < count; i++, index++, dn.ofs_in_node++) {
3936*4882a593Smuzhiyun struct block_device *cur_bdev;
3937*4882a593Smuzhiyun block_t blkaddr = f2fs_data_blkaddr(&dn);
3938*4882a593Smuzhiyun
3939*4882a593Smuzhiyun if (!__is_valid_data_blkaddr(blkaddr))
3940*4882a593Smuzhiyun continue;
3941*4882a593Smuzhiyun
3942*4882a593Smuzhiyun if (!f2fs_is_valid_blkaddr(sbi, blkaddr,
3943*4882a593Smuzhiyun DATA_GENERIC_ENHANCE)) {
3944*4882a593Smuzhiyun ret = -EFSCORRUPTED;
3945*4882a593Smuzhiyun f2fs_put_dnode(&dn);
3946*4882a593Smuzhiyun goto out;
3947*4882a593Smuzhiyun }
3948*4882a593Smuzhiyun
3949*4882a593Smuzhiyun cur_bdev = f2fs_target_device(sbi, blkaddr, NULL);
3950*4882a593Smuzhiyun if (f2fs_is_multi_device(sbi)) {
3951*4882a593Smuzhiyun int di = f2fs_target_device_index(sbi, blkaddr);
3952*4882a593Smuzhiyun
3953*4882a593Smuzhiyun blkaddr -= FDEV(di).start_blk;
3954*4882a593Smuzhiyun }
3955*4882a593Smuzhiyun
3956*4882a593Smuzhiyun if (len) {
3957*4882a593Smuzhiyun if (prev_bdev == cur_bdev &&
3958*4882a593Smuzhiyun index == prev_index + len &&
3959*4882a593Smuzhiyun blkaddr == prev_block + len) {
3960*4882a593Smuzhiyun len++;
3961*4882a593Smuzhiyun } else {
3962*4882a593Smuzhiyun ret = f2fs_secure_erase(prev_bdev,
3963*4882a593Smuzhiyun inode, prev_index, prev_block,
3964*4882a593Smuzhiyun len, range.flags);
3965*4882a593Smuzhiyun if (ret) {
3966*4882a593Smuzhiyun f2fs_put_dnode(&dn);
3967*4882a593Smuzhiyun goto out;
3968*4882a593Smuzhiyun }
3969*4882a593Smuzhiyun
3970*4882a593Smuzhiyun len = 0;
3971*4882a593Smuzhiyun }
3972*4882a593Smuzhiyun }
3973*4882a593Smuzhiyun
3974*4882a593Smuzhiyun if (!len) {
3975*4882a593Smuzhiyun prev_bdev = cur_bdev;
3976*4882a593Smuzhiyun prev_index = index;
3977*4882a593Smuzhiyun prev_block = blkaddr;
3978*4882a593Smuzhiyun len = 1;
3979*4882a593Smuzhiyun }
3980*4882a593Smuzhiyun }
3981*4882a593Smuzhiyun
3982*4882a593Smuzhiyun f2fs_put_dnode(&dn);
3983*4882a593Smuzhiyun
3984*4882a593Smuzhiyun if (fatal_signal_pending(current)) {
3985*4882a593Smuzhiyun ret = -EINTR;
3986*4882a593Smuzhiyun goto out;
3987*4882a593Smuzhiyun }
3988*4882a593Smuzhiyun cond_resched();
3989*4882a593Smuzhiyun }
3990*4882a593Smuzhiyun
3991*4882a593Smuzhiyun if (len)
3992*4882a593Smuzhiyun ret = f2fs_secure_erase(prev_bdev, inode, prev_index,
3993*4882a593Smuzhiyun prev_block, len, range.flags);
3994*4882a593Smuzhiyun out:
3995*4882a593Smuzhiyun f2fs_up_write(&F2FS_I(inode)->i_mmap_sem);
3996*4882a593Smuzhiyun f2fs_up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
3997*4882a593Smuzhiyun err:
3998*4882a593Smuzhiyun inode_unlock(inode);
3999*4882a593Smuzhiyun file_end_write(filp);
4000*4882a593Smuzhiyun
4001*4882a593Smuzhiyun return ret;
4002*4882a593Smuzhiyun }
4003*4882a593Smuzhiyun
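/*
 * F2FS_IOC_GET_COMPRESS_OPTION: report the compression algorithm and
 * log2 cluster size of a compressed inode to user space.
 */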
4004*4882a593Smuzhiyun static int f2fs_ioc_get_compress_option(struct file *filp, unsigned long arg)
4005*4882a593Smuzhiyun {
4006*4882a593Smuzhiyun struct inode *inode = file_inode(filp);
4007*4882a593Smuzhiyun struct f2fs_comp_option option;
4008*4882a593Smuzhiyun
4009*4882a593Smuzhiyun if (!f2fs_sb_has_compression(F2FS_I_SB(inode)))
4010*4882a593Smuzhiyun return -EOPNOTSUPP;
4011*4882a593Smuzhiyun
4012*4882a593Smuzhiyun inode_lock_shared(inode);
4013*4882a593Smuzhiyun
4014*4882a593Smuzhiyun if (!f2fs_compressed_file(inode)) {
4015*4882a593Smuzhiyun inode_unlock_shared(inode);
4016*4882a593Smuzhiyun return -ENODATA;
4017*4882a593Smuzhiyun }
4018*4882a593Smuzhiyun
4019*4882a593Smuzhiyun option.algorithm = F2FS_I(inode)->i_compress_algorithm;
4020*4882a593Smuzhiyun option.log_cluster_size = F2FS_I(inode)->i_log_cluster_size;
4021*4882a593Smuzhiyun
4022*4882a593Smuzhiyun inode_unlock_shared(inode);
4023*4882a593Smuzhiyun
4024*4882a593Smuzhiyun if (copy_to_user((struct f2fs_comp_option __user *)arg, &option,
4025*4882a593Smuzhiyun sizeof(option)))
4026*4882a593Smuzhiyun return -EFAULT;
4027*4882a593Smuzhiyun
4028*4882a593Smuzhiyun return 0;
4029*4882a593Smuzhiyun }
4030*4882a593Smuzhiyun
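/*
 * F2FS_IOC_SET_COMPRESS_OPTION: change the compression algorithm and
 * cluster size of a compressed inode. Only permitted while the file is
 * empty, is not mmapped and has no dirty pages.
 */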
4031*4882a593Smuzhiyun static int f2fs_ioc_set_compress_option(struct file *filp, unsigned long arg)
4032*4882a593Smuzhiyun {
4033*4882a593Smuzhiyun struct inode *inode = file_inode(filp);
4034*4882a593Smuzhiyun struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
4035*4882a593Smuzhiyun struct f2fs_comp_option option;
4036*4882a593Smuzhiyun int ret = 0;
4037*4882a593Smuzhiyun
4038*4882a593Smuzhiyun if (!f2fs_sb_has_compression(sbi))
4039*4882a593Smuzhiyun return -EOPNOTSUPP;
4040*4882a593Smuzhiyun
4041*4882a593Smuzhiyun if (!(filp->f_mode & FMODE_WRITE))
4042*4882a593Smuzhiyun return -EBADF;
4043*4882a593Smuzhiyun
4044*4882a593Smuzhiyun if (copy_from_user(&option, (struct f2fs_comp_option __user *)arg,
4045*4882a593Smuzhiyun sizeof(option)))
4046*4882a593Smuzhiyun return -EFAULT;
4047*4882a593Smuzhiyun
4048*4882a593Smuzhiyun if (!f2fs_compressed_file(inode) ||
4049*4882a593Smuzhiyun option.log_cluster_size < MIN_COMPRESS_LOG_SIZE ||
4050*4882a593Smuzhiyun option.log_cluster_size > MAX_COMPRESS_LOG_SIZE ||
4051*4882a593Smuzhiyun option.algorithm >= COMPRESS_MAX)
4052*4882a593Smuzhiyun return -EINVAL;
4053*4882a593Smuzhiyun
4054*4882a593Smuzhiyun file_start_write(filp);
4055*4882a593Smuzhiyun inode_lock(inode);
4056*4882a593Smuzhiyun
4057*4882a593Smuzhiyun if (f2fs_is_mmap_file(inode) || get_dirty_pages(inode)) {
4058*4882a593Smuzhiyun ret = -EBUSY;
4059*4882a593Smuzhiyun goto out;
4060*4882a593Smuzhiyun }
4061*4882a593Smuzhiyun
4062*4882a593Smuzhiyun if (inode->i_size != 0) {
4063*4882a593Smuzhiyun ret = -EFBIG;
4064*4882a593Smuzhiyun goto out;
4065*4882a593Smuzhiyun }
4066*4882a593Smuzhiyun
4067*4882a593Smuzhiyun F2FS_I(inode)->i_compress_algorithm = option.algorithm;
4068*4882a593Smuzhiyun F2FS_I(inode)->i_log_cluster_size = option.log_cluster_size;
4069*4882a593Smuzhiyun F2FS_I(inode)->i_cluster_size = 1 << option.log_cluster_size;
4070*4882a593Smuzhiyun f2fs_mark_inode_dirty_sync(inode, true);
4071*4882a593Smuzhiyun
4072*4882a593Smuzhiyun if (!f2fs_is_compress_backend_ready(inode))
4073*4882a593Smuzhiyun f2fs_warn(sbi, "compression algorithm is successfully set, "
4074*4882a593Smuzhiyun "but current kernel doesn't support this algorithm.");
4075*4882a593Smuzhiyun out:
4076*4882a593Smuzhiyun inode_unlock(inode);
4077*4882a593Smuzhiyun file_end_write(filp);
4078*4882a593Smuzhiyun
4079*4882a593Smuzhiyun return ret;
4080*4882a593Smuzhiyun }
4081*4882a593Smuzhiyun
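/*
 * Read @len pages starting at @page_idx into the page cache and mark
 * them dirty again, so that a subsequent writeback rewrites (and hence
 * re-encodes) the clusters they belong to.
 */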
4082*4882a593Smuzhiyun static int redirty_blocks(struct inode *inode, pgoff_t page_idx, int len)
4083*4882a593Smuzhiyun {
4084*4882a593Smuzhiyun DEFINE_READAHEAD(ractl, NULL, inode->i_mapping, page_idx);
4085*4882a593Smuzhiyun struct address_space *mapping = inode->i_mapping;
4086*4882a593Smuzhiyun struct page *page;
4087*4882a593Smuzhiyun pgoff_t redirty_idx = page_idx;
4088*4882a593Smuzhiyun int i, page_len = 0, ret = 0;
4089*4882a593Smuzhiyun
4090*4882a593Smuzhiyun page_cache_ra_unbounded(&ractl, len, 0);
4091*4882a593Smuzhiyun
4092*4882a593Smuzhiyun for (i = 0; i < len; i++, page_idx++) {
4093*4882a593Smuzhiyun page = read_cache_page(mapping, page_idx, NULL, NULL);
4094*4882a593Smuzhiyun if (IS_ERR(page)) {
4095*4882a593Smuzhiyun ret = PTR_ERR(page);
4096*4882a593Smuzhiyun break;
4097*4882a593Smuzhiyun }
4098*4882a593Smuzhiyun page_len++;
4099*4882a593Smuzhiyun }
4100*4882a593Smuzhiyun
4101*4882a593Smuzhiyun for (i = 0; i < page_len; i++, redirty_idx++) {
4102*4882a593Smuzhiyun page = find_lock_page(mapping, redirty_idx);
4103*4882a593Smuzhiyun if (!page) {
4104*4882a593Smuzhiyun ret = -ENOMEM;
4105*4882a593Smuzhiyun break;
4106*4882a593Smuzhiyun }
4107*4882a593Smuzhiyun set_page_dirty(page);
4108*4882a593Smuzhiyun f2fs_put_page(page, 1);
4109*4882a593Smuzhiyun f2fs_put_page(page, 0);
4110*4882a593Smuzhiyun }
4111*4882a593Smuzhiyun
4112*4882a593Smuzhiyun return ret;
4113*4882a593Smuzhiyun }
4114*4882a593Smuzhiyun
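/*
 * F2FS_IOC_DECOMPRESS_FILE: redirty and write back every cluster of a
 * compressed file so that it is stored uncompressed on disk. Only
 * available with compress_mode=user.
 */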
4115*4882a593Smuzhiyun static int f2fs_ioc_decompress_file(struct file *filp, unsigned long arg)
4116*4882a593Smuzhiyun {
4117*4882a593Smuzhiyun struct inode *inode = file_inode(filp);
4118*4882a593Smuzhiyun struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
4119*4882a593Smuzhiyun struct f2fs_inode_info *fi = F2FS_I(inode);
4120*4882a593Smuzhiyun pgoff_t page_idx = 0, last_idx;
4121*4882a593Smuzhiyun unsigned int blk_per_seg = sbi->blocks_per_seg;
4122*4882a593Smuzhiyun int cluster_size = F2FS_I(inode)->i_cluster_size;
4123*4882a593Smuzhiyun int count, ret;
4124*4882a593Smuzhiyun
4125*4882a593Smuzhiyun if (!f2fs_sb_has_compression(sbi) ||
4126*4882a593Smuzhiyun F2FS_OPTION(sbi).compress_mode != COMPR_MODE_USER)
4127*4882a593Smuzhiyun return -EOPNOTSUPP;
4128*4882a593Smuzhiyun
4129*4882a593Smuzhiyun if (!(filp->f_mode & FMODE_WRITE))
4130*4882a593Smuzhiyun return -EBADF;
4131*4882a593Smuzhiyun
4132*4882a593Smuzhiyun if (!f2fs_compressed_file(inode))
4133*4882a593Smuzhiyun return -EINVAL;
4134*4882a593Smuzhiyun
4135*4882a593Smuzhiyun f2fs_balance_fs(F2FS_I_SB(inode), true);
4136*4882a593Smuzhiyun
4137*4882a593Smuzhiyun file_start_write(filp);
4138*4882a593Smuzhiyun inode_lock(inode);
4139*4882a593Smuzhiyun
4140*4882a593Smuzhiyun if (!f2fs_is_compress_backend_ready(inode)) {
4141*4882a593Smuzhiyun ret = -EOPNOTSUPP;
4142*4882a593Smuzhiyun goto out;
4143*4882a593Smuzhiyun }
4144*4882a593Smuzhiyun
4145*4882a593Smuzhiyun if (is_inode_flag_set(inode, FI_COMPRESS_RELEASED)) {
4146*4882a593Smuzhiyun ret = -EINVAL;
4147*4882a593Smuzhiyun goto out;
4148*4882a593Smuzhiyun }
4149*4882a593Smuzhiyun
4150*4882a593Smuzhiyun ret = filemap_write_and_wait_range(inode->i_mapping, 0, LLONG_MAX);
4151*4882a593Smuzhiyun if (ret)
4152*4882a593Smuzhiyun goto out;
4153*4882a593Smuzhiyun
4154*4882a593Smuzhiyun if (!atomic_read(&fi->i_compr_blocks))
4155*4882a593Smuzhiyun goto out;
4156*4882a593Smuzhiyun
4157*4882a593Smuzhiyun last_idx = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);
4158*4882a593Smuzhiyun
4159*4882a593Smuzhiyun count = last_idx - page_idx;
4160*4882a593Smuzhiyun while (count) {
4161*4882a593Smuzhiyun int len = min(cluster_size, count);
4162*4882a593Smuzhiyun
4163*4882a593Smuzhiyun ret = redirty_blocks(inode, page_idx, len);
4164*4882a593Smuzhiyun if (ret < 0)
4165*4882a593Smuzhiyun break;
4166*4882a593Smuzhiyun
4167*4882a593Smuzhiyun if (get_dirty_pages(inode) >= blk_per_seg)
4168*4882a593Smuzhiyun filemap_fdatawrite(inode->i_mapping);
4169*4882a593Smuzhiyun
4170*4882a593Smuzhiyun count -= len;
4171*4882a593Smuzhiyun page_idx += len;
4172*4882a593Smuzhiyun }
4173*4882a593Smuzhiyun
4174*4882a593Smuzhiyun if (!ret)
4175*4882a593Smuzhiyun ret = filemap_write_and_wait_range(inode->i_mapping, 0,
4176*4882a593Smuzhiyun LLONG_MAX);
4177*4882a593Smuzhiyun
4178*4882a593Smuzhiyun if (ret)
4179*4882a593Smuzhiyun f2fs_warn(sbi, "%s: The file might be partially decompressed (errno=%d). Please delete the file.",
4180*4882a593Smuzhiyun __func__, ret);
4181*4882a593Smuzhiyun out:
4182*4882a593Smuzhiyun inode_unlock(inode);
4183*4882a593Smuzhiyun file_end_write(filp);
4184*4882a593Smuzhiyun
4185*4882a593Smuzhiyun return ret;
4186*4882a593Smuzhiyun }
4187*4882a593Smuzhiyun
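/*
 * F2FS_IOC_COMPRESS_FILE: redirty and write back every cluster with
 * FI_ENABLE_COMPRESS set, so that the writeback path compresses the
 * file's data. Only available with compress_mode=user.
 */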
4188*4882a593Smuzhiyun static int f2fs_ioc_compress_file(struct file *filp, unsigned long arg)
4189*4882a593Smuzhiyun {
4190*4882a593Smuzhiyun struct inode *inode = file_inode(filp);
4191*4882a593Smuzhiyun struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
4192*4882a593Smuzhiyun pgoff_t page_idx = 0, last_idx;
4193*4882a593Smuzhiyun unsigned int blk_per_seg = sbi->blocks_per_seg;
4194*4882a593Smuzhiyun int cluster_size = F2FS_I(inode)->i_cluster_size;
4195*4882a593Smuzhiyun int count, ret;
4196*4882a593Smuzhiyun
4197*4882a593Smuzhiyun if (!f2fs_sb_has_compression(sbi) ||
4198*4882a593Smuzhiyun F2FS_OPTION(sbi).compress_mode != COMPR_MODE_USER)
4199*4882a593Smuzhiyun return -EOPNOTSUPP;
4200*4882a593Smuzhiyun
4201*4882a593Smuzhiyun if (!(filp->f_mode & FMODE_WRITE))
4202*4882a593Smuzhiyun return -EBADF;
4203*4882a593Smuzhiyun
4204*4882a593Smuzhiyun if (!f2fs_compressed_file(inode))
4205*4882a593Smuzhiyun return -EINVAL;
4206*4882a593Smuzhiyun
4207*4882a593Smuzhiyun f2fs_balance_fs(F2FS_I_SB(inode), true);
4208*4882a593Smuzhiyun
4209*4882a593Smuzhiyun file_start_write(filp);
4210*4882a593Smuzhiyun inode_lock(inode);
4211*4882a593Smuzhiyun
4212*4882a593Smuzhiyun if (!f2fs_is_compress_backend_ready(inode)) {
4213*4882a593Smuzhiyun ret = -EOPNOTSUPP;
4214*4882a593Smuzhiyun goto out;
4215*4882a593Smuzhiyun }
4216*4882a593Smuzhiyun
4217*4882a593Smuzhiyun if (is_inode_flag_set(inode, FI_COMPRESS_RELEASED)) {
4218*4882a593Smuzhiyun ret = -EINVAL;
4219*4882a593Smuzhiyun goto out;
4220*4882a593Smuzhiyun }
4221*4882a593Smuzhiyun
4222*4882a593Smuzhiyun ret = filemap_write_and_wait_range(inode->i_mapping, 0, LLONG_MAX);
4223*4882a593Smuzhiyun if (ret)
4224*4882a593Smuzhiyun goto out;
4225*4882a593Smuzhiyun
4226*4882a593Smuzhiyun set_inode_flag(inode, FI_ENABLE_COMPRESS);
4227*4882a593Smuzhiyun
4228*4882a593Smuzhiyun last_idx = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);
4229*4882a593Smuzhiyun
4230*4882a593Smuzhiyun count = last_idx - page_idx;
4231*4882a593Smuzhiyun while (count) {
4232*4882a593Smuzhiyun int len = min(cluster_size, count);
4233*4882a593Smuzhiyun
4234*4882a593Smuzhiyun ret = redirty_blocks(inode, page_idx, len);
4235*4882a593Smuzhiyun if (ret < 0)
4236*4882a593Smuzhiyun break;
4237*4882a593Smuzhiyun
4238*4882a593Smuzhiyun if (get_dirty_pages(inode) >= blk_per_seg)
4239*4882a593Smuzhiyun filemap_fdatawrite(inode->i_mapping);
4240*4882a593Smuzhiyun
4241*4882a593Smuzhiyun count -= len;
4242*4882a593Smuzhiyun page_idx += len;
4243*4882a593Smuzhiyun }
4244*4882a593Smuzhiyun
4245*4882a593Smuzhiyun if (!ret)
4246*4882a593Smuzhiyun ret = filemap_write_and_wait_range(inode->i_mapping, 0,
4247*4882a593Smuzhiyun LLONG_MAX);
4248*4882a593Smuzhiyun
4249*4882a593Smuzhiyun clear_inode_flag(inode, FI_ENABLE_COMPRESS);
4250*4882a593Smuzhiyun
4251*4882a593Smuzhiyun if (ret)
4252*4882a593Smuzhiyun f2fs_warn(sbi, "%s: The file might be partially compressed (errno=%d). Please delete the file.",
4253*4882a593Smuzhiyun __func__, ret);
4254*4882a593Smuzhiyun out:
4255*4882a593Smuzhiyun inode_unlock(inode);
4256*4882a593Smuzhiyun file_end_write(filp);
4257*4882a593Smuzhiyun
4258*4882a593Smuzhiyun return ret;
4259*4882a593Smuzhiyun }
4260*4882a593Smuzhiyun
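/*
 * Dispatch one ioctl command to its handler. The callers, f2fs_ioctl()
 * and f2fs_compat_ioctl(), have already rejected requests while a
 * checkpoint error is pending or the checkpoint is not ready.
 */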
4261*4882a593Smuzhiyun static long __f2fs_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
4262*4882a593Smuzhiyun {
4263*4882a593Smuzhiyun switch (cmd) {
4264*4882a593Smuzhiyun case FS_IOC_GETFLAGS:
4265*4882a593Smuzhiyun return f2fs_ioc_getflags(filp, arg);
4266*4882a593Smuzhiyun case FS_IOC_SETFLAGS:
4267*4882a593Smuzhiyun return f2fs_ioc_setflags(filp, arg);
4268*4882a593Smuzhiyun case FS_IOC_GETVERSION:
4269*4882a593Smuzhiyun return f2fs_ioc_getversion(filp, arg);
4270*4882a593Smuzhiyun case F2FS_IOC_START_ATOMIC_WRITE:
4271*4882a593Smuzhiyun return f2fs_ioc_start_atomic_write(filp);
4272*4882a593Smuzhiyun case F2FS_IOC_COMMIT_ATOMIC_WRITE:
4273*4882a593Smuzhiyun return f2fs_ioc_commit_atomic_write(filp);
4274*4882a593Smuzhiyun case F2FS_IOC_START_VOLATILE_WRITE:
4275*4882a593Smuzhiyun return f2fs_ioc_start_volatile_write(filp);
4276*4882a593Smuzhiyun case F2FS_IOC_RELEASE_VOLATILE_WRITE:
4277*4882a593Smuzhiyun return f2fs_ioc_release_volatile_write(filp);
4278*4882a593Smuzhiyun case F2FS_IOC_ABORT_VOLATILE_WRITE:
4279*4882a593Smuzhiyun return f2fs_ioc_abort_volatile_write(filp);
4280*4882a593Smuzhiyun case F2FS_IOC_SHUTDOWN:
4281*4882a593Smuzhiyun return f2fs_ioc_shutdown(filp, arg);
4282*4882a593Smuzhiyun case FITRIM:
4283*4882a593Smuzhiyun return f2fs_ioc_fitrim(filp, arg);
4284*4882a593Smuzhiyun case FS_IOC_SET_ENCRYPTION_POLICY:
4285*4882a593Smuzhiyun return f2fs_ioc_set_encryption_policy(filp, arg);
4286*4882a593Smuzhiyun case FS_IOC_GET_ENCRYPTION_POLICY:
4287*4882a593Smuzhiyun return f2fs_ioc_get_encryption_policy(filp, arg);
4288*4882a593Smuzhiyun case FS_IOC_GET_ENCRYPTION_PWSALT:
4289*4882a593Smuzhiyun return f2fs_ioc_get_encryption_pwsalt(filp, arg);
4290*4882a593Smuzhiyun case FS_IOC_GET_ENCRYPTION_POLICY_EX:
4291*4882a593Smuzhiyun return f2fs_ioc_get_encryption_policy_ex(filp, arg);
4292*4882a593Smuzhiyun case FS_IOC_ADD_ENCRYPTION_KEY:
4293*4882a593Smuzhiyun return f2fs_ioc_add_encryption_key(filp, arg);
4294*4882a593Smuzhiyun case FS_IOC_REMOVE_ENCRYPTION_KEY:
4295*4882a593Smuzhiyun return f2fs_ioc_remove_encryption_key(filp, arg);
4296*4882a593Smuzhiyun case FS_IOC_REMOVE_ENCRYPTION_KEY_ALL_USERS:
4297*4882a593Smuzhiyun return f2fs_ioc_remove_encryption_key_all_users(filp, arg);
4298*4882a593Smuzhiyun case FS_IOC_GET_ENCRYPTION_KEY_STATUS:
4299*4882a593Smuzhiyun return f2fs_ioc_get_encryption_key_status(filp, arg);
4300*4882a593Smuzhiyun case FS_IOC_GET_ENCRYPTION_NONCE:
4301*4882a593Smuzhiyun return f2fs_ioc_get_encryption_nonce(filp, arg);
4302*4882a593Smuzhiyun case F2FS_IOC_GARBAGE_COLLECT:
4303*4882a593Smuzhiyun return f2fs_ioc_gc(filp, arg);
4304*4882a593Smuzhiyun case F2FS_IOC_GARBAGE_COLLECT_RANGE:
4305*4882a593Smuzhiyun return f2fs_ioc_gc_range(filp, arg);
4306*4882a593Smuzhiyun case F2FS_IOC_WRITE_CHECKPOINT:
4307*4882a593Smuzhiyun return f2fs_ioc_write_checkpoint(filp, arg);
4308*4882a593Smuzhiyun case F2FS_IOC_DEFRAGMENT:
4309*4882a593Smuzhiyun return f2fs_ioc_defragment(filp, arg);
4310*4882a593Smuzhiyun case F2FS_IOC_MOVE_RANGE:
4311*4882a593Smuzhiyun return f2fs_ioc_move_range(filp, arg);
4312*4882a593Smuzhiyun case F2FS_IOC_FLUSH_DEVICE:
4313*4882a593Smuzhiyun return f2fs_ioc_flush_device(filp, arg);
4314*4882a593Smuzhiyun case F2FS_IOC_GET_FEATURES:
4315*4882a593Smuzhiyun return f2fs_ioc_get_features(filp, arg);
4316*4882a593Smuzhiyun case FS_IOC_FSGETXATTR:
4317*4882a593Smuzhiyun return f2fs_ioc_fsgetxattr(filp, arg);
4318*4882a593Smuzhiyun case FS_IOC_FSSETXATTR:
4319*4882a593Smuzhiyun return f2fs_ioc_fssetxattr(filp, arg);
4320*4882a593Smuzhiyun case F2FS_IOC_GET_PIN_FILE:
4321*4882a593Smuzhiyun return f2fs_ioc_get_pin_file(filp, arg);
4322*4882a593Smuzhiyun case F2FS_IOC_SET_PIN_FILE:
4323*4882a593Smuzhiyun return f2fs_ioc_set_pin_file(filp, arg);
4324*4882a593Smuzhiyun case F2FS_IOC_PRECACHE_EXTENTS:
4325*4882a593Smuzhiyun return f2fs_ioc_precache_extents(filp, arg);
4326*4882a593Smuzhiyun case F2FS_IOC_RESIZE_FS:
4327*4882a593Smuzhiyun return f2fs_ioc_resize_fs(filp, arg);
4328*4882a593Smuzhiyun case FS_IOC_ENABLE_VERITY:
4329*4882a593Smuzhiyun return f2fs_ioc_enable_verity(filp, arg);
4330*4882a593Smuzhiyun case FS_IOC_MEASURE_VERITY:
4331*4882a593Smuzhiyun return f2fs_ioc_measure_verity(filp, arg);
4332*4882a593Smuzhiyun case FS_IOC_READ_VERITY_METADATA:
4333*4882a593Smuzhiyun return f2fs_ioc_read_verity_metadata(filp, arg);
4334*4882a593Smuzhiyun case FS_IOC_GETFSLABEL:
4335*4882a593Smuzhiyun return f2fs_ioc_getfslabel(filp, arg);
4336*4882a593Smuzhiyun case FS_IOC_SETFSLABEL:
4337*4882a593Smuzhiyun return f2fs_ioc_setfslabel(filp, arg);
4338*4882a593Smuzhiyun case F2FS_IOC_GET_COMPRESS_BLOCKS:
4339*4882a593Smuzhiyun return f2fs_get_compress_blocks(filp, arg);
4340*4882a593Smuzhiyun case F2FS_IOC_RELEASE_COMPRESS_BLOCKS:
4341*4882a593Smuzhiyun return f2fs_release_compress_blocks(filp, arg);
4342*4882a593Smuzhiyun case F2FS_IOC_RESERVE_COMPRESS_BLOCKS:
4343*4882a593Smuzhiyun return f2fs_reserve_compress_blocks(filp, arg);
4344*4882a593Smuzhiyun case F2FS_IOC_SEC_TRIM_FILE:
4345*4882a593Smuzhiyun return f2fs_sec_trim_file(filp, arg);
4346*4882a593Smuzhiyun case F2FS_IOC_GET_COMPRESS_OPTION:
4347*4882a593Smuzhiyun return f2fs_ioc_get_compress_option(filp, arg);
4348*4882a593Smuzhiyun case F2FS_IOC_SET_COMPRESS_OPTION:
4349*4882a593Smuzhiyun return f2fs_ioc_set_compress_option(filp, arg);
4350*4882a593Smuzhiyun case F2FS_IOC_DECOMPRESS_FILE:
4351*4882a593Smuzhiyun return f2fs_ioc_decompress_file(filp, arg);
4352*4882a593Smuzhiyun case F2FS_IOC_COMPRESS_FILE:
4353*4882a593Smuzhiyun return f2fs_ioc_compress_file(filp, arg);
4354*4882a593Smuzhiyun default:
4355*4882a593Smuzhiyun return -ENOTTY;
4356*4882a593Smuzhiyun }
4357*4882a593Smuzhiyun }
4358*4882a593Smuzhiyun
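/* Native ioctl entry point: fail fast on checkpoint problems, then dispatch. */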
4359*4882a593Smuzhiyun long f2fs_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
4360*4882a593Smuzhiyun {
4361*4882a593Smuzhiyun if (unlikely(f2fs_cp_error(F2FS_I_SB(file_inode(filp)))))
4362*4882a593Smuzhiyun return -EIO;
4363*4882a593Smuzhiyun if (!f2fs_is_checkpoint_ready(F2FS_I_SB(file_inode(filp))))
4364*4882a593Smuzhiyun return -ENOSPC;
4365*4882a593Smuzhiyun
4366*4882a593Smuzhiyun return __f2fs_ioctl(filp, cmd, arg);
4367*4882a593Smuzhiyun }
4368*4882a593Smuzhiyun
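/*
 * read_iter: plain generic_file_read_iter(), refusing access when the
 * inode's compression algorithm is not supported by this kernel, and
 * accounting the bytes read in the iostat counters.
 */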
4369*4882a593Smuzhiyun static ssize_t f2fs_file_read_iter(struct kiocb *iocb, struct iov_iter *iter)
4370*4882a593Smuzhiyun {
4371*4882a593Smuzhiyun struct file *file = iocb->ki_filp;
4372*4882a593Smuzhiyun struct inode *inode = file_inode(file);
4373*4882a593Smuzhiyun int ret;
4374*4882a593Smuzhiyun
4375*4882a593Smuzhiyun if (!f2fs_is_compress_backend_ready(inode))
4376*4882a593Smuzhiyun return -EOPNOTSUPP;
4377*4882a593Smuzhiyun
4378*4882a593Smuzhiyun ret = generic_file_read_iter(iocb, iter);
4379*4882a593Smuzhiyun
4380*4882a593Smuzhiyun if (ret > 0)
4381*4882a593Smuzhiyun f2fs_update_iostat(F2FS_I_SB(inode), APP_READ_IO, ret);
4382*4882a593Smuzhiyun
4383*4882a593Smuzhiyun return ret;
4384*4882a593Smuzhiyun }
4385*4882a593Smuzhiyun
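/*
 * write_iter: buffered/direct write with f2fs specifics: IOCB_NOWAIT
 * handling, optional block preallocation before copying the data, and
 * truncation of the preallocated blocks if the write ends up short.
 */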
4386*4882a593Smuzhiyun static ssize_t f2fs_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
4387*4882a593Smuzhiyun {
4388*4882a593Smuzhiyun struct file *file = iocb->ki_filp;
4389*4882a593Smuzhiyun struct inode *inode = file_inode(file);
4390*4882a593Smuzhiyun ssize_t ret;
4391*4882a593Smuzhiyun
4392*4882a593Smuzhiyun if (unlikely(f2fs_cp_error(F2FS_I_SB(inode)))) {
4393*4882a593Smuzhiyun ret = -EIO;
4394*4882a593Smuzhiyun goto out;
4395*4882a593Smuzhiyun }
4396*4882a593Smuzhiyun
4397*4882a593Smuzhiyun if (!f2fs_is_compress_backend_ready(inode)) {
4398*4882a593Smuzhiyun ret = -EOPNOTSUPP;
4399*4882a593Smuzhiyun goto out;
4400*4882a593Smuzhiyun }
4401*4882a593Smuzhiyun
4402*4882a593Smuzhiyun if (iocb->ki_flags & IOCB_NOWAIT) {
4403*4882a593Smuzhiyun if (!inode_trylock(inode)) {
4404*4882a593Smuzhiyun ret = -EAGAIN;
4405*4882a593Smuzhiyun goto out;
4406*4882a593Smuzhiyun }
4407*4882a593Smuzhiyun } else {
4408*4882a593Smuzhiyun inode_lock(inode);
4409*4882a593Smuzhiyun }
4410*4882a593Smuzhiyun
4411*4882a593Smuzhiyun if (unlikely(IS_IMMUTABLE(inode))) {
4412*4882a593Smuzhiyun ret = -EPERM;
4413*4882a593Smuzhiyun goto unlock;
4414*4882a593Smuzhiyun }
4415*4882a593Smuzhiyun
4416*4882a593Smuzhiyun if (is_inode_flag_set(inode, FI_COMPRESS_RELEASED)) {
4417*4882a593Smuzhiyun ret = -EPERM;
4418*4882a593Smuzhiyun goto unlock;
4419*4882a593Smuzhiyun }
4420*4882a593Smuzhiyun
4421*4882a593Smuzhiyun ret = generic_write_checks(iocb, from);
4422*4882a593Smuzhiyun if (ret > 0) {
4423*4882a593Smuzhiyun bool preallocated = false;
4424*4882a593Smuzhiyun size_t target_size = 0;
4425*4882a593Smuzhiyun int err;
4426*4882a593Smuzhiyun
4427*4882a593Smuzhiyun if (iov_iter_fault_in_readable(from, iov_iter_count(from)))
4428*4882a593Smuzhiyun set_inode_flag(inode, FI_NO_PREALLOC);
4429*4882a593Smuzhiyun
4430*4882a593Smuzhiyun if ((iocb->ki_flags & IOCB_NOWAIT)) {
4431*4882a593Smuzhiyun if (!f2fs_overwrite_io(inode, iocb->ki_pos,
4432*4882a593Smuzhiyun iov_iter_count(from)) ||
4433*4882a593Smuzhiyun f2fs_has_inline_data(inode) ||
4434*4882a593Smuzhiyun f2fs_force_buffered_io(inode, iocb, from)) {
4435*4882a593Smuzhiyun clear_inode_flag(inode, FI_NO_PREALLOC);
4436*4882a593Smuzhiyun inode_unlock(inode);
4437*4882a593Smuzhiyun ret = -EAGAIN;
4438*4882a593Smuzhiyun goto out;
4439*4882a593Smuzhiyun }
4440*4882a593Smuzhiyun goto write;
4441*4882a593Smuzhiyun }
4442*4882a593Smuzhiyun
4443*4882a593Smuzhiyun if (is_inode_flag_set(inode, FI_NO_PREALLOC))
4444*4882a593Smuzhiyun goto write;
4445*4882a593Smuzhiyun
4446*4882a593Smuzhiyun if (iocb->ki_flags & IOCB_DIRECT) {
4447*4882a593Smuzhiyun /*
4448*4882a593Smuzhiyun * Convert inline data for Direct I/O before entering
4449*4882a593Smuzhiyun * f2fs_direct_IO().
4450*4882a593Smuzhiyun */
4451*4882a593Smuzhiyun err = f2fs_convert_inline_inode(inode);
4452*4882a593Smuzhiyun if (err)
4453*4882a593Smuzhiyun goto out_err;
4454*4882a593Smuzhiyun /*
4455*4882a593Smuzhiyun * If f2fs_force_buffered_io() is true, we have to allocate
4456*4882a593Smuzhiyun * blocks all the time, since f2fs_direct_IO will fall
4457*4882a593Smuzhiyun * back to buffered IO.
4458*4882a593Smuzhiyun */
4459*4882a593Smuzhiyun if (!f2fs_force_buffered_io(inode, iocb, from) &&
4460*4882a593Smuzhiyun allow_outplace_dio(inode, iocb, from))
4461*4882a593Smuzhiyun goto write;
4462*4882a593Smuzhiyun }
4463*4882a593Smuzhiyun preallocated = true;
4464*4882a593Smuzhiyun target_size = iocb->ki_pos + iov_iter_count(from);
4465*4882a593Smuzhiyun
4466*4882a593Smuzhiyun err = f2fs_preallocate_blocks(iocb, from);
4467*4882a593Smuzhiyun if (err) {
4468*4882a593Smuzhiyun out_err:
4469*4882a593Smuzhiyun clear_inode_flag(inode, FI_NO_PREALLOC);
4470*4882a593Smuzhiyun inode_unlock(inode);
4471*4882a593Smuzhiyun ret = err;
4472*4882a593Smuzhiyun goto out;
4473*4882a593Smuzhiyun }
4474*4882a593Smuzhiyun write:
4475*4882a593Smuzhiyun ret = __generic_file_write_iter(iocb, from);
4476*4882a593Smuzhiyun clear_inode_flag(inode, FI_NO_PREALLOC);
4477*4882a593Smuzhiyun
4478*4882a593Smuzhiyun /* if we couldn't write data, we should deallocate blocks. */
4479*4882a593Smuzhiyun if (preallocated && i_size_read(inode) < target_size) {
4480*4882a593Smuzhiyun f2fs_down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
4481*4882a593Smuzhiyun f2fs_down_write(&F2FS_I(inode)->i_mmap_sem);
4482*4882a593Smuzhiyun f2fs_truncate(inode);
4483*4882a593Smuzhiyun f2fs_up_write(&F2FS_I(inode)->i_mmap_sem);
4484*4882a593Smuzhiyun f2fs_up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
4485*4882a593Smuzhiyun }
4486*4882a593Smuzhiyun
4487*4882a593Smuzhiyun if (ret > 0)
4488*4882a593Smuzhiyun f2fs_update_iostat(F2FS_I_SB(inode), APP_WRITE_IO, ret);
4489*4882a593Smuzhiyun }
4490*4882a593Smuzhiyun unlock:
4491*4882a593Smuzhiyun inode_unlock(inode);
4492*4882a593Smuzhiyun out:
4493*4882a593Smuzhiyun trace_f2fs_file_write_iter(inode, iocb->ki_pos,
4494*4882a593Smuzhiyun iov_iter_count(from), ret);
4495*4882a593Smuzhiyun if (ret > 0)
4496*4882a593Smuzhiyun ret = generic_write_sync(iocb, ret);
4497*4882a593Smuzhiyun return ret;
4498*4882a593Smuzhiyun }
4499*4882a593Smuzhiyun
4500*4882a593Smuzhiyun #ifdef CONFIG_COMPAT
4501*4882a593Smuzhiyun struct compat_f2fs_gc_range {
4502*4882a593Smuzhiyun u32 sync;
4503*4882a593Smuzhiyun compat_u64 start;
4504*4882a593Smuzhiyun compat_u64 len;
4505*4882a593Smuzhiyun };
4506*4882a593Smuzhiyun #define F2FS_IOC32_GARBAGE_COLLECT_RANGE _IOW(F2FS_IOCTL_MAGIC, 11,\
4507*4882a593Smuzhiyun struct compat_f2fs_gc_range)
4508*4882a593Smuzhiyun
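/*
 * 32-bit compat handler for F2FS_IOC_GARBAGE_COLLECT_RANGE: copy the
 * compat layout field by field and forward it to __f2fs_ioc_gc_range().
 */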
4509*4882a593Smuzhiyun static int f2fs_compat_ioc_gc_range(struct file *file, unsigned long arg)
4510*4882a593Smuzhiyun {
4511*4882a593Smuzhiyun struct compat_f2fs_gc_range __user *urange;
4512*4882a593Smuzhiyun struct f2fs_gc_range range;
4513*4882a593Smuzhiyun int err;
4514*4882a593Smuzhiyun
4515*4882a593Smuzhiyun urange = compat_ptr(arg);
4516*4882a593Smuzhiyun err = get_user(range.sync, &urange->sync);
4517*4882a593Smuzhiyun err |= get_user(range.start, &urange->start);
4518*4882a593Smuzhiyun err |= get_user(range.len, &urange->len);
4519*4882a593Smuzhiyun if (err)
4520*4882a593Smuzhiyun return -EFAULT;
4521*4882a593Smuzhiyun
4522*4882a593Smuzhiyun return __f2fs_ioc_gc_range(file, &range);
4523*4882a593Smuzhiyun }
4524*4882a593Smuzhiyun
4525*4882a593Smuzhiyun struct compat_f2fs_move_range {
4526*4882a593Smuzhiyun u32 dst_fd;
4527*4882a593Smuzhiyun compat_u64 pos_in;
4528*4882a593Smuzhiyun compat_u64 pos_out;
4529*4882a593Smuzhiyun compat_u64 len;
4530*4882a593Smuzhiyun };
4531*4882a593Smuzhiyun #define F2FS_IOC32_MOVE_RANGE _IOWR(F2FS_IOCTL_MAGIC, 9, \
4532*4882a593Smuzhiyun struct compat_f2fs_move_range)
4533*4882a593Smuzhiyun
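/*
 * 32-bit compat handler for F2FS_IOC_MOVE_RANGE: copy the compat layout
 * field by field and forward it to __f2fs_ioc_move_range().
 */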
4534*4882a593Smuzhiyun static int f2fs_compat_ioc_move_range(struct file *file, unsigned long arg)
4535*4882a593Smuzhiyun {
4536*4882a593Smuzhiyun struct compat_f2fs_move_range __user *urange;
4537*4882a593Smuzhiyun struct f2fs_move_range range;
4538*4882a593Smuzhiyun int err;
4539*4882a593Smuzhiyun
4540*4882a593Smuzhiyun urange = compat_ptr(arg);
4541*4882a593Smuzhiyun err = get_user(range.dst_fd, &urange->dst_fd);
4542*4882a593Smuzhiyun err |= get_user(range.pos_in, &urange->pos_in);
4543*4882a593Smuzhiyun err |= get_user(range.pos_out, &urange->pos_out);
4544*4882a593Smuzhiyun err |= get_user(range.len, &urange->len);
4545*4882a593Smuzhiyun if (err)
4546*4882a593Smuzhiyun return -EFAULT;
4547*4882a593Smuzhiyun
4548*4882a593Smuzhiyun return __f2fs_ioc_move_range(file, &range);
4549*4882a593Smuzhiyun }
4550*4882a593Smuzhiyun
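/*
 * compat_ioctl entry point: translate the commands whose numbers differ
 * between 32-bit and 64-bit ABIs, pass through the ones that are already
 * compatible, and reject anything else with -ENOIOCTLCMD.
 */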
4551*4882a593Smuzhiyun long f2fs_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
4552*4882a593Smuzhiyun {
4553*4882a593Smuzhiyun if (unlikely(f2fs_cp_error(F2FS_I_SB(file_inode(file)))))
4554*4882a593Smuzhiyun return -EIO;
4555*4882a593Smuzhiyun if (!f2fs_is_checkpoint_ready(F2FS_I_SB(file_inode(file))))
4556*4882a593Smuzhiyun return -ENOSPC;
4557*4882a593Smuzhiyun
4558*4882a593Smuzhiyun switch (cmd) {
4559*4882a593Smuzhiyun case FS_IOC32_GETFLAGS:
4560*4882a593Smuzhiyun cmd = FS_IOC_GETFLAGS;
4561*4882a593Smuzhiyun break;
4562*4882a593Smuzhiyun case FS_IOC32_SETFLAGS:
4563*4882a593Smuzhiyun cmd = FS_IOC_SETFLAGS;
4564*4882a593Smuzhiyun break;
4565*4882a593Smuzhiyun case FS_IOC32_GETVERSION:
4566*4882a593Smuzhiyun cmd = FS_IOC_GETVERSION;
4567*4882a593Smuzhiyun break;
4568*4882a593Smuzhiyun case F2FS_IOC32_GARBAGE_COLLECT_RANGE:
4569*4882a593Smuzhiyun return f2fs_compat_ioc_gc_range(file, arg);
4570*4882a593Smuzhiyun case F2FS_IOC32_MOVE_RANGE:
4571*4882a593Smuzhiyun return f2fs_compat_ioc_move_range(file, arg);
4572*4882a593Smuzhiyun case F2FS_IOC_START_ATOMIC_WRITE:
4573*4882a593Smuzhiyun case F2FS_IOC_COMMIT_ATOMIC_WRITE:
4574*4882a593Smuzhiyun case F2FS_IOC_START_VOLATILE_WRITE:
4575*4882a593Smuzhiyun case F2FS_IOC_RELEASE_VOLATILE_WRITE:
4576*4882a593Smuzhiyun case F2FS_IOC_ABORT_VOLATILE_WRITE:
4577*4882a593Smuzhiyun case F2FS_IOC_SHUTDOWN:
4578*4882a593Smuzhiyun case FITRIM:
4579*4882a593Smuzhiyun case FS_IOC_SET_ENCRYPTION_POLICY:
4580*4882a593Smuzhiyun case FS_IOC_GET_ENCRYPTION_PWSALT:
4581*4882a593Smuzhiyun case FS_IOC_GET_ENCRYPTION_POLICY:
4582*4882a593Smuzhiyun case FS_IOC_GET_ENCRYPTION_POLICY_EX:
4583*4882a593Smuzhiyun case FS_IOC_ADD_ENCRYPTION_KEY:
4584*4882a593Smuzhiyun case FS_IOC_REMOVE_ENCRYPTION_KEY:
4585*4882a593Smuzhiyun case FS_IOC_REMOVE_ENCRYPTION_KEY_ALL_USERS:
4586*4882a593Smuzhiyun case FS_IOC_GET_ENCRYPTION_KEY_STATUS:
4587*4882a593Smuzhiyun case FS_IOC_GET_ENCRYPTION_NONCE:
4588*4882a593Smuzhiyun case F2FS_IOC_GARBAGE_COLLECT:
4589*4882a593Smuzhiyun case F2FS_IOC_WRITE_CHECKPOINT:
4590*4882a593Smuzhiyun case F2FS_IOC_DEFRAGMENT:
4591*4882a593Smuzhiyun case F2FS_IOC_FLUSH_DEVICE:
4592*4882a593Smuzhiyun case F2FS_IOC_GET_FEATURES:
4593*4882a593Smuzhiyun case FS_IOC_FSGETXATTR:
4594*4882a593Smuzhiyun case FS_IOC_FSSETXATTR:
4595*4882a593Smuzhiyun case F2FS_IOC_GET_PIN_FILE:
4596*4882a593Smuzhiyun case F2FS_IOC_SET_PIN_FILE:
4597*4882a593Smuzhiyun case F2FS_IOC_PRECACHE_EXTENTS:
4598*4882a593Smuzhiyun case F2FS_IOC_RESIZE_FS:
4599*4882a593Smuzhiyun case FS_IOC_ENABLE_VERITY:
4600*4882a593Smuzhiyun case FS_IOC_MEASURE_VERITY:
4601*4882a593Smuzhiyun case FS_IOC_READ_VERITY_METADATA:
4602*4882a593Smuzhiyun case FS_IOC_GETFSLABEL:
4603*4882a593Smuzhiyun case FS_IOC_SETFSLABEL:
4604*4882a593Smuzhiyun case F2FS_IOC_GET_COMPRESS_BLOCKS:
4605*4882a593Smuzhiyun case F2FS_IOC_RELEASE_COMPRESS_BLOCKS:
4606*4882a593Smuzhiyun case F2FS_IOC_RESERVE_COMPRESS_BLOCKS:
4607*4882a593Smuzhiyun case F2FS_IOC_SEC_TRIM_FILE:
4608*4882a593Smuzhiyun case F2FS_IOC_GET_COMPRESS_OPTION:
4609*4882a593Smuzhiyun case F2FS_IOC_SET_COMPRESS_OPTION:
4610*4882a593Smuzhiyun case F2FS_IOC_DECOMPRESS_FILE:
4611*4882a593Smuzhiyun case F2FS_IOC_COMPRESS_FILE:
4612*4882a593Smuzhiyun break;
4613*4882a593Smuzhiyun default:
4614*4882a593Smuzhiyun return -ENOIOCTLCMD;
4615*4882a593Smuzhiyun }
4616*4882a593Smuzhiyun return __f2fs_ioctl(file, cmd, (unsigned long) compat_ptr(arg));
4617*4882a593Smuzhiyun }
4618*4882a593Smuzhiyun #endif
4619*4882a593Smuzhiyun
4620*4882a593Smuzhiyun const struct file_operations f2fs_file_operations = {
4621*4882a593Smuzhiyun .llseek = f2fs_llseek,
4622*4882a593Smuzhiyun .read_iter = f2fs_file_read_iter,
4623*4882a593Smuzhiyun .write_iter = f2fs_file_write_iter,
4624*4882a593Smuzhiyun .open = f2fs_file_open,
4625*4882a593Smuzhiyun .release = f2fs_release_file,
4626*4882a593Smuzhiyun .mmap = f2fs_file_mmap,
4627*4882a593Smuzhiyun .flush = f2fs_file_flush,
4628*4882a593Smuzhiyun .fsync = f2fs_sync_file,
4629*4882a593Smuzhiyun .fallocate = f2fs_fallocate,
4630*4882a593Smuzhiyun .unlocked_ioctl = f2fs_ioctl,
4631*4882a593Smuzhiyun #ifdef CONFIG_COMPAT
4632*4882a593Smuzhiyun .compat_ioctl = f2fs_compat_ioctl,
4633*4882a593Smuzhiyun #endif
4634*4882a593Smuzhiyun .splice_read = generic_file_splice_read,
4635*4882a593Smuzhiyun .splice_write = iter_file_splice_write,
4636*4882a593Smuzhiyun };