xref: /OK3568_Linux_fs/kernel/fs/f2fs/file.c (revision 4882a59341e53eb6f0b4789bf948001014eff981)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * fs/f2fs/file.c
4  *
5  * Copyright (c) 2012 Samsung Electronics Co., Ltd.
6  *             http://www.samsung.com/
7  */
8 #include <linux/fs.h>
9 #include <linux/f2fs_fs.h>
10 #include <linux/stat.h>
11 #include <linux/buffer_head.h>
12 #include <linux/writeback.h>
13 #include <linux/blkdev.h>
14 #include <linux/falloc.h>
15 #include <linux/types.h>
16 #include <linux/compat.h>
17 #include <linux/uaccess.h>
18 #include <linux/mount.h>
19 #include <linux/pagevec.h>
20 #include <linux/uio.h>
21 #include <linux/uuid.h>
22 #include <linux/file.h>
23 #include <linux/nls.h>
24 #include <linux/sched/signal.h>
25 
26 #include "f2fs.h"
27 #include "node.h"
28 #include "segment.h"
29 #include "xattr.h"
30 #include "acl.h"
31 #include "gc.h"
32 #include <trace/events/f2fs.h>
33 #include <uapi/linux/f2fs.h>
34 
35 static vm_fault_t f2fs_filemap_fault(struct vm_fault *vmf)
36 {
37 	struct inode *inode = file_inode(vmf->vma->vm_file);
38 	vm_fault_t ret;
39 
40 	f2fs_down_read(&F2FS_I(inode)->i_mmap_sem);
41 	ret = filemap_fault(vmf);
42 	f2fs_up_read(&F2FS_I(inode)->i_mmap_sem);
43 
44 	if (!ret)
45 		f2fs_update_iostat(F2FS_I_SB(inode), APP_MAPPED_READ_IO,
46 							F2FS_BLKSIZE);
47 
48 	trace_f2fs_filemap_fault(inode, vmf->pgoff, (unsigned long)ret);
49 
50 	return ret;
51 }
52 
53 static vm_fault_t f2fs_vm_page_mkwrite(struct vm_fault *vmf)
54 {
55 	struct page *page = vmf->page;
56 	struct inode *inode = file_inode(vmf->vma->vm_file);
57 	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
58 	struct dnode_of_data dn;
59 	bool need_alloc = true;
60 	int err = 0;
61 
62 	if (unlikely(IS_IMMUTABLE(inode)))
63 		return VM_FAULT_SIGBUS;
64 
65 	if (is_inode_flag_set(inode, FI_COMPRESS_RELEASED))
66 		return VM_FAULT_SIGBUS;
67 
68 	if (unlikely(f2fs_cp_error(sbi))) {
69 		err = -EIO;
70 		goto err;
71 	}
72 
73 	if (!f2fs_is_checkpoint_ready(sbi)) {
74 		err = -ENOSPC;
75 		goto err;
76 	}
77 
78 	err = f2fs_convert_inline_inode(inode);
79 	if (err)
80 		goto err;
81 
82 #ifdef CONFIG_F2FS_FS_COMPRESSION
83 	if (f2fs_compressed_file(inode)) {
84 		int ret = f2fs_is_compressed_cluster(inode, page->index);
85 
86 		if (ret < 0) {
87 			err = ret;
88 			goto err;
89 		} else if (ret) {
90 			need_alloc = false;
91 		}
92 	}
93 #endif
94 	/* should be done outside of any locked page */
95 	if (need_alloc)
96 		f2fs_balance_fs(sbi, true);
97 
98 	sb_start_pagefault(inode->i_sb);
99 
100 	f2fs_bug_on(sbi, f2fs_has_inline_data(inode));
101 
102 	file_update_time(vmf->vma->vm_file);
103 	f2fs_down_read(&F2FS_I(inode)->i_mmap_sem);
104 	lock_page(page);
105 	if (unlikely(page->mapping != inode->i_mapping ||
106 			page_offset(page) > i_size_read(inode) ||
107 			!PageUptodate(page))) {
108 		unlock_page(page);
109 		err = -EFAULT;
110 		goto out_sem;
111 	}
112 
113 	if (need_alloc) {
114 		/* block allocation */
115 		f2fs_do_map_lock(sbi, F2FS_GET_BLOCK_PRE_AIO, true);
116 		set_new_dnode(&dn, inode, NULL, NULL, 0);
117 		err = f2fs_get_block(&dn, page->index);
118 		f2fs_do_map_lock(sbi, F2FS_GET_BLOCK_PRE_AIO, false);
119 	}
120 
121 #ifdef CONFIG_F2FS_FS_COMPRESSION
122 	if (!need_alloc) {
123 		set_new_dnode(&dn, inode, NULL, NULL, 0);
124 		err = f2fs_get_dnode_of_data(&dn, page->index, LOOKUP_NODE);
125 		f2fs_put_dnode(&dn);
126 	}
127 #endif
128 	if (err) {
129 		unlock_page(page);
130 		goto out_sem;
131 	}
132 
133 	f2fs_wait_on_page_writeback(page, DATA, false, true);
134 
135 	/* wait for GCed page writeback via META_MAPPING */
136 	f2fs_wait_on_block_writeback(inode, dn.data_blkaddr);
137 
138 	/*
139 	 * check to see if the page is mapped already (no holes)
140 	 */
141 	if (PageMappedToDisk(page))
142 		goto out_sem;
143 
144 	/* page is wholly or partially inside EOF */
145 	if (((loff_t)(page->index + 1) << PAGE_SHIFT) >
146 						i_size_read(inode)) {
147 		loff_t offset;
148 
149 		offset = i_size_read(inode) & ~PAGE_MASK;
150 		zero_user_segment(page, offset, PAGE_SIZE);
151 	}
152 	set_page_dirty(page);
153 	if (!PageUptodate(page))
154 		SetPageUptodate(page);
155 
156 	f2fs_update_iostat(sbi, APP_MAPPED_IO, F2FS_BLKSIZE);
157 	f2fs_update_time(sbi, REQ_TIME);
158 
159 	trace_f2fs_vm_page_mkwrite(page, DATA);
160 out_sem:
161 	f2fs_up_read(&F2FS_I(inode)->i_mmap_sem);
162 
163 	sb_end_pagefault(inode->i_sb);
164 err:
165 	return block_page_mkwrite_return(err);
166 }
167 
168 static const struct vm_operations_struct f2fs_file_vm_ops = {
169 	.fault		= f2fs_filemap_fault,
170 	.map_pages	= filemap_map_pages,
171 	.page_mkwrite	= f2fs_vm_page_mkwrite,
172 #ifdef CONFIG_SPECULATIVE_PAGE_FAULT
173 	.allow_speculation = filemap_allow_speculation,
174 #endif
175 };
176 
177 static int get_parent_ino(struct inode *inode, nid_t *pino)
178 {
179 	struct dentry *dentry;
180 
181 	/*
182 	 * Make sure to get the non-deleted alias.  The alias associated with
183 	 * the open file descriptor being fsync()'ed may be deleted already.
184 	 */
185 	dentry = d_find_alias(inode);
186 	if (!dentry)
187 		return 0;
188 
189 	*pino = parent_ino(dentry);
190 	dput(dentry);
191 	return 1;
192 }
193 
194 static inline enum cp_reason_type need_do_checkpoint(struct inode *inode)
195 {
196 	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
197 	enum cp_reason_type cp_reason = CP_NO_NEEDED;
198 
199 	if (!S_ISREG(inode->i_mode))
200 		cp_reason = CP_NON_REGULAR;
201 	else if (f2fs_compressed_file(inode))
202 		cp_reason = CP_COMPRESSED;
203 	else if (inode->i_nlink != 1)
204 		cp_reason = CP_HARDLINK;
205 	else if (is_sbi_flag_set(sbi, SBI_NEED_CP))
206 		cp_reason = CP_SB_NEED_CP;
207 	else if (file_wrong_pino(inode))
208 		cp_reason = CP_WRONG_PINO;
209 	else if (!f2fs_space_for_roll_forward(sbi))
210 		cp_reason = CP_NO_SPC_ROLL;
211 	else if (!f2fs_is_checkpointed_node(sbi, F2FS_I(inode)->i_pino))
212 		cp_reason = CP_NODE_NEED_CP;
213 	else if (test_opt(sbi, FASTBOOT))
214 		cp_reason = CP_FASTBOOT_MODE;
215 	else if (F2FS_OPTION(sbi).active_logs == 2)
216 		cp_reason = CP_SPEC_LOG_NUM;
217 	else if (F2FS_OPTION(sbi).fsync_mode == FSYNC_MODE_STRICT &&
218 		f2fs_need_dentry_mark(sbi, inode->i_ino) &&
219 		f2fs_exist_written_data(sbi, F2FS_I(inode)->i_pino,
220 							TRANS_DIR_INO))
221 		cp_reason = CP_RECOVER_DIR;
222 
223 	return cp_reason;
224 }
225 
226 static bool need_inode_page_update(struct f2fs_sb_info *sbi, nid_t ino)
227 {
228 	struct page *i = find_get_page(NODE_MAPPING(sbi), ino);
229 	bool ret = false;
230 	/* But we need to catch any pending inode updates */
231 	if ((i && PageDirty(i)) || f2fs_need_inode_block_update(sbi, ino))
232 		ret = true;
233 	f2fs_put_page(i, 0);
234 	return ret;
235 }
236 
237 static void try_to_fix_pino(struct inode *inode)
238 {
239 	struct f2fs_inode_info *fi = F2FS_I(inode);
240 	nid_t pino;
241 
242 	f2fs_down_write(&fi->i_sem);
243 	if (file_wrong_pino(inode) && inode->i_nlink == 1 &&
244 			get_parent_ino(inode, &pino)) {
245 		f2fs_i_pino_write(inode, pino);
246 		file_got_pino(inode);
247 	}
248 	f2fs_up_write(&fi->i_sem);
249 }
250 
251 static bool f2fs_update_fsync_count(struct f2fs_sb_info *sbi,
252 					unsigned int npages)
253 {
254 	struct sysinfo val;
255 	unsigned long avail_ram;
256 
257 	si_meminfo(&val);
258 
259 	/* only uses low memory */
260 	avail_ram = val.totalram - val.totalhigh;
261 	avail_ram = (avail_ram * DEF_RAM_THRESHOLD) / 100;
262 
263 	if ((atomic_read(&sbi->no_cp_fsync_pages) + npages) > avail_ram)
264 		return false;
265 
266 	atomic_add(npages, &sbi->no_cp_fsync_pages);
267 	return true;
268 }
269 
270 static int f2fs_do_sync_file(struct file *file, loff_t start, loff_t end,
271 						int datasync, bool atomic)
272 {
273 	struct inode *inode = file->f_mapping->host;
274 	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
275 	nid_t ino = inode->i_ino;
276 	int ret = 0;
277 	unsigned int npages = 0;
278 	enum cp_reason_type cp_reason = 0;
279 	struct writeback_control wbc = {
280 		.sync_mode = WB_SYNC_ALL,
281 		.nr_to_write = LONG_MAX,
282 		.for_reclaim = 0,
283 	};
284 	unsigned int seq_id = 0;
285 
286 	if (unlikely(f2fs_readonly(inode->i_sb)))
287 		return 0;
288 
289 	trace_f2fs_sync_file_enter(inode);
290 
291 	if (S_ISDIR(inode->i_mode))
292 		goto go_write;
293 
294 	/* if fdatasync is triggered, let's do in-place-update */
295 	npages = get_dirty_pages(inode);
296 	if (datasync || get_dirty_pages(inode) <= SM_I(sbi)->min_fsync_blocks)
297 		set_inode_flag(inode, FI_NEED_IPU);
298 	ret = file_write_and_wait_range(file, start, end);
299 	clear_inode_flag(inode, FI_NEED_IPU);
300 
301 	if (ret || is_sbi_flag_set(sbi, SBI_CP_DISABLED)) {
302 		trace_f2fs_sync_file_exit(inode, cp_reason, datasync, ret);
303 		return ret;
304 	}
305 
306 	/* if the inode is dirty, let's recover all the time */
307 	if (!f2fs_skip_inode_update(inode, datasync)) {
308 		f2fs_write_inode(inode, NULL);
309 		goto go_write;
310 	}
311 
312 	/*
313 	 * if there is no written data, don't waste time writing recovery info.
314 	 */
315 	if (!is_inode_flag_set(inode, FI_APPEND_WRITE) &&
316 			!f2fs_exist_written_data(sbi, ino, APPEND_INO)) {
317 
318 		/* it may call write_inode just prior to fsync */
319 		if (need_inode_page_update(sbi, ino))
320 			goto go_write;
321 
322 		if (is_inode_flag_set(inode, FI_UPDATE_WRITE) ||
323 				f2fs_exist_written_data(sbi, ino, UPDATE_INO))
324 			goto flush_out;
325 		goto out;
326 	}
327 go_write:
328 	/*
329 	 * Both fdatasync() and fsync() must be recoverable after a
330 	 * sudden power-off.
331 	 */
332 	f2fs_down_read(&F2FS_I(inode)->i_sem);
333 	cp_reason = need_do_checkpoint(inode);
334 	f2fs_up_read(&F2FS_I(inode)->i_sem);
335 
336 	if (cp_reason || !f2fs_update_fsync_count(sbi, npages)) {
337 		/* all the dirty node pages should be flushed for POR */
338 		ret = f2fs_sync_fs(inode->i_sb, 1);
339 
340 		/*
341 		 * We've secured consistency through sync_fs. The following pino
342 		 * will be used only for fsynced inodes after the checkpoint.
343 		 */
344 		try_to_fix_pino(inode);
345 		clear_inode_flag(inode, FI_APPEND_WRITE);
346 		clear_inode_flag(inode, FI_UPDATE_WRITE);
347 		goto out;
348 	}
349 sync_nodes:
350 	atomic_inc(&sbi->wb_sync_req[NODE]);
351 	ret = f2fs_fsync_node_pages(sbi, inode, &wbc, atomic, &seq_id);
352 	atomic_dec(&sbi->wb_sync_req[NODE]);
353 	if (ret)
354 		goto out;
355 
356 	/* if cp_error is set, avoid an infinite loop */
357 	if (unlikely(f2fs_cp_error(sbi))) {
358 		ret = -EIO;
359 		goto out;
360 	}
361 
362 	if (f2fs_need_inode_block_update(sbi, ino)) {
363 		f2fs_mark_inode_dirty_sync(inode, true);
364 		f2fs_write_inode(inode, NULL);
365 		goto sync_nodes;
366 	}
367 
368 	/*
369 	 * If this is an atomic write, write ordering is already guaranteed,
370 	 * so we don't need to wait for node write completion here: the node
371 	 * chain serializes the node blocks. If any node write is reordered,
372 	 * we simply see a broken chain, which stops roll-forward recovery at
373 	 * that point. That means we recover either all or none of the node
374 	 * blocks up to the fsync mark.
375 	 */
376 	if (!atomic) {
377 		ret = f2fs_wait_on_node_pages_writeback(sbi, seq_id);
378 		if (ret)
379 			goto out;
380 	}
381 
382 	/* once recovery info is written, we don't need to track this */
383 	f2fs_remove_ino_entry(sbi, ino, APPEND_INO);
384 	clear_inode_flag(inode, FI_APPEND_WRITE);
385 flush_out:
386 	if (!atomic && F2FS_OPTION(sbi).fsync_mode != FSYNC_MODE_NOBARRIER)
387 		ret = f2fs_issue_flush(sbi, inode->i_ino);
388 	if (!ret) {
389 		f2fs_remove_ino_entry(sbi, ino, UPDATE_INO);
390 		clear_inode_flag(inode, FI_UPDATE_WRITE);
391 		f2fs_remove_ino_entry(sbi, ino, FLUSH_INO);
392 	}
393 	f2fs_update_time(sbi, REQ_TIME);
394 out:
395 	trace_f2fs_sync_file_exit(inode, cp_reason, datasync, ret);
396 	return ret;
397 }
398 
399 int f2fs_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
400 {
401 	if (unlikely(f2fs_cp_error(F2FS_I_SB(file_inode(file)))))
402 		return -EIO;
403 	return f2fs_do_sync_file(file, start, end, datasync, false);
404 }
405 
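For reference, here is the userspace view of the datasync distinction handled above: fdatasync(2) reaches f2fs_do_sync_file() with datasync == 1, which favors in-place updates and minimal metadata, while fsync(2) passes 0 and also persists inode metadata. A minimal sketch, not part of this file, using a hypothetical path:

#include <fcntl.h>
#include <unistd.h>

int main(void)
{
	int fd = open("/mnt/f2fs/log.bin", O_WRONLY | O_CREAT, 0644);
	char buf[4096] = { 0 };

	if (fd < 0)
		return 1;
	if (write(fd, buf, sizeof(buf)) < 0)	/* dirty one data block */
		return 1;
	fdatasync(fd);	/* datasync == 1: data plus required metadata only */
	fsync(fd);	/* datasync == 0: inode metadata is persisted too */
	close(fd);
	return 0;
}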
406 static bool __found_offset(struct address_space *mapping, block_t blkaddr,
407 				pgoff_t index, int whence)
408 {
409 	switch (whence) {
410 	case SEEK_DATA:
411 		if (__is_valid_data_blkaddr(blkaddr))
412 			return true;
413 		if (blkaddr == NEW_ADDR &&
414 		    xa_get_mark(&mapping->i_pages, index, PAGECACHE_TAG_DIRTY))
415 			return true;
416 		break;
417 	case SEEK_HOLE:
418 		if (blkaddr == NULL_ADDR)
419 			return true;
420 		break;
421 	}
422 	return false;
423 }
424 
425 static loff_t f2fs_seek_block(struct file *file, loff_t offset, int whence)
426 {
427 	struct inode *inode = file->f_mapping->host;
428 	loff_t maxbytes = inode->i_sb->s_maxbytes;
429 	struct dnode_of_data dn;
430 	pgoff_t pgofs, end_offset;
431 	loff_t data_ofs = offset;
432 	loff_t isize;
433 	int err = 0;
434 
435 	inode_lock(inode);
436 
437 	isize = i_size_read(inode);
438 	if (offset >= isize)
439 		goto fail;
440 
441 	/* handle inline data case */
442 	if (f2fs_has_inline_data(inode)) {
443 		if (whence == SEEK_HOLE) {
444 			data_ofs = isize;
445 			goto found;
446 		} else if (whence == SEEK_DATA) {
447 			data_ofs = offset;
448 			goto found;
449 		}
450 	}
451 
452 	pgofs = (pgoff_t)(offset >> PAGE_SHIFT);
453 
454 	for (; data_ofs < isize; data_ofs = (loff_t)pgofs << PAGE_SHIFT) {
455 		set_new_dnode(&dn, inode, NULL, NULL, 0);
456 		err = f2fs_get_dnode_of_data(&dn, pgofs, LOOKUP_NODE);
457 		if (err && err != -ENOENT) {
458 			goto fail;
459 		} else if (err == -ENOENT) {
460 			/* direct node does not exist */
461 			if (whence == SEEK_DATA) {
462 				pgofs = f2fs_get_next_page_offset(&dn, pgofs);
463 				continue;
464 			} else {
465 				goto found;
466 			}
467 		}
468 
469 		end_offset = ADDRS_PER_PAGE(dn.node_page, inode);
470 
471 		/* find data/hole in dnode block */
472 		for (; dn.ofs_in_node < end_offset;
473 				dn.ofs_in_node++, pgofs++,
474 				data_ofs = (loff_t)pgofs << PAGE_SHIFT) {
475 			block_t blkaddr;
476 
477 			blkaddr = f2fs_data_blkaddr(&dn);
478 
479 			if (__is_valid_data_blkaddr(blkaddr) &&
480 				!f2fs_is_valid_blkaddr(F2FS_I_SB(inode),
481 					blkaddr, DATA_GENERIC_ENHANCE)) {
482 				f2fs_put_dnode(&dn);
483 				goto fail;
484 			}
485 
486 			if (__found_offset(file->f_mapping, blkaddr,
487 							pgofs, whence)) {
488 				f2fs_put_dnode(&dn);
489 				goto found;
490 			}
491 		}
492 		f2fs_put_dnode(&dn);
493 	}
494 
495 	if (whence == SEEK_DATA)
496 		goto fail;
497 found:
498 	if (whence == SEEK_HOLE && data_ofs > isize)
499 		data_ofs = isize;
500 	inode_unlock(inode);
501 	return vfs_setpos(file, data_ofs, maxbytes);
502 fail:
503 	inode_unlock(inode);
504 	return -ENXIO;
505 }
506 
507 static loff_t f2fs_llseek(struct file *file, loff_t offset, int whence)
508 {
509 	struct inode *inode = file->f_mapping->host;
510 	loff_t maxbytes = inode->i_sb->s_maxbytes;
511 
512 	if (f2fs_compressed_file(inode))
513 		maxbytes = max_file_blocks(inode) << F2FS_BLKSIZE_BITS;
514 
515 	switch (whence) {
516 	case SEEK_SET:
517 	case SEEK_CUR:
518 	case SEEK_END:
519 		return generic_file_llseek_size(file, offset, whence,
520 						maxbytes, i_size_read(inode));
521 	case SEEK_DATA:
522 	case SEEK_HOLE:
523 		if (offset < 0)
524 			return -ENXIO;
525 		return f2fs_seek_block(file, offset, whence);
526 	}
527 
528 	return -EINVAL;
529 }
530 
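As a usage illustration of the SEEK_DATA/SEEK_HOLE handling in f2fs_seek_block(), the sketch below walks the data extents of a sparse file from userspace. A minimal sketch, not part of this file: it assumes _GNU_SOURCE for the SEEK_* constants, the path is hypothetical, and error handling is kept minimal:

#define _GNU_SOURCE
#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>

int main(void)
{
	int fd = open("/mnt/f2fs/sparse.img", O_RDONLY);
	off_t data = 0, hole;

	if (fd < 0)
		return 1;
	/* lseek() fails with ENXIO once no data remains past the offset */
	while ((data = lseek(fd, data, SEEK_DATA)) >= 0) {
		hole = lseek(fd, data, SEEK_HOLE);	/* end of this extent */
		printf("data: [%lld, %lld)\n", (long long)data, (long long)hole);
		data = hole;
	}
	close(fd);
	return 0;
}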
531 static int f2fs_file_mmap(struct file *file, struct vm_area_struct *vma)
532 {
533 	struct inode *inode = file_inode(file);
534 
535 	if (unlikely(f2fs_cp_error(F2FS_I_SB(inode))))
536 		return -EIO;
537 
538 	if (!f2fs_is_compress_backend_ready(inode))
539 		return -EOPNOTSUPP;
540 
541 	file_accessed(file);
542 	vma->vm_ops = &f2fs_file_vm_ops;
543 	set_inode_flag(inode, FI_MMAP_FILE);
544 	return 0;
545 }
546 
547 static int f2fs_file_open(struct inode *inode, struct file *filp)
548 {
549 	int err = fscrypt_file_open(inode, filp);
550 
551 	if (err)
552 		return err;
553 
554 	if (!f2fs_is_compress_backend_ready(inode))
555 		return -EOPNOTSUPP;
556 
557 	err = fsverity_file_open(inode, filp);
558 	if (err)
559 		return err;
560 
561 	filp->f_mode |= FMODE_NOWAIT;
562 
563 	return dquot_file_open(inode, filp);
564 }
565 
566 void f2fs_truncate_data_blocks_range(struct dnode_of_data *dn, int count)
567 {
568 	struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
569 	struct f2fs_node *raw_node;
570 	int nr_free = 0, ofs = dn->ofs_in_node, len = count;
571 	__le32 *addr;
572 	int base = 0;
573 	bool compressed_cluster = false;
574 	int cluster_index = 0, valid_blocks = 0;
575 	int cluster_size = F2FS_I(dn->inode)->i_cluster_size;
576 	bool released = !atomic_read(&F2FS_I(dn->inode)->i_compr_blocks);
577 
578 	if (IS_INODE(dn->node_page) && f2fs_has_extra_attr(dn->inode))
579 		base = get_extra_isize(dn->inode);
580 
581 	raw_node = F2FS_NODE(dn->node_page);
582 	addr = blkaddr_in_node(raw_node) + base + ofs;
583 
584 	/* Assumption: truncation starts at a cluster boundary */
585 	for (; count > 0; count--, addr++, dn->ofs_in_node++, cluster_index++) {
586 		block_t blkaddr = le32_to_cpu(*addr);
587 
588 		if (f2fs_compressed_file(dn->inode) &&
589 					!(cluster_index & (cluster_size - 1))) {
590 			if (compressed_cluster)
591 				f2fs_i_compr_blocks_update(dn->inode,
592 							valid_blocks, false);
593 			compressed_cluster = (blkaddr == COMPRESS_ADDR);
594 			valid_blocks = 0;
595 		}
596 
597 		if (blkaddr == NULL_ADDR)
598 			continue;
599 
600 		dn->data_blkaddr = NULL_ADDR;
601 		f2fs_set_data_blkaddr(dn);
602 
603 		if (__is_valid_data_blkaddr(blkaddr)) {
604 			if (!f2fs_is_valid_blkaddr(sbi, blkaddr,
605 					DATA_GENERIC_ENHANCE))
606 				continue;
607 			if (compressed_cluster)
608 				valid_blocks++;
609 		}
610 
611 		if (dn->ofs_in_node == 0 && IS_INODE(dn->node_page))
612 			clear_inode_flag(dn->inode, FI_FIRST_BLOCK_WRITTEN);
613 
614 		f2fs_invalidate_blocks(sbi, blkaddr);
615 
616 		if (!released || blkaddr != COMPRESS_ADDR)
617 			nr_free++;
618 	}
619 
620 	if (compressed_cluster)
621 		f2fs_i_compr_blocks_update(dn->inode, valid_blocks, false);
622 
623 	if (nr_free) {
624 		pgoff_t fofs;
625 		/*
626 		 * once we invalidate a valid blkaddr in the range [ofs, ofs + count],
627 		 * we invalidate every blkaddr in the whole range.
628 		 */
629 		fofs = f2fs_start_bidx_of_node(ofs_of_node(dn->node_page),
630 							dn->inode) + ofs;
631 		f2fs_update_read_extent_cache_range(dn, fofs, 0, len);
632 		f2fs_update_age_extent_cache_range(dn, fofs, nr_free);
633 		dec_valid_block_count(sbi, dn->inode, nr_free);
634 	}
635 	dn->ofs_in_node = ofs;
636 
637 	f2fs_update_time(sbi, REQ_TIME);
638 	trace_f2fs_truncate_data_blocks_range(dn->inode, dn->nid,
639 					 dn->ofs_in_node, nr_free);
640 }
641 
642 void f2fs_truncate_data_blocks(struct dnode_of_data *dn)
643 {
644 	f2fs_truncate_data_blocks_range(dn, ADDRS_PER_BLOCK(dn->inode));
645 }
646 
647 static int truncate_partial_data_page(struct inode *inode, u64 from,
648 								bool cache_only)
649 {
650 	loff_t offset = from & (PAGE_SIZE - 1);
651 	pgoff_t index = from >> PAGE_SHIFT;
652 	struct address_space *mapping = inode->i_mapping;
653 	struct page *page;
654 
655 	if (!offset && !cache_only)
656 		return 0;
657 
658 	if (cache_only) {
659 		page = find_lock_page(mapping, index);
660 		if (page && PageUptodate(page))
661 			goto truncate_out;
662 		f2fs_put_page(page, 1);
663 		return 0;
664 	}
665 
666 	page = f2fs_get_lock_data_page(inode, index, true);
667 	if (IS_ERR(page))
668 		return PTR_ERR(page) == -ENOENT ? 0 : PTR_ERR(page);
669 truncate_out:
670 	f2fs_wait_on_page_writeback(page, DATA, true, true);
671 	zero_user(page, offset, PAGE_SIZE - offset);
672 
673 	/* An encrypted inode should have a key, so it never takes the cache-only path. */
674 	f2fs_bug_on(F2FS_I_SB(inode), cache_only && IS_ENCRYPTED(inode));
675 	if (!cache_only)
676 		set_page_dirty(page);
677 	f2fs_put_page(page, 1);
678 	return 0;
679 }
680 
681 int f2fs_do_truncate_blocks(struct inode *inode, u64 from, bool lock)
682 {
683 	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
684 	struct dnode_of_data dn;
685 	pgoff_t free_from;
686 	int count = 0, err = 0;
687 	struct page *ipage;
688 	bool truncate_page = false;
689 
690 	trace_f2fs_truncate_blocks_enter(inode, from);
691 
692 	free_from = (pgoff_t)F2FS_BLK_ALIGN(from);
693 
694 	if (free_from >= max_file_blocks(inode))
695 		goto free_partial;
696 
697 	if (lock)
698 		f2fs_lock_op(sbi);
699 
700 	ipage = f2fs_get_node_page(sbi, inode->i_ino);
701 	if (IS_ERR(ipage)) {
702 		err = PTR_ERR(ipage);
703 		goto out;
704 	}
705 
706 	if (f2fs_has_inline_data(inode)) {
707 		f2fs_truncate_inline_inode(inode, ipage, from);
708 		f2fs_put_page(ipage, 1);
709 		truncate_page = true;
710 		goto out;
711 	}
712 
713 	set_new_dnode(&dn, inode, ipage, NULL, 0);
714 	err = f2fs_get_dnode_of_data(&dn, free_from, LOOKUP_NODE_RA);
715 	if (err) {
716 		if (err == -ENOENT)
717 			goto free_next;
718 		goto out;
719 	}
720 
721 	count = ADDRS_PER_PAGE(dn.node_page, inode);
722 
723 	count -= dn.ofs_in_node;
724 	f2fs_bug_on(sbi, count < 0);
725 
726 	if (dn.ofs_in_node || IS_INODE(dn.node_page)) {
727 		f2fs_truncate_data_blocks_range(&dn, count);
728 		free_from += count;
729 	}
730 
731 	f2fs_put_dnode(&dn);
732 free_next:
733 	err = f2fs_truncate_inode_blocks(inode, free_from);
734 out:
735 	if (lock)
736 		f2fs_unlock_op(sbi);
737 free_partial:
738 	/* lastly zero out the first data page */
739 	if (!err)
740 		err = truncate_partial_data_page(inode, from, truncate_page);
741 
742 	trace_f2fs_truncate_blocks_exit(inode, err);
743 	return err;
744 }
745 
746 int f2fs_truncate_blocks(struct inode *inode, u64 from, bool lock)
747 {
748 	u64 free_from = from;
749 	int err;
750 
751 #ifdef CONFIG_F2FS_FS_COMPRESSION
752 	/*
753 	 * for compressed files, only cluster-size-aligned
754 	 * truncation is supported.
755 	 */
756 	if (f2fs_compressed_file(inode))
757 		free_from = round_up(from,
758 				F2FS_I(inode)->i_cluster_size << PAGE_SHIFT);
759 #endif
760 
761 	err = f2fs_do_truncate_blocks(inode, free_from, lock);
762 	if (err)
763 		return err;
764 
765 #ifdef CONFIG_F2FS_FS_COMPRESSION
766 	if (from != free_from) {
767 		err = f2fs_truncate_partial_cluster(inode, from, lock);
768 		if (err)
769 			return err;
770 	}
771 #endif
772 
773 	return 0;
774 }
775 
776 int f2fs_truncate(struct inode *inode)
777 {
778 	int err;
779 
780 	if (unlikely(f2fs_cp_error(F2FS_I_SB(inode))))
781 		return -EIO;
782 
783 	if (!(S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
784 				S_ISLNK(inode->i_mode)))
785 		return 0;
786 
787 	trace_f2fs_truncate(inode);
788 
789 	if (time_to_inject(F2FS_I_SB(inode), FAULT_TRUNCATE)) {
790 		f2fs_show_injection_info(F2FS_I_SB(inode), FAULT_TRUNCATE);
791 		return -EIO;
792 	}
793 
794 	err = dquot_initialize(inode);
795 	if (err)
796 		return err;
797 
798 	/* we should check inline_data size */
799 	if (!f2fs_may_inline_data(inode)) {
800 		err = f2fs_convert_inline_inode(inode);
801 		if (err)
802 			return err;
803 	}
804 
805 	err = f2fs_truncate_blocks(inode, i_size_read(inode), true);
806 	if (err)
807 		return err;
808 
809 	inode->i_mtime = inode->i_ctime = current_time(inode);
810 	f2fs_mark_inode_dirty_sync(inode, false);
811 	return 0;
812 }
813 
814 int f2fs_getattr(const struct path *path, struct kstat *stat,
815 		 u32 request_mask, unsigned int query_flags)
816 {
817 	struct inode *inode = d_inode(path->dentry);
818 	struct f2fs_inode_info *fi = F2FS_I(inode);
819 	struct f2fs_inode *ri;
820 	unsigned int flags;
821 
822 	if (f2fs_has_extra_attr(inode) &&
823 			f2fs_sb_has_inode_crtime(F2FS_I_SB(inode)) &&
824 			F2FS_FITS_IN_INODE(ri, fi->i_extra_isize, i_crtime)) {
825 		stat->result_mask |= STATX_BTIME;
826 		stat->btime.tv_sec = fi->i_crtime.tv_sec;
827 		stat->btime.tv_nsec = fi->i_crtime.tv_nsec;
828 	}
829 
830 	flags = fi->i_flags;
831 	if (flags & F2FS_COMPR_FL)
832 		stat->attributes |= STATX_ATTR_COMPRESSED;
833 	if (flags & F2FS_APPEND_FL)
834 		stat->attributes |= STATX_ATTR_APPEND;
835 	if (IS_ENCRYPTED(inode))
836 		stat->attributes |= STATX_ATTR_ENCRYPTED;
837 	if (flags & F2FS_IMMUTABLE_FL)
838 		stat->attributes |= STATX_ATTR_IMMUTABLE;
839 	if (flags & F2FS_NODUMP_FL)
840 		stat->attributes |= STATX_ATTR_NODUMP;
841 	if (IS_VERITY(inode))
842 		stat->attributes |= STATX_ATTR_VERITY;
843 
844 	stat->attributes_mask |= (STATX_ATTR_COMPRESSED |
845 				  STATX_ATTR_APPEND |
846 				  STATX_ATTR_ENCRYPTED |
847 				  STATX_ATTR_IMMUTABLE |
848 				  STATX_ATTR_NODUMP |
849 				  STATX_ATTR_VERITY);
850 
851 	generic_fillattr(inode, stat);
852 
853 	/* we need to show initial sectors used for inline_data/dentries */
854 	if ((S_ISREG(inode->i_mode) && f2fs_has_inline_data(inode)) ||
855 					f2fs_has_inline_dentry(inode))
856 		stat->blocks += (stat->size + 511) >> 9;
857 
858 	return 0;
859 }
860 
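A worked userspace illustration of what f2fs_getattr() reports: the sketch below queries the creation time and attribute bits with statx(2). A minimal sketch, not part of this file; it assumes a libc with statx() support (glibc 2.28 or later), and the path is hypothetical:

#define _GNU_SOURCE
#include <stdio.h>
#include <fcntl.h>
#include <sys/stat.h>

int main(void)
{
	struct statx stx;

	/* request the basic stats plus the birth time f2fs_getattr() fills in */
	if (statx(AT_FDCWD, "/mnt/f2fs/file", 0,
		  STATX_BASIC_STATS | STATX_BTIME, &stx)) {
		perror("statx");
		return 1;
	}
	if (stx.stx_mask & STATX_BTIME)
		printf("btime: %lld\n", (long long)stx.stx_btime.tv_sec);
	if (stx.stx_attributes & STATX_ATTR_COMPRESSED)
		printf("compressed\n");
	if (stx.stx_attributes & STATX_ATTR_IMMUTABLE)
		printf("immutable\n");
	return 0;
}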
861 #ifdef CONFIG_F2FS_FS_POSIX_ACL
862 static void __setattr_copy(struct inode *inode, const struct iattr *attr)
863 {
864 	unsigned int ia_valid = attr->ia_valid;
865 
866 	if (ia_valid & ATTR_UID)
867 		inode->i_uid = attr->ia_uid;
868 	if (ia_valid & ATTR_GID)
869 		inode->i_gid = attr->ia_gid;
870 	if (ia_valid & ATTR_ATIME)
871 		inode->i_atime = attr->ia_atime;
872 	if (ia_valid & ATTR_MTIME)
873 		inode->i_mtime = attr->ia_mtime;
874 	if (ia_valid & ATTR_CTIME)
875 		inode->i_ctime = attr->ia_ctime;
876 	if (ia_valid & ATTR_MODE) {
877 		umode_t mode = attr->ia_mode;
878 
879 		if (!in_group_p(inode->i_gid) &&
880 			!capable_wrt_inode_uidgid(inode, CAP_FSETID))
881 			mode &= ~S_ISGID;
882 		set_acl_inode(inode, mode);
883 	}
884 }
885 #else
886 #define __setattr_copy setattr_copy
887 #endif
888 
889 int f2fs_setattr(struct dentry *dentry, struct iattr *attr)
890 {
891 	struct inode *inode = d_inode(dentry);
892 	int err;
893 
894 	if (unlikely(f2fs_cp_error(F2FS_I_SB(inode))))
895 		return -EIO;
896 
897 	if (unlikely(IS_IMMUTABLE(inode)))
898 		return -EPERM;
899 
900 	if (unlikely(IS_APPEND(inode) &&
901 			(attr->ia_valid & (ATTR_MODE | ATTR_UID |
902 				  ATTR_GID | ATTR_TIMES_SET))))
903 		return -EPERM;
904 
905 	if ((attr->ia_valid & ATTR_SIZE) &&
906 		!f2fs_is_compress_backend_ready(inode))
907 		return -EOPNOTSUPP;
908 
909 	err = setattr_prepare(dentry, attr);
910 	if (err)
911 		return err;
912 
913 	err = fscrypt_prepare_setattr(dentry, attr);
914 	if (err)
915 		return err;
916 
917 	err = fsverity_prepare_setattr(dentry, attr);
918 	if (err)
919 		return err;
920 
921 	if (is_quota_modification(inode, attr)) {
922 		err = dquot_initialize(inode);
923 		if (err)
924 			return err;
925 	}
926 	if ((attr->ia_valid & ATTR_UID &&
927 		!uid_eq(attr->ia_uid, inode->i_uid)) ||
928 		(attr->ia_valid & ATTR_GID &&
929 		!gid_eq(attr->ia_gid, inode->i_gid))) {
930 		f2fs_lock_op(F2FS_I_SB(inode));
931 		err = dquot_transfer(inode, attr);
932 		if (err) {
933 			set_sbi_flag(F2FS_I_SB(inode),
934 					SBI_QUOTA_NEED_REPAIR);
935 			f2fs_unlock_op(F2FS_I_SB(inode));
936 			return err;
937 		}
938 		/*
939 		 * update uid/gid under lock_op(), so that dquot and inode can
940 		 * be updated atomically.
941 		 */
942 		if (attr->ia_valid & ATTR_UID)
943 			inode->i_uid = attr->ia_uid;
944 		if (attr->ia_valid & ATTR_GID)
945 			inode->i_gid = attr->ia_gid;
946 		f2fs_mark_inode_dirty_sync(inode, true);
947 		f2fs_unlock_op(F2FS_I_SB(inode));
948 	}
949 
950 	if (attr->ia_valid & ATTR_SIZE) {
951 		loff_t old_size = i_size_read(inode);
952 
953 		if (attr->ia_size > MAX_INLINE_DATA(inode)) {
954 			/*
955 			 * convert the inline inode before i_size_write() so the
956 			 * inline flag is kept only while i_size fits inline_data.
957 			 */
958 			err = f2fs_convert_inline_inode(inode);
959 			if (err)
960 				return err;
961 		}
962 
963 		f2fs_down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
964 		f2fs_down_write(&F2FS_I(inode)->i_mmap_sem);
965 
966 		truncate_setsize(inode, attr->ia_size);
967 
968 		if (attr->ia_size <= old_size)
969 			err = f2fs_truncate(inode);
970 		/*
971 		 * do not trim all blocks after i_size if target size is
972 		 * larger than i_size.
973 		 */
974 		f2fs_up_write(&F2FS_I(inode)->i_mmap_sem);
975 		f2fs_up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
976 		if (err)
977 			return err;
978 
979 		spin_lock(&F2FS_I(inode)->i_size_lock);
980 		inode->i_mtime = inode->i_ctime = current_time(inode);
981 		F2FS_I(inode)->last_disk_size = i_size_read(inode);
982 		spin_unlock(&F2FS_I(inode)->i_size_lock);
983 	}
984 
985 	__setattr_copy(inode, attr);
986 
987 	if (attr->ia_valid & ATTR_MODE) {
988 		err = posix_acl_chmod(inode, f2fs_get_inode_mode(inode));
989 
990 		if (is_inode_flag_set(inode, FI_ACL_MODE)) {
991 			if (!err)
992 				inode->i_mode = F2FS_I(inode)->i_acl_mode;
993 			clear_inode_flag(inode, FI_ACL_MODE);
994 		}
995 	}
996 
997 	/* file size may have changed here */
998 	f2fs_mark_inode_dirty_sync(inode, true);
999 
1000 	/* inode change will produce dirty node pages flushed by checkpoint */
1001 	f2fs_balance_fs(F2FS_I_SB(inode), true);
1002 
1003 	return err;
1004 }
1005 
1006 const struct inode_operations f2fs_file_inode_operations = {
1007 	.getattr	= f2fs_getattr,
1008 	.setattr	= f2fs_setattr,
1009 	.get_acl	= f2fs_get_acl,
1010 	.set_acl	= f2fs_set_acl,
1011 	.listxattr	= f2fs_listxattr,
1012 	.fiemap		= f2fs_fiemap,
1013 };
1014 
1015 static int fill_zero(struct inode *inode, pgoff_t index,
1016 					loff_t start, loff_t len)
1017 {
1018 	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
1019 	struct page *page;
1020 
1021 	if (!len)
1022 		return 0;
1023 
1024 	f2fs_balance_fs(sbi, true);
1025 
1026 	f2fs_lock_op(sbi);
1027 	page = f2fs_get_new_data_page(inode, NULL, index, false);
1028 	f2fs_unlock_op(sbi);
1029 
1030 	if (IS_ERR(page))
1031 		return PTR_ERR(page);
1032 
1033 	f2fs_wait_on_page_writeback(page, DATA, true, true);
1034 	zero_user(page, start, len);
1035 	set_page_dirty(page);
1036 	f2fs_put_page(page, 1);
1037 	return 0;
1038 }
1039 
1040 int f2fs_truncate_hole(struct inode *inode, pgoff_t pg_start, pgoff_t pg_end)
1041 {
1042 	int err;
1043 
1044 	while (pg_start < pg_end) {
1045 		struct dnode_of_data dn;
1046 		pgoff_t end_offset, count;
1047 
1048 		set_new_dnode(&dn, inode, NULL, NULL, 0);
1049 		err = f2fs_get_dnode_of_data(&dn, pg_start, LOOKUP_NODE);
1050 		if (err) {
1051 			if (err == -ENOENT) {
1052 				pg_start = f2fs_get_next_page_offset(&dn,
1053 								pg_start);
1054 				continue;
1055 			}
1056 			return err;
1057 		}
1058 
1059 		end_offset = ADDRS_PER_PAGE(dn.node_page, inode);
1060 		count = min(end_offset - dn.ofs_in_node, pg_end - pg_start);
1061 
1062 		f2fs_bug_on(F2FS_I_SB(inode), count == 0 || count > end_offset);
1063 
1064 		f2fs_truncate_data_blocks_range(&dn, count);
1065 		f2fs_put_dnode(&dn);
1066 
1067 		pg_start += count;
1068 	}
1069 	return 0;
1070 }
1071 
1072 static int punch_hole(struct inode *inode, loff_t offset, loff_t len)
1073 {
1074 	pgoff_t pg_start, pg_end;
1075 	loff_t off_start, off_end;
1076 	int ret;
1077 
1078 	ret = f2fs_convert_inline_inode(inode);
1079 	if (ret)
1080 		return ret;
1081 
1082 	pg_start = ((unsigned long long) offset) >> PAGE_SHIFT;
1083 	pg_end = ((unsigned long long) offset + len) >> PAGE_SHIFT;
1084 
1085 	off_start = offset & (PAGE_SIZE - 1);
1086 	off_end = (offset + len) & (PAGE_SIZE - 1);
1087 
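	/*
	 * Editor's worked example (hypothetical values, 4 KiB pages):
	 * offset = 5000, len = 10000 punches bytes [5000, 15000). Then
	 * pg_start = 1, pg_end = 3, off_start = 904, off_end = 2712: the
	 * tail of page 1 and the head of page 3 are zeroed via fill_zero(),
	 * and only the fully covered page range [2, 3) is truncated below.
	 */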
1088 	if (pg_start == pg_end) {
1089 		ret = fill_zero(inode, pg_start, off_start,
1090 						off_end - off_start);
1091 		if (ret)
1092 			return ret;
1093 	} else {
1094 		if (off_start) {
1095 			ret = fill_zero(inode, pg_start++, off_start,
1096 						PAGE_SIZE - off_start);
1097 			if (ret)
1098 				return ret;
1099 		}
1100 		if (off_end) {
1101 			ret = fill_zero(inode, pg_end, 0, off_end);
1102 			if (ret)
1103 				return ret;
1104 		}
1105 
1106 		if (pg_start < pg_end) {
1107 			loff_t blk_start, blk_end;
1108 			struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
1109 
1110 			f2fs_balance_fs(sbi, true);
1111 
1112 			blk_start = (loff_t)pg_start << PAGE_SHIFT;
1113 			blk_end = (loff_t)pg_end << PAGE_SHIFT;
1114 
1115 			f2fs_down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
1116 			f2fs_down_write(&F2FS_I(inode)->i_mmap_sem);
1117 
1118 			truncate_pagecache_range(inode, blk_start, blk_end - 1);
1119 
1120 			f2fs_lock_op(sbi);
1121 			ret = f2fs_truncate_hole(inode, pg_start, pg_end);
1122 			f2fs_unlock_op(sbi);
1123 
1124 			f2fs_up_write(&F2FS_I(inode)->i_mmap_sem);
1125 			f2fs_up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
1126 		}
1127 	}
1128 
1129 	return ret;
1130 }
1131 
1132 static int __read_out_blkaddrs(struct inode *inode, block_t *blkaddr,
1133 				int *do_replace, pgoff_t off, pgoff_t len)
1134 {
1135 	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
1136 	struct dnode_of_data dn;
1137 	int ret, done, i;
1138 
1139 next_dnode:
1140 	set_new_dnode(&dn, inode, NULL, NULL, 0);
1141 	ret = f2fs_get_dnode_of_data(&dn, off, LOOKUP_NODE_RA);
1142 	if (ret && ret != -ENOENT) {
1143 		return ret;
1144 	} else if (ret == -ENOENT) {
1145 		if (dn.max_level == 0)
1146 			return -ENOENT;
1147 		done = min((pgoff_t)ADDRS_PER_BLOCK(inode) -
1148 						dn.ofs_in_node, len);
1149 		blkaddr += done;
1150 		do_replace += done;
1151 		goto next;
1152 	}
1153 
1154 	done = min((pgoff_t)ADDRS_PER_PAGE(dn.node_page, inode) -
1155 							dn.ofs_in_node, len);
1156 	for (i = 0; i < done; i++, blkaddr++, do_replace++, dn.ofs_in_node++) {
1157 		*blkaddr = f2fs_data_blkaddr(&dn);
1158 
1159 		if (__is_valid_data_blkaddr(*blkaddr) &&
1160 			!f2fs_is_valid_blkaddr(sbi, *blkaddr,
1161 					DATA_GENERIC_ENHANCE)) {
1162 			f2fs_put_dnode(&dn);
1163 			return -EFSCORRUPTED;
1164 		}
1165 
1166 		if (!f2fs_is_checkpointed_data(sbi, *blkaddr)) {
1167 
1168 			if (f2fs_lfs_mode(sbi)) {
1169 				f2fs_put_dnode(&dn);
1170 				return -EOPNOTSUPP;
1171 			}
1172 
1173 			/* do not invalidate this block address */
1174 			f2fs_update_data_blkaddr(&dn, NULL_ADDR);
1175 			*do_replace = 1;
1176 		}
1177 	}
1178 	f2fs_put_dnode(&dn);
1179 next:
1180 	len -= done;
1181 	off += done;
1182 	if (len)
1183 		goto next_dnode;
1184 	return 0;
1185 }
1186 
1187 static int __roll_back_blkaddrs(struct inode *inode, block_t *blkaddr,
1188 				int *do_replace, pgoff_t off, int len)
1189 {
1190 	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
1191 	struct dnode_of_data dn;
1192 	int ret, i;
1193 
1194 	for (i = 0; i < len; i++, do_replace++, blkaddr++) {
1195 		if (*do_replace == 0)
1196 			continue;
1197 
1198 		set_new_dnode(&dn, inode, NULL, NULL, 0);
1199 		ret = f2fs_get_dnode_of_data(&dn, off + i, LOOKUP_NODE_RA);
1200 		if (ret) {
1201 			dec_valid_block_count(sbi, inode, 1);
1202 			f2fs_invalidate_blocks(sbi, *blkaddr);
1203 		} else {
1204 			f2fs_update_data_blkaddr(&dn, *blkaddr);
1205 		}
1206 		f2fs_put_dnode(&dn);
1207 	}
1208 	return 0;
1209 }
1210 
1211 static int __clone_blkaddrs(struct inode *src_inode, struct inode *dst_inode,
1212 			block_t *blkaddr, int *do_replace,
1213 			pgoff_t src, pgoff_t dst, pgoff_t len, bool full)
1214 {
1215 	struct f2fs_sb_info *sbi = F2FS_I_SB(src_inode);
1216 	pgoff_t i = 0;
1217 	int ret;
1218 
1219 	while (i < len) {
1220 		if (blkaddr[i] == NULL_ADDR && !full) {
1221 			i++;
1222 			continue;
1223 		}
1224 
1225 		if (do_replace[i] || blkaddr[i] == NULL_ADDR) {
1226 			struct dnode_of_data dn;
1227 			struct node_info ni;
1228 			size_t new_size;
1229 			pgoff_t ilen;
1230 
1231 			set_new_dnode(&dn, dst_inode, NULL, NULL, 0);
1232 			ret = f2fs_get_dnode_of_data(&dn, dst + i, ALLOC_NODE);
1233 			if (ret)
1234 				return ret;
1235 
1236 			ret = f2fs_get_node_info(sbi, dn.nid, &ni, false);
1237 			if (ret) {
1238 				f2fs_put_dnode(&dn);
1239 				return ret;
1240 			}
1241 
1242 			ilen = min((pgoff_t)
1243 				ADDRS_PER_PAGE(dn.node_page, dst_inode) -
1244 						dn.ofs_in_node, len - i);
1245 			do {
1246 				dn.data_blkaddr = f2fs_data_blkaddr(&dn);
1247 				f2fs_truncate_data_blocks_range(&dn, 1);
1248 
1249 				if (do_replace[i]) {
1250 					f2fs_i_blocks_write(src_inode,
1251 							1, false, false);
1252 					f2fs_i_blocks_write(dst_inode,
1253 							1, true, false);
1254 					f2fs_replace_block(sbi, &dn, dn.data_blkaddr,
1255 					blkaddr[i], ni.version, true, false);
1256 
1257 					do_replace[i] = 0;
1258 				}
1259 				dn.ofs_in_node++;
1260 				i++;
1261 				new_size = (loff_t)(dst + i) << PAGE_SHIFT;
1262 				if (dst_inode->i_size < new_size)
1263 					f2fs_i_size_write(dst_inode, new_size);
1264 			} while (--ilen && (do_replace[i] || blkaddr[i] == NULL_ADDR));
1265 
1266 			f2fs_put_dnode(&dn);
1267 		} else {
1268 			struct page *psrc, *pdst;
1269 
1270 			psrc = f2fs_get_lock_data_page(src_inode,
1271 							src + i, true);
1272 			if (IS_ERR(psrc))
1273 				return PTR_ERR(psrc);
1274 			pdst = f2fs_get_new_data_page(dst_inode, NULL, dst + i,
1275 								true);
1276 			if (IS_ERR(pdst)) {
1277 				f2fs_put_page(psrc, 1);
1278 				return PTR_ERR(pdst);
1279 			}
1280 			f2fs_copy_page(psrc, pdst);
1281 			set_page_dirty(pdst);
1282 			f2fs_put_page(pdst, 1);
1283 			f2fs_put_page(psrc, 1);
1284 
1285 			ret = f2fs_truncate_hole(src_inode,
1286 						src + i, src + i + 1);
1287 			if (ret)
1288 				return ret;
1289 			i++;
1290 		}
1291 	}
1292 	return 0;
1293 }
1294 
1295 static int __exchange_data_block(struct inode *src_inode,
1296 			struct inode *dst_inode, pgoff_t src, pgoff_t dst,
1297 			pgoff_t len, bool full)
1298 {
1299 	block_t *src_blkaddr;
1300 	int *do_replace;
1301 	pgoff_t olen;
1302 	int ret;
1303 
1304 	while (len) {
1305 		olen = min((pgoff_t)4 * ADDRS_PER_BLOCK(src_inode), len);
1306 
1307 		src_blkaddr = f2fs_kvzalloc(F2FS_I_SB(src_inode),
1308 					array_size(olen, sizeof(block_t)),
1309 					GFP_NOFS);
1310 		if (!src_blkaddr)
1311 			return -ENOMEM;
1312 
1313 		do_replace = f2fs_kvzalloc(F2FS_I_SB(src_inode),
1314 					array_size(olen, sizeof(int)),
1315 					GFP_NOFS);
1316 		if (!do_replace) {
1317 			kvfree(src_blkaddr);
1318 			return -ENOMEM;
1319 		}
1320 
1321 		ret = __read_out_blkaddrs(src_inode, src_blkaddr,
1322 					do_replace, src, olen);
1323 		if (ret)
1324 			goto roll_back;
1325 
1326 		ret = __clone_blkaddrs(src_inode, dst_inode, src_blkaddr,
1327 					do_replace, src, dst, olen, full);
1328 		if (ret)
1329 			goto roll_back;
1330 
1331 		src += olen;
1332 		dst += olen;
1333 		len -= olen;
1334 
1335 		kvfree(src_blkaddr);
1336 		kvfree(do_replace);
1337 	}
1338 	return 0;
1339 
1340 roll_back:
1341 	__roll_back_blkaddrs(src_inode, src_blkaddr, do_replace, src, olen);
1342 	kvfree(src_blkaddr);
1343 	kvfree(do_replace);
1344 	return ret;
1345 }
1346 
1347 static int f2fs_do_collapse(struct inode *inode, loff_t offset, loff_t len)
1348 {
1349 	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
1350 	pgoff_t nrpages = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);
1351 	pgoff_t start = offset >> PAGE_SHIFT;
1352 	pgoff_t end = (offset + len) >> PAGE_SHIFT;
1353 	int ret;
1354 
1355 	f2fs_balance_fs(sbi, true);
1356 
1357 	/* avoid gc operation during block exchange */
1358 	f2fs_down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
1359 	f2fs_down_write(&F2FS_I(inode)->i_mmap_sem);
1360 
1361 	f2fs_lock_op(sbi);
1362 	f2fs_drop_extent_tree(inode);
1363 	truncate_pagecache(inode, offset);
1364 	ret = __exchange_data_block(inode, inode, end, start, nrpages - end, true);
1365 	f2fs_unlock_op(sbi);
1366 
1367 	f2fs_up_write(&F2FS_I(inode)->i_mmap_sem);
1368 	f2fs_up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
1369 	return ret;
1370 }
1371 
1372 static int f2fs_collapse_range(struct inode *inode, loff_t offset, loff_t len)
1373 {
1374 	loff_t new_size;
1375 	int ret;
1376 
1377 	if (offset + len >= i_size_read(inode))
1378 		return -EINVAL;
1379 
1380 	/* collapse range should be aligned to the f2fs block size. */
1381 	if (offset & (F2FS_BLKSIZE - 1) || len & (F2FS_BLKSIZE - 1))
1382 		return -EINVAL;
1383 
1384 	ret = f2fs_convert_inline_inode(inode);
1385 	if (ret)
1386 		return ret;
1387 
1388 	/* write out all dirty pages from offset */
1389 	ret = filemap_write_and_wait_range(inode->i_mapping, offset, LLONG_MAX);
1390 	if (ret)
1391 		return ret;
1392 
1393 	ret = f2fs_do_collapse(inode, offset, len);
1394 	if (ret)
1395 		return ret;
1396 
1397 	/* write out all moved pages, if possible */
1398 	f2fs_down_write(&F2FS_I(inode)->i_mmap_sem);
1399 	filemap_write_and_wait_range(inode->i_mapping, offset, LLONG_MAX);
1400 	truncate_pagecache(inode, offset);
1401 
1402 	new_size = i_size_read(inode) - len;
1403 	ret = f2fs_truncate_blocks(inode, new_size, true);
1404 	f2fs_up_write(&F2FS_I(inode)->i_mmap_sem);
1405 	if (!ret)
1406 		f2fs_i_size_write(inode, new_size);
1407 	return ret;
1408 }
1409 
1410 static int f2fs_do_zero_range(struct dnode_of_data *dn, pgoff_t start,
1411 								pgoff_t end)
1412 {
1413 	struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
1414 	pgoff_t index = start;
1415 	unsigned int ofs_in_node = dn->ofs_in_node;
1416 	blkcnt_t count = 0;
1417 	int ret;
1418 
1419 	for (; index < end; index++, dn->ofs_in_node++) {
1420 		if (f2fs_data_blkaddr(dn) == NULL_ADDR)
1421 			count++;
1422 	}
1423 
1424 	dn->ofs_in_node = ofs_in_node;
1425 	ret = f2fs_reserve_new_blocks(dn, count);
1426 	if (ret)
1427 		return ret;
1428 
1429 	dn->ofs_in_node = ofs_in_node;
1430 	for (index = start; index < end; index++, dn->ofs_in_node++) {
1431 		dn->data_blkaddr = f2fs_data_blkaddr(dn);
1432 		/*
1433 		 * f2fs_reserve_new_blocks() does not guarantee that every
1434 		 * block gets allocated.
1435 		 */
1436 		if (dn->data_blkaddr == NULL_ADDR) {
1437 			ret = -ENOSPC;
1438 			break;
1439 		}
1440 
1441 		if (dn->data_blkaddr == NEW_ADDR)
1442 			continue;
1443 
1444 		if (!f2fs_is_valid_blkaddr(sbi, dn->data_blkaddr,
1445 					DATA_GENERIC_ENHANCE)) {
1446 			ret = -EFSCORRUPTED;
1447 			break;
1448 		}
1449 
1450 		f2fs_invalidate_blocks(sbi, dn->data_blkaddr);
1451 		dn->data_blkaddr = NEW_ADDR;
1452 		f2fs_set_data_blkaddr(dn);
1453 	}
1454 
1455 	f2fs_update_read_extent_cache_range(dn, start, 0, index - start);
1456 
1457 	return ret;
1458 }
1459 
1460 static int f2fs_zero_range(struct inode *inode, loff_t offset, loff_t len,
1461 								int mode)
1462 {
1463 	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
1464 	struct address_space *mapping = inode->i_mapping;
1465 	pgoff_t index, pg_start, pg_end;
1466 	loff_t new_size = i_size_read(inode);
1467 	loff_t off_start, off_end;
1468 	int ret = 0;
1469 
1470 	ret = inode_newsize_ok(inode, (len + offset));
1471 	if (ret)
1472 		return ret;
1473 
1474 	ret = f2fs_convert_inline_inode(inode);
1475 	if (ret)
1476 		return ret;
1477 
1478 	ret = filemap_write_and_wait_range(mapping, offset, offset + len - 1);
1479 	if (ret)
1480 		return ret;
1481 
1482 	pg_start = ((unsigned long long) offset) >> PAGE_SHIFT;
1483 	pg_end = ((unsigned long long) offset + len) >> PAGE_SHIFT;
1484 
1485 	off_start = offset & (PAGE_SIZE - 1);
1486 	off_end = (offset + len) & (PAGE_SIZE - 1);
1487 
1488 	if (pg_start == pg_end) {
1489 		ret = fill_zero(inode, pg_start, off_start,
1490 						off_end - off_start);
1491 		if (ret)
1492 			return ret;
1493 
1494 		new_size = max_t(loff_t, new_size, offset + len);
1495 	} else {
1496 		if (off_start) {
1497 			ret = fill_zero(inode, pg_start++, off_start,
1498 						PAGE_SIZE - off_start);
1499 			if (ret)
1500 				return ret;
1501 
1502 			new_size = max_t(loff_t, new_size,
1503 					(loff_t)pg_start << PAGE_SHIFT);
1504 		}
1505 
1506 		for (index = pg_start; index < pg_end;) {
1507 			struct dnode_of_data dn;
1508 			unsigned int end_offset;
1509 			pgoff_t end;
1510 
1511 			f2fs_down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
1512 			f2fs_down_write(&F2FS_I(inode)->i_mmap_sem);
1513 
1514 			truncate_pagecache_range(inode,
1515 				(loff_t)index << PAGE_SHIFT,
1516 				((loff_t)pg_end << PAGE_SHIFT) - 1);
1517 
1518 			f2fs_lock_op(sbi);
1519 
1520 			set_new_dnode(&dn, inode, NULL, NULL, 0);
1521 			ret = f2fs_get_dnode_of_data(&dn, index, ALLOC_NODE);
1522 			if (ret) {
1523 				f2fs_unlock_op(sbi);
1524 				f2fs_up_write(&F2FS_I(inode)->i_mmap_sem);
1525 				f2fs_up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
1526 				goto out;
1527 			}
1528 
1529 			end_offset = ADDRS_PER_PAGE(dn.node_page, inode);
1530 			end = min(pg_end, end_offset - dn.ofs_in_node + index);
1531 
1532 			ret = f2fs_do_zero_range(&dn, index, end);
1533 			f2fs_put_dnode(&dn);
1534 
1535 			f2fs_unlock_op(sbi);
1536 			f2fs_up_write(&F2FS_I(inode)->i_mmap_sem);
1537 			f2fs_up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
1538 
1539 			f2fs_balance_fs(sbi, dn.node_changed);
1540 
1541 			if (ret)
1542 				goto out;
1543 
1544 			index = end;
1545 			new_size = max_t(loff_t, new_size,
1546 					(loff_t)index << PAGE_SHIFT);
1547 		}
1548 
1549 		if (off_end) {
1550 			ret = fill_zero(inode, pg_end, 0, off_end);
1551 			if (ret)
1552 				goto out;
1553 
1554 			new_size = max_t(loff_t, new_size, offset + len);
1555 		}
1556 	}
1557 
1558 out:
1559 	if (new_size > i_size_read(inode)) {
1560 		if (mode & FALLOC_FL_KEEP_SIZE)
1561 			file_set_keep_isize(inode);
1562 		else
1563 			f2fs_i_size_write(inode, new_size);
1564 	}
1565 	return ret;
1566 }
1567 
1568 static int f2fs_insert_range(struct inode *inode, loff_t offset, loff_t len)
1569 {
1570 	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
1571 	pgoff_t nr, pg_start, pg_end, delta, idx;
1572 	loff_t new_size;
1573 	int ret = 0;
1574 
1575 	new_size = i_size_read(inode) + len;
1576 	ret = inode_newsize_ok(inode, new_size);
1577 	if (ret)
1578 		return ret;
1579 
1580 	if (offset >= i_size_read(inode))
1581 		return -EINVAL;
1582 
1583 	/* insert range should be aligned to the f2fs block size. */
1584 	if (offset & (F2FS_BLKSIZE - 1) || len & (F2FS_BLKSIZE - 1))
1585 		return -EINVAL;
1586 
1587 	ret = f2fs_convert_inline_inode(inode);
1588 	if (ret)
1589 		return ret;
1590 
1591 	f2fs_balance_fs(sbi, true);
1592 
1593 	f2fs_down_write(&F2FS_I(inode)->i_mmap_sem);
1594 	ret = f2fs_truncate_blocks(inode, i_size_read(inode), true);
1595 	f2fs_up_write(&F2FS_I(inode)->i_mmap_sem);
1596 	if (ret)
1597 		return ret;
1598 
1599 	/* write out all dirty pages from offset */
1600 	ret = filemap_write_and_wait_range(inode->i_mapping, offset, LLONG_MAX);
1601 	if (ret)
1602 		return ret;
1603 
1604 	pg_start = offset >> PAGE_SHIFT;
1605 	pg_end = (offset + len) >> PAGE_SHIFT;
1606 	delta = pg_end - pg_start;
1607 	idx = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);
1608 
1609 	/* avoid gc operation during block exchange */
1610 	f2fs_down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
1611 	f2fs_down_write(&F2FS_I(inode)->i_mmap_sem);
1612 	truncate_pagecache(inode, offset);
1613 
1614 	while (!ret && idx > pg_start) {
1615 		nr = idx - pg_start;
1616 		if (nr > delta)
1617 			nr = delta;
1618 		idx -= nr;
1619 
1620 		f2fs_lock_op(sbi);
1621 		f2fs_drop_extent_tree(inode);
1622 
1623 		ret = __exchange_data_block(inode, inode, idx,
1624 					idx + delta, nr, false);
1625 		f2fs_unlock_op(sbi);
1626 	}
1627 	f2fs_up_write(&F2FS_I(inode)->i_mmap_sem);
1628 	f2fs_up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
1629 
1630 	/* write out all moved pages, if possible */
1631 	f2fs_down_write(&F2FS_I(inode)->i_mmap_sem);
1632 	filemap_write_and_wait_range(inode->i_mapping, offset, LLONG_MAX);
1633 	truncate_pagecache(inode, offset);
1634 	f2fs_up_write(&F2FS_I(inode)->i_mmap_sem);
1635 
1636 	if (!ret)
1637 		f2fs_i_size_write(inode, new_size);
1638 	return ret;
1639 }
1640 
1641 static int expand_inode_data(struct inode *inode, loff_t offset,
1642 					loff_t len, int mode)
1643 {
1644 	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
1645 	struct f2fs_map_blocks map = { .m_next_pgofs = NULL,
1646 			.m_next_extent = NULL, .m_seg_type = NO_CHECK_TYPE,
1647 			.m_may_create = true };
1648 	pgoff_t pg_start, pg_end;
1649 	loff_t new_size = i_size_read(inode);
1650 	loff_t off_end;
1651 	block_t expanded = 0;
1652 	int err;
1653 
1654 	err = inode_newsize_ok(inode, (len + offset));
1655 	if (err)
1656 		return err;
1657 
1658 	err = f2fs_convert_inline_inode(inode);
1659 	if (err)
1660 		return err;
1661 
1662 	f2fs_balance_fs(sbi, true);
1663 
1664 	pg_start = ((unsigned long long)offset) >> PAGE_SHIFT;
1665 	pg_end = ((unsigned long long)offset + len) >> PAGE_SHIFT;
1666 	off_end = (offset + len) & (PAGE_SIZE - 1);
1667 
1668 	map.m_lblk = pg_start;
1669 	map.m_len = pg_end - pg_start;
1670 	if (off_end)
1671 		map.m_len++;
1672 
1673 	if (!map.m_len)
1674 		return 0;
1675 
1676 	if (f2fs_is_pinned_file(inode)) {
1677 		block_t sec_blks = BLKS_PER_SEC(sbi);
1678 		block_t sec_len = roundup(map.m_len, sec_blks);
1679 
1680 		map.m_len = sec_blks;
1681 next_alloc:
1682 		if (has_not_enough_free_secs(sbi, 0,
1683 			GET_SEC_FROM_SEG(sbi, overprovision_segments(sbi)))) {
1684 			f2fs_down_write(&sbi->gc_lock);
1685 			err = f2fs_gc(sbi, true, false, false, NULL_SEGNO);
1686 			if (err && err != -ENODATA && err != -EAGAIN)
1687 				goto out_err;
1688 		}
1689 
1690 		f2fs_down_write(&sbi->pin_sem);
1691 
1692 		f2fs_lock_op(sbi);
1693 		f2fs_allocate_new_section(sbi, CURSEG_COLD_DATA_PINNED, false);
1694 		f2fs_unlock_op(sbi);
1695 
1696 		map.m_seg_type = CURSEG_COLD_DATA_PINNED;
1697 		err = f2fs_map_blocks(inode, &map, 1, F2FS_GET_BLOCK_PRE_DIO);
1698 
1699 		f2fs_up_write(&sbi->pin_sem);
1700 
1701 		expanded += map.m_len;
1702 		sec_len -= map.m_len;
1703 		map.m_lblk += map.m_len;
1704 		if (!err && sec_len)
1705 			goto next_alloc;
1706 
1707 		map.m_len = expanded;
1708 	} else {
1709 		err = f2fs_map_blocks(inode, &map, 1, F2FS_GET_BLOCK_PRE_AIO);
1710 		expanded = map.m_len;
1711 	}
1712 out_err:
1713 	if (err) {
1714 		pgoff_t last_off;
1715 
1716 		if (!expanded)
1717 			return err;
1718 
1719 		last_off = pg_start + expanded - 1;
1720 
1721 		/* update new size to the failed position */
1722 		new_size = (last_off == pg_end) ? offset + len :
1723 					(loff_t)(last_off + 1) << PAGE_SHIFT;
1724 	} else {
1725 		new_size = ((loff_t)pg_end << PAGE_SHIFT) + off_end;
1726 	}
1727 
1728 	if (new_size > i_size_read(inode)) {
1729 		if (mode & FALLOC_FL_KEEP_SIZE)
1730 			file_set_keep_isize(inode);
1731 		else
1732 			f2fs_i_size_write(inode, new_size);
1733 	}
1734 
1735 	return err;
1736 }
1737 
1738 static long f2fs_fallocate(struct file *file, int mode,
1739 				loff_t offset, loff_t len)
1740 {
1741 	struct inode *inode = file_inode(file);
1742 	long ret = 0;
1743 
1744 	if (unlikely(f2fs_cp_error(F2FS_I_SB(inode))))
1745 		return -EIO;
1746 	if (!f2fs_is_checkpoint_ready(F2FS_I_SB(inode)))
1747 		return -ENOSPC;
1748 	if (!f2fs_is_compress_backend_ready(inode))
1749 		return -EOPNOTSUPP;
1750 
1751 	/* f2fs only supports ->fallocate for regular files */
1752 	if (!S_ISREG(inode->i_mode))
1753 		return -EINVAL;
1754 
1755 	if (IS_ENCRYPTED(inode) &&
1756 		(mode & (FALLOC_FL_COLLAPSE_RANGE | FALLOC_FL_INSERT_RANGE)))
1757 		return -EOPNOTSUPP;
1758 
1759 	if (f2fs_compressed_file(inode) &&
1760 		(mode & (FALLOC_FL_PUNCH_HOLE | FALLOC_FL_COLLAPSE_RANGE |
1761 			FALLOC_FL_ZERO_RANGE | FALLOC_FL_INSERT_RANGE)))
1762 		return -EOPNOTSUPP;
1763 
1764 	if (mode & ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE |
1765 			FALLOC_FL_COLLAPSE_RANGE | FALLOC_FL_ZERO_RANGE |
1766 			FALLOC_FL_INSERT_RANGE))
1767 		return -EOPNOTSUPP;
1768 
1769 	inode_lock(inode);
1770 
1771 	ret = file_modified(file);
1772 	if (ret)
1773 		goto out;
1774 
1775 	if (mode & FALLOC_FL_PUNCH_HOLE) {
1776 		if (offset >= inode->i_size)
1777 			goto out;
1778 
1779 		ret = punch_hole(inode, offset, len);
1780 	} else if (mode & FALLOC_FL_COLLAPSE_RANGE) {
1781 		ret = f2fs_collapse_range(inode, offset, len);
1782 	} else if (mode & FALLOC_FL_ZERO_RANGE) {
1783 		ret = f2fs_zero_range(inode, offset, len, mode);
1784 	} else if (mode & FALLOC_FL_INSERT_RANGE) {
1785 		ret = f2fs_insert_range(inode, offset, len);
1786 	} else {
1787 		ret = expand_inode_data(inode, offset, len, mode);
1788 	}
1789 
1790 	if (!ret) {
1791 		inode->i_mtime = inode->i_ctime = current_time(inode);
1792 		f2fs_mark_inode_dirty_sync(inode, false);
1793 		f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
1794 	}
1795 
1796 out:
1797 	inode_unlock(inode);
1798 
1799 	trace_f2fs_fallocate(inode, mode, offset, len, ret);
1800 	return ret;
1801 }
1802 
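The mode combinations accepted above correspond to fallocate(2) flags in userspace. A minimal sketch, not part of this file, with a hypothetical path; note that FALLOC_FL_PUNCH_HOLE must be paired with FALLOC_FL_KEEP_SIZE, and collapse ranges must be aligned to the f2fs block size as checked in f2fs_collapse_range():

#define _GNU_SOURCE
#include <fcntl.h>
#include <unistd.h>

int main(void)
{
	int fd = open("/mnt/f2fs/data.bin", O_RDWR);

	if (fd < 0)
		return 1;
	/* preallocate 1 MiB without changing i_size (expand_inode_data) */
	fallocate(fd, FALLOC_FL_KEEP_SIZE, 0, 1 << 20);
	/* punch a 4 KiB hole at 64 KiB (punch_hole) */
	fallocate(fd, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
		  64 << 10, 4 << 10);
	/* remove a block-aligned 4 KiB range, shifting later data down */
	fallocate(fd, FALLOC_FL_COLLAPSE_RANGE, 128 << 10, 4 << 10);
	close(fd);
	return 0;
}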
1803 static int f2fs_release_file(struct inode *inode, struct file *filp)
1804 {
1805 	/*
1806 	 * f2fs_release_file() is called on every close. So we must not
1807 	 * drop any in-memory pages on a close issued by another process.
1808 	 */
1809 	if (!(filp->f_mode & FMODE_WRITE) ||
1810 			atomic_read(&inode->i_writecount) != 1)
1811 		return 0;
1812 
1813 	/* any remaining atomic pages should be discarded */
1814 	if (f2fs_is_atomic_file(inode))
1815 		f2fs_drop_inmem_pages(inode);
1816 	if (f2fs_is_volatile_file(inode)) {
1817 		set_inode_flag(inode, FI_DROP_CACHE);
1818 		filemap_fdatawrite(inode->i_mapping);
1819 		clear_inode_flag(inode, FI_DROP_CACHE);
1820 		clear_inode_flag(inode, FI_VOLATILE_FILE);
1821 		stat_dec_volatile_write(inode);
1822 	}
1823 	return 0;
1824 }
1825 
1826 static int f2fs_file_flush(struct file *file, fl_owner_t id)
1827 {
1828 	struct inode *inode = file_inode(file);
1829 
1830 	/*
1831 	 * If the process doing a transaction crashed, we should roll back.
1832 	 * Otherwise, other readers/writers can see a corrupted database
1833 	 * until all the writers close the file. Since this must be done
1834 	 * before the file lock is dropped, it has to happen in ->flush.
1835 	 */
1836 	if (f2fs_is_atomic_file(inode) &&
1837 			F2FS_I(inode)->inmem_task == current)
1838 		f2fs_drop_inmem_pages(inode);
1839 	return 0;
1840 }
1841 
1842 static int f2fs_setflags_common(struct inode *inode, u32 iflags, u32 mask)
1843 {
1844 	struct f2fs_inode_info *fi = F2FS_I(inode);
1845 	u32 masked_flags = fi->i_flags & mask;
1846 
1847 	/* mask can be shrunk by flags_valid selector */
1848 	iflags &= mask;
1849 
1850 	/* Is it a quota file? Do not allow the user to mess with it */
1851 	if (IS_NOQUOTA(inode))
1852 		return -EPERM;
1853 
1854 	if ((iflags ^ masked_flags) & F2FS_CASEFOLD_FL) {
1855 		if (!f2fs_sb_has_casefold(F2FS_I_SB(inode)))
1856 			return -EOPNOTSUPP;
1857 		if (!f2fs_empty_dir(inode))
1858 			return -ENOTEMPTY;
1859 	}
1860 
1861 	if (iflags & (F2FS_COMPR_FL | F2FS_NOCOMP_FL)) {
1862 		if (!f2fs_sb_has_compression(F2FS_I_SB(inode)))
1863 			return -EOPNOTSUPP;
1864 		if ((iflags & F2FS_COMPR_FL) && (iflags & F2FS_NOCOMP_FL))
1865 			return -EINVAL;
1866 	}
1867 
1868 	if ((iflags ^ masked_flags) & F2FS_COMPR_FL) {
1869 		if (masked_flags & F2FS_COMPR_FL) {
1870 			if (!f2fs_disable_compressed_file(inode))
1871 				return -EINVAL;
1872 		} else {
1873 			if (!f2fs_may_compress(inode))
1874 				return -EINVAL;
1875 			if (S_ISREG(inode->i_mode) && inode->i_size)
1876 				return -EINVAL;
1877 			if (set_compress_context(inode))
1878 				return -EOPNOTSUPP;
1879 		}
1880 	}
1881 
1882 	fi->i_flags = iflags | (fi->i_flags & ~mask);
1883 	f2fs_bug_on(F2FS_I_SB(inode), (fi->i_flags & F2FS_COMPR_FL) &&
1884 					(fi->i_flags & F2FS_NOCOMP_FL));
1885 
1886 	if (fi->i_flags & F2FS_PROJINHERIT_FL)
1887 		set_inode_flag(inode, FI_PROJ_INHERIT);
1888 	else
1889 		clear_inode_flag(inode, FI_PROJ_INHERIT);
1890 
1891 	inode->i_ctime = current_time(inode);
1892 	f2fs_set_inode_flags(inode);
1893 	f2fs_mark_inode_dirty_sync(inode, true);
1894 	return 0;
1895 }
1896 
1897 /* FS_IOC_GETFLAGS and FS_IOC_SETFLAGS support */
1898 
1899 /*
1900  * To make a new on-disk f2fs i_flag gettable via FS_IOC_GETFLAGS, add an entry
1901  * for it to f2fs_fsflags_map[], and add its FS_*_FL equivalent to
1902  * F2FS_GETTABLE_FS_FL.  To also make it settable via FS_IOC_SETFLAGS, also add
1903  * its FS_*_FL equivalent to F2FS_SETTABLE_FS_FL.
1904  */
1905 
1906 static const struct {
1907 	u32 iflag;
1908 	u32 fsflag;
1909 } f2fs_fsflags_map[] = {
1910 	{ F2FS_COMPR_FL,	FS_COMPR_FL },
1911 	{ F2FS_SYNC_FL,		FS_SYNC_FL },
1912 	{ F2FS_IMMUTABLE_FL,	FS_IMMUTABLE_FL },
1913 	{ F2FS_APPEND_FL,	FS_APPEND_FL },
1914 	{ F2FS_NODUMP_FL,	FS_NODUMP_FL },
1915 	{ F2FS_NOATIME_FL,	FS_NOATIME_FL },
1916 	{ F2FS_NOCOMP_FL,	FS_NOCOMP_FL },
1917 	{ F2FS_INDEX_FL,	FS_INDEX_FL },
1918 	{ F2FS_DIRSYNC_FL,	FS_DIRSYNC_FL },
1919 	{ F2FS_PROJINHERIT_FL,	FS_PROJINHERIT_FL },
1920 	{ F2FS_CASEFOLD_FL,	FS_CASEFOLD_FL },
1921 };
1922 
1923 #define F2FS_GETTABLE_FS_FL (		\
1924 		FS_COMPR_FL |		\
1925 		FS_SYNC_FL |		\
1926 		FS_IMMUTABLE_FL |	\
1927 		FS_APPEND_FL |		\
1928 		FS_NODUMP_FL |		\
1929 		FS_NOATIME_FL |		\
1930 		FS_NOCOMP_FL |		\
1931 		FS_INDEX_FL |		\
1932 		FS_DIRSYNC_FL |		\
1933 		FS_PROJINHERIT_FL |	\
1934 		FS_ENCRYPT_FL |		\
1935 		FS_INLINE_DATA_FL |	\
1936 		FS_NOCOW_FL |		\
1937 		FS_VERITY_FL |		\
1938 		FS_CASEFOLD_FL)
1939 
1940 #define F2FS_SETTABLE_FS_FL (		\
1941 		FS_COMPR_FL |		\
1942 		FS_SYNC_FL |		\
1943 		FS_IMMUTABLE_FL |	\
1944 		FS_APPEND_FL |		\
1945 		FS_NODUMP_FL |		\
1946 		FS_NOATIME_FL |		\
1947 		FS_NOCOMP_FL |		\
1948 		FS_DIRSYNC_FL |		\
1949 		FS_PROJINHERIT_FL |	\
1950 		FS_CASEFOLD_FL)
1951 
1952 /* Convert f2fs on-disk i_flags to FS_IOC_{GET,SET}FLAGS flags */
f2fs_iflags_to_fsflags(u32 iflags)1953 static inline u32 f2fs_iflags_to_fsflags(u32 iflags)
1954 {
1955 	u32 fsflags = 0;
1956 	int i;
1957 
1958 	for (i = 0; i < ARRAY_SIZE(f2fs_fsflags_map); i++)
1959 		if (iflags & f2fs_fsflags_map[i].iflag)
1960 			fsflags |= f2fs_fsflags_map[i].fsflag;
1961 
1962 	return fsflags;
1963 }
1964 
1965 /* Convert FS_IOC_{GET,SET}FLAGS flags to f2fs on-disk i_flags */
f2fs_fsflags_to_iflags(u32 fsflags)1966 static inline u32 f2fs_fsflags_to_iflags(u32 fsflags)
1967 {
1968 	u32 iflags = 0;
1969 	int i;
1970 
1971 	for (i = 0; i < ARRAY_SIZE(f2fs_fsflags_map); i++)
1972 		if (fsflags & f2fs_fsflags_map[i].fsflag)
1973 			iflags |= f2fs_fsflags_map[i].iflag;
1974 
1975 	return iflags;
1976 }
1977 
f2fs_ioc_getflags(struct file * filp,unsigned long arg)1978 static int f2fs_ioc_getflags(struct file *filp, unsigned long arg)
1979 {
1980 	struct inode *inode = file_inode(filp);
1981 	struct f2fs_inode_info *fi = F2FS_I(inode);
1982 	u32 fsflags = f2fs_iflags_to_fsflags(fi->i_flags);
1983 
1984 	if (IS_ENCRYPTED(inode))
1985 		fsflags |= FS_ENCRYPT_FL;
1986 	if (IS_VERITY(inode))
1987 		fsflags |= FS_VERITY_FL;
1988 	if (f2fs_has_inline_data(inode) || f2fs_has_inline_dentry(inode))
1989 		fsflags |= FS_INLINE_DATA_FL;
1990 	if (is_inode_flag_set(inode, FI_PIN_FILE))
1991 		fsflags |= FS_NOCOW_FL;
1992 
1993 	fsflags &= F2FS_GETTABLE_FS_FL;
1994 
1995 	return put_user(fsflags, (int __user *)arg);
1996 }
1997 
f2fs_ioc_setflags(struct file * filp,unsigned long arg)1998 static int f2fs_ioc_setflags(struct file *filp, unsigned long arg)
1999 {
2000 	struct inode *inode = file_inode(filp);
2001 	struct f2fs_inode_info *fi = F2FS_I(inode);
2002 	u32 fsflags, old_fsflags;
2003 	u32 iflags;
2004 	int ret;
2005 
2006 	if (!inode_owner_or_capable(inode))
2007 		return -EACCES;
2008 
2009 	if (get_user(fsflags, (int __user *)arg))
2010 		return -EFAULT;
2011 
2012 	if (fsflags & ~F2FS_GETTABLE_FS_FL)
2013 		return -EOPNOTSUPP;
2014 	fsflags &= F2FS_SETTABLE_FS_FL;
2015 
2016 	iflags = f2fs_fsflags_to_iflags(fsflags);
2017 	if (f2fs_mask_flags(inode->i_mode, iflags) != iflags)
2018 		return -EOPNOTSUPP;
2019 
2020 	ret = mnt_want_write_file(filp);
2021 	if (ret)
2022 		return ret;
2023 
2024 	inode_lock(inode);
2025 
2026 	old_fsflags = f2fs_iflags_to_fsflags(fi->i_flags);
2027 	ret = vfs_ioc_setflags_prepare(inode, old_fsflags, fsflags);
2028 	if (ret)
2029 		goto out;
2030 
2031 	ret = f2fs_setflags_common(inode, iflags,
2032 			f2fs_fsflags_to_iflags(F2FS_SETTABLE_FS_FL));
2033 out:
2034 	inode_unlock(inode);
2035 	mnt_drop_write_file(filp);
2036 	return ret;
2037 }
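
/*
 * Illustrative userspace sketch (not part of this file): FS_IOC_SETFLAGS is
 * read-modify-write from the caller's side, so the usual pattern is to fetch
 * the current flags first. Assuming a file on a compression-enabled f2fs:
 *
 *	#include <sys/ioctl.h>
 *	#include <linux/fs.h>	// FS_IOC_GETFLAGS/SETFLAGS, FS_COMPR_FL
 *
 *	unsigned int attr;
 *	ioctl(fd, FS_IOC_GETFLAGS, &attr);
 *	attr |= FS_COMPR_FL;		// rejected above unless the file is empty
 *	ioctl(fd, FS_IOC_SETFLAGS, &attr);
 */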
2038 
f2fs_ioc_getversion(struct file * filp,unsigned long arg)2039 static int f2fs_ioc_getversion(struct file *filp, unsigned long arg)
2040 {
2041 	struct inode *inode = file_inode(filp);
2042 
2043 	return put_user(inode->i_generation, (int __user *)arg);
2044 }
2045 
f2fs_ioc_start_atomic_write(struct file * filp)2046 static int f2fs_ioc_start_atomic_write(struct file *filp)
2047 {
2048 	struct inode *inode = file_inode(filp);
2049 	struct f2fs_inode_info *fi = F2FS_I(inode);
2050 	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
2051 	int ret;
2052 
2053 	if (!inode_owner_or_capable(inode))
2054 		return -EACCES;
2055 
2056 	if (!S_ISREG(inode->i_mode))
2057 		return -EINVAL;
2058 
2059 	if (filp->f_flags & O_DIRECT)
2060 		return -EINVAL;
2061 
2062 	ret = mnt_want_write_file(filp);
2063 	if (ret)
2064 		return ret;
2065 
2066 	inode_lock(inode);
2067 
2068 	if (!f2fs_disable_compressed_file(inode)) {
2069 		ret = -EINVAL;
2070 		goto out;
2071 	}
2072 
2073 	if (f2fs_is_atomic_file(inode)) {
2074 		if (is_inode_flag_set(inode, FI_ATOMIC_REVOKE_REQUEST))
2075 			ret = -EINVAL;
2076 		goto out;
2077 	}
2078 
2079 	ret = f2fs_convert_inline_inode(inode);
2080 	if (ret)
2081 		goto out;
2082 
2083 	f2fs_down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
2084 
2085 	/*
2086 	 * Should wait for end_io so that F2FS_WB_CP_DATA is counted
2087 	 * correctly by f2fs_is_atomic_file.
2088 	 */
2089 	if (get_dirty_pages(inode))
2090 		f2fs_warn(F2FS_I_SB(inode), "Unexpected flush for atomic writes: ino=%lu, npages=%u",
2091 			  inode->i_ino, get_dirty_pages(inode));
2092 	ret = filemap_write_and_wait_range(inode->i_mapping, 0, LLONG_MAX);
2093 	if (ret) {
2094 		f2fs_up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
2095 		goto out;
2096 	}
2097 
2098 	spin_lock(&sbi->inode_lock[ATOMIC_FILE]);
2099 	if (list_empty(&fi->inmem_ilist))
2100 		list_add_tail(&fi->inmem_ilist, &sbi->inode_list[ATOMIC_FILE]);
2101 	sbi->atomic_files++;
2102 	spin_unlock(&sbi->inode_lock[ATOMIC_FILE]);
2103 
2104 	/* add the inode to inmem_list first, then set the atomic-file flag */
2105 	set_inode_flag(inode, FI_ATOMIC_FILE);
2106 	clear_inode_flag(inode, FI_ATOMIC_REVOKE_REQUEST);
2107 	f2fs_up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
2108 
2109 	f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
2110 	F2FS_I(inode)->inmem_task = current;
2111 	stat_update_max_atomic_write(inode);
2112 out:
2113 	inode_unlock(inode);
2114 	mnt_drop_write_file(filp);
2115 	return ret;
2116 }
2117 
f2fs_ioc_commit_atomic_write(struct file * filp)2118 static int f2fs_ioc_commit_atomic_write(struct file *filp)
2119 {
2120 	struct inode *inode = file_inode(filp);
2121 	int ret;
2122 
2123 	if (!inode_owner_or_capable(inode))
2124 		return -EACCES;
2125 
2126 	ret = mnt_want_write_file(filp);
2127 	if (ret)
2128 		return ret;
2129 
2130 	f2fs_balance_fs(F2FS_I_SB(inode), true);
2131 
2132 	inode_lock(inode);
2133 
2134 	if (f2fs_is_volatile_file(inode)) {
2135 		ret = -EINVAL;
2136 		goto err_out;
2137 	}
2138 
2139 	if (f2fs_is_atomic_file(inode)) {
2140 		ret = f2fs_commit_inmem_pages(inode);
2141 		if (ret)
2142 			goto err_out;
2143 
2144 		ret = f2fs_do_sync_file(filp, 0, LLONG_MAX, 0, true);
2145 		if (!ret)
2146 			f2fs_drop_inmem_pages(inode);
2147 	} else {
2148 		ret = f2fs_do_sync_file(filp, 0, LLONG_MAX, 1, false);
2149 	}
2150 err_out:
2151 	if (is_inode_flag_set(inode, FI_ATOMIC_REVOKE_REQUEST)) {
2152 		clear_inode_flag(inode, FI_ATOMIC_REVOKE_REQUEST);
2153 		ret = -EINVAL;
2154 	}
2155 	inode_unlock(inode);
2156 	mnt_drop_write_file(filp);
2157 	return ret;
2158 }
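
/*
 * Illustrative userspace sketch (not part of this file): the start/commit
 * pair above implements the database transaction protocol used by SQLite on
 * Android. A writer brackets its buffered writes like so:
 *
 *	#include <sys/ioctl.h>
 *	#include <linux/f2fs.h>	// F2FS_IOC_{START,COMMIT}_ATOMIC_WRITE
 *
 *	ioctl(fd, F2FS_IOC_START_ATOMIC_WRITE);
 *	pwrite(fd, buf, len, off);	// staged as in-memory pages, not on disk yet
 *	ioctl(fd, F2FS_IOC_COMMIT_ATOMIC_WRITE);	// flush + fsync, all or nothing
 *
 * If the writer crashes before committing, the ->flush handler above
 * (f2fs_file_flush) drops the staged pages, rolling the file back.
 */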
2159 
f2fs_ioc_start_volatile_write(struct file * filp)2160 static int f2fs_ioc_start_volatile_write(struct file *filp)
2161 {
2162 	struct inode *inode = file_inode(filp);
2163 	int ret;
2164 
2165 	if (!inode_owner_or_capable(inode))
2166 		return -EACCES;
2167 
2168 	if (!S_ISREG(inode->i_mode))
2169 		return -EINVAL;
2170 
2171 	ret = mnt_want_write_file(filp);
2172 	if (ret)
2173 		return ret;
2174 
2175 	inode_lock(inode);
2176 
2177 	if (f2fs_is_volatile_file(inode))
2178 		goto out;
2179 
2180 	ret = f2fs_convert_inline_inode(inode);
2181 	if (ret)
2182 		goto out;
2183 
2184 	stat_inc_volatile_write(inode);
2185 	stat_update_max_volatile_write(inode);
2186 
2187 	set_inode_flag(inode, FI_VOLATILE_FILE);
2188 	f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
2189 out:
2190 	inode_unlock(inode);
2191 	mnt_drop_write_file(filp);
2192 	return ret;
2193 }
2194 
f2fs_ioc_release_volatile_write(struct file * filp)2195 static int f2fs_ioc_release_volatile_write(struct file *filp)
2196 {
2197 	struct inode *inode = file_inode(filp);
2198 	int ret;
2199 
2200 	if (!inode_owner_or_capable(inode))
2201 		return -EACCES;
2202 
2203 	ret = mnt_want_write_file(filp);
2204 	if (ret)
2205 		return ret;
2206 
2207 	inode_lock(inode);
2208 
2209 	if (!f2fs_is_volatile_file(inode))
2210 		goto out;
2211 
2212 	if (!f2fs_is_first_block_written(inode)) {
2213 		ret = truncate_partial_data_page(inode, 0, true);
2214 		goto out;
2215 	}
2216 
2217 	ret = punch_hole(inode, 0, F2FS_BLKSIZE);
2218 out:
2219 	inode_unlock(inode);
2220 	mnt_drop_write_file(filp);
2221 	return ret;
2222 }
2223 
f2fs_ioc_abort_volatile_write(struct file * filp)2224 static int f2fs_ioc_abort_volatile_write(struct file *filp)
2225 {
2226 	struct inode *inode = file_inode(filp);
2227 	int ret;
2228 
2229 	if (!inode_owner_or_capable(inode))
2230 		return -EACCES;
2231 
2232 	ret = mnt_want_write_file(filp);
2233 	if (ret)
2234 		return ret;
2235 
2236 	inode_lock(inode);
2237 
2238 	if (f2fs_is_atomic_file(inode))
2239 		f2fs_drop_inmem_pages(inode);
2240 	if (f2fs_is_volatile_file(inode)) {
2241 		clear_inode_flag(inode, FI_VOLATILE_FILE);
2242 		stat_dec_volatile_write(inode);
2243 		ret = f2fs_do_sync_file(filp, 0, LLONG_MAX, 0, true);
2244 	}
2245 
2246 	clear_inode_flag(inode, FI_ATOMIC_REVOKE_REQUEST);
2247 
2248 	inode_unlock(inode);
2249 
2250 	mnt_drop_write_file(filp);
2251 	f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
2252 	return ret;
2253 }
2254 
f2fs_ioc_shutdown(struct file * filp,unsigned long arg)2255 static int f2fs_ioc_shutdown(struct file *filp, unsigned long arg)
2256 {
2257 	struct inode *inode = file_inode(filp);
2258 	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
2259 	struct super_block *sb = sbi->sb;
2260 	__u32 in;
2261 	int ret = 0;
2262 
2263 	if (!capable(CAP_SYS_ADMIN))
2264 		return -EPERM;
2265 
2266 	if (get_user(in, (__u32 __user *)arg))
2267 		return -EFAULT;
2268 
2269 	if (in != F2FS_GOING_DOWN_FULLSYNC) {
2270 		ret = mnt_want_write_file(filp);
2271 		if (ret) {
2272 			if (ret == -EROFS) {
2273 				ret = 0;
2274 				f2fs_stop_checkpoint(sbi, false,
2275 						STOP_CP_REASON_SHUTDOWN);
2276 				set_sbi_flag(sbi, SBI_IS_SHUTDOWN);
2277 				trace_f2fs_shutdown(sbi, in, ret);
2278 			}
2279 			return ret;
2280 		}
2281 	}
2282 
2283 	switch (in) {
2284 	case F2FS_GOING_DOWN_FULLSYNC:
2285 		ret = freeze_bdev(sb->s_bdev);
2286 		if (ret)
2287 			goto out;
2288 		f2fs_stop_checkpoint(sbi, false, STOP_CP_REASON_SHUTDOWN);
2289 		set_sbi_flag(sbi, SBI_IS_SHUTDOWN);
2290 		thaw_bdev(sb->s_bdev);
2291 		break;
2292 	case F2FS_GOING_DOWN_METASYNC:
2293 		/* do checkpoint only */
2294 		ret = f2fs_sync_fs(sb, 1);
2295 		if (ret)
2296 			goto out;
2297 		f2fs_stop_checkpoint(sbi, false, STOP_CP_REASON_SHUTDOWN);
2298 		set_sbi_flag(sbi, SBI_IS_SHUTDOWN);
2299 		break;
2300 	case F2FS_GOING_DOWN_NOSYNC:
2301 		f2fs_stop_checkpoint(sbi, false, STOP_CP_REASON_SHUTDOWN);
2302 		set_sbi_flag(sbi, SBI_IS_SHUTDOWN);
2303 		break;
2304 	case F2FS_GOING_DOWN_METAFLUSH:
2305 		f2fs_sync_meta_pages(sbi, META, LONG_MAX, FS_META_IO);
2306 		f2fs_stop_checkpoint(sbi, false, STOP_CP_REASON_SHUTDOWN);
2307 		set_sbi_flag(sbi, SBI_IS_SHUTDOWN);
2308 		break;
2309 	case F2FS_GOING_DOWN_NEED_FSCK:
2310 		set_sbi_flag(sbi, SBI_NEED_FSCK);
2311 		set_sbi_flag(sbi, SBI_CP_DISABLED_QUICK);
2312 		set_sbi_flag(sbi, SBI_IS_DIRTY);
2313 		/* do checkpoint only */
2314 		ret = f2fs_sync_fs(sb, 1);
2315 		goto out;
2316 	default:
2317 		ret = -EINVAL;
2318 		goto out;
2319 	}
2320 
2321 	f2fs_stop_gc_thread(sbi);
2322 	f2fs_stop_discard_thread(sbi);
2323 
2324 	f2fs_drop_discard_cmd(sbi);
2325 	clear_opt(sbi, DISCARD);
2326 
2327 	f2fs_update_time(sbi, REQ_TIME);
2328 out:
2329 	if (in != F2FS_GOING_DOWN_FULLSYNC)
2330 		mnt_drop_write_file(filp);
2331 
2332 	trace_f2fs_shutdown(sbi, in, ret);
2333 
2334 	return ret;
2335 }
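
/*
 * Illustrative userspace sketch (not part of this file): xfstests and
 * recovery tooling exercise this path via F2FS_IOC_SHUTDOWN. For instance,
 * to checkpoint once and then freeze all further updates:
 *
 *	#include <sys/ioctl.h>
 *	#include <linux/f2fs.h>	// F2FS_IOC_SHUTDOWN, F2FS_GOING_DOWN_*
 *
 *	__u32 how = F2FS_GOING_DOWN_METASYNC;
 *	ioctl(fd, F2FS_IOC_SHUTDOWN, &how);	// requires CAP_SYS_ADMIN
 */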
2336 
f2fs_ioc_fitrim(struct file * filp,unsigned long arg)2337 static int f2fs_ioc_fitrim(struct file *filp, unsigned long arg)
2338 {
2339 	struct inode *inode = file_inode(filp);
2340 	struct super_block *sb = inode->i_sb;
2341 	struct request_queue *q = bdev_get_queue(sb->s_bdev);
2342 	struct fstrim_range range;
2343 	int ret;
2344 
2345 	if (!capable(CAP_SYS_ADMIN))
2346 		return -EPERM;
2347 
2348 	if (!f2fs_hw_support_discard(F2FS_SB(sb)))
2349 		return -EOPNOTSUPP;
2350 
2351 	if (copy_from_user(&range, (struct fstrim_range __user *)arg,
2352 				sizeof(range)))
2353 		return -EFAULT;
2354 
2355 	ret = mnt_want_write_file(filp);
2356 	if (ret)
2357 		return ret;
2358 
2359 	range.minlen = max((unsigned int)range.minlen,
2360 				q->limits.discard_granularity);
2361 	ret = f2fs_trim_fs(F2FS_SB(sb), &range);
2362 	mnt_drop_write_file(filp);
2363 	if (ret < 0)
2364 		return ret;
2365 
2366 	if (copy_to_user((struct fstrim_range __user *)arg, &range,
2367 				sizeof(range)))
2368 		return -EFAULT;
2369 	f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
2370 	return 0;
2371 }
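
/*
 * Illustrative userspace sketch (not part of this file): this is the backend
 * of fstrim(8). The kernel clamps range.minlen to the device's discard
 * granularity and rewrites range.len to the number of bytes actually trimmed:
 *
 *	#include <sys/ioctl.h>
 *	#include <linux/fs.h>	// FITRIM, struct fstrim_range
 *
 *	struct fstrim_range range = {
 *		.start = 0,
 *		.len = (__u64)-1,	// whole filesystem
 *		.minlen = 0,
 *	};
 *	ioctl(fd, FITRIM, &range);
 */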
2372 
uuid_is_nonzero(__u8 u[16])2373 static bool uuid_is_nonzero(__u8 u[16])
2374 {
2375 	int i;
2376 
2377 	for (i = 0; i < 16; i++)
2378 		if (u[i])
2379 			return true;
2380 	return false;
2381 }
2382 
f2fs_ioc_set_encryption_policy(struct file * filp,unsigned long arg)2383 static int f2fs_ioc_set_encryption_policy(struct file *filp, unsigned long arg)
2384 {
2385 	struct inode *inode = file_inode(filp);
2386 
2387 	if (!f2fs_sb_has_encrypt(F2FS_I_SB(inode)))
2388 		return -EOPNOTSUPP;
2389 
2390 	f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
2391 
2392 	return fscrypt_ioctl_set_policy(filp, (const void __user *)arg);
2393 }
2394 
f2fs_ioc_get_encryption_policy(struct file * filp,unsigned long arg)2395 static int f2fs_ioc_get_encryption_policy(struct file *filp, unsigned long arg)
2396 {
2397 	if (!f2fs_sb_has_encrypt(F2FS_I_SB(file_inode(filp))))
2398 		return -EOPNOTSUPP;
2399 	return fscrypt_ioctl_get_policy(filp, (void __user *)arg);
2400 }
2401 
f2fs_ioc_get_encryption_pwsalt(struct file * filp,unsigned long arg)2402 static int f2fs_ioc_get_encryption_pwsalt(struct file *filp, unsigned long arg)
2403 {
2404 	struct inode *inode = file_inode(filp);
2405 	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
2406 	int err;
2407 
2408 	if (!f2fs_sb_has_encrypt(sbi))
2409 		return -EOPNOTSUPP;
2410 
2411 	err = mnt_want_write_file(filp);
2412 	if (err)
2413 		return err;
2414 
2415 	f2fs_down_write(&sbi->sb_lock);
2416 
2417 	if (uuid_is_nonzero(sbi->raw_super->encrypt_pw_salt))
2418 		goto got_it;
2419 
2420 	/* update superblock with uuid */
2421 	generate_random_uuid(sbi->raw_super->encrypt_pw_salt);
2422 
2423 	err = f2fs_commit_super(sbi, false);
2424 	if (err) {
2425 		/* undo new data */
2426 		memset(sbi->raw_super->encrypt_pw_salt, 0, 16);
2427 		goto out_err;
2428 	}
2429 got_it:
2430 	if (copy_to_user((__u8 __user *)arg, sbi->raw_super->encrypt_pw_salt,
2431 									16))
2432 		err = -EFAULT;
2433 out_err:
2434 	f2fs_up_write(&sbi->sb_lock);
2435 	mnt_drop_write_file(filp);
2436 	return err;
2437 }
2438 
f2fs_ioc_get_encryption_policy_ex(struct file * filp,unsigned long arg)2439 static int f2fs_ioc_get_encryption_policy_ex(struct file *filp,
2440 					     unsigned long arg)
2441 {
2442 	if (!f2fs_sb_has_encrypt(F2FS_I_SB(file_inode(filp))))
2443 		return -EOPNOTSUPP;
2444 
2445 	return fscrypt_ioctl_get_policy_ex(filp, (void __user *)arg);
2446 }
2447 
f2fs_ioc_add_encryption_key(struct file * filp,unsigned long arg)2448 static int f2fs_ioc_add_encryption_key(struct file *filp, unsigned long arg)
2449 {
2450 	if (!f2fs_sb_has_encrypt(F2FS_I_SB(file_inode(filp))))
2451 		return -EOPNOTSUPP;
2452 
2453 	return fscrypt_ioctl_add_key(filp, (void __user *)arg);
2454 }
2455 
f2fs_ioc_remove_encryption_key(struct file * filp,unsigned long arg)2456 static int f2fs_ioc_remove_encryption_key(struct file *filp, unsigned long arg)
2457 {
2458 	if (!f2fs_sb_has_encrypt(F2FS_I_SB(file_inode(filp))))
2459 		return -EOPNOTSUPP;
2460 
2461 	return fscrypt_ioctl_remove_key(filp, (void __user *)arg);
2462 }
2463 
f2fs_ioc_remove_encryption_key_all_users(struct file * filp,unsigned long arg)2464 static int f2fs_ioc_remove_encryption_key_all_users(struct file *filp,
2465 						    unsigned long arg)
2466 {
2467 	if (!f2fs_sb_has_encrypt(F2FS_I_SB(file_inode(filp))))
2468 		return -EOPNOTSUPP;
2469 
2470 	return fscrypt_ioctl_remove_key_all_users(filp, (void __user *)arg);
2471 }
2472 
f2fs_ioc_get_encryption_key_status(struct file * filp,unsigned long arg)2473 static int f2fs_ioc_get_encryption_key_status(struct file *filp,
2474 					      unsigned long arg)
2475 {
2476 	if (!f2fs_sb_has_encrypt(F2FS_I_SB(file_inode(filp))))
2477 		return -EOPNOTSUPP;
2478 
2479 	return fscrypt_ioctl_get_key_status(filp, (void __user *)arg);
2480 }
2481 
f2fs_ioc_get_encryption_nonce(struct file * filp,unsigned long arg)2482 static int f2fs_ioc_get_encryption_nonce(struct file *filp, unsigned long arg)
2483 {
2484 	if (!f2fs_sb_has_encrypt(F2FS_I_SB(file_inode(filp))))
2485 		return -EOPNOTSUPP;
2486 
2487 	return fscrypt_ioctl_get_nonce(filp, (void __user *)arg);
2488 }
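
/*
 * Illustrative userspace sketch (not part of this file): the wrappers above
 * delegate to fscrypt. Setting a v2 policy on an empty directory, assuming
 * key_id was obtained earlier from FS_IOC_ADD_ENCRYPTION_KEY:
 *
 *	#include <sys/ioctl.h>
 *	#include <string.h>
 *	#include <linux/fscrypt.h>	// FS_IOC_SET_ENCRYPTION_POLICY, FSCRYPT_*
 *
 *	struct fscrypt_policy_v2 pol = {
 *		.version = FSCRYPT_POLICY_V2,
 *		.contents_encryption_mode = FSCRYPT_MODE_AES_256_XTS,
 *		.filenames_encryption_mode = FSCRYPT_MODE_AES_256_CTS,
 *	};
 *	memcpy(pol.master_key_identifier, key_id,
 *	       FSCRYPT_KEY_IDENTIFIER_SIZE);
 *	ioctl(dirfd, FS_IOC_SET_ENCRYPTION_POLICY, &pol);
 */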
2489 
f2fs_ioc_gc(struct file * filp,unsigned long arg)2490 static int f2fs_ioc_gc(struct file *filp, unsigned long arg)
2491 {
2492 	struct inode *inode = file_inode(filp);
2493 	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
2494 	__u32 sync;
2495 	int ret;
2496 
2497 	if (!capable(CAP_SYS_ADMIN))
2498 		return -EPERM;
2499 
2500 	if (get_user(sync, (__u32 __user *)arg))
2501 		return -EFAULT;
2502 
2503 	if (f2fs_readonly(sbi->sb))
2504 		return -EROFS;
2505 
2506 	ret = mnt_want_write_file(filp);
2507 	if (ret)
2508 		return ret;
2509 
2510 	if (!sync) {
2511 		if (!f2fs_down_write_trylock(&sbi->gc_lock)) {
2512 			ret = -EBUSY;
2513 			goto out;
2514 		}
2515 	} else {
2516 		f2fs_down_write(&sbi->gc_lock);
2517 	}
2518 
2519 	ret = f2fs_gc(sbi, sync, true, false, NULL_SEGNO);
2520 out:
2521 	mnt_drop_write_file(filp);
2522 	return ret;
2523 }
2524 
__f2fs_ioc_gc_range(struct file * filp,struct f2fs_gc_range * range)2525 static int __f2fs_ioc_gc_range(struct file *filp, struct f2fs_gc_range *range)
2526 {
2527 	struct f2fs_sb_info *sbi = F2FS_I_SB(file_inode(filp));
2528 	u64 end;
2529 	int ret;
2530 
2531 	if (!capable(CAP_SYS_ADMIN))
2532 		return -EPERM;
2533 	if (f2fs_readonly(sbi->sb))
2534 		return -EROFS;
2535 
2536 	end = range->start + range->len;
2537 	if (end < range->start || range->start < MAIN_BLKADDR(sbi) ||
2538 					end >= MAX_BLKADDR(sbi))
2539 		return -EINVAL;
2540 
2541 	ret = mnt_want_write_file(filp);
2542 	if (ret)
2543 		return ret;
2544 
2545 do_more:
2546 	if (!range->sync) {
2547 		if (!f2fs_down_write_trylock(&sbi->gc_lock)) {
2548 			ret = -EBUSY;
2549 			goto out;
2550 		}
2551 	} else {
2552 		f2fs_down_write(&sbi->gc_lock);
2553 	}
2554 
2555 	ret = f2fs_gc(sbi, range->sync, true, false,
2556 				GET_SEGNO(sbi, range->start));
2557 	if (ret) {
2558 		if (ret == -EBUSY)
2559 			ret = -EAGAIN;
2560 		goto out;
2561 	}
2562 	range->start += BLKS_PER_SEC(sbi);
2563 	if (range->start <= end)
2564 		goto do_more;
2565 out:
2566 	mnt_drop_write_file(filp);
2567 	return ret;
2568 }
2569 
f2fs_ioc_gc_range(struct file * filp,unsigned long arg)2570 static int f2fs_ioc_gc_range(struct file *filp, unsigned long arg)
2571 {
2572 	struct f2fs_gc_range range;
2573 
2574 	if (copy_from_user(&range, (struct f2fs_gc_range __user *)arg,
2575 							sizeof(range)))
2576 		return -EFAULT;
2577 	return __f2fs_ioc_gc_range(filp, &range);
2578 }
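
/*
 * Illustrative userspace sketch (not part of this file): the two entry
 * points above expose foreground GC. A synchronous sweep over a block range
 * could look like this (start/len are in blocks; the loop above advances one
 * section at a time). start_blk and nr_blks are hypothetical caller values:
 *
 *	#include <sys/ioctl.h>
 *	#include <linux/f2fs.h>	// F2FS_IOC_GARBAGE_COLLECT_RANGE
 *
 *	struct f2fs_gc_range range = {
 *		.sync = 1,		// block on gc_lock instead of -EBUSY
 *		.start = start_blk,
 *		.len = nr_blks,
 *	};
 *	ioctl(fd, F2FS_IOC_GARBAGE_COLLECT_RANGE, &range);
 */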
2579 
f2fs_ioc_write_checkpoint(struct file * filp,unsigned long arg)2580 static int f2fs_ioc_write_checkpoint(struct file *filp, unsigned long arg)
2581 {
2582 	struct inode *inode = file_inode(filp);
2583 	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
2584 	int ret;
2585 
2586 	if (!capable(CAP_SYS_ADMIN))
2587 		return -EPERM;
2588 
2589 	if (f2fs_readonly(sbi->sb))
2590 		return -EROFS;
2591 
2592 	if (unlikely(is_sbi_flag_set(sbi, SBI_CP_DISABLED))) {
2593 		f2fs_info(sbi, "Skipping Checkpoint. Checkpoints currently disabled.");
2594 		return -EINVAL;
2595 	}
2596 
2597 	ret = mnt_want_write_file(filp);
2598 	if (ret)
2599 		return ret;
2600 
2601 	ret = f2fs_sync_fs(sbi->sb, 1);
2602 
2603 	mnt_drop_write_file(filp);
2604 	return ret;
2605 }
2606 
f2fs_defragment_range(struct f2fs_sb_info * sbi,struct file * filp,struct f2fs_defragment * range)2607 static int f2fs_defragment_range(struct f2fs_sb_info *sbi,
2608 					struct file *filp,
2609 					struct f2fs_defragment *range)
2610 {
2611 	struct inode *inode = file_inode(filp);
2612 	struct f2fs_map_blocks map = { .m_next_extent = NULL,
2613 					.m_seg_type = NO_CHECK_TYPE,
2614 					.m_may_create = false };
2615 	struct extent_info ei = {};
2616 	pgoff_t pg_start, pg_end, next_pgofs;
2617 	unsigned int blk_per_seg = sbi->blocks_per_seg;
2618 	unsigned int total = 0, sec_num;
2619 	block_t blk_end = 0;
2620 	bool fragmented = false;
2621 	int err;
2622 
2623 	pg_start = range->start >> PAGE_SHIFT;
2624 	pg_end = (range->start + range->len) >> PAGE_SHIFT;
2625 
2626 	f2fs_balance_fs(sbi, true);
2627 
2628 	inode_lock(inode);
2629 
2630 	/* if in-place-update policy is enabled, don't waste time here */
2631 	set_inode_flag(inode, FI_OPU_WRITE);
2632 	if (f2fs_should_update_inplace(inode, NULL)) {
2633 		err = -EINVAL;
2634 		goto out;
2635 	}
2636 
2637 	/* writeback all dirty pages in the range */
2638 	err = filemap_write_and_wait_range(inode->i_mapping, range->start,
2639 						range->start + range->len - 1);
2640 	if (err)
2641 		goto out;
2642 
2643 	/*
2644 	 * look up mapping info in the extent cache; skip defragmenting if the
2645 	 * physical block addresses are contiguous.
2646 	 */
2647 	if (f2fs_lookup_read_extent_cache(inode, pg_start, &ei)) {
2648 		if (ei.fofs + ei.len >= pg_end)
2649 			goto out;
2650 	}
2651 
2652 	map.m_lblk = pg_start;
2653 	map.m_next_pgofs = &next_pgofs;
2654 
2655 	/*
2656 	 * look up mapping info in the dnode page cache; skip defragmenting if
2657 	 * all physical block addresses are contiguous, even if there are holes
2658 	 * in the logical blocks.
2659 	 */
2660 	while (map.m_lblk < pg_end) {
2661 		map.m_len = pg_end - map.m_lblk;
2662 		err = f2fs_map_blocks(inode, &map, 0, F2FS_GET_BLOCK_DEFAULT);
2663 		if (err)
2664 			goto out;
2665 
2666 		if (!(map.m_flags & F2FS_MAP_FLAGS)) {
2667 			map.m_lblk = next_pgofs;
2668 			continue;
2669 		}
2670 
2671 		if (blk_end && blk_end != map.m_pblk)
2672 			fragmented = true;
2673 
2674 		/* record the total count of blocks that we're going to move */
2675 		total += map.m_len;
2676 
2677 		blk_end = map.m_pblk + map.m_len;
2678 
2679 		map.m_lblk += map.m_len;
2680 	}
2681 
2682 	if (!fragmented) {
2683 		total = 0;
2684 		goto out;
2685 	}
2686 
2687 	sec_num = DIV_ROUND_UP(total, BLKS_PER_SEC(sbi));
2688 
2689 	/*
2690 	 * make sure there are enough free sections for LFS allocation; this
2691 	 * avoids running defragmentation in SSR mode when free sections are
2692 	 * being allocated intensively
2693 	 */
2694 	if (has_not_enough_free_secs(sbi, 0, sec_num)) {
2695 		err = -EAGAIN;
2696 		goto out;
2697 	}
2698 
2699 	map.m_lblk = pg_start;
2700 	map.m_len = pg_end - pg_start;
2701 	total = 0;
2702 
2703 	while (map.m_lblk < pg_end) {
2704 		pgoff_t idx;
2705 		int cnt = 0;
2706 
2707 do_map:
2708 		map.m_len = pg_end - map.m_lblk;
2709 		err = f2fs_map_blocks(inode, &map, 0, F2FS_GET_BLOCK_DEFAULT);
2710 		if (err)
2711 			goto clear_out;
2712 
2713 		if (!(map.m_flags & F2FS_MAP_FLAGS)) {
2714 			map.m_lblk = next_pgofs;
2715 			goto check;
2716 		}
2717 
2718 		set_inode_flag(inode, FI_SKIP_WRITES);
2719 
2720 		idx = map.m_lblk;
2721 		while (idx < map.m_lblk + map.m_len && cnt < blk_per_seg) {
2722 			struct page *page;
2723 
2724 			page = f2fs_get_lock_data_page(inode, idx, true);
2725 			if (IS_ERR(page)) {
2726 				err = PTR_ERR(page);
2727 				goto clear_out;
2728 			}
2729 
2730 			set_page_dirty(page);
2731 			f2fs_put_page(page, 1);
2732 
2733 			idx++;
2734 			cnt++;
2735 			total++;
2736 		}
2737 
2738 		map.m_lblk = idx;
2739 check:
2740 		if (map.m_lblk < pg_end && cnt < blk_per_seg)
2741 			goto do_map;
2742 
2743 		clear_inode_flag(inode, FI_SKIP_WRITES);
2744 
2745 		err = filemap_fdatawrite(inode->i_mapping);
2746 		if (err)
2747 			goto out;
2748 	}
2749 clear_out:
2750 	clear_inode_flag(inode, FI_SKIP_WRITES);
2751 out:
2752 	clear_inode_flag(inode, FI_OPU_WRITE);
2753 	inode_unlock(inode);
2754 	if (!err)
2755 		range->len = (u64)total << PAGE_SHIFT;
2756 	return err;
2757 }
2758 
f2fs_ioc_defragment(struct file * filp,unsigned long arg)2759 static int f2fs_ioc_defragment(struct file *filp, unsigned long arg)
2760 {
2761 	struct inode *inode = file_inode(filp);
2762 	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
2763 	struct f2fs_defragment range;
2764 	int err;
2765 
2766 	if (!capable(CAP_SYS_ADMIN))
2767 		return -EPERM;
2768 
2769 	if (!S_ISREG(inode->i_mode) || f2fs_is_atomic_file(inode))
2770 		return -EINVAL;
2771 
2772 	if (f2fs_readonly(sbi->sb))
2773 		return -EROFS;
2774 
2775 	if (copy_from_user(&range, (struct f2fs_defragment __user *)arg,
2776 							sizeof(range)))
2777 		return -EFAULT;
2778 
2779 	/* verify alignment of offset & size */
2780 	if (range.start & (F2FS_BLKSIZE - 1) || range.len & (F2FS_BLKSIZE - 1))
2781 		return -EINVAL;
2782 
2783 	if (unlikely((range.start + range.len) >> PAGE_SHIFT >
2784 					max_file_blocks(inode)))
2785 		return -EINVAL;
2786 
2787 	err = mnt_want_write_file(filp);
2788 	if (err)
2789 		return err;
2790 
2791 	err = f2fs_defragment_range(sbi, filp, &range);
2792 	mnt_drop_write_file(filp);
2793 
2794 	f2fs_update_time(sbi, REQ_TIME);
2795 	if (err < 0)
2796 		return err;
2797 
2798 	if (copy_to_user((struct f2fs_defragment __user *)arg, &range,
2799 							sizeof(range)))
2800 		return -EFAULT;
2801 
2802 	return 0;
2803 }
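
/*
 * Illustrative userspace sketch (not part of this file): offset and length
 * must be block-aligned, and on return range.len is rewritten to the number
 * of bytes actually marked for relocation. nbytes is a hypothetical
 * block-aligned caller value:
 *
 *	#include <sys/ioctl.h>
 *	#include <linux/f2fs.h>	// F2FS_IOC_DEFRAGMENT
 *
 *	struct f2fs_defragment df = {
 *		.start = 0,
 *		.len = nbytes,
 *	};
 *	ioctl(fd, F2FS_IOC_DEFRAGMENT, &df);	// requires CAP_SYS_ADMIN
 */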
2804 
f2fs_move_file_range(struct file * file_in,loff_t pos_in,struct file * file_out,loff_t pos_out,size_t len)2805 static int f2fs_move_file_range(struct file *file_in, loff_t pos_in,
2806 			struct file *file_out, loff_t pos_out, size_t len)
2807 {
2808 	struct inode *src = file_inode(file_in);
2809 	struct inode *dst = file_inode(file_out);
2810 	struct f2fs_sb_info *sbi = F2FS_I_SB(src);
2811 	size_t olen = len, dst_max_i_size = 0;
2812 	size_t dst_osize;
2813 	int ret;
2814 
2815 	if (file_in->f_path.mnt != file_out->f_path.mnt ||
2816 				src->i_sb != dst->i_sb)
2817 		return -EXDEV;
2818 
2819 	if (unlikely(f2fs_readonly(src->i_sb)))
2820 		return -EROFS;
2821 
2822 	if (!S_ISREG(src->i_mode) || !S_ISREG(dst->i_mode))
2823 		return -EINVAL;
2824 
2825 	if (IS_ENCRYPTED(src) || IS_ENCRYPTED(dst))
2826 		return -EOPNOTSUPP;
2827 
2828 	if (pos_out < 0 || pos_in < 0)
2829 		return -EINVAL;
2830 
2831 	if (src == dst) {
2832 		if (pos_in == pos_out)
2833 			return 0;
2834 		if (pos_out > pos_in && pos_out < pos_in + len)
2835 			return -EINVAL;
2836 	}
2837 
2838 	inode_lock(src);
2839 	if (src != dst) {
2840 		ret = -EBUSY;
2841 		if (!inode_trylock(dst))
2842 			goto out;
2843 	}
2844 
2845 	ret = -EINVAL;
2846 	if (pos_in + len > src->i_size || pos_in + len < pos_in)
2847 		goto out_unlock;
2848 	if (len == 0)
2849 		olen = len = src->i_size - pos_in;
2850 	if (pos_in + len == src->i_size)
2851 		len = ALIGN(src->i_size, F2FS_BLKSIZE) - pos_in;
2852 	if (len == 0) {
2853 		ret = 0;
2854 		goto out_unlock;
2855 	}
2856 
2857 	dst_osize = dst->i_size;
2858 	if (pos_out + olen > dst->i_size)
2859 		dst_max_i_size = pos_out + olen;
2860 
2861 	/* verify the end result is block aligned */
2862 	if (!IS_ALIGNED(pos_in, F2FS_BLKSIZE) ||
2863 			!IS_ALIGNED(pos_in + len, F2FS_BLKSIZE) ||
2864 			!IS_ALIGNED(pos_out, F2FS_BLKSIZE))
2865 		goto out_unlock;
2866 
2867 	ret = f2fs_convert_inline_inode(src);
2868 	if (ret)
2869 		goto out_unlock;
2870 
2871 	ret = f2fs_convert_inline_inode(dst);
2872 	if (ret)
2873 		goto out_unlock;
2874 
2875 	/* write out all dirty pages from offset */
2876 	ret = filemap_write_and_wait_range(src->i_mapping,
2877 					pos_in, pos_in + len);
2878 	if (ret)
2879 		goto out_unlock;
2880 
2881 	ret = filemap_write_and_wait_range(dst->i_mapping,
2882 					pos_out, pos_out + len);
2883 	if (ret)
2884 		goto out_unlock;
2885 
2886 	f2fs_balance_fs(sbi, true);
2887 
2888 	f2fs_down_write(&F2FS_I(src)->i_gc_rwsem[WRITE]);
2889 	if (src != dst) {
2890 		ret = -EBUSY;
2891 		if (!f2fs_down_write_trylock(&F2FS_I(dst)->i_gc_rwsem[WRITE]))
2892 			goto out_src;
2893 	}
2894 
2895 	f2fs_lock_op(sbi);
2896 	ret = __exchange_data_block(src, dst, pos_in >> F2FS_BLKSIZE_BITS,
2897 				pos_out >> F2FS_BLKSIZE_BITS,
2898 				len >> F2FS_BLKSIZE_BITS, false);
2899 
2900 	if (!ret) {
2901 		if (dst_max_i_size)
2902 			f2fs_i_size_write(dst, dst_max_i_size);
2903 		else if (dst_osize != dst->i_size)
2904 			f2fs_i_size_write(dst, dst_osize);
2905 	}
2906 	f2fs_unlock_op(sbi);
2907 
2908 	if (src != dst)
2909 		f2fs_up_write(&F2FS_I(dst)->i_gc_rwsem[WRITE]);
2910 out_src:
2911 	f2fs_up_write(&F2FS_I(src)->i_gc_rwsem[WRITE]);
2912 out_unlock:
2913 	if (src != dst)
2914 		inode_unlock(dst);
2915 out:
2916 	inode_unlock(src);
2917 	return ret;
2918 }
2919 
__f2fs_ioc_move_range(struct file * filp,struct f2fs_move_range * range)2920 static int __f2fs_ioc_move_range(struct file *filp,
2921 				struct f2fs_move_range *range)
2922 {
2923 	struct fd dst;
2924 	int err;
2925 
2926 	if (!(filp->f_mode & FMODE_READ) ||
2927 			!(filp->f_mode & FMODE_WRITE))
2928 		return -EBADF;
2929 
2930 	dst = fdget(range->dst_fd);
2931 	if (!dst.file)
2932 		return -EBADF;
2933 
2934 	if (!(dst.file->f_mode & FMODE_WRITE)) {
2935 		err = -EBADF;
2936 		goto err_out;
2937 	}
2938 
2939 	err = mnt_want_write_file(filp);
2940 	if (err)
2941 		goto err_out;
2942 
2943 	err = f2fs_move_file_range(filp, range->pos_in, dst.file,
2944 					range->pos_out, range->len);
2945 
2946 	mnt_drop_write_file(filp);
2947 err_out:
2948 	fdput(dst);
2949 	return err;
2950 }
2951 
f2fs_ioc_move_range(struct file * filp,unsigned long arg)2952 static int f2fs_ioc_move_range(struct file *filp, unsigned long arg)
2953 {
2954 	struct f2fs_move_range range;
2955 
2956 	if (copy_from_user(&range, (struct f2fs_move_range __user *)arg,
2957 							sizeof(range)))
2958 		return -EFAULT;
2959 	return __f2fs_ioc_move_range(filp, &range);
2960 }
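
/*
 * Illustrative userspace sketch (not part of this file): F2FS_IOC_MOVE_RANGE
 * exchanges block-aligned extents between two files on the same mount; the
 * source fd must be open read-write and the destination fd writable. dst_fd
 * is a hypothetical open descriptor on the same f2fs:
 *
 *	#include <sys/ioctl.h>
 *	#include <linux/f2fs.h>	// F2FS_IOC_MOVE_RANGE
 *
 *	struct f2fs_move_range mr = {
 *		.dst_fd = dst_fd,
 *		.pos_in = 0,
 *		.pos_out = 0,
 *		.len = 1 << 20,
 *	};
 *	ioctl(src_fd, F2FS_IOC_MOVE_RANGE, &mr);
 */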
2961 
f2fs_ioc_flush_device(struct file * filp,unsigned long arg)2962 static int f2fs_ioc_flush_device(struct file *filp, unsigned long arg)
2963 {
2964 	struct inode *inode = file_inode(filp);
2965 	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
2966 	struct sit_info *sm = SIT_I(sbi);
2967 	unsigned int start_segno = 0, end_segno = 0;
2968 	unsigned int dev_start_segno = 0, dev_end_segno = 0;
2969 	struct f2fs_flush_device range;
2970 	int ret;
2971 
2972 	if (!capable(CAP_SYS_ADMIN))
2973 		return -EPERM;
2974 
2975 	if (f2fs_readonly(sbi->sb))
2976 		return -EROFS;
2977 
2978 	if (unlikely(is_sbi_flag_set(sbi, SBI_CP_DISABLED)))
2979 		return -EINVAL;
2980 
2981 	if (copy_from_user(&range, (struct f2fs_flush_device __user *)arg,
2982 							sizeof(range)))
2983 		return -EFAULT;
2984 
2985 	if (!f2fs_is_multi_device(sbi) || sbi->s_ndevs - 1 <= range.dev_num ||
2986 			__is_large_section(sbi)) {
2987 		f2fs_warn(sbi, "Can't flush %u in %d for segs_per_sec %u != 1",
2988 			  range.dev_num, sbi->s_ndevs, sbi->segs_per_sec);
2989 		return -EINVAL;
2990 	}
2991 
2992 	ret = mnt_want_write_file(filp);
2993 	if (ret)
2994 		return ret;
2995 
2996 	if (range.dev_num != 0)
2997 		dev_start_segno = GET_SEGNO(sbi, FDEV(range.dev_num).start_blk);
2998 	dev_end_segno = GET_SEGNO(sbi, FDEV(range.dev_num).end_blk);
2999 
3000 	start_segno = sm->last_victim[FLUSH_DEVICE];
3001 	if (start_segno < dev_start_segno || start_segno >= dev_end_segno)
3002 		start_segno = dev_start_segno;
3003 	end_segno = min(start_segno + range.segments, dev_end_segno);
3004 
3005 	while (start_segno < end_segno) {
3006 		if (!f2fs_down_write_trylock(&sbi->gc_lock)) {
3007 			ret = -EBUSY;
3008 			goto out;
3009 		}
3010 		sm->last_victim[GC_CB] = end_segno + 1;
3011 		sm->last_victim[GC_GREEDY] = end_segno + 1;
3012 		sm->last_victim[ALLOC_NEXT] = end_segno + 1;
3013 		ret = f2fs_gc(sbi, true, true, true, start_segno);
3014 		if (ret == -EAGAIN)
3015 			ret = 0;
3016 		else if (ret < 0)
3017 			break;
3018 		start_segno++;
3019 	}
3020 out:
3021 	mnt_drop_write_file(filp);
3022 	return ret;
3023 }
3024 
f2fs_ioc_get_features(struct file * filp,unsigned long arg)3025 static int f2fs_ioc_get_features(struct file *filp, unsigned long arg)
3026 {
3027 	struct inode *inode = file_inode(filp);
3028 	u32 sb_feature = le32_to_cpu(F2FS_I_SB(inode)->raw_super->feature);
3029 
3030 	/* Must validate to set it with SQLite behavior in Android. */
3031 	sb_feature |= F2FS_FEATURE_ATOMIC_WRITE;
3032 
3033 	return put_user(sb_feature, (u32 __user *)arg);
3034 }
3035 
3036 #ifdef CONFIG_QUOTA
f2fs_transfer_project_quota(struct inode * inode,kprojid_t kprojid)3037 int f2fs_transfer_project_quota(struct inode *inode, kprojid_t kprojid)
3038 {
3039 	struct dquot *transfer_to[MAXQUOTAS] = {};
3040 	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
3041 	struct super_block *sb = sbi->sb;
3042 	int err = 0;
3043 
3044 	transfer_to[PRJQUOTA] = dqget(sb, make_kqid_projid(kprojid));
3045 	if (!IS_ERR(transfer_to[PRJQUOTA])) {
3046 		err = __dquot_transfer(inode, transfer_to);
3047 		if (err)
3048 			set_sbi_flag(sbi, SBI_QUOTA_NEED_REPAIR);
3049 		dqput(transfer_to[PRJQUOTA]);
3050 	}
3051 	return err;
3052 }
3053 
f2fs_ioc_setproject(struct file * filp,__u32 projid)3054 static int f2fs_ioc_setproject(struct file *filp, __u32 projid)
3055 {
3056 	struct inode *inode = file_inode(filp);
3057 	struct f2fs_inode_info *fi = F2FS_I(inode);
3058 	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
3059 	struct page *ipage;
3060 	kprojid_t kprojid;
3061 	int err;
3062 
3063 	if (!f2fs_sb_has_project_quota(sbi)) {
3064 		if (projid != F2FS_DEF_PROJID)
3065 			return -EOPNOTSUPP;
3066 		else
3067 			return 0;
3068 	}
3069 
3070 	if (!f2fs_has_extra_attr(inode))
3071 		return -EOPNOTSUPP;
3072 
3073 	kprojid = make_kprojid(&init_user_ns, (projid_t)projid);
3074 
3075 	if (projid_eq(kprojid, F2FS_I(inode)->i_projid))
3076 		return 0;
3077 
3078 	err = -EPERM;
3079 	/* Is it quota file? Do not allow user to mess with it */
3080 	if (IS_NOQUOTA(inode))
3081 		return err;
3082 
3083 	ipage = f2fs_get_node_page(sbi, inode->i_ino);
3084 	if (IS_ERR(ipage))
3085 		return PTR_ERR(ipage);
3086 
3087 	if (!F2FS_FITS_IN_INODE(F2FS_INODE(ipage), fi->i_extra_isize,
3088 								i_projid)) {
3089 		err = -EOVERFLOW;
3090 		f2fs_put_page(ipage, 1);
3091 		return err;
3092 	}
3093 	f2fs_put_page(ipage, 1);
3094 
3095 	err = dquot_initialize(inode);
3096 	if (err)
3097 		return err;
3098 
3099 	f2fs_lock_op(sbi);
3100 	err = f2fs_transfer_project_quota(inode, kprojid);
3101 	if (err)
3102 		goto out_unlock;
3103 
3104 	F2FS_I(inode)->i_projid = kprojid;
3105 	inode->i_ctime = current_time(inode);
3106 	f2fs_mark_inode_dirty_sync(inode, true);
3107 out_unlock:
3108 	f2fs_unlock_op(sbi);
3109 	return err;
3110 }
3111 #else
f2fs_transfer_project_quota(struct inode * inode,kprojid_t kprojid)3112 int f2fs_transfer_project_quota(struct inode *inode, kprojid_t kprojid)
3113 {
3114 	return 0;
3115 }
3116 
f2fs_ioc_setproject(struct file * filp,__u32 projid)3117 static int f2fs_ioc_setproject(struct file *filp, __u32 projid)
3118 {
3119 	if (projid != F2FS_DEF_PROJID)
3120 		return -EOPNOTSUPP;
3121 	return 0;
3122 }
3123 #endif
3124 
3125 /* FS_IOC_FSGETXATTR and FS_IOC_FSSETXATTR support */
3126 
3127 /*
3128  * To make a new on-disk f2fs i_flag gettable via FS_IOC_FSGETXATTR and settable
3129  * via FS_IOC_FSSETXATTR, add an entry for it to f2fs_xflags_map[], and add its
3130  * FS_XFLAG_* equivalent to F2FS_SUPPORTED_XFLAGS.
3131  */
3132 
3133 static const struct {
3134 	u32 iflag;
3135 	u32 xflag;
3136 } f2fs_xflags_map[] = {
3137 	{ F2FS_SYNC_FL,		FS_XFLAG_SYNC },
3138 	{ F2FS_IMMUTABLE_FL,	FS_XFLAG_IMMUTABLE },
3139 	{ F2FS_APPEND_FL,	FS_XFLAG_APPEND },
3140 	{ F2FS_NODUMP_FL,	FS_XFLAG_NODUMP },
3141 	{ F2FS_NOATIME_FL,	FS_XFLAG_NOATIME },
3142 	{ F2FS_PROJINHERIT_FL,	FS_XFLAG_PROJINHERIT },
3143 };
3144 
3145 #define F2FS_SUPPORTED_XFLAGS (		\
3146 		FS_XFLAG_SYNC |		\
3147 		FS_XFLAG_IMMUTABLE |	\
3148 		FS_XFLAG_APPEND |	\
3149 		FS_XFLAG_NODUMP |	\
3150 		FS_XFLAG_NOATIME |	\
3151 		FS_XFLAG_PROJINHERIT)
3152 
3153 /* Convert f2fs on-disk i_flags to FS_IOC_FS{GET,SET}XATTR flags */
f2fs_iflags_to_xflags(u32 iflags)3154 static inline u32 f2fs_iflags_to_xflags(u32 iflags)
3155 {
3156 	u32 xflags = 0;
3157 	int i;
3158 
3159 	for (i = 0; i < ARRAY_SIZE(f2fs_xflags_map); i++)
3160 		if (iflags & f2fs_xflags_map[i].iflag)
3161 			xflags |= f2fs_xflags_map[i].xflag;
3162 
3163 	return xflags;
3164 }
3165 
3166 /* Convert FS_IOC_FS{GET,SET}XATTR flags to f2fs on-disk i_flags */
f2fs_xflags_to_iflags(u32 xflags)3167 static inline u32 f2fs_xflags_to_iflags(u32 xflags)
3168 {
3169 	u32 iflags = 0;
3170 	int i;
3171 
3172 	for (i = 0; i < ARRAY_SIZE(f2fs_xflags_map); i++)
3173 		if (xflags & f2fs_xflags_map[i].xflag)
3174 			iflags |= f2fs_xflags_map[i].iflag;
3175 
3176 	return iflags;
3177 }
3178 
f2fs_fill_fsxattr(struct inode * inode,struct fsxattr * fa)3179 static void f2fs_fill_fsxattr(struct inode *inode, struct fsxattr *fa)
3180 {
3181 	struct f2fs_inode_info *fi = F2FS_I(inode);
3182 
3183 	simple_fill_fsxattr(fa, f2fs_iflags_to_xflags(fi->i_flags));
3184 
3185 	if (f2fs_sb_has_project_quota(F2FS_I_SB(inode)))
3186 		fa->fsx_projid = from_kprojid(&init_user_ns, fi->i_projid);
3187 }
3188 
f2fs_ioc_fsgetxattr(struct file * filp,unsigned long arg)3189 static int f2fs_ioc_fsgetxattr(struct file *filp, unsigned long arg)
3190 {
3191 	struct inode *inode = file_inode(filp);
3192 	struct fsxattr fa;
3193 
3194 	f2fs_fill_fsxattr(inode, &fa);
3195 
3196 	if (copy_to_user((struct fsxattr __user *)arg, &fa, sizeof(fa)))
3197 		return -EFAULT;
3198 	return 0;
3199 }
3200 
f2fs_ioc_fssetxattr(struct file * filp,unsigned long arg)3201 static int f2fs_ioc_fssetxattr(struct file *filp, unsigned long arg)
3202 {
3203 	struct inode *inode = file_inode(filp);
3204 	struct fsxattr fa, old_fa;
3205 	u32 iflags;
3206 	int err;
3207 
3208 	if (copy_from_user(&fa, (struct fsxattr __user *)arg, sizeof(fa)))
3209 		return -EFAULT;
3210 
3211 	/* Make sure caller has proper permission */
3212 	if (!inode_owner_or_capable(inode))
3213 		return -EACCES;
3214 
3215 	if (fa.fsx_xflags & ~F2FS_SUPPORTED_XFLAGS)
3216 		return -EOPNOTSUPP;
3217 
3218 	iflags = f2fs_xflags_to_iflags(fa.fsx_xflags);
3219 	if (f2fs_mask_flags(inode->i_mode, iflags) != iflags)
3220 		return -EOPNOTSUPP;
3221 
3222 	err = mnt_want_write_file(filp);
3223 	if (err)
3224 		return err;
3225 
3226 	inode_lock(inode);
3227 
3228 	f2fs_fill_fsxattr(inode, &old_fa);
3229 	err = vfs_ioc_fssetxattr_check(inode, &old_fa, &fa);
3230 	if (err)
3231 		goto out;
3232 
3233 	err = f2fs_setflags_common(inode, iflags,
3234 			f2fs_xflags_to_iflags(F2FS_SUPPORTED_XFLAGS));
3235 	if (err)
3236 		goto out;
3237 
3238 	err = f2fs_ioc_setproject(filp, fa.fsx_projid);
3239 out:
3240 	inode_unlock(inode);
3241 	mnt_drop_write_file(filp);
3242 	return err;
3243 }
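
/*
 * Illustrative userspace sketch (not part of this file): FS_IOC_FSSETXATTR
 * is how project quota IDs get assigned, mirroring xfs_io's "chproj". The
 * same read-modify-write discipline as FS_IOC_SETFLAGS applies:
 *
 *	#include <sys/ioctl.h>
 *	#include <linux/fs.h>	// FS_IOC_FSGETXATTR/FSSETXATTR, struct fsxattr
 *
 *	struct fsxattr fa;
 *	ioctl(fd, FS_IOC_FSGETXATTR, &fa);
 *	fa.fsx_projid = 42;			// hypothetical project ID
 *	fa.fsx_xflags |= FS_XFLAG_PROJINHERIT;	// new children inherit it
 *	ioctl(fd, FS_IOC_FSSETXATTR, &fa);
 */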
3244 
f2fs_pin_file_control(struct inode * inode,bool inc)3245 int f2fs_pin_file_control(struct inode *inode, bool inc)
3246 {
3247 	struct f2fs_inode_info *fi = F2FS_I(inode);
3248 	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
3249 
3250 	/* Use i_gc_failures for normal file as a risk signal. */
3251 	if (inc)
3252 		f2fs_i_gc_failures_write(inode,
3253 				fi->i_gc_failures[GC_FAILURE_PIN] + 1);
3254 
3255 	if (fi->i_gc_failures[GC_FAILURE_PIN] > sbi->gc_pin_file_threshold) {
3256 		f2fs_warn(sbi, "%s: Enable GC = ino %lx after %x GC trials",
3257 			  __func__, inode->i_ino,
3258 			  fi->i_gc_failures[GC_FAILURE_PIN]);
3259 		clear_inode_flag(inode, FI_PIN_FILE);
3260 		return -EAGAIN;
3261 	}
3262 	return 0;
3263 }
3264 
f2fs_ioc_set_pin_file(struct file * filp,unsigned long arg)3265 static int f2fs_ioc_set_pin_file(struct file *filp, unsigned long arg)
3266 {
3267 	struct inode *inode = file_inode(filp);
3268 	__u32 pin;
3269 	int ret = 0;
3270 
3271 	if (get_user(pin, (__u32 __user *)arg))
3272 		return -EFAULT;
3273 
3274 	if (!S_ISREG(inode->i_mode))
3275 		return -EINVAL;
3276 
3277 	if (f2fs_readonly(F2FS_I_SB(inode)->sb))
3278 		return -EROFS;
3279 
3280 	ret = mnt_want_write_file(filp);
3281 	if (ret)
3282 		return ret;
3283 
3284 	inode_lock(inode);
3285 
3286 	if (!pin) {
3287 		clear_inode_flag(inode, FI_PIN_FILE);
3288 		f2fs_i_gc_failures_write(inode, 0);
3289 		goto done;
3290 	}
3291 
3292 	if (f2fs_should_update_outplace(inode, NULL)) {
3293 		ret = -EINVAL;
3294 		goto out;
3295 	}
3296 
3297 	if (f2fs_pin_file_control(inode, false)) {
3298 		ret = -EAGAIN;
3299 		goto out;
3300 	}
3301 
3302 	ret = f2fs_convert_inline_inode(inode);
3303 	if (ret)
3304 		goto out;
3305 
3306 	if (!f2fs_disable_compressed_file(inode)) {
3307 		ret = -EOPNOTSUPP;
3308 		goto out;
3309 	}
3310 
3311 	set_inode_flag(inode, FI_PIN_FILE);
3312 	ret = F2FS_I(inode)->i_gc_failures[GC_FAILURE_PIN];
3313 done:
3314 	f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
3315 out:
3316 	inode_unlock(inode);
3317 	mnt_drop_write_file(filp);
3318 	return ret;
3319 }
3320 
f2fs_ioc_get_pin_file(struct file * filp,unsigned long arg)3321 static int f2fs_ioc_get_pin_file(struct file *filp, unsigned long arg)
3322 {
3323 	struct inode *inode = file_inode(filp);
3324 	__u32 pin = 0;
3325 
3326 	if (is_inode_flag_set(inode, FI_PIN_FILE))
3327 		pin = F2FS_I(inode)->i_gc_failures[GC_FAILURE_PIN];
3328 	return put_user(pin, (u32 __user *)arg);
3329 }
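
/*
 * Illustrative userspace sketch (not part of this file): pinning keeps a
 * file's blocks in place so GC will not migrate them, which matters for
 * swapfiles and preallocated regions. The get side reports the GC-failure
 * count of a pinned file:
 *
 *	#include <sys/ioctl.h>
 *	#include <linux/f2fs.h>	// F2FS_IOC_{SET,GET}_PIN_FILE
 *
 *	__u32 pin = 1;
 *	ioctl(fd, F2FS_IOC_SET_PIN_FILE, &pin);	// pin; pass 0 to unpin
 *	ioctl(fd, F2FS_IOC_GET_PIN_FILE, &pin);	// pin now holds gc failures
 */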
3330 
f2fs_precache_extents(struct inode * inode)3331 int f2fs_precache_extents(struct inode *inode)
3332 {
3333 	struct f2fs_inode_info *fi = F2FS_I(inode);
3334 	struct f2fs_map_blocks map;
3335 	pgoff_t m_next_extent;
3336 	loff_t end;
3337 	int err;
3338 
3339 	if (is_inode_flag_set(inode, FI_NO_EXTENT))
3340 		return -EOPNOTSUPP;
3341 
3342 	map.m_lblk = 0;
3343 	map.m_next_pgofs = NULL;
3344 	map.m_next_extent = &m_next_extent;
3345 	map.m_seg_type = NO_CHECK_TYPE;
3346 	map.m_may_create = false;
3347 	end = max_file_blocks(inode);
3348 
3349 	while (map.m_lblk < end) {
3350 		map.m_len = end - map.m_lblk;
3351 
3352 		f2fs_down_write(&fi->i_gc_rwsem[WRITE]);
3353 		err = f2fs_map_blocks(inode, &map, 0, F2FS_GET_BLOCK_PRECACHE);
3354 		f2fs_up_write(&fi->i_gc_rwsem[WRITE]);
3355 		if (err)
3356 			return err;
3357 
3358 		map.m_lblk = m_next_extent;
3359 	}
3360 
3361 	return 0;
3362 }
3363 
f2fs_ioc_precache_extents(struct file * filp,unsigned long arg)3364 static int f2fs_ioc_precache_extents(struct file *filp, unsigned long arg)
3365 {
3366 	return f2fs_precache_extents(file_inode(filp));
3367 }
3368 
f2fs_ioc_resize_fs(struct file * filp,unsigned long arg)3369 static int f2fs_ioc_resize_fs(struct file *filp, unsigned long arg)
3370 {
3371 	struct f2fs_sb_info *sbi = F2FS_I_SB(file_inode(filp));
3372 	__u64 block_count;
3373 
3374 	if (!capable(CAP_SYS_ADMIN))
3375 		return -EPERM;
3376 
3377 	if (f2fs_readonly(sbi->sb))
3378 		return -EROFS;
3379 
3380 	if (copy_from_user(&block_count, (void __user *)arg,
3381 			   sizeof(block_count)))
3382 		return -EFAULT;
3383 
3384 	return f2fs_resize_fs(sbi, block_count);
3385 }
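
/*
 * Illustrative userspace sketch (not part of this file): online resize by
 * total block count; f2fs resize tooling drives this ioctl. new_block_count
 * is a hypothetical target size in blocks:
 *
 *	#include <sys/ioctl.h>
 *	#include <linux/f2fs.h>	// F2FS_IOC_RESIZE_FS
 *
 *	__u64 blocks = new_block_count;
 *	ioctl(fd, F2FS_IOC_RESIZE_FS, &blocks);	// requires CAP_SYS_ADMIN
 */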
3386 
f2fs_ioc_enable_verity(struct file * filp,unsigned long arg)3387 static int f2fs_ioc_enable_verity(struct file *filp, unsigned long arg)
3388 {
3389 	struct inode *inode = file_inode(filp);
3390 
3391 	f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
3392 
3393 	if (!f2fs_sb_has_verity(F2FS_I_SB(inode))) {
3394 		f2fs_warn(F2FS_I_SB(inode),
3395 			  "Can't enable fs-verity on inode %lu: the verity feature is not enabled on this filesystem",
3396 			  inode->i_ino);
3397 		return -EOPNOTSUPP;
3398 	}
3399 
3400 	return fsverity_ioctl_enable(filp, (const void __user *)arg);
3401 }
3402 
f2fs_ioc_measure_verity(struct file * filp,unsigned long arg)3403 static int f2fs_ioc_measure_verity(struct file *filp, unsigned long arg)
3404 {
3405 	if (!f2fs_sb_has_verity(F2FS_I_SB(file_inode(filp))))
3406 		return -EOPNOTSUPP;
3407 
3408 	return fsverity_ioctl_measure(filp, (void __user *)arg);
3409 }
3410 
f2fs_ioc_read_verity_metadata(struct file * filp,unsigned long arg)3411 static int f2fs_ioc_read_verity_metadata(struct file *filp, unsigned long arg)
3412 {
3413 	if (!f2fs_sb_has_verity(F2FS_I_SB(file_inode(filp))))
3414 		return -EOPNOTSUPP;
3415 
3416 	return fsverity_ioctl_read_metadata(filp, (const void __user *)arg);
3417 }
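
/*
 * Illustrative userspace sketch (not part of this file): the verity wrappers
 * above delegate to fs/verity. Enabling verity seals a file opened read-only:
 *
 *	#include <sys/ioctl.h>
 *	#include <linux/fsverity.h>	// FS_IOC_ENABLE_VERITY, fsverity_enable_arg
 *
 *	struct fsverity_enable_arg varg = {
 *		.version = 1,
 *		.hash_algorithm = FS_VERITY_HASH_ALG_SHA256,
 *		.block_size = 4096,
 *	};
 *	ioctl(fd, FS_IOC_ENABLE_VERITY, &varg);	// fd opened O_RDONLY
 */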
3418 
f2fs_ioc_getfslabel(struct file * filp,unsigned long arg)3419 static int f2fs_ioc_getfslabel(struct file *filp, unsigned long arg)
3420 {
3421 	struct inode *inode = file_inode(filp);
3422 	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
3423 	char *vbuf;
3424 	int count;
3425 	int err = 0;
3426 
3427 	vbuf = f2fs_kzalloc(sbi, MAX_VOLUME_NAME, GFP_KERNEL);
3428 	if (!vbuf)
3429 		return -ENOMEM;
3430 
3431 	f2fs_down_read(&sbi->sb_lock);
3432 	count = utf16s_to_utf8s(sbi->raw_super->volume_name,
3433 			ARRAY_SIZE(sbi->raw_super->volume_name),
3434 			UTF16_LITTLE_ENDIAN, vbuf, MAX_VOLUME_NAME);
3435 	f2fs_up_read(&sbi->sb_lock);
3436 
3437 	if (copy_to_user((char __user *)arg, vbuf,
3438 				min(FSLABEL_MAX, count)))
3439 		err = -EFAULT;
3440 
3441 	kfree(vbuf);
3442 	return err;
3443 }
3444 
f2fs_ioc_setfslabel(struct file * filp,unsigned long arg)3445 static int f2fs_ioc_setfslabel(struct file *filp, unsigned long arg)
3446 {
3447 	struct inode *inode = file_inode(filp);
3448 	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
3449 	char *vbuf;
3450 	int err = 0;
3451 
3452 	if (!capable(CAP_SYS_ADMIN))
3453 		return -EPERM;
3454 
3455 	vbuf = strndup_user((const char __user *)arg, FSLABEL_MAX);
3456 	if (IS_ERR(vbuf))
3457 		return PTR_ERR(vbuf);
3458 
3459 	err = mnt_want_write_file(filp);
3460 	if (err)
3461 		goto out;
3462 
3463 	f2fs_down_write(&sbi->sb_lock);
3464 
3465 	memset(sbi->raw_super->volume_name, 0,
3466 			sizeof(sbi->raw_super->volume_name));
3467 	utf8s_to_utf16s(vbuf, strlen(vbuf), UTF16_LITTLE_ENDIAN,
3468 			sbi->raw_super->volume_name,
3469 			ARRAY_SIZE(sbi->raw_super->volume_name));
3470 
3471 	err = f2fs_commit_super(sbi, false);
3472 
3473 	f2fs_up_write(&sbi->sb_lock);
3474 
3475 	mnt_drop_write_file(filp);
3476 out:
3477 	kfree(vbuf);
3478 	return err;
3479 }
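
/*
 * Illustrative userspace sketch (not part of this file): these implement the
 * generic label ioctls, so the same caller code works as for ext4 or btrfs:
 *
 *	#include <sys/ioctl.h>
 *	#include <linux/fs.h>	// FS_IOC_{GET,SET}FSLABEL, FSLABEL_MAX
 *
 *	char label[FSLABEL_MAX];
 *	ioctl(fd, FS_IOC_GETFSLABEL, label);
 *	ioctl(fd, FS_IOC_SETFSLABEL, "backup");	// CAP_SYS_ADMIN only
 */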
3480 
f2fs_get_compress_blocks(struct file * filp,unsigned long arg)3481 static int f2fs_get_compress_blocks(struct file *filp, unsigned long arg)
3482 {
3483 	struct inode *inode = file_inode(filp);
3484 	__u64 blocks;
3485 
3486 	if (!f2fs_sb_has_compression(F2FS_I_SB(inode)))
3487 		return -EOPNOTSUPP;
3488 
3489 	if (!f2fs_compressed_file(inode))
3490 		return -EINVAL;
3491 
3492 	blocks = atomic_read(&F2FS_I(inode)->i_compr_blocks);
3493 	return put_user(blocks, (u64 __user *)arg);
3494 }
3495 
release_compress_blocks(struct dnode_of_data * dn,pgoff_t count)3496 static int release_compress_blocks(struct dnode_of_data *dn, pgoff_t count)
3497 {
3498 	struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
3499 	unsigned int released_blocks = 0;
3500 	int cluster_size = F2FS_I(dn->inode)->i_cluster_size;
3501 	block_t blkaddr;
3502 	int i;
3503 
3504 	for (i = 0; i < count; i++) {
3505 		blkaddr = data_blkaddr(dn->inode, dn->node_page,
3506 						dn->ofs_in_node + i);
3507 
3508 		if (!__is_valid_data_blkaddr(blkaddr))
3509 			continue;
3510 		if (unlikely(!f2fs_is_valid_blkaddr(sbi, blkaddr,
3511 					DATA_GENERIC_ENHANCE)))
3512 			return -EFSCORRUPTED;
3513 	}
3514 
3515 	while (count) {
3516 		int compr_blocks = 0;
3517 
3518 		for (i = 0; i < cluster_size; i++, dn->ofs_in_node++) {
3519 			blkaddr = f2fs_data_blkaddr(dn);
3520 
3521 			if (i == 0) {
3522 				if (blkaddr == COMPRESS_ADDR)
3523 					continue;
3524 				dn->ofs_in_node += cluster_size;
3525 				goto next;
3526 			}
3527 
3528 			if (__is_valid_data_blkaddr(blkaddr))
3529 				compr_blocks++;
3530 
3531 			if (blkaddr != NEW_ADDR)
3532 				continue;
3533 
3534 			dn->data_blkaddr = NULL_ADDR;
3535 			f2fs_set_data_blkaddr(dn);
3536 		}
3537 
3538 		f2fs_i_compr_blocks_update(dn->inode, compr_blocks, false);
3539 		dec_valid_block_count(sbi, dn->inode,
3540 					cluster_size - compr_blocks);
3541 
3542 		released_blocks += cluster_size - compr_blocks;
3543 next:
3544 		count -= cluster_size;
3545 	}
3546 
3547 	return released_blocks;
3548 }
3549 
f2fs_release_compress_blocks(struct file * filp,unsigned long arg)3550 static int f2fs_release_compress_blocks(struct file *filp, unsigned long arg)
3551 {
3552 	struct inode *inode = file_inode(filp);
3553 	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
3554 	pgoff_t page_idx = 0, last_idx;
3555 	unsigned int released_blocks = 0;
3556 	int ret;
3557 	int writecount;
3558 
3559 	if (!f2fs_sb_has_compression(F2FS_I_SB(inode)))
3560 		return -EOPNOTSUPP;
3561 
3562 	if (!f2fs_compressed_file(inode))
3563 		return -EINVAL;
3564 
3565 	if (f2fs_readonly(sbi->sb))
3566 		return -EROFS;
3567 
3568 	ret = mnt_want_write_file(filp);
3569 	if (ret)
3570 		return ret;
3571 
3572 	f2fs_balance_fs(F2FS_I_SB(inode), true);
3573 
3574 	inode_lock(inode);
3575 
3576 	writecount = atomic_read(&inode->i_writecount);
3577 	if ((filp->f_mode & FMODE_WRITE && writecount != 1) ||
3578 			(!(filp->f_mode & FMODE_WRITE) && writecount)) {
3579 		ret = -EBUSY;
3580 		goto out;
3581 	}
3582 
3583 	if (is_inode_flag_set(inode, FI_COMPRESS_RELEASED)) {
3584 		ret = -EINVAL;
3585 		goto out;
3586 	}
3587 
3588 	ret = filemap_write_and_wait_range(inode->i_mapping, 0, LLONG_MAX);
3589 	if (ret)
3590 		goto out;
3591 
3592 	set_inode_flag(inode, FI_COMPRESS_RELEASED);
3593 	inode->i_ctime = current_time(inode);
3594 	f2fs_mark_inode_dirty_sync(inode, true);
3595 
3596 	if (!atomic_read(&F2FS_I(inode)->i_compr_blocks))
3597 		goto out;
3598 
3599 	f2fs_down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
3600 	f2fs_down_write(&F2FS_I(inode)->i_mmap_sem);
3601 
3602 	last_idx = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);
3603 
3604 	while (page_idx < last_idx) {
3605 		struct dnode_of_data dn;
3606 		pgoff_t end_offset, count;
3607 
3608 		set_new_dnode(&dn, inode, NULL, NULL, 0);
3609 		ret = f2fs_get_dnode_of_data(&dn, page_idx, LOOKUP_NODE);
3610 		if (ret) {
3611 			if (ret == -ENOENT) {
3612 				page_idx = f2fs_get_next_page_offset(&dn,
3613 								page_idx);
3614 				ret = 0;
3615 				continue;
3616 			}
3617 			break;
3618 		}
3619 
3620 		end_offset = ADDRS_PER_PAGE(dn.node_page, inode);
3621 		count = min(end_offset - dn.ofs_in_node, last_idx - page_idx);
3622 		count = round_up(count, F2FS_I(inode)->i_cluster_size);
3623 
3624 		ret = release_compress_blocks(&dn, count);
3625 
3626 		f2fs_put_dnode(&dn);
3627 
3628 		if (ret < 0)
3629 			break;
3630 
3631 		page_idx += count;
3632 		released_blocks += ret;
3633 	}
3634 
3635 	f2fs_up_write(&F2FS_I(inode)->i_mmap_sem);
3636 	f2fs_up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
3637 out:
3638 	inode_unlock(inode);
3639 
3640 	mnt_drop_write_file(filp);
3641 
3642 	if (ret >= 0) {
3643 		ret = put_user(released_blocks, (u64 __user *)arg);
3644 	} else if (released_blocks &&
3645 			atomic_read(&F2FS_I(inode)->i_compr_blocks)) {
3646 		set_sbi_flag(sbi, SBI_NEED_FSCK);
3647 		f2fs_warn(sbi, "%s: partial blocks were released i_ino=%lx "
3648 			"iblocks=%llu, released=%u, compr_blocks=%u, "
3649 			"run fsck to fix.",
3650 			__func__, inode->i_ino, inode->i_blocks,
3651 			released_blocks,
3652 			atomic_read(&F2FS_I(inode)->i_compr_blocks));
3653 	}
3654 
3655 	return ret;
3656 }
3657 
reserve_compress_blocks(struct dnode_of_data * dn,pgoff_t count)3658 static int reserve_compress_blocks(struct dnode_of_data *dn, pgoff_t count)
3659 {
3660 	struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
3661 	unsigned int reserved_blocks = 0;
3662 	int cluster_size = F2FS_I(dn->inode)->i_cluster_size;
3663 	block_t blkaddr;
3664 	int i;
3665 
3666 	for (i = 0; i < count; i++) {
3667 		blkaddr = data_blkaddr(dn->inode, dn->node_page,
3668 						dn->ofs_in_node + i);
3669 
3670 		if (!__is_valid_data_blkaddr(blkaddr))
3671 			continue;
3672 		if (unlikely(!f2fs_is_valid_blkaddr(sbi, blkaddr,
3673 					DATA_GENERIC_ENHANCE)))
3674 			return -EFSCORRUPTED;
3675 	}
3676 
3677 	while (count) {
3678 		int compr_blocks = 0;
3679 		blkcnt_t reserved;
3680 		int ret;
3681 
3682 		for (i = 0; i < cluster_size; i++, dn->ofs_in_node++) {
3683 			blkaddr = f2fs_data_blkaddr(dn);
3684 
3685 			if (i == 0) {
3686 				if (blkaddr == COMPRESS_ADDR)
3687 					continue;
3688 				dn->ofs_in_node += cluster_size;
3689 				goto next;
3690 			}
3691 
3692 			if (__is_valid_data_blkaddr(blkaddr)) {
3693 				compr_blocks++;
3694 				continue;
3695 			}
3696 
3697 			dn->data_blkaddr = NEW_ADDR;
3698 			f2fs_set_data_blkaddr(dn);
3699 		}
3700 
3701 		reserved = cluster_size - compr_blocks;
3702 		ret = inc_valid_block_count(sbi, dn->inode, &reserved);
3703 		if (ret)
3704 			return ret;
3705 
3706 		if (reserved != cluster_size - compr_blocks)
3707 			return -ENOSPC;
3708 
3709 		f2fs_i_compr_blocks_update(dn->inode, compr_blocks, true);
3710 
3711 		reserved_blocks += reserved;
3712 next:
3713 		count -= cluster_size;
3714 	}
3715 
3716 	return reserved_blocks;
3717 }
3718 
f2fs_reserve_compress_blocks(struct file * filp,unsigned long arg)3719 static int f2fs_reserve_compress_blocks(struct file *filp, unsigned long arg)
3720 {
3721 	struct inode *inode = file_inode(filp);
3722 	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
3723 	pgoff_t page_idx = 0, last_idx;
3724 	unsigned int reserved_blocks = 0;
3725 	int ret;
3726 
3727 	if (!f2fs_sb_has_compression(F2FS_I_SB(inode)))
3728 		return -EOPNOTSUPP;
3729 
3730 	if (!f2fs_compressed_file(inode))
3731 		return -EINVAL;
3732 
3733 	if (f2fs_readonly(sbi->sb))
3734 		return -EROFS;
3735 
3736 	ret = mnt_want_write_file(filp);
3737 	if (ret)
3738 		return ret;
3739 
3740 	if (atomic_read(&F2FS_I(inode)->i_compr_blocks))
3741 		goto out;
3742 
3743 	f2fs_balance_fs(F2FS_I_SB(inode), true);
3744 
3745 	inode_lock(inode);
3746 
3747 	if (!is_inode_flag_set(inode, FI_COMPRESS_RELEASED)) {
3748 		ret = -EINVAL;
3749 		goto unlock_inode;
3750 	}
3751 
3752 	f2fs_down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
3753 	f2fs_down_write(&F2FS_I(inode)->i_mmap_sem);
3754 
3755 	last_idx = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);
3756 
3757 	while (page_idx < last_idx) {
3758 		struct dnode_of_data dn;
3759 		pgoff_t end_offset, count;
3760 
3761 		set_new_dnode(&dn, inode, NULL, NULL, 0);
3762 		ret = f2fs_get_dnode_of_data(&dn, page_idx, LOOKUP_NODE);
3763 		if (ret) {
3764 			if (ret == -ENOENT) {
3765 				page_idx = f2fs_get_next_page_offset(&dn,
3766 								page_idx);
3767 				ret = 0;
3768 				continue;
3769 			}
3770 			break;
3771 		}
3772 
3773 		end_offset = ADDRS_PER_PAGE(dn.node_page, inode);
3774 		count = min(end_offset - dn.ofs_in_node, last_idx - page_idx);
3775 		count = round_up(count, F2FS_I(inode)->i_cluster_size);
3776 
3777 		ret = reserve_compress_blocks(&dn, count);
3778 
3779 		f2fs_put_dnode(&dn);
3780 
3781 		if (ret < 0)
3782 			break;
3783 
3784 		page_idx += count;
3785 		reserved_blocks += ret;
3786 	}
3787 
3788 	f2fs_up_write(&F2FS_I(inode)->i_mmap_sem);
3789 	f2fs_up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
3790 
3791 	if (ret >= 0) {
3792 		clear_inode_flag(inode, FI_COMPRESS_RELEASED);
3793 		inode->i_ctime = current_time(inode);
3794 		f2fs_mark_inode_dirty_sync(inode, true);
3795 	}
3796 unlock_inode:
3797 	inode_unlock(inode);
3798 out:
3799 	mnt_drop_write_file(filp);
3800 
3801 	if (ret >= 0) {
3802 		ret = put_user(reserved_blocks, (u64 __user *)arg);
3803 	} else if (reserved_blocks &&
3804 			atomic_read(&F2FS_I(inode)->i_compr_blocks)) {
3805 		set_sbi_flag(sbi, SBI_NEED_FSCK);
3806 		f2fs_warn(sbi, "%s: partial blocks were reserved i_ino=%lx "
3807 			"iblocks=%llu, reserved=%u, compr_blocks=%u, "
3808 			"run fsck to fix.",
3809 			__func__, inode->i_ino, inode->i_blocks,
3810 			reserved_blocks,
3811 			atomic_read(&F2FS_I(inode)->i_compr_blocks));
3812 	}
3813 
3814 	return ret;
3815 }
3816 
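/*
 * Issue the low-level erase for @len blocks at @block of @bdev: a
 * (secure, if supported) discard when F2FS_TRIM_FILE_DISCARD is set,
 * then an explicit zero-out when F2FS_TRIM_FILE_ZEROOUT is set.
 * Encrypted inodes are zeroed through fscrypt_zeroout_range() so the
 * on-disk blocks decrypt to zeroes.
 */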
3817 static int f2fs_secure_erase(struct block_device *bdev, struct inode *inode,
3818 		pgoff_t off, block_t block, block_t len, u32 flags)
3819 {
3820 	struct request_queue *q = bdev_get_queue(bdev);
3821 	sector_t sector = SECTOR_FROM_BLOCK(block);
3822 	sector_t nr_sects = SECTOR_FROM_BLOCK(len);
3823 	int ret = 0;
3824 
3825 	if (!q)
3826 		return -ENXIO;
3827 
3828 	if (flags & F2FS_TRIM_FILE_DISCARD)
3829 		ret = blkdev_issue_discard(bdev, sector, nr_sects, GFP_NOFS,
3830 						blk_queue_secure_erase(q) ?
3831 						BLKDEV_DISCARD_SECURE : 0);
3832 
3833 	if (!ret && (flags & F2FS_TRIM_FILE_ZEROOUT)) {
3834 		if (IS_ENCRYPTED(inode))
3835 			ret = fscrypt_zeroout_range(inode, off, block, len);
3836 		else
3837 			ret = blkdev_issue_zeroout(bdev, sector, nr_sects,
3838 					GFP_NOFS, 0);
3839 	}
3840 
3841 	return ret;
3842 }
3843 
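/*
 * F2FS_IOC_SEC_TRIM_FILE: securely erase a block-aligned byte range of
 * a regular file.  The range is written back and dropped from the page
 * cache, then physically contiguous extents on the same device are
 * batched into single f2fs_secure_erase() requests.
 *
 * A minimal user-space sketch (error handling omitted):
 *
 *	struct f2fs_sectrim_range range = {
 *		.start = 0,
 *		.len = (u64)-1,		(to end of file)
 *		.flags = F2FS_TRIM_FILE_DISCARD | F2FS_TRIM_FILE_ZEROOUT,
 *	};
 *
 *	ioctl(fd, F2FS_IOC_SEC_TRIM_FILE, &range);
 */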
3844 static int f2fs_sec_trim_file(struct file *filp, unsigned long arg)
3845 {
3846 	struct inode *inode = file_inode(filp);
3847 	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
3848 	struct address_space *mapping = inode->i_mapping;
3849 	struct block_device *prev_bdev = NULL;
3850 	struct f2fs_sectrim_range range;
3851 	pgoff_t index, pg_end, prev_index = 0;
3852 	block_t prev_block = 0, len = 0;
3853 	loff_t end_addr;
3854 	bool to_end = false;
3855 	int ret = 0;
3856 
3857 	if (!(filp->f_mode & FMODE_WRITE))
3858 		return -EBADF;
3859 
3860 	if (copy_from_user(&range, (struct f2fs_sectrim_range __user *)arg,
3861 				sizeof(range)))
3862 		return -EFAULT;
3863 
3864 	if (range.flags == 0 || (range.flags & ~F2FS_TRIM_FILE_MASK) ||
3865 			!S_ISREG(inode->i_mode))
3866 		return -EINVAL;
3867 
3868 	if (((range.flags & F2FS_TRIM_FILE_DISCARD) &&
3869 			!f2fs_hw_support_discard(sbi)) ||
3870 			((range.flags & F2FS_TRIM_FILE_ZEROOUT) &&
3871 			 IS_ENCRYPTED(inode) && f2fs_is_multi_device(sbi)))
3872 		return -EOPNOTSUPP;
3873 
3874 	file_start_write(filp);
3875 	inode_lock(inode);
3876 
3877 	if (f2fs_is_atomic_file(inode) || f2fs_compressed_file(inode) ||
3878 			range.start >= inode->i_size) {
3879 		ret = -EINVAL;
3880 		goto err;
3881 	}
3882 
3883 	if (range.len == 0)
3884 		goto err;
3885 
3886 	if (inode->i_size - range.start > range.len) {
3887 		end_addr = range.start + range.len;
3888 	} else {
3889 		end_addr = range.len == (u64)-1 ?
3890 			sbi->sb->s_maxbytes : inode->i_size;
3891 		to_end = true;
3892 	}
3893 
3894 	if (!IS_ALIGNED(range.start, F2FS_BLKSIZE) ||
3895 			(!to_end && !IS_ALIGNED(end_addr, F2FS_BLKSIZE))) {
3896 		ret = -EINVAL;
3897 		goto err;
3898 	}
3899 
3900 	index = F2FS_BYTES_TO_BLK(range.start);
3901 	pg_end = DIV_ROUND_UP(end_addr, F2FS_BLKSIZE);
3902 
3903 	ret = f2fs_convert_inline_inode(inode);
3904 	if (ret)
3905 		goto err;
3906 
3907 	f2fs_down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
3908 	f2fs_down_write(&F2FS_I(inode)->i_mmap_sem);
3909 
3910 	ret = filemap_write_and_wait_range(mapping, range.start,
3911 			to_end ? LLONG_MAX : end_addr - 1);
3912 	if (ret)
3913 		goto out;
3914 
3915 	truncate_inode_pages_range(mapping, range.start,
3916 			to_end ? -1 : end_addr - 1);
3917 
3918 	while (index < pg_end) {
3919 		struct dnode_of_data dn;
3920 		pgoff_t end_offset, count;
3921 		int i;
3922 
3923 		set_new_dnode(&dn, inode, NULL, NULL, 0);
3924 		ret = f2fs_get_dnode_of_data(&dn, index, LOOKUP_NODE);
3925 		if (ret) {
3926 			if (ret == -ENOENT) {
3927 				index = f2fs_get_next_page_offset(&dn, index);
3928 				continue;
3929 			}
3930 			goto out;
3931 		}
3932 
3933 		end_offset = ADDRS_PER_PAGE(dn.node_page, inode);
3934 		count = min(end_offset - dn.ofs_in_node, pg_end - index);
3935 		for (i = 0; i < count; i++, index++, dn.ofs_in_node++) {
3936 			struct block_device *cur_bdev;
3937 			block_t blkaddr = f2fs_data_blkaddr(&dn);
3938 
3939 			if (!__is_valid_data_blkaddr(blkaddr))
3940 				continue;
3941 
3942 			if (!f2fs_is_valid_blkaddr(sbi, blkaddr,
3943 						DATA_GENERIC_ENHANCE)) {
3944 				ret = -EFSCORRUPTED;
3945 				f2fs_put_dnode(&dn);
3946 				goto out;
3947 			}
3948 
3949 			cur_bdev = f2fs_target_device(sbi, blkaddr, NULL);
3950 			if (f2fs_is_multi_device(sbi)) {
3951 				int di = f2fs_target_device_index(sbi, blkaddr);
3952 
3953 				blkaddr -= FDEV(di).start_blk;
3954 			}
3955 
3956 			if (len) {
3957 				if (prev_bdev == cur_bdev &&
3958 						index == prev_index + len &&
3959 						blkaddr == prev_block + len) {
3960 					len++;
3961 				} else {
3962 					ret = f2fs_secure_erase(prev_bdev,
3963 						inode, prev_index, prev_block,
3964 						len, range.flags);
3965 					if (ret) {
3966 						f2fs_put_dnode(&dn);
3967 						goto out;
3968 					}
3969 
3970 					len = 0;
3971 				}
3972 			}
3973 
3974 			if (!len) {
3975 				prev_bdev = cur_bdev;
3976 				prev_index = index;
3977 				prev_block = blkaddr;
3978 				len = 1;
3979 			}
3980 		}
3981 
3982 		f2fs_put_dnode(&dn);
3983 
3984 		if (fatal_signal_pending(current)) {
3985 			ret = -EINTR;
3986 			goto out;
3987 		}
3988 		cond_resched();
3989 	}
3990 
3991 	if (len)
3992 		ret = f2fs_secure_erase(prev_bdev, inode, prev_index,
3993 				prev_block, len, range.flags);
3994 out:
3995 	f2fs_up_write(&F2FS_I(inode)->i_mmap_sem);
3996 	f2fs_up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
3997 err:
3998 	inode_unlock(inode);
3999 	file_end_write(filp);
4000 
4001 	return ret;
4002 }
4003 
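/*
 * F2FS_IOC_GET_COMPRESS_OPTION: copy the inode's compression algorithm
 * and log2 cluster size out to user space.
 */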
4004 static int f2fs_ioc_get_compress_option(struct file *filp, unsigned long arg)
4005 {
4006 	struct inode *inode = file_inode(filp);
4007 	struct f2fs_comp_option option;
4008 
4009 	if (!f2fs_sb_has_compression(F2FS_I_SB(inode)))
4010 		return -EOPNOTSUPP;
4011 
4012 	inode_lock_shared(inode);
4013 
4014 	if (!f2fs_compressed_file(inode)) {
4015 		inode_unlock_shared(inode);
4016 		return -ENODATA;
4017 	}
4018 
4019 	option.algorithm = F2FS_I(inode)->i_compress_algorithm;
4020 	option.log_cluster_size = F2FS_I(inode)->i_log_cluster_size;
4021 
4022 	inode_unlock_shared(inode);
4023 
4024 	if (copy_to_user((struct f2fs_comp_option __user *)arg, &option,
4025 				sizeof(option)))
4026 		return -EFAULT;
4027 
4028 	return 0;
4029 }
4030 
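/*
 * F2FS_IOC_SET_COMPRESS_OPTION: change the compression algorithm and
 * cluster size of an empty compressed file.  Fails with -EBUSY for
 * mmapped or dirty inodes and -EFBIG for non-empty files; only warns
 * if the chosen algorithm is not built into the running kernel.
 */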
4031 static int f2fs_ioc_set_compress_option(struct file *filp, unsigned long arg)
4032 {
4033 	struct inode *inode = file_inode(filp);
4034 	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
4035 	struct f2fs_comp_option option;
4036 	int ret = 0;
4037 
4038 	if (!f2fs_sb_has_compression(sbi))
4039 		return -EOPNOTSUPP;
4040 
4041 	if (!(filp->f_mode & FMODE_WRITE))
4042 		return -EBADF;
4043 
4044 	if (copy_from_user(&option, (struct f2fs_comp_option __user *)arg,
4045 				sizeof(option)))
4046 		return -EFAULT;
4047 
4048 	if (!f2fs_compressed_file(inode) ||
4049 			option.log_cluster_size < MIN_COMPRESS_LOG_SIZE ||
4050 			option.log_cluster_size > MAX_COMPRESS_LOG_SIZE ||
4051 			option.algorithm >= COMPRESS_MAX)
4052 		return -EINVAL;
4053 
4054 	file_start_write(filp);
4055 	inode_lock(inode);
4056 
4057 	if (f2fs_is_mmap_file(inode) || get_dirty_pages(inode)) {
4058 		ret = -EBUSY;
4059 		goto out;
4060 	}
4061 
4062 	if (inode->i_size != 0) {
4063 		ret = -EFBIG;
4064 		goto out;
4065 	}
4066 
4067 	F2FS_I(inode)->i_compress_algorithm = option.algorithm;
4068 	F2FS_I(inode)->i_log_cluster_size = option.log_cluster_size;
4069 	F2FS_I(inode)->i_cluster_size = 1 << option.log_cluster_size;
4070 	f2fs_mark_inode_dirty_sync(inode, true);
4071 
4072 	if (!f2fs_is_compress_backend_ready(inode))
4073 		f2fs_warn(sbi, "compression algorithm is successfully set, "
4074 			"but current kernel doesn't support this algorithm.");
4075 out:
4076 	inode_unlock(inode);
4077 	file_end_write(filp);
4078 
4079 	return ret;
4080 }
4081 
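/*
 * Pull @len pages starting at @page_idx into the page cache and mark
 * them dirty again so that the next writeback pass rewrites (and thus
 * re-compresses or decompresses) the underlying clusters.
 */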
4082 static int redirty_blocks(struct inode *inode, pgoff_t page_idx, int len)
4083 {
4084 	DEFINE_READAHEAD(ractl, NULL, inode->i_mapping, page_idx);
4085 	struct address_space *mapping = inode->i_mapping;
4086 	struct page *page;
4087 	pgoff_t redirty_idx = page_idx;
4088 	int i, page_len = 0, ret = 0;
4089 
4090 	page_cache_ra_unbounded(&ractl, len, 0);
4091 
4092 	for (i = 0; i < len; i++, page_idx++) {
4093 		page = read_cache_page(mapping, page_idx, NULL, NULL);
4094 		if (IS_ERR(page)) {
4095 			ret = PTR_ERR(page);
4096 			break;
4097 		}
4098 		page_len++;
4099 	}
4100 
4101 	for (i = 0; i < page_len; i++, redirty_idx++) {
4102 		page = find_lock_page(mapping, redirty_idx);
4103 		if (!page) {
4104 			ret = -ENOMEM;
4105 			break;
4106 		}
4107 		set_page_dirty(page);
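		/*
		 * Drop the lock and reference from find_lock_page(), then
		 * the extra reference taken by read_cache_page() above.
		 */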
4108 		f2fs_put_page(page, 1);
4109 		f2fs_put_page(page, 0);
4110 	}
4111 
4112 	return ret;
4113 }
4114 
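/*
 * F2FS_IOC_DECOMPRESS_FILE (COMPR_MODE_USER only): rewrite a compressed
 * file as plain data by redirtying it cluster by cluster and writing it
 * back; since FI_ENABLE_COMPRESS is not set, writeback stores the raw
 * blocks.  Writeback is kicked whenever a full segment's worth of pages
 * is dirty, to bound memory usage.
 */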
4115 static int f2fs_ioc_decompress_file(struct file *filp, unsigned long arg)
4116 {
4117 	struct inode *inode = file_inode(filp);
4118 	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
4119 	struct f2fs_inode_info *fi = F2FS_I(inode);
4120 	pgoff_t page_idx = 0, last_idx;
4121 	unsigned int blk_per_seg = sbi->blocks_per_seg;
4122 	int cluster_size = F2FS_I(inode)->i_cluster_size;
4123 	int count, ret;
4124 
4125 	if (!f2fs_sb_has_compression(sbi) ||
4126 			F2FS_OPTION(sbi).compress_mode != COMPR_MODE_USER)
4127 		return -EOPNOTSUPP;
4128 
4129 	if (!(filp->f_mode & FMODE_WRITE))
4130 		return -EBADF;
4131 
4132 	if (!f2fs_compressed_file(inode))
4133 		return -EINVAL;
4134 
4135 	f2fs_balance_fs(F2FS_I_SB(inode), true);
4136 
4137 	file_start_write(filp);
4138 	inode_lock(inode);
4139 
4140 	if (!f2fs_is_compress_backend_ready(inode)) {
4141 		ret = -EOPNOTSUPP;
4142 		goto out;
4143 	}
4144 
4145 	if (is_inode_flag_set(inode, FI_COMPRESS_RELEASED)) {
4146 		ret = -EINVAL;
4147 		goto out;
4148 	}
4149 
4150 	ret = filemap_write_and_wait_range(inode->i_mapping, 0, LLONG_MAX);
4151 	if (ret)
4152 		goto out;
4153 
4154 	if (!atomic_read(&fi->i_compr_blocks))
4155 		goto out;
4156 
4157 	last_idx = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);
4158 
4159 	count = last_idx - page_idx;
4160 	while (count) {
4161 		int len = min(cluster_size, count);
4162 
4163 		ret = redirty_blocks(inode, page_idx, len);
4164 		if (ret < 0)
4165 			break;
4166 
4167 		if (get_dirty_pages(inode) >= blk_per_seg)
4168 			filemap_fdatawrite(inode->i_mapping);
4169 
4170 		count -= len;
4171 		page_idx += len;
4172 	}
4173 
4174 	if (!ret)
4175 		ret = filemap_write_and_wait_range(inode->i_mapping, 0,
4176 							LLONG_MAX);
4177 
4178 	if (ret)
4179 		f2fs_warn(sbi, "%s: The file might be partially decompressed (errno=%d). Please delete the file.",
4180 			  __func__, ret);
4181 out:
4182 	inode_unlock(inode);
4183 	file_end_write(filp);
4184 
4185 	return ret;
4186 }
4187 
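/*
 * F2FS_IOC_COMPRESS_FILE (COMPR_MODE_USER only): counterpart of the
 * decompress ioctl.  FI_ENABLE_COMPRESS is held across the redirty and
 * writeback loop so that every cluster written back gets compressed.
 */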
4188 static int f2fs_ioc_compress_file(struct file *filp, unsigned long arg)
4189 {
4190 	struct inode *inode = file_inode(filp);
4191 	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
4192 	pgoff_t page_idx = 0, last_idx;
4193 	unsigned int blk_per_seg = sbi->blocks_per_seg;
4194 	int cluster_size = F2FS_I(inode)->i_cluster_size;
4195 	int count, ret;
4196 
4197 	if (!f2fs_sb_has_compression(sbi) ||
4198 			F2FS_OPTION(sbi).compress_mode != COMPR_MODE_USER)
4199 		return -EOPNOTSUPP;
4200 
4201 	if (!(filp->f_mode & FMODE_WRITE))
4202 		return -EBADF;
4203 
4204 	if (!f2fs_compressed_file(inode))
4205 		return -EINVAL;
4206 
4207 	f2fs_balance_fs(F2FS_I_SB(inode), true);
4208 
4209 	file_start_write(filp);
4210 	inode_lock(inode);
4211 
4212 	if (!f2fs_is_compress_backend_ready(inode)) {
4213 		ret = -EOPNOTSUPP;
4214 		goto out;
4215 	}
4216 
4217 	if (is_inode_flag_set(inode, FI_COMPRESS_RELEASED)) {
4218 		ret = -EINVAL;
4219 		goto out;
4220 	}
4221 
4222 	ret = filemap_write_and_wait_range(inode->i_mapping, 0, LLONG_MAX);
4223 	if (ret)
4224 		goto out;
4225 
4226 	set_inode_flag(inode, FI_ENABLE_COMPRESS);
4227 
4228 	last_idx = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);
4229 
4230 	count = last_idx - page_idx;
4231 	while (count) {
4232 		int len = min(cluster_size, count);
4233 
4234 		ret = redirty_blocks(inode, page_idx, len);
4235 		if (ret < 0)
4236 			break;
4237 
4238 		if (get_dirty_pages(inode) >= blk_per_seg)
4239 			filemap_fdatawrite(inode->i_mapping);
4240 
4241 		count -= len;
4242 		page_idx += len;
4243 	}
4244 
4245 	if (!ret)
4246 		ret = filemap_write_and_wait_range(inode->i_mapping, 0,
4247 							LLONG_MAX);
4248 
4249 	clear_inode_flag(inode, FI_ENABLE_COMPRESS);
4250 
4251 	if (ret)
4252 		f2fs_warn(sbi, "%s: The file might be partially compressed (errno=%d). Please delete the file.",
4253 			  __func__, ret);
4254 out:
4255 	inode_unlock(inode);
4256 	file_end_write(filp);
4257 
4258 	return ret;
4259 }
4260 
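/* Dispatcher for all native f2fs and generic FS_IOC_* ioctl commands. */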
4261 static long __f2fs_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
4262 {
4263 	switch (cmd) {
4264 	case FS_IOC_GETFLAGS:
4265 		return f2fs_ioc_getflags(filp, arg);
4266 	case FS_IOC_SETFLAGS:
4267 		return f2fs_ioc_setflags(filp, arg);
4268 	case FS_IOC_GETVERSION:
4269 		return f2fs_ioc_getversion(filp, arg);
4270 	case F2FS_IOC_START_ATOMIC_WRITE:
4271 		return f2fs_ioc_start_atomic_write(filp);
4272 	case F2FS_IOC_COMMIT_ATOMIC_WRITE:
4273 		return f2fs_ioc_commit_atomic_write(filp);
4274 	case F2FS_IOC_START_VOLATILE_WRITE:
4275 		return f2fs_ioc_start_volatile_write(filp);
4276 	case F2FS_IOC_RELEASE_VOLATILE_WRITE:
4277 		return f2fs_ioc_release_volatile_write(filp);
4278 	case F2FS_IOC_ABORT_VOLATILE_WRITE:
4279 		return f2fs_ioc_abort_volatile_write(filp);
4280 	case F2FS_IOC_SHUTDOWN:
4281 		return f2fs_ioc_shutdown(filp, arg);
4282 	case FITRIM:
4283 		return f2fs_ioc_fitrim(filp, arg);
4284 	case FS_IOC_SET_ENCRYPTION_POLICY:
4285 		return f2fs_ioc_set_encryption_policy(filp, arg);
4286 	case FS_IOC_GET_ENCRYPTION_POLICY:
4287 		return f2fs_ioc_get_encryption_policy(filp, arg);
4288 	case FS_IOC_GET_ENCRYPTION_PWSALT:
4289 		return f2fs_ioc_get_encryption_pwsalt(filp, arg);
4290 	case FS_IOC_GET_ENCRYPTION_POLICY_EX:
4291 		return f2fs_ioc_get_encryption_policy_ex(filp, arg);
4292 	case FS_IOC_ADD_ENCRYPTION_KEY:
4293 		return f2fs_ioc_add_encryption_key(filp, arg);
4294 	case FS_IOC_REMOVE_ENCRYPTION_KEY:
4295 		return f2fs_ioc_remove_encryption_key(filp, arg);
4296 	case FS_IOC_REMOVE_ENCRYPTION_KEY_ALL_USERS:
4297 		return f2fs_ioc_remove_encryption_key_all_users(filp, arg);
4298 	case FS_IOC_GET_ENCRYPTION_KEY_STATUS:
4299 		return f2fs_ioc_get_encryption_key_status(filp, arg);
4300 	case FS_IOC_GET_ENCRYPTION_NONCE:
4301 		return f2fs_ioc_get_encryption_nonce(filp, arg);
4302 	case F2FS_IOC_GARBAGE_COLLECT:
4303 		return f2fs_ioc_gc(filp, arg);
4304 	case F2FS_IOC_GARBAGE_COLLECT_RANGE:
4305 		return f2fs_ioc_gc_range(filp, arg);
4306 	case F2FS_IOC_WRITE_CHECKPOINT:
4307 		return f2fs_ioc_write_checkpoint(filp, arg);
4308 	case F2FS_IOC_DEFRAGMENT:
4309 		return f2fs_ioc_defragment(filp, arg);
4310 	case F2FS_IOC_MOVE_RANGE:
4311 		return f2fs_ioc_move_range(filp, arg);
4312 	case F2FS_IOC_FLUSH_DEVICE:
4313 		return f2fs_ioc_flush_device(filp, arg);
4314 	case F2FS_IOC_GET_FEATURES:
4315 		return f2fs_ioc_get_features(filp, arg);
4316 	case FS_IOC_FSGETXATTR:
4317 		return f2fs_ioc_fsgetxattr(filp, arg);
4318 	case FS_IOC_FSSETXATTR:
4319 		return f2fs_ioc_fssetxattr(filp, arg);
4320 	case F2FS_IOC_GET_PIN_FILE:
4321 		return f2fs_ioc_get_pin_file(filp, arg);
4322 	case F2FS_IOC_SET_PIN_FILE:
4323 		return f2fs_ioc_set_pin_file(filp, arg);
4324 	case F2FS_IOC_PRECACHE_EXTENTS:
4325 		return f2fs_ioc_precache_extents(filp, arg);
4326 	case F2FS_IOC_RESIZE_FS:
4327 		return f2fs_ioc_resize_fs(filp, arg);
4328 	case FS_IOC_ENABLE_VERITY:
4329 		return f2fs_ioc_enable_verity(filp, arg);
4330 	case FS_IOC_MEASURE_VERITY:
4331 		return f2fs_ioc_measure_verity(filp, arg);
4332 	case FS_IOC_READ_VERITY_METADATA:
4333 		return f2fs_ioc_read_verity_metadata(filp, arg);
4334 	case FS_IOC_GETFSLABEL:
4335 		return f2fs_ioc_getfslabel(filp, arg);
4336 	case FS_IOC_SETFSLABEL:
4337 		return f2fs_ioc_setfslabel(filp, arg);
4338 	case F2FS_IOC_GET_COMPRESS_BLOCKS:
4339 		return f2fs_get_compress_blocks(filp, arg);
4340 	case F2FS_IOC_RELEASE_COMPRESS_BLOCKS:
4341 		return f2fs_release_compress_blocks(filp, arg);
4342 	case F2FS_IOC_RESERVE_COMPRESS_BLOCKS:
4343 		return f2fs_reserve_compress_blocks(filp, arg);
4344 	case F2FS_IOC_SEC_TRIM_FILE:
4345 		return f2fs_sec_trim_file(filp, arg);
4346 	case F2FS_IOC_GET_COMPRESS_OPTION:
4347 		return f2fs_ioc_get_compress_option(filp, arg);
4348 	case F2FS_IOC_SET_COMPRESS_OPTION:
4349 		return f2fs_ioc_set_compress_option(filp, arg);
4350 	case F2FS_IOC_DECOMPRESS_FILE:
4351 		return f2fs_ioc_decompress_file(filp, arg);
4352 	case F2FS_IOC_COMPRESS_FILE:
4353 		return f2fs_ioc_compress_file(filp, arg);
4354 	default:
4355 		return -ENOTTY;
4356 	}
4357 }
4358 
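/*
 * unlocked_ioctl entry point: fail fast on a checkpoint error or an
 * unready checkpoint before dispatching to __f2fs_ioctl().
 */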
4359 long f2fs_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
4360 {
4361 	if (unlikely(f2fs_cp_error(F2FS_I_SB(file_inode(filp)))))
4362 		return -EIO;
4363 	if (!f2fs_is_checkpoint_ready(F2FS_I_SB(file_inode(filp))))
4364 		return -ENOSPC;
4365 
4366 	return __f2fs_ioctl(filp, cmd, arg);
4367 }
4368 
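/*
 * Read entry point: refuse files whose compression backend is missing
 * and account successful reads in the APP_READ_IO iostat.
 */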
4369 static ssize_t f2fs_file_read_iter(struct kiocb *iocb, struct iov_iter *iter)
4370 {
4371 	struct file *file = iocb->ki_filp;
4372 	struct inode *inode = file_inode(file);
4373 	int ret;
4374 
4375 	if (!f2fs_is_compress_backend_ready(inode))
4376 		return -EOPNOTSUPP;
4377 
4378 	ret = generic_file_read_iter(iocb, iter);
4379 
4380 	if (ret > 0)
4381 		f2fs_update_iostat(F2FS_I_SB(inode), APP_READ_IO, ret);
4382 
4383 	return ret;
4384 }
4385 
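/*
 * Write entry point: takes the inode lock (trylock under IOCB_NOWAIT),
 * rejects immutable and released-compression inodes, preallocates
 * blocks where that helps before __generic_file_write_iter(), and
 * truncates any preallocation left beyond EOF by a short write.
 */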
4386 static ssize_t f2fs_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
4387 {
4388 	struct file *file = iocb->ki_filp;
4389 	struct inode *inode = file_inode(file);
4390 	ssize_t ret;
4391 
4392 	if (unlikely(f2fs_cp_error(F2FS_I_SB(inode)))) {
4393 		ret = -EIO;
4394 		goto out;
4395 	}
4396 
4397 	if (!f2fs_is_compress_backend_ready(inode)) {
4398 		ret = -EOPNOTSUPP;
4399 		goto out;
4400 	}
4401 
4402 	if (iocb->ki_flags & IOCB_NOWAIT) {
4403 		if (!inode_trylock(inode)) {
4404 			ret = -EAGAIN;
4405 			goto out;
4406 		}
4407 	} else {
4408 		inode_lock(inode);
4409 	}
4410 
4411 	if (unlikely(IS_IMMUTABLE(inode))) {
4412 		ret = -EPERM;
4413 		goto unlock;
4414 	}
4415 
4416 	if (is_inode_flag_set(inode, FI_COMPRESS_RELEASED)) {
4417 		ret = -EPERM;
4418 		goto unlock;
4419 	}
4420 
4421 	ret = generic_write_checks(iocb, from);
4422 	if (ret > 0) {
4423 		bool preallocated = false;
4424 		size_t target_size = 0;
4425 		int err;
4426 
4427 		if (iov_iter_fault_in_readable(from, iov_iter_count(from)))
4428 			set_inode_flag(inode, FI_NO_PREALLOC);
4429 
4430 		if (iocb->ki_flags & IOCB_NOWAIT) {
4431 			if (!f2fs_overwrite_io(inode, iocb->ki_pos,
4432 						iov_iter_count(from)) ||
4433 				f2fs_has_inline_data(inode) ||
4434 				f2fs_force_buffered_io(inode, iocb, from)) {
4435 				clear_inode_flag(inode, FI_NO_PREALLOC);
4436 				inode_unlock(inode);
4437 				ret = -EAGAIN;
4438 				goto out;
4439 			}
4440 			goto write;
4441 		}
4442 
4443 		if (is_inode_flag_set(inode, FI_NO_PREALLOC))
4444 			goto write;
4445 
4446 		if (iocb->ki_flags & IOCB_DIRECT) {
4447 			/*
4448 			 * Convert inline data for Direct I/O before entering
4449 			 * f2fs_direct_IO().
4450 			 */
4451 			err = f2fs_convert_inline_inode(inode);
4452 			if (err)
4453 				goto out_err;
4454 			/*
4455 			 * If f2fs_force_buffered_io() is true, we have to allocate
4456 			 * blocks all the time, since f2fs_direct_IO will fall
4457 			 * back to buffered IO.
4458 			 */
4459 			if (!f2fs_force_buffered_io(inode, iocb, from) &&
4460 					allow_outplace_dio(inode, iocb, from))
4461 				goto write;
4462 		}
4463 		preallocated = true;
4464 		target_size = iocb->ki_pos + iov_iter_count(from);
4465 
4466 		err = f2fs_preallocate_blocks(iocb, from);
4467 		if (err) {
4468 out_err:
4469 			clear_inode_flag(inode, FI_NO_PREALLOC);
4470 			inode_unlock(inode);
4471 			ret = err;
4472 			goto out;
4473 		}
4474 write:
4475 		ret = __generic_file_write_iter(iocb, from);
4476 		clear_inode_flag(inode, FI_NO_PREALLOC);
4477 
4478 		/* if we couldn't write data, we should deallocate blocks. */
4479 		if (preallocated && i_size_read(inode) < target_size) {
4480 			f2fs_down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
4481 			f2fs_down_write(&F2FS_I(inode)->i_mmap_sem);
4482 			f2fs_truncate(inode);
4483 			f2fs_up_write(&F2FS_I(inode)->i_mmap_sem);
4484 			f2fs_up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
4485 		}
4486 
4487 		if (ret > 0)
4488 			f2fs_update_iostat(F2FS_I_SB(inode), APP_WRITE_IO, ret);
4489 	}
4490 unlock:
4491 	inode_unlock(inode);
4492 out:
4493 	trace_f2fs_file_write_iter(inode, iocb->ki_pos,
4494 					iov_iter_count(from), ret);
4495 	if (ret > 0)
4496 		ret = generic_write_sync(iocb, ret);
4497 	return ret;
4498 }
4499 
4500 #ifdef CONFIG_COMPAT
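/*
 * 32-bit compat variants of the range ioctls: the u64 fields become
 * compat_u64 so the layout matches what 32-bit user space passes in;
 * the values are repacked into the native structures below.
 */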
4501 struct compat_f2fs_gc_range {
4502 	u32 sync;
4503 	compat_u64 start;
4504 	compat_u64 len;
4505 };
4506 #define F2FS_IOC32_GARBAGE_COLLECT_RANGE	_IOW(F2FS_IOCTL_MAGIC, 11,\
4507 						struct compat_f2fs_gc_range)
4508 
4509 static int f2fs_compat_ioc_gc_range(struct file *file, unsigned long arg)
4510 {
4511 	struct compat_f2fs_gc_range __user *urange;
4512 	struct f2fs_gc_range range;
4513 	int err;
4514 
4515 	urange = compat_ptr(arg);
4516 	err = get_user(range.sync, &urange->sync);
4517 	err |= get_user(range.start, &urange->start);
4518 	err |= get_user(range.len, &urange->len);
4519 	if (err)
4520 		return -EFAULT;
4521 
4522 	return __f2fs_ioc_gc_range(file, &range);
4523 }
4524 
4525 struct compat_f2fs_move_range {
4526 	u32 dst_fd;
4527 	compat_u64 pos_in;
4528 	compat_u64 pos_out;
4529 	compat_u64 len;
4530 };
4531 #define F2FS_IOC32_MOVE_RANGE		_IOWR(F2FS_IOCTL_MAGIC, 9,	\
4532 					struct compat_f2fs_move_range)
4533 
4534 static int f2fs_compat_ioc_move_range(struct file *file, unsigned long arg)
4535 {
4536 	struct compat_f2fs_move_range __user *urange;
4537 	struct f2fs_move_range range;
4538 	int err;
4539 
4540 	urange = compat_ptr(arg);
4541 	err = get_user(range.dst_fd, &urange->dst_fd);
4542 	err |= get_user(range.pos_in, &urange->pos_in);
4543 	err |= get_user(range.pos_out, &urange->pos_out);
4544 	err |= get_user(range.len, &urange->len);
4545 	if (err)
4546 		return -EFAULT;
4547 
4548 	return __f2fs_ioc_move_range(file, &range);
4549 }
4550 
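/*
 * compat_ioctl entry point: remap the FS_IOC32_* commands, repack the
 * two range ioctls, pass pointer-compatible commands straight through
 * to __f2fs_ioctl(), and return -ENOIOCTLCMD for anything unknown.
 */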
4551 long f2fs_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
4552 {
4553 	if (unlikely(f2fs_cp_error(F2FS_I_SB(file_inode(file)))))
4554 		return -EIO;
4555 	if (!f2fs_is_checkpoint_ready(F2FS_I_SB(file_inode(file))))
4556 		return -ENOSPC;
4557 
4558 	switch (cmd) {
4559 	case FS_IOC32_GETFLAGS:
4560 		cmd = FS_IOC_GETFLAGS;
4561 		break;
4562 	case FS_IOC32_SETFLAGS:
4563 		cmd = FS_IOC_SETFLAGS;
4564 		break;
4565 	case FS_IOC32_GETVERSION:
4566 		cmd = FS_IOC_GETVERSION;
4567 		break;
4568 	case F2FS_IOC32_GARBAGE_COLLECT_RANGE:
4569 		return f2fs_compat_ioc_gc_range(file, arg);
4570 	case F2FS_IOC32_MOVE_RANGE:
4571 		return f2fs_compat_ioc_move_range(file, arg);
4572 	case F2FS_IOC_START_ATOMIC_WRITE:
4573 	case F2FS_IOC_COMMIT_ATOMIC_WRITE:
4574 	case F2FS_IOC_START_VOLATILE_WRITE:
4575 	case F2FS_IOC_RELEASE_VOLATILE_WRITE:
4576 	case F2FS_IOC_ABORT_VOLATILE_WRITE:
4577 	case F2FS_IOC_SHUTDOWN:
4578 	case FITRIM:
4579 	case FS_IOC_SET_ENCRYPTION_POLICY:
4580 	case FS_IOC_GET_ENCRYPTION_PWSALT:
4581 	case FS_IOC_GET_ENCRYPTION_POLICY:
4582 	case FS_IOC_GET_ENCRYPTION_POLICY_EX:
4583 	case FS_IOC_ADD_ENCRYPTION_KEY:
4584 	case FS_IOC_REMOVE_ENCRYPTION_KEY:
4585 	case FS_IOC_REMOVE_ENCRYPTION_KEY_ALL_USERS:
4586 	case FS_IOC_GET_ENCRYPTION_KEY_STATUS:
4587 	case FS_IOC_GET_ENCRYPTION_NONCE:
4588 	case F2FS_IOC_GARBAGE_COLLECT:
4589 	case F2FS_IOC_WRITE_CHECKPOINT:
4590 	case F2FS_IOC_DEFRAGMENT:
4591 	case F2FS_IOC_FLUSH_DEVICE:
4592 	case F2FS_IOC_GET_FEATURES:
4593 	case FS_IOC_FSGETXATTR:
4594 	case FS_IOC_FSSETXATTR:
4595 	case F2FS_IOC_GET_PIN_FILE:
4596 	case F2FS_IOC_SET_PIN_FILE:
4597 	case F2FS_IOC_PRECACHE_EXTENTS:
4598 	case F2FS_IOC_RESIZE_FS:
4599 	case FS_IOC_ENABLE_VERITY:
4600 	case FS_IOC_MEASURE_VERITY:
4601 	case FS_IOC_READ_VERITY_METADATA:
4602 	case FS_IOC_GETFSLABEL:
4603 	case FS_IOC_SETFSLABEL:
4604 	case F2FS_IOC_GET_COMPRESS_BLOCKS:
4605 	case F2FS_IOC_RELEASE_COMPRESS_BLOCKS:
4606 	case F2FS_IOC_RESERVE_COMPRESS_BLOCKS:
4607 	case F2FS_IOC_SEC_TRIM_FILE:
4608 	case F2FS_IOC_GET_COMPRESS_OPTION:
4609 	case F2FS_IOC_SET_COMPRESS_OPTION:
4610 	case F2FS_IOC_DECOMPRESS_FILE:
4611 	case F2FS_IOC_COMPRESS_FILE:
4612 		break;
4613 	default:
4614 		return -ENOIOCTLCMD;
4615 	}
4616 	return __f2fs_ioctl(file, cmd, (unsigned long) compat_ptr(arg));
4617 }
4618 #endif
4619 
4620 const struct file_operations f2fs_file_operations = {
4621 	.llseek		= f2fs_llseek,
4622 	.read_iter	= f2fs_file_read_iter,
4623 	.write_iter	= f2fs_file_write_iter,
4624 	.open		= f2fs_file_open,
4625 	.release	= f2fs_release_file,
4626 	.mmap		= f2fs_file_mmap,
4627 	.flush		= f2fs_file_flush,
4628 	.fsync		= f2fs_sync_file,
4629 	.fallocate	= f2fs_fallocate,
4630 	.unlocked_ioctl	= f2fs_ioctl,
4631 #ifdef CONFIG_COMPAT
4632 	.compat_ioctl	= f2fs_compat_ioctl,
4633 #endif
4634 	.splice_read	= generic_file_splice_read,
4635 	.splice_write	= iter_file_splice_write,
4636 };
4637