xref: /OK3568_Linux_fs/kernel/mm/msync.c (revision 4882a59341e53eb6f0b4789bf948001014eff981)
// SPDX-License-Identifier: GPL-2.0
/*
 *	linux/mm/msync.c
 *
 * Copyright (C) 1994-1999  Linus Torvalds
 */

/*
 * The msync() system call.
 */
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/file.h>
#include <linux/syscalls.h>
#include <linux/sched.h>

/*
 * MS_SYNC syncs the entire file - including mappings.
 *
 * MS_ASYNC does not start I/O (it used to, up to 2.5.67).
 * Nor does it mark the relevant pages dirty (it used to, up to 2.6.17).
 * Now it doesn't do anything, since dirty pages are properly tracked.
 *
 * The application may now run fsync() to
 * write out the dirty pages, wait on the writeout and check the result.
 * Or the application may run fadvise(FADV_DONTNEED) against the fd to start
 * async writeout immediately.
 * So by _not_ starting I/O in MS_ASYNC we provide complete flexibility to
 * applications.
 */
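/*
 * Illustrative userspace sketch (not part of this file), assuming a
 * shared, writable file mapping of a hypothetical descriptor fd and
 * a hypothetical buffer data/len:
 *
 *	void *buf = mmap(NULL, len, PROT_READ | PROT_WRITE,
 *			 MAP_SHARED, fd, 0);
 *	memcpy(buf, data, len);
 *	msync(buf, len, MS_ASYNC);	// starts no I/O, as described above
 *	fsync(fd);			// write out the dirty pages and wait
 *
 * or the application may call msync(buf, len, MS_SYNC) to write out
 * and wait in a single call.
 */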
SYSCALL_DEFINE3(msync, unsigned long, start, size_t, len, int, flags)
{
	unsigned long end;
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	int unmapped_error = 0;
	int error = -EINVAL;

	start = untagged_addr(start);

	if (flags & ~(MS_ASYNC | MS_INVALIDATE | MS_SYNC))
		goto out;
	if (offset_in_page(start))
		goto out;
	if ((flags & MS_ASYNC) && (flags & MS_SYNC))
		goto out;
	error = -ENOMEM;
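	/* Round len up to a whole number of pages. */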
	len = (len + ~PAGE_MASK) & PAGE_MASK;
	end = start + len;
	if (end < start)
		goto out;
	error = 0;
	if (end == start)
		goto out;
	/*
	 * If the interval [start,end) covers some unmapped address ranges,
	 * just ignore them, but return -ENOMEM at the end.
	 */
	mmap_read_lock(mm);
	vma = find_vma(mm, start);
	for (;;) {
		struct file *file;
		loff_t fstart, fend;

		/* Still start < end. */
		error = -ENOMEM;
		if (!vma)
			goto out_unlock;
		/* Here start < vma->vm_end. */
		if (start < vma->vm_start) {
			start = vma->vm_start;
			if (start >= end)
				goto out_unlock;
			unmapped_error = -ENOMEM;
		}
		/* Here vma->vm_start <= start < vma->vm_end. */
		if ((flags & MS_INVALIDATE) &&
				(vma->vm_flags & VM_LOCKED)) {
			error = -EBUSY;
			goto out_unlock;
		}
		file = vma->vm_file;
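		/*
		 * Byte range in the backing file covered by this
		 * vma's part of [start, end).
		 */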
		fstart = (start - vma->vm_start) +
			 ((loff_t)vma->vm_pgoff << PAGE_SHIFT);
		fend = fstart + (min(end, vma->vm_end) - start) - 1;
		start = vma->vm_end;
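		/*
		 * For MS_SYNC on a shared file mapping, drop mmap_lock
		 * around the writeback: vfs_fsync_range() may sleep.
		 * Hold a file reference so the file stays alive meanwhile.
		 */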
		if ((flags & MS_SYNC) && file &&
				(vma->vm_flags & VM_SHARED)) {
			get_file(file);
			mmap_read_unlock(mm);
			error = vfs_fsync_range(file, fstart, fend, 1);
			fput(file);
			if (error || start >= end)
				goto out;
			mmap_read_lock(mm);
			vma = find_vma(mm, start);
		} else {
			if (start >= end) {
				error = 0;
				goto out_unlock;
			}
			vma = vma->vm_next;
		}
	}
out_unlock:
	mmap_read_unlock(mm);
out:
	return error ? : unmapped_error;
}