// SPDX-License-Identifier: GPL-2.0
/*
 * linux/fs/attr.c
 *
 * Copyright (C) 1991, 1992 Linus Torvalds
 * changes by Thomas Schoebel-Theuer
 */

#include <linux/export.h>
#include <linux/time.h>
#include <linux/mm.h>
#include <linux/string.h>
#include <linux/sched/signal.h>
#include <linux/capability.h>
#include <linux/fsnotify.h>
#include <linux/fcntl.h>
#include <linux/security.h>
#include <linux/evm.h>
#include <linux/ima.h>

static bool chown_ok(const struct inode *inode, kuid_t uid)
{
	if (uid_eq(current_fsuid(), inode->i_uid) &&
	    uid_eq(uid, inode->i_uid))
		return true;
	if (capable_wrt_inode_uidgid(inode, CAP_CHOWN))
		return true;
	if (uid_eq(inode->i_uid, INVALID_UID) &&
	    ns_capable(inode->i_sb->s_user_ns, CAP_CHOWN))
		return true;
	return false;
}

static bool chgrp_ok(const struct inode *inode, kgid_t gid)
{
	if (uid_eq(current_fsuid(), inode->i_uid) &&
	    (in_group_p(gid) || gid_eq(gid, inode->i_gid)))
		return true;
	if (capable_wrt_inode_uidgid(inode, CAP_CHOWN))
		return true;
	if (gid_eq(inode->i_gid, INVALID_GID) &&
	    ns_capable(inode->i_sb->s_user_ns, CAP_CHOWN))
		return true;
	return false;
}

/**
 * setattr_prepare - check if attribute changes to a dentry are allowed
 * @dentry: dentry to check
 * @attr: attributes to change
 *
 * Check if we are allowed to change the attributes contained in @attr
 * in the given dentry. This includes the normal unix access permission
 * checks, as well as checks for rlimits and others. The function also
 * clears the SGID bit from the mode if the caller is not allowed to set
 * it. File capabilities and IMA extended attributes are also cleared if
 * ATTR_KILL_PRIV is set.
 *
 * Should be called as the first thing in ->setattr implementations,
 * possibly after taking additional locks.
 */
int setattr_prepare(struct dentry *dentry, struct iattr *attr)
{
	struct inode *inode = d_inode(dentry);
	unsigned int ia_valid = attr->ia_valid;

	/*
	 * First check size constraints. These can't be overridden using
	 * ATTR_FORCE.
	 */
	if (ia_valid & ATTR_SIZE) {
		int error = inode_newsize_ok(inode, attr->ia_size);
		if (error)
			return error;
	}

	/* If force is set do it anyway. */
	if (ia_valid & ATTR_FORCE)
		goto kill_priv;

	/* Make sure a caller can chown. */
	if ((ia_valid & ATTR_UID) && !chown_ok(inode, attr->ia_uid))
		return -EPERM;

	/* Make sure caller can chgrp. */
	if ((ia_valid & ATTR_GID) && !chgrp_ok(inode, attr->ia_gid))
		return -EPERM;

	/* Make sure a caller can chmod. */
	if (ia_valid & ATTR_MODE) {
		if (!inode_owner_or_capable(inode))
			return -EPERM;
		/* Also check the setgid bit! */
		if (!in_group_p((ia_valid & ATTR_GID) ? attr->ia_gid :
				inode->i_gid) &&
		    !capable_wrt_inode_uidgid(inode, CAP_FSETID))
			attr->ia_mode &= ~S_ISGID;
	}

	/* Check for setting the inode time. */
	if (ia_valid & (ATTR_MTIME_SET | ATTR_ATIME_SET | ATTR_TIMES_SET)) {
		if (!inode_owner_or_capable(inode))
			return -EPERM;
	}

kill_priv:
	/* User has permission for the change */
	if (ia_valid & ATTR_KILL_PRIV) {
		int error;

		error = security_inode_killpriv(dentry);
		if (error)
			return error;
	}

	return 0;
}
EXPORT_SYMBOL_NS(setattr_prepare, ANDROID_GKI_VFS_EXPORT_ONLY);

/**
 * inode_newsize_ok - may this inode be truncated to a given size
 * @inode: the inode to be truncated
 * @offset: the new size to assign to the inode
 *
 * inode_newsize_ok must be called with i_mutex held.
 *
 * inode_newsize_ok will check filesystem limits and ulimits to check that the
 * new inode size is within limits. inode_newsize_ok will also send SIGXFSZ
 * when necessary. Caller must not proceed with inode size change if failure is
 * returned. @inode must be a file (not directory), with appropriate
 * permissions to allow truncate (inode_newsize_ok does NOT check these
 * conditions).
 *
 * Return: 0 on success, -ve errno on failure
 */
int inode_newsize_ok(const struct inode *inode, loff_t offset)
{
	if (offset < 0)
		return -EINVAL;
	if (inode->i_size < offset) {
		unsigned long limit;

		limit = rlimit(RLIMIT_FSIZE);
		if (limit != RLIM_INFINITY && offset > limit)
			goto out_sig;
		if (offset > inode->i_sb->s_maxbytes)
			goto out_big;
	} else {
		/*
		 * truncation of in-use swapfiles is disallowed - it would
		 * cause subsequent swapout to scribble on the now-freed
		 * blocks.
		 */
		if (IS_SWAPFILE(inode))
			return -ETXTBSY;
	}

	return 0;
out_sig:
	send_sig(SIGXFSZ, current, 0);
out_big:
	return -EFBIG;
}
EXPORT_SYMBOL_NS(inode_newsize_ok, ANDROID_GKI_VFS_EXPORT_ONLY);
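
/*
 * Example (not part of the original file): a minimal sketch of how a
 * size-extending path might use inode_newsize_ok() before doing any
 * allocation work, similar in spirit to what some filesystems do in
 * their fallocate paths. examplefs_extend() and its allocation step are
 * hypothetical; the caller is assumed to hold inode_lock().
 */
static int examplefs_extend(struct inode *inode, loff_t new_size)
{
	int error;

	/* Reject sizes beyond RLIMIT_FSIZE or sb->s_maxbytes up front. */
	error = inode_newsize_ok(inode, new_size);
	if (error)
		return error;

	/* ... allocate blocks here, then publish the new size ... */
	i_size_write(inode, new_size);
	return 0;
}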

/**
 * setattr_copy - copy simple metadata updates into the generic inode
 * @inode: the inode to be updated
 * @attr: the new attributes
 *
 * setattr_copy must be called with i_mutex held.
 *
 * setattr_copy updates the inode's metadata with that specified
 * in attr. Noticeably missing is inode size update, which is more complex
 * as it requires pagecache updates.
 *
 * The inode is not marked as dirty after this operation. The rationale is
 * that for "simple" filesystems, the struct inode is the inode storage.
 * The caller is free to mark the inode dirty afterwards if needed.
 */
void setattr_copy(struct inode *inode, const struct iattr *attr)
{
	unsigned int ia_valid = attr->ia_valid;

	if (ia_valid & ATTR_UID)
		inode->i_uid = attr->ia_uid;
	if (ia_valid & ATTR_GID)
		inode->i_gid = attr->ia_gid;
	if (ia_valid & ATTR_ATIME)
		inode->i_atime = attr->ia_atime;
	if (ia_valid & ATTR_MTIME)
		inode->i_mtime = attr->ia_mtime;
	if (ia_valid & ATTR_CTIME)
		inode->i_ctime = attr->ia_ctime;
	if (ia_valid & ATTR_MODE) {
		umode_t mode = attr->ia_mode;

		if (!in_group_p(inode->i_gid) &&
		    !capable_wrt_inode_uidgid(inode, CAP_FSETID))
			mode &= ~S_ISGID;
		inode->i_mode = mode;
	}
}
EXPORT_SYMBOL(setattr_copy);
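
/*
 * Example (not part of the original file): a hedged sketch of a minimal
 * ->setattr implementation built on setattr_prepare() and setattr_copy(),
 * roughly following simple_setattr() in fs/libfs.c. The name
 * examplefs_setattr is hypothetical.
 */
static int examplefs_setattr(struct dentry *dentry, struct iattr *attr)
{
	struct inode *inode = d_inode(dentry);
	int error;

	/* Permission, rlimit and SGID checks come first. */
	error = setattr_prepare(dentry, attr);
	if (error)
		return error;

	/* Size changes need the pagecache work that setattr_copy() skips. */
	if (attr->ia_valid & ATTR_SIZE)
		truncate_setsize(inode, attr->ia_size);

	/* Copy the remaining attributes, then mark the inode dirty ourselves. */
	setattr_copy(inode, attr);
	mark_inode_dirty(inode);
	return 0;
}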

/**
 * notify_change - modify attributes of a filesystem object
 * @dentry: object affected
 * @attr: new attributes
 * @delegated_inode: returns inode, if the inode is delegated
 *
 * The caller must hold the i_mutex on the affected object.
 *
 * If notify_change discovers a delegation in need of breaking,
 * it will return -EWOULDBLOCK and return a reference to the inode in
 * delegated_inode. The caller should then break the delegation and
 * retry. Because breaking a delegation may take a long time, the
 * caller should drop the i_mutex before doing so.
 *
 * Alternatively, a caller may pass NULL for delegated_inode. This may
 * be appropriate for callers that expect the underlying filesystem not
 * to be NFS exported. Also, passing NULL is fine for callers holding
 * the file open for write, as there can be no conflicting delegation in
 * that case.
 */
int notify_change(struct dentry *dentry, struct iattr *attr, struct inode **delegated_inode)
{
	struct inode *inode = dentry->d_inode;
	umode_t mode = inode->i_mode;
	int error;
	struct timespec64 now;
	unsigned int ia_valid = attr->ia_valid;

	WARN_ON_ONCE(!inode_is_locked(inode));

	if (ia_valid & (ATTR_MODE | ATTR_UID | ATTR_GID | ATTR_TIMES_SET)) {
		if (IS_IMMUTABLE(inode) || IS_APPEND(inode))
			return -EPERM;
	}

	/*
	 * If utimes(2) and friends are called with times == NULL (or both
	 * times are UTIME_NOW), then we need to check for write permission
	 */
	if (ia_valid & ATTR_TOUCH) {
		if (IS_IMMUTABLE(inode))
			return -EPERM;

		if (!inode_owner_or_capable(inode)) {
			error = inode_permission(inode, MAY_WRITE);
			if (error)
				return error;
		}
	}

	if ((ia_valid & ATTR_MODE)) {
		umode_t amode = attr->ia_mode;
		/* Flag setting protected by i_mutex */
		if (is_sxid(amode))
			inode->i_flags &= ~S_NOSEC;
	}

	now = current_time(inode);

	attr->ia_ctime = now;
	if (!(ia_valid & ATTR_ATIME_SET))
		attr->ia_atime = now;
	else
		attr->ia_atime = timestamp_truncate(attr->ia_atime, inode);
	if (!(ia_valid & ATTR_MTIME_SET))
		attr->ia_mtime = now;
	else
		attr->ia_mtime = timestamp_truncate(attr->ia_mtime, inode);

	if (ia_valid & ATTR_KILL_PRIV) {
		error = security_inode_need_killpriv(dentry);
		if (error < 0)
			return error;
		if (error == 0)
			ia_valid = attr->ia_valid &= ~ATTR_KILL_PRIV;
	}

	/*
	 * We now pass ATTR_KILL_S*ID to the lower level setattr function so
	 * that the function has the ability to reinterpret a mode change
	 * that's due to these bits. This adds an implicit restriction that
	 * no function will ever call notify_change with both ATTR_MODE and
	 * ATTR_KILL_S*ID set.
	 */
	if ((ia_valid & (ATTR_KILL_SUID|ATTR_KILL_SGID)) &&
	    (ia_valid & ATTR_MODE))
		BUG();

	if (ia_valid & ATTR_KILL_SUID) {
		if (mode & S_ISUID) {
			ia_valid = attr->ia_valid |= ATTR_MODE;
			attr->ia_mode = (inode->i_mode & ~S_ISUID);
		}
	}
	if (ia_valid & ATTR_KILL_SGID) {
		if ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP)) {
			if (!(ia_valid & ATTR_MODE)) {
				ia_valid = attr->ia_valid |= ATTR_MODE;
				attr->ia_mode = inode->i_mode;
			}
			attr->ia_mode &= ~S_ISGID;
		}
	}
	if (!(attr->ia_valid & ~(ATTR_KILL_SUID | ATTR_KILL_SGID)))
		return 0;

	/*
	 * Verify that uid/gid changes are valid in the target
	 * namespace of the superblock.
	 */
	if (ia_valid & ATTR_UID &&
	    !kuid_has_mapping(inode->i_sb->s_user_ns, attr->ia_uid))
		return -EOVERFLOW;
	if (ia_valid & ATTR_GID &&
	    !kgid_has_mapping(inode->i_sb->s_user_ns, attr->ia_gid))
		return -EOVERFLOW;

	/* Don't allow modifications of files with invalid uids or
	 * gids unless those uids & gids are being made valid.
	 */
	if (!(ia_valid & ATTR_UID) && !uid_valid(inode->i_uid))
		return -EOVERFLOW;
	if (!(ia_valid & ATTR_GID) && !gid_valid(inode->i_gid))
		return -EOVERFLOW;

	error = security_inode_setattr(dentry, attr);
	if (error)
		return error;
	error = try_break_deleg(inode, delegated_inode);
	if (error)
		return error;

	if (inode->i_op->setattr)
		error = inode->i_op->setattr(dentry, attr);
	else
		error = simple_setattr(dentry, attr);

	if (!error) {
		fsnotify_change(dentry, ia_valid);
		ima_inode_post_setattr(dentry);
		evm_inode_post_setattr(dentry, ia_valid);
	}

	return error;
}
EXPORT_SYMBOL_NS(notify_change, ANDROID_GKI_VFS_EXPORT_ONLY);
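
/*
 * Example (not part of the original file): a hedged sketch of the
 * delegation retry pattern described in the notify_change() comment,
 * modelled on callers such as chmod_common() in fs/open.c (the
 * mnt_want_write() and security_path_* steps are omitted here).
 * example_chmod() is a hypothetical name.
 */
static int example_chmod(struct dentry *dentry, umode_t mode)
{
	struct inode *inode = d_inode(dentry);
	struct inode *delegated_inode = NULL;
	struct iattr newattrs;
	int error;

retry_deleg:
	inode_lock(inode);
	newattrs.ia_valid = ATTR_MODE | ATTR_CTIME;
	newattrs.ia_mode = (mode & S_IALLUGO) | (inode->i_mode & ~S_IALLUGO);
	error = notify_change(dentry, &newattrs, &delegated_inode);
	inode_unlock(inode);
	if (delegated_inode) {
		/* Drop i_mutex, wait for the delegation to go away, retry. */
		error = break_deleg_wait(&delegated_inode);
		if (!error)
			goto retry_deleg;
	}
	return error;
}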