// SPDX-License-Identifier: GPL-2.0-only
/*
 * fs/kernfs/inode.c - kernfs inode implementation
 *
 * Copyright (c) 2001-3 Patrick Mochel
 * Copyright (c) 2007 SUSE Linux Products GmbH
 * Copyright (c) 2007, 2013 Tejun Heo <tj@kernel.org>
 */
9*4882a593Smuzhiyun
10*4882a593Smuzhiyun #include <linux/pagemap.h>
11*4882a593Smuzhiyun #include <linux/backing-dev.h>
12*4882a593Smuzhiyun #include <linux/capability.h>
13*4882a593Smuzhiyun #include <linux/errno.h>
14*4882a593Smuzhiyun #include <linux/slab.h>
15*4882a593Smuzhiyun #include <linux/xattr.h>
16*4882a593Smuzhiyun #include <linux/security.h>
17*4882a593Smuzhiyun
18*4882a593Smuzhiyun #include "kernfs-internal.h"
19*4882a593Smuzhiyun
/*
 * Address-space (pagecache) operations for kernfs-backed inodes.
 * All three hooks delegate to the generic simple_* pagecache helpers.
 */
static const struct address_space_operations kernfs_aops = {
	.readpage = simple_readpage,
	.write_begin = simple_write_begin,
	.write_end = simple_write_end,
};
25*4882a593Smuzhiyun
/*
 * Default inode operations shared by kernfs inodes; directories and
 * symlinks override i_op after kernfs_init_inode() installs this table.
 */
static const struct inode_operations kernfs_iops = {
	.permission	= kernfs_iop_permission,
	.setattr	= kernfs_iop_setattr,
	.getattr	= kernfs_iop_getattr,
	.listxattr	= kernfs_iop_listxattr,
};
32*4882a593Smuzhiyun
__kernfs_iattrs(struct kernfs_node * kn,int alloc)33*4882a593Smuzhiyun static struct kernfs_iattrs *__kernfs_iattrs(struct kernfs_node *kn, int alloc)
34*4882a593Smuzhiyun {
35*4882a593Smuzhiyun static DEFINE_MUTEX(iattr_mutex);
36*4882a593Smuzhiyun struct kernfs_iattrs *ret;
37*4882a593Smuzhiyun
38*4882a593Smuzhiyun mutex_lock(&iattr_mutex);
39*4882a593Smuzhiyun
40*4882a593Smuzhiyun if (kn->iattr || !alloc)
41*4882a593Smuzhiyun goto out_unlock;
42*4882a593Smuzhiyun
43*4882a593Smuzhiyun kn->iattr = kmem_cache_zalloc(kernfs_iattrs_cache, GFP_KERNEL);
44*4882a593Smuzhiyun if (!kn->iattr)
45*4882a593Smuzhiyun goto out_unlock;
46*4882a593Smuzhiyun
47*4882a593Smuzhiyun /* assign default attributes */
48*4882a593Smuzhiyun kn->iattr->ia_uid = GLOBAL_ROOT_UID;
49*4882a593Smuzhiyun kn->iattr->ia_gid = GLOBAL_ROOT_GID;
50*4882a593Smuzhiyun
51*4882a593Smuzhiyun ktime_get_real_ts64(&kn->iattr->ia_atime);
52*4882a593Smuzhiyun kn->iattr->ia_mtime = kn->iattr->ia_atime;
53*4882a593Smuzhiyun kn->iattr->ia_ctime = kn->iattr->ia_atime;
54*4882a593Smuzhiyun
55*4882a593Smuzhiyun simple_xattrs_init(&kn->iattr->xattrs);
56*4882a593Smuzhiyun atomic_set(&kn->iattr->nr_user_xattrs, 0);
57*4882a593Smuzhiyun atomic_set(&kn->iattr->user_xattr_size, 0);
58*4882a593Smuzhiyun out_unlock:
59*4882a593Smuzhiyun ret = kn->iattr;
60*4882a593Smuzhiyun mutex_unlock(&iattr_mutex);
61*4882a593Smuzhiyun return ret;
62*4882a593Smuzhiyun }
63*4882a593Smuzhiyun
/* Get @kn's iattrs, allocating and initializing them on first use. */
static struct kernfs_iattrs *kernfs_iattrs(struct kernfs_node *kn)
{
	return __kernfs_iattrs(kn, 1);
}
68*4882a593Smuzhiyun
/* Get @kn's iattrs if they already exist; never allocates, may return NULL. */
static struct kernfs_iattrs *kernfs_iattrs_noalloc(struct kernfs_node *kn)
{
	return __kernfs_iattrs(kn, 0);
}
73*4882a593Smuzhiyun
__kernfs_setattr(struct kernfs_node * kn,const struct iattr * iattr)74*4882a593Smuzhiyun int __kernfs_setattr(struct kernfs_node *kn, const struct iattr *iattr)
75*4882a593Smuzhiyun {
76*4882a593Smuzhiyun struct kernfs_iattrs *attrs;
77*4882a593Smuzhiyun unsigned int ia_valid = iattr->ia_valid;
78*4882a593Smuzhiyun
79*4882a593Smuzhiyun attrs = kernfs_iattrs(kn);
80*4882a593Smuzhiyun if (!attrs)
81*4882a593Smuzhiyun return -ENOMEM;
82*4882a593Smuzhiyun
83*4882a593Smuzhiyun if (ia_valid & ATTR_UID)
84*4882a593Smuzhiyun attrs->ia_uid = iattr->ia_uid;
85*4882a593Smuzhiyun if (ia_valid & ATTR_GID)
86*4882a593Smuzhiyun attrs->ia_gid = iattr->ia_gid;
87*4882a593Smuzhiyun if (ia_valid & ATTR_ATIME)
88*4882a593Smuzhiyun attrs->ia_atime = iattr->ia_atime;
89*4882a593Smuzhiyun if (ia_valid & ATTR_MTIME)
90*4882a593Smuzhiyun attrs->ia_mtime = iattr->ia_mtime;
91*4882a593Smuzhiyun if (ia_valid & ATTR_CTIME)
92*4882a593Smuzhiyun attrs->ia_ctime = iattr->ia_ctime;
93*4882a593Smuzhiyun if (ia_valid & ATTR_MODE)
94*4882a593Smuzhiyun kn->mode = iattr->ia_mode;
95*4882a593Smuzhiyun return 0;
96*4882a593Smuzhiyun }
97*4882a593Smuzhiyun
98*4882a593Smuzhiyun /**
99*4882a593Smuzhiyun * kernfs_setattr - set iattr on a node
100*4882a593Smuzhiyun * @kn: target node
101*4882a593Smuzhiyun * @iattr: iattr to set
102*4882a593Smuzhiyun *
103*4882a593Smuzhiyun * Returns 0 on success, -errno on failure.
104*4882a593Smuzhiyun */
kernfs_setattr(struct kernfs_node * kn,const struct iattr * iattr)105*4882a593Smuzhiyun int kernfs_setattr(struct kernfs_node *kn, const struct iattr *iattr)
106*4882a593Smuzhiyun {
107*4882a593Smuzhiyun int ret;
108*4882a593Smuzhiyun
109*4882a593Smuzhiyun mutex_lock(&kernfs_mutex);
110*4882a593Smuzhiyun ret = __kernfs_setattr(kn, iattr);
111*4882a593Smuzhiyun mutex_unlock(&kernfs_mutex);
112*4882a593Smuzhiyun return ret;
113*4882a593Smuzhiyun }
114*4882a593Smuzhiyun
kernfs_iop_setattr(struct dentry * dentry,struct iattr * iattr)115*4882a593Smuzhiyun int kernfs_iop_setattr(struct dentry *dentry, struct iattr *iattr)
116*4882a593Smuzhiyun {
117*4882a593Smuzhiyun struct inode *inode = d_inode(dentry);
118*4882a593Smuzhiyun struct kernfs_node *kn = inode->i_private;
119*4882a593Smuzhiyun int error;
120*4882a593Smuzhiyun
121*4882a593Smuzhiyun if (!kn)
122*4882a593Smuzhiyun return -EINVAL;
123*4882a593Smuzhiyun
124*4882a593Smuzhiyun mutex_lock(&kernfs_mutex);
125*4882a593Smuzhiyun error = setattr_prepare(dentry, iattr);
126*4882a593Smuzhiyun if (error)
127*4882a593Smuzhiyun goto out;
128*4882a593Smuzhiyun
129*4882a593Smuzhiyun error = __kernfs_setattr(kn, iattr);
130*4882a593Smuzhiyun if (error)
131*4882a593Smuzhiyun goto out;
132*4882a593Smuzhiyun
133*4882a593Smuzhiyun /* this ignores size changes */
134*4882a593Smuzhiyun setattr_copy(inode, iattr);
135*4882a593Smuzhiyun
136*4882a593Smuzhiyun out:
137*4882a593Smuzhiyun mutex_unlock(&kernfs_mutex);
138*4882a593Smuzhiyun return error;
139*4882a593Smuzhiyun }
140*4882a593Smuzhiyun
kernfs_iop_listxattr(struct dentry * dentry,char * buf,size_t size)141*4882a593Smuzhiyun ssize_t kernfs_iop_listxattr(struct dentry *dentry, char *buf, size_t size)
142*4882a593Smuzhiyun {
143*4882a593Smuzhiyun struct kernfs_node *kn = kernfs_dentry_node(dentry);
144*4882a593Smuzhiyun struct kernfs_iattrs *attrs;
145*4882a593Smuzhiyun
146*4882a593Smuzhiyun attrs = kernfs_iattrs(kn);
147*4882a593Smuzhiyun if (!attrs)
148*4882a593Smuzhiyun return -ENOMEM;
149*4882a593Smuzhiyun
150*4882a593Smuzhiyun return simple_xattr_list(d_inode(dentry), &attrs->xattrs, buf, size);
151*4882a593Smuzhiyun }
152*4882a593Smuzhiyun
/* Seed a new inode with @mode and "now" for atime/mtime/ctime. */
static inline void set_default_inode_attr(struct inode *inode, umode_t mode)
{
	struct timespec64 now = current_time(inode);

	inode->i_mode = mode;
	inode->i_atime = now;
	inode->i_mtime = now;
	inode->i_ctime = now;
}
159*4882a593Smuzhiyun
set_inode_attr(struct inode * inode,struct kernfs_iattrs * attrs)160*4882a593Smuzhiyun static inline void set_inode_attr(struct inode *inode,
161*4882a593Smuzhiyun struct kernfs_iattrs *attrs)
162*4882a593Smuzhiyun {
163*4882a593Smuzhiyun inode->i_uid = attrs->ia_uid;
164*4882a593Smuzhiyun inode->i_gid = attrs->ia_gid;
165*4882a593Smuzhiyun inode->i_atime = attrs->ia_atime;
166*4882a593Smuzhiyun inode->i_mtime = attrs->ia_mtime;
167*4882a593Smuzhiyun inode->i_ctime = attrs->ia_ctime;
168*4882a593Smuzhiyun }
169*4882a593Smuzhiyun
kernfs_refresh_inode(struct kernfs_node * kn,struct inode * inode)170*4882a593Smuzhiyun static void kernfs_refresh_inode(struct kernfs_node *kn, struct inode *inode)
171*4882a593Smuzhiyun {
172*4882a593Smuzhiyun struct kernfs_iattrs *attrs = kn->iattr;
173*4882a593Smuzhiyun
174*4882a593Smuzhiyun inode->i_mode = kn->mode;
175*4882a593Smuzhiyun if (attrs)
176*4882a593Smuzhiyun /*
177*4882a593Smuzhiyun * kernfs_node has non-default attributes get them from
178*4882a593Smuzhiyun * persistent copy in kernfs_node.
179*4882a593Smuzhiyun */
180*4882a593Smuzhiyun set_inode_attr(inode, attrs);
181*4882a593Smuzhiyun
182*4882a593Smuzhiyun if (kernfs_type(kn) == KERNFS_DIR)
183*4882a593Smuzhiyun set_nlink(inode, kn->dir.subdirs + 2);
184*4882a593Smuzhiyun }
185*4882a593Smuzhiyun
/*
 * kernfs_iop_getattr - ->getattr() for kernfs inodes
 *
 * Refreshes the inode from its kernfs_node under kernfs_mutex, then
 * fills @stat with the generic helper.  Always succeeds.
 */
int kernfs_iop_getattr(const struct path *path, struct kstat *stat,
		       u32 request_mask, unsigned int query_flags)
{
	struct inode *inode = d_inode(path->dentry);
	struct kernfs_node *kn = inode->i_private;

	mutex_lock(&kernfs_mutex);
	kernfs_refresh_inode(kn, inode);
	mutex_unlock(&kernfs_mutex);

	generic_fillattr(inode, stat);

	return 0;
}
199*4882a593Smuzhiyun
kernfs_init_inode(struct kernfs_node * kn,struct inode * inode)200*4882a593Smuzhiyun static void kernfs_init_inode(struct kernfs_node *kn, struct inode *inode)
201*4882a593Smuzhiyun {
202*4882a593Smuzhiyun kernfs_get(kn);
203*4882a593Smuzhiyun inode->i_private = kn;
204*4882a593Smuzhiyun inode->i_mapping->a_ops = &kernfs_aops;
205*4882a593Smuzhiyun inode->i_op = &kernfs_iops;
206*4882a593Smuzhiyun inode->i_generation = kernfs_gen(kn);
207*4882a593Smuzhiyun
208*4882a593Smuzhiyun set_default_inode_attr(inode, kn->mode);
209*4882a593Smuzhiyun kernfs_refresh_inode(kn, inode);
210*4882a593Smuzhiyun
211*4882a593Smuzhiyun /* initialize inode according to type */
212*4882a593Smuzhiyun switch (kernfs_type(kn)) {
213*4882a593Smuzhiyun case KERNFS_DIR:
214*4882a593Smuzhiyun inode->i_op = &kernfs_dir_iops;
215*4882a593Smuzhiyun inode->i_fop = &kernfs_dir_fops;
216*4882a593Smuzhiyun if (kn->flags & KERNFS_EMPTY_DIR)
217*4882a593Smuzhiyun make_empty_dir_inode(inode);
218*4882a593Smuzhiyun break;
219*4882a593Smuzhiyun case KERNFS_FILE:
220*4882a593Smuzhiyun inode->i_size = kn->attr.size;
221*4882a593Smuzhiyun inode->i_fop = &kernfs_file_fops;
222*4882a593Smuzhiyun break;
223*4882a593Smuzhiyun case KERNFS_LINK:
224*4882a593Smuzhiyun inode->i_op = &kernfs_symlink_iops;
225*4882a593Smuzhiyun break;
226*4882a593Smuzhiyun default:
227*4882a593Smuzhiyun BUG();
228*4882a593Smuzhiyun }
229*4882a593Smuzhiyun
230*4882a593Smuzhiyun unlock_new_inode(inode);
231*4882a593Smuzhiyun }
232*4882a593Smuzhiyun
233*4882a593Smuzhiyun /**
234*4882a593Smuzhiyun * kernfs_get_inode - get inode for kernfs_node
235*4882a593Smuzhiyun * @sb: super block
236*4882a593Smuzhiyun * @kn: kernfs_node to allocate inode for
237*4882a593Smuzhiyun *
238*4882a593Smuzhiyun * Get inode for @kn. If such inode doesn't exist, a new inode is
239*4882a593Smuzhiyun * allocated and basics are initialized. New inode is returned
240*4882a593Smuzhiyun * locked.
241*4882a593Smuzhiyun *
242*4882a593Smuzhiyun * LOCKING:
243*4882a593Smuzhiyun * Kernel thread context (may sleep).
244*4882a593Smuzhiyun *
245*4882a593Smuzhiyun * RETURNS:
246*4882a593Smuzhiyun * Pointer to allocated inode on success, NULL on failure.
247*4882a593Smuzhiyun */
kernfs_get_inode(struct super_block * sb,struct kernfs_node * kn)248*4882a593Smuzhiyun struct inode *kernfs_get_inode(struct super_block *sb, struct kernfs_node *kn)
249*4882a593Smuzhiyun {
250*4882a593Smuzhiyun struct inode *inode;
251*4882a593Smuzhiyun
252*4882a593Smuzhiyun inode = iget_locked(sb, kernfs_ino(kn));
253*4882a593Smuzhiyun if (inode && (inode->i_state & I_NEW))
254*4882a593Smuzhiyun kernfs_init_inode(kn, inode);
255*4882a593Smuzhiyun
256*4882a593Smuzhiyun return inode;
257*4882a593Smuzhiyun }
258*4882a593Smuzhiyun
259*4882a593Smuzhiyun /*
260*4882a593Smuzhiyun * The kernfs_node serves as both an inode and a directory entry for
261*4882a593Smuzhiyun * kernfs. To prevent the kernfs inode numbers from being freed
262*4882a593Smuzhiyun * prematurely we take a reference to kernfs_node from the kernfs inode. A
263*4882a593Smuzhiyun * super_operations.evict_inode() implementation is needed to drop that
264*4882a593Smuzhiyun * reference upon inode destruction.
265*4882a593Smuzhiyun */
kernfs_evict_inode(struct inode * inode)266*4882a593Smuzhiyun void kernfs_evict_inode(struct inode *inode)
267*4882a593Smuzhiyun {
268*4882a593Smuzhiyun struct kernfs_node *kn = inode->i_private;
269*4882a593Smuzhiyun
270*4882a593Smuzhiyun truncate_inode_pages_final(&inode->i_data);
271*4882a593Smuzhiyun clear_inode(inode);
272*4882a593Smuzhiyun kernfs_put(kn);
273*4882a593Smuzhiyun }
274*4882a593Smuzhiyun
kernfs_iop_permission(struct inode * inode,int mask)275*4882a593Smuzhiyun int kernfs_iop_permission(struct inode *inode, int mask)
276*4882a593Smuzhiyun {
277*4882a593Smuzhiyun struct kernfs_node *kn;
278*4882a593Smuzhiyun
279*4882a593Smuzhiyun if (mask & MAY_NOT_BLOCK)
280*4882a593Smuzhiyun return -ECHILD;
281*4882a593Smuzhiyun
282*4882a593Smuzhiyun kn = inode->i_private;
283*4882a593Smuzhiyun
284*4882a593Smuzhiyun mutex_lock(&kernfs_mutex);
285*4882a593Smuzhiyun kernfs_refresh_inode(kn, inode);
286*4882a593Smuzhiyun mutex_unlock(&kernfs_mutex);
287*4882a593Smuzhiyun
288*4882a593Smuzhiyun return generic_permission(inode, mask);
289*4882a593Smuzhiyun }
290*4882a593Smuzhiyun
/*
 * kernfs_xattr_get - fetch xattr @name from @kn into @value
 *
 * Does not allocate iattrs: a node without them has no xattrs, so
 * report -ENODATA directly.
 */
int kernfs_xattr_get(struct kernfs_node *kn, const char *name,
		     void *value, size_t size)
{
	struct kernfs_iattrs *attrs;

	attrs = kernfs_iattrs_noalloc(kn);
	if (!attrs)
		return -ENODATA;

	return simple_xattr_get(&attrs->xattrs, name, value, size);
}
300*4882a593Smuzhiyun
/*
 * kernfs_xattr_set - set xattr @name on @kn
 *
 * Allocates iattrs on demand; returns -ENOMEM if that fails, otherwise
 * whatever simple_xattr_set() returns.
 */
int kernfs_xattr_set(struct kernfs_node *kn, const char *name,
		     const void *value, size_t size, int flags)
{
	struct kernfs_iattrs *attrs;

	attrs = kernfs_iattrs(kn);
	if (!attrs)
		return -ENOMEM;

	return simple_xattr_set(&attrs->xattrs, name, value, size, flags, NULL);
}
310*4882a593Smuzhiyun
/* xattr_handler ->get: expand the prefix and forward to kernfs_xattr_get(). */
static int kernfs_vfs_xattr_get(const struct xattr_handler *handler,
				struct dentry *unused, struct inode *inode,
				const char *suffix, void *value, size_t size,
				int flags)
{
	struct kernfs_node *kn = inode->i_private;
	const char *name = xattr_full_name(handler, suffix);

	return kernfs_xattr_get(kn, name, value, size);
}
321*4882a593Smuzhiyun
/* xattr_handler ->set: expand the prefix and forward to kernfs_xattr_set(). */
static int kernfs_vfs_xattr_set(const struct xattr_handler *handler,
				struct dentry *unused, struct inode *inode,
				const char *suffix, const void *value,
				size_t size, int flags)
{
	struct kernfs_node *kn = inode->i_private;
	const char *name = xattr_full_name(handler, suffix);

	return kernfs_xattr_set(kn, name, value, size, flags);
}
332*4882a593Smuzhiyun
/*
 * Add or replace a "user." xattr while enforcing per-node limits on
 * both the entry count (KERNFS_MAX_USER_XATTRS) and the cumulative
 * value size (KERNFS_USER_XATTR_SIZE_LIMIT).
 *
 * Both counters are charged optimistically *before* the set and
 * unwound via the labels below on failure.  On a successful replace,
 * only the old value's size is refunded (together with the count bump,
 * since no new entry was actually added).
 */
static int kernfs_vfs_user_xattr_add(struct kernfs_node *kn,
				     const char *full_name,
				     struct simple_xattrs *xattrs,
				     const void *value, size_t size, int flags)
{
	atomic_t *sz = &kn->iattr->user_xattr_size;
	atomic_t *nr = &kn->iattr->nr_user_xattrs;
	ssize_t removed_size;
	int ret;

	/* optimistically charge one entry against the count limit */
	if (atomic_inc_return(nr) > KERNFS_MAX_USER_XATTRS) {
		ret = -ENOSPC;
		goto dec_count_out;
	}

	/* optimistically charge @size against the size limit */
	if (atomic_add_return(size, sz) > KERNFS_USER_XATTR_SIZE_LIMIT) {
		ret = -ENOSPC;
		goto dec_size_out;
	}

	ret = simple_xattr_set(xattrs, full_name, value, size, flags,
			       &removed_size);

	if (!ret && removed_size >= 0)
		/* replaced an existing entry: refund its old charge below */
		size = removed_size;
	else if (!ret)
		/* brand-new entry added: keep the full charge */
		return 0;
	/* failure (or replace refund): unwind the optimistic charges */
dec_size_out:
	atomic_sub(size, sz);
dec_count_out:
	atomic_dec(nr);
	return ret;
}
366*4882a593Smuzhiyun
/*
 * Remove a "user." xattr and release its charge against the per-node
 * count and size limits.  @removed_size reported by simple_xattr_set()
 * is >= 0 only when an entry was actually removed.
 */
static int kernfs_vfs_user_xattr_rm(struct kernfs_node *kn,
				    const char *full_name,
				    struct simple_xattrs *xattrs,
				    const void *value, size_t size, int flags)
{
	atomic_t *xattr_size = &kn->iattr->user_xattr_size;
	atomic_t *xattr_count = &kn->iattr->nr_user_xattrs;
	ssize_t removed_size;
	int ret;

	ret = simple_xattr_set(xattrs, full_name, value, size, flags,
			       &removed_size);

	if (removed_size >= 0) {
		/* give the removed entry's charge back to the limits */
		atomic_sub(removed_size, xattr_size);
		atomic_dec(xattr_count);
	}

	return ret;
}
387*4882a593Smuzhiyun
/*
 * xattr_handler ->set for the "user." namespace.
 *
 * Only honored when the kernfs root opted in via
 * KERNFS_ROOT_SUPPORT_USER_XATTR; a NULL @value means removal,
 * anything else is an add/replace — both paths enforce the per-node
 * user-xattr limits.
 */
static int kernfs_vfs_user_xattr_set(const struct xattr_handler *handler,
				     struct dentry *unused, struct inode *inode,
				     const char *suffix, const void *value,
				     size_t size, int flags)
{
	struct kernfs_node *kn = inode->i_private;
	const char *full_name = xattr_full_name(handler, suffix);
	struct kernfs_iattrs *attrs;

	if (!(kernfs_root(kn)->flags & KERNFS_ROOT_SUPPORT_USER_XATTR))
		return -EOPNOTSUPP;

	attrs = kernfs_iattrs(kn);
	if (!attrs)
		return -ENOMEM;

	if (!value)
		return kernfs_vfs_user_xattr_rm(kn, full_name, &attrs->xattrs,
						value, size, flags);

	return kernfs_vfs_user_xattr_add(kn, full_name, &attrs->xattrs,
					 value, size, flags);
}
412*4882a593Smuzhiyun
/* "trusted." namespace: plain get/set, no extra policy. */
static const struct xattr_handler kernfs_trusted_xattr_handler = {
	.prefix = XATTR_TRUSTED_PREFIX,
	.get = kernfs_vfs_xattr_get,
	.set = kernfs_vfs_xattr_set,
};

/* "security." namespace: plain get/set, no extra policy. */
static const struct xattr_handler kernfs_security_xattr_handler = {
	.prefix = XATTR_SECURITY_PREFIX,
	.get = kernfs_vfs_xattr_get,
	.set = kernfs_vfs_xattr_set,
};

/* "user." namespace: set path is gated and limit-enforced. */
static const struct xattr_handler kernfs_user_xattr_handler = {
	.prefix = XATTR_USER_PREFIX,
	.get = kernfs_vfs_xattr_get,
	.set = kernfs_vfs_user_xattr_set,
};

/* NULL-terminated handler table wired into the kernfs super block. */
const struct xattr_handler *kernfs_xattr_handlers[] = {
	&kernfs_trusted_xattr_handler,
	&kernfs_security_xattr_handler,
	&kernfs_user_xattr_handler,
	NULL
};
437