// SPDX-License-Identifier: GPL-2.0-only
/*
 * QNX4 file system, Linux implementation.
 *
 * Version : 0.2.1
 *
 * Using parts of the xiafs filesystem.
 *
 * History :
 *
 * 01-06-1998 by Richard Frowijn : first release.
 * 20-06-1998 by Frank Denis : Linux 2.1.99+ support, boot signature, misc.
 * 30-06-1998 by Frank Denis : first step to write inodes.
 */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/highuid.h>
#include <linux/pagemap.h>
#include <linux/buffer_head.h>
#include <linux/writeback.h>
#include <linux/statfs.h>
#include "qnx4.h"

#define QNX4_VERSION  4
#define QNX4_BMNAME   ".bitmap"

static const struct super_operations qnx4_sops;

static struct inode *qnx4_alloc_inode(struct super_block *sb);
static void qnx4_free_inode(struct inode *inode);
static int qnx4_remount(struct super_block *sb, int *flags, char *data);
static int qnx4_statfs(struct dentry *, struct kstatfs *);

static const struct super_operations qnx4_sops =
{
	.alloc_inode	= qnx4_alloc_inode,
	.free_inode	= qnx4_free_inode,
	.statfs		= qnx4_statfs,
	.remount_fs	= qnx4_remount,
};

static int qnx4_remount(struct super_block *sb, int *flags, char *data)
{
	struct qnx4_sb_info *qs;

	sync_filesystem(sb);
	qs = qnx4_sb(sb);
	qs->Version = QNX4_VERSION;
	*flags |= SB_RDONLY;
	return 0;
}

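/*
 * Map a logical file block to a physical block for the buffer layer.
 * The filesystem is read-only, so 'create' is ignored and blocks are
 * never allocated; if qnx4_block_map() finds nothing, bh stays unmapped.
 */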
static int qnx4_get_block( struct inode *inode, sector_t iblock, struct buffer_head *bh, int create )
{
	unsigned long phys;

	QNX4DEBUG((KERN_INFO "qnx4: qnx4_get_block inode=[%ld] iblock=[%ld]\n",inode->i_ino,iblock));

	phys = qnx4_block_map( inode, iblock );
	if ( phys ) {
		// logical block is before EOF
		map_bh(bh, inode->i_sb, phys);
	}
	return 0;
}

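/*
 * If *offset falls inside this extent, return the physical block number;
 * otherwise subtract the extent size from *offset and return 0 so the
 * caller can try the next extent.
 */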
static inline u32 try_extent(qnx4_xtnt_t *extent, u32 *offset)
{
	u32 size = le32_to_cpu(extent->xtnt_size);
	if (*offset < size)
		return le32_to_cpu(extent->xtnt_blk) + *offset - 1;
	*offset -= size;
	return 0;
}

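/*
 * Translate a logical block of an inode into a physical block number.
 * The first extent is stored in the inode entry itself; further extents
 * live in a chain of "xtnt" blocks (signature "IamXblk") walked below.
 */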
unsigned long qnx4_block_map( struct inode *inode, long iblock )
{
	int ix;
	long i_xblk;
	struct buffer_head *bh = NULL;
	struct qnx4_xblk *xblk = NULL;
	struct qnx4_inode_entry *qnx4_inode = qnx4_raw_inode(inode);
	u16 nxtnt = le16_to_cpu(qnx4_inode->di_num_xtnts);
	u32 offset = iblock;
	u32 block = try_extent(&qnx4_inode->di_first_xtnt, &offset);

	if (block) {
		// iblock is in the first extent. This is easy.
	} else {
		// iblock is beyond first extent. We have to follow the extent chain.
		i_xblk = le32_to_cpu(qnx4_inode->di_xblk);
		ix = 0;
		while ( --nxtnt > 0 ) {
			if ( ix == 0 ) {
				// read next xtnt block.
				bh = sb_bread(inode->i_sb, i_xblk - 1);
				if ( !bh ) {
					QNX4DEBUG((KERN_ERR "qnx4: I/O error reading xtnt block [%ld]\n", i_xblk - 1));
					return -EIO;
				}
				xblk = (struct qnx4_xblk*)bh->b_data;
				if ( memcmp( xblk->xblk_signature, "IamXblk", 7 ) ) {
					QNX4DEBUG((KERN_ERR "qnx4: block at %ld is not a valid xtnt\n", i_xblk));
					brelse( bh );
					return -EIO;
				}
			}
			block = try_extent(&xblk->xblk_xtnts[ix], &offset);
			if (block) {
				// got it!
				break;
			}
			if ( ++ix >= xblk->xblk_num_xtnts ) {
				i_xblk = le32_to_cpu(xblk->xblk_next_xblk);
				ix = 0;
				brelse( bh );
				bh = NULL;
			}
		}
		if ( bh )
			brelse( bh );
	}

	QNX4DEBUG((KERN_INFO "qnx4: mapping block %ld of inode %ld = %ld\n",iblock,inode->i_ino,block));
	return block;
}

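/*
 * statfs: f_blocks is derived from the size of the bitmap file, where
 * each byte of the bitmap covers eight filesystem blocks.
 */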
static int qnx4_statfs(struct dentry *dentry, struct kstatfs *buf)
{
	struct super_block *sb = dentry->d_sb;
	u64 id = huge_encode_dev(sb->s_bdev->bd_dev);

	buf->f_type    = sb->s_magic;
	buf->f_bsize   = sb->s_blocksize;
	buf->f_blocks  = le32_to_cpu(qnx4_sb(sb)->BitMap->di_size) * 8;
	buf->f_bfree   = qnx4_count_free_blocks(sb);
	buf->f_bavail  = buf->f_bfree;
	buf->f_namelen = QNX4_NAME_MAX;
	buf->f_fsid    = u64_to_fsid(id);

	return 0;
}

/*
 * Check the root directory of the filesystem to make sure
 * it really _is_ a qnx4 filesystem, and locate the ".bitmap"
 * inode that the rest of the code relies on.
 */
static const char *qnx4_checkroot(struct super_block *sb,
				  struct qnx4_super_block *s)
{
	struct buffer_head *bh;
	struct qnx4_inode_entry *rootdir;
	int rd, rl;
	int i, j;

	if (s->RootDir.di_fname[0] != '/' || s->RootDir.di_fname[1] != '\0')
		return "no qnx4 filesystem (no root dir).";
	QNX4DEBUG((KERN_NOTICE "QNX4 filesystem found on dev %s.\n", sb->s_id));
	rd = le32_to_cpu(s->RootDir.di_first_xtnt.xtnt_blk) - 1;
	rl = le32_to_cpu(s->RootDir.di_first_xtnt.xtnt_size);
	for (j = 0; j < rl; j++) {
		bh = sb_bread(sb, rd + j);	/* root dir, first block */
		if (bh == NULL)
			return "unable to read root entry.";
		rootdir = (struct qnx4_inode_entry *) bh->b_data;
		for (i = 0; i < QNX4_INODES_PER_BLOCK; i++, rootdir++) {
			QNX4DEBUG((KERN_INFO "rootdir entry found : [%s]\n", rootdir->di_fname));
			if (strcmp(rootdir->di_fname, QNX4_BMNAME) != 0)
				continue;
			qnx4_sb(sb)->BitMap = kmemdup(rootdir,
						      sizeof(struct qnx4_inode_entry),
						      GFP_KERNEL);
			brelse(bh);
			if (!qnx4_sb(sb)->BitMap)
				return "not enough memory for bitmap inode";
			/* keep bitmap inode known */
			return NULL;
		}
		brelse(bh);
	}
	return "bitmap file not found.";
}

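/*
 * Fill in the in-core super block: read the on-disk superblock from
 * block 1, validate the root directory via qnx4_checkroot(), and set up
 * the root inode. The filesystem is always mounted read-only.
 */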
static int qnx4_fill_super(struct super_block *s, void *data, int silent)
{
	struct buffer_head *bh;
	struct inode *root;
	const char *errmsg;
	struct qnx4_sb_info *qs;

	qs = kzalloc(sizeof(struct qnx4_sb_info), GFP_KERNEL);
	if (!qs)
		return -ENOMEM;
	s->s_fs_info = qs;

	sb_set_blocksize(s, QNX4_BLOCK_SIZE);

	s->s_op = &qnx4_sops;
	s->s_magic = QNX4_SUPER_MAGIC;
	s->s_flags |= SB_RDONLY;	/* Yup, read-only yet */
	s->s_time_min = 0;
	s->s_time_max = U32_MAX;

	/* Check the superblock signature. Since the qnx4 code is
	   dangerous, we should leave as quickly as possible
	   if we don't belong here... */
	bh = sb_bread(s, 1);
	if (!bh) {
		printk(KERN_ERR "qnx4: unable to read the superblock\n");
		return -EINVAL;
	}

	/* check before allocating dentries, inodes, .. */
	errmsg = qnx4_checkroot(s, (struct qnx4_super_block *) bh->b_data);
	brelse(bh);
	if (errmsg != NULL) {
		if (!silent)
			printk(KERN_ERR "qnx4: %s\n", errmsg);
		return -EINVAL;
	}

	/* does root not have inode number QNX4_ROOT_INO ?? */
	root = qnx4_iget(s, QNX4_ROOT_INO * QNX4_INODES_PER_BLOCK);
	if (IS_ERR(root)) {
		printk(KERN_ERR "qnx4: get inode failed\n");
		return PTR_ERR(root);
	}

	s->s_root = d_make_root(root);
	if (s->s_root == NULL)
		return -ENOMEM;

	return 0;
}

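/*
 * Tear down the superblock: let the generic block-super code release the
 * device first, then free the cached bitmap inode copy and the private
 * sb info.
 */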
static void qnx4_kill_sb(struct super_block *sb)
{
	struct qnx4_sb_info *qs = qnx4_sb(sb);
	kill_block_super(sb);
	if (qs) {
		kfree(qs->BitMap);
		kfree(qs);
	}
}

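/*
 * Read-only address space operations: both readpage and bmap resolve
 * blocks through qnx4_get_block().
 */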
static int qnx4_readpage(struct file *file, struct page *page)
{
	return block_read_full_page(page,qnx4_get_block);
}

static sector_t qnx4_bmap(struct address_space *mapping, sector_t block)
{
	return generic_block_bmap(mapping,block,qnx4_get_block);
}
static const struct address_space_operations qnx4_aops = {
	.readpage	= qnx4_readpage,
	.bmap		= qnx4_bmap
};

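/*
 * Look up (or create) the VFS inode for an on-disk inode number, read the
 * raw qnx4 directory entry that backs it, and initialise mode, ownership,
 * timestamps and the appropriate operations for regular files,
 * directories and symlinks.
 */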
struct inode *qnx4_iget(struct super_block *sb, unsigned long ino)
{
	struct buffer_head *bh;
	struct qnx4_inode_entry *raw_inode;
	int block;
	struct qnx4_inode_entry *qnx4_inode;
	struct inode *inode;

	inode = iget_locked(sb, ino);
	if (!inode)
		return ERR_PTR(-ENOMEM);
	if (!(inode->i_state & I_NEW))
		return inode;

	qnx4_inode = qnx4_raw_inode(inode);
	inode->i_mode = 0;

	QNX4DEBUG((KERN_INFO "reading inode : [%lu]\n", ino));
	if (!ino) {
		printk(KERN_ERR "qnx4: bad inode number on dev %s: %lu is "
				"out of range\n",
		       sb->s_id, ino);
		iget_failed(inode);
		return ERR_PTR(-EIO);
	}
	block = ino / QNX4_INODES_PER_BLOCK;

	if (!(bh = sb_bread(sb, block))) {
		printk(KERN_ERR "qnx4: major problem: unable to read inode from dev "
		       "%s\n", sb->s_id);
		iget_failed(inode);
		return ERR_PTR(-EIO);
	}
	raw_inode = ((struct qnx4_inode_entry *) bh->b_data) +
	    (ino % QNX4_INODES_PER_BLOCK);

	inode->i_mode    = le16_to_cpu(raw_inode->di_mode);
	i_uid_write(inode, (uid_t)le16_to_cpu(raw_inode->di_uid));
	i_gid_write(inode, (gid_t)le16_to_cpu(raw_inode->di_gid));
	set_nlink(inode, le16_to_cpu(raw_inode->di_nlink));
	inode->i_size    = le32_to_cpu(raw_inode->di_size);
	inode->i_mtime.tv_sec   = le32_to_cpu(raw_inode->di_mtime);
	inode->i_mtime.tv_nsec = 0;
	inode->i_atime.tv_sec   = le32_to_cpu(raw_inode->di_atime);
	inode->i_atime.tv_nsec = 0;
	inode->i_ctime.tv_sec   = le32_to_cpu(raw_inode->di_ctime);
	inode->i_ctime.tv_nsec = 0;
	inode->i_blocks  = le32_to_cpu(raw_inode->di_first_xtnt.xtnt_size);

	memcpy(qnx4_inode, raw_inode, QNX4_DIR_ENTRY_SIZE);
	if (S_ISREG(inode->i_mode)) {
		inode->i_fop = &generic_ro_fops;
		inode->i_mapping->a_ops = &qnx4_aops;
		qnx4_i(inode)->mmu_private = inode->i_size;
	} else if (S_ISDIR(inode->i_mode)) {
		inode->i_op = &qnx4_dir_inode_operations;
		inode->i_fop = &qnx4_dir_operations;
	} else if (S_ISLNK(inode->i_mode)) {
		inode->i_op = &page_symlink_inode_operations;
		inode_nohighmem(inode);
		inode->i_mapping->a_ops = &qnx4_aops;
		qnx4_i(inode)->mmu_private = inode->i_size;
	} else {
		printk(KERN_ERR "qnx4: bad inode %lu on dev %s\n",
			ino, sb->s_id);
		iget_failed(inode);
		brelse(bh);
		return ERR_PTR(-EIO);
	}
	brelse(bh);
	unlock_new_inode(inode);
	return inode;
}

static struct kmem_cache *qnx4_inode_cachep;

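/*
 * Per-inode allocation: qnx4_inode_info objects come from a dedicated
 * slab cache, constructed once per slab object via init_once() and
 * returned to the cache in qnx4_free_inode().
 */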
static struct inode *qnx4_alloc_inode(struct super_block *sb)
{
	struct qnx4_inode_info *ei;
	ei = kmem_cache_alloc(qnx4_inode_cachep, GFP_KERNEL);
	if (!ei)
		return NULL;
	return &ei->vfs_inode;
}

static void qnx4_free_inode(struct inode *inode)
{
	kmem_cache_free(qnx4_inode_cachep, qnx4_i(inode));
}

static void init_once(void *foo)
{
	struct qnx4_inode_info *ei = (struct qnx4_inode_info *) foo;

	inode_init_once(&ei->vfs_inode);
}

static int init_inodecache(void)
{
	qnx4_inode_cachep = kmem_cache_create("qnx4_inode_cache",
					     sizeof(struct qnx4_inode_info),
					     0, (SLAB_RECLAIM_ACCOUNT|
						SLAB_MEM_SPREAD|SLAB_ACCOUNT),
					     init_once);
	if (qnx4_inode_cachep == NULL)
		return -ENOMEM;
	return 0;
}

static void destroy_inodecache(void)
{
	/*
	 * Make sure all delayed rcu free inodes are flushed before we
	 * destroy cache.
	 */
	rcu_barrier();
	kmem_cache_destroy(qnx4_inode_cachep);
}

static struct dentry *qnx4_mount(struct file_system_type *fs_type,
	int flags, const char *dev_name, void *data)
{
	return mount_bdev(fs_type, flags, dev_name, data, qnx4_fill_super);
}

static struct file_system_type qnx4_fs_type = {
	.owner		= THIS_MODULE,
	.name		= "qnx4",
	.mount		= qnx4_mount,
	.kill_sb	= qnx4_kill_sb,
	.fs_flags	= FS_REQUIRES_DEV,
};
MODULE_ALIAS_FS("qnx4");

static int __init init_qnx4_fs(void)
{
	int err;

	err = init_inodecache();
	if (err)
		return err;

	err = register_filesystem(&qnx4_fs_type);
	if (err) {
		destroy_inodecache();
		return err;
	}

	printk(KERN_INFO "QNX4 filesystem 0.2.3 registered.\n");
	return 0;
}

static void __exit exit_qnx4_fs(void)
{
	unregister_filesystem(&qnx4_fs_type);
	destroy_inodecache();
}

module_init(init_qnx4_fs)
module_exit(exit_qnx4_fs)
MODULE_LICENSE("GPL");
MODULE_IMPORT_NS(ANDROID_GKI_VFS_EXPORT_ONLY);