xref: /OK3568_Linux_fs/kernel/fs/hpfs/map.c (revision 4882a59341e53eb6f0b4789bf948001014eff981)
1*4882a593Smuzhiyun // SPDX-License-Identifier: GPL-2.0
2*4882a593Smuzhiyun /*
3*4882a593Smuzhiyun  *  linux/fs/hpfs/map.c
4*4882a593Smuzhiyun  *
5*4882a593Smuzhiyun  *  Mikulas Patocka (mikulas@artax.karlin.mff.cuni.cz), 1998-1999
6*4882a593Smuzhiyun  *
7*4882a593Smuzhiyun  *  mapping structures to memory with some minimal checks
8*4882a593Smuzhiyun  */
9*4882a593Smuzhiyun 
10*4882a593Smuzhiyun #include "hpfs_fn.h"
11*4882a593Smuzhiyun 
hpfs_map_dnode_bitmap(struct super_block * s,struct quad_buffer_head * qbh)12*4882a593Smuzhiyun __le32 *hpfs_map_dnode_bitmap(struct super_block *s, struct quad_buffer_head *qbh)
13*4882a593Smuzhiyun {
14*4882a593Smuzhiyun 	return hpfs_map_4sectors(s, hpfs_sb(s)->sb_dmap, qbh, 0);
15*4882a593Smuzhiyun }
16*4882a593Smuzhiyun 
hpfs_map_bitmap(struct super_block * s,unsigned bmp_block,struct quad_buffer_head * qbh,char * id)17*4882a593Smuzhiyun __le32 *hpfs_map_bitmap(struct super_block *s, unsigned bmp_block,
18*4882a593Smuzhiyun 			 struct quad_buffer_head *qbh, char *id)
19*4882a593Smuzhiyun {
20*4882a593Smuzhiyun 	secno sec;
21*4882a593Smuzhiyun 	__le32 *ret;
22*4882a593Smuzhiyun 	unsigned n_bands = (hpfs_sb(s)->sb_fs_size + 0x3fff) >> 14;
23*4882a593Smuzhiyun 	if (hpfs_sb(s)->sb_chk) if (bmp_block >= n_bands) {
24*4882a593Smuzhiyun 		hpfs_error(s, "hpfs_map_bitmap called with bad parameter: %08x at %s", bmp_block, id);
25*4882a593Smuzhiyun 		return NULL;
26*4882a593Smuzhiyun 	}
27*4882a593Smuzhiyun 	sec = le32_to_cpu(hpfs_sb(s)->sb_bmp_dir[bmp_block]);
28*4882a593Smuzhiyun 	if (!sec || sec > hpfs_sb(s)->sb_fs_size-4) {
29*4882a593Smuzhiyun 		hpfs_error(s, "invalid bitmap block pointer %08x -> %08x at %s", bmp_block, sec, id);
30*4882a593Smuzhiyun 		return NULL;
31*4882a593Smuzhiyun 	}
32*4882a593Smuzhiyun 	ret = hpfs_map_4sectors(s, sec, qbh, 4);
33*4882a593Smuzhiyun 	if (ret) hpfs_prefetch_bitmap(s, bmp_block + 1);
34*4882a593Smuzhiyun 	return ret;
35*4882a593Smuzhiyun }
36*4882a593Smuzhiyun 
hpfs_prefetch_bitmap(struct super_block * s,unsigned bmp_block)37*4882a593Smuzhiyun void hpfs_prefetch_bitmap(struct super_block *s, unsigned bmp_block)
38*4882a593Smuzhiyun {
39*4882a593Smuzhiyun 	unsigned to_prefetch, next_prefetch;
40*4882a593Smuzhiyun 	unsigned n_bands = (hpfs_sb(s)->sb_fs_size + 0x3fff) >> 14;
41*4882a593Smuzhiyun 	if (unlikely(bmp_block >= n_bands))
42*4882a593Smuzhiyun 		return;
43*4882a593Smuzhiyun 	to_prefetch = le32_to_cpu(hpfs_sb(s)->sb_bmp_dir[bmp_block]);
44*4882a593Smuzhiyun 	if (unlikely(bmp_block + 1 >= n_bands))
45*4882a593Smuzhiyun 		next_prefetch = 0;
46*4882a593Smuzhiyun 	else
47*4882a593Smuzhiyun 		next_prefetch = le32_to_cpu(hpfs_sb(s)->sb_bmp_dir[bmp_block + 1]);
48*4882a593Smuzhiyun 	hpfs_prefetch_sectors(s, to_prefetch, 4 + 4 * (to_prefetch + 4 == next_prefetch));
49*4882a593Smuzhiyun }
50*4882a593Smuzhiyun 
/*
 * Load first code page into kernel memory, return pointer to 256-byte array,
 * first 128 bytes are uppercasing table for chars 128-255, next 128 bytes are
 * lowercasing table
 */

/*
 * Load the first code page from the code page directory at sector @cps.
 *
 * Returns a kmalloc'ed 256-byte table (caller frees): bytes 0-127 are the
 * on-disk uppercasing table for chars 128-255, bytes 128-255 are a
 * lowercasing table derived from it below.  Returns NULL on I/O error,
 * bad magic, an empty directory, or out-of-range on-disk indices.
 */
unsigned char *hpfs_load_code_page(struct super_block *s, secno cps)
{
	struct buffer_head *bh;
	secno cpds;		/* sector holding the code page data */
	unsigned cpi;		/* index of the code page within that sector */
	unsigned char *ptr;
	unsigned char *cp_table;
	int i;
	struct code_page_data *cpd;
	struct code_page_directory *cp = hpfs_map_sector(s, cps, &bh, 0);
	if (!cp) return NULL;
	if (le32_to_cpu(cp->magic) != CP_DIR_MAGIC) {
		pr_err("Code page directory magic doesn't match (magic = %08x)\n",
			le32_to_cpu(cp->magic));
		brelse(bh);
		return NULL;
	}
	if (!le32_to_cpu(cp->n_code_pages)) {
		pr_err("n_code_pages == 0\n");
		brelse(bh);
		return NULL;
	}
	/* only the first directory entry is used; note where its data lives */
	cpds = le32_to_cpu(cp->array[0].code_page_data);
	cpi = le16_to_cpu(cp->array[0].index);
	brelse(bh);

	/* cpd->offs[] indexed below has room for only 3 entries */
	if (cpi >= 3) {
		pr_err("Code page index out of array\n");
		return NULL;
	}

	if (!(cpd = hpfs_map_sector(s, cpds, &bh, 0))) return NULL;
	/*
	 * The 128-byte table read below starts at offs + 6; 0x178 is the
	 * largest offset keeping that read inside the 512-byte sector
	 * (0x178 + 6 + 128 = 0x1fe <= 0x200).
	 */
	if (le16_to_cpu(cpd->offs[cpi]) > 0x178) {
		pr_err("Code page index out of sector\n");
		brelse(bh);
		return NULL;
	}
	ptr = (unsigned char *)cpd + le16_to_cpu(cpd->offs[cpi]) + 6;
	if (!(cp_table = kmalloc(256, GFP_KERNEL))) {
		pr_err("out of memory for code page table\n");
		brelse(bh);
		return NULL;
	}
	memcpy(cp_table, ptr, 128);
	brelse(bh);

	/* Try to build lowercasing table from uppercasing one */

	/* identity by default; then invert the uppercase mapping where it
	   maps a high character to a (different) high character */
	for (i=128; i<256; i++) cp_table[i]=i;
	for (i=128; i<256; i++) if (cp_table[i-128]!=i && cp_table[i-128]>=128)
		cp_table[cp_table[i-128]] = i;

	return cp_table;
}
111*4882a593Smuzhiyun 
hpfs_load_bitmap_directory(struct super_block * s,secno bmp)112*4882a593Smuzhiyun __le32 *hpfs_load_bitmap_directory(struct super_block *s, secno bmp)
113*4882a593Smuzhiyun {
114*4882a593Smuzhiyun 	struct buffer_head *bh;
115*4882a593Smuzhiyun 	int n = (hpfs_sb(s)->sb_fs_size + 0x200000 - 1) >> 21;
116*4882a593Smuzhiyun 	int i;
117*4882a593Smuzhiyun 	__le32 *b;
118*4882a593Smuzhiyun 	if (!(b = kmalloc_array(n, 512, GFP_KERNEL))) {
119*4882a593Smuzhiyun 		pr_err("can't allocate memory for bitmap directory\n");
120*4882a593Smuzhiyun 		return NULL;
121*4882a593Smuzhiyun 	}
122*4882a593Smuzhiyun 	for (i=0;i<n;i++) {
123*4882a593Smuzhiyun 		__le32 *d = hpfs_map_sector(s, bmp+i, &bh, n - i - 1);
124*4882a593Smuzhiyun 		if (!d) {
125*4882a593Smuzhiyun 			kfree(b);
126*4882a593Smuzhiyun 			return NULL;
127*4882a593Smuzhiyun 		}
128*4882a593Smuzhiyun 		memcpy((char *)b + 512 * i, d, 512);
129*4882a593Smuzhiyun 		brelse(bh);
130*4882a593Smuzhiyun 	}
131*4882a593Smuzhiyun 	return b;
132*4882a593Smuzhiyun }
133*4882a593Smuzhiyun 
hpfs_load_hotfix_map(struct super_block * s,struct hpfs_spare_block * spareblock)134*4882a593Smuzhiyun void hpfs_load_hotfix_map(struct super_block *s, struct hpfs_spare_block *spareblock)
135*4882a593Smuzhiyun {
136*4882a593Smuzhiyun 	struct quad_buffer_head qbh;
137*4882a593Smuzhiyun 	__le32 *directory;
138*4882a593Smuzhiyun 	u32 n_hotfixes, n_used_hotfixes;
139*4882a593Smuzhiyun 	unsigned i;
140*4882a593Smuzhiyun 
141*4882a593Smuzhiyun 	n_hotfixes = le32_to_cpu(spareblock->n_spares);
142*4882a593Smuzhiyun 	n_used_hotfixes = le32_to_cpu(spareblock->n_spares_used);
143*4882a593Smuzhiyun 
144*4882a593Smuzhiyun 	if (n_hotfixes > 256 || n_used_hotfixes > n_hotfixes) {
145*4882a593Smuzhiyun 		hpfs_error(s, "invalid number of hotfixes: %u, used: %u", n_hotfixes, n_used_hotfixes);
146*4882a593Smuzhiyun 		return;
147*4882a593Smuzhiyun 	}
148*4882a593Smuzhiyun 	if (!(directory = hpfs_map_4sectors(s, le32_to_cpu(spareblock->hotfix_map), &qbh, 0))) {
149*4882a593Smuzhiyun 		hpfs_error(s, "can't load hotfix map");
150*4882a593Smuzhiyun 		return;
151*4882a593Smuzhiyun 	}
152*4882a593Smuzhiyun 	for (i = 0; i < n_used_hotfixes; i++) {
153*4882a593Smuzhiyun 		hpfs_sb(s)->hotfix_from[i] = le32_to_cpu(directory[i]);
154*4882a593Smuzhiyun 		hpfs_sb(s)->hotfix_to[i] = le32_to_cpu(directory[n_hotfixes + i]);
155*4882a593Smuzhiyun 	}
156*4882a593Smuzhiyun 	hpfs_sb(s)->n_hotfixes = n_used_hotfixes;
157*4882a593Smuzhiyun 	hpfs_brelse4(&qbh);
158*4882a593Smuzhiyun }
159*4882a593Smuzhiyun 
/*
 * Load fnode to memory
 */

/*
 * Load the fnode at sector @ino; *bhp receives the buffer the caller
 * must brelse() on success.
 *
 * With strict checking (sb_chk) enabled, the fnode magic, the B-tree
 * slot accounting and first_free offset (non-directory fnodes only),
 * the extended-attribute area bounds, and the in-fnode EA chain are all
 * validated.  Any inconsistency is reported via hpfs_error() and NULL
 * is returned with the buffer released.
 */
struct fnode *hpfs_map_fnode(struct super_block *s, ino_t ino, struct buffer_head **bhp)
{
	struct fnode *fnode;
	if (hpfs_sb(s)->sb_chk) if (hpfs_chk_sectors(s, ino, 1, "fnode")) {
		return NULL;
	}
	if ((fnode = hpfs_map_sector(s, ino, bhp, FNODE_RD_AHEAD))) {
		if (hpfs_sb(s)->sb_chk) {
			struct extended_attribute *ea;
			struct extended_attribute *ea_end;
			if (le32_to_cpu(fnode->magic) != FNODE_MAGIC) {
				hpfs_error(s, "bad magic on fnode %08lx",
					(unsigned long)ino);
				goto bail;
			}
			if (!fnode_is_dir(fnode)) {
				/* used + free slots must equal the fnode btree
				   capacity: 12 entries internal, 8 leaf */
				if ((unsigned)fnode->btree.n_used_nodes + (unsigned)fnode->btree.n_free_nodes !=
				    (bp_internal(&fnode->btree) ? 12 : 8)) {
					hpfs_error(s,
					   "bad number of nodes in fnode %08lx",
					    (unsigned long)ino);
					goto bail;
				}
				/* first_free must point just past the used
				   entries: 8-byte header, then 8-byte internal
				   or 12-byte leaf entries */
				if (le16_to_cpu(fnode->btree.first_free) !=
				    8 + fnode->btree.n_used_nodes * (bp_internal(&fnode->btree) ? 8 : 12)) {
					hpfs_error(s,
					    "bad first_free pointer in fnode %08lx",
					    (unsigned long)ino);
					goto bail;
				}
			}
			/* if EAs are present, the EA area must start past the
			   fixed fnode header (0xc4) and fit, together with the
			   ACL, inside the 512-byte fnode sector */
			if (le16_to_cpu(fnode->ea_size_s) && (le16_to_cpu(fnode->ea_offs) < 0xc4 ||
			   le16_to_cpu(fnode->ea_offs) + le16_to_cpu(fnode->acl_size_s) + le16_to_cpu(fnode->ea_size_s) > 0x200)) {
				hpfs_error(s,
					"bad EA info in fnode %08lx: ea_offs == %04x ea_size_s == %04x",
					(unsigned long)ino,
					le16_to_cpu(fnode->ea_offs), le16_to_cpu(fnode->ea_size_s));
				goto bail;
			}
			/* walk the EA chain entry by entry; stepping past
			   ea_end means some entry carried a corrupt length */
			ea = fnode_ea(fnode);
			ea_end = fnode_end_ea(fnode);
			while (ea != ea_end) {
				if (ea > ea_end) {
					hpfs_error(s, "bad EA in fnode %08lx",
						(unsigned long)ino);
					goto bail;
				}
				ea = next_ea(ea);
			}
		}
	}
	return fnode;
	bail:
	brelse(*bhp);
	return NULL;
}
220*4882a593Smuzhiyun 
/*
 * Load the anode at sector @ano; *bhp receives the buffer the caller
 * must brelse() on success.
 *
 * With strict checking (sb_chk) enabled, the anode magic, its self
 * pointer, and the B-tree slot accounting are validated.  On failure
 * the buffer is released and NULL returned.
 */
struct anode *hpfs_map_anode(struct super_block *s, anode_secno ano, struct buffer_head **bhp)
{
	struct anode *anode;
	if (hpfs_sb(s)->sb_chk) if (hpfs_chk_sectors(s, ano, 1, "anode")) return NULL;
	if ((anode = hpfs_map_sector(s, ano, bhp, ANODE_RD_AHEAD)))
		if (hpfs_sb(s)->sb_chk) {
			if (le32_to_cpu(anode->magic) != ANODE_MAGIC) {
				hpfs_error(s, "bad magic on anode %08x", ano);
				goto bail;
			}
			/* the anode must record its own sector number */
			if (le32_to_cpu(anode->self) != ano) {
				hpfs_error(s, "self pointer invalid on anode %08x", ano);
				goto bail;
			}
			/* used + free slots must equal the anode btree
			   capacity: 60 entries internal, 40 leaf */
			if ((unsigned)anode->btree.n_used_nodes + (unsigned)anode->btree.n_free_nodes !=
			    (bp_internal(&anode->btree) ? 60 : 40)) {
				hpfs_error(s, "bad number of nodes in anode %08x", ano);
				goto bail;
			}
			/* first_free: 8-byte header, then 8-byte internal or
			   12-byte leaf entries */
			if (le16_to_cpu(anode->btree.first_free) !=
			    8 + anode->btree.n_used_nodes * (bp_internal(&anode->btree) ? 8 : 12)) {
				hpfs_error(s, "bad first_free pointer in anode %08x", ano);
				goto bail;
			}
		}
	return anode;
	bail:
	brelse(*bhp);
	return NULL;
}
251*4882a593Smuzhiyun 
/*
 * Load dnode to memory and do some checks
 */

/*
 * Load the dnode (4 sectors) at @secno and optionally validate it.
 *
 * With strict checking (sb_chk) enabled: the sector range and 4-sector
 * alignment, the dnode magic and self pointer, the first_free bound,
 * every dirent's size/namelen/down-pointer, and the terminating \377
 * end entry are all verified before the dnode is handed to callers.
 * Returns NULL (after hpfs_brelse4) if any check fails.
 */
struct dnode *hpfs_map_dnode(struct super_block *s, unsigned secno,
			     struct quad_buffer_head *qbh)
{
	struct dnode *dnode;
	if (hpfs_sb(s)->sb_chk) {
		if (hpfs_chk_sectors(s, secno, 4, "dnode")) return NULL;
		/* dnodes span 4 sectors, so secno must be a multiple of 4 */
		if (secno & 3) {
			hpfs_error(s, "dnode %08x not byte-aligned", secno);
			return NULL;
		}
	}
	if ((dnode = hpfs_map_4sectors(s, secno, qbh, DNODE_RD_AHEAD)))
		if (hpfs_sb(s)->sb_chk) {
			unsigned p, pp = 0;	/* current and previous dirent offsets */
			unsigned char *d = (unsigned char *)dnode;
			int b = 0;		/* bitmask of de->down values seen (sb_chk >= 2) */
			if (le32_to_cpu(dnode->magic) != DNODE_MAGIC) {
				hpfs_error(s, "bad magic on dnode %08x", secno);
				goto bail;
			}
			/* a wrong self pointer is reported but not fatal */
			if (le32_to_cpu(dnode->self) != secno)
				hpfs_error(s, "bad self pointer on dnode %08x self = %08x", secno, le32_to_cpu(dnode->self));
			/* Check dirents - bad dirents would cause infinite
			   loops or shooting to memory */
			if (le32_to_cpu(dnode->first_free) > 2048) {
				hpfs_error(s, "dnode %08x has first_free == %08x", secno, le32_to_cpu(dnode->first_free));
				goto bail;
			}
			/* dirents start at offset 20; each dirent begins with
			   its 16-bit little-endian length, read byte-wise */
			for (p = 20; p < le32_to_cpu(dnode->first_free); p += d[p] + (d[p+1] << 8)) {
				struct hpfs_dirent *de = (struct hpfs_dirent *)((char *)dnode + p);
				if (le16_to_cpu(de->length) > 292 || (le16_to_cpu(de->length) < 32) || (le16_to_cpu(de->length) & 3) || p + le16_to_cpu(de->length) > 2048) {
					hpfs_error(s, "bad dirent size in dnode %08x, dirent %03x, last %03x", secno, p, pp);
					goto bail;
				}
				/* length must equal header + name + optional
				   4-byte down pointer, rounded up to 4 bytes;
				   a merely-oversized dirent is tolerated on
				   read-only mounts */
				if (((31 + de->namelen + de->down*4 + 3) & ~3) != le16_to_cpu(de->length)) {
					if (((31 + de->namelen + de->down*4 + 3) & ~3) < le16_to_cpu(de->length) && s->s_flags & SB_RDONLY) goto ok;
					hpfs_error(s, "namelen does not match dirent size in dnode %08x, dirent %03x, last %03x", secno, p, pp);
					goto bail;
				}
				ok:
				if (hpfs_sb(s)->sb_chk >= 2) b |= 1 << de->down;
				if (de->down) if (de_down_pointer(de) < 0x10) {
					hpfs_error(s, "bad down pointer in dnode %08x, dirent %03x, last %03x", secno, p, pp);
					goto bail;
				}
				pp = p;

			}
			/* the walk must land exactly on first_free */
			if (p != le32_to_cpu(dnode->first_free)) {
				hpfs_error(s, "size on last dirent does not match first_free; dnode %08x", secno);
				goto bail;
			}
			/* last dirent must be the end marker: namelen 1,
			   name byte \377 */
			if (d[pp + 30] != 1 || d[pp + 31] != 255) {
				hpfs_error(s, "dnode %08x does not end with \\377 entry", secno);
				goto bail;
			}
			/* b == 3 means dirents both with and without down
			   pointers were seen - an unbalanced tree */
			if (b == 3)
				pr_err("unbalanced dnode tree, dnode %08x; see hpfs.txt 4 more info\n",
					secno);
		}
	return dnode;
	bail:
	hpfs_brelse4(qbh);
	return NULL;
}
321*4882a593Smuzhiyun 
hpfs_fnode_dno(struct super_block * s,ino_t ino)322*4882a593Smuzhiyun dnode_secno hpfs_fnode_dno(struct super_block *s, ino_t ino)
323*4882a593Smuzhiyun {
324*4882a593Smuzhiyun 	struct buffer_head *bh;
325*4882a593Smuzhiyun 	struct fnode *fnode;
326*4882a593Smuzhiyun 	dnode_secno dno;
327*4882a593Smuzhiyun 
328*4882a593Smuzhiyun 	fnode = hpfs_map_fnode(s, ino, &bh);
329*4882a593Smuzhiyun 	if (!fnode)
330*4882a593Smuzhiyun 		return 0;
331*4882a593Smuzhiyun 
332*4882a593Smuzhiyun 	dno = le32_to_cpu(fnode->u.external[0].disk_secno);
333*4882a593Smuzhiyun 	brelse(bh);
334*4882a593Smuzhiyun 	return dno;
335*4882a593Smuzhiyun }
336