// SPDX-License-Identifier: GPL-2.0
/*
 *  linux/fs/hpfs/anode.c
 *
 *  Mikulas Patocka (mikulas@artax.karlin.mff.cuni.cz), 1998-1999
 *
 *  handling HPFS anode tree that contains file allocation info
 */

#include "hpfs_fn.h"

/* Find a sector in allocation tree */

/*
 * Walk the allocation B+ tree rooted at @btree and return the disk sector
 * backing file sector @sec.  @bh is the buffer holding @btree and is always
 * released before returning, on success or failure.  If @inode is non-NULL,
 * the matching extent is cached in its hpfs_inode_info so subsequent lookups
 * can avoid the tree walk.  Returns the disk sector number, or -1 on error.
 */
secno hpfs_bplus_lookup(struct super_block *s, struct inode *inode,
		struct bplus_header *btree, unsigned sec,
		struct buffer_head *bh)
{
	anode_secno a = -1;
	struct anode *anode;
	int i;
	int c1, c2 = 0;
	go_down:
	/* Cycle detection on corrupted trees, only when checking is enabled. */
	if (hpfs_sb(s)->sb_chk) if (hpfs_stop_cycles(s, a, &c1, &c2, "hpfs_bplus_lookup")) return -1;
	if (bp_internal(btree)) {
		/* Internal node: descend into the first child whose key
		   exceeds the wanted file sector. */
		for (i = 0; i < btree->n_used_nodes; i++)
			if (le32_to_cpu(btree->u.internal[i].file_secno) > sec) {
				a = le32_to_cpu(btree->u.internal[i].down);
				brelse(bh);
				if (!(anode = hpfs_map_anode(s, a, &bh))) return -1;
				btree = &anode->btree;
				goto go_down;
			}
		hpfs_error(s, "sector %08x not found in internal anode %08x", sec, a);
		brelse(bh);
		return -1;
	}
	/* Leaf node: find the extent whose range contains @sec. */
	for (i = 0; i < btree->n_used_nodes; i++)
		if (le32_to_cpu(btree->u.external[i].file_secno) <= sec &&
		    le32_to_cpu(btree->u.external[i].file_secno) + le32_to_cpu(btree->u.external[i].length) > sec) {
			a = le32_to_cpu(btree->u.external[i].disk_secno) + sec - le32_to_cpu(btree->u.external[i].file_secno);
			if (hpfs_sb(s)->sb_chk) if (hpfs_chk_sectors(s, a, 1, "data")) {
				brelse(bh);
				return -1;
			}
			if (inode) {
				/* Cache the whole extent for later lookups. */
				struct hpfs_inode_info *hpfs_inode = hpfs_i(inode);
				hpfs_inode->i_file_sec = le32_to_cpu(btree->u.external[i].file_secno);
				hpfs_inode->i_disk_sec = le32_to_cpu(btree->u.external[i].disk_secno);
				hpfs_inode->i_n_secs = le32_to_cpu(btree->u.external[i].length);
			}
			brelse(bh);
			return a;
		}
	hpfs_error(s, "sector %08x not found in external anode %08x", sec, a);
	brelse(bh);
	return -1;
}

/* Add a sector to tree */

/*
 * Append one sector to the end of the allocation tree rooted at @node
 * (an fnode if @fnod is nonzero, otherwise an anode).  @fsecno is the file
 * sector being added; it must equal the current file length in sectors.
 * Handles leaf extension, leaf/internal node splits and, if necessary,
 * splitting the root itself.  Returns the newly allocated disk sector,
 * or -1 on failure.
 */
secno hpfs_add_sector_to_btree(struct super_block *s, secno node, int fnod, unsigned fsecno)
{
	struct bplus_header *btree;
	struct anode *anode = NULL, *ranode = NULL;
	struct fnode *fnode;
	anode_secno a, na = -1, ra, up = -1;
	secno se;
	struct buffer_head *bh, *bh1, *bh2;
	int n;
	unsigned fs;
	int c1, c2 = 0;
	/* Map the tree root (fnode or anode). */
	if (fnod) {
		if (!(fnode = hpfs_map_fnode(s, node, &bh))) return -1;
		btree = &fnode->btree;
	} else {
		if (!(anode = hpfs_map_anode(s, node, &bh))) return -1;
		btree = &anode->btree;
	}
	a = node;
	go_down:
	/* Descend along the rightmost downlinks to the last leaf. */
	if ((n = btree->n_used_nodes - 1) < -!!fnod) {
		hpfs_error(s, "anode %08x has no entries", a);
		brelse(bh);
		return -1;
	}
	if (bp_internal(btree)) {
		a = le32_to_cpu(btree->u.internal[n].down);
		/* Mark the rightmost key "unlimited" while its subtree grows. */
		btree->u.internal[n].file_secno = cpu_to_le32(-1);
		mark_buffer_dirty(bh);
		brelse(bh);
		if (hpfs_sb(s)->sb_chk)
			if (hpfs_stop_cycles(s, a, &c1, &c2, "hpfs_add_sector_to_btree #1")) return -1;
		if (!(anode = hpfs_map_anode(s, a, &bh))) return -1;
		btree = &anode->btree;
		goto go_down;
	}
	if (n >= 0) {
		/* Non-empty tree: the new file sector must directly follow the
		   last allocated one. */
		if (le32_to_cpu(btree->u.external[n].file_secno) + le32_to_cpu(btree->u.external[n].length) != fsecno) {
			hpfs_error(s, "allocated size %08x, trying to add sector %08x, %cnode %08x",
				le32_to_cpu(btree->u.external[n].file_secno) + le32_to_cpu(btree->u.external[n].length), fsecno,
				fnod?'f':'a', node);
			brelse(bh);
			return -1;
		}
		/* Fast path: grow the last extent in place when the adjacent
		   disk sector is free. */
		if (hpfs_alloc_if_possible(s, se = le32_to_cpu(btree->u.external[n].disk_secno) + le32_to_cpu(btree->u.external[n].length))) {
			le32_add_cpu(&btree->u.external[n].length, 1);
			mark_buffer_dirty(bh);
			brelse(bh);
			return se;
		}
	} else {
		/* Empty tree: only file sector 0 may be added. */
		if (fsecno) {
			hpfs_error(s, "empty file %08x, trying to add sector %08x", node, fsecno);
			brelse(bh);
			return -1;
		}
		se = !fnod ? node : (node + 16384) & ~16383;
	}	
	/* Allocate a fresh sector near @se; forward-allocation hint scales
	   with file size, clamped to [ALLOC_FWD_MIN, ALLOC_FWD_MAX]. */
	if (!(se = hpfs_alloc_sector(s, se, 1, fsecno*ALLOC_M>ALLOC_FWD_MAX ? ALLOC_FWD_MAX : fsecno*ALLOC_M<ALLOC_FWD_MIN ? ALLOC_FWD_MIN : fsecno*ALLOC_M))) {
		brelse(bh);
		return -1;
	}
	fs = n < 0 ? 0 : le32_to_cpu(btree->u.external[n].file_secno) + le32_to_cpu(btree->u.external[n].length);
	if (!btree->n_free_nodes) {
		/* Leaf is full: allocate a new anode to take the new extent. */
		up = a != node ? le32_to_cpu(anode->up) : -1;
		if (!(anode = hpfs_alloc_anode(s, a, &na, &bh1))) {
			brelse(bh);
			hpfs_free_sectors(s, se, 1);
			return -1;
		}
		if (a == node && fnod) {
			/* The full leaf is the fnode root itself: move its extents
			   into the new anode and turn the fnode's tree into an
			   internal node pointing at it. */
			anode->up = cpu_to_le32(node);
			anode->btree.flags |= BP_fnode_parent;
			anode->btree.n_used_nodes = btree->n_used_nodes;
			anode->btree.first_free = btree->first_free;
			anode->btree.n_free_nodes = 40 - anode->btree.n_used_nodes;
			memcpy(&anode->u, &btree->u, btree->n_used_nodes * 12);
			btree->flags |= BP_internal;
			btree->n_free_nodes = 11;
			btree->n_used_nodes = 1;
			btree->first_free = cpu_to_le16((char *)&(btree->u.internal[1]) - (char *)btree);
			btree->u.internal[0].file_secno = cpu_to_le32(-1);
			btree->u.internal[0].down = cpu_to_le32(na);
			mark_buffer_dirty(bh);
		} else if (!(ranode = hpfs_alloc_anode(s, /*a*/0, &ra, &bh2))) {
			/* @ranode is preallocated in case the split must propagate
			   all the way to the root; undo allocations on failure. */
			brelse(bh);
			brelse(bh1);
			hpfs_free_sectors(s, se, 1);
			hpfs_free_sectors(s, na, 1);
			return -1;
		}
		brelse(bh);
		bh = bh1;
		btree = &anode->btree;
	}
	/* Append the new 1-sector extent to the (possibly new) leaf. */
	btree->n_free_nodes--; n = btree->n_used_nodes++;
	le16_add_cpu(&btree->first_free, 12);
	btree->u.external[n].disk_secno = cpu_to_le32(se);
	btree->u.external[n].file_secno = cpu_to_le32(fs);
	btree->u.external[n].length = cpu_to_le32(1);
	mark_buffer_dirty(bh);
	brelse(bh);
	if ((a == node && fnod) || na == -1) return se;
	/* A new anode was chained in: insert a downlink to it in an ancestor,
	   splitting full internal nodes on the way up. */
	c2 = 0;
	while (up != (anode_secno)-1) {
		struct anode *new_anode;
		if (hpfs_sb(s)->sb_chk)
			if (hpfs_stop_cycles(s, up, &c1, &c2, "hpfs_add_sector_to_btree #2")) return -1;
		if (up != node || !fnod) {
			if (!(anode = hpfs_map_anode(s, up, &bh))) return -1;
			btree = &anode->btree;
		} else {
			if (!(fnode = hpfs_map_fnode(s, up, &bh))) return -1;
			btree = &fnode->btree;
		}
		if (btree->n_free_nodes) {
			/* Room here: add the downlink to @na and finish. */
			btree->n_free_nodes--; n = btree->n_used_nodes++;
			le16_add_cpu(&btree->first_free, 8);
			btree->u.internal[n].file_secno = cpu_to_le32(-1);
			btree->u.internal[n].down = cpu_to_le32(na);
			btree->u.internal[n-1].file_secno = cpu_to_le32(fs);
			mark_buffer_dirty(bh);
			brelse(bh);
			brelse(bh2);
			/* The preallocated root copy is not needed after all. */
			hpfs_free_sectors(s, ra, 1);
			if ((anode = hpfs_map_anode(s, na, &bh))) {
				anode->up = cpu_to_le32(up);
				if (up == node && fnod)
					anode->btree.flags |= BP_fnode_parent;
				else
					anode->btree.flags &= ~BP_fnode_parent;
				mark_buffer_dirty(bh);
				brelse(bh);
			}
			return se;
		}
		up = up != node ? le32_to_cpu(anode->up) : -1;
		btree->u.internal[btree->n_used_nodes - 1].file_secno = cpu_to_le32(/*fs*/-1);
		mark_buffer_dirty(bh);
		brelse(bh);
		a = na;
		/* This ancestor is full too: allocate a sibling internal anode
		   that takes over the new chain, and continue upward. */
		if ((new_anode = hpfs_alloc_anode(s, a, &na, &bh))) {
			anode = new_anode;
			/*anode->up = cpu_to_le32(up != -1 ? up : ra);*/
			anode->btree.flags |= BP_internal;
			anode->btree.n_used_nodes = 1;
			anode->btree.n_free_nodes = 59;
			anode->btree.first_free = cpu_to_le16(16);
			anode->btree.u.internal[0].down = cpu_to_le32(a);
			anode->btree.u.internal[0].file_secno = cpu_to_le32(-1);
			mark_buffer_dirty(bh);
			brelse(bh);
			if ((anode = hpfs_map_anode(s, a, &bh))) {
				anode->up = cpu_to_le32(na);
				mark_buffer_dirty(bh);
				brelse(bh);
			}
		} else na = a;
	}
	/* Ran out of ancestors: split the root.  @ranode (preallocated above)
	   becomes a copy of the old root's contents. */
	if ((anode = hpfs_map_anode(s, na, &bh))) {
		anode->up = cpu_to_le32(node);
		if (fnod)
			anode->btree.flags |= BP_fnode_parent;
		mark_buffer_dirty(bh);
		brelse(bh);
	}
	if (!fnod) {
		if (!(anode = hpfs_map_anode(s, node, &bh))) {
			brelse(bh2);
			return -1;
		}
		btree = &anode->btree;
	} else {
		if (!(fnode = hpfs_map_fnode(s, node, &bh))) {
			brelse(bh2);
			return -1;
		}
		btree = &fnode->btree;
	}
	ranode->up = cpu_to_le32(node);
	memcpy(&ranode->btree, btree, le16_to_cpu(btree->first_free));
	if (fnod)
		ranode->btree.flags |= BP_fnode_parent;
	ranode->btree.n_free_nodes = (bp_internal(&ranode->btree) ? 60 : 40) - ranode->btree.n_used_nodes;
	/* Repoint the copied children's up-pointers at the new root copy. */
	if (bp_internal(&ranode->btree)) for (n = 0; n < ranode->btree.n_used_nodes; n++) {
		struct anode *unode;
		if ((unode = hpfs_map_anode(s, le32_to_cpu(ranode->u.internal[n].down), &bh1))) {
			unode->up = cpu_to_le32(ra);
			unode->btree.flags &= ~BP_fnode_parent;
			mark_buffer_dirty(bh1);
			brelse(bh1);
		}
	}
	/* The old root becomes a two-entry internal node over @ra and @na. */
	btree->flags |= BP_internal;
	btree->n_free_nodes = fnod ? 10 : 58;
	btree->n_used_nodes = 2;
	btree->first_free = cpu_to_le16((char *)&btree->u.internal[2] - (char *)btree);
	btree->u.internal[0].file_secno = cpu_to_le32(fs);
	btree->u.internal[0].down = cpu_to_le32(ra);
	btree->u.internal[1].file_secno = cpu_to_le32(-1);
	btree->u.internal[1].down = cpu_to_le32(na);
	mark_buffer_dirty(bh);
	brelse(bh);
	mark_buffer_dirty(bh2);
	brelse(bh2);
	return se;
}

/*
 * Remove allocation tree. Recursion would look much nicer but
 * I want to avoid it because it can cause stack overflow.
 */

/*
 * Free every sector referenced by the allocation tree @btree, including the
 * anodes themselves (but not the root, which is embedded in its fnode or
 * anode).  Implemented iteratively — tracking only @level and @pos and
 * re-finding the parent slot via up-pointers — to avoid recursion-driven
 * stack overflow on deep or corrupted trees.
 */
void hpfs_remove_btree(struct super_block *s, struct bplus_header *btree)
{
	struct bplus_header *btree1 = btree;
	struct anode *anode = NULL;
	anode_secno ano = 0, oano;
	struct buffer_head *bh;
	int level = 0;
	int pos = 0;
	int i;
	int c1, c2 = 0;
	int d1, d2;
	go_down:
	d2 = 0;
	/* Descend to the leftmost unvisited leaf. */
	while (bp_internal(btree1)) {
		ano = le32_to_cpu(btree1->u.internal[pos].down);
		if (level) brelse(bh);
		if (hpfs_sb(s)->sb_chk)
			if (hpfs_stop_cycles(s, ano, &d1, &d2, "hpfs_remove_btree #1"))
				return;
		if (!(anode = hpfs_map_anode(s, ano, &bh))) return;
		btree1 = &anode->btree;
		level++;
		pos = 0;
	}
	/* Free this leaf's data extents. */
	for (i = 0; i < btree1->n_used_nodes; i++)
		hpfs_free_sectors(s, le32_to_cpu(btree1->u.external[i].disk_secno), le32_to_cpu(btree1->u.external[i].length));
	go_up:
	if (!level) return;
	brelse(bh);
	if (hpfs_sb(s)->sb_chk)
		if (hpfs_stop_cycles(s, ano, &c1, &c2, "hpfs_remove_btree #2")) return;
	/* Free the finished anode, then locate our slot in its parent. */
	hpfs_free_sectors(s, ano, 1);
	oano = ano;
	ano = le32_to_cpu(anode->up);
	if (--level) {
		if (!(anode = hpfs_map_anode(s, ano, &bh))) return;
		btree1 = &anode->btree;
	} else btree1 = btree;
	for (i = 0; i < btree1->n_used_nodes; i++) {
		if (le32_to_cpu(btree1->u.internal[i].down) == oano) {
			/* Visit the next sibling subtree, or climb further up. */
			if ((pos = i + 1) < btree1->n_used_nodes)
				goto go_down;
			else
				goto go_up;
		}
	}
	hpfs_error(s,
		   "reference to anode %08x not found in anode %08x "
		   "(probably bad up pointer)",
		   oano, level ? ano : -1);
	if (level)
		brelse(bh);
}

/* Just a wrapper around hpfs_bplus_lookup .. used for reading eas */

anode_lookup(struct super_block * s,anode_secno a,unsigned sec)330*4882a593Smuzhiyun static secno anode_lookup(struct super_block *s, anode_secno a, unsigned sec)
331*4882a593Smuzhiyun {
332*4882a593Smuzhiyun struct anode *anode;
333*4882a593Smuzhiyun struct buffer_head *bh;
334*4882a593Smuzhiyun if (!(anode = hpfs_map_anode(s, a, &bh))) return -1;
335*4882a593Smuzhiyun return hpfs_bplus_lookup(s, NULL, &anode->btree, sec, bh);
336*4882a593Smuzhiyun }
337*4882a593Smuzhiyun
/*
 * Read @len bytes of an extended attribute into @buf, starting at byte
 * offset @pos.  If @ano is nonzero, @a is the root anode of a B+ tree
 * describing the EA's sectors; otherwise the EA occupies consecutive
 * sectors starting at @a.  Returns 0 on success, -1 on failure.
 */
int hpfs_ea_read(struct super_block *s, secno a, int ano, unsigned pos,
	   unsigned len, char *buf)
{
	struct buffer_head *bh;
	char *data;
	secno sec;
	unsigned chunk;

	while (len) {
		/* Translate the byte offset into a disk sector number. */
		if (ano) {
			sec = anode_lookup(s, a, pos >> 9);
			if (sec == -1)
				return -1;
		} else {
			sec = a + (pos >> 9);
		}
		if (hpfs_sb(s)->sb_chk && hpfs_chk_sectors(s, sec, 1, "ea #1"))
			return -1;
		data = hpfs_map_sector(s, sec, &bh, (len - 1) >> 9);
		if (!data)
			return -1;
		/* Copy at most up to the end of this 512-byte sector. */
		chunk = 0x200 - (pos & 0x1ff);
		if (chunk > len)
			chunk = len;
		memcpy(buf, data + (pos & 0x1ff), chunk);
		brelse(bh);
		buf += chunk;
		pos += chunk;
		len -= chunk;
	}
	return 0;
}
360*4882a593Smuzhiyun
/*
 * Write @len bytes from @buf into an extended attribute, starting at byte
 * offset @pos.  If @ano is nonzero, @a is the root anode of a B+ tree
 * describing the EA's sectors; otherwise the EA occupies consecutive
 * sectors starting at @a.  Returns 0 on success, -1 on failure.
 */
int hpfs_ea_write(struct super_block *s, secno a, int ano, unsigned pos,
	    unsigned len, const char *buf)
{
	struct buffer_head *bh;
	char *data;
	secno sec;
	unsigned chunk;

	while (len) {
		/* Translate the byte offset into a disk sector number. */
		if (ano) {
			sec = anode_lookup(s, a, pos >> 9);
			if (sec == -1)
				return -1;
		} else {
			sec = a + (pos >> 9);
		}
		if (hpfs_sb(s)->sb_chk && hpfs_chk_sectors(s, sec, 1, "ea #2"))
			return -1;
		data = hpfs_map_sector(s, sec, &bh, (len - 1) >> 9);
		if (!data)
			return -1;
		/* Write at most up to the end of this 512-byte sector. */
		chunk = 0x200 - (pos & 0x1ff);
		if (chunk > len)
			chunk = len;
		memcpy(data + (pos & 0x1ff), buf, chunk);
		mark_buffer_dirty(bh);
		brelse(bh);
		buf += chunk;
		pos += chunk;
		len -= chunk;
	}
	return 0;
}
384*4882a593Smuzhiyun
/*
 * Free the disk space of an extended attribute.  For an anode-based EA
 * (@ano nonzero) the whole allocation tree rooted at @a is removed along
 * with the root anode itself; otherwise @len bytes worth of consecutive
 * sectors starting at @a are freed.
 */
void hpfs_ea_remove(struct super_block *s, secno a, int ano, unsigned len)
{
	struct buffer_head *bh;
	struct anode *anode;

	if (!ano) {
		hpfs_free_sectors(s, a, (len + 511) >> 9);
		return;
	}
	anode = hpfs_map_anode(s, a, &bh);
	if (!anode)
		return;
	hpfs_remove_btree(s, &anode->btree);
	brelse(bh);
	hpfs_free_sectors(s, a, 1);
}

/* Truncate allocation tree. Doesn't join anodes - I hope it doesn't matter */

/*
 * Truncate the allocation tree rooted at @f (an fnode if @fno is nonzero,
 * else an anode) so the file keeps only its first @secs sectors.  Subtrees
 * and extent tails past that point are freed.  Adjacent anodes are not
 * merged afterwards.
 */
void hpfs_truncate_btree(struct super_block *s, secno f, int fno, unsigned secs)
{
	struct fnode *fnode;
	struct anode *anode;
	struct buffer_head *bh;
	struct bplus_header *btree;
	anode_secno node = f;
	int i, j, nodes;
	int c1, c2 = 0;
	if (fno) {
		if (!(fnode = hpfs_map_fnode(s, f, &bh))) return;
		btree = &fnode->btree;
	} else {
		if (!(anode = hpfs_map_anode(s, f, &bh))) return;
		btree = &anode->btree;
	}
	if (!secs) {
		/* Truncating to zero: drop the whole tree, then reinitialize
		   the fnode's embedded tree or free the anode outright. */
		hpfs_remove_btree(s, btree);
		if (fno) {
			btree->n_free_nodes = 8;
			btree->n_used_nodes = 0;
			btree->first_free = cpu_to_le16(8);
			btree->flags &= ~BP_internal;
			mark_buffer_dirty(bh);
		} else hpfs_free_sectors(s, f, 1);
		brelse(bh);
		return;
	}
	while (bp_internal(btree)) {
		/* Find the child covering the new last sector; every subtree
		   to its right is removed entirely. */
		nodes = btree->n_used_nodes + btree->n_free_nodes;
		for (i = 0; i < btree->n_used_nodes; i++)
			if (le32_to_cpu(btree->u.internal[i].file_secno) >= secs) goto f;
		brelse(bh);
		hpfs_error(s, "internal btree %08x doesn't end with -1", node);
		return;
		f:
		/* hpfs_ea_remove with ano=1 frees an anode subtree. */
		for (j = i + 1; j < btree->n_used_nodes; j++)
			hpfs_ea_remove(s, le32_to_cpu(btree->u.internal[j].down), 1, 0);
		btree->n_used_nodes = i + 1;
		btree->n_free_nodes = nodes - btree->n_used_nodes;
		btree->first_free = cpu_to_le16(8 + 8 * btree->n_used_nodes);
		mark_buffer_dirty(bh);
		if (btree->u.internal[i].file_secno == cpu_to_le32(secs)) {
			/* Cut falls exactly on this child's boundary - done. */
			brelse(bh);
			return;
		}
		node = le32_to_cpu(btree->u.internal[i].down);
		brelse(bh);
		if (hpfs_sb(s)->sb_chk)
			if (hpfs_stop_cycles(s, node, &c1, &c2, "hpfs_truncate_btree"))
				return;
		if (!(anode = hpfs_map_anode(s, node, &bh))) return;
		btree = &anode->btree;
	}
	/* Leaf level: find the extent containing the cut point. */
	nodes = btree->n_used_nodes + btree->n_free_nodes;
	for (i = 0; i < btree->n_used_nodes; i++)
		if (le32_to_cpu(btree->u.external[i].file_secno) + le32_to_cpu(btree->u.external[i].length) >= secs) goto ff;
	brelse(bh);
	return;
	ff:
	if (secs <= le32_to_cpu(btree->u.external[i].file_secno)) {
		hpfs_error(s, "there is an allocation error in file %08x, sector %08x", f, secs);
		if (i) i--;
	}
	else if (le32_to_cpu(btree->u.external[i].file_secno) + le32_to_cpu(btree->u.external[i].length) > secs) {
		/* The cut falls inside this extent: free its tail and shrink it. */
		hpfs_free_sectors(s, le32_to_cpu(btree->u.external[i].disk_secno) + secs -
			le32_to_cpu(btree->u.external[i].file_secno), le32_to_cpu(btree->u.external[i].length)
			- secs + le32_to_cpu(btree->u.external[i].file_secno)); /* I hope gcc optimizes this :-) */
		btree->u.external[i].length = cpu_to_le32(secs - le32_to_cpu(btree->u.external[i].file_secno));
	}
	/* Free all extents lying entirely beyond the cut point. */
	for (j = i + 1; j < btree->n_used_nodes; j++)
		hpfs_free_sectors(s, le32_to_cpu(btree->u.external[j].disk_secno), le32_to_cpu(btree->u.external[j].length));
	btree->n_used_nodes = i + 1;
	btree->n_free_nodes = nodes - btree->n_used_nodes;
	btree->first_free = cpu_to_le16(8 + 12 * btree->n_used_nodes);
	mark_buffer_dirty(bh);
	brelse(bh);
}

/* Remove file or directory and its eas - note that directory must
   be empty when this is called. */

/*
 * Delete fnode @fno and everything it owns: the file's allocation tree
 * (or, for a directory, its dnode tree - the directory must already be
 * empty), all indirect extended attributes, the external EA area, and
 * finally the fnode sector itself.
 */
void hpfs_remove_fnode(struct super_block *s, fnode_secno fno)
{
	struct buffer_head *bh;
	struct fnode *fnode;
	struct extended_attribute *ea;
	struct extended_attribute *ea_end;

	fnode = hpfs_map_fnode(s, fno, &bh);
	if (!fnode)
		return;
	/* Files carry a B+ allocation tree, directories a dnode tree. */
	if (fnode_is_dir(fnode))
		hpfs_remove_dtree(s, le32_to_cpu(fnode->u.external[0].disk_secno));
	else
		hpfs_remove_btree(s, &fnode->btree);
	/* Free every indirect EA stored outside the fnode. */
	ea_end = fnode_end_ea(fnode);
	for (ea = fnode_ea(fnode); ea < ea_end; ea = next_ea(ea)) {
		if (ea_indirect(ea))
			hpfs_ea_remove(s, ea_sec(ea), ea_in_anode(ea), ea_len(ea));
	}
	hpfs_ea_ext_remove(s, le32_to_cpu(fnode->ea_secno), fnode_in_anode(fnode), le32_to_cpu(fnode->ea_size_l));
	brelse(bh);
	hpfs_free_sectors(s, fno, 1);
}
498