// SPDX-License-Identifier: GPL-2.0
/*
 *  linux/fs/hpfs/alloc.c
 *
 *  Mikulas Patocka (mikulas@artax.karlin.mff.cuni.cz), 1998-1999
 *
 *  HPFS bitmap operations
 */

#include "hpfs_fn.h"

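/*
 * The sb_n_free and sb_n_free_dnodes fields cache the number of free
 * sectors and of free dirband dnodes.  The claim helpers below keep them
 * in sync with bitmap changes; the value (unsigned)-1 means "unknown" and
 * is set when an inconsistency is detected, after which the counter is
 * ignored.
 */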
static void hpfs_claim_alloc(struct super_block *s, secno sec)
{
	struct hpfs_sb_info *sbi = hpfs_sb(s);
	if (sbi->sb_n_free != (unsigned)-1) {
		if (unlikely(!sbi->sb_n_free)) {
			hpfs_error(s, "free count underflow, allocating sector %08x", sec);
			sbi->sb_n_free = -1;
			return;
		}
		sbi->sb_n_free--;
	}
}

static void hpfs_claim_free(struct super_block *s, secno sec)
{
	struct hpfs_sb_info *sbi = hpfs_sb(s);
	if (sbi->sb_n_free != (unsigned)-1) {
		if (unlikely(sbi->sb_n_free >= sbi->sb_fs_size)) {
			hpfs_error(s, "free count overflow, freeing sector %08x", sec);
			sbi->sb_n_free = -1;
			return;
		}
		sbi->sb_n_free++;
	}
}

static void hpfs_claim_dirband_alloc(struct super_block *s, secno sec)
{
	struct hpfs_sb_info *sbi = hpfs_sb(s);
	if (sbi->sb_n_free_dnodes != (unsigned)-1) {
		if (unlikely(!sbi->sb_n_free_dnodes)) {
			hpfs_error(s, "dirband free count underflow, allocating sector %08x", sec);
			sbi->sb_n_free_dnodes = -1;
			return;
		}
		sbi->sb_n_free_dnodes--;
	}
}

static void hpfs_claim_dirband_free(struct super_block *s, secno sec)
{
	struct hpfs_sb_info *sbi = hpfs_sb(s);
	if (sbi->sb_n_free_dnodes != (unsigned)-1) {
		if (unlikely(sbi->sb_n_free_dnodes >= sbi->sb_dirband_size / 4)) {
			hpfs_error(s, "dirband free count overflow, freeing sector %08x", sec);
			sbi->sb_n_free_dnodes = -1;
			return;
		}
		sbi->sb_n_free_dnodes++;
	}
}

/*
 * Check if a sector is allocated in bitmap
 * This is really slow. Turned on only if chk==2
 */

static int chk_if_allocated(struct super_block *s, secno sec, char *msg)
{
	struct quad_buffer_head qbh;
	__le32 *bmp;
	if (!(bmp = hpfs_map_bitmap(s, sec >> 14, &qbh, "chk"))) goto fail;
	if ((le32_to_cpu(bmp[(sec & 0x3fff) >> 5]) >> (sec & 0x1f)) & 1) {
		hpfs_error(s, "sector '%s' - %08x not allocated in bitmap", msg, sec);
		goto fail1;
	}
	hpfs_brelse4(&qbh);
	if (sec >= hpfs_sb(s)->sb_dirband_start && sec < hpfs_sb(s)->sb_dirband_start + hpfs_sb(s)->sb_dirband_size) {
		unsigned ssec = (sec - hpfs_sb(s)->sb_dirband_start) / 4;
		if (!(bmp = hpfs_map_dnode_bitmap(s, &qbh))) goto fail;
		if ((le32_to_cpu(bmp[ssec >> 5]) >> (ssec & 0x1f)) & 1) {
			hpfs_error(s, "sector '%s' - %08x not allocated in directory bitmap", msg, sec);
			goto fail1;
		}
		hpfs_brelse4(&qbh);
	}
	return 0;
	fail1:
	hpfs_brelse4(&qbh);
	fail:
	return 1;
}

/*
 * Check that the sector number(s) are within the valid range and, when
 * chk >= 2, additionally check that they are allocated in the bitmap.
 */

int hpfs_chk_sectors(struct super_block *s, secno start, int len, char *msg)
{
	if (start + len < start || start < 0x12 ||
	    start + len > hpfs_sb(s)->sb_fs_size) {
		hpfs_error(s, "sector(s) '%s' badly placed at %08x", msg, start);
		return 1;
	}
	if (hpfs_sb(s)->sb_chk>=2) {
		int i;
		for (i = 0; i < len; i++)
			if (chk_if_allocated(s, start + i, msg)) return 1;
	}
	return 0;
}

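/*
 * Allocate a run of n sectors (n is 1 or 4) inside the single bitmap that
 * covers the sector 'near'.  Each bitmap covers 0x4000 sectors and a set
 * bit means "free".  'forward' is the number of additional free sectors
 * that must follow the run (room for later preallocation).  If all bits of
 * 'near' above bit 13 are set, the directory-band dnode bitmap is searched
 * instead.  Returns the first sector of the run, or 0 on failure.
 */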
static secno alloc_in_bmp(struct super_block *s, secno near, unsigned n, unsigned forward)
{
	struct quad_buffer_head qbh;
	__le32 *bmp;
	unsigned bs = near & ~0x3fff;
	unsigned nr = (near & 0x3fff) & ~(n - 1);
	/*unsigned mnr;*/
	unsigned i, q;
	int a, b;
	secno ret = 0;
	if (n != 1 && n != 4) {
		hpfs_error(s, "Bad allocation size: %d", n);
		return 0;
	}
	if (bs != ~0x3fff) {
		if (!(bmp = hpfs_map_bitmap(s, near >> 14, &qbh, "aib"))) goto uls;
	} else {
		if (!(bmp = hpfs_map_dnode_bitmap(s, &qbh))) goto uls;
	}
	if (!tstbits(bmp, nr, n + forward)) {
		ret = bs + nr;
		goto rt;
	}
	q = nr + n; b = 0;
	while ((a = tstbits(bmp, q, n + forward)) != 0) {
		q += a;
		if (n != 1) q = ((q-1)&~(n-1))+n;
		if (!b) {
			if (q>>5 != nr>>5) {
				b = 1;
				q = nr & 0x1f;
			}
		} else if (q > nr) break;
	}
	if (!a) {
		ret = bs + q;
		goto rt;
	}
	nr >>= 5;
	/*for (i = nr + 1; i != nr; i++, i &= 0x1ff) */
	i = nr;
	do {
		if (!le32_to_cpu(bmp[i])) goto cont;
		if (n + forward >= 0x3f && le32_to_cpu(bmp[i]) != 0xffffffff) goto cont;
		q = i<<5;
		if (i > 0) {
			unsigned k = le32_to_cpu(bmp[i-1]);
			while (k & 0x80000000) {
				q--; k <<= 1;
			}
		}
		if (n != 1) q = ((q-1)&~(n-1))+n;
		while ((a = tstbits(bmp, q, n + forward)) != 0) {
			q += a;
			if (n != 1) q = ((q-1)&~(n-1))+n;
			if (q>>5 > i) break;
		}
		if (!a) {
			ret = bs + q;
			goto rt;
		}
		cont:
		i++, i &= 0x1ff;
	} while (i != nr);
	rt:
	if (ret) {
		if (hpfs_sb(s)->sb_chk && ((ret >> 14) != (bs >> 14) || (le32_to_cpu(bmp[(ret & 0x3fff) >> 5]) | ~(((1 << n) - 1) << (ret & 0x1f))) != 0xffffffff)) {
			hpfs_error(s, "Allocation doesn't work! Wanted %d, allocated at %08x", n, ret);
			ret = 0;
			goto b;
		}
		bmp[(ret & 0x3fff) >> 5] &= cpu_to_le32(~(((1 << n) - 1) << (ret & 0x1f)));
		hpfs_mark_4buffers_dirty(&qbh);
	}
	b:
	hpfs_brelse4(&qbh);
	uls:
	return ret;
}

/*
 * Allocation strategy:	1) search place near the sector specified
 *			2) search bitmap where free sectors last found
 *			3) search all bitmaps
 *			4) search all bitmaps ignoring number of pre-allocated
 *				sectors
 */

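/*
 * A negative 'forward' requests mandatory preallocation: the sectors
 * following the run are claimed immediately and the whole allocation fails
 * if that is not possible.  A positive 'forward' is only a hint and may be
 * reduced (see sb_max_fwd_alloc and the less_fwd retry loop below).
 */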
secno hpfs_alloc_sector(struct super_block *s, secno near, unsigned n, int forward)
{
	secno sec;
	int i;
	unsigned n_bmps;
	struct hpfs_sb_info *sbi = hpfs_sb(s);
	int f_p = 0;
	int near_bmp;
	if (forward < 0) {
		forward = -forward;
		f_p = 1;
	}
	n_bmps = (sbi->sb_fs_size + 0x4000 - 1) >> 14;
	if (near && near < sbi->sb_fs_size) {
		if ((sec = alloc_in_bmp(s, near, n, f_p ? forward : forward/4))) goto ret;
		near_bmp = near >> 14;
	} else near_bmp = n_bmps / 2;
	/*
	if (b != -1) {
		if ((sec = alloc_in_bmp(s, b<<14, n, f_p ? forward : forward/2))) {
			b &= 0x0fffffff;
			goto ret;
		}
		if (b > 0x10000000) if ((sec = alloc_in_bmp(s, (b&0xfffffff)<<14, n, f_p ? forward : 0))) goto ret;
	*/
	if (!f_p) if (forward > sbi->sb_max_fwd_alloc) forward = sbi->sb_max_fwd_alloc;
	less_fwd:
	for (i = 0; i < n_bmps; i++) {
		if (near_bmp+i < n_bmps && ((sec = alloc_in_bmp(s, (near_bmp+i) << 14, n, forward)))) {
			sbi->sb_c_bitmap = near_bmp+i;
			goto ret;
		}
		if (!forward) {
			if (near_bmp-i-1 >= 0 && ((sec = alloc_in_bmp(s, (near_bmp-i-1) << 14, n, forward)))) {
				sbi->sb_c_bitmap = near_bmp-i-1;
				goto ret;
			}
		} else {
			if (near_bmp+i >= n_bmps && ((sec = alloc_in_bmp(s, (near_bmp+i-n_bmps) << 14, n, forward)))) {
				sbi->sb_c_bitmap = near_bmp+i-n_bmps;
				goto ret;
			}
		}
		if (i == 1 && sbi->sb_c_bitmap != -1 && ((sec = alloc_in_bmp(s, (sbi->sb_c_bitmap) << 14, n, forward)))) {
			goto ret;
		}
	}
	if (!f_p) {
		if (forward) {
			sbi->sb_max_fwd_alloc = forward * 3 / 4;
			forward /= 2;
			goto less_fwd;
		}
	}
	sec = 0;
	ret:
	if (sec) {
		i = 0;
		do
			hpfs_claim_alloc(s, sec + i);
		while (unlikely(++i < n));
	}
	if (sec && f_p) {
		for (i = 0; i < forward; i++) {
			if (!hpfs_alloc_if_possible(s, sec + n + i)) {
				hpfs_error(s, "Prealloc doesn't work! Wanted %d, allocated at %08x, can't allocate %d", forward, sec, i);
				sec = 0;
				break;
			}
		}
	}
	return sec;
}

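/*
 * Allocate one dnode (4 sectors) in the directory band.  The dnode bitmap
 * has one bit per dnode, so the position is converted to a dnode index,
 * alloc_in_bmp() is called with the special 'near' value that selects the
 * dnode bitmap, and the result is converted back to a sector number.
 */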
static secno alloc_in_dirband(struct super_block *s, secno near)
{
	unsigned nr = near;
	secno sec;
	struct hpfs_sb_info *sbi = hpfs_sb(s);
	if (nr < sbi->sb_dirband_start)
		nr = sbi->sb_dirband_start;
	if (nr >= sbi->sb_dirband_start + sbi->sb_dirband_size)
		nr = sbi->sb_dirband_start + sbi->sb_dirband_size - 4;
	nr -= sbi->sb_dirband_start;
	nr >>= 2;
	sec = alloc_in_bmp(s, (~0x3fff) | nr, 1, 0);
	if (!sec) return 0;
	hpfs_claim_dirband_alloc(s, sec);
	return ((sec & 0x3fff) << 2) + sbi->sb_dirband_start;
}

/* Allocate a sector if it's free */

int hpfs_alloc_if_possible(struct super_block *s, secno sec)
{
	struct quad_buffer_head qbh;
	__le32 *bmp;
	if (!(bmp = hpfs_map_bitmap(s, sec >> 14, &qbh, "aip"))) goto end;
	if (le32_to_cpu(bmp[(sec & 0x3fff) >> 5]) & (1 << (sec & 0x1f))) {
		bmp[(sec & 0x3fff) >> 5] &= cpu_to_le32(~(1 << (sec & 0x1f)));
		hpfs_mark_4buffers_dirty(&qbh);
		hpfs_brelse4(&qbh);
		hpfs_claim_alloc(s, sec);
		return 1;
	}
	hpfs_brelse4(&qbh);
	end:
	return 0;
}

/* Free sectors in bitmaps */

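/*
 * Freeing space also raises sb_max_fwd_alloc (capped at 0xffffff), undoing
 * the reduction applied by hpfs_alloc_sector() when allocations start to
 * fail, so later allocations may preallocate more aggressively again.
 */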
void hpfs_free_sectors(struct super_block *s, secno sec, unsigned n)
{
	struct quad_buffer_head qbh;
	__le32 *bmp;
	struct hpfs_sb_info *sbi = hpfs_sb(s);
	/*pr_info("2 - ");*/
	if (!n) return;
	if (sec < 0x12) {
		hpfs_error(s, "Trying to free reserved sector %08x", sec);
		return;
	}
	sbi->sb_max_fwd_alloc += n > 0xffff ? 0xffff : n;
	if (sbi->sb_max_fwd_alloc > 0xffffff) sbi->sb_max_fwd_alloc = 0xffffff;
	new_map:
	if (!(bmp = hpfs_map_bitmap(s, sec >> 14, &qbh, "free"))) {
		return;
	}
	new_tst:
	if ((le32_to_cpu(bmp[(sec & 0x3fff) >> 5]) >> (sec & 0x1f) & 1)) {
		hpfs_error(s, "sector %08x not allocated", sec);
		hpfs_brelse4(&qbh);
		return;
	}
	bmp[(sec & 0x3fff) >> 5] |= cpu_to_le32(1 << (sec & 0x1f));
	hpfs_claim_free(s, sec);
	if (!--n) {
		hpfs_mark_4buffers_dirty(&qbh);
		hpfs_brelse4(&qbh);
		return;
	}
	if (!(++sec & 0x3fff)) {
		hpfs_mark_4buffers_dirty(&qbh);
		hpfs_brelse4(&qbh);
		goto new_map;
	}
	goto new_tst;
}

/*
 * Check if there are at least n free dnodes on the filesystem.
 * Called before adding to a dnode. If we ran out of space while
 * splitting dnodes, the dnode tree would become corrupted.
 */

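/*
 * Free dnodes are counted first in the directory band (one bit per free
 * dnode) and then in the ordinary bitmaps, where each aligned, fully free
 * nibble (4 consecutive free sectors) could hold a dnode.
 */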
int hpfs_check_free_dnodes(struct super_block *s, int n)
{
	int n_bmps = (hpfs_sb(s)->sb_fs_size + 0x4000 - 1) >> 14;
	int b = hpfs_sb(s)->sb_c_bitmap & 0x0fffffff;
	int i, j;
	__le32 *bmp;
	struct quad_buffer_head qbh;
	if ((bmp = hpfs_map_dnode_bitmap(s, &qbh))) {
		for (j = 0; j < 512; j++) {
			unsigned k;
			if (!le32_to_cpu(bmp[j])) continue;
			for (k = le32_to_cpu(bmp[j]); k; k >>= 1) if (k & 1) if (!--n) {
				hpfs_brelse4(&qbh);
				return 0;
			}
		}
	}
	hpfs_brelse4(&qbh);
	i = 0;
	if (hpfs_sb(s)->sb_c_bitmap != -1) {
		bmp = hpfs_map_bitmap(s, b, &qbh, "chkdn1");
		goto chk_bmp;
	}
	chk_next:
	if (i == b) i++;
	if (i >= n_bmps) return 1;
	bmp = hpfs_map_bitmap(s, i, &qbh, "chkdn2");
	chk_bmp:
	if (bmp) {
		for (j = 0; j < 512; j++) {
			u32 k;
			if (!le32_to_cpu(bmp[j])) continue;
			for (k = 0xf; k; k <<= 4)
				if ((le32_to_cpu(bmp[j]) & k) == k) {
					if (!--n) {
						hpfs_brelse4(&qbh);
						return 0;
					}
				}
		}
		hpfs_brelse4(&qbh);
	}
	i++;
	goto chk_next;
}

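/*
 * Free a dnode: dnodes outside the directory band are released as four
 * ordinary sectors, dnodes inside it are returned to the dnode bitmap.
 */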
void hpfs_free_dnode(struct super_block *s, dnode_secno dno)
{
	if (hpfs_sb(s)->sb_chk) if (dno & 3) {
		hpfs_error(s, "hpfs_free_dnode: dnode %08x not aligned", dno);
		return;
	}
	if (dno < hpfs_sb(s)->sb_dirband_start ||
	    dno >= hpfs_sb(s)->sb_dirband_start + hpfs_sb(s)->sb_dirband_size) {
		hpfs_free_sectors(s, dno, 4);
	} else {
		struct quad_buffer_head qbh;
		__le32 *bmp;
		unsigned ssec = (dno - hpfs_sb(s)->sb_dirband_start) / 4;
		if (!(bmp = hpfs_map_dnode_bitmap(s, &qbh))) {
			return;
		}
		bmp[ssec >> 5] |= cpu_to_le32(1 << (ssec & 0x1f));
		hpfs_mark_4buffers_dirty(&qbh);
		hpfs_brelse4(&qbh);
		hpfs_claim_dirband_free(s, dno);
	}
}

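/*
 * Allocate a new dnode.  While the directory band still has plenty of free
 * dnodes (more than FREE_DNODES_ADD), it is preferred; otherwise ordinary
 * sectors are tried first and the band is only a fallback.
 */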
struct dnode *hpfs_alloc_dnode(struct super_block *s, secno near,
			 dnode_secno *dno, struct quad_buffer_head *qbh)
{
	struct dnode *d;
	if (hpfs_get_free_dnodes(s) > FREE_DNODES_ADD) {
		if (!(*dno = alloc_in_dirband(s, near)))
			if (!(*dno = hpfs_alloc_sector(s, near, 4, 0))) return NULL;
	} else {
		if (!(*dno = hpfs_alloc_sector(s, near, 4, 0)))
			if (!(*dno = alloc_in_dirband(s, near))) return NULL;
	}
	if (!(d = hpfs_get_4sectors(s, *dno, qbh))) {
		hpfs_free_dnode(s, *dno);
		return NULL;
	}
	memset(d, 0, 2048);
	d->magic = cpu_to_le32(DNODE_MAGIC);
	d->first_free = cpu_to_le32(52);
	d->dirent[0] = 32;
	d->dirent[2] = 8;
	d->dirent[30] = 1;
	d->dirent[31] = 255;
	d->self = cpu_to_le32(*dno);
	return d;
}

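/*
 * hpfs_alloc_fnode() and hpfs_alloc_anode() allocate a single sector with
 * a forward-allocation hint (FNODE_ALLOC_FWD / ANODE_ALLOC_FWD) and
 * initialize the on-disk structure with its magic number and an empty
 * btree / extended-attribute area.
 */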
struct fnode *hpfs_alloc_fnode(struct super_block *s, secno near, fnode_secno *fno,
			 struct buffer_head **bh)
{
	struct fnode *f;
	if (!(*fno = hpfs_alloc_sector(s, near, 1, FNODE_ALLOC_FWD))) return NULL;
	if (!(f = hpfs_get_sector(s, *fno, bh))) {
		hpfs_free_sectors(s, *fno, 1);
		return NULL;
	}
	memset(f, 0, 512);
	f->magic = cpu_to_le32(FNODE_MAGIC);
	f->ea_offs = cpu_to_le16(0xc4);
	f->btree.n_free_nodes = 8;
	f->btree.first_free = cpu_to_le16(8);
	return f;
}

struct anode *hpfs_alloc_anode(struct super_block *s, secno near, anode_secno *ano,
			 struct buffer_head **bh)
{
	struct anode *a;
	if (!(*ano = hpfs_alloc_sector(s, near, 1, ANODE_ALLOC_FWD))) return NULL;
	if (!(a = hpfs_get_sector(s, *ano, bh))) {
		hpfs_free_sectors(s, *ano, 1);
		return NULL;
	}
	memset(a, 0, 512);
	a->magic = cpu_to_le32(ANODE_MAGIC);
	a->self = cpu_to_le32(*ano);
	a->btree.n_free_nodes = 40;
	a->btree.n_used_nodes = 0;
	a->btree.first_free = cpu_to_le16(8);
	return a;
}

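/*
 * find_run() scans a bitmap for the next run of free (set) bits starting
 * at *idx and returns its length, leaving *idx at the start of the run;
 * it returns 0 once the end of the 0x4000-bit bitmap is reached without
 * finding a free bit.  Used by the trimming code below.
 */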
static unsigned find_run(__le32 *bmp, unsigned *idx)
{
	unsigned len;
	while (tstbits(bmp, *idx, 1)) {
		(*idx)++;
		if (unlikely(*idx >= 0x4000))
			return 0;
	}
	len = 1;
	while (!tstbits(bmp, *idx + len, 1))
		len++;
	return len;
}

static int do_trim(struct super_block *s, secno start, unsigned len, secno limit_start, secno limit_end, unsigned minlen, unsigned *result)
{
	int err;
	secno end;
	if (fatal_signal_pending(current))
		return -EINTR;
	end = start + len;
	if (start < limit_start)
		start = limit_start;
	if (end > limit_end)
		end = limit_end;
	if (start >= end)
		return 0;
	if (end - start < minlen)
		return 0;
	err = sb_issue_discard(s, start, end - start, GFP_NOFS, 0);
	if (err)
		return err;
	*result += end - start;
	return 0;
}

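/*
 * Discard (trim) unused space: the directory band and then every ordinary
 * bitmap are scanned for runs of free sectors; each run's intersection
 * with the requested range, if at least 'minlen' sectors long, is
 * discarded by do_trim(), which also accumulates the total in '*result'.
 */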
int hpfs_trim_fs(struct super_block *s, u64 start, u64 end, u64 minlen, unsigned *result)
{
	int err = 0;
	struct hpfs_sb_info *sbi = hpfs_sb(s);
	unsigned idx, len, start_bmp, end_bmp;
	__le32 *bmp;
	struct quad_buffer_head qbh;

	*result = 0;
	if (!end || end > sbi->sb_fs_size)
		end = sbi->sb_fs_size;
	if (start >= sbi->sb_fs_size)
		return 0;
	if (minlen > 0x4000)
		return 0;
	if (start < sbi->sb_dirband_start + sbi->sb_dirband_size && end > sbi->sb_dirband_start) {
		hpfs_lock(s);
		if (sb_rdonly(s)) {
			err = -EROFS;
			goto unlock_1;
		}
		if (!(bmp = hpfs_map_dnode_bitmap(s, &qbh))) {
			err = -EIO;
			goto unlock_1;
		}
		idx = 0;
		while ((len = find_run(bmp, &idx)) && !err) {
			err = do_trim(s, sbi->sb_dirband_start + idx * 4, len * 4, start, end, minlen, result);
			idx += len;
		}
		hpfs_brelse4(&qbh);
	unlock_1:
		hpfs_unlock(s);
	}
	start_bmp = start >> 14;
	end_bmp = (end + 0x3fff) >> 14;
	while (start_bmp < end_bmp && !err) {
		hpfs_lock(s);
		if (sb_rdonly(s)) {
			err = -EROFS;
			goto unlock_2;
		}
		if (!(bmp = hpfs_map_bitmap(s, start_bmp, &qbh, "trim"))) {
			err = -EIO;
			goto unlock_2;
		}
		idx = 0;
		while ((len = find_run(bmp, &idx)) && !err) {
			err = do_trim(s, (start_bmp << 14) + idx, len, start, end, minlen, result);
			idx += len;
		}
		hpfs_brelse4(&qbh);
	unlock_2:
		hpfs_unlock(s);
		start_bmp++;
	}
	return err;
}