1*4882a593Smuzhiyun // SPDX-License-Identifier: GPL-2.0-only
2*4882a593Smuzhiyun /*
3*4882a593Smuzhiyun * Copyright (C) 2004, OGAWA Hirofumi
4*4882a593Smuzhiyun */
5*4882a593Smuzhiyun
6*4882a593Smuzhiyun #include <linux/blkdev.h>
7*4882a593Smuzhiyun #include <linux/sched/signal.h>
8*4882a593Smuzhiyun #include "fat.h"
9*4882a593Smuzhiyun
/*
 * Per-variant (FAT12/FAT16/FAT32) accessors for raw FAT table entries.
 * One instance per variant is installed in sbi->fatent_ops at mount time
 * by fat_ent_access_init().
 */
struct fatent_operations {
	/* Map an entry index to its byte offset and block number in the FAT. */
	void (*ent_blocknr)(struct super_block *, int, int *, sector_t *);
	/* Point fatent->u at the entry located at @offset in the held buffer(s). */
	void (*ent_set_ptr)(struct fat_entry *, int);
	/* Read the block(s) holding the entry and set the entry pointer. */
	int (*ent_bread)(struct super_block *, struct fat_entry *,
			 int, sector_t);
	/* Decode the entry's value (FAT_ENT_EOF for end-of-chain). */
	int (*ent_get)(struct fat_entry *);
	/* Encode a new value into the entry and dirty its buffer(s). */
	void (*ent_put)(struct fat_entry *, int);
	/* Advance to the next entry; 0 when the current block is exhausted. */
	int (*ent_next)(struct fat_entry *);
};
19*4882a593Smuzhiyun
/* Protects the read-modify-write of packed 12-bit FAT entries. */
static DEFINE_SPINLOCK(fat12_entry_lock);
21*4882a593Smuzhiyun
/*
 * Locate a FAT12 entry: two 12-bit entries are packed into three bytes,
 * so the byte offset of @entry is entry * 1.5.
 */
static void fat12_ent_blocknr(struct super_block *sb, int entry,
			      int *offset, sector_t *blocknr)
{
	struct msdos_sb_info *sbi = MSDOS_SB(sb);
	int byte_off = entry + (entry >> 1);	/* entry * 1.5 */

	WARN_ON(!fat_valid_entry(sbi, entry));
	*blocknr = sbi->fat_start + (byte_off >> sb->s_blocksize_bits);
	*offset = byte_off & (sb->s_blocksize - 1);
}
31*4882a593Smuzhiyun
/*
 * Locate a FAT16/FAT32 entry: entries are a power-of-two number of bytes
 * wide (1 << sbi->fatent_shift).
 */
static void fat_ent_blocknr(struct super_block *sb, int entry,
			    int *offset, sector_t *blocknr)
{
	struct msdos_sb_info *sbi = MSDOS_SB(sb);
	int byte_off = entry << sbi->fatent_shift;

	WARN_ON(!fat_valid_entry(sbi, entry));
	*blocknr = sbi->fat_start + (byte_off >> sb->s_blocksize_bits);
	*offset = byte_off & (sb->s_blocksize - 1);
}
41*4882a593Smuzhiyun
fat12_ent_set_ptr(struct fat_entry * fatent,int offset)42*4882a593Smuzhiyun static void fat12_ent_set_ptr(struct fat_entry *fatent, int offset)
43*4882a593Smuzhiyun {
44*4882a593Smuzhiyun struct buffer_head **bhs = fatent->bhs;
45*4882a593Smuzhiyun if (fatent->nr_bhs == 1) {
46*4882a593Smuzhiyun WARN_ON(offset >= (bhs[0]->b_size - 1));
47*4882a593Smuzhiyun fatent->u.ent12_p[0] = bhs[0]->b_data + offset;
48*4882a593Smuzhiyun fatent->u.ent12_p[1] = bhs[0]->b_data + (offset + 1);
49*4882a593Smuzhiyun } else {
50*4882a593Smuzhiyun WARN_ON(offset != (bhs[0]->b_size - 1));
51*4882a593Smuzhiyun fatent->u.ent12_p[0] = bhs[0]->b_data + offset;
52*4882a593Smuzhiyun fatent->u.ent12_p[1] = bhs[1]->b_data;
53*4882a593Smuzhiyun }
54*4882a593Smuzhiyun }
55*4882a593Smuzhiyun
fat16_ent_set_ptr(struct fat_entry * fatent,int offset)56*4882a593Smuzhiyun static void fat16_ent_set_ptr(struct fat_entry *fatent, int offset)
57*4882a593Smuzhiyun {
58*4882a593Smuzhiyun WARN_ON(offset & (2 - 1));
59*4882a593Smuzhiyun fatent->u.ent16_p = (__le16 *)(fatent->bhs[0]->b_data + offset);
60*4882a593Smuzhiyun }
61*4882a593Smuzhiyun
fat32_ent_set_ptr(struct fat_entry * fatent,int offset)62*4882a593Smuzhiyun static void fat32_ent_set_ptr(struct fat_entry *fatent, int offset)
63*4882a593Smuzhiyun {
64*4882a593Smuzhiyun WARN_ON(offset & (4 - 1));
65*4882a593Smuzhiyun fatent->u.ent32_p = (__le32 *)(fatent->bhs[0]->b_data + offset);
66*4882a593Smuzhiyun }
67*4882a593Smuzhiyun
/*
 * Read the block(s) backing a FAT12 entry.  Because a 12-bit entry can
 * straddle a block boundary, this may need to read two adjacent blocks.
 * On success the entry pointers are set via fat12_ent_set_ptr().
 * Returns 0 on success, -EIO on read failure.
 */
static int fat12_ent_bread(struct super_block *sb, struct fat_entry *fatent,
			   int offset, sector_t blocknr)
{
	struct buffer_head **bhs = fatent->bhs;

	WARN_ON(blocknr < MSDOS_SB(sb)->fat_start);
	fatent->fat_inode = MSDOS_SB(sb)->fat_inode;

	bhs[0] = sb_bread(sb, blocknr);
	if (!bhs[0])
		goto err;

	if ((offset + 1) < sb->s_blocksize)
		fatent->nr_bhs = 1;
	else {
		/* This entry is block boundary, it needs the next block */
		blocknr++;
		bhs[1] = sb_bread(sb, blocknr);
		if (!bhs[1])
			goto err_brelse;
		fatent->nr_bhs = 2;
	}
	fat12_ent_set_ptr(fatent, offset);
	return 0;

err_brelse:
	brelse(bhs[0]);
err:
	/* blocknr here is whichever block actually failed to read */
	fat_msg_ratelimit(sb, KERN_ERR, "FAT read failed (blocknr %llu)",
			  (llu)blocknr);
	return -EIO;
}
100*4882a593Smuzhiyun
/*
 * Read the single block backing a FAT16/FAT32 entry and point the entry
 * pointer at it.  Returns 0 on success, -EIO on read failure.
 */
static int fat_ent_bread(struct super_block *sb, struct fat_entry *fatent,
			 int offset, sector_t blocknr)
{
	const struct fatent_operations *ops = MSDOS_SB(sb)->fatent_ops;
	struct buffer_head *bh;

	WARN_ON(blocknr < MSDOS_SB(sb)->fat_start);
	fatent->fat_inode = MSDOS_SB(sb)->fat_inode;

	bh = sb_bread(sb, blocknr);
	if (!bh) {
		fat_msg_ratelimit(sb, KERN_ERR, "FAT read failed (blocknr %llu)",
				  (llu)blocknr);
		return -EIO;
	}
	fatent->bhs[0] = bh;
	fatent->nr_bhs = 1;
	ops->ent_set_ptr(fatent, offset);
	return 0;
}
118*4882a593Smuzhiyun
fat12_ent_get(struct fat_entry * fatent)119*4882a593Smuzhiyun static int fat12_ent_get(struct fat_entry *fatent)
120*4882a593Smuzhiyun {
121*4882a593Smuzhiyun u8 **ent12_p = fatent->u.ent12_p;
122*4882a593Smuzhiyun int next;
123*4882a593Smuzhiyun
124*4882a593Smuzhiyun spin_lock(&fat12_entry_lock);
125*4882a593Smuzhiyun if (fatent->entry & 1)
126*4882a593Smuzhiyun next = (*ent12_p[0] >> 4) | (*ent12_p[1] << 4);
127*4882a593Smuzhiyun else
128*4882a593Smuzhiyun next = (*ent12_p[1] << 8) | *ent12_p[0];
129*4882a593Smuzhiyun spin_unlock(&fat12_entry_lock);
130*4882a593Smuzhiyun
131*4882a593Smuzhiyun next &= 0x0fff;
132*4882a593Smuzhiyun if (next >= BAD_FAT12)
133*4882a593Smuzhiyun next = FAT_ENT_EOF;
134*4882a593Smuzhiyun return next;
135*4882a593Smuzhiyun }
136*4882a593Smuzhiyun
fat16_ent_get(struct fat_entry * fatent)137*4882a593Smuzhiyun static int fat16_ent_get(struct fat_entry *fatent)
138*4882a593Smuzhiyun {
139*4882a593Smuzhiyun int next = le16_to_cpu(*fatent->u.ent16_p);
140*4882a593Smuzhiyun WARN_ON((unsigned long)fatent->u.ent16_p & (2 - 1));
141*4882a593Smuzhiyun if (next >= BAD_FAT16)
142*4882a593Smuzhiyun next = FAT_ENT_EOF;
143*4882a593Smuzhiyun return next;
144*4882a593Smuzhiyun }
145*4882a593Smuzhiyun
fat32_ent_get(struct fat_entry * fatent)146*4882a593Smuzhiyun static int fat32_ent_get(struct fat_entry *fatent)
147*4882a593Smuzhiyun {
148*4882a593Smuzhiyun int next = le32_to_cpu(*fatent->u.ent32_p) & 0x0fffffff;
149*4882a593Smuzhiyun WARN_ON((unsigned long)fatent->u.ent32_p & (4 - 1));
150*4882a593Smuzhiyun if (next >= BAD_FAT32)
151*4882a593Smuzhiyun next = FAT_ENT_EOF;
152*4882a593Smuzhiyun return next;
153*4882a593Smuzhiyun }
154*4882a593Smuzhiyun
fat12_ent_put(struct fat_entry * fatent,int new)155*4882a593Smuzhiyun static void fat12_ent_put(struct fat_entry *fatent, int new)
156*4882a593Smuzhiyun {
157*4882a593Smuzhiyun u8 **ent12_p = fatent->u.ent12_p;
158*4882a593Smuzhiyun
159*4882a593Smuzhiyun if (new == FAT_ENT_EOF)
160*4882a593Smuzhiyun new = EOF_FAT12;
161*4882a593Smuzhiyun
162*4882a593Smuzhiyun spin_lock(&fat12_entry_lock);
163*4882a593Smuzhiyun if (fatent->entry & 1) {
164*4882a593Smuzhiyun *ent12_p[0] = (new << 4) | (*ent12_p[0] & 0x0f);
165*4882a593Smuzhiyun *ent12_p[1] = new >> 4;
166*4882a593Smuzhiyun } else {
167*4882a593Smuzhiyun *ent12_p[0] = new & 0xff;
168*4882a593Smuzhiyun *ent12_p[1] = (*ent12_p[1] & 0xf0) | (new >> 8);
169*4882a593Smuzhiyun }
170*4882a593Smuzhiyun spin_unlock(&fat12_entry_lock);
171*4882a593Smuzhiyun
172*4882a593Smuzhiyun mark_buffer_dirty_inode(fatent->bhs[0], fatent->fat_inode);
173*4882a593Smuzhiyun if (fatent->nr_bhs == 2)
174*4882a593Smuzhiyun mark_buffer_dirty_inode(fatent->bhs[1], fatent->fat_inode);
175*4882a593Smuzhiyun }
176*4882a593Smuzhiyun
fat16_ent_put(struct fat_entry * fatent,int new)177*4882a593Smuzhiyun static void fat16_ent_put(struct fat_entry *fatent, int new)
178*4882a593Smuzhiyun {
179*4882a593Smuzhiyun if (new == FAT_ENT_EOF)
180*4882a593Smuzhiyun new = EOF_FAT16;
181*4882a593Smuzhiyun
182*4882a593Smuzhiyun *fatent->u.ent16_p = cpu_to_le16(new);
183*4882a593Smuzhiyun mark_buffer_dirty_inode(fatent->bhs[0], fatent->fat_inode);
184*4882a593Smuzhiyun }
185*4882a593Smuzhiyun
fat32_ent_put(struct fat_entry * fatent,int new)186*4882a593Smuzhiyun static void fat32_ent_put(struct fat_entry *fatent, int new)
187*4882a593Smuzhiyun {
188*4882a593Smuzhiyun WARN_ON(new & 0xf0000000);
189*4882a593Smuzhiyun new |= le32_to_cpu(*fatent->u.ent32_p) & ~0x0fffffff;
190*4882a593Smuzhiyun *fatent->u.ent32_p = cpu_to_le32(new);
191*4882a593Smuzhiyun mark_buffer_dirty_inode(fatent->bhs[0], fatent->fat_inode);
192*4882a593Smuzhiyun }
193*4882a593Smuzhiyun
/*
 * Advance a FAT12 entry pointer to the next entry within the buffer(s)
 * already held by @fatent.  Returns 1 if the next entry is reachable in
 * the held buffers, 0 if a new block must be read (pointers are cleared).
 */
static int fat12_ent_next(struct fat_entry *fatent)
{
	u8 **ent12_p = fatent->u.ent12_p;
	struct buffer_head **bhs = fatent->bhs;
	/* next entry starts 1 or 2 bytes further, depending on parity */
	u8 *nextp = ent12_p[1] + 1 + (fatent->entry & 1);

	fatent->entry++;
	if (fatent->nr_bhs == 1) {
		WARN_ON(ent12_p[0] > (u8 *)(bhs[0]->b_data +
					    (bhs[0]->b_size - 2)));
		WARN_ON(ent12_p[1] > (u8 *)(bhs[0]->b_data +
					    (bhs[0]->b_size - 1)));
		/* next entry must fit entirely inside this buffer */
		if (nextp < (u8 *)(bhs[0]->b_data + (bhs[0]->b_size - 1))) {
			ent12_p[0] = nextp - 1;
			ent12_p[1] = nextp;
			return 1;
		}
	} else {
		/* previous entry straddled blocks; next one is in bhs[1] */
		WARN_ON(ent12_p[0] != (u8 *)(bhs[0]->b_data +
					     (bhs[0]->b_size - 1)));
		WARN_ON(ent12_p[1] != (u8 *)bhs[1]->b_data);
		ent12_p[0] = nextp - 1;
		ent12_p[1] = nextp;
		/* first buffer is fully consumed; keep only the second */
		brelse(bhs[0]);
		bhs[0] = bhs[1];
		fatent->nr_bhs = 1;
		return 1;
	}
	ent12_p[0] = NULL;
	ent12_p[1] = NULL;
	return 0;
}
226*4882a593Smuzhiyun
fat16_ent_next(struct fat_entry * fatent)227*4882a593Smuzhiyun static int fat16_ent_next(struct fat_entry *fatent)
228*4882a593Smuzhiyun {
229*4882a593Smuzhiyun const struct buffer_head *bh = fatent->bhs[0];
230*4882a593Smuzhiyun fatent->entry++;
231*4882a593Smuzhiyun if (fatent->u.ent16_p < (__le16 *)(bh->b_data + (bh->b_size - 2))) {
232*4882a593Smuzhiyun fatent->u.ent16_p++;
233*4882a593Smuzhiyun return 1;
234*4882a593Smuzhiyun }
235*4882a593Smuzhiyun fatent->u.ent16_p = NULL;
236*4882a593Smuzhiyun return 0;
237*4882a593Smuzhiyun }
238*4882a593Smuzhiyun
fat32_ent_next(struct fat_entry * fatent)239*4882a593Smuzhiyun static int fat32_ent_next(struct fat_entry *fatent)
240*4882a593Smuzhiyun {
241*4882a593Smuzhiyun const struct buffer_head *bh = fatent->bhs[0];
242*4882a593Smuzhiyun fatent->entry++;
243*4882a593Smuzhiyun if (fatent->u.ent32_p < (__le32 *)(bh->b_data + (bh->b_size - 4))) {
244*4882a593Smuzhiyun fatent->u.ent32_p++;
245*4882a593Smuzhiyun return 1;
246*4882a593Smuzhiyun }
247*4882a593Smuzhiyun fatent->u.ent32_p = NULL;
248*4882a593Smuzhiyun return 0;
249*4882a593Smuzhiyun }
250*4882a593Smuzhiyun
/* FAT12 accessors: packed 12-bit entries, may straddle block boundaries. */
static const struct fatent_operations fat12_ops = {
	.ent_blocknr	= fat12_ent_blocknr,
	.ent_set_ptr	= fat12_ent_set_ptr,
	.ent_bread	= fat12_ent_bread,
	.ent_get	= fat12_ent_get,
	.ent_put	= fat12_ent_put,
	.ent_next	= fat12_ent_next,
};

/* FAT16 accessors: aligned 16-bit little-endian entries. */
static const struct fatent_operations fat16_ops = {
	.ent_blocknr	= fat_ent_blocknr,
	.ent_set_ptr	= fat16_ent_set_ptr,
	.ent_bread	= fat_ent_bread,
	.ent_get	= fat16_ent_get,
	.ent_put	= fat16_ent_put,
	.ent_next	= fat16_ent_next,
};

/* FAT32 accessors: aligned 32-bit entries, top nibble reserved. */
static const struct fatent_operations fat32_ops = {
	.ent_blocknr	= fat_ent_blocknr,
	.ent_set_ptr	= fat32_ent_set_ptr,
	.ent_bread	= fat_ent_bread,
	.ent_get	= fat32_ent_get,
	.ent_put	= fat32_ent_put,
	.ent_next	= fat32_ent_next,
};
277*4882a593Smuzhiyun
/* Serialize FAT table updates and free-cluster accounting. */
static inline void lock_fat(struct msdos_sb_info *sbi)
{
	mutex_lock(&sbi->fat_lock);
}
282*4882a593Smuzhiyun
/* Counterpart of lock_fat(). */
static inline void unlock_fat(struct msdos_sb_info *sbi)
{
	mutex_unlock(&sbi->fat_lock);
}
287*4882a593Smuzhiyun
fat_ent_access_init(struct super_block * sb)288*4882a593Smuzhiyun void fat_ent_access_init(struct super_block *sb)
289*4882a593Smuzhiyun {
290*4882a593Smuzhiyun struct msdos_sb_info *sbi = MSDOS_SB(sb);
291*4882a593Smuzhiyun
292*4882a593Smuzhiyun mutex_init(&sbi->fat_lock);
293*4882a593Smuzhiyun
294*4882a593Smuzhiyun if (is_fat32(sbi)) {
295*4882a593Smuzhiyun sbi->fatent_shift = 2;
296*4882a593Smuzhiyun sbi->fatent_ops = &fat32_ops;
297*4882a593Smuzhiyun } else if (is_fat16(sbi)) {
298*4882a593Smuzhiyun sbi->fatent_shift = 1;
299*4882a593Smuzhiyun sbi->fatent_ops = &fat16_ops;
300*4882a593Smuzhiyun } else if (is_fat12(sbi)) {
301*4882a593Smuzhiyun sbi->fatent_shift = -1;
302*4882a593Smuzhiyun sbi->fatent_ops = &fat12_ops;
303*4882a593Smuzhiyun } else {
304*4882a593Smuzhiyun fat_fs_error(sb, "invalid FAT variant, %u bits", sbi->fat_bits);
305*4882a593Smuzhiyun }
306*4882a593Smuzhiyun }
307*4882a593Smuzhiyun
mark_fsinfo_dirty(struct super_block * sb)308*4882a593Smuzhiyun static void mark_fsinfo_dirty(struct super_block *sb)
309*4882a593Smuzhiyun {
310*4882a593Smuzhiyun struct msdos_sb_info *sbi = MSDOS_SB(sb);
311*4882a593Smuzhiyun
312*4882a593Smuzhiyun if (sb_rdonly(sb) || !is_fat32(sbi))
313*4882a593Smuzhiyun return;
314*4882a593Smuzhiyun
315*4882a593Smuzhiyun __mark_inode_dirty(sbi->fsinfo_inode, I_DIRTY_SYNC);
316*4882a593Smuzhiyun }
317*4882a593Smuzhiyun
/*
 * Try to reuse the buffer(s) @fatent already holds for a new entry at
 * (@offset, @blocknr).  Returns 1 if the pointers were updated in place,
 * 0 if the caller must re-read via ent_bread().
 */
static inline int fat_ent_update_ptr(struct super_block *sb,
				     struct fat_entry *fatent,
				     int offset, sector_t blocknr)
{
	struct msdos_sb_info *sbi = MSDOS_SB(sb);
	const struct fatent_operations *ops = sbi->fatent_ops;
	struct buffer_head **bhs = fatent->bhs;

	/* Is this fatent's blocks including this entry? */
	if (!fatent->nr_bhs || bhs[0]->b_blocknr != blocknr)
		return 0;
	if (is_fat12(sbi)) {
		if ((offset + 1) < sb->s_blocksize) {
			/* This entry is on bhs[0]. */
			if (fatent->nr_bhs == 2) {
				/* second buffer no longer needed; drop it */
				brelse(bhs[1]);
				fatent->nr_bhs = 1;
			}
		} else {
			/* This entry needs the next block. */
			if (fatent->nr_bhs != 2)
				return 0;
			if (bhs[1]->b_blocknr != (blocknr + 1))
				return 0;
		}
	}
	ops->ent_set_ptr(fatent, offset);
	return 1;
}
347*4882a593Smuzhiyun
fat_ent_read(struct inode * inode,struct fat_entry * fatent,int entry)348*4882a593Smuzhiyun int fat_ent_read(struct inode *inode, struct fat_entry *fatent, int entry)
349*4882a593Smuzhiyun {
350*4882a593Smuzhiyun struct super_block *sb = inode->i_sb;
351*4882a593Smuzhiyun struct msdos_sb_info *sbi = MSDOS_SB(inode->i_sb);
352*4882a593Smuzhiyun const struct fatent_operations *ops = sbi->fatent_ops;
353*4882a593Smuzhiyun int err, offset;
354*4882a593Smuzhiyun sector_t blocknr;
355*4882a593Smuzhiyun
356*4882a593Smuzhiyun if (!fat_valid_entry(sbi, entry)) {
357*4882a593Smuzhiyun fatent_brelse(fatent);
358*4882a593Smuzhiyun fat_fs_error(sb, "invalid access to FAT (entry 0x%08x)", entry);
359*4882a593Smuzhiyun return -EIO;
360*4882a593Smuzhiyun }
361*4882a593Smuzhiyun
362*4882a593Smuzhiyun fatent_set_entry(fatent, entry);
363*4882a593Smuzhiyun ops->ent_blocknr(sb, entry, &offset, &blocknr);
364*4882a593Smuzhiyun
365*4882a593Smuzhiyun if (!fat_ent_update_ptr(sb, fatent, offset, blocknr)) {
366*4882a593Smuzhiyun fatent_brelse(fatent);
367*4882a593Smuzhiyun err = ops->ent_bread(sb, fatent, offset, blocknr);
368*4882a593Smuzhiyun if (err)
369*4882a593Smuzhiyun return err;
370*4882a593Smuzhiyun }
371*4882a593Smuzhiyun return ops->ent_get(fatent);
372*4882a593Smuzhiyun }
373*4882a593Smuzhiyun
/* FIXME: We can write the blocks as more big chunk. */
/*
 * Replicate the dirtied FAT blocks in @bhs into every backup FAT copy.
 * On SB_SYNCHRONOUS mounts each copied block is synced immediately.
 * Returns 0 on success or a negative errno.
 */
static int fat_mirror_bhs(struct super_block *sb, struct buffer_head **bhs,
			  int nr_bhs)
{
	struct msdos_sb_info *sbi = MSDOS_SB(sb);
	struct buffer_head *c_bh;
	int err, n, copy;

	err = 0;
	for (copy = 1; copy < sbi->fats; copy++) {
		/* block distance from the first FAT to the copy'th one */
		sector_t backup_fat = sbi->fat_length * copy;

		for (n = 0; n < nr_bhs; n++) {
			c_bh = sb_getblk(sb, backup_fat + bhs[n]->b_blocknr);
			if (!c_bh) {
				err = -ENOMEM;
				goto error;
			}
			/* Avoid race with userspace read via bdev */
			lock_buffer(c_bh);
			memcpy(c_bh->b_data, bhs[n]->b_data, sb->s_blocksize);
			set_buffer_uptodate(c_bh);
			unlock_buffer(c_bh);
			mark_buffer_dirty_inode(c_bh, sbi->fat_inode);
			if (sb->s_flags & SB_SYNCHRONOUS)
				err = sync_dirty_buffer(c_bh);
			brelse(c_bh);
			if (err)
				goto error;
		}
	}
error:
	return err;
}
408*4882a593Smuzhiyun
fat_ent_write(struct inode * inode,struct fat_entry * fatent,int new,int wait)409*4882a593Smuzhiyun int fat_ent_write(struct inode *inode, struct fat_entry *fatent,
410*4882a593Smuzhiyun int new, int wait)
411*4882a593Smuzhiyun {
412*4882a593Smuzhiyun struct super_block *sb = inode->i_sb;
413*4882a593Smuzhiyun const struct fatent_operations *ops = MSDOS_SB(sb)->fatent_ops;
414*4882a593Smuzhiyun int err;
415*4882a593Smuzhiyun
416*4882a593Smuzhiyun ops->ent_put(fatent, new);
417*4882a593Smuzhiyun if (wait) {
418*4882a593Smuzhiyun err = fat_sync_bhs(fatent->bhs, fatent->nr_bhs);
419*4882a593Smuzhiyun if (err)
420*4882a593Smuzhiyun return err;
421*4882a593Smuzhiyun }
422*4882a593Smuzhiyun return fat_mirror_bhs(sb, fatent->bhs, fatent->nr_bhs);
423*4882a593Smuzhiyun }
424*4882a593Smuzhiyun
fat_ent_next(struct msdos_sb_info * sbi,struct fat_entry * fatent)425*4882a593Smuzhiyun static inline int fat_ent_next(struct msdos_sb_info *sbi,
426*4882a593Smuzhiyun struct fat_entry *fatent)
427*4882a593Smuzhiyun {
428*4882a593Smuzhiyun if (sbi->fatent_ops->ent_next(fatent)) {
429*4882a593Smuzhiyun if (fatent->entry < sbi->max_cluster)
430*4882a593Smuzhiyun return 1;
431*4882a593Smuzhiyun }
432*4882a593Smuzhiyun return 0;
433*4882a593Smuzhiyun }
434*4882a593Smuzhiyun
/*
 * Drop any buffers @fatent holds and read the block(s) for its current
 * entry.  Returns 0 on success or a negative errno from ent_bread().
 */
static inline int fat_ent_read_block(struct super_block *sb,
				     struct fat_entry *fatent)
{
	const struct fatent_operations *ops = MSDOS_SB(sb)->fatent_ops;
	sector_t blocknr;
	int offset;

	fatent_brelse(fatent);
	ops->ent_blocknr(sb, fatent->entry, &offset, &blocknr);
	return ops->ent_bread(sb, fatent, offset, blocknr);
}
446*4882a593Smuzhiyun
fat_collect_bhs(struct buffer_head ** bhs,int * nr_bhs,struct fat_entry * fatent)447*4882a593Smuzhiyun static void fat_collect_bhs(struct buffer_head **bhs, int *nr_bhs,
448*4882a593Smuzhiyun struct fat_entry *fatent)
449*4882a593Smuzhiyun {
450*4882a593Smuzhiyun int n, i;
451*4882a593Smuzhiyun
452*4882a593Smuzhiyun for (n = 0; n < fatent->nr_bhs; n++) {
453*4882a593Smuzhiyun for (i = 0; i < *nr_bhs; i++) {
454*4882a593Smuzhiyun if (fatent->bhs[n] == bhs[i])
455*4882a593Smuzhiyun break;
456*4882a593Smuzhiyun }
457*4882a593Smuzhiyun if (i == *nr_bhs) {
458*4882a593Smuzhiyun get_bh(fatent->bhs[n]);
459*4882a593Smuzhiyun bhs[i] = fatent->bhs[n];
460*4882a593Smuzhiyun (*nr_bhs)++;
461*4882a593Smuzhiyun }
462*4882a593Smuzhiyun }
463*4882a593Smuzhiyun }
464*4882a593Smuzhiyun
/*
 * Allocate @nr_cluster free clusters, chaining them together in the FAT
 * and storing the cluster numbers in @cluster[].  The scan starts just
 * past the last allocation point (sbi->prev_free) and wraps around.
 * Returns 0 on success, -ENOSPC if not enough free clusters exist, or
 * another negative errno on I/O failure (any partial chain is freed).
 */
int fat_alloc_clusters(struct inode *inode, int *cluster, int nr_cluster)
{
	struct super_block *sb = inode->i_sb;
	struct msdos_sb_info *sbi = MSDOS_SB(sb);
	const struct fatent_operations *ops = sbi->fatent_ops;
	struct fat_entry fatent, prev_ent;
	struct buffer_head *bhs[MAX_BUF_PER_PAGE];
	int i, count, err, nr_bhs, idx_clus;

	BUG_ON(nr_cluster > (MAX_BUF_PER_PAGE / 2)); /* fixed limit */

	lock_fat(sbi);
	/* Fail fast when the cached free-cluster count already says ENOSPC. */
	if (sbi->free_clusters != -1 && sbi->free_clus_valid &&
	    sbi->free_clusters < nr_cluster) {
		unlock_fat(sbi);
		return -ENOSPC;
	}

	err = nr_bhs = idx_clus = 0;
	count = FAT_START_ENT;
	fatent_init(&prev_ent);
	fatent_init(&fatent);
	fatent_set_entry(&fatent, sbi->prev_free + 1);
	while (count < sbi->max_cluster) {
		/* wrap the scan back to the first data cluster */
		if (fatent.entry >= sbi->max_cluster)
			fatent.entry = FAT_START_ENT;
		fatent_set_entry(&fatent, fatent.entry);
		err = fat_ent_read_block(sb, &fatent);
		if (err)
			goto out;

		/* Find the free entries in a block */
		do {
			if (ops->ent_get(&fatent) == FAT_ENT_FREE) {
				int entry = fatent.entry;

				/* make the cluster chain */
				ops->ent_put(&fatent, FAT_ENT_EOF);
				if (prev_ent.nr_bhs)
					ops->ent_put(&prev_ent, entry);

				fat_collect_bhs(bhs, &nr_bhs, &fatent);

				sbi->prev_free = entry;
				if (sbi->free_clusters != -1)
					sbi->free_clusters--;

				cluster[idx_clus] = entry;
				idx_clus++;
				if (idx_clus == nr_cluster)
					goto out;

				/*
				 * fat_collect_bhs() gets ref-count of bhs,
				 * so we can still use the prev_ent.
				 */
				prev_ent = fatent;
			}
			count++;
			if (count == sbi->max_cluster)
				break;
		} while (fat_ent_next(sbi, &fatent));
	}

	/* Couldn't allocate the free entries */
	sbi->free_clusters = 0;
	sbi->free_clus_valid = 1;
	err = -ENOSPC;

out:
	unlock_fat(sbi);
	mark_fsinfo_dirty(sb);
	fatent_brelse(&fatent);
	if (!err) {
		if (inode_needs_sync(inode))
			err = fat_sync_bhs(bhs, nr_bhs);
		if (!err)
			err = fat_mirror_bhs(sb, bhs, nr_bhs);
	}
	for (i = 0; i < nr_bhs; i++)
		brelse(bhs[i]);

	/* On failure, release whatever chain we managed to build. */
	if (err && idx_clus)
		fat_free_clusters(inode, cluster[0]);

	return err;
}
552*4882a593Smuzhiyun
/*
 * Free the cluster chain starting at @cluster: walk the chain, mark each
 * entry FAT_ENT_FREE, optionally issue discards for contiguous runs, and
 * flush/mirror the dirtied FAT blocks in batches.
 * Returns 0 on success or a negative errno.
 */
int fat_free_clusters(struct inode *inode, int cluster)
{
	struct super_block *sb = inode->i_sb;
	struct msdos_sb_info *sbi = MSDOS_SB(sb);
	const struct fatent_operations *ops = sbi->fatent_ops;
	struct fat_entry fatent;
	struct buffer_head *bhs[MAX_BUF_PER_PAGE];
	int i, err, nr_bhs;
	int first_cl = cluster, dirty_fsinfo = 0;

	nr_bhs = 0;
	fatent_init(&fatent);
	lock_fat(sbi);
	do {
		cluster = fat_ent_read(inode, &fatent, cluster);
		if (cluster < 0) {
			err = cluster;
			goto error;
		} else if (cluster == FAT_ENT_FREE) {
			/* chain points at an already-free entry: corruption */
			fat_fs_error(sb, "%s: deleting FAT entry beyond EOF",
				     __func__);
			err = -EIO;
			goto error;
		}

		if (sbi->options.discard) {
			/*
			 * Issue discard for the sectors we no longer
			 * care about, batching contiguous clusters
			 * into one request
			 */
			if (cluster != fatent.entry + 1) {
				int nr_clus = fatent.entry - first_cl + 1;

				sb_issue_discard(sb,
					fat_clus_to_blknr(sbi, first_cl),
					nr_clus * sbi->sec_per_clus,
					GFP_NOFS, 0);

				first_cl = cluster;
			}
		}

		ops->ent_put(&fatent, FAT_ENT_FREE);
		if (sbi->free_clusters != -1) {
			sbi->free_clusters++;
			dirty_fsinfo = 1;
		}

		/* batch is full: flush and release before collecting more */
		if (nr_bhs + fatent.nr_bhs > MAX_BUF_PER_PAGE) {
			if (sb->s_flags & SB_SYNCHRONOUS) {
				err = fat_sync_bhs(bhs, nr_bhs);
				if (err)
					goto error;
			}
			err = fat_mirror_bhs(sb, bhs, nr_bhs);
			if (err)
				goto error;
			for (i = 0; i < nr_bhs; i++)
				brelse(bhs[i]);
			nr_bhs = 0;
		}
		fat_collect_bhs(bhs, &nr_bhs, &fatent);
	} while (cluster != FAT_ENT_EOF);

	/* flush and mirror the final batch */
	if (sb->s_flags & SB_SYNCHRONOUS) {
		err = fat_sync_bhs(bhs, nr_bhs);
		if (err)
			goto error;
	}
	err = fat_mirror_bhs(sb, bhs, nr_bhs);
error:
	fatent_brelse(&fatent);
	for (i = 0; i < nr_bhs; i++)
		brelse(bhs[i]);
	unlock_fat(sbi);
	if (dirty_fsinfo)
		mark_fsinfo_dirty(sb);

	return err;
}
EXPORT_SYMBOL_GPL(fat_free_clusters);
635*4882a593Smuzhiyun
/*
 * Readahead state for a sequential scan of the FAT table.  All sector
 * values are relative to the start of the scanned range (see
 * fat_ra_init(), which starts ->cur at 0).
 */
struct fatent_ra {
	sector_t cur;		/* blocks consumed so far */
	sector_t limit;		/* total blocks in the scan range */

	unsigned int ra_blocks;	/* window advance step (half a window) */
	sector_t ra_advance;	/* ->cur at which the next batch is issued */
	sector_t ra_next;	/* next block to read ahead */
	sector_t ra_limit;	/* end of the current readahead window */
};
645*4882a593Smuzhiyun
/*
 * Initialize readahead state for a sequential FAT scan covering
 * [fatent->entry, ent_limit).  No-op when the start is past the limit.
 */
static void fat_ra_init(struct super_block *sb, struct fatent_ra *ra,
			struct fat_entry *fatent, int ent_limit)
{
	struct msdos_sb_info *sbi = MSDOS_SB(sb);
	const struct fatent_operations *ops = sbi->fatent_ops;
	sector_t blocknr, block_end;
	int offset;
	/*
	 * This is the sequential read, so ra_pages * 2 (but try to
	 * align the optimal hardware IO size).
	 * [BTW, 128kb covers the whole sectors for FAT12 and FAT16]
	 */
	unsigned long ra_pages = sb->s_bdi->ra_pages;
	unsigned int reada_blocks;

	if (fatent->entry >= ent_limit)
		return;

	if (ra_pages > sb->s_bdi->io_pages)
		ra_pages = rounddown(ra_pages, sb->s_bdi->io_pages);
	/* pages -> blocks, doubled (hence the "+ 1" in the shift) */
	reada_blocks = ra_pages << (PAGE_SHIFT - sb->s_blocksize_bits + 1);

	/* Initialize the range for sequential read */
	ops->ent_blocknr(sb, fatent->entry, &offset, &blocknr);
	ops->ent_blocknr(sb, ent_limit - 1, &offset, &block_end);
	ra->cur = 0;
	ra->limit = (block_end + 1) - blocknr;

	/* Advancing the window at half size */
	ra->ra_blocks = reada_blocks >> 1;
	ra->ra_advance = ra->cur;
	ra->ra_next = ra->cur;
	ra->ra_limit = ra->cur + min_t(sector_t, reada_blocks, ra->limit);
}
680*4882a593Smuzhiyun
/* Assuming to be called before reading a new block (increments ->cur). */
/*
 * Issue readahead for upcoming FAT blocks once the scan reaches the
 * advance point, then move the window forward by half its size.  All
 * submissions for one batch are plugged so they can be merged.
 */
static void fat_ent_reada(struct super_block *sb, struct fatent_ra *ra,
			  struct fat_entry *fatent)
{
	if (ra->ra_next >= ra->ra_limit)
		return;		/* window exhausted; nothing left to read */

	if (ra->cur >= ra->ra_advance) {
		struct msdos_sb_info *sbi = MSDOS_SB(sb);
		const struct fatent_operations *ops = sbi->fatent_ops;
		struct blk_plug plug;
		sector_t blocknr, diff;
		int offset;

		ops->ent_blocknr(sb, fatent->entry, &offset, &blocknr);

		/* diff maps relative window offsets to absolute sectors */
		diff = blocknr - ra->cur;
		blk_start_plug(&plug);
		/*
		 * FIXME: we would want to directly use the bio with
		 * pages to reduce the number of segments.
		 */
		for (; ra->ra_next < ra->ra_limit; ra->ra_next++)
			sb_breadahead(sb, ra->ra_next + diff);
		blk_finish_plug(&plug);

		/* Advance the readahead window */
		ra->ra_advance += ra->ra_blocks;
		ra->ra_limit += min_t(sector_t,
				      ra->ra_blocks, ra->limit - ra->ra_limit);
	}
	ra->cur++;
}
714*4882a593Smuzhiyun
fat_count_free_clusters(struct super_block * sb)715*4882a593Smuzhiyun int fat_count_free_clusters(struct super_block *sb)
716*4882a593Smuzhiyun {
717*4882a593Smuzhiyun struct msdos_sb_info *sbi = MSDOS_SB(sb);
718*4882a593Smuzhiyun const struct fatent_operations *ops = sbi->fatent_ops;
719*4882a593Smuzhiyun struct fat_entry fatent;
720*4882a593Smuzhiyun struct fatent_ra fatent_ra;
721*4882a593Smuzhiyun int err = 0, free;
722*4882a593Smuzhiyun
723*4882a593Smuzhiyun lock_fat(sbi);
724*4882a593Smuzhiyun if (sbi->free_clusters != -1 && sbi->free_clus_valid)
725*4882a593Smuzhiyun goto out;
726*4882a593Smuzhiyun
727*4882a593Smuzhiyun free = 0;
728*4882a593Smuzhiyun fatent_init(&fatent);
729*4882a593Smuzhiyun fatent_set_entry(&fatent, FAT_START_ENT);
730*4882a593Smuzhiyun fat_ra_init(sb, &fatent_ra, &fatent, sbi->max_cluster);
731*4882a593Smuzhiyun while (fatent.entry < sbi->max_cluster) {
732*4882a593Smuzhiyun /* readahead of fat blocks */
733*4882a593Smuzhiyun fat_ent_reada(sb, &fatent_ra, &fatent);
734*4882a593Smuzhiyun
735*4882a593Smuzhiyun err = fat_ent_read_block(sb, &fatent);
736*4882a593Smuzhiyun if (err)
737*4882a593Smuzhiyun goto out;
738*4882a593Smuzhiyun
739*4882a593Smuzhiyun do {
740*4882a593Smuzhiyun if (ops->ent_get(&fatent) == FAT_ENT_FREE)
741*4882a593Smuzhiyun free++;
742*4882a593Smuzhiyun } while (fat_ent_next(sbi, &fatent));
743*4882a593Smuzhiyun cond_resched();
744*4882a593Smuzhiyun }
745*4882a593Smuzhiyun sbi->free_clusters = free;
746*4882a593Smuzhiyun sbi->free_clus_valid = 1;
747*4882a593Smuzhiyun mark_fsinfo_dirty(sb);
748*4882a593Smuzhiyun fatent_brelse(&fatent);
749*4882a593Smuzhiyun out:
750*4882a593Smuzhiyun unlock_fat(sbi);
751*4882a593Smuzhiyun return err;
752*4882a593Smuzhiyun }
753*4882a593Smuzhiyun
/*
 * Discard the device sectors backing @nr_clus consecutive clusters
 * starting at cluster @clus.  Returns the sb_issue_discard() result.
 */
static int fat_trim_clusters(struct super_block *sb, u32 clus, u32 nr_clus)
{
	struct msdos_sb_info *sbi = MSDOS_SB(sb);
	sector_t start = fat_clus_to_blknr(sbi, clus);

	return sb_issue_discard(sb, start, nr_clus * sbi->sec_per_clus,
				GFP_NOFS, 0);
}
760*4882a593Smuzhiyun
/*
 * fat_trim_fs - discard free clusters within the byte range in @range.
 *
 * Converts @range to cluster indexes, scans the FAT for runs of free
 * clusters of at least @range->minlen clusters, and issues a discard
 * for each run.  On return, @range->len is updated to the number of
 * bytes actually trimmed.  Returns 0 on success, -EINVAL for a range
 * outside the data region, -ERESTARTSYS on a fatal signal, or an
 * error from FAT reading / discard submission (-EOPNOTSUPP from the
 * device is treated as "nothing trimmed", not an error).
 */
int fat_trim_fs(struct inode *inode, struct fstrim_range *range)
{
	struct super_block *sb = inode->i_sb;
	struct msdos_sb_info *sbi = MSDOS_SB(sb);
	const struct fatent_operations *ops = sbi->fatent_ops;
	struct fat_entry fatent;
	struct fatent_ra fatent_ra;
	u64 ent_start, ent_end, minlen, trimmed = 0;
	u32 free = 0;
	int err = 0;

	/*
	 * FAT data is organized as clusters, trim at the granularity
	 * of cluster.
	 *
	 * fstrim_range is in bytes, convert values to cluster index.
	 * Treat sectors before data region as all used, not to trim them.
	 */
	ent_start = max_t(u64, range->start>>sbi->cluster_bits, FAT_START_ENT);
	ent_end = ent_start + (range->len >> sbi->cluster_bits) - 1;
	minlen = range->minlen >> sbi->cluster_bits;

	if (ent_start >= sbi->max_cluster || range->len < sbi->cluster_size)
		return -EINVAL;
	/* Clamp the end to the last valid cluster. */
	if (ent_end >= sbi->max_cluster)
		ent_end = sbi->max_cluster - 1;

	fatent_init(&fatent);
	lock_fat(sbi);
	fatent_set_entry(&fatent, ent_start);
	fat_ra_init(sb, &fatent_ra, &fatent, ent_end + 1);
	while (fatent.entry <= ent_end) {
		/* readahead of fat blocks */
		fat_ent_reada(sb, &fatent_ra, &fatent);

		err = fat_ent_read_block(sb, &fatent);
		if (err)
			goto error;
		do {
			if (ops->ent_get(&fatent) == FAT_ENT_FREE) {
				free++;	/* extend the current free run */
			} else if (free) {
				/* Run ended; trim it if long enough. */
				if (free >= minlen) {
					u32 clus = fatent.entry - free;

					err = fat_trim_clusters(sb, clus, free);
					if (err && err != -EOPNOTSUPP)
						goto error;
					if (!err)
						trimmed += free;
					err = 0;
				}
				free = 0;
			}
		} while (fat_ent_next(sbi, &fatent) && fatent.entry <= ent_end);

		if (fatal_signal_pending(current)) {
			err = -ERESTARTSYS;
			goto error;
		}

		/* Drop the FAT lock across a reschedule; a free run may
		 * have been reused meanwhile, but trimming is best-effort. */
		if (need_resched()) {
			fatent_brelse(&fatent);
			unlock_fat(sbi);
			cond_resched();
			lock_fat(sbi);
		}
	}
	/* handle scenario when tail entries are all free */
	if (free && free >= minlen) {
		u32 clus = fatent.entry - free;

		err = fat_trim_clusters(sb, clus, free);
		if (err && err != -EOPNOTSUPP)
			goto error;
		if (!err)
			trimmed += free;
		err = 0;
	}

error:
	fatent_brelse(&fatent);
	unlock_fat(sbi);

	/* Report the trimmed amount back to userspace, in bytes. */
	range->len = trimmed << sbi->cluster_bits;

	return err;
}
848