// SPDX-License-Identifier: GPL-2.0
/*
 *  linux/fs/hpfs/buffer.c
 *
 *  Mikulas Patocka (mikulas@artax.karlin.mff.cuni.cz), 1998-1999
 *
 *  general buffer i/o
 */
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/blkdev.h>
#include "hpfs_fn.h"

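/*
 * Look up a sector in the hotfix map.  If the sector has been remapped to a
 * spare (hotfix) sector, return the replacement; otherwise return the sector
 * unchanged.
 */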
secno hpfs_search_hotfix_map(struct super_block *s, secno sec)
{
	unsigned i;
	struct hpfs_sb_info *sbi = hpfs_sb(s);
	for (i = 0; unlikely(i < sbi->n_hotfixes); i++) {
		if (sbi->hotfix_from[i] == sec) {
			return sbi->hotfix_to[i];
		}
	}
	return sec;
}

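/*
 * Return how many sectors, starting at sec, can be accessed before the first
 * hotfixed sector is hit.  The result is at most n.
 */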
unsigned hpfs_search_hotfix_map_for_range(struct super_block *s, secno sec, unsigned n)
{
	unsigned i;
	struct hpfs_sb_info *sbi = hpfs_sb(s);
	for (i = 0; unlikely(i < sbi->n_hotfixes); i++) {
		if (sbi->hotfix_from[i] >= sec && sbi->hotfix_from[i] < sec + n) {
			n = sbi->hotfix_from[i] - sec;
		}
	}
	return n;
}

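/*
 * Start read-ahead for n sectors beginning at secno, batching the requests
 * under a block plug.  The prefetch is skipped entirely if the starting
 * sector is past the end of the filesystem, if the range contains a hotfixed
 * sector, or if the first sector is already cached and up to date; read-ahead
 * stops at the end of the filesystem.
 */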
void hpfs_prefetch_sectors(struct super_block *s, unsigned secno, int n)
{
	struct buffer_head *bh;
	struct blk_plug plug;

	if (n <= 0 || unlikely(secno >= hpfs_sb(s)->sb_fs_size))
		return;

	if (unlikely(hpfs_search_hotfix_map_for_range(s, secno, n) != n))
		return;

	bh = sb_find_get_block(s, secno);
	if (bh) {
		if (buffer_uptodate(bh)) {
			brelse(bh);
			return;
		}
		brelse(bh);
	}

	blk_start_plug(&plug);
	while (n > 0) {
		if (unlikely(secno >= hpfs_sb(s)->sb_fs_size))
			break;
		sb_breadahead(s, secno);
		secno++;
		n--;
	}
	blk_finish_plug(&plug);
}

/* Map a sector into a buffer and return pointers to it and to the buffer. */

void *hpfs_map_sector(struct super_block *s, unsigned secno, struct buffer_head **bhp,
		      int ahead)
{
	struct buffer_head *bh;

	hpfs_lock_assert(s);

	hpfs_prefetch_sectors(s, secno, ahead);

	cond_resched();

	*bhp = bh = sb_bread(s, hpfs_search_hotfix_map(s, secno));
	if (bh != NULL)
		return bh->b_data;
	else {
		pr_err("%s(): read error\n", __func__);
		return NULL;
	}
}

/* Like hpfs_map_sector but don't read anything */

void *hpfs_get_sector(struct super_block *s, unsigned secno, struct buffer_head **bhp)
{
	struct buffer_head *bh;
	/*return hpfs_map_sector(s, secno, bhp, 0);*/

	hpfs_lock_assert(s);

	cond_resched();

	if ((*bhp = bh = sb_getblk(s, hpfs_search_hotfix_map(s, secno))) != NULL) {
		if (!buffer_uptodate(bh)) wait_on_buffer(bh);
		set_buffer_uptodate(bh);
		return bh->b_data;
	} else {
		pr_err("%s(): getblk failed\n", __func__);
		return NULL;
	}
}

/* Map 4 sectors into a quad buffer and return pointers to it and to the buffer heads. */

void *hpfs_map_4sectors(struct super_block *s, unsigned secno, struct quad_buffer_head *qbh,
			int ahead)
{
	char *data;

	hpfs_lock_assert(s);

	cond_resched();

	if (secno & 3) {
		pr_err("%s(): unaligned read\n", __func__);
		return NULL;
	}

	hpfs_prefetch_sectors(s, secno, 4 + ahead);

	if (!hpfs_map_sector(s, secno + 0, &qbh->bh[0], 0)) goto bail0;
	if (!hpfs_map_sector(s, secno + 1, &qbh->bh[1], 0)) goto bail1;
	if (!hpfs_map_sector(s, secno + 2, &qbh->bh[2], 0)) goto bail2;
	if (!hpfs_map_sector(s, secno + 3, &qbh->bh[3], 0)) goto bail3;

	/* Fast path: the four buffers are contiguous in memory, use them directly. */
	if (likely(qbh->bh[1]->b_data == qbh->bh[0]->b_data + 1 * 512) &&
	    likely(qbh->bh[2]->b_data == qbh->bh[0]->b_data + 2 * 512) &&
	    likely(qbh->bh[3]->b_data == qbh->bh[0]->b_data + 3 * 512)) {
		return qbh->data = qbh->bh[0]->b_data;
	}

	/* Slow path: copy the four sectors into a contiguous bounce buffer. */
	qbh->data = data = kmalloc(2048, GFP_NOFS);
	if (!data) {
		pr_err("%s(): out of memory\n", __func__);
		goto bail4;
	}

	memcpy(data + 0 * 512, qbh->bh[0]->b_data, 512);
	memcpy(data + 1 * 512, qbh->bh[1]->b_data, 512);
	memcpy(data + 2 * 512, qbh->bh[2]->b_data, 512);
	memcpy(data + 3 * 512, qbh->bh[3]->b_data, 512);

	return data;

	bail4:
	brelse(qbh->bh[3]);
	bail3:
	brelse(qbh->bh[2]);
	bail2:
	brelse(qbh->bh[1]);
	bail1:
	brelse(qbh->bh[0]);
	bail0:
	return NULL;
}

/* Like hpfs_map_4sectors but don't read anything */

void *hpfs_get_4sectors(struct super_block *s, unsigned secno,
			struct quad_buffer_head *qbh)
{
	cond_resched();

	hpfs_lock_assert(s);

	if (secno & 3) {
		pr_err("%s(): unaligned read\n", __func__);
		return NULL;
	}

	if (!hpfs_get_sector(s, secno + 0, &qbh->bh[0])) goto bail0;
	if (!hpfs_get_sector(s, secno + 1, &qbh->bh[1])) goto bail1;
	if (!hpfs_get_sector(s, secno + 2, &qbh->bh[2])) goto bail2;
	if (!hpfs_get_sector(s, secno + 3, &qbh->bh[3])) goto bail3;

	if (likely(qbh->bh[1]->b_data == qbh->bh[0]->b_data + 1 * 512) &&
	    likely(qbh->bh[2]->b_data == qbh->bh[0]->b_data + 2 * 512) &&
	    likely(qbh->bh[3]->b_data == qbh->bh[0]->b_data + 3 * 512)) {
		return qbh->data = qbh->bh[0]->b_data;
	}

	/*
	 * The bounce buffer is left uninitialized: callers of the "get" variant
	 * fill it themselves, and the data is copied back into the buffer heads
	 * by hpfs_mark_4buffers_dirty().
	 */
	if (!(qbh->data = kmalloc(2048, GFP_NOFS))) {
		pr_err("%s(): out of memory\n", __func__);
		goto bail4;
	}
	return qbh->data;

	bail4:
	brelse(qbh->bh[3]);
	bail3:
	brelse(qbh->bh[2]);
	bail2:
	brelse(qbh->bh[1]);
	bail1:
	brelse(qbh->bh[0]);
	bail0:
	return NULL;
}


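/*
 * Release a quad buffer: free the bounce buffer if one was allocated (the
 * four sectors were not contiguous in memory) and drop all four buffer heads.
 */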
void hpfs_brelse4(struct quad_buffer_head *qbh)
{
	if (unlikely(qbh->data != qbh->bh[0]->b_data))
		kfree(qbh->data);
	brelse(qbh->bh[0]);
	brelse(qbh->bh[1]);
	brelse(qbh->bh[2]);
	brelse(qbh->bh[3]);
}

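/*
 * Mark a quad buffer dirty: if a bounce buffer is in use, copy its contents
 * back into the four buffer heads first, then mark all four dirty.
 */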
void hpfs_mark_4buffers_dirty(struct quad_buffer_head *qbh)
{
	if (unlikely(qbh->data != qbh->bh[0]->b_data)) {
		memcpy(qbh->bh[0]->b_data, qbh->data + 0 * 512, 512);
		memcpy(qbh->bh[1]->b_data, qbh->data + 1 * 512, 512);
		memcpy(qbh->bh[2]->b_data, qbh->data + 2 * 512, 512);
		memcpy(qbh->bh[3]->b_data, qbh->data + 3 * 512, 512);
	}
	mark_buffer_dirty(qbh->bh[0]);
	mark_buffer_dirty(qbh->bh[1]);
	mark_buffer_dirty(qbh->bh[2]);
	mark_buffer_dirty(qbh->bh[3]);
}