// SPDX-License-Identifier: GPL-2.0
/*
 * linux/fs/ufs/cylinder.c
 *
 * Copyright (C) 1998
 * Daniel Pirkl <daniel.pirkl@email.cz>
 * Charles University, Faculty of Mathematics and Physics
 *
 * ext2 - inode (block) bitmap caching inspired
 */

#include <linux/fs.h>
#include <linux/time.h>
#include <linux/stat.h>
#include <linux/string.h>
#include <linux/bitops.h>

#include <asm/byteorder.h>

#include "ufs_fs.h"
#include "ufs.h"
#include "swab.h"
#include "util.h"

/*
 * Read a cylinder group into the cache. The memory for the
 * ufs_cg_private_info structure is already allocated during ufs_read_super.
 */
static void ufs_read_cylinder (struct super_block * sb,
        unsigned cgno, unsigned bitmap_nr)
{
        struct ufs_sb_info * sbi = UFS_SB(sb);
        struct ufs_sb_private_info * uspi;
        struct ufs_cg_private_info * ucpi;
        struct ufs_cylinder_group * ucg;
        unsigned i, j;

        UFSD("ENTER, cgno %u, bitmap_nr %u\n", cgno, bitmap_nr);
        uspi = sbi->s_uspi;
        ucpi = sbi->s_ucpi[bitmap_nr];
        ucg = (struct ufs_cylinder_group *)sbi->s_ucg[cgno]->b_data;

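        /*
         * The cylinder group block starts at fragment ufs_cgcmin(cgno) and
         * spans uspi->s_cgsize bytes, which we read as "count" buffers of
         * sb->s_blocksize bytes each.
         */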
        UCPI_UBH(ucpi)->fragment = ufs_cgcmin(cgno);
        UCPI_UBH(ucpi)->count = uspi->s_cgsize >> sb->s_blocksize_bits;
        /*
         * We already have the first fragment of the cylinder group block in the buffer
         */
        UCPI_UBH(ucpi)->bh[0] = sbi->s_ucg[cgno];
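        /* read the remaining fragments of the cylinder group block */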
        for (i = 1; i < UCPI_UBH(ucpi)->count; i++)
                if (!(UCPI_UBH(ucpi)->bh[i] = sb_bread(sb, UCPI_UBH(ucpi)->fragment + i)))
                        goto failed;
        sbi->s_cgno[bitmap_nr] = cgno;

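        /*
         * Cache the frequently used summary fields of the on-disk cylinder
         * group in CPU byte order.
         */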
        ucpi->c_cgx = fs32_to_cpu(sb, ucg->cg_cgx);
        ucpi->c_ncyl = fs16_to_cpu(sb, ucg->cg_ncyl);
        ucpi->c_niblk = fs16_to_cpu(sb, ucg->cg_niblk);
        ucpi->c_ndblk = fs32_to_cpu(sb, ucg->cg_ndblk);
        ucpi->c_rotor = fs32_to_cpu(sb, ucg->cg_rotor);
        ucpi->c_frotor = fs32_to_cpu(sb, ucg->cg_frotor);
        ucpi->c_irotor = fs32_to_cpu(sb, ucg->cg_irotor);
        ucpi->c_btotoff = fs32_to_cpu(sb, ucg->cg_btotoff);
        ucpi->c_boff = fs32_to_cpu(sb, ucg->cg_boff);
        ucpi->c_iusedoff = fs32_to_cpu(sb, ucg->cg_iusedoff);
        ucpi->c_freeoff = fs32_to_cpu(sb, ucg->cg_freeoff);
        ucpi->c_nextfreeoff = fs32_to_cpu(sb, ucg->cg_nextfreeoff);
        ucpi->c_clustersumoff = fs32_to_cpu(sb, ucg->cg_u.cg_44.cg_clustersumoff);
        ucpi->c_clusteroff = fs32_to_cpu(sb, ucg->cg_u.cg_44.cg_clusteroff);
        ucpi->c_nclusterblks = fs32_to_cpu(sb, ucg->cg_u.cg_44.cg_nclusterblks);
        UFSD("EXIT\n");
        return;

failed:
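        /*
         * Release only the buffers read in the loop above; bh[0] is
         * sbi->s_ucg[cgno] and remains owned by the superblock code.
         */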
        for (j = 1; j < i; j++)
                brelse (UCPI_UBH(ucpi)->bh[j]);
        sbi->s_cgno[bitmap_nr] = UFS_CGNO_EMPTY;
        ufs_error (sb, "ufs_read_cylinder", "can't read cylinder group block %u", cgno);
}

/*
 * Remove a cylinder group from the cache. This does not release the memory
 * allocated for the cylinder group (that is done only in ufs_put_super).
 */
void ufs_put_cylinder (struct super_block * sb, unsigned bitmap_nr)
{
        struct ufs_sb_info * sbi = UFS_SB(sb);
        struct ufs_sb_private_info * uspi;
        struct ufs_cg_private_info * ucpi;
        struct ufs_cylinder_group * ucg;
        unsigned i;

        UFSD("ENTER, bitmap_nr %u\n", bitmap_nr);

        uspi = sbi->s_uspi;
        if (sbi->s_cgno[bitmap_nr] == UFS_CGNO_EMPTY) {
                UFSD("EXIT\n");
                return;
        }
        ucpi = sbi->s_ucpi[bitmap_nr];
        ucg = ubh_get_ucg(UCPI_UBH(ucpi));

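        /*
         * When there are more cylinder groups than cache slots, only the
         * first s_cg_loaded slots are valid; a bitmap_nr beyond that means
         * the cache bookkeeping is corrupted.
         */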
        if (uspi->s_ncg > UFS_MAX_GROUP_LOADED && bitmap_nr >= sbi->s_cg_loaded) {
                ufs_panic (sb, "ufs_put_cylinder", "internal error");
                return;
        }
        /*
         * The rotor fields are not critical data, so we write them back to
         * disk only when we are finished with this cylinder group
         */
        ucg->cg_rotor = cpu_to_fs32(sb, ucpi->c_rotor);
        ucg->cg_frotor = cpu_to_fs32(sb, ucpi->c_frotor);
        ucg->cg_irotor = cpu_to_fs32(sb, ucpi->c_irotor);
        ubh_mark_buffer_dirty (UCPI_UBH(ucpi));
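        /*
         * bh[0] is the sbi->s_ucg[] buffer kept by the superblock code, so
         * release only the buffers that ufs_read_cylinder() read.
         */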
        for (i = 1; i < UCPI_UBH(ucpi)->count; i++) {
                brelse (UCPI_UBH(ucpi)->bh[i]);
        }

        sbi->s_cgno[bitmap_nr] = UFS_CGNO_EMPTY;
        UFSD("EXIT\n");
}

/*
 * Find a cylinder group in the cache and return a pointer to it.
 * If the cylinder group is not in the cache, load it from disk.
 *
 * The cache is managed with an LRU algorithm.
 */
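/*
 * A caller would typically use it roughly like this (sketch only; the exact
 * error handling depends on the call site):
 *
 *        ucpi = ufs_load_cylinder (sb, cgno);
 *        if (!ucpi)
 *                return 0;
 *        ucg = ubh_get_ucg (UCPI_UBH(ucpi));
 */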
struct ufs_cg_private_info * ufs_load_cylinder (
        struct super_block * sb, unsigned cgno)
{
        struct ufs_sb_info * sbi = UFS_SB(sb);
        struct ufs_sb_private_info * uspi;
        struct ufs_cg_private_info * ucpi;
        unsigned cg, i, j;

        UFSD("ENTER, cgno %u\n", cgno);

        uspi = sbi->s_uspi;
        if (cgno >= uspi->s_ncg) {
                ufs_panic (sb, "ufs_load_cylinder", "internal error, high number of cg");
                return NULL;
        }
        /*
         * Cylinder group number cgno is in the cache and was the last one used
         */
        if (sbi->s_cgno[0] == cgno) {
                UFSD("EXIT\n");
                return sbi->s_ucpi[0];
        }
        /*
         * The number of cylinder groups is not higher than UFS_MAX_GROUP_LOADED,
         * so every cylinder group has its own cache slot (slot number == cgno)
         */
        if (uspi->s_ncg <= UFS_MAX_GROUP_LOADED) {
                if (sbi->s_cgno[cgno] != UFS_CGNO_EMPTY) {
                        if (sbi->s_cgno[cgno] != cgno) {
                                ufs_panic (sb, "ufs_load_cylinder", "internal error, wrong number of cg in cache");
                                UFSD("EXIT (FAILED)\n");
                                return NULL;
                        }
                        else {
                                UFSD("EXIT\n");
                                return sbi->s_ucpi[cgno];
                        }
                } else {
                        ufs_read_cylinder (sb, cgno, cgno);
                        UFSD("EXIT\n");
                        return sbi->s_ucpi[cgno];
                }
        }
        /*
         * Cylinder group number cgno is in the cache but it was not the last
         * one used, so move it to the first position
         */
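        /* find the cache slot that currently holds cgno, if any */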
        for (i = 0; i < sbi->s_cg_loaded && sbi->s_cgno[i] != cgno; i++);
        if (i < sbi->s_cg_loaded && sbi->s_cgno[i] == cgno) {
                cg = sbi->s_cgno[i];
                ucpi = sbi->s_ucpi[i];
                for (j = i; j > 0; j--) {
                        sbi->s_cgno[j] = sbi->s_cgno[j-1];
                        sbi->s_ucpi[j] = sbi->s_ucpi[j-1];
                }
                sbi->s_cgno[0] = cg;
                sbi->s_ucpi[0] = ucpi;
        /*
         * Cylinder group number cgno is not in the cache, so read it from
         * disk and put it in the first position
         */
        } else {
                if (sbi->s_cg_loaded < UFS_MAX_GROUP_LOADED)
                        sbi->s_cg_loaded++;
                else
                        ufs_put_cylinder (sb, UFS_MAX_GROUP_LOADED-1);
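                /*
                 * Take the ufs_cg_private_info of the last slot (either the
                 * one just freed or the newly enabled one), shift the others
                 * down and reuse it in slot 0 for the new group.
                 */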
                ucpi = sbi->s_ucpi[sbi->s_cg_loaded - 1];
                for (j = sbi->s_cg_loaded - 1; j > 0; j--) {
                        sbi->s_cgno[j] = sbi->s_cgno[j-1];
                        sbi->s_ucpi[j] = sbi->s_ucpi[j-1];
                }
                sbi->s_ucpi[0] = ucpi;
                ufs_read_cylinder (sb, cgno, 0);
        }
        UFSD("EXIT\n");
        return sbi->s_ucpi[0];
}