// SPDX-License-Identifier: GPL-2.0
/*
 *  linux/fs/hfsplus/bitmap.c
 *
 * Copyright (C) 2001
 * Brad Boyer (flar@allandria.com)
 * (C) 2003 Ardis Technologies <roman@ardistech.com>
 *
 * Handling of allocation file
 */
#include <linux/pagemap.h>

#include "hfsplus_fs.h"
#include "hfsplus_raw.h"

#define PAGE_CACHE_BITS	(PAGE_SIZE * 8)
18*4882a593Smuzhiyun
/*
 * hfsplus_block_allocate - find and mark a run of free allocation blocks
 * @sb:     superblock of the mounted HFS+ volume
 * @size:   total number of allocation blocks (exclusive upper search bound)
 * @offset: block number at which to start searching
 * @max:    in: maximum run length wanted; out: run length actually allocated
 *
 * Scans the allocation-file bitmap for the first clear bit at or after
 * @offset, sets up to *@max consecutive bits (stopping early at the first
 * already-set bit or at @size), and returns the starting block number.
 * Returns @size when no free block was found or a bitmap page could not
 * be read.  Serialized against other bitmap updates by sbi->alloc_mutex.
 *
 * The bitmap is big-endian: bit 31 of each 32-bit word corresponds to the
 * lowest-numbered block covered by that word.
 */
int hfsplus_block_allocate(struct super_block *sb, u32 size,
		u32 offset, u32 *max)
{
	struct hfsplus_sb_info *sbi = HFSPLUS_SB(sb);
	struct page *page;
	struct address_space *mapping;
	__be32 *pptr, *curr, *end;
	u32 mask, start, len, n;
	__be32 val;
	int i;

	len = *max;
	if (!len)
		return size;

	hfs_dbg(BITMAP, "block_allocate: %u,%u,%u\n", size, offset, len);
	mutex_lock(&sbi->alloc_mutex);
	mapping = sbi->alloc_file->i_mapping;
	page = read_mapping_page(mapping, offset / PAGE_CACHE_BITS, NULL);
	if (IS_ERR(page)) {
		start = size;
		goto out;
	}
	pptr = kmap(page);
	curr = pptr + (offset & (PAGE_CACHE_BITS - 1)) / 32;
	i = offset % 32;
	/* from here on, offset is the block number of this page's first bit */
	offset &= ~(PAGE_CACHE_BITS - 1);
	/*
	 * If the bitmap extends beyond this page, scan the whole page;
	 * otherwise stop at the word containing bit (size - 1).
	 */
	if ((size ^ offset) / PAGE_CACHE_BITS)
		end = pptr + PAGE_CACHE_BITS / 32;
	else
		end = pptr + ((size + 31) & (PAGE_CACHE_BITS - 1)) / 32;

	/* scan the first partial u32 for zero bits */
	val = *curr;
	if (~val) {
		n = be32_to_cpu(val);
		mask = (1U << 31) >> i;
		for (; i < 32; mask >>= 1, i++) {
			if (!(n & mask))
				goto found;
		}
	}
	curr++;

	/* scan complete u32s for the first zero bit */
	while (1) {
		while (curr < end) {
			val = *curr;
			if (~val) {
				n = be32_to_cpu(val);
				mask = 1 << 31;
				for (i = 0; i < 32; mask >>= 1, i++) {
					if (!(n & mask))
						goto found;
				}
			}
			curr++;
		}
		kunmap(page);
		offset += PAGE_CACHE_BITS;
		if (offset >= size)
			break;
		page = read_mapping_page(mapping, offset / PAGE_CACHE_BITS,
					 NULL);
		if (IS_ERR(page)) {
			start = size;
			goto out;
		}
		curr = pptr = kmap(page);
		if ((size ^ offset) / PAGE_CACHE_BITS)
			end = pptr + PAGE_CACHE_BITS / 32;
		else
			end = pptr + ((size + 31) & (PAGE_CACHE_BITS - 1)) / 32;
	}
	hfs_dbg(BITMAP, "bitmap full\n");
	start = size;
	goto out;

found:
	/* on entry: n/mask/i identify the free bit within *curr */
	start = offset + (curr - pptr) * 32 + i;
	if (start >= size) {
		hfs_dbg(BITMAP, "bitmap full\n");
		goto out;
	}
	/* do any partial u32 at the start */
	len = min(size - start, len);
	while (1) {
		n |= mask;
		if (++i >= 32)
			break;
		mask >>= 1;
		/* stop at end of request or at the next already-set bit */
		if (!--len || n & mask)
			goto done;
	}
	if (!--len)
		goto done;
	*curr++ = cpu_to_be32(n);
	/* do full u32s */
	while (1) {
		while (curr < end) {
			n = be32_to_cpu(*curr);
			if (len < 32)
				goto last;
			if (n) {
				/* word partially used: take at most 32 bits */
				len = 32;
				goto last;
			}
			*curr++ = cpu_to_be32(0xffffffff);
			len -= 32;
		}
		set_page_dirty(page);
		kunmap(page);
		offset += PAGE_CACHE_BITS;
		page = read_mapping_page(mapping, offset / PAGE_CACHE_BITS,
					 NULL);
		if (IS_ERR(page)) {
			start = size;
			goto out;
		}
		pptr = kmap(page);
		curr = pptr;
		end = pptr + PAGE_CACHE_BITS / 32;
	}
last:
	/* do any partial u32 at end */
	mask = 1U << 31;
	for (i = 0; i < len; i++) {
		if (n & mask)
			break;
		n |= mask;
		mask >>= 1;
	}
done:
	*curr = cpu_to_be32(n);
	set_page_dirty(page);
	kunmap(page);
	*max = offset + (curr - pptr) * 32 + i - start;
	sbi->free_blocks -= *max;
	hfsplus_mark_mdb_dirty(sb);
	hfs_dbg(BITMAP, "-> %u,%u\n", start, *max);
out:
	mutex_unlock(&sbi->alloc_mutex);
	return start;
}
163*4882a593Smuzhiyun
/*
 * hfsplus_block_free - clear a run of bits in the allocation bitmap
 * @sb:     superblock of the mounted HFS+ volume
 * @offset: first allocation block to free
 * @count:  number of consecutive blocks to free
 *
 * Clears @count bits starting at @offset in the allocation file and
 * credits them back to sbi->free_blocks.  Returns 0 on success, -ENOENT
 * if the range lies beyond the volume, or -EIO if a bitmap page could
 * not be read.  Serialized by sbi->alloc_mutex.
 */
int hfsplus_block_free(struct super_block *sb, u32 offset, u32 count)
{
	struct hfsplus_sb_info *sbi = HFSPLUS_SB(sb);
	struct page *page;
	struct address_space *mapping;
	__be32 *pptr, *curr, *end;
	u32 mask, len, pnr;
	int i;

	/* is there any actual work to be done? */
	if (!count)
		return 0;

	hfs_dbg(BITMAP, "block_free: %u,%u\n", offset, count);
	/* are all of the bits in range? */
	if ((offset + count) > sbi->total_blocks)
		return -ENOENT;

	mutex_lock(&sbi->alloc_mutex);
	mapping = sbi->alloc_file->i_mapping;
	pnr = offset / PAGE_CACHE_BITS;
	page = read_mapping_page(mapping, pnr, NULL);
	if (IS_ERR(page))
		goto kaboom;
	pptr = kmap(page);
	curr = pptr + (offset & (PAGE_CACHE_BITS - 1)) / 32;
	end = pptr + PAGE_CACHE_BITS / 32;
	/* remember the full request so free_blocks can be updated at the end */
	len = count;

	/* do any partial u32 at the start */
	i = offset % 32;
	if (i) {
		int j = 32 - i;
		/* keep the j high bits that precede the range */
		mask = 0xffffffffU << j;
		if (j > count) {
			/* range ends inside this word: also keep the tail */
			mask |= 0xffffffffU >> (i + count);
			*curr++ &= cpu_to_be32(mask);
			goto out;
		}
		*curr++ &= cpu_to_be32(mask);
		count -= j;
	}

	/* do full u32s */
	while (1) {
		while (curr < end) {
			if (count < 32)
				goto done;
			*curr++ = 0;
			count -= 32;
		}
		if (!count)
			break;
		/* range continues on the next bitmap page */
		set_page_dirty(page);
		kunmap(page);
		page = read_mapping_page(mapping, ++pnr, NULL);
		if (IS_ERR(page))
			goto kaboom;
		pptr = kmap(page);
		curr = pptr;
		end = pptr + PAGE_CACHE_BITS / 32;
	}
done:
	/* do any partial u32 at end */
	if (count) {
		mask = 0xffffffffU >> count;
		*curr &= cpu_to_be32(mask);
	}
out:
	set_page_dirty(page);
	kunmap(page);
	sbi->free_blocks += len;
	hfsplus_mark_mdb_dirty(sb);
	mutex_unlock(&sbi->alloc_mutex);

	return 0;

kaboom:
	pr_crit("unable to mark blocks free: error %ld\n", PTR_ERR(page));
	mutex_unlock(&sbi->alloc_mutex);

	return -EIO;
}
247