/*
 * balloc.c
 *
 * PURPOSE
 *	Block allocation handling routines for the OSTA-UDF(tm) filesystem.
 *
 * COPYRIGHT
 *	This file is distributed under the terms of the GNU General Public
 *	License (GPL). Copies of the GPL can be obtained from:
 *		ftp://prep.ai.mit.edu/pub/gnu/GPL
 *	Each contributing author retains all rights to their own work.
 *
 *  (C) 1999-2001 Ben Fennema
 *  (C) 1999 Stelias Computing Inc
 *
 * HISTORY
 *
 *  02/24/99 blf  Created.
 *
 */

#include "udfdecl.h"

#include <linux/bitops.h>

#include "udf_i.h"
#include "udf_sb.h"

#define udf_clear_bit	__test_and_clear_bit_le
#define udf_set_bit	__test_and_set_bit_le
#define udf_test_bit	test_bit_le
#define udf_find_next_one_bit	find_next_bit_le

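/*
 * Read bitmap block 'block' of the partition's space bitmap and cache the
 * buffer_head in bitmap->s_block_bitmap[bitmap_nr]. Returns 0 on success or
 * -EIO if the block could not be read (the cache slot is then NULL).
 */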
static int read_block_bitmap(struct super_block *sb,
			     struct udf_bitmap *bitmap, unsigned int block,
			     unsigned long bitmap_nr)
{
	struct buffer_head *bh = NULL;
	int retval = 0;
	struct kernel_lb_addr loc;

	loc.logicalBlockNum = bitmap->s_extPosition;
	loc.partitionReferenceNum = UDF_SB(sb)->s_partition;

	bh = udf_tread(sb, udf_get_lb_pblock(sb, &loc, block));
	if (!bh)
		retval = -EIO;

	bitmap->s_block_bitmap[bitmap_nr] = bh;
	return retval;
}

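/*
 * Ensure the bitmap block covering 'block_group' is loaded into the cache.
 * Returns the cache slot used (currently the group number itself) or a
 * negative errno if reading the bitmap block failed. An out-of-range group
 * is only reported via udf_debug() here.
 */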
static int __load_block_bitmap(struct super_block *sb,
			       struct udf_bitmap *bitmap,
			       unsigned int block_group)
{
	int retval = 0;
	int nr_groups = bitmap->s_nr_groups;

	if (block_group >= nr_groups) {
		udf_debug("block_group (%u) > nr_groups (%d)\n",
			  block_group, nr_groups);
	}

	if (bitmap->s_block_bitmap[block_group])
		return block_group;

	retval = read_block_bitmap(sb, bitmap, block_group, block_group);
	if (retval < 0)
		return retval;

	return block_group;
}

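/*
 * Same as __load_block_bitmap(), but additionally treat a missing buffer in
 * the returned slot as -EIO, so a non-negative return guarantees that
 * bitmap->s_block_bitmap[slot] is valid.
 */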
static inline int load_block_bitmap(struct super_block *sb,
				    struct udf_bitmap *bitmap,
				    unsigned int block_group)
{
	int slot;

	slot = __load_block_bitmap(sb, bitmap, block_group);

	if (slot < 0)
		return slot;

	if (!bitmap->s_block_bitmap[slot])
		return -EIO;

	return slot;
}

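/*
 * Adjust the partition's free-space counter in the cached Logical Volume
 * Integrity Descriptor, if one is loaded, and mark the LVID dirty.
 * Callers pass a negated count to account for allocations; the unsigned
 * wrap-around makes le32_add_cpu() act as a subtraction.
 */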
static void udf_add_free_space(struct super_block *sb, u16 partition, u32 cnt)
{
	struct udf_sb_info *sbi = UDF_SB(sb);
	struct logicalVolIntegrityDesc *lvid;

	if (!sbi->s_lvid_bh)
		return;

	lvid = (struct logicalVolIntegrityDesc *)sbi->s_lvid_bh->b_data;
	le32_add_cpu(&lvid->freeSpaceTable[partition], cnt);
	udf_updated_lvid(sb);
}

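/*
 * Free 'count' blocks starting at bloc + offset in a bitmap-managed
 * partition by setting the corresponding bits (a set bit means the block is
 * free), looping if the range crosses a bitmap-block boundary, and credit
 * the blocks back to the LVID free-space counter.
 */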
static void udf_bitmap_free_blocks(struct super_block *sb,
				   struct udf_bitmap *bitmap,
				   struct kernel_lb_addr *bloc,
				   uint32_t offset,
				   uint32_t count)
{
	struct udf_sb_info *sbi = UDF_SB(sb);
	struct buffer_head *bh = NULL;
	struct udf_part_map *partmap;
	unsigned long block;
	unsigned long block_group;
	unsigned long bit;
	unsigned long i;
	int bitmap_nr;
	unsigned long overflow;

	mutex_lock(&sbi->s_alloc_mutex);
	partmap = &sbi->s_partmaps[bloc->partitionReferenceNum];
	if (bloc->logicalBlockNum + count < count ||
	    (bloc->logicalBlockNum + count) > partmap->s_partition_len) {
		udf_debug("%u < %d || %u + %u > %u\n",
			  bloc->logicalBlockNum, 0,
			  bloc->logicalBlockNum, count,
			  partmap->s_partition_len);
		goto error_return;
	}

	block = bloc->logicalBlockNum + offset +
		(sizeof(struct spaceBitmapDesc) << 3);

	do {
		overflow = 0;
		block_group = block >> (sb->s_blocksize_bits + 3);
		bit = block % (sb->s_blocksize << 3);

		/*
		 * Check to see if we are freeing blocks across a group boundary.
		 */
		if (bit + count > (sb->s_blocksize << 3)) {
			overflow = bit + count - (sb->s_blocksize << 3);
			count -= overflow;
		}
		bitmap_nr = load_block_bitmap(sb, bitmap, block_group);
		if (bitmap_nr < 0)
			goto error_return;

		bh = bitmap->s_block_bitmap[bitmap_nr];
		for (i = 0; i < count; i++) {
			if (udf_set_bit(bit + i, bh->b_data)) {
				udf_debug("bit %lu already set\n", bit + i);
				udf_debug("byte=%2x\n",
					  ((__u8 *)bh->b_data)[(bit + i) >> 3]);
			}
		}
		udf_add_free_space(sb, sbi->s_partition, count);
		mark_buffer_dirty(bh);
		if (overflow) {
			block += count;
			count = overflow;
		}
	} while (overflow);

error_return:
	mutex_unlock(&sbi->s_alloc_mutex);
}

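/*
 * Try to allocate up to 'block_count' blocks starting exactly at
 * 'first_block' by clearing consecutive bits in the space bitmap. The scan
 * stops at the first block that is already in use (and, in effect, at a
 * bitmap-block boundary). Returns the number of blocks actually allocated,
 * which may be 0.
 */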
static int udf_bitmap_prealloc_blocks(struct super_block *sb,
				      struct udf_bitmap *bitmap,
				      uint16_t partition, uint32_t first_block,
				      uint32_t block_count)
{
	struct udf_sb_info *sbi = UDF_SB(sb);
	int alloc_count = 0;
	int bit, block, block_group;
	int bitmap_nr;
	struct buffer_head *bh;
	__u32 part_len;

	mutex_lock(&sbi->s_alloc_mutex);
	part_len = sbi->s_partmaps[partition].s_partition_len;
	if (first_block >= part_len)
		goto out;

	if (first_block + block_count > part_len)
		block_count = part_len - first_block;

	do {
		block = first_block + (sizeof(struct spaceBitmapDesc) << 3);
		block_group = block >> (sb->s_blocksize_bits + 3);

		bitmap_nr = load_block_bitmap(sb, bitmap, block_group);
		if (bitmap_nr < 0)
			goto out;
		bh = bitmap->s_block_bitmap[bitmap_nr];

		bit = block % (sb->s_blocksize << 3);

		while (bit < (sb->s_blocksize << 3) && block_count > 0) {
			if (!udf_clear_bit(bit, bh->b_data))
				goto out;
			block_count--;
			alloc_count++;
			bit++;
			block++;
		}
		mark_buffer_dirty(bh);
	} while (block_count > 0);

out:
	udf_add_free_space(sb, partition, -alloc_count);
	mutex_unlock(&sbi->s_alloc_mutex);
	return alloc_count;
}

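/*
 * Allocate one block from a bitmap-managed partition, preferring a block
 * near 'goal': first the goal bit itself, then any free bit up to the next
 * 64-bit boundary, then the first fully free byte (0xFF) after the goal,
 * then any free bit in the goal's bitmap block, and finally the remaining
 * bitmap blocks. search_back then backs up over at most 7 preceding free
 * bits so allocations tend to start on a byte boundary. Returns the new
 * block number, or 0 with *err set on failure.
 */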
static udf_pblk_t udf_bitmap_new_block(struct super_block *sb,
				struct udf_bitmap *bitmap, uint16_t partition,
				uint32_t goal, int *err)
{
	struct udf_sb_info *sbi = UDF_SB(sb);
	int newbit, bit = 0;
	udf_pblk_t block;
	int block_group, group_start;
	int end_goal, nr_groups, bitmap_nr, i;
	struct buffer_head *bh = NULL;
	char *ptr;
	udf_pblk_t newblock = 0;

	*err = -ENOSPC;
	mutex_lock(&sbi->s_alloc_mutex);

repeat:
	if (goal >= sbi->s_partmaps[partition].s_partition_len)
		goal = 0;

	nr_groups = bitmap->s_nr_groups;
	block = goal + (sizeof(struct spaceBitmapDesc) << 3);
	block_group = block >> (sb->s_blocksize_bits + 3);
	group_start = block_group ? 0 : sizeof(struct spaceBitmapDesc);

	bitmap_nr = load_block_bitmap(sb, bitmap, block_group);
	if (bitmap_nr < 0)
		goto error_return;
	bh = bitmap->s_block_bitmap[bitmap_nr];
	ptr = memscan((char *)bh->b_data + group_start, 0xFF,
		      sb->s_blocksize - group_start);

	if ((ptr - ((char *)bh->b_data)) < sb->s_blocksize) {
		bit = block % (sb->s_blocksize << 3);
		if (udf_test_bit(bit, bh->b_data))
			goto got_block;

		end_goal = (bit + 63) & ~63;
		bit = udf_find_next_one_bit(bh->b_data, end_goal, bit);
		if (bit < end_goal)
			goto got_block;

		ptr = memscan((char *)bh->b_data + (bit >> 3), 0xFF,
			      sb->s_blocksize - ((bit + 7) >> 3));
		newbit = (ptr - ((char *)bh->b_data)) << 3;
		if (newbit < sb->s_blocksize << 3) {
			bit = newbit;
			goto search_back;
		}

		newbit = udf_find_next_one_bit(bh->b_data,
					       sb->s_blocksize << 3, bit);
		if (newbit < sb->s_blocksize << 3) {
			bit = newbit;
			goto got_block;
		}
	}

	for (i = 0; i < (nr_groups * 2); i++) {
		block_group++;
		if (block_group >= nr_groups)
			block_group = 0;
		group_start = block_group ? 0 : sizeof(struct spaceBitmapDesc);

		bitmap_nr = load_block_bitmap(sb, bitmap, block_group);
		if (bitmap_nr < 0)
			goto error_return;
		bh = bitmap->s_block_bitmap[bitmap_nr];
		if (i < nr_groups) {
			ptr = memscan((char *)bh->b_data + group_start, 0xFF,
				      sb->s_blocksize - group_start);
			if ((ptr - ((char *)bh->b_data)) < sb->s_blocksize) {
				bit = (ptr - ((char *)bh->b_data)) << 3;
				break;
			}
		} else {
			bit = udf_find_next_one_bit(bh->b_data,
						    sb->s_blocksize << 3,
						    group_start << 3);
			if (bit < sb->s_blocksize << 3)
				break;
		}
	}
	if (i >= (nr_groups * 2)) {
		mutex_unlock(&sbi->s_alloc_mutex);
		return newblock;
	}
	if (bit < sb->s_blocksize << 3)
		goto search_back;
	else
		bit = udf_find_next_one_bit(bh->b_data, sb->s_blocksize << 3,
					    group_start << 3);
	if (bit >= sb->s_blocksize << 3) {
		mutex_unlock(&sbi->s_alloc_mutex);
		return 0;
	}

search_back:
	i = 0;
	while (i < 7 && bit > (group_start << 3) &&
	       udf_test_bit(bit - 1, bh->b_data)) {
		++i;
		--bit;
	}

got_block:
	newblock = bit + (block_group << (sb->s_blocksize_bits + 3)) -
		(sizeof(struct spaceBitmapDesc) << 3);

	if (newblock >= sbi->s_partmaps[partition].s_partition_len) {
		/*
		 * Ran off the end of the bitmap, and bits following are
		 * non-compliant (not all zero)
		 */
		udf_err(sb, "bitmap for partition %d corrupted (block %u marked"
			" as free, partition length is %u)\n", partition,
			newblock, sbi->s_partmaps[partition].s_partition_len);
		goto error_return;
	}

	if (!udf_clear_bit(bit, bh->b_data)) {
		udf_debug("bit already cleared for block %d\n", bit);
		goto repeat;
	}

	mark_buffer_dirty(bh);

	udf_add_free_space(sb, partition, -1);
	mutex_unlock(&sbi->s_alloc_mutex);
	*err = 0;
	return newblock;

error_return:
	*err = -EIO;
	mutex_unlock(&sbi->s_alloc_mutex);
	return 0;
}

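/*
 * Free 'count' blocks in a partition whose free space is tracked by an
 * unallocated-space table (an inode describing free extents). The freed
 * range is merged into an extent it directly follows or precedes whenever
 * possible; anything left over is appended as a new extent, stealing one of
 * the freed blocks for a new extent block if the current one is full (see
 * the NOTE in the function body).
 */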
static void udf_table_free_blocks(struct super_block *sb,
				  struct inode *table,
				  struct kernel_lb_addr *bloc,
				  uint32_t offset,
				  uint32_t count)
{
	struct udf_sb_info *sbi = UDF_SB(sb);
	struct udf_part_map *partmap;
	uint32_t start, end;
	uint32_t elen;
	struct kernel_lb_addr eloc;
	struct extent_position oepos, epos;
	int8_t etype;
	struct udf_inode_info *iinfo;

	mutex_lock(&sbi->s_alloc_mutex);
	partmap = &sbi->s_partmaps[bloc->partitionReferenceNum];
	if (bloc->logicalBlockNum + count < count ||
	    (bloc->logicalBlockNum + count) > partmap->s_partition_len) {
		udf_debug("%u < %d || %u + %u > %u\n",
			  bloc->logicalBlockNum, 0,
			  bloc->logicalBlockNum, count,
			  partmap->s_partition_len);
		goto error_return;
	}

	iinfo = UDF_I(table);
	udf_add_free_space(sb, sbi->s_partition, count);

	start = bloc->logicalBlockNum + offset;
	end = bloc->logicalBlockNum + offset + count - 1;

	epos.offset = oepos.offset = sizeof(struct unallocSpaceEntry);
	elen = 0;
	epos.block = oepos.block = iinfo->i_location;
	epos.bh = oepos.bh = NULL;

	while (count &&
	       (etype = udf_next_aext(table, &epos, &eloc, &elen, 1)) != -1) {
		if (((eloc.logicalBlockNum +
			(elen >> sb->s_blocksize_bits)) == start)) {
			if ((0x3FFFFFFF - elen) <
					(count << sb->s_blocksize_bits)) {
				uint32_t tmp = ((0x3FFFFFFF - elen) >>
							sb->s_blocksize_bits);
				count -= tmp;
				start += tmp;
				elen = (etype << 30) |
					(0x40000000 - sb->s_blocksize);
			} else {
				elen = (etype << 30) |
					(elen +
					(count << sb->s_blocksize_bits));
				start += count;
				count = 0;
			}
			udf_write_aext(table, &oepos, &eloc, elen, 1);
		} else if (eloc.logicalBlockNum == (end + 1)) {
			if ((0x3FFFFFFF - elen) <
					(count << sb->s_blocksize_bits)) {
				uint32_t tmp = ((0x3FFFFFFF - elen) >>
						sb->s_blocksize_bits);
				count -= tmp;
				end -= tmp;
				eloc.logicalBlockNum -= tmp;
				elen = (etype << 30) |
					(0x40000000 - sb->s_blocksize);
			} else {
				eloc.logicalBlockNum = start;
				elen = (etype << 30) |
					(elen +
					(count << sb->s_blocksize_bits));
				end -= count;
				count = 0;
			}
			udf_write_aext(table, &oepos, &eloc, elen, 1);
		}

		if (epos.bh != oepos.bh) {
			oepos.block = epos.block;
			brelse(oepos.bh);
			get_bh(epos.bh);
			oepos.bh = epos.bh;
			oepos.offset = 0;
		} else {
			oepos.offset = epos.offset;
		}
	}

	if (count) {
		/*
		 * NOTE: we CANNOT use udf_add_aext here, as it can try to
		 * allocate a new block, and since we already hold the super
		 * block lock, very bad things would happen :)
		 *
		 * We copy the behavior of udf_add_aext, but instead of
		 * trying to allocate a new block close to the existing one,
		 * we just steal a block from the extent we are trying to add.
		 *
		 * It would be nice if the blocks were close together, but it
		 * isn't required.
		 */

		int adsize;

		eloc.logicalBlockNum = start;
		elen = EXT_RECORDED_ALLOCATED |
			(count << sb->s_blocksize_bits);

		if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_SHORT)
			adsize = sizeof(struct short_ad);
		else if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_LONG)
			adsize = sizeof(struct long_ad);
		else {
			brelse(oepos.bh);
			brelse(epos.bh);
			goto error_return;
		}

		if (epos.offset + (2 * adsize) > sb->s_blocksize) {
			/* Steal a block from the extent being freed */
			udf_setup_indirect_aext(table, eloc.logicalBlockNum,
						&epos);

			eloc.logicalBlockNum++;
			elen -= sb->s_blocksize;
		}

		/* It's possible that stealing the block emptied the extent */
		if (elen)
			__udf_add_aext(table, &epos, &eloc, elen, 1);
	}

	brelse(epos.bh);
	brelse(oepos.bh);

error_return:
	mutex_unlock(&sbi->s_alloc_mutex);
	return;
}

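/*
 * Allocate up to 'block_count' blocks starting exactly at 'first_block'
 * from a table-managed partition. This only succeeds if some free extent
 * starts precisely at first_block; that extent is shrunk or, if fully
 * consumed, deleted. Returns the number of blocks allocated, possibly 0.
 */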
static int udf_table_prealloc_blocks(struct super_block *sb,
				     struct inode *table, uint16_t partition,
				     uint32_t first_block, uint32_t block_count)
{
	struct udf_sb_info *sbi = UDF_SB(sb);
	int alloc_count = 0;
	uint32_t elen, adsize;
	struct kernel_lb_addr eloc;
	struct extent_position epos;
	int8_t etype = -1;
	struct udf_inode_info *iinfo;

	if (first_block >= sbi->s_partmaps[partition].s_partition_len)
		return 0;

	iinfo = UDF_I(table);
	if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_SHORT)
		adsize = sizeof(struct short_ad);
	else if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_LONG)
		adsize = sizeof(struct long_ad);
	else
		return 0;

	mutex_lock(&sbi->s_alloc_mutex);
	epos.offset = sizeof(struct unallocSpaceEntry);
	epos.block = iinfo->i_location;
	epos.bh = NULL;
	eloc.logicalBlockNum = 0xFFFFFFFF;

	while (first_block != eloc.logicalBlockNum &&
	       (etype = udf_next_aext(table, &epos, &eloc, &elen, 1)) != -1) {
		udf_debug("eloc=%u, elen=%u, first_block=%u\n",
			  eloc.logicalBlockNum, elen, first_block);
		; /* empty loop body */
	}

	if (first_block == eloc.logicalBlockNum) {
		epos.offset -= adsize;

		alloc_count = (elen >> sb->s_blocksize_bits);
		if (alloc_count > block_count) {
			alloc_count = block_count;
			eloc.logicalBlockNum += alloc_count;
			elen -= (alloc_count << sb->s_blocksize_bits);
			udf_write_aext(table, &epos, &eloc,
					(etype << 30) | elen, 1);
		} else
			udf_delete_aext(table, epos);
	} else {
		alloc_count = 0;
	}

	brelse(epos.bh);

	if (alloc_count)
		udf_add_free_space(sb, partition, -alloc_count);
	mutex_unlock(&sbi->s_alloc_mutex);
	return alloc_count;
}

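/*
 * Allocate one block from a table-managed partition. All free extents are
 * scanned for the one closest to 'goal' (an exact hit ends the search) and
 * the first block of that extent is handed out, so extents only shrink from
 * the front and never need to be split. Returns the new block number, or 0
 * with *err set on failure.
 */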
static udf_pblk_t udf_table_new_block(struct super_block *sb,
			       struct inode *table, uint16_t partition,
			       uint32_t goal, int *err)
{
	struct udf_sb_info *sbi = UDF_SB(sb);
	uint32_t spread = 0xFFFFFFFF, nspread = 0xFFFFFFFF;
	udf_pblk_t newblock = 0;
	uint32_t adsize;
	uint32_t elen, goal_elen = 0;
	struct kernel_lb_addr eloc, goal_eloc;
	struct extent_position epos, goal_epos;
	int8_t etype;
	struct udf_inode_info *iinfo = UDF_I(table);

	*err = -ENOSPC;

	if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_SHORT)
		adsize = sizeof(struct short_ad);
	else if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_LONG)
		adsize = sizeof(struct long_ad);
	else
		return newblock;

	mutex_lock(&sbi->s_alloc_mutex);
	if (goal >= sbi->s_partmaps[partition].s_partition_len)
		goal = 0;

	/* We search for the closest matching block to goal. If we find
	   an exact hit, we stop. Otherwise we keep going until we run out
	   of extents. We store the buffer_head, bloc, and extoffset
	   of the current closest match and use that when we are done.
	 */
	epos.offset = sizeof(struct unallocSpaceEntry);
	epos.block = iinfo->i_location;
	epos.bh = goal_epos.bh = NULL;

	while (spread &&
	       (etype = udf_next_aext(table, &epos, &eloc, &elen, 1)) != -1) {
		if (goal >= eloc.logicalBlockNum) {
			if (goal < eloc.logicalBlockNum +
					(elen >> sb->s_blocksize_bits))
				nspread = 0;
			else
				nspread = goal - eloc.logicalBlockNum -
					(elen >> sb->s_blocksize_bits);
		} else {
			nspread = eloc.logicalBlockNum - goal;
		}

		if (nspread < spread) {
			spread = nspread;
			if (goal_epos.bh != epos.bh) {
				brelse(goal_epos.bh);
				goal_epos.bh = epos.bh;
				get_bh(goal_epos.bh);
			}
			goal_epos.block = epos.block;
			goal_epos.offset = epos.offset - adsize;
			goal_eloc = eloc;
			goal_elen = (etype << 30) | elen;
		}
	}

	brelse(epos.bh);

	if (spread == 0xFFFFFFFF) {
		brelse(goal_epos.bh);
		mutex_unlock(&sbi->s_alloc_mutex);
		return 0;
	}

	/* Only allocate blocks from the beginning of the extent.
	   That way, we only delete (empty) extents, never have to insert an
	   extent because of splitting */
	/* This works, but very poorly.... */

	newblock = goal_eloc.logicalBlockNum;
	goal_eloc.logicalBlockNum++;
	goal_elen -= sb->s_blocksize;

	if (goal_elen)
		udf_write_aext(table, &goal_epos, &goal_eloc, goal_elen, 1);
	else
		udf_delete_aext(table, goal_epos);
	brelse(goal_epos.bh);

	udf_add_free_space(sb, partition, -1);

	mutex_unlock(&sbi->s_alloc_mutex);
	*err = 0;
	return newblock;
}

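/*
 * Free a run of blocks, dispatching to the bitmap or table implementation
 * according to the partition flags, and subtract the corresponding bytes
 * from the owning inode, if any.
 *
 * A typical call (a sketch with hypothetical locals, not taken from this
 * file) frees an extent recorded in a file's allocation descriptors:
 *
 *	udf_free_blocks(sb, inode, &eloc, 0,
 *			elen >> sb->s_blocksize_bits);
 */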
void udf_free_blocks(struct super_block *sb, struct inode *inode,
		     struct kernel_lb_addr *bloc, uint32_t offset,
		     uint32_t count)
{
	uint16_t partition = bloc->partitionReferenceNum;
	struct udf_part_map *map = &UDF_SB(sb)->s_partmaps[partition];

	if (map->s_partition_flags & UDF_PART_FLAG_UNALLOC_BITMAP) {
		udf_bitmap_free_blocks(sb, map->s_uspace.s_bitmap,
				       bloc, offset, count);
	} else if (map->s_partition_flags & UDF_PART_FLAG_UNALLOC_TABLE) {
		udf_table_free_blocks(sb, map->s_uspace.s_table,
				      bloc, offset, count);
	}

	if (inode) {
		inode_sub_bytes(inode,
				((sector_t)count) << sb->s_blocksize_bits);
	}
}

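/*
 * Preallocate up to 'block_count' contiguous blocks starting at
 * 'first_block', dispatching to the bitmap or table implementation, and
 * charge whatever was actually allocated to the owning inode.
 */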
inline int udf_prealloc_blocks(struct super_block *sb,
			       struct inode *inode,
			       uint16_t partition, uint32_t first_block,
			       uint32_t block_count)
{
	struct udf_part_map *map = &UDF_SB(sb)->s_partmaps[partition];
	int allocated;

	if (map->s_partition_flags & UDF_PART_FLAG_UNALLOC_BITMAP)
		allocated = udf_bitmap_prealloc_blocks(sb,
						       map->s_uspace.s_bitmap,
						       partition, first_block,
						       block_count);
	else if (map->s_partition_flags & UDF_PART_FLAG_UNALLOC_TABLE)
		allocated = udf_table_prealloc_blocks(sb,
						      map->s_uspace.s_table,
						      partition, first_block,
						      block_count);
	else
		return 0;

	if (inode && allocated > 0)
		inode_add_bytes(inode, allocated << sb->s_blocksize_bits);
	return allocated;
}

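/*
 * Allocate a single block near 'goal', dispatching to the bitmap or table
 * implementation, and charge it to the owning inode, if any. On failure 0
 * is returned and *err holds the error.
 *
 * A typical call (a sketch with hypothetical locals, not taken from this
 * file):
 *
 *	int err;
 *	udf_pblk_t blk = udf_new_block(sb, inode,
 *				iinfo->i_location.partitionReferenceNum,
 *				goal, &err);
 *	if (!blk)
 *		return err;
 */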
inline udf_pblk_t udf_new_block(struct super_block *sb,
			 struct inode *inode,
			 uint16_t partition, uint32_t goal, int *err)
{
	struct udf_part_map *map = &UDF_SB(sb)->s_partmaps[partition];
	udf_pblk_t block;

	if (map->s_partition_flags & UDF_PART_FLAG_UNALLOC_BITMAP)
		block = udf_bitmap_new_block(sb,
					     map->s_uspace.s_bitmap,
					     partition, goal, err);
	else if (map->s_partition_flags & UDF_PART_FLAG_UNALLOC_TABLE)
		block = udf_table_new_block(sb,
					    map->s_uspace.s_table,
					    partition, goal, err);
	else {
		*err = -EIO;
		return 0;
	}
	if (inode && block)
		inode_add_bytes(inode, sb->s_blocksize);
	return block;
}