// SPDX-License-Identifier: GPL-2.0
/*
 *  linux/fs/hfsplus/btree.c
 *
 * Copyright (C) 2001
 * Brad Boyer (flar@allandria.com)
 * (C) 2003 Ardis Technologies <roman@ardistech.com>
 *
 * Handle opening/closing btree
 */

#include <linux/slab.h>
#include <linux/pagemap.h>
#include <linux/log2.h>

#include "hfsplus_fs.h"
#include "hfsplus_raw.h"

/*
 * The initial clump size calculation code was taken from
 * http://opensource.apple.com/tarballs/diskdev_cmds/
 */
#define CLUMP_ENTRIES	15

static short clumptbl[CLUMP_ENTRIES * 3] = {
/*
 *	    Volume	Attributes	 Catalog	 Extents
 *	     Size	Clump (MB)	Clump (MB)	Clump (MB)
 */
	/*   1GB */	  4,		  4,		 4,
	/*   2GB */	  6,		  6,		 4,
	/*   4GB */	  8,		  8,		 4,
	/*   8GB */	 11,		 11,		 5,
	/*
	 * For volumes 16GB and larger, we want to make sure that a full OS
	 * install won't require fragmentation of the Catalog or Attributes
	 * B-trees.  We do this by making the clump sizes sufficiently large,
	 * and by leaving a gap after the B-trees for them to grow into.
	 *
	 * For SnowLeopard 10A298, a FullNetInstall with all packages selected
	 * results in:
	 * Catalog B-tree Header
	 *	nodeSize:          8192
	 *	totalNodes:       31616
	 *	freeNodes:         1978
	 * (used = 231.55 MB)
	 * Attributes B-tree Header
	 *	nodeSize:          8192
	 *	totalNodes:       63232
	 *	freeNodes:          958
	 * (used = 486.52 MB)
	 *
	 * We also want Time Machine backup volumes to have a sufficiently
	 * large clump size to reduce fragmentation.
	 *
	 * The series of numbers for Catalog and Attributes form geometric
	 * series.  For Catalog (16GB to 512GB), each term is 8**(1/5) times
	 * the previous term.  For Attributes (16GB to 512GB), each term is
	 * 4**(1/5) times the previous term.  For 1TB to 16TB, each term is
	 * 2**(1/5) times the previous term.
	 */
	/*  16GB */	 64,		 32,		 5,
	/*  32GB */	 84,		 49,		 6,
	/*  64GB */	111,		 74,		 7,
	/* 128GB */	147,		111,		 8,
	/* 256GB */	194,		169,		 9,
	/* 512GB */	256,		256,		11,
	/*   1TB */	294,		294,		14,
	/*   2TB */	338,		338,		16,
	/*   4TB */	388,		388,		20,
	/*   8TB */	446,		446,		25,
	/*  16TB */	512,		512,		32
};

u32 hfsplus_calc_btree_clump_size(u32 block_size, u32 node_size,
					u64 sectors, int file_id)
{
	u32 mod = max(node_size, block_size);
	u32 clump_size;
	int column;
	int i;

	/* Figure out which column of the above table to use for this file. */
	switch (file_id) {
	case HFSPLUS_ATTR_CNID:
		column = 0;
		break;
	case HFSPLUS_CAT_CNID:
		column = 1;
		break;
	default:
		column = 2;
		break;
	}

	/*
	 * The default clump size is 0.8% of the volume size, and it must
	 * also be a multiple of the node and block size.
	 */
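	/*
	 * Sectors are 512-byte units here (0x200000 sectors matches the
	 * 1GB boundary of the table above), so 0.8% of the volume is
	 * sectors * 512 * 0.008, roughly sectors * 4 -- hence the shift
	 * by 2 below.
	 */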
	if (sectors < 0x200000) {
		clump_size = sectors << 2;	/*  0.8 %  */
		if (clump_size < (8 * node_size))
			clump_size = 8 * node_size;
	} else {
		/* turn exponent into table index... */
		for (i = 0, sectors = sectors >> 22;
		     sectors && (i < CLUMP_ENTRIES - 1);
		     ++i, sectors = sectors >> 1) {
			/* empty body */
		}

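		/*
		 * i is now the table row.  Example: a 16GB volume has 2^25
		 * sectors, 2^25 >> 22 == 8, and halving 8 down to 0 takes
		 * four iterations, so i == 4 -- the "16GB" row, counting
		 * from the 1GB row at index 0.
		 */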
		clump_size = clumptbl[column + i * 3] * 1024 * 1024;
	}

	/*
	 * Round the clump size to a multiple of node and block size.
	 * NOTE: This rounds down.
	 */
	clump_size /= mod;
	clump_size *= mod;

	/*
	 * Rounding down could have rounded down to 0 if the block size was
	 * greater than the clump size.  If so, just use one block or node.
	 */
	if (clump_size == 0)
		clump_size = mod;

	return clump_size;
}

/* Get a reference to a B*Tree and do some initial checks */
struct hfs_btree *hfs_btree_open(struct super_block *sb, u32 id)
{
	struct hfs_btree *tree;
	struct hfs_btree_header_rec *head;
	struct address_space *mapping;
	struct inode *inode;
	struct page *page;
	unsigned int size;

	tree = kzalloc(sizeof(*tree), GFP_KERNEL);
	if (!tree)
		return NULL;

	mutex_init(&tree->tree_lock);
	spin_lock_init(&tree->hash_lock);
	tree->sb = sb;
	tree->cnid = id;
	inode = hfsplus_iget(sb, id);
	if (IS_ERR(inode))
		goto free_tree;
	tree->inode = inode;

	if (!HFSPLUS_I(tree->inode)->first_blocks) {
		pr_err("invalid btree extent records (0 size)\n");
		goto free_inode;
	}

	mapping = tree->inode->i_mapping;
	page = read_mapping_page(mapping, 0, NULL);
	if (IS_ERR(page))
		goto free_inode;

	/* Load the header */
	head = (struct hfs_btree_header_rec *)(kmap(page) +
		sizeof(struct hfs_bnode_desc));
	tree->root = be32_to_cpu(head->root);
	tree->leaf_count = be32_to_cpu(head->leaf_count);
	tree->leaf_head = be32_to_cpu(head->leaf_head);
	tree->leaf_tail = be32_to_cpu(head->leaf_tail);
	tree->node_count = be32_to_cpu(head->node_count);
	tree->free_nodes = be32_to_cpu(head->free_nodes);
	tree->attributes = be32_to_cpu(head->attributes);
	tree->node_size = be16_to_cpu(head->node_size);
	tree->max_key_len = be16_to_cpu(head->max_key_len);
	tree->depth = be16_to_cpu(head->depth);

	/* Verify the tree and set the correct compare function */
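	/*
	 * The on-disk max_key_len does not include the leading u16 key
	 * length field, hence the "- sizeof(u16)" in the checks below.
	 */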
	switch (id) {
	case HFSPLUS_EXT_CNID:
		if (tree->max_key_len != HFSPLUS_EXT_KEYLEN - sizeof(u16)) {
			pr_err("invalid extent max_key_len %d\n",
				tree->max_key_len);
			goto fail_page;
		}
		if (tree->attributes & HFS_TREE_VARIDXKEYS) {
			pr_err("invalid extent btree flag\n");
			goto fail_page;
		}

		tree->keycmp = hfsplus_ext_cmp_key;
		break;
	case HFSPLUS_CAT_CNID:
		if (tree->max_key_len != HFSPLUS_CAT_KEYLEN - sizeof(u16)) {
			pr_err("invalid catalog max_key_len %d\n",
				tree->max_key_len);
			goto fail_page;
		}
		if (!(tree->attributes & HFS_TREE_VARIDXKEYS)) {
			pr_err("invalid catalog btree flag\n");
			goto fail_page;
		}

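		/*
		 * Only HFSX volumes may use binary (case-sensitive) catalog
		 * keys; plain HFS+ catalogs are always case-insensitive.
		 */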
		if (test_bit(HFSPLUS_SB_HFSX, &HFSPLUS_SB(sb)->flags) &&
		    (head->key_type == HFSPLUS_KEY_BINARY))
			tree->keycmp = hfsplus_cat_bin_cmp_key;
		else {
			tree->keycmp = hfsplus_cat_case_cmp_key;
			set_bit(HFSPLUS_SB_CASEFOLD, &HFSPLUS_SB(sb)->flags);
		}
		break;
	case HFSPLUS_ATTR_CNID:
		if (tree->max_key_len != HFSPLUS_ATTR_KEYLEN - sizeof(u16)) {
			pr_err("invalid attributes max_key_len %d\n",
				tree->max_key_len);
			goto fail_page;
		}
		tree->keycmp = hfsplus_attr_bin_cmp_key;
		break;
	default:
		pr_err("unknown B*Tree requested\n");
		goto fail_page;
	}

	if (!(tree->attributes & HFS_TREE_BIGKEYS)) {
		pr_err("invalid btree flag\n");
		goto fail_page;
	}

	size = tree->node_size;
	if (!is_power_of_2(size))
		goto fail_page;
	if (!tree->node_count)
		goto fail_page;

	tree->node_size_shift = ffs(size) - 1;

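	/* A single bnode may span several pages when node_size > PAGE_SIZE. */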
	tree->pages_per_bnode =
		(tree->node_size + PAGE_SIZE - 1) >>
		PAGE_SHIFT;

	kunmap(page);
	put_page(page);
	return tree;

 fail_page:
	kunmap(page);
	put_page(page);
 free_inode:
	tree->inode->i_mapping->a_ops = &hfsplus_aops;
	iput(tree->inode);
 free_tree:
	kfree(tree);
	return NULL;
}

/* Release resources used by a btree */
void hfs_btree_close(struct hfs_btree *tree)
{
	struct hfs_bnode *node;
	int i;

	if (!tree)
		return;

	for (i = 0; i < NODE_HASH_SIZE; i++) {
		while ((node = tree->node_hash[i])) {
			tree->node_hash[i] = node->next_hash;
			if (atomic_read(&node->refcnt))
				pr_crit("node %d:%d still has %d user(s)!\n",
					node->tree->cnid, node->this,
					atomic_read(&node->refcnt));
			hfs_bnode_free(node);
			tree->node_hash_cnt--;
		}
	}
	iput(tree->inode);
	kfree(tree);
}

int hfs_btree_write(struct hfs_btree *tree)
{
	struct hfs_btree_header_rec *head;
	struct hfs_bnode *node;
	struct page *page;

	node = hfs_bnode_find(tree, 0);
	if (IS_ERR(node))
		/* panic? */
		return -EIO;
	/* Load the header */
	page = node->page[0];
	head = (struct hfs_btree_header_rec *)(kmap(page) +
		sizeof(struct hfs_bnode_desc));

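	/*
	 * Only the mutable fields are written back; node_size and
	 * max_key_len are set when the tree is created and are never
	 * modified here.
	 */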
	head->root = cpu_to_be32(tree->root);
	head->leaf_count = cpu_to_be32(tree->leaf_count);
	head->leaf_head = cpu_to_be32(tree->leaf_head);
	head->leaf_tail = cpu_to_be32(tree->leaf_tail);
	head->node_count = cpu_to_be32(tree->node_count);
	head->free_nodes = cpu_to_be32(tree->free_nodes);
	head->attributes = cpu_to_be32(tree->attributes);
	head->depth = cpu_to_be16(tree->depth);

	kunmap(page);
	set_page_dirty(page);
	hfs_bnode_put(node);
	return 0;
}

static struct hfs_bnode *hfs_bmap_new_bmap(struct hfs_bnode *prev, u32 idx)
{
	struct hfs_btree *tree = prev->tree;
	struct hfs_bnode *node;
	struct hfs_bnode_desc desc;
	__be32 cnid;

	node = hfs_bnode_create(tree, idx);
	if (IS_ERR(node))
		return node;

	tree->free_nodes--;
	prev->next = idx;
	cnid = cpu_to_be32(idx);
	hfs_bnode_write(prev, &cnid, offsetof(struct hfs_bnode_desc, next), 4);

	node->type = HFS_NODE_MAP;
	node->num_recs = 1;
	hfs_bnode_clear(node, 0, tree->node_size);
	desc.next = 0;
	desc.prev = 0;
	desc.type = HFS_NODE_MAP;
	desc.height = 0;
	desc.num_recs = cpu_to_be16(1);
	desc.reserved = 0;
	hfs_bnode_write(node, &desc, 0, sizeof(desc));
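	/*
	 * The first bit of the new map record covers this map node itself,
	 * so mark it allocated (bitmap bits are MSB-first).  The u16s at
	 * the end of the node form the record offset table: record 0
	 * starts at offset 14, just past the node descriptor, and ends at
	 * node_size - 6.
	 */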
	hfs_bnode_write_u16(node, 14, 0x8000);
	hfs_bnode_write_u16(node, tree->node_size - 2, 14);
	hfs_bnode_write_u16(node, tree->node_size - 4, tree->node_size - 6);

	return node;
}

/* Make sure @tree has enough space for the @rsvd_nodes */
int hfs_bmap_reserve(struct hfs_btree *tree, int rsvd_nodes)
{
	struct inode *inode = tree->inode;
	struct hfsplus_inode_info *hip = HFSPLUS_I(inode);
	u32 count;
	int res;

	if (rsvd_nodes <= 0)
		return 0;

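	/*
	 * Grow the tree file until enough free nodes are available, keeping
	 * the inode size fields and the tree's node counters in sync with
	 * the newly allocated blocks.
	 */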
	while (tree->free_nodes < rsvd_nodes) {
		res = hfsplus_file_extend(inode, hfs_bnode_need_zeroout(tree));
		if (res)
			return res;
		hip->phys_size = inode->i_size =
			(loff_t)hip->alloc_blocks <<
				HFSPLUS_SB(tree->sb)->alloc_blksz_shift;
		hip->fs_blocks =
			hip->alloc_blocks << HFSPLUS_SB(tree->sb)->fs_shift;
		inode_set_bytes(inode, inode->i_size);
		count = inode->i_size >> tree->node_size_shift;
		tree->free_nodes += count - tree->node_count;
		tree->node_count = count;
	}
	return 0;
}

struct hfs_bnode *hfs_bmap_alloc(struct hfs_btree *tree)
{
	struct hfs_bnode *node, *next_node;
	struct page **pagep;
	u32 nidx, idx;
	unsigned off;
	u16 off16;
	u16 len;
	u8 *data, byte, m;
	int i, res;

	res = hfs_bmap_reserve(tree, 1);
	if (res)
		return ERR_PTR(res);

	nidx = 0;
	node = hfs_bnode_find(tree, nidx);
	if (IS_ERR(node))
		return node;
	len = hfs_brec_lenoff(node, 2, &off16);
	off = off16;

	off += node->page_offset;
	pagep = node->page + (off >> PAGE_SHIFT);
	data = kmap(*pagep);
	off &= ~PAGE_MASK;
	idx = 0;

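	/*
	 * Scan the allocation bitmap for a clear bit.  The bitmap starts in
	 * record 2 of the header node and continues through chained map
	 * nodes; within each byte, the MSB is the lowest-numbered node.
	 */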
	for (;;) {
		while (len) {
			byte = data[off];
			if (byte != 0xff) {
				for (m = 0x80, i = 0; i < 8; m >>= 1, i++) {
					if (!(byte & m)) {
						idx += i;
						data[off] |= m;
						set_page_dirty(*pagep);
						kunmap(*pagep);
						tree->free_nodes--;
						mark_inode_dirty(tree->inode);
						hfs_bnode_put(node);
						return hfs_bnode_create(tree,
							idx);
					}
				}
			}
			if (++off >= PAGE_SIZE) {
				kunmap(*pagep);
				data = kmap(*++pagep);
				off = 0;
			}
			idx += 8;
			len--;
		}
		kunmap(*pagep);
		nidx = node->next;
		if (!nidx) {
			hfs_dbg(BNODE_MOD, "create new bmap node\n");
			next_node = hfs_bmap_new_bmap(node, idx);
		} else
			next_node = hfs_bnode_find(tree, nidx);
		hfs_bnode_put(node);
		if (IS_ERR(next_node))
			return next_node;
		node = next_node;

		len = hfs_brec_lenoff(node, 0, &off16);
		off = off16;
		off += node->page_offset;
		pagep = node->page + (off >> PAGE_SHIFT);
		data = kmap(*pagep);
		off &= ~PAGE_MASK;
	}
}

void hfs_bmap_free(struct hfs_bnode *node)
{
	struct hfs_btree *tree;
	struct page *page;
	u16 off, len;
	u32 nidx;
	u8 *data, byte, m;

	hfs_dbg(BNODE_MOD, "btree_free_node: %u\n", node->this);
	BUG_ON(!node->this);
	tree = node->tree;
	nidx = node->this;
	node = hfs_bnode_find(tree, 0);
	if (IS_ERR(node))
		return;
	len = hfs_brec_lenoff(node, 2, &off);
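	/* Walk the chained map nodes to the record that covers nidx. */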
	while (nidx >= len * 8) {
		u32 i;

		nidx -= len * 8;
		i = node->next;
		if (!i) {
			/* panic */;
			pr_crit("unable to free bnode %u. bmap not found!\n",
				node->this);
			hfs_bnode_put(node);
			return;
		}
		hfs_bnode_put(node);
		node = hfs_bnode_find(tree, i);
		if (IS_ERR(node))
			return;
		if (node->type != HFS_NODE_MAP) {
			/* panic */;
			pr_crit("invalid bmap found! (%u,%d)\n",
				node->this, node->type);
			hfs_bnode_put(node);
			return;
		}
		len = hfs_brec_lenoff(node, 0, &off);
	}
	off += node->page_offset + nidx / 8;
	page = node->page[off >> PAGE_SHIFT];
	data = kmap(page);
	off &= ~PAGE_MASK;
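	/* Bits are MSB-first: node nidx is bit (7 - (nidx & 7)) of this byte. */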
	m = 1 << (~nidx & 7);
	byte = data[off];
	if (!(byte & m)) {
		pr_crit("trying to free free bnode %u(%d)\n",
			node->this, node->type);
		kunmap(page);
		hfs_bnode_put(node);
		return;
	}
	data[off] = byte & ~m;
	set_page_dirty(page);
	kunmap(page);
	hfs_bnode_put(node);
	tree->free_nodes++;
	mark_inode_dirty(tree->inode);
}