/*
 * Copyright (C) 2011 Red Hat, Inc.
 *
 * This file is released under the GPL.
 */

#ifndef DM_BTREE_INTERNAL_H
#define DM_BTREE_INTERNAL_H

#include "dm-btree.h"

/*----------------------------------------------------------------*/

/*
 * We'll need 2 accessor functions for n->csum and n->blocknr
 * to support dm-btree-spine.c.
 */

enum node_flags {
	INTERNAL_NODE = 1,
	LEAF_NODE = 1 << 1
};
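
/*
 * A sketch of the usual test (illustrative, not a quote from dm-btree.c):
 * the flags live in the little-endian node header below, so callers check
 * them with something like
 *
 *	if (le32_to_cpu(node->header.flags) & LEAF_NODE)
 *		...
 */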

/*
 * Every btree node begins with this structure.  Make sure it's a multiple
 * of 8 bytes in size, otherwise the 64bit keys will be mis-aligned.
 */
struct node_header {
	__le32 csum;
	__le32 flags;
	__le64 blocknr; /* Block this node is supposed to live in. */

	__le32 nr_entries;
	__le32 max_entries;
	__le32 value_size;
	__le32 padding;
} __attribute__((packed, aligned(8)));

struct btree_node {
	struct node_header header;
	__le64 keys[];
} __attribute__((packed, aligned(8)));

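/*
 * A sketch of the on-disk layout implied by the accessors further down
 * (key_ptr(), value_base(), value_ptr()): the keys array is sized by
 * header.max_entries and the packed value array starts immediately after it.
 *
 *	+-------------+--------------------+--------------------------+
 *	| node_header | keys[max_entries]  | values (value_size each) |
 *	+-------------+--------------------+--------------------------+
 *	              ^                    ^
 *	              n->keys              value_base(n)
 */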

/*
 * Locks a block using the btree node validator.
 */
int bn_read_lock(struct dm_btree_info *info, dm_block_t b,
		 struct dm_block **result);

/*
 * Increments the reference counts of a node's children: the child blocks
 * of an internal node, or the values (via vt->inc) of a leaf.
 */
void inc_children(struct dm_transaction_manager *tm, struct btree_node *n,
		  struct dm_btree_value_type *vt);

/*
 * Block allocation / unlock helpers that go through the transaction manager.
 */
int new_block(struct dm_btree_info *info, struct dm_block **result);
void unlock_block(struct dm_btree_info *info, struct dm_block *b);

/*
 * Spines keep track of the rolling locks.  There are 2 variants, read-only
 * and one that uses shadowing.  These are separate structs to allow the
 * type checker to spot misuse, for example accidentally calling read_lock
 * on a shadow spine.
 */
struct ro_spine {
	struct dm_btree_info *info;

	int count;
	struct dm_block *nodes[2];
};

void init_ro_spine(struct ro_spine *s, struct dm_btree_info *info);
void exit_ro_spine(struct ro_spine *s);
int ro_step(struct ro_spine *s, dm_block_t new_child);
void ro_pop(struct ro_spine *s);
struct btree_node *ro_node(struct ro_spine *s);
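
/*
 * A minimal usage sketch (error handling elided; illustrative rather than
 * verbatim from dm-btree.c): walk down the tree while holding at most two
 * read locks at a time.
 *
 *	struct ro_spine spine;
 *
 *	init_ro_spine(&spine, info);
 *	r = ro_step(&spine, root);	// read-lock the next block on the path
 *	n = ro_node(&spine);		// most recently locked node
 *	...
 *	exit_ro_spine(&spine);		// drop any remaining locks
 */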

struct shadow_spine {
	struct dm_btree_info *info;

	int count;
	struct dm_block *nodes[2];

	dm_block_t root;
};

void init_shadow_spine(struct shadow_spine *s, struct dm_btree_info *info);
int exit_shadow_spine(struct shadow_spine *s);

/*
 * Shadows (i.e. copies, if it has been shared) block b and pushes it onto
 * the spine, taking a write lock on the shadow.
 */
int shadow_step(struct shadow_spine *s, dm_block_t b,
		struct dm_btree_value_type *vt);

/*
 * The spine must have at least one entry before calling this.
 */
struct dm_block *shadow_current(struct shadow_spine *s);

/*
 * The spine must have at least two entries before calling this.
 */
struct dm_block *shadow_parent(struct shadow_spine *s);

int shadow_has_parent(struct shadow_spine *s);

int shadow_root(struct shadow_spine *s);
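
/*
 * A minimal usage sketch of the shadow spine (illustrative, error handling
 * elided): each shadow_step() shadows the next block on the path so it can
 * be modified, and shadow_root() yields the (possibly relocated) root to
 * store back once the walk is finished.
 *
 *	struct shadow_spine spine;
 *
 *	init_shadow_spine(&spine, info);
 *	r = shadow_step(&spine, root, vt);	// copy-on-write the root
 *	n = dm_block_data(shadow_current(&spine));
 *	...
 *	new_root = shadow_root(&spine);
 *	exit_shadow_spine(&spine);
 */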

/*
 * Some inlines.
 */
static inline __le64 *key_ptr(struct btree_node *n, uint32_t index)
{
	return n->keys + index;
}

static inline void *value_base(struct btree_node *n)
{
	return &n->keys[le32_to_cpu(n->header.max_entries)];
}

static inline void *value_ptr(struct btree_node *n, uint32_t index)
{
	uint32_t value_size = le32_to_cpu(n->header.value_size);

	return value_base(n) + (value_size * index);
}

/*
 * Assumes the values are suitably-aligned and converts to core format.
 */
static inline uint64_t value64(struct btree_node *n, uint32_t index)
{
	__le64 *values_le = value_base(n);

	return le64_to_cpu(values_le[index]);
}
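
/*
 * Example (a sketch, not taken from the callers): reading entry i from a
 * leaf that stores 64-bit values:
 *
 *	uint64_t key   = le64_to_cpu(*key_ptr(n, i));
 *	uint64_t value = value64(n, i);
 */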

/*
 * Searching for a key within a single node.  Returns the index of the
 * largest key that is less than or equal to @key, or -1 if every key in
 * the node is greater.
 */
int lower_bound(struct btree_node *n, uint64_t key);

/*
 * Block validator used whenever btree nodes are read or written; it checks
 * the checksum and block location recorded in the node header.
 */
extern struct dm_block_validator btree_node_validator;

/*
 * Value type for upper levels of multi-level btrees.
 */
extern void init_le64_type(struct dm_transaction_manager *tm,
			   struct dm_btree_value_type *vt);

#endif	/* DM_BTREE_INTERNAL_H */