1*4882a593Smuzhiyun /* SPDX-License-Identifier: GPL-2.0 */
2*4882a593Smuzhiyun #ifndef CEPH_CRUSH_CRUSH_H
3*4882a593Smuzhiyun #define CEPH_CRUSH_CRUSH_H
4*4882a593Smuzhiyun
5*4882a593Smuzhiyun #ifdef __KERNEL__
6*4882a593Smuzhiyun # include <linux/rbtree.h>
7*4882a593Smuzhiyun # include <linux/types.h>
8*4882a593Smuzhiyun #else
9*4882a593Smuzhiyun # include "crush_compat.h"
10*4882a593Smuzhiyun #endif
11*4882a593Smuzhiyun
12*4882a593Smuzhiyun /*
13*4882a593Smuzhiyun * CRUSH is a pseudo-random data distribution algorithm that
14*4882a593Smuzhiyun * efficiently distributes input values (typically, data objects)
15*4882a593Smuzhiyun * across a heterogeneous, structured storage cluster.
16*4882a593Smuzhiyun *
17*4882a593Smuzhiyun * The algorithm was originally described in detail in this paper
18*4882a593Smuzhiyun * (although the algorithm has evolved somewhat since then):
19*4882a593Smuzhiyun *
20*4882a593Smuzhiyun * https://www.ssrc.ucsc.edu/Papers/weil-sc06.pdf
21*4882a593Smuzhiyun *
22*4882a593Smuzhiyun * LGPL2
23*4882a593Smuzhiyun */
24*4882a593Smuzhiyun
25*4882a593Smuzhiyun
26*4882a593Smuzhiyun #define CRUSH_MAGIC 0x00010000ul /* for detecting algorithm revisions */
27*4882a593Smuzhiyun
28*4882a593Smuzhiyun #define CRUSH_MAX_DEPTH 10 /* max crush hierarchy depth */
29*4882a593Smuzhiyun #define CRUSH_MAX_RULESET (1<<8) /* max crush ruleset number */
30*4882a593Smuzhiyun #define CRUSH_MAX_RULES CRUSH_MAX_RULESET /* should be the same as max rulesets */
31*4882a593Smuzhiyun
32*4882a593Smuzhiyun #define CRUSH_MAX_DEVICE_WEIGHT (100u * 0x10000u)
33*4882a593Smuzhiyun #define CRUSH_MAX_BUCKET_WEIGHT (65535u * 0x10000u)
34*4882a593Smuzhiyun
35*4882a593Smuzhiyun #define CRUSH_ITEM_UNDEF 0x7ffffffe /* undefined result (internal use only) */
36*4882a593Smuzhiyun #define CRUSH_ITEM_NONE 0x7fffffff /* no result */
37*4882a593Smuzhiyun
/*
 * CRUSH uses user-defined "rules" to describe how inputs should be
 * mapped to devices.  A rule consists of sequence of steps to perform
 * to generate the set of output devices.
 */
struct crush_rule_step {
	__u32 op;	/* CRUSH_RULE_* op code (see enum below) */
	__s32 arg1;	/* first operand; meaning depends on op */
	__s32 arg2;	/* second operand; meaning depends on op */
};
48*4882a593Smuzhiyun
/* step op codes */
enum {
	CRUSH_RULE_NOOP = 0,
	CRUSH_RULE_TAKE = 1,          /* arg1 = value to start with */
	CRUSH_RULE_CHOOSE_FIRSTN = 2, /* arg1 = num items to pick */
				      /* arg2 = type */
	CRUSH_RULE_CHOOSE_INDEP = 3,  /* same */
	CRUSH_RULE_EMIT = 4,          /* no args */
	CRUSH_RULE_CHOOSELEAF_FIRSTN = 6,
	CRUSH_RULE_CHOOSELEAF_INDEP = 7,

	/* the SET_* steps override per-map tunables for the remainder
	 * of the rule (arg1 = new value) */
	CRUSH_RULE_SET_CHOOSE_TRIES = 8, /* override choose_total_tries */
	CRUSH_RULE_SET_CHOOSELEAF_TRIES = 9, /* override chooseleaf_descend_once */
	CRUSH_RULE_SET_CHOOSE_LOCAL_TRIES = 10,
	CRUSH_RULE_SET_CHOOSE_LOCAL_FALLBACK_TRIES = 11,
	CRUSH_RULE_SET_CHOOSELEAF_VARY_R = 12,
	CRUSH_RULE_SET_CHOOSELEAF_STABLE = 13
};
67*4882a593Smuzhiyun
68*4882a593Smuzhiyun /*
69*4882a593Smuzhiyun * for specifying choose num (arg1) relative to the max parameter
70*4882a593Smuzhiyun * passed to do_rule
71*4882a593Smuzhiyun */
72*4882a593Smuzhiyun #define CRUSH_CHOOSE_N 0
73*4882a593Smuzhiyun #define CRUSH_CHOOSE_N_MINUS(x) (-(x))
74*4882a593Smuzhiyun
/*
 * The rule mask is used to describe what the rule is intended for.
 * Given a ruleset and size of output set, we search through the
 * rule list for a matching rule_mask.
 */
struct crush_rule_mask {
	__u8 ruleset;	/* ruleset id this rule belongs to */
	__u8 type;	/* rule type (replicated/erasure etc. — defined by caller) */
	__u8 min_size;	/* smallest output set size this rule serves */
	__u8 max_size;	/* largest output set size this rule serves */
};
86*4882a593Smuzhiyun
struct crush_rule {
	__u32 len;			/* number of entries in steps[] */
	struct crush_rule_mask mask;	/* what this rule applies to */
	struct crush_rule_step steps[];	/* flexible array of @len steps */
};

/* total in-memory size of a rule holding @len steps */
#define crush_rule_size(len) (sizeof(struct crush_rule) + \
			      (len)*sizeof(struct crush_rule_step))
95*4882a593Smuzhiyun
96*4882a593Smuzhiyun
97*4882a593Smuzhiyun
/*
 * A bucket is a named container of other items (either devices or
 * other buckets).  Items within a bucket are chosen using one of a
 * few different algorithms.  The table summarizes how the speed of
 * each option measures up against mapping stability when items are
 * added or removed.
 *
 *  Bucket Alg     Speed       Additions    Removals
 *  ------------------------------------------------
 *  uniform         O(1)       poor         poor
 *  list            O(n)       optimal      poor
 *  tree            O(log n)   good         good
 *  straw           O(n)       better       better
 *  straw2          O(n)       optimal      optimal
 */
enum {
	CRUSH_BUCKET_UNIFORM = 1,
	CRUSH_BUCKET_LIST = 2,
	CRUSH_BUCKET_TREE = 3,
	CRUSH_BUCKET_STRAW = 4,
	CRUSH_BUCKET_STRAW2 = 5,
};
extern const char *crush_bucket_alg_name(int alg);
120*4882a593Smuzhiyun extern const char *crush_bucket_alg_name(int alg);
121*4882a593Smuzhiyun
/*
 * Tree is a legacy algorithm that has proven buggy, so it is
 * excluded from the legacy-allowed set.
 */
126*4882a593Smuzhiyun #define CRUSH_LEGACY_ALLOWED_BUCKET_ALGS ( \
127*4882a593Smuzhiyun (1 << CRUSH_BUCKET_UNIFORM) | \
128*4882a593Smuzhiyun (1 << CRUSH_BUCKET_LIST) | \
129*4882a593Smuzhiyun (1 << CRUSH_BUCKET_STRAW))
130*4882a593Smuzhiyun
/* common header shared by all bucket variants below */
struct crush_bucket {
	__s32 id;	/* this'll be negative */
	__u16 type;	/* non-zero; type=0 is reserved for devices */
	__u8 alg;	/* one of CRUSH_BUCKET_* */
	__u8 hash;	/* which hash function to use, CRUSH_HASH_* */
	__u32 weight;	/* 16-bit fixed point */
	__u32 size;	/* num items */
	__s32 *items;	/* ids of contained items; negative ids name
			 * nested buckets (see @id above) */
};
141*4882a593Smuzhiyun
/** @ingroup API
 *
 * Replacement weights for each item in a bucket. The size of the
 * array must be exactly the size of the straw2 bucket, just as the
 * item_weights array.
 *
 */
struct crush_weight_set {
	__u32 *weights; /*!< 16.16 fixed point weights
			     in the same order as items */
	__u32 size;     /*!< size of the __weights__ array */
};
154*4882a593Smuzhiyun
/** @ingroup API
 *
 * Replacement weights and ids for a given straw2 bucket, for
 * placement purposes.
 *
 * When crush_do_rule() chooses the Nth item from a straw2 bucket, the
 * replacement weights found at __weight_set[N]__ are used instead of
 * the weights from __item_weights__. If __N__ is greater than
 * __weight_set_size__, the weights found at __weight_set_size-1__ are
 * used instead. For instance if __weight_set__ is:
 *
 *    [ [ 0x10000, 0x20000 ],   // position 0
 *      [ 0x20000, 0x40000 ] ]  // position 1
 *
 * choosing the 0th item will use position 0 weights [ 0x10000, 0x20000 ]
 * choosing the 1th item will use position 1 weights [ 0x20000, 0x40000 ]
 * choosing the 2th item will use position 1 weights [ 0x20000, 0x40000 ]
 * etc.
 *
 */
struct crush_choose_arg {
	__s32 *ids;            /*!< values to use instead of items */
	__u32 ids_size;        /*!< size of the __ids__ array */
	struct crush_weight_set *weight_set; /*!< weight replacements for
						  a given position */
	__u32 weight_set_size; /*!< size of the __weight_set__ array */
};
182*4882a593Smuzhiyun
/** @ingroup API
 *
 * Replacement weights and ids for each bucket in the crushmap. The
 * __size__ of the __args__ array must be exactly the same as the
 * __map->max_buckets__.
 *
 * The __crush_choose_arg__ at index N will be used when choosing
 * an item from the bucket __map->buckets[N]__ bucket, provided it
 * is a straw2 bucket.
 *
 */
struct crush_choose_arg_map {
#ifdef __KERNEL__
	struct rb_node node;	/* linkage into crush_map::choose_args */
	s64 choose_args_index;	/* rbtree key for this arg map */
#endif
	struct crush_choose_arg *args; /*!< replacement for each bucket
					    in the crushmap */
	__u32 size;                    /*!< size of the __args__ array */
};
203*4882a593Smuzhiyun
/* uniform bucket: every item carries the same weight */
struct crush_bucket_uniform {
	struct crush_bucket h;
	__u32 item_weight;  /* 16-bit fixed point; all items equally weighted */
};
208*4882a593Smuzhiyun
/* list bucket: items weighted individually, with running prefix sums */
struct crush_bucket_list {
	struct crush_bucket h;
	__u32 *item_weights;  /* 16-bit fixed point */
	__u32 *sum_weights;   /* 16-bit fixed point.  element i is sum
				 of weights 0..i, inclusive */
};
215*4882a593Smuzhiyun
/* tree bucket: items stored in an implicit binary tree of weights */
struct crush_bucket_tree {
	struct crush_bucket h;  /* note: h.size is _tree_ size, not number of
				   actual items */
	__u8 num_nodes;		/* number of nodes in node_weights[] */
	__u32 *node_weights;	/* per-node 16-bit fixed point weights */
};
222*4882a593Smuzhiyun
/* legacy straw bucket (superseded by straw2; see table above) */
struct crush_bucket_straw {
	struct crush_bucket h;
	__u32 *item_weights;   /* 16-bit fixed point */
	__u32 *straws;         /* 16-bit fixed point */
};
228*4882a593Smuzhiyun
/* straw2 bucket: optimal stability on additions and removals */
struct crush_bucket_straw2 {
	struct crush_bucket h;
	__u32 *item_weights;   /* 16-bit fixed point */
};
233*4882a593Smuzhiyun
234*4882a593Smuzhiyun
235*4882a593Smuzhiyun
/*
 * CRUSH map includes all buckets, rules, etc.
 */
struct crush_map {
	struct crush_bucket **buckets;	/* max_buckets bucket pointers */
	struct crush_rule **rules;	/* max_rules rule pointers */

	__s32 max_buckets;		/* size of buckets[] */
	__u32 max_rules;		/* size of rules[] */
	__s32 max_devices;		/* upper bound on device ids */

	/* choose local retries before re-descent */
	__u32 choose_local_tries;
	/* choose local attempts using a fallback permutation before
	 * re-descent */
	__u32 choose_local_fallback_tries;
	/* choose attempts before giving up */
	__u32 choose_total_tries;
	/* attempt chooseleaf inner descent once for firstn mode; on
	 * reject retry outer descent.  Note that this does *not*
	 * apply to a collision: in that case we will retry as we used
	 * to. */
	__u32 chooseleaf_descend_once;

	/* if non-zero, feed r into chooseleaf, bit-shifted right by (r-1)
	 * bits.  a value of 1 is best for new clusters.  for legacy clusters
	 * that want to limit reshuffling, a value of 3 or 4 will make the
	 * mappings line up a bit better with previous mappings. */
	__u8 chooseleaf_vary_r;

	/* if true, it makes chooseleaf firstn to return stable results (if
	 * no local retry) so that data migrations would be optimal when some
	 * device fails. */
	__u8 chooseleaf_stable;

	/*
	 * This value is calculated after decode or construction by
	 * the builder.  It is exposed here (rather than having a
	 * 'build CRUSH working space' function) so that callers can
	 * reserve a static buffer, allocate space on the stack, or
	 * otherwise avoid calling into the heap allocator if they
	 * want to.  The size of the working space depends on the map,
	 * while the size of the scratch vector passed to the mapper
	 * depends on the size of the desired result set.
	 *
	 * Nothing stops the caller from allocating both in one fell
	 * swoop and passing in two pointers, though.
	 */
	size_t working_size;

#ifndef __KERNEL__
	/*
	 * version 0 (original) of straw_calc has various flaws.  version 1
	 * fixes a few of them.
	 */
	__u8 straw_calc_version;

	/*
	 * allowed bucket algs is a bitmask, here the bit positions
	 * are CRUSH_BUCKET_*.  note that these are *bits* and
	 * CRUSH_BUCKET_* values are not, so we need to or together (1
	 * << CRUSH_BUCKET_WHATEVER).  The 0th bit is not used to
	 * minimize confusion (bucket type values start at 1).
	 */
	__u32 allowed_bucket_algs;

	/* NOTE(review): userspace-only; appears to hold per-attempt
	 * counters for the builder — confirm against crush builder code */
	__u32 *choose_tries;
#else
	/* device/bucket type id -> type name (CrushWrapper::type_map) */
	struct rb_root type_names;

	/* device/bucket id -> name (CrushWrapper::name_map) */
	struct rb_root names;

	/* CrushWrapper::choose_args */
	struct rb_root choose_args;
#endif
};
314*4882a593Smuzhiyun
315*4882a593Smuzhiyun
316*4882a593Smuzhiyun /* crush.c */
317*4882a593Smuzhiyun extern int crush_get_bucket_item_weight(const struct crush_bucket *b, int pos);
318*4882a593Smuzhiyun extern void crush_destroy_bucket_uniform(struct crush_bucket_uniform *b);
319*4882a593Smuzhiyun extern void crush_destroy_bucket_list(struct crush_bucket_list *b);
320*4882a593Smuzhiyun extern void crush_destroy_bucket_tree(struct crush_bucket_tree *b);
321*4882a593Smuzhiyun extern void crush_destroy_bucket_straw(struct crush_bucket_straw *b);
322*4882a593Smuzhiyun extern void crush_destroy_bucket_straw2(struct crush_bucket_straw2 *b);
323*4882a593Smuzhiyun extern void crush_destroy_bucket(struct crush_bucket *b);
324*4882a593Smuzhiyun extern void crush_destroy_rule(struct crush_rule *r);
325*4882a593Smuzhiyun extern void crush_destroy(struct crush_map *map);
326*4882a593Smuzhiyun
/*
 * Map an item index into its node position within a tree bucket's
 * implicit binary-tree weight array: item i sits at node 2*i + 1
 * (equivalent to ((i+1) << 1) - 1).
 */
static inline int crush_calc_tree_node(int i)
{
	return 2 * i + 1;
}
331*4882a593Smuzhiyun
/*
 * These data structures are private to the CRUSH implementation. They
 * are exposed in this header file because builder needs their
 * definitions to calculate the total working size.
 *
 * Moving this out of the crush map allow us to treat the CRUSH map as
 * immutable within the mapper and removes the requirement for a CRUSH
 * map lock.
 */
struct crush_work_bucket {
	__u32 perm_x;	/* @x for which *perm is defined */
	__u32 perm_n;	/* num elements of *perm that are permuted/defined */
	__u32 *perm;	/* Permutation of the bucket's items */
};
346*4882a593Smuzhiyun
/* scratch state for one mapping operation; see working_size above */
struct crush_work {
	struct crush_work_bucket **work; /* Per-bucket working store */
#ifdef __KERNEL__
	struct list_head item;	/* linkage for the kernel's workspace cache */
#endif
};
353*4882a593Smuzhiyun
354*4882a593Smuzhiyun #ifdef __KERNEL__
355*4882a593Smuzhiyun /* osdmap.c */
356*4882a593Smuzhiyun void clear_crush_names(struct rb_root *root);
357*4882a593Smuzhiyun void clear_choose_args(struct crush_map *c);
358*4882a593Smuzhiyun #endif
359*4882a593Smuzhiyun
360*4882a593Smuzhiyun #endif
361