/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
#ifndef _LINUX_BCACHE_H
#define _LINUX_BCACHE_H

/*
 * Bcache on disk data structures
 */

#include <linux/types.h>

#define BITMASK(name, type, field, offset, size)		\
static inline __u64 name(const type *k)			\
{ return (k->field >> offset) & ~(~0ULL << size); }		\
								\
static inline void SET_##name(type *k, __u64 v)			\
{								\
	k->field &= ~(~(~0ULL << size) << offset);		\
	k->field |= (v & ~(~0ULL << size)) << offset;		\
}
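
/*
 * BITMASK(name, type, field, offset, size) generates a pair of accessors for
 * a bitfield packed into a wider integer member:
 *
 *	name(k)		returns bits [offset, offset + size) of k->field
 *	SET_name(k, v)	clears those bits and stores the low 'size' bits of v
 *
 * Illustrative sketch only - for instance, CACHE_SYNC (declared further down
 * for struct cache_sb) expands roughly to:
 *
 *	static inline __u64 CACHE_SYNC(const struct cache_sb *k)
 *	{ return (k->flags >> 0) & ~(~0ULL << 1); }
 *
 *	static inline void SET_CACHE_SYNC(struct cache_sb *k, __u64 v)
 *	{
 *		k->flags &= ~(~(~0ULL << 1) << 0);
 *		k->flags |= (v & ~(~0ULL << 1)) << 0;
 *	}
 */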

/* Btree keys - all units are in sectors */

struct bkey {
	__u64	high;
	__u64	low;
	__u64	ptr[];
};

#define KEY_FIELD(name, field, offset, size)				\
	BITMASK(name, struct bkey, field, offset, size)

#define PTR_FIELD(name, offset, size)					\
static inline __u64 name(const struct bkey *k, unsigned int i)		\
{ return (k->ptr[i] >> offset) & ~(~0ULL << size); }			\
									\
static inline void SET_##name(struct bkey *k, unsigned int i, __u64 v)	\
{									\
	k->ptr[i] &= ~(~(~0ULL << size) << offset);			\
	k->ptr[i] |= (v & ~(~0ULL << size)) << offset;			\
}

#define KEY_SIZE_BITS		16
#define KEY_MAX_U64S		8

KEY_FIELD(KEY_PTRS,	high, 60, 3)
KEY_FIELD(HEADER_SIZE,	high, 58, 2)
KEY_FIELD(KEY_CSUM,	high, 56, 2)
KEY_FIELD(KEY_PINNED,	high, 55, 1)
KEY_FIELD(KEY_DIRTY,	high, 36, 1)

KEY_FIELD(KEY_SIZE,	high, 20, KEY_SIZE_BITS)
KEY_FIELD(KEY_INODE,	high, 0,  20)
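
/*
 * Layout of bkey->high implied by the KEY_FIELD() definitions above
 * (informational summary only, not generated code):
 *
 *	bit  63		header bit, set by the KEY() macro below
 *	bits 60..62	KEY_PTRS	number of entries used in ptr[]
 *	bits 58..59	HEADER_SIZE
 *	bits 56..57	KEY_CSUM
 *	bit  55		KEY_PINNED
 *	bit  36		KEY_DIRTY
 *	bits 20..35	KEY_SIZE	extent size, in sectors
 *	bits  0..19	KEY_INODE
 */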

/* Next time I change the on disk format, KEY_OFFSET() won't be 64 bits */

static inline __u64 KEY_OFFSET(const struct bkey *k)
{
	return k->low;
}

static inline void SET_KEY_OFFSET(struct bkey *k, __u64 v)
{
	k->low = v;
}

/*
 * The high bit being set is a relic from when we used it to do binary
 * searches - it told you where a key started. It's not used anymore,
 * and can probably be safely dropped.
 */
#define KEY(inode, offset, size)					\
((struct bkey) {							\
	.high = (1ULL << 63) | ((__u64) (size) << 20) | (inode),	\
	.low = (offset)							\
})

#define ZERO_KEY			KEY(0, 0, 0)

#define MAX_KEY_INODE			(~(~0 << 20))
#define MAX_KEY_OFFSET			(~0ULL >> 1)
#define MAX_KEY				KEY(MAX_KEY_INODE, MAX_KEY_OFFSET, 0)

#define KEY_START(k)			(KEY_OFFSET(k) - KEY_SIZE(k))
#define START_KEY(k)			KEY(KEY_INODE(k), KEY_START(k), 0)
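
/*
 * Example (illustrative only): KEY_OFFSET is the end of an extent, so a key
 * built as
 *
 *	struct bkey k = KEY(1, 1024, 16);
 *
 * describes sectors [1008, 1024) of inode 1: KEY_OFFSET(&k) == 1024,
 * KEY_SIZE(&k) == 16 and KEY_START(&k) == 1008.
 */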

#define PTR_DEV_BITS			12

PTR_FIELD(PTR_DEV,			51, PTR_DEV_BITS)
PTR_FIELD(PTR_OFFSET,			8,  43)
PTR_FIELD(PTR_GEN,			0,  8)

#define PTR_CHECK_DEV			((1 << PTR_DEV_BITS) - 1)

#define MAKE_PTR(gen, offset, dev)					\
	((((__u64) dev) << 51) | ((__u64) offset) << 8 | gen)
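
/*
 * Example (illustrative only): MAKE_PTR() packs generation, offset (sectors)
 * and cache device index into a single __u64, matching the PTR_FIELD()s
 * above:
 *
 *	k.ptr[0] = MAKE_PTR(3, 2048, 0);
 *
 *	PTR_GEN(&k, 0)    == 3
 *	PTR_OFFSET(&k, 0) == 2048
 *	PTR_DEV(&k, 0)    == 0
 *
 * PTR_CHECK_DEV is the all-ones device index, used as a sentinel rather than
 * the index of a real cache device.
 */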

/* Bkey utility code */

static inline unsigned long bkey_u64s(const struct bkey *k)
{
	return (sizeof(struct bkey) / sizeof(__u64)) + KEY_PTRS(k);
}

static inline unsigned long bkey_bytes(const struct bkey *k)
{
	return bkey_u64s(k) * sizeof(__u64);
}

#define bkey_copy(_dest, _src)	memcpy(_dest, _src, bkey_bytes(_src))

static inline void bkey_copy_key(struct bkey *dest, const struct bkey *src)
{
	SET_KEY_INODE(dest, KEY_INODE(src));
	SET_KEY_OFFSET(dest, KEY_OFFSET(src));
}

static inline struct bkey *bkey_next(const struct bkey *k)
{
	__u64 *d = (void *) k;

	return (struct bkey *) (d + bkey_u64s(k));
}

static inline struct bkey *bkey_idx(const struct bkey *k, unsigned int nr_keys)
{
	__u64 *d = (void *) k;

	return (struct bkey *) (d + nr_keys);
}

/* Enough for a key with 6 pointers */
#define BKEY_PAD		8

#define BKEY_PADDED(key)					\
	union { struct bkey key; __u64 key ## _pad[BKEY_PAD]; }
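
/*
 * Example (illustrative sketch; do_something() and src are hypothetical):
 * keys are laid out back to back as __u64s, so a packed sequence of keys can
 * be walked with bkey_next() until an end pointer is reached:
 *
 *	for (k = start; k < end; k = bkey_next(k))
 *		do_something(k);
 *
 * BKEY_PADDED(key) declares storage big enough for a key with up to 6
 * pointers, accessed through the named member:
 *
 *	BKEY_PADDED(key) tmp;
 *	bkey_copy(&tmp.key, src);
 */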

/* Superblock */

/* Version 0: Cache device
 * Version 1: Backing device
 * Version 2: Seed pointer into btree node checksum
 * Version 3: Cache device with new UUID format
 * Version 4: Backing device with data offset
 * Version 5: Cache device with features
 * Version 6: Backing device with features
 */
#define BCACHE_SB_VERSION_CDEV			0
#define BCACHE_SB_VERSION_BDEV			1
#define BCACHE_SB_VERSION_CDEV_WITH_UUID	3
#define BCACHE_SB_VERSION_BDEV_WITH_OFFSET	4
#define BCACHE_SB_VERSION_CDEV_WITH_FEATURES	5
#define BCACHE_SB_VERSION_BDEV_WITH_FEATURES	6
#define BCACHE_SB_MAX_VERSION			6

#define SB_SECTOR			8
#define SB_OFFSET			(SB_SECTOR << SECTOR_SHIFT)
#define SB_SIZE				4096
#define SB_LABEL_SIZE			32
#define SB_JOURNAL_BUCKETS		256U
/* SB_JOURNAL_BUCKETS must be divisible by BITS_PER_LONG */
#define MAX_CACHES_PER_SET		8

#define BDEV_DATA_START_DEFAULT		16	/* sectors */
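
/*
 * Illustrative note (SECTOR_SHIFT is defined elsewhere in the kernel, not in
 * this header): with 512-byte sectors the superblock lives at sector
 * SB_SECTOR, i.e. byte offset SB_OFFSET = 8 << 9 = 4096, and occupies
 * SB_SIZE = 4096 bytes. On a backing device without an explicit data_offset,
 * cached data starts at sector BDEV_DATA_START_DEFAULT.
 */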

struct cache_sb_disk {
	__le64			csum;
	__le64			offset;	/* sector where this sb was written */
	__le64			version;

	__u8			magic[16];

	__u8			uuid[16];
	union {
		__u8		set_uuid[16];
		__le64		set_magic;
	};
	__u8			label[SB_LABEL_SIZE];

	__le64			flags;
	__le64			seq;

	__le64			feature_compat;
	__le64			feature_incompat;
	__le64			feature_ro_compat;

	__le64			pad[5];

	union {
	struct {
		/* Cache devices */
		__le64		nbuckets;	/* device size */

		__le16		block_size;	/* sectors */
		__le16		bucket_size;	/* sectors */

		__le16		nr_in_set;
		__le16		nr_this_dev;
	};
	struct {
		/* Backing devices */
		__le64		data_offset;

		/*
		 * block_size from the cache device section is still used by
		 * backing devices, so don't add anything here until we fix
		 * things to not need it for backing devices anymore
		 */
	};
	};

	__le32			last_mount;	/* time overflow in y2106 */

	__le16			first_bucket;
	union {
		__le16		njournal_buckets;
		__le16		keys;
	};
	__le64			d[SB_JOURNAL_BUCKETS];	/* journal buckets */
	__le16			obso_bucket_size_hi;	/* obsoleted */
};

/*
 * This is the in-memory bcache super block.
 * NOTE: cache_sb does NOT map exactly onto cache_sb_disk; member sizes,
 *       ordering and even the overall struct size may differ from
 *       cache_sb_disk.
 */
struct cache_sb {
	__u64			offset;	/* sector where this sb was written */
	__u64			version;

	__u8			magic[16];

	__u8			uuid[16];
	union {
		__u8		set_uuid[16];
		__u64		set_magic;
	};
	__u8			label[SB_LABEL_SIZE];

	__u64			flags;
	__u64			seq;

	__u64			feature_compat;
	__u64			feature_incompat;
	__u64			feature_ro_compat;

	union {
	struct {
		/* Cache devices */
		__u64		nbuckets;	/* device size */

		__u16		block_size;	/* sectors */
		__u16		nr_in_set;
		__u16		nr_this_dev;
		__u32		bucket_size;	/* sectors */
	};
	struct {
		/* Backing devices */
		__u64		data_offset;

		/*
		 * block_size from the cache device section is still used by
		 * backing devices, so don't add anything here until we fix
		 * things to not need it for backing devices anymore
		 */
	};
	};

	__u32			last_mount;	/* time overflow in y2106 */

	__u16			first_bucket;
	union {
		__u16		njournal_buckets;
		__u16		keys;
	};
	__u64			d[SB_JOURNAL_BUCKETS];	/* journal buckets */
};

static inline _Bool SB_IS_BDEV(const struct cache_sb *sb)
{
	return sb->version == BCACHE_SB_VERSION_BDEV
		|| sb->version == BCACHE_SB_VERSION_BDEV_WITH_OFFSET
		|| sb->version == BCACHE_SB_VERSION_BDEV_WITH_FEATURES;
}

BITMASK(CACHE_SYNC,			struct cache_sb, flags, 0, 1);
BITMASK(CACHE_DISCARD,			struct cache_sb, flags, 1, 1);
BITMASK(CACHE_REPLACEMENT,		struct cache_sb, flags, 2, 3);
#define CACHE_REPLACEMENT_LRU		0U
#define CACHE_REPLACEMENT_FIFO		1U
#define CACHE_REPLACEMENT_RANDOM	2U

BITMASK(BDEV_CACHE_MODE,		struct cache_sb, flags, 0, 4);
#define CACHE_MODE_WRITETHROUGH		0U
#define CACHE_MODE_WRITEBACK		1U
#define CACHE_MODE_WRITEAROUND		2U
#define CACHE_MODE_NONE			3U
BITMASK(BDEV_STATE,			struct cache_sb, flags, 61, 2);
#define BDEV_STATE_NONE			0U
#define BDEV_STATE_CLEAN		1U
#define BDEV_STATE_DIRTY		2U
#define BDEV_STATE_STALE		3U
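
/*
 * Example (illustrative only): the flags word is interpreted differently for
 * cache and backing devices, using the accessors generated above:
 *
 *	if (SB_IS_BDEV(sb)) {
 *		mode  = BDEV_CACHE_MODE(sb);	(a CACHE_MODE_* value)
 *		state = BDEV_STATE(sb);		(a BDEV_STATE_* value)
 *	} else {
 *		repl  = CACHE_REPLACEMENT(sb);	(a CACHE_REPLACEMENT_* value)
 *	}
 */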

/*
 * Magic numbers
 *
 * The various other data structures have their own magic numbers, which are
 * xored with the first part of the cache set's UUID
 */

#define JSET_MAGIC			0x245235c1a3625032ULL
#define PSET_MAGIC			0x6750e15f87337f91ULL
#define BSET_MAGIC			0x90135c78b99e07f5ULL

static inline __u64 jset_magic(struct cache_sb *sb)
{
	return sb->set_magic ^ JSET_MAGIC;
}

static inline __u64 pset_magic(struct cache_sb *sb)
{
	return sb->set_magic ^ PSET_MAGIC;
}

static inline __u64 bset_magic(struct cache_sb *sb)
{
	return sb->set_magic ^ BSET_MAGIC;
}
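
/*
 * Illustrative note: each on-disk structure's magic field is expected to
 * match the corresponding *_MAGIC constant xored with the cache set's
 * set_magic (the first 8 bytes of set_uuid); e.g. a journal entry is only
 * accepted when its magic equals jset_magic(sb).
 */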

/*
 * Journal
 *
 * On disk format for a journal entry:
 * seq is monotonically increasing; every journal entry has its own unique
 * sequence number.
 *
 * last_seq is the oldest journal entry that still has keys the btree hasn't
 * flushed to disk yet.
 *
 * version is for on disk format changes.
 */

#define BCACHE_JSET_VERSION_UUIDv1	1
#define BCACHE_JSET_VERSION_UUID	1	/* Always latest UUID format */
#define BCACHE_JSET_VERSION		1

struct jset {
	__u64			csum;
	__u64			magic;
	__u64			seq;
	__u32			version;
	__u32			keys;

	__u64			last_seq;

	BKEY_PADDED(uuid_bucket);
	BKEY_PADDED(btree_root);
	__u16			btree_level;
	__u16			pad[3];

	__u64			prio_bucket[MAX_CACHES_PER_SET];

	union {
		struct bkey	start[0];
		__u64		d[0];
	};
};

/* Bucket prios/gens */

struct prio_set {
	__u64			csum;
	__u64			magic;
	__u64			seq;
	__u32			version;
	__u32			pad;

	__u64			next_bucket;

	struct bucket_disk {
		__u16		prio;
		__u8		gen;
	} __attribute((packed)) data[];
};

/* UUIDS - per backing device/flash only volume metadata */

struct uuid_entry {
	union {
		struct {
			__u8	uuid[16];
			__u8	label[32];
			__u32	first_reg; /* time overflow in y2106 */
			__u32	last_reg;
			__u32	invalidated;

			__u32	flags;
			/* Size of flash only volumes */
			__u64	sectors;
		};

		__u8		pad[128];
	};
};

BITMASK(UUID_FLASH_ONLY,	struct uuid_entry, flags, 0, 1);

/* Btree nodes */

/* Version 1: Seed pointer into btree node checksum */
#define BCACHE_BSET_CSUM		1
#define BCACHE_BSET_VERSION		1

/*
 * Btree nodes
 *
 * On disk a btree node is a list/log of these; within each set the keys are
 * sorted
 */
struct bset {
	__u64			csum;
	__u64			magic;
	__u64			seq;
	__u32			version;
	__u32			keys;

	union {
		struct bkey	start[0];
		__u64		d[0];
	};
};
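
/*
 * Example (illustrative sketch): 'keys' counts __u64s, not bkeys, so the keys
 * of a struct bset *i can be walked as:
 *
 *	struct bkey *k   = i->start;
 *	struct bkey *end = bkey_idx(i->start, i->keys);
 *
 *	while (k < end)
 *		k = bkey_next(k);
 */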

/* OBSOLETE */

/* UUIDS - per backing device/flash only volume metadata */

struct uuid_entry_v0 {
	__u8		uuid[16];
	__u8		label[32];
	__u32		first_reg;
	__u32		last_reg;
	__u32		invalidated;
	__u32		pad;
};

#endif /* _LINUX_BCACHE_H */