/* SPDX-License-Identifier: GPL-2.0+ WITH Linux-syscall-note */
/*
   md_p.h : physical layout of Linux RAID devices
          Copyright (C) 1996-98 Ingo Molnar, Gadi Oxman

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 2, or (at your option)
   any later version.

   You should have received a copy of the GNU General Public License
   (for example /usr/src/linux/COPYING); if not, write to the Free
   Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/

#ifndef _MD_P_H
#define _MD_P_H

#include <linux/types.h>
#include <asm/byteorder.h>

/*
 * RAID superblock.
 *
 * The RAID superblock maintains some statistics on each RAID configuration.
 * Each real device in the RAID set contains it near the end of the device.
 * Some of the ideas are copied from the ext2fs implementation.
 *
 * We currently use 4096 bytes as follows:
 *
 *	word offset	function
 *
 *	   0  -   31	Constant generic RAID device information.
 *	  32  -   63	Generic state information.
 *	  64  -  127	Personality specific information.
 *	 128  -  991	27 32-word descriptors of the disks in the raid set
 *			(MD_SB_DISKS entries of MD_SB_DESCRIPTOR_WORDS each).
 *	 992  - 1023	Disk specific descriptor.
 */

/*
 * If x is the real device size in bytes, we return an apparent size of:
 *
 *	y = (x & ~(MD_RESERVED_BYTES - 1)) - MD_RESERVED_BYTES
 *
 * and place the 4kB superblock at offset y.
 */
#define MD_RESERVED_BYTES		(64 * 1024)
#define MD_RESERVED_SECTORS		(MD_RESERVED_BYTES / 512)

#define MD_NEW_SIZE_SECTORS(x)		((x & ~(MD_RESERVED_SECTORS - 1)) - MD_RESERVED_SECTORS)

#define MD_SB_BYTES			4096
#define MD_SB_WORDS			(MD_SB_BYTES / 4)
#define MD_SB_SECTORS			(MD_SB_BYTES / 512)
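
/*
 * Illustrative only (not part of the on-disk format): a minimal sketch of
 * how a hypothetical user-space tool could locate the v0.90 superblock on
 * a member device using the macros above.  "device_size_sectors" is an
 * assumed input obtained elsewhere (e.g. BLKGETSIZE64 / 512).
 *
 *	__u64 device_size_sectors = ...;   // real device size in sectors
 *	__u64 sb_start = MD_NEW_SIZE_SECTORS(device_size_sectors);
 *
 * For example, a 1000000-sector device is rounded down to a multiple of
 * MD_RESERVED_SECTORS (128), giving 999936; subtracting another 128 places
 * the superblock at sector 999808, spanning MD_SB_SECTORS (8) sectors.
 */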

/*
 * The following are counted in 32-bit words
 */
#define MD_SB_GENERIC_OFFSET		0
#define MD_SB_PERSONALITY_OFFSET	64
#define MD_SB_DISKS_OFFSET		128
#define MD_SB_DESCRIPTOR_OFFSET		992

#define MD_SB_GENERIC_CONSTANT_WORDS	32
#define MD_SB_GENERIC_STATE_WORDS	32
#define MD_SB_GENERIC_WORDS		(MD_SB_GENERIC_CONSTANT_WORDS + MD_SB_GENERIC_STATE_WORDS)
#define MD_SB_PERSONALITY_WORDS		64
#define MD_SB_DESCRIPTOR_WORDS		32
#define MD_SB_DISKS			27
#define MD_SB_DISKS_WORDS		(MD_SB_DISKS*MD_SB_DESCRIPTOR_WORDS)
#define MD_SB_RESERVED_WORDS		(1024 - MD_SB_GENERIC_WORDS - MD_SB_PERSONALITY_WORDS - MD_SB_DISKS_WORDS - MD_SB_DESCRIPTOR_WORDS)
#define MD_SB_EQUAL_WORDS		(MD_SB_GENERIC_WORDS + MD_SB_PERSONALITY_WORDS + MD_SB_DISKS_WORDS)

/*
 * Device "operational" state bits
 */
#define MD_DISK_FAULTY		0 /* disk is faulty / operational */
#define MD_DISK_ACTIVE		1 /* disk is running or spare disk */
#define MD_DISK_SYNC		2 /* disk is in sync with the raid set */
#define MD_DISK_REMOVED		3 /* disk has been removed from the raid set */
#define MD_DISK_CLUSTER_ADD	4 /* Initiate a disk add across the cluster
				   * For clustered environments only.
				   */
#define MD_DISK_CANDIDATE	5 /* disk is added as spare (local) until confirmed
				   * For clustered environments only.
				   */
#define MD_DISK_FAILFAST	10 /* Send REQ_FAILFAST if there are multiple
				    * devices available - and don't try to
				    * correct read errors.
				    */

#define MD_DISK_WRITEMOSTLY	9 /* disk is "write-mostly" in a RAID1 config.
				   * read requests will only be sent here in
				   * dire need
				   */
#define MD_DISK_JOURNAL		18 /* disk is used as the write journal in RAID-5/6 */

#define MD_DISK_ROLE_SPARE	0xffff
#define MD_DISK_ROLE_FAULTY	0xfffe
#define MD_DISK_ROLE_JOURNAL	0xfffd
#define MD_DISK_ROLE_MAX	0xff00 /* max value of regular disk role */
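
/*
 * Note: the MD_DISK_* constants above are bit numbers used in a per-device
 * 32-bit 'state' word (see mdp_disk_t below), while the MD_DISK_ROLE_*
 * constants are 16-bit values stored directly in the dev_roles[] array of
 * the v1.x superblock, e.g. dev_roles[i] == MD_DISK_ROLE_SPARE marks
 * device i as a spare.
 */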

typedef struct mdp_device_descriptor_s {
	__u32 number;		/*  0 Device number in the entire set	      */
	__u32 major;		/*  1 Device major number		      */
	__u32 minor;		/*  2 Device minor number		      */
	__u32 raid_disk;	/*  3 The role of the device in the raid set  */
	__u32 state;		/*  4 Operational state			      */
	__u32 reserved[MD_SB_DESCRIPTOR_WORDS - 5];
} mdp_disk_t;
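
/*
 * Illustrative only: a hedged sketch of how consumer code might test the
 * per-device state bits of an mdp_disk_t (this helper is hypothetical and
 * not defined by this header):
 *
 *	static int disk_is_usable(const mdp_disk_t *d)
 *	{
 *		if (d->state & (1 << MD_DISK_FAULTY))
 *			return 0;
 *		return (d->state & (1 << MD_DISK_ACTIVE)) &&
 *		       (d->state & (1 << MD_DISK_SYNC));
 *	}
 */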

#define MD_SB_MAGIC		0xa92b4efc

/*
 * Superblock state bits
 */
#define MD_SB_CLEAN		0
#define MD_SB_ERRORS		1

#define MD_SB_CLUSTERED		5 /* MD is clustered */
#define MD_SB_BITMAP_PRESENT	8 /* bitmap may be present nearby */

/*
 * Notes:
 * - if an array is being reshaped (restriped) in order to change
 *   the number of active devices in the array, 'raid_disks' will be
 *   the larger of the old and new numbers.  'delta_disks' will
 *   be the "new - old".  So if +ve, raid_disks is the new value, and
 *   "raid_disks-delta_disks" is the old.  If -ve, raid_disks is the
 *   old value and "raid_disks+delta_disks" is the new (smaller) value.
 */
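
/*
 * Worked example for the note above: growing a 4-disk array to 6 disks
 * gives raid_disks == 6 and delta_disks == +2, so the old geometry is
 * raid_disks - delta_disks == 4.  Shrinking 6 -> 4 keeps raid_disks == 6
 * (the larger value) with delta_disks == -2, so the new geometry is
 * raid_disks + delta_disks == 4.
 */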


typedef struct mdp_superblock_s {
	/*
	 * Constant generic information
	 */
	__u32 md_magic;		/*  0 MD identifier			      */
	__u32 major_version;	/*  1 major version to which the set conforms */
	__u32 minor_version;	/*  2 minor version ...		      */
	__u32 patch_version;	/*  3 patchlevel version ...		      */
	__u32 gvalid_words;	/*  4 Number of used words in this section    */
	__u32 set_uuid0;	/*  5 Raid set identifier		      */
	__u32 ctime;		/*  6 Creation time			      */
	__u32 level;		/*  7 Raid personality			      */
	__u32 size;		/*  8 Apparent size of each individual disk   */
	__u32 nr_disks;		/*  9 total disks in the raid set	      */
	__u32 raid_disks;	/* 10 disks in a fully functional raid set    */
	__u32 md_minor;		/* 11 preferred MD minor device number	      */
	__u32 not_persistent;	/* 12 does it have a persistent superblock    */
	__u32 set_uuid1;	/* 13 Raid set identifier #2		      */
	__u32 set_uuid2;	/* 14 Raid set identifier #3		      */
	__u32 set_uuid3;	/* 15 Raid set identifier #4		      */
	__u32 gstate_creserved[MD_SB_GENERIC_CONSTANT_WORDS - 16];

	/*
	 * Generic state information
	 */
	__u32 utime;		/*  0 Superblock update time		      */
	__u32 state;		/*  1 State bits (clean, ...)		      */
	__u32 active_disks;	/*  2 Number of currently active disks	      */
	__u32 working_disks;	/*  3 Number of working disks		      */
	__u32 failed_disks;	/*  4 Number of failed disks		      */
	__u32 spare_disks;	/*  5 Number of spare disks		      */
	__u32 sb_csum;		/*  6 checksum of the whole superblock	      */
#if defined(__BYTE_ORDER) ? __BYTE_ORDER == __BIG_ENDIAN : defined(__BIG_ENDIAN)
	__u32 events_hi;	/*  7 high-order of superblock update count   */
	__u32 events_lo;	/*  8 low-order of superblock update count    */
	__u32 cp_events_hi;	/*  9 high-order of checkpoint update count   */
	__u32 cp_events_lo;	/* 10 low-order of checkpoint update count    */
#elif defined(__BYTE_ORDER) ? __BYTE_ORDER == __LITTLE_ENDIAN : defined(__LITTLE_ENDIAN)
	__u32 events_lo;	/*  7 low-order of superblock update count    */
	__u32 events_hi;	/*  8 high-order of superblock update count   */
	__u32 cp_events_lo;	/*  9 low-order of checkpoint update count    */
	__u32 cp_events_hi;	/* 10 high-order of checkpoint update count   */
#else
#error unspecified endianness
#endif
	__u32 recovery_cp;	/* 11 recovery checkpoint sector count	      */
	/* These are only valid for minor_version > 90 */
	__u64 reshape_position;	/* 12,13 next address in array-space for reshape */
	__u32 new_level;	/* 14 new level we are reshaping to	      */
	__u32 delta_disks;	/* 15 change in number of raid_disks	      */
	__u32 new_layout;	/* 16 new layout			      */
	__u32 new_chunk;	/* 17 new chunk size (bytes)		      */
	__u32 gstate_sreserved[MD_SB_GENERIC_STATE_WORDS - 18];

	/*
	 * Personality information
	 */
	__u32 layout;		/*  0 the array's physical layout	      */
	__u32 chunk_size;	/*  1 chunk size in bytes		      */
	__u32 root_pv;		/*  2 LV root PV */
	__u32 root_block;	/*  3 LV root block */
	__u32 pstate_reserved[MD_SB_PERSONALITY_WORDS - 4];

	/*
	 * Disks information
	 */
	mdp_disk_t disks[MD_SB_DISKS];

	/*
	 * Reserved
	 */
	__u32 reserved[MD_SB_RESERVED_WORDS];

	/*
	 * Active descriptor
	 */
	mdp_disk_t this_disk;

} mdp_super_t;

static inline __u64 md_event(mdp_super_t *sb) {
	__u64 ev = sb->events_hi;
	return (ev << 32) | sb->events_lo;
}
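
/*
 * Illustrative only: a hedged sketch of how user-space might sanity-check
 * a v0.90 superblock read from sector MD_NEW_SIZE_SECTORS(dev_size) (see
 * above).  Checksum verification of sb_csum is deliberately omitted, and
 * the helper name is hypothetical:
 *
 *	static int looks_like_md090(const mdp_super_t *sb)
 *	{
 *		return sb->md_magic == MD_SB_MAGIC && sb->major_version == 0;
 *	}
 *
 * md_event(sb) then yields the full 64-bit event counter built from
 * events_hi and events_lo.
 */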

#define MD_SUPERBLOCK_1_TIME_SEC_MASK ((1ULL<<40) - 1)
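
/*
 * Illustrative only: MD_SUPERBLOCK_1_TIME_SEC_MASK splits the v1.x ctime
 * and utime fields ("lo 40 bits are seconds, top 24 are microseconds or
 * 0", see below), e.g. on a CPU-endian copy of the field:
 *
 *	__u64 t = ...;			// e.g. le64_to_cpu(sb->ctime)
 *	__u64 secs  = t & MD_SUPERBLOCK_1_TIME_SEC_MASK;
 *	__u32 usecs = t >> 40;
 */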

/*
 * The version-1 superblock :
 * All numeric fields are little-endian.
 *
 * total size: 256 bytes plus 2 per device.
 *  1K allows 384 devices.
 */
struct mdp_superblock_1 {
	/* constant array information - 128 bytes */
	__le32	magic;		/* MD_SB_MAGIC: 0xa92b4efc - little endian */
	__le32	major_version;	/* 1 */
	__le32	feature_map;	/* bit 0 set if 'bitmap_offset' is meaningful */
	__le32	pad0;		/* always set to 0 when writing */

	__u8	set_uuid[16];	/* user-space generated. */
	char	set_name[32];	/* set and interpreted by user-space */

	__le64	ctime;		/* lo 40 bits are seconds, top 24 are microseconds or 0*/
	__le32	level;		/* -4 (multipath), -1 (linear), 0,1,4,5 */
	__le32	layout;		/* only for raid5 and raid10 currently */
	__le64	size;		/* used size of component devices, in 512byte sectors */

	__le32	chunksize;	/* in 512byte sectors */
	__le32	raid_disks;
	union {
		__le32	bitmap_offset;	/* sectors after start of superblock that bitmap starts
					 * NOTE: signed, so bitmap can be before superblock
					 * only meaningful if feature_map[0] is set.
					 */

		/* only meaningful when feature_map[MD_FEATURE_PPL] is set */
		struct {
			__le16 offset;	/* sectors from start of superblock that ppl starts (signed) */
			__le16 size;	/* ppl size in sectors */
		} ppl;
	};

	/* These are only valid with feature bit '4' */
	__le32	new_level;	/* new level we are reshaping to		*/
	__le64	reshape_position;	/* next address in array-space for reshape */
	__le32	delta_disks;	/* change in number of raid_disks		*/
	__le32	new_layout;	/* new layout					*/
	__le32	new_chunk;	/* new chunk size (512byte sectors)		*/
	__le32	new_offset;	/* signed number to add to data_offset in new
				 * layout.  0 == no-change.  This can be
				 * different on each device in the array.
				 */

	/* constant this-device information - 64 bytes */
	__le64	data_offset;	/* sector start of data, often 0 */
	__le64	data_size;	/* sectors in this device that can be used for data */
	__le64	super_offset;	/* sector start of this superblock */
	union {
		__le64	recovery_offset;/* sectors before this offset (from data_offset) have been recovered */
		__le64	journal_tail;	/* journal tail of journal device (from data_offset) */
	};
	__le32	dev_number;	/* permanent identifier of this device - not role in raid */
	__le32	cnt_corrected_read; /* number of read errors that were corrected by re-writing */
	__u8	device_uuid[16]; /* user-space settable, ignored by kernel */
	__u8	devflags;	/* per-device flags.  Only two defined...*/
#define	WriteMostly1	1	/* mask for writemostly flag in above */
#define	FailFast1	2	/* Should avoid retries and fixups and just fail */
	/* Bad block log.  If there are any bad blocks the feature flag is set.
	 * If offset and size are non-zero, that space is reserved and available
	 */
	__u8	bblog_shift;	/* shift from sectors to block size */
	__le16	bblog_size;	/* number of sectors reserved for list */
	__le32	bblog_offset;	/* sector offset from superblock to bblog,
				 * signed - not unsigned */

	/* array state information - 64 bytes */
	__le64	utime;		/* 40 bits of seconds, 24 bits of microseconds */
	__le64	events;		/* incremented when superblock updated */
	__le64	resync_offset;	/* data before this offset (from data_offset) known to be in sync */
	__le32	sb_csum;	/* checksum up to devs[max_dev] */
	__le32	max_dev;	/* size of devs[] array to consider */
	__u8	pad3[64-32];	/* set to 0 when writing */

	/* device state information. Indexed by dev_number.
	 * 2 bytes per device
	 * Note there are no per-device state flags. State information is rolled
	 * into the 'roles' value.  If a device is spare or faulty, then it doesn't
	 * have a meaningful role.
	 */
	__le16	dev_roles[0];	/* role in array, or 0xffff for a spare, or 0xfffe for faulty */
};
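
/*
 * Illustrative only: per the comments above, the fixed part of struct
 * mdp_superblock_1 is 256 bytes and each device adds one __le16, so the
 * region covered by the checksum ("up to devs[max_dev]") extends to
 * 256 + 2 * max_dev bytes.  A hedged sketch of interpreting one role entry
 * using the MD_DISK_ROLE_* values defined earlier:
 *
 *	__u16 role = ...;		// le16_to_cpu(sb->dev_roles[i])
 *	if (role == MD_DISK_ROLE_SPARE)
 *		;	// spare, occupies no slot in the array
 *	else if (role == MD_DISK_ROLE_FAULTY)
 *		;	// failed device
 *	else if (role == MD_DISK_ROLE_JOURNAL)
 *		;	// write-journal device (MD_FEATURE_JOURNAL)
 *	else if (role <= MD_DISK_ROLE_MAX)
 *		;	// active member filling slot 'role'
 */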

/* feature_map bits */
#define MD_FEATURE_BITMAP_OFFSET	1
#define	MD_FEATURE_RECOVERY_OFFSET	2 /* recovery_offset is present and
					   * must be honoured
					   */
#define	MD_FEATURE_RESHAPE_ACTIVE	4
#define	MD_FEATURE_BAD_BLOCKS		8 /* badblock list is not empty */
#define	MD_FEATURE_REPLACEMENT		16 /* This device is replacing an
					    * active device with same 'role'.
					    * 'recovery_offset' is also set.
					    */
#define	MD_FEATURE_RESHAPE_BACKWARDS	32 /* Reshape doesn't change number
					    * of devices, but is going
					    * backwards anyway.
					    */
#define	MD_FEATURE_NEW_OFFSET		64 /* new_offset must be honoured */
#define	MD_FEATURE_RECOVERY_BITMAP	128 /* recovery that is happening
					     * is guided by bitmap.
					     */
#define	MD_FEATURE_CLUSTERED		256 /* clustered MD */
#define	MD_FEATURE_JOURNAL		512 /* support write cache */
#define	MD_FEATURE_PPL			1024 /* support PPL */
#define	MD_FEATURE_MULTIPLE_PPLS	2048 /* support for multiple PPLs */
#define	MD_FEATURE_RAID0_LAYOUT		4096 /* layout is meaningful for RAID0 */
#define	MD_FEATURE_ALL			(MD_FEATURE_BITMAP_OFFSET	\
					|MD_FEATURE_RECOVERY_OFFSET	\
					|MD_FEATURE_RESHAPE_ACTIVE	\
					|MD_FEATURE_BAD_BLOCKS		\
					|MD_FEATURE_REPLACEMENT		\
					|MD_FEATURE_RESHAPE_BACKWARDS	\
					|MD_FEATURE_NEW_OFFSET		\
					|MD_FEATURE_RECOVERY_BITMAP	\
					|MD_FEATURE_CLUSTERED		\
					|MD_FEATURE_JOURNAL		\
					|MD_FEATURE_PPL			\
					|MD_FEATURE_MULTIPLE_PPLS	\
					|MD_FEATURE_RAID0_LAYOUT	\
					)
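
/*
 * Illustrative only: MD_FEATURE_ALL collects every feature_map bit defined
 * here, so a conservative consumer can reject superblocks carrying bits it
 * does not understand:
 *
 *	__u32 features = ...;		// le32_to_cpu(sb->feature_map)
 *	if (features & ~MD_FEATURE_ALL)
 *		return -1;		// unknown feature bits set
 */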

struct r5l_payload_header {
	__le16 type;
	__le16 flags;
} __attribute__ ((__packed__));

enum r5l_payload_type {
	R5LOG_PAYLOAD_DATA = 0,
	R5LOG_PAYLOAD_PARITY = 1,
	R5LOG_PAYLOAD_FLUSH = 2,
};

struct r5l_payload_data_parity {
	struct r5l_payload_header header;
	__le32 size;		/* sector.  data/parity size.  each 4k
				 * has a checksum */
	__le64 location;	/* sector. For data, it's raid sector. For
				 * parity, it's stripe sector */
	__le32 checksum[];
} __attribute__ ((__packed__));

enum r5l_payload_data_parity_flag {
	R5LOG_PAYLOAD_FLAG_DISCARD = 1, /* payload is discard */
	/*
	 * RESHAPED/RESHAPING is only set when there is reshape activity. Note,
	 * both data/parity of a stripe should have the same flag set
	 *
	 * RESHAPED: reshape is running, and this stripe finished reshape
	 * RESHAPING: reshape is running, and this stripe isn't reshaped
	 */
	R5LOG_PAYLOAD_FLAG_RESHAPED = 2,
	R5LOG_PAYLOAD_FLAG_RESHAPING = 3,
};

struct r5l_payload_flush {
	struct r5l_payload_header header;
	__le32 size;		/* flush_stripes size, bytes */
	__le64 flush_stripes[];
} __attribute__ ((__packed__));

enum r5l_payload_flush_flag {
	R5LOG_PAYLOAD_FLAG_FLUSH_STRIPE = 1, /* data represents whole stripe */
};

struct r5l_meta_block {
	__le32 magic;
	__le32 checksum;
	__u8 version;
	__u8 __zero_pading_1;
	__le16 __zero_pading_2;
	__le32 meta_size;	/* whole size of the block */

	__le64 seq;
	__le64 position;	/* sector, start from rdev->data_offset, current position */
	struct r5l_payload_header payloads[];
} __attribute__ ((__packed__));

#define R5LOG_VERSION 0x1
#define R5LOG_MAGIC 0x6433c509
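
/*
 * Illustrative only: a hedged sketch of walking the variable-length
 * payload area of an r5l_meta_block.  Each payload's length depends on its
 * type (data/parity payloads carry one checksum per 4k described, flush
 * payloads carry 'size' bytes of stripe numbers), so the walker advances
 * by a per-payload length it computes itself; payload_length() below is a
 * hypothetical helper, not defined by this header:
 *
 *	struct r5l_meta_block *mb = ...;	// one block read from the log
 *	char *p   = (char *)mb->payloads;
 *	char *end = (char *)mb + le32_to_cpu(mb->meta_size);
 *	while (p < end) {
 *		struct r5l_payload_header *h = (struct r5l_payload_header *)p;
 *		// dispatch on le16_to_cpu(h->type):
 *		//   R5LOG_PAYLOAD_DATA / _PARITY -> struct r5l_payload_data_parity
 *		//   R5LOG_PAYLOAD_FLUSH          -> struct r5l_payload_flush
 *		p += payload_length(h);
 *	}
 */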

struct ppl_header_entry {
	__le64 data_sector;	/* raid sector of the new data */
	__le32 pp_size;		/* length of partial parity */
	__le32 data_size;	/* length of data */
	__le32 parity_disk;	/* member disk containing parity */
	__le32 checksum;	/* checksum of partial parity data for this
				 * entry (~crc32c) */
} __attribute__ ((__packed__));

#define PPL_HEADER_SIZE 4096
#define PPL_HDR_RESERVED 512
#define PPL_HDR_ENTRY_SPACE \
	(PPL_HEADER_SIZE - PPL_HDR_RESERVED - 4 * sizeof(__le32) - sizeof(__le64))
#define PPL_HDR_MAX_ENTRIES \
	(PPL_HDR_ENTRY_SPACE / sizeof(struct ppl_header_entry))

struct ppl_header {
	__u8 reserved[PPL_HDR_RESERVED];/* reserved space, fill with 0xff */
	__le32 signature;	/* signature (family number of volume) */
	__le32 padding;		/* zero pad */
	__le64 generation;	/* generation number of the header */
	__le32 entries_count;	/* number of entries in entry array */
	__le32 checksum;	/* checksum of the header (~crc32c) */
	struct ppl_header_entry entries[PPL_HDR_MAX_ENTRIES];
} __attribute__ ((__packed__));
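
/*
 * Illustrative only: PPL_HDR_ENTRY_SPACE is the 4096-byte header minus the
 * 512 reserved bytes and the fixed fields (four __le32 plus one __le64 ==
 * 24 bytes), i.e. 3560 bytes, so PPL_HDR_MAX_ENTRIES works out to
 * 3560 / 24 == 148 with the packed 24-byte entry above.  A hedged sketch
 * of walking the entry array:
 *
 *	struct ppl_header *hdr = ...;	// one PPL header read from disk
 *	__u32 i, n = le32_to_cpu(hdr->entries_count);
 *	for (i = 0; i < n && i < PPL_HDR_MAX_ENTRIES; i++) {
 *		const struct ppl_header_entry *e = &hdr->entries[i];
 *		// e->data_sector, e->pp_size and e->data_size describe one
 *		// run of partial parity (all fields little-endian on disk).
 *	}
 */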

#endif