xref: /OK3568_Linux_fs/kernel/drivers/block/drbd/drbd_protocol.h (revision 4882a59341e53eb6f0b4789bf948001014eff981)
1*4882a593Smuzhiyun /* SPDX-License-Identifier: GPL-2.0 */
2*4882a593Smuzhiyun #ifndef __DRBD_PROTOCOL_H
3*4882a593Smuzhiyun #define __DRBD_PROTOCOL_H
4*4882a593Smuzhiyun 
/* Packet command ids as sent on the wire.  Values are fixed protocol
 * constants — note the "reserved"/"already claimed" gaps below — and
 * must never be renumbered; only appended to. */
5*4882a593Smuzhiyun enum drbd_packet {
6*4882a593Smuzhiyun 	/* receiver (data socket) */
7*4882a593Smuzhiyun 	P_DATA		      = 0x00,
8*4882a593Smuzhiyun 	P_DATA_REPLY	      = 0x01, /* Response to P_DATA_REQUEST */
9*4882a593Smuzhiyun 	P_RS_DATA_REPLY	      = 0x02, /* Response to P_RS_DATA_REQUEST */
10*4882a593Smuzhiyun 	P_BARRIER	      = 0x03,
11*4882a593Smuzhiyun 	P_BITMAP	      = 0x04,
12*4882a593Smuzhiyun 	P_BECOME_SYNC_TARGET  = 0x05,
13*4882a593Smuzhiyun 	P_BECOME_SYNC_SOURCE  = 0x06,
14*4882a593Smuzhiyun 	P_UNPLUG_REMOTE	      = 0x07, /* Used at various times to hint the peer */
15*4882a593Smuzhiyun 	P_DATA_REQUEST	      = 0x08, /* Used to ask for a data block */
16*4882a593Smuzhiyun 	P_RS_DATA_REQUEST     = 0x09, /* Used to ask for a data block for resync */
17*4882a593Smuzhiyun 	P_SYNC_PARAM	      = 0x0a,
18*4882a593Smuzhiyun 	P_PROTOCOL	      = 0x0b,
19*4882a593Smuzhiyun 	P_UUIDS		      = 0x0c,
20*4882a593Smuzhiyun 	P_SIZES		      = 0x0d,
21*4882a593Smuzhiyun 	P_STATE		      = 0x0e,
22*4882a593Smuzhiyun 	P_SYNC_UUID	      = 0x0f,
23*4882a593Smuzhiyun 	P_AUTH_CHALLENGE      = 0x10,
24*4882a593Smuzhiyun 	P_AUTH_RESPONSE	      = 0x11,
25*4882a593Smuzhiyun 	P_STATE_CHG_REQ	      = 0x12,
26*4882a593Smuzhiyun 
27*4882a593Smuzhiyun 	/* (meta socket) */
28*4882a593Smuzhiyun 	P_PING		      = 0x13,
29*4882a593Smuzhiyun 	P_PING_ACK	      = 0x14,
30*4882a593Smuzhiyun 	P_RECV_ACK	      = 0x15, /* Used in protocol B */
31*4882a593Smuzhiyun 	P_WRITE_ACK	      = 0x16, /* Used in protocol C */
32*4882a593Smuzhiyun 	P_RS_WRITE_ACK	      = 0x17, /* Is a P_WRITE_ACK, additionally call set_in_sync(). */
33*4882a593Smuzhiyun 	P_SUPERSEDED	      = 0x18, /* Used in proto C, two-primaries conflict detection */
34*4882a593Smuzhiyun 	P_NEG_ACK	      = 0x19, /* Sent if local disk is unusable */
35*4882a593Smuzhiyun 	P_NEG_DREPLY	      = 0x1a, /* Local disk is broken... */
36*4882a593Smuzhiyun 	P_NEG_RS_DREPLY	      = 0x1b, /* Local disk is broken... */
37*4882a593Smuzhiyun 	P_BARRIER_ACK	      = 0x1c,
38*4882a593Smuzhiyun 	P_STATE_CHG_REPLY     = 0x1d,
39*4882a593Smuzhiyun 
40*4882a593Smuzhiyun 	/* "new" commands, no longer fitting into the ordering scheme above */
41*4882a593Smuzhiyun 
42*4882a593Smuzhiyun 	P_OV_REQUEST	      = 0x1e, /* data socket */
43*4882a593Smuzhiyun 	P_OV_REPLY	      = 0x1f,
44*4882a593Smuzhiyun 	P_OV_RESULT	      = 0x20, /* meta socket */
45*4882a593Smuzhiyun 	P_CSUM_RS_REQUEST     = 0x21, /* data socket */
46*4882a593Smuzhiyun 	P_RS_IS_IN_SYNC	      = 0x22, /* meta socket */
47*4882a593Smuzhiyun 	P_SYNC_PARAM89	      = 0x23, /* data socket, protocol version 89 replacement for P_SYNC_PARAM */
48*4882a593Smuzhiyun 	P_COMPRESSED_BITMAP   = 0x24, /* compressed or otherwise encoded bitmap transfer */
49*4882a593Smuzhiyun 	/* P_CKPT_FENCE_REQ      = 0x25, * currently reserved for protocol D */
50*4882a593Smuzhiyun 	/* P_CKPT_DISABLE_REQ    = 0x26, * currently reserved for protocol D */
51*4882a593Smuzhiyun 	P_DELAY_PROBE         = 0x27, /* is used on BOTH sockets */
52*4882a593Smuzhiyun 	P_OUT_OF_SYNC         = 0x28, /* Mark as out of sync (Outrunning), data socket */
53*4882a593Smuzhiyun 	P_RS_CANCEL           = 0x29, /* meta: Used to cancel RS_DATA_REQUEST packet by SyncSource */
54*4882a593Smuzhiyun 	P_CONN_ST_CHG_REQ     = 0x2a, /* data sock: Connection wide state request */
55*4882a593Smuzhiyun 	P_CONN_ST_CHG_REPLY   = 0x2b, /* meta sock: Connection side state req reply */
56*4882a593Smuzhiyun 	P_RETRY_WRITE	      = 0x2c, /* Protocol C: retry conflicting write request */
57*4882a593Smuzhiyun 	P_PROTOCOL_UPDATE     = 0x2d, /* data sock: is used in established connections */
58*4882a593Smuzhiyun 	/* 0x2e to 0x30 reserved, used in drbd 9 */
59*4882a593Smuzhiyun 
60*4882a593Smuzhiyun 	/* REQ_OP_DISCARD. We used "discard" in different contexts before,
61*4882a593Smuzhiyun 	 * which is why I chose TRIM here, to disambiguate. */
62*4882a593Smuzhiyun 	P_TRIM                = 0x31,
63*4882a593Smuzhiyun 
64*4882a593Smuzhiyun 	/* Only use these two if both support FF_THIN_RESYNC */
65*4882a593Smuzhiyun 	P_RS_THIN_REQ         = 0x32, /* Request a block for resync or reply P_RS_DEALLOCATED */
66*4882a593Smuzhiyun 	P_RS_DEALLOCATED      = 0x33, /* Contains only zeros on sync source node */
67*4882a593Smuzhiyun 
68*4882a593Smuzhiyun 	/* REQ_WRITE_SAME.
69*4882a593Smuzhiyun 	 * On a receiving side without REQ_WRITE_SAME,
70*4882a593Smuzhiyun 	 * we may fall back to an opencoded loop instead. */
71*4882a593Smuzhiyun 	P_WSAME               = 0x34,
72*4882a593Smuzhiyun 
73*4882a593Smuzhiyun 	/* 0x35 already claimed in DRBD 9 */
74*4882a593Smuzhiyun 	P_ZEROES              = 0x36, /* data sock: zero-out, WRITE_ZEROES */
75*4882a593Smuzhiyun 
76*4882a593Smuzhiyun 	/* 0x40 .. 0x48 already claimed in DRBD 9 */
77*4882a593Smuzhiyun 
78*4882a593Smuzhiyun 	P_MAY_IGNORE	      = 0x100, /* Flag to test if (cmd > P_MAY_IGNORE) ... */
79*4882a593Smuzhiyun 	P_MAX_OPT_CMD	      = 0x101,
80*4882a593Smuzhiyun 
81*4882a593Smuzhiyun 	/* special command ids for handshake */
82*4882a593Smuzhiyun 
83*4882a593Smuzhiyun 	P_INITIAL_META	      = 0xfff1, /* First Packet on the MetaSock */
84*4882a593Smuzhiyun 	P_INITIAL_DATA	      = 0xfff2, /* First Packet on the Socket */
85*4882a593Smuzhiyun 
86*4882a593Smuzhiyun 	P_CONNECTION_FEATURES = 0xfffe	/* FIXED for the next century! */
87*4882a593Smuzhiyun };
88*4882a593Smuzhiyun 
/* Fallback when the kernel's compiler.h definition is not in scope:
 * every wire struct below must be laid out with no compiler-inserted
 * padding, so on-the-wire offsets match the declarations exactly. */
89*4882a593Smuzhiyun #ifndef __packed
90*4882a593Smuzhiyun #define __packed __attribute__((packed))
91*4882a593Smuzhiyun #endif
92*4882a593Smuzhiyun 
93*4882a593Smuzhiyun /* This is the layout for a packet on the wire.
94*4882a593Smuzhiyun  * The byteorder is the network byte order.
95*4882a593Smuzhiyun  *     (except block_id and barrier fields.
96*4882a593Smuzhiyun  *	these are pointers to local structs
97*4882a593Smuzhiyun  *	and have no relevance for the partner,
98*4882a593Smuzhiyun  *	which just echoes them as received.)
99*4882a593Smuzhiyun  *
100*4882a593Smuzhiyun  * NOTE that the payload starts at a long aligned offset,
101*4882a593Smuzhiyun  * regardless of 32 or 64 bit arch!
102*4882a593Smuzhiyun  */
/* 8-byte header; the "80" suffix presumably denotes the protocol version
 * era it was introduced in — confirm against the version handling in
 * drbd_receiver/drbd_main. */
103*4882a593Smuzhiyun struct p_header80 {
104*4882a593Smuzhiyun 	u32	  magic;
105*4882a593Smuzhiyun 	u16	  command;	/* an enum drbd_packet value, presumably — confirm at send/recv sites */
106*4882a593Smuzhiyun 	u16	  length;	/* bytes of data after this header */
107*4882a593Smuzhiyun } __packed;
108*4882a593Smuzhiyun 
109*4882a593Smuzhiyun /* Header for big packets, Used for data packets exceeding 64kB */
/* Same 8-byte total size as p_header80, but with a 32-bit length so the
 * payload is not limited by the u16 length field above. */
110*4882a593Smuzhiyun struct p_header95 {
111*4882a593Smuzhiyun 	u16	  magic;	/* use DRBD_MAGIC_BIG here */
112*4882a593Smuzhiyun 	u16	  command;
113*4882a593Smuzhiyun 	u32	  length;
114*4882a593Smuzhiyun } __packed;
115*4882a593Smuzhiyun 
/* 16-byte header carrying a volume number in addition to command/length;
 * presumably the protocol-100 (multi-volume) header — confirm against the
 * connection-feature negotiation code.  The trailing pad keeps the payload
 * 8-byte aligned, as required by the NOTE above. */
116*4882a593Smuzhiyun struct p_header100 {
117*4882a593Smuzhiyun 	u32	  magic;
118*4882a593Smuzhiyun 	u16	  volume;
119*4882a593Smuzhiyun 	u16	  command;
120*4882a593Smuzhiyun 	u32	  length;
121*4882a593Smuzhiyun 	u32	  pad;
122*4882a593Smuzhiyun } __packed;
123*4882a593Smuzhiyun 
124*4882a593Smuzhiyun /* These defines must not be changed without changing the protocol version.
125*4882a593Smuzhiyun  * New defines may only be introduced together with protocol version bump or
126*4882a593Smuzhiyun  * new protocol feature flags.
127*4882a593Smuzhiyun  */
/* Bit values for the dp_flags field of struct p_data (name match — confirm
 * at the send/receive sites). */
128*4882a593Smuzhiyun #define DP_HARDBARRIER	      1 /* no longer used */
129*4882a593Smuzhiyun #define DP_RW_SYNC	      2 /* equals REQ_SYNC    */
130*4882a593Smuzhiyun #define DP_MAY_SET_IN_SYNC    4
131*4882a593Smuzhiyun #define DP_UNPLUG             8 /* not used anymore   */
132*4882a593Smuzhiyun #define DP_FUA               16 /* equals REQ_FUA     */
133*4882a593Smuzhiyun #define DP_FLUSH             32 /* equals REQ_PREFLUSH   */
134*4882a593Smuzhiyun #define DP_DISCARD           64 /* equals REQ_OP_DISCARD */
135*4882a593Smuzhiyun #define DP_SEND_RECEIVE_ACK 128 /* This is a proto B write request */
136*4882a593Smuzhiyun #define DP_SEND_WRITE_ACK   256 /* This is a proto C write request */
137*4882a593Smuzhiyun #define DP_WSAME            512 /* equiv. REQ_WRITE_SAME */
138*4882a593Smuzhiyun #define DP_ZEROES          1024 /* equiv. REQ_OP_WRITE_ZEROES */
139*4882a593Smuzhiyun 
140*4882a593Smuzhiyun /* possible combinations:
141*4882a593Smuzhiyun  * REQ_OP_WRITE_ZEROES:  DP_DISCARD | DP_ZEROES
142*4882a593Smuzhiyun  * REQ_OP_WRITE_ZEROES + REQ_NOUNMAP: DP_ZEROES
143*4882a593Smuzhiyun  */
144*4882a593Smuzhiyun 
/* Request header for data-carrying packets (P_DATA and friends, by name);
 * presumably followed on the wire by the actual block payload — confirm at
 * the drbd_send/receive sites. */
145*4882a593Smuzhiyun struct p_data {
146*4882a593Smuzhiyun 	u64	    sector;    /* 64 bits sector number */
147*4882a593Smuzhiyun 	u64	    block_id;  /* to identify the request in protocol B&C */
148*4882a593Smuzhiyun 	u32	    seq_num;
149*4882a593Smuzhiyun 	u32	    dp_flags;  /* DP_* bits defined above */
150*4882a593Smuzhiyun } __packed;
151*4882a593Smuzhiyun 
/* Payload of P_TRIM (by name): a p_data request header plus the byte count
 * of the region to discard; no data payload follows. */
152*4882a593Smuzhiyun struct p_trim {
153*4882a593Smuzhiyun 	struct p_data p_data;
154*4882a593Smuzhiyun 	u32	    size;	/* == bio->bi_size */
155*4882a593Smuzhiyun } __packed;
156*4882a593Smuzhiyun 
/* Payload of P_WSAME (by name); same layout as p_trim, kept as a separate
 * type.  The single repeated block presumably follows on the wire —
 * confirm at the send site. */
157*4882a593Smuzhiyun struct p_wsame {
158*4882a593Smuzhiyun 	struct p_data p_data;
159*4882a593Smuzhiyun 	u32           size;     /* == bio->bi_size */
160*4882a593Smuzhiyun } __packed;
161*4882a593Smuzhiyun 
162*4882a593Smuzhiyun /*
163*4882a593Smuzhiyun  * commands which share a struct:
164*4882a593Smuzhiyun  *  p_block_ack:
165*4882a593Smuzhiyun  *   P_RECV_ACK (proto B), P_WRITE_ACK (proto C),
166*4882a593Smuzhiyun  *   P_SUPERSEDED (proto C, two-primaries conflict detection)
167*4882a593Smuzhiyun  *  p_block_req:
168*4882a593Smuzhiyun  *   P_DATA_REQUEST, P_RS_DATA_REQUEST
169*4882a593Smuzhiyun  */
/* Acknowledgement payload: block_id is echoed back unmodified (see the
 * wire-layout note near the top of this file). */
170*4882a593Smuzhiyun struct p_block_ack {
171*4882a593Smuzhiyun 	u64	    sector;
172*4882a593Smuzhiyun 	u64	    block_id;
173*4882a593Smuzhiyun 	u32	    blksize;
174*4882a593Smuzhiyun 	u32	    seq_num;
175*4882a593Smuzhiyun } __packed;
176*4882a593Smuzhiyun 
/* Request for a data block (P_DATA_REQUEST / P_RS_DATA_REQUEST, per the
 * enum comments): asks the peer for blksize bytes at sector; block_id is
 * opaque to the peer and echoed back in the reply. */
177*4882a593Smuzhiyun struct p_block_req {
178*4882a593Smuzhiyun 	u64 sector;
179*4882a593Smuzhiyun 	u64 block_id;
180*4882a593Smuzhiyun 	u32 blksize;
181*4882a593Smuzhiyun 	u32 pad;	/* to multiple of 8 Byte */
182*4882a593Smuzhiyun } __packed;
183*4882a593Smuzhiyun 
184*4882a593Smuzhiyun /*
185*4882a593Smuzhiyun  * commands with their own struct for additional fields:
186*4882a593Smuzhiyun  *   P_CONNECTION_FEATURES
187*4882a593Smuzhiyun  *   P_BARRIER
188*4882a593Smuzhiyun  *   P_BARRIER_ACK
189*4882a593Smuzhiyun  *   P_SYNC_PARAM
190*4882a593Smuzhiyun  *   ReportParams
191*4882a593Smuzhiyun  */
192*4882a593Smuzhiyun 
/* DRBD_FF_*: per-connection feature flag bits, negotiated at handshake
 * (cf. "agreed_features & DRBD_FF_WSAME" in the o_qlim comment below, and
 * p_connection_features.feature_flags).  Like the DP_* flags, these values
 * are wire protocol and must not be renumbered. */
193*4882a593Smuzhiyun /* supports TRIM/DISCARD on the "wire" protocol */
194*4882a593Smuzhiyun #define DRBD_FF_TRIM 1
195*4882a593Smuzhiyun 
196*4882a593Smuzhiyun /* Detect all-zeros during resync, and rather TRIM/UNMAP/DISCARD those blocks
197*4882a593Smuzhiyun  * instead of fully allocate a supposedly thin volume on initial resync */
198*4882a593Smuzhiyun #define DRBD_FF_THIN_RESYNC 2
199*4882a593Smuzhiyun 
200*4882a593Smuzhiyun /* supports REQ_WRITE_SAME on the "wire" protocol.
201*4882a593Smuzhiyun  * Note: this flag is overloaded,
202*4882a593Smuzhiyun  * its presence also
203*4882a593Smuzhiyun  *   - indicates support for 128 MiB "batch bios",
204*4882a593Smuzhiyun  *     max discard size of 128 MiB
205*4882a593Smuzhiyun  *     instead of 4M before that.
206*4882a593Smuzhiyun  *   - indicates that we exchange additional settings in p_sizes
207*4882a593Smuzhiyun  *     drbd_send_sizes()/receive_sizes()
208*4882a593Smuzhiyun  */
209*4882a593Smuzhiyun #define DRBD_FF_WSAME 4
210*4882a593Smuzhiyun 
211*4882a593Smuzhiyun /* supports REQ_OP_WRITE_ZEROES on the "wire" protocol.
212*4882a593Smuzhiyun  *
213*4882a593Smuzhiyun  * We used to map that to "discard" on the sending side, and if we cannot
214*4882a593Smuzhiyun  * guarantee that discard zeroes data, the receiving side would map discard
215*4882a593Smuzhiyun  * back to zero-out.
216*4882a593Smuzhiyun  *
217*4882a593Smuzhiyun  * With the introduction of REQ_OP_WRITE_ZEROES,
218*4882a593Smuzhiyun  * we started to use that for both WRITE_ZEROES and DISCARDS,
219*4882a593Smuzhiyun  * hoping that WRITE_ZEROES would "do what we want",
220*4882a593Smuzhiyun  * UNMAP if possible, zero-out the rest.
221*4882a593Smuzhiyun  *
222*4882a593Smuzhiyun  * The example scenario is some LVM "thin" backend.
223*4882a593Smuzhiyun  *
224*4882a593Smuzhiyun  * While an un-allocated block on dm-thin reads as zeroes, on a dm-thin
225*4882a593Smuzhiyun  * with "skip_block_zeroing=true", after a partial block write allocated
226*4882a593Smuzhiyun  * that block, that same block may well map "undefined old garbage" from
227*4882a593Smuzhiyun  * the backends on LBAs that have not yet been written to.
228*4882a593Smuzhiyun  *
229*4882a593Smuzhiyun  * If we cannot distinguish between zero-out and discard on the receiving
230*4882a593Smuzhiyun  * side, to avoid "undefined old garbage" to pop up randomly at later times
231*4882a593Smuzhiyun  * on supposedly zero-initialized blocks, we'd need to map all discards to
232*4882a593Smuzhiyun  * zero-out on the receiving side.  But that would potentially do a full
233*4882a593Smuzhiyun  * alloc on thinly provisioned backends, even when the expectation was to
234*4882a593Smuzhiyun  * unmap/trim/discard/de-allocate.
235*4882a593Smuzhiyun  *
236*4882a593Smuzhiyun  * We need to distinguish on the protocol level, whether we need to guarantee
237*4882a593Smuzhiyun  * zeroes (and thus use zero-out, potentially doing the mentioned full-alloc),
238*4882a593Smuzhiyun  * or if we want to put the emphasis on discard, and only do a "best effort
239*4882a593Smuzhiyun  * zeroing" (by "discarding" blocks aligned to discard-granularity, and zeroing
240*4882a593Smuzhiyun  * only potential unaligned head and tail clippings), to at least *try* to
241*4882a593Smuzhiyun  * avoid "false positives" in an online-verify later, hoping that someone
242*4882a593Smuzhiyun  * set skip_block_zeroing=false.
243*4882a593Smuzhiyun  */
244*4882a593Smuzhiyun #define DRBD_FF_WZEROES 8
245*4882a593Smuzhiyun 
246*4882a593Smuzhiyun 
/* Payload of P_CONNECTION_FEATURES, the handshake packet: advertises the
 * supported protocol version range and feature bits (presumably the
 * DRBD_FF_* flags above — confirm at the negotiation code). */
247*4882a593Smuzhiyun struct p_connection_features {
248*4882a593Smuzhiyun 	u32 protocol_min;
249*4882a593Smuzhiyun 	u32 feature_flags;
250*4882a593Smuzhiyun 	u32 protocol_max;
251*4882a593Smuzhiyun 
252*4882a593Smuzhiyun 	/* should be more than enough for future enhancements
253*4882a593Smuzhiyun 	 * for now, feature_flags and the reserved array shall be zero.
254*4882a593Smuzhiyun 	 */
255*4882a593Smuzhiyun 
256*4882a593Smuzhiyun 	u32 _pad;
257*4882a593Smuzhiyun 	u64 reserved[7];
258*4882a593Smuzhiyun } __packed;
259*4882a593Smuzhiyun 
/* Payload of P_BARRIER.  The barrier number is a local handle only and is
 * echoed back by the peer as received (see the wire-layout note above). */
260*4882a593Smuzhiyun struct p_barrier {
261*4882a593Smuzhiyun 	u32 barrier;	/* barrier number _handle_ only */
262*4882a593Smuzhiyun 	u32 pad;	/* to multiple of 8 Byte */
263*4882a593Smuzhiyun } __packed;
264*4882a593Smuzhiyun 
/* Payload of P_BARRIER_ACK: echoes the barrier handle; set_size is
 * presumably the number of requests covered by that barrier/epoch —
 * confirm at the BarrierAck handler. */
265*4882a593Smuzhiyun struct p_barrier_ack {
266*4882a593Smuzhiyun 	u32 barrier;
267*4882a593Smuzhiyun 	u32 set_size;
268*4882a593Smuzhiyun } __packed;
269*4882a593Smuzhiyun 
/* Payload of P_SYNC_PARAM (pre-protocol-88 base form, per the field
 * comment below): resync rate, optionally followed by the verify
 * algorithm name as trailing string data. */
270*4882a593Smuzhiyun struct p_rs_param {
271*4882a593Smuzhiyun 	u32 resync_rate;
272*4882a593Smuzhiyun 
273*4882a593Smuzhiyun 	      /* Since protocol version 88 and higher. */
274*4882a593Smuzhiyun 	char verify_alg[];
275*4882a593Smuzhiyun } __packed;
276*4882a593Smuzhiyun 
/* Protocol-89 form of the sync parameters (cf. P_SYNC_PARAM89 above):
 * fixed-size algorithm name fields, adding csums_alg. */
277*4882a593Smuzhiyun struct p_rs_param_89 {
278*4882a593Smuzhiyun 	u32 resync_rate;
279*4882a593Smuzhiyun 	/* protocol version 89: */
280*4882a593Smuzhiyun 	char verify_alg[SHARED_SECRET_MAX];
281*4882a593Smuzhiyun 	char csums_alg[SHARED_SECRET_MAX];
282*4882a593Smuzhiyun } __packed;
283*4882a593Smuzhiyun 
/* Protocol-95 form: extends p_rs_param_89 with additional c_* tunables
 * (presumably the dynamic resync-rate controller settings — confirm
 * against the sync-parameter handling code). */
284*4882a593Smuzhiyun struct p_rs_param_95 {
285*4882a593Smuzhiyun 	u32 resync_rate;
286*4882a593Smuzhiyun 	char verify_alg[SHARED_SECRET_MAX];
287*4882a593Smuzhiyun 	char csums_alg[SHARED_SECRET_MAX];
288*4882a593Smuzhiyun 	u32 c_plan_ahead;
289*4882a593Smuzhiyun 	u32 c_delay_target;
290*4882a593Smuzhiyun 	u32 c_fill_target;
291*4882a593Smuzhiyun 	u32 c_max_rate;
292*4882a593Smuzhiyun } __packed;
293*4882a593Smuzhiyun 
/* Bit values for p_protocol.conn_flags below (name match — confirm at the
 * P_PROTOCOL send/receive sites). */
294*4882a593Smuzhiyun enum drbd_conn_flags {
295*4882a593Smuzhiyun 	CF_DISCARD_MY_DATA = 1,
296*4882a593Smuzhiyun 	CF_DRY_RUN = 2,
297*4882a593Smuzhiyun };
298*4882a593Smuzhiyun 
/* Payload of P_PROTOCOL: the connection-wide configuration the peers must
 * agree on (replication protocol, split-brain recovery policies, two-
 * primaries mode), plus CF_* flags from enum drbd_conn_flags above. */
299*4882a593Smuzhiyun struct p_protocol {
300*4882a593Smuzhiyun 	u32 protocol;
301*4882a593Smuzhiyun 	u32 after_sb_0p;
302*4882a593Smuzhiyun 	u32 after_sb_1p;
303*4882a593Smuzhiyun 	u32 after_sb_2p;
304*4882a593Smuzhiyun 	u32 conn_flags;
305*4882a593Smuzhiyun 	u32 two_primaries;
306*4882a593Smuzhiyun 
307*4882a593Smuzhiyun 	/* Since protocol version 87 and higher. */
308*4882a593Smuzhiyun 	char integrity_alg[];
309*4882a593Smuzhiyun 
310*4882a593Smuzhiyun } __packed;
311*4882a593Smuzhiyun 
/* Payload of P_UUIDS: the sender's data-generation UUID set
 * (UI_EXTENDED_SIZE is declared elsewhere in the driver). */
312*4882a593Smuzhiyun struct p_uuids {
313*4882a593Smuzhiyun 	u64 uuid[UI_EXTENDED_SIZE];
314*4882a593Smuzhiyun } __packed;
315*4882a593Smuzhiyun 
/* Single-UUID payload; presumably used for P_SYNC_UUID — confirm at the
 * send/receive sites. */
316*4882a593Smuzhiyun struct p_rs_uuid {
317*4882a593Smuzhiyun 	u64	    uuid;
318*4882a593Smuzhiyun } __packed;
319*4882a593Smuzhiyun 
320*4882a593Smuzhiyun /* optional queue_limits if (agreed_features & DRBD_FF_WSAME)
321*4882a593Smuzhiyun  * see also struct queue_limits, as of late 2015 */
322*4882a593Smuzhiyun struct o_qlim {
323*4882a593Smuzhiyun 	/* we don't need it yet, but we may as well communicate it now */
324*4882a593Smuzhiyun 	u32 physical_block_size;
325*4882a593Smuzhiyun 
326*4882a593Smuzhiyun 	/* so the original in struct queue_limits is unsigned short,
327*4882a593Smuzhiyun 	 * but I'd have to put in padding anyways. */
328*4882a593Smuzhiyun 	u32 logical_block_size;
329*4882a593Smuzhiyun 
330*4882a593Smuzhiyun 	/* One incoming bio becomes one DRBD request,
331*4882a593Smuzhiyun 	 * which may be translated to several bio on the receiving side.
332*4882a593Smuzhiyun 	 * We don't need to communicate chunk/boundary/segment ... limits.
333*4882a593Smuzhiyun 	 */
334*4882a593Smuzhiyun 
335*4882a593Smuzhiyun 	/* various IO hints may be useful with "diskless client" setups */
336*4882a593Smuzhiyun 	u32 alignment_offset;
337*4882a593Smuzhiyun 	u32 io_min;
338*4882a593Smuzhiyun 	u32 io_opt;
339*4882a593Smuzhiyun 
340*4882a593Smuzhiyun 	/* We may need to communicate integrity stuff at some point,
341*4882a593Smuzhiyun 	 * but let's not get ahead of ourselves. */
342*4882a593Smuzhiyun 
343*4882a593Smuzhiyun 	/* Backend discard capabilities.
344*4882a593Smuzhiyun 	 * Receiving side uses "blkdev_issue_discard()", no need to communicate
345*4882a593Smuzhiyun 	 * more specifics.  If the backend cannot do discards, the DRBD peer
346*4882a593Smuzhiyun 	 * may fall back to blkdev_issue_zeroout().
347*4882a593Smuzhiyun 	 */
348*4882a593Smuzhiyun 	u8 discard_enabled;
349*4882a593Smuzhiyun 	u8 discard_zeroes_data;
350*4882a593Smuzhiyun 	u8 write_same_capable;
351*4882a593Smuzhiyun 	u8 _pad;	/* explicit pad: the four u8 flags fill a 4-byte unit */
352*4882a593Smuzhiyun } __packed;
353*4882a593Smuzhiyun 
/* Payload of P_SIZES: exchanges disk/device sizes and, since
 * DRBD_FF_WSAME, optional backend queue limits.  Size units are not
 * stated here — presumably sectors; confirm in drbd_send_sizes(). */
354*4882a593Smuzhiyun struct p_sizes {
355*4882a593Smuzhiyun 	u64	    d_size;  /* size of disk */
356*4882a593Smuzhiyun 	u64	    u_size;  /* user requested size */
357*4882a593Smuzhiyun 	u64	    c_size;  /* current exported size */
358*4882a593Smuzhiyun 	u32	    max_bio_size;  /* Maximal size of a BIO */
359*4882a593Smuzhiyun 	u16	    queue_order_type;  /* not yet implemented in DRBD*/
360*4882a593Smuzhiyun 	u16	    dds_flags; /* use enum dds_flags here. */
361*4882a593Smuzhiyun 
362*4882a593Smuzhiyun 	/* optional queue_limits if (agreed_features & DRBD_FF_WSAME) */
363*4882a593Smuzhiyun 	struct o_qlim qlim[];
364*4882a593Smuzhiyun } __packed;
365*4882a593Smuzhiyun 
/* Payload of P_STATE: the sender's state packed into 32 bits — presumably
 * the drbd state encoding defined elsewhere; confirm against drbd_state.h. */
366*4882a593Smuzhiyun struct p_state {
367*4882a593Smuzhiyun 	u32	    state;
368*4882a593Smuzhiyun } __packed;
369*4882a593Smuzhiyun 
/* State-change request payload (P_STATE_CHG_REQ / P_CONN_ST_CHG_REQ, by
 * name): mask selects the state bits to change, val their new values —
 * presumably; confirm in the state-change request handlers. */
370*4882a593Smuzhiyun struct p_req_state {
371*4882a593Smuzhiyun 	u32	    mask;
372*4882a593Smuzhiyun 	u32	    val;
373*4882a593Smuzhiyun } __packed;
374*4882a593Smuzhiyun 
/* Reply to a state-change request (P_STATE_CHG_REPLY /
 * P_CONN_ST_CHG_REPLY, by name): carries the peer's return code. */
375*4882a593Smuzhiyun struct p_req_state_reply {
376*4882a593Smuzhiyun 	u32	    retcode;
377*4882a593Smuzhiyun } __packed;
378*4882a593Smuzhiyun 
/* Legacy parameter-exchange packet — the name suggests the drbd 0.6 era,
 * kept for wire compatibility; confirm whether it is still sent anywhere. */
379*4882a593Smuzhiyun struct p_drbd06_param {
380*4882a593Smuzhiyun 	u64	  size;
381*4882a593Smuzhiyun 	u32	  state;
382*4882a593Smuzhiyun 	u32	  blksize;
383*4882a593Smuzhiyun 	u32	  protocol;
384*4882a593Smuzhiyun 	u32	  version;
385*4882a593Smuzhiyun 	u32	  gen_cnt[5];
386*4882a593Smuzhiyun 	u32	  bit_map_gen[5];
387*4882a593Smuzhiyun } __packed;
388*4882a593Smuzhiyun 
/* Describes a block range (start sector + length in bytes); presumably
 * the payload of P_OUT_OF_SYNC and P_RS_DEALLOCATED — confirm at the
 * send sites. */
389*4882a593Smuzhiyun struct p_block_desc {
390*4882a593Smuzhiyun 	u64 sector;
391*4882a593Smuzhiyun 	u32 blksize;
392*4882a593Smuzhiyun 	u32 pad;	/* to multiple of 8 Byte */
393*4882a593Smuzhiyun } __packed;
394*4882a593Smuzhiyun 
395*4882a593Smuzhiyun /* Valid values for the encoding field.
396*4882a593Smuzhiyun  * Bump proto version when changing this. */
397*4882a593Smuzhiyun enum drbd_bitmap_code {
398*4882a593Smuzhiyun 	/* RLE_VLI_Bytes = 0,
399*4882a593Smuzhiyun 	 * and other bit variants had been defined during
400*4882a593Smuzhiyun 	 * algorithm evaluation. */
401*4882a593Smuzhiyun 	RLE_VLI_Bits = 2,	/* run-length encoding with variable-length integers, bit granularity */
402*4882a593Smuzhiyun };
403*4882a593Smuzhiyun 
/* Payload of P_COMPRESSED_BITMAP: one encoding descriptor byte followed
 * by the encoded bitmap stream. */
404*4882a593Smuzhiyun struct p_compressed_bm {
405*4882a593Smuzhiyun 	/* (encoding & 0x0f): actual encoding, see enum drbd_bitmap_code
406*4882a593Smuzhiyun 	 * (encoding & 0x80): polarity (set/unset) of first runlength
407*4882a593Smuzhiyun 	 * ((encoding >> 4) & 0x07): pad_bits, number of trailing zero bits
408*4882a593Smuzhiyun 	 * used to pad up to head.length bytes
409*4882a593Smuzhiyun 	 */
410*4882a593Smuzhiyun 	u8 encoding;
411*4882a593Smuzhiyun 
412*4882a593Smuzhiyun 	u8 code[];	/* flexible array: the encoded bitmap data (see enum drbd_bitmap_code) */
413*4882a593Smuzhiyun } __packed;
414*4882a593Smuzhiyun 
/* Payload of P_DELAY_PROBE (sent on both sockets, per the enum comment);
 * the "93" presumably marks the protocol version that introduced it —
 * confirm. */
415*4882a593Smuzhiyun struct p_delay_probe93 {
416*4882a593Smuzhiyun 	u32     seq_num; /* sequence number to match the two probe packets */
417*4882a593Smuzhiyun 	u32     offset;  /* usecs the probe got sent after the reference time point */
418*4882a593Smuzhiyun } __packed;
419*4882a593Smuzhiyun 
420*4882a593Smuzhiyun /*
421*4882a593Smuzhiyun  * Bitmap packets need to fit within a single page on the sender and receiver,
422*4882a593Smuzhiyun  * so we are limited to 4 KiB (and not to PAGE_SIZE, which can be bigger).
423*4882a593Smuzhiyun  */
/* i.e. the smallest PAGE_SIZE across supported architectures, per the note above */
424*4882a593Smuzhiyun #define DRBD_SOCKET_BUFFER_SIZE 4096
425*4882a593Smuzhiyun 
426*4882a593Smuzhiyun #endif  /* __DRBD_PROTOCOL_H */
427