/* SPDX-License-Identifier: GPL-2.0 */
#ifndef DRBD_STATE_H
#define DRBD_STATE_H

struct drbd_device;
struct drbd_connection;

/**
 * DOC: DRBD State macros
 *
 * These macros are used to express state changes in easily readable form.
 *
 * The NS macros expand to a mask and a value that can be bit-or'ed onto the
 * current state as soon as the spinlock (req_lock) has been taken.
 *
 * The _NS macros are used for state functions that get called with the
 * spinlock already held. These macros expand directly to the new state value.
 *
 * Besides the basic forms NS() and _NS(), additional _?NS[23] variants are
 * defined to express state changes that affect more than one aspect of the
 * state.
 *
 * E.g. NS2(conn, C_CONNECTED, peer, R_SECONDARY)
 * means that the network connection was established and that the peer
 * is in secondary role.
 */
#define role_MASK R_MASK
#define peer_MASK R_MASK
#define disk_MASK D_MASK
#define pdsk_MASK D_MASK
#define conn_MASK C_MASK
#define susp_MASK 1
#define user_isp_MASK 1
#define aftr_isp_MASK 1
#define susp_nod_MASK 1
#define susp_fen_MASK 1

#define NS(T, S) \
	({ union drbd_state mask; mask.i = 0; mask.T = T##_MASK; mask; }), \
	({ union drbd_state val; val.i = 0; val.T = (S); val; })
#define NS2(T1, S1, T2, S2) \
	({ union drbd_state mask; mask.i = 0; mask.T1 = T1##_MASK; \
	  mask.T2 = T2##_MASK; mask; }), \
	({ union drbd_state val; val.i = 0; val.T1 = (S1); \
	  val.T2 = (S2); val; })
#define NS3(T1, S1, T2, S2, T3, S3) \
	({ union drbd_state mask; mask.i = 0; mask.T1 = T1##_MASK; \
	  mask.T2 = T2##_MASK; mask.T3 = T3##_MASK; mask; }), \
	({ union drbd_state val; val.i = 0; val.T1 = (S1); \
	  val.T2 = (S2); val.T3 = (S3); val; })
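
/*
 * Illustrative sketch (not part of the original header): callers typically
 * pass the NS()/NS2() expansion straight into a state-change request, e.g.
 *
 *	rv = drbd_request_state(device, NS(conn, C_DISCONNECTING));
 *	rv = drbd_request_state(device, NS2(disk, D_FAILED, pdsk, D_UNKNOWN));
 *
 * Each NS*() expands into the (mask, val) argument pair; only the fields
 * named in the macro are set in the mask, so all other state aspects are
 * left untouched by the request.
 */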

#define _NS(D, T, S) \
	D, ({ union drbd_state __ns; __ns = drbd_read_state(D); __ns.T = (S); __ns; })
#define _NS2(D, T1, S1, T2, S2) \
	D, ({ union drbd_state __ns; __ns = drbd_read_state(D); __ns.T1 = (S1); \
	  __ns.T2 = (S2); __ns; })
#define _NS3(D, T1, S1, T2, S2, T3, S3) \
	D, ({ union drbd_state __ns; __ns = drbd_read_state(D); __ns.T1 = (S1); \
	  __ns.T2 = (S2); __ns.T3 = (S3); __ns; })
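
/*
 * Illustrative sketch (assumed call pattern, not part of the original
 * header): the _NS*() forms are for callers that already hold req_lock and
 * hand a complete new state to _drbd_set_state(), e.g.
 *
 *	spin_lock_irq(&device->resource->req_lock);
 *	rv = _drbd_set_state(_NS(device, disk, D_FAILED), CS_HARD, NULL);
 *	spin_unlock_irq(&device->resource->req_lock);
 *
 * _NS(D, T, S) expands to "D, <new state>", i.e. it supplies the first two
 * arguments of such a function.
 */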

enum chg_state_flags {
	CS_HARD          = 1 << 0,
	CS_VERBOSE       = 1 << 1,
	CS_WAIT_COMPLETE = 1 << 2,
	CS_SERIALIZE     = 1 << 3,
	CS_ORDERED       = CS_WAIT_COMPLETE + CS_SERIALIZE,
	CS_LOCAL_ONLY    = 1 << 4, /* Do not consider a device pair wide state change */
	CS_DC_ROLE       = 1 << 5, /* DC = display as connection state change */
	CS_DC_PEER       = 1 << 6,
	CS_DC_CONN       = 1 << 7,
	CS_DC_DISK       = 1 << 8,
	CS_DC_PDSK       = 1 << 9,
	CS_DC_SUSP       = 1 << 10,
	CS_DC_MASK       = CS_DC_ROLE + CS_DC_PEER + CS_DC_CONN + CS_DC_DISK + CS_DC_PDSK,
	CS_IGN_OUTD_FAIL = 1 << 11,

	/* Make sure no meta data IO is in flight, by calling
	 * drbd_md_get_buffer(). Used for graceful detach. */
	CS_INHIBIT_MD_IO = 1 << 12,
};
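
/*
 * Illustrative sketch (not part of the original header): the flags select
 * how a change is validated, reported and serialized, e.g. a verbose,
 * cluster-wide ordered request would be issued as
 *
 *	rv = _drbd_request_state(device, NS(conn, C_DISCONNECTING),
 *				 CS_VERBOSE | CS_ORDERED);
 *
 * drbd_request_state() below combines the same flags with '+', which is
 * equivalent here because the individual bits do not overlap.
 */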

/* drbd_dev_state and drbd_state are different types. This is to stress the
   small difference. There is no suspended flag (.susp), and no "suspended
   while fence handler runs" flag (susp_fen). */
union drbd_dev_state {
	struct {
#if defined(__LITTLE_ENDIAN_BITFIELD)
		unsigned role:2 ;	/* 3/4	 primary/secondary/unknown */
		unsigned peer:2 ;	/* 3/4	 primary/secondary/unknown */
		unsigned conn:5 ;	/* 17/32 cstates */
		unsigned disk:4 ;	/* 8/16	 from D_DISKLESS to D_UP_TO_DATE */
		unsigned pdsk:4 ;	/* 8/16	 from D_DISKLESS to D_UP_TO_DATE */
		unsigned _unused:1 ;
		unsigned aftr_isp:1 ;	/* isp .. imposed sync pause */
		unsigned peer_isp:1 ;
		unsigned user_isp:1 ;
		unsigned _pad:11;	/* 0	 unused */
#elif defined(__BIG_ENDIAN_BITFIELD)
		unsigned _pad:11;
		unsigned user_isp:1 ;
		unsigned peer_isp:1 ;
		unsigned aftr_isp:1 ;	/* isp .. imposed sync pause */
		unsigned _unused:1 ;
		unsigned pdsk:4 ;	/* 8/16	 from D_DISKLESS to D_UP_TO_DATE */
		unsigned disk:4 ;	/* 8/16	 from D_DISKLESS to D_UP_TO_DATE */
		unsigned conn:5 ;	/* 17/32 cstates */
		unsigned peer:2 ;	/* 3/4	 primary/secondary/unknown */
		unsigned role:2 ;	/* 3/4	 primary/secondary/unknown */
# error "this endianness is not supported"
#endif
	};
	unsigned int i;
};
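
/*
 * Illustrative sketch (assumed usage, not part of the original header):
 * because the bitfields share storage with the plain integer 'i', a snapshot
 * of the whole state can be taken in one read and its aspects inspected
 * afterwards:
 *
 *	union drbd_dev_state ds = device->state;
 *
 *	if (ds.role == R_PRIMARY && ds.disk < D_UP_TO_DATE)
 *		...;
 *
 * Comparing or assigning ds.i handles all fields at once.
 */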

extern enum drbd_state_rv drbd_change_state(struct drbd_device *device,
					    enum chg_state_flags f,
					    union drbd_state mask,
					    union drbd_state val);
extern void drbd_force_state(struct drbd_device *, union drbd_state,
			     union drbd_state);
extern enum drbd_state_rv _drbd_request_state(struct drbd_device *,
					      union drbd_state,
					      union drbd_state,
					      enum chg_state_flags);

extern enum drbd_state_rv
_drbd_request_state_holding_state_mutex(struct drbd_device *, union drbd_state,
					union drbd_state, enum chg_state_flags);

extern enum drbd_state_rv _drbd_set_state(struct drbd_device *, union drbd_state,
					  enum chg_state_flags,
					  struct completion *done);
extern void print_st_err(struct drbd_device *, union drbd_state,
			 union drbd_state, enum drbd_state_rv);

enum drbd_state_rv
_conn_request_state(struct drbd_connection *connection, union drbd_state mask, union drbd_state val,
		    enum chg_state_flags flags);

enum drbd_state_rv
conn_request_state(struct drbd_connection *connection, union drbd_state mask, union drbd_state val,
		   enum chg_state_flags flags);

extern void drbd_resume_al(struct drbd_device *device);
extern bool conn_all_vols_unconf(struct drbd_connection *connection);

/**
 * drbd_request_state() - Request a state change
 * @device:	DRBD device.
 * @mask:	mask of state bits to change.
 * @val:	value of new state bits.
 *
 * This is the most graceful way of requesting a state change. It is quite
 * verbose in case the state change is not possible, and all those state
 * changes are globally serialized.
 */
static inline int drbd_request_state(struct drbd_device *device,
				     union drbd_state mask,
				     union drbd_state val)
{
	return _drbd_request_state(device, mask, val, CS_VERBOSE + CS_ORDERED);
}
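
/*
 * Illustrative sketch (not part of the original header): typical use is to
 * request a single-aspect change and check the returned enum drbd_state_rv,
 * e.g. when failing the local disk:
 *
 *	if (drbd_request_state(device, NS(disk, D_FAILED)) < SS_SUCCESS)
 *		handle the refused state change;
 *
 * Anything below SS_SUCCESS signals that the transition was rejected.
 */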

/* for use in adm_detach() (drbd_adm_detach(), drbd_adm_down()) */
int drbd_request_detach_interruptible(struct drbd_device *device);

enum drbd_role conn_highest_role(struct drbd_connection *connection);
enum drbd_role conn_highest_peer(struct drbd_connection *connection);
enum drbd_disk_state conn_highest_disk(struct drbd_connection *connection);
enum drbd_disk_state conn_lowest_disk(struct drbd_connection *connection);
enum drbd_disk_state conn_highest_pdsk(struct drbd_connection *connection);
enum drbd_conns conn_lowest_conn(struct drbd_connection *connection);

#endif