1*4882a593Smuzhiyun /* SPDX-License-Identifier: GPL-2.0-or-later */
2*4882a593Smuzhiyun /* -*- mode: c; c-basic-offset: 8; -*-
3*4882a593Smuzhiyun * vim: noexpandtab sw=8 ts=8 sts=0:
4*4882a593Smuzhiyun *
5*4882a593Smuzhiyun * dlmcommon.h
6*4882a593Smuzhiyun *
7*4882a593Smuzhiyun * Copyright (C) 2004 Oracle. All rights reserved.
8*4882a593Smuzhiyun */
9*4882a593Smuzhiyun
10*4882a593Smuzhiyun #ifndef DLMCOMMON_H
11*4882a593Smuzhiyun #define DLMCOMMON_H
12*4882a593Smuzhiyun
13*4882a593Smuzhiyun #include <linux/kref.h>
14*4882a593Smuzhiyun
15*4882a593Smuzhiyun #define DLM_HB_NODE_DOWN_PRI (0xf000000)
16*4882a593Smuzhiyun #define DLM_HB_NODE_UP_PRI (0x8000000)
17*4882a593Smuzhiyun
18*4882a593Smuzhiyun #define DLM_LOCKID_NAME_MAX 32
19*4882a593Smuzhiyun
20*4882a593Smuzhiyun #define DLM_DOMAIN_NAME_MAX_LEN 255
21*4882a593Smuzhiyun #define DLM_LOCK_RES_OWNER_UNKNOWN O2NM_MAX_NODES
22*4882a593Smuzhiyun #define DLM_THREAD_SHUFFLE_INTERVAL 5 // flush everything every 5 passes
23*4882a593Smuzhiyun #define DLM_THREAD_MS 200 // flush at least every 200 ms
24*4882a593Smuzhiyun
25*4882a593Smuzhiyun #define DLM_HASH_SIZE_DEFAULT (1 << 17)
26*4882a593Smuzhiyun #if DLM_HASH_SIZE_DEFAULT < PAGE_SIZE
27*4882a593Smuzhiyun # define DLM_HASH_PAGES 1
28*4882a593Smuzhiyun #else
29*4882a593Smuzhiyun # define DLM_HASH_PAGES (DLM_HASH_SIZE_DEFAULT / PAGE_SIZE)
30*4882a593Smuzhiyun #endif
31*4882a593Smuzhiyun #define DLM_BUCKETS_PER_PAGE (PAGE_SIZE / sizeof(struct hlist_head))
32*4882a593Smuzhiyun #define DLM_HASH_BUCKETS (DLM_HASH_PAGES * DLM_BUCKETS_PER_PAGE)
33*4882a593Smuzhiyun
34*4882a593Smuzhiyun /* Intended to make it easier for us to switch out hash functions */
35*4882a593Smuzhiyun #define dlm_lockid_hash(_n, _l) full_name_hash(NULL, _n, _l)
36*4882a593Smuzhiyun
/* Type of a master list entry; DLM_MLE_NUM_TYPES sizes per-type counters
 * (see mle_tot_count/mle_cur_count in struct dlm_ctxt). */
enum dlm_mle_type {
	DLM_MLE_BLOCK = 0,
	DLM_MLE_MASTER = 1,
	DLM_MLE_MIGRATION = 2,
	DLM_MLE_NUM_TYPES = 3,
};
43*4882a593Smuzhiyun
/* Tracks an in-progress mastery (or migration) of a lock resource.
 * Hashed by mnamehash into dlm->master_hash; refcounted via mle_refs. */
struct dlm_master_list_entry {
	struct hlist_node master_hash_node;	/* entry in dlm->master_hash */
	struct list_head hb_events;		/* entry in dlm->mle_hb_events */
	struct dlm_ctxt *dlm;			/* owning domain */
	spinlock_t spinlock;
	wait_queue_head_t wq;
	atomic_t woken;
	struct kref mle_refs;			/* lifetime refcount */
	int inuse;
	/* per-node bitmaps, one bit per possible cluster node */
	unsigned long maybe_map[BITS_TO_LONGS(O2NM_MAX_NODES)];
	unsigned long vote_map[BITS_TO_LONGS(O2NM_MAX_NODES)];
	unsigned long response_map[BITS_TO_LONGS(O2NM_MAX_NODES)];
	unsigned long node_map[BITS_TO_LONGS(O2NM_MAX_NODES)];
	u8 master;
	u8 new_master;
	enum dlm_mle_type type;
	struct o2hb_callback_func mle_hb_up;	/* heartbeat up callback */
	struct o2hb_callback_func mle_hb_down;	/* heartbeat down callback */
	struct dlm_lock_resource *mleres;
	unsigned char mname[DLM_LOCKID_NAME_MAX];	/* lock name copy */
	unsigned int mnamelen;
	unsigned int mnamehash;			/* hash of mname */
};
67*4882a593Smuzhiyun
/* Kind of asynchronous notification delivered for a lock. */
enum dlm_ast_type {
	DLM_AST = 0,
	DLM_BAST = 1,
	DLM_ASTUNLOCK = 2,
};
73*4882a593Smuzhiyun
74*4882a593Smuzhiyun
75*4882a593Smuzhiyun #define LKM_VALID_FLAGS (LKM_VALBLK | LKM_CONVERT | LKM_UNLOCK | \
76*4882a593Smuzhiyun LKM_CANCEL | LKM_INVVALBLK | LKM_FORCE | \
77*4882a593Smuzhiyun LKM_RECOVERY | LKM_LOCAL | LKM_NOQUEUE)
78*4882a593Smuzhiyun
79*4882a593Smuzhiyun #define DLM_RECOVERY_LOCK_NAME "$RECOVERY"
80*4882a593Smuzhiyun #define DLM_RECOVERY_LOCK_NAME_LEN 9
81*4882a593Smuzhiyun
dlm_is_recovery_lock(const char * lock_name,int name_len)82*4882a593Smuzhiyun static inline int dlm_is_recovery_lock(const char *lock_name, int name_len)
83*4882a593Smuzhiyun {
84*4882a593Smuzhiyun if (name_len == DLM_RECOVERY_LOCK_NAME_LEN &&
85*4882a593Smuzhiyun memcmp(lock_name, DLM_RECOVERY_LOCK_NAME, name_len)==0)
86*4882a593Smuzhiyun return 1;
87*4882a593Smuzhiyun return 0;
88*4882a593Smuzhiyun }
89*4882a593Smuzhiyun
90*4882a593Smuzhiyun #define DLM_RECO_STATE_ACTIVE 0x0001
91*4882a593Smuzhiyun #define DLM_RECO_STATE_FINALIZE 0x0002
92*4882a593Smuzhiyun
/* Per-domain recovery state (dlm->reco). */
struct dlm_recovery_ctxt
{
	struct list_head resources;	/* lockres list under recovery */
	struct list_head node_data;	/* list of struct dlm_reco_node_data */
	u8 new_master;			/* node mastering this recovery */
	u8 dead_node;			/* node being recovered */
	u16 state;			/* DLM_RECO_STATE_* flags */
	unsigned long node_map[BITS_TO_LONGS(O2NM_MAX_NODES)];
	wait_queue_head_t event;
};
103*4882a593Smuzhiyun
/* Lifecycle state of a dlm domain (protected by dlm_domain_lock,
 * see note at dlm_state in struct dlm_ctxt). */
enum dlm_ctxt_state {
	DLM_CTXT_NEW = 0,
	DLM_CTXT_JOINED = 1,
	DLM_CTXT_IN_SHUTDOWN = 2,
	DLM_CTXT_LEAVING = 3,
};
110*4882a593Smuzhiyun
/* One DLM domain: all state for a named lock space on this node. */
struct dlm_ctxt
{
	struct list_head list;
	struct hlist_head **lockres_hash;	/* paged hash, see dlm_lockres_hash() */
	struct list_head dirty_list;
	struct list_head purge_list;
	struct list_head pending_asts;
	struct list_head pending_basts;
	struct list_head tracking_list;
	unsigned int purge_count;
	spinlock_t spinlock;
	spinlock_t ast_lock;
	spinlock_t track_lock;
	char *name;			/* domain name */
	u8 node_num;			/* this node's number */
	u32 key;
	u8 joining_node;		/* node currently joining, see __dlm_set_joining_node() */
	u8 migrate_done; /* set to 1 means node has migrated all lock resources */
	wait_queue_head_t dlm_join_events;
	/* per-node bitmaps, one bit per possible cluster node */
	unsigned long live_nodes_map[BITS_TO_LONGS(O2NM_MAX_NODES)];
	unsigned long domain_map[BITS_TO_LONGS(O2NM_MAX_NODES)];
	unsigned long exit_domain_map[BITS_TO_LONGS(O2NM_MAX_NODES)];
	unsigned long recovery_map[BITS_TO_LONGS(O2NM_MAX_NODES)];
	struct dlm_recovery_ctxt reco;
	spinlock_t master_lock;
	struct hlist_head **master_hash;	/* paged hash, see dlm_master_hash() */
	struct list_head mle_hb_events;

	/* these give a really vague idea of the system load */
	atomic_t mle_tot_count[DLM_MLE_NUM_TYPES];
	atomic_t mle_cur_count[DLM_MLE_NUM_TYPES];
	atomic_t res_tot_count;
	atomic_t res_cur_count;

	struct dentry *dlm_debugfs_subroot;

	/* NOTE: Next three are protected by dlm_domain_lock */
	struct kref dlm_refs;
	enum dlm_ctxt_state dlm_state;
	unsigned int num_joins;

	struct o2hb_callback_func dlm_hb_up;
	struct o2hb_callback_func dlm_hb_down;
	struct task_struct *dlm_thread_task;
	struct task_struct *dlm_reco_thread_task;
	struct workqueue_struct *dlm_worker;
	wait_queue_head_t dlm_thread_wq;
	wait_queue_head_t dlm_reco_thread_wq;
	wait_queue_head_t ast_wq;
	wait_queue_head_t migration_wq;

	struct work_struct dispatched_work;
	struct list_head work_list;	/* struct dlm_work_item queue */
	spinlock_t work_lock;		/* protects work_list */
	struct list_head dlm_domain_handlers;
	struct list_head dlm_eviction_callbacks;

	/* The filesystem specifies this at domain registration.  We
	 * cache it here to know what to tell other nodes. */
	struct dlm_protocol_version fs_locking_proto;
	/* This is the inter-dlm communication version */
	struct dlm_protocol_version dlm_locking_proto;
};
174*4882a593Smuzhiyun
dlm_lockres_hash(struct dlm_ctxt * dlm,unsigned i)175*4882a593Smuzhiyun static inline struct hlist_head *dlm_lockres_hash(struct dlm_ctxt *dlm, unsigned i)
176*4882a593Smuzhiyun {
177*4882a593Smuzhiyun return dlm->lockres_hash[(i / DLM_BUCKETS_PER_PAGE) % DLM_HASH_PAGES] + (i % DLM_BUCKETS_PER_PAGE);
178*4882a593Smuzhiyun }
179*4882a593Smuzhiyun
dlm_master_hash(struct dlm_ctxt * dlm,unsigned i)180*4882a593Smuzhiyun static inline struct hlist_head *dlm_master_hash(struct dlm_ctxt *dlm,
181*4882a593Smuzhiyun unsigned i)
182*4882a593Smuzhiyun {
183*4882a593Smuzhiyun return dlm->master_hash[(i / DLM_BUCKETS_PER_PAGE) % DLM_HASH_PAGES] +
184*4882a593Smuzhiyun (i % DLM_BUCKETS_PER_PAGE);
185*4882a593Smuzhiyun }
186*4882a593Smuzhiyun
187*4882a593Smuzhiyun /* these keventd work queue items are for less-frequently
188*4882a593Smuzhiyun * called functions that cannot be directly called from the
189*4882a593Smuzhiyun * net message handlers for some reason, usually because
190*4882a593Smuzhiyun * they need to send net messages of their own. */
191*4882a593Smuzhiyun void dlm_dispatch_work(struct work_struct *work);
192*4882a593Smuzhiyun
193*4882a593Smuzhiyun struct dlm_lock_resource;
194*4882a593Smuzhiyun struct dlm_work_item;
195*4882a593Smuzhiyun
196*4882a593Smuzhiyun typedef void (dlm_workfunc_t)(struct dlm_work_item *, void *);
197*4882a593Smuzhiyun
/* Work-item payload: request all locks of a dead node for recovery. */
struct dlm_request_all_locks_priv
{
	u8 reco_master;		/* node mastering the recovery */
	u8 dead_node;		/* node whose locks are requested */
};
203*4882a593Smuzhiyun
/* Work-item payload: process a migrated lock resource. */
struct dlm_mig_lockres_priv
{
	struct dlm_lock_resource *lockres;
	u8 real_master;
	u8 extra_ref;
};
210*4882a593Smuzhiyun
/* Work-item payload: send an assert-master message for a lockres. */
struct dlm_assert_master_priv
{
	struct dlm_lock_resource *lockres;
	u8 request_from;		/* node that triggered the assert */
	u32 flags;			/* DLM_ASSERT_MASTER_* flags */
	unsigned ignore_higher:1;
};
218*4882a593Smuzhiyun
/* Work-item payload: drop a node's reference on a lockres. */
struct dlm_deref_lockres_priv
{
	struct dlm_lock_resource *deref_res;
	u8 deref_node;			/* node whose ref is dropped */
};
224*4882a593Smuzhiyun
/* A deferred unit of work queued on dlm->work_list and run by the
 * dlm worker (see dlm_dispatch_work); u selects the func-specific payload. */
struct dlm_work_item
{
	struct list_head list;		/* entry in dlm->work_list */
	dlm_workfunc_t *func;		/* handler to invoke */
	struct dlm_ctxt *dlm;		/* domain, grabbed by the queuer */
	void *data;			/* opaque extra argument for func */
	union {
		struct dlm_request_all_locks_priv ral;
		struct dlm_mig_lockres_priv ml;
		struct dlm_assert_master_priv am;
		struct dlm_deref_lockres_priv dl;
	} u;
};
238*4882a593Smuzhiyun
/*
 * Zero a work item and fill in its handler, payload pointer and owning
 * domain.  The caller must already hold a dlm_grab reference on dlm.
 */
static inline void dlm_init_work_item(struct dlm_ctxt *dlm,
				      struct dlm_work_item *i,
				      dlm_workfunc_t *f, void *data)
{
	memset(i, 0, sizeof(*i));
	INIT_LIST_HEAD(&i->list);
	i->dlm = dlm;	/* must have already done a dlm_grab on this! */
	i->func = f;
	i->data = data;
}
249*4882a593Smuzhiyun
250*4882a593Smuzhiyun
251*4882a593Smuzhiyun
/* Record @node as the node currently joining this domain and wake anyone
 * waiting on dlm_join_events.  Caller must hold dlm->spinlock. */
static inline void __dlm_set_joining_node(struct dlm_ctxt *dlm,
					  u8 node)
{
	assert_spin_locked(&dlm->spinlock);

	dlm->joining_node = node;
	wake_up(&dlm->dlm_join_events);
}
260*4882a593Smuzhiyun
261*4882a593Smuzhiyun #define DLM_LOCK_RES_UNINITED 0x00000001
262*4882a593Smuzhiyun #define DLM_LOCK_RES_RECOVERING 0x00000002
263*4882a593Smuzhiyun #define DLM_LOCK_RES_READY 0x00000004
264*4882a593Smuzhiyun #define DLM_LOCK_RES_DIRTY 0x00000008
265*4882a593Smuzhiyun #define DLM_LOCK_RES_IN_PROGRESS 0x00000010
266*4882a593Smuzhiyun #define DLM_LOCK_RES_MIGRATING 0x00000020
267*4882a593Smuzhiyun #define DLM_LOCK_RES_DROPPING_REF 0x00000040
268*4882a593Smuzhiyun #define DLM_LOCK_RES_BLOCK_DIRTY 0x00001000
269*4882a593Smuzhiyun #define DLM_LOCK_RES_SETREF_INPROG 0x00002000
270*4882a593Smuzhiyun #define DLM_LOCK_RES_RECOVERY_WAITING 0x00004000
271*4882a593Smuzhiyun
272*4882a593Smuzhiyun /* max milliseconds to wait to sync up a network failure with a node death */
273*4882a593Smuzhiyun #define DLM_NODE_DEATH_WAIT_MAX (5 * 1000)
274*4882a593Smuzhiyun
275*4882a593Smuzhiyun #define DLM_PURGE_INTERVAL_MS (8 * 1000)
276*4882a593Smuzhiyun
/* One named lock resource: its name, owner, state flags and the lists
 * of locks held/queued against it. */
struct dlm_lock_resource
{
	/* WARNING: Please see the comment in dlm_init_lockres before
	 * adding fields here. */
	struct hlist_node hash_node;	/* entry in dlm->lockres_hash */
	struct qstr lockname;
	struct kref refs;

	/*
	 * Please keep granted, converting, and blocked in this order,
	 * as some funcs want to iterate over all lists.
	 *
	 * All four lists are protected by the hash's reference.
	 */
	struct list_head granted;
	struct list_head converting;
	struct list_head blocked;
	struct list_head purge;

	/*
	 * These two lists require you to hold an additional reference
	 * while they are on the list.
	 */
	struct list_head dirty;
	struct list_head recovering; // dlm_recovery_ctxt.resources list

	/* Added during init and removed during release */
	struct list_head tracking;	/* dlm->tracking_list */

	/* unused lock resources have their last_used stamped and are
	 * put on a list for the dlm thread to run. */
	unsigned long last_used;

	struct dlm_ctxt *dlm;

	unsigned migration_pending:1;
	atomic_t asts_reserved;
	spinlock_t spinlock;
	wait_queue_head_t wq;
	u8 owner;              //node which owns the lock resource, or unknown
	u16 state;		/* DLM_LOCK_RES_* flags */
	char lvb[DLM_LVB_LEN];	/* lock value block */
	unsigned int inflight_locks;
	unsigned int inflight_assert_workers;
	unsigned long refmap[BITS_TO_LONGS(O2NM_MAX_NODES)];	/* nodes holding refs */
};
323*4882a593Smuzhiyun
/* Wire/in-memory form of a single lock, 16 bytes so many fit in one
 * migration message (see DLM_MAX_MIGRATABLE_LOCKS). */
struct dlm_migratable_lock
{
	__be64 cookie;

	/* these 3 are just padding for the in-memory structure, but
	 * list and flags are actually used when sent over the wire */
	__be16 pad1;
	u8 list;               // 0=granted, 1=converting, 2=blocked
	u8 flags;

	s8 type;		/* lock level */
	s8 convert_type;	/* requested conversion level */
	s8 highest_blocked;
	u8 node;		/* node holding the lock */
};  // 16 bytes
339*4882a593Smuzhiyun
/* One lock instance on a lock resource; ml holds the migratable core. */
struct dlm_lock
{
	struct dlm_migratable_lock ml;

	struct list_head list;		/* entry on one of the lockres queues */
	struct list_head ast_list;	/* entry on dlm->pending_asts */
	struct list_head bast_list;	/* entry on dlm->pending_basts */
	struct dlm_lock_resource *lockres;
	spinlock_t spinlock;
	struct kref lock_refs;

	// ast and bast must be callable while holding a spinlock!
	dlm_astlockfunc_t *ast;
	dlm_bastlockfunc_t *bast;
	void *astdata;
	struct dlm_lockstatus *lksb;
	/* one-bit pending/ownership flags */
	unsigned ast_pending:1,
		 bast_pending:1,
		 convert_pending:1,
		 lock_pending:1,
		 cancel_pending:1,
		 unlock_pending:1,
		 lksb_kernel_allocated:1;
};
364*4882a593Smuzhiyun
/* Index of the three lock queues on a lockres; order matches the
 * granted/converting/blocked fields of struct dlm_lock_resource. */
enum dlm_lockres_list {
	DLM_GRANTED_LIST = 0,
	DLM_CONVERTING_LIST = 1,
	DLM_BLOCKED_LIST = 2,
};
370*4882a593Smuzhiyun
dlm_lvb_is_empty(char * lvb)371*4882a593Smuzhiyun static inline int dlm_lvb_is_empty(char *lvb)
372*4882a593Smuzhiyun {
373*4882a593Smuzhiyun int i;
374*4882a593Smuzhiyun for (i=0; i<DLM_LVB_LEN; i++)
375*4882a593Smuzhiyun if (lvb[i])
376*4882a593Smuzhiyun return 0;
377*4882a593Smuzhiyun return 1;
378*4882a593Smuzhiyun }
379*4882a593Smuzhiyun
dlm_list_in_text(enum dlm_lockres_list idx)380*4882a593Smuzhiyun static inline char *dlm_list_in_text(enum dlm_lockres_list idx)
381*4882a593Smuzhiyun {
382*4882a593Smuzhiyun if (idx == DLM_GRANTED_LIST)
383*4882a593Smuzhiyun return "granted";
384*4882a593Smuzhiyun else if (idx == DLM_CONVERTING_LIST)
385*4882a593Smuzhiyun return "converting";
386*4882a593Smuzhiyun else if (idx == DLM_BLOCKED_LIST)
387*4882a593Smuzhiyun return "blocked";
388*4882a593Smuzhiyun else
389*4882a593Smuzhiyun return "unknown";
390*4882a593Smuzhiyun }
391*4882a593Smuzhiyun
392*4882a593Smuzhiyun static inline struct list_head *
dlm_list_idx_to_ptr(struct dlm_lock_resource * res,enum dlm_lockres_list idx)393*4882a593Smuzhiyun dlm_list_idx_to_ptr(struct dlm_lock_resource *res, enum dlm_lockres_list idx)
394*4882a593Smuzhiyun {
395*4882a593Smuzhiyun struct list_head *ret = NULL;
396*4882a593Smuzhiyun if (idx == DLM_GRANTED_LIST)
397*4882a593Smuzhiyun ret = &res->granted;
398*4882a593Smuzhiyun else if (idx == DLM_CONVERTING_LIST)
399*4882a593Smuzhiyun ret = &res->converting;
400*4882a593Smuzhiyun else if (idx == DLM_BLOCKED_LIST)
401*4882a593Smuzhiyun ret = &res->blocked;
402*4882a593Smuzhiyun else
403*4882a593Smuzhiyun BUG();
404*4882a593Smuzhiyun return ret;
405*4882a593Smuzhiyun }
406*4882a593Smuzhiyun
407*4882a593Smuzhiyun
408*4882a593Smuzhiyun
409*4882a593Smuzhiyun
/* Cursor for iterating the set bits (nodes) of a node bitmap. */
struct dlm_node_iter
{
	unsigned long node_map[BITS_TO_LONGS(O2NM_MAX_NODES)];
	int curnode;		/* last node number returned */
};
415*4882a593Smuzhiyun
416*4882a593Smuzhiyun
/* Network message type codes for inter-node DLM traffic.  Values are on
 * the wire; never renumber or reuse (note DLM_UNUSED_MSG1). */
enum {
	DLM_MASTER_REQUEST_MSG		= 500,
	DLM_UNUSED_MSG1			= 501,
	DLM_ASSERT_MASTER_MSG		= 502,
	DLM_CREATE_LOCK_MSG		= 503,
	DLM_CONVERT_LOCK_MSG		= 504,
	DLM_PROXY_AST_MSG		= 505,
	DLM_UNLOCK_LOCK_MSG		= 506,
	DLM_DEREF_LOCKRES_MSG		= 507,
	DLM_MIGRATE_REQUEST_MSG		= 508,
	DLM_MIG_LOCKRES_MSG		= 509,
	DLM_QUERY_JOIN_MSG		= 510,
	DLM_ASSERT_JOINED_MSG		= 511,
	DLM_CANCEL_JOIN_MSG		= 512,
	DLM_EXIT_DOMAIN_MSG		= 513,
	DLM_MASTER_REQUERY_MSG		= 514,
	DLM_LOCK_REQUEST_MSG		= 515,
	DLM_RECO_DATA_DONE_MSG		= 516,
	DLM_BEGIN_RECO_MSG		= 517,
	DLM_FINALIZE_RECO_MSG		= 518,
	DLM_QUERY_REGION		= 519,
	DLM_QUERY_NODEINFO		= 520,
	DLM_BEGIN_EXIT_DOMAIN_MSG	= 521,
	DLM_DEREF_LOCKRES_DONE		= 522,
};
442*4882a593Smuzhiyun
/* Per-node progress record during recovery (dlm->reco.node_data list). */
struct dlm_reco_node_data
{
	int state;		/* DLM_RECO_NODE_DATA_* */
	u8 node_num;
	struct list_head list;
};
449*4882a593Smuzhiyun
/* States for dlm_reco_node_data.state as recovery progresses. */
enum {
	DLM_RECO_NODE_DATA_DEAD = -1,
	DLM_RECO_NODE_DATA_INIT = 0,
	DLM_RECO_NODE_DATA_REQUESTING = 1,
	DLM_RECO_NODE_DATA_REQUESTED = 2,
	DLM_RECO_NODE_DATA_RECEIVING = 3,
	DLM_RECO_NODE_DATA_DONE = 4,
	DLM_RECO_NODE_DATA_FINALIZE_SENT = 5,
};
459*4882a593Smuzhiyun
460*4882a593Smuzhiyun
/* Possible replies to a master request. */
enum {
	DLM_MASTER_RESP_NO = 0,
	DLM_MASTER_RESP_YES = 1,
	DLM_MASTER_RESP_MAYBE = 2,
	DLM_MASTER_RESP_ERROR = 3,
};
467*4882a593Smuzhiyun
468*4882a593Smuzhiyun
/* Wire format of DLM_MASTER_REQUEST_MSG. */
struct dlm_master_request
{
	u8 node_idx;		/* sender's node number */
	u8 namelen;		/* valid bytes in name[] */
	__be16 pad1;
	__be32 flags;

	u8 name[O2NM_MAX_NAME_LEN];
};
478*4882a593Smuzhiyun
479*4882a593Smuzhiyun #define DLM_ASSERT_RESPONSE_REASSERT 0x00000001
480*4882a593Smuzhiyun #define DLM_ASSERT_RESPONSE_MASTERY_REF 0x00000002
481*4882a593Smuzhiyun
482*4882a593Smuzhiyun #define DLM_ASSERT_MASTER_MLE_CLEANUP 0x00000001
483*4882a593Smuzhiyun #define DLM_ASSERT_MASTER_REQUERY 0x00000002
484*4882a593Smuzhiyun #define DLM_ASSERT_MASTER_FINISH_MIGRATION 0x00000004
/* Wire format of DLM_ASSERT_MASTER_MSG. */
struct dlm_assert_master
{
	u8 node_idx;		/* sender's node number */
	u8 namelen;		/* valid bytes in name[] */
	__be16 pad1;
	__be32 flags;		/* DLM_ASSERT_MASTER_* flags */

	u8 name[O2NM_MAX_NAME_LEN];
};
494*4882a593Smuzhiyun
495*4882a593Smuzhiyun #define DLM_MIGRATE_RESPONSE_MASTERY_REF 0x00000001
496*4882a593Smuzhiyun
/* Wire format of DLM_MIGRATE_REQUEST_MSG. */
struct dlm_migrate_request
{
	u8 master;		/* current master of the lockres */
	u8 new_master;		/* intended new master */
	u8 namelen;		/* valid bytes in name[] */
	u8 pad1;
	__be32 pad2;
	u8 name[O2NM_MAX_NAME_LEN];
};
506*4882a593Smuzhiyun
/* Wire format of DLM_MASTER_REQUERY_MSG. */
struct dlm_master_requery
{
	u8 pad1;
	u8 pad2;
	u8 node_idx;		/* sender's node number */
	u8 namelen;		/* valid bytes in name[] */
	__be32 pad3;
	u8 name[O2NM_MAX_NAME_LEN];
};
516*4882a593Smuzhiyun
517*4882a593Smuzhiyun #define DLM_MRES_RECOVERY 0x01
518*4882a593Smuzhiyun #define DLM_MRES_MIGRATION 0x02
519*4882a593Smuzhiyun #define DLM_MRES_ALL_DONE 0x04
520*4882a593Smuzhiyun
521*4882a593Smuzhiyun /*
522*4882a593Smuzhiyun * We would like to get one whole lockres into a single network
523*4882a593Smuzhiyun * message whenever possible. Generally speaking, there will be
524*4882a593Smuzhiyun * at most one dlm_lock on a lockres for each node in the cluster,
525*4882a593Smuzhiyun * plus (infrequently) any additional locks coming in from userdlm.
526*4882a593Smuzhiyun *
527*4882a593Smuzhiyun * struct _dlm_lockres_page
528*4882a593Smuzhiyun * {
529*4882a593Smuzhiyun * dlm_migratable_lockres mres;
530*4882a593Smuzhiyun * dlm_migratable_lock ml[DLM_MAX_MIGRATABLE_LOCKS];
531*4882a593Smuzhiyun * u8 pad[DLM_MIG_LOCKRES_RESERVED];
532*4882a593Smuzhiyun * };
533*4882a593Smuzhiyun *
534*4882a593Smuzhiyun * from ../cluster/tcp.h
535*4882a593Smuzhiyun * O2NET_MAX_PAYLOAD_BYTES (4096 - sizeof(net_msg))
536*4882a593Smuzhiyun * (roughly 4080 bytes)
537*4882a593Smuzhiyun * and sizeof(dlm_migratable_lockres) = 112 bytes
538*4882a593Smuzhiyun * and sizeof(dlm_migratable_lock) = 16 bytes
539*4882a593Smuzhiyun *
540*4882a593Smuzhiyun * Choosing DLM_MAX_MIGRATABLE_LOCKS=240 and
541*4882a593Smuzhiyun * DLM_MIG_LOCKRES_RESERVED=128 means we have this:
542*4882a593Smuzhiyun *
543*4882a593Smuzhiyun * (DLM_MAX_MIGRATABLE_LOCKS * sizeof(dlm_migratable_lock)) +
544*4882a593Smuzhiyun * sizeof(dlm_migratable_lockres) + DLM_MIG_LOCKRES_RESERVED =
545*4882a593Smuzhiyun * NET_MAX_PAYLOAD_BYTES
546*4882a593Smuzhiyun * (240 * 16) + 112 + 128 = 4080
547*4882a593Smuzhiyun *
548*4882a593Smuzhiyun * So a lockres would need more than 240 locks before it would
549*4882a593Smuzhiyun * use more than one network packet to recover. Not too bad.
550*4882a593Smuzhiyun */
551*4882a593Smuzhiyun #define DLM_MAX_MIGRATABLE_LOCKS 240
552*4882a593Smuzhiyun
/* Wire format of DLM_MIG_LOCKRES_MSG: a lockres plus up to
 * DLM_MAX_MIGRATABLE_LOCKS trailing locks (see sizing comment above). */
struct dlm_migratable_lockres
{
	u8 master;
	u8 lockname_len;
	u8 num_locks;    // locks sent in this structure
	u8 flags;	 /* DLM_MRES_* flags */
	__be32 total_locks; // locks to be sent for this migration cookie
	__be64 mig_cookie;  // cookie for this lockres migration
			 // or zero if not needed
	// 16 bytes
	u8 lockname[DLM_LOCKID_NAME_MAX];
	// 48 bytes
	u8 lvb[DLM_LVB_LEN];
	// 112 bytes
	struct dlm_migratable_lock ml[];  // 16 bytes each, begins at byte 112
};
569*4882a593Smuzhiyun #define DLM_MIG_LOCKRES_MAX_LEN \
570*4882a593Smuzhiyun (sizeof(struct dlm_migratable_lockres) + \
571*4882a593Smuzhiyun (sizeof(struct dlm_migratable_lock) * \
572*4882a593Smuzhiyun DLM_MAX_MIGRATABLE_LOCKS) )
573*4882a593Smuzhiyun
574*4882a593Smuzhiyun /* from above, 128 bytes
575*4882a593Smuzhiyun * for some undetermined future use */
576*4882a593Smuzhiyun #define DLM_MIG_LOCKRES_RESERVED (O2NET_MAX_PAYLOAD_BYTES - \
577*4882a593Smuzhiyun DLM_MIG_LOCKRES_MAX_LEN)
578*4882a593Smuzhiyun
/* Wire format of DLM_CREATE_LOCK_MSG. */
struct dlm_create_lock
{
	__be64 cookie;		/* lock identity */

	__be32 flags;
	u8 pad1;
	u8 node_idx;		/* sender's node number */
	s8 requested_type;	/* requested lock level */
	u8 namelen;		/* valid bytes in name[] */

	u8 name[O2NM_MAX_NAME_LEN];
};
591*4882a593Smuzhiyun
/* Wire format of DLM_CONVERT_LOCK_MSG; an LVB may trail the fixed part
 * (see DLM_CONVERT_LOCK_MAX_LEN). */
struct dlm_convert_lock
{
	__be64 cookie;		/* lock identity */

	__be32 flags;
	u8 pad1;
	u8 node_idx;		/* sender's node number */
	s8 requested_type;	/* requested lock level */
	u8 namelen;		/* valid bytes in name[] */

	u8 name[O2NM_MAX_NAME_LEN];

	s8 lvb[];		/* optional trailing lock value block */
};
606*4882a593Smuzhiyun #define DLM_CONVERT_LOCK_MAX_LEN (sizeof(struct dlm_convert_lock)+DLM_LVB_LEN)
607*4882a593Smuzhiyun
/* Wire format of DLM_UNLOCK_LOCK_MSG; an LVB may trail the fixed part
 * (see DLM_UNLOCK_LOCK_MAX_LEN). */
struct dlm_unlock_lock
{
	__be64 cookie;		/* lock identity */

	__be32 flags;
	__be16 pad1;
	u8 node_idx;		/* sender's node number */
	u8 namelen;		/* valid bytes in name[] */

	u8 name[O2NM_MAX_NAME_LEN];

	s8 lvb[];		/* optional trailing lock value block */
};
621*4882a593Smuzhiyun #define DLM_UNLOCK_LOCK_MAX_LEN (sizeof(struct dlm_unlock_lock)+DLM_LVB_LEN)
622*4882a593Smuzhiyun
/* Wire format of DLM_PROXY_AST_MSG; an LVB may trail the fixed part
 * (see DLM_PROXY_AST_MAX_LEN). */
struct dlm_proxy_ast
{
	__be64 cookie;		/* lock identity */

	__be32 flags;
	u8 node_idx;		/* sender's node number */
	u8 type;		/* enum dlm_ast_type */
	u8 blocked_type;
	u8 namelen;		/* valid bytes in name[] */

	u8 name[O2NM_MAX_NAME_LEN];

	s8 lvb[];		/* optional trailing lock value block */
};
637*4882a593Smuzhiyun #define DLM_PROXY_AST_MAX_LEN (sizeof(struct dlm_proxy_ast)+DLM_LVB_LEN)
638*4882a593Smuzhiyun
639*4882a593Smuzhiyun #define DLM_MOD_KEY (0x666c6172)
/* Response codes for a join query (dlm_query_join_packet.code). */
enum dlm_query_join_response_code {
	JOIN_DISALLOW = 0,
	JOIN_OK = 1,
	JOIN_OK_NO_MAP = 2,
	JOIN_PROTOCOL_MISMATCH = 3,
};
646*4882a593Smuzhiyun
/* Unpacked form of the join-query response (see union below). */
struct dlm_query_join_packet {
	u8 code;	/* Response code.  dlm_minor and fs_minor
			   are only valid if this is JOIN_OK */
	u8 dlm_minor;	/* The minor version of the protocol the
			   dlm is speaking. */
	u8 fs_minor;	/* The minor version of the protocol the
			   filesystem is speaking. */
	u8 reserved;
};
656*4882a593Smuzhiyun
/* Join-query response travels as one big-endian 32-bit value;
 * the union gives structured access to its packed fields. */
union dlm_query_join_response {
	__be32 intval;
	struct dlm_query_join_packet packet;
};
661*4882a593Smuzhiyun
/* Wire format of DLM_LOCK_REQUEST_MSG. */
struct dlm_lock_request
{
	u8 node_idx;		/* sender's node number */
	u8 dead_node;		/* node being recovered */
	__be16 pad1;
	__be32 pad2;
};
669*4882a593Smuzhiyun
/* Wire format of DLM_RECO_DATA_DONE_MSG. */
struct dlm_reco_data_done
{
	u8 node_idx;		/* sender's node number */
	u8 dead_node;		/* node being recovered */
	__be16 pad1;
	__be32 pad2;

	/* unused for now */
	/* eventually we can use this to attempt
	 * lvb recovery based on each node's info */
	u8 reco_lvb[DLM_LVB_LEN];
};
682*4882a593Smuzhiyun
/* Wire format of DLM_BEGIN_RECO_MSG. */
struct dlm_begin_reco
{
	u8 node_idx;		/* sender's node number */
	u8 dead_node;		/* node being recovered */
	__be16 pad1;
	__be32 pad2;
};
690*4882a593Smuzhiyun
/* Wire format of DLM_QUERY_JOIN_MSG. */
struct dlm_query_join_request
{
	u8 node_idx;		/* joining node's number */
	u8 pad1[2];
	u8 name_len;		/* valid bytes in domain[] */
	struct dlm_protocol_version dlm_proto;	/* dlm wire protocol offered */
	struct dlm_protocol_version fs_proto;	/* fs locking protocol offered */
	u8 domain[O2NM_MAX_NAME_LEN];
	u8 node_map[BITS_TO_BYTES(O2NM_MAX_NODES)];
};
701*4882a593Smuzhiyun
/* Wire format of DLM_ASSERT_JOINED_MSG. */
struct dlm_assert_joined
{
	u8 node_idx;		/* sender's node number */
	u8 pad1[2];
	u8 name_len;		/* valid bytes in domain[] */
	u8 domain[O2NM_MAX_NAME_LEN];
};
709*4882a593Smuzhiyun
/* Wire format of DLM_CANCEL_JOIN_MSG. */
struct dlm_cancel_join
{
	u8 node_idx;		/* sender's node number */
	u8 pad1[2];
	u8 name_len;		/* valid bytes in domain[] */
	u8 domain[O2NM_MAX_NAME_LEN];
};
717*4882a593Smuzhiyun
/* Wire format of DLM_QUERY_REGION. */
struct dlm_query_region {
	u8 qr_node;		/* sender's node number */
	u8 qr_numregions;	/* regions used in qr_regions[] */
	u8 qr_namelen;		/* valid bytes in qr_domain[] */
	u8 pad1;
	u8 qr_domain[O2NM_MAX_NAME_LEN];
	u8 qr_regions[O2HB_MAX_REGION_NAME_LEN * O2NM_MAX_REGIONS];
};
726*4882a593Smuzhiyun
/*
 * Per-node identity record carried inside dlm_query_nodeinfo:
 * node number plus its IPv4 endpoint (port/address in network byte
 * order, per the __be types). On-wire layout — keep fixed.
 */
struct dlm_node_info {
	u8 ni_nodenum;
	u8 pad1;
	__be16 ni_ipv4_port;
	__be32 ni_ipv4_address;
};
733*4882a593Smuzhiyun
/*
 * Node-info query: qn_nodenum reports qn_numnodes entries of qn_nodes[]
 * for domain qn_domain. On-wire layout — keep fixed.
 */
struct dlm_query_nodeinfo {
	u8 qn_nodenum;
	u8 qn_numnodes;
	u8 qn_namelen;
	u8 pad1;
	u8 qn_domain[O2NM_MAX_NAME_LEN];
	struct dlm_node_info qn_nodes[O2NM_MAX_NODES];
};
742*4882a593Smuzhiyun
/*
 * Domain-exit message: node_idx announces it is leaving the domain.
 * On-wire layout — keep fixed.
 */
struct dlm_exit_domain
{
	u8 node_idx;
	u8 pad1[3];
};
748*4882a593Smuzhiyun
/*
 * Finalize-recovery message: node_idx signals completion of recovery
 * for dead_node; flags carry stage information (see the finalize
 * handler). On-wire layout — keep fixed.
 */
struct dlm_finalize_reco
{
	u8 node_idx;
	u8 dead_node;
	u8 flags;
	u8 pad1;
	__be32 pad2;
};
757*4882a593Smuzhiyun
/*
 * Deref message: node_idx tells the lockres master it no longer holds a
 * reference to the named resource (namelen bytes of name[]). On-wire
 * layout — keep fixed.
 */
struct dlm_deref_lockres
{
	u32 pad1;
	u16 pad2;
	u8 node_idx;
	u8 namelen;

	u8 name[O2NM_MAX_NAME_LEN];
};
767*4882a593Smuzhiyun
/* Responses to a deref request: fully done, or still in progress
 * (a DONE message will follow later — see dlm_deref_lockres_done). */
enum {
	DLM_DEREF_RESPONSE_DONE = 0,
	DLM_DEREF_RESPONSE_INPROG = 1,
};
772*4882a593Smuzhiyun
/*
 * Deref-done message: sent when a previously in-progress deref
 * (DLM_DEREF_RESPONSE_INPROG) has completed. Same layout as
 * dlm_deref_lockres; on-wire — keep fixed.
 */
struct dlm_deref_lockres_done {
	u32 pad1;
	u16 pad2;
	u8 node_idx;
	u8 namelen;

	u8 name[O2NM_MAX_NAME_LEN];
};
781*4882a593Smuzhiyun
782*4882a593Smuzhiyun static inline enum dlm_status
__dlm_lockres_state_to_status(struct dlm_lock_resource * res)783*4882a593Smuzhiyun __dlm_lockres_state_to_status(struct dlm_lock_resource *res)
784*4882a593Smuzhiyun {
785*4882a593Smuzhiyun enum dlm_status status = DLM_NORMAL;
786*4882a593Smuzhiyun
787*4882a593Smuzhiyun assert_spin_locked(&res->spinlock);
788*4882a593Smuzhiyun
789*4882a593Smuzhiyun if (res->state & (DLM_LOCK_RES_RECOVERING|
790*4882a593Smuzhiyun DLM_LOCK_RES_RECOVERY_WAITING))
791*4882a593Smuzhiyun status = DLM_RECOVERING;
792*4882a593Smuzhiyun else if (res->state & DLM_LOCK_RES_MIGRATING)
793*4882a593Smuzhiyun status = DLM_MIGRATING;
794*4882a593Smuzhiyun else if (res->state & DLM_LOCK_RES_IN_PROGRESS)
795*4882a593Smuzhiyun status = DLM_FORWARD;
796*4882a593Smuzhiyun
797*4882a593Smuzhiyun return status;
798*4882a593Smuzhiyun }
799*4882a593Smuzhiyun
dlm_get_lock_cookie_node(u64 cookie)800*4882a593Smuzhiyun static inline u8 dlm_get_lock_cookie_node(u64 cookie)
801*4882a593Smuzhiyun {
802*4882a593Smuzhiyun u8 ret;
803*4882a593Smuzhiyun cookie >>= 56;
804*4882a593Smuzhiyun ret = (u8)(cookie & 0xffULL);
805*4882a593Smuzhiyun return ret;
806*4882a593Smuzhiyun }
807*4882a593Smuzhiyun
/* Extract the 56-bit sequence number from the low bytes of a cookie. */
static inline unsigned long long dlm_get_lock_cookie_seq(u64 cookie)
{
	return (unsigned long long)(cookie & 0x00ffffffffffffffULL);
}
814*4882a593Smuzhiyun
/* Lock object lifecycle (dlmlock.c / dlmast.c). */
struct dlm_lock * dlm_new_lock(int type, u8 node, u64 cookie,
			       struct dlm_lockstatus *lksb);
void dlm_lock_get(struct dlm_lock *lock);
void dlm_lock_put(struct dlm_lock *lock);

/* Bind a lock to its resource (takes whatever references are needed). */
void dlm_lock_attach_lockres(struct dlm_lock *lock,
			     struct dlm_lock_resource *res);

/* o2net message handlers for lock/convert/proxy-AST requests. */
int dlm_create_lock_handler(struct o2net_msg *msg, u32 len, void *data,
			    void **ret_data);
int dlm_convert_lock_handler(struct o2net_msg *msg, u32 len, void *data,
			     void **ret_data);
int dlm_proxy_ast_handler(struct o2net_msg *msg, u32 len, void *data,
			  void **ret_data);

/* Undo a convert/lock that was queued but failed remotely. */
void dlm_revert_pending_convert(struct dlm_lock_resource *res,
				struct dlm_lock *lock);
void dlm_revert_pending_lock(struct dlm_lock_resource *res,
			     struct dlm_lock *lock);

int dlm_unlock_lock_handler(struct o2net_msg *msg, u32 len, void *data,
			    void **ret_data);
/* Commit a cancel/unlock once the remote side has confirmed it. */
void dlm_commit_pending_cancel(struct dlm_lock_resource *res,
			       struct dlm_lock *lock);
void dlm_commit_pending_unlock(struct dlm_lock_resource *res,
			       struct dlm_lock *lock);

/* Worker and recovery thread start/stop/wakeup (dlmthread.c,
 * dlmrecovery.c). */
int dlm_launch_thread(struct dlm_ctxt *dlm);
void dlm_complete_thread(struct dlm_ctxt *dlm);
int dlm_launch_recovery_thread(struct dlm_ctxt *dlm);
void dlm_complete_recovery_thread(struct dlm_ctxt *dlm);
void dlm_wait_for_recovery(struct dlm_ctxt *dlm);
void dlm_kick_recovery_thread(struct dlm_ctxt *dlm);
int dlm_is_node_dead(struct dlm_ctxt *dlm, u8 node);
void dlm_wait_for_node_death(struct dlm_ctxt *dlm, u8 node, int timeout);
void dlm_wait_for_node_recovery(struct dlm_ctxt *dlm, u8 node, int timeout);

/* Domain context reference counting and join state. */
void dlm_put(struct dlm_ctxt *dlm);
struct dlm_ctxt *dlm_grab(struct dlm_ctxt *dlm);
int dlm_domain_fully_joined(struct dlm_ctxt *dlm);

/* Recompute whether a lockres is still in use (locked/unlocked
 * variants). */
void __dlm_lockres_calc_usage(struct dlm_ctxt *dlm,
			      struct dlm_lock_resource *res);
void dlm_lockres_calc_usage(struct dlm_ctxt *dlm,
			    struct dlm_lock_resource *res);
/* Take a reference on a lock resource; released via dlm_lockres_put(). */
static inline void dlm_lockres_get(struct dlm_lock_resource *res)
{
	/* This is called on every lookup, so it might be worth
	 * inlining. */
	kref_get(&res->refs);
}
void dlm_lockres_put(struct dlm_lock_resource *res);
/* Hash-table insert/remove/lookup for lock resources. The "__" variants
 * expect the appropriate lock to already be held by the caller; the
 * "_full" lookup differs from the plain one (see dlmdomain.c for the
 * exact distinction). */
void __dlm_unhash_lockres(struct dlm_ctxt *dlm, struct dlm_lock_resource *res);
void __dlm_insert_lockres(struct dlm_ctxt *dlm, struct dlm_lock_resource *res);
struct dlm_lock_resource * __dlm_lookup_lockres_full(struct dlm_ctxt *dlm,
						     const char *name,
						     unsigned int len,
						     unsigned int hash);
struct dlm_lock_resource * __dlm_lookup_lockres(struct dlm_ctxt *dlm,
						const char *name,
						unsigned int len,
						unsigned int hash);
struct dlm_lock_resource * dlm_lookup_lockres(struct dlm_ctxt *dlm,
					      const char *name,
					      unsigned int len);

/* Classify an errno as a host-down condition. */
int dlm_is_host_down(int errno);

/* Find-or-create a lock resource, mastering it if necessary. */
struct dlm_lock_resource * dlm_get_lock_resource(struct dlm_ctxt *dlm,
						 const char *lockid,
						 int namelen,
						 int flags);
struct dlm_lock_resource *dlm_new_lockres(struct dlm_ctxt *dlm,
					  const char *name,
					  unsigned int namelen);

/* Track which nodes hold references to a mastered resource. */
void dlm_lockres_set_refmap_bit(struct dlm_ctxt *dlm,
				struct dlm_lock_resource *res, int bit);
void dlm_lockres_clear_refmap_bit(struct dlm_ctxt *dlm,
				  struct dlm_lock_resource *res, int bit);

/* In-flight reference accounting on a lock resource. */
void dlm_lockres_drop_inflight_ref(struct dlm_ctxt *dlm,
				   struct dlm_lock_resource *res);
void dlm_lockres_grab_inflight_ref(struct dlm_ctxt *dlm,
				   struct dlm_lock_resource *res);

void __dlm_lockres_grab_inflight_worker(struct dlm_ctxt *dlm,
					struct dlm_lock_resource *res);

/* AST/BAST queueing and delivery (dlmast.c). "__" variants expect the
 * caller to hold the required lock. */
void dlm_queue_ast(struct dlm_ctxt *dlm, struct dlm_lock *lock);
void dlm_queue_bast(struct dlm_ctxt *dlm, struct dlm_lock *lock);
void __dlm_queue_ast(struct dlm_ctxt *dlm, struct dlm_lock *lock);
void __dlm_queue_bast(struct dlm_ctxt *dlm, struct dlm_lock *lock);
void dlm_do_local_ast(struct dlm_ctxt *dlm,
		      struct dlm_lock_resource *res,
		      struct dlm_lock *lock);
int dlm_do_remote_ast(struct dlm_ctxt *dlm,
		      struct dlm_lock_resource *res,
		      struct dlm_lock *lock);
void dlm_do_local_bast(struct dlm_ctxt *dlm,
		       struct dlm_lock_resource *res,
		       struct dlm_lock *lock,
		       int blocked_type);
int dlm_send_proxy_ast_msg(struct dlm_ctxt *dlm,
			   struct dlm_lock_resource *res,
			   struct dlm_lock *lock,
			   int msg_type,
			   int blocked_type, int flags);
/* Convenience wrapper: send a blocking AST (DLM_BAST) for @lock. */
static inline int dlm_send_proxy_bast(struct dlm_ctxt *dlm,
				      struct dlm_lock_resource *res,
				      struct dlm_lock *lock,
				      int blocked_type)
{
	return dlm_send_proxy_ast_msg(dlm, res, lock, DLM_BAST,
				      blocked_type, 0);
}
931*4882a593Smuzhiyun
/* Convenience wrapper: send a normal AST (DLM_AST) for @lock. */
static inline int dlm_send_proxy_ast(struct dlm_ctxt *dlm,
				     struct dlm_lock_resource *res,
				     struct dlm_lock *lock,
				     int flags)
{
	return dlm_send_proxy_ast_msg(dlm, res, lock, DLM_AST,
				      0, flags);
}
940*4882a593Smuzhiyun
/* Debug dump helpers ("__" variant expects the lock already held). */
void dlm_print_one_lock_resource(struct dlm_lock_resource *res);
void __dlm_print_one_lock_resource(struct dlm_lock_resource *res);

/* Wake the dlm thread / mark a resource dirty so it gets shuffled. */
void dlm_kick_thread(struct dlm_ctxt *dlm, struct dlm_lock_resource *res);
void __dlm_dirty_lockres(struct dlm_ctxt *dlm, struct dlm_lock_resource *res);


/* Heartbeat callbacks for node up/down events. */
void dlm_hb_node_down_cb(struct o2nm_node *node, int idx, void *data);
void dlm_hb_node_up_cb(struct o2nm_node *node, int idx, void *data);

/* Migration: empty a local lockres and hand mastery to another node. */
int dlm_empty_lockres(struct dlm_ctxt *dlm, struct dlm_lock_resource *res);
int dlm_finish_migration(struct dlm_ctxt *dlm,
			 struct dlm_lock_resource *res,
			 u8 old_master);
/* Reserve/release AST slots on a resource. */
void dlm_lockres_release_ast(struct dlm_ctxt *dlm,
			     struct dlm_lock_resource *res);
void __dlm_lockres_reserve_ast(struct dlm_lock_resource *res);

/* o2net message handlers for mastery, migration and recovery traffic
 * (dlmmaster.c / dlmrecovery.c). */
int dlm_master_request_handler(struct o2net_msg *msg, u32 len, void *data,
			       void **ret_data);
int dlm_assert_master_handler(struct o2net_msg *msg, u32 len, void *data,
			      void **ret_data);
void dlm_assert_master_post_handler(int status, void *data, void *ret_data);
int dlm_deref_lockres_handler(struct o2net_msg *msg, u32 len, void *data,
			      void **ret_data);
int dlm_deref_lockres_done_handler(struct o2net_msg *msg, u32 len, void *data,
				   void **ret_data);
int dlm_migrate_request_handler(struct o2net_msg *msg, u32 len, void *data,
				void **ret_data);
int dlm_mig_lockres_handler(struct o2net_msg *msg, u32 len, void *data,
			    void **ret_data);
int dlm_master_requery_handler(struct o2net_msg *msg, u32 len, void *data,
			       void **ret_data);
int dlm_request_all_locks_handler(struct o2net_msg *msg, u32 len, void *data,
				  void **ret_data);
int dlm_reco_data_done_handler(struct o2net_msg *msg, u32 len, void *data,
			       void **ret_data);
int dlm_begin_reco_handler(struct o2net_msg *msg, u32 len, void *data,
			   void **ret_data);
int dlm_finalize_reco_handler(struct o2net_msg *msg, u32 len, void *data,
			      void **ret_data);
/* Ask @nodenum who it believes masters @res; result in *real_master. */
int dlm_do_master_requery(struct dlm_ctxt *dlm, struct dlm_lock_resource *res,
			  u8 nodenum, u8 *real_master);

void __dlm_do_purge_lockres(struct dlm_ctxt *dlm,
			    struct dlm_lock_resource *res);

/* Queue an assert-master to be sent from worker context. */
int dlm_dispatch_assert_master(struct dlm_ctxt *dlm,
			       struct dlm_lock_resource *res,
			       int ignore_higher,
			       u8 request_from,
			       u32 flags);


/* Migrate/recover one lockres to @send_to using the mres scratch buffer. */
int dlm_send_one_lockres(struct dlm_ctxt *dlm,
			 struct dlm_lock_resource *res,
			 struct dlm_migratable_lockres *mres,
			 u8 send_to,
			 u8 flags);
void dlm_move_lockres_to_recovery_list(struct dlm_ctxt *dlm,
				       struct dlm_lock_resource *res);

/* will exit holding res->spinlock, but may drop in function */
void __dlm_wait_on_lockres_flags(struct dlm_lock_resource *res, int flags);
/* will exit holding res->spinlock, but may drop in function */
/* Wait until the resource is neither busy, recovering, waiting for
 * recovery, nor migrating. */
static inline void __dlm_wait_on_lockres(struct dlm_lock_resource *res)
{
	__dlm_wait_on_lockres_flags(res, (DLM_LOCK_RES_IN_PROGRESS|
				    DLM_LOCK_RES_RECOVERING|
				    DLM_LOCK_RES_RECOVERY_WAITING|
				    DLM_LOCK_RES_MIGRATING));
}
1014*4882a593Smuzhiyun
/* Master list entry (MLE) insert/remove; caller holds the master lock. */
void __dlm_unlink_mle(struct dlm_ctxt *dlm, struct dlm_master_list_entry *mle);
void __dlm_insert_mle(struct dlm_ctxt *dlm, struct dlm_master_list_entry *mle);

/* create/destroy slab caches */
int dlm_init_master_caches(void);
void dlm_destroy_master_caches(void);

int dlm_init_lock_cache(void);
void dlm_destroy_lock_cache(void);

int dlm_init_mle_cache(void);
void dlm_destroy_mle_cache(void);

/* Notify registered listeners of a heartbeat node up/down event. */
void dlm_hb_event_notify_attached(struct dlm_ctxt *dlm, int idx, int node_up);
/* Tell the master we dropped our ref on @res. */
int dlm_drop_lockres_ref(struct dlm_ctxt *dlm,
			 struct dlm_lock_resource *res);
/* Clean up MLEs referencing a node that died. */
void dlm_clean_master_list(struct dlm_ctxt *dlm,
			   u8 dead_node);
void dlm_force_free_mles(struct dlm_ctxt *dlm);
int dlm_lock_basts_flushed(struct dlm_ctxt *dlm, struct dlm_lock *lock);
/* Lockres usage queries; "__" prefix: caller holds res->spinlock. */
int __dlm_lockres_has_locks(struct dlm_lock_resource *res);
int __dlm_lockres_unused(struct dlm_lock_resource *res);
1037*4882a593Smuzhiyun
dlm_lock_mode_name(int mode)1038*4882a593Smuzhiyun static inline const char * dlm_lock_mode_name(int mode)
1039*4882a593Smuzhiyun {
1040*4882a593Smuzhiyun switch (mode) {
1041*4882a593Smuzhiyun case LKM_EXMODE:
1042*4882a593Smuzhiyun return "EX";
1043*4882a593Smuzhiyun case LKM_PRMODE:
1044*4882a593Smuzhiyun return "PR";
1045*4882a593Smuzhiyun case LKM_NLMODE:
1046*4882a593Smuzhiyun return "NL";
1047*4882a593Smuzhiyun }
1048*4882a593Smuzhiyun return "UNKNOWN";
1049*4882a593Smuzhiyun }
1050*4882a593Smuzhiyun
1051*4882a593Smuzhiyun
dlm_lock_compatible(int existing,int request)1052*4882a593Smuzhiyun static inline int dlm_lock_compatible(int existing, int request)
1053*4882a593Smuzhiyun {
1054*4882a593Smuzhiyun /* NO_LOCK compatible with all */
1055*4882a593Smuzhiyun if (request == LKM_NLMODE ||
1056*4882a593Smuzhiyun existing == LKM_NLMODE)
1057*4882a593Smuzhiyun return 1;
1058*4882a593Smuzhiyun
1059*4882a593Smuzhiyun /* EX incompatible with all non-NO_LOCK */
1060*4882a593Smuzhiyun if (request == LKM_EXMODE)
1061*4882a593Smuzhiyun return 0;
1062*4882a593Smuzhiyun
1063*4882a593Smuzhiyun /* request must be PR, which is compatible with PR */
1064*4882a593Smuzhiyun if (existing == LKM_PRMODE)
1065*4882a593Smuzhiyun return 1;
1066*4882a593Smuzhiyun
1067*4882a593Smuzhiyun return 0;
1068*4882a593Smuzhiyun }
1069*4882a593Smuzhiyun
/* Return 1 if @lock is linked on @head (linear scan), else 0. */
static inline int dlm_lock_on_list(struct list_head *head,
				   struct dlm_lock *lock)
{
	struct dlm_lock *tmplock;

	list_for_each_entry(tmplock, head, list) {
		if (tmplock == lock)
			return 1;
	}
	return 0;
}
1081*4882a593Smuzhiyun
1082*4882a593Smuzhiyun
dlm_err_to_dlm_status(int err)1083*4882a593Smuzhiyun static inline enum dlm_status dlm_err_to_dlm_status(int err)
1084*4882a593Smuzhiyun {
1085*4882a593Smuzhiyun enum dlm_status ret;
1086*4882a593Smuzhiyun if (err == -ENOMEM)
1087*4882a593Smuzhiyun ret = DLM_SYSERR;
1088*4882a593Smuzhiyun else if (err == -ETIMEDOUT || o2net_link_down(err, NULL))
1089*4882a593Smuzhiyun ret = DLM_NOLOCKMGR;
1090*4882a593Smuzhiyun else if (err == -EINVAL)
1091*4882a593Smuzhiyun ret = DLM_BADPARAM;
1092*4882a593Smuzhiyun else if (err == -ENAMETOOLONG)
1093*4882a593Smuzhiyun ret = DLM_IVBUFLEN;
1094*4882a593Smuzhiyun else
1095*4882a593Smuzhiyun ret = DLM_BADARGS;
1096*4882a593Smuzhiyun return ret;
1097*4882a593Smuzhiyun }
1098*4882a593Smuzhiyun
1099*4882a593Smuzhiyun
/* Initialize a node iterator from @map; first dlm_node_iter_next()
 * call will scan from bit 0. */
static inline void dlm_node_iter_init(unsigned long *map,
				      struct dlm_node_iter *iter)
{
	/* snapshot the caller's bitmap so later changes don't affect us */
	memcpy(iter->node_map, map, sizeof(iter->node_map));
	iter->curnode = -1;
}
1106*4882a593Smuzhiyun
dlm_node_iter_next(struct dlm_node_iter * iter)1107*4882a593Smuzhiyun static inline int dlm_node_iter_next(struct dlm_node_iter *iter)
1108*4882a593Smuzhiyun {
1109*4882a593Smuzhiyun int bit;
1110*4882a593Smuzhiyun bit = find_next_bit(iter->node_map, O2NM_MAX_NODES, iter->curnode+1);
1111*4882a593Smuzhiyun if (bit >= O2NM_MAX_NODES) {
1112*4882a593Smuzhiyun iter->curnode = O2NM_MAX_NODES;
1113*4882a593Smuzhiyun return -ENOENT;
1114*4882a593Smuzhiyun }
1115*4882a593Smuzhiyun iter->curnode = bit;
1116*4882a593Smuzhiyun return bit;
1117*4882a593Smuzhiyun }
1118*4882a593Smuzhiyun
/* Unconditionally record @owner as the master of @res.
 * Caller must hold res->spinlock. */
static inline void dlm_set_lockres_owner(struct dlm_ctxt *dlm,
					 struct dlm_lock_resource *res,
					 u8 owner)
{
	assert_spin_locked(&res->spinlock);

	res->owner = owner;
}
1127*4882a593Smuzhiyun
/* Set the owner of @res only if it actually changed.
 * Caller must hold res->spinlock. */
static inline void dlm_change_lockres_owner(struct dlm_ctxt *dlm,
					    struct dlm_lock_resource *res,
					    u8 owner)
{
	assert_spin_locked(&res->spinlock);

	if (owner != res->owner)
		dlm_set_lockres_owner(dlm, res, owner);
}
1137*4882a593Smuzhiyun
1138*4882a593Smuzhiyun #endif /* DLMCOMMON_H */
1139