// SPDX-License-Identifier: GPL-2.0-or-later
/* -*- mode: c; c-basic-offset: 8; -*-
 * vim: noexpandtab sw=8 ts=8 sts=0:
 *
 * dlmconvert.c
 *
 * underlying calls for lock conversion
 *
 * Copyright (C) 2004 Oracle. All rights reserved.
 */


#include <linux/module.h>
#include <linux/fs.h>
#include <linux/types.h>
#include <linux/highmem.h>
#include <linux/init.h>
#include <linux/sysctl.h>
#include <linux/random.h>
#include <linux/blkdev.h>
#include <linux/socket.h>
#include <linux/inet.h>
#include <linux/spinlock.h>


#include "../cluster/heartbeat.h"
#include "../cluster/nodemanager.h"
#include "../cluster/tcp.h"

#include "dlmapi.h"
#include "dlmcommon.h"

#include "dlmconvert.h"

#define MLOG_MASK_PREFIX ML_DLM
#include "../cluster/masklog.h"

/* NOTE: __dlmconvert_master is the only function in here that
 * needs a spinlock held on entry (res->spinlock) and it is the
 * only one that holds a lock on exit (res->spinlock).
 * All other functions in here need no locks and drop all of
 * the locks that they acquire. */
static enum dlm_status __dlmconvert_master(struct dlm_ctxt *dlm,
					   struct dlm_lock_resource *res,
					   struct dlm_lock *lock, int flags,
					   int type, int *call_ast,
					   int *kick_thread);
static enum dlm_status dlm_send_remote_convert_request(struct dlm_ctxt *dlm,
					   struct dlm_lock_resource *res,
					   struct dlm_lock *lock, int flags, int type);

/*
 * this is only called directly by dlmlock(), and only when the
 * local node is the owner of the lockres
 * locking:
 *   caller needs:  none
 *   taken:         takes and drops res->spinlock
 *   held on exit:  none
 * returns: see __dlmconvert_master
 */
enum dlm_status dlmconvert_master(struct dlm_ctxt *dlm,
				  struct dlm_lock_resource *res,
				  struct dlm_lock *lock, int flags, int type)
{
	int call_ast = 0, kick_thread = 0;
	enum dlm_status status;

	spin_lock(&res->spinlock);
	/* we are not in a network handler, this is fine */
	__dlm_wait_on_lockres(res);
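	/* reserve an ast slot now; it is either used when the ast is
	 * queued below or given back via dlm_lockres_release_ast() */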
	__dlm_lockres_reserve_ast(res);
	res->state |= DLM_LOCK_RES_IN_PROGRESS;

	status = __dlmconvert_master(dlm, res, lock, flags, type,
				     &call_ast, &kick_thread);

	res->state &= ~DLM_LOCK_RES_IN_PROGRESS;
	spin_unlock(&res->spinlock);
	wake_up(&res->wq);
	if (status != DLM_NORMAL && status != DLM_NOTQUEUED)
		dlm_error(status);

	/* either queue the ast or release it */
	if (call_ast)
		dlm_queue_ast(dlm, lock);
	else
		dlm_lockres_release_ast(dlm, res);

	if (kick_thread)
		dlm_kick_thread(dlm, res);

	return status;
}

/* performs lock conversion at the lockres master site
 * locking:
 *   caller needs:  res->spinlock
 *   taken:         takes and drops lock->spinlock
 *   held on exit:  res->spinlock
 * returns: DLM_NORMAL, DLM_NOTQUEUED, DLM_DENIED
 *   call_ast: whether ast should be called for this lock
 *   kick_thread: whether dlm_kick_thread should be called
 */
static enum dlm_status __dlmconvert_master(struct dlm_ctxt *dlm,
					   struct dlm_lock_resource *res,
					   struct dlm_lock *lock, int flags,
					   int type, int *call_ast,
					   int *kick_thread)
{
	enum dlm_status status = DLM_NORMAL;
	struct dlm_lock *tmplock = NULL;

	assert_spin_locked(&res->spinlock);

	mlog(0, "type=%d, convert_type=%d, new convert_type=%d\n",
	     lock->ml.type, lock->ml.convert_type, type);

	spin_lock(&lock->spinlock);

	/* already converting? */
	if (lock->ml.convert_type != LKM_IVMODE) {
		mlog(ML_ERROR, "attempted to convert a lock with a lock "
		     "conversion pending\n");
		status = DLM_DENIED;
		goto unlock_exit;
	}

	/* must be on grant queue to convert */
	if (!dlm_lock_on_list(&res->granted, lock)) {
		mlog(ML_ERROR, "attempted to convert a lock not on grant "
		     "queue\n");
		status = DLM_DENIED;
		goto unlock_exit;
	}

	if (flags & LKM_VALBLK) {
		switch (lock->ml.type) {
		case LKM_EXMODE:
			/* EX + LKM_VALBLK + convert == set lvb */
			mlog(0, "will set lvb: converting %s->%s\n",
			     dlm_lock_mode_name(lock->ml.type),
			     dlm_lock_mode_name(type));
			lock->lksb->flags |= DLM_LKSB_PUT_LVB;
			break;
		case LKM_PRMODE:
		case LKM_NLMODE:
			/* refetch if new level is not NL */
			if (type > LKM_NLMODE) {
				mlog(0, "will fetch new value into "
				     "lvb: converting %s->%s\n",
				     dlm_lock_mode_name(lock->ml.type),
				     dlm_lock_mode_name(type));
				lock->lksb->flags |= DLM_LKSB_GET_LVB;
			} else {
				mlog(0, "will NOT fetch new value "
				     "into lvb: converting %s->%s\n",
				     dlm_lock_mode_name(lock->ml.type),
				     dlm_lock_mode_name(type));
				flags &= ~(LKM_VALBLK);
			}
			break;
		}
	}


	/* in-place downconvert? */
	if (type <= lock->ml.type)
		goto grant;

	/* upconvert from here on */
	status = DLM_NORMAL;
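	/* an upconvert is granted immediately only if the requested mode
	 * is compatible with every other granted lock and with every
	 * conversion already waiting on the converting queue */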
	list_for_each_entry(tmplock, &res->granted, list) {
		if (tmplock == lock)
			continue;
		if (!dlm_lock_compatible(tmplock->ml.type, type))
			goto switch_queues;
	}

	list_for_each_entry(tmplock, &res->converting, list) {
		if (!dlm_lock_compatible(tmplock->ml.type, type))
			goto switch_queues;
		/* existing conversion requests take precedence */
		if (!dlm_lock_compatible(tmplock->ml.convert_type, type))
			goto switch_queues;
	}

	/* fall thru to grant */

grant:
	mlog(0, "res %.*s, granting %s lock\n", res->lockname.len,
	     res->lockname.name, dlm_lock_mode_name(type));
	/* immediately grant the new lock type */
	lock->lksb->status = DLM_NORMAL;
	if (lock->ml.node == dlm->node_num)
		mlog(0, "doing in-place convert for nonlocal lock\n");
	lock->ml.type = type;
	if (lock->lksb->flags & DLM_LKSB_PUT_LVB)
		memcpy(res->lvb, lock->lksb->lvb, DLM_LVB_LEN);

	/*
	 * Move the lock to the tail because it may be the only lock which has
	 * an invalid lvb.
	 */
	list_move_tail(&lock->list, &res->granted);

	status = DLM_NORMAL;
	*call_ast = 1;
	goto unlock_exit;

switch_queues:
	if (flags & LKM_NOQUEUE) {
		mlog(0, "failed to convert NOQUEUE lock %.*s from "
		     "%d to %d...\n", res->lockname.len, res->lockname.name,
		     lock->ml.type, type);
		status = DLM_NOTQUEUED;
		goto unlock_exit;
	}
	mlog(0, "res %.*s, queueing...\n", res->lockname.len,
	     res->lockname.name);

	lock->ml.convert_type = type;
	/* do not alter lock refcount. switching lists. */
	list_move_tail(&lock->list, &res->converting);

unlock_exit:
	spin_unlock(&lock->spinlock);
	if (status == DLM_DENIED) {
		__dlm_print_one_lock_resource(res);
	}
	if (status == DLM_NORMAL)
		*kick_thread = 1;
	return status;
}

void dlm_revert_pending_convert(struct dlm_lock_resource *res,
				struct dlm_lock *lock)
{
	/* do not alter lock refcount. switching lists. */
	list_move_tail(&lock->list, &res->granted);
	lock->ml.convert_type = LKM_IVMODE;
	lock->lksb->flags &= ~(DLM_LKSB_GET_LVB|DLM_LKSB_PUT_LVB);
}

/* messages the master site to do lock conversion
 * locking:
 *   caller needs:  none
 *   taken:         takes and drops res->spinlock, uses DLM_LOCK_RES_IN_PROGRESS
 *   held on exit:  none
 * returns: DLM_NORMAL, DLM_RECOVERING, status from remote node
 */
enum dlm_status dlmconvert_remote(struct dlm_ctxt *dlm,
				  struct dlm_lock_resource *res,
				  struct dlm_lock *lock, int flags, int type)
{
	enum dlm_status status;

	mlog(0, "type=%d, convert_type=%d, busy=%d\n", lock->ml.type,
	     lock->ml.convert_type, res->state & DLM_LOCK_RES_IN_PROGRESS);

	spin_lock(&res->spinlock);
	if (res->state & DLM_LOCK_RES_RECOVERING) {
		mlog(0, "bailing out early since res is RECOVERING "
		     "on secondary queue\n");
		/* __dlm_print_one_lock_resource(res); */
		status = DLM_RECOVERING;
		goto bail;
	}
	/* will exit this call with spinlock held */
	__dlm_wait_on_lockres(res);

	if (lock->ml.convert_type != LKM_IVMODE) {
		__dlm_print_one_lock_resource(res);
		mlog(ML_ERROR, "converting a remote lock that is already "
		     "converting! (cookie=%u:%llu, conv=%d)\n",
		     dlm_get_lock_cookie_node(be64_to_cpu(lock->ml.cookie)),
		     dlm_get_lock_cookie_seq(be64_to_cpu(lock->ml.cookie)),
		     lock->ml.convert_type);
		status = DLM_DENIED;
		goto bail;
	}

	if (lock->ml.type == type && lock->ml.convert_type == LKM_IVMODE) {
		mlog(0, "last convert request returned DLM_RECOVERING, but "
		     "owner has already queued and sent ast to me. res %.*s, "
		     "(cookie=%u:%llu, type=%d, conv=%d)\n",
		     res->lockname.len, res->lockname.name,
		     dlm_get_lock_cookie_node(be64_to_cpu(lock->ml.cookie)),
		     dlm_get_lock_cookie_seq(be64_to_cpu(lock->ml.cookie)),
		     lock->ml.type, lock->ml.convert_type);
		status = DLM_NORMAL;
		goto bail;
	}

	res->state |= DLM_LOCK_RES_IN_PROGRESS;
	/* move lock to local convert queue */
	/* do not alter lock refcount. switching lists. */
	list_move_tail(&lock->list, &res->converting);
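	/* convert_pending lets us detect, after the network call below,
	 * whether the convert was reverted (e.g. recovery moved the lock
	 * back to the granted queue) while we were waiting for the reply */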
	lock->convert_pending = 1;
	lock->ml.convert_type = type;

	if (flags & LKM_VALBLK) {
		if (lock->ml.type == LKM_EXMODE) {
			flags |= LKM_PUT_LVB;
			lock->lksb->flags |= DLM_LKSB_PUT_LVB;
		} else {
			if (lock->ml.convert_type == LKM_NLMODE)
				flags &= ~LKM_VALBLK;
			else {
				flags |= LKM_GET_LVB;
				lock->lksb->flags |= DLM_LKSB_GET_LVB;
			}
		}
	}
	spin_unlock(&res->spinlock);

	/* no locks held here.
	 * need to wait for a reply as to whether it got queued or not. */
	status = dlm_send_remote_convert_request(dlm, res, lock, flags, type);

	spin_lock(&res->spinlock);
	res->state &= ~DLM_LOCK_RES_IN_PROGRESS;
	/* if it failed, move it back to granted queue.
	 * if master returns DLM_NORMAL and then down before sending ast,
	 * it may have already been moved to granted queue, reset to
	 * DLM_RECOVERING and retry convert */
	if (status != DLM_NORMAL) {
		if (status != DLM_NOTQUEUED)
			dlm_error(status);
		dlm_revert_pending_convert(res, lock);
	} else if (!lock->convert_pending) {
		mlog(0, "%s: res %.*s, owner died and lock has been moved back "
		     "to granted list, retry convert.\n",
		     dlm->name, res->lockname.len, res->lockname.name);
		status = DLM_RECOVERING;
	}

	lock->convert_pending = 0;
bail:
	spin_unlock(&res->spinlock);

	/* TODO: should this be a wake_one? */
	/* wake up any IN_PROGRESS waiters */
	wake_up(&res->wq);

	return status;
}

/* sends DLM_CONVERT_LOCK_MSG to master site
 * locking:
 *   caller needs:  none
 *   taken:         none
 *   held on exit:  none
 * returns: DLM_NOLOCKMGR, status from remote node
 */
static enum dlm_status dlm_send_remote_convert_request(struct dlm_ctxt *dlm,
					   struct dlm_lock_resource *res,
					   struct dlm_lock *lock, int flags, int type)
{
	struct dlm_convert_lock convert;
	int tmpret;
	enum dlm_status ret;
	int status = 0;
	struct kvec vec[2];
	size_t veclen = 1;

	mlog(0, "%.*s\n", res->lockname.len, res->lockname.name);

	memset(&convert, 0, sizeof(struct dlm_convert_lock));
	convert.node_idx = dlm->node_num;
	convert.requested_type = type;
	convert.cookie = lock->ml.cookie;
	convert.namelen = res->lockname.len;
	convert.flags = cpu_to_be32(flags);
	memcpy(convert.name, res->lockname.name, convert.namelen);

	vec[0].iov_len = sizeof(struct dlm_convert_lock);
	vec[0].iov_base = &convert;

	if (flags & LKM_PUT_LVB) {
		/* extra data to send if we are updating lvb */
		vec[1].iov_len = DLM_LVB_LEN;
		vec[1].iov_base = lock->lksb->lvb;
		veclen++;
	}

	tmpret = o2net_send_message_vec(DLM_CONVERT_LOCK_MSG, dlm->key,
					vec, veclen, res->owner, &status);
	if (tmpret >= 0) {
		// successfully sent and received
		ret = status;  // this is already a dlm_status
		if (ret == DLM_RECOVERING) {
			mlog(0, "node %u returned DLM_RECOVERING from convert "
			     "message!\n", res->owner);
		} else if (ret == DLM_MIGRATING) {
			mlog(0, "node %u returned DLM_MIGRATING from convert "
			     "message!\n", res->owner);
		} else if (ret == DLM_FORWARD) {
			mlog(0, "node %u returned DLM_FORWARD from convert "
			     "message!\n", res->owner);
		} else if (ret != DLM_NORMAL && ret != DLM_NOTQUEUED)
			dlm_error(ret);
	} else {
		mlog(ML_ERROR, "Error %d when sending message %u (key 0x%x) to "
		     "node %u\n", tmpret, DLM_CONVERT_LOCK_MSG, dlm->key,
		     res->owner);
		if (dlm_is_host_down(tmpret)) {
			/* instead of logging the same network error over
			 * and over, sleep here and wait for the heartbeat
			 * to notice the node is dead. times out after 5s. */
			dlm_wait_for_node_death(dlm, res->owner,
						DLM_NODE_DEATH_WAIT_MAX);
			ret = DLM_RECOVERING;
			mlog(0, "node %u died so returning DLM_RECOVERING "
			     "from convert message!\n", res->owner);
		} else {
			ret = dlm_err_to_dlm_status(tmpret);
		}
	}

	return ret;
}

/* handler for DLM_CONVERT_LOCK_MSG on master site
 * locking:
 *   caller needs:  none
 *   taken:         takes and drops res->spinlock
 *   held on exit:  none
 * returns: DLM_NORMAL, DLM_IVLOCKID, DLM_BADARGS,
 *          status from __dlmconvert_master
 */
int dlm_convert_lock_handler(struct o2net_msg *msg, u32 len, void *data,
			     void **ret_data)
{
	struct dlm_ctxt *dlm = data;
	struct dlm_convert_lock *cnv = (struct dlm_convert_lock *)msg->buf;
	struct dlm_lock_resource *res = NULL;
	struct dlm_lock *lock = NULL;
	struct dlm_lock *tmp_lock;
	struct dlm_lockstatus *lksb;
	enum dlm_status status = DLM_NORMAL;
	u32 flags;
	int call_ast = 0, kick_thread = 0, ast_reserved = 0, wake = 0;

	if (!dlm_grab(dlm)) {
		dlm_error(DLM_REJECTED);
		return DLM_REJECTED;
	}

	mlog_bug_on_msg(!dlm_domain_fully_joined(dlm),
			"Domain %s not fully joined!\n", dlm->name);

	if (cnv->namelen > DLM_LOCKID_NAME_MAX) {
		status = DLM_IVBUFLEN;
		dlm_error(status);
		goto leave;
	}

	flags = be32_to_cpu(cnv->flags);

	if ((flags & (LKM_PUT_LVB|LKM_GET_LVB)) ==
	    (LKM_PUT_LVB|LKM_GET_LVB)) {
		mlog(ML_ERROR, "both PUT and GET lvb specified\n");
		status = DLM_BADARGS;
		goto leave;
	}

	mlog(0, "lvb: %s\n", flags & LKM_PUT_LVB ? "put lvb" :
	     (flags & LKM_GET_LVB ? "get lvb" : "none"));

	status = DLM_IVLOCKID;
	res = dlm_lookup_lockres(dlm, cnv->name, cnv->namelen);
	if (!res) {
		dlm_error(status);
		goto leave;
	}

	spin_lock(&res->spinlock);
	status = __dlm_lockres_state_to_status(res);
	if (status != DLM_NORMAL) {
		spin_unlock(&res->spinlock);
		dlm_error(status);
		goto leave;
	}
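	/* the lock being converted must still be on the granted queue;
	 * match it by cookie and owning node */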
	list_for_each_entry(tmp_lock, &res->granted, list) {
		if (tmp_lock->ml.cookie == cnv->cookie &&
		    tmp_lock->ml.node == cnv->node_idx) {
			lock = tmp_lock;
			dlm_lock_get(lock);
			break;
		}
	}
	spin_unlock(&res->spinlock);
	if (!lock) {
		status = DLM_IVLOCKID;
		mlog(ML_ERROR, "did not find lock to convert on grant queue! "
		     "cookie=%u:%llu\n",
		     dlm_get_lock_cookie_node(be64_to_cpu(cnv->cookie)),
		     dlm_get_lock_cookie_seq(be64_to_cpu(cnv->cookie)));
		dlm_print_one_lock_resource(res);
		goto leave;
	}

	/* found the lock */
	lksb = lock->lksb;

	/* see if caller needed to get/put lvb */
	if (flags & LKM_PUT_LVB) {
		BUG_ON(lksb->flags & (DLM_LKSB_PUT_LVB|DLM_LKSB_GET_LVB));
		lksb->flags |= DLM_LKSB_PUT_LVB;
		memcpy(&lksb->lvb[0], &cnv->lvb[0], DLM_LVB_LEN);
	} else if (flags & LKM_GET_LVB) {
		BUG_ON(lksb->flags & (DLM_LKSB_PUT_LVB|DLM_LKSB_GET_LVB));
		lksb->flags |= DLM_LKSB_GET_LVB;
	}

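	/* res->spinlock was dropped during the lock lookup above, so the
	 * resource state may have changed; recheck it before converting */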
	spin_lock(&res->spinlock);
	status = __dlm_lockres_state_to_status(res);
	if (status == DLM_NORMAL) {
		__dlm_lockres_reserve_ast(res);
		ast_reserved = 1;
		res->state |= DLM_LOCK_RES_IN_PROGRESS;
		status = __dlmconvert_master(dlm, res, lock, flags,
					     cnv->requested_type,
					     &call_ast, &kick_thread);
		res->state &= ~DLM_LOCK_RES_IN_PROGRESS;
		wake = 1;
	}
	spin_unlock(&res->spinlock);
	if (wake)
		wake_up(&res->wq);

	if (status != DLM_NORMAL) {
		if (status != DLM_NOTQUEUED)
			dlm_error(status);
		lksb->flags &= ~(DLM_LKSB_GET_LVB|DLM_LKSB_PUT_LVB);
	}

leave:
	if (lock)
		dlm_lock_put(lock);

	/* either queue the ast or release it, if reserved */
	if (call_ast)
		dlm_queue_ast(dlm, lock);
	else if (ast_reserved)
		dlm_lockres_release_ast(dlm, res);

	if (kick_thread)
		dlm_kick_thread(dlm, res);

	if (res)
		dlm_lockres_put(res);

	dlm_put(dlm);

	return status;
}