1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28#include <linux/module.h>
29#include <linux/fs.h>
30#include <linux/types.h>
31#include <linux/highmem.h>
32#include <linux/init.h>
33#include <linux/sysctl.h>
34#include <linux/random.h>
35#include <linux/blkdev.h>
36#include <linux/socket.h>
37#include <linux/inet.h>
38#include <linux/spinlock.h>
39
40
41#include "cluster/heartbeat.h"
42#include "cluster/nodemanager.h"
43#include "cluster/tcp.h"
44
45#include "dlmapi.h"
46#include "dlmcommon.h"
47
48#include "dlmconvert.h"
49
50#define MLOG_MASK_PREFIX ML_DLM
51#include "cluster/masklog.h"
52
53
54
55
56
57
58static enum dlm_status __dlmconvert_master(struct dlm_ctxt *dlm,
59 struct dlm_lock_resource *res,
60 struct dlm_lock *lock, int flags,
61 int type, int *call_ast,
62 int *kick_thread);
63static enum dlm_status dlm_send_remote_convert_request(struct dlm_ctxt *dlm,
64 struct dlm_lock_resource *res,
65 struct dlm_lock *lock, int flags, int type);
66
67
68
69
70
71
72
73
74
75
76enum dlm_status dlmconvert_master(struct dlm_ctxt *dlm,
77 struct dlm_lock_resource *res,
78 struct dlm_lock *lock, int flags, int type)
79{
80 int call_ast = 0, kick_thread = 0;
81 enum dlm_status status;
82
83 spin_lock(&res->spinlock);
84
85 __dlm_wait_on_lockres(res);
86 __dlm_lockres_reserve_ast(res);
87 res->state |= DLM_LOCK_RES_IN_PROGRESS;
88
89 status = __dlmconvert_master(dlm, res, lock, flags, type,
90 &call_ast, &kick_thread);
91
92 res->state &= ~DLM_LOCK_RES_IN_PROGRESS;
93 spin_unlock(&res->spinlock);
94 wake_up(&res->wq);
95 if (status != DLM_NORMAL && status != DLM_NOTQUEUED)
96 dlm_error(status);
97
98
99 if (call_ast)
100 dlm_queue_ast(dlm, lock);
101 else
102 dlm_lockres_release_ast(dlm, res);
103
104 if (kick_thread)
105 dlm_kick_thread(dlm, res);
106
107 return status;
108}
109
110
111
112
113
114
115
116
117
118
/*
 * Perform a lock conversion at the master node.
 *
 * Caller must hold res->spinlock (asserted below); this function takes
 * and drops lock->spinlock itself.  On success the lock is either
 * granted in place (moved/kept on res->granted with its new type) or
 * queued on res->converting to wait for incompatible holders.
 *
 * Out-parameters for the caller to act on AFTER dropping the spinlock:
 *   *call_ast    - set to 1 when the conversion was granted and an AST
 *                  should be queued for this lock
 *   *kick_thread - set to 1 when status is DLM_NORMAL, so the dlm
 *                  thread can re-examine the lockres
 *
 * Returns DLM_NORMAL, DLM_DENIED (illegal request), or DLM_NOTQUEUED
 * (LKM_NOQUEUE conversion that could not be granted immediately).
 */
static enum dlm_status __dlmconvert_master(struct dlm_ctxt *dlm,
					   struct dlm_lock_resource *res,
					   struct dlm_lock *lock, int flags,
					   int type, int *call_ast,
					   int *kick_thread)
{
	enum dlm_status status = DLM_NORMAL;
	struct dlm_lock *tmplock=NULL;

	assert_spin_locked(&res->spinlock);

	mlog(0, "type=%d, convert_type=%d, new convert_type=%d\n",
	     lock->ml.type, lock->ml.convert_type, type);

	spin_lock(&lock->spinlock);

	/* a conversion request may not itself be converted */
	if (lock->ml.convert_type != LKM_IVMODE) {
		mlog(ML_ERROR, "attempted to convert a lock with a lock "
		     "conversion pending\n");
		status = DLM_DENIED;
		goto unlock_exit;
	}

	/* only locks currently on the grant queue may be converted */
	if (!dlm_lock_on_list(&res->granted, lock)) {
		mlog(ML_ERROR, "attempted to convert a lock not on grant "
		     "queue\n");
		status = DLM_DENIED;
		goto unlock_exit;
	}

	if (flags & LKM_VALBLK) {
		switch (lock->ml.type) {
		case LKM_EXMODE:
			/* converting away from EX: the holder's lvb is
			 * authoritative, so it will be written back */
			mlog(0, "will set lvb: converting %s->%s\n",
			     dlm_lock_mode_name(lock->ml.type),
			     dlm_lock_mode_name(type));
			lock->lksb->flags |= DLM_LKSB_PUT_LVB;
			break;
		case LKM_PRMODE:
		case LKM_NLMODE:
			/* upconverting: fetch the current lvb, unless the
			 * target mode is NL (which cannot read the lvb) */
			if (type > LKM_NLMODE) {
				mlog(0, "will fetch new value into "
				     "lvb: converting %s->%s\n",
				     dlm_lock_mode_name(lock->ml.type),
				     dlm_lock_mode_name(type));
				lock->lksb->flags |= DLM_LKSB_GET_LVB;
			} else {
				mlog(0, "will NOT fetch new value "
				     "into lvb: converting %s->%s\n",
				     dlm_lock_mode_name(lock->ml.type),
				     dlm_lock_mode_name(type));
				flags &= ~(LKM_VALBLK);
			}
			break;
		}
	}

	/* a downconvert (or same-mode convert) can never conflict with
	 * anything already granted, so grant it in place */
	if (type <= lock->ml.type)
		goto grant;

	/* upconvert from here on: must check both queues */
	status = DLM_NORMAL;

	/* the new mode must be compatible with every OTHER granted lock */
	list_for_each_entry(tmplock, &res->granted, list) {
		if (tmplock == lock)
			continue;
		if (!dlm_lock_compatible(tmplock->ml.type, type))
			goto switch_queues;
	}

	/* ... and with every pending conversion, both its current and
	 * its requested mode, so earlier converters keep priority */
	list_for_each_entry(tmplock, &res->converting, list) {
		if (!dlm_lock_compatible(tmplock->ml.type, type))
			goto switch_queues;
		/* existing conversion requests take precedence */
		if (!dlm_lock_compatible(tmplock->ml.convert_type, type))
			goto switch_queues;
	}

	/* fall through: no conflicts, grant immediately */

grant:
	mlog(0, "res %.*s, granting %s lock\n", res->lockname.len,
	     res->lockname.name, dlm_lock_mode_name(type));
	/* immediately grant the new lock type */
	lock->lksb->status = DLM_NORMAL;
	if (lock->ml.node == dlm->node_num)
		/* NOTE(review): condition tests for a *local* lock but the
		 * message says "nonlocal" — looks inverted; confirm intent */
		mlog(0, "doing in-place convert for nonlocal lock\n");
	lock->ml.type = type;
	if (lock->lksb->flags & DLM_LKSB_PUT_LVB)
		/* holder was EX: publish its lvb into the lockres copy */
		memcpy(res->lvb, lock->lksb->lvb, DLM_LVB_LEN);

	/* keep the lock on the grant queue (moves it to the tail) */
	list_move_tail(&lock->list, &res->granted);

	status = DLM_NORMAL;
	*call_ast = 1;
	goto unlock_exit;

switch_queues:
	if (flags & LKM_NOQUEUE) {
		/* caller refused to wait for a blocked conversion */
		mlog(0, "failed to convert NOQUEUE lock %.*s from "
		     "%d to %d...\n", res->lockname.len, res->lockname.name,
		     lock->ml.type, type);
		status = DLM_NOTQUEUED;
		goto unlock_exit;
	}
	mlog(0, "res %.*s, queueing...\n", res->lockname.len,
	     res->lockname.name);

	/* park the lock on the converting queue until compatible */
	lock->ml.convert_type = type;

	list_move_tail(&lock->list, &res->converting);

unlock_exit:
	spin_unlock(&lock->spinlock);
	if (status == DLM_DENIED) {
		__dlm_print_one_lock_resource(res);
	}
	if (status == DLM_NORMAL)
		*kick_thread = 1;
	return status;
}
249
250void dlm_revert_pending_convert(struct dlm_lock_resource *res,
251 struct dlm_lock *lock)
252{
253
254 list_move_tail(&lock->list, &res->granted);
255 lock->ml.convert_type = LKM_IVMODE;
256 lock->lksb->flags &= ~(DLM_LKSB_GET_LVB|DLM_LKSB_PUT_LVB);
257}
258
259
260
261
262
263
264
265
/*
 * Request a lock conversion from a remote master.
 *
 * Moves the lock onto the converting queue, sends the convert message
 * to res->owner (dropping res->spinlock across the network call), and
 * then resolves the outcome, including the race where the owner dies
 * and recovery moves the lock back to the grant queue while the
 * request is in flight.
 *
 * Returns DLM_NORMAL on success, DLM_RECOVERING when the caller should
 * retry after recovery, or an error status from the master.
 */
enum dlm_status dlmconvert_remote(struct dlm_ctxt *dlm,
				  struct dlm_lock_resource *res,
				  struct dlm_lock *lock, int flags, int type)
{
	enum dlm_status status;

	mlog(0, "type=%d, convert_type=%d, busy=%d\n", lock->ml.type,
	     lock->ml.convert_type, res->state & DLM_LOCK_RES_IN_PROGRESS);

	spin_lock(&res->spinlock);
	if (res->state & DLM_LOCK_RES_RECOVERING) {
		mlog(0, "bailing out early since res is RECOVERING "
		     "on secondary queue\n");
		/* caller retries once recovery finishes */
		status = DLM_RECOVERING;
		goto bail;
	}

	/* blocks until the lockres is no longer busy; returns holding
	 * res->spinlock */
	__dlm_wait_on_lockres(res);

	/* a lock may only have one conversion outstanding */
	if (lock->ml.convert_type != LKM_IVMODE) {
		__dlm_print_one_lock_resource(res);
		mlog(ML_ERROR, "converting a remote lock that is already "
		     "converting! (cookie=%u:%llu, conv=%d)\n",
		     dlm_get_lock_cookie_node(be64_to_cpu(lock->ml.cookie)),
		     dlm_get_lock_cookie_seq(be64_to_cpu(lock->ml.cookie)),
		     lock->ml.convert_type);
		status = DLM_DENIED;
		goto bail;
	}

	/* a prior attempt returned DLM_RECOVERING but the master had
	 * already granted the convert and sent the ast: nothing to do */
	if (lock->ml.type == type && lock->ml.convert_type == LKM_IVMODE) {
		mlog(0, "last convert request returned DLM_RECOVERING, but "
		     "owner has already queued and sent ast to me. res %.*s, "
		     "(cookie=%u:%llu, type=%d, conv=%d)\n",
		     res->lockname.len, res->lockname.name,
		     dlm_get_lock_cookie_node(be64_to_cpu(lock->ml.cookie)),
		     dlm_get_lock_cookie_seq(be64_to_cpu(lock->ml.cookie)),
		     lock->ml.type, lock->ml.convert_type);
		status = DLM_NORMAL;
		goto bail;
	}

	res->state |= DLM_LOCK_RES_IN_PROGRESS;

	/* optimistically move onto the converting queue before asking */
	list_move_tail(&lock->list, &res->converting);
	lock->convert_pending = 1;
	lock->ml.convert_type = type;

	if (flags & LKM_VALBLK) {
		if (lock->ml.type == LKM_EXMODE) {
			/* downconverting from EX: ship our lvb to the master */
			flags |= LKM_PUT_LVB;
			lock->lksb->flags |= DLM_LKSB_PUT_LVB;
		} else {
			/* convert_type was just set to "type" above, so this
			 * tests the TARGET mode: NL cannot read the lvb */
			if (lock->ml.convert_type == LKM_NLMODE)
				flags &= ~LKM_VALBLK;
			else {
				flags |= LKM_GET_LVB;
				lock->lksb->flags |= DLM_LKSB_GET_LVB;
			}
		}
	}
	spin_unlock(&res->spinlock);

	/* network call with the spinlock dropped; recovery may run and
	 * modify the lock/queues while this is in flight */
	status = dlm_send_remote_convert_request(dlm, res, lock, flags, type);

	spin_lock(&res->spinlock);
	res->state &= ~DLM_LOCK_RES_IN_PROGRESS;

	if (status != DLM_NORMAL) {
		if (status != DLM_NOTQUEUED)
			dlm_error(status);
		dlm_revert_pending_convert(res, lock);
	} else if (!lock->convert_pending) {
		/* recovery cleared convert_pending and moved the lock back:
		 * the owner died, so tell the caller to retry */
		mlog(0, "%s: res %.*s, owner died and lock has been moved back "
		     "to granted list, retry convert.\n",
		     dlm->name, res->lockname.len, res->lockname.name);
		status = DLM_RECOVERING;
	}

	lock->convert_pending = 0;
bail:
	spin_unlock(&res->spinlock);

	/* let waiters see that IN_PROGRESS has been cleared */
	wake_up(&res->wq);

	return status;
}
362
363
364
365
366
367
368
369
370static enum dlm_status dlm_send_remote_convert_request(struct dlm_ctxt *dlm,
371 struct dlm_lock_resource *res,
372 struct dlm_lock *lock, int flags, int type)
373{
374 struct dlm_convert_lock convert;
375 int tmpret;
376 enum dlm_status ret;
377 int status = 0;
378 struct kvec vec[2];
379 size_t veclen = 1;
380
381 mlog(0, "%.*s\n", res->lockname.len, res->lockname.name);
382
383 memset(&convert, 0, sizeof(struct dlm_convert_lock));
384 convert.node_idx = dlm->node_num;
385 convert.requested_type = type;
386 convert.cookie = lock->ml.cookie;
387 convert.namelen = res->lockname.len;
388 convert.flags = cpu_to_be32(flags);
389 memcpy(convert.name, res->lockname.name, convert.namelen);
390
391 vec[0].iov_len = sizeof(struct dlm_convert_lock);
392 vec[0].iov_base = &convert;
393
394 if (flags & LKM_PUT_LVB) {
395
396 vec[1].iov_len = DLM_LVB_LEN;
397 vec[1].iov_base = lock->lksb->lvb;
398 veclen++;
399 }
400
401 tmpret = o2net_send_message_vec(DLM_CONVERT_LOCK_MSG, dlm->key,
402 vec, veclen, res->owner, &status);
403 if (tmpret >= 0) {
404
405 ret = status;
406 if (ret == DLM_RECOVERING) {
407 mlog(0, "node %u returned DLM_RECOVERING from convert "
408 "message!\n", res->owner);
409 } else if (ret == DLM_MIGRATING) {
410 mlog(0, "node %u returned DLM_MIGRATING from convert "
411 "message!\n", res->owner);
412 } else if (ret == DLM_FORWARD) {
413 mlog(0, "node %u returned DLM_FORWARD from convert "
414 "message!\n", res->owner);
415 } else if (ret != DLM_NORMAL && ret != DLM_NOTQUEUED)
416 dlm_error(ret);
417 } else {
418 mlog(ML_ERROR, "Error %d when sending message %u (key 0x%x) to "
419 "node %u\n", tmpret, DLM_CONVERT_LOCK_MSG, dlm->key,
420 res->owner);
421 if (dlm_is_host_down(tmpret)) {
422
423
424
425 dlm_wait_for_node_death(dlm, res->owner,
426 DLM_NODE_DEATH_WAIT_MAX);
427 ret = DLM_RECOVERING;
428 mlog(0, "node %u died so returning DLM_RECOVERING "
429 "from convert message!\n", res->owner);
430 } else {
431 ret = dlm_err_to_dlm_status(tmpret);
432 }
433 }
434
435 return ret;
436}
437
438
439
440
441
442
443
444
445
/*
 * o2net handler for DLM_CONVERT_LOCK_MSG, run on the lockres master.
 *
 * Validates the request, finds the requester's lock on the grant
 * queue, stages any lvb transfer, and calls __dlmconvert_master() to
 * do the conversion.  The returned status is sent back to the
 * requesting node as the message reply.
 */
int dlm_convert_lock_handler(struct o2net_msg *msg, u32 len, void *data,
			     void **ret_data)
{
	struct dlm_ctxt *dlm = data;
	struct dlm_convert_lock *cnv = (struct dlm_convert_lock *)msg->buf;
	struct dlm_lock_resource *res = NULL;
	struct dlm_lock *lock = NULL;
	struct dlm_lock *tmp_lock;
	struct dlm_lockstatus *lksb;
	enum dlm_status status = DLM_NORMAL;
	u32 flags;
	int call_ast = 0, kick_thread = 0, ast_reserved = 0, wake = 0;

	/* take a domain reference; reject if the domain is going away */
	if (!dlm_grab(dlm)) {
		dlm_error(DLM_REJECTED);
		return DLM_REJECTED;
	}

	mlog_bug_on_msg(!dlm_domain_fully_joined(dlm),
			"Domain %s not fully joined!\n", dlm->name);

	if (cnv->namelen > DLM_LOCKID_NAME_MAX) {
		status = DLM_IVBUFLEN;
		dlm_error(status);
		goto leave;
	}

	flags = be32_to_cpu(cnv->flags);

	/* a convert can put OR get the lvb, never both */
	if ((flags & (LKM_PUT_LVB|LKM_GET_LVB)) ==
	    (LKM_PUT_LVB|LKM_GET_LVB)) {
		mlog(ML_ERROR, "both PUT and GET lvb specified\n");
		status = DLM_BADARGS;
		goto leave;
	}

	mlog(0, "lvb: %s\n", flags & LKM_PUT_LVB ? "put lvb" :
	     (flags & LKM_GET_LVB ? "get lvb" : "none"));

	status = DLM_IVLOCKID;
	res = dlm_lookup_lockres(dlm, cnv->name, cnv->namelen);
	if (!res) {
		dlm_error(status);
		goto leave;
	}

	spin_lock(&res->spinlock);
	status = __dlm_lockres_state_to_status(res);
	if (status != DLM_NORMAL) {
		spin_unlock(&res->spinlock);
		dlm_error(status);
		goto leave;
	}
	/* find the requester's lock by (cookie, node) on the grant queue;
	 * take a reference so it survives after the spinlock is dropped */
	list_for_each_entry(tmp_lock, &res->granted, list) {
		if (tmp_lock->ml.cookie == cnv->cookie &&
		    tmp_lock->ml.node == cnv->node_idx) {
			lock = tmp_lock;
			dlm_lock_get(lock);
			break;
		}
	}
	spin_unlock(&res->spinlock);
	if (!lock) {
		status = DLM_IVLOCKID;
		mlog(ML_ERROR, "did not find lock to convert on grant queue! "
			       "cookie=%u:%llu\n",
		     dlm_get_lock_cookie_node(be64_to_cpu(cnv->cookie)),
		     dlm_get_lock_cookie_seq(be64_to_cpu(cnv->cookie)));
		dlm_print_one_lock_resource(res);
		goto leave;
	}

	lksb = lock->lksb;

	/* stage the lvb transfer requested by the remote node; the flags
	 * must not already be set (would indicate a double convert) */
	if (flags & LKM_PUT_LVB) {
		BUG_ON(lksb->flags & (DLM_LKSB_PUT_LVB|DLM_LKSB_GET_LVB));
		lksb->flags |= DLM_LKSB_PUT_LVB;
		memcpy(&lksb->lvb[0], &cnv->lvb[0], DLM_LVB_LEN);
	} else if (flags & LKM_GET_LVB) {
		BUG_ON(lksb->flags & (DLM_LKSB_PUT_LVB|DLM_LKSB_GET_LVB));
		lksb->flags |= DLM_LKSB_GET_LVB;
	}

	spin_lock(&res->spinlock);
	/* re-check the lockres state: it may have changed while unlocked */
	status = __dlm_lockres_state_to_status(res);
	if (status == DLM_NORMAL) {
		__dlm_lockres_reserve_ast(res);
		ast_reserved = 1;
		res->state |= DLM_LOCK_RES_IN_PROGRESS;
		status = __dlmconvert_master(dlm, res, lock, flags,
					     cnv->requested_type,
					     &call_ast, &kick_thread);
		res->state &= ~DLM_LOCK_RES_IN_PROGRESS;
		wake = 1;
	}
	spin_unlock(&res->spinlock);
	if (wake)
		wake_up(&res->wq);

	if (status != DLM_NORMAL) {
		if (status != DLM_NOTQUEUED)
			dlm_error(status);
		/* conversion failed: undo the staged lvb flags */
		lksb->flags &= ~(DLM_LKSB_GET_LVB|DLM_LKSB_PUT_LVB);
	}

leave:
	/* drop our lookup reference; the lock stays pinned by its queue
	 * membership, so the ast-queueing below is still safe */
	if (lock)
		dlm_lock_put(lock);

	/* either queue the ast or release the reservation taken above */
	if (call_ast)
		dlm_queue_ast(dlm, lock);
	else if (ast_reserved)
		dlm_lockres_release_ast(dlm, res);

	if (kick_thread)
		dlm_kick_thread(dlm, res);

	if (res)
		dlm_lockres_put(res);

	dlm_put(dlm);

	return status;
}
573