/*
 * dlmrecovery.c
 *
 * OCFS2 distributed lock manager: recovery of lock state held by dead nodes.
 */
#include <linux/module.h>
#include <linux/fs.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/highmem.h>
#include <linux/init.h>
#include <linux/sysctl.h>
#include <linux/random.h>
#include <linux/blkdev.h>
#include <linux/socket.h>
#include <linux/inet.h>
#include <linux/timer.h>
#include <linux/kthread.h>
#include <linux/delay.h>

#include "cluster/heartbeat.h"
#include "cluster/nodemanager.h"
#include "cluster/tcp.h"

#include "dlmapi.h"
#include "dlmcommon.h"
#include "dlmdomain.h"

#define MLOG_MASK_PREFIX (ML_DLM|ML_DLM_RECOVERY)
#include "cluster/masklog.h"

static void dlm_do_local_recovery_cleanup(struct dlm_ctxt *dlm, u8 dead_node);

static int dlm_recovery_thread(void *data);
void dlm_complete_recovery_thread(struct dlm_ctxt *dlm);
int dlm_launch_recovery_thread(struct dlm_ctxt *dlm);
void dlm_kick_recovery_thread(struct dlm_ctxt *dlm);
static int dlm_do_recovery(struct dlm_ctxt *dlm);

static int dlm_pick_recovery_master(struct dlm_ctxt *dlm);
static int dlm_remaster_locks(struct dlm_ctxt *dlm, u8 dead_node);
static int dlm_init_recovery_area(struct dlm_ctxt *dlm, u8 dead_node);
static int dlm_request_all_locks(struct dlm_ctxt *dlm,
                                 u8 request_from, u8 dead_node);
static void dlm_destroy_recovery_area(struct dlm_ctxt *dlm, u8 dead_node);

static inline int dlm_num_locks_in_lockres(struct dlm_lock_resource *res);
static void dlm_init_migratable_lockres(struct dlm_migratable_lockres *mres,
                                        const char *lockname, int namelen,
                                        int total_locks, u64 cookie,
                                        u8 flags, u8 master);
static int dlm_send_mig_lockres_msg(struct dlm_ctxt *dlm,
                                    struct dlm_migratable_lockres *mres,
                                    u8 send_to,
                                    struct dlm_lock_resource *res,
                                    int total_locks);
static int dlm_process_recovery_data(struct dlm_ctxt *dlm,
                                     struct dlm_lock_resource *res,
                                     struct dlm_migratable_lockres *mres);
static int dlm_send_finalize_reco_message(struct dlm_ctxt *dlm);
static int dlm_send_all_done_msg(struct dlm_ctxt *dlm,
                                 u8 dead_node, u8 send_to);
static int dlm_send_begin_reco_message(struct dlm_ctxt *dlm, u8 dead_node);
static void dlm_move_reco_locks_to_list(struct dlm_ctxt *dlm,
                                        struct list_head *list, u8 dead_node);
static void dlm_finish_local_lockres_recovery(struct dlm_ctxt *dlm,
                                              u8 dead_node, u8 new_master);
static void dlm_reco_ast(void *astdata);
static void dlm_reco_bast(void *astdata, int blocked_type);
static void dlm_reco_unlock_ast(void *astdata, enum dlm_status st);
static void dlm_request_all_locks_worker(struct dlm_work_item *item,
                                         void *data);
static void dlm_mig_lockres_worker(struct dlm_work_item *item, void *data);
static int dlm_lockres_master_requery(struct dlm_ctxt *dlm,
                                      struct dlm_lock_resource *res,
                                      u8 *real_master);

static u64 dlm_get_next_mig_cookie(void);

static DEFINE_SPINLOCK(dlm_reco_state_lock);
static DEFINE_SPINLOCK(dlm_mig_cookie_lock);
static u64 dlm_mig_cookie = 1;

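/*
 * A lockres that does not fit in a single network message is sent as a
 * series of dlm_migratable_lockres chunks.  Every chunk of one transfer
 * carries the same migration cookie so the receiver can tie the pieces
 * together; the counter below hands out a fresh cookie per transfer.
 */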
static u64 dlm_get_next_mig_cookie(void)
{
        u64 c;
        spin_lock(&dlm_mig_cookie_lock);
        c = dlm_mig_cookie;
        if (dlm_mig_cookie == (~0ULL))
                dlm_mig_cookie = 1;
        else
                dlm_mig_cookie++;
        spin_unlock(&dlm_mig_cookie_lock);
        return c;
}

static inline void dlm_set_reco_dead_node(struct dlm_ctxt *dlm,
                                          u8 dead_node)
{
        assert_spin_locked(&dlm->spinlock);
        if (dlm->reco.dead_node != dead_node)
                mlog(0, "%s: changing dead_node from %u to %u\n",
                     dlm->name, dlm->reco.dead_node, dead_node);
        dlm->reco.dead_node = dead_node;
}

static inline void dlm_set_reco_master(struct dlm_ctxt *dlm,
                                       u8 master)
{
        assert_spin_locked(&dlm->spinlock);
        mlog(0, "%s: changing new_master from %u to %u\n",
             dlm->name, dlm->reco.new_master, master);
        dlm->reco.new_master = master;
}

static inline void __dlm_reset_recovery(struct dlm_ctxt *dlm)
{
        assert_spin_locked(&dlm->spinlock);
        clear_bit(dlm->reco.dead_node, dlm->recovery_map);
        dlm_set_reco_dead_node(dlm, O2NM_INVALID_NODE_NUM);
        dlm_set_reco_master(dlm, O2NM_INVALID_NODE_NUM);
}

static inline void dlm_reset_recovery(struct dlm_ctxt *dlm)
{
        spin_lock(&dlm->spinlock);
        __dlm_reset_recovery(dlm);
        spin_unlock(&dlm->spinlock);
}

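/* Run every work item queued on the domain's work list.  The list is
 * spliced to a private list under work_lock so new items can keep
 * arriving while the handlers run; each item pinned the dlm_ctxt when it
 * was queued, and that reference is dropped once its handler returns. */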
void dlm_dispatch_work(struct work_struct *work)
{
        struct dlm_ctxt *dlm =
                container_of(work, struct dlm_ctxt, dispatched_work);
        LIST_HEAD(tmp_list);
        struct dlm_work_item *item, *next;
        dlm_workfunc_t *workfunc;
        int tot = 0;

        spin_lock(&dlm->work_lock);
        list_splice_init(&dlm->work_list, &tmp_list);
        spin_unlock(&dlm->work_lock);

        list_for_each_entry(item, &tmp_list, list) {
                tot++;
        }
        mlog(0, "%s: work thread has %d work items\n", dlm->name, tot);

        list_for_each_entry_safe(item, next, &tmp_list, list) {
                workfunc = item->func;
                list_del_init(&item->list);

                BUG_ON(item->dlm != dlm);

                workfunc(item, item->data);

                dlm_put(dlm);
                kfree(item);
        }
}

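/* Wake the recovery thread so it re-checks the recovery map.  Safe to
 * call from any context; if nothing needs recovering the thread simply
 * goes back to sleep. */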
void dlm_kick_recovery_thread(struct dlm_ctxt *dlm)
{
        wake_up(&dlm->dlm_reco_thread_wq);
}

int dlm_launch_recovery_thread(struct dlm_ctxt *dlm)
{
        mlog(0, "starting dlm recovery thread...\n");

        dlm->dlm_reco_thread_task = kthread_run(dlm_recovery_thread, dlm,
                                                "dlm_reco_thread");
        if (IS_ERR(dlm->dlm_reco_thread_task)) {
                mlog_errno(PTR_ERR(dlm->dlm_reco_thread_task));
                dlm->dlm_reco_thread_task = NULL;
                return -EINVAL;
        }

        return 0;
}

void dlm_complete_recovery_thread(struct dlm_ctxt *dlm)
{
        if (dlm->dlm_reco_thread_task) {
                mlog(0, "waiting for dlm recovery thread to exit\n");
                kthread_stop(dlm->dlm_reco_thread_task);
                dlm->dlm_reco_thread_task = NULL;
        }
}
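/*
 * Recovery, in brief: when a node is declared dead, every surviving node
 * marks it in its recovery_map and the recovery threads race to elect a
 * recovery master via the $RECOVERY lock.  The master asks every live
 * node for the locks it held against resources mastered by the dead
 * node, remasters those resources locally, then broadcasts a finalize
 * message so the other nodes can clear their recovery state.
 */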
static void dlm_print_reco_node_status(struct dlm_ctxt *dlm)
{
        struct dlm_reco_node_data *ndata;
        struct dlm_lock_resource *res;

        mlog(ML_NOTICE, "%s(%d): recovery info, state=%s, dead=%u, master=%u\n",
             dlm->name, task_pid_nr(dlm->dlm_reco_thread_task),
             dlm->reco.state & DLM_RECO_STATE_ACTIVE ? "ACTIVE" : "inactive",
             dlm->reco.dead_node, dlm->reco.new_master);

        list_for_each_entry(ndata, &dlm->reco.node_data, list) {
                char *st = "unknown";
                switch (ndata->state) {
                case DLM_RECO_NODE_DATA_INIT:
                        st = "init";
                        break;
                case DLM_RECO_NODE_DATA_REQUESTING:
                        st = "requesting";
                        break;
                case DLM_RECO_NODE_DATA_DEAD:
                        st = "dead";
                        break;
                case DLM_RECO_NODE_DATA_RECEIVING:
                        st = "receiving";
                        break;
                case DLM_RECO_NODE_DATA_REQUESTED:
                        st = "requested";
                        break;
                case DLM_RECO_NODE_DATA_DONE:
                        st = "done";
                        break;
                case DLM_RECO_NODE_DATA_FINALIZE_SENT:
                        st = "finalize-sent";
                        break;
                default:
                        st = "bad";
                        break;
                }
                mlog(ML_NOTICE, "%s: reco state, node %u, state=%s\n",
                     dlm->name, ndata->node_num, st);
        }
        list_for_each_entry(res, &dlm->reco.resources, recovering) {
                mlog(ML_NOTICE, "%s: lockres %.*s on recovering list\n",
                     dlm->name, res->lockname.len, res->lockname.name);
        }
}

#define DLM_RECO_THREAD_TIMEOUT_MS (5 * 1000)

static int dlm_recovery_thread(void *data)
{
        int status;
        struct dlm_ctxt *dlm = data;
        unsigned long timeout = msecs_to_jiffies(DLM_RECO_THREAD_TIMEOUT_MS);

        mlog(0, "dlm thread running for %s...\n", dlm->name);

        while (!kthread_should_stop()) {
                if (dlm_domain_fully_joined(dlm)) {
                        status = dlm_do_recovery(dlm);
                        if (status == -EAGAIN)
                                continue;
                        if (status < 0)
                                mlog_errno(status);
                }

                wait_event_interruptible_timeout(dlm->dlm_reco_thread_wq,
                                                 kthread_should_stop(),
                                                 timeout);
        }

        mlog(0, "quitting DLM recovery thread\n");
        return 0;
}

static int dlm_reco_master_ready(struct dlm_ctxt *dlm)
{
        int ready;
        spin_lock(&dlm->spinlock);
        ready = (dlm->reco.new_master != O2NM_INVALID_NODE_NUM);
        spin_unlock(&dlm->spinlock);
        return ready;
}

int dlm_is_node_dead(struct dlm_ctxt *dlm, u8 node)
{
        int dead;
        spin_lock(&dlm->spinlock);
        dead = !test_bit(node, dlm->domain_map);
        spin_unlock(&dlm->spinlock);
        return dead;
}

static int dlm_is_node_recovered(struct dlm_ctxt *dlm, u8 node)
{
        int recovered;
        spin_lock(&dlm->spinlock);
        recovered = !test_bit(node, dlm->recovery_map);
        spin_unlock(&dlm->spinlock);
        return recovered;
}

int dlm_wait_for_node_death(struct dlm_ctxt *dlm, u8 node, int timeout)
{
        if (timeout) {
                mlog(ML_NOTICE, "%s: waiting %dms for notification of "
                     "death of node %u\n", dlm->name, timeout, node);
                wait_event_timeout(dlm->dlm_reco_thread_wq,
                                   dlm_is_node_dead(dlm, node),
                                   msecs_to_jiffies(timeout));
        } else {
                mlog(ML_NOTICE, "%s: waiting indefinitely for notification "
                     "of death of node %u\n", dlm->name, node);
                wait_event(dlm->dlm_reco_thread_wq,
                           dlm_is_node_dead(dlm, node));
        }

        return 0;
}

int dlm_wait_for_node_recovery(struct dlm_ctxt *dlm, u8 node, int timeout)
{
        if (timeout) {
                mlog(0, "%s: waiting %dms for notification of "
                     "recovery of node %u\n", dlm->name, timeout, node);
                wait_event_timeout(dlm->dlm_reco_thread_wq,
                                   dlm_is_node_recovered(dlm, node),
                                   msecs_to_jiffies(timeout));
        } else {
                mlog(0, "%s: waiting indefinitely for notification "
                     "of recovery of node %u\n", dlm->name, node);
                wait_event(dlm->dlm_reco_thread_wq,
                           dlm_is_node_recovered(dlm, node));
        }

        return 0;
}
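/*
 * A domain is "in recovery" from the moment a dead node is noticed until
 * the recovery master has finished remastering its locks.  The lock and
 * unlock paths use dlm_wait_for_recovery() to hold off new work while
 * that is in progress.
 */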
static int dlm_in_recovery(struct dlm_ctxt *dlm)
{
        int in_recovery;
        spin_lock(&dlm->spinlock);
        in_recovery = !!(dlm->reco.state & DLM_RECO_STATE_ACTIVE);
        spin_unlock(&dlm->spinlock);
        return in_recovery;
}

void dlm_wait_for_recovery(struct dlm_ctxt *dlm)
{
        if (dlm_in_recovery(dlm)) {
                mlog(0, "%s: reco thread %d in recovery: "
                     "state=%d, master=%u, dead=%u\n",
                     dlm->name, task_pid_nr(dlm->dlm_reco_thread_task),
                     dlm->reco.state, dlm->reco.new_master,
                     dlm->reco.dead_node);
        }
        wait_event(dlm->reco.event, !dlm_in_recovery(dlm));
}

static void dlm_begin_recovery(struct dlm_ctxt *dlm)
{
        spin_lock(&dlm->spinlock);
        BUG_ON(dlm->reco.state & DLM_RECO_STATE_ACTIVE);
        dlm->reco.state |= DLM_RECO_STATE_ACTIVE;
        spin_unlock(&dlm->spinlock);
}

static void dlm_end_recovery(struct dlm_ctxt *dlm)
{
        spin_lock(&dlm->spinlock);
        BUG_ON(!(dlm->reco.state & DLM_RECO_STATE_ACTIVE));
        dlm->reco.state &= ~DLM_RECO_STATE_ACTIVE;
        spin_unlock(&dlm->spinlock);
        wake_up(&dlm->reco.event);
}

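/*
 * One pass of the recovery state machine.  Returns -EAGAIN when this node
 * mastered the recovery attempt (successfully or not) so the recovery
 * thread loops straight back in, and 0 when there is nothing to do or
 * another node is mastering the recovery.
 */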
static int dlm_do_recovery(struct dlm_ctxt *dlm)
{
        int status = 0;
        int ret;

        spin_lock(&dlm->spinlock);

        if (dlm->reco.new_master != O2NM_INVALID_NODE_NUM &&
            test_bit(dlm->reco.new_master, dlm->recovery_map)) {
                mlog(0, "new master %u died while recovering %u!\n",
                     dlm->reco.new_master, dlm->reco.dead_node);
                dlm_set_reco_master(dlm, O2NM_INVALID_NODE_NUM);
        }

        if (dlm->reco.dead_node == O2NM_INVALID_NODE_NUM) {
                int bit;

                bit = find_next_bit(dlm->recovery_map, O2NM_MAX_NODES, 0);
                if (bit >= O2NM_MAX_NODES || bit < 0)
                        dlm_set_reco_dead_node(dlm, O2NM_INVALID_NODE_NUM);
                else
                        dlm_set_reco_dead_node(dlm, bit);
        } else if (!test_bit(dlm->reco.dead_node, dlm->recovery_map)) {
                mlog(ML_ERROR, "dead_node %u no longer in recovery map!\n",
                     dlm->reco.dead_node);
                dlm_set_reco_dead_node(dlm, O2NM_INVALID_NODE_NUM);
        }

        if (dlm->reco.dead_node == O2NM_INVALID_NODE_NUM) {
                spin_unlock(&dlm->spinlock);
                return 0;
        }
        mlog(0, "%s(%d):recovery thread found node %u in the recovery map!\n",
             dlm->name, task_pid_nr(dlm->dlm_reco_thread_task),
             dlm->reco.dead_node);
        spin_unlock(&dlm->spinlock);

        dlm_begin_recovery(dlm);

        if (dlm->reco.new_master == dlm->node_num)
                goto master_here;

        if (dlm->reco.new_master == O2NM_INVALID_NODE_NUM) {
                ret = dlm_pick_recovery_master(dlm);
                if (!ret)
                        goto master_here;
                mlog(0, "another node will master this recovery session.\n");
        }
        mlog(0, "dlm=%s (%d), new_master=%u, this node=%u, dead_node=%u\n",
             dlm->name, task_pid_nr(dlm->dlm_reco_thread_task), dlm->reco.new_master,
             dlm->node_num, dlm->reco.dead_node);

        dlm_end_recovery(dlm);

        return 0;

master_here:
        mlog(ML_NOTICE, "(%d) Node %u is the Recovery Master for the Dead Node "
             "%u for Domain %s\n", task_pid_nr(dlm->dlm_reco_thread_task),
             dlm->node_num, dlm->reco.dead_node, dlm->name);

        status = dlm_remaster_locks(dlm, dlm->reco.dead_node);
        if (status < 0) {
                mlog(ML_ERROR, "error %d remastering locks for node %u, "
                     "retrying.\n", status, dlm->reco.dead_node);
                msleep(100);
        } else {
                mlog(0, "DONE mastering recovery of %s:%u here(this=%u)!\n",
                     dlm->name, dlm->reco.dead_node, dlm->node_num);
                dlm_reset_recovery(dlm);
        }
        dlm_end_recovery(dlm);

        return -EAGAIN;
}

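/*
 * Run by the node that won the recovery master election: ask every live
 * node for the locks it holds against resources mastered by dead_node,
 * take over mastery of those resources locally, then tell everyone that
 * recovery of that node is finished.
 */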
static int dlm_remaster_locks(struct dlm_ctxt *dlm, u8 dead_node)
{
        int status = 0;
        struct dlm_reco_node_data *ndata;
        int all_nodes_done;
        int destroy = 0;
        int pass = 0;

        do {
                status = dlm_init_recovery_area(dlm, dead_node);
                if (status < 0) {
                        mlog(ML_ERROR, "%s: failed to alloc recovery area, "
                             "retrying\n", dlm->name);
                        msleep(1000);
                }
        } while (status != 0);

        list_for_each_entry(ndata, &dlm->reco.node_data, list) {
                BUG_ON(ndata->state != DLM_RECO_NODE_DATA_INIT);
                ndata->state = DLM_RECO_NODE_DATA_REQUESTING;

                mlog(0, "requesting lock info from node %u\n",
                     ndata->node_num);

                if (ndata->node_num == dlm->node_num) {
                        ndata->state = DLM_RECO_NODE_DATA_DONE;
                        continue;
                }

                do {
                        status = dlm_request_all_locks(dlm, ndata->node_num,
                                                       dead_node);
                        if (status < 0) {
                                mlog_errno(status);
                                if (dlm_is_host_down(status)) {
                                        status = 0;
                                        ndata->state = DLM_RECO_NODE_DATA_DEAD;
                                        wait_event_timeout(dlm->dlm_reco_thread_wq,
                                                           dlm_is_node_dead(dlm,
                                                                ndata->node_num),
                                                           msecs_to_jiffies(1000));
                                        mlog(0, "waited 1 sec for %u, "
                                             "dead? %s\n", ndata->node_num,
                                             dlm_is_node_dead(dlm, ndata->node_num) ?
                                             "yes" : "no");
                                } else {
                                        mlog(0, "%s: node %u returned "
                                             "%d during recovery, retrying "
                                             "after a short wait\n",
                                             dlm->name, ndata->node_num,
                                             status);
                                        msleep(100);
                                }
                        }
                } while (status != 0);

                spin_lock(&dlm_reco_state_lock);
                switch (ndata->state) {
                case DLM_RECO_NODE_DATA_INIT:
                case DLM_RECO_NODE_DATA_FINALIZE_SENT:
                case DLM_RECO_NODE_DATA_REQUESTED:
                        BUG();
                        break;
                case DLM_RECO_NODE_DATA_DEAD:
                        mlog(0, "node %u died after requesting "
                             "recovery info for node %u\n",
                             ndata->node_num, dead_node);
                        break;
                case DLM_RECO_NODE_DATA_REQUESTING:
                        ndata->state = DLM_RECO_NODE_DATA_REQUESTED;
                        mlog(0, "now receiving recovery data from "
                             "node %u for dead node %u\n",
                             ndata->node_num, dead_node);
                        break;
                case DLM_RECO_NODE_DATA_RECEIVING:
                        mlog(0, "already receiving recovery data from "
                             "node %u for dead node %u\n",
                             ndata->node_num, dead_node);
                        break;
                case DLM_RECO_NODE_DATA_DONE:
                        mlog(0, "already DONE receiving recovery data "
                             "from node %u for dead node %u\n",
                             ndata->node_num, dead_node);
                        break;
                }
                spin_unlock(&dlm_reco_state_lock);
        }

        mlog(0, "done requesting all lock info\n");

        while (1) {
                all_nodes_done = 1;
                spin_lock(&dlm_reco_state_lock);
                list_for_each_entry(ndata, &dlm->reco.node_data, list) {
                        mlog(0, "checking recovery state of node %u\n",
                             ndata->node_num);
                        switch (ndata->state) {
                        case DLM_RECO_NODE_DATA_INIT:
                        case DLM_RECO_NODE_DATA_REQUESTING:
                                mlog(ML_ERROR, "bad ndata state for "
                                     "node %u: state=%d\n",
                                     ndata->node_num, ndata->state);
                                BUG();
                                break;
                        case DLM_RECO_NODE_DATA_DEAD:
                                mlog(0, "node %u died after "
                                     "requesting recovery info for "
                                     "node %u\n", ndata->node_num,
                                     dead_node);
                                break;
                        case DLM_RECO_NODE_DATA_RECEIVING:
                        case DLM_RECO_NODE_DATA_REQUESTED:
                                mlog(0, "%s: node %u still in state %s\n",
                                     dlm->name, ndata->node_num,
                                     ndata->state == DLM_RECO_NODE_DATA_RECEIVING ?
                                     "receiving" : "requested");
                                all_nodes_done = 0;
                                break;
                        case DLM_RECO_NODE_DATA_DONE:
                                mlog(0, "%s: node %u state is done\n",
                                     dlm->name, ndata->node_num);
                                break;
                        case DLM_RECO_NODE_DATA_FINALIZE_SENT:
                                mlog(0, "%s: node %u state is finalize\n",
                                     dlm->name, ndata->node_num);
                                break;
                        }
                }
                spin_unlock(&dlm_reco_state_lock);

                mlog(0, "pass #%d, all_nodes_done?: %s\n", ++pass,
                     all_nodes_done ? "yes" : "no");
                if (all_nodes_done) {
                        int ret;

                        mlog(0, "all nodes are done! send finalize\n");
                        ret = dlm_send_finalize_reco_message(dlm);
                        if (ret < 0)
                                mlog_errno(ret);

                        spin_lock(&dlm->spinlock);
                        dlm_finish_local_lockres_recovery(dlm, dead_node,
                                                          dlm->node_num);
                        spin_unlock(&dlm->spinlock);
                        mlog(0, "should be done with recovery!\n");

                        mlog(0, "finishing recovery of %s at %lu, "
                             "dead=%u, this=%u, new=%u\n", dlm->name,
                             jiffies, dlm->reco.dead_node,
                             dlm->node_num, dlm->reco.new_master);
                        destroy = 1;
                        status = 0;
                        dlm_kick_thread(dlm, NULL);
                        break;
                }

                wait_event_interruptible_timeout(dlm->dlm_reco_thread_wq,
                                        kthread_should_stop(),
                                        msecs_to_jiffies(DLM_RECO_THREAD_TIMEOUT_MS));
        }

        if (destroy)
                dlm_destroy_recovery_area(dlm, dead_node);

        mlog_exit(status);
        return status;
}

static int dlm_init_recovery_area(struct dlm_ctxt *dlm, u8 dead_node)
{
        int num = 0;
        struct dlm_reco_node_data *ndata;

        spin_lock(&dlm->spinlock);
        memcpy(dlm->reco.node_map, dlm->domain_map, sizeof(dlm->domain_map));
        spin_unlock(&dlm->spinlock);

        while (1) {
                num = find_next_bit(dlm->reco.node_map, O2NM_MAX_NODES, num);
                if (num >= O2NM_MAX_NODES)
                        break;
                BUG_ON(num == dead_node);

                ndata = kzalloc(sizeof(*ndata), GFP_NOFS);
                if (!ndata) {
                        dlm_destroy_recovery_area(dlm, dead_node);
                        return -ENOMEM;
                }
                ndata->node_num = num;
                ndata->state = DLM_RECO_NODE_DATA_INIT;
                spin_lock(&dlm_reco_state_lock);
                list_add_tail(&ndata->list, &dlm->reco.node_data);
                spin_unlock(&dlm_reco_state_lock);
                num++;
        }

        return 0;
}

static void dlm_destroy_recovery_area(struct dlm_ctxt *dlm, u8 dead_node)
{
        struct dlm_reco_node_data *ndata, *next;
        LIST_HEAD(tmplist);

        spin_lock(&dlm_reco_state_lock);
        list_splice_init(&dlm->reco.node_data, &tmplist);
        spin_unlock(&dlm_reco_state_lock);

        list_for_each_entry_safe(ndata, next, &tmplist, list) {
                list_del_init(&ndata->list);
                kfree(ndata);
        }
}

static int dlm_request_all_locks(struct dlm_ctxt *dlm, u8 request_from,
                                 u8 dead_node)
{
        struct dlm_lock_request lr;
        enum dlm_status ret;

        mlog(0, "\n");

        mlog(0, "dlm_request_all_locks: dead node is %u, sending request "
             "to %u\n", dead_node, request_from);

        memset(&lr, 0, sizeof(lr));
        lr.node_idx = dlm->node_num;
        lr.dead_node = dead_node;

        ret = DLM_NOLOCKMGR;
        ret = o2net_send_message(DLM_LOCK_REQUEST_MSG, dlm->key,
                                 &lr, sizeof(lr), request_from, NULL);

        if (ret < 0)
                mlog(ML_ERROR, "Error %d when sending message %u (key "
                     "0x%x) to node %u\n", ret, DLM_LOCK_REQUEST_MSG,
                     dlm->key, request_from);

        return ret;
}

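/*
 * Handler for DLM_LOCK_REQUEST_MSG: the recovery master wants every lock
 * this node holds against resources owned by the dead node.  Gathering
 * and sending that state can block, so the work is pushed to the dlm
 * worker thread and this handler returns immediately.
 */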
int dlm_request_all_locks_handler(struct o2net_msg *msg, u32 len, void *data,
                                  void **ret_data)
{
        struct dlm_ctxt *dlm = data;
        struct dlm_lock_request *lr = (struct dlm_lock_request *)msg->buf;
        char *buf = NULL;
        struct dlm_work_item *item = NULL;

        if (!dlm_grab(dlm))
                return -EINVAL;

        if (lr->dead_node != dlm->reco.dead_node) {
                mlog(ML_ERROR, "%s: node %u sent dead_node=%u, but local "
                     "dead_node is %u\n", dlm->name, lr->node_idx,
                     lr->dead_node, dlm->reco.dead_node);
                dlm_print_reco_node_status(dlm);
                dlm_put(dlm);
                return -ENOMEM;
        }
        BUG_ON(lr->dead_node != dlm->reco.dead_node);

        item = kzalloc(sizeof(*item), GFP_NOFS);
        if (!item) {
                dlm_put(dlm);
                return -ENOMEM;
        }

        buf = (char *) __get_free_page(GFP_NOFS);
        if (!buf) {
                kfree(item);
                dlm_put(dlm);
                return -ENOMEM;
        }

        dlm_grab(dlm);
        dlm_init_work_item(dlm, item, dlm_request_all_locks_worker, buf);
        item->u.ral.reco_master = lr->node_idx;
        item->u.ral.dead_node = lr->dead_node;
        spin_lock(&dlm->work_lock);
        list_add_tail(&item->list, &dlm->work_list);
        spin_unlock(&dlm->work_lock);
        queue_work(dlm->dlm_worker, &dlm->dispatched_work);

        dlm_put(dlm);
        return 0;
}

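/* Worker side of the request above: runs in the dlm worker thread and
 * streams every recovering lockres to the recovery master, followed by a
 * DATA DONE message unless the master went down mid-stream. */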
static void dlm_request_all_locks_worker(struct dlm_work_item *item, void *data)
{
        struct dlm_migratable_lockres *mres;
        struct dlm_lock_resource *res;
        struct dlm_ctxt *dlm;
        LIST_HEAD(resources);
        int ret;
        u8 dead_node, reco_master;
        int skip_all_done = 0;

        dlm = item->dlm;
        dead_node = item->u.ral.dead_node;
        reco_master = item->u.ral.reco_master;
        mres = (struct dlm_migratable_lockres *)data;

        mlog(0, "%s: recovery worker started, dead=%u, master=%u\n",
             dlm->name, dead_node, reco_master);

        if (dead_node != dlm->reco.dead_node ||
            reco_master != dlm->reco.new_master) {
                if (dlm->reco.new_master == O2NM_INVALID_NODE_NUM) {
                        mlog(ML_NOTICE, "%s: will not send recovery state, "
                             "recovery master %u died, thread=(dead=%u,mas=%u)"
                             " current=(dead=%u,mas=%u)\n", dlm->name,
                             reco_master, dead_node, reco_master,
                             dlm->reco.dead_node, dlm->reco.new_master);
                } else {
                        mlog(ML_NOTICE, "%s: reco state invalid: reco(dead=%u, "
                             "master=%u), request(dead=%u, master=%u)\n",
                             dlm->name, dlm->reco.dead_node,
                             dlm->reco.new_master, dead_node, reco_master);
                }
                goto leave;
        }

        dlm_move_reco_locks_to_list(dlm, &resources, dead_node);

        list_for_each_entry(res, &resources, recovering) {
                ret = dlm_send_one_lockres(dlm, res, mres, reco_master,
                                           DLM_MRES_RECOVERY);
                if (ret < 0) {
                        mlog(ML_ERROR, "%s: node %u went down while sending "
                             "recovery state for dead node %u, ret=%d\n", dlm->name,
                             reco_master, dead_node, ret);
                        skip_all_done = 1;
                        break;
                }
        }

        spin_lock(&dlm->spinlock);
        list_splice_init(&resources, &dlm->reco.resources);
        spin_unlock(&dlm->spinlock);

        if (!skip_all_done) {
                ret = dlm_send_all_done_msg(dlm, dead_node, reco_master);
                if (ret < 0) {
                        mlog(ML_ERROR, "%s: node %u went down while sending "
                             "recovery all-done for dead node %u, ret=%d\n",
                             dlm->name, reco_master, dead_node, ret);
                }
        }
leave:
        free_page((unsigned long)data);
}

static int dlm_send_all_done_msg(struct dlm_ctxt *dlm, u8 dead_node, u8 send_to)
{
        int ret, tmpret;
        struct dlm_reco_data_done done_msg;

        memset(&done_msg, 0, sizeof(done_msg));
        done_msg.node_idx = dlm->node_num;
        done_msg.dead_node = dead_node;
        mlog(0, "sending DATA DONE message to %u, "
             "my node=%u, dead node=%u\n", send_to, done_msg.node_idx,
             done_msg.dead_node);

        ret = o2net_send_message(DLM_RECO_DATA_DONE_MSG, dlm->key, &done_msg,
                                 sizeof(done_msg), send_to, &tmpret);
        if (ret < 0) {
                mlog(ML_ERROR, "Error %d when sending message %u (key "
                     "0x%x) to node %u\n", ret, DLM_RECO_DATA_DONE_MSG,
                     dlm->key, send_to);
                if (!dlm_is_host_down(ret))
                        BUG();
        } else
                ret = tmpret;
        return ret;
}

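/* Handler for DLM_RECO_DATA_DONE_MSG: mark the sending node's recovery
 * state as DONE and poke the recovery thread so it can re-check whether
 * every node has finished sending its lock state. */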
int dlm_reco_data_done_handler(struct o2net_msg *msg, u32 len, void *data,
                               void **ret_data)
{
        struct dlm_ctxt *dlm = data;
        struct dlm_reco_data_done *done = (struct dlm_reco_data_done *)msg->buf;
        struct dlm_reco_node_data *ndata = NULL;
        int ret = -EINVAL;

        if (!dlm_grab(dlm))
                return -EINVAL;

        mlog(0, "got DATA DONE: dead_node=%u, reco.dead_node=%u, "
             "node_idx=%u, this node=%u\n", done->dead_node,
             dlm->reco.dead_node, done->node_idx, dlm->node_num);

        mlog_bug_on_msg((done->dead_node != dlm->reco.dead_node),
                        "Got DATA DONE: dead_node=%u, reco.dead_node=%u, "
                        "node_idx=%u, this node=%u\n", done->dead_node,
                        dlm->reco.dead_node, done->node_idx, dlm->node_num);

        spin_lock(&dlm_reco_state_lock);
        list_for_each_entry(ndata, &dlm->reco.node_data, list) {
                if (ndata->node_num != done->node_idx)
                        continue;

                switch (ndata->state) {
                case DLM_RECO_NODE_DATA_INIT:
                case DLM_RECO_NODE_DATA_DEAD:
                case DLM_RECO_NODE_DATA_FINALIZE_SENT:
                        mlog(ML_ERROR, "bad ndata state for node %u:"
                             " state=%d\n", ndata->node_num,
                             ndata->state);
                        BUG();
                        break;
                case DLM_RECO_NODE_DATA_DONE:
                case DLM_RECO_NODE_DATA_RECEIVING:
                case DLM_RECO_NODE_DATA_REQUESTED:
                case DLM_RECO_NODE_DATA_REQUESTING:
                        mlog(0, "node %u is DONE sending "
                             "recovery data!\n",
                             ndata->node_num);

                        ndata->state = DLM_RECO_NODE_DATA_DONE;
                        ret = 0;
                        break;
                }
        }
        spin_unlock(&dlm_reco_state_lock);

        if (!ret)
                dlm_kick_recovery_thread(dlm);

        if (ret < 0)
                mlog(ML_ERROR, "failed to find recovery node data for node "
                     "%u\n", done->node_idx);
        dlm_put(dlm);

        mlog(0, "leaving reco data done handler, ret=%d\n", ret);
        return ret;
}

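/* Pull every lockres that must be sent to the recovery master off
 * dlm->reco.resources onto the caller's private list.  The $RECOVERY
 * lockres itself is skipped; any lock the dead node held on it is simply
 * dropped here. */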
static void dlm_move_reco_locks_to_list(struct dlm_ctxt *dlm,
                                        struct list_head *list,
                                        u8 dead_node)
{
        struct dlm_lock_resource *res, *next;
        struct dlm_lock *lock;

        spin_lock(&dlm->spinlock);
        list_for_each_entry_safe(res, next, &dlm->reco.resources, recovering) {
                if (dlm_is_recovery_lock(res->lockname.name,
                                         res->lockname.len)) {
                        spin_lock(&res->spinlock);
                        list_for_each_entry(lock, &res->granted, list) {
                                if (lock->ml.node == dead_node) {
                                        mlog(0, "AHA! there was "
                                             "a $RECOVERY lock for dead "
                                             "node %u (%s)!\n",
                                             dead_node, dlm->name);
                                        list_del_init(&lock->list);
                                        dlm_lock_put(lock);
                                        break;
                                }
                        }
                        spin_unlock(&res->spinlock);
                        continue;
                }

                if (res->owner == dead_node) {
                        mlog(0, "found lockres owned by dead node while "
                             "doing recovery for node %u. sending it.\n",
                             dead_node);
                        list_move_tail(&res->recovering, list);
                } else if (res->owner == DLM_LOCK_RES_OWNER_UNKNOWN) {
                        mlog(0, "found UNKNOWN owner while doing recovery "
                             "for node %u. sending it.\n", dead_node);
                        list_move_tail(&res->recovering, list);
                }
        }
        spin_unlock(&dlm->spinlock);
}

static inline int dlm_num_locks_in_lockres(struct dlm_lock_resource *res)
{
        int total_locks = 0;
        struct list_head *iter, *queue = &res->granted;
        int i;

        for (i = 0; i < 3; i++) {
                list_for_each(iter, queue)
                        total_locks++;
                queue++;
        }
        return total_locks;
}
1093
1094
1095static int dlm_send_mig_lockres_msg(struct dlm_ctxt *dlm,
1096 struct dlm_migratable_lockres *mres,
1097 u8 send_to,
1098 struct dlm_lock_resource *res,
1099 int total_locks)
1100{
1101 u64 mig_cookie = be64_to_cpu(mres->mig_cookie);
1102 int mres_total_locks = be32_to_cpu(mres->total_locks);
1103 int sz, ret = 0, status = 0;
1104 u8 orig_flags = mres->flags,
1105 orig_master = mres->master;
1106
1107 BUG_ON(mres->num_locks > DLM_MAX_MIGRATABLE_LOCKS);
1108 if (!mres->num_locks)
1109 return 0;
1110
1111 sz = sizeof(struct dlm_migratable_lockres) +
1112 (mres->num_locks * sizeof(struct dlm_migratable_lock));
1113
1114
1115 orig_flags = mres->flags;
1116 BUG_ON(total_locks > mres_total_locks);
1117 if (total_locks == mres_total_locks)
1118 mres->flags |= DLM_MRES_ALL_DONE;
1119
1120 mlog(0, "%s:%.*s: sending mig lockres (%s) to %u\n",
1121 dlm->name, res->lockname.len, res->lockname.name,
1122 orig_flags & DLM_MRES_MIGRATION ? "migration" : "recovery",
1123 send_to);
1124
1125
1126 ret = o2net_send_message(DLM_MIG_LOCKRES_MSG, dlm->key, mres,
1127 sz, send_to, &status);
1128 if (ret < 0) {
1129
1130
1131 mlog(ML_ERROR, "Error %d when sending message %u (key "
1132 "0x%x) to node %u\n", ret, DLM_MIG_LOCKRES_MSG,
1133 dlm->key, send_to);
1134 } else {
1135
1136 ret = status;
1137 if (ret < 0) {
1138 mlog_errno(ret);
1139
1140 if (ret == -EFAULT) {
1141 mlog(ML_ERROR, "node %u told me to kill "
1142 "myself!\n", send_to);
1143 BUG();
1144 }
1145 }
1146 }
1147
1148
1149 dlm_init_migratable_lockres(mres, res->lockname.name,
1150 res->lockname.len, mres_total_locks,
1151 mig_cookie, orig_flags, orig_master);
1152 return ret;
1153}
1154
1155static void dlm_init_migratable_lockres(struct dlm_migratable_lockres *mres,
1156 const char *lockname, int namelen,
1157 int total_locks, u64 cookie,
1158 u8 flags, u8 master)
1159{
1160
1161 clear_page(mres);
1162 mres->lockname_len = namelen;
1163 memcpy(mres->lockname, lockname, namelen);
1164 mres->num_locks = 0;
1165 mres->total_locks = cpu_to_be32(total_locks);
1166 mres->mig_cookie = cpu_to_be64(cookie);
1167 mres->flags = flags;
1168 mres->master = master;
1169}
1170
1171static void dlm_prepare_lvb_for_migration(struct dlm_lock *lock,
1172 struct dlm_migratable_lockres *mres,
1173 int queue)
1174{
1175 if (!lock->lksb)
1176 return;
1177
1178
1179 if (queue == DLM_BLOCKED_LIST)
1180 return;
1181
1182
1183 if (lock->ml.type != LKM_EXMODE && lock->ml.type != LKM_PRMODE)
1184 return;
1185
1186 if (dlm_lvb_is_empty(mres->lvb)) {
1187 memcpy(mres->lvb, lock->lksb->lvb, DLM_LVB_LEN);
1188 return;
1189 }
1190
1191
1192 if (!memcmp(mres->lvb, lock->lksb->lvb, DLM_LVB_LEN))
1193 return;
1194
1195 mlog(ML_ERROR, "Mismatched lvb in lock cookie=%u:%llu, name=%.*s, "
1196 "node=%u\n",
1197 dlm_get_lock_cookie_node(be64_to_cpu(lock->ml.cookie)),
1198 dlm_get_lock_cookie_seq(be64_to_cpu(lock->ml.cookie)),
1199 lock->lockres->lockname.len, lock->lockres->lockname.name,
1200 lock->ml.node);
1201 dlm_print_one_lock_resource(lock->lockres);
1202 BUG();
1203}
1204
1205
1206
1207static int dlm_add_lock_to_array(struct dlm_lock *lock,
1208 struct dlm_migratable_lockres *mres, int queue)
1209{
1210 struct dlm_migratable_lock *ml;
1211 int lock_num = mres->num_locks;
1212
1213 ml = &(mres->ml[lock_num]);
1214 ml->cookie = lock->ml.cookie;
1215 ml->type = lock->ml.type;
1216 ml->convert_type = lock->ml.convert_type;
1217 ml->highest_blocked = lock->ml.highest_blocked;
1218 ml->list = queue;
1219 if (lock->lksb) {
1220 ml->flags = lock->lksb->flags;
1221 dlm_prepare_lvb_for_migration(lock, mres, queue);
1222 }
1223 ml->node = lock->ml.node;
1224 mres->num_locks++;
1225
1226 if (mres->num_locks == DLM_MAX_MIGRATABLE_LOCKS)
1227 return 1;
1228 return 0;
1229}
1230
1231static void dlm_add_dummy_lock(struct dlm_ctxt *dlm,
1232 struct dlm_migratable_lockres *mres)
1233{
1234 struct dlm_lock dummy;
1235 memset(&dummy, 0, sizeof(dummy));
1236 dummy.ml.cookie = 0;
1237 dummy.ml.type = LKM_IVMODE;
1238 dummy.ml.convert_type = LKM_IVMODE;
1239 dummy.ml.highest_blocked = LKM_IVMODE;
1240 dummy.lksb = NULL;
1241 dummy.ml.node = dlm->node_num;
1242 dlm_add_lock_to_array(&dummy, mres, DLM_BLOCKED_LIST);
1243}
1244
1245static inline int dlm_is_dummy_lock(struct dlm_ctxt *dlm,
1246 struct dlm_migratable_lock *ml,
1247 u8 *nodenum)
1248{
1249 if (unlikely(ml->cookie == 0 &&
1250 ml->type == LKM_IVMODE &&
1251 ml->convert_type == LKM_IVMODE &&
1252 ml->highest_blocked == LKM_IVMODE &&
1253 ml->list == DLM_BLOCKED_LIST)) {
1254 *nodenum = ml->node;
1255 return 1;
1256 }
1257 return 0;
1258}
1259
1260int dlm_send_one_lockres(struct dlm_ctxt *dlm, struct dlm_lock_resource *res,
1261 struct dlm_migratable_lockres *mres,
1262 u8 send_to, u8 flags)
1263{
1264 struct list_head *queue;
1265 int total_locks, i;
1266 u64 mig_cookie = 0;
1267 struct dlm_lock *lock;
1268 int ret = 0;
1269
1270 BUG_ON(!(flags & (DLM_MRES_RECOVERY|DLM_MRES_MIGRATION)));
1271
1272 mlog(0, "sending to %u\n", send_to);
1273
1274 total_locks = dlm_num_locks_in_lockres(res);
1275 if (total_locks > DLM_MAX_MIGRATABLE_LOCKS) {
1276
1277 mlog(0, "argh. lockres has %d locks. this will "
1278 "require more than one network packet to "
1279 "migrate\n", total_locks);
1280 mig_cookie = dlm_get_next_mig_cookie();
1281 }
1282
1283 dlm_init_migratable_lockres(mres, res->lockname.name,
1284 res->lockname.len, total_locks,
1285 mig_cookie, flags, res->owner);
1286
1287 total_locks = 0;
1288 for (i=DLM_GRANTED_LIST; i<=DLM_BLOCKED_LIST; i++) {
1289 queue = dlm_list_idx_to_ptr(res, i);
1290 list_for_each_entry(lock, queue, list) {
1291
1292 total_locks++;
1293 if (!dlm_add_lock_to_array(lock, mres, i))
1294 continue;
1295
1296
1297
1298 ret = dlm_send_mig_lockres_msg(dlm, mres, send_to,
1299 res, total_locks);
1300 if (ret < 0)
1301 goto error;
1302 }
1303 }
1304 if (total_locks == 0) {
1305
1306 mlog(0, "%s:%.*s: sending dummy lock to %u, %s\n",
1307 dlm->name, res->lockname.len, res->lockname.name,
1308 send_to, flags & DLM_MRES_RECOVERY ? "recovery" :
1309 "migration");
1310 dlm_add_dummy_lock(dlm, mres);
1311 }
1312
1313 ret = dlm_send_mig_lockres_msg(dlm, mres, send_to, res, total_locks);
1314 if (ret < 0)
1315 goto error;
1316 return ret;
1317
1318error:
1319 mlog(ML_ERROR, "%s: dlm_send_mig_lockres_msg returned %d\n",
1320 dlm->name, ret);
1321 if (!dlm_is_host_down(ret))
1322 BUG();
1323 mlog(0, "%s: node %u went down while sending %s "
1324 "lockres %.*s\n", dlm->name, send_to,
1325 flags & DLM_MRES_RECOVERY ? "recovery" : "migration",
1326 res->lockname.len, res->lockname.name);
1327 return ret;
1328}
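/*
 * Handler for DLM_MIG_LOCKRES_MSG: a recovery master (or migration
 * source) is pushing lock state for one lockres to this node.  The
 * lockres is looked up or created and flagged RECOVERING/MIGRATING here,
 * but the per-lock processing happens later in dlm_mig_lockres_worker so
 * this handler never blocks the network thread.
 */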
1345int dlm_mig_lockres_handler(struct o2net_msg *msg, u32 len, void *data,
1346 void **ret_data)
1347{
1348 struct dlm_ctxt *dlm = data;
1349 struct dlm_migratable_lockres *mres =
1350 (struct dlm_migratable_lockres *)msg->buf;
1351 int ret = 0;
1352 u8 real_master;
1353 u8 extra_refs = 0;
1354 char *buf = NULL;
1355 struct dlm_work_item *item = NULL;
1356 struct dlm_lock_resource *res = NULL;
1357
1358 if (!dlm_grab(dlm))
1359 return -EINVAL;
1360
1361 BUG_ON(!(mres->flags & (DLM_MRES_RECOVERY|DLM_MRES_MIGRATION)));
1362
1363 real_master = mres->master;
1364 if (real_master == DLM_LOCK_RES_OWNER_UNKNOWN) {
1365
1366 BUG_ON(!(mres->flags & DLM_MRES_RECOVERY));
1367 }
1368
1369 mlog(0, "%s message received from node %u\n",
1370 (mres->flags & DLM_MRES_RECOVERY) ?
1371 "recovery" : "migration", mres->master);
1372 if (mres->flags & DLM_MRES_ALL_DONE)
1373 mlog(0, "all done flag. all lockres data received!\n");
1374
1375 ret = -ENOMEM;
1376 buf = kmalloc(be16_to_cpu(msg->data_len), GFP_NOFS);
1377 item = kzalloc(sizeof(*item), GFP_NOFS);
1378 if (!buf || !item)
1379 goto leave;
1380
1381
1382
1383
1384 res = dlm_lookup_lockres(dlm, mres->lockname, mres->lockname_len);
1385 if (res) {
1386
1387
1388 spin_lock(&res->spinlock);
1389 if (mres->flags & DLM_MRES_RECOVERY) {
1390 res->state |= DLM_LOCK_RES_RECOVERING;
1391 } else {
1392 if (res->state & DLM_LOCK_RES_MIGRATING) {
1393
1394
1395 mlog(0, "lock %.*s is already migrating\n",
1396 mres->lockname_len,
1397 mres->lockname);
1398 } else if (res->state & DLM_LOCK_RES_RECOVERING) {
1399
1400 mlog(ML_ERROR, "node is attempting to migrate "
1401 "lock %.*s, but marked as recovering!\n",
1402 mres->lockname_len, mres->lockname);
1403 ret = -EFAULT;
1404 spin_unlock(&res->spinlock);
1405 goto leave;
1406 }
1407 res->state |= DLM_LOCK_RES_MIGRATING;
1408 }
1409 spin_unlock(&res->spinlock);
1410 } else {
1411
1412
1413 res = dlm_new_lockres(dlm, mres->lockname, mres->lockname_len);
1414 if (!res)
1415 goto leave;
1416
1417
1418
1419 dlm_lockres_get(res);
1420
1421
1422 if (mres->flags & DLM_MRES_RECOVERY)
1423 res->state |= DLM_LOCK_RES_RECOVERING;
1424 else
1425 res->state |= DLM_LOCK_RES_MIGRATING;
1426
1427 spin_lock(&dlm->spinlock);
1428 __dlm_insert_lockres(dlm, res);
1429 spin_unlock(&dlm->spinlock);
1430
1431
1432
1433
1434 dlm_lockres_get(res);
1435
1436
1437
1438
1439
1440
1441
1442
1443
1444
1445 extra_refs++;
1446
1447
1448
1449 spin_lock(&res->spinlock);
1450 res->state &= ~DLM_LOCK_RES_IN_PROGRESS;
1451 spin_unlock(&res->spinlock);
1452 wake_up(&res->wq);
1453 }
1454
1455
1456
1457
1458 ret = 0;
1459 spin_lock(&res->spinlock);
1460
1461
1462 dlm_lockres_grab_inflight_ref(dlm, res);
1463 if (mres->master == DLM_LOCK_RES_OWNER_UNKNOWN) {
1464
1465 BUG_ON(!(mres->flags & DLM_MRES_RECOVERY));
1466 mlog(0, "recovery has passed me a lockres with an "
1467 "unknown owner.. will need to requery: "
1468 "%.*s\n", mres->lockname_len, mres->lockname);
1469 } else {
1470
1471
1472 dlm_change_lockres_owner(dlm, res, dlm->node_num);
1473 }
1474 spin_unlock(&res->spinlock);
1475
1476
1477 dlm_grab(dlm);
1478 memcpy(buf, msg->buf, be16_to_cpu(msg->data_len));
1479 dlm_init_work_item(dlm, item, dlm_mig_lockres_worker, buf);
1480 item->u.ml.lockres = res;
1481 item->u.ml.real_master = real_master;
1482 item->u.ml.extra_ref = extra_refs;
1483 spin_lock(&dlm->work_lock);
1484 list_add_tail(&item->list, &dlm->work_list);
1485 spin_unlock(&dlm->work_lock);
1486 queue_work(dlm->dlm_worker, &dlm->dispatched_work);
1487
1488leave:
1489
1490 if (extra_refs)
1491 dlm_lockres_put(res);
1492
1493 dlm_put(dlm);
1494 if (ret < 0) {
1495 if (buf)
1496 kfree(buf);
1497 if (item)
1498 kfree(item);
1499 }
1500
1501 mlog_exit(ret);
1502 return ret;
1503}
1504
1505
1506static void dlm_mig_lockres_worker(struct dlm_work_item *item, void *data)
1507{
1508 struct dlm_ctxt *dlm;
1509 struct dlm_migratable_lockres *mres;
1510 int ret = 0;
1511 struct dlm_lock_resource *res;
1512 u8 real_master;
1513 u8 extra_ref;
1514
1515 dlm = item->dlm;
1516 mres = (struct dlm_migratable_lockres *)data;
1517
1518 res = item->u.ml.lockres;
1519 real_master = item->u.ml.real_master;
1520 extra_ref = item->u.ml.extra_ref;
1521
1522 if (real_master == DLM_LOCK_RES_OWNER_UNKNOWN) {
1523
1524
1525again:
1526 ret = dlm_lockres_master_requery(dlm, res, &real_master);
1527 if (ret < 0) {
1528 mlog(0, "dlm_lockres_master_requery ret=%d\n",
1529 ret);
1530 goto again;
1531 }
1532 if (real_master == DLM_LOCK_RES_OWNER_UNKNOWN) {
1533 mlog(0, "lockres %.*s not claimed. "
1534 "this node will take it.\n",
1535 res->lockname.len, res->lockname.name);
1536 } else {
1537 spin_lock(&res->spinlock);
1538 dlm_lockres_drop_inflight_ref(dlm, res);
1539 spin_unlock(&res->spinlock);
1540 mlog(0, "master needs to respond to sender "
1541 "that node %u still owns %.*s\n",
1542 real_master, res->lockname.len,
1543 res->lockname.name);
1544
1545 goto leave;
1546 }
1547 }
1548
1549 ret = dlm_process_recovery_data(dlm, res, mres);
1550 if (ret < 0)
1551 mlog(0, "dlm_process_recovery_data returned %d\n", ret);
1552 else
1553 mlog(0, "dlm_process_recovery_data succeeded\n");
1554
1555 if ((mres->flags & (DLM_MRES_MIGRATION|DLM_MRES_ALL_DONE)) ==
1556 (DLM_MRES_MIGRATION|DLM_MRES_ALL_DONE)) {
1557 ret = dlm_finish_migration(dlm, res, mres->master);
1558 if (ret < 0)
1559 mlog_errno(ret);
1560 }
1561
1562leave:
1563
1564 if (res) {
1565 if (extra_ref)
1566 dlm_lockres_put(res);
1567 dlm_lockres_put(res);
1568 }
1569 kfree(data);
1570 mlog_exit(ret);
1571}
1572
1573
1574
1575static int dlm_lockres_master_requery(struct dlm_ctxt *dlm,
1576 struct dlm_lock_resource *res,
1577 u8 *real_master)
1578{
1579 struct dlm_node_iter iter;
1580 int nodenum;
1581 int ret = 0;
1582
1583 *real_master = DLM_LOCK_RES_OWNER_UNKNOWN;
1584
        /* The lockres arrived with an unknown owner, which happens when a
         * node died in the middle of migrating it.  Ask every other live
         * node who owns it; a node that answers with a real owner is
         * trusted, and if nobody claims it this node is free to take it
         * over. */
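        /* Nodes that die while being queried are tolerated; they will be
         * picked up by a later round of recovery. */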
1608 spin_lock(&dlm->spinlock);
1609 dlm_node_iter_init(dlm->domain_map, &iter);
1610 spin_unlock(&dlm->spinlock);
1611
1612 while ((nodenum = dlm_node_iter_next(&iter)) >= 0) {
1613
1614 if (nodenum == dlm->node_num)
1615 continue;
1616 ret = dlm_do_master_requery(dlm, res, nodenum, real_master);
1617 if (ret < 0) {
1618 mlog_errno(ret);
1619 if (!dlm_is_host_down(ret))
1620 BUG();
1621
1622
1623 }
1624 if (*real_master != DLM_LOCK_RES_OWNER_UNKNOWN) {
1625 mlog(0, "lock master is %u\n", *real_master);
1626 break;
1627 }
1628 }
1629 return ret;
1630}
1631
1632
1633int dlm_do_master_requery(struct dlm_ctxt *dlm, struct dlm_lock_resource *res,
1634 u8 nodenum, u8 *real_master)
1635{
1636 int ret = -EINVAL;
1637 struct dlm_master_requery req;
1638 int status = DLM_LOCK_RES_OWNER_UNKNOWN;
1639
1640 memset(&req, 0, sizeof(req));
1641 req.node_idx = dlm->node_num;
1642 req.namelen = res->lockname.len;
1643 memcpy(req.name, res->lockname.name, res->lockname.len);
1644
1645 ret = o2net_send_message(DLM_MASTER_REQUERY_MSG, dlm->key,
1646 &req, sizeof(req), nodenum, &status);
1647
1648 if (ret < 0)
1649 mlog(ML_ERROR, "Error %d when sending message %u (key "
1650 "0x%x) to node %u\n", ret, DLM_MASTER_REQUERY_MSG,
1651 dlm->key, nodenum);
1652 else {
1653 BUG_ON(status < 0);
1654 BUG_ON(status > DLM_LOCK_RES_OWNER_UNKNOWN);
1655 *real_master = (u8) (status & 0xff);
1656 mlog(0, "node %u responded to master requery with %u\n",
1657 nodenum, *real_master);
1658 ret = 0;
1659 }
1660 return ret;
1661}
1662
1663
1664
1665
1666
1667int dlm_master_requery_handler(struct o2net_msg *msg, u32 len, void *data,
1668 void **ret_data)
1669{
1670 struct dlm_ctxt *dlm = data;
1671 struct dlm_master_requery *req = (struct dlm_master_requery *)msg->buf;
1672 struct dlm_lock_resource *res = NULL;
1673 unsigned int hash;
1674 int master = DLM_LOCK_RES_OWNER_UNKNOWN;
1675 u32 flags = DLM_ASSERT_MASTER_REQUERY;
1676
1677 if (!dlm_grab(dlm)) {
1678
1679
1680 return master;
1681 }
1682
1683 hash = dlm_lockid_hash(req->name, req->namelen);
1684
1685 spin_lock(&dlm->spinlock);
1686 res = __dlm_lookup_lockres(dlm, req->name, req->namelen, hash);
1687 if (res) {
1688 spin_lock(&res->spinlock);
1689 master = res->owner;
1690 if (master == dlm->node_num) {
1691 int ret = dlm_dispatch_assert_master(dlm, res,
1692 0, 0, flags);
1693 if (ret < 0) {
1694 mlog_errno(-ENOMEM);
1695
1696 BUG();
1697 }
1698 } else
1699 dlm_lockres_put(res);
1700 spin_unlock(&res->spinlock);
1701 }
1702 spin_unlock(&dlm->spinlock);
1703
1704 dlm_put(dlm);
1705 return master;
1706}
1707
1708static inline struct list_head *
1709dlm_list_num_to_pointer(struct dlm_lock_resource *res, int list_num)
1710{
1711 struct list_head *ret;
1712 BUG_ON(list_num < 0);
1713 BUG_ON(list_num > 2);
1714 ret = &(res->granted);
1715 ret += list_num;
1716 return ret;
1717}
1718
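/*
 * Replay one dlm_migratable_lockres on the local copy of the lockres.
 * Locks owned by other nodes are recreated from the wire data; a lock
 * this node already holds (the migration case) is only moved to the
 * queue the sender reported.  LVBs are copied over, and a mismatch
 * between the local and migrated LVB on an EX/PR lock is treated as
 * fatal.
 */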
1745static int dlm_process_recovery_data(struct dlm_ctxt *dlm,
1746 struct dlm_lock_resource *res,
1747 struct dlm_migratable_lockres *mres)
1748{
1749 struct dlm_migratable_lock *ml;
1750 struct list_head *queue;
1751 struct list_head *tmpq = NULL;
1752 struct dlm_lock *newlock = NULL;
1753 struct dlm_lockstatus *lksb = NULL;
1754 int ret = 0;
1755 int i, j, bad;
1756 struct dlm_lock *lock = NULL;
1757 u8 from = O2NM_MAX_NODES;
1758 unsigned int added = 0;
1759 __be64 c;
1760
1761 mlog(0, "running %d locks for this lockres\n", mres->num_locks);
1762 for (i=0; i<mres->num_locks; i++) {
1763 ml = &(mres->ml[i]);
1764
1765 if (dlm_is_dummy_lock(dlm, ml, &from)) {
1766
1767 BUG_ON(mres->num_locks != 1);
1768 mlog(0, "%s:%.*s: dummy lock for %u\n",
1769 dlm->name, mres->lockname_len, mres->lockname,
1770 from);
1771 spin_lock(&res->spinlock);
1772 dlm_lockres_set_refmap_bit(from, res);
1773 spin_unlock(&res->spinlock);
1774 added++;
1775 break;
1776 }
1777 BUG_ON(ml->highest_blocked != LKM_IVMODE);
1778 newlock = NULL;
1779 lksb = NULL;
1780
1781 queue = dlm_list_num_to_pointer(res, ml->list);
1782 tmpq = NULL;
1783
1784
1785
1786
1787 if (ml->node == dlm->node_num) {
1788
1789 BUG_ON(!(mres->flags & DLM_MRES_MIGRATION));
1790
1791 spin_lock(&res->spinlock);
1792 for (j = DLM_GRANTED_LIST; j <= DLM_BLOCKED_LIST; j++) {
1793 tmpq = dlm_list_idx_to_ptr(res, j);
1794 list_for_each_entry(lock, tmpq, list) {
1795 if (lock->ml.cookie != ml->cookie)
1796 lock = NULL;
1797 else
1798 break;
1799 }
1800 if (lock)
1801 break;
1802 }
1803
1804
1805
1806 if (!lock) {
1807 c = ml->cookie;
1808 mlog(ML_ERROR, "Could not find local lock "
1809 "with cookie %u:%llu, node %u, "
1810 "list %u, flags 0x%x, type %d, "
1811 "conv %d, highest blocked %d\n",
1812 dlm_get_lock_cookie_node(be64_to_cpu(c)),
1813 dlm_get_lock_cookie_seq(be64_to_cpu(c)),
1814 ml->node, ml->list, ml->flags, ml->type,
1815 ml->convert_type, ml->highest_blocked);
1816 __dlm_print_one_lock_resource(res);
1817 BUG();
1818 }
1819
1820 if (lock->ml.node != ml->node) {
1821 c = lock->ml.cookie;
1822 mlog(ML_ERROR, "Mismatched node# in lock "
1823 "cookie %u:%llu, name %.*s, node %u\n",
1824 dlm_get_lock_cookie_node(be64_to_cpu(c)),
1825 dlm_get_lock_cookie_seq(be64_to_cpu(c)),
1826 res->lockname.len, res->lockname.name,
1827 lock->ml.node);
1828 c = ml->cookie;
1829 mlog(ML_ERROR, "Migrate lock cookie %u:%llu, "
1830 "node %u, list %u, flags 0x%x, type %d, "
1831 "conv %d, highest blocked %d\n",
1832 dlm_get_lock_cookie_node(be64_to_cpu(c)),
1833 dlm_get_lock_cookie_seq(be64_to_cpu(c)),
1834 ml->node, ml->list, ml->flags, ml->type,
1835 ml->convert_type, ml->highest_blocked);
1836 __dlm_print_one_lock_resource(res);
1837 BUG();
1838 }
1839
1840 if (tmpq != queue) {
1841 c = ml->cookie;
1842 mlog(0, "Lock cookie %u:%llu was on list %u "
1843 "instead of list %u for %.*s\n",
1844 dlm_get_lock_cookie_node(be64_to_cpu(c)),
1845 dlm_get_lock_cookie_seq(be64_to_cpu(c)),
1846 j, ml->list, res->lockname.len,
1847 res->lockname.name);
1848 __dlm_print_one_lock_resource(res);
1849 spin_unlock(&res->spinlock);
1850 continue;
1851 }
1852
1853
1854
1855
1856
1857
1858 list_move_tail(&lock->list, queue);
1859 spin_unlock(&res->spinlock);
1860 added++;
1861
1862 mlog(0, "just reordered a local lock!\n");
1863 continue;
1864 }
1865
1866
1867 newlock = dlm_new_lock(ml->type, ml->node,
1868 be64_to_cpu(ml->cookie), NULL);
1869 if (!newlock) {
1870 ret = -ENOMEM;
1871 goto leave;
1872 }
1873 lksb = newlock->lksb;
1874 dlm_lock_attach_lockres(newlock, res);
1875
1876 if (ml->convert_type != LKM_IVMODE) {
1877 BUG_ON(queue != &res->converting);
1878 newlock->ml.convert_type = ml->convert_type;
1879 }
1880 lksb->flags |= (ml->flags &
1881 (DLM_LKSB_PUT_LVB|DLM_LKSB_GET_LVB));
1882
1883 if (ml->type == LKM_NLMODE)
1884 goto skip_lvb;
1885
1886 if (!dlm_lvb_is_empty(mres->lvb)) {
1887 if (lksb->flags & DLM_LKSB_PUT_LVB) {
1888
1889
1890
1891 memcpy(lksb->lvb, mres->lvb, DLM_LVB_LEN);
1892
1893
1894
1895
1896 memcpy(res->lvb, mres->lvb, DLM_LVB_LEN);
1897 } else {
1898
1899
1900 BUG_ON(ml->type != LKM_EXMODE &&
1901 ml->type != LKM_PRMODE);
1902 if (!dlm_lvb_is_empty(res->lvb) &&
1903 (ml->type == LKM_EXMODE ||
1904 memcmp(res->lvb, mres->lvb, DLM_LVB_LEN))) {
1905 int i;
1906 mlog(ML_ERROR, "%s:%.*s: received bad "
1907 "lvb! type=%d\n", dlm->name,
1908 res->lockname.len,
1909 res->lockname.name, ml->type);
1910 printk("lockres lvb=[");
1911 for (i=0; i<DLM_LVB_LEN; i++)
1912 printk("%02x", res->lvb[i]);
1913 printk("]\nmigrated lvb=[");
1914 for (i=0; i<DLM_LVB_LEN; i++)
1915 printk("%02x", mres->lvb[i]);
1916 printk("]\n");
1917 dlm_print_one_lock_resource(res);
1918 BUG();
1919 }
1920 memcpy(res->lvb, mres->lvb, DLM_LVB_LEN);
1921 }
1922 }
1923skip_lvb:
1924
1925
1926
1927
1928
1929
1930
1931
1932
1933
1934
1935
1936
1937
1938
1939
1940
1941 bad = 0;
1942 spin_lock(&res->spinlock);
1943 list_for_each_entry(lock, queue, list) {
1944 if (lock->ml.cookie == ml->cookie) {
1945 c = lock->ml.cookie;
1946 mlog(ML_ERROR, "%s:%.*s: %u:%llu: lock already "
1947 "exists on this lockres!\n", dlm->name,
1948 res->lockname.len, res->lockname.name,
1949 dlm_get_lock_cookie_node(be64_to_cpu(c)),
1950 dlm_get_lock_cookie_seq(be64_to_cpu(c)));
1951
1952 mlog(ML_NOTICE, "sent lock: type=%d, conv=%d, "
1953 "node=%u, cookie=%u:%llu, queue=%d\n",
1954 ml->type, ml->convert_type, ml->node,
1955 dlm_get_lock_cookie_node(be64_to_cpu(ml->cookie)),
1956 dlm_get_lock_cookie_seq(be64_to_cpu(ml->cookie)),
1957 ml->list);
1958
1959 __dlm_print_one_lock_resource(res);
1960 bad = 1;
1961 break;
1962 }
1963 }
1964 if (!bad) {
1965 dlm_lock_get(newlock);
1966 list_add_tail(&newlock->list, queue);
1967 mlog(0, "%s:%.*s: added lock for node %u, "
1968 "setting refmap bit\n", dlm->name,
1969 res->lockname.len, res->lockname.name, ml->node);
1970 dlm_lockres_set_refmap_bit(ml->node, res);
1971 added++;
1972 }
1973 spin_unlock(&res->spinlock);
1974 }
1975 mlog(0, "done running all the locks\n");
1976
1977leave:
1978
1979 spin_lock(&res->spinlock);
1980 dlm_lockres_drop_inflight_ref(dlm, res);
1981 spin_unlock(&res->spinlock);
1982
1983 if (ret < 0) {
1984 mlog_errno(ret);
1985 if (newlock)
1986 dlm_lock_put(newlock);
1987 }
1988
1989 mlog_exit(ret);
1990 return ret;
1991}
1992
1993void dlm_move_lockres_to_recovery_list(struct dlm_ctxt *dlm,
1994 struct dlm_lock_resource *res)
1995{
1996 int i;
1997 struct list_head *queue;
1998 struct dlm_lock *lock, *next;
1999
2000 assert_spin_locked(&dlm->spinlock);
2001 assert_spin_locked(&res->spinlock);
2002 res->state |= DLM_LOCK_RES_RECOVERING;
2003 if (!list_empty(&res->recovering)) {
2004 mlog(0,
2005 "Recovering res %s:%.*s, is already on recovery list!\n",
2006 dlm->name, res->lockname.len, res->lockname.name);
2007 list_del_init(&res->recovering);
2008 dlm_lockres_put(res);
2009 }
2010
2011 dlm_lockres_get(res);
2012 list_add_tail(&res->recovering, &dlm->reco.resources);
2013
2014
2015 for (i=DLM_BLOCKED_LIST; i>=DLM_GRANTED_LIST; i--) {
2016 queue = dlm_list_idx_to_ptr(res, i);
2017 list_for_each_entry_safe(lock, next, queue, list) {
2018 dlm_lock_get(lock);
2019 if (lock->convert_pending) {
2020
2021 BUG_ON(i != DLM_CONVERTING_LIST);
2022 mlog(0, "node died with convert pending "
2023 "on %.*s. move back to granted list.\n",
2024 res->lockname.len, res->lockname.name);
2025 dlm_revert_pending_convert(res, lock);
2026 lock->convert_pending = 0;
2027 } else if (lock->lock_pending) {
2028
2029 BUG_ON(i != DLM_BLOCKED_LIST);
2030 mlog(0, "node died with lock pending "
2031 "on %.*s. remove from blocked list and skip.\n",
2032 res->lockname.len, res->lockname.name);
2033
2034
2035
2036
2037
2038 dlm_revert_pending_lock(res, lock);
2039 lock->lock_pending = 0;
2040 } else if (lock->unlock_pending) {
2041
2042
2043
2044
2045
2046
2047
2048
2049
2050 BUG_ON(i != DLM_GRANTED_LIST);
2051 mlog(0, "node died with unlock pending "
2052 "on %.*s. remove from blocked list and skip.\n",
2053 res->lockname.len, res->lockname.name);
2054 dlm_commit_pending_unlock(res, lock);
2055 lock->unlock_pending = 0;
2056 } else if (lock->cancel_pending) {
2057
2058
2059
2060
2061 BUG_ON(i != DLM_CONVERTING_LIST);
2062 mlog(0, "node died with cancel pending "
2063 "on %.*s. move back to granted list.\n",
2064 res->lockname.len, res->lockname.name);
2065 dlm_commit_pending_cancel(res, lock);
2066 lock->cancel_pending = 0;
2067 }
2068 dlm_lock_put(lock);
2069 }
2070 }
2071}
2072
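/*
 * Called with dlm->spinlock held once recovery of dead_node is complete:
 * every lockres still marked RECOVERING is handed to new_master, taken
 * off the recovering list, and kicked to the dirty list so the dlm
 * thread can re-evaluate it.
 */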
2078static void dlm_finish_local_lockres_recovery(struct dlm_ctxt *dlm,
2079 u8 dead_node, u8 new_master)
2080{
2081 int i;
2082 struct hlist_node *hash_iter;
2083 struct hlist_head *bucket;
2084 struct dlm_lock_resource *res, *next;
2085
2086 mlog_entry_void();
2087
2088 assert_spin_locked(&dlm->spinlock);
2089
2090 list_for_each_entry_safe(res, next, &dlm->reco.resources, recovering) {
2091 if (res->owner == dead_node) {
2092 list_del_init(&res->recovering);
2093 spin_lock(&res->spinlock);
2094
2095
2096 dlm_change_lockres_owner(dlm, res, new_master);
2097 res->state &= ~DLM_LOCK_RES_RECOVERING;
2098 if (__dlm_lockres_has_locks(res))
2099 __dlm_dirty_lockres(dlm, res);
2100 spin_unlock(&res->spinlock);
2101 wake_up(&res->wq);
2102 dlm_lockres_put(res);
2103 }
2104 }
2105
2106
2107
2108
2109
2110 for (i = 0; i < DLM_HASH_BUCKETS; i++) {
2111 bucket = dlm_lockres_hash(dlm, i);
2112 hlist_for_each_entry(res, hash_iter, bucket, hash_node) {
2113 if (res->state & DLM_LOCK_RES_RECOVERING) {
2114 if (res->owner == dead_node) {
2115 mlog(0, "(this=%u) res %.*s owner=%u "
2116 "was not on recovering list, but "
2117 "clearing state anyway\n",
2118 dlm->node_num, res->lockname.len,
2119 res->lockname.name, new_master);
2120 } else if (res->owner == dlm->node_num) {
2121 mlog(0, "(this=%u) res %.*s owner=%u "
2122 "was not on recovering list, "
2123 "owner is THIS node, clearing\n",
2124 dlm->node_num, res->lockname.len,
2125 res->lockname.name, new_master);
2126 } else
2127 continue;
2128
2129 if (!list_empty(&res->recovering)) {
2130 mlog(0, "%s:%.*s: lockres was "
2131 "marked RECOVERING, owner=%u\n",
2132 dlm->name, res->lockname.len,
2133 res->lockname.name, res->owner);
2134 list_del_init(&res->recovering);
2135 dlm_lockres_put(res);
2136 }
2137 spin_lock(&res->spinlock);
2138
2139
2140 dlm_change_lockres_owner(dlm, res, new_master);
2141 res->state &= ~DLM_LOCK_RES_RECOVERING;
2142 if (__dlm_lockres_has_locks(res))
2143 __dlm_dirty_lockres(dlm, res);
2144 spin_unlock(&res->spinlock);
2145 wake_up(&res->wq);
2146 }
2147 }
2148 }
2149}
2150
2151static inline int dlm_lvb_needs_invalidation(struct dlm_lock *lock, int local)
2152{
2153 if (local) {
2154 if (lock->ml.type != LKM_EXMODE &&
2155 lock->ml.type != LKM_PRMODE)
2156 return 1;
2157 } else if (lock->ml.type == LKM_EXMODE)
2158 return 1;
2159 return 0;
2160}
2161
2162static void dlm_revalidate_lvb(struct dlm_ctxt *dlm,
2163 struct dlm_lock_resource *res, u8 dead_node)
2164{
2165 struct list_head *queue;
2166 struct dlm_lock *lock;
2167 int blank_lvb = 0, local = 0;
2168 int i;
2169 u8 search_node;
2170
2171 assert_spin_locked(&dlm->spinlock);
2172 assert_spin_locked(&res->spinlock);
2173
2174 if (res->owner == dlm->node_num)
2175
2176
2177 search_node = dead_node;
2178 else {
2179
2180
2181 search_node = dlm->node_num;
2182 local = 1;
2183 }
2184
2185 for (i=DLM_GRANTED_LIST; i<=DLM_CONVERTING_LIST; i++) {
2186 queue = dlm_list_idx_to_ptr(res, i);
2187 list_for_each_entry(lock, queue, list) {
2188 if (lock->ml.node == search_node) {
2189 if (dlm_lvb_needs_invalidation(lock, local)) {
2190
2191 blank_lvb = 1;
2192 memset(lock->lksb->lvb, 0, DLM_LVB_LEN);
2193 }
2194 }
2195 }
2196 }
2197
2198 if (blank_lvb) {
2199 mlog(0, "clearing %.*s lvb, dead node %u had EX\n",
2200 res->lockname.len, res->lockname.name, dead_node);
2201 memset(res->lvb, 0, DLM_LVB_LEN);
2202 }
2203}
2204
2205static void dlm_free_dead_locks(struct dlm_ctxt *dlm,
2206 struct dlm_lock_resource *res, u8 dead_node)
2207{
2208 struct dlm_lock *lock, *next;
2209 unsigned int freed = 0;
2210
2211
2212
2213
2214
2215 assert_spin_locked(&dlm->spinlock);
2216 assert_spin_locked(&res->spinlock);
2217
2218
2219
2220
2221
2222 list_for_each_entry_safe(lock, next, &res->granted, list) {
2223 if (lock->ml.node == dead_node) {
2224 list_del_init(&lock->list);
2225 dlm_lock_put(lock);
2226
2227 dlm_lock_put(lock);
2228 freed++;
2229 }
2230 }
2231 list_for_each_entry_safe(lock, next, &res->converting, list) {
2232 if (lock->ml.node == dead_node) {
2233 list_del_init(&lock->list);
2234 dlm_lock_put(lock);
2235
2236 dlm_lock_put(lock);
2237 freed++;
2238 }
2239 }
2240 list_for_each_entry_safe(lock, next, &res->blocked, list) {
2241 if (lock->ml.node == dead_node) {
2242 list_del_init(&lock->list);
2243 dlm_lock_put(lock);
2244
2245 dlm_lock_put(lock);
2246 freed++;
2247 }
2248 }
2249
2250 if (freed) {
2251 mlog(0, "%s:%.*s: freed %u locks for dead node %u, "
2252 "dropping ref from lockres\n", dlm->name,
2253 res->lockname.len, res->lockname.name, freed, dead_node);
2254 if(!test_bit(dead_node, res->refmap)) {
2255 mlog(ML_ERROR, "%s:%.*s: freed %u locks for dead node %u, "
2256 "but ref was not set\n", dlm->name,
2257 res->lockname.len, res->lockname.name, freed, dead_node);
2258 __dlm_print_one_lock_resource(res);
2259 }
2260 dlm_lockres_clear_refmap_bit(dead_node, res);
2261 } else if (test_bit(dead_node, res->refmap)) {
2262 mlog(0, "%s:%.*s: dead node %u had a ref, but had "
2263 "no locks and had not purged before dying\n", dlm->name,
2264 res->lockname.len, res->lockname.name, dead_node);
2265 dlm_lockres_clear_refmap_bit(dead_node, res);
2266 }
2267 /* requeue the lockres for the dlm thread now that the dead
2268  * node's locks and reference are gone */
2269 __dlm_dirty_lockres(dlm, res);
2270}
2271
2272 /* local cleanup run whenever a node drops out of the domain:
2273  * scrub every trace of the dead node from the master list and
2274  * from each lock resource in the local hash */
2275
2276
2277
2278
2279static void dlm_do_local_recovery_cleanup(struct dlm_ctxt *dlm, u8 dead_node)
2280{
2281 struct hlist_node *iter;
2282 struct dlm_lock_resource *res;
2283 int i;
2284 struct hlist_head *bucket;
2285 struct dlm_lock *lock;
2286
2287 /* purge any master list entries (mles) that still refer to
2288  * the dead node */
2289 dlm_clean_master_list(dlm, dead_node);
2290
2291 /*
2292  * now walk every lock resource in the local hash.  two cases
2293  * need work:
2294  *
2295  * 1) the dead node was the master: move the lockres to the
2296  *    recovery list so its state can be rebuilt from the
2297  *    surviving nodes before it is used again.
2298  *
2299  * 2) this node is the master: drop all of the dead node's
2300  *    locks from the lockres queues; once they are gone the
2301  *    lockres can be used normally again.
2302  */
2303
2304
2305 for (i = 0; i < DLM_HASH_BUCKETS; i++) {
2306 bucket = dlm_lockres_hash(dlm, i);
2307 hlist_for_each_entry(res, iter, bucket, hash_node) {
2308 /* always prune $RECOVERY lock entries for the dead node first;
2309  * a stale entry here could stall a later recovery */
2310 if (dlm_is_recovery_lock(res->lockname.name,
2311 res->lockname.len)) {
2312 spin_lock(&res->spinlock);
2313 list_for_each_entry(lock, &res->granted, list) {
2314 if (lock->ml.node == dead_node) {
2315 mlog(0, "AHA! there was "
2316 "a $RECOVERY lock for dead "
2317 "node %u (%s)!\n",
2318 dead_node, dlm->name);
2319 list_del_init(&lock->list);
2320 dlm_lock_put(lock);
2321 break;
2322 }
2323 }
2324 spin_unlock(&res->spinlock);
2325 continue;
2326 }
2327 spin_lock(&res->spinlock);
2328 /* blank out any lvbs that can no longer be trusted */
2329 dlm_revalidate_lvb(dlm, res, dead_node);
2330 if (res->owner == dead_node) {
2331 if (res->state & DLM_LOCK_RES_DROPPING_REF) {
2332 mlog(ML_NOTICE, "Ignore %.*s for "
2333 "recovery as it is being freed\n",
2334 res->lockname.len,
2335 res->lockname.name);
2336 } else
2337 dlm_move_lockres_to_recovery_list(dlm,
2338 res);
2339
2340 } else if (res->owner == dlm->node_num) {
2341 dlm_free_dead_locks(dlm, res, dead_node);
2342 __dlm_lockres_calc_usage(dlm, res);
2343 }
2344 spin_unlock(&res->spinlock);
2345 }
2346 }
2347
2348}
2349
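/* Core node-down handling, called with dlm->spinlock held: clears the
 * node from the live and domain maps, runs the local recovery cleanup if
 * the node is not already being recovered, and marks it in the recovery
 * map so the recovery thread will pick it up. */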
2350static void __dlm_hb_node_down(struct dlm_ctxt *dlm, int idx)
2351{
2352 assert_spin_locked(&dlm->spinlock);
2353
2354 if (dlm->reco.new_master == idx) {
2355 mlog(0, "%s: recovery master %d just died\n",
2356 dlm->name, idx);
2357 if (dlm->reco.state & DLM_RECO_STATE_FINALIZE) {
2358 /* the dead master had already reached finalize1, so that
2359  * recovery is effectively complete; clear the flag and reset
2360  * the recovery state so a fresh recovery can be started */
2361 mlog(0, "%s: dead master %d had reached "
2362 "finalize1 state, clearing\n", dlm->name, idx);
2363 dlm->reco.state &= ~DLM_RECO_STATE_FINALIZE;
2364 __dlm_reset_recovery(dlm);
2365 }
2366 }
2367
2368 /* clear any join state the dead node still had in flight */
2369 if (dlm->joining_node == idx) {
2370 mlog(0, "Clearing join state for node %u\n", idx);
2371 __dlm_set_joining_node(dlm, DLM_LOCK_RES_OWNER_UNKNOWN);
2372 }
2373
2374 /* check whether the node was already considered dead */
2375 if (!test_bit(idx, dlm->live_nodes_map)) {
2376 mlog(0, "for domain %s, node %d is already dead. "
2377 "another node likely did recovery already.\n",
2378 dlm->name, idx);
2379 return;
2380 }
2381
2382 /* a node that never joined the domain needs no recovery here */
2383 if (!test_bit(idx, dlm->domain_map)) {
2384 /* this also covers a node-down event for a node that had not
2385  * finished joining the domain yet */
2386 mlog(0, "node %u already removed from domain!\n", idx);
2387 return;
2388 }
2389
2390 clear_bit(idx, dlm->live_nodes_map);
2391 /* make sure the local cleanup happens before the heartbeat
2392  * events are delivered */
2393 if (!test_bit(idx, dlm->recovery_map))
2394 dlm_do_local_recovery_cleanup(dlm, idx);
2395
2396 /* notify anything attached to the heartbeat events */
2397 dlm_hb_event_notify_attached(dlm, idx, 0);
2398
2399 mlog(0, "node %u being removed from domain map!\n", idx);
2400 clear_bit(idx, dlm->domain_map);
2401 /* wake up anyone waiting on migration, since a node going
2402  * down can change the outcome they are waiting for */
2403 wake_up(&dlm->migration_wq);
2404
2405 if (test_bit(idx, dlm->recovery_map))
2406 mlog(0, "domain %s, node %u already added "
2407 "to recovery map!\n", dlm->name, idx);
2408 else
2409 set_bit(idx, dlm->recovery_map);
2410}
2411
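/* o2hb callback: a node in this domain stopped heartbeating.  Fire the
 * eviction callbacks for dlm users, then do the node-down work under the
 * dlm spinlock. */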
2412void dlm_hb_node_down_cb(struct o2nm_node *node, int idx, void *data)
2413{
2414 struct dlm_ctxt *dlm = data;
2415
2416 if (!dlm_grab(dlm))
2417 return;
2418
2419 /*
2420  * let dlm users know that a node in this domain went away without
2421  * telling us first, before any further teardown happens
2422  */
2423 if (test_bit(idx, dlm->domain_map))
2424 dlm_fire_domain_eviction_callbacks(dlm, idx);
2425
2426 spin_lock(&dlm->spinlock);
2427 __dlm_hb_node_down(dlm, idx);
2428 spin_unlock(&dlm->spinlock);
2429
2430 dlm_put(dlm);
2431}
2432
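/* o2hb callback: a node started heartbeating.  Just mark it live; it is
 * not part of the domain until it actually joins. */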
2433void dlm_hb_node_up_cb(struct o2nm_node *node, int idx, void *data)
2434{
2435 struct dlm_ctxt *dlm = data;
2436
2437 if (!dlm_grab(dlm))
2438 return;
2439
2440 spin_lock(&dlm->spinlock);
2441 set_bit(idx, dlm->live_nodes_map);
2442 /* deliberately do NOT notify master list entries here; a new
2443  * node is not interesting for mastery until it has joined the domain */
2444 spin_unlock(&dlm->spinlock);
2445
2446 dlm_put(dlm);
2447}
2448
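/* The $RECOVERY lock is only used to arbitrate the recovery master, so
 * its ast/bast/unlock-ast handlers below only need to log that they
 * fired. */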
2449static void dlm_reco_ast(void *astdata)
2450{
2451 struct dlm_ctxt *dlm = astdata;
2452 mlog(0, "ast for recovery lock fired!, this=%u, dlm=%s\n",
2453 dlm->node_num, dlm->name);
2454}
2455static void dlm_reco_bast(void *astdata, int blocked_type)
2456{
2457 struct dlm_ctxt *dlm = astdata;
2458 mlog(0, "bast for recovery lock fired!, this=%u, dlm=%s\n",
2459 dlm->node_num, dlm->name);
2460}
2461static void dlm_reco_unlock_ast(void *astdata, enum dlm_status st)
2462{
2463 mlog(0, "unlockast for recovery lock fired!\n");
2464}
2465
2466 /*
2467  * dlm_pick_recovery_master arbitrates which node drives the recovery
2468  * by racing for an EX lock on the special $RECOVERY lockres with
2469  * LKM_NOQUEUE.  every node that has noticed the dead node keeps
2470  * trying until either:
2471  *   a) it wins the EX and becomes the recovery master, or
2472  *   b) dlm->reco.new_master is set to another node, which will then
2473  *      drive the recovery instead.
2474  * the whole cluster therefore syncs up here each time a recovery
2475  * master is needed; if that master dies in turn, the survivors end
2476  * up back in this function and pick again.
2477  */
2478static int dlm_pick_recovery_master(struct dlm_ctxt *dlm)
2479{
2480 enum dlm_status ret;
2481 struct dlm_lockstatus lksb;
2482 int status = -EINVAL;
2483
2484 mlog(0, "starting recovery of %s at %lu, dead=%u, this=%u\n",
2485 dlm->name, jiffies, dlm->reco.dead_node, dlm->node_num);
2486again:
2487 memset(&lksb, 0, sizeof(lksb));
2488
2489 ret = dlmlock(dlm, LKM_EXMODE, &lksb, LKM_NOQUEUE|LKM_RECOVERY,
2490 DLM_RECOVERY_LOCK_NAME, DLM_RECOVERY_LOCK_NAME_LEN,
2491 dlm_reco_ast, dlm, dlm_reco_bast);
2492
2493 mlog(0, "%s: dlmlock($RECOVERY) returned %d, lksb=%d\n",
2494 dlm->name, ret, lksb.status);
2495
2496 if (ret == DLM_NORMAL) {
2497 mlog(0, "dlm=%s dlmlock says I got it (this=%u)\n",
2498 dlm->name, dlm->node_num);
2499 /* this node now holds the EX on $RECOVERY.  double check that
2500  * another node has not already been chosen as recovery master
2501  * in the meantime */
2502 if (dlm_reco_master_ready(dlm)) {
2503 mlog(0, "%s: got reco EX lock, but %u will "
2504 "do the recovery\n", dlm->name,
2505 dlm->reco.new_master);
2506 status = -EEXIST;
2507 } else {
2508 status = 0;
2509 /* make sure there is still a dead node to recover; it may
2510  * already have been recovered elsewhere */
2511 spin_lock(&dlm->spinlock);
2512 if (dlm->reco.dead_node == O2NM_INVALID_NODE_NUM) {
2513 status = -EINVAL;
2514 mlog(0, "%s: got reco EX lock, but "
2515 "node got recovered already\n", dlm->name);
2516 if (dlm->reco.new_master != O2NM_INVALID_NODE_NUM) {
2517 mlog(ML_ERROR, "%s: new master is %u "
2518 "but no dead node!\n",
2519 dlm->name, dlm->reco.new_master);
2520 BUG();
2521 }
2522 }
2523 spin_unlock(&dlm->spinlock);
2524 }
2525 /* this node is the recovery master: tell every other node
2526  * that recovery of dead_node is starting, then record itself
2527  * as new_master */
2528 if (!status) {
2529 mlog(0, "%s: dead=%u, this=%u, sending "
2530 "begin_reco now\n", dlm->name,
2531 dlm->reco.dead_node, dlm->node_num);
2532 status = dlm_send_begin_reco_message(dlm,
2533 dlm->reco.dead_node);
2534 /* begin_reco retries until every live node answers, so this should not fail */
2535 BUG_ON(status);
2536 /* record this node as the new recovery master */
2537
2538 spin_lock(&dlm->spinlock);
2539 dlm_set_reco_master(dlm, dlm->node_num);
2540 spin_unlock(&dlm->spinlock);
2541 }
2542
2543 /* arbitration is done; always drop the EX on $RECOVERY again,
2544  * whether or not this node ended up as the recovery master */
2545 ret = dlmunlock(dlm, &lksb, 0, dlm_reco_unlock_ast, dlm);
2546 if (ret == DLM_DENIED) {
2547 mlog(0, "got DLM_DENIED, trying LKM_CANCEL\n");
2548 ret = dlmunlock(dlm, &lksb, LKM_CANCEL, dlm_reco_unlock_ast, dlm);
2549 }
2550 if (ret != DLM_NORMAL) {
2551 /* an unlock failure here most likely means a network error
2552  * or a node death mid-unlock, in which case the remote lock
2553  * state was torn down anyway.  continuing is tolerable only
2554  * because this is the special $RECOVERY lock and the next
2555  * arbitration will sort it out, but it is worth shouting
2556  * about */
2557 mlog(ML_ERROR, "dlmunlock returned %d\n", ret);
2558 }
2559 } else if (ret == DLM_NOTQUEUED) {
2560 mlog(0, "dlm=%s dlmlock says another node got it (this=%u)\n",
2561 dlm->name, dlm->node_num);
2562 /* another node won the EX.  wait up to a second at a time for
2563  * it to announce itself as reco.new_master, then defer to it;
2564  * if it never does, go back and race for the lock again */
2565 wait_event_timeout(dlm->dlm_reco_thread_wq,
2566 dlm_reco_master_ready(dlm),
2567 msecs_to_jiffies(1000));
2568 if (!dlm_reco_master_ready(dlm)) {
2569 mlog(0, "%s: reco master taking awhile\n",
2570 dlm->name);
2571 goto again;
2572 }
2573
2574 mlog(0, "%s: reco master %u is ready to recover %u\n",
2575 dlm->name, dlm->reco.new_master, dlm->reco.dead_node);
2576 status = -EEXIST;
2577 } else if (ret == DLM_RECOVERING) {
2578 mlog(0, "dlm=%s dlmlock says master node died (this=%u)\n",
2579 dlm->name, dlm->node_num);
2580 goto again;
2581 } else {
2582 struct dlm_lock_resource *res;
2583 /* dlmlock returned something other than NORMAL, NOTQUEUED or
2584  * RECOVERING, which should never happen for $RECOVERY; dump state and die */
2585 mlog(ML_ERROR, "%s: got %s from dlmlock($RECOVERY), "
2586 "lksb.status=%s\n", dlm->name, dlm_errname(ret),
2587 dlm_errname(lksb.status));
2588 res = dlm_lookup_lockres(dlm, DLM_RECOVERY_LOCK_NAME,
2589 DLM_RECOVERY_LOCK_NAME_LEN);
2590 if (res) {
2591 dlm_print_one_lock_resource(res);
2592 dlm_lockres_put(res);
2593 } else {
2594 mlog(ML_ERROR, "recovery lock not found\n");
2595 }
2596 BUG();
2597 }
2598
2599 return status;
2600}
2601
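/* Tell every other live node in the domain that this node is now the
 * recovery master for dead_node.  Sending retries until each node either
 * acks, is itself down, or stops answering -EAGAIN. */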
2602static int dlm_send_begin_reco_message(struct dlm_ctxt *dlm, u8 dead_node)
2603{
2604 struct dlm_begin_reco br;
2605 int ret = 0;
2606 struct dlm_node_iter iter;
2607 int nodenum;
2608 int status;
2609
2610 mlog_entry("%u\n", dead_node);
2611
2612 mlog(0, "%s: dead node is %u\n", dlm->name, dead_node);
2613
2614 spin_lock(&dlm->spinlock);
2615 dlm_node_iter_init(dlm->domain_map, &iter);
2616 spin_unlock(&dlm->spinlock);
2617
2618 clear_bit(dead_node, iter.node_map);
2619
2620 memset(&br, 0, sizeof(br));
2621 br.node_idx = dlm->node_num;
2622 br.dead_node = dead_node;
2623
2624 while ((nodenum = dlm_node_iter_next(&iter)) >= 0) {
2625 ret = 0;
2626 if (nodenum == dead_node) {
2627 mlog(0, "not sending begin reco to dead node "
2628 "%u\n", dead_node);
2629 continue;
2630 }
2631 if (nodenum == dlm->node_num) {
2632 mlog(0, "not sending begin reco to self\n");
2633 continue;
2634 }
2635retry:
2636 ret = -EINVAL;
2637 mlog(0, "attempting to send begin reco msg to %d\n",
2638 nodenum);
2639 ret = o2net_send_message(DLM_BEGIN_RECO_MSG, dlm->key,
2640 &br, sizeof(br), nodenum, &status);
2641
2642 if (ret >= 0)
2643 ret = status;
2644 if (dlm_is_host_down(ret)) {
2645 /* the target node is down; that is not an error, it simply
2646  * will not take part in this recovery */
2647 mlog(ML_NOTICE, "%s: node %u was down when sending "
2648 "begin reco msg (%d)\n", dlm->name, nodenum, ret);
2649 ret = 0;
2650 }
2651 /*
2652  * a node that is still waiting for a previous recovery to
2653  * finalize answers begin_reco with -EAGAIN (older versions of
2654  * this code apparently returned a positive EAGAIN), so accept
2655  * both, back off briefly and retry
2656  */
2657 if (ret == -EAGAIN || ret == EAGAIN) {
2658 mlog(0, "%s: trying to start recovery of node "
2659 "%u, but node %u is waiting for last recovery "
2660 "to complete, backoff for a bit\n", dlm->name,
2661 dead_node, nodenum);
2662 msleep(100);
2663 goto retry;
2664 }
2665 if (ret < 0) {
2666 struct dlm_lock_resource *res;
2667
2668 /* any other failure is serious, possibly ENOMEM deep in the
2669  * network stack; log it, dump the $RECOVERY lockres and retry */
2670 mlog_errno(ret);
2671 mlog(ML_ERROR, "begin reco of dlm %s to node %u "
2672 "returned %d\n", dlm->name, nodenum, ret);
2673 res = dlm_lookup_lockres(dlm, DLM_RECOVERY_LOCK_NAME,
2674 DLM_RECOVERY_LOCK_NAME_LEN);
2675 if (res) {
2676 dlm_print_one_lock_resource(res);
2677 dlm_lockres_put(res);
2678 } else {
2679 mlog(ML_ERROR, "recovery lock not found\n");
2680 }
2681 /* sleep briefly before retrying, in the hope that whatever
2682  * failed was transient */
2683 msleep(100);
2684 goto retry;
2685 }
2686 }
2687
2688 return ret;
2689}
2690
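/* Handler for DLM_BEGIN_RECO_MSG: record the sender as the new recovery
 * master and the node it is recovering as dead, forcing the local
 * node-down cleanup if this node had not yet noticed the death.  Returns
 * -EAGAIN while a previous recovery is still waiting on finalize2. */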
2691int dlm_begin_reco_handler(struct o2net_msg *msg, u32 len, void *data,
2692 void **ret_data)
2693{
2694 struct dlm_ctxt *dlm = data;
2695 struct dlm_begin_reco *br = (struct dlm_begin_reco *)msg->buf;
2696
2697 /* ok to return 0: the domain is already going away */
2698 if (!dlm_grab(dlm))
2699 return 0;
2700
2701 spin_lock(&dlm->spinlock);
2702 if (dlm->reco.state & DLM_RECO_STATE_FINALIZE) {
2703 mlog(0, "%s: node %u wants to recover node %u (%u:%u) "
2704 "but this node is in finalize state, waiting on finalize2\n",
2705 dlm->name, br->node_idx, br->dead_node,
2706 dlm->reco.dead_node, dlm->reco.new_master);
2707 spin_unlock(&dlm->spinlock);
2708 return -EAGAIN;
2709 }
2710 spin_unlock(&dlm->spinlock);
2711
2712 mlog(0, "%s: node %u wants to recover node %u (%u:%u)\n",
2713 dlm->name, br->node_idx, br->dead_node,
2714 dlm->reco.dead_node, dlm->reco.new_master);
2715
2716 dlm_fire_domain_eviction_callbacks(dlm, br->dead_node);
2717
2718 spin_lock(&dlm->spinlock);
2719 if (dlm->reco.new_master != O2NM_INVALID_NODE_NUM) {
2720 if (test_bit(dlm->reco.new_master, dlm->recovery_map)) {
2721 mlog(0, "%s: new_master %u died, changing "
2722 "to %u\n", dlm->name, dlm->reco.new_master,
2723 br->node_idx);
2724 } else {
2725 mlog(0, "%s: new_master %u NOT DEAD, changing "
2726 "to %u\n", dlm->name, dlm->reco.new_master,
2727 br->node_idx);
2728 /* this node may just not have seen the previous new_master die yet */
2729 }
2730 }
2731 if (dlm->reco.dead_node != O2NM_INVALID_NODE_NUM) {
2732 mlog(ML_NOTICE, "%s: dead_node previously set to %u, "
2733 "node %u changing it to %u\n", dlm->name,
2734 dlm->reco.dead_node, br->node_idx, br->dead_node);
2735 }
2736 dlm_set_reco_master(dlm, br->node_idx);
2737 dlm_set_reco_dead_node(dlm, br->dead_node);
2738 if (!test_bit(br->dead_node, dlm->recovery_map)) {
2739 mlog(0, "recovery master %u sees %u as dead, but this "
2740 "node has not yet. marking %u as dead\n",
2741 br->node_idx, br->dead_node, br->dead_node);
2742 if (!test_bit(br->dead_node, dlm->domain_map) ||
2743 !test_bit(br->dead_node, dlm->live_nodes_map))
2744 mlog(0, "%u not in domain/live_nodes map "
2745 "so setting it in reco map manually\n",
2746 br->dead_node);
2747 /* temporarily set both bits so __dlm_hb_node_down performs the
2748  * full local cleanup; it clears them again immediately */
2749 set_bit(br->dead_node, dlm->domain_map);
2750 set_bit(br->dead_node, dlm->live_nodes_map);
2751 __dlm_hb_node_down(dlm, br->dead_node);
2752 }
2753 spin_unlock(&dlm->spinlock);
2754
2755 dlm_kick_recovery_thread(dlm);
2756
2757 mlog(0, "%s: recovery started by node %u, for %u (%u:%u)\n",
2758 dlm->name, br->node_idx, br->dead_node,
2759 dlm->reco.dead_node, dlm->reco.new_master);
2760
2761 dlm_put(dlm);
2762 return 0;
2763}
2764
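/* Recovery finalization happens in two stages so that every node flips
 * its state at a well defined point: stage 1 re-owns the recovered
 * lockres' and sets DLM_RECO_STATE_FINALIZE everywhere, stage 2 clears
 * that flag and resets the recovery state. */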
2765#define DLM_FINALIZE_STAGE2 0x01
2766static int dlm_send_finalize_reco_message(struct dlm_ctxt *dlm)
2767{
2768 int ret = 0;
2769 struct dlm_finalize_reco fr;
2770 struct dlm_node_iter iter;
2771 int nodenum;
2772 int status;
2773 int stage = 1;
2774
2775 mlog(0, "finishing recovery for node %s:%u, "
2776 "stage %d\n", dlm->name, dlm->reco.dead_node, stage);
2777
2778 spin_lock(&dlm->spinlock);
2779 dlm_node_iter_init(dlm->domain_map, &iter);
2780 spin_unlock(&dlm->spinlock);
2781
2782stage2:
2783 memset(&fr, 0, sizeof(fr));
2784 fr.node_idx = dlm->node_num;
2785 fr.dead_node = dlm->reco.dead_node;
2786 if (stage == 2)
2787 fr.flags |= DLM_FINALIZE_STAGE2;
2788
2789 while ((nodenum = dlm_node_iter_next(&iter)) >= 0) {
2790 if (nodenum == dlm->node_num)
2791 continue;
2792 ret = o2net_send_message(DLM_FINALIZE_RECO_MSG, dlm->key,
2793 &fr, sizeof(fr), nodenum, &status);
2794 if (ret >= 0)
2795 ret = status;
2796 if (ret < 0) {
2797 mlog(ML_ERROR, "Error %d when sending message %u (key "
2798 "0x%x) to node %u\n", ret, DLM_FINALIZE_RECO_MSG,
2799 dlm->key, nodenum);
2800 if (dlm_is_host_down(ret)) {
2801 /* a node dying at this point has no effect on the recovery
2802  * that just finished; treat the send as successful and move
2803  * on to the next node */
2804 mlog(ML_ERROR, "node %u went down after this "
2805 "node finished recovery.\n", nodenum);
2806 ret = 0;
2807 continue;
2808 }
2809 break;
2810 }
2811 }
2812 if (stage == 1) {
2813 /* rewind the node iterator and send the stage-2 finalize */
2814 iter.curnode = -1;
2815 stage = 2;
2816 goto stage2;
2817 }
2818
2819 return ret;
2820}
2821
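/* Handler for DLM_FINALIZE_RECO_MSG: sanity-check that the sender really
 * is the agreed recovery master for the agreed dead node, then apply the
 * stage indicated by the DLM_FINALIZE_STAGE2 flag. */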
2822int dlm_finalize_reco_handler(struct o2net_msg *msg, u32 len, void *data,
2823 void **ret_data)
2824{
2825 struct dlm_ctxt *dlm = data;
2826 struct dlm_finalize_reco *fr = (struct dlm_finalize_reco *)msg->buf;
2827 int stage = 1;
2828
2829 /* ok to return 0: the domain is already going away */
2830 if (!dlm_grab(dlm))
2831 return 0;
2832
2833 if (fr->flags & DLM_FINALIZE_STAGE2)
2834 stage = 2;
2835
2836 mlog(0, "%s: node %u finalizing recovery stage%d of "
2837 "node %u (%u:%u)\n", dlm->name, fr->node_idx, stage,
2838 fr->dead_node, dlm->reco.dead_node, dlm->reco.new_master);
2839
2840 spin_lock(&dlm->spinlock);
2841
2842 if (dlm->reco.new_master != fr->node_idx) {
2843 mlog(ML_ERROR, "node %u sent recovery finalize msg, but node "
2844 "%u is supposed to be the new master, dead=%u\n",
2845 fr->node_idx, dlm->reco.new_master, fr->dead_node);
2846 BUG();
2847 }
2848 if (dlm->reco.dead_node != fr->dead_node) {
2849 mlog(ML_ERROR, "node %u sent recovery finalize msg for dead "
2850 "node %u, but node %u is supposed to be dead\n",
2851 fr->node_idx, fr->dead_node, dlm->reco.dead_node);
2852 BUG();
2853 }
2854
2855 switch (stage) {
2856 case 1:
2857 dlm_finish_local_lockres_recovery(dlm, fr->dead_node, fr->node_idx);
2858 if (dlm->reco.state & DLM_RECO_STATE_FINALIZE) {
2859 mlog(ML_ERROR, "%s: received finalize1 from "
2860 "new master %u for dead node %u, but "
2861 "this node has already received it!\n",
2862 dlm->name, fr->node_idx, fr->dead_node);
2863 dlm_print_reco_node_status(dlm);
2864 BUG();
2865 }
2866 dlm->reco.state |= DLM_RECO_STATE_FINALIZE;
2867 spin_unlock(&dlm->spinlock);
2868 break;
2869 case 2:
2870 if (!(dlm->reco.state & DLM_RECO_STATE_FINALIZE)) {
2871 mlog(ML_ERROR, "%s: received finalize2 from "
2872 "new master %u for dead node %u, but "
2873 "this node did not have finalize1!\n",
2874 dlm->name, fr->node_idx, fr->dead_node);
2875 dlm_print_reco_node_status(dlm);
2876 BUG();
2877 }
2878 dlm->reco.state &= ~DLM_RECO_STATE_FINALIZE;
2879 spin_unlock(&dlm->spinlock);
2880 dlm_reset_recovery(dlm);
2881 dlm_kick_recovery_thread(dlm);
2882 break;
2883 default:
2884 BUG();
2885 }
2886
2887 mlog(0, "%s: recovery done, reco master was %u, dead now %u, master now %u\n",
2888 dlm->name, fr->node_idx, dlm->reco.dead_node, dlm->reco.new_master);
2889
2890 dlm_put(dlm);
2891 return 0;
2892}
2893