/*
 * dlmrecovery.c
 *
 * recovery stuff
 *
 * Copyright (C) 2004 Oracle.  All rights reserved.
 */

28#include <linux/module.h>
29#include <linux/fs.h>
30#include <linux/types.h>
31#include <linux/slab.h>
32#include <linux/highmem.h>
33#include <linux/init.h>
34#include <linux/sysctl.h>
35#include <linux/random.h>
36#include <linux/blkdev.h>
37#include <linux/socket.h>
38#include <linux/inet.h>
39#include <linux/timer.h>
40#include <linux/kthread.h>
41#include <linux/delay.h>
42
43
44#include "cluster/heartbeat.h"
45#include "cluster/nodemanager.h"
46#include "cluster/tcp.h"
47
48#include "dlmapi.h"
49#include "dlmcommon.h"
50#include "dlmdomain.h"
51
52#define MLOG_MASK_PREFIX (ML_DLM|ML_DLM_RECOVERY)
53#include "cluster/masklog.h"
54
55static void dlm_do_local_recovery_cleanup(struct dlm_ctxt *dlm, u8 dead_node);
56
57static int dlm_recovery_thread(void *data);
58static int dlm_do_recovery(struct dlm_ctxt *dlm);
59
60static int dlm_pick_recovery_master(struct dlm_ctxt *dlm);
61static int dlm_remaster_locks(struct dlm_ctxt *dlm, u8 dead_node);
62static int dlm_init_recovery_area(struct dlm_ctxt *dlm, u8 dead_node);
63static int dlm_request_all_locks(struct dlm_ctxt *dlm,
64 u8 request_from, u8 dead_node);
65static void dlm_destroy_recovery_area(struct dlm_ctxt *dlm, u8 dead_node);
66
67static inline int dlm_num_locks_in_lockres(struct dlm_lock_resource *res);
68static void dlm_init_migratable_lockres(struct dlm_migratable_lockres *mres,
69 const char *lockname, int namelen,
70 int total_locks, u64 cookie,
71 u8 flags, u8 master);
72static int dlm_send_mig_lockres_msg(struct dlm_ctxt *dlm,
73 struct dlm_migratable_lockres *mres,
74 u8 send_to,
75 struct dlm_lock_resource *res,
76 int total_locks);
77static int dlm_process_recovery_data(struct dlm_ctxt *dlm,
78 struct dlm_lock_resource *res,
79 struct dlm_migratable_lockres *mres);
80static int dlm_send_finalize_reco_message(struct dlm_ctxt *dlm);
81static int dlm_send_all_done_msg(struct dlm_ctxt *dlm,
82 u8 dead_node, u8 send_to);
83static int dlm_send_begin_reco_message(struct dlm_ctxt *dlm, u8 dead_node);
84static void dlm_move_reco_locks_to_list(struct dlm_ctxt *dlm,
85 struct list_head *list, u8 dead_node);
86static void dlm_finish_local_lockres_recovery(struct dlm_ctxt *dlm,
87 u8 dead_node, u8 new_master);
88static void dlm_reco_ast(void *astdata);
89static void dlm_reco_bast(void *astdata, int blocked_type);
90static void dlm_reco_unlock_ast(void *astdata, enum dlm_status st);
91static void dlm_request_all_locks_worker(struct dlm_work_item *item,
92 void *data);
93static void dlm_mig_lockres_worker(struct dlm_work_item *item, void *data);
94static int dlm_lockres_master_requery(struct dlm_ctxt *dlm,
95 struct dlm_lock_resource *res,
96 u8 *real_master);
97
98static u64 dlm_get_next_mig_cookie(void);
99
100static DEFINE_SPINLOCK(dlm_reco_state_lock);
101static DEFINE_SPINLOCK(dlm_mig_cookie_lock);
102static u64 dlm_mig_cookie = 1;
103
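/* migration cookies tie together the (possibly multiple) network
 * messages used to ship one lock resource.  they are never zero and
 * simply wrap back to 1 on overflow. */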
104static u64 dlm_get_next_mig_cookie(void)
105{
106 u64 c;
107 spin_lock(&dlm_mig_cookie_lock);
108 c = dlm_mig_cookie;
109 if (dlm_mig_cookie == (~0ULL))
110 dlm_mig_cookie = 1;
111 else
112 dlm_mig_cookie++;
113 spin_unlock(&dlm_mig_cookie_lock);
114 return c;
115}
116
117static inline void dlm_set_reco_dead_node(struct dlm_ctxt *dlm,
118 u8 dead_node)
119{
120 assert_spin_locked(&dlm->spinlock);
121 if (dlm->reco.dead_node != dead_node)
122 mlog(0, "%s: changing dead_node from %u to %u\n",
123 dlm->name, dlm->reco.dead_node, dead_node);
124 dlm->reco.dead_node = dead_node;
125}
126
127static inline void dlm_set_reco_master(struct dlm_ctxt *dlm,
128 u8 master)
129{
130 assert_spin_locked(&dlm->spinlock);
131 mlog(0, "%s: changing new_master from %u to %u\n",
132 dlm->name, dlm->reco.new_master, master);
133 dlm->reco.new_master = master;
134}
135
136static inline void __dlm_reset_recovery(struct dlm_ctxt *dlm)
137{
138 assert_spin_locked(&dlm->spinlock);
139 clear_bit(dlm->reco.dead_node, dlm->recovery_map);
140 dlm_set_reco_dead_node(dlm, O2NM_INVALID_NODE_NUM);
141 dlm_set_reco_master(dlm, O2NM_INVALID_NODE_NUM);
142}
143
144static inline void dlm_reset_recovery(struct dlm_ctxt *dlm)
145{
146 spin_lock(&dlm->spinlock);
147 __dlm_reset_recovery(dlm);
148 spin_unlock(&dlm->spinlock);
149}
150
151
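/* worker for dlm->dispatched_work: message handlers running in o2net
 * callback context queue dlm_work_items here so that anything that
 * needs to block or send messages runs in process context instead. */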
152void dlm_dispatch_work(struct work_struct *work)
153{
154 struct dlm_ctxt *dlm =
155 container_of(work, struct dlm_ctxt, dispatched_work);
156 LIST_HEAD(tmp_list);
157 struct dlm_work_item *item, *next;
158 dlm_workfunc_t *workfunc;
159 int tot=0;
160
161 spin_lock(&dlm->work_lock);
162 list_splice_init(&dlm->work_list, &tmp_list);
163 spin_unlock(&dlm->work_lock);
164
165 list_for_each_entry(item, &tmp_list, list) {
166 tot++;
167 }
168 mlog(0, "%s: work thread has %d work items\n", dlm->name, tot);
169
170 list_for_each_entry_safe(item, next, &tmp_list, list) {
171 workfunc = item->func;
172 list_del_init(&item->list);

		/* already have a ref on dlm to keep it from
		 * disappearing.  just double-check the item. */
176 BUG_ON(item->dlm != dlm);

		/* this is allowed to sleep and
		 * call network stuff */
180 workfunc(item, item->data);
181
182 dlm_put(dlm);
183 kfree(item);
184 }
185}

/*
 * RECOVERY THREAD
 */

191void dlm_kick_recovery_thread(struct dlm_ctxt *dlm)
192{
	/* wake the recovery thread
	 * this will wake the reco thread in one of three places
	 * 1) sleeping with no recovery happening
	 * 2) sleeping with recovery mastered elsewhere
	 * 3) recovery mastered here, waiting on reco data */

199 wake_up(&dlm->dlm_reco_thread_wq);
200}
201
202
203int dlm_launch_recovery_thread(struct dlm_ctxt *dlm)
204{
205 mlog(0, "starting dlm recovery thread...\n");
206
207 dlm->dlm_reco_thread_task = kthread_run(dlm_recovery_thread, dlm,
208 "dlm_reco_thread");
209 if (IS_ERR(dlm->dlm_reco_thread_task)) {
210 mlog_errno(PTR_ERR(dlm->dlm_reco_thread_task));
211 dlm->dlm_reco_thread_task = NULL;
212 return -EINVAL;
213 }
214
215 return 0;
216}
217
218void dlm_complete_recovery_thread(struct dlm_ctxt *dlm)
219{
220 if (dlm->dlm_reco_thread_task) {
221 mlog(0, "waiting for dlm recovery thread to exit\n");
222 kthread_stop(dlm->dlm_reco_thread_task);
223 dlm->dlm_reco_thread_task = NULL;
224 }
225}
226
/*
 * How recovery works, roughly:
 *
 * 1) all recovery threads cluster wide work on recovering ONE dead
 *    node at a time (dlm->reco.dead_node)
 * 2) a recovery master is chosen (dlm_pick_recovery_master, which
 *    races for the $RECOVERY lock); it becomes dlm->reco.new_master
 * 3) every node marks the lock resources mastered by the dead node
 *    as RECOVERING and parks them on dlm->reco.resources
 * 4) the recovery master asks each live node for all the lock state
 *    it holds for the dead node (DLM_LOCK_REQUEST_MSG)
 * 5) each node ships that state in DLM_MIG_LOCKRES_MSG messages and
 *    then sends DLM_RECO_DATA_DONE_MSG
 * 6) once every node is done, the recovery master sends a finalize
 *    message; all nodes re-point the recovered lock resources at the
 *    new master and clear the RECOVERING state
 * 7) the recovery thread then looks for another dead node to recover
 */

252static void dlm_print_reco_node_status(struct dlm_ctxt *dlm)
253{
254 struct dlm_reco_node_data *ndata;
255 struct dlm_lock_resource *res;
256
257 mlog(ML_NOTICE, "%s(%d): recovery info, state=%s, dead=%u, master=%u\n",
258 dlm->name, task_pid_nr(dlm->dlm_reco_thread_task),
259 dlm->reco.state & DLM_RECO_STATE_ACTIVE ? "ACTIVE" : "inactive",
260 dlm->reco.dead_node, dlm->reco.new_master);
261
262 list_for_each_entry(ndata, &dlm->reco.node_data, list) {
263 char *st = "unknown";
264 switch (ndata->state) {
265 case DLM_RECO_NODE_DATA_INIT:
266 st = "init";
267 break;
268 case DLM_RECO_NODE_DATA_REQUESTING:
269 st = "requesting";
270 break;
271 case DLM_RECO_NODE_DATA_DEAD:
272 st = "dead";
273 break;
274 case DLM_RECO_NODE_DATA_RECEIVING:
275 st = "receiving";
276 break;
277 case DLM_RECO_NODE_DATA_REQUESTED:
278 st = "requested";
279 break;
280 case DLM_RECO_NODE_DATA_DONE:
281 st = "done";
282 break;
283 case DLM_RECO_NODE_DATA_FINALIZE_SENT:
284 st = "finalize-sent";
285 break;
286 default:
287 st = "bad";
288 break;
289 }
290 mlog(ML_NOTICE, "%s: reco state, node %u, state=%s\n",
291 dlm->name, ndata->node_num, st);
292 }
293 list_for_each_entry(res, &dlm->reco.resources, recovering) {
294 mlog(ML_NOTICE, "%s: lockres %.*s on recovering list\n",
295 dlm->name, res->lockname.len, res->lockname.name);
296 }
297}
298
299#define DLM_RECO_THREAD_TIMEOUT_MS (5 * 1000)
300
301static int dlm_recovery_thread(void *data)
302{
303 int status;
304 struct dlm_ctxt *dlm = data;
305 unsigned long timeout = msecs_to_jiffies(DLM_RECO_THREAD_TIMEOUT_MS);
306
307 mlog(0, "dlm thread running for %s...\n", dlm->name);
308
309 while (!kthread_should_stop()) {
310 if (dlm_domain_fully_joined(dlm)) {
311 status = dlm_do_recovery(dlm);
312 if (status == -EAGAIN) {
				/* recovery work was done; there may be
				 * more nodes to recover, so do not sleep */
314 continue;
315 }
316 if (status < 0)
317 mlog_errno(status);
318 }
319
320 wait_event_interruptible_timeout(dlm->dlm_reco_thread_wq,
321 kthread_should_stop(),
322 timeout);
323 }
324
325 mlog(0, "quitting DLM recovery thread\n");
326 return 0;
327}

/* returns true when the recovery master has contacted us */
330static int dlm_reco_master_ready(struct dlm_ctxt *dlm)
331{
332 int ready;
333 spin_lock(&dlm->spinlock);
334 ready = (dlm->reco.new_master != O2NM_INVALID_NODE_NUM);
335 spin_unlock(&dlm->spinlock);
336 return ready;
337}

/* returns true if node is no longer in the domain
 * could be dead, or could be gone. */
341int dlm_is_node_dead(struct dlm_ctxt *dlm, u8 node)
342{
343 int dead;
344 spin_lock(&dlm->spinlock);
345 dead = !test_bit(node, dlm->domain_map);
346 spin_unlock(&dlm->spinlock);
347 return dead;
348}

/* returns true once the node has been cleared from the recovery map,
 * i.e. recovery for it has completed (or it never needed recovery) */
352static int dlm_is_node_recovered(struct dlm_ctxt *dlm, u8 node)
353{
354 int recovered;
355 spin_lock(&dlm->spinlock);
356 recovered = !test_bit(node, dlm->recovery_map);
357 spin_unlock(&dlm->spinlock);
358 return recovered;
359}
360
361
362void dlm_wait_for_node_death(struct dlm_ctxt *dlm, u8 node, int timeout)
363{
364 if (dlm_is_node_dead(dlm, node))
365 return;
366
367 printk(KERN_NOTICE "o2dlm: Waiting on the death of node %u in "
368 "domain %s\n", node, dlm->name);
369
370 if (timeout)
371 wait_event_timeout(dlm->dlm_reco_thread_wq,
372 dlm_is_node_dead(dlm, node),
373 msecs_to_jiffies(timeout));
374 else
375 wait_event(dlm->dlm_reco_thread_wq,
376 dlm_is_node_dead(dlm, node));
377}
378
379void dlm_wait_for_node_recovery(struct dlm_ctxt *dlm, u8 node, int timeout)
380{
381 if (dlm_is_node_recovered(dlm, node))
382 return;
383
384 printk(KERN_NOTICE "o2dlm: Waiting on the recovery of node %u in "
385 "domain %s\n", node, dlm->name);
386
387 if (timeout)
388 wait_event_timeout(dlm->dlm_reco_thread_wq,
389 dlm_is_node_recovered(dlm, node),
390 msecs_to_jiffies(timeout));
391 else
392 wait_event(dlm->dlm_reco_thread_wq,
393 dlm_is_node_recovered(dlm, node));
394}
395
/* callers of the top-level api calls (dlm_lock/dlm_unlock) should
 * block on the dlm->reco.event when recovery is in progress.
 * the dlm recovery thread will set this state when it begins
 * recovery, and clear it (and wake the waiters) when it is done. */
402static int dlm_in_recovery(struct dlm_ctxt *dlm)
403{
404 int in_recovery;
405 spin_lock(&dlm->spinlock);
406 in_recovery = !!(dlm->reco.state & DLM_RECO_STATE_ACTIVE);
407 spin_unlock(&dlm->spinlock);
408 return in_recovery;
409}
410
411
412void dlm_wait_for_recovery(struct dlm_ctxt *dlm)
413{
414 if (dlm_in_recovery(dlm)) {
415 mlog(0, "%s: reco thread %d in recovery: "
416 "state=%d, master=%u, dead=%u\n",
417 dlm->name, task_pid_nr(dlm->dlm_reco_thread_task),
418 dlm->reco.state, dlm->reco.new_master,
419 dlm->reco.dead_node);
420 }
421 wait_event(dlm->reco.event, !dlm_in_recovery(dlm));
422}
423
424static void dlm_begin_recovery(struct dlm_ctxt *dlm)
425{
426 spin_lock(&dlm->spinlock);
427 BUG_ON(dlm->reco.state & DLM_RECO_STATE_ACTIVE);
428 printk(KERN_NOTICE "o2dlm: Begin recovery on domain %s for node %u\n",
429 dlm->name, dlm->reco.dead_node);
430 dlm->reco.state |= DLM_RECO_STATE_ACTIVE;
431 spin_unlock(&dlm->spinlock);
432}
433
434static void dlm_end_recovery(struct dlm_ctxt *dlm)
435{
436 spin_lock(&dlm->spinlock);
437 BUG_ON(!(dlm->reco.state & DLM_RECO_STATE_ACTIVE));
438 dlm->reco.state &= ~DLM_RECO_STATE_ACTIVE;
439 spin_unlock(&dlm->spinlock);
440 printk(KERN_NOTICE "o2dlm: End recovery on domain %s\n", dlm->name);
441 wake_up(&dlm->reco.event);
442}
443
444static void dlm_print_recovery_master(struct dlm_ctxt *dlm)
445{
446 printk(KERN_NOTICE "o2dlm: Node %u (%s) is the Recovery Master for the "
447 "dead node %u in domain %s\n", dlm->reco.new_master,
448 (dlm->node_num == dlm->reco.new_master ? "me" : "he"),
449 dlm->reco.dead_node, dlm->name);
450}
451
452static int dlm_do_recovery(struct dlm_ctxt *dlm)
453{
454 int status = 0;
455 int ret;
456
457 spin_lock(&dlm->spinlock);

	/* check to see if the new master has died */
460 if (dlm->reco.new_master != O2NM_INVALID_NODE_NUM &&
461 test_bit(dlm->reco.new_master, dlm->recovery_map)) {
462 mlog(0, "new master %u died while recovering %u!\n",
463 dlm->reco.new_master, dlm->reco.dead_node);
464
465 dlm_set_reco_master(dlm, O2NM_INVALID_NODE_NUM);
466 }

	/* select a target to recover */
469 if (dlm->reco.dead_node == O2NM_INVALID_NODE_NUM) {
470 int bit;
471
472 bit = find_next_bit (dlm->recovery_map, O2NM_MAX_NODES, 0);
473 if (bit >= O2NM_MAX_NODES || bit < 0)
474 dlm_set_reco_dead_node(dlm, O2NM_INVALID_NODE_NUM);
475 else
476 dlm_set_reco_dead_node(dlm, bit);
477 } else if (!test_bit(dlm->reco.dead_node, dlm->recovery_map)) {
478
479 mlog(ML_ERROR, "dead_node %u no longer in recovery map!\n",
480 dlm->reco.dead_node);
481 dlm_set_reco_dead_node(dlm, O2NM_INVALID_NODE_NUM);
482 }
483
484 if (dlm->reco.dead_node == O2NM_INVALID_NODE_NUM) {
485
486 spin_unlock(&dlm->spinlock);
487
488 return 0;
489 }
490 mlog(0, "%s(%d):recovery thread found node %u in the recovery map!\n",
491 dlm->name, task_pid_nr(dlm->dlm_reco_thread_task),
492 dlm->reco.dead_node);
493 spin_unlock(&dlm->spinlock);
494
495
496
497 dlm_begin_recovery(dlm);
498
499 if (dlm->reco.new_master == dlm->node_num)
500 goto master_here;
501
502 if (dlm->reco.new_master == O2NM_INVALID_NODE_NUM) {
		/* choose a new master, returns 0 if this node
		 * is the master, -EEXIST if it's another node.
		 * this does not return until a new master is chosen
		 * or recovery completes entirely. */
507 ret = dlm_pick_recovery_master(dlm);
508 if (!ret) {
509
510 goto master_here;
511 }
512 mlog(0, "another node will master this recovery session.\n");
513 }
514
515 dlm_print_recovery_master(dlm);
516
	/* it is safe to start everything back up here
	 * because all of the dead node's lock resources
	 * have already been marked as in-recovery */
520 dlm_end_recovery(dlm);
521
522
523 return 0;
524
525master_here:
526 dlm_print_recovery_master(dlm);
527
528 status = dlm_remaster_locks(dlm, dlm->reco.dead_node);
529 if (status < 0) {
530
531 mlog(ML_ERROR, "%s: Error %d remastering locks for node %u, "
532 "retrying.\n", dlm->name, status, dlm->reco.dead_node);

		/* yield a bit to allow any final network messages
		 * to get handled on remaining nodes */
535 msleep(100);
536 } else {
537
538 mlog(0, "DONE mastering recovery of %s:%u here(this=%u)!\n",
539 dlm->name, dlm->reco.dead_node, dlm->node_num);
540 spin_lock(&dlm->spinlock);
541 __dlm_reset_recovery(dlm);
542 dlm->reco.state &= ~DLM_RECO_STATE_FINALIZE;
543 spin_unlock(&dlm->spinlock);
544 }
545 dlm_end_recovery(dlm);

	/* continue and look for another dead node */
548 return -EAGAIN;
549}
550
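/* run by the node that won the recovery-master race (see
 * dlm_pick_recovery_master): ask every live node for the lock state it
 * held for dead_node, wait until all of them report DATA_DONE, then
 * send the finalize message and take over the recovered locks. */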
551static int dlm_remaster_locks(struct dlm_ctxt *dlm, u8 dead_node)
552{
553 int status = 0;
554 struct dlm_reco_node_data *ndata;
555 int all_nodes_done;
556 int destroy = 0;
557 int pass = 0;
558
559 do {
560
561
562 status = dlm_init_recovery_area(dlm, dead_node);
563 if (status < 0) {
564 mlog(ML_ERROR, "%s: failed to alloc recovery area, "
565 "retrying\n", dlm->name);
566 msleep(1000);
567 }
568 } while (status != 0);
569

	/* safe to access the node data list without a lock, since this
	 * thread is the only one that adds or removes entries on it */
572 list_for_each_entry(ndata, &dlm->reco.node_data, list) {
573 BUG_ON(ndata->state != DLM_RECO_NODE_DATA_INIT);
574 ndata->state = DLM_RECO_NODE_DATA_REQUESTING;
575
576 mlog(0, "%s: Requesting lock info from node %u\n", dlm->name,
577 ndata->node_num);
578
579 if (ndata->node_num == dlm->node_num) {
580 ndata->state = DLM_RECO_NODE_DATA_DONE;
581 continue;
582 }
583
584 do {
585 status = dlm_request_all_locks(dlm, ndata->node_num,
586 dead_node);
587 if (status < 0) {
588 mlog_errno(status);
589 if (dlm_is_host_down(status)) {
590
591 status = 0;
592 ndata->state = DLM_RECO_NODE_DATA_DEAD;
593
594
595 wait_event_timeout(dlm->dlm_reco_thread_wq,
596 dlm_is_node_dead(dlm,
597 ndata->node_num),
598 msecs_to_jiffies(1000));
599 mlog(0, "waited 1 sec for %u, "
600 "dead? %s\n", ndata->node_num,
601 dlm_is_node_dead(dlm, ndata->node_num) ?
602 "yes" : "no");
603 } else {
604
605 mlog(0, "%s: node %u returned "
606 "%d during recovery, retrying "
607 "after a short wait\n",
608 dlm->name, ndata->node_num,
609 status);
610 msleep(100);
611 }
612 }
613 } while (status != 0);
614
615 spin_lock(&dlm_reco_state_lock);
616 switch (ndata->state) {
617 case DLM_RECO_NODE_DATA_INIT:
618 case DLM_RECO_NODE_DATA_FINALIZE_SENT:
619 case DLM_RECO_NODE_DATA_REQUESTED:
620 BUG();
621 break;
622 case DLM_RECO_NODE_DATA_DEAD:
623 mlog(0, "node %u died after requesting "
624 "recovery info for node %u\n",
625 ndata->node_num, dead_node);
626
627
628 break;
629 case DLM_RECO_NODE_DATA_REQUESTING:
630 ndata->state = DLM_RECO_NODE_DATA_REQUESTED;
631 mlog(0, "now receiving recovery data from "
632 "node %u for dead node %u\n",
633 ndata->node_num, dead_node);
634 break;
635 case DLM_RECO_NODE_DATA_RECEIVING:
636 mlog(0, "already receiving recovery data from "
637 "node %u for dead node %u\n",
638 ndata->node_num, dead_node);
639 break;
640 case DLM_RECO_NODE_DATA_DONE:
641 mlog(0, "already DONE receiving recovery data "
642 "from node %u for dead node %u\n",
643 ndata->node_num, dead_node);
644 break;
645 }
646 spin_unlock(&dlm_reco_state_lock);
647 }
648
649 mlog(0, "%s: Done requesting all lock info\n", dlm->name);

	/* nodes should be sending reco data now;
	 * just need to wait for all of them to finish */
654 while (1) {
655
656
657 all_nodes_done = 1;
658 spin_lock(&dlm_reco_state_lock);
659 list_for_each_entry(ndata, &dlm->reco.node_data, list) {
660 mlog(0, "checking recovery state of node %u\n",
661 ndata->node_num);
662 switch (ndata->state) {
663 case DLM_RECO_NODE_DATA_INIT:
664 case DLM_RECO_NODE_DATA_REQUESTING:
665 mlog(ML_ERROR, "bad ndata state for "
666 "node %u: state=%d\n",
667 ndata->node_num, ndata->state);
668 BUG();
669 break;
670 case DLM_RECO_NODE_DATA_DEAD:
671 mlog(0, "node %u died after "
672 "requesting recovery info for "
673 "node %u\n", ndata->node_num,
674 dead_node);
675 break;
676 case DLM_RECO_NODE_DATA_RECEIVING:
677 case DLM_RECO_NODE_DATA_REQUESTED:
678 mlog(0, "%s: node %u still in state %s\n",
679 dlm->name, ndata->node_num,
680 ndata->state==DLM_RECO_NODE_DATA_RECEIVING ?
681 "receiving" : "requested");
682 all_nodes_done = 0;
683 break;
684 case DLM_RECO_NODE_DATA_DONE:
685 mlog(0, "%s: node %u state is done\n",
686 dlm->name, ndata->node_num);
687 break;
688 case DLM_RECO_NODE_DATA_FINALIZE_SENT:
689 mlog(0, "%s: node %u state is finalize\n",
690 dlm->name, ndata->node_num);
691 break;
692 }
693 }
694 spin_unlock(&dlm_reco_state_lock);
695
696 mlog(0, "pass #%d, all_nodes_done?: %s\n", ++pass,
697 all_nodes_done?"yes":"no");
698 if (all_nodes_done) {
699 int ret;
700
			/* set this flag on the recovery master so that a
			 * new recovery for another dead node does not
			 * start before this one has been finalized */
705 spin_lock(&dlm->spinlock);
706 dlm->reco.state |= DLM_RECO_STATE_FINALIZE;
707 spin_unlock(&dlm->spinlock);
			/* all nodes are now in DLM_RECO_NODE_DATA_DONE state,
			 * so send a finalize message to everyone and
			 * clean up */
712 mlog(0, "all nodes are done! send finalize\n");
713 ret = dlm_send_finalize_reco_message(dlm);
714 if (ret < 0)
715 mlog_errno(ret);
716
717 spin_lock(&dlm->spinlock);
718 dlm_finish_local_lockres_recovery(dlm, dead_node,
719 dlm->node_num);
720 spin_unlock(&dlm->spinlock);
721 mlog(0, "should be done with recovery!\n");
722
723 mlog(0, "finishing recovery of %s at %lu, "
724 "dead=%u, this=%u, new=%u\n", dlm->name,
725 jiffies, dlm->reco.dead_node,
726 dlm->node_num, dlm->reco.new_master);
727 destroy = 1;
728 status = 0;
729
730 dlm_kick_thread(dlm, NULL);
731 break;
732 }

		/* wait to be signalled, with periodic timeout
		 * to check for node death */
735 wait_event_interruptible_timeout(dlm->dlm_reco_thread_wq,
736 kthread_should_stop(),
737 msecs_to_jiffies(DLM_RECO_THREAD_TIMEOUT_MS));
738
739 }
740
741 if (destroy)
742 dlm_destroy_recovery_area(dlm, dead_node);
743
744 return status;
745}
746
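/* build the recovery area: one dlm_reco_node_data entry (state INIT)
 * for every node still in the domain map (the dead node has already
 * been removed from it). */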
747static int dlm_init_recovery_area(struct dlm_ctxt *dlm, u8 dead_node)
748{
749 int num=0;
750 struct dlm_reco_node_data *ndata;
751
752 spin_lock(&dlm->spinlock);
753 memcpy(dlm->reco.node_map, dlm->domain_map, sizeof(dlm->domain_map));
754
755
756 spin_unlock(&dlm->spinlock);
757
758 while (1) {
759 num = find_next_bit (dlm->reco.node_map, O2NM_MAX_NODES, num);
760 if (num >= O2NM_MAX_NODES) {
761 break;
762 }
763 BUG_ON(num == dead_node);
764
765 ndata = kzalloc(sizeof(*ndata), GFP_NOFS);
766 if (!ndata) {
767 dlm_destroy_recovery_area(dlm, dead_node);
768 return -ENOMEM;
769 }
770 ndata->node_num = num;
771 ndata->state = DLM_RECO_NODE_DATA_INIT;
772 spin_lock(&dlm_reco_state_lock);
773 list_add_tail(&ndata->list, &dlm->reco.node_data);
774 spin_unlock(&dlm_reco_state_lock);
775 num++;
776 }
777
778 return 0;
779}
780
781static void dlm_destroy_recovery_area(struct dlm_ctxt *dlm, u8 dead_node)
782{
783 struct dlm_reco_node_data *ndata, *next;
784 LIST_HEAD(tmplist);
785
786 spin_lock(&dlm_reco_state_lock);
787 list_splice_init(&dlm->reco.node_data, &tmplist);
788 spin_unlock(&dlm_reco_state_lock);
789
790 list_for_each_entry_safe(ndata, next, &tmplist, list) {
791 list_del_init(&ndata->list);
792 kfree(ndata);
793 }
794}
795
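/* ask request_from to send us all the lock state it holds for
 * dead_node.  the actual lock data comes back later via
 * DLM_MIG_LOCKRES_MSG; this call only returns the status of the
 * request itself. */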
796static int dlm_request_all_locks(struct dlm_ctxt *dlm, u8 request_from,
797 u8 dead_node)
798{
799 struct dlm_lock_request lr;
800 int ret;
801 int status;
802
803 mlog(0, "\n");
804
805
806 mlog(0, "dlm_request_all_locks: dead node is %u, sending request "
807 "to %u\n", dead_node, request_from);
808
809 memset(&lr, 0, sizeof(lr));
810 lr.node_idx = dlm->node_num;
811 lr.dead_node = dead_node;
812
813
814 ret = o2net_send_message(DLM_LOCK_REQUEST_MSG, dlm->key,
815 &lr, sizeof(lr), request_from, &status);
816
817
818 if (ret < 0)
819 mlog(ML_ERROR, "%s: Error %d send LOCK_REQUEST to node %u "
820 "to recover dead node %u\n", dlm->name, ret,
821 request_from, dead_node);
822 else
823 ret = status;
824
825
826 return ret;
827
828}
829
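/* handler for DLM_LOCK_REQUEST_MSG from the recovery master.  shipping
 * lock state can block, so just sanity-check the request and queue
 * dlm_request_all_locks_worker to do the real work. */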
830int dlm_request_all_locks_handler(struct o2net_msg *msg, u32 len, void *data,
831 void **ret_data)
832{
833 struct dlm_ctxt *dlm = data;
834 struct dlm_lock_request *lr = (struct dlm_lock_request *)msg->buf;
835 char *buf = NULL;
836 struct dlm_work_item *item = NULL;
837
838 if (!dlm_grab(dlm))
839 return -EINVAL;
840
841 if (lr->dead_node != dlm->reco.dead_node) {
842 mlog(ML_ERROR, "%s: node %u sent dead_node=%u, but local "
843 "dead_node is %u\n", dlm->name, lr->node_idx,
844 lr->dead_node, dlm->reco.dead_node);
845 dlm_print_reco_node_status(dlm);
		/* disagreement about which node is being recovered;
		 * error out so the recovery master retries shortly */
847 dlm_put(dlm);
848 return -ENOMEM;
849 }
850 BUG_ON(lr->dead_node != dlm->reco.dead_node);
851
852 item = kzalloc(sizeof(*item), GFP_NOFS);
853 if (!item) {
854 dlm_put(dlm);
855 return -ENOMEM;
856 }

	/* this will get freed by dlm_request_all_locks_worker */
859 buf = (char *) __get_free_page(GFP_NOFS);
860 if (!buf) {
861 kfree(item);
862 dlm_put(dlm);
863 return -ENOMEM;
864 }

	/* queue up work for dlm_request_all_locks_worker */
	dlm_grab(dlm);	/* get an extra ref for the work item */
868 dlm_init_work_item(dlm, item, dlm_request_all_locks_worker, buf);
869 item->u.ral.reco_master = lr->node_idx;
870 item->u.ral.dead_node = lr->dead_node;
871 spin_lock(&dlm->work_lock);
872 list_add_tail(&item->list, &dlm->work_list);
873 spin_unlock(&dlm->work_lock);
874 queue_work(dlm->dlm_worker, &dlm->dispatched_work);
875
876 dlm_put(dlm);
877 return 0;
878}
879
880static void dlm_request_all_locks_worker(struct dlm_work_item *item, void *data)
881{
882 struct dlm_migratable_lockres *mres;
883 struct dlm_lock_resource *res;
884 struct dlm_ctxt *dlm;
885 LIST_HEAD(resources);
886 int ret;
887 u8 dead_node, reco_master;
888 int skip_all_done = 0;
889
890 dlm = item->dlm;
891 dead_node = item->u.ral.dead_node;
892 reco_master = item->u.ral.reco_master;
893 mres = (struct dlm_migratable_lockres *)data;
894
895 mlog(0, "%s: recovery worker started, dead=%u, master=%u\n",
896 dlm->name, dead_node, reco_master);
897
898 if (dead_node != dlm->reco.dead_node ||
899 reco_master != dlm->reco.new_master) {
		/* this worker could have been created before the
		 * recovery master died.  if so, do not continue,
		 * but do not error out either. */
902 if (dlm->reco.new_master == O2NM_INVALID_NODE_NUM) {
903 mlog(ML_NOTICE, "%s: will not send recovery state, "
904 "recovery master %u died, thread=(dead=%u,mas=%u)"
905 " current=(dead=%u,mas=%u)\n", dlm->name,
906 reco_master, dead_node, reco_master,
907 dlm->reco.dead_node, dlm->reco.new_master);
908 } else {
909 mlog(ML_NOTICE, "%s: reco state invalid: reco(dead=%u, "
910 "master=%u), request(dead=%u, master=%u)\n",
911 dlm->name, dlm->reco.dead_node,
912 dlm->reco.new_master, dead_node, reco_master);
913 }
914 goto leave;
915 }
916

	/* the dead node's lock resources were parked on dlm->reco.resources
	 * by dlm_do_local_recovery_cleanup.  move the ones this node must
	 * report onto a private list so they can be sent without holding
	 * dlm->spinlock. */
923 dlm_move_reco_locks_to_list(dlm, &resources, dead_node);

	/* now we can begin blasting lockreses without the dlm lock */

	/* any errors returned will be due to the new_master dying;
	 * the dlm_reco_thread should detect this */
929 list_for_each_entry(res, &resources, recovering) {
930 ret = dlm_send_one_lockres(dlm, res, mres, reco_master,
931 DLM_MRES_RECOVERY);
932 if (ret < 0) {
933 mlog(ML_ERROR, "%s: node %u went down while sending "
934 "recovery state for dead node %u, ret=%d\n", dlm->name,
935 reco_master, dead_node, ret);
936 skip_all_done = 1;
937 break;
938 }
939 }
940
941
942 spin_lock(&dlm->spinlock);
943 list_splice_init(&resources, &dlm->reco.resources);
944 spin_unlock(&dlm->spinlock);
945
946 if (!skip_all_done) {
947 ret = dlm_send_all_done_msg(dlm, dead_node, reco_master);
948 if (ret < 0) {
949 mlog(ML_ERROR, "%s: node %u went down while sending "
950 "recovery all-done for dead node %u, ret=%d\n",
951 dlm->name, reco_master, dead_node, ret);
952 }
953 }
954leave:
955 free_page((unsigned long)data);
956}
957
958
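/* tell the recovery master that this node has finished sending all of
 * its lock state for dead_node. */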
959static int dlm_send_all_done_msg(struct dlm_ctxt *dlm, u8 dead_node, u8 send_to)
960{
961 int ret, tmpret;
962 struct dlm_reco_data_done done_msg;
963
964 memset(&done_msg, 0, sizeof(done_msg));
965 done_msg.node_idx = dlm->node_num;
966 done_msg.dead_node = dead_node;
967 mlog(0, "sending DATA DONE message to %u, "
968 "my node=%u, dead node=%u\n", send_to, done_msg.node_idx,
969 done_msg.dead_node);
970
971 ret = o2net_send_message(DLM_RECO_DATA_DONE_MSG, dlm->key, &done_msg,
972 sizeof(done_msg), send_to, &tmpret);
973 if (ret < 0) {
974 mlog(ML_ERROR, "%s: Error %d send RECO_DATA_DONE to node %u "
975 "to recover dead node %u\n", dlm->name, ret, send_to,
976 dead_node);
977 if (!dlm_is_host_down(ret)) {
978 BUG();
979 }
980 } else
981 ret = tmpret;
982 return ret;
983}
984
985
986int dlm_reco_data_done_handler(struct o2net_msg *msg, u32 len, void *data,
987 void **ret_data)
988{
989 struct dlm_ctxt *dlm = data;
990 struct dlm_reco_data_done *done = (struct dlm_reco_data_done *)msg->buf;
991 struct dlm_reco_node_data *ndata = NULL;
992 int ret = -EINVAL;
993
994 if (!dlm_grab(dlm))
995 return -EINVAL;
996
997 mlog(0, "got DATA DONE: dead_node=%u, reco.dead_node=%u, "
998 "node_idx=%u, this node=%u\n", done->dead_node,
999 dlm->reco.dead_node, done->node_idx, dlm->node_num);
1000
1001 mlog_bug_on_msg((done->dead_node != dlm->reco.dead_node),
1002 "Got DATA DONE: dead_node=%u, reco.dead_node=%u, "
1003 "node_idx=%u, this node=%u\n", done->dead_node,
1004 dlm->reco.dead_node, done->node_idx, dlm->node_num);
1005
1006 spin_lock(&dlm_reco_state_lock);
1007 list_for_each_entry(ndata, &dlm->reco.node_data, list) {
1008 if (ndata->node_num != done->node_idx)
1009 continue;
1010
1011 switch (ndata->state) {
1012
1013 case DLM_RECO_NODE_DATA_INIT:
1014 case DLM_RECO_NODE_DATA_DEAD:
1015 case DLM_RECO_NODE_DATA_FINALIZE_SENT:
1016 mlog(ML_ERROR, "bad ndata state for node %u:"
1017 " state=%d\n", ndata->node_num,
1018 ndata->state);
1019 BUG();
1020 break;
1021
1022
1023 case DLM_RECO_NODE_DATA_DONE:
1024 case DLM_RECO_NODE_DATA_RECEIVING:
1025 case DLM_RECO_NODE_DATA_REQUESTED:
1026 case DLM_RECO_NODE_DATA_REQUESTING:
1027 mlog(0, "node %u is DONE sending "
1028 "recovery data!\n",
1029 ndata->node_num);
1030
1031 ndata->state = DLM_RECO_NODE_DATA_DONE;
1032 ret = 0;
1033 break;
1034 }
1035 }
1036 spin_unlock(&dlm_reco_state_lock);
1037
1038
1039 if (!ret)
1040 dlm_kick_recovery_thread(dlm);
1041
1042 if (ret < 0)
1043 mlog(ML_ERROR, "failed to find recovery node data for node "
1044 "%u\n", done->node_idx);
1045 dlm_put(dlm);
1046
1047 mlog(0, "leaving reco data done handler, ret=%d\n", ret);
1048 return ret;
1049}
1050
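/* pull the lock resources this node must report for dead_node off the
 * dlm->reco.resources list onto a private list: anything owned by the
 * dead node or with an unknown owner.  $RECOVERY lockres entries are
 * not shipped; any granted $RECOVERY lock held by the dead node is
 * simply dropped here. */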
1051static void dlm_move_reco_locks_to_list(struct dlm_ctxt *dlm,
1052 struct list_head *list,
1053 u8 dead_node)
1054{
1055 struct dlm_lock_resource *res, *next;
1056 struct dlm_lock *lock;
1057
1058 spin_lock(&dlm->spinlock);
1059 list_for_each_entry_safe(res, next, &dlm->reco.resources, recovering) {
1060
1061
1062 if (dlm_is_recovery_lock(res->lockname.name,
1063 res->lockname.len)) {
1064 spin_lock(&res->spinlock);
1065 list_for_each_entry(lock, &res->granted, list) {
1066 if (lock->ml.node == dead_node) {
1067 mlog(0, "AHA! there was "
1068 "a $RECOVERY lock for dead "
1069 "node %u (%s)!\n",
1070 dead_node, dlm->name);
1071 list_del_init(&lock->list);
1072 dlm_lock_put(lock);
1073 break;
1074 }
1075 }
1076 spin_unlock(&res->spinlock);
1077 continue;
1078 }
1079
1080 if (res->owner == dead_node) {
1081 mlog(0, "found lockres owned by dead node while "
1082 "doing recovery for node %u. sending it.\n",
1083 dead_node);
1084 list_move_tail(&res->recovering, list);
1085 } else if (res->owner == DLM_LOCK_RES_OWNER_UNKNOWN) {
1086 mlog(0, "found UNKNOWN owner while doing recovery "
1087 "for node %u. sending it.\n", dead_node);
1088 list_move_tail(&res->recovering, list);
1089 }
1090 }
1091 spin_unlock(&dlm->spinlock);
1092}
1093
1094static inline int dlm_num_locks_in_lockres(struct dlm_lock_resource *res)
1095{
1096 int total_locks = 0;
1097 struct list_head *iter, *queue = &res->granted;
1098 int i;
1099
1100 for (i=0; i<3; i++) {
1101 list_for_each(iter, queue)
1102 total_locks++;
1103 queue++;
1104 }
1105 return total_locks;
1106}
1107
1108
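/* ship the locks currently packed into mres to send_to, then reset the
 * buffer so more locks for the same lockres can be added.  the
 * DLM_MRES_ALL_DONE flag is set on the final message for a lockres. */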
1109static int dlm_send_mig_lockres_msg(struct dlm_ctxt *dlm,
1110 struct dlm_migratable_lockres *mres,
1111 u8 send_to,
1112 struct dlm_lock_resource *res,
1113 int total_locks)
1114{
1115 u64 mig_cookie = be64_to_cpu(mres->mig_cookie);
1116 int mres_total_locks = be32_to_cpu(mres->total_locks);
1117 int sz, ret = 0, status = 0;
1118 u8 orig_flags = mres->flags,
1119 orig_master = mres->master;
1120
1121 BUG_ON(mres->num_locks > DLM_MAX_MIGRATABLE_LOCKS);
1122 if (!mres->num_locks)
1123 return 0;
1124
1125 sz = sizeof(struct dlm_migratable_lockres) +
1126 (mres->num_locks * sizeof(struct dlm_migratable_lock));
1127
1128
1129 orig_flags = mres->flags;
1130 BUG_ON(total_locks > mres_total_locks);
1131 if (total_locks == mres_total_locks)
1132 mres->flags |= DLM_MRES_ALL_DONE;
1133
1134 mlog(0, "%s:%.*s: sending mig lockres (%s) to %u\n",
1135 dlm->name, res->lockname.len, res->lockname.name,
1136 orig_flags & DLM_MRES_MIGRATION ? "migration" : "recovery",
1137 send_to);
1138
1139
1140 ret = o2net_send_message(DLM_MIG_LOCKRES_MSG, dlm->key, mres,
1141 sz, send_to, &status);
1142 if (ret < 0) {
1143
1144
1145 mlog(ML_ERROR, "%s: res %.*s, Error %d send MIG_LOCKRES to "
1146 "node %u (%s)\n", dlm->name, mres->lockname_len,
1147 mres->lockname, ret, send_to,
1148 (orig_flags & DLM_MRES_MIGRATION ?
1149 "migration" : "recovery"));
1150 } else {
1151
1152 ret = status;
1153 if (ret < 0) {
1154 mlog_errno(ret);
1155
1156 if (ret == -EFAULT) {
1157 mlog(ML_ERROR, "node %u told me to kill "
1158 "myself!\n", send_to);
1159 BUG();
1160 }
1161 }
1162 }

	/* zero and reinit the message buffer */
1165 dlm_init_migratable_lockres(mres, res->lockname.name,
1166 res->lockname.len, mres_total_locks,
1167 mig_cookie, orig_flags, orig_master);
1168 return ret;
1169}
1170
1171static void dlm_init_migratable_lockres(struct dlm_migratable_lockres *mres,
1172 const char *lockname, int namelen,
1173 int total_locks, u64 cookie,
1174 u8 flags, u8 master)
1175{
1176
1177 clear_page(mres);
1178 mres->lockname_len = namelen;
1179 memcpy(mres->lockname, lockname, namelen);
1180 mres->num_locks = 0;
1181 mres->total_locks = cpu_to_be32(total_locks);
1182 mres->mig_cookie = cpu_to_be64(cookie);
1183 mres->flags = flags;
1184 mres->master = master;
1185}
1186
1187static void dlm_prepare_lvb_for_migration(struct dlm_lock *lock,
1188 struct dlm_migratable_lockres *mres,
1189 int queue)
1190{
1191 if (!lock->lksb)
1192 return;

	/* Ignore lvb in all locks in the blocked list */
1195 if (queue == DLM_BLOCKED_LIST)
1196 return;

	/* Only consider lvbs in locks with granted EX or PR lock levels */
1199 if (lock->ml.type != LKM_EXMODE && lock->ml.type != LKM_PRMODE)
1200 return;
1201
1202 if (dlm_lvb_is_empty(mres->lvb)) {
1203 memcpy(mres->lvb, lock->lksb->lvb, DLM_LVB_LEN);
1204 return;
1205 }
1206
1207
1208 if (!memcmp(mres->lvb, lock->lksb->lvb, DLM_LVB_LEN))
1209 return;
1210
1211 mlog(ML_ERROR, "Mismatched lvb in lock cookie=%u:%llu, name=%.*s, "
1212 "node=%u\n",
1213 dlm_get_lock_cookie_node(be64_to_cpu(lock->ml.cookie)),
1214 dlm_get_lock_cookie_seq(be64_to_cpu(lock->ml.cookie)),
1215 lock->lockres->lockname.len, lock->lockres->lockname.name,
1216 lock->ml.node);
1217 dlm_print_one_lock_resource(lock->lockres);
1218 BUG();
1219}
1220

/* returns 1 if this lock fills the network structure,
 * 0 otherwise */
1223static int dlm_add_lock_to_array(struct dlm_lock *lock,
1224 struct dlm_migratable_lockres *mres, int queue)
1225{
1226 struct dlm_migratable_lock *ml;
1227 int lock_num = mres->num_locks;
1228
1229 ml = &(mres->ml[lock_num]);
1230 ml->cookie = lock->ml.cookie;
1231 ml->type = lock->ml.type;
1232 ml->convert_type = lock->ml.convert_type;
1233 ml->highest_blocked = lock->ml.highest_blocked;
1234 ml->list = queue;
1235 if (lock->lksb) {
1236 ml->flags = lock->lksb->flags;
1237 dlm_prepare_lvb_for_migration(lock, mres, queue);
1238 }
1239 ml->node = lock->ml.node;
1240 mres->num_locks++;
1241
1242 if (mres->num_locks == DLM_MAX_MIGRATABLE_LOCKS)
1243 return 1;
1244 return 0;
1245}
1246
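/* a "dummy" lock (zero cookie, all IVMODE, on the blocked list) is sent
 * when this node has no locks on the lockres but still holds a
 * reference to it, so the new master can set the refmap bit. */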
1247static void dlm_add_dummy_lock(struct dlm_ctxt *dlm,
1248 struct dlm_migratable_lockres *mres)
1249{
1250 struct dlm_lock dummy;
1251 memset(&dummy, 0, sizeof(dummy));
1252 dummy.ml.cookie = 0;
1253 dummy.ml.type = LKM_IVMODE;
1254 dummy.ml.convert_type = LKM_IVMODE;
1255 dummy.ml.highest_blocked = LKM_IVMODE;
1256 dummy.lksb = NULL;
1257 dummy.ml.node = dlm->node_num;
1258 dlm_add_lock_to_array(&dummy, mres, DLM_BLOCKED_LIST);
1259}
1260
1261static inline int dlm_is_dummy_lock(struct dlm_ctxt *dlm,
1262 struct dlm_migratable_lock *ml,
1263 u8 *nodenum)
1264{
1265 if (unlikely(ml->cookie == 0 &&
1266 ml->type == LKM_IVMODE &&
1267 ml->convert_type == LKM_IVMODE &&
1268 ml->highest_blocked == LKM_IVMODE &&
1269 ml->list == DLM_BLOCKED_LIST)) {
1270 *nodenum = ml->node;
1271 return 1;
1272 }
1273 return 0;
1274}
1275
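/* pack every lock on all three queues of res into mres messages and
 * send them to send_to, for either recovery or migration.  if there are
 * no locks at all, a dummy lock is sent to convey the mastery
 * reference. */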
1276int dlm_send_one_lockres(struct dlm_ctxt *dlm, struct dlm_lock_resource *res,
1277 struct dlm_migratable_lockres *mres,
1278 u8 send_to, u8 flags)
1279{
1280 struct list_head *queue;
1281 int total_locks, i;
1282 u64 mig_cookie = 0;
1283 struct dlm_lock *lock;
1284 int ret = 0;
1285
1286 BUG_ON(!(flags & (DLM_MRES_RECOVERY|DLM_MRES_MIGRATION)));
1287
1288 mlog(0, "sending to %u\n", send_to);
1289
1290 total_locks = dlm_num_locks_in_lockres(res);
1291 if (total_locks > DLM_MAX_MIGRATABLE_LOCKS) {
1292
1293 mlog(0, "argh. lockres has %d locks. this will "
1294 "require more than one network packet to "
1295 "migrate\n", total_locks);
1296 mig_cookie = dlm_get_next_mig_cookie();
1297 }
1298
1299 dlm_init_migratable_lockres(mres, res->lockname.name,
1300 res->lockname.len, total_locks,
1301 mig_cookie, flags, res->owner);
1302
1303 total_locks = 0;
1304 for (i=DLM_GRANTED_LIST; i<=DLM_BLOCKED_LIST; i++) {
1305 queue = dlm_list_idx_to_ptr(res, i);
1306 list_for_each_entry(lock, queue, list) {
1307
1308 total_locks++;
1309 if (!dlm_add_lock_to_array(lock, mres, i))
1310 continue;
1311
1312
1313
1314 ret = dlm_send_mig_lockres_msg(dlm, mres, send_to,
1315 res, total_locks);
1316 if (ret < 0)
1317 goto error;
1318 }
1319 }
1320 if (total_locks == 0) {
		/* send a dummy lock to indicate a mastery reference only */
1322 mlog(0, "%s:%.*s: sending dummy lock to %u, %s\n",
1323 dlm->name, res->lockname.len, res->lockname.name,
1324 send_to, flags & DLM_MRES_RECOVERY ? "recovery" :
1325 "migration");
1326 dlm_add_dummy_lock(dlm, mres);
1327 }
1328
1329 ret = dlm_send_mig_lockres_msg(dlm, mres, send_to, res, total_locks);
1330 if (ret < 0)
1331 goto error;
1332 return ret;
1333
1334error:
1335 mlog(ML_ERROR, "%s: dlm_send_mig_lockres_msg returned %d\n",
1336 dlm->name, ret);
1337 if (!dlm_is_host_down(ret))
1338 BUG();
1339 mlog(0, "%s: node %u went down while sending %s "
1340 "lockres %.*s\n", dlm->name, send_to,
1341 flags & DLM_MRES_RECOVERY ? "recovery" : "migration",
1342 res->lockname.len, res->lockname.name);
1343 return ret;
1344}
1345

/*
 * this message will contain no more than one page worth of
 * recovery data, and it will work on only one lockres.
 * there may be many locks in this page, and we may need to wait
 * for additional packets to complete all the locks (rare, but
 * possible).
 */
/*
 * NOTE: the allocation error cases here are scary
 * we really cannot afford to fail an alloc in recovery
 * do we spin?  returning an error only delays the problem really
 */

1361int dlm_mig_lockres_handler(struct o2net_msg *msg, u32 len, void *data,
1362 void **ret_data)
1363{
1364 struct dlm_ctxt *dlm = data;
1365 struct dlm_migratable_lockres *mres =
1366 (struct dlm_migratable_lockres *)msg->buf;
1367 int ret = 0;
1368 u8 real_master;
1369 u8 extra_refs = 0;
1370 char *buf = NULL;
1371 struct dlm_work_item *item = NULL;
1372 struct dlm_lock_resource *res = NULL;
1373
1374 if (!dlm_grab(dlm))
1375 return -EINVAL;
1376
1377 BUG_ON(!(mres->flags & (DLM_MRES_RECOVERY|DLM_MRES_MIGRATION)));
1378
1379 real_master = mres->master;
1380 if (real_master == DLM_LOCK_RES_OWNER_UNKNOWN) {
1381
1382 BUG_ON(!(mres->flags & DLM_MRES_RECOVERY));
1383 }
1384
1385 mlog(0, "%s message received from node %u\n",
1386 (mres->flags & DLM_MRES_RECOVERY) ?
1387 "recovery" : "migration", mres->master);
1388 if (mres->flags & DLM_MRES_ALL_DONE)
1389 mlog(0, "all done flag. all lockres data received!\n");
1390
1391 ret = -ENOMEM;
1392 buf = kmalloc(be16_to_cpu(msg->data_len), GFP_NOFS);
1393 item = kzalloc(sizeof(*item), GFP_NOFS);
1394 if (!buf || !item)
1395 goto leave;
1396
	/* lookup the lockres to see if we already have a secondary queue
	 * for it.  if so, just add the locks in; the owner and RECOVERY
	 * flag will be fixed up when recovery completes. */
1400 res = dlm_lookup_lockres(dlm, mres->lockname, mres->lockname_len);
1401 if (res) {
1402
1403
1404 spin_lock(&res->spinlock);
1405 if (mres->flags & DLM_MRES_RECOVERY) {
1406 res->state |= DLM_LOCK_RES_RECOVERING;
1407 } else {
1408 if (res->state & DLM_LOCK_RES_MIGRATING) {
1409
1410
1411 mlog(0, "lock %.*s is already migrating\n",
1412 mres->lockname_len,
1413 mres->lockname);
1414 } else if (res->state & DLM_LOCK_RES_RECOVERING) {
1415
1416 mlog(ML_ERROR, "node is attempting to migrate "
1417 "lock %.*s, but marked as recovering!\n",
1418 mres->lockname_len, mres->lockname);
1419 ret = -EFAULT;
1420 spin_unlock(&res->spinlock);
1421 dlm_lockres_put(res);
1422 goto leave;
1423 }
1424 res->state |= DLM_LOCK_RES_MIGRATING;
1425 }
1426 spin_unlock(&res->spinlock);
1427 } else {
		/* need to allocate, just like if it was
		 * mastered here normally */
1430 res = dlm_new_lockres(dlm, mres->lockname, mres->lockname_len);
1431 if (!res)
1432 goto leave;
1433
1434
1435
1436 dlm_lockres_get(res);
1437
1438
1439 if (mres->flags & DLM_MRES_RECOVERY)
1440 res->state |= DLM_LOCK_RES_RECOVERING;
1441 else
1442 res->state |= DLM_LOCK_RES_MIGRATING;
1443
1444 spin_lock(&dlm->spinlock);
1445 __dlm_insert_lockres(dlm, res);
1446 spin_unlock(&dlm->spinlock);
1447

		/* take an extra ref on this brand new, lock-less lockres
		 * so that dlm_thread cannot purge it before the worker
		 * gets a chance to add locks to it */
1451 dlm_lockres_get(res);
1452

		/* remember that an extra reference is outstanding; it is
		 * dropped again once recovery of this lockres completes */
1462 extra_refs++;
1463
1464
1465
1466 spin_lock(&res->spinlock);
1467 res->state &= ~DLM_LOCK_RES_IN_PROGRESS;
1468 spin_unlock(&res->spinlock);
1469 wake_up(&res->wq);
1470 }
1471
1472
1473
1474
1475 ret = 0;
1476 spin_lock(&res->spinlock);
1477
1478
1479 dlm_lockres_grab_inflight_ref(dlm, res);
1480 if (mres->master == DLM_LOCK_RES_OWNER_UNKNOWN) {
1481
1482 BUG_ON(!(mres->flags & DLM_MRES_RECOVERY));
1483 mlog(0, "recovery has passed me a lockres with an "
1484 "unknown owner.. will need to requery: "
1485 "%.*s\n", mres->lockname_len, mres->lockname);
1486 } else {
1487
1488
1489 dlm_change_lockres_owner(dlm, res, dlm->node_num);
1490 }
1491 spin_unlock(&res->spinlock);
1492
1493
1494 dlm_grab(dlm);
1495 memcpy(buf, msg->buf, be16_to_cpu(msg->data_len));
1496 dlm_init_work_item(dlm, item, dlm_mig_lockres_worker, buf);
1497 item->u.ml.lockres = res;
1498 item->u.ml.real_master = real_master;
1499 item->u.ml.extra_ref = extra_refs;
1500 spin_lock(&dlm->work_lock);
1501 list_add_tail(&item->list, &dlm->work_list);
1502 spin_unlock(&dlm->work_lock);
1503 queue_work(dlm->dlm_worker, &dlm->dispatched_work);
1504
1505leave:
1506
1507 if (extra_refs)
1508 dlm_lockres_put(res);
1509
1510 dlm_put(dlm);
1511 if (ret < 0) {
1512 kfree(buf);
1513 kfree(item);
1514 mlog_errno(ret);
1515 }
1516
1517 return ret;
1518}
1519
1520
1521static void dlm_mig_lockres_worker(struct dlm_work_item *item, void *data)
1522{
1523 struct dlm_ctxt *dlm;
1524 struct dlm_migratable_lockres *mres;
1525 int ret = 0;
1526 struct dlm_lock_resource *res;
1527 u8 real_master;
1528 u8 extra_ref;
1529
1530 dlm = item->dlm;
1531 mres = (struct dlm_migratable_lockres *)data;
1532
1533 res = item->u.ml.lockres;
1534 real_master = item->u.ml.real_master;
1535 extra_ref = item->u.ml.extra_ref;
1536
1537 if (real_master == DLM_LOCK_RES_OWNER_UNKNOWN) {
		/* this case is super-rare.  it only occurs if
		 * a node death happens during migration. */
1540again:
1541 ret = dlm_lockres_master_requery(dlm, res, &real_master);
1542 if (ret < 0) {
1543 mlog(0, "dlm_lockres_master_requery ret=%d\n",
1544 ret);
1545 goto again;
1546 }
1547 if (real_master == DLM_LOCK_RES_OWNER_UNKNOWN) {
1548 mlog(0, "lockres %.*s not claimed. "
1549 "this node will take it.\n",
1550 res->lockname.len, res->lockname.name);
1551 } else {
1552 spin_lock(&res->spinlock);
1553 dlm_lockres_drop_inflight_ref(dlm, res);
1554 spin_unlock(&res->spinlock);
1555 mlog(0, "master needs to respond to sender "
1556 "that node %u still owns %.*s\n",
1557 real_master, res->lockname.len,
1558 res->lockname.name);
1559
1560 goto leave;
1561 }
1562 }
1563
1564 ret = dlm_process_recovery_data(dlm, res, mres);
1565 if (ret < 0)
1566 mlog(0, "dlm_process_recovery_data returned %d\n", ret);
1567 else
1568 mlog(0, "dlm_process_recovery_data succeeded\n");
1569
1570 if ((mres->flags & (DLM_MRES_MIGRATION|DLM_MRES_ALL_DONE)) ==
1571 (DLM_MRES_MIGRATION|DLM_MRES_ALL_DONE)) {
1572 ret = dlm_finish_migration(dlm, res, mres->master);
1573 if (ret < 0)
1574 mlog_errno(ret);
1575 }
1576
1577leave:
1578
1579 if (res) {
1580 if (extra_ref)
1581 dlm_lockres_put(res);
1582 dlm_lockres_put(res);
1583 }
1584 kfree(data);
1585}
1586
1587
1588
1589static int dlm_lockres_master_requery(struct dlm_ctxt *dlm,
1590 struct dlm_lock_resource *res,
1591 u8 *real_master)
1592{
1593 struct dlm_node_iter iter;
1594 int nodenum;
1595 int ret = 0;
1596
1597 *real_master = DLM_LOCK_RES_OWNER_UNKNOWN;
	/* we only reach here if one of the two nodes in a
	 * migration died while the migration was in progress.
	 * at this point we need to requery the master.  the
	 * old master is supposed to set the owner to UNKNOWN
	 * if the new master dies, so the only possible answers
	 * from the other nodes are "new_master" or "UNKNOWN".
	 * if every node answers UNKNOWN, this node remasters
	 * the lockres itself.  if any node reports a valid
	 * owner, that owner is still alive and this node must
	 * not touch the lockres. */
1622 spin_lock(&dlm->spinlock);
1623 dlm_node_iter_init(dlm->domain_map, &iter);
1624 spin_unlock(&dlm->spinlock);
1625
1626 while ((nodenum = dlm_node_iter_next(&iter)) >= 0) {
1627
1628 if (nodenum == dlm->node_num)
1629 continue;
1630 ret = dlm_do_master_requery(dlm, res, nodenum, real_master);
1631 if (ret < 0) {
1632 mlog_errno(ret);
1633 if (!dlm_is_host_down(ret))
1634 BUG();
1635
1636
1637 }
1638 if (*real_master != DLM_LOCK_RES_OWNER_UNKNOWN) {
1639 mlog(0, "lock master is %u\n", *real_master);
1640 break;
1641 }
1642 }
1643 return ret;
1644}
1645
1646
1647int dlm_do_master_requery(struct dlm_ctxt *dlm, struct dlm_lock_resource *res,
1648 u8 nodenum, u8 *real_master)
1649{
1650 int ret = -EINVAL;
1651 struct dlm_master_requery req;
1652 int status = DLM_LOCK_RES_OWNER_UNKNOWN;
1653
1654 memset(&req, 0, sizeof(req));
1655 req.node_idx = dlm->node_num;
1656 req.namelen = res->lockname.len;
1657 memcpy(req.name, res->lockname.name, res->lockname.len);
1658
1659 ret = o2net_send_message(DLM_MASTER_REQUERY_MSG, dlm->key,
1660 &req, sizeof(req), nodenum, &status);
1661
1662 if (ret < 0)
1663 mlog(ML_ERROR, "Error %d when sending message %u (key "
1664 "0x%x) to node %u\n", ret, DLM_MASTER_REQUERY_MSG,
1665 dlm->key, nodenum);
1666 else {
1667 BUG_ON(status < 0);
1668 BUG_ON(status > DLM_LOCK_RES_OWNER_UNKNOWN);
1669 *real_master = (u8) (status & 0xff);
1670 mlog(0, "node %u responded to master requery with %u\n",
1671 nodenum, *real_master);
1672 ret = 0;
1673 }
1674 return ret;
1675}

/* this function cannot error, so unless the sending
 * or receiving of the message failed, the owner can
 * be trusted */
1681int dlm_master_requery_handler(struct o2net_msg *msg, u32 len, void *data,
1682 void **ret_data)
1683{
1684 struct dlm_ctxt *dlm = data;
1685 struct dlm_master_requery *req = (struct dlm_master_requery *)msg->buf;
1686 struct dlm_lock_resource *res = NULL;
1687 unsigned int hash;
1688 int master = DLM_LOCK_RES_OWNER_UNKNOWN;
1689 u32 flags = DLM_ASSERT_MASTER_REQUERY;
1690
1691 if (!dlm_grab(dlm)) {
1692
1693
1694 return master;
1695 }
1696
1697 hash = dlm_lockid_hash(req->name, req->namelen);
1698
1699 spin_lock(&dlm->spinlock);
1700 res = __dlm_lookup_lockres(dlm, req->name, req->namelen, hash);
1701 if (res) {
1702 spin_lock(&res->spinlock);
1703 master = res->owner;
1704 if (master == dlm->node_num) {
1705 int ret = dlm_dispatch_assert_master(dlm, res,
1706 0, 0, flags);
1707 if (ret < 0) {
1708 mlog_errno(-ENOMEM);
1709
1710 BUG();
1711 } else
1712 __dlm_lockres_grab_inflight_worker(dlm, res);
1713 spin_unlock(&res->spinlock);
1714 } else {
1715
1716 spin_unlock(&res->spinlock);
1717 dlm_lockres_put(res);
1718 }
1719 }
1720 spin_unlock(&dlm->spinlock);
1721
1722 dlm_put(dlm);
1723 return master;
1724}
1725
1726static inline struct list_head *
1727dlm_list_num_to_pointer(struct dlm_lock_resource *res, int list_num)
1728{
1729 struct list_head *ret;
1730 BUG_ON(list_num < 0);
1731 BUG_ON(list_num > 2);
1732 ret = &(res->granted);
1733 ret += list_num;
1734 return ret;
1735}
1736

/*
 * dlm_process_recovery_data() walks the locks packed into an incoming
 * dlm_migratable_lockres and attaches them to the local lock resource:
 *
 *   - a "dummy" lock (see dlm_is_dummy_lock) only tells us that the
 *     sending node holds a reference on this lockres; just set its
 *     refmap bit and return.
 *   - locks owned by this node (migration only) must already exist
 *     locally; they are verified and moved to the proper queue.
 *   - locks owned by other nodes are created fresh, their lvbs are
 *     copied and validated, and they are added to the queue indicated
 *     by the message.
 *
 * called from the recovery/migration worker thread, so it is allowed
 * to sleep.
 */
1763static int dlm_process_recovery_data(struct dlm_ctxt *dlm,
1764 struct dlm_lock_resource *res,
1765 struct dlm_migratable_lockres *mres)
1766{
1767 struct dlm_migratable_lock *ml;
1768 struct list_head *queue, *iter;
1769 struct list_head *tmpq = NULL;
1770 struct dlm_lock *newlock = NULL;
1771 struct dlm_lockstatus *lksb = NULL;
1772 int ret = 0;
1773 int i, j, bad;
1774 struct dlm_lock *lock;
1775 u8 from = O2NM_MAX_NODES;
1776 unsigned int added = 0;
1777 __be64 c;
1778
1779 mlog(0, "running %d locks for this lockres\n", mres->num_locks);
1780 for (i=0; i<mres->num_locks; i++) {
1781 ml = &(mres->ml[i]);
1782
1783 if (dlm_is_dummy_lock(dlm, ml, &from)) {
1784
1785 BUG_ON(mres->num_locks != 1);
1786 mlog(0, "%s:%.*s: dummy lock for %u\n",
1787 dlm->name, mres->lockname_len, mres->lockname,
1788 from);
1789 spin_lock(&res->spinlock);
1790 dlm_lockres_set_refmap_bit(dlm, res, from);
1791 spin_unlock(&res->spinlock);
1792 added++;
1793 break;
1794 }
1795 BUG_ON(ml->highest_blocked != LKM_IVMODE);
1796 newlock = NULL;
1797 lksb = NULL;
1798
1799 queue = dlm_list_num_to_pointer(res, ml->list);
1800 tmpq = NULL;
1801

		/* a lock for the local node must already exist on one of
		 * this lockres' queues; find it and make sure it is where
		 * the message says it should be */
1805 if (ml->node == dlm->node_num) {
1806
1807 BUG_ON(!(mres->flags & DLM_MRES_MIGRATION));
1808
1809 lock = NULL;
1810 spin_lock(&res->spinlock);
1811 for (j = DLM_GRANTED_LIST; j <= DLM_BLOCKED_LIST; j++) {
1812 tmpq = dlm_list_idx_to_ptr(res, j);
1813 list_for_each(iter, tmpq) {
1814 lock = list_entry(iter,
1815 struct dlm_lock, list);
1816 if (lock->ml.cookie == ml->cookie)
1817 break;
1818 lock = NULL;
1819 }
1820 if (lock)
1821 break;
1822 }

			/* a lock is always created locally first, and
			 * destroyed locally last.  it must be on the list */
1826 if (!lock) {
1827 c = ml->cookie;
1828 mlog(ML_ERROR, "Could not find local lock "
1829 "with cookie %u:%llu, node %u, "
1830 "list %u, flags 0x%x, type %d, "
1831 "conv %d, highest blocked %d\n",
1832 dlm_get_lock_cookie_node(be64_to_cpu(c)),
1833 dlm_get_lock_cookie_seq(be64_to_cpu(c)),
1834 ml->node, ml->list, ml->flags, ml->type,
1835 ml->convert_type, ml->highest_blocked);
1836 __dlm_print_one_lock_resource(res);
1837 BUG();
1838 }
1839
1840 if (lock->ml.node != ml->node) {
1841 c = lock->ml.cookie;
1842 mlog(ML_ERROR, "Mismatched node# in lock "
1843 "cookie %u:%llu, name %.*s, node %u\n",
1844 dlm_get_lock_cookie_node(be64_to_cpu(c)),
1845 dlm_get_lock_cookie_seq(be64_to_cpu(c)),
1846 res->lockname.len, res->lockname.name,
1847 lock->ml.node);
1848 c = ml->cookie;
1849 mlog(ML_ERROR, "Migrate lock cookie %u:%llu, "
1850 "node %u, list %u, flags 0x%x, type %d, "
1851 "conv %d, highest blocked %d\n",
1852 dlm_get_lock_cookie_node(be64_to_cpu(c)),
1853 dlm_get_lock_cookie_seq(be64_to_cpu(c)),
1854 ml->node, ml->list, ml->flags, ml->type,
1855 ml->convert_type, ml->highest_blocked);
1856 __dlm_print_one_lock_resource(res);
1857 BUG();
1858 }
1859
1860 if (tmpq != queue) {
1861 c = ml->cookie;
1862 mlog(0, "Lock cookie %u:%llu was on list %u "
1863 "instead of list %u for %.*s\n",
1864 dlm_get_lock_cookie_node(be64_to_cpu(c)),
1865 dlm_get_lock_cookie_seq(be64_to_cpu(c)),
1866 j, ml->list, res->lockname.len,
1867 res->lockname.name);
1868 __dlm_print_one_lock_resource(res);
1869 spin_unlock(&res->spinlock);
1870 continue;
1871 }

			/* lock was found on the expected queue; move it to
			 * the tail to match the ordering in the message.
			 * do not alter the lock refcount - just switching
			 * position within the same list. */
1878 list_move_tail(&lock->list, queue);
1879 spin_unlock(&res->spinlock);
1880 added++;
1881
1882 mlog(0, "just reordered a local lock!\n");
1883 continue;
1884 }
1885
1886
1887 newlock = dlm_new_lock(ml->type, ml->node,
1888 be64_to_cpu(ml->cookie), NULL);
1889 if (!newlock) {
1890 ret = -ENOMEM;
1891 goto leave;
1892 }
1893 lksb = newlock->lksb;
1894 dlm_lock_attach_lockres(newlock, res);
1895
1896 if (ml->convert_type != LKM_IVMODE) {
1897 BUG_ON(queue != &res->converting);
1898 newlock->ml.convert_type = ml->convert_type;
1899 }
1900 lksb->flags |= (ml->flags &
1901 (DLM_LKSB_PUT_LVB|DLM_LKSB_GET_LVB));
1902
1903 if (ml->type == LKM_NLMODE)
1904 goto skip_lvb;

		/*
		 * If the lock is in the blocked list it can't have a valid
		 * lvb, so skip it
		 */
1910 if (ml->list == DLM_BLOCKED_LIST)
1911 goto skip_lvb;
1912
1913 if (!dlm_lvb_is_empty(mres->lvb)) {
1914 if (lksb->flags & DLM_LKSB_PUT_LVB) {
				/* the other node was trying to update
				 * the lvb when the node died.  recreate
				 * the lksb with the updated lvb. */
1918 memcpy(lksb->lvb, mres->lvb, DLM_LVB_LEN);
				/* the lock resource lvb update must happen
				 * NOW, before the spinlock is dropped.
				 * we no longer wait for the AST to update
				 * the lvb. */
1923 memcpy(res->lvb, mres->lvb, DLM_LVB_LEN);
1924 } else {
1925
1926
1927 BUG_ON(ml->type != LKM_EXMODE &&
1928 ml->type != LKM_PRMODE);
1929 if (!dlm_lvb_is_empty(res->lvb) &&
1930 (ml->type == LKM_EXMODE ||
1931 memcmp(res->lvb, mres->lvb, DLM_LVB_LEN))) {
1932 int i;
1933 mlog(ML_ERROR, "%s:%.*s: received bad "
1934 "lvb! type=%d\n", dlm->name,
1935 res->lockname.len,
1936 res->lockname.name, ml->type);
1937 printk("lockres lvb=[");
1938 for (i=0; i<DLM_LVB_LEN; i++)
1939 printk("%02x", res->lvb[i]);
1940 printk("]\nmigrated lvb=[");
1941 for (i=0; i<DLM_LVB_LEN; i++)
1942 printk("%02x", mres->lvb[i]);
1943 printk("]\n");
1944 dlm_print_one_lock_resource(res);
1945 BUG();
1946 }
1947 memcpy(res->lvb, mres->lvb, DLM_LVB_LEN);
1948 }
1949 }
1950skip_lvb:

		/* NOTE:
		 * wrt lock queue ordering and recovery:
		 *    1. order of locks on the granted queue is meaningless.
		 *    2. order of locks on the converting queue is LOST with
		 *       the node death.
		 *    3. order of locks on the blocked queue is also LOST.
		 * lock order does not affect integrity, it just means that
		 * a lock request may get pushed back in line as a result of
		 * the node death.  for a given node the order of its own
		 * secondary-queue locks is preserved relative to each other,
		 * but not relative to locks from other nodes.
		 */
1968 bad = 0;
1969 spin_lock(&res->spinlock);
1970 list_for_each_entry(lock, queue, list) {
1971 if (lock->ml.cookie == ml->cookie) {
1972 c = lock->ml.cookie;
1973 mlog(ML_ERROR, "%s:%.*s: %u:%llu: lock already "
1974 "exists on this lockres!\n", dlm->name,
1975 res->lockname.len, res->lockname.name,
1976 dlm_get_lock_cookie_node(be64_to_cpu(c)),
1977 dlm_get_lock_cookie_seq(be64_to_cpu(c)));
1978
1979 mlog(ML_NOTICE, "sent lock: type=%d, conv=%d, "
1980 "node=%u, cookie=%u:%llu, queue=%d\n",
1981 ml->type, ml->convert_type, ml->node,
1982 dlm_get_lock_cookie_node(be64_to_cpu(ml->cookie)),
1983 dlm_get_lock_cookie_seq(be64_to_cpu(ml->cookie)),
1984 ml->list);
1985
1986 __dlm_print_one_lock_resource(res);
1987 bad = 1;
1988 break;
1989 }
1990 }
1991 if (!bad) {
1992 dlm_lock_get(newlock);
1993 if (mres->flags & DLM_MRES_RECOVERY &&
1994 ml->list == DLM_CONVERTING_LIST &&
1995 newlock->ml.type >
1996 newlock->ml.convert_type) {
				/* newlock is doing a downconvert; add it to
				 * the head of the converting list */
1999 list_add(&newlock->list, queue);
2000 } else
2001 list_add_tail(&newlock->list, queue);
2002 mlog(0, "%s:%.*s: added lock for node %u, "
2003 "setting refmap bit\n", dlm->name,
2004 res->lockname.len, res->lockname.name, ml->node);
2005 dlm_lockres_set_refmap_bit(dlm, res, ml->node);
2006 added++;
2007 }
2008 spin_unlock(&res->spinlock);
2009 }
2010 mlog(0, "done running all the locks\n");
2011
2012leave:
2013
2014 spin_lock(&res->spinlock);
2015 dlm_lockres_drop_inflight_ref(dlm, res);
2016 spin_unlock(&res->spinlock);
2017
2018 if (ret < 0) {
2019 mlog_errno(ret);
2020 if (newlock)
2021 dlm_lock_put(newlock);
2022 }
2023
2024 return ret;
2025}
2026
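/* mark a lockres owned by the dead node as RECOVERING, park it on
 * dlm->reco.resources, and resolve any lock/convert/unlock/cancel
 * operations that were still pending against the dead master. */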
2027void dlm_move_lockres_to_recovery_list(struct dlm_ctxt *dlm,
2028 struct dlm_lock_resource *res)
2029{
2030 int i;
2031 struct list_head *queue;
2032 struct dlm_lock *lock, *next;
2033
2034 assert_spin_locked(&dlm->spinlock);
2035 assert_spin_locked(&res->spinlock);
2036 res->state |= DLM_LOCK_RES_RECOVERING;
2037 if (!list_empty(&res->recovering)) {
2038 mlog(0,
2039 "Recovering res %s:%.*s, is already on recovery list!\n",
2040 dlm->name, res->lockname.len, res->lockname.name);
2041 list_del_init(&res->recovering);
2042 dlm_lockres_put(res);
2043 }
2044
2045 dlm_lockres_get(res);
2046 list_add_tail(&res->recovering, &dlm->reco.resources);
2047
2048
2049 for (i=DLM_BLOCKED_LIST; i>=DLM_GRANTED_LIST; i--) {
2050 queue = dlm_list_idx_to_ptr(res, i);
2051 list_for_each_entry_safe(lock, next, queue, list) {
2052 dlm_lock_get(lock);
2053 if (lock->convert_pending) {
2054
2055 BUG_ON(i != DLM_CONVERTING_LIST);
2056 mlog(0, "node died with convert pending "
2057 "on %.*s. move back to granted list.\n",
2058 res->lockname.len, res->lockname.name);
2059 dlm_revert_pending_convert(res, lock);
2060 lock->convert_pending = 0;
2061 } else if (lock->lock_pending) {
2062
2063 BUG_ON(i != DLM_BLOCKED_LIST);
2064 mlog(0, "node died with lock pending "
2065 "on %.*s. remove from blocked list and skip.\n",
2066 res->lockname.len, res->lockname.name);
2067
2068
2069
2070
2071
2072 dlm_revert_pending_lock(res, lock);
2073 lock->lock_pending = 0;
2074 } else if (lock->unlock_pending) {
				/* if an unlock was in progress, treat as
				 * if it had completed successfully
				 * before sending this lock state to the
				 * new master.  note that the dlm_unlock
				 * call is still responsible for calling
				 * the unlockast; that will happen after
				 * the network call times out.  for now,
				 * just move lists to prepare the new
				 * recovery master. */
2084 BUG_ON(i != DLM_GRANTED_LIST);
2085 mlog(0, "node died with unlock pending "
2086 "on %.*s. remove from blocked list and skip.\n",
2087 res->lockname.len, res->lockname.name);
2088 dlm_commit_pending_unlock(res, lock);
2089 lock->unlock_pending = 0;
2090 } else if (lock->cancel_pending) {
				/* if a cancel was in progress, treat as
				 * if it had completed successfully
				 * before sending this lock state to the
				 * new master */
2095 BUG_ON(i != DLM_CONVERTING_LIST);
2096 mlog(0, "node died with cancel pending "
2097 "on %.*s. move back to granted list.\n",
2098 res->lockname.len, res->lockname.name);
2099 dlm_commit_pending_cancel(res, lock);
2100 lock->cancel_pending = 0;
2101 }
2102 dlm_lock_put(lock);
2103 }
2104 }
2105}
2106
/* removes all recovered lock resources from the recovery list,
 * sets res->owner to the new master,
 * unsets the RECOVERING flag and wakes waiters. */
2112static void dlm_finish_local_lockres_recovery(struct dlm_ctxt *dlm,
2113 u8 dead_node, u8 new_master)
2114{
2115 int i;
2116 struct hlist_head *bucket;
2117 struct dlm_lock_resource *res, *next;
2118
2119 assert_spin_locked(&dlm->spinlock);
2120
2121 list_for_each_entry_safe(res, next, &dlm->reco.resources, recovering) {
2122 if (res->owner == dead_node) {
2123 mlog(0, "%s: res %.*s, Changing owner from %u to %u\n",
2124 dlm->name, res->lockname.len, res->lockname.name,
2125 res->owner, new_master);
2126 list_del_init(&res->recovering);
2127 spin_lock(&res->spinlock);
2128
2129
2130 dlm_change_lockres_owner(dlm, res, new_master);
2131 res->state &= ~DLM_LOCK_RES_RECOVERING;
2132 if (__dlm_lockres_has_locks(res))
2133 __dlm_dirty_lockres(dlm, res);
2134 spin_unlock(&res->spinlock);
2135 wake_up(&res->wq);
2136 dlm_lockres_put(res);
2137 }
2138 }
2139
2140
2141
2142
2143
2144 for (i = 0; i < DLM_HASH_BUCKETS; i++) {
2145 bucket = dlm_lockres_hash(dlm, i);
2146 hlist_for_each_entry(res, bucket, hash_node) {
2147 if (!(res->state & DLM_LOCK_RES_RECOVERING))
2148 continue;
2149
2150 if (res->owner != dead_node &&
2151 res->owner != dlm->node_num)
2152 continue;
2153
2154 if (!list_empty(&res->recovering)) {
2155 list_del_init(&res->recovering);
2156 dlm_lockres_put(res);
2157 }
2158
2159
2160
2161 mlog(0, "%s: res %.*s, Changing owner from %u to %u\n",
2162 dlm->name, res->lockname.len, res->lockname.name,
2163 res->owner, new_master);
2164 spin_lock(&res->spinlock);
2165 dlm_change_lockres_owner(dlm, res, new_master);
2166 res->state &= ~DLM_LOCK_RES_RECOVERING;
2167 if (__dlm_lockres_has_locks(res))
2168 __dlm_dirty_lockres(dlm, res);
2169 spin_unlock(&res->spinlock);
2170 wake_up(&res->wq);
2171 }
2172 }
2173}
2174
2175static inline int dlm_lvb_needs_invalidation(struct dlm_lock *lock, int local)
2176{
2177 if (local) {
2178 if (lock->ml.type != LKM_EXMODE &&
2179 lock->ml.type != LKM_PRMODE)
2180 return 1;
2181 } else if (lock->ml.type == LKM_EXMODE)
2182 return 1;
2183 return 0;
2184}
2185
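/* if the dead node (or, when this node is not the master, this node
 * itself) held a lock whose lvb can no longer be trusted, blank out the
 * lvb in the affected lksbs and in the lockres itself. */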
2186static void dlm_revalidate_lvb(struct dlm_ctxt *dlm,
2187 struct dlm_lock_resource *res, u8 dead_node)
2188{
2189 struct list_head *queue;
2190 struct dlm_lock *lock;
2191 int blank_lvb = 0, local = 0;
2192 int i;
2193 u8 search_node;
2194
2195 assert_spin_locked(&dlm->spinlock);
2196 assert_spin_locked(&res->spinlock);
2197
2198 if (res->owner == dlm->node_num)
2199
2200
2201 search_node = dead_node;
2202 else {
2203
2204
2205 search_node = dlm->node_num;
2206 local = 1;
2207 }
2208
2209 for (i=DLM_GRANTED_LIST; i<=DLM_CONVERTING_LIST; i++) {
2210 queue = dlm_list_idx_to_ptr(res, i);
2211 list_for_each_entry(lock, queue, list) {
2212 if (lock->ml.node == search_node) {
2213 if (dlm_lvb_needs_invalidation(lock, local)) {
2214
2215 blank_lvb = 1;
2216 memset(lock->lksb->lvb, 0, DLM_LVB_LEN);
2217 }
2218 }
2219 }
2220 }
2221
2222 if (blank_lvb) {
2223 mlog(0, "clearing %.*s lvb, dead node %u had EX\n",
2224 res->lockname.len, res->lockname.name, dead_node);
2225 memset(res->lvb, 0, DLM_LVB_LEN);
2226 }
2227}
2228
2229static void dlm_free_dead_locks(struct dlm_ctxt *dlm,
2230 struct dlm_lock_resource *res, u8 dead_node)
2231{
2232 struct dlm_lock *lock, *next;
2233 unsigned int freed = 0;
2234
	/* this node is the lockres master: remove any locks still held
	 * by the dead node from all three queues and drop the dead
	 * node's refmap bit */
2239 assert_spin_locked(&dlm->spinlock);
2240 assert_spin_locked(&res->spinlock);
2241
2242
2243
2244
2245
2246 list_for_each_entry_safe(lock, next, &res->granted, list) {
2247 if (lock->ml.node == dead_node) {
2248 list_del_init(&lock->list);
2249 dlm_lock_put(lock);
2250
2251 dlm_lock_put(lock);
2252 freed++;
2253 }
2254 }
2255 list_for_each_entry_safe(lock, next, &res->converting, list) {
2256 if (lock->ml.node == dead_node) {
2257 list_del_init(&lock->list);
2258 dlm_lock_put(lock);
2259
2260 dlm_lock_put(lock);
2261 freed++;
2262 }
2263 }
2264 list_for_each_entry_safe(lock, next, &res->blocked, list) {
2265 if (lock->ml.node == dead_node) {
2266 list_del_init(&lock->list);
2267 dlm_lock_put(lock);
2268
2269 dlm_lock_put(lock);
2270 freed++;
2271 }
2272 }
2273
2274 if (freed) {
2275 mlog(0, "%s:%.*s: freed %u locks for dead node %u, "
2276 "dropping ref from lockres\n", dlm->name,
2277 res->lockname.len, res->lockname.name, freed, dead_node);
2278 if(!test_bit(dead_node, res->refmap)) {
2279 mlog(ML_ERROR, "%s:%.*s: freed %u locks for dead node %u, "
2280 "but ref was not set\n", dlm->name,
2281 res->lockname.len, res->lockname.name, freed, dead_node);
2282 __dlm_print_one_lock_resource(res);
2283 }
2284 dlm_lockres_clear_refmap_bit(dlm, res, dead_node);
2285 } else if (test_bit(dead_node, res->refmap)) {
2286 mlog(0, "%s:%.*s: dead node %u had a ref, but had "
2287 "no locks and had not purged before dying\n", dlm->name,
2288 res->lockname.len, res->lockname.name, dead_node);
2289 dlm_lockres_clear_refmap_bit(dlm, res, dead_node);
2290 }
2291
2292
2293 __dlm_dirty_lockres(dlm, res);
2294}

/* Clean up all local state for a node that has died: prune any stale
 * mles, blank out untrusted lvbs, free the dead node's locks on
 * lockres's mastered by this node, and queue lockres's owned by the
 * dead node for recovery. */
static void dlm_do_local_recovery_cleanup(struct dlm_ctxt *dlm, u8 dead_node)
{
	struct dlm_lock_resource *res;
	int i;
	struct hlist_head *bucket;
	struct dlm_lock *lock;

	/* purge any stale mles */
	dlm_clean_master_list(dlm, dead_node);

	/*
	 * now clean up all lock resources.  there are two rules:
	 *
	 * 1) if the dead node was the master, move the lockres
	 *    to the recovering list.  set the RECOVERING flag.
	 *    this lockres needs to be cleaned up before it can
	 *    be used further.
	 *
	 * 2) if this node was the master, remove all locks from
	 *    the dead node.  if the dead node had an EX when he
	 *    died, blank out the lvb.
	 *
	 * for both of these rules, if this node is the master,
	 * clear the refmap bit for the dead node.
	 */
	for (i = 0; i < DLM_HASH_BUCKETS; i++) {
		bucket = dlm_lockres_hash(dlm, i);
		hlist_for_each_entry(res, bucket, hash_node) {
			/* always prune any $RECOVERY entries for dead nodes,
			 * otherwise hangs can occur during later recovery */
			if (dlm_is_recovery_lock(res->lockname.name,
						 res->lockname.len)) {
				spin_lock(&res->spinlock);
				list_for_each_entry(lock, &res->granted, list) {
					if (lock->ml.node == dead_node) {
						mlog(0, "AHA! there was "
						     "a $RECOVERY lock for dead "
						     "node %u (%s)!\n",
						     dead_node, dlm->name);
						list_del_init(&lock->list);
						dlm_lock_put(lock);
						/* Can't schedule
						 * DLM_UNLOCK_FREE_LOCK
						 * - do manually */
						dlm_lock_put(lock);
						break;
					}
				}
				spin_unlock(&res->spinlock);
				continue;
			}
			spin_lock(&res->spinlock);
			/* zero the lvb if necessary */
			dlm_revalidate_lvb(dlm, res, dead_node);
			if (res->owner == dead_node) {
				if (res->state & DLM_LOCK_RES_DROPPING_REF) {
					mlog(ML_NOTICE, "%s: res %.*s, Skip "
					     "recovery as it is being freed\n",
					     dlm->name, res->lockname.len,
					     res->lockname.name);
				} else
					dlm_move_lockres_to_recovery_list(dlm,
									  res);

			} else if (res->owner == dlm->node_num) {
				dlm_free_dead_locks(dlm, res, dead_node);
				__dlm_lockres_calc_usage(dlm, res);
			} else if (res->owner == DLM_LOCK_RES_OWNER_UNKNOWN) {
				if (test_bit(dead_node, res->refmap)) {
					mlog(0, "%s:%.*s: dead node %u had a ref, but had "
					     "no locks and had not purged before dying\n",
					     dlm->name, res->lockname.len,
					     res->lockname.name, dead_node);
					dlm_lockres_clear_refmap_bit(dlm, res, dead_node);
				}
			}
			spin_unlock(&res->spinlock);
		}
	}
}

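/* Core node-down handling; called with dlm->spinlock held.  Clears
 * the dead node from the live and domain maps, runs local lockres
 * cleanup, and sets the node in the recovery map so the recovery
 * thread will pick it up. */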
static void __dlm_hb_node_down(struct dlm_ctxt *dlm, int idx)
{
	assert_spin_locked(&dlm->spinlock);

	if (dlm->reco.new_master == idx) {
		mlog(0, "%s: recovery master %d just died\n",
		     dlm->name, idx);
		if (dlm->reco.state & DLM_RECO_STATE_FINALIZE) {
			/* finalize1 was reached, so it is safe to clear
			 * the new_master and dead_node.  that recovery
			 * is complete. */
			mlog(0, "%s: dead master %d had reached "
			     "finalize1 state, clearing\n", dlm->name, idx);
			dlm->reco.state &= ~DLM_RECO_STATE_FINALIZE;
			__dlm_reset_recovery(dlm);
		}
	}

	/* Clean up join state on node death. */
	if (dlm->joining_node == idx) {
		mlog(0, "Clearing join state for node %u\n", idx);
		__dlm_set_joining_node(dlm, DLM_LOCK_RES_OWNER_UNKNOWN);
	}

	/* check to see if the node is already considered dead */
	if (!test_bit(idx, dlm->live_nodes_map)) {
		mlog(0, "for domain %s, node %d is already dead. "
		     "another node likely did recovery already.\n",
		     dlm->name, idx);
		return;
	}

	/* check to see if the node is in the domain */
	if (!test_bit(idx, dlm->domain_map)) {
		/* This also catches the case that we get a node down
		 * but haven't joined the domain yet. */
		mlog(0, "node %u already removed from domain!\n", idx);
		return;
	}

	clear_bit(idx, dlm->live_nodes_map);

	/* make sure local cleanup occurs before the heartbeat events */
	if (!test_bit(idx, dlm->recovery_map))
		dlm_do_local_recovery_cleanup(dlm, idx);

	/* notify anything attached to the heartbeat events */
	dlm_hb_event_notify_attached(dlm, idx, 0);

	mlog(0, "node %u being removed from domain map!\n", idx);
	clear_bit(idx, dlm->domain_map);
	clear_bit(idx, dlm->exit_domain_map);
	/* wake up migration waiters if a node goes down.
	 * perhaps later we can genericize this for other waiters. */
	wake_up(&dlm->migration_wq);

	if (test_bit(idx, dlm->recovery_map))
		mlog(0, "domain %s, node %u already added "
		     "to recovery map!\n", dlm->name, idx);
	else
		set_bit(idx, dlm->recovery_map);
}

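/* heartbeat callback: a node in this dlm domain stopped heartbeating */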
void dlm_hb_node_down_cb(struct o2nm_node *node, int idx, void *data)
{
	struct dlm_ctxt *dlm = data;

	if (!dlm_grab(dlm))
		return;

	/*
	 * This will notify any dlm users that a node in our domain
	 * went away without notifying us first.
	 */
	if (test_bit(idx, dlm->domain_map))
		dlm_fire_domain_eviction_callbacks(dlm, idx);

	spin_lock(&dlm->spinlock);
	__dlm_hb_node_down(dlm, idx);
	spin_unlock(&dlm->spinlock);

	dlm_put(dlm);
}

void dlm_hb_node_up_cb(struct o2nm_node *node, int idx, void *data)
{
	struct dlm_ctxt *dlm = data;

	if (!dlm_grab(dlm))
		return;

	spin_lock(&dlm->spinlock);
	set_bit(idx, dlm->live_nodes_map);
	/* do NOT notify mle attached to the heartbeat events.
	 * new nodes are not interesting in mastery until joined. */
	spin_unlock(&dlm->spinlock);

	dlm_put(dlm);
}

static void dlm_reco_ast(void *astdata)
{
	struct dlm_ctxt *dlm = astdata;
	mlog(0, "ast for recovery lock fired!, this=%u, dlm=%s\n",
	     dlm->node_num, dlm->name);
}
static void dlm_reco_bast(void *astdata, int blocked_type)
{
	struct dlm_ctxt *dlm = astdata;
	mlog(0, "bast for recovery lock fired!, this=%u, dlm=%s\n",
	     dlm->node_num, dlm->name);
}
static void dlm_reco_unlock_ast(void *astdata, enum dlm_status st)
{
	mlog(0, "unlockast for recovery lock fired!\n");
}

/*
 * dlm_pick_recovery_master will continually attempt to use
 * dlmlock() on the special "$RECOVERY" lockres with the
 * LKM_NOQUEUE flag to get an EX.  every thread that enters
 * this function on each node racing to become the recovery
 * master will not stop attempting this until either:
 * a) this node gets the EX (and becomes the recovery master),
 * or b) dlm->reco.new_master gets set to some nodenum
 * != O2NM_INVALID_NODE_NUM (another node will do the reco).
 * so each time a recovery master is needed, the entire cluster
 * will sync at this point.  if the new master dies, that will
 * be detected in dlm_do_recovery */
static int dlm_pick_recovery_master(struct dlm_ctxt *dlm)
{
	enum dlm_status ret;
	struct dlm_lockstatus lksb;
	int status = -EINVAL;

	mlog(0, "starting recovery of %s at %lu, dead=%u, this=%u\n",
	     dlm->name, jiffies, dlm->reco.dead_node, dlm->node_num);
again:
	memset(&lksb, 0, sizeof(lksb));

	ret = dlmlock(dlm, LKM_EXMODE, &lksb, LKM_NOQUEUE|LKM_RECOVERY,
		      DLM_RECOVERY_LOCK_NAME, DLM_RECOVERY_LOCK_NAME_LEN,
		      dlm_reco_ast, dlm, dlm_reco_bast);

	mlog(0, "%s: dlmlock($RECOVERY) returned %d, lksb=%d\n",
	     dlm->name, ret, lksb.status);

	if (ret == DLM_NORMAL) {
		mlog(0, "dlm=%s dlmlock says I got it (this=%u)\n",
		     dlm->name, dlm->node_num);

		/* got the EX lock.  check to see if another node
		 * just became the reco master */
		if (dlm_reco_master_ready(dlm)) {
			mlog(0, "%s: got reco EX lock, but %u will "
			     "do the recovery\n", dlm->name,
			     dlm->reco.new_master);
			status = -EEXIST;
		} else {
			status = 0;

			/* see if recovery was already finished elsewhere */
			spin_lock(&dlm->spinlock);
			if (dlm->reco.dead_node == O2NM_INVALID_NODE_NUM) {
				status = -EINVAL;
				mlog(0, "%s: got reco EX lock, but "
				     "node got recovered already\n", dlm->name);
				if (dlm->reco.new_master != O2NM_INVALID_NODE_NUM) {
					mlog(ML_ERROR, "%s: new master is %u "
					     "but no dead node!\n",
					     dlm->name, dlm->reco.new_master);
					BUG();
				}
			}
			spin_unlock(&dlm->spinlock);
		}

		/* if this node has actually become the recovery master,
		 * set the master and send the messages to begin recovery */
		if (!status) {
			mlog(0, "%s: dead=%u, this=%u, sending "
			     "begin_reco now\n", dlm->name,
			     dlm->reco.dead_node, dlm->node_num);
			status = dlm_send_begin_reco_message(dlm,
				      dlm->reco.dead_node);
			/* this always succeeds */
			BUG_ON(status);

			/* set the new_master to this node */
			spin_lock(&dlm->spinlock);
			dlm_set_reco_master(dlm, dlm->node_num);
			spin_unlock(&dlm->spinlock);
		}

		/* recovery lock is a special case.  ast will not get fired,
		 * so just go ahead and unlock it. */
		ret = dlmunlock(dlm, &lksb, 0, dlm_reco_unlock_ast, dlm);
		if (ret == DLM_DENIED) {
			mlog(0, "got DLM_DENIED, trying LKM_CANCEL\n");
			ret = dlmunlock(dlm, &lksb, LKM_CANCEL, dlm_reco_unlock_ast, dlm);
		}
		if (ret != DLM_NORMAL) {
			/* this would really suck. this could only happen
			 * if there was a network error during the unlock
			 * because of node death.  this means the unlock
			 * is actually "done" and the lock structure is
			 * even freed.  we can continue, but only
			 * because this specific lock name is special. */
			mlog(ML_ERROR, "dlmunlock returned %d\n", ret);
		}
	} else if (ret == DLM_NOTQUEUED) {
		mlog(0, "dlm=%s dlmlock says another node got it (this=%u)\n",
		     dlm->name, dlm->node_num);
		/* another node is master. wait on
		 * reco.new_master != O2NM_INVALID_NODE_NUM
		 * for at most one second */
		wait_event_timeout(dlm->dlm_reco_thread_wq,
				   dlm_reco_master_ready(dlm),
				   msecs_to_jiffies(1000));
		if (!dlm_reco_master_ready(dlm)) {
			mlog(0, "%s: reco master taking awhile\n",
			     dlm->name);
			goto again;
		}
		/* another node has informed this one that it is reco master */
		mlog(0, "%s: reco master %u is ready to recover %u\n",
		     dlm->name, dlm->reco.new_master, dlm->reco.dead_node);
		status = -EEXIST;
	} else if (ret == DLM_RECOVERING) {
		mlog(0, "dlm=%s dlmlock says master node died (this=%u)\n",
		     dlm->name, dlm->node_num);
		goto again;
	} else {
		struct dlm_lock_resource *res;

		/* dlmlock returned something other than NOTQUEUED or NORMAL */
		mlog(ML_ERROR, "%s: got %s from dlmlock($RECOVERY), "
		     "lksb.status=%s\n", dlm->name, dlm_errname(ret),
		     dlm_errname(lksb.status));
		res = dlm_lookup_lockres(dlm, DLM_RECOVERY_LOCK_NAME,
					 DLM_RECOVERY_LOCK_NAME_LEN);
		if (res) {
			dlm_print_one_lock_resource(res);
			dlm_lockres_put(res);
		} else {
			mlog(ML_ERROR, "recovery lock not found\n");
		}
		BUG();
	}

	return status;
}

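/* Tell all other live nodes in the domain that this node is taking
 * over recovery of dead_node.  Retries on network failure and while a
 * node is still finalizing a previous recovery (-EAGAIN). */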
static int dlm_send_begin_reco_message(struct dlm_ctxt *dlm, u8 dead_node)
{
	struct dlm_begin_reco br;
	int ret = 0;
	struct dlm_node_iter iter;
	int nodenum;
	int status;

	mlog(0, "%s: dead node is %u\n", dlm->name, dead_node);

	spin_lock(&dlm->spinlock);
	dlm_node_iter_init(dlm->domain_map, &iter);
	spin_unlock(&dlm->spinlock);

	clear_bit(dead_node, iter.node_map);

	memset(&br, 0, sizeof(br));
	br.node_idx = dlm->node_num;
	br.dead_node = dead_node;

	while ((nodenum = dlm_node_iter_next(&iter)) >= 0) {
		ret = 0;
		if (nodenum == dead_node) {
			mlog(0, "not sending begin reco to dead node "
			     "%u\n", dead_node);
			continue;
		}
		if (nodenum == dlm->node_num) {
			mlog(0, "not sending begin reco to self\n");
			continue;
		}
retry:
		mlog(0, "attempting to send begin reco msg to %d\n",
		     nodenum);
		ret = o2net_send_message(DLM_BEGIN_RECO_MSG, dlm->key,
					 &br, sizeof(br), nodenum, &status);
		if (ret >= 0)
			ret = status;
		if (dlm_is_host_down(ret)) {
			/* node is down.  not involved in recovery
			 * so just keep going */
			mlog(ML_NOTICE, "%s: node %u was down when sending "
			     "begin reco msg (%d)\n", dlm->name, nodenum, ret);
			ret = 0;
		}

		/*
		 * Older versions of dlm_begin_reco_handler() returned a
		 * positive EAGAIN rather than -EAGAIN; accept both for
		 * compatibility.
		 */
		if (ret == -EAGAIN || ret == EAGAIN) {
			mlog(0, "%s: trying to start recovery of node "
			     "%u, but node %u is waiting for last recovery "
			     "to complete, backoff for a bit\n", dlm->name,
			     dead_node, nodenum);
			msleep(100);
			goto retry;
		}
		if (ret < 0) {
			struct dlm_lock_resource *res;

			/* this is now a serious problem, possibly ENOMEM
			 * in the network stack.  must retry */
			mlog_errno(ret);
			mlog(ML_ERROR, "begin reco of dlm %s to node %u "
			     "returned %d\n", dlm->name, nodenum, ret);
			res = dlm_lookup_lockres(dlm, DLM_RECOVERY_LOCK_NAME,
						 DLM_RECOVERY_LOCK_NAME_LEN);
			if (res) {
				dlm_print_one_lock_resource(res);
				dlm_lockres_put(res);
			} else {
				mlog(ML_ERROR, "recovery lock not found\n");
			}
			/* sleep for a bit in hopes that we can avoid
			 * another ENOMEM */
			msleep(100);
			goto retry;
		}
	}

	return ret;
}

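/* handler for DLM_BEGIN_RECO_MSG: records the sender as the new
 * recovery master and br->dead_node as the node being recovered.
 * Returns -EAGAIN while this node is still in finalize state from a
 * previous recovery. */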
int dlm_begin_reco_handler(struct o2net_msg *msg, u32 len, void *data,
			   void **ret_data)
{
	struct dlm_ctxt *dlm = data;
	struct dlm_begin_reco *br = (struct dlm_begin_reco *)msg->buf;

	/* ok to return 0, domain has gone away */
	if (!dlm_grab(dlm))
		return 0;

	spin_lock(&dlm->spinlock);
	if (dlm->reco.state & DLM_RECO_STATE_FINALIZE) {
		mlog(0, "%s: node %u wants to recover node %u (%u:%u) "
		     "but this node is in finalize state, waiting on finalize2\n",
		     dlm->name, br->node_idx, br->dead_node,
		     dlm->reco.dead_node, dlm->reco.new_master);
		spin_unlock(&dlm->spinlock);
		dlm_put(dlm);
		return -EAGAIN;
	}
	spin_unlock(&dlm->spinlock);

	mlog(0, "%s: node %u wants to recover node %u (%u:%u)\n",
	     dlm->name, br->node_idx, br->dead_node,
	     dlm->reco.dead_node, dlm->reco.new_master);

	dlm_fire_domain_eviction_callbacks(dlm, br->dead_node);

	spin_lock(&dlm->spinlock);
	if (dlm->reco.new_master != O2NM_INVALID_NODE_NUM) {
		if (test_bit(dlm->reco.new_master, dlm->recovery_map)) {
			mlog(0, "%s: new_master %u died, changing "
			     "to %u\n", dlm->name, dlm->reco.new_master,
			     br->node_idx);
		} else {
			mlog(0, "%s: new_master %u NOT DEAD, changing "
			     "to %u\n", dlm->name, dlm->reco.new_master,
			     br->node_idx);
			/* may not have seen the new master as dead yet */
		}
	}
	if (dlm->reco.dead_node != O2NM_INVALID_NODE_NUM) {
		mlog(ML_NOTICE, "%s: dead_node previously set to %u, "
		     "node %u changing it to %u\n", dlm->name,
		     dlm->reco.dead_node, br->node_idx, br->dead_node);
	}
	dlm_set_reco_master(dlm, br->node_idx);
	dlm_set_reco_dead_node(dlm, br->dead_node);
	if (!test_bit(br->dead_node, dlm->recovery_map)) {
		mlog(0, "recovery master %u sees %u as dead, but this "
		     "node has not yet. marking %u as dead\n",
		     br->node_idx, br->dead_node, br->dead_node);
		if (!test_bit(br->dead_node, dlm->domain_map) ||
		    !test_bit(br->dead_node, dlm->live_nodes_map))
			mlog(0, "%u not in domain/live_nodes map "
			     "so setting it in reco map manually\n",
			     br->dead_node);
		/* force the recovery cleanup in __dlm_hb_node_down
		 * both of these will be cleared in a moment */
		set_bit(br->dead_node, dlm->domain_map);
		set_bit(br->dead_node, dlm->live_nodes_map);
		__dlm_hb_node_down(dlm, br->dead_node);
	}
	spin_unlock(&dlm->spinlock);

	dlm_kick_recovery_thread(dlm);

	mlog(0, "%s: recovery started by node %u, for %u (%u:%u)\n",
	     dlm->name, br->node_idx, br->dead_node,
	     dlm->reco.dead_node, dlm->reco.new_master);

	dlm_put(dlm);
	return 0;
}

#define DLM_FINALIZE_STAGE2  0x01
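/* The recovery master broadcasts completion in two stages: on
 * finalize1 each node moves its recovered lockres's to the new
 * master; on finalize2 each node clears its recovery state. */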
static int dlm_send_finalize_reco_message(struct dlm_ctxt *dlm)
{
	int ret = 0;
	struct dlm_finalize_reco fr;
	struct dlm_node_iter iter;
	int nodenum;
	int status;
	int stage = 1;

	mlog(0, "finishing recovery for node %s:%u, "
	     "stage %d\n", dlm->name, dlm->reco.dead_node, stage);

	spin_lock(&dlm->spinlock);
	dlm_node_iter_init(dlm->domain_map, &iter);
	spin_unlock(&dlm->spinlock);

stage2:
	memset(&fr, 0, sizeof(fr));
	fr.node_idx = dlm->node_num;
	fr.dead_node = dlm->reco.dead_node;
	if (stage == 2)
		fr.flags |= DLM_FINALIZE_STAGE2;

	while ((nodenum = dlm_node_iter_next(&iter)) >= 0) {
		if (nodenum == dlm->node_num)
			continue;
		ret = o2net_send_message(DLM_FINALIZE_RECO_MSG, dlm->key,
					 &fr, sizeof(fr), nodenum, &status);
		if (ret >= 0)
			ret = status;
		if (ret < 0) {
			mlog(ML_ERROR, "Error %d when sending message %u (key "
			     "0x%x) to node %u\n", ret, DLM_FINALIZE_RECO_MSG,
			     dlm->key, nodenum);
			if (dlm_is_host_down(ret)) {
				/* this has no effect on this recovery
				 * session, so set the status to zero to
				 * finish out the last recovery */
				mlog(ML_ERROR, "node %u went down after this "
				     "node finished recovery.\n", nodenum);
				ret = 0;
				continue;
			}
			break;
		}
	}
	if (stage == 1) {
		/* reset the node_iter back to the top and send finalize2 */
		iter.curnode = -1;
		stage = 2;
		goto stage2;
	}

	return ret;
}

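/* handler for DLM_FINALIZE_RECO_MSG sent by the recovery master */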
int dlm_finalize_reco_handler(struct o2net_msg *msg, u32 len, void *data,
			      void **ret_data)
{
	struct dlm_ctxt *dlm = data;
	struct dlm_finalize_reco *fr = (struct dlm_finalize_reco *)msg->buf;
	int stage = 1;

	/* ok to return 0, domain has gone away */
	if (!dlm_grab(dlm))
		return 0;

	if (fr->flags & DLM_FINALIZE_STAGE2)
		stage = 2;

	mlog(0, "%s: node %u finalizing recovery stage%d of "
	     "node %u (%u:%u)\n", dlm->name, fr->node_idx, stage,
	     fr->dead_node, dlm->reco.dead_node, dlm->reco.new_master);

	spin_lock(&dlm->spinlock);

	if (dlm->reco.new_master != fr->node_idx) {
		mlog(ML_ERROR, "node %u sent recovery finalize msg, but node "
		     "%u is supposed to be the new master, dead=%u\n",
		     fr->node_idx, dlm->reco.new_master, fr->dead_node);
		BUG();
	}
	if (dlm->reco.dead_node != fr->dead_node) {
		mlog(ML_ERROR, "node %u sent recovery finalize msg for dead "
		     "node %u, but node %u is supposed to be dead\n",
		     fr->node_idx, fr->dead_node, dlm->reco.dead_node);
		BUG();
	}

	switch (stage) {
	case 1:
		dlm_finish_local_lockres_recovery(dlm, fr->dead_node, fr->node_idx);
		if (dlm->reco.state & DLM_RECO_STATE_FINALIZE) {
			mlog(ML_ERROR, "%s: received finalize1 from "
			     "new master %u for dead node %u, but "
			     "this node has already received it!\n",
			     dlm->name, fr->node_idx, fr->dead_node);
			dlm_print_reco_node_status(dlm);
			BUG();
		}
		dlm->reco.state |= DLM_RECO_STATE_FINALIZE;
		spin_unlock(&dlm->spinlock);
		break;
	case 2:
		if (!(dlm->reco.state & DLM_RECO_STATE_FINALIZE)) {
			mlog(ML_ERROR, "%s: received finalize2 from "
			     "new master %u for dead node %u, but "
			     "this node did not have finalize1!\n",
			     dlm->name, fr->node_idx, fr->dead_node);
			dlm_print_reco_node_status(dlm);
			BUG();
		}
		dlm->reco.state &= ~DLM_RECO_STATE_FINALIZE;
		__dlm_reset_recovery(dlm);
		spin_unlock(&dlm->spinlock);
		dlm_kick_recovery_thread(dlm);
		break;
	default:
		BUG();
	}

	mlog(0, "%s: recovery done, reco master was %u, dead now %u, master now %u\n",
	     dlm->name, fr->node_idx, dlm->reco.dead_node, dlm->reco.new_master);

	dlm_put(dlm);
	return 0;
}