1
2
3
4
5
6
7
8#ifndef DLMCOMMON_H
9#define DLMCOMMON_H
10
11#include <linux/kref.h>
12
13#define DLM_HB_NODE_DOWN_PRI (0xf000000)
14#define DLM_HB_NODE_UP_PRI (0x8000000)
15
16#define DLM_LOCKID_NAME_MAX 32
17
18#define DLM_LOCK_RES_OWNER_UNKNOWN O2NM_MAX_NODES
19
20#define DLM_HASH_SIZE_DEFAULT (1 << 17)
21#if DLM_HASH_SIZE_DEFAULT < PAGE_SIZE
22# define DLM_HASH_PAGES 1
23#else
24# define DLM_HASH_PAGES (DLM_HASH_SIZE_DEFAULT / PAGE_SIZE)
25#endif
26#define DLM_BUCKETS_PER_PAGE (PAGE_SIZE / sizeof(struct hlist_head))
27#define DLM_HASH_BUCKETS (DLM_HASH_PAGES * DLM_BUCKETS_PER_PAGE)
28
29
30#define dlm_lockid_hash(_n, _l) full_name_hash(NULL, _n, _l)
31
/* Role of a master list entry (mle); also indexes the per-type
 * mle counters in struct dlm_ctxt. */
enum dlm_mle_type {
	DLM_MLE_BLOCK = 0,
	DLM_MLE_MASTER = 1,
	DLM_MLE_MIGRATION = 2,
	DLM_MLE_NUM_TYPES = 3,	/* count of valid types, not a real type */
};
38
/*
 * Tracks mastery state for one lock resource while its master is being
 * resolved or migrated.  Hashed in dlm->master_hash by mnamehash
 * (see dlm_master_hash()).
 */
struct dlm_master_list_entry {
	struct hlist_node master_hash_node;	/* linkage in dlm->master_hash */
	struct list_head hb_events;		/* linkage in dlm->mle_hb_events */
	struct dlm_ctxt *dlm;			/* owning domain */
	spinlock_t spinlock;
	wait_queue_head_t wq;
	atomic_t woken;
	struct kref mle_refs;			/* refcount on this entry */
	int inuse;
	/* one bit per possible node number */
	unsigned long maybe_map[BITS_TO_LONGS(O2NM_MAX_NODES)];
	unsigned long vote_map[BITS_TO_LONGS(O2NM_MAX_NODES)];
	unsigned long response_map[BITS_TO_LONGS(O2NM_MAX_NODES)];
	unsigned long node_map[BITS_TO_LONGS(O2NM_MAX_NODES)];
	u8 master;		/* current master node number */
	u8 new_master;		/* migration target node -- TODO confirm against migration code */
	enum dlm_mle_type type;
	struct o2hb_callback_func mle_hb_up;	/* heartbeat up/down callbacks */
	struct o2hb_callback_func mle_hb_down;
	struct dlm_lock_resource *mleres;
	unsigned char mname[DLM_LOCKID_NAME_MAX];	/* copy of the lock name */
	unsigned int mnamelen;
	unsigned int mnamehash;		/* presumably dlm_lockid_hash(mname, mnamelen) -- verify */
};
62
/* Kinds of asynchronous notification; DLM_AST/DLM_BAST are also used
 * as the msg_type argument of dlm_send_proxy_ast_msg(). */
enum dlm_ast_type {
	DLM_AST = 0,
	DLM_BAST = 1,		/* blocking AST */
	DLM_ASTUNLOCK = 2,
};
68
69
70#define LKM_VALID_FLAGS (LKM_VALBLK | LKM_CONVERT | LKM_UNLOCK | \
71 LKM_CANCEL | LKM_INVVALBLK | LKM_FORCE | \
72 LKM_RECOVERY | LKM_LOCAL | LKM_NOQUEUE)
73
#define DLM_RECOVERY_LOCK_NAME		"$RECOVERY"
#define DLM_RECOVERY_LOCK_NAME_LEN	9

/* Return 1 iff (lock_name, name_len) names the special "$RECOVERY" lock. */
static inline int dlm_is_recovery_lock(const char *lock_name, int name_len)
{
	return (name_len == DLM_RECOVERY_LOCK_NAME_LEN &&
		memcmp(lock_name, DLM_RECOVERY_LOCK_NAME, name_len) == 0);
}
84
85#define DLM_RECO_STATE_ACTIVE 0x0001
86#define DLM_RECO_STATE_FINALIZE 0x0002
87
/* Per-domain state for recovering locks from a dead node. */
struct dlm_recovery_ctxt
{
	struct list_head resources;	/* lock resources being recovered */
	struct list_head node_data;	/* list of struct dlm_reco_node_data */
	u8 new_master;			/* node driving this recovery -- TODO confirm */
	u8 dead_node;			/* node whose locks are being recovered */
	u16 state;			/* DLM_RECO_STATE_* flags */
	unsigned long node_map[BITS_TO_LONGS(O2NM_MAX_NODES)];	/* one bit per node */
	wait_queue_head_t event;
};
98
/* Lifecycle of a dlm domain (struct dlm_ctxt::dlm_state). */
enum dlm_ctxt_state {
	DLM_CTXT_NEW = 0,
	DLM_CTXT_JOINED = 1,
	DLM_CTXT_IN_SHUTDOWN = 2,
	DLM_CTXT_LEAVING = 3,
};
105
/*
 * One dlm domain.  Refcounted via dlm_refs (dlm_grab()/dlm_put());
 * 'list' presumably links it on a global domain list -- verify.
 */
struct dlm_ctxt
{
	struct list_head list;			/* domain list linkage */
	struct hlist_head **lockres_hash;	/* paged hash of lock resources (see dlm_lockres_hash) */
	struct list_head dirty_list;		/* lockres with pending AST/convert work -- TODO confirm */
	struct list_head purge_list;		/* unused lockres awaiting purge */
	struct list_head pending_asts;
	struct list_head pending_basts;
	struct list_head tracking_list;		/* all lockres, protected by track_lock */
	unsigned int purge_count;
	spinlock_t spinlock;			/* main domain lock (see assert_spin_locked users) */
	spinlock_t ast_lock;
	spinlock_t track_lock;
	char *name;				/* domain name */
	u8 node_num;				/* this node's number */
	u32 key;
	u8 joining_node;			/* node currently joining (see __dlm_set_joining_node) */
	u8 migrate_done;
	wait_queue_head_t dlm_join_events;
	/* one bit per node number */
	unsigned long live_nodes_map[BITS_TO_LONGS(O2NM_MAX_NODES)];
	unsigned long domain_map[BITS_TO_LONGS(O2NM_MAX_NODES)];
	unsigned long exit_domain_map[BITS_TO_LONGS(O2NM_MAX_NODES)];
	unsigned long recovery_map[BITS_TO_LONGS(O2NM_MAX_NODES)];
	struct dlm_recovery_ctxt reco;		/* dead-node recovery state */
	spinlock_t master_lock;
	struct hlist_head **master_hash;	/* paged hash of mles (see dlm_master_hash) */
	struct list_head mle_hb_events;

	/* statistics: total/current counts of mles (per type) and lockres */
	atomic_t mle_tot_count[DLM_MLE_NUM_TYPES];
	atomic_t mle_cur_count[DLM_MLE_NUM_TYPES];
	atomic_t res_tot_count;
	atomic_t res_cur_count;

	struct dentry *dlm_debugfs_subroot;	/* per-domain debugfs dir */

	/* lifetime / join state */
	struct kref dlm_refs;
	enum dlm_ctxt_state dlm_state;
	unsigned int num_joins;

	struct o2hb_callback_func dlm_hb_up;	/* heartbeat callbacks */
	struct o2hb_callback_func dlm_hb_down;
	struct task_struct *dlm_thread_task;		/* see dlm_launch_thread() */
	struct task_struct *dlm_reco_thread_task;	/* see dlm_launch_recovery_thread() */
	struct workqueue_struct *dlm_worker;
	wait_queue_head_t dlm_thread_wq;
	wait_queue_head_t dlm_reco_thread_wq;
	wait_queue_head_t ast_wq;
	wait_queue_head_t migration_wq;

	struct work_struct dispatched_work;
	struct list_head work_list;		/* queue of struct dlm_work_item */
	spinlock_t work_lock;
	struct list_head dlm_domain_handlers;
	struct list_head dlm_eviction_callbacks;

	/* negotiated protocol versions: fs<->dlm and node<->node
	 * dlm messaging (see dlm_query_join_request) */
	struct dlm_protocol_version fs_locking_proto;

	struct dlm_protocol_version dlm_locking_proto;
};
169
170static inline struct hlist_head *dlm_lockres_hash(struct dlm_ctxt *dlm, unsigned i)
171{
172 return dlm->lockres_hash[(i / DLM_BUCKETS_PER_PAGE) % DLM_HASH_PAGES] + (i % DLM_BUCKETS_PER_PAGE);
173}
174
175static inline struct hlist_head *dlm_master_hash(struct dlm_ctxt *dlm,
176 unsigned i)
177{
178 return dlm->master_hash[(i / DLM_BUCKETS_PER_PAGE) % DLM_HASH_PAGES] +
179 (i % DLM_BUCKETS_PER_PAGE);
180}
181
182
183
184
185
186void dlm_dispatch_work(struct work_struct *work);
187
188struct dlm_lock_resource;
189struct dlm_work_item;
190
191typedef void (dlm_workfunc_t)(struct dlm_work_item *, void *);
192
/* Work-item payload: request dead_node's locks from reco_master
 * (see dlm_request_all_locks_handler). */
struct dlm_request_all_locks_priv
{
	u8 reco_master;
	u8 dead_node;
};
198
/* Work-item payload for processing a migrated/recovered lockres
 * (see dlm_mig_lockres_handler). */
struct dlm_mig_lockres_priv
{
	struct dlm_lock_resource *lockres;
	u8 real_master;
	u8 extra_ref;
};
205
/* Work-item payload for a deferred assert_master; fields mirror the
 * arguments of dlm_dispatch_assert_master(). */
struct dlm_assert_master_priv
{
	struct dlm_lock_resource *lockres;
	u8 request_from;
	u32 flags;
	unsigned ignore_higher:1;
};
213
/* Work-item payload for a deferred lockres dereference
 * (see dlm_deref_lockres_handler). */
struct dlm_deref_lockres_priv
{
	struct dlm_lock_resource *deref_res;
	u8 deref_node;
};
219
/* Deferred work queued on dlm->work_list and run via
 * dlm_dispatch_work(); initialized by dlm_init_work_item(). */
struct dlm_work_item
{
	struct list_head list;		/* linkage in dlm->work_list */
	dlm_workfunc_t *func;		/* function to run */
	struct dlm_ctxt *dlm;		/* owning domain */
	void *data;			/* opaque argument passed to func */
	union {				/* per-func private payload */
		struct dlm_request_all_locks_priv ral;
		struct dlm_mig_lockres_priv ml;
		struct dlm_assert_master_priv am;
		struct dlm_deref_lockres_priv dl;
	} u;
};
233
234static inline void dlm_init_work_item(struct dlm_ctxt *dlm,
235 struct dlm_work_item *i,
236 dlm_workfunc_t *f, void *data)
237{
238 memset(i, 0, sizeof(*i));
239 i->func = f;
240 INIT_LIST_HEAD(&i->list);
241 i->data = data;
242 i->dlm = dlm;
243}
244
245
246
/*
 * Record which node is currently joining the domain and wake anyone
 * waiting on dlm_join_events.  Caller must hold dlm->spinlock.
 */
static inline void __dlm_set_joining_node(struct dlm_ctxt *dlm,
					  u8 node)
{
	assert_spin_locked(&dlm->spinlock);

	dlm->joining_node = node;
	wake_up(&dlm->dlm_join_events);
}
255
256#define DLM_LOCK_RES_UNINITED 0x00000001
257#define DLM_LOCK_RES_RECOVERING 0x00000002
258#define DLM_LOCK_RES_READY 0x00000004
259#define DLM_LOCK_RES_DIRTY 0x00000008
260#define DLM_LOCK_RES_IN_PROGRESS 0x00000010
261#define DLM_LOCK_RES_MIGRATING 0x00000020
262#define DLM_LOCK_RES_DROPPING_REF 0x00000040
263#define DLM_LOCK_RES_BLOCK_DIRTY 0x00001000
264#define DLM_LOCK_RES_SETREF_INPROG 0x00002000
265#define DLM_LOCK_RES_RECOVERY_WAITING 0x00004000
266
267
268#define DLM_NODE_DEATH_WAIT_MAX (5 * 1000)
269
270#define DLM_PURGE_INTERVAL_MS (8 * 1000)
271
/*
 * One locked name in a domain.  Hashed in dlm->lockres_hash; lifetime
 * managed by 'refs' (dlm_lockres_get()/dlm_lockres_put()).
 */
struct dlm_lock_resource
{
	struct hlist_node hash_node;	/* linkage in dlm->lockres_hash */
	struct qstr lockname;
	struct kref refs;

	/*
	 * Lock queues: each dlm_lock sits on exactly one of
	 * granted/converting/blocked (see enum dlm_lockres_list and
	 * dlm_list_idx_to_ptr); purge links into dlm->purge_list.
	 */
	struct list_head granted;
	struct list_head converting;
	struct list_head blocked;
	struct list_head purge;

	/* presumably linkage in dlm->dirty_list and the recovery
	 * resources list respectively -- verify against dlm_thread
	 * and recovery code */
	struct list_head dirty;
	struct list_head recovering;

	/* linkage in dlm->tracking_list (all lockres, for debugfs) */
	struct list_head tracking;

	/* timestamp used by the purge logic -- presumably jiffies;
	 * TODO confirm */
	unsigned long last_used;

	struct dlm_ctxt *dlm;		/* owning domain */

	unsigned migration_pending:1;
	atomic_t asts_reserved;		/* see __dlm_lockres_reserve_ast/dlm_lockres_release_ast */
	spinlock_t spinlock;		/* protects state/queues; see assert_spin_locked users */
	wait_queue_head_t wq;
	u8 owner;			/* master node, or DLM_LOCK_RES_OWNER_UNKNOWN */
	u16 state;			/* DLM_LOCK_RES_* flags */
	char lvb[DLM_LVB_LEN];		/* lock value block */
	unsigned int inflight_locks;
	unsigned int inflight_assert_workers;
	/* nodes holding a reference; see dlm_lockres_set_refmap_bit() */
	unsigned long refmap[BITS_TO_LONGS(O2NM_MAX_NODES)];
};
318
/*
 * Compact lock representation: embedded in struct dlm_lock and sent
 * over the wire in struct dlm_migratable_lockres::ml[].
 */
struct dlm_migratable_lock
{
	__be64 cookie;		/* lock id; top byte is the node (see dlm_get_lock_cookie_node) */

	__be16 pad1;
	u8 list;		/* queue index -- presumably enum dlm_lockres_list; verify */
	u8 flags;

	s8 type;		/* lock mode -- presumably LKM_*; TODO confirm */
	s8 convert_type;	/* target mode of an in-progress convert -- TODO confirm */
	s8 highest_blocked;
	u8 node;		/* owning node number */
};
334
/* One lock held (or requested) on a lock resource. */
struct dlm_lock
{
	struct dlm_migratable_lock ml;	/* wire-format core (cookie, mode, node, ...) */

	struct list_head list;		/* linkage on one lockres queue (granted/converting/blocked) */
	struct list_head ast_list;	/* presumably linkage in dlm->pending_asts -- verify */
	struct list_head bast_list;	/* presumably linkage in dlm->pending_basts -- verify */
	struct dlm_lock_resource *lockres;	/* resource this lock belongs to */
	spinlock_t spinlock;
	struct kref lock_refs;		/* refcount (dlm_lock_get/dlm_lock_put) */

	/* caller-supplied AST/BAST callbacks and their argument */
	dlm_astlockfunc_t *ast;
	dlm_bastlockfunc_t *bast;
	void *astdata;
	struct dlm_lockstatus *lksb;	/* caller-visible status block */
	/* in-flight operation flags */
	unsigned ast_pending:1,
		 bast_pending:1,
		 convert_pending:1,
		 lock_pending:1,
		 cancel_pending:1,
		 unlock_pending:1,
		 lksb_kernel_allocated:1;	/* lksb was allocated by the dlm, not the caller */
};
359
/* Index of the three lock queues on a lockres; mapped to the actual
 * list heads by dlm_list_idx_to_ptr(). */
enum dlm_lockres_list {
	DLM_GRANTED_LIST = 0,
	DLM_CONVERTING_LIST = 1,
	DLM_BLOCKED_LIST = 2,
};
365
366static inline int dlm_lvb_is_empty(char *lvb)
367{
368 int i;
369 for (i=0; i<DLM_LVB_LEN; i++)
370 if (lvb[i])
371 return 0;
372 return 1;
373}
374
375static inline char *dlm_list_in_text(enum dlm_lockres_list idx)
376{
377 if (idx == DLM_GRANTED_LIST)
378 return "granted";
379 else if (idx == DLM_CONVERTING_LIST)
380 return "converting";
381 else if (idx == DLM_BLOCKED_LIST)
382 return "blocked";
383 else
384 return "unknown";
385}
386
387static inline struct list_head *
388dlm_list_idx_to_ptr(struct dlm_lock_resource *res, enum dlm_lockres_list idx)
389{
390 struct list_head *ret = NULL;
391 if (idx == DLM_GRANTED_LIST)
392 ret = &res->granted;
393 else if (idx == DLM_CONVERTING_LIST)
394 ret = &res->converting;
395 else if (idx == DLM_BLOCKED_LIST)
396 ret = &res->blocked;
397 else
398 BUG();
399 return ret;
400}
401
402
403
404
/* Iterator over a node bitmap; use dlm_node_iter_init() then repeated
 * dlm_node_iter_next() until it returns -ENOENT. */
struct dlm_node_iter
{
	unsigned long node_map[BITS_TO_LONGS(O2NM_MAX_NODES)];	/* private copy of the map */
	int curnode;	/* last node returned; -1 before first call */
};
410
411
/* o2net message types used by the dlm.  Values are part of the wire
 * protocol -- do not renumber or reuse (note DLM_UNUSED_MSG1). */
enum {
	DLM_MASTER_REQUEST_MSG = 500,
	DLM_UNUSED_MSG1 = 501,
	DLM_ASSERT_MASTER_MSG = 502,
	DLM_CREATE_LOCK_MSG = 503,
	DLM_CONVERT_LOCK_MSG = 504,
	DLM_PROXY_AST_MSG = 505,
	DLM_UNLOCK_LOCK_MSG = 506,
	DLM_DEREF_LOCKRES_MSG = 507,
	DLM_MIGRATE_REQUEST_MSG = 508,
	DLM_MIG_LOCKRES_MSG = 509,
	DLM_QUERY_JOIN_MSG = 510,
	DLM_ASSERT_JOINED_MSG = 511,
	DLM_CANCEL_JOIN_MSG = 512,
	DLM_EXIT_DOMAIN_MSG = 513,
	DLM_MASTER_REQUERY_MSG = 514,
	DLM_LOCK_REQUEST_MSG = 515,
	DLM_RECO_DATA_DONE_MSG = 516,
	DLM_BEGIN_RECO_MSG = 517,
	DLM_FINALIZE_RECO_MSG = 518,
	DLM_QUERY_REGION = 519,
	DLM_QUERY_NODEINFO = 520,
	DLM_BEGIN_EXIT_DOMAIN_MSG = 521,
	DLM_DEREF_LOCKRES_DONE = 522,
};
437
/* Per-node recovery progress; linked on reco.node_data. */
struct dlm_reco_node_data
{
	int state;		/* DLM_RECO_NODE_DATA_* */
	u8 node_num;
	struct list_head list;	/* linkage in dlm->reco.node_data */
};
444
/* States for dlm_reco_node_data::state. */
enum {
	DLM_RECO_NODE_DATA_DEAD = -1,
	DLM_RECO_NODE_DATA_INIT = 0,
	DLM_RECO_NODE_DATA_REQUESTING = 1,
	DLM_RECO_NODE_DATA_REQUESTED = 2,
	DLM_RECO_NODE_DATA_RECEIVING = 3,
	DLM_RECO_NODE_DATA_DONE = 4,
	DLM_RECO_NODE_DATA_FINALIZE_SENT = 5,
};
454
455
/* Possible responses to a master request. */
enum {
	DLM_MASTER_RESP_NO = 0,
	DLM_MASTER_RESP_YES = 1,
	DLM_MASTER_RESP_MAYBE = 2,
	DLM_MASTER_RESP_ERROR = 3,
};
462
463
/* Wire format of DLM_MASTER_REQUEST_MSG. */
struct dlm_master_request
{
	u8 node_idx;		/* sending node */
	u8 namelen;		/* valid bytes in name[] */
	__be16 pad1;
	__be32 flags;

	u8 name[O2NM_MAX_NAME_LEN];
};
473
474#define DLM_ASSERT_RESPONSE_REASSERT 0x00000001
475#define DLM_ASSERT_RESPONSE_MASTERY_REF 0x00000002
476
477#define DLM_ASSERT_MASTER_MLE_CLEANUP 0x00000001
478#define DLM_ASSERT_MASTER_REQUERY 0x00000002
479#define DLM_ASSERT_MASTER_FINISH_MIGRATION 0x00000004
/* Wire format of DLM_ASSERT_MASTER_MSG; flags are the
 * DLM_ASSERT_MASTER_* values above. */
struct dlm_assert_master
{
	u8 node_idx;		/* asserting node */
	u8 namelen;		/* valid bytes in name[] */
	__be16 pad1;
	__be32 flags;

	u8 name[O2NM_MAX_NAME_LEN];
};
489
490#define DLM_MIGRATE_RESPONSE_MASTERY_REF 0x00000001
491
/* Wire format of DLM_MIGRATE_REQUEST_MSG. */
struct dlm_migrate_request
{
	u8 master;		/* current master */
	u8 new_master;		/* proposed new master */
	u8 namelen;
	u8 pad1;
	__be32 pad2;
	u8 name[O2NM_MAX_NAME_LEN];
};
501
/* Wire format of DLM_MASTER_REQUERY_MSG. */
struct dlm_master_requery
{
	u8 pad1;
	u8 pad2;
	u8 node_idx;		/* querying node */
	u8 namelen;
	__be32 pad3;
	u8 name[O2NM_MAX_NAME_LEN];
};
511
512#define DLM_MRES_RECOVERY 0x01
513#define DLM_MRES_MIGRATION 0x02
514#define DLM_MRES_ALL_DONE 0x04
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546#define DLM_MAX_MIGRATABLE_LOCKS 240
547
/*
 * Wire format of DLM_MIG_LOCKRES_MSG: one lock resource plus up to
 * DLM_MAX_MIGRATABLE_LOCKS of its locks per message; total_locks and
 * mig_cookie presumably tie multi-message transfers together -- verify.
 */
struct dlm_migratable_lockres
{
	u8 master;
	u8 lockname_len;	/* valid bytes in lockname[] */
	u8 num_locks;		/* entries in ml[] */
	u8 flags;		/* DLM_MRES_* */
	__be32 total_locks;
	__be64 mig_cookie;

	u8 lockname[DLM_LOCKID_NAME_MAX];

	u8 lvb[DLM_LVB_LEN];	/* lock value block */

	struct dlm_migratable_lock ml[];	/* num_locks entries */
};
564#define DLM_MIG_LOCKRES_MAX_LEN \
565 (sizeof(struct dlm_migratable_lockres) + \
566 (sizeof(struct dlm_migratable_lock) * \
567 DLM_MAX_MIGRATABLE_LOCKS) )
568
569
570
571#define DLM_MIG_LOCKRES_RESERVED (O2NET_MAX_PAYLOAD_BYTES - \
572 DLM_MIG_LOCKRES_MAX_LEN)
573
/* Wire format of DLM_CREATE_LOCK_MSG. */
struct dlm_create_lock
{
	__be64 cookie;		/* lock id (see dlm_get_lock_cookie_*) */

	__be32 flags;
	u8 pad1;
	u8 node_idx;		/* requesting node */
	s8 requested_type;	/* requested lock mode */
	u8 namelen;

	u8 name[O2NM_MAX_NAME_LEN];
};
586
/* Wire format of DLM_CONVERT_LOCK_MSG; an LVB may trail the struct
 * (see DLM_CONVERT_LOCK_MAX_LEN). */
struct dlm_convert_lock
{
	__be64 cookie;		/* lock id */

	__be32 flags;
	u8 pad1;
	u8 node_idx;		/* requesting node */
	s8 requested_type;	/* target lock mode */
	u8 namelen;

	u8 name[O2NM_MAX_NAME_LEN];

	s8 lvb[];		/* optional lock value block */
};
601#define DLM_CONVERT_LOCK_MAX_LEN (sizeof(struct dlm_convert_lock)+DLM_LVB_LEN)
602
/* Wire format of DLM_UNLOCK_LOCK_MSG; an LVB may trail the struct
 * (see DLM_UNLOCK_LOCK_MAX_LEN). */
struct dlm_unlock_lock
{
	__be64 cookie;		/* lock id */

	__be32 flags;
	__be16 pad1;
	u8 node_idx;		/* requesting node */
	u8 namelen;

	u8 name[O2NM_MAX_NAME_LEN];

	s8 lvb[];		/* optional lock value block */
};
616#define DLM_UNLOCK_LOCK_MAX_LEN (sizeof(struct dlm_unlock_lock)+DLM_LVB_LEN)
617
/* Wire format of DLM_PROXY_AST_MSG; an LVB may trail the struct
 * (see DLM_PROXY_AST_MAX_LEN). */
struct dlm_proxy_ast
{
	__be64 cookie;		/* lock id */

	__be32 flags;
	u8 node_idx;		/* sending node */
	u8 type;		/* enum dlm_ast_type value -- presumably; verify */
	u8 blocked_type;	/* for BASTs: mode being blocked */
	u8 namelen;

	u8 name[O2NM_MAX_NAME_LEN];

	s8 lvb[];		/* optional lock value block */
};
632#define DLM_PROXY_AST_MAX_LEN (sizeof(struct dlm_proxy_ast)+DLM_LVB_LEN)
633
634#define DLM_MOD_KEY (0x666c6172)
/* Response codes for a join query (dlm_query_join_packet::code). */
enum dlm_query_join_response_code {
	JOIN_DISALLOW = 0,
	JOIN_OK = 1,
	JOIN_OK_NO_MAP = 2,
	JOIN_PROTOCOL_MISMATCH = 3,
};
641
/* Join-query response fields; overlaid on a __be32 via
 * union dlm_query_join_response below. */
struct dlm_query_join_packet {
	u8 code;	/* enum dlm_query_join_response_code */

	u8 dlm_minor;	/* responder's dlm protocol minor -- TODO confirm */

	u8 fs_minor;	/* responder's fs protocol minor -- TODO confirm */

	u8 reserved;
};
651
/* The join response travels as a single __be32 (intval) but is
 * interpreted field-wise via packet. */
union dlm_query_join_response {
	__be32 intval;
	struct dlm_query_join_packet packet;
};
656
/* Wire format of DLM_LOCK_REQUEST_MSG. */
struct dlm_lock_request
{
	u8 node_idx;		/* requesting node */
	u8 dead_node;		/* node whose locks are requested */
	__be16 pad1;
	__be32 pad2;
};
664
/* Wire format of DLM_RECO_DATA_DONE_MSG. */
struct dlm_reco_data_done
{
	u8 node_idx;		/* sending node */
	u8 dead_node;		/* node being recovered */
	__be16 pad1;
	__be32 pad2;

	/* purpose of reco_lvb not evident here -- check
	 * dlm_reco_data_done_handler before relying on it */
	u8 reco_lvb[DLM_LVB_LEN];
};
677
/* Wire format of DLM_BEGIN_RECO_MSG. */
struct dlm_begin_reco
{
	u8 node_idx;		/* sending node */
	u8 dead_node;		/* node to recover */
	__be16 pad1;
	__be32 pad2;
};
685
/* Wire format of DLM_QUERY_JOIN_MSG. */
struct dlm_query_join_request
{
	u8 node_idx;		/* joining node */
	u8 pad1[2];
	u8 name_len;		/* valid bytes in domain[] */
	struct dlm_protocol_version dlm_proto;	/* joiner's dlm protocol */
	struct dlm_protocol_version fs_proto;	/* joiner's fs protocol */
	u8 domain[O2NM_MAX_NAME_LEN];
	u8 node_map[BITS_TO_BYTES(O2NM_MAX_NODES)];	/* one bit per node */
};
696
/* Wire format of DLM_ASSERT_JOINED_MSG. */
struct dlm_assert_joined
{
	u8 node_idx;		/* node that joined */
	u8 pad1[2];
	u8 name_len;		/* valid bytes in domain[] */
	u8 domain[O2NM_MAX_NAME_LEN];
};
704
/* Wire format of DLM_CANCEL_JOIN_MSG. */
struct dlm_cancel_join
{
	u8 node_idx;		/* node cancelling its join */
	u8 pad1[2];
	u8 name_len;		/* valid bytes in domain[] */
	u8 domain[O2NM_MAX_NAME_LEN];
};
712
/* Wire format of DLM_QUERY_REGION. */
struct dlm_query_region {
	u8 qr_node;		/* querying node */
	u8 qr_numregions;	/* valid entries in qr_regions[] */
	u8 qr_namelen;		/* valid bytes in qr_domain[] */
	u8 pad1;
	u8 qr_domain[O2NM_MAX_NAME_LEN];
	/* up to O2NM_MAX_REGIONS names of O2HB_MAX_REGION_NAME_LEN each */
	u8 qr_regions[O2HB_MAX_REGION_NAME_LEN * O2NM_MAX_REGIONS];
};
721
/* One node's identity/address, embedded in dlm_query_nodeinfo. */
struct dlm_node_info {
	u8 ni_nodenum;
	u8 pad1;
	__be16 ni_ipv4_port;
	__be32 ni_ipv4_address;
};
728
/* Wire format of DLM_QUERY_NODEINFO. */
struct dlm_query_nodeinfo {
	u8 qn_nodenum;		/* querying node */
	u8 qn_numnodes;		/* valid entries in qn_nodes[] */
	u8 qn_namelen;		/* valid bytes in qn_domain[] */
	u8 pad1;
	u8 qn_domain[O2NM_MAX_NAME_LEN];
	struct dlm_node_info qn_nodes[O2NM_MAX_NODES];
};
737
/* Wire format of DLM_EXIT_DOMAIN_MSG / DLM_BEGIN_EXIT_DOMAIN_MSG. */
struct dlm_exit_domain
{
	u8 node_idx;		/* departing node */
	u8 pad1[3];
};
743
/* Wire format of DLM_FINALIZE_RECO_MSG. */
struct dlm_finalize_reco
{
	u8 node_idx;		/* sending node */
	u8 dead_node;		/* node that was recovered */
	u8 flags;
	u8 pad1;
	__be32 pad2;
};
752
/* Wire format of DLM_DEREF_LOCKRES_MSG. */
struct dlm_deref_lockres
{
	u32 pad1;
	u16 pad2;
	u8 node_idx;		/* node dropping its reference */
	u8 namelen;		/* valid bytes in name[] */

	u8 name[O2NM_MAX_NAME_LEN];
};
762
/* Responses to a deref request. */
enum {
	DLM_DEREF_RESPONSE_DONE = 0,
	DLM_DEREF_RESPONSE_INPROG = 1,	/* deref still in progress; a
					 * DLM_DEREF_LOCKRES_DONE follows */
};
767
/* Wire format of DLM_DEREF_LOCKRES_DONE. */
struct dlm_deref_lockres_done {
	u32 pad1;
	u16 pad2;
	u8 node_idx;		/* node that completed the deref */
	u8 namelen;		/* valid bytes in name[] */

	u8 name[O2NM_MAX_NAME_LEN];
};
776
777static inline enum dlm_status
778__dlm_lockres_state_to_status(struct dlm_lock_resource *res)
779{
780 enum dlm_status status = DLM_NORMAL;
781
782 assert_spin_locked(&res->spinlock);
783
784 if (res->state & (DLM_LOCK_RES_RECOVERING|
785 DLM_LOCK_RES_RECOVERY_WAITING))
786 status = DLM_RECOVERING;
787 else if (res->state & DLM_LOCK_RES_MIGRATING)
788 status = DLM_MIGRATING;
789 else if (res->state & DLM_LOCK_RES_IN_PROGRESS)
790 status = DLM_FORWARD;
791
792 return status;
793}
794
795static inline u8 dlm_get_lock_cookie_node(u64 cookie)
796{
797 u8 ret;
798 cookie >>= 56;
799 ret = (u8)(cookie & 0xffULL);
800 return ret;
801}
802
803static inline unsigned long long dlm_get_lock_cookie_seq(u64 cookie)
804{
805 unsigned long long ret;
806 ret = ((unsigned long long)cookie) & 0x00ffffffffffffffULL;
807 return ret;
808}
809
810struct dlm_lock * dlm_new_lock(int type, u8 node, u64 cookie,
811 struct dlm_lockstatus *lksb);
812void dlm_lock_get(struct dlm_lock *lock);
813void dlm_lock_put(struct dlm_lock *lock);
814
815void dlm_lock_attach_lockres(struct dlm_lock *lock,
816 struct dlm_lock_resource *res);
817
818int dlm_create_lock_handler(struct o2net_msg *msg, u32 len, void *data,
819 void **ret_data);
820int dlm_convert_lock_handler(struct o2net_msg *msg, u32 len, void *data,
821 void **ret_data);
822int dlm_proxy_ast_handler(struct o2net_msg *msg, u32 len, void *data,
823 void **ret_data);
824
825void dlm_revert_pending_convert(struct dlm_lock_resource *res,
826 struct dlm_lock *lock);
827void dlm_revert_pending_lock(struct dlm_lock_resource *res,
828 struct dlm_lock *lock);
829
830int dlm_unlock_lock_handler(struct o2net_msg *msg, u32 len, void *data,
831 void **ret_data);
832void dlm_commit_pending_cancel(struct dlm_lock_resource *res,
833 struct dlm_lock *lock);
834void dlm_commit_pending_unlock(struct dlm_lock_resource *res,
835 struct dlm_lock *lock);
836
837int dlm_launch_thread(struct dlm_ctxt *dlm);
838void dlm_complete_thread(struct dlm_ctxt *dlm);
839int dlm_launch_recovery_thread(struct dlm_ctxt *dlm);
840void dlm_complete_recovery_thread(struct dlm_ctxt *dlm);
841void dlm_wait_for_recovery(struct dlm_ctxt *dlm);
842void dlm_kick_recovery_thread(struct dlm_ctxt *dlm);
843int dlm_is_node_dead(struct dlm_ctxt *dlm, u8 node);
844void dlm_wait_for_node_death(struct dlm_ctxt *dlm, u8 node, int timeout);
845void dlm_wait_for_node_recovery(struct dlm_ctxt *dlm, u8 node, int timeout);
846
847void dlm_put(struct dlm_ctxt *dlm);
848struct dlm_ctxt *dlm_grab(struct dlm_ctxt *dlm);
849int dlm_domain_fully_joined(struct dlm_ctxt *dlm);
850
851void __dlm_lockres_calc_usage(struct dlm_ctxt *dlm,
852 struct dlm_lock_resource *res);
853void dlm_lockres_calc_usage(struct dlm_ctxt *dlm,
854 struct dlm_lock_resource *res);
/* Take an extra reference on @res; pair with dlm_lockres_put(). */
static inline void dlm_lockres_get(struct dlm_lock_resource *res)
{
	/* kref_get requires the caller to already hold a valid
	 * reference keeping 'res' alive */
	kref_get(&res->refs);
}
861void dlm_lockres_put(struct dlm_lock_resource *res);
862void __dlm_unhash_lockres(struct dlm_ctxt *dlm, struct dlm_lock_resource *res);
863void __dlm_insert_lockres(struct dlm_ctxt *dlm, struct dlm_lock_resource *res);
864struct dlm_lock_resource * __dlm_lookup_lockres_full(struct dlm_ctxt *dlm,
865 const char *name,
866 unsigned int len,
867 unsigned int hash);
868struct dlm_lock_resource * __dlm_lookup_lockres(struct dlm_ctxt *dlm,
869 const char *name,
870 unsigned int len,
871 unsigned int hash);
872struct dlm_lock_resource * dlm_lookup_lockres(struct dlm_ctxt *dlm,
873 const char *name,
874 unsigned int len);
875
876int dlm_is_host_down(int errno);
877
878struct dlm_lock_resource * dlm_get_lock_resource(struct dlm_ctxt *dlm,
879 const char *lockid,
880 int namelen,
881 int flags);
882struct dlm_lock_resource *dlm_new_lockres(struct dlm_ctxt *dlm,
883 const char *name,
884 unsigned int namelen);
885
886void dlm_lockres_set_refmap_bit(struct dlm_ctxt *dlm,
887 struct dlm_lock_resource *res, int bit);
888void dlm_lockres_clear_refmap_bit(struct dlm_ctxt *dlm,
889 struct dlm_lock_resource *res, int bit);
890
891void dlm_lockres_drop_inflight_ref(struct dlm_ctxt *dlm,
892 struct dlm_lock_resource *res);
893void dlm_lockres_grab_inflight_ref(struct dlm_ctxt *dlm,
894 struct dlm_lock_resource *res);
895
896void __dlm_lockres_grab_inflight_worker(struct dlm_ctxt *dlm,
897 struct dlm_lock_resource *res);
898
899void dlm_queue_ast(struct dlm_ctxt *dlm, struct dlm_lock *lock);
900void __dlm_queue_ast(struct dlm_ctxt *dlm, struct dlm_lock *lock);
901void __dlm_queue_bast(struct dlm_ctxt *dlm, struct dlm_lock *lock);
902void dlm_do_local_ast(struct dlm_ctxt *dlm,
903 struct dlm_lock_resource *res,
904 struct dlm_lock *lock);
905int dlm_do_remote_ast(struct dlm_ctxt *dlm,
906 struct dlm_lock_resource *res,
907 struct dlm_lock *lock);
908void dlm_do_local_bast(struct dlm_ctxt *dlm,
909 struct dlm_lock_resource *res,
910 struct dlm_lock *lock,
911 int blocked_type);
912int dlm_send_proxy_ast_msg(struct dlm_ctxt *dlm,
913 struct dlm_lock_resource *res,
914 struct dlm_lock *lock,
915 int msg_type,
916 int blocked_type, int flags);
/* Convenience wrapper: send a blocking AST (DLM_BAST) for @lock to its
 * owner via dlm_send_proxy_ast_msg(); no extra flags. */
static inline int dlm_send_proxy_bast(struct dlm_ctxt *dlm,
				      struct dlm_lock_resource *res,
				      struct dlm_lock *lock,
				      int blocked_type)
{
	return dlm_send_proxy_ast_msg(dlm, res, lock, DLM_BAST,
				      blocked_type, 0);
}
925
/* Convenience wrapper: send a regular AST (DLM_AST) for @lock via
 * dlm_send_proxy_ast_msg(); blocked_type is meaningless here (0). */
static inline int dlm_send_proxy_ast(struct dlm_ctxt *dlm,
				     struct dlm_lock_resource *res,
				     struct dlm_lock *lock,
				     int flags)
{
	return dlm_send_proxy_ast_msg(dlm, res, lock, DLM_AST,
				      0, flags);
}
934
935void dlm_print_one_lock_resource(struct dlm_lock_resource *res);
936void __dlm_print_one_lock_resource(struct dlm_lock_resource *res);
937
938void dlm_kick_thread(struct dlm_ctxt *dlm, struct dlm_lock_resource *res);
939void __dlm_dirty_lockres(struct dlm_ctxt *dlm, struct dlm_lock_resource *res);
940
941
942void dlm_hb_node_down_cb(struct o2nm_node *node, int idx, void *data);
943void dlm_hb_node_up_cb(struct o2nm_node *node, int idx, void *data);
944
945int dlm_empty_lockres(struct dlm_ctxt *dlm, struct dlm_lock_resource *res);
946int dlm_finish_migration(struct dlm_ctxt *dlm,
947 struct dlm_lock_resource *res,
948 u8 old_master);
949void dlm_lockres_release_ast(struct dlm_ctxt *dlm,
950 struct dlm_lock_resource *res);
951void __dlm_lockres_reserve_ast(struct dlm_lock_resource *res);
952
953int dlm_master_request_handler(struct o2net_msg *msg, u32 len, void *data,
954 void **ret_data);
955int dlm_assert_master_handler(struct o2net_msg *msg, u32 len, void *data,
956 void **ret_data);
957void dlm_assert_master_post_handler(int status, void *data, void *ret_data);
958int dlm_deref_lockres_handler(struct o2net_msg *msg, u32 len, void *data,
959 void **ret_data);
960int dlm_deref_lockres_done_handler(struct o2net_msg *msg, u32 len, void *data,
961 void **ret_data);
962int dlm_migrate_request_handler(struct o2net_msg *msg, u32 len, void *data,
963 void **ret_data);
964int dlm_mig_lockres_handler(struct o2net_msg *msg, u32 len, void *data,
965 void **ret_data);
966int dlm_master_requery_handler(struct o2net_msg *msg, u32 len, void *data,
967 void **ret_data);
968int dlm_request_all_locks_handler(struct o2net_msg *msg, u32 len, void *data,
969 void **ret_data);
970int dlm_reco_data_done_handler(struct o2net_msg *msg, u32 len, void *data,
971 void **ret_data);
972int dlm_begin_reco_handler(struct o2net_msg *msg, u32 len, void *data,
973 void **ret_data);
974int dlm_finalize_reco_handler(struct o2net_msg *msg, u32 len, void *data,
975 void **ret_data);
976int dlm_do_master_requery(struct dlm_ctxt *dlm, struct dlm_lock_resource *res,
977 u8 nodenum, u8 *real_master);
978
979void __dlm_do_purge_lockres(struct dlm_ctxt *dlm,
980 struct dlm_lock_resource *res);
981
982int dlm_dispatch_assert_master(struct dlm_ctxt *dlm,
983 struct dlm_lock_resource *res,
984 int ignore_higher,
985 u8 request_from,
986 u32 flags);
987
988
989int dlm_send_one_lockres(struct dlm_ctxt *dlm,
990 struct dlm_lock_resource *res,
991 struct dlm_migratable_lockres *mres,
992 u8 send_to,
993 u8 flags);
994void dlm_move_lockres_to_recovery_list(struct dlm_ctxt *dlm,
995 struct dlm_lock_resource *res);
996
997
998void __dlm_wait_on_lockres_flags(struct dlm_lock_resource *res, int flags);
999
1000
/* Wait until @res is neither busy, recovering, waiting for recovery,
 * nor migrating (see __dlm_wait_on_lockres_flags). */
static inline void __dlm_wait_on_lockres(struct dlm_lock_resource *res)
{
	__dlm_wait_on_lockres_flags(res, (DLM_LOCK_RES_IN_PROGRESS|
					  DLM_LOCK_RES_RECOVERING|
					  DLM_LOCK_RES_RECOVERY_WAITING|
					  DLM_LOCK_RES_MIGRATING));
}
1008
1009void __dlm_unlink_mle(struct dlm_ctxt *dlm, struct dlm_master_list_entry *mle);
1010void __dlm_insert_mle(struct dlm_ctxt *dlm, struct dlm_master_list_entry *mle);
1011
1012
1013int dlm_init_master_caches(void);
1014void dlm_destroy_master_caches(void);
1015
1016int dlm_init_lock_cache(void);
1017void dlm_destroy_lock_cache(void);
1018
1019int dlm_init_mle_cache(void);
1020void dlm_destroy_mle_cache(void);
1021
1022void dlm_hb_event_notify_attached(struct dlm_ctxt *dlm, int idx, int node_up);
1023int dlm_drop_lockres_ref(struct dlm_ctxt *dlm,
1024 struct dlm_lock_resource *res);
1025void dlm_clean_master_list(struct dlm_ctxt *dlm,
1026 u8 dead_node);
1027void dlm_force_free_mles(struct dlm_ctxt *dlm);
1028int dlm_lock_basts_flushed(struct dlm_ctxt *dlm, struct dlm_lock *lock);
1029int __dlm_lockres_has_locks(struct dlm_lock_resource *res);
1030int __dlm_lockres_unused(struct dlm_lock_resource *res);
1031
1032static inline const char * dlm_lock_mode_name(int mode)
1033{
1034 switch (mode) {
1035 case LKM_EXMODE:
1036 return "EX";
1037 case LKM_PRMODE:
1038 return "PR";
1039 case LKM_NLMODE:
1040 return "NL";
1041 }
1042 return "UNKNOWN";
1043}
1044
1045
1046static inline int dlm_lock_compatible(int existing, int request)
1047{
1048
1049 if (request == LKM_NLMODE ||
1050 existing == LKM_NLMODE)
1051 return 1;
1052
1053
1054 if (request == LKM_EXMODE)
1055 return 0;
1056
1057
1058 if (existing == LKM_PRMODE)
1059 return 1;
1060
1061 return 0;
1062}
1063
/* Return 1 if 'lock' is currently linked on 'head' via its ->list
 * member, 0 otherwise.  Caller is responsible for any locking. */
static inline int dlm_lock_on_list(struct list_head *head,
				   struct dlm_lock *lock)
{
	struct dlm_lock *tmplock;

	list_for_each_entry(tmplock, head, list) {
		if (tmplock == lock)
			return 1;
	}
	return 0;
}
1075
1076
1077static inline enum dlm_status dlm_err_to_dlm_status(int err)
1078{
1079 enum dlm_status ret;
1080 if (err == -ENOMEM)
1081 ret = DLM_SYSERR;
1082 else if (err == -ETIMEDOUT || o2net_link_down(err, NULL))
1083 ret = DLM_NOLOCKMGR;
1084 else if (err == -EINVAL)
1085 ret = DLM_BADPARAM;
1086 else if (err == -ENAMETOOLONG)
1087 ret = DLM_IVBUFLEN;
1088 else
1089 ret = DLM_BADARGS;
1090 return ret;
1091}
1092
1093
/* Start iterating the node bitmap 'map'; takes a private copy so the
 * caller's map may change afterwards.  Follow with dlm_node_iter_next. */
static inline void dlm_node_iter_init(unsigned long *map,
				      struct dlm_node_iter *iter)
{
	memcpy(iter->node_map, map, sizeof(iter->node_map));
	iter->curnode = -1;	/* before the first node */
}
1100
/* Advance to the next set bit in the iterator's node map.  Returns the
 * node number, or -ENOENT once the map is exhausted (further calls
 * keep returning -ENOENT since curnode is pinned at O2NM_MAX_NODES). */
static inline int dlm_node_iter_next(struct dlm_node_iter *iter)
{
	int bit;
	bit = find_next_bit(iter->node_map, O2NM_MAX_NODES, iter->curnode+1);
	if (bit >= O2NM_MAX_NODES) {
		iter->curnode = O2NM_MAX_NODES;
		return -ENOENT;
	}
	iter->curnode = bit;
	return bit;
}
1112
/* Unconditionally record @owner as the master of @res.
 * Caller must hold res->spinlock. */
static inline void dlm_set_lockres_owner(struct dlm_ctxt *dlm,
					 struct dlm_lock_resource *res,
					 u8 owner)
{
	assert_spin_locked(&res->spinlock);

	res->owner = owner;
}
1121
/* Record @owner as the master of @res only if it actually changed.
 * Caller must hold res->spinlock. */
static inline void dlm_change_lockres_owner(struct dlm_ctxt *dlm,
					    struct dlm_lock_resource *res,
					    u8 owner)
{
	assert_spin_locked(&res->spinlock);

	if (owner != res->owner)
		dlm_set_lockres_owner(dlm, res, owner);
}
1131
1132#endif
1133