1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
#ifndef DLMCOMMON_H
#define DLMCOMMON_H

#include <linux/kref.h>

/* o2hb callback priorities for node-down / node-up events. */
#define DLM_HB_NODE_DOWN_PRI (0xf000000)
#define DLM_HB_NODE_UP_PRI (0x8000000)

/* Maximum length of a lock resource name. */
#define DLM_LOCKID_NAME_MAX 32

#define DLM_DOMAIN_NAME_MAX_LEN 255
/* Owner value used while a lockres's master is not yet known. */
#define DLM_LOCK_RES_OWNER_UNKNOWN O2NM_MAX_NODES
#define DLM_THREAD_SHUFFLE_INTERVAL 5
#define DLM_THREAD_MS 200

/*
 * The lockres and master hashes are allocated as arrays of pages;
 * derive page and bucket counts from the configured table size.
 */
#define DLM_HASH_SIZE_DEFAULT (1 << 17)
#if DLM_HASH_SIZE_DEFAULT < PAGE_SIZE
# define DLM_HASH_PAGES 1
#else
# define DLM_HASH_PAGES (DLM_HASH_SIZE_DEFAULT / PAGE_SIZE)
#endif
#define DLM_BUCKETS_PER_PAGE (PAGE_SIZE / sizeof(struct hlist_head))
#define DLM_HASH_BUCKETS (DLM_HASH_PAGES * DLM_BUCKETS_PER_PAGE)


/* Hash a lock name into a bucket index. */
#define dlm_lockid_hash(_n, _l) full_name_hash(_n, _l)
51
/* Kinds of master list entries (MLEs). */
enum dlm_mle_type {
	DLM_MLE_BLOCK = 0,
	DLM_MLE_MASTER = 1,
	DLM_MLE_MIGRATION = 2,
	DLM_MLE_NUM_TYPES = 3,	/* count of the types above; sizes per-type counters */
};
58
/*
 * A master list entry (MLE) tracks the process of discovering or
 * transferring mastery of a single lock resource.
 */
struct dlm_master_list_entry {
	struct hlist_node master_hash_node;	/* linkage in dlm->master_hash */
	struct list_head hb_events;		/* linkage on dlm->mle_hb_events */
	struct dlm_ctxt *dlm;			/* owning domain */
	spinlock_t spinlock;
	wait_queue_head_t wq;
	atomic_t woken;
	struct kref mle_refs;			/* reference count */
	int inuse;
	/* per-node bitmaps used while resolving mastery */
	unsigned long maybe_map[BITS_TO_LONGS(O2NM_MAX_NODES)];
	unsigned long vote_map[BITS_TO_LONGS(O2NM_MAX_NODES)];
	unsigned long response_map[BITS_TO_LONGS(O2NM_MAX_NODES)];
	unsigned long node_map[BITS_TO_LONGS(O2NM_MAX_NODES)];
	u8 master;		/* current master node number */
	u8 new_master;		/* presumably the migration target - confirm */
	enum dlm_mle_type type;
	struct o2hb_callback_func mle_hb_up;	/* heartbeat callbacks */
	struct o2hb_callback_func mle_hb_down;
	struct dlm_lock_resource *mleres;
	unsigned char mname[DLM_LOCKID_NAME_MAX];	/* resource name copy */
	unsigned int mnamelen;
	unsigned int mnamehash;
};
82
/* Kinds of asynchronous notifications delivered for a lock. */
enum dlm_ast_type {
	DLM_AST = 0,
	DLM_BAST = 1,
	DLM_ASTUNLOCK = 2,
};


/* All LKM_* flags a caller may legally pass in. */
#define LKM_VALID_FLAGS (LKM_VALID | LKM_VALBLK | LKM_CONVERT | LKM_UNLOCK | \
			 LKM_CANCEL | LKM_INVVALBLK | LKM_FORCE | \
			 LKM_RECOVERY | LKM_LOCAL | LKM_NOQUEUE)

/* Name of the special recovery lock; see dlm_is_recovery_lock(). */
#define DLM_RECOVERY_LOCK_NAME "$RECOVERY"
#define DLM_RECOVERY_LOCK_NAME_LEN 9
96
97static inline int dlm_is_recovery_lock(const char *lock_name, int name_len)
98{
99 if (name_len == DLM_RECOVERY_LOCK_NAME_LEN &&
100 memcmp(lock_name, DLM_RECOVERY_LOCK_NAME, name_len)==0)
101 return 1;
102 return 0;
103}
104
/* dlm_recovery_ctxt.state flags */
#define DLM_RECO_STATE_ACTIVE 0x0001
#define DLM_RECO_STATE_FINALIZE 0x0002

/* Per-domain state used while recovering a dead node. */
struct dlm_recovery_ctxt
{
	struct list_head resources;
	struct list_head received;
	struct list_head node_data;	/* list of struct dlm_reco_node_data */
	u8 new_master;			/* node driving this recovery */
	u8 dead_node;			/* node being recovered */
	u16 state;			/* DLM_RECO_STATE_* */
	unsigned long node_map[BITS_TO_LONGS(O2NM_MAX_NODES)];
	wait_queue_head_t event;
};
119
/* Lifecycle states of a dlm domain (dlm_ctxt.dlm_state). */
enum dlm_ctxt_state {
	DLM_CTXT_NEW = 0,		/* allocated, not yet joined */
	DLM_CTXT_JOINED = 1,		/* fully joined the domain */
	DLM_CTXT_IN_SHUTDOWN = 2,	/* shutdown has begun */
	DLM_CTXT_LEAVING = 3,		/* leaving the domain */
};
126
/*
 * One DLM domain ("context"): all state for a single cluster lock
 * namespace this node participates in.
 */
struct dlm_ctxt
{
	struct list_head list;			/* linkage on the global domain list */
	struct hlist_head **lockres_hash;	/* paged hash of lock resources */
	struct list_head dirty_list;
	struct list_head purge_list;
	struct list_head pending_asts;
	struct list_head pending_basts;
	struct list_head tracking_list;		/* all lockres, for debugging */
	unsigned int purge_count;
	spinlock_t spinlock;
	spinlock_t ast_lock;
	spinlock_t track_lock;
	char *name;				/* domain name */
	u8 node_num;				/* this node's number */
	u32 key;
	u8 joining_node;			/* see __dlm_set_joining_node() */
	wait_queue_head_t dlm_join_events;
	/* node-state bitmaps: heartbeating, in-domain, needing recovery */
	unsigned long live_nodes_map[BITS_TO_LONGS(O2NM_MAX_NODES)];
	unsigned long domain_map[BITS_TO_LONGS(O2NM_MAX_NODES)];
	unsigned long recovery_map[BITS_TO_LONGS(O2NM_MAX_NODES)];
	struct dlm_recovery_ctxt reco;
	spinlock_t master_lock;
	struct hlist_head **master_hash;	/* paged hash of MLEs */
	struct list_head mle_hb_events;

	/* debugging counters: lifetime totals and current counts */
	atomic_t mle_tot_count[DLM_MLE_NUM_TYPES];
	atomic_t mle_cur_count[DLM_MLE_NUM_TYPES];
	atomic_t res_tot_count;
	atomic_t res_cur_count;

	struct dlm_debug_ctxt *dlm_debug_ctxt;
	struct dentry *dlm_debugfs_subroot;

	/* domain lifetime: refcount, state, join count */
	struct kref dlm_refs;
	enum dlm_ctxt_state dlm_state;
	unsigned int num_joins;

	struct o2hb_callback_func dlm_hb_up;
	struct o2hb_callback_func dlm_hb_down;
	struct task_struct *dlm_thread_task;
	struct task_struct *dlm_reco_thread_task;
	struct workqueue_struct *dlm_worker;
	wait_queue_head_t dlm_thread_wq;
	wait_queue_head_t dlm_reco_thread_wq;
	wait_queue_head_t ast_wq;
	wait_queue_head_t migration_wq;

	struct work_struct dispatched_work;
	struct list_head work_list;
	spinlock_t work_lock;		/* presumably protects work_list - confirm */
	struct list_head dlm_domain_handlers;
	struct list_head dlm_eviction_callbacks;

	/* protocol version the filesystem layer speaks */
	struct dlm_protocol_version fs_locking_proto;
	/* protocol version the dlm itself speaks */
	struct dlm_protocol_version dlm_locking_proto;
};
189
190static inline struct hlist_head *dlm_lockres_hash(struct dlm_ctxt *dlm, unsigned i)
191{
192 return dlm->lockres_hash[(i / DLM_BUCKETS_PER_PAGE) % DLM_HASH_PAGES] + (i % DLM_BUCKETS_PER_PAGE);
193}
194
195static inline struct hlist_head *dlm_master_hash(struct dlm_ctxt *dlm,
196 unsigned i)
197{
198 return dlm->master_hash[(i / DLM_BUCKETS_PER_PAGE) % DLM_HASH_PAGES] +
199 (i % DLM_BUCKETS_PER_PAGE);
200}
201
202
203
204
205
void dlm_dispatch_work(struct work_struct *work);

struct dlm_lock_resource;
struct dlm_work_item;

/* Worker callback invoked for a queued dlm_work_item. */
typedef void (dlm_workfunc_t)(struct dlm_work_item *, void *);

/* Payload for a "request all locks" recovery work item. */
struct dlm_request_all_locks_priv
{
	u8 reco_master;
	u8 dead_node;
};

/* Payload for a migrated-lockres work item. */
struct dlm_mig_lockres_priv
{
	struct dlm_lock_resource *lockres;
	u8 real_master;
	u8 extra_ref;
};

/* Payload for a deferred assert-master work item. */
struct dlm_assert_master_priv
{
	struct dlm_lock_resource *lockres;
	u8 request_from;
	u32 flags;
	unsigned ignore_higher:1;
};

/* Payload for a deferred lockres dereference work item. */
struct dlm_deref_lockres_priv
{
	struct dlm_lock_resource *deref_res;
	u8 deref_node;
};

/* A unit of deferred work; see dlm_init_work_item(). */
struct dlm_work_item
{
	struct list_head list;		/* linkage on dlm->work_list */
	dlm_workfunc_t *func;		/* handler to run */
	struct dlm_ctxt *dlm;		/* owning domain */
	void *data;			/* opaque handler argument */
	union {				/* per-handler payload */
		struct dlm_request_all_locks_priv ral;
		struct dlm_mig_lockres_priv ml;
		struct dlm_assert_master_priv am;
		struct dlm_deref_lockres_priv dl;
	} u;
};
253
254static inline void dlm_init_work_item(struct dlm_ctxt *dlm,
255 struct dlm_work_item *i,
256 dlm_workfunc_t *f, void *data)
257{
258 memset(i, 0, sizeof(*i));
259 i->func = f;
260 INIT_LIST_HEAD(&i->list);
261 i->data = data;
262 i->dlm = dlm;
263}
264
265
266
/*
 * Record @node as the node currently joining the domain and wake any
 * waiters on dlm_join_events.  Caller must hold dlm->spinlock.
 */
static inline void __dlm_set_joining_node(struct dlm_ctxt *dlm,
					  u8 node)
{
	assert_spin_locked(&dlm->spinlock);

	dlm->joining_node = node;
	wake_up(&dlm->dlm_join_events);
}
275
/* dlm_lock_resource.state flags */
#define DLM_LOCK_RES_UNINITED 0x00000001
#define DLM_LOCK_RES_RECOVERING 0x00000002
#define DLM_LOCK_RES_READY 0x00000004
#define DLM_LOCK_RES_DIRTY 0x00000008
#define DLM_LOCK_RES_IN_PROGRESS 0x00000010
#define DLM_LOCK_RES_MIGRATING 0x00000020
#define DLM_LOCK_RES_DROPPING_REF 0x00000040
#define DLM_LOCK_RES_BLOCK_DIRTY 0x00001000
#define DLM_LOCK_RES_SETREF_INPROG 0x00002000

/* Max time (ms) to wait for a node's death to be confirmed. */
#define DLM_NODE_DEATH_WAIT_MAX (5 * 1000)

/* Interval (ms) between purges of unused lock resources. */
#define DLM_PURGE_INTERVAL_MS (8 * 1000)
290
/* One lock resource: a named entity that locks are taken against. */
struct dlm_lock_resource
{
	/* hash linkage, name and lifetime */
	struct hlist_node hash_node;
	struct qstr lockname;
	struct kref refs;

	/*
	 * The three lock queues: granted holders, locks waiting on a
	 * mode conversion, and locks blocked behind incompatible ones
	 * (see enum dlm_lockres_list / dlm_list_idx_to_ptr()).
	 */
	struct list_head granted;
	struct list_head converting;
	struct list_head blocked;
	struct list_head purge;		/* presumably dlm->purge_list linkage */

	/* linkage while dirty or being recovered */
	struct list_head dirty;
	struct list_head recovering;

	/* linkage on dlm->tracking_list */
	struct list_head tracking;

	/* timestamp of last use; consulted when purging - TODO confirm units */
	unsigned long last_used;

	struct dlm_ctxt *dlm;		/* owning domain */

	unsigned migration_pending:1;
	atomic_t asts_reserved;		/* see __dlm_lockres_reserve_ast() */
	spinlock_t spinlock;
	wait_queue_head_t wq;
	u8 owner;			/* node number, or DLM_LOCK_RES_OWNER_UNKNOWN */
	u16 state;			/* DLM_LOCK_RES_* flags */
	char lvb[DLM_LVB_LEN];		/* lock value block */
	unsigned int inflight_locks;
	unsigned long refmap[BITS_TO_LONGS(O2NM_MAX_NODES)];	/* nodes holding a ref */
};
336
/*
 * Compact, fixed-width description of one lock; embedded in struct
 * dlm_lock and carried inside migration/recovery messages (multi-byte
 * fields in big-endian order).
 */
struct dlm_migratable_lock
{
	__be64 cookie;		/* unique id: owner node in the top byte,
				 * 56-bit sequence below (see
				 * dlm_get_lock_cookie_node()/_seq()) */

	__be16 pad1;
	u8 list;		/* queue index; see enum dlm_lockres_list */
	u8 flags;

	s8 type;		/* lock mode */
	s8 convert_type;	/* target mode while converting */
	s8 highest_blocked;
	u8 node;		/* node number */
};
352
/* An in-core lock held (or requested) against a lock resource. */
struct dlm_lock
{
	struct dlm_migratable_lock ml;	/* mode / owner / cookie */

	struct list_head list;		/* linkage on one lockres queue */
	struct list_head ast_list;	/* pending-AST linkage */
	struct list_head bast_list;	/* pending-BAST linkage */
	struct dlm_lock_resource *lockres;
	spinlock_t spinlock;
	struct kref lock_refs;		/* reference count */

	/* AST/BAST callbacks and their opaque argument */
	dlm_astlockfunc_t *ast;
	dlm_bastlockfunc_t *bast;
	void *astdata;
	struct dlm_lockstatus *lksb;	/* caller-visible status block */
	/* in-flight operation flags */
	unsigned ast_pending:1,
		 bast_pending:1,
		 convert_pending:1,
		 lock_pending:1,
		 cancel_pending:1,
		 unlock_pending:1,
		 lksb_kernel_allocated:1;	/* lksb owned by us, free on release */
};
377
378
/* lksb flag bits; only PUT_LVB/GET_LVB are meaningful, the rest are reserved. */
#define DLM_LKSB_UNUSED1 0x01
#define DLM_LKSB_PUT_LVB 0x02
#define DLM_LKSB_GET_LVB 0x04
#define DLM_LKSB_UNUSED2 0x08
#define DLM_LKSB_UNUSED3 0x10
#define DLM_LKSB_UNUSED4 0x20
#define DLM_LKSB_UNUSED5 0x40
#define DLM_LKSB_UNUSED6 0x80


/* Index of each lock queue on a lockres; see dlm_list_idx_to_ptr(). */
enum dlm_lockres_list {
	DLM_GRANTED_LIST = 0,
	DLM_CONVERTING_LIST = 1,
	DLM_BLOCKED_LIST = 2,
};
394
395static inline int dlm_lvb_is_empty(char *lvb)
396{
397 int i;
398 for (i=0; i<DLM_LVB_LEN; i++)
399 if (lvb[i])
400 return 0;
401 return 1;
402}
403
404static inline struct list_head *
405dlm_list_idx_to_ptr(struct dlm_lock_resource *res, enum dlm_lockres_list idx)
406{
407 struct list_head *ret = NULL;
408 if (idx == DLM_GRANTED_LIST)
409 ret = &res->granted;
410 else if (idx == DLM_CONVERTING_LIST)
411 ret = &res->converting;
412 else if (idx == DLM_BLOCKED_LIST)
413 ret = &res->blocked;
414 else
415 BUG();
416 return ret;
417}
418
419
420
421
/* Iterator over a node bitmap; see dlm_node_iter_init()/_next(). */
struct dlm_node_iter
{
	unsigned long node_map[BITS_TO_LONGS(O2NM_MAX_NODES)];
	int curnode;	/* last node returned, or -1 before the first call */
};
427
428
/*
 * o2net message types used by the dlm.  These values are part of the
 * wire protocol: never renumber or reuse existing entries.
 */
enum {
	DLM_MASTER_REQUEST_MSG = 500,
	DLM_UNUSED_MSG1 = 501,
	DLM_ASSERT_MASTER_MSG = 502,
	DLM_CREATE_LOCK_MSG = 503,
	DLM_CONVERT_LOCK_MSG = 504,
	DLM_PROXY_AST_MSG = 505,
	DLM_UNLOCK_LOCK_MSG = 506,
	DLM_DEREF_LOCKRES_MSG = 507,
	DLM_MIGRATE_REQUEST_MSG = 508,
	DLM_MIG_LOCKRES_MSG = 509,
	DLM_QUERY_JOIN_MSG = 510,
	DLM_ASSERT_JOINED_MSG = 511,
	DLM_CANCEL_JOIN_MSG = 512,
	DLM_EXIT_DOMAIN_MSG = 513,
	DLM_MASTER_REQUERY_MSG = 514,
	DLM_LOCK_REQUEST_MSG = 515,
	DLM_RECO_DATA_DONE_MSG = 516,
	DLM_BEGIN_RECO_MSG = 517,
	DLM_FINALIZE_RECO_MSG = 518,
	DLM_QUERY_REGION = 519,
	DLM_QUERY_NODEINFO = 520,
};
452
/* Per-node bookkeeping during recovery (on reco.node_data). */
struct dlm_reco_node_data
{
	int state;	/* DLM_RECO_NODE_DATA_* below */
	u8 node_num;
	struct list_head list;
};

/* States for dlm_reco_node_data.state. */
enum {
	DLM_RECO_NODE_DATA_DEAD = -1,
	DLM_RECO_NODE_DATA_INIT = 0,
	DLM_RECO_NODE_DATA_REQUESTING = 1,
	DLM_RECO_NODE_DATA_REQUESTED = 2,
	DLM_RECO_NODE_DATA_RECEIVING = 3,
	DLM_RECO_NODE_DATA_DONE = 4,
	DLM_RECO_NODE_DATA_FINALIZE_SENT = 5,
};


/* Possible responses to a master request. */
enum {
	DLM_MASTER_RESP_NO = 0,
	DLM_MASTER_RESP_YES = 1,
	DLM_MASTER_RESP_MAYBE = 2,
	DLM_MASTER_RESP_ERROR = 3,
};
477
478
/* Wire format: ask who masters (or wants to master) a resource. */
struct dlm_master_request
{
	u8 node_idx;
	u8 namelen;
	__be16 pad1;
	__be32 flags;

	u8 name[O2NM_MAX_NAME_LEN];
};

/* Response flags to an assert-master message. */
#define DLM_ASSERT_RESPONSE_REASSERT 0x00000001
#define DLM_ASSERT_RESPONSE_MASTERY_REF 0x00000002

/* Request flags in an assert-master message. */
#define DLM_ASSERT_MASTER_MLE_CLEANUP 0x00000001
#define DLM_ASSERT_MASTER_REQUERY 0x00000002
#define DLM_ASSERT_MASTER_FINISH_MIGRATION 0x00000004
/* Wire format: declare mastery of a resource. */
struct dlm_assert_master
{
	u8 node_idx;
	u8 namelen;
	__be16 pad1;
	__be32 flags;

	u8 name[O2NM_MAX_NAME_LEN];
};

#define DLM_MIGRATE_RESPONSE_MASTERY_REF 0x00000001

/* Wire format: request migration of a resource to a new master. */
struct dlm_migrate_request
{
	u8 master;
	u8 new_master;
	u8 namelen;
	u8 pad1;
	__be32 pad2;
	u8 name[O2NM_MAX_NAME_LEN];
};

/* Wire format: re-query the master of a resource. */
struct dlm_master_requery
{
	u8 pad1;
	u8 pad2;
	u8 node_idx;
	u8 namelen;
	__be32 pad3;
	u8 name[O2NM_MAX_NAME_LEN];
};

/* Flags for migratable-lockres (DLM_MIG_LOCKRES_MSG) messages. */
#define DLM_MRES_RECOVERY 0x01
#define DLM_MRES_MIGRATION 0x02
#define DLM_MRES_ALL_DONE 0x04
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561#define DLM_MAX_MIGRATABLE_LOCKS 240
562
563struct dlm_migratable_lockres
564{
565 u8 master;
566 u8 lockname_len;
567 u8 num_locks;
568 u8 flags;
569 __be32 total_locks;
570 __be64 mig_cookie;
571
572
573 u8 lockname[DLM_LOCKID_NAME_MAX];
574
575 u8 lvb[DLM_LVB_LEN];
576
577 struct dlm_migratable_lock ml[0];
578};
579#define DLM_MIG_LOCKRES_MAX_LEN \
580 (sizeof(struct dlm_migratable_lockres) + \
581 (sizeof(struct dlm_migratable_lock) * \
582 DLM_MAX_MIGRATABLE_LOCKS) )
583
584
585
586#define DLM_MIG_LOCKRES_RESERVED (NET_MAX_PAYLOAD_BYTES - \
587 DLM_MIG_LOCKRES_MAX_LEN)
588
/* Wire format: request creation of a new lock on a remote master. */
struct dlm_create_lock
{
	__be64 cookie;

	__be32 flags;
	u8 pad1;
	u8 node_idx;
	s8 requested_type;	/* requested lock mode */
	u8 namelen;

	u8 name[O2NM_MAX_NAME_LEN];
};
601
602struct dlm_convert_lock
603{
604 __be64 cookie;
605
606 __be32 flags;
607 u8 pad1;
608 u8 node_idx;
609 s8 requested_type;
610 u8 namelen;
611
612 u8 name[O2NM_MAX_NAME_LEN];
613
614 s8 lvb[0];
615};
616#define DLM_CONVERT_LOCK_MAX_LEN (sizeof(struct dlm_convert_lock)+DLM_LVB_LEN)
617
618struct dlm_unlock_lock
619{
620 __be64 cookie;
621
622 __be32 flags;
623 __be16 pad1;
624 u8 node_idx;
625 u8 namelen;
626
627 u8 name[O2NM_MAX_NAME_LEN];
628
629 s8 lvb[0];
630};
631#define DLM_UNLOCK_LOCK_MAX_LEN (sizeof(struct dlm_unlock_lock)+DLM_LVB_LEN)
632
633struct dlm_proxy_ast
634{
635 __be64 cookie;
636
637 __be32 flags;
638 u8 node_idx;
639 u8 type;
640 u8 blocked_type;
641 u8 namelen;
642
643 u8 name[O2NM_MAX_NAME_LEN];
644
645 s8 lvb[0];
646};
647#define DLM_PROXY_AST_MAX_LEN (sizeof(struct dlm_proxy_ast)+DLM_LVB_LEN)
648
/* Key identifying the dlm module in message registration. */
#define DLM_MOD_KEY (0x666c6172)
/* Possible answers to a domain join request. */
enum dlm_query_join_response_code {
	JOIN_DISALLOW = 0,
	JOIN_OK = 1,
	JOIN_OK_NO_MAP = 2,
	JOIN_PROTOCOL_MISMATCH = 3,
};

/* Join response: code plus protocol minor versions. */
struct dlm_query_join_packet {
	u8 code;	/* enum dlm_query_join_response_code */

	u8 dlm_minor;	/* dlm protocol minor version */

	u8 fs_minor;	/* fs locking protocol minor version */

	u8 reserved;
};

/* The join response travels as a single 32-bit value on the wire. */
union dlm_query_join_response {
	u32 intval;
	struct dlm_query_join_packet packet;
};
671
/* Wire format: ask a node for locks relating to @dead_node. */
struct dlm_lock_request
{
	u8 node_idx;
	u8 dead_node;
	__be16 pad1;
	__be32 pad2;
};

/* Wire format: signal that recovery data for @dead_node is complete. */
struct dlm_reco_data_done
{
	u8 node_idx;
	u8 dead_node;
	__be16 pad1;
	__be32 pad2;

	/* reserved lvb space; not otherwise referenced in this header -
	 * presumably unused, confirm before removing */
	u8 reco_lvb[DLM_LVB_LEN];
};

/* Wire format: announce the start of recovery for @dead_node. */
struct dlm_begin_reco
{
	u8 node_idx;
	u8 dead_node;
	__be16 pad1;
	__be32 pad2;
};
700
701
/* NOTE(review): BITS_PER_BYTE duplicates a definition available in
 * kernel headers; keep in sync if this file's includes change. */
#define BITS_PER_BYTE 8
#define BITS_TO_BYTES(bits) (((bits)+BITS_PER_BYTE-1)/BITS_PER_BYTE)

/* Wire format: request to join a domain. */
struct dlm_query_join_request
{
	u8 node_idx;
	u8 pad1[2];
	u8 name_len;
	struct dlm_protocol_version dlm_proto;	/* dlm protocol we speak */
	struct dlm_protocol_version fs_proto;	/* fs locking protocol we speak */
	u8 domain[O2NM_MAX_NAME_LEN];
	u8 node_map[BITS_TO_BYTES(O2NM_MAX_NODES)];	/* sender's view of nodes */
};
715
/* Wire format: a node announcing it has joined the domain. */
struct dlm_assert_joined
{
	u8 node_idx;
	u8 pad1[2];
	u8 name_len;
	u8 domain[O2NM_MAX_NAME_LEN];
};

/* Wire format: abort an in-flight join. */
struct dlm_cancel_join
{
	u8 node_idx;
	u8 pad1[2];
	u8 name_len;
	u8 domain[O2NM_MAX_NAME_LEN];
};

/* Wire format: compare heartbeat region configuration across nodes. */
struct dlm_query_region {
	u8 qr_node;
	u8 qr_numregions;
	u8 qr_namelen;
	u8 pad1;
	u8 qr_domain[O2NM_MAX_NAME_LEN];
	u8 qr_regions[O2HB_MAX_REGION_NAME_LEN * O2NM_MAX_REGIONS];
};

/* One node's identity within a dlm_query_nodeinfo message. */
struct dlm_node_info {
	u8 ni_nodenum;
	u8 pad1;
	u16 ni_ipv4_port;	/* NOTE(review): byte order not evident here - confirm */
	u32 ni_ipv4_address;
};

/* Wire format: compare cluster node configuration across nodes. */
struct dlm_query_nodeinfo {
	u8 qn_nodenum;
	u8 qn_numnodes;
	u8 qn_namelen;
	u8 pad1;
	u8 qn_domain[O2NM_MAX_NAME_LEN];
	struct dlm_node_info qn_nodes[O2NM_MAX_NODES];
};

/* Wire format: a node announcing it is leaving the domain. */
struct dlm_exit_domain
{
	u8 node_idx;
	u8 pad1[3];
};
762
/* Wire format: finish recovery of @dead_node. */
struct dlm_finalize_reco
{
	u8 node_idx;
	u8 dead_node;
	u8 flags;
	u8 pad1;
	__be32 pad2;
};

/* Wire format: drop this node's reference on a remote lockres. */
struct dlm_deref_lockres
{
	u32 pad1;
	u16 pad2;
	u8 node_idx;
	u8 namelen;

	u8 name[O2NM_MAX_NAME_LEN];
};
781
/*
 * Map a lockres's state flags to the dlm_status a caller should see.
 * The check order matters: recovery takes precedence over migration,
 * which takes precedence over an operation merely being in progress.
 * Caller must hold res->spinlock.
 */
static inline enum dlm_status
__dlm_lockres_state_to_status(struct dlm_lock_resource *res)
{
	enum dlm_status status = DLM_NORMAL;

	assert_spin_locked(&res->spinlock);

	if (res->state & DLM_LOCK_RES_RECOVERING)
		status = DLM_RECOVERING;
	else if (res->state & DLM_LOCK_RES_MIGRATING)
		status = DLM_MIGRATING;
	else if (res->state & DLM_LOCK_RES_IN_PROGRESS)
		status = DLM_FORWARD;

	return status;
}
798
799static inline u8 dlm_get_lock_cookie_node(u64 cookie)
800{
801 u8 ret;
802 cookie >>= 56;
803 ret = (u8)(cookie & 0xffULL);
804 return ret;
805}
806
807static inline unsigned long long dlm_get_lock_cookie_seq(u64 cookie)
808{
809 unsigned long long ret;
810 ret = ((unsigned long long)cookie) & 0x00ffffffffffffffULL;
811 return ret;
812}
813
814struct dlm_lock * dlm_new_lock(int type, u8 node, u64 cookie,
815 struct dlm_lockstatus *lksb);
816void dlm_lock_get(struct dlm_lock *lock);
817void dlm_lock_put(struct dlm_lock *lock);
818
819void dlm_lock_attach_lockres(struct dlm_lock *lock,
820 struct dlm_lock_resource *res);
821
822int dlm_create_lock_handler(struct o2net_msg *msg, u32 len, void *data,
823 void **ret_data);
824int dlm_convert_lock_handler(struct o2net_msg *msg, u32 len, void *data,
825 void **ret_data);
826int dlm_proxy_ast_handler(struct o2net_msg *msg, u32 len, void *data,
827 void **ret_data);
828
829void dlm_revert_pending_convert(struct dlm_lock_resource *res,
830 struct dlm_lock *lock);
831void dlm_revert_pending_lock(struct dlm_lock_resource *res,
832 struct dlm_lock *lock);
833
834int dlm_unlock_lock_handler(struct o2net_msg *msg, u32 len, void *data,
835 void **ret_data);
836void dlm_commit_pending_cancel(struct dlm_lock_resource *res,
837 struct dlm_lock *lock);
838void dlm_commit_pending_unlock(struct dlm_lock_resource *res,
839 struct dlm_lock *lock);
840
841int dlm_launch_thread(struct dlm_ctxt *dlm);
842void dlm_complete_thread(struct dlm_ctxt *dlm);
843int dlm_launch_recovery_thread(struct dlm_ctxt *dlm);
844void dlm_complete_recovery_thread(struct dlm_ctxt *dlm);
845void dlm_wait_for_recovery(struct dlm_ctxt *dlm);
846void dlm_kick_recovery_thread(struct dlm_ctxt *dlm);
847int dlm_is_node_dead(struct dlm_ctxt *dlm, u8 node);
848int dlm_wait_for_node_death(struct dlm_ctxt *dlm, u8 node, int timeout);
849int dlm_wait_for_node_recovery(struct dlm_ctxt *dlm, u8 node, int timeout);
850
851void dlm_put(struct dlm_ctxt *dlm);
852struct dlm_ctxt *dlm_grab(struct dlm_ctxt *dlm);
853int dlm_domain_fully_joined(struct dlm_ctxt *dlm);
854
855void __dlm_lockres_calc_usage(struct dlm_ctxt *dlm,
856 struct dlm_lock_resource *res);
857void dlm_lockres_calc_usage(struct dlm_ctxt *dlm,
858 struct dlm_lock_resource *res);
/* Take a reference on @res; release with dlm_lockres_put(). */
static inline void dlm_lockres_get(struct dlm_lock_resource *res)
{
	/* kref_get() requires the caller to already hold a valid
	 * reference (refcount must be nonzero). */
	kref_get(&res->refs);
}
865void dlm_lockres_put(struct dlm_lock_resource *res);
866void __dlm_unhash_lockres(struct dlm_lock_resource *res);
867void __dlm_insert_lockres(struct dlm_ctxt *dlm,
868 struct dlm_lock_resource *res);
869struct dlm_lock_resource * __dlm_lookup_lockres_full(struct dlm_ctxt *dlm,
870 const char *name,
871 unsigned int len,
872 unsigned int hash);
873struct dlm_lock_resource * __dlm_lookup_lockres(struct dlm_ctxt *dlm,
874 const char *name,
875 unsigned int len,
876 unsigned int hash);
877struct dlm_lock_resource * dlm_lookup_lockres(struct dlm_ctxt *dlm,
878 const char *name,
879 unsigned int len);
880
881int dlm_is_host_down(int errno);
882
883struct dlm_lock_resource * dlm_get_lock_resource(struct dlm_ctxt *dlm,
884 const char *lockid,
885 int namelen,
886 int flags);
887struct dlm_lock_resource *dlm_new_lockres(struct dlm_ctxt *dlm,
888 const char *name,
889 unsigned int namelen);
890
/* Wrappers record the call site; file/line are currently unused in the
 * helpers below (presumably kept for debug logging - confirm). */
#define dlm_lockres_set_refmap_bit(bit,res) \
	__dlm_lockres_set_refmap_bit(bit,res,__FILE__,__LINE__)
#define dlm_lockres_clear_refmap_bit(bit,res) \
	__dlm_lockres_clear_refmap_bit(bit,res,__FILE__,__LINE__)

/* Mark node @bit as holding a reference on @res in its refmap. */
static inline void __dlm_lockres_set_refmap_bit(int bit,
						struct dlm_lock_resource *res,
						const char *file,
						int line)
{
	set_bit(bit, res->refmap);
}
905
/* Clear node @bit from @res's refmap; @file/@line identify the call
 * site (currently unused here). */
static inline void __dlm_lockres_clear_refmap_bit(int bit,
						  struct dlm_lock_resource *res,
						  const char *file,
						  int line)
{
	clear_bit(bit, res->refmap);
}
915
916void __dlm_lockres_drop_inflight_ref(struct dlm_ctxt *dlm,
917 struct dlm_lock_resource *res,
918 const char *file,
919 int line);
920void __dlm_lockres_grab_inflight_ref(struct dlm_ctxt *dlm,
921 struct dlm_lock_resource *res,
922 int new_lockres,
923 const char *file,
924 int line);
925#define dlm_lockres_drop_inflight_ref(d,r) \
926 __dlm_lockres_drop_inflight_ref(d,r,__FILE__,__LINE__)
927#define dlm_lockres_grab_inflight_ref(d,r) \
928 __dlm_lockres_grab_inflight_ref(d,r,0,__FILE__,__LINE__)
929#define dlm_lockres_grab_inflight_ref_new(d,r) \
930 __dlm_lockres_grab_inflight_ref(d,r,1,__FILE__,__LINE__)
931
932void dlm_queue_ast(struct dlm_ctxt *dlm, struct dlm_lock *lock);
933void dlm_queue_bast(struct dlm_ctxt *dlm, struct dlm_lock *lock);
934void __dlm_queue_ast(struct dlm_ctxt *dlm, struct dlm_lock *lock);
935void __dlm_queue_bast(struct dlm_ctxt *dlm, struct dlm_lock *lock);
936void dlm_do_local_ast(struct dlm_ctxt *dlm,
937 struct dlm_lock_resource *res,
938 struct dlm_lock *lock);
939int dlm_do_remote_ast(struct dlm_ctxt *dlm,
940 struct dlm_lock_resource *res,
941 struct dlm_lock *lock);
942void dlm_do_local_bast(struct dlm_ctxt *dlm,
943 struct dlm_lock_resource *res,
944 struct dlm_lock *lock,
945 int blocked_type);
946int dlm_send_proxy_ast_msg(struct dlm_ctxt *dlm,
947 struct dlm_lock_resource *res,
948 struct dlm_lock *lock,
949 int msg_type,
950 int blocked_type, int flags);
/* Convenience wrapper: send a blocking AST (DLM_BAST) via
 * dlm_send_proxy_ast_msg() with no extra flags. */
static inline int dlm_send_proxy_bast(struct dlm_ctxt *dlm,
				      struct dlm_lock_resource *res,
				      struct dlm_lock *lock,
				      int blocked_type)
{
	return dlm_send_proxy_ast_msg(dlm, res, lock, DLM_BAST,
				      blocked_type, 0);
}
959
/* Convenience wrapper: send a regular AST (DLM_AST) via
 * dlm_send_proxy_ast_msg() with no blocked type. */
static inline int dlm_send_proxy_ast(struct dlm_ctxt *dlm,
				     struct dlm_lock_resource *res,
				     struct dlm_lock *lock,
				     int flags)
{
	return dlm_send_proxy_ast_msg(dlm, res, lock, DLM_AST,
				      0, flags);
}
968
969void dlm_print_one_lock_resource(struct dlm_lock_resource *res);
970void __dlm_print_one_lock_resource(struct dlm_lock_resource *res);
971
972u8 dlm_nm_this_node(struct dlm_ctxt *dlm);
973void dlm_kick_thread(struct dlm_ctxt *dlm, struct dlm_lock_resource *res);
974void __dlm_dirty_lockres(struct dlm_ctxt *dlm, struct dlm_lock_resource *res);
975
976
977int dlm_nm_init(struct dlm_ctxt *dlm);
978int dlm_heartbeat_init(struct dlm_ctxt *dlm);
979void dlm_hb_node_down_cb(struct o2nm_node *node, int idx, void *data);
980void dlm_hb_node_up_cb(struct o2nm_node *node, int idx, void *data);
981
982int dlm_empty_lockres(struct dlm_ctxt *dlm, struct dlm_lock_resource *res);
983int dlm_finish_migration(struct dlm_ctxt *dlm,
984 struct dlm_lock_resource *res,
985 u8 old_master);
986void dlm_lockres_release_ast(struct dlm_ctxt *dlm,
987 struct dlm_lock_resource *res);
988void __dlm_lockres_reserve_ast(struct dlm_lock_resource *res);
989
990int dlm_master_request_handler(struct o2net_msg *msg, u32 len, void *data,
991 void **ret_data);
992int dlm_assert_master_handler(struct o2net_msg *msg, u32 len, void *data,
993 void **ret_data);
994void dlm_assert_master_post_handler(int status, void *data, void *ret_data);
995int dlm_deref_lockres_handler(struct o2net_msg *msg, u32 len, void *data,
996 void **ret_data);
997int dlm_migrate_request_handler(struct o2net_msg *msg, u32 len, void *data,
998 void **ret_data);
999int dlm_mig_lockres_handler(struct o2net_msg *msg, u32 len, void *data,
1000 void **ret_data);
1001int dlm_master_requery_handler(struct o2net_msg *msg, u32 len, void *data,
1002 void **ret_data);
1003int dlm_request_all_locks_handler(struct o2net_msg *msg, u32 len, void *data,
1004 void **ret_data);
1005int dlm_reco_data_done_handler(struct o2net_msg *msg, u32 len, void *data,
1006 void **ret_data);
1007int dlm_begin_reco_handler(struct o2net_msg *msg, u32 len, void *data,
1008 void **ret_data);
1009int dlm_finalize_reco_handler(struct o2net_msg *msg, u32 len, void *data,
1010 void **ret_data);
1011int dlm_do_master_requery(struct dlm_ctxt *dlm, struct dlm_lock_resource *res,
1012 u8 nodenum, u8 *real_master);
1013
1014
1015int dlm_dispatch_assert_master(struct dlm_ctxt *dlm,
1016 struct dlm_lock_resource *res,
1017 int ignore_higher,
1018 u8 request_from,
1019 u32 flags);
1020
1021
1022int dlm_send_one_lockres(struct dlm_ctxt *dlm,
1023 struct dlm_lock_resource *res,
1024 struct dlm_migratable_lockres *mres,
1025 u8 send_to,
1026 u8 flags);
1027void dlm_move_lockres_to_recovery_list(struct dlm_ctxt *dlm,
1028 struct dlm_lock_resource *res);
1029
1030
1031void __dlm_wait_on_lockres_flags(struct dlm_lock_resource *res, int flags);
1032void __dlm_wait_on_lockres_flags_set(struct dlm_lock_resource *res, int flags);
1033
1034
/* Wait until @res is neither in-progress, recovering, nor migrating. */
static inline void __dlm_wait_on_lockres(struct dlm_lock_resource *res)
{
	__dlm_wait_on_lockres_flags(res, (DLM_LOCK_RES_IN_PROGRESS|
					  DLM_LOCK_RES_RECOVERING|
					  DLM_LOCK_RES_MIGRATING));
}
1041
1042void __dlm_unlink_mle(struct dlm_ctxt *dlm, struct dlm_master_list_entry *mle);
1043void __dlm_insert_mle(struct dlm_ctxt *dlm, struct dlm_master_list_entry *mle);
1044
1045
1046int dlm_init_master_caches(void);
1047void dlm_destroy_master_caches(void);
1048
1049int dlm_init_lock_cache(void);
1050void dlm_destroy_lock_cache(void);
1051
1052int dlm_init_mle_cache(void);
1053void dlm_destroy_mle_cache(void);
1054
1055void dlm_hb_event_notify_attached(struct dlm_ctxt *dlm, int idx, int node_up);
1056int dlm_drop_lockres_ref(struct dlm_ctxt *dlm,
1057 struct dlm_lock_resource *res);
1058void dlm_clean_master_list(struct dlm_ctxt *dlm,
1059 u8 dead_node);
1060void dlm_force_free_mles(struct dlm_ctxt *dlm);
1061int dlm_lock_basts_flushed(struct dlm_ctxt *dlm, struct dlm_lock *lock);
1062int __dlm_lockres_has_locks(struct dlm_lock_resource *res);
1063int __dlm_lockres_unused(struct dlm_lock_resource *res);
1064
1065static inline const char * dlm_lock_mode_name(int mode)
1066{
1067 switch (mode) {
1068 case LKM_EXMODE:
1069 return "EX";
1070 case LKM_PRMODE:
1071 return "PR";
1072 case LKM_NLMODE:
1073 return "NL";
1074 }
1075 return "UNKNOWN";
1076}
1077
1078
1079static inline int dlm_lock_compatible(int existing, int request)
1080{
1081
1082 if (request == LKM_NLMODE ||
1083 existing == LKM_NLMODE)
1084 return 1;
1085
1086
1087 if (request == LKM_EXMODE)
1088 return 0;
1089
1090
1091 if (existing == LKM_PRMODE)
1092 return 1;
1093
1094 return 0;
1095}
1096
1097static inline int dlm_lock_on_list(struct list_head *head,
1098 struct dlm_lock *lock)
1099{
1100 struct list_head *iter;
1101 struct dlm_lock *tmplock;
1102
1103 list_for_each(iter, head) {
1104 tmplock = list_entry(iter, struct dlm_lock, list);
1105 if (tmplock == lock)
1106 return 1;
1107 }
1108 return 0;
1109}
1110
1111
1112static inline enum dlm_status dlm_err_to_dlm_status(int err)
1113{
1114 enum dlm_status ret;
1115 if (err == -ENOMEM)
1116 ret = DLM_SYSERR;
1117 else if (err == -ETIMEDOUT || o2net_link_down(err, NULL))
1118 ret = DLM_NOLOCKMGR;
1119 else if (err == -EINVAL)
1120 ret = DLM_BADPARAM;
1121 else if (err == -ENAMETOOLONG)
1122 ret = DLM_IVBUFLEN;
1123 else
1124 ret = DLM_BADARGS;
1125 return ret;
1126}
1127
1128
/* Prime @iter to walk @map; the first dlm_node_iter_next() call then
 * returns the lowest set node bit. */
static inline void dlm_node_iter_init(unsigned long *map,
				      struct dlm_node_iter *iter)
{
	memcpy(iter->node_map, map, sizeof(iter->node_map));
	iter->curnode = -1;
}
1135
1136static inline int dlm_node_iter_next(struct dlm_node_iter *iter)
1137{
1138 int bit;
1139 bit = find_next_bit(iter->node_map, O2NM_MAX_NODES, iter->curnode+1);
1140 if (bit >= O2NM_MAX_NODES) {
1141 iter->curnode = O2NM_MAX_NODES;
1142 return -ENOENT;
1143 }
1144 iter->curnode = bit;
1145 return bit;
1146}
1147
/* Unconditionally record @owner as the master of @res.
 * Caller must hold res->spinlock. */
static inline void dlm_set_lockres_owner(struct dlm_ctxt *dlm,
					 struct dlm_lock_resource *res,
					 u8 owner)
{
	assert_spin_locked(&res->spinlock);

	res->owner = owner;
}
1156
1157static inline void dlm_change_lockres_owner(struct dlm_ctxt *dlm,
1158 struct dlm_lock_resource *res,
1159 u8 owner)
1160{
1161 assert_spin_locked(&res->spinlock);
1162
1163 if (owner != res->owner)
1164 dlm_set_lockres_owner(dlm, res, owner);
1165}
1166
1167#endif
1168