1
2
3
4
5
6
7
8
9
10#ifndef DLMCOMMON_H
11#define DLMCOMMON_H
12
13#include <linux/kref.h>
14
/* Heartbeat callback priorities (node-down has the higher value). */
#define DLM_HB_NODE_DOWN_PRI     (0xf000000)
#define DLM_HB_NODE_UP_PRI       (0x8000000)

/* Max length of a lockid name as carried in migration payloads. */
#define DLM_LOCKID_NAME_MAX            32

#define DLM_DOMAIN_NAME_MAX_LEN    255
/* Sentinel owner value: no known master for the resource. */
#define DLM_LOCK_RES_OWNER_UNKNOWN     O2NM_MAX_NODES
#define DLM_THREAD_SHUFFLE_INTERVAL    5     /* dlm thread shuffle period */
#define DLM_THREAD_MS                  200   /* dlm thread wakeup, flush and sleep interval (ms) */

/*
 * Size of the lockres hash in bytes; when it exceeds one page, the
 * bucket array is split across DLM_HASH_PAGES separately allocated pages
 * (see dlm_lockres_hash()/dlm_master_hash() below).
 */
#define DLM_HASH_SIZE_DEFAULT	(1 << 17)
#if DLM_HASH_SIZE_DEFAULT < PAGE_SIZE
# define DLM_HASH_PAGES		1
#else
# define DLM_HASH_PAGES		(DLM_HASH_SIZE_DEFAULT / PAGE_SIZE)
#endif
#define DLM_BUCKETS_PER_PAGE	(PAGE_SIZE / sizeof(struct hlist_head))
#define DLM_HASH_BUCKETS	(DLM_HASH_PAGES * DLM_BUCKETS_PER_PAGE)

/* Hash of a lockid name; used to pick a bucket in the hashes above. */
#define dlm_lockid_hash(_n, _l) full_name_hash(NULL, _n, _l)
36
/*
 * Kinds of master list entries (MLEs); DLM_MLE_NUM_TYPES sizes the
 * per-type statistics arrays in struct dlm_ctxt.
 */
enum dlm_mle_type {
	DLM_MLE_BLOCK = 0,	/* waiting on another node's mastery */
	DLM_MLE_MASTER = 1,	/* this node is mastering the resource */
	DLM_MLE_MIGRATION = 2,	/* resource is being migrated */
	DLM_MLE_NUM_TYPES = 3,
};
43
/*
 * A master list entry (MLE) tracks an in-flight mastery operation for one
 * lock resource (see enum dlm_mle_type).  Hashed in dlm->master_hash by
 * mnamehash; refcounted via mle_refs.
 */
struct dlm_master_list_entry {
	struct hlist_node master_hash_node;	/* dlm->master_hash linkage */
	struct list_head hb_events;		/* dlm->mle_hb_events linkage */
	struct dlm_ctxt *dlm;			/* owning domain */
	spinlock_t spinlock;
	wait_queue_head_t wq;
	atomic_t woken;
	struct kref mle_refs;
	int inuse;
	/* node bitmaps used while negotiating mastery */
	unsigned long maybe_map[BITS_TO_LONGS(O2NM_MAX_NODES)];
	unsigned long vote_map[BITS_TO_LONGS(O2NM_MAX_NODES)];
	unsigned long response_map[BITS_TO_LONGS(O2NM_MAX_NODES)];
	unsigned long node_map[BITS_TO_LONGS(O2NM_MAX_NODES)];
	u8 master;				/* current master node */
	u8 new_master;				/* migration target */
	enum dlm_mle_type type;
	/* heartbeat callbacks so the MLE reacts to node up/down */
	struct o2hb_callback_func mle_hb_up;
	struct o2hb_callback_func mle_hb_down;
	struct dlm_lock_resource *mleres;
	unsigned char mname[DLM_LOCKID_NAME_MAX];
	unsigned int mnamelen;
	unsigned int mnamehash;
};
67
/* Kind of AST delivery (see dlm_send_proxy_ast_msg / dlm_queue_ast). */
enum dlm_ast_type {
	DLM_AST = 0,		/* grant notification */
	DLM_BAST = 1,		/* blocking notification */
	DLM_ASTUNLOCK = 2,
};
73
74
/* Set of LKM_* flags callers are allowed to pass into the dlm. */
#define LKM_VALID_FLAGS (LKM_VALBLK | LKM_CONVERT | LKM_UNLOCK | \
			 LKM_CANCEL | LKM_INVVALBLK | LKM_FORCE | \
			 LKM_RECOVERY | LKM_LOCAL | LKM_NOQUEUE)
78
/* Name of the special per-domain recovery lock. */
#define DLM_RECOVERY_LOCK_NAME		"$RECOVERY"
#define DLM_RECOVERY_LOCK_NAME_LEN	9

/*
 * Return 1 if (lock_name, name_len) names the special $RECOVERY lock,
 * 0 otherwise.
 */
static inline int dlm_is_recovery_lock(const char *lock_name, int name_len)
{
	return (name_len == DLM_RECOVERY_LOCK_NAME_LEN) &&
	       (memcmp(lock_name, DLM_RECOVERY_LOCK_NAME, name_len) == 0);
}
89
/* Recovery state flags (dlm_recovery_ctxt.state). */
#define DLM_RECO_STATE_ACTIVE    0x0001
#define DLM_RECO_STATE_FINALIZE  0x0002

/* Per-domain recovery bookkeeping, embedded in struct dlm_ctxt as 'reco'. */
struct dlm_recovery_ctxt
{
	struct list_head resources;	/* lockres being recovered */
	struct list_head node_data;	/* struct dlm_reco_node_data list */
	u8  new_master;			/* node coordinating this recovery */
	u8  dead_node;			/* node being recovered */
	u16 state;			/* DLM_RECO_STATE_* */
	unsigned long node_map[BITS_TO_LONGS(O2NM_MAX_NODES)];
	wait_queue_head_t event;
};
103
/* Lifecycle of a dlm domain (struct dlm_ctxt.dlm_state). */
enum dlm_ctxt_state {
	DLM_CTXT_NEW = 0,
	DLM_CTXT_JOINED = 1,
	DLM_CTXT_IN_SHUTDOWN = 2,
	DLM_CTXT_LEAVING = 3,
};
110
/*
 * One dlm domain.  All per-domain state hangs off this structure: the
 * lockres and MLE hashes, worker threads, node bitmaps and recovery state.
 */
struct dlm_ctxt
{
	struct list_head list;		/* global domain list linkage */
	struct hlist_head **lockres_hash; /* DLM_HASH_PAGES pages of buckets */
	struct list_head dirty_list;	/* lockres flagged DLM_LOCK_RES_DIRTY */
	struct list_head purge_list;	/* unused lockres awaiting purge */
	struct list_head pending_asts;
	struct list_head pending_basts;
	struct list_head tracking_list;	/* every lockres, for debugging */
	unsigned int purge_count;
	spinlock_t spinlock;
	spinlock_t ast_lock;		/* guards pending_asts/pending_basts */
	spinlock_t track_lock;		/* guards tracking_list */
	char *name;			/* domain name */
	u8 node_num;			/* this node's number */
	u32 key;
	u8  joining_node;		/* see __dlm_set_joining_node() */
	u8 migrate_done;
	wait_queue_head_t dlm_join_events;
	/* node bitmaps: heartbeat-live, joined, exiting, needing recovery */
	unsigned long live_nodes_map[BITS_TO_LONGS(O2NM_MAX_NODES)];
	unsigned long domain_map[BITS_TO_LONGS(O2NM_MAX_NODES)];
	unsigned long exit_domain_map[BITS_TO_LONGS(O2NM_MAX_NODES)];
	unsigned long recovery_map[BITS_TO_LONGS(O2NM_MAX_NODES)];
	struct dlm_recovery_ctxt reco;	/* in-progress recovery state */
	spinlock_t master_lock;		/* guards master_hash / MLEs */
	struct hlist_head **master_hash; /* MLE hash, paged like lockres_hash */
	struct list_head mle_hb_events;	/* MLEs interested in hb events */

	/* statistics; one slot per enum dlm_mle_type */
	atomic_t mle_tot_count[DLM_MLE_NUM_TYPES];
	atomic_t mle_cur_count[DLM_MLE_NUM_TYPES];
	atomic_t res_tot_count;
	atomic_t res_cur_count;

	struct dlm_debug_ctxt *dlm_debug_ctxt;
	struct dentry *dlm_debugfs_subroot;

	/* domain refcount; taken/dropped via dlm_grab()/dlm_put() */
	struct kref dlm_refs;
	enum dlm_ctxt_state dlm_state;
	unsigned int num_joins;

	struct o2hb_callback_func dlm_hb_up;
	struct o2hb_callback_func dlm_hb_down;
	struct task_struct *dlm_thread_task;
	struct task_struct *dlm_reco_thread_task;
	struct workqueue_struct *dlm_worker;	/* runs dispatched_work */
	wait_queue_head_t dlm_thread_wq;
	wait_queue_head_t dlm_reco_thread_wq;
	wait_queue_head_t ast_wq;
	wait_queue_head_t migration_wq;

	struct work_struct dispatched_work;
	struct list_head work_list;	/* queued struct dlm_work_item */
	spinlock_t work_lock;		/* guards work_list */
	struct list_head dlm_domain_handlers;
	struct list_head dlm_eviction_callbacks;

	/* negotiated protocol versions (filesystem and dlm locking) */
	struct dlm_protocol_version fs_locking_proto;
	struct dlm_protocol_version dlm_locking_proto;
};
175
176static inline struct hlist_head *dlm_lockres_hash(struct dlm_ctxt *dlm, unsigned i)
177{
178 return dlm->lockres_hash[(i / DLM_BUCKETS_PER_PAGE) % DLM_HASH_PAGES] + (i % DLM_BUCKETS_PER_PAGE);
179}
180
181static inline struct hlist_head *dlm_master_hash(struct dlm_ctxt *dlm,
182 unsigned i)
183{
184 return dlm->master_hash[(i / DLM_BUCKETS_PER_PAGE) % DLM_HASH_PAGES] +
185 (i % DLM_BUCKETS_PER_PAGE);
186}
187
188
189
190
191
/* Worker entry: drains dlm->work_list (scheduled via dispatched_work). */
void dlm_dispatch_work(struct work_struct *work);

struct dlm_lock_resource;
struct dlm_work_item;

/* Callback type run by dlm_dispatch_work for each queued work item. */
typedef void (dlm_workfunc_t)(struct dlm_work_item *, void *);
198
/* Per-work-item payloads; exactly one member of dlm_work_item.u is used. */

struct dlm_request_all_locks_priv
{
	u8 reco_master;
	u8 dead_node;
};

struct dlm_mig_lockres_priv
{
	struct dlm_lock_resource *lockres;
	u8 real_master;
	u8 extra_ref;
};

struct dlm_assert_master_priv
{
	struct dlm_lock_resource *lockres;
	u8 request_from;
	u32 flags;
	unsigned ignore_higher:1;
};

struct dlm_deref_lockres_priv
{
	struct dlm_lock_resource *deref_res;
	u8 deref_node;
};

/* A unit of deferred work, queued on dlm->work_list. */
struct dlm_work_item
{
	struct list_head list;		/* dlm->work_list linkage */
	dlm_workfunc_t *func;		/* handler to run */
	struct dlm_ctxt *dlm;
	void *data;			/* opaque handler argument */
	union {
		struct dlm_request_all_locks_priv ral;
		struct dlm_mig_lockres_priv ml;
		struct dlm_assert_master_priv am;
		struct dlm_deref_lockres_priv dl;
	} u;
};
239
240static inline void dlm_init_work_item(struct dlm_ctxt *dlm,
241 struct dlm_work_item *i,
242 dlm_workfunc_t *f, void *data)
243{
244 memset(i, 0, sizeof(*i));
245 i->func = f;
246 INIT_LIST_HEAD(&i->list);
247 i->data = data;
248 i->dlm = dlm;
249}
250
251
252
/*
 * Record which node is currently joining the domain and wake anyone
 * waiting on dlm_join_events.  Caller must hold dlm->spinlock.
 */
static inline void __dlm_set_joining_node(struct dlm_ctxt *dlm,
					  u8 node)
{
	assert_spin_locked(&dlm->spinlock);

	dlm->joining_node = node;
	wake_up(&dlm->dlm_join_events);
}
261
/* Lock resource state flags (struct dlm_lock_resource.state). */
#define DLM_LOCK_RES_UNINITED             0x00000001
#define DLM_LOCK_RES_RECOVERING           0x00000002
#define DLM_LOCK_RES_READY                0x00000004
#define DLM_LOCK_RES_DIRTY                0x00000008
#define DLM_LOCK_RES_IN_PROGRESS          0x00000010
#define DLM_LOCK_RES_MIGRATING            0x00000020
#define DLM_LOCK_RES_DROPPING_REF         0x00000040
#define DLM_LOCK_RES_BLOCK_DIRTY          0x00001000
#define DLM_LOCK_RES_SETREF_INPROG        0x00002000
#define DLM_LOCK_RES_RECOVERY_WAITING     0x00004000

/* Max wait for a dead node, in ms (see dlm_wait_for_node_death). */
#define DLM_NODE_DEATH_WAIT_MAX (5 * 1000)

/* How long an unused lockres sits on the purge list before purging (ms). */
#define DLM_PURGE_INTERVAL_MS   (8 * 1000)
277
/*
 * One named lock resource.  Refcounted via 'refs'; hashed in
 * dlm->lockres_hash by lockname.  Locks against it live on exactly one
 * of the granted/converting/blocked queues (enum dlm_lockres_list).
 */
struct dlm_lock_resource
{
	/* WARNING: Please see the comment in dlm_init_lockres before
	 * adding fields here. */
	struct hlist_node hash_node;	/* dlm->lockres_hash linkage */
	struct qstr lockname;
	struct kref refs;

	/* please keep these next 3 in this order
	 * some funcs want to iterate over all lists */
	struct list_head granted;
	struct list_head converting;
	struct list_head blocked;
	struct list_head purge;		/* dlm->purge_list linkage */

	/* dlm thread / recovery list linkage */
	struct list_head dirty;		/* dlm->dirty_list */
	struct list_head recovering;	/* dlm->reco.resources */

	/* dlm->track_lock protected */
	struct list_head tracking;	/* dlm->tracking_list */

	/* last time the resource was used; drives purging
	 * NOTE(review): units (jiffies?) not visible here -- confirm in
	 * dlmthread.c before relying on them */
	unsigned long    last_used;

	struct dlm_ctxt *dlm;		/* owning domain */

	unsigned migration_pending:1;
	atomic_t asts_reserved;		/* see __dlm_lockres_reserve_ast() */
	spinlock_t spinlock;		/* guards state, owner and queues */
	wait_queue_head_t wq;
	u8  owner;			/* master node, or DLM_LOCK_RES_OWNER_UNKNOWN */
	u16 state;			/* DLM_LOCK_RES_* flags */
	char lvb[DLM_LVB_LEN];		/* lock value block */
	unsigned int inflight_locks;
	unsigned int inflight_assert_workers;
	unsigned long refmap[BITS_TO_LONGS(O2NM_MAX_NODES)]; /* nodes holding a ref */
};
324
/*
 * On-wire description of one lock, embedded in dlm_migratable_lockres.
 * Multi-byte fields are big-endian (__be*); keep the layout stable.
 */
struct dlm_migratable_lock
{
	__be64 cookie;

	/* these 3 are just padding for the in-memory structure, but
	 * list and flags are actually used by the sending side */
	__be16 pad1;
	u8 list;	/* queue index: enum dlm_lockres_list */
	u8 flags;

	s8 type;		/* requested lock mode */
	s8 convert_type;
	s8 highest_blocked;
	u8 node;		/* node holding the lock */
};
340
/*
 * One lock held or requested against a lock resource.  Refcounted via
 * lock_refs; lives on one of the lockres queues through 'list'.
 */
struct dlm_lock
{
	struct dlm_migratable_lock ml;	/* wire-format core (cookie, mode, node) */

	struct list_head list;		/* granted/converting/blocked linkage */
	struct list_head ast_list;	/* dlm->pending_asts linkage */
	struct list_head bast_list;	/* dlm->pending_basts linkage */
	struct dlm_lock_resource *lockres;
	spinlock_t spinlock;
	struct kref lock_refs;

	/* ast / bast callbacks and their argument */
	dlm_astlockfunc_t *ast;
	dlm_bastlockfunc_t *bast;
	void *astdata;
	struct dlm_lockstatus *lksb;
	unsigned ast_pending:1,
		 bast_pending:1,
		 convert_pending:1,
		 lock_pending:1,
		 cancel_pending:1,
		 unlock_pending:1,
		 lksb_kernel_allocated:1;	/* lksb owned (freed) by the dlm */
};
365
/* Index of the three per-lockres queues; see dlm_list_idx_to_ptr(). */
enum dlm_lockres_list {
	DLM_GRANTED_LIST = 0,
	DLM_CONVERTING_LIST = 1,
	DLM_BLOCKED_LIST = 2,
};
371
372static inline int dlm_lvb_is_empty(char *lvb)
373{
374 int i;
375 for (i=0; i<DLM_LVB_LEN; i++)
376 if (lvb[i])
377 return 0;
378 return 1;
379}
380
381static inline char *dlm_list_in_text(enum dlm_lockres_list idx)
382{
383 if (idx == DLM_GRANTED_LIST)
384 return "granted";
385 else if (idx == DLM_CONVERTING_LIST)
386 return "converting";
387 else if (idx == DLM_BLOCKED_LIST)
388 return "blocked";
389 else
390 return "unknown";
391}
392
393static inline struct list_head *
394dlm_list_idx_to_ptr(struct dlm_lock_resource *res, enum dlm_lockres_list idx)
395{
396 struct list_head *ret = NULL;
397 if (idx == DLM_GRANTED_LIST)
398 ret = &res->granted;
399 else if (idx == DLM_CONVERTING_LIST)
400 ret = &res->converting;
401 else if (idx == DLM_BLOCKED_LIST)
402 ret = &res->blocked;
403 else
404 BUG();
405 return ret;
406}
407
408
409
410
/* Iterator over a node bitmap; see dlm_node_iter_init()/_next() below. */
struct dlm_node_iter
{
	unsigned long node_map[BITS_TO_LONGS(O2NM_MAX_NODES)];
	int curnode;	/* last node returned; -1 before first _next() */
};
416
417
/* o2net message types handled by the dlm; values are part of the wire
 * protocol -- never renumber, only append. */
enum {
	DLM_MASTER_REQUEST_MSG		= 500,
	DLM_UNUSED_MSG1			= 501,	/* retired; keep the slot */
	DLM_ASSERT_MASTER_MSG		= 502,
	DLM_CREATE_LOCK_MSG		= 503,
	DLM_CONVERT_LOCK_MSG		= 504,
	DLM_PROXY_AST_MSG		= 505,
	DLM_UNLOCK_LOCK_MSG		= 506,
	DLM_DEREF_LOCKRES_MSG		= 507,
	DLM_MIGRATE_REQUEST_MSG		= 508,
	DLM_MIG_LOCKRES_MSG		= 509,
	DLM_QUERY_JOIN_MSG		= 510,
	DLM_ASSERT_JOINED_MSG		= 511,
	DLM_CANCEL_JOIN_MSG		= 512,
	DLM_EXIT_DOMAIN_MSG		= 513,
	DLM_MASTER_REQUERY_MSG		= 514,
	DLM_LOCK_REQUEST_MSG		= 515,
	DLM_RECO_DATA_DONE_MSG		= 516,
	DLM_BEGIN_RECO_MSG		= 517,
	DLM_FINALIZE_RECO_MSG		= 518,
	DLM_QUERY_REGION		= 519,
	DLM_QUERY_NODEINFO		= 520,
	DLM_BEGIN_EXIT_DOMAIN_MSG	= 521,
	DLM_DEREF_LOCKRES_DONE		= 522,
};
443
/* Per-node progress record during recovery (dlm->reco.node_data list). */
struct dlm_reco_node_data
{
	int state;		/* DLM_RECO_NODE_DATA_* below */
	u8 node_num;
	struct list_head list;
};

/* States a node moves through while its locks are being recovered. */
enum {
	DLM_RECO_NODE_DATA_DEAD = -1,
	DLM_RECO_NODE_DATA_INIT = 0,
	DLM_RECO_NODE_DATA_REQUESTING = 1,
	DLM_RECO_NODE_DATA_REQUESTED = 2,
	DLM_RECO_NODE_DATA_RECEIVING = 3,
	DLM_RECO_NODE_DATA_DONE = 4,
	DLM_RECO_NODE_DATA_FINALIZE_SENT = 5,
};

/* Possible answers to a DLM_MASTER_REQUEST_MSG. */
enum {
	DLM_MASTER_RESP_NO = 0,
	DLM_MASTER_RESP_YES = 1,
	DLM_MASTER_RESP_MAYBE = 2,
	DLM_MASTER_RESP_ERROR = 3,
};
468
469
/* Wire payload of DLM_MASTER_REQUEST_MSG. */
struct dlm_master_request
{
	u8 node_idx;	/* sender */
	u8 namelen;
	__be16 pad1;
	__be32 flags;

	u8 name[O2NM_MAX_NAME_LEN];	/* lockid; only namelen bytes valid */
};

/* Bits returned by the assert-master handler to the sender. */
#define DLM_ASSERT_RESPONSE_REASSERT       0x00000001
#define DLM_ASSERT_RESPONSE_MASTERY_REF    0x00000002

/* Flags carried in dlm_assert_master.flags. */
#define DLM_ASSERT_MASTER_MLE_CLEANUP      0x00000001
#define DLM_ASSERT_MASTER_REQUERY          0x00000002
#define DLM_ASSERT_MASTER_FINISH_MIGRATION 0x00000004

/* Wire payload of DLM_ASSERT_MASTER_MSG. */
struct dlm_assert_master
{
	u8 node_idx;	/* asserting node */
	u8 namelen;
	__be16 pad1;
	__be32 flags;	/* DLM_ASSERT_MASTER_* */

	u8 name[O2NM_MAX_NAME_LEN];
};
495
/* Response bit for DLM_MIGRATE_REQUEST_MSG. */
#define DLM_MIGRATE_RESPONSE_MASTERY_REF   0x00000001

/* Wire payload of DLM_MIGRATE_REQUEST_MSG. */
struct dlm_migrate_request
{
	u8 master;	/* current master of the lockres */
	u8 new_master;	/* migration target */
	u8 namelen;
	u8 pad1;
	__be32 pad2;
	u8 name[O2NM_MAX_NAME_LEN];
};

/* Wire payload of DLM_MASTER_REQUERY_MSG. */
struct dlm_master_requery
{
	u8 pad1;
	u8 pad2;
	u8 node_idx;	/* sender */
	u8 namelen;
	__be32 pad3;
	u8 name[O2NM_MAX_NAME_LEN];
};
517
/* Flags in dlm_migratable_lockres.flags: why the lockres is being sent. */
#define DLM_MRES_RECOVERY   0x01
#define DLM_MRES_MIGRATION  0x02
#define DLM_MRES_ALL_DONE   0x04
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552#define DLM_MAX_MIGRATABLE_LOCKS 240
553
554struct dlm_migratable_lockres
555{
556 u8 master;
557 u8 lockname_len;
558 u8 num_locks;
559 u8 flags;
560 __be32 total_locks;
561 __be64 mig_cookie;
562
563
564 u8 lockname[DLM_LOCKID_NAME_MAX];
565
566 u8 lvb[DLM_LVB_LEN];
567
568 struct dlm_migratable_lock ml[0];
569};
570#define DLM_MIG_LOCKRES_MAX_LEN \
571 (sizeof(struct dlm_migratable_lockres) + \
572 (sizeof(struct dlm_migratable_lock) * \
573 DLM_MAX_MIGRATABLE_LOCKS) )
574
575
576
577#define DLM_MIG_LOCKRES_RESERVED (O2NET_MAX_PAYLOAD_BYTES - \
578 DLM_MIG_LOCKRES_MAX_LEN)
579
/* Wire payload of DLM_CREATE_LOCK_MSG. */
struct dlm_create_lock
{
	__be64 cookie;

	__be32 flags;		/* LKM_* flags */
	u8 pad1;
	u8 node_idx;		/* requesting node */
	s8 requested_type;	/* requested lock mode */
	u8 namelen;

	u8 name[O2NM_MAX_NAME_LEN];	/* lockid; only namelen bytes valid */
};
592
593struct dlm_convert_lock
594{
595 __be64 cookie;
596
597 __be32 flags;
598 u8 pad1;
599 u8 node_idx;
600 s8 requested_type;
601 u8 namelen;
602
603 u8 name[O2NM_MAX_NAME_LEN];
604
605 s8 lvb[0];
606};
607#define DLM_CONVERT_LOCK_MAX_LEN (sizeof(struct dlm_convert_lock)+DLM_LVB_LEN)
608
609struct dlm_unlock_lock
610{
611 __be64 cookie;
612
613 __be32 flags;
614 __be16 pad1;
615 u8 node_idx;
616 u8 namelen;
617
618 u8 name[O2NM_MAX_NAME_LEN];
619
620 s8 lvb[0];
621};
622#define DLM_UNLOCK_LOCK_MAX_LEN (sizeof(struct dlm_unlock_lock)+DLM_LVB_LEN)
623
624struct dlm_proxy_ast
625{
626 __be64 cookie;
627
628 __be32 flags;
629 u8 node_idx;
630 u8 type;
631 u8 blocked_type;
632 u8 namelen;
633
634 u8 name[O2NM_MAX_NAME_LEN];
635
636 s8 lvb[0];
637};
638#define DLM_PROXY_AST_MAX_LEN (sizeof(struct dlm_proxy_ast)+DLM_LVB_LEN)
639
#define DLM_MOD_KEY (0x666c6172)

/* Answers to a DLM_QUERY_JOIN_MSG. */
enum dlm_query_join_response_code {
	JOIN_DISALLOW = 0,
	JOIN_OK = 1,
	JOIN_OK_NO_MAP = 2,
	JOIN_PROTOCOL_MISMATCH = 3,
};

/* Join response fields; packed into a single 32-bit value on the wire
 * via union dlm_query_join_response below. */
struct dlm_query_join_packet {
	u8 code;	/* enum dlm_query_join_response_code */

	u8 dlm_minor;	/* minor version of the accepted dlm protocol */

	u8 fs_minor;	/* minor version of the accepted fs protocol */

	u8 reserved;
};

union dlm_query_join_response {
	__be32 intval;		/* wire representation */
	struct dlm_query_join_packet packet;
};
662
/* Wire payload of DLM_LOCK_REQUEST_MSG (recovery: request a dead node's
 * locks from another node). */
struct dlm_lock_request
{
	u8 node_idx;	/* sender */
	u8 dead_node;
	__be16 pad1;
	__be32 pad2;
};

/* Wire payload of DLM_RECO_DATA_DONE_MSG. */
struct dlm_reco_data_done
{
	u8 node_idx;	/* sender */
	u8 dead_node;
	__be16 pad1;
	__be32 pad2;

	/* unused for now */
	/* eventually the lvb of the recovery lock will be sent along */
	u8 reco_lvb[DLM_LVB_LEN];
};

/* Wire payload of DLM_BEGIN_RECO_MSG. */
struct dlm_begin_reco
{
	u8 node_idx;	/* recovery master */
	u8 dead_node;
	__be16 pad1;
	__be32 pad2;
};
691
692
#define BITS_PER_BYTE 8
#define BITS_TO_BYTES(bits) (((bits)+BITS_PER_BYTE-1)/BITS_PER_BYTE)

/* Wire payload of DLM_QUERY_JOIN_MSG: a node asks to join a domain,
 * advertising the protocol versions it speaks. */
struct dlm_query_join_request
{
	u8 node_idx;	/* joining node */
	u8 pad1[2];
	u8 name_len;
	struct dlm_protocol_version dlm_proto;
	struct dlm_protocol_version fs_proto;
	u8 domain[O2NM_MAX_NAME_LEN];	/* only name_len bytes valid */
	u8 node_map[BITS_TO_BYTES(O2NM_MAX_NODES)];
};
706
/* Wire payload of DLM_ASSERT_JOINED_MSG. */
struct dlm_assert_joined
{
	u8 node_idx;	/* node that has joined */
	u8 pad1[2];
	u8 name_len;
	u8 domain[O2NM_MAX_NAME_LEN];	/* only name_len bytes valid */
};

/* Wire payload of DLM_CANCEL_JOIN_MSG. */
struct dlm_cancel_join
{
	u8 node_idx;	/* node aborting its join */
	u8 pad1[2];
	u8 name_len;
	u8 domain[O2NM_MAX_NAME_LEN];
};

/* Wire payload of DLM_QUERY_REGION: compare heartbeat regions. */
struct dlm_query_region {
	u8 qr_node;
	u8 qr_numregions;
	u8 qr_namelen;
	u8 pad1;
	u8 qr_domain[O2NM_MAX_NAME_LEN];
	u8 qr_regions[O2HB_MAX_REGION_NAME_LEN * O2NM_MAX_REGIONS];
};
731
/* One node's identity as carried in a DLM_QUERY_NODEINFO message. */
struct dlm_node_info {
	u8 ni_nodenum;
	u8 pad1;
	__be16 ni_ipv4_port;
	__be32 ni_ipv4_address;
};

/* Wire payload of DLM_QUERY_NODEINFO: compare cluster membership. */
struct dlm_query_nodeinfo {
	u8 qn_nodenum;
	u8 qn_numnodes;		/* valid entries in qn_nodes */
	u8 qn_namelen;
	u8 pad1;
	u8 qn_domain[O2NM_MAX_NAME_LEN];
	struct dlm_node_info qn_nodes[O2NM_MAX_NODES];
};

/* Wire payload of DLM_EXIT_DOMAIN_MSG / DLM_BEGIN_EXIT_DOMAIN_MSG. */
struct dlm_exit_domain
{
	u8 node_idx;	/* departing node */
	u8 pad1[3];
};

/* Wire payload of DLM_FINALIZE_RECO_MSG. */
struct dlm_finalize_reco
{
	u8 node_idx;	/* recovery master */
	u8 dead_node;
	u8 flags;
	u8 pad1;
	__be32 pad2;
};
762
/* Wire payload of DLM_DEREF_LOCKRES_MSG: drop the sender's bit from the
 * master's refmap for this lockres. */
struct dlm_deref_lockres
{
	u32 pad1;
	u16 pad2;
	u8 node_idx;	/* node dropping its reference */
	u8 namelen;

	u8 name[O2NM_MAX_NAME_LEN];	/* only namelen bytes valid */
};

/* Possible replies to a deref request. */
enum {
	DLM_DEREF_RESPONSE_DONE = 0,
	DLM_DEREF_RESPONSE_INPROG = 1,	/* deferred; expect a _DONE message */
};

/* Wire payload of DLM_DEREF_LOCKRES_DONE. */
struct dlm_deref_lockres_done {
	u32 pad1;
	u16 pad2;
	u8 node_idx;
	u8 namelen;

	u8 name[O2NM_MAX_NAME_LEN];
};
786
/*
 * Map a lockres's state flags to the dlm_status a caller should see.
 * Check order is significant: recovery outranks migration outranks
 * in-progress.  Caller must hold res->spinlock.
 */
static inline enum dlm_status
__dlm_lockres_state_to_status(struct dlm_lock_resource *res)
{
	enum dlm_status status = DLM_NORMAL;

	assert_spin_locked(&res->spinlock);

	if (res->state & (DLM_LOCK_RES_RECOVERING|
			DLM_LOCK_RES_RECOVERY_WAITING))
		status = DLM_RECOVERING;
	else if (res->state & DLM_LOCK_RES_MIGRATING)
		status = DLM_MIGRATING;
	else if (res->state & DLM_LOCK_RES_IN_PROGRESS)
		status = DLM_FORWARD;

	return status;
}
804
805static inline u8 dlm_get_lock_cookie_node(u64 cookie)
806{
807 u8 ret;
808 cookie >>= 56;
809 ret = (u8)(cookie & 0xffULL);
810 return ret;
811}
812
813static inline unsigned long long dlm_get_lock_cookie_seq(u64 cookie)
814{
815 unsigned long long ret;
816 ret = ((unsigned long long)cookie) & 0x00ffffffffffffffULL;
817 return ret;
818}
819
/* lock lifecycle */
struct dlm_lock * dlm_new_lock(int type, u8 node, u64 cookie,
			       struct dlm_lockstatus *lksb);
void dlm_lock_get(struct dlm_lock *lock);
void dlm_lock_put(struct dlm_lock *lock);

void dlm_lock_attach_lockres(struct dlm_lock *lock,
			     struct dlm_lock_resource *res);

/* o2net handlers for lock traffic */
int dlm_create_lock_handler(struct o2net_msg *msg, u32 len, void *data,
			    void **ret_data);
int dlm_convert_lock_handler(struct o2net_msg *msg, u32 len, void *data,
			     void **ret_data);
int dlm_proxy_ast_handler(struct o2net_msg *msg, u32 len, void *data,
			  void **ret_data);

void dlm_revert_pending_convert(struct dlm_lock_resource *res,
				struct dlm_lock *lock);
void dlm_revert_pending_lock(struct dlm_lock_resource *res,
			     struct dlm_lock *lock);

int dlm_unlock_lock_handler(struct o2net_msg *msg, u32 len, void *data,
			    void **ret_data);
void dlm_commit_pending_cancel(struct dlm_lock_resource *res,
			       struct dlm_lock *lock);
void dlm_commit_pending_unlock(struct dlm_lock_resource *res,
			       struct dlm_lock *lock);

/* dlm and recovery threads */
int dlm_launch_thread(struct dlm_ctxt *dlm);
void dlm_complete_thread(struct dlm_ctxt *dlm);
int dlm_launch_recovery_thread(struct dlm_ctxt *dlm);
void dlm_complete_recovery_thread(struct dlm_ctxt *dlm);
void dlm_wait_for_recovery(struct dlm_ctxt *dlm);
void dlm_kick_recovery_thread(struct dlm_ctxt *dlm);
int dlm_is_node_dead(struct dlm_ctxt *dlm, u8 node);
void dlm_wait_for_node_death(struct dlm_ctxt *dlm, u8 node, int timeout);
void dlm_wait_for_node_recovery(struct dlm_ctxt *dlm, u8 node, int timeout);

/* domain reference counting and membership */
void dlm_put(struct dlm_ctxt *dlm);
struct dlm_ctxt *dlm_grab(struct dlm_ctxt *dlm);
int dlm_domain_fully_joined(struct dlm_ctxt *dlm);

void __dlm_lockres_calc_usage(struct dlm_ctxt *dlm,
			      struct dlm_lock_resource *res);
void dlm_lockres_calc_usage(struct dlm_ctxt *dlm,
			    struct dlm_lock_resource *res);
/* Take a reference on a lock resource; paired with dlm_lockres_put(). */
static inline void dlm_lockres_get(struct dlm_lock_resource *res)
{
	/* new lockres objects have a kref of 1, so no need to
	 * special-case a first reference here */
	kref_get(&res->refs);
}
void dlm_lockres_put(struct dlm_lock_resource *res);
void __dlm_unhash_lockres(struct dlm_ctxt *dlm, struct dlm_lock_resource *res);
void __dlm_insert_lockres(struct dlm_ctxt *dlm, struct dlm_lock_resource *res);

/* lockres lookup; __-variants expect dlm->spinlock held and a
 * precomputed dlm_lockid_hash() */
struct dlm_lock_resource * __dlm_lookup_lockres_full(struct dlm_ctxt *dlm,
						     const char *name,
						     unsigned int len,
						     unsigned int hash);
struct dlm_lock_resource * __dlm_lookup_lockres(struct dlm_ctxt *dlm,
						const char *name,
						unsigned int len,
						unsigned int hash);
struct dlm_lock_resource * dlm_lookup_lockres(struct dlm_ctxt *dlm,
					      const char *name,
					      unsigned int len);

int dlm_is_host_down(int errno);

struct dlm_lock_resource * dlm_get_lock_resource(struct dlm_ctxt *dlm,
						 const char *lockid,
						 int namelen,
						 int flags);
struct dlm_lock_resource *dlm_new_lockres(struct dlm_ctxt *dlm,
					  const char *name,
					  unsigned int namelen);

/* refmap: which nodes hold a reference on a mastered lockres */
void dlm_lockres_set_refmap_bit(struct dlm_ctxt *dlm,
				struct dlm_lock_resource *res, int bit);
void dlm_lockres_clear_refmap_bit(struct dlm_ctxt *dlm,
				  struct dlm_lock_resource *res, int bit);

void dlm_lockres_drop_inflight_ref(struct dlm_ctxt *dlm,
				   struct dlm_lock_resource *res);
void dlm_lockres_grab_inflight_ref(struct dlm_ctxt *dlm,
				   struct dlm_lock_resource *res);

void __dlm_lockres_grab_inflight_worker(struct dlm_ctxt *dlm,
					struct dlm_lock_resource *res);

/* ast / bast queueing and delivery */
void dlm_queue_ast(struct dlm_ctxt *dlm, struct dlm_lock *lock);
void dlm_queue_bast(struct dlm_ctxt *dlm, struct dlm_lock *lock);
void __dlm_queue_ast(struct dlm_ctxt *dlm, struct dlm_lock *lock);
void __dlm_queue_bast(struct dlm_ctxt *dlm, struct dlm_lock *lock);
void dlm_do_local_ast(struct dlm_ctxt *dlm,
		      struct dlm_lock_resource *res,
		      struct dlm_lock *lock);
int dlm_do_remote_ast(struct dlm_ctxt *dlm,
		      struct dlm_lock_resource *res,
		      struct dlm_lock *lock);
void dlm_do_local_bast(struct dlm_ctxt *dlm,
		       struct dlm_lock_resource *res,
		       struct dlm_lock *lock,
		       int blocked_type);
int dlm_send_proxy_ast_msg(struct dlm_ctxt *dlm,
			   struct dlm_lock_resource *res,
			   struct dlm_lock *lock,
			   int msg_type,
			   int blocked_type, int flags);
/* Send a BAST to the remote holder of 'lock'; thin wrapper around
 * dlm_send_proxy_ast_msg(). */
static inline int dlm_send_proxy_bast(struct dlm_ctxt *dlm,
				      struct dlm_lock_resource *res,
				      struct dlm_lock *lock,
				      int blocked_type)
{
	return dlm_send_proxy_ast_msg(dlm, res, lock, DLM_BAST,
				      blocked_type, 0);
}
936
/* Send an AST to the remote holder of 'lock'; thin wrapper around
 * dlm_send_proxy_ast_msg(). */
static inline int dlm_send_proxy_ast(struct dlm_ctxt *dlm,
				     struct dlm_lock_resource *res,
				     struct dlm_lock *lock,
				     int flags)
{
	return dlm_send_proxy_ast_msg(dlm, res, lock, DLM_AST,
				      0, flags);
}
945
/* debugging output */
void dlm_print_one_lock_resource(struct dlm_lock_resource *res);
void __dlm_print_one_lock_resource(struct dlm_lock_resource *res);

void dlm_kick_thread(struct dlm_ctxt *dlm, struct dlm_lock_resource *res);
void __dlm_dirty_lockres(struct dlm_ctxt *dlm, struct dlm_lock_resource *res);

/* heartbeat callbacks */
void dlm_hb_node_down_cb(struct o2nm_node *node, int idx, void *data);
void dlm_hb_node_up_cb(struct o2nm_node *node, int idx, void *data);

/* migration */
int dlm_empty_lockres(struct dlm_ctxt *dlm, struct dlm_lock_resource *res);
int dlm_finish_migration(struct dlm_ctxt *dlm,
			 struct dlm_lock_resource *res,
			 u8 old_master);
void dlm_lockres_release_ast(struct dlm_ctxt *dlm,
			     struct dlm_lock_resource *res);
void __dlm_lockres_reserve_ast(struct dlm_lock_resource *res);

/* o2net handlers for mastery, migration and recovery messages */
int dlm_master_request_handler(struct o2net_msg *msg, u32 len, void *data,
			       void **ret_data);
int dlm_assert_master_handler(struct o2net_msg *msg, u32 len, void *data,
			      void **ret_data);
void dlm_assert_master_post_handler(int status, void *data, void *ret_data);
int dlm_deref_lockres_handler(struct o2net_msg *msg, u32 len, void *data,
			      void **ret_data);
int dlm_deref_lockres_done_handler(struct o2net_msg *msg, u32 len, void *data,
				   void **ret_data);
int dlm_migrate_request_handler(struct o2net_msg *msg, u32 len, void *data,
				void **ret_data);
int dlm_mig_lockres_handler(struct o2net_msg *msg, u32 len, void *data,
			    void **ret_data);
int dlm_master_requery_handler(struct o2net_msg *msg, u32 len, void *data,
			       void **ret_data);
int dlm_request_all_locks_handler(struct o2net_msg *msg, u32 len, void *data,
				  void **ret_data);
int dlm_reco_data_done_handler(struct o2net_msg *msg, u32 len, void *data,
			       void **ret_data);
int dlm_begin_reco_handler(struct o2net_msg *msg, u32 len, void *data,
			   void **ret_data);
int dlm_finalize_reco_handler(struct o2net_msg *msg, u32 len, void *data,
			      void **ret_data);
int dlm_do_master_requery(struct dlm_ctxt *dlm, struct dlm_lock_resource *res,
			  u8 nodenum, u8 *real_master);

void __dlm_do_purge_lockres(struct dlm_ctxt *dlm,
			    struct dlm_lock_resource *res);

int dlm_dispatch_assert_master(struct dlm_ctxt *dlm,
			       struct dlm_lock_resource *res,
			       int ignore_higher,
			       u8 request_from,
			       u32 flags);

int dlm_send_one_lockres(struct dlm_ctxt *dlm,
			 struct dlm_lock_resource *res,
			 struct dlm_migratable_lockres *mres,
			 u8 send_to,
			 u8 flags);
void dlm_move_lockres_to_recovery_list(struct dlm_ctxt *dlm,
				       struct dlm_lock_resource *res);

/* will exit holding res->spinlock, but may drop in function */
void __dlm_wait_on_lockres_flags(struct dlm_lock_resource *res, int flags);
1011
/* Wait until the lockres is neither busy, recovering nor migrating;
 * see __dlm_wait_on_lockres_flags() for locking behavior. */
static inline void __dlm_wait_on_lockres(struct dlm_lock_resource *res)
{
	__dlm_wait_on_lockres_flags(res, (DLM_LOCK_RES_IN_PROGRESS|
					  DLM_LOCK_RES_RECOVERING|
					  DLM_LOCK_RES_RECOVERY_WAITING|
					  DLM_LOCK_RES_MIGRATING));
}
1019
/* MLE hash maintenance; callers hold dlm->master_lock */
void __dlm_unlink_mle(struct dlm_ctxt *dlm, struct dlm_master_list_entry *mle);
void __dlm_insert_mle(struct dlm_ctxt *dlm, struct dlm_master_list_entry *mle);

/* slab caches for MLEs, locks and lockres objects */
int dlm_init_master_caches(void);
void dlm_destroy_master_caches(void);

int dlm_init_lock_cache(void);
void dlm_destroy_lock_cache(void);

int dlm_init_mle_cache(void);
void dlm_destroy_mle_cache(void);

void dlm_hb_event_notify_attached(struct dlm_ctxt *dlm, int idx, int node_up);
int dlm_drop_lockres_ref(struct dlm_ctxt *dlm,
			 struct dlm_lock_resource *res);
void dlm_clean_master_list(struct dlm_ctxt *dlm,
			   u8 dead_node);
void dlm_force_free_mles(struct dlm_ctxt *dlm);
int dlm_lock_basts_flushed(struct dlm_ctxt *dlm, struct dlm_lock *lock);
int __dlm_lockres_has_locks(struct dlm_lock_resource *res);
int __dlm_lockres_unused(struct dlm_lock_resource *res);
1042
1043static inline const char * dlm_lock_mode_name(int mode)
1044{
1045 switch (mode) {
1046 case LKM_EXMODE:
1047 return "EX";
1048 case LKM_PRMODE:
1049 return "PR";
1050 case LKM_NLMODE:
1051 return "NL";
1052 }
1053 return "UNKNOWN";
1054}
1055
1056
1057static inline int dlm_lock_compatible(int existing, int request)
1058{
1059
1060 if (request == LKM_NLMODE ||
1061 existing == LKM_NLMODE)
1062 return 1;
1063
1064
1065 if (request == LKM_EXMODE)
1066 return 0;
1067
1068
1069 if (existing == LKM_PRMODE)
1070 return 1;
1071
1072 return 0;
1073}
1074
/* Return 1 if 'lock' is linked (via lock->list) on the given queue. */
static inline int dlm_lock_on_list(struct list_head *head,
				   struct dlm_lock *lock)
{
	struct dlm_lock *tmplock;

	list_for_each_entry(tmplock, head, list) {
		if (tmplock == lock)
			return 1;
	}
	return 0;
}
1086
1087
1088static inline enum dlm_status dlm_err_to_dlm_status(int err)
1089{
1090 enum dlm_status ret;
1091 if (err == -ENOMEM)
1092 ret = DLM_SYSERR;
1093 else if (err == -ETIMEDOUT || o2net_link_down(err, NULL))
1094 ret = DLM_NOLOCKMGR;
1095 else if (err == -EINVAL)
1096 ret = DLM_BADPARAM;
1097 else if (err == -ENAMETOOLONG)
1098 ret = DLM_IVBUFLEN;
1099 else
1100 ret = DLM_BADARGS;
1101 return ret;
1102}
1103
1104
/* Snapshot 'map' into the iterator and rewind it; use with
 * dlm_node_iter_next() below. */
static inline void dlm_node_iter_init(unsigned long *map,
				      struct dlm_node_iter *iter)
{
	memcpy(iter->node_map, map, sizeof(iter->node_map));
	iter->curnode = -1;	/* before the first node */
}
1111
/*
 * Advance to the next set bit in the snapshot.  Returns the node number,
 * or -ENOENT when the map is exhausted (iterator stays exhausted).
 */
static inline int dlm_node_iter_next(struct dlm_node_iter *iter)
{
	int bit;
	bit = find_next_bit(iter->node_map, O2NM_MAX_NODES, iter->curnode+1);
	if (bit >= O2NM_MAX_NODES) {
		iter->curnode = O2NM_MAX_NODES;
		return -ENOENT;
	}
	iter->curnode = bit;
	return bit;
}
1123
/* Unconditionally record 'owner' as the lockres master.
 * Caller must hold res->spinlock. */
static inline void dlm_set_lockres_owner(struct dlm_ctxt *dlm,
					 struct dlm_lock_resource *res,
					 u8 owner)
{
	assert_spin_locked(&res->spinlock);

	res->owner = owner;
}
1132
/* Record 'owner' as the lockres master only if it actually changed.
 * Caller must hold res->spinlock. */
static inline void dlm_change_lockres_owner(struct dlm_ctxt *dlm,
					    struct dlm_lock_resource *res,
					    u8 owner)
{
	assert_spin_locked(&res->spinlock);

	if (owner != res->owner)
		dlm_set_lockres_owner(dlm, res, owner);
}
1142
1143#endif
1144