1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48#define DEBUG_SUBSYSTEM S_LDLM
49#include "../../include/linux/libcfs/libcfs.h"
50#include "../include/lustre_dlm.h"
51#include "../include/obd_support.h"
52#include "../include/obd.h"
53#include "../include/obd_class.h"
54#include "../include/lustre_lib.h"
55#include "ldlm_internal.h"
56
57
58
59
60
61
62
63__u64 ldlm_extent_shift_kms(struct ldlm_lock *lock, __u64 old_kms)
64{
65 struct ldlm_resource *res = lock->l_resource;
66 struct list_head *tmp;
67 struct ldlm_lock *lck;
68 __u64 kms = 0;
69
70
71
72
73
74 ldlm_set_kms_ignore(lock);
75
76 list_for_each(tmp, &res->lr_granted) {
77 lck = list_entry(tmp, struct ldlm_lock, l_res_link);
78
79 if (ldlm_is_kms_ignore(lck))
80 continue;
81
82 if (lck->l_policy_data.l_extent.end >= old_kms)
83 return old_kms;
84
85
86
87
88 if (lck->l_policy_data.l_extent.end + 1 > kms)
89 kms = lck->l_policy_data.l_extent.end + 1;
90 }
91 LASSERTF(kms <= old_kms, "kms %llu old_kms %llu\n", kms, old_kms);
92
93 return kms;
94}
95EXPORT_SYMBOL(ldlm_extent_shift_kms);
96
97struct kmem_cache *ldlm_interval_slab;
98
99
100static void ldlm_interval_attach(struct ldlm_interval *n, struct ldlm_lock *l)
101{
102 LASSERT(!l->l_tree_node);
103 LASSERT(l->l_resource->lr_type == LDLM_EXTENT);
104
105 list_add_tail(&l->l_sl_policy, &n->li_group);
106 l->l_tree_node = n;
107}
108
109struct ldlm_interval *ldlm_interval_alloc(struct ldlm_lock *lock)
110{
111 struct ldlm_interval *node;
112
113 LASSERT(lock->l_resource->lr_type == LDLM_EXTENT);
114 node = kmem_cache_zalloc(ldlm_interval_slab, GFP_NOFS);
115 if (!node)
116 return NULL;
117
118 INIT_LIST_HEAD(&node->li_group);
119 ldlm_interval_attach(node, lock);
120 return node;
121}
122
123void ldlm_interval_free(struct ldlm_interval *node)
124{
125 if (node) {
126 LASSERT(list_empty(&node->li_group));
127 LASSERT(!interval_is_intree(&node->li_node));
128 kmem_cache_free(ldlm_interval_slab, node);
129 }
130}
131
132struct ldlm_interval *ldlm_interval_detach(struct ldlm_lock *l)
133{
134 struct ldlm_interval *n = l->l_tree_node;
135
136 if (!n)
137 return NULL;
138
139 LASSERT(!list_empty(&n->li_group));
140 l->l_tree_node = NULL;
141 list_del_init(&l->l_sl_policy);
142
143 return list_empty(&n->li_group) ? n : NULL;
144}
145
146static inline int lock_mode_to_index(enum ldlm_mode mode)
147{
148 int index;
149
150 LASSERT(mode != 0);
151 LASSERT(is_power_of_2(mode));
152 for (index = -1; mode; index++)
153 mode >>= 1;
154 LASSERT(index < LCK_MODE_NUM);
155 return index;
156}
157
158
/**
 * ldlm_extent_add_lock - add a fully granted extent lock to its resource.
 *
 * Inserts the lock's interval node into the per-mode interval tree of
 * @res and links the lock onto the resource's granted list. If an
 * interval for the same extent already exists in the tree, the lock's
 * own node is freed and the lock is attached to the existing node.
 *
 * @res:  resource the lock belongs to
 * @lock: granted extent lock (l_granted_mode == l_req_mode)
 */
void ldlm_extent_add_lock(struct ldlm_resource *res,
			  struct ldlm_lock *lock)
{
	struct interval_node *found, **root;
	struct ldlm_interval *node;
	struct ldlm_extent *extent;
	int idx, rc;

	/* Only fully granted locks go into the tree. */
	LASSERT(lock->l_granted_mode == lock->l_req_mode);

	node = lock->l_tree_node;
	LASSERT(node);
	LASSERT(!interval_is_intree(&node->li_node));

	/* One interval tree per lock mode; the granted mode must be a
	 * single bit and match the mode recorded in the chosen tree.
	 */
	idx = lock_mode_to_index(lock->l_granted_mode);
	LASSERT(lock->l_granted_mode == 1 << idx);
	LASSERT(lock->l_granted_mode == res->lr_itree[idx].lit_mode);

	extent = &lock->l_policy_data.l_extent;
	rc = interval_set(&node->li_node, extent->start, extent->end);
	LASSERT(!rc);

	root = &res->lr_itree[idx].lit_root;
	found = interval_insert(&node->li_node, root);
	if (found) {
		/* A node for this extent is already in the tree: drop our
		 * own node and share the existing one.
		 */
		struct ldlm_interval *tmp;

		tmp = ldlm_interval_detach(lock);
		ldlm_interval_free(tmp);
		ldlm_interval_attach(to_ldlm_interval(found), lock);
	}
	res->lr_itree[idx].lit_size++;

	ldlm_resource_add_lock(res, &res->lr_granted, lock);

	if (OBD_FAIL_CHECK(OBD_FAIL_LDLM_GRANT_CHECK)) {
		/* Fault-injection-only sanity check: no granted lock with an
		 * incompatible mode may overlap the lock just granted.
		 */
		struct ldlm_lock *lck;

		list_for_each_entry_reverse(lck, &res->lr_granted,
					    l_res_link) {
			if (lck == lock)
				continue;
			if (lockmode_compat(lck->l_granted_mode,
					    lock->l_granted_mode))
				continue;
			if (ldlm_extent_overlap(&lck->l_req_extent,
						&lock->l_req_extent)) {
				CDEBUG(D_ERROR, "granting conflicting lock %p %p\n",
				       lck, lock);
				ldlm_resource_dump(D_ERROR, res);
				LBUG();
			}
		}
	}
}
218
219
/**
 * ldlm_extent_unlink_lock - remove @lock from its resource's per-mode
 * interval tree.
 *
 * No-op if the lock has no interval node or the node is not in a tree.
 * The interval node is erased from the tree and freed only when @lock
 * was the last lock attached to it (see ldlm_interval_detach()).
 */
void ldlm_extent_unlink_lock(struct ldlm_lock *lock)
{
	struct ldlm_resource *res = lock->l_resource;
	struct ldlm_interval *node = lock->l_tree_node;
	struct ldlm_interval_tree *tree;
	int idx;

	if (!node || !interval_is_intree(&node->li_node))
		return;

	/* The tree is selected by the lock's (single-bit) granted mode. */
	idx = lock_mode_to_index(lock->l_granted_mode);
	LASSERT(lock->l_granted_mode == 1 << idx);
	tree = &res->lr_itree[idx];

	LASSERT(tree->lit_root);

	tree->lit_size--;
	node = ldlm_interval_detach(lock);
	if (node) {
		/* Last lock on this node: take it out of the tree. */
		interval_erase(&node->li_node, &tree->lit_root);
		ldlm_interval_free(node);
	}
}
243
244void ldlm_extent_policy_wire_to_local(const union ldlm_wire_policy_data *wpolicy,
245 union ldlm_policy_data *lpolicy)
246{
247 lpolicy->l_extent.start = wpolicy->l_extent.start;
248 lpolicy->l_extent.end = wpolicy->l_extent.end;
249 lpolicy->l_extent.gid = wpolicy->l_extent.gid;
250}
251
252void ldlm_extent_policy_local_to_wire(const union ldlm_policy_data *lpolicy,
253 union ldlm_wire_policy_data *wpolicy)
254{
255 memset(wpolicy, 0, sizeof(*wpolicy));
256 wpolicy->l_extent.start = lpolicy->l_extent.start;
257 wpolicy->l_extent.end = lpolicy->l_extent.end;
258 wpolicy->l_extent.gid = lpolicy->l_extent.gid;
259}
260