1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41#include <linux/module.h>
42
43#define DEBUG_SUBSYSTEM S_LLITE
44
45#include <lustre_lite.h>
46#include "llite_internal.h"
47
48
49void vvp_write_pending(struct ccc_object *club, struct ccc_page *page)
50{
51 struct ll_inode_info *lli = ll_i2info(club->cob_inode);
52
53 ENTRY;
54 spin_lock(&lli->lli_lock);
55 lli->lli_flags |= LLIF_SOM_DIRTY;
56 if (page != NULL && list_empty(&page->cpg_pending_linkage))
57 list_add(&page->cpg_pending_linkage,
58 &club->cob_pending_list);
59 spin_unlock(&lli->lli_lock);
60 EXIT;
61}
62
63
64void vvp_write_complete(struct ccc_object *club, struct ccc_page *page)
65{
66 struct ll_inode_info *lli = ll_i2info(club->cob_inode);
67 int rc = 0;
68
69 ENTRY;
70 spin_lock(&lli->lli_lock);
71 if (page != NULL && !list_empty(&page->cpg_pending_linkage)) {
72 list_del_init(&page->cpg_pending_linkage);
73 rc = 1;
74 }
75 spin_unlock(&lli->lli_lock);
76 if (rc)
77 ll_queue_done_writing(club->cob_inode, 0);
78 EXIT;
79}
80
81
82
83
/**
 * OR @flags into lli_flags and, once DONE_WRITING has been requested and no
 * pages remain pending flush, hand the inode to the close thread.
 *
 * Lock order: lli_lock is taken outside lcq_lock.  The inode reference that
 * the close thread later drops with iput() is taken elsewhere (igrab() in
 * ll_ioepoch_close()), not here.
 */
void ll_queue_done_writing(struct inode *inode, unsigned long flags)
{
	struct ll_inode_info *lli = ll_i2info(inode);
	struct ccc_object *club = cl2ccc(ll_i2info(inode)->lli_clob);
	ENTRY;

	spin_lock(&lli->lli_lock);
	lli->lli_flags |= flags;

	/* Queue only when both conditions hold: DONE_WRITING was requested
	 * and the last dirty page has been flushed. */
	if ((lli->lli_flags & LLIF_DONE_WRITING) &&
	    list_empty(&club->cob_pending_list)) {
		struct ll_close_queue *lcq = ll_i2sbi(inode)->ll_lcq;

		/* NOTE(review): warning only -- holding the MDS size lock at
		 * this point looks unexpected right after recovery; the same
		 * message is a CERROR elsewhere in this file.  Confirm. */
		if (lli->lli_flags & LLIF_MDS_SIZE_LOCK)
			CWARN("ino %lu/%u(flags %u) som valid it just after "
			      "recovery\n",
			      inode->i_ino, inode->i_generation,
			      lli->lli_flags);

		spin_lock(&lcq->lcq_lock);

		/* The inode must not already be queued. */
		LASSERT(list_empty(&lli->lli_close_list));
		CDEBUG(D_INODE, "adding inode %lu/%u to close list\n",
		       inode->i_ino, inode->i_generation);
		list_add_tail(&lli->lli_close_list, &lcq->lcq_head);

		/* Clear DONE_WRITING so the inode is not queued twice;
		 * the close thread will re-drive the protocol itself. */
		lli->lli_flags &= ~LLIF_DONE_WRITING;

		wake_up(&lcq->lcq_waitq);
		spin_unlock(&lcq->lcq_lock);
	}
	spin_unlock(&lli->lli_lock);
	EXIT;
}
124
125
126void ll_done_writing_attr(struct inode *inode, struct md_op_data *op_data)
127{
128 struct ll_inode_info *lli = ll_i2info(inode);
129 ENTRY;
130
131 op_data->op_flags |= MF_SOM_CHANGE;
132
133 if (lli->lli_flags & LLIF_MDS_SIZE_LOCK)
134 CERROR("ino %lu/%u(flags %u) som valid it just after "
135 "recovery\n", inode->i_ino, inode->i_generation,
136 lli->lli_flags);
137
138 if (!cl_local_size(inode)) {
139
140 op_data->op_attr.ia_valid |= ATTR_MTIME_SET | ATTR_CTIME_SET |
141 ATTR_ATIME_SET | ATTR_SIZE | ATTR_BLOCKS;
142 }
143 EXIT;
144}
145
146
/**
 * Close the current I/O epoch for @inode, or defer the close if dirty pages
 * are still pending flush.
 *
 * Three outcomes:
 *  - pages pending, no epoch close pending yet: stash *och in
 *    lli_pending_och, set LLIF_EPOCH_PENDING, grab an inode reference
 *    (dropped by the close thread via iput()), and return without closing;
 *  - pages pending and DONE_WRITING requested: record LLIF_DONE_WRITING,
 *    grab a reference, and return -- the close is re-driven later;
 *  - no pages pending (or the deferral conditions don't apply): mark
 *    MF_EPOCH_CLOSE, clear LLIF_SOM_DIRTY, and fill attributes via
 *    ll_done_writing_attr().
 *
 * On the DONE_WRITING path, *och is replaced with the stashed
 * lli_pending_och so the caller closes the deferred handle.
 */
void ll_ioepoch_close(struct inode *inode, struct md_op_data *op_data,
		      struct obd_client_handle **och, unsigned long flags)
{
	struct ll_inode_info *lli = ll_i2info(inode);
	struct ccc_object *club = cl2ccc(ll_i2info(inode)->lli_clob);
	ENTRY;

	spin_lock(&lli->lli_lock);
	if (!(list_empty(&club->cob_pending_list))) {
		if (!(lli->lli_flags & LLIF_EPOCH_PENDING)) {
			LASSERT(*och != NULL);
			LASSERT(lli->lli_pending_och == NULL);

			/* Defer the epoch close: park the open handle until
			 * the last pending page is flushed. */
			lli->lli_flags |= LLIF_EPOCH_PENDING;
			lli->lli_pending_och = *och;
			spin_unlock(&lli->lli_lock);

			/* Reference is dropped by the close thread (iput). */
			inode = igrab(inode);
			LASSERT(inode);
			GOTO(out, 0);
		}
		if (flags & LLIF_DONE_WRITING) {
			/* DONE_WRITING requested while pages are still
			 * pending: remember it and retry once they flush
			 * (via ll_queue_done_writing()). */
			LASSERT(!(lli->lli_flags & LLIF_DONE_WRITING));
			lli->lli_flags |= LLIF_DONE_WRITING;
			spin_unlock(&lli->lli_lock);

			inode = igrab(inode);
			LASSERT(inode);
			GOTO(out, 0);
		}
	}
	CDEBUG(D_INODE, "Epoch "LPU64" closed on "DFID"\n",
	       ll_i2info(inode)->lli_ioepoch, PFID(&lli->lli_fid));
	op_data->op_flags |= MF_EPOCH_CLOSE;

	if (flags & LLIF_DONE_WRITING) {
		/* Closing the deferred epoch: hand the parked handle back
		 * to the caller. */
		LASSERT(lli->lli_flags & LLIF_SOM_DIRTY);
		LASSERT(!(lli->lli_flags & LLIF_DONE_WRITING));
		*och = lli->lli_pending_och;
		lli->lli_pending_och = NULL;
		lli->lli_flags &= ~LLIF_EPOCH_PENDING;
	} else {
		/* Nothing SOM-dirty: no attribute update to send. */
		if (!(lli->lli_flags & LLIF_SOM_DIRTY)) {
			spin_unlock(&lli->lli_lock);
			GOTO(out, 0);
		}

		/* A deferred epoch close is already in flight; let that
		 * path finish the job. */
		if (lli->lli_flags & LLIF_EPOCH_PENDING) {
			spin_unlock(&lli->lli_lock);
			GOTO(out, 0);
		}
	}

	LASSERT(list_empty(&club->cob_pending_list));
	lli->lli_flags &= ~LLIF_SOM_DIRTY;
	spin_unlock(&lli->lli_lock);
	ll_done_writing_attr(inode, op_data);

	EXIT;
out:
	return;
}
216
217
218
219
220
221int ll_som_update(struct inode *inode, struct md_op_data *op_data)
222{
223 struct ll_inode_info *lli = ll_i2info(inode);
224 struct ptlrpc_request *request = NULL;
225 __u32 old_flags;
226 struct obdo *oa;
227 int rc;
228 ENTRY;
229
230 LASSERT(op_data != NULL);
231 if (lli->lli_flags & LLIF_MDS_SIZE_LOCK)
232 CERROR("ino %lu/%u(flags %u) som valid it just after "
233 "recovery\n", inode->i_ino, inode->i_generation,
234 lli->lli_flags);
235
236 OBDO_ALLOC(oa);
237 if (!oa) {
238 CERROR("can't allocate memory for Size-on-MDS update.\n");
239 RETURN(-ENOMEM);
240 }
241
242 old_flags = op_data->op_flags;
243 op_data->op_flags = MF_SOM_CHANGE;
244
245
246 if (lli->lli_ioepoch == op_data->op_ioepoch) {
247 rc = ll_inode_getattr(inode, oa, op_data->op_ioepoch,
248 old_flags & MF_GETATTR_LOCK);
249 if (rc) {
250 oa->o_valid = 0;
251 if (rc != -ENOENT)
252 CERROR("inode_getattr failed (%d): unable to "
253 "send a Size-on-MDS attribute update "
254 "for inode %lu/%u\n", rc, inode->i_ino,
255 inode->i_generation);
256 } else {
257 CDEBUG(D_INODE, "Size-on-MDS update on "DFID"\n",
258 PFID(&lli->lli_fid));
259 }
260
261 md_from_obdo(op_data, oa, oa->o_valid);
262 }
263
264 rc = md_setattr(ll_i2sbi(inode)->ll_md_exp, op_data,
265 NULL, 0, NULL, 0, &request, NULL);
266 ptlrpc_req_finished(request);
267
268 OBDO_FREE(oa);
269 RETURN(rc);
270}
271
272
273
274
275
276static void ll_prepare_done_writing(struct inode *inode,
277 struct md_op_data *op_data,
278 struct obd_client_handle **och)
279{
280 ll_ioepoch_close(inode, op_data, och, LLIF_DONE_WRITING);
281
282 if (*och == NULL)
283 return;
284
285 ll_pack_inode2opdata(inode, op_data, &(*och)->och_fh);
286 ll_prep_md_op_data(op_data, inode, NULL, NULL,
287 0, 0, LUSTRE_OPC_ANY, NULL);
288}
289
290
291static void ll_done_writing(struct inode *inode)
292{
293 struct obd_client_handle *och = NULL;
294 struct md_op_data *op_data;
295 int rc;
296 ENTRY;
297
298 LASSERT(exp_connect_som(ll_i2mdexp(inode)));
299
300 OBD_ALLOC_PTR(op_data);
301 if (op_data == NULL) {
302 CERROR("can't allocate op_data\n");
303 EXIT;
304 return;
305 }
306
307 ll_prepare_done_writing(inode, op_data, &och);
308
309 if (och == NULL)
310 GOTO(out, 0);
311
312 rc = md_done_writing(ll_i2sbi(inode)->ll_md_exp, op_data, NULL);
313 if (rc == -EAGAIN) {
314
315
316 rc = ll_som_update(inode, op_data);
317 } else if (rc) {
318 CERROR("inode %lu mdc done_writing failed: rc = %d\n",
319 inode->i_ino, rc);
320 }
321out:
322 ll_finish_md_op_data(op_data);
323 if (och) {
324 md_clear_open_replay_data(ll_i2sbi(inode)->ll_md_exp, och);
325 OBD_FREE_PTR(och);
326 }
327 EXIT;
328}
329
330static struct ll_inode_info *ll_close_next_lli(struct ll_close_queue *lcq)
331{
332 struct ll_inode_info *lli = NULL;
333
334 spin_lock(&lcq->lcq_lock);
335
336 if (!list_empty(&lcq->lcq_head)) {
337 lli = list_entry(lcq->lcq_head.next, struct ll_inode_info,
338 lli_close_list);
339 list_del_init(&lli->lli_close_list);
340 } else if (atomic_read(&lcq->lcq_stop))
341 lli = ERR_PTR(-EALREADY);
342
343 spin_unlock(&lcq->lcq_lock);
344 return lli;
345}
346
347static int ll_close_thread(void *arg)
348{
349 struct ll_close_queue *lcq = arg;
350 ENTRY;
351
352 complete(&lcq->lcq_comp);
353
354 while (1) {
355 struct l_wait_info lwi = { 0 };
356 struct ll_inode_info *lli;
357 struct inode *inode;
358
359 l_wait_event_exclusive(lcq->lcq_waitq,
360 (lli = ll_close_next_lli(lcq)) != NULL,
361 &lwi);
362 if (IS_ERR(lli))
363 break;
364
365 inode = ll_info2i(lli);
366 CDEBUG(D_INFO, "done_writting for inode %lu/%u\n",
367 inode->i_ino, inode->i_generation);
368 ll_done_writing(inode);
369 iput(inode);
370 }
371
372 CDEBUG(D_INFO, "ll_close exiting\n");
373 complete(&lcq->lcq_comp);
374 RETURN(0);
375}
376
377int ll_close_thread_start(struct ll_close_queue **lcq_ret)
378{
379 struct ll_close_queue *lcq;
380 task_t *task;
381
382 if (OBD_FAIL_CHECK(OBD_FAIL_LDLM_CLOSE_THREAD))
383 return -EINTR;
384
385 OBD_ALLOC(lcq, sizeof(*lcq));
386 if (lcq == NULL)
387 return -ENOMEM;
388
389 spin_lock_init(&lcq->lcq_lock);
390 INIT_LIST_HEAD(&lcq->lcq_head);
391 init_waitqueue_head(&lcq->lcq_waitq);
392 init_completion(&lcq->lcq_comp);
393
394 task = kthread_run(ll_close_thread, lcq, "ll_close");
395 if (IS_ERR(task)) {
396 OBD_FREE(lcq, sizeof(*lcq));
397 return PTR_ERR(task);
398 }
399
400 wait_for_completion(&lcq->lcq_comp);
401 *lcq_ret = lcq;
402 return 0;
403}
404
/**
 * Stop the close thread and free its queue.
 *
 * Statement order is load-bearing: the completion is re-armed before the
 * stop flag is raised and the thread is woken, so the thread's final
 * complete() (in ll_close_thread()) is the one we wait on here.  Only
 * after that handshake is the queue freed.
 */
void ll_close_thread_shutdown(struct ll_close_queue *lcq)
{
	init_completion(&lcq->lcq_comp);
	atomic_inc(&lcq->lcq_stop);
	wake_up(&lcq->lcq_waitq);
	wait_for_completion(&lcq->lcq_comp);
	OBD_FREE(lcq, sizeof(*lcq));
}
413