/*
 * llite_close.c
 *
 * Lustre Lite routines to let ll_file_release() wait for page writeback
 * and to send DONE_WRITING RPCs from a dedicated close thread.
 */
#include <linux/module.h>

#define DEBUG_SUBSYSTEM S_LLITE

#include "../include/lustre_lite.h"
#include "llite_internal.h"

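/* Note that a page has a write in flight: mark the inode SOM-dirty and
 * link the page onto the object's pending list until writeback completes.
 */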
void vvp_write_pending(struct ccc_object *club, struct ccc_page *page)
{
	struct ll_inode_info *lli = ll_i2info(club->cob_inode);

	spin_lock(&lli->lli_lock);
	lli->lli_flags |= LLIF_SOM_DIRTY;
	if (page && list_empty(&page->cpg_pending_linkage))
		list_add(&page->cpg_pending_linkage, &club->cob_pending_list);
	spin_unlock(&lli->lli_lock);
}

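/* A write has completed: unlink the page from the pending list and poke
 * ll_queue_done_writing(), which queues the inode once the list drains.
 */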
void vvp_write_complete(struct ccc_object *club, struct ccc_page *page)
{
	struct ll_inode_info *lli = ll_i2info(club->cob_inode);
	int rc = 0;

	spin_lock(&lli->lli_lock);
	if (page && !list_empty(&page->cpg_pending_linkage)) {
		list_del_init(&page->cpg_pending_linkage);
		rc = 1;
	}
	spin_unlock(&lli->lli_lock);
	if (rc)
		ll_queue_done_writing(club->cob_inode, 0);
}

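/* Queue the inode for the close thread if
 * - done writing is allowed (LLIF_DONE_WRITING is set), and
 * - the inode has no dirty pages pending.
 */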
void ll_queue_done_writing(struct inode *inode, unsigned long flags)
{
	struct ll_inode_info *lli = ll_i2info(inode);
	struct ccc_object *club = cl2ccc(ll_i2info(inode)->lli_clob);

	spin_lock(&lli->lli_lock);
	lli->lli_flags |= flags;

	if ((lli->lli_flags & LLIF_DONE_WRITING) &&
	    list_empty(&club->cob_pending_list)) {
		struct ll_close_queue *lcq = ll_i2sbi(inode)->ll_lcq;

		if (lli->lli_flags & LLIF_MDS_SIZE_LOCK)
			CWARN("ino %lu/%u(flags %u) SOM attributes unexpectedly valid just after recovery\n",
			      inode->i_ino, inode->i_generation,
			      lli->lli_flags);
		/* DONE_WRITING is allowed and the inode has no dirty pages. */
		spin_lock(&lcq->lcq_lock);

		LASSERT(list_empty(&lli->lli_close_list));
		CDEBUG(D_INODE, "adding inode %lu/%u to close list\n",
		       inode->i_ino, inode->i_generation);
		list_add_tail(&lli->lli_close_list, &lcq->lcq_head);

		/* Clear LLIF_DONE_WRITING so a concurrent caller cannot
		 * queue the inode a second time while this entry is still
		 * sitting on the close list.
		 */
		lli->lli_flags &= ~LLIF_DONE_WRITING;

		wake_up(&lcq->lcq_waitq);
		spin_unlock(&lcq->lcq_lock);
	}
	spin_unlock(&lli->lli_lock);
}

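/* Pack Size-on-MDS attributes into @op_data for a CLOSE or DONE_WRITING
 * RPC.
 */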
void ll_done_writing_attr(struct inode *inode, struct md_op_data *op_data)
{
	struct ll_inode_info *lli = ll_i2info(inode);

	op_data->op_flags |= MF_SOM_CHANGE;
	/* Check if Size-on-MDS attributes are valid. */
	if (lli->lli_flags & LLIF_MDS_SIZE_LOCK)
		CERROR("ino %lu/%u(flags %u) SOM attributes unexpectedly valid just after recovery\n",
		       inode->i_ino, inode->i_generation,
		       lli->lli_flags);

	if (!cl_local_size(inode)) {
		/* Size is known locally; send it to the MDS. */
		op_data->op_attr.ia_valid |= ATTR_MTIME_SET | ATTR_CTIME_SET |
				ATTR_ATIME_SET | ATTR_SIZE | ATTR_BLOCKS;
	}
}

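/* Close the IO epoch and, if the inode is clean, pack the Size-on-MDS
 * attributes into @op_data; otherwise arrange for DONE_WRITING to be sent
 * later, once writeback drains.
 */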
void ll_ioepoch_close(struct inode *inode, struct md_op_data *op_data,
		      struct obd_client_handle **och, unsigned long flags)
{
	struct ll_inode_info *lli = ll_i2info(inode);
	struct ccc_object *club = cl2ccc(ll_i2info(inode)->lli_clob);

	spin_lock(&lli->lli_lock);
	if (!list_empty(&club->cob_pending_list)) {
		if (!(lli->lli_flags & LLIF_EPOCH_PENDING)) {
			LASSERT(*och);
			LASSERT(!lli->lli_pending_och);
			/* The inode is dirty and no DONE_WRITING is pending
			 * yet; stash the open handle so DONE_WRITING can be
			 * sent later.
			 */
			lli->lli_flags |= LLIF_EPOCH_PENDING;
			lli->lli_pending_och = *och;
			spin_unlock(&lli->lli_lock);

			inode = igrab(inode);
			LASSERT(inode);
			goto out;
		}
		if (flags & LLIF_DONE_WRITING) {
			/* Some pages are still dirty, so it is too early to
			 * send DONE_WRITING; retry once all pages have been
			 * flushed.
			 */
			LASSERT(!(lli->lli_flags & LLIF_DONE_WRITING));
			lli->lli_flags |= LLIF_DONE_WRITING;
			spin_unlock(&lli->lli_lock);

			inode = igrab(inode);
			LASSERT(inode);
			goto out;
		}
	}
	CDEBUG(D_INODE, "Epoch %llu closed on "DFID"\n",
	       ll_i2info(inode)->lli_ioepoch, PFID(&lli->lli_fid));
	op_data->op_flags |= MF_EPOCH_CLOSE;

	if (flags & LLIF_DONE_WRITING) {
		LASSERT(lli->lli_flags & LLIF_SOM_DIRTY);
		LASSERT(!(lli->lli_flags & LLIF_DONE_WRITING));
		*och = lli->lli_pending_och;
		lli->lli_pending_och = NULL;
		lli->lli_flags &= ~LLIF_EPOCH_PENDING;
	} else {
		/* Pack Size-on-MDS attributes only if they have changed. */
		if (!(lli->lli_flags & LLIF_SOM_DIRTY)) {
			spin_unlock(&lli->lli_lock);
			goto out;
		}

		/* A DONE_WRITING is still pending for this epoch; close the
		 * epoch without packing attributes.
		 */
		if (lli->lli_flags & LLIF_EPOCH_PENDING) {
			spin_unlock(&lli->lli_lock);
			goto out;
		}
	}

	LASSERT(list_empty(&club->cob_pending_list));
	lli->lli_flags &= ~LLIF_SOM_DIRTY;
	spin_unlock(&lli->lli_lock);
	ll_done_writing_attr(inode, op_data);

out:
	return;
}

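/* Update Size-on-MDS attributes on the MDS: getattr from the OSTs with no
 * lock, then md_setattr to the MDS.
 */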
int ll_som_update(struct inode *inode, struct md_op_data *op_data)
{
	struct ll_inode_info *lli = ll_i2info(inode);
	struct ptlrpc_request *request = NULL;
	__u32 old_flags;
	struct obdo *oa;
	int rc;

	LASSERT(op_data);
	if (lli->lli_flags & LLIF_MDS_SIZE_LOCK)
		CERROR("ino %lu/%u(flags %u) SOM attributes unexpectedly valid just after recovery\n",
		       inode->i_ino, inode->i_generation,
		       lli->lli_flags);

	oa = kmem_cache_zalloc(obdo_cachep, GFP_NOFS);
	if (!oa) {
		CERROR("can't allocate memory for Size-on-MDS update.\n");
		return -ENOMEM;
	}

	old_flags = op_data->op_flags;
	op_data->op_flags = MF_SOM_CHANGE;

	/* If the inode is already in another epoch, skip getattr from the
	 * OSTs.
	 */
	if (lli->lli_ioepoch == op_data->op_ioepoch) {
		rc = ll_inode_getattr(inode, oa, op_data->op_ioepoch,
				      old_flags & MF_GETATTR_LOCK);
		if (rc) {
			oa->o_valid = 0;
			if (rc != -ENOENT)
				CERROR("inode_getattr failed (%d): unable to send a Size-on-MDS attribute update for inode %lu/%u\n",
				       rc, inode->i_ino,
				       inode->i_generation);
		} else {
			CDEBUG(D_INODE, "Size-on-MDS update on "DFID"\n",
			       PFID(&lli->lli_fid));
		}
		/* Install the attributes into op_data. */
		md_from_obdo(op_data, oa, oa->o_valid);
	}

	rc = md_setattr(ll_i2sbi(inode)->ll_md_exp, op_data,
			NULL, 0, NULL, 0, &request, NULL);
	ptlrpc_req_finished(request);

	kmem_cache_free(obdo_cachep, oa);
	return rc;
}

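/* Close the ioepoch and pack all the attributes into @op_data for the
 * DONE_WRITING RPC.
 */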
static void ll_prepare_done_writing(struct inode *inode,
				    struct md_op_data *op_data,
				    struct obd_client_handle **och)
{
	ll_ioepoch_close(inode, op_data, och, LLIF_DONE_WRITING);
	/* No open handle was returned: DONE_WRITING is deferred until
	 * writeback drains.
	 */
	if (!*och)
		return;

	ll_pack_inode2opdata(inode, op_data, &(*och)->och_fh);
	ll_prep_md_op_data(op_data, inode, NULL, NULL,
			   0, 0, LUSTRE_OPC_ANY, NULL);
}

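/* Send the DONE_WRITING RPC, falling back to a Size-on-MDS update when the
 * MDS returns -EAGAIN.
 */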
static void ll_done_writing(struct inode *inode)
{
	struct obd_client_handle *och = NULL;
	struct md_op_data *op_data;
	int rc;

	LASSERT(exp_connect_som(ll_i2mdexp(inode)));

	op_data = kzalloc(sizeof(*op_data), GFP_NOFS);
	if (!op_data)
		return;

	ll_prepare_done_writing(inode, op_data, &och);
	/* Without an open handle there is nothing to send yet. */
	if (!och)
		goto out;

	rc = md_done_writing(ll_i2sbi(inode)->ll_md_exp, op_data, NULL);
	if (rc == -EAGAIN)
		/* The MDS asks this client to fetch Size-on-MDS attributes
		 * from the OSTs and send a setattr back to the MDS.
		 */
		rc = ll_som_update(inode, op_data);
	else if (rc)
		CERROR("inode %lu mdc done_writing failed: rc = %d\n",
		       inode->i_ino, rc);
out:
	ll_finish_md_op_data(op_data);
	if (och) {
		md_clear_open_replay_data(ll_i2sbi(inode)->ll_md_exp, och);
		kfree(och);
	}
}

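/* Pop the next inode off the close queue; returns ERR_PTR(-EALREADY) once
 * the queue is empty and a shutdown has been requested.
 */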
static struct ll_inode_info *ll_close_next_lli(struct ll_close_queue *lcq)
{
	struct ll_inode_info *lli = NULL;

	spin_lock(&lcq->lcq_lock);

	if (!list_empty(&lcq->lcq_head)) {
		lli = list_entry(lcq->lcq_head.next, struct ll_inode_info,
				 lli_close_list);
		list_del_init(&lli->lli_close_list);
	} else if (atomic_read(&lcq->lcq_stop)) {
		lli = ERR_PTR(-EALREADY);
	}

	spin_unlock(&lcq->lcq_lock);
	return lli;
}

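/* Close-queue thread: sends DONE_WRITING for each queued inode and drops
 * the reference taken when the inode was queued.
 */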
static int ll_close_thread(void *arg)
{
	struct ll_close_queue *lcq = arg;

	complete(&lcq->lcq_comp);

	while (1) {
		struct l_wait_info lwi = { 0 };
		struct ll_inode_info *lli;
		struct inode *inode;

		l_wait_event_exclusive(lcq->lcq_waitq,
				       (lli = ll_close_next_lli(lcq)) != NULL,
				       &lwi);
		if (IS_ERR(lli))
			break;

		inode = ll_info2i(lli);
		CDEBUG(D_INFO, "done_writing for inode %lu/%u\n",
		       inode->i_ino, inode->i_generation);
		ll_done_writing(inode);
		iput(inode);
	}

	CDEBUG(D_INFO, "ll_close exiting\n");
	complete(&lcq->lcq_comp);
	return 0;
}

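/* Allocate the close queue and start its thread, waiting until the thread
 * has signalled start-up before returning.
 */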
int ll_close_thread_start(struct ll_close_queue **lcq_ret)
{
	struct ll_close_queue *lcq;
	struct task_struct *task;

	if (OBD_FAIL_CHECK(OBD_FAIL_LDLM_CLOSE_THREAD))
		return -EINTR;

	lcq = kzalloc(sizeof(*lcq), GFP_NOFS);
	if (!lcq)
		return -ENOMEM;

	spin_lock_init(&lcq->lcq_lock);
	INIT_LIST_HEAD(&lcq->lcq_head);
	init_waitqueue_head(&lcq->lcq_waitq);
	init_completion(&lcq->lcq_comp);

	task = kthread_run(ll_close_thread, lcq, "ll_close");
	if (IS_ERR(task)) {
		kfree(lcq);
		return PTR_ERR(task);
	}

	wait_for_completion(&lcq->lcq_comp);
	*lcq_ret = lcq;
	return 0;
}

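/* Ask the close thread to stop, wait for it to drain and exit, then free
 * the queue.
 */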
void ll_close_thread_shutdown(struct ll_close_queue *lcq)
{
	init_completion(&lcq->lcq_comp);
	atomic_inc(&lcq->lcq_stop);
	wake_up(&lcq->lcq_waitq);
	wait_for_completion(&lcq->lcq_comp);
	kfree(lcq);
}