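/*
 * pNFS client support: registration of pnfs layout drivers and management
 * of per-inode layout headers and layout segments (LAYOUTGET, LAYOUTRETURN,
 * return-on-close and bulk recall handling).
 */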
30#include <linux/nfs_fs.h>
31#include <linux/nfs_page.h>
32#include <linux/module.h>
33#include <linux/sort.h>
34#include "internal.h"
35#include "pnfs.h"
36#include "iostat.h"
37#include "nfs4trace.h"
38#include "delegation.h"
39#include "nfs42.h"
40
41#define NFSDBG_FACILITY NFSDBG_PNFS
42#define PNFS_LAYOUTGET_RETRY_TIMEOUT (120*HZ)
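/* Protects pnfs_modules_tbl and layout driver registration */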
49static DEFINE_SPINLOCK(pnfs_spinlock);
50
51
52
53
54static LIST_HEAD(pnfs_modules_tbl);
55
56static void pnfs_layoutreturn_before_put_layout_hdr(struct pnfs_layout_hdr *lo);
57static void pnfs_free_returned_lsegs(struct pnfs_layout_hdr *lo,
58 struct list_head *free_me,
59 const struct pnfs_layout_range *range,
60 u32 seq);
61static bool pnfs_lseg_dec_and_remove_zero(struct pnfs_layout_segment *lseg,
62 struct list_head *tmp_list);
63
64
65static struct pnfs_layoutdriver_type *
66find_pnfs_driver_locked(u32 id)
67{
68 struct pnfs_layoutdriver_type *local;
69
70 list_for_each_entry(local, &pnfs_modules_tbl, pnfs_tblid)
71 if (local->id == id)
72 goto out;
73 local = NULL;
74out:
75 dprintk("%s: Searching for id %u, found %p\n", __func__, id, local);
76 return local;
77}
78
79static struct pnfs_layoutdriver_type *
80find_pnfs_driver(u32 id)
81{
82 struct pnfs_layoutdriver_type *local;
83
84 spin_lock(&pnfs_spinlock);
85 local = find_pnfs_driver_locked(id);
86 if (local != NULL && !try_module_get(local->owner)) {
87 dprintk("%s: Could not grab reference on module\n", __func__);
88 local = NULL;
89 }
90 spin_unlock(&pnfs_spinlock);
91 return local;
92}
93
94void
95unset_pnfs_layoutdriver(struct nfs_server *nfss)
96{
97 if (nfss->pnfs_curr_ld) {
98 if (nfss->pnfs_curr_ld->clear_layoutdriver)
99 nfss->pnfs_curr_ld->clear_layoutdriver(nfss);
100
101 if (atomic_dec_and_test(&nfss->nfs_client->cl_mds_count))
102 nfs4_deviceid_purge_client(nfss->nfs_client);
103 module_put(nfss->pnfs_curr_ld->owner);
104 }
105 nfss->pnfs_curr_ld = NULL;
106}
107
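/*
 * When the MDS advertises several layout types, try the corresponding
 * drivers in this order of preference; the zero entry terminates the list.
 */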
115static const u32 ld_prefs[] = {
116 LAYOUT_SCSI,
117 LAYOUT_BLOCK_VOLUME,
118 LAYOUT_OSD2_OBJECTS,
119 LAYOUT_FLEX_FILES,
120 LAYOUT_NFSV4_1_FILES,
121 0
122};
123
124static int
125ld_cmp(const void *e1, const void *e2)
126{
127 u32 ld1 = *((u32 *)e1);
128 u32 ld2 = *((u32 *)e2);
129 int i;
130
131 for (i = 0; ld_prefs[i] != 0; i++) {
132 if (ld1 == ld_prefs[i])
133 return -1;
134
135 if (ld2 == ld_prefs[i])
136 return 1;
137 }
138 return 0;
139}
140
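/*
 * Try to set the server's pnfs module to one of the layout types advertised
 * by the MDS, in order of preference, loading the driver module on demand.
 * Fall back to plain NFSv4 I/O through the MDS if no driver can be found
 * or initialized.
 */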
147void
148set_pnfs_layoutdriver(struct nfs_server *server, const struct nfs_fh *mntfh,
149 struct nfs_fsinfo *fsinfo)
150{
151 struct pnfs_layoutdriver_type *ld_type = NULL;
152 u32 id;
153 int i;
154
155 if (fsinfo->nlayouttypes == 0)
156 goto out_no_driver;
157 if (!(server->nfs_client->cl_exchange_flags &
158 (EXCHGID4_FLAG_USE_NON_PNFS | EXCHGID4_FLAG_USE_PNFS_MDS))) {
159 printk(KERN_ERR "NFS: %s: cl_exchange_flags 0x%x\n",
160 __func__, server->nfs_client->cl_exchange_flags);
161 goto out_no_driver;
162 }
163
164 sort(fsinfo->layouttype, fsinfo->nlayouttypes,
165 sizeof(*fsinfo->layouttype), ld_cmp, NULL);
166
167 for (i = 0; i < fsinfo->nlayouttypes; i++) {
168 id = fsinfo->layouttype[i];
169 ld_type = find_pnfs_driver(id);
170 if (!ld_type) {
171 request_module("%s-%u", LAYOUT_NFSV4_1_MODULE_PREFIX,
172 id);
173 ld_type = find_pnfs_driver(id);
174 }
175 if (ld_type)
176 break;
177 }
178
179 if (!ld_type) {
180 dprintk("%s: No pNFS module found!\n", __func__);
181 goto out_no_driver;
182 }
183
184 server->pnfs_curr_ld = ld_type;
185 if (ld_type->set_layoutdriver
186 && ld_type->set_layoutdriver(server, mntfh)) {
187 printk(KERN_ERR "NFS: %s: Error initializing pNFS layout "
188 "driver %u.\n", __func__, id);
189 module_put(ld_type->owner);
190 goto out_no_driver;
191 }
192
193 atomic_inc(&server->nfs_client->cl_mds_count);
194
195 dprintk("%s: pNFS module for %u set\n", __func__, id);
196 return;
197
198out_no_driver:
199 dprintk("%s: Using NFSv4 I/O\n", __func__);
200 server->pnfs_curr_ld = NULL;
201}
202
203int
204pnfs_register_layoutdriver(struct pnfs_layoutdriver_type *ld_type)
205{
206 int status = -EINVAL;
207 struct pnfs_layoutdriver_type *tmp;
208
209 if (ld_type->id == 0) {
210 printk(KERN_ERR "NFS: %s id 0 is reserved\n", __func__);
211 return status;
212 }
213 if (!ld_type->alloc_lseg || !ld_type->free_lseg) {
214 printk(KERN_ERR "NFS: %s Layout driver must provide "
215 "alloc_lseg and free_lseg.\n", __func__);
216 return status;
217 }
218
219 spin_lock(&pnfs_spinlock);
220 tmp = find_pnfs_driver_locked(ld_type->id);
221 if (!tmp) {
222 list_add(&ld_type->pnfs_tblid, &pnfs_modules_tbl);
223 status = 0;
224 dprintk("%s Registering id:%u name:%s\n", __func__, ld_type->id,
225 ld_type->name);
226 } else {
227 printk(KERN_ERR "NFS: %s Module with id %d already loaded!\n",
228 __func__, ld_type->id);
229 }
230 spin_unlock(&pnfs_spinlock);
231
232 return status;
233}
234EXPORT_SYMBOL_GPL(pnfs_register_layoutdriver);
235
236void
237pnfs_unregister_layoutdriver(struct pnfs_layoutdriver_type *ld_type)
238{
239 dprintk("%s Deregistering id:%u\n", __func__, ld_type->id);
240 spin_lock(&pnfs_spinlock);
241 list_del(&ld_type->pnfs_tblid);
242 spin_unlock(&pnfs_spinlock);
243}
244EXPORT_SYMBOL_GPL(pnfs_unregister_layoutdriver);
245
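/*
 * pNFS client layout cache
 */

/* Take an additional reference on the layout header */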
251void
252pnfs_get_layout_hdr(struct pnfs_layout_hdr *lo)
253{
254 atomic_inc(&lo->plh_refcount);
255}
256
257static struct pnfs_layout_hdr *
258pnfs_alloc_layout_hdr(struct inode *ino, gfp_t gfp_flags)
259{
260 struct pnfs_layoutdriver_type *ld = NFS_SERVER(ino)->pnfs_curr_ld;
261 return ld->alloc_layout_hdr(ino, gfp_flags);
262}
263
264static void
265pnfs_free_layout_hdr(struct pnfs_layout_hdr *lo)
266{
267 struct nfs_server *server = NFS_SERVER(lo->plh_inode);
268 struct pnfs_layoutdriver_type *ld = server->pnfs_curr_ld;
269
270 if (!list_empty(&lo->plh_layouts)) {
271 struct nfs_client *clp = server->nfs_client;
272
273 spin_lock(&clp->cl_lock);
274 list_del_init(&lo->plh_layouts);
275 spin_unlock(&clp->cl_lock);
276 }
277 put_rpccred(lo->plh_lc_cred);
278 return ld->free_layout_hdr(lo);
279}
280
281static void
282pnfs_detach_layout_hdr(struct pnfs_layout_hdr *lo)
283{
284 struct nfs_inode *nfsi = NFS_I(lo->plh_inode);
285 dprintk("%s: freeing layout cache %p\n", __func__, lo);
286 nfsi->layout = NULL;
287
288 nfsi->write_io = 0;
289 nfsi->read_io = 0;
290}
291
292void
293pnfs_put_layout_hdr(struct pnfs_layout_hdr *lo)
294{
295 struct inode *inode;
296
297 if (!lo)
298 return;
299 inode = lo->plh_inode;
300 pnfs_layoutreturn_before_put_layout_hdr(lo);
301
302 if (atomic_dec_and_lock(&lo->plh_refcount, &inode->i_lock)) {
303 if (!list_empty(&lo->plh_segs))
304 WARN_ONCE(1, "NFS: BUG unfreed layout segments.\n");
305 pnfs_detach_layout_hdr(lo);
306 spin_unlock(&inode->i_lock);
307 pnfs_free_layout_hdr(lo);
308 }
309}
310
311static void
312pnfs_set_plh_return_info(struct pnfs_layout_hdr *lo, enum pnfs_iomode iomode,
313 u32 seq)
314{
315 if (lo->plh_return_iomode != 0 && lo->plh_return_iomode != iomode)
316 iomode = IOMODE_ANY;
317 lo->plh_return_iomode = iomode;
318 set_bit(NFS_LAYOUT_RETURN_REQUESTED, &lo->plh_flags);
319 if (seq != 0) {
320 WARN_ON_ONCE(lo->plh_return_seq != 0 && lo->plh_return_seq != seq);
321 lo->plh_return_seq = seq;
322 }
323}
324
325static void
326pnfs_clear_layoutreturn_info(struct pnfs_layout_hdr *lo)
327{
328 struct pnfs_layout_segment *lseg;
329 lo->plh_return_iomode = 0;
330 lo->plh_return_seq = 0;
331 clear_bit(NFS_LAYOUT_RETURN_REQUESTED, &lo->plh_flags);
332 list_for_each_entry(lseg, &lo->plh_segs, pls_list) {
333 if (!test_bit(NFS_LSEG_LAYOUTRETURN, &lseg->pls_flags))
334 continue;
335 pnfs_set_plh_return_info(lo, lseg->pls_range.iomode, 0);
336 }
337}
338
339static void pnfs_clear_layoutreturn_waitbit(struct pnfs_layout_hdr *lo)
340{
341 clear_bit_unlock(NFS_LAYOUT_RETURN, &lo->plh_flags);
342 clear_bit(NFS_LAYOUT_RETURN_LOCK, &lo->plh_flags);
343 smp_mb__after_atomic();
344 wake_up_bit(&lo->plh_flags, NFS_LAYOUT_RETURN);
345 rpc_wake_up(&NFS_SERVER(lo->plh_inode)->roc_rpcwaitq);
346}
347
348static void
349pnfs_clear_lseg_state(struct pnfs_layout_segment *lseg,
350 struct list_head *free_me)
351{
352 clear_bit(NFS_LSEG_ROC, &lseg->pls_flags);
353 clear_bit(NFS_LSEG_LAYOUTRETURN, &lseg->pls_flags);
354 if (test_and_clear_bit(NFS_LSEG_VALID, &lseg->pls_flags))
355 pnfs_lseg_dec_and_remove_zero(lseg, free_me);
356 if (test_and_clear_bit(NFS_LSEG_LAYOUTCOMMIT, &lseg->pls_flags))
357 pnfs_lseg_dec_and_remove_zero(lseg, free_me);
358}
359
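/*
 * Update the seqid of @dst to match the cached layout stateid, provided
 * both refer to the same stateid "other" value.
 */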
363bool nfs4_refresh_layout_stateid(nfs4_stateid *dst, struct inode *inode)
364{
365 struct pnfs_layout_hdr *lo;
366 bool ret = false;
367
368 spin_lock(&inode->i_lock);
369 lo = NFS_I(inode)->layout;
370 if (lo && nfs4_stateid_match_other(dst, &lo->plh_stateid)) {
371 dst->seqid = lo->plh_stateid.seqid;
372 ret = true;
373 }
374 spin_unlock(&inode->i_lock);
375 return ret;
376}
377
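/*
 * Mark the cached layout stateid invalid and clear the state of every
 * layout segment, collecting segments whose last reference is dropped on
 * @lseg_list for later freeing.  Caller must hold inode->i_lock.
 *
 * Returns non-zero if some segments are still referenced and therefore
 * remain on the layout's segment list.
 */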
385int
386pnfs_mark_layout_stateid_invalid(struct pnfs_layout_hdr *lo,
387 struct list_head *lseg_list)
388{
389 struct pnfs_layout_range range = {
390 .iomode = IOMODE_ANY,
391 .offset = 0,
392 .length = NFS4_MAX_UINT64,
393 };
394 struct pnfs_layout_segment *lseg, *next;
395
396 set_bit(NFS_LAYOUT_INVALID_STID, &lo->plh_flags);
397 list_for_each_entry_safe(lseg, next, &lo->plh_segs, pls_list)
398 pnfs_clear_lseg_state(lseg, lseg_list);
399 pnfs_clear_layoutreturn_info(lo);
400 pnfs_free_returned_lsegs(lo, lseg_list, &range, 0);
401 if (test_bit(NFS_LAYOUT_RETURN, &lo->plh_flags) &&
402 !test_and_set_bit(NFS_LAYOUT_RETURN_LOCK, &lo->plh_flags))
403 pnfs_clear_layoutreturn_waitbit(lo);
404 return !list_empty(&lo->plh_segs);
405}
406
407static int
408pnfs_iomode_to_fail_bit(u32 iomode)
409{
410 return iomode == IOMODE_RW ?
411 NFS_LAYOUT_RW_FAILED : NFS_LAYOUT_RO_FAILED;
412}
413
414static void
415pnfs_layout_set_fail_bit(struct pnfs_layout_hdr *lo, int fail_bit)
416{
417 lo->plh_retry_timestamp = jiffies;
418 if (!test_and_set_bit(fail_bit, &lo->plh_flags))
419 atomic_inc(&lo->plh_refcount);
420}
421
422static void
423pnfs_layout_clear_fail_bit(struct pnfs_layout_hdr *lo, int fail_bit)
424{
425 if (test_and_clear_bit(fail_bit, &lo->plh_flags))
426 atomic_dec(&lo->plh_refcount);
427}
428
429static void
430pnfs_layout_io_set_failed(struct pnfs_layout_hdr *lo, u32 iomode)
431{
432 struct inode *inode = lo->plh_inode;
433 struct pnfs_layout_range range = {
434 .iomode = iomode,
435 .offset = 0,
436 .length = NFS4_MAX_UINT64,
437 };
438 LIST_HEAD(head);
439
440 spin_lock(&inode->i_lock);
441 pnfs_layout_set_fail_bit(lo, pnfs_iomode_to_fail_bit(iomode));
442 pnfs_mark_matching_lsegs_invalid(lo, &head, &range, 0);
443 spin_unlock(&inode->i_lock);
444 pnfs_free_lseg_list(&head);
445 dprintk("%s Setting layout IOMODE_%s fail bit\n", __func__,
446 iomode == IOMODE_RW ? "RW" : "READ");
447}
448
449static bool
450pnfs_layout_io_test_failed(struct pnfs_layout_hdr *lo, u32 iomode)
451{
452 unsigned long start, end;
453 int fail_bit = pnfs_iomode_to_fail_bit(iomode);
454
455 if (test_bit(fail_bit, &lo->plh_flags) == 0)
456 return false;
457 end = jiffies;
458 start = end - PNFS_LAYOUTGET_RETRY_TIMEOUT;
459 if (!time_in_range(lo->plh_retry_timestamp, start, end)) {
460
461 pnfs_layout_clear_fail_bit(lo, fail_bit);
462 return false;
463 }
464 return true;
465}
466
467static void
468pnfs_init_lseg(struct pnfs_layout_hdr *lo, struct pnfs_layout_segment *lseg,
469 const struct pnfs_layout_range *range,
470 const nfs4_stateid *stateid)
471{
472 INIT_LIST_HEAD(&lseg->pls_list);
473 INIT_LIST_HEAD(&lseg->pls_lc_list);
474 atomic_set(&lseg->pls_refcount, 1);
475 set_bit(NFS_LSEG_VALID, &lseg->pls_flags);
476 lseg->pls_layout = lo;
477 lseg->pls_range = *range;
478 lseg->pls_seq = be32_to_cpu(stateid->seqid);
479}
480
481static void pnfs_free_lseg(struct pnfs_layout_segment *lseg)
482{
483 if (lseg != NULL) {
484 struct inode *inode = lseg->pls_layout->plh_inode;
485 NFS_SERVER(inode)->pnfs_curr_ld->free_lseg(lseg);
486 }
487}
488
489static void
490pnfs_layout_remove_lseg(struct pnfs_layout_hdr *lo,
491 struct pnfs_layout_segment *lseg)
492{
493 WARN_ON(test_bit(NFS_LSEG_VALID, &lseg->pls_flags));
494 list_del_init(&lseg->pls_list);
495
496 atomic_dec(&lo->plh_refcount);
497 if (test_bit(NFS_LSEG_LAYOUTRETURN, &lseg->pls_flags))
498 return;
499 if (list_empty(&lo->plh_segs) &&
500 !test_bit(NFS_LAYOUT_RETURN_REQUESTED, &lo->plh_flags) &&
501 !test_bit(NFS_LAYOUT_RETURN, &lo->plh_flags)) {
502 if (atomic_read(&lo->plh_outstanding) == 0)
503 set_bit(NFS_LAYOUT_INVALID_STID, &lo->plh_flags);
504 clear_bit(NFS_LAYOUT_BULK_RECALL, &lo->plh_flags);
505 }
506}
507
508static bool
509pnfs_cache_lseg_for_layoutreturn(struct pnfs_layout_hdr *lo,
510 struct pnfs_layout_segment *lseg)
511{
512 if (test_and_clear_bit(NFS_LSEG_LAYOUTRETURN, &lseg->pls_flags) &&
513 pnfs_layout_is_valid(lo)) {
514 pnfs_set_plh_return_info(lo, lseg->pls_range.iomode, 0);
515 list_move_tail(&lseg->pls_list, &lo->plh_return_segs);
516 return true;
517 }
518 return false;
519}
520
521void
522pnfs_put_lseg(struct pnfs_layout_segment *lseg)
523{
524 struct pnfs_layout_hdr *lo;
525 struct inode *inode;
526
527 if (!lseg)
528 return;
529
530 dprintk("%s: lseg %p ref %d valid %d\n", __func__, lseg,
531 atomic_read(&lseg->pls_refcount),
532 test_bit(NFS_LSEG_VALID, &lseg->pls_flags));
533
534 lo = lseg->pls_layout;
535 inode = lo->plh_inode;
536
537 if (atomic_dec_and_lock(&lseg->pls_refcount, &inode->i_lock)) {
538 if (test_bit(NFS_LSEG_VALID, &lseg->pls_flags)) {
539 spin_unlock(&inode->i_lock);
540 return;
541 }
542 pnfs_get_layout_hdr(lo);
543 pnfs_layout_remove_lseg(lo, lseg);
544 if (pnfs_cache_lseg_for_layoutreturn(lo, lseg))
545 lseg = NULL;
546 spin_unlock(&inode->i_lock);
547 pnfs_free_lseg(lseg);
548 pnfs_put_layout_hdr(lo);
549 }
550}
551EXPORT_SYMBOL_GPL(pnfs_put_lseg);
552
553static void pnfs_free_lseg_async_work(struct work_struct *work)
554{
555 struct pnfs_layout_segment *lseg;
556 struct pnfs_layout_hdr *lo;
557
558 lseg = container_of(work, struct pnfs_layout_segment, pls_work);
559 lo = lseg->pls_layout;
560
561 pnfs_free_lseg(lseg);
562 pnfs_put_layout_hdr(lo);
563}
564
565static void pnfs_free_lseg_async(struct pnfs_layout_segment *lseg)
566{
567 INIT_WORK(&lseg->pls_work, pnfs_free_lseg_async_work);
568 schedule_work(&lseg->pls_work);
569}
570
571void
572pnfs_put_lseg_locked(struct pnfs_layout_segment *lseg)
573{
574 if (!lseg)
575 return;
576
577 assert_spin_locked(&lseg->pls_layout->plh_inode->i_lock);
578
579 dprintk("%s: lseg %p ref %d valid %d\n", __func__, lseg,
580 atomic_read(&lseg->pls_refcount),
581 test_bit(NFS_LSEG_VALID, &lseg->pls_flags));
582 if (atomic_dec_and_test(&lseg->pls_refcount)) {
583 struct pnfs_layout_hdr *lo = lseg->pls_layout;
584 if (test_bit(NFS_LSEG_VALID, &lseg->pls_flags))
585 return;
586 pnfs_layout_remove_lseg(lo, lseg);
587 if (!pnfs_cache_lseg_for_layoutreturn(lo, lseg)) {
588 pnfs_get_layout_hdr(lo);
589 pnfs_free_lseg_async(lseg);
590 }
591 }
592}
593EXPORT_SYMBOL_GPL(pnfs_put_lseg_locked);
594
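/*
 * is l2 fully contained in l1?
 *   start1                             end1
 *   [----------------------------------)
 *           start2           end2
 *           [----------------)
 */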
602static bool
603pnfs_lseg_range_contained(const struct pnfs_layout_range *l1,
604 const struct pnfs_layout_range *l2)
605{
606 u64 start1 = l1->offset;
607 u64 end1 = pnfs_end_offset(start1, l1->length);
608 u64 start2 = l2->offset;
609 u64 end2 = pnfs_end_offset(start2, l2->length);
610
611 return (start1 <= start2) && (end1 >= end2);
612}
613
614static bool pnfs_lseg_dec_and_remove_zero(struct pnfs_layout_segment *lseg,
615 struct list_head *tmp_list)
616{
617 if (!atomic_dec_and_test(&lseg->pls_refcount))
618 return false;
619 pnfs_layout_remove_lseg(lseg->pls_layout, lseg);
620 list_add(&lseg->pls_list, tmp_list);
621 return true;
622}
623
624
625static int mark_lseg_invalid(struct pnfs_layout_segment *lseg,
626 struct list_head *tmp_list)
627{
628 int rv = 0;
629
630 if (test_and_clear_bit(NFS_LSEG_VALID, &lseg->pls_flags)) {
635 dprintk("%s: lseg %p ref %d\n", __func__, lseg,
636 atomic_read(&lseg->pls_refcount));
637 if (pnfs_lseg_dec_and_remove_zero(lseg, tmp_list))
638 rv = 1;
639 }
640 return rv;
641}
642
643
644
645
646
647static bool pnfs_seqid_is_newer(u32 s1, u32 s2)
648{
649 return (s32)(s1 - s2) > 0;
650}
651
652static bool
653pnfs_should_free_range(const struct pnfs_layout_range *lseg_range,
654 const struct pnfs_layout_range *recall_range)
655{
656 return (recall_range->iomode == IOMODE_ANY ||
657 lseg_range->iomode == recall_range->iomode) &&
658 pnfs_lseg_range_intersecting(lseg_range, recall_range);
659}
660
661static bool
662pnfs_match_lseg_recall(const struct pnfs_layout_segment *lseg,
663 const struct pnfs_layout_range *recall_range,
664 u32 seq)
665{
666 if (seq != 0 && pnfs_seqid_is_newer(lseg->pls_seq, seq))
667 return false;
668 if (recall_range == NULL)
669 return true;
670 return pnfs_should_free_range(&lseg->pls_range, recall_range);
671}
672
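/*
 * pnfs_mark_matching_lsegs_invalid - tear down lsegs or mark them for return
 * @lo: layout header containing the lsegs
 * @tmp_list: list head where doomed lsegs should go
 * @recall_range: optional recall range argument to match (may be NULL)
 * @seq: only invalidate lsegs obtained prior to this sequence (may be 0)
 *
 * Returns the number of matching lsegs that are still referenced and
 * therefore could not be freed immediately.
 */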
688int
689pnfs_mark_matching_lsegs_invalid(struct pnfs_layout_hdr *lo,
690 struct list_head *tmp_list,
691 const struct pnfs_layout_range *recall_range,
692 u32 seq)
693{
694 struct pnfs_layout_segment *lseg, *next;
695 int remaining = 0;
696
697 dprintk("%s:Begin lo %p\n", __func__, lo);
698
699 if (list_empty(&lo->plh_segs))
700 return 0;
701 list_for_each_entry_safe(lseg, next, &lo->plh_segs, pls_list)
702 if (pnfs_match_lseg_recall(lseg, recall_range, seq)) {
703 dprintk("%s: freeing lseg %p iomode %d seq %u "
704 "offset %llu length %llu\n", __func__,
705 lseg, lseg->pls_range.iomode, lseg->pls_seq,
706 lseg->pls_range.offset, lseg->pls_range.length);
707 if (!mark_lseg_invalid(lseg, tmp_list))
708 remaining++;
709 }
710 dprintk("%s:Return %i\n", __func__, remaining);
711 return remaining;
712}
713
714static void
715pnfs_free_returned_lsegs(struct pnfs_layout_hdr *lo,
716 struct list_head *free_me,
717 const struct pnfs_layout_range *range,
718 u32 seq)
719{
720 struct pnfs_layout_segment *lseg, *next;
721
722 list_for_each_entry_safe(lseg, next, &lo->plh_return_segs, pls_list) {
723 if (pnfs_match_lseg_recall(lseg, range, seq))
724 list_move_tail(&lseg->pls_list, free_me);
725 }
726}
727
728
729void
730pnfs_free_lseg_list(struct list_head *free_me)
731{
732 struct pnfs_layout_segment *lseg, *tmp;
733
734 if (list_empty(free_me))
735 return;
736
737 list_for_each_entry_safe(lseg, tmp, free_me, pls_list) {
738 list_del(&lseg->pls_list);
739 pnfs_free_lseg(lseg);
740 }
741}
742
743void
744pnfs_destroy_layout(struct nfs_inode *nfsi)
745{
746 struct pnfs_layout_hdr *lo;
747 LIST_HEAD(tmp_list);
748
749 spin_lock(&nfsi->vfs_inode.i_lock);
750 lo = nfsi->layout;
751 if (lo) {
752 pnfs_get_layout_hdr(lo);
753 pnfs_mark_layout_stateid_invalid(lo, &tmp_list);
754 pnfs_layout_clear_fail_bit(lo, NFS_LAYOUT_RO_FAILED);
755 pnfs_layout_clear_fail_bit(lo, NFS_LAYOUT_RW_FAILED);
756 spin_unlock(&nfsi->vfs_inode.i_lock);
757 pnfs_free_lseg_list(&tmp_list);
758 nfs_commit_inode(&nfsi->vfs_inode, 0);
759 pnfs_put_layout_hdr(lo);
760 } else
761 spin_unlock(&nfsi->vfs_inode.i_lock);
762}
763EXPORT_SYMBOL_GPL(pnfs_destroy_layout);
764
765static bool
766pnfs_layout_add_bulk_destroy_list(struct inode *inode,
767 struct list_head *layout_list)
768{
769 struct pnfs_layout_hdr *lo;
770 bool ret = false;
771
772 spin_lock(&inode->i_lock);
773 lo = NFS_I(inode)->layout;
774 if (lo != NULL && list_empty(&lo->plh_bulk_destroy)) {
775 pnfs_get_layout_hdr(lo);
776 list_add(&lo->plh_bulk_destroy, layout_list);
777 ret = true;
778 }
779 spin_unlock(&inode->i_lock);
780 return ret;
781}
782
783
784static int
785pnfs_layout_bulk_destroy_byserver_locked(struct nfs_client *clp,
786 struct nfs_server *server,
787 struct list_head *layout_list)
788 __must_hold(&clp->cl_lock)
789 __must_hold(RCU)
790{
791 struct pnfs_layout_hdr *lo, *next;
792 struct inode *inode;
793
794 list_for_each_entry_safe(lo, next, &server->layouts, plh_layouts) {
795 if (test_bit(NFS_LAYOUT_INVALID_STID, &lo->plh_flags) ||
796 test_bit(NFS_LAYOUT_INODE_FREEING, &lo->plh_flags) ||
797 !list_empty(&lo->plh_bulk_destroy))
798 continue;
799
800 if (!nfs_sb_active(server->super))
801 break;
802 inode = igrab(lo->plh_inode);
803 if (inode != NULL) {
804 list_del_init(&lo->plh_layouts);
805 if (pnfs_layout_add_bulk_destroy_list(inode,
806 layout_list))
807 continue;
808 rcu_read_unlock();
809 spin_unlock(&clp->cl_lock);
810 iput(inode);
811 } else {
812 rcu_read_unlock();
813 spin_unlock(&clp->cl_lock);
814 set_bit(NFS_LAYOUT_INODE_FREEING, &lo->plh_flags);
815 }
816 nfs_sb_deactive(server->super);
817 spin_lock(&clp->cl_lock);
818 rcu_read_lock();
819 return -EAGAIN;
820 }
821 return 0;
822}
823
824static int
825pnfs_layout_free_bulk_destroy_list(struct list_head *layout_list,
826 bool is_bulk_recall)
827{
828 struct pnfs_layout_hdr *lo;
829 struct inode *inode;
830 LIST_HEAD(lseg_list);
831 int ret = 0;
832
833 while (!list_empty(layout_list)) {
834 lo = list_entry(layout_list->next, struct pnfs_layout_hdr,
835 plh_bulk_destroy);
836 dprintk("%s freeing layout for inode %lu\n", __func__,
837 lo->plh_inode->i_ino);
838 inode = lo->plh_inode;
839
840 pnfs_layoutcommit_inode(inode, false);
841
842 spin_lock(&inode->i_lock);
843 list_del_init(&lo->plh_bulk_destroy);
844 if (pnfs_mark_layout_stateid_invalid(lo, &lseg_list)) {
845 if (is_bulk_recall)
846 set_bit(NFS_LAYOUT_BULK_RECALL, &lo->plh_flags);
847 ret = -EAGAIN;
848 }
849 spin_unlock(&inode->i_lock);
850 pnfs_free_lseg_list(&lseg_list);
851
852 nfs_commit_inode(inode, 0);
853 pnfs_put_layout_hdr(lo);
854 nfs_iput_and_deactive(inode);
855 }
856 return ret;
857}
858
859int
860pnfs_destroy_layouts_byfsid(struct nfs_client *clp,
861 struct nfs_fsid *fsid,
862 bool is_recall)
863{
864 struct nfs_server *server;
865 LIST_HEAD(layout_list);
866
867 spin_lock(&clp->cl_lock);
868 rcu_read_lock();
869restart:
870 list_for_each_entry_rcu(server, &clp->cl_superblocks, client_link) {
871 if (memcmp(&server->fsid, fsid, sizeof(*fsid)) != 0)
872 continue;
873 if (pnfs_layout_bulk_destroy_byserver_locked(clp,
874 server,
875 &layout_list) != 0)
876 goto restart;
877 }
878 rcu_read_unlock();
879 spin_unlock(&clp->cl_lock);
880
881 if (list_empty(&layout_list))
882 return 0;
883 return pnfs_layout_free_bulk_destroy_list(&layout_list, is_recall);
884}
885
886int
887pnfs_destroy_layouts_byclid(struct nfs_client *clp,
888 bool is_recall)
889{
890 struct nfs_server *server;
891 LIST_HEAD(layout_list);
892
893 spin_lock(&clp->cl_lock);
894 rcu_read_lock();
895restart:
896 list_for_each_entry_rcu(server, &clp->cl_superblocks, client_link) {
897 if (pnfs_layout_bulk_destroy_byserver_locked(clp,
898 server,
899 &layout_list) != 0)
900 goto restart;
901 }
902 rcu_read_unlock();
903 spin_unlock(&clp->cl_lock);
904
905 if (list_empty(&layout_list))
906 return 0;
907 return pnfs_layout_free_bulk_destroy_list(&layout_list, is_recall);
908}
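/*
 * Purge all cached device IDs and destroy every layout held by this client;
 * used when the client's lease state is being torn down or recovered.
 */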
914void
915pnfs_destroy_all_layouts(struct nfs_client *clp)
916{
917 nfs4_deviceid_mark_client_invalid(clp);
918 nfs4_deviceid_purge_client(clp);
919
920 pnfs_destroy_layouts_byclid(clp, false);
921}
922
923
924void
925pnfs_set_layout_stateid(struct pnfs_layout_hdr *lo, const nfs4_stateid *new,
926 bool update_barrier)
927{
928 u32 oldseq, newseq, new_barrier = 0;
929
930 oldseq = be32_to_cpu(lo->plh_stateid.seqid);
931 newseq = be32_to_cpu(new->seqid);
932
933 if (!pnfs_layout_is_valid(lo)) {
934 nfs4_stateid_copy(&lo->plh_stateid, new);
935 lo->plh_barrier = newseq;
936 pnfs_clear_layoutreturn_info(lo);
937 clear_bit(NFS_LAYOUT_INVALID_STID, &lo->plh_flags);
938 return;
939 }
940 if (pnfs_seqid_is_newer(newseq, oldseq)) {
941 nfs4_stateid_copy(&lo->plh_stateid, new);
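		/*
		 * Because of wraparound, keep the barrier "close" to the
		 * current seqid: allow for the layoutgets still outstanding.
		 */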
946 new_barrier = newseq - atomic_read(&lo->plh_outstanding);
947 }
948 if (update_barrier)
949 new_barrier = be32_to_cpu(new->seqid);
950 else if (new_barrier == 0)
951 return;
952 if (pnfs_seqid_is_newer(new_barrier, lo->plh_barrier))
953 lo->plh_barrier = new_barrier;
954}
955
956static bool
957pnfs_layout_stateid_blocked(const struct pnfs_layout_hdr *lo,
958 const nfs4_stateid *stateid)
959{
960 u32 seqid = be32_to_cpu(stateid->seqid);
961
962 return !pnfs_seqid_is_newer(seqid, lo->plh_barrier);
963}
964
965
966static bool
967pnfs_layoutgets_blocked(const struct pnfs_layout_hdr *lo)
968{
969 return lo->plh_block_lgets ||
970 test_bit(NFS_LAYOUT_BULK_RECALL, &lo->plh_flags);
971}
972
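/*
 * Issue a LAYOUTGET for the given range.  The minimum acceptable length is
 * clamped to one page (or to end-of-file for reads), and the layout segment
 * returned by the server is handed back to the caller.
 */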
979static struct pnfs_layout_segment *
980send_layoutget(struct pnfs_layout_hdr *lo,
981 struct nfs_open_context *ctx,
982 nfs4_stateid *stateid,
983 const struct pnfs_layout_range *range,
984 long *timeout, gfp_t gfp_flags)
985{
986 struct inode *ino = lo->plh_inode;
987 struct nfs_server *server = NFS_SERVER(ino);
988 struct nfs4_layoutget *lgp;
989 loff_t i_size;
990
991 dprintk("--> %s\n", __func__);
992
998 lgp = kzalloc(sizeof(*lgp), gfp_flags);
999 if (lgp == NULL)
1000 return ERR_PTR(-ENOMEM);
1001
1002 i_size = i_size_read(ino);
1003
	lgp->args.minlength = PAGE_SIZE;
1005 if (lgp->args.minlength > range->length)
1006 lgp->args.minlength = range->length;
1007 if (range->iomode == IOMODE_READ) {
1008 if (range->offset >= i_size)
1009 lgp->args.minlength = 0;
1010 else if (i_size - range->offset < lgp->args.minlength)
1011 lgp->args.minlength = i_size - range->offset;
1012 }
1013 lgp->args.maxcount = PNFS_LAYOUT_MAXSIZE;
1014 pnfs_copy_range(&lgp->args.range, range);
1015 lgp->args.type = server->pnfs_curr_ld->id;
1016 lgp->args.inode = ino;
1017 lgp->args.ctx = get_nfs_open_context(ctx);
1018 nfs4_stateid_copy(&lgp->args.stateid, stateid);
1019 lgp->gfp_flags = gfp_flags;
1020 lgp->cred = lo->plh_lc_cred;
1021
1022 return nfs4_proc_layoutget(lgp, timeout, gfp_flags);
1023}
1024
1025static void pnfs_clear_layoutcommit(struct inode *inode,
1026 struct list_head *head)
1027{
1028 struct nfs_inode *nfsi = NFS_I(inode);
1029 struct pnfs_layout_segment *lseg, *tmp;
1030
1031 if (!test_and_clear_bit(NFS_INO_LAYOUTCOMMIT, &nfsi->flags))
1032 return;
1033 list_for_each_entry_safe(lseg, tmp, &nfsi->layout->plh_segs, pls_list) {
1034 if (!test_and_clear_bit(NFS_LSEG_LAYOUTCOMMIT, &lseg->pls_flags))
1035 continue;
1036 pnfs_lseg_dec_and_remove_zero(lseg, head);
1037 }
1038}
1039
1040void pnfs_layoutreturn_free_lsegs(struct pnfs_layout_hdr *lo,
1041 const nfs4_stateid *arg_stateid,
1042 const struct pnfs_layout_range *range,
1043 const nfs4_stateid *stateid)
1044{
1045 struct inode *inode = lo->plh_inode;
1046 LIST_HEAD(freeme);
1047
1048 spin_lock(&inode->i_lock);
1049 if (!pnfs_layout_is_valid(lo) || !arg_stateid ||
1050 !nfs4_stateid_match_other(&lo->plh_stateid, arg_stateid))
1051 goto out_unlock;
1052 if (stateid) {
1053 u32 seq = be32_to_cpu(arg_stateid->seqid);
1054
1055 pnfs_mark_matching_lsegs_invalid(lo, &freeme, range, seq);
1056 pnfs_free_returned_lsegs(lo, &freeme, range, seq);
1057 pnfs_set_layout_stateid(lo, stateid, true);
1058 } else
1059 pnfs_mark_layout_stateid_invalid(lo, &freeme);
1060out_unlock:
1061 pnfs_clear_layoutreturn_waitbit(lo);
1062 spin_unlock(&inode->i_lock);
1063 pnfs_free_lseg_list(&freeme);
1064
1065}
1066
1067static bool
1068pnfs_prepare_layoutreturn(struct pnfs_layout_hdr *lo,
1069 nfs4_stateid *stateid,
1070 enum pnfs_iomode *iomode)
1071{
1072
1073 if (atomic_read(&lo->plh_outstanding) != 0)
1074 return false;
1075 if (test_and_set_bit(NFS_LAYOUT_RETURN_LOCK, &lo->plh_flags))
1076 return false;
1077 set_bit(NFS_LAYOUT_RETURN, &lo->plh_flags);
1078 pnfs_get_layout_hdr(lo);
1079 if (test_bit(NFS_LAYOUT_RETURN_REQUESTED, &lo->plh_flags)) {
1080 if (stateid != NULL) {
1081 nfs4_stateid_copy(stateid, &lo->plh_stateid);
1082 if (lo->plh_return_seq != 0)
1083 stateid->seqid = cpu_to_be32(lo->plh_return_seq);
1084 }
1085 if (iomode != NULL)
1086 *iomode = lo->plh_return_iomode;
1087 pnfs_clear_layoutreturn_info(lo);
1088 return true;
1089 }
1090 if (stateid != NULL)
1091 nfs4_stateid_copy(stateid, &lo->plh_stateid);
1092 if (iomode != NULL)
1093 *iomode = IOMODE_ANY;
1094 return true;
1095}
1096
1097static void
1098pnfs_init_layoutreturn_args(struct nfs4_layoutreturn_args *args,
1099 struct pnfs_layout_hdr *lo,
1100 const nfs4_stateid *stateid,
1101 enum pnfs_iomode iomode)
1102{
1103 struct inode *inode = lo->plh_inode;
1104
1105 args->layout_type = NFS_SERVER(inode)->pnfs_curr_ld->id;
1106 args->inode = inode;
1107 args->range.iomode = iomode;
1108 args->range.offset = 0;
1109 args->range.length = NFS4_MAX_UINT64;
1110 args->layout = lo;
1111 nfs4_stateid_copy(&args->stateid, stateid);
1112}
1113
1114static int
1115pnfs_send_layoutreturn(struct pnfs_layout_hdr *lo, const nfs4_stateid *stateid,
1116 enum pnfs_iomode iomode, bool sync)
1117{
1118 struct inode *ino = lo->plh_inode;
1119 struct pnfs_layoutdriver_type *ld = NFS_SERVER(ino)->pnfs_curr_ld;
1120 struct nfs4_layoutreturn *lrp;
1121 int status = 0;
1122
1123 lrp = kzalloc(sizeof(*lrp), GFP_NOFS);
1124 if (unlikely(lrp == NULL)) {
1125 status = -ENOMEM;
1126 spin_lock(&ino->i_lock);
1127 pnfs_clear_layoutreturn_waitbit(lo);
1128 spin_unlock(&ino->i_lock);
1129 pnfs_put_layout_hdr(lo);
1130 goto out;
1131 }
1132
1133 pnfs_init_layoutreturn_args(&lrp->args, lo, stateid, iomode);
1134 lrp->args.ld_private = &lrp->ld_private;
1135 lrp->clp = NFS_SERVER(ino)->nfs_client;
1136 lrp->cred = lo->plh_lc_cred;
1137 if (ld->prepare_layoutreturn)
1138 ld->prepare_layoutreturn(&lrp->args);
1139
1140 status = nfs4_proc_layoutreturn(lrp, sync);
1141out:
1142 dprintk("<-- %s status: %d\n", __func__, status);
1143 return status;
1144}
1145
1146
1147static bool
1148pnfs_layout_need_return(struct pnfs_layout_hdr *lo)
1149{
1150 struct pnfs_layout_segment *s;
1151
1152 if (!test_bit(NFS_LAYOUT_RETURN_REQUESTED, &lo->plh_flags))
1153 return false;
1154
1155
1156 list_for_each_entry(s, &lo->plh_segs, pls_list) {
1157 if (test_bit(NFS_LSEG_LAYOUTRETURN, &s->pls_flags))
1158 return false;
1159 }
1160
1161 return true;
1162}
1163
1164static void pnfs_layoutreturn_before_put_layout_hdr(struct pnfs_layout_hdr *lo)
1165{
	struct inode *inode = lo->plh_inode;
1167
1168 if (!test_bit(NFS_LAYOUT_RETURN_REQUESTED, &lo->plh_flags))
1169 return;
1170 spin_lock(&inode->i_lock);
1171 if (pnfs_layout_need_return(lo)) {
1172 nfs4_stateid stateid;
1173 enum pnfs_iomode iomode;
1174 bool send;
1175
1176 send = pnfs_prepare_layoutreturn(lo, &stateid, &iomode);
1177 spin_unlock(&inode->i_lock);
1178 if (send) {
1179
1180 pnfs_send_layoutreturn(lo, &stateid, iomode, false);
1181 }
1182 } else
1183 spin_unlock(&inode->i_lock);
1184}
1185
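/*
 * Initiate a LAYOUTRETURN(FILE) covering the whole file: invalidate all
 * cached layout segments and send the return, unless the layout was never
 * valid or there are no segments flagged for return.
 */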
1194int
1195_pnfs_return_layout(struct inode *ino)
1196{
1197 struct pnfs_layout_hdr *lo = NULL;
1198 struct nfs_inode *nfsi = NFS_I(ino);
1199 LIST_HEAD(tmp_list);
1200 nfs4_stateid stateid;
1201 int status = 0;
1202 bool send, valid_layout;
1203
1204 dprintk("NFS: %s for inode %lu\n", __func__, ino->i_ino);
1205
1206 spin_lock(&ino->i_lock);
1207 lo = nfsi->layout;
1208 if (!lo) {
1209 spin_unlock(&ino->i_lock);
1210 dprintk("NFS: %s no layout to return\n", __func__);
1211 goto out;
1212 }
1213
1214 pnfs_get_layout_hdr(lo);
1215
1216 if (test_bit(NFS_LAYOUT_RETURN_LOCK, &lo->plh_flags)) {
1217 spin_unlock(&ino->i_lock);
1218 if (wait_on_bit(&lo->plh_flags, NFS_LAYOUT_RETURN,
1219 TASK_UNINTERRUPTIBLE))
1220 goto out_put_layout_hdr;
1221 spin_lock(&ino->i_lock);
1222 }
1223 valid_layout = pnfs_layout_is_valid(lo);
1224 pnfs_clear_layoutcommit(ino, &tmp_list);
1225 pnfs_mark_matching_lsegs_invalid(lo, &tmp_list, NULL, 0);
1226
1227 if (NFS_SERVER(ino)->pnfs_curr_ld->return_range) {
1228 struct pnfs_layout_range range = {
1229 .iomode = IOMODE_ANY,
1230 .offset = 0,
1231 .length = NFS4_MAX_UINT64,
1232 };
1233 NFS_SERVER(ino)->pnfs_curr_ld->return_range(lo, &range);
1234 }
1235
1236
1237 if (!test_bit(NFS_LAYOUT_RETURN_REQUESTED, &lo->plh_flags) ||
1238 !valid_layout) {
1239 spin_unlock(&ino->i_lock);
1240 dprintk("NFS: %s no layout segments to return\n", __func__);
1241 goto out_put_layout_hdr;
1242 }
1243
1244 send = pnfs_prepare_layoutreturn(lo, &stateid, NULL);
1245 spin_unlock(&ino->i_lock);
1246 if (send)
1247 status = pnfs_send_layoutreturn(lo, &stateid, IOMODE_ANY, true);
1248out_put_layout_hdr:
1249 pnfs_free_lseg_list(&tmp_list);
1250 pnfs_put_layout_hdr(lo);
1251out:
1252 dprintk("<-- %s status: %d\n", __func__, status);
1253 return status;
1254}
1255EXPORT_SYMBOL_GPL(_pnfs_return_layout);
1256
1257int
1258pnfs_commit_and_return_layout(struct inode *inode)
1259{
1260 struct pnfs_layout_hdr *lo;
1261 int ret;
1262
1263 spin_lock(&inode->i_lock);
1264 lo = NFS_I(inode)->layout;
1265 if (lo == NULL) {
1266 spin_unlock(&inode->i_lock);
1267 return 0;
1268 }
1269 pnfs_get_layout_hdr(lo);
1270
1271 lo->plh_block_lgets++;
1272 spin_unlock(&inode->i_lock);
1273 filemap_fdatawait(inode->i_mapping);
1274 ret = pnfs_layoutcommit_inode(inode, true);
1275 if (ret == 0)
1276 ret = _pnfs_return_layout(inode);
1277 spin_lock(&inode->i_lock);
1278 lo->plh_block_lgets--;
1279 spin_unlock(&inode->i_lock);
1280 pnfs_put_layout_hdr(lo);
1281 return ret;
1282}
1283
1284bool pnfs_roc(struct inode *ino,
1285 struct nfs4_layoutreturn_args *args,
1286 struct nfs4_layoutreturn_res *res,
1287 const struct rpc_cred *cred)
1288{
1289 struct nfs_inode *nfsi = NFS_I(ino);
1290 struct nfs_open_context *ctx;
1291 struct nfs4_state *state;
1292 struct pnfs_layout_hdr *lo;
1293 struct pnfs_layout_segment *lseg, *next;
1294 nfs4_stateid stateid;
1295 enum pnfs_iomode iomode = 0;
1296 bool layoutreturn = false, roc = false;
1297
1298 if (!nfs_have_layout(ino))
1299 return false;
1300 spin_lock(&ino->i_lock);
1301 lo = nfsi->layout;
1302 if (!lo || !pnfs_layout_is_valid(lo) ||
1303 test_bit(NFS_LAYOUT_BULK_RECALL, &lo->plh_flags)) {
1304 lo = NULL;
1305 goto out_noroc;
1306 }
1307 pnfs_get_layout_hdr(lo);
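	/* no roc if we hold a delegation */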
1310 if (nfs4_check_delegation(ino, FMODE_READ))
1311 goto out_noroc;
1312
1313 list_for_each_entry(ctx, &nfsi->open_files, list) {
1314 state = ctx->state;
1315
1316 if (state != NULL && state->state != 0)
1317 goto out_noroc;
1318 }
1319
1320
1321 list_for_each_entry_safe(lseg, next, &lo->plh_segs, pls_list) {
1322
1323 if (!test_and_clear_bit(NFS_LSEG_ROC, &lseg->pls_flags))
1324 continue;
1325
1326
1327
1328
1329 set_bit(NFS_LSEG_LAYOUTRETURN, &lseg->pls_flags);
1330 if (!mark_lseg_invalid(lseg, &lo->plh_return_segs))
1331 continue;
1332 pnfs_set_plh_return_info(lo, lseg->pls_range.iomode, 0);
1333 }
1334
1335 if (!test_bit(NFS_LAYOUT_RETURN_REQUESTED, &lo->plh_flags))
1336 goto out_noroc;
1337
1338
1339
1340
1341
1342
1343 layoutreturn = pnfs_prepare_layoutreturn(lo, &stateid, &iomode);
1344
1345 if (!layoutreturn || cred != lo->plh_lc_cred)
1346 goto out_noroc;
1347
1348 roc = layoutreturn;
1349 pnfs_init_layoutreturn_args(args, lo, &stateid, iomode);
1350 res->lrs_present = 0;
1351 layoutreturn = false;
1352
1353out_noroc:
1354 spin_unlock(&ino->i_lock);
1355 pnfs_layoutcommit_inode(ino, true);
1356 if (roc) {
1357 struct pnfs_layoutdriver_type *ld = NFS_SERVER(ino)->pnfs_curr_ld;
1358 if (ld->prepare_layoutreturn)
1359 ld->prepare_layoutreturn(args);
1360 pnfs_put_layout_hdr(lo);
1361 return true;
1362 }
1363 if (layoutreturn)
1364 pnfs_send_layoutreturn(lo, &stateid, iomode, true);
1365 pnfs_put_layout_hdr(lo);
1366 return false;
1367}
1368
1369void pnfs_roc_release(struct nfs4_layoutreturn_args *args,
1370 struct nfs4_layoutreturn_res *res,
1371 int ret)
1372{
1373 struct pnfs_layout_hdr *lo = args->layout;
1374 const nfs4_stateid *arg_stateid = NULL;
1375 const nfs4_stateid *res_stateid = NULL;
1376 struct nfs4_xdr_opaque_data *ld_private = args->ld_private;
1377
1378 if (ret == 0) {
1379 arg_stateid = &args->stateid;
1380 if (res->lrs_present)
1381 res_stateid = &res->stateid;
1382 }
1383 pnfs_layoutreturn_free_lsegs(lo, arg_stateid, &args->range,
1384 res_stateid);
1385 if (ld_private && ld_private->ops && ld_private->ops->free)
1386 ld_private->ops->free(ld_private);
1387 pnfs_put_layout_hdr(lo);
1388 trace_nfs4_layoutreturn_on_close(args->inode, 0);
1389}
1390
1391bool pnfs_wait_on_layoutreturn(struct inode *ino, struct rpc_task *task)
1392{
1393 struct nfs_inode *nfsi = NFS_I(ino);
1394 struct pnfs_layout_hdr *lo;
1395 bool sleep = false;
1396
1397
1398
1399 spin_lock(&ino->i_lock);
1400 lo = nfsi->layout;
1401 if (lo && test_bit(NFS_LAYOUT_RETURN, &lo->plh_flags)) {
1402 rpc_sleep_on(&NFS_SERVER(ino)->roc_rpcwaitq, task, NULL);
1403 sleep = true;
1404 }
1405 spin_unlock(&ino->i_lock);
1406 return sleep;
1407}
1408
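/*
 * Compare two layout ranges for sorting: lower offsets sort first, then
 * longer lengths, then read-write before read-only.
 */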
1414static s64
1415pnfs_lseg_range_cmp(const struct pnfs_layout_range *l1,
1416 const struct pnfs_layout_range *l2)
1417{
1418 s64 d;
1419
1420
1421 d = l1->offset - l2->offset;
1422 if (d)
1423 return d;
1424
1425
1426 d = l2->length - l1->length;
1427 if (d)
1428 return d;
1429
1430
1431 return (int)(l1->iomode == IOMODE_READ) - (int)(l2->iomode == IOMODE_READ);
1432}
1433
1434static bool
1435pnfs_lseg_range_is_after(const struct pnfs_layout_range *l1,
1436 const struct pnfs_layout_range *l2)
1437{
1438 return pnfs_lseg_range_cmp(l1, l2) > 0;
1439}
1440
1441static bool
1442pnfs_lseg_no_merge(struct pnfs_layout_segment *lseg,
1443 struct pnfs_layout_segment *old)
1444{
1445 return false;
1446}
1447
1448void
1449pnfs_generic_layout_insert_lseg(struct pnfs_layout_hdr *lo,
1450 struct pnfs_layout_segment *lseg,
1451 bool (*is_after)(const struct pnfs_layout_range *,
1452 const struct pnfs_layout_range *),
1453 bool (*do_merge)(struct pnfs_layout_segment *,
1454 struct pnfs_layout_segment *),
1455 struct list_head *free_me)
1456{
1457 struct pnfs_layout_segment *lp, *tmp;
1458
1459 dprintk("%s:Begin\n", __func__);
1460
1461 list_for_each_entry_safe(lp, tmp, &lo->plh_segs, pls_list) {
1462 if (test_bit(NFS_LSEG_VALID, &lp->pls_flags) == 0)
1463 continue;
1464 if (do_merge(lseg, lp)) {
1465 mark_lseg_invalid(lp, free_me);
1466 continue;
1467 }
1468 if (is_after(&lseg->pls_range, &lp->pls_range))
1469 continue;
1470 list_add_tail(&lseg->pls_list, &lp->pls_list);
1471 dprintk("%s: inserted lseg %p "
1472 "iomode %d offset %llu length %llu before "
1473 "lp %p iomode %d offset %llu length %llu\n",
1474 __func__, lseg, lseg->pls_range.iomode,
1475 lseg->pls_range.offset, lseg->pls_range.length,
1476 lp, lp->pls_range.iomode, lp->pls_range.offset,
1477 lp->pls_range.length);
1478 goto out;
1479 }
1480 list_add_tail(&lseg->pls_list, &lo->plh_segs);
1481 dprintk("%s: inserted lseg %p "
1482 "iomode %d offset %llu length %llu at tail\n",
1483 __func__, lseg, lseg->pls_range.iomode,
1484 lseg->pls_range.offset, lseg->pls_range.length);
1485out:
1486 pnfs_get_layout_hdr(lo);
1487
1488 dprintk("%s:Return\n", __func__);
1489}
1490EXPORT_SYMBOL_GPL(pnfs_generic_layout_insert_lseg);
1491
1492static void
1493pnfs_layout_insert_lseg(struct pnfs_layout_hdr *lo,
1494 struct pnfs_layout_segment *lseg,
1495 struct list_head *free_me)
1496{
1497 struct inode *inode = lo->plh_inode;
1498 struct pnfs_layoutdriver_type *ld = NFS_SERVER(inode)->pnfs_curr_ld;
1499
1500 if (ld->add_lseg != NULL)
1501 ld->add_lseg(lo, lseg, free_me);
1502 else
1503 pnfs_generic_layout_insert_lseg(lo, lseg,
1504 pnfs_lseg_range_is_after,
1505 pnfs_lseg_no_merge,
1506 free_me);
1507}
1508
1509static struct pnfs_layout_hdr *
1510alloc_init_layout_hdr(struct inode *ino,
1511 struct nfs_open_context *ctx,
1512 gfp_t gfp_flags)
1513{
1514 struct pnfs_layout_hdr *lo;
1515
1516 lo = pnfs_alloc_layout_hdr(ino, gfp_flags);
1517 if (!lo)
1518 return NULL;
1519 atomic_set(&lo->plh_refcount, 1);
1520 INIT_LIST_HEAD(&lo->plh_layouts);
1521 INIT_LIST_HEAD(&lo->plh_segs);
1522 INIT_LIST_HEAD(&lo->plh_return_segs);
1523 INIT_LIST_HEAD(&lo->plh_bulk_destroy);
1524 lo->plh_inode = ino;
1525 lo->plh_lc_cred = get_rpccred(ctx->cred);
1526 lo->plh_flags |= 1 << NFS_LAYOUT_INVALID_STID;
1527 return lo;
1528}
1529
1530static struct pnfs_layout_hdr *
1531pnfs_find_alloc_layout(struct inode *ino,
1532 struct nfs_open_context *ctx,
1533 gfp_t gfp_flags)
1534 __releases(&ino->i_lock)
1535 __acquires(&ino->i_lock)
1536{
1537 struct nfs_inode *nfsi = NFS_I(ino);
1538 struct pnfs_layout_hdr *new = NULL;
1539
1540 dprintk("%s Begin ino=%p layout=%p\n", __func__, ino, nfsi->layout);
1541
1542 if (nfsi->layout != NULL)
1543 goto out_existing;
1544 spin_unlock(&ino->i_lock);
1545 new = alloc_init_layout_hdr(ino, ctx, gfp_flags);
1546 spin_lock(&ino->i_lock);
1547
1548 if (likely(nfsi->layout == NULL)) {
1549 nfsi->layout = new;
1550 return new;
1551 } else if (new != NULL)
1552 pnfs_free_layout_hdr(new);
1553out_existing:
1554 pnfs_get_layout_hdr(nfsi->layout);
1555 return nfsi->layout;
1556}
1557
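/*
 * A cached layout segment satisfies a request when the ranges intersect
 * and the iomodes are compatible: an RW request needs an RW segment, while
 * a READ request accepts either, unless strict_iomode demands an exact match.
 */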
1571static bool
1572pnfs_lseg_range_match(const struct pnfs_layout_range *ls_range,
1573 const struct pnfs_layout_range *range,
1574 bool strict_iomode)
1575{
1576 struct pnfs_layout_range range1;
1577
1578 if ((range->iomode == IOMODE_RW &&
1579 ls_range->iomode != IOMODE_RW) ||
1580 (range->iomode != ls_range->iomode &&
1581 strict_iomode == true) ||
1582 !pnfs_lseg_range_intersecting(ls_range, range))
1583 return 0;
1584
1585
1586 range1 = *range;
1587 range1.length = 1;
1588 return pnfs_lseg_range_contained(ls_range, &range1);
1589}
1590
1591
1592
1593
1594static struct pnfs_layout_segment *
1595pnfs_find_lseg(struct pnfs_layout_hdr *lo,
1596 struct pnfs_layout_range *range,
1597 bool strict_iomode)
1598{
1599 struct pnfs_layout_segment *lseg, *ret = NULL;
1600
1601 dprintk("%s:Begin\n", __func__);
1602
1603 list_for_each_entry(lseg, &lo->plh_segs, pls_list) {
1604 if (test_bit(NFS_LSEG_VALID, &lseg->pls_flags) &&
1605 !test_bit(NFS_LSEG_LAYOUTRETURN, &lseg->pls_flags) &&
1606 pnfs_lseg_range_match(&lseg->pls_range, range,
1607 strict_iomode)) {
1608 ret = pnfs_get_lseg(lseg);
1609 break;
1610 }
1611 }
1612
1613 dprintk("%s:Return lseg %p ref %d\n",
1614 __func__, ret, ret ? atomic_read(&ret->pls_refcount) : 0);
1615 return ret;
1616}
1617
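/*
 * Use the mdsthreshold hints returned at OPEN time to decide whether this
 * I/O should bypass pNFS: returns true when the file size or the accumulated
 * read/write I/O falls below the server-provided thresholds, in which case
 * the caller sends the I/O through the MDS instead.
 */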
1637static bool pnfs_within_mdsthreshold(struct nfs_open_context *ctx,
1638 struct inode *ino, int iomode)
1639{
1640 struct nfs4_threshold *t = ctx->mdsthreshold;
1641 struct nfs_inode *nfsi = NFS_I(ino);
1642 loff_t fsize = i_size_read(ino);
1643 bool size = false, size_set = false, io = false, io_set = false, ret = false;
1644
1645 if (t == NULL)
1646 return ret;
1647
1648 dprintk("%s bm=0x%x rd_sz=%llu wr_sz=%llu rd_io=%llu wr_io=%llu\n",
1649 __func__, t->bm, t->rd_sz, t->wr_sz, t->rd_io_sz, t->wr_io_sz);
1650
1651 switch (iomode) {
1652 case IOMODE_READ:
1653 if (t->bm & THRESHOLD_RD) {
1654 dprintk("%s fsize %llu\n", __func__, fsize);
1655 size_set = true;
1656 if (fsize < t->rd_sz)
1657 size = true;
1658 }
1659 if (t->bm & THRESHOLD_RD_IO) {
1660 dprintk("%s nfsi->read_io %llu\n", __func__,
1661 nfsi->read_io);
1662 io_set = true;
1663 if (nfsi->read_io < t->rd_io_sz)
1664 io = true;
1665 }
1666 break;
1667 case IOMODE_RW:
1668 if (t->bm & THRESHOLD_WR) {
1669 dprintk("%s fsize %llu\n", __func__, fsize);
1670 size_set = true;
1671 if (fsize < t->wr_sz)
1672 size = true;
1673 }
1674 if (t->bm & THRESHOLD_WR_IO) {
1675 dprintk("%s nfsi->write_io %llu\n", __func__,
1676 nfsi->write_io);
1677 io_set = true;
1678 if (nfsi->write_io < t->wr_io_sz)
1679 io = true;
1680 }
1681 break;
1682 }
1683 if (size_set && io_set) {
1684 if (size && io)
1685 ret = true;
1686 } else if (size || io)
1687 ret = true;
1688
1689 dprintk("<-- %s size %d io %d ret %d\n", __func__, size, io, ret);
1690 return ret;
1691}
1692
1693static bool pnfs_prepare_to_retry_layoutget(struct pnfs_layout_hdr *lo)
1694{
1695
1696
1697
1698
1699 pnfs_layoutcommit_inode(lo->plh_inode, false);
1700 return !wait_on_bit_action(&lo->plh_flags, NFS_LAYOUT_RETURN,
1701 nfs_wait_bit_killable,
1702 TASK_UNINTERRUPTIBLE);
1703}
1704
1705static void pnfs_clear_first_layoutget(struct pnfs_layout_hdr *lo)
1706{
1707 unsigned long *bitlock = &lo->plh_flags;
1708
1709 clear_bit_unlock(NFS_LAYOUT_FIRST_LAYOUTGET, bitlock);
1710 smp_mb__after_atomic();
1711 wake_up_bit(bitlock, NFS_LAYOUT_FIRST_LAYOUTGET);
1712}
1713
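/*
 * Layout segment is retrieved from the server if not cached.
 * The appropriate layout segment is referenced and returned to the caller.
 */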
1718struct pnfs_layout_segment *
1719pnfs_update_layout(struct inode *ino,
1720 struct nfs_open_context *ctx,
1721 loff_t pos,
1722 u64 count,
1723 enum pnfs_iomode iomode,
1724 bool strict_iomode,
1725 gfp_t gfp_flags)
1726{
1727 struct pnfs_layout_range arg = {
1728 .iomode = iomode,
1729 .offset = pos,
1730 .length = count,
1731 };
1732 unsigned pg_offset;
1733 struct nfs_server *server = NFS_SERVER(ino);
1734 struct nfs_client *clp = server->nfs_client;
1735 struct pnfs_layout_hdr *lo = NULL;
1736 struct pnfs_layout_segment *lseg = NULL;
1737 nfs4_stateid stateid;
1738 long timeout = 0;
1739 unsigned long giveup = jiffies + (clp->cl_lease_time << 1);
1740 bool first;
1741
1742 if (!pnfs_enabled_sb(NFS_SERVER(ino))) {
1743 trace_pnfs_update_layout(ino, pos, count, iomode, lo, lseg,
1744 PNFS_UPDATE_LAYOUT_NO_PNFS);
1745 goto out;
1746 }
1747
1748 if (iomode == IOMODE_READ && i_size_read(ino) == 0) {
1749 trace_pnfs_update_layout(ino, pos, count, iomode, lo, lseg,
1750 PNFS_UPDATE_LAYOUT_RD_ZEROLEN);
1751 goto out;
1752 }
1753
1754 if (pnfs_within_mdsthreshold(ctx, ino, iomode)) {
1755 trace_pnfs_update_layout(ino, pos, count, iomode, lo, lseg,
1756 PNFS_UPDATE_LAYOUT_MDSTHRESH);
1757 goto out;
1758 }
1759
1760lookup_again:
1761 nfs4_client_recover_expired_lease(clp);
1762 first = false;
1763 spin_lock(&ino->i_lock);
1764 lo = pnfs_find_alloc_layout(ino, ctx, gfp_flags);
1765 if (lo == NULL) {
1766 spin_unlock(&ino->i_lock);
1767 trace_pnfs_update_layout(ino, pos, count, iomode, lo, lseg,
1768 PNFS_UPDATE_LAYOUT_NOMEM);
1769 goto out;
1770 }
1771
1772
1773 if (test_bit(NFS_LAYOUT_BULK_RECALL, &lo->plh_flags)) {
1774 trace_pnfs_update_layout(ino, pos, count, iomode, lo, lseg,
1775 PNFS_UPDATE_LAYOUT_BULK_RECALL);
1776 dprintk("%s matches recall, use MDS\n", __func__);
1777 goto out_unlock;
1778 }
1779
1780
1781 if (pnfs_layout_io_test_failed(lo, iomode)) {
1782 trace_pnfs_update_layout(ino, pos, count, iomode, lo, lseg,
1783 PNFS_UPDATE_LAYOUT_IO_TEST_FAIL);
1784 goto out_unlock;
1785 }
1786
1787 lseg = pnfs_find_lseg(lo, &arg, strict_iomode);
1788 if (lseg) {
1789 trace_pnfs_update_layout(ino, pos, count, iomode, lo, lseg,
1790 PNFS_UPDATE_LAYOUT_FOUND_CACHED);
1791 goto out_unlock;
1792 }
1793
1794 if (!nfs4_valid_open_stateid(ctx->state)) {
1795 trace_pnfs_update_layout(ino, pos, count, iomode, lo, lseg,
1796 PNFS_UPDATE_LAYOUT_INVALID_OPEN);
1797 goto out_unlock;
1798 }
1799
1800
1801
1802
1803
1804
1805 if (test_bit(NFS_LAYOUT_INVALID_STID, &lo->plh_flags)) {
1806
1807
1808
1809
1810
1811 if (test_and_set_bit(NFS_LAYOUT_FIRST_LAYOUTGET,
1812 &lo->plh_flags)) {
1813 spin_unlock(&ino->i_lock);
1814 wait_on_bit(&lo->plh_flags, NFS_LAYOUT_FIRST_LAYOUTGET,
1815 TASK_UNINTERRUPTIBLE);
1816 pnfs_put_layout_hdr(lo);
1817 dprintk("%s retrying\n", __func__);
1818 goto lookup_again;
1819 }
1820
1821 first = true;
1822 if (nfs4_select_rw_stateid(ctx->state,
1823 iomode == IOMODE_RW ? FMODE_WRITE : FMODE_READ,
1824 NULL, &stateid, NULL) != 0) {
1825 trace_pnfs_update_layout(ino, pos, count,
1826 iomode, lo, lseg,
1827 PNFS_UPDATE_LAYOUT_INVALID_OPEN);
1828 goto out_unlock;
1829 }
1830 } else {
1831 nfs4_stateid_copy(&stateid, &lo->plh_stateid);
1832 }
1833
1834
1835
1836
1837
1838 if (test_bit(NFS_LAYOUT_RETURN, &lo->plh_flags)) {
1839 spin_unlock(&ino->i_lock);
1840 dprintk("%s wait for layoutreturn\n", __func__);
1841 if (pnfs_prepare_to_retry_layoutget(lo)) {
1842 if (first)
1843 pnfs_clear_first_layoutget(lo);
1844 pnfs_put_layout_hdr(lo);
1845 dprintk("%s retrying\n", __func__);
1846 trace_pnfs_update_layout(ino, pos, count, iomode, lo,
1847 lseg, PNFS_UPDATE_LAYOUT_RETRY);
1848 goto lookup_again;
1849 }
1850 trace_pnfs_update_layout(ino, pos, count, iomode, lo, lseg,
1851 PNFS_UPDATE_LAYOUT_RETURN);
1852 goto out_put_layout_hdr;
1853 }
1854
1855 if (pnfs_layoutgets_blocked(lo)) {
1856 trace_pnfs_update_layout(ino, pos, count, iomode, lo, lseg,
1857 PNFS_UPDATE_LAYOUT_BLOCKED);
1858 goto out_unlock;
1859 }
1860 atomic_inc(&lo->plh_outstanding);
1861 spin_unlock(&ino->i_lock);
1862
1863 if (list_empty(&lo->plh_layouts)) {
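		/*
		 * Put the layout header on the per-server list so that a
		 * CB_LAYOUTRECALL from the server can find it.
		 */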
1867 spin_lock(&clp->cl_lock);
1868 if (list_empty(&lo->plh_layouts))
1869 list_add_tail(&lo->plh_layouts, &server->layouts);
1870 spin_unlock(&clp->cl_lock);
1871 }
1872
	pg_offset = arg.offset & ~PAGE_MASK;
1874 if (pg_offset) {
1875 arg.offset -= pg_offset;
1876 arg.length += pg_offset;
1877 }
1878 if (arg.length != NFS4_MAX_UINT64)
		arg.length = PAGE_ALIGN(arg.length);
1880
1881 lseg = send_layoutget(lo, ctx, &stateid, &arg, &timeout, gfp_flags);
1882 trace_pnfs_update_layout(ino, pos, count, iomode, lo, lseg,
1883 PNFS_UPDATE_LAYOUT_SEND_LAYOUTGET);
1884 atomic_dec(&lo->plh_outstanding);
1885 if (IS_ERR(lseg)) {
1886 switch(PTR_ERR(lseg)) {
1887 case -EBUSY:
1888 if (time_after(jiffies, giveup))
1889 lseg = NULL;
1890 break;
1891 case -ERECALLCONFLICT:
1892
1893 if (first) {
1894 lseg = NULL;
1895 break;
1896 }
1897
1898 if (time_after(jiffies, giveup))
1899 pnfs_destroy_layout(NFS_I(ino));
1900
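		/* Fall through */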
1901 case -EAGAIN:
1902 break;
1903 default:
1904 if (!nfs_error_is_fatal(PTR_ERR(lseg))) {
1905 pnfs_layout_clear_fail_bit(lo, pnfs_iomode_to_fail_bit(iomode));
1906 lseg = NULL;
1907 }
1908 goto out_put_layout_hdr;
1909 }
1910 if (lseg) {
1911 if (first)
1912 pnfs_clear_first_layoutget(lo);
1913 trace_pnfs_update_layout(ino, pos, count,
1914 iomode, lo, lseg, PNFS_UPDATE_LAYOUT_RETRY);
1915 pnfs_put_layout_hdr(lo);
1916 goto lookup_again;
1917 }
1918 } else {
1919 pnfs_layout_clear_fail_bit(lo, pnfs_iomode_to_fail_bit(iomode));
1920 }
1921
1922out_put_layout_hdr:
1923 if (first)
1924 pnfs_clear_first_layoutget(lo);
1925 pnfs_put_layout_hdr(lo);
1926out:
1927 dprintk("%s: inode %s/%llu pNFS layout segment %s for "
1928 "(%s, offset: %llu, length: %llu)\n",
1929 __func__, ino->i_sb->s_id,
1930 (unsigned long long)NFS_FILEID(ino),
1931 IS_ERR_OR_NULL(lseg) ? "not found" : "found",
1932 iomode==IOMODE_RW ? "read/write" : "read-only",
1933 (unsigned long long)pos,
1934 (unsigned long long)count);
1935 return lseg;
1936out_unlock:
1937 spin_unlock(&ino->i_lock);
1938 goto out_put_layout_hdr;
1939}
1940EXPORT_SYMBOL_GPL(pnfs_update_layout);
1941
1942static bool
1943pnfs_sanity_check_layout_range(struct pnfs_layout_range *range)
1944{
1945 switch (range->iomode) {
1946 case IOMODE_READ:
1947 case IOMODE_RW:
1948 break;
1949 default:
1950 return false;
1951 }
1952 if (range->offset == NFS4_MAX_UINT64)
1953 return false;
1954 if (range->length == 0)
1955 return false;
1956 if (range->length != NFS4_MAX_UINT64 &&
1957 range->length > NFS4_MAX_UINT64 - range->offset)
1958 return false;
1959 return true;
1960}
1961
1962struct pnfs_layout_segment *
1963pnfs_layout_process(struct nfs4_layoutget *lgp)
1964{
1965 struct pnfs_layout_hdr *lo = NFS_I(lgp->args.inode)->layout;
1966 struct nfs4_layoutget_res *res = &lgp->res;
1967 struct pnfs_layout_segment *lseg;
1968 struct inode *ino = lo->plh_inode;
1969 LIST_HEAD(free_me);
1970
1971 if (!pnfs_sanity_check_layout_range(&res->range))
1972 return ERR_PTR(-EINVAL);
1973
1974
1975 lseg = NFS_SERVER(ino)->pnfs_curr_ld->alloc_lseg(lo, res, lgp->gfp_flags);
1976 if (IS_ERR_OR_NULL(lseg)) {
1977 if (!lseg)
1978 lseg = ERR_PTR(-ENOMEM);
1979
1980 dprintk("%s: Could not allocate layout: error %ld\n",
1981 __func__, PTR_ERR(lseg));
1982 return lseg;
1983 }
1984
1985 pnfs_init_lseg(lo, lseg, &res->range, &res->stateid);
1986
1987 spin_lock(&ino->i_lock);
1988 if (pnfs_layoutgets_blocked(lo)) {
1989 dprintk("%s forget reply due to state\n", __func__);
1990 goto out_forget;
1991 }
1992
1993 if (!pnfs_layout_is_valid(lo)) {
1994
1995 pnfs_set_layout_stateid(lo, &res->stateid, true);
1996 } else if (nfs4_stateid_match_other(&lo->plh_stateid, &res->stateid)) {
1997
1998 if (pnfs_layout_stateid_blocked(lo, &res->stateid)) {
1999 dprintk("%s forget reply due to sequence\n", __func__);
2000 goto out_forget;
2001 }
2002 pnfs_set_layout_stateid(lo, &res->stateid, false);
2003 } else {
2004
2005
2006
2007
2008 pnfs_mark_layout_stateid_invalid(lo, &free_me);
2009 goto out_forget;
2010 }
2011
2012 pnfs_get_lseg(lseg);
2013 pnfs_layout_insert_lseg(lo, lseg, &free_me);
2014
2015
2016 if (res->return_on_close)
2017 set_bit(NFS_LSEG_ROC, &lseg->pls_flags);
2018
2019 spin_unlock(&ino->i_lock);
2020 pnfs_free_lseg_list(&free_me);
2021 return lseg;
2022
2023out_forget:
2024 spin_unlock(&ino->i_lock);
2025 lseg->pls_layout = lo;
2026 NFS_SERVER(ino)->pnfs_curr_ld->free_lseg(lseg);
2027 if (!pnfs_layout_is_valid(lo))
2028 nfs_commit_inode(ino, 0);
2029 return ERR_PTR(-EAGAIN);
2030}
2031
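/*
 * pnfs_mark_matching_lsegs_return - free or mark matching lsegs for return
 * @lo: pointer to layout header
 * @tmp_list: list to hold any lsegs that can be freed immediately
 * @return_range: the range of layout to return
 * @seq: only affect lsegs obtained prior to this sequence (0 for all)
 *
 * Segments that are no longer referenced are freed via @tmp_list; the rest
 * are flagged NFS_LSEG_LAYOUTRETURN and the return information is recorded
 * in the layout header.  Returns the number of segments still in use.
 */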
2042int
2043pnfs_mark_matching_lsegs_return(struct pnfs_layout_hdr *lo,
2044 struct list_head *tmp_list,
2045 const struct pnfs_layout_range *return_range,
2046 u32 seq)
2047{
2048 struct pnfs_layout_segment *lseg, *next;
2049 int remaining = 0;
2050
2051 dprintk("%s:Begin lo %p\n", __func__, lo);
2052
2053 if (list_empty(&lo->plh_segs))
2054 return 0;
2055
2056 assert_spin_locked(&lo->plh_inode->i_lock);
2057
2058 list_for_each_entry_safe(lseg, next, &lo->plh_segs, pls_list)
2059 if (pnfs_match_lseg_recall(lseg, return_range, seq)) {
2060 dprintk("%s: marking lseg %p iomode %d "
2061 "offset %llu length %llu\n", __func__,
2062 lseg, lseg->pls_range.iomode,
2063 lseg->pls_range.offset,
2064 lseg->pls_range.length);
2065 if (mark_lseg_invalid(lseg, tmp_list))
2066 continue;
2067 remaining++;
2068 set_bit(NFS_LSEG_LAYOUTRETURN, &lseg->pls_flags);
2069 }
2070
2071 if (remaining)
2072 pnfs_set_plh_return_info(lo, return_range->iomode, seq);
2073
2074 return remaining;
2075}
2076
2077void pnfs_error_mark_layout_for_return(struct inode *inode,
2078 struct pnfs_layout_segment *lseg)
2079{
2080 struct pnfs_layout_hdr *lo = NFS_I(inode)->layout;
2081 struct pnfs_layout_range range = {
2082 .iomode = lseg->pls_range.iomode,
2083 .offset = 0,
2084 .length = NFS4_MAX_UINT64,
2085 };
2086 bool return_now = false;
2087
2088 spin_lock(&inode->i_lock);
2089 pnfs_set_plh_return_info(lo, range.iomode, 0);
2090
2091
2092
2093
2094
2095 if (!pnfs_mark_matching_lsegs_return(lo, &lo->plh_return_segs, &range, 0)) {
2096 nfs4_stateid stateid;
2097 enum pnfs_iomode iomode;
2098
2099 return_now = pnfs_prepare_layoutreturn(lo, &stateid, &iomode);
2100 spin_unlock(&inode->i_lock);
2101 if (return_now)
2102 pnfs_send_layoutreturn(lo, &stateid, iomode, false);
2103 } else {
2104 spin_unlock(&inode->i_lock);
2105 nfs_commit_inode(inode, 0);
2106 }
2107}
2108EXPORT_SYMBOL_GPL(pnfs_error_mark_layout_for_return);
2109
2110
2111
2112
2113
2114static void
2115pnfs_generic_pg_check_range(struct nfs_pageio_descriptor *pgio, struct nfs_page *req)
2116{
2117 if (pgio->pg_lseg && !pnfs_lseg_request_intersecting(pgio->pg_lseg, req)) {
2118 pnfs_put_lseg(pgio->pg_lseg);
2119 pgio->pg_lseg = NULL;
2120 }
2121}
2122
2123void
2124pnfs_generic_pg_check_layout(struct nfs_pageio_descriptor *pgio)
2125{
2126 if (pgio->pg_lseg == NULL ||
2127 test_bit(NFS_LSEG_VALID, &pgio->pg_lseg->pls_flags))
2128 return;
2129 pnfs_put_lseg(pgio->pg_lseg);
2130 pgio->pg_lseg = NULL;
2131}
2132EXPORT_SYMBOL_GPL(pnfs_generic_pg_check_layout);
2133
2134void
2135pnfs_generic_pg_init_read(struct nfs_pageio_descriptor *pgio, struct nfs_page *req)
2136{
2137 u64 rd_size = req->wb_bytes;
2138
2139 pnfs_generic_pg_check_layout(pgio);
2140 pnfs_generic_pg_check_range(pgio, req);
2141 if (pgio->pg_lseg == NULL) {
2142 if (pgio->pg_dreq == NULL)
2143 rd_size = i_size_read(pgio->pg_inode) - req_offset(req);
2144 else
2145 rd_size = nfs_dreq_bytes_left(pgio->pg_dreq);
2146
2147 pgio->pg_lseg = pnfs_update_layout(pgio->pg_inode,
2148 req->wb_context,
2149 req_offset(req),
2150 rd_size,
2151 IOMODE_READ,
2152 false,
2153 GFP_KERNEL);
2154 if (IS_ERR(pgio->pg_lseg)) {
2155 pgio->pg_error = PTR_ERR(pgio->pg_lseg);
2156 pgio->pg_lseg = NULL;
2157 return;
2158 }
2159 }
2160
2161 if (pgio->pg_lseg == NULL)
2162 nfs_pageio_reset_read_mds(pgio);
2163
2164}
2165EXPORT_SYMBOL_GPL(pnfs_generic_pg_init_read);
2166
2167void
2168pnfs_generic_pg_init_write(struct nfs_pageio_descriptor *pgio,
2169 struct nfs_page *req, u64 wb_size)
2170{
2171 pnfs_generic_pg_check_layout(pgio);
2172 pnfs_generic_pg_check_range(pgio, req);
2173 if (pgio->pg_lseg == NULL) {
2174 pgio->pg_lseg = pnfs_update_layout(pgio->pg_inode,
2175 req->wb_context,
2176 req_offset(req),
2177 wb_size,
2178 IOMODE_RW,
2179 false,
2180 GFP_NOFS);
2181 if (IS_ERR(pgio->pg_lseg)) {
2182 pgio->pg_error = PTR_ERR(pgio->pg_lseg);
2183 pgio->pg_lseg = NULL;
2184 return;
2185 }
2186 }
2187
2188 if (pgio->pg_lseg == NULL)
2189 nfs_pageio_reset_write_mds(pgio);
2190}
2191EXPORT_SYMBOL_GPL(pnfs_generic_pg_init_write);
2192
2193void
2194pnfs_generic_pg_cleanup(struct nfs_pageio_descriptor *desc)
2195{
2196 if (desc->pg_lseg) {
2197 pnfs_put_lseg(desc->pg_lseg);
2198 desc->pg_lseg = NULL;
2199 }
2200}
2201EXPORT_SYMBOL_GPL(pnfs_generic_pg_cleanup);
2202
2203/*
2204 * Return 0 if @req cannot be coalesced into @pgio, otherwise return the
2205 * number of bytes (at most @req->wb_bytes) that can be coalesced.
2206 */
2207size_t
2208pnfs_generic_pg_test(struct nfs_pageio_descriptor *pgio,
2209 struct nfs_page *prev, struct nfs_page *req)
2210{
2211 unsigned int size;
2212 u64 seg_end, req_start, seg_left;
2213
2214 size = nfs_generic_pg_test(pgio, prev, req);
2215 if (!size)
2216 return 0;
2217
2218	/*
2219	 * 'size' contains the number of bytes left in the current page (up
2220	 * to the original size asked for in @req->wb_bytes).
2221	 *
2222	 * Calculate how many bytes are left in the layout segment and, if
2223	 * fewer bytes than 'size' remain, return that smaller amount instead.
2224	 *
2225	 * Note that 'seg_end' is the offset of the first byte that lies
2226	 * outside the pnfs_layout_range, so a request starting at or beyond
2227	 * it cannot use this segment at all.
2228	 */
2229 if (pgio->pg_lseg) {
2230 seg_end = pnfs_end_offset(pgio->pg_lseg->pls_range.offset,
2231 pgio->pg_lseg->pls_range.length);
2232 req_start = req_offset(req);
2233
2234		/* start of request is past the end of the segment */
2235 if (req_start >= seg_end)
2236 return 0;
2237
2238		/* adjust 'size' if there are fewer bytes left in the
2239		 * segment than what nfs_generic_pg_test returned */
2240 seg_left = seg_end - req_start;
2241 if (seg_left < size)
2242 size = (unsigned int)seg_left;
2243 }
2244
2245 return size;
2246}
2247EXPORT_SYMBOL_GPL(pnfs_generic_pg_test);
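
/*
 * Worked example of the clamping above (illustrative numbers): with a layout
 * segment covering offset 0, length 1 MiB, a request starting at 1 MiB - 4 KiB
 * for 8 KiB, and nfs_generic_pg_test() allowing all 8 KiB, seg_end is 1 MiB
 * and seg_left is 4 KiB, so only the first 4 KiB are coalesced here; a request
 * starting at or past 1 MiB gets 0 and goes into a fresh descriptor.
 */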
2248
2249int pnfs_write_done_resend_to_mds(struct nfs_pgio_header *hdr)
2250{
2251 struct nfs_pageio_descriptor pgio;
2252
2253	/* Resend all requests through the MDS */
2254 nfs_pageio_init_write(&pgio, hdr->inode, FLUSH_STABLE, true,
2255 hdr->completion_ops);
2256 set_bit(NFS_CONTEXT_RESEND_WRITES, &hdr->args.context->flags);
2257 return nfs_pageio_resend(&pgio, hdr);
2258}
2259EXPORT_SYMBOL_GPL(pnfs_write_done_resend_to_mds);
2260
2261static void pnfs_ld_handle_write_error(struct nfs_pgio_header *hdr)
2262{
2263
2264 dprintk("pnfs write error = %d\n", hdr->pnfs_error);
2265 if (NFS_SERVER(hdr->inode)->pnfs_curr_ld->flags &
2266 PNFS_LAYOUTRET_ON_ERROR) {
2267 pnfs_return_layout(hdr->inode);
2268 }
2269 if (!test_and_set_bit(NFS_IOHDR_REDO, &hdr->flags))
2270 hdr->task.tk_status = pnfs_write_done_resend_to_mds(hdr);
2271}
2272
2273/*
2274 * Called by the layout driver once its write I/O completes.
2275 */
2276void pnfs_ld_write_done(struct nfs_pgio_header *hdr)
2277{
2278 if (likely(!hdr->pnfs_error)) {
2279 pnfs_set_layoutcommit(hdr->inode, hdr->lseg,
2280 hdr->mds_offset + hdr->res.count);
2281 hdr->mds_ops->rpc_call_done(&hdr->task, hdr);
2282 }
2283 trace_nfs4_pnfs_write(hdr, hdr->pnfs_error);
2284 if (unlikely(hdr->pnfs_error))
2285 pnfs_ld_handle_write_error(hdr);
2286 hdr->mds_ops->rpc_release(hdr);
2287}
2288EXPORT_SYMBOL_GPL(pnfs_ld_write_done);
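
/*
 * Completion sketch (hypothetical helper name; real bio-based layout drivers
 * follow the same shape): once all device I/O for the header has finished,
 * the driver records any error and hands the header back:
 *
 *	static void example_write_done(struct nfs_pgio_header *hdr, int error)
 *	{
 *		if (error)
 *			hdr->pnfs_error = error;
 *		pnfs_ld_write_done(hdr);
 *	}
 *
 * pnfs_ld_write_done() then either records the layoutcommit range or falls
 * back to the MDS via pnfs_ld_handle_write_error().
 */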
2289
2290static void
2291pnfs_write_through_mds(struct nfs_pageio_descriptor *desc,
2292 struct nfs_pgio_header *hdr)
2293{
2294 struct nfs_pgio_mirror *mirror = nfs_pgio_current_mirror(desc);
2295
2296 if (!test_and_set_bit(NFS_IOHDR_REDO, &hdr->flags)) {
2297 list_splice_tail_init(&hdr->pages, &mirror->pg_list);
2298 nfs_pageio_reset_write_mds(desc);
2299 mirror->pg_recoalesce = 1;
2300 }
2301 hdr->completion_ops->completion(hdr);
2302}
2303
2304static enum pnfs_try_status
2305pnfs_try_to_write_data(struct nfs_pgio_header *hdr,
2306 const struct rpc_call_ops *call_ops,
2307 struct pnfs_layout_segment *lseg,
2308 int how)
2309{
2310 struct inode *inode = hdr->inode;
2311 enum pnfs_try_status trypnfs;
2312 struct nfs_server *nfss = NFS_SERVER(inode);
2313
2314 hdr->mds_ops = call_ops;
2315
2316 dprintk("%s: Writing ino:%lu %u@%llu (how %d)\n", __func__,
2317 inode->i_ino, hdr->args.count, hdr->args.offset, how);
2318 trypnfs = nfss->pnfs_curr_ld->write_pagelist(hdr, how);
2319 if (trypnfs != PNFS_NOT_ATTEMPTED)
2320 nfs_inc_stats(inode, NFSIOS_PNFS_WRITE);
2321 dprintk("%s End (trypnfs:%d)\n", __func__, trypnfs);
2322 return trypnfs;
2323}
2324
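/*
 * Dispatch a write through the layout driver.  The driver's ->write_pagelist
 * answer is handled below: PNFS_ATTEMPTED means the I/O was issued and will
 * complete via pnfs_ld_write_done(); PNFS_NOT_ATTEMPTED routes the requests
 * through the MDS instead; PNFS_TRY_AGAIN requeues them for recoalescing.
 */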
2325static void
2326pnfs_do_write(struct nfs_pageio_descriptor *desc,
2327 struct nfs_pgio_header *hdr, int how)
2328{
2329 const struct rpc_call_ops *call_ops = desc->pg_rpc_callops;
2330 struct pnfs_layout_segment *lseg = desc->pg_lseg;
2331 enum pnfs_try_status trypnfs;
2332
2333 trypnfs = pnfs_try_to_write_data(hdr, call_ops, lseg, how);
2334 switch (trypnfs) {
2335 case PNFS_NOT_ATTEMPTED:
2336 pnfs_write_through_mds(desc, hdr);
		/* Fallthrough */
2337 case PNFS_ATTEMPTED:
2338 break;
2339 case PNFS_TRY_AGAIN:
2340		/* Requeue the requests for another attempt via recoalescing */
2341 if (!test_and_set_bit(NFS_IOHDR_REDO, &hdr->flags)) {
2342 struct nfs_pgio_mirror *mirror = nfs_pgio_current_mirror(desc);
2343 list_splice_init(&hdr->pages, &mirror->pg_list);
2344 mirror->pg_recoalesce = 1;
2345 }
2346 hdr->mds_ops->rpc_release(hdr);
2347 }
2348}
2349
2350static void pnfs_writehdr_free(struct nfs_pgio_header *hdr)
2351{
2352 pnfs_put_lseg(hdr->lseg);
2353 nfs_pgio_header_free(hdr);
2354}
2355
2356int
2357pnfs_generic_pg_writepages(struct nfs_pageio_descriptor *desc)
2358{
2359 struct nfs_pgio_header *hdr;
2360 int ret;
2361
2362 hdr = nfs_pgio_header_alloc(desc->pg_rw_ops);
2363 if (!hdr) {
2364 desc->pg_error = -ENOMEM;
2365 return desc->pg_error;
2366 }
2367 nfs_pgheader_init(desc, hdr, pnfs_writehdr_free);
2368
2369 hdr->lseg = pnfs_get_lseg(desc->pg_lseg);
2370 ret = nfs_generic_pgio(desc, hdr);
2371 if (!ret)
2372 pnfs_do_write(desc, hdr, desc->pg_ioflags);
2373
2374 return ret;
2375}
2376EXPORT_SYMBOL_GPL(pnfs_generic_pg_writepages);
2377
2378int pnfs_read_done_resend_to_mds(struct nfs_pgio_header *hdr)
2379{
2380 struct nfs_pageio_descriptor pgio;
2381
2382	/* Resend all requests through the MDS */
2383 nfs_pageio_init_read(&pgio, hdr->inode, true, hdr->completion_ops);
2384 return nfs_pageio_resend(&pgio, hdr);
2385}
2386EXPORT_SYMBOL_GPL(pnfs_read_done_resend_to_mds);
2387
2388static void pnfs_ld_handle_read_error(struct nfs_pgio_header *hdr)
2389{
2390 dprintk("pnfs read error = %d\n", hdr->pnfs_error);
2391 if (NFS_SERVER(hdr->inode)->pnfs_curr_ld->flags &
2392 PNFS_LAYOUTRET_ON_ERROR) {
2393 pnfs_return_layout(hdr->inode);
2394 }
2395 if (!test_and_set_bit(NFS_IOHDR_REDO, &hdr->flags))
2396 hdr->task.tk_status = pnfs_read_done_resend_to_mds(hdr);
2397}
2398
2399/*
2400 * Called by the layout driver once its read I/O completes.
2401 */
2402void pnfs_ld_read_done(struct nfs_pgio_header *hdr)
2403{
2404 if (likely(!hdr->pnfs_error))
2405 hdr->mds_ops->rpc_call_done(&hdr->task, hdr);
2406 trace_nfs4_pnfs_read(hdr, hdr->pnfs_error);
2407 if (unlikely(hdr->pnfs_error))
2408 pnfs_ld_handle_read_error(hdr);
2409 hdr->mds_ops->rpc_release(hdr);
2410}
2411EXPORT_SYMBOL_GPL(pnfs_ld_read_done);
2412
2413static void
2414pnfs_read_through_mds(struct nfs_pageio_descriptor *desc,
2415 struct nfs_pgio_header *hdr)
2416{
2417 struct nfs_pgio_mirror *mirror = nfs_pgio_current_mirror(desc);
2418
2419 if (!test_and_set_bit(NFS_IOHDR_REDO, &hdr->flags)) {
2420 list_splice_tail_init(&hdr->pages, &mirror->pg_list);
2421 nfs_pageio_reset_read_mds(desc);
2422 mirror->pg_recoalesce = 1;
2423 }
2424 hdr->completion_ops->completion(hdr);
2425}
2426
2427/*
2428 * Call the layout driver's ->read_pagelist I/O function.
2429 */
2430static enum pnfs_try_status
2431pnfs_try_to_read_data(struct nfs_pgio_header *hdr,
2432 const struct rpc_call_ops *call_ops,
2433 struct pnfs_layout_segment *lseg)
2434{
2435 struct inode *inode = hdr->inode;
2436 struct nfs_server *nfss = NFS_SERVER(inode);
2437 enum pnfs_try_status trypnfs;
2438
2439 hdr->mds_ops = call_ops;
2440
2441 dprintk("%s: Reading ino:%lu %u@%llu\n",
2442 __func__, inode->i_ino, hdr->args.count, hdr->args.offset);
2443
2444 trypnfs = nfss->pnfs_curr_ld->read_pagelist(hdr);
2445 if (trypnfs != PNFS_NOT_ATTEMPTED)
2446 nfs_inc_stats(inode, NFSIOS_PNFS_READ);
2447 dprintk("%s End (trypnfs:%d)\n", __func__, trypnfs);
2448 return trypnfs;
2449}
2450
2451/* Resend all requests through pnfs. */
2452void pnfs_read_resend_pnfs(struct nfs_pgio_header *hdr)
2453{
2454 struct nfs_pageio_descriptor pgio;
2455
2456 if (!test_and_set_bit(NFS_IOHDR_REDO, &hdr->flags)) {
2457		/* Drop the lseg so a pending layoutreturn is not blocked while we resend */
2458 pnfs_put_lseg(hdr->lseg);
2459 hdr->lseg = NULL;
2460
2461 nfs_pageio_init_read(&pgio, hdr->inode, false,
2462 hdr->completion_ops);
2463 hdr->task.tk_status = nfs_pageio_resend(&pgio, hdr);
2464 }
2465}
2466EXPORT_SYMBOL_GPL(pnfs_read_resend_pnfs);
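
/*
 * A driver chooses between the two resend paths above: mirrored layouts in
 * the flexfiles style can retry through pNFS (e.g. against another mirror)
 * with pnfs_read_resend_pnfs(), while unrecoverable layout errors fall back
 * to the MDS with pnfs_read_done_resend_to_mds().  This describes typical
 * use, not a requirement imposed by the helpers themselves.
 */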
2467
2468static void
2469pnfs_do_read(struct nfs_pageio_descriptor *desc, struct nfs_pgio_header *hdr)
2470{
2471 const struct rpc_call_ops *call_ops = desc->pg_rpc_callops;
2472 struct pnfs_layout_segment *lseg = desc->pg_lseg;
2473 enum pnfs_try_status trypnfs;
2474
2475 trypnfs = pnfs_try_to_read_data(hdr, call_ops, lseg);
2476 switch (trypnfs) {
2477 case PNFS_NOT_ATTEMPTED:
2478 pnfs_read_through_mds(desc, hdr);
		/* Fallthrough */
2479 case PNFS_ATTEMPTED:
2480 break;
2481 case PNFS_TRY_AGAIN:
2482		/* Requeue the requests for another attempt via recoalescing */
2483 if (!test_and_set_bit(NFS_IOHDR_REDO, &hdr->flags)) {
2484 struct nfs_pgio_mirror *mirror = nfs_pgio_current_mirror(desc);
2485 list_splice_init(&hdr->pages, &mirror->pg_list);
2486 mirror->pg_recoalesce = 1;
2487 }
2488 hdr->mds_ops->rpc_release(hdr);
2489 }
2490}
2491
2492static void pnfs_readhdr_free(struct nfs_pgio_header *hdr)
2493{
2494 pnfs_put_lseg(hdr->lseg);
2495 nfs_pgio_header_free(hdr);
2496}
2497
2498int
2499pnfs_generic_pg_readpages(struct nfs_pageio_descriptor *desc)
2500{
2501 struct nfs_pgio_header *hdr;
2502 int ret;
2503
2504 hdr = nfs_pgio_header_alloc(desc->pg_rw_ops);
2505 if (!hdr) {
2506 desc->pg_error = -ENOMEM;
2507 return desc->pg_error;
2508 }
2509 nfs_pgheader_init(desc, hdr, pnfs_readhdr_free);
2510 hdr->lseg = pnfs_get_lseg(desc->pg_lseg);
2511 ret = nfs_generic_pgio(desc, hdr);
2512 if (!ret)
2513 pnfs_do_read(desc, hdr);
2514 return ret;
2515}
2516EXPORT_SYMBOL_GPL(pnfs_generic_pg_readpages);
2517
2518static void pnfs_clear_layoutcommitting(struct inode *inode)
2519{
2520 unsigned long *bitlock = &NFS_I(inode)->flags;
2521
2522 clear_bit_unlock(NFS_INO_LAYOUTCOMMITTING, bitlock);
2523 smp_mb__after_atomic();
2524 wake_up_bit(bitlock, NFS_INO_LAYOUTCOMMITTING);
2525}
2526
2527/*
2528 * Gather all RW layout segments that still need a LAYOUTCOMMIT; there can
2529 * be more than one.
 */
2530static void pnfs_list_write_lseg(struct inode *inode, struct list_head *listp)
2531{
2532 struct pnfs_layout_segment *lseg;
2533
2534 list_for_each_entry(lseg, &NFS_I(inode)->layout->plh_segs, pls_list) {
2535 if (lseg->pls_range.iomode == IOMODE_RW &&
2536 test_and_clear_bit(NFS_LSEG_LAYOUTCOMMIT, &lseg->pls_flags))
2537 list_add(&lseg->pls_lc_list, listp);
2538 }
2539}
2540
2541static void pnfs_list_write_lseg_done(struct inode *inode, struct list_head *listp)
2542{
2543 struct pnfs_layout_segment *lseg, *tmp;
2544
2545	/* Matches the reference taken in pnfs_set_layoutcommit() */
2546 list_for_each_entry_safe(lseg, tmp, listp, pls_lc_list) {
2547 list_del_init(&lseg->pls_lc_list);
2548 pnfs_put_lseg(lseg);
2549 }
2550
2551 pnfs_clear_layoutcommitting(inode);
2552}
2553
2554void pnfs_set_lo_fail(struct pnfs_layout_segment *lseg)
2555{
2556 pnfs_layout_io_set_failed(lseg->pls_layout, lseg->pls_range.iomode);
2557}
2558EXPORT_SYMBOL_GPL(pnfs_set_lo_fail);
2559
2560void
2561pnfs_set_layoutcommit(struct inode *inode, struct pnfs_layout_segment *lseg,
2562 loff_t end_pos)
2563{
2564 struct nfs_inode *nfsi = NFS_I(inode);
2565 bool mark_as_dirty = false;
2566
2567 spin_lock(&inode->i_lock);
2568 if (!test_and_set_bit(NFS_INO_LAYOUTCOMMIT, &nfsi->flags)) {
2569 nfsi->layout->plh_lwb = end_pos;
2570 mark_as_dirty = true;
2571 dprintk("%s: Set layoutcommit for inode %lu\n",
2572 __func__, inode->i_ino);
2573 } else if (end_pos > nfsi->layout->plh_lwb)
2574 nfsi->layout->plh_lwb = end_pos;
2575 if (!test_and_set_bit(NFS_LSEG_LAYOUTCOMMIT, &lseg->pls_flags)) {
2576		/* Reference dropped in pnfs_list_write_lseg_done() */
2577 pnfs_get_lseg(lseg);
2578 }
2579 spin_unlock(&inode->i_lock);
2580 dprintk("%s: lseg %p end_pos %llu\n",
2581 __func__, lseg, nfsi->layout->plh_lwb);
2582
2583	/* Mark the inode dirty so writeback eventually issues the LAYOUTCOMMIT
2584	 * for the byte range recorded above. */
2585 if (mark_as_dirty)
2586 mark_inode_dirty_sync(inode);
2587}
2588EXPORT_SYMBOL_GPL(pnfs_set_layoutcommit);
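
/*
 * Example of the last-write-byte (plh_lwb) bookkeeping above, with made-up
 * offsets: two pNFS writes ending at byte offsets 4096 and 8192 leave
 * plh_lwb == 8192 (the larger end_pos wins), and the eventual LAYOUTCOMMIT
 * in pnfs_layoutcommit_inode() reports lastbytewritten == 8191.
 */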
2589
2590void pnfs_cleanup_layoutcommit(struct nfs4_layoutcommit_data *data)
2591{
2592 struct nfs_server *nfss = NFS_SERVER(data->args.inode);
2593
2594 if (nfss->pnfs_curr_ld->cleanup_layoutcommit)
2595 nfss->pnfs_curr_ld->cleanup_layoutcommit(data);
2596 pnfs_list_write_lseg_done(data->args.inode, &data->lseg_list);
2597}
2598
2599/*
2600 * Send a LAYOUTCOMMIT to the MDS so that data written through the layout
2601 * (and the resulting size/change attribute updates) becomes visible on the
2602 * metadata server.  A no-op unless pnfs_set_layoutcommit() has flagged the
2603 * inode since the last commit.  Returns 0 on success, -EAGAIN if a commit
2604 * is already in progress and @sync is not set, or another negative errno.
2605 */
2606
2607int
2608pnfs_layoutcommit_inode(struct inode *inode, bool sync)
2609{
2610 struct pnfs_layoutdriver_type *ld = NFS_SERVER(inode)->pnfs_curr_ld;
2611 struct nfs4_layoutcommit_data *data;
2612 struct nfs_inode *nfsi = NFS_I(inode);
2613 loff_t end_pos;
2614 int status;
2615
2616 if (!pnfs_layoutcommit_outstanding(inode))
2617 return 0;
2618
2619 dprintk("--> %s inode %lu\n", __func__, inode->i_ino);
2620
2621 status = -EAGAIN;
2622 if (test_and_set_bit(NFS_INO_LAYOUTCOMMITTING, &nfsi->flags)) {
2623 if (!sync)
2624 goto out;
2625 status = wait_on_bit_lock_action(&nfsi->flags,
2626 NFS_INO_LAYOUTCOMMITTING,
2627 nfs_wait_bit_killable,
2628 TASK_KILLABLE);
2629 if (status)
2630 goto out;
2631 }
2632
2633 status = -ENOMEM;
2634
2635 data = kzalloc(sizeof(*data), GFP_NOFS);
2636 if (!data)
2637 goto clear_layoutcommitting;
2638
2639 status = 0;
2640 spin_lock(&inode->i_lock);
2641 if (!test_and_clear_bit(NFS_INO_LAYOUTCOMMIT, &nfsi->flags))
2642 goto out_unlock;
2643
2644 INIT_LIST_HEAD(&data->lseg_list);
2645 pnfs_list_write_lseg(inode, &data->lseg_list);
2646
2647 end_pos = nfsi->layout->plh_lwb;
2648
2649 nfs4_stateid_copy(&data->args.stateid, &nfsi->layout->plh_stateid);
2650 spin_unlock(&inode->i_lock);
2651
2652 data->args.inode = inode;
2653 data->cred = get_rpccred(nfsi->layout->plh_lc_cred);
2654 nfs_fattr_init(&data->fattr);
2655 data->args.bitmask = NFS_SERVER(inode)->cache_consistency_bitmask;
2656 data->res.fattr = &data->fattr;
2657 if (end_pos != 0)
2658 data->args.lastbytewritten = end_pos - 1;
2659 else
2660 data->args.lastbytewritten = U64_MAX;
2661 data->res.server = NFS_SERVER(inode);
2662
2663 if (ld->prepare_layoutcommit) {
2664 status = ld->prepare_layoutcommit(&data->args);
2665 if (status) {
2666 put_rpccred(data->cred);
2667 spin_lock(&inode->i_lock);
2668 set_bit(NFS_INO_LAYOUTCOMMIT, &nfsi->flags);
2669 if (end_pos > nfsi->layout->plh_lwb)
2670 nfsi->layout->plh_lwb = end_pos;
2671 goto out_unlock;
2672 }
2673 }
2674
2675
2676 status = nfs4_proc_layoutcommit(data, sync);
2677out:
2678 if (status)
2679 mark_inode_dirty_sync(inode);
2680 dprintk("<-- %s status %d\n", __func__, status);
2681 return status;
2682out_unlock:
2683 spin_unlock(&inode->i_lock);
2684 kfree(data);
2685clear_layoutcommitting:
2686 pnfs_clear_layoutcommitting(inode);
2687 goto out;
2688}
2689EXPORT_SYMBOL_GPL(pnfs_layoutcommit_inode);
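
/*
 * Callers: pnfs_generic_sync() below invokes this synchronously on fsync, and
 * the VFS writeback path is expected to reach it through the NFS ->write_inode
 * implementation once pnfs_set_layoutcommit() has marked the inode dirty.
 * (How ->write_inode is wired up lives outside this file; treat the above as
 * the intended flow rather than a guarantee.)
 */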
2690
2691int
2692pnfs_generic_sync(struct inode *inode, bool datasync)
2693{
2694 return pnfs_layoutcommit_inode(inode, true);
2695}
2696EXPORT_SYMBOL_GPL(pnfs_generic_sync);
2697
2698struct nfs4_threshold *pnfs_mdsthreshold_alloc(void)
2699{
2700 struct nfs4_threshold *thp;
2701
2702 thp = kzalloc(sizeof(*thp), GFP_NOFS);
2703 if (!thp) {
2704 dprintk("%s mdsthreshold allocation failed\n", __func__);
2705 return NULL;
2706 }
2707 return thp;
2708}
2709
2710#if IS_ENABLED(CONFIG_NFS_V4_2)
2711int
2712pnfs_report_layoutstat(struct inode *inode, gfp_t gfp_flags)
2713{
2714 struct pnfs_layoutdriver_type *ld = NFS_SERVER(inode)->pnfs_curr_ld;
2715 struct nfs_server *server = NFS_SERVER(inode);
2716 struct nfs_inode *nfsi = NFS_I(inode);
2717 struct nfs42_layoutstat_data *data;
2718 struct pnfs_layout_hdr *hdr;
2719 int status = 0;
2720
2721 if (!pnfs_enabled_sb(server) || !ld->prepare_layoutstats)
2722 goto out;
2723
2724 if (!nfs_server_capable(inode, NFS_CAP_LAYOUTSTATS))
2725 goto out;
2726
2727 if (test_and_set_bit(NFS_INO_LAYOUTSTATS, &nfsi->flags))
2728 goto out;
2729
2730 spin_lock(&inode->i_lock);
2731 if (!NFS_I(inode)->layout) {
2732 spin_unlock(&inode->i_lock);
2733 goto out_clear_layoutstats;
2734 }
2735 hdr = NFS_I(inode)->layout;
2736 pnfs_get_layout_hdr(hdr);
2737 spin_unlock(&inode->i_lock);
2738
2739 data = kzalloc(sizeof(*data), gfp_flags);
2740 if (!data) {
2741 status = -ENOMEM;
2742 goto out_put;
2743 }
2744
2745 data->args.fh = NFS_FH(inode);
2746 data->args.inode = inode;
2747 status = ld->prepare_layoutstats(&data->args);
2748 if (status)
2749 goto out_free;
2750
2751 status = nfs42_proc_layoutstats_generic(NFS_SERVER(inode), data);
2752
2753out:
2754 dprintk("%s returns %d\n", __func__, status);
2755 return status;
2756
2757out_free:
2758 kfree(data);
2759out_put:
2760 pnfs_put_layout_hdr(hdr);
2761out_clear_layoutstats:
2762 smp_mb__before_atomic();
2763 clear_bit(NFS_INO_LAYOUTSTATS, &nfsi->flags);
2764 smp_mb__after_atomic();
2765 goto out;
2766}
2767EXPORT_SYMBOL_GPL(pnfs_report_layoutstat);
2768#endif
2769
2770unsigned int layoutstats_timer;
2771module_param(layoutstats_timer, uint, 0644);
2772EXPORT_SYMBOL_GPL(layoutstats_timer);
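
/*
 * layoutstats_timer selects the LAYOUTSTATS reporting interval in seconds;
 * 0 (the default) lets the layout driver pick its own interval.  Assuming
 * this file is built into nfsv4.ko as in a standard build, the parameter can
 * be tuned at runtime with something like:
 *
 *	echo 60 > /sys/module/nfsv4/parameters/layoutstats_timer
 *
 * (path shown for illustration; adjust to wherever a given build actually
 * exposes its module parameters).
 */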
2773