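/* pNFS functions to call and manage layout drivers. */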
#include <linux/nfs_fs.h>
#include <linux/nfs_page.h>
#include <linux/module.h>
#include <linux/sort.h>
#include "internal.h"
#include "pnfs.h"
#include "iostat.h"
#include "nfs4trace.h"
#include "delegation.h"
#include "nfs42.h"
#include "nfs4_fs.h"

#define NFSDBG_FACILITY		NFSDBG_PNFS
#define PNFS_LAYOUTGET_RETRY_TIMEOUT	(120 * HZ)
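
/*
 * Locking:
 *
 * pnfs_spinlock:
 *	protects pnfs_modules_tbl.
 */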
static DEFINE_SPINLOCK(pnfs_spinlock);
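
/*
 * pnfs_modules_tbl holds all pnfs modules
 */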
static LIST_HEAD(pnfs_modules_tbl);

static void pnfs_layoutreturn_before_put_layout_hdr(struct pnfs_layout_hdr *lo);
static void pnfs_free_returned_lsegs(struct pnfs_layout_hdr *lo,
		struct list_head *free_me,
		const struct pnfs_layout_range *range,
		u32 seq);
static bool pnfs_lseg_dec_and_remove_zero(struct pnfs_layout_segment *lseg,
		struct list_head *tmp_list);

/* Return the registered pnfs layout driver module matching given id */
static struct pnfs_layoutdriver_type *
find_pnfs_driver_locked(u32 id)
{
	struct pnfs_layoutdriver_type *local;

	list_for_each_entry(local, &pnfs_modules_tbl, pnfs_tblid)
		if (local->id == id)
			goto out;
	local = NULL;
out:
	dprintk("%s: Searching for id %u, found %p\n", __func__, id, local);
	return local;
}

static struct pnfs_layoutdriver_type *
find_pnfs_driver(u32 id)
{
	struct pnfs_layoutdriver_type *local;

	spin_lock(&pnfs_spinlock);
	local = find_pnfs_driver_locked(id);
	if (local != NULL && !try_module_get(local->owner)) {
		dprintk("%s: Could not grab reference on module\n", __func__);
		local = NULL;
	}
	spin_unlock(&pnfs_spinlock);
	return local;
}

void
unset_pnfs_layoutdriver(struct nfs_server *nfss)
{
	if (nfss->pnfs_curr_ld) {
		if (nfss->pnfs_curr_ld->clear_layoutdriver)
			nfss->pnfs_curr_ld->clear_layoutdriver(nfss);
		/* Decrement the MDS count. Purge the deviceid cache if zero */
		if (atomic_dec_and_test(&nfss->nfs_client->cl_mds_count))
			nfs4_deviceid_purge_client(nfss->nfs_client);
		module_put(nfss->pnfs_curr_ld->owner);
	}
	nfss->pnfs_curr_ld = NULL;
}
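
/*
 * When the server sends a list of layout types, we choose one in the order
 * given in the list below.
 */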
static const u32 ld_prefs[] = {
	LAYOUT_SCSI,
	LAYOUT_BLOCK_VOLUME,
	LAYOUT_OSD2_OBJECTS,
	LAYOUT_FLEX_FILES,
	LAYOUT_NFSV4_1_FILES,
	0
};

static int
ld_cmp(const void *e1, const void *e2)
{
	u32 ld1 = *((u32 *)e1);
	u32 ld2 = *((u32 *)e2);
	int i;

	for (i = 0; ld_prefs[i] != 0; i++) {
		if (ld1 == ld_prefs[i])
			return -1;

		if (ld2 == ld_prefs[i])
			return 1;
	}
	return 0;
}
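
/*
 * Try to set the server's pnfs module to the pnfs layout type specified by
 * id.  Currently only one pNFS layout driver per filesystem is supported.
 *
 * @mntfh: the filehandle of the mountpoint
 * @fsinfo: holds the list of layout types supported by the MDS
 */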
void
set_pnfs_layoutdriver(struct nfs_server *server, const struct nfs_fh *mntfh,
		      struct nfs_fsinfo *fsinfo)
{
	struct pnfs_layoutdriver_type *ld_type = NULL;
	u32 id;
	int i;

	if (fsinfo->nlayouttypes == 0)
		goto out_no_driver;
	if (!(server->nfs_client->cl_exchange_flags &
		 (EXCHGID4_FLAG_USE_NON_PNFS | EXCHGID4_FLAG_USE_PNFS_MDS))) {
		printk(KERN_ERR "NFS: %s: cl_exchange_flags 0x%x\n",
			__func__, server->nfs_client->cl_exchange_flags);
		goto out_no_driver;
	}

	sort(fsinfo->layouttype, fsinfo->nlayouttypes,
		sizeof(*fsinfo->layouttype), ld_cmp, NULL);

	for (i = 0; i < fsinfo->nlayouttypes; i++) {
		id = fsinfo->layouttype[i];
		ld_type = find_pnfs_driver(id);
		if (!ld_type) {
			request_module("%s-%u", LAYOUT_NFSV4_1_MODULE_PREFIX,
					id);
			ld_type = find_pnfs_driver(id);
		}
		if (ld_type)
			break;
	}

	if (!ld_type) {
		dprintk("%s: No pNFS module found!\n", __func__);
		goto out_no_driver;
	}

	server->pnfs_curr_ld = ld_type;
	if (ld_type->set_layoutdriver
	    && ld_type->set_layoutdriver(server, mntfh)) {
		printk(KERN_ERR "NFS: %s: Error initializing pNFS layout "
			"driver %u.\n", __func__, id);
		module_put(ld_type->owner);
		goto out_no_driver;
	}
	/* Bump the MDS count */
	atomic_inc(&server->nfs_client->cl_mds_count);

	dprintk("%s: pNFS module for %u set\n", __func__, id);
	return;

out_no_driver:
	dprintk("%s: Using NFSv4 I/O\n", __func__);
	server->pnfs_curr_ld = NULL;
}

int
pnfs_register_layoutdriver(struct pnfs_layoutdriver_type *ld_type)
{
	int status = -EINVAL;
	struct pnfs_layoutdriver_type *tmp;

	if (ld_type->id == 0) {
		printk(KERN_ERR "NFS: %s id 0 is reserved\n", __func__);
		return status;
	}
	if (!ld_type->alloc_lseg || !ld_type->free_lseg) {
		printk(KERN_ERR "NFS: %s Layout driver must provide "
		       "alloc_lseg and free_lseg.\n", __func__);
		return status;
	}

	spin_lock(&pnfs_spinlock);
	tmp = find_pnfs_driver_locked(ld_type->id);
	if (!tmp) {
		list_add(&ld_type->pnfs_tblid, &pnfs_modules_tbl);
		status = 0;
		dprintk("%s Registering id:%u name:%s\n", __func__, ld_type->id,
			ld_type->name);
	} else {
		printk(KERN_ERR "NFS: %s Module with id %d already loaded!\n",
			__func__, ld_type->id);
	}
	spin_unlock(&pnfs_spinlock);

	return status;
}
EXPORT_SYMBOL_GPL(pnfs_register_layoutdriver);

void
pnfs_unregister_layoutdriver(struct pnfs_layoutdriver_type *ld_type)
{
	dprintk("%s Deregistering id:%u\n", __func__, ld_type->id);
	spin_lock(&pnfs_spinlock);
	list_del(&ld_type->pnfs_tblid);
	spin_unlock(&pnfs_spinlock);
}
EXPORT_SYMBOL_GPL(pnfs_unregister_layoutdriver);
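
/*
 * pNFS client layout cache
 */

/* Need to hold i_lock if caller does not already hold reference */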
void
pnfs_get_layout_hdr(struct pnfs_layout_hdr *lo)
{
	refcount_inc(&lo->plh_refcount);
}

static struct pnfs_layout_hdr *
pnfs_alloc_layout_hdr(struct inode *ino, gfp_t gfp_flags)
{
	struct pnfs_layoutdriver_type *ld = NFS_SERVER(ino)->pnfs_curr_ld;
	return ld->alloc_layout_hdr(ino, gfp_flags);
}

static void
pnfs_free_layout_hdr(struct pnfs_layout_hdr *lo)
{
	struct nfs_server *server = NFS_SERVER(lo->plh_inode);
	struct pnfs_layoutdriver_type *ld = server->pnfs_curr_ld;

	if (test_and_clear_bit(NFS_LAYOUT_HASHED, &lo->plh_flags)) {
		struct nfs_client *clp = server->nfs_client;

		spin_lock(&clp->cl_lock);
		list_del_rcu(&lo->plh_layouts);
		spin_unlock(&clp->cl_lock);
	}
	put_cred(lo->plh_lc_cred);
	return ld->free_layout_hdr(lo);
}

static void
pnfs_detach_layout_hdr(struct pnfs_layout_hdr *lo)
{
	struct nfs_inode *nfsi = NFS_I(lo->plh_inode);
	dprintk("%s: freeing layout cache %p\n", __func__, lo);
	nfsi->layout = NULL;
	/* Reset MDS Threshold I/O counters */
	nfsi->write_io = 0;
	nfsi->read_io = 0;
}

void
pnfs_put_layout_hdr(struct pnfs_layout_hdr *lo)
{
	struct inode *inode;
	unsigned long i_state;

	if (!lo)
		return;
	inode = lo->plh_inode;
	pnfs_layoutreturn_before_put_layout_hdr(lo);

	if (refcount_dec_and_lock(&lo->plh_refcount, &inode->i_lock)) {
		if (!list_empty(&lo->plh_segs))
			WARN_ONCE(1, "NFS: BUG unfreed layout segments.\n");
		pnfs_detach_layout_hdr(lo);
		i_state = inode->i_state;
		spin_unlock(&inode->i_lock);
		pnfs_free_layout_hdr(lo);
		/* Notify pnfs_destroy_layout_final() that we're done */
		if (i_state & (I_FREEING | I_CLEAR))
			wake_up_var(lo);
	}
}

static struct inode *
pnfs_grab_inode_layout_hdr(struct pnfs_layout_hdr *lo)
{
	struct inode *inode = igrab(lo->plh_inode);
	if (inode)
		return inode;
	set_bit(NFS_LAYOUT_INODE_FREEING, &lo->plh_flags);
	return NULL;
}
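
/*
 * Compare 2 layout stateid sequence ids, to see which is newer,
 * taking into account wraparound issues.
 */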
static bool pnfs_seqid_is_newer(u32 s1, u32 s2)
{
	return (s32)(s1 - s2) > 0;
}

static void pnfs_barrier_update(struct pnfs_layout_hdr *lo, u32 newseq)
{
	if (pnfs_seqid_is_newer(newseq, lo->plh_barrier))
		lo->plh_barrier = newseq;
}

static void
pnfs_set_plh_return_info(struct pnfs_layout_hdr *lo, enum pnfs_iomode iomode,
			 u32 seq)
{
	if (lo->plh_return_iomode != 0 && lo->plh_return_iomode != iomode)
		iomode = IOMODE_ANY;
	lo->plh_return_iomode = iomode;
	set_bit(NFS_LAYOUT_RETURN_REQUESTED, &lo->plh_flags);
	if (seq != 0) {
		WARN_ON_ONCE(lo->plh_return_seq != 0 && lo->plh_return_seq != seq);
		lo->plh_return_seq = seq;
		pnfs_barrier_update(lo, seq);
	}
}

static void
pnfs_clear_layoutreturn_info(struct pnfs_layout_hdr *lo)
{
	struct pnfs_layout_segment *lseg;
	lo->plh_return_iomode = 0;
	lo->plh_return_seq = 0;
	clear_bit(NFS_LAYOUT_RETURN_REQUESTED, &lo->plh_flags);
	list_for_each_entry(lseg, &lo->plh_segs, pls_list) {
		if (!test_bit(NFS_LSEG_LAYOUTRETURN, &lseg->pls_flags))
			continue;
		pnfs_set_plh_return_info(lo, lseg->pls_range.iomode, 0);
	}
}

static void pnfs_clear_layoutreturn_waitbit(struct pnfs_layout_hdr *lo)
{
	clear_bit_unlock(NFS_LAYOUT_RETURN, &lo->plh_flags);
	clear_bit(NFS_LAYOUT_RETURN_LOCK, &lo->plh_flags);
	smp_mb__after_atomic();
	wake_up_bit(&lo->plh_flags, NFS_LAYOUT_RETURN);
	rpc_wake_up(&NFS_SERVER(lo->plh_inode)->roc_rpcwaitq);
}

static void
pnfs_clear_lseg_state(struct pnfs_layout_segment *lseg,
		struct list_head *free_me)
{
	clear_bit(NFS_LSEG_ROC, &lseg->pls_flags);
	clear_bit(NFS_LSEG_LAYOUTRETURN, &lseg->pls_flags);
	if (test_and_clear_bit(NFS_LSEG_VALID, &lseg->pls_flags))
		pnfs_lseg_dec_and_remove_zero(lseg, free_me);
	if (test_and_clear_bit(NFS_LSEG_LAYOUTCOMMIT, &lseg->pls_flags))
		pnfs_lseg_dec_and_remove_zero(lseg, free_me);
}
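
/*
 * Update the seqid of a layout stateid after receiving
 * NFS4ERR_OLD_STATEID
 */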
bool nfs4_layout_refresh_old_stateid(nfs4_stateid *dst,
		struct pnfs_layout_range *dst_range,
		struct inode *inode)
{
	struct pnfs_layout_hdr *lo;
	struct pnfs_layout_range range = {
		.iomode = IOMODE_ANY,
		.offset = 0,
		.length = NFS4_MAX_UINT64,
	};
	bool ret = false;
	LIST_HEAD(head);
	int err;

	spin_lock(&inode->i_lock);
	lo = NFS_I(inode)->layout;
	if (lo && pnfs_layout_is_valid(lo) &&
	    nfs4_stateid_match_other(dst, &lo->plh_stateid)) {
		/* Is our call using the most recent seqid? If so, bump it */
		if (!nfs4_stateid_is_newer(&lo->plh_stateid, dst)) {
			nfs4_stateid_seqid_inc(dst);
			ret = true;
			goto out;
		}
		/* Try to update the seqid to the most recent */
		err = pnfs_mark_matching_lsegs_return(lo, &head, &range, 0);
		if (err != -EBUSY) {
			dst->seqid = lo->plh_stateid.seqid;
			*dst_range = range;
			ret = true;
		}
	}
out:
	spin_unlock(&inode->i_lock);
	pnfs_free_lseg_list(&head);
	return ret;
}
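
/*
 * Mark a pnfs_layout_hdr and all associated layout segments as invalid
 *
 * In order to continue using the pnfs_layout_hdr, a full recovery
 * is required.
 * Note that caller must hold inode->i_lock.
 */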
int
pnfs_mark_layout_stateid_invalid(struct pnfs_layout_hdr *lo,
		struct list_head *lseg_list)
{
	struct pnfs_layout_range range = {
		.iomode = IOMODE_ANY,
		.offset = 0,
		.length = NFS4_MAX_UINT64,
	};
	struct pnfs_layout_segment *lseg, *next;

	set_bit(NFS_LAYOUT_INVALID_STID, &lo->plh_flags);
	list_for_each_entry_safe(lseg, next, &lo->plh_segs, pls_list)
		pnfs_clear_lseg_state(lseg, lseg_list);
	pnfs_clear_layoutreturn_info(lo);
	pnfs_free_returned_lsegs(lo, lseg_list, &range, 0);
	if (test_bit(NFS_LAYOUT_RETURN, &lo->plh_flags) &&
	    !test_and_set_bit(NFS_LAYOUT_RETURN_LOCK, &lo->plh_flags))
		pnfs_clear_layoutreturn_waitbit(lo);
	return !list_empty(&lo->plh_segs);
}

static int
pnfs_iomode_to_fail_bit(u32 iomode)
{
	return iomode == IOMODE_RW ?
		NFS_LAYOUT_RW_FAILED : NFS_LAYOUT_RO_FAILED;
}

static void
pnfs_layout_set_fail_bit(struct pnfs_layout_hdr *lo, int fail_bit)
{
	lo->plh_retry_timestamp = jiffies;
	if (!test_and_set_bit(fail_bit, &lo->plh_flags))
		refcount_inc(&lo->plh_refcount);
}

static void
pnfs_layout_clear_fail_bit(struct pnfs_layout_hdr *lo, int fail_bit)
{
	if (test_and_clear_bit(fail_bit, &lo->plh_flags))
		refcount_dec(&lo->plh_refcount);
}

static void
pnfs_layout_io_set_failed(struct pnfs_layout_hdr *lo, u32 iomode)
{
	struct inode *inode = lo->plh_inode;
	struct pnfs_layout_range range = {
		.iomode = iomode,
		.offset = 0,
		.length = NFS4_MAX_UINT64,
	};
	LIST_HEAD(head);

	spin_lock(&inode->i_lock);
	pnfs_layout_set_fail_bit(lo, pnfs_iomode_to_fail_bit(iomode));
	pnfs_mark_matching_lsegs_invalid(lo, &head, &range, 0);
	spin_unlock(&inode->i_lock);
	pnfs_free_lseg_list(&head);
	dprintk("%s Setting layout IOMODE_%s fail bit\n", __func__,
			iomode == IOMODE_RW ? "RW" : "READ");
}

static bool
pnfs_layout_io_test_failed(struct pnfs_layout_hdr *lo, u32 iomode)
{
	unsigned long start, end;
	int fail_bit = pnfs_iomode_to_fail_bit(iomode);

	if (test_bit(fail_bit, &lo->plh_flags) == 0)
		return false;
	end = jiffies;
	start = end - PNFS_LAYOUTGET_RETRY_TIMEOUT;
	if (!time_in_range(lo->plh_retry_timestamp, start, end)) {
		/* It is time to retry the failed layoutgets */
		pnfs_layout_clear_fail_bit(lo, fail_bit);
		return false;
	}
	return true;
}

static void
pnfs_init_lseg(struct pnfs_layout_hdr *lo, struct pnfs_layout_segment *lseg,
	const struct pnfs_layout_range *range,
	const nfs4_stateid *stateid)
{
	INIT_LIST_HEAD(&lseg->pls_list);
	INIT_LIST_HEAD(&lseg->pls_lc_list);
	INIT_LIST_HEAD(&lseg->pls_commits);
	refcount_set(&lseg->pls_refcount, 1);
	set_bit(NFS_LSEG_VALID, &lseg->pls_flags);
	lseg->pls_layout = lo;
	lseg->pls_range = *range;
	lseg->pls_seq = be32_to_cpu(stateid->seqid);
}

static void pnfs_free_lseg(struct pnfs_layout_segment *lseg)
{
	if (lseg != NULL) {
		struct inode *inode = lseg->pls_layout->plh_inode;
		NFS_SERVER(inode)->pnfs_curr_ld->free_lseg(lseg);
	}
}

static void
pnfs_layout_remove_lseg(struct pnfs_layout_hdr *lo,
		struct pnfs_layout_segment *lseg)
{
	WARN_ON(test_bit(NFS_LSEG_VALID, &lseg->pls_flags));
	list_del_init(&lseg->pls_list);
	/* Matched by pnfs_get_layout_hdr in pnfs_layout_insert_lseg */
	refcount_dec(&lo->plh_refcount);
	if (test_bit(NFS_LSEG_LAYOUTRETURN, &lseg->pls_flags))
		return;
	if (list_empty(&lo->plh_segs) &&
	    !test_bit(NFS_LAYOUT_RETURN_REQUESTED, &lo->plh_flags) &&
	    !test_bit(NFS_LAYOUT_RETURN, &lo->plh_flags)) {
		if (atomic_read(&lo->plh_outstanding) == 0)
			set_bit(NFS_LAYOUT_INVALID_STID, &lo->plh_flags);
		clear_bit(NFS_LAYOUT_BULK_RECALL, &lo->plh_flags);
	}
}

static bool
pnfs_cache_lseg_for_layoutreturn(struct pnfs_layout_hdr *lo,
		struct pnfs_layout_segment *lseg)
{
	if (test_and_clear_bit(NFS_LSEG_LAYOUTRETURN, &lseg->pls_flags) &&
	    pnfs_layout_is_valid(lo)) {
		pnfs_set_plh_return_info(lo, lseg->pls_range.iomode, 0);
		list_move_tail(&lseg->pls_list, &lo->plh_return_segs);
		return true;
	}
	return false;
}

void
pnfs_put_lseg(struct pnfs_layout_segment *lseg)
{
	struct pnfs_layout_hdr *lo;
	struct inode *inode;

	if (!lseg)
		return;

	dprintk("%s: lseg %p ref %d valid %d\n", __func__, lseg,
		refcount_read(&lseg->pls_refcount),
		test_bit(NFS_LSEG_VALID, &lseg->pls_flags));

	lo = lseg->pls_layout;
	inode = lo->plh_inode;

	if (refcount_dec_and_lock(&lseg->pls_refcount, &inode->i_lock)) {
		if (test_bit(NFS_LSEG_VALID, &lseg->pls_flags)) {
			spin_unlock(&inode->i_lock);
			return;
		}
		pnfs_get_layout_hdr(lo);
		pnfs_layout_remove_lseg(lo, lseg);
		if (pnfs_cache_lseg_for_layoutreturn(lo, lseg))
			lseg = NULL;
		spin_unlock(&inode->i_lock);
		pnfs_free_lseg(lseg);
		pnfs_put_layout_hdr(lo);
	}
}
EXPORT_SYMBOL_GPL(pnfs_put_lseg);
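
/*
 * is l2 fully contained in l1?
 *   start1                             end1
 *   [----------------------------------)
 *           start2           end2
 *           [----------------)
 */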
static bool
pnfs_lseg_range_contained(const struct pnfs_layout_range *l1,
			 const struct pnfs_layout_range *l2)
{
	u64 start1 = l1->offset;
	u64 end1 = pnfs_end_offset(start1, l1->length);
	u64 start2 = l2->offset;
	u64 end2 = pnfs_end_offset(start2, l2->length);

	return (start1 <= start2) && (end1 >= end2);
}

static bool pnfs_lseg_dec_and_remove_zero(struct pnfs_layout_segment *lseg,
		struct list_head *tmp_list)
{
	if (!refcount_dec_and_test(&lseg->pls_refcount))
		return false;
	pnfs_layout_remove_lseg(lseg->pls_layout, lseg);
	list_add(&lseg->pls_list, tmp_list);
	return true;
}

/* Returns 1 if lseg is removed from list, 0 otherwise */
static int mark_lseg_invalid(struct pnfs_layout_segment *lseg,
			     struct list_head *tmp_list)
{
	int rv = 0;

	if (test_and_clear_bit(NFS_LSEG_VALID, &lseg->pls_flags)) {
		/* Remove the reference keeping the lseg in the
		 * list.  It will now be removed when all
		 * outstanding io is finished.
		 */
		dprintk("%s: lseg %p ref %d\n", __func__, lseg,
			refcount_read(&lseg->pls_refcount));
		if (pnfs_lseg_dec_and_remove_zero(lseg, tmp_list))
			rv = 1;
	}
	return rv;
}

static bool
pnfs_should_free_range(const struct pnfs_layout_range *lseg_range,
		 const struct pnfs_layout_range *recall_range)
{
	return (recall_range->iomode == IOMODE_ANY ||
		lseg_range->iomode == recall_range->iomode) &&
	       pnfs_lseg_range_intersecting(lseg_range, recall_range);
}

static bool
pnfs_match_lseg_recall(const struct pnfs_layout_segment *lseg,
		const struct pnfs_layout_range *recall_range,
		u32 seq)
{
	if (seq != 0 && pnfs_seqid_is_newer(lseg->pls_seq, seq))
		return false;
	if (recall_range == NULL)
		return true;
	return pnfs_should_free_range(&lseg->pls_range, recall_range);
}
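
/**
 * pnfs_mark_matching_lsegs_invalid - tear down lsegs or mark them for later
 * @lo:		layout header containing the lsegs
 * @tmp_list:	list head where doomed lsegs should go
 * @recall_range: optional recall range argument to match (may be NULL)
 * @seq:	only invalidate lsegs obtained prior to this sequence (may be 0)
 *
 * Walk the list of lsegs in the layout header, and tear down any that should
 * be destroyed. If "recall_range" is specified then the segment must match
 * that range. If "seq" is non-zero, then only match segments that were handed
 * out at or before that sequence.
 *
 * Returns number of matching invalid lsegs remaining in list after scanning
 * it and purging them.
 */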
int
pnfs_mark_matching_lsegs_invalid(struct pnfs_layout_hdr *lo,
			    struct list_head *tmp_list,
			    const struct pnfs_layout_range *recall_range,
			    u32 seq)
{
	struct pnfs_layout_segment *lseg, *next;
	int remaining = 0;

	dprintk("%s:Begin lo %p\n", __func__, lo);

	if (list_empty(&lo->plh_segs))
		return 0;
	list_for_each_entry_safe(lseg, next, &lo->plh_segs, pls_list)
		if (pnfs_match_lseg_recall(lseg, recall_range, seq)) {
			dprintk("%s: freeing lseg %p iomode %d seq %u "
				"offset %llu length %llu\n", __func__,
				lseg, lseg->pls_range.iomode, lseg->pls_seq,
				lseg->pls_range.offset, lseg->pls_range.length);
			if (!mark_lseg_invalid(lseg, tmp_list))
				remaining++;
		}
	dprintk("%s:Return %i\n", __func__, remaining);
	return remaining;
}

static void
pnfs_free_returned_lsegs(struct pnfs_layout_hdr *lo,
		struct list_head *free_me,
		const struct pnfs_layout_range *range,
		u32 seq)
{
	struct pnfs_layout_segment *lseg, *next;

	list_for_each_entry_safe(lseg, next, &lo->plh_return_segs, pls_list) {
		if (pnfs_match_lseg_recall(lseg, range, seq))
			list_move_tail(&lseg->pls_list, free_me);
	}
}

/* note free_me must contain lsegs from a single layout_hdr */
void
pnfs_free_lseg_list(struct list_head *free_me)
{
	struct pnfs_layout_segment *lseg, *tmp;

	if (list_empty(free_me))
		return;

	list_for_each_entry_safe(lseg, tmp, free_me, pls_list) {
		list_del(&lseg->pls_list);
		pnfs_free_lseg(lseg);
	}
}

static struct pnfs_layout_hdr *__pnfs_destroy_layout(struct nfs_inode *nfsi)
{
	struct pnfs_layout_hdr *lo;
	LIST_HEAD(tmp_list);

	spin_lock(&nfsi->vfs_inode.i_lock);
	lo = nfsi->layout;
	if (lo) {
		pnfs_get_layout_hdr(lo);
		pnfs_mark_layout_stateid_invalid(lo, &tmp_list);
		pnfs_layout_clear_fail_bit(lo, NFS_LAYOUT_RO_FAILED);
		pnfs_layout_clear_fail_bit(lo, NFS_LAYOUT_RW_FAILED);
		spin_unlock(&nfsi->vfs_inode.i_lock);
		pnfs_free_lseg_list(&tmp_list);
		nfs_commit_inode(&nfsi->vfs_inode, 0);
		pnfs_put_layout_hdr(lo);
	} else
		spin_unlock(&nfsi->vfs_inode.i_lock);
	return lo;
}

void pnfs_destroy_layout(struct nfs_inode *nfsi)
{
	__pnfs_destroy_layout(nfsi);
}
EXPORT_SYMBOL_GPL(pnfs_destroy_layout);

static bool pnfs_layout_removed(struct nfs_inode *nfsi,
				struct pnfs_layout_hdr *lo)
{
	bool ret;

	spin_lock(&nfsi->vfs_inode.i_lock);
	ret = nfsi->layout != lo;
	spin_unlock(&nfsi->vfs_inode.i_lock);
	return ret;
}

void pnfs_destroy_layout_final(struct nfs_inode *nfsi)
{
	struct pnfs_layout_hdr *lo = __pnfs_destroy_layout(nfsi);

	if (lo)
		wait_var_event(lo, pnfs_layout_removed(nfsi, lo));
}

static bool
pnfs_layout_add_bulk_destroy_list(struct inode *inode,
		struct list_head *layout_list)
{
	struct pnfs_layout_hdr *lo;
	bool ret = false;

	spin_lock(&inode->i_lock);
	lo = NFS_I(inode)->layout;
	if (lo != NULL && list_empty(&lo->plh_bulk_destroy)) {
		pnfs_get_layout_hdr(lo);
		list_add(&lo->plh_bulk_destroy, layout_list);
		ret = true;
	}
	spin_unlock(&inode->i_lock);
	return ret;
}

/* Caller must hold rcu_read_lock and clp->cl_lock */
static int
pnfs_layout_bulk_destroy_byserver_locked(struct nfs_client *clp,
		struct nfs_server *server,
		struct list_head *layout_list)
	__must_hold(&clp->cl_lock)
	__must_hold(RCU)
{
	struct pnfs_layout_hdr *lo, *next;
	struct inode *inode;

	list_for_each_entry_safe(lo, next, &server->layouts, plh_layouts) {
		if (test_bit(NFS_LAYOUT_INVALID_STID, &lo->plh_flags) ||
		    test_bit(NFS_LAYOUT_INODE_FREEING, &lo->plh_flags) ||
		    !list_empty(&lo->plh_bulk_destroy))
			continue;
		/* If the sb is being destroyed, just bail */
		if (!nfs_sb_active(server->super))
			break;
		inode = pnfs_grab_inode_layout_hdr(lo);
		if (inode != NULL) {
			if (test_and_clear_bit(NFS_LAYOUT_HASHED, &lo->plh_flags))
				list_del_rcu(&lo->plh_layouts);
			if (pnfs_layout_add_bulk_destroy_list(inode,
						layout_list))
				continue;
			rcu_read_unlock();
			spin_unlock(&clp->cl_lock);
			iput(inode);
		} else {
			rcu_read_unlock();
			spin_unlock(&clp->cl_lock);
		}
		nfs_sb_deactive(server->super);
		spin_lock(&clp->cl_lock);
		rcu_read_lock();
		return -EAGAIN;
	}
	return 0;
}

static int
pnfs_layout_free_bulk_destroy_list(struct list_head *layout_list,
		bool is_bulk_recall)
{
	struct pnfs_layout_hdr *lo;
	struct inode *inode;
	LIST_HEAD(lseg_list);
	int ret = 0;

	while (!list_empty(layout_list)) {
		lo = list_entry(layout_list->next, struct pnfs_layout_hdr,
				plh_bulk_destroy);
		dprintk("%s freeing layout for inode %lu\n", __func__,
			lo->plh_inode->i_ino);
		inode = lo->plh_inode;

		pnfs_layoutcommit_inode(inode, false);

		spin_lock(&inode->i_lock);
		list_del_init(&lo->plh_bulk_destroy);
		if (pnfs_mark_layout_stateid_invalid(lo, &lseg_list)) {
			if (is_bulk_recall)
				set_bit(NFS_LAYOUT_BULK_RECALL, &lo->plh_flags);
			ret = -EAGAIN;
		}
		spin_unlock(&inode->i_lock);
		pnfs_free_lseg_list(&lseg_list);
		/* Free all lsegs that are attached to commit buckets */
		nfs_commit_inode(inode, 0);
		pnfs_put_layout_hdr(lo);
		nfs_iput_and_deactive(inode);
	}
	return ret;
}

int
pnfs_destroy_layouts_byfsid(struct nfs_client *clp,
		struct nfs_fsid *fsid,
		bool is_recall)
{
	struct nfs_server *server;
	LIST_HEAD(layout_list);

	spin_lock(&clp->cl_lock);
	rcu_read_lock();
restart:
	list_for_each_entry_rcu(server, &clp->cl_superblocks, client_link) {
		if (memcmp(&server->fsid, fsid, sizeof(*fsid)) != 0)
			continue;
		if (pnfs_layout_bulk_destroy_byserver_locked(clp,
					server,
					&layout_list) != 0)
			goto restart;
	}
	rcu_read_unlock();
	spin_unlock(&clp->cl_lock);

	if (list_empty(&layout_list))
		return 0;
	return pnfs_layout_free_bulk_destroy_list(&layout_list, is_recall);
}

int
pnfs_destroy_layouts_byclid(struct nfs_client *clp,
		bool is_recall)
{
	struct nfs_server *server;
	LIST_HEAD(layout_list);

	spin_lock(&clp->cl_lock);
	rcu_read_lock();
restart:
	list_for_each_entry_rcu(server, &clp->cl_superblocks, client_link) {
		if (pnfs_layout_bulk_destroy_byserver_locked(clp,
					server,
					&layout_list) != 0)
			goto restart;
	}
	rcu_read_unlock();
	spin_unlock(&clp->cl_lock);

	if (list_empty(&layout_list))
		return 0;
	return pnfs_layout_free_bulk_destroy_list(&layout_list, is_recall);
}
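
/*
 * Called by the state manager to remove all layouts established under an
 * expired lease.
 */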
void
pnfs_destroy_all_layouts(struct nfs_client *clp)
{
	nfs4_deviceid_mark_client_invalid(clp);
	nfs4_deviceid_purge_client(clp);

	pnfs_destroy_layouts_byclid(clp, false);
}

static void
pnfs_set_layout_cred(struct pnfs_layout_hdr *lo, const struct cred *cred)
{
	const struct cred *old;

	if (cred && cred_fscmp(lo->plh_lc_cred, cred) != 0) {
		old = xchg(&lo->plh_lc_cred, get_cred(cred));
		put_cred(old);
	}
}

/* update lo->plh_stateid with new if is more recent */
void
pnfs_set_layout_stateid(struct pnfs_layout_hdr *lo, const nfs4_stateid *new,
			const struct cred *cred, bool update_barrier)
{
	u32 oldseq, newseq, new_barrier = 0;

	oldseq = be32_to_cpu(lo->plh_stateid.seqid);
	newseq = be32_to_cpu(new->seqid);

	if (!pnfs_layout_is_valid(lo)) {
		pnfs_set_layout_cred(lo, cred);
		nfs4_stateid_copy(&lo->plh_stateid, new);
		lo->plh_barrier = newseq;
		pnfs_clear_layoutreturn_info(lo);
		clear_bit(NFS_LAYOUT_INVALID_STID, &lo->plh_flags);
		return;
	}
	if (pnfs_seqid_is_newer(newseq, oldseq)) {
		nfs4_stateid_copy(&lo->plh_stateid, new);
		/*
		 * Because of wraparound, we want to keep the barrier
		 * "close" to the current seqids.
		 */
		new_barrier = newseq - atomic_read(&lo->plh_outstanding);
	}
	if (update_barrier)
		new_barrier = be32_to_cpu(new->seqid);
	else if (new_barrier == 0)
		return;
	pnfs_barrier_update(lo, new_barrier);
}

static bool
pnfs_layout_stateid_blocked(const struct pnfs_layout_hdr *lo,
		const nfs4_stateid *stateid)
{
	u32 seqid = be32_to_cpu(stateid->seqid);

	return !pnfs_seqid_is_newer(seqid, lo->plh_barrier) && lo->plh_barrier;
}

static bool
pnfs_layoutgets_blocked(const struct pnfs_layout_hdr *lo)
{
	return lo->plh_block_lgets ||
		test_bit(NFS_LAYOUT_BULK_RECALL, &lo->plh_flags);
}

static struct nfs_server *
pnfs_find_server(struct inode *inode, struct nfs_open_context *ctx)
{
	struct nfs_server *server;

	if (inode) {
		server = NFS_SERVER(inode);
	} else {
		struct dentry *parent_dir = dget_parent(ctx->dentry);
		server = NFS_SERVER(parent_dir->d_inode);
		dput(parent_dir);
	}
	return server;
}

static void nfs4_free_pages(struct page **pages, size_t size)
{
	int i;

	if (!pages)
		return;

	for (i = 0; i < size; i++) {
		if (!pages[i])
			break;
		__free_page(pages[i]);
	}
	kfree(pages);
}

static struct page **nfs4_alloc_pages(size_t size, gfp_t gfp_flags)
{
	struct page **pages;
	int i;

	pages = kmalloc_array(size, sizeof(struct page *), gfp_flags);
	if (!pages) {
		dprintk("%s: can't alloc array of %zu pages\n", __func__, size);
		return NULL;
	}

	for (i = 0; i < size; i++) {
		pages[i] = alloc_page(gfp_flags);
		if (!pages[i]) {
			dprintk("%s: failed to allocate page\n", __func__);
			nfs4_free_pages(pages, i);
			return NULL;
		}
	}

	return pages;
}

static struct nfs4_layoutget *
pnfs_alloc_init_layoutget_args(struct inode *ino,
	   struct nfs_open_context *ctx,
	   const nfs4_stateid *stateid,
	   const struct pnfs_layout_range *range,
	   gfp_t gfp_flags)
{
	struct nfs_server *server = pnfs_find_server(ino, ctx);
	size_t max_reply_sz = server->pnfs_curr_ld->max_layoutget_response;
	size_t max_pages = max_response_pages(server);
	struct nfs4_layoutget *lgp;

	dprintk("--> %s\n", __func__);

	lgp = kzalloc(sizeof(*lgp), gfp_flags);
	if (lgp == NULL)
		return NULL;

	if (max_reply_sz) {
		size_t npages = (max_reply_sz + PAGE_SIZE - 1) >> PAGE_SHIFT;
		if (npages < max_pages)
			max_pages = npages;
	}

	lgp->args.layout.pages = nfs4_alloc_pages(max_pages, gfp_flags);
	if (!lgp->args.layout.pages) {
		kfree(lgp);
		return NULL;
	}
	lgp->args.layout.pglen = max_pages * PAGE_SIZE;
	lgp->res.layoutp = &lgp->args.layout;

	/* Don't confuse uninitialised result and success */
	lgp->res.status = -NFS4ERR_DELAY;

	lgp->args.minlength = PAGE_SIZE;
	if (lgp->args.minlength > range->length)
		lgp->args.minlength = range->length;
	if (ino) {
		loff_t i_size = i_size_read(ino);

		if (range->iomode == IOMODE_READ) {
			if (range->offset >= i_size)
				lgp->args.minlength = 0;
			else if (i_size - range->offset < lgp->args.minlength)
				lgp->args.minlength = i_size - range->offset;
		}
	}
	lgp->args.maxcount = PNFS_LAYOUT_MAXSIZE;
	pnfs_copy_range(&lgp->args.range, range);
	lgp->args.type = server->pnfs_curr_ld->id;
	lgp->args.inode = ino;
	lgp->args.ctx = get_nfs_open_context(ctx);
	nfs4_stateid_copy(&lgp->args.stateid, stateid);
	lgp->gfp_flags = gfp_flags;
	lgp->cred = ctx->cred;
	return lgp;
}

void pnfs_layoutget_free(struct nfs4_layoutget *lgp)
{
	size_t max_pages = lgp->args.layout.pglen / PAGE_SIZE;

	nfs4_free_pages(lgp->args.layout.pages, max_pages);
	if (lgp->args.inode)
		pnfs_put_layout_hdr(NFS_I(lgp->args.inode)->layout);
	put_nfs_open_context(lgp->args.ctx);
	kfree(lgp);
}

static void pnfs_clear_layoutcommit(struct inode *inode,
		struct list_head *head)
{
	struct nfs_inode *nfsi = NFS_I(inode);
	struct pnfs_layout_segment *lseg, *tmp;

	if (!test_and_clear_bit(NFS_INO_LAYOUTCOMMIT, &nfsi->flags))
		return;
	list_for_each_entry_safe(lseg, tmp, &nfsi->layout->plh_segs, pls_list) {
		if (!test_and_clear_bit(NFS_LSEG_LAYOUTCOMMIT, &lseg->pls_flags))
			continue;
		pnfs_lseg_dec_and_remove_zero(lseg, head);
	}
}

void pnfs_layoutreturn_free_lsegs(struct pnfs_layout_hdr *lo,
		const nfs4_stateid *arg_stateid,
		const struct pnfs_layout_range *range,
		const nfs4_stateid *stateid)
{
	struct inode *inode = lo->plh_inode;
	LIST_HEAD(freeme);

	spin_lock(&inode->i_lock);
	if (!pnfs_layout_is_valid(lo) ||
	    !nfs4_stateid_match_other(&lo->plh_stateid, arg_stateid))
		goto out_unlock;
	if (stateid) {
		u32 seq = be32_to_cpu(arg_stateid->seqid);

		pnfs_mark_matching_lsegs_invalid(lo, &freeme, range, seq);
		pnfs_free_returned_lsegs(lo, &freeme, range, seq);
		pnfs_set_layout_stateid(lo, stateid, NULL, true);
	} else
		pnfs_mark_layout_stateid_invalid(lo, &freeme);
out_unlock:
	pnfs_clear_layoutreturn_waitbit(lo);
	spin_unlock(&inode->i_lock);
	pnfs_free_lseg_list(&freeme);
}

static bool
pnfs_prepare_layoutreturn(struct pnfs_layout_hdr *lo,
		nfs4_stateid *stateid,
		const struct cred **cred,
		enum pnfs_iomode *iomode)
{
	/* Serialise LAYOUTGET/LAYOUTRETURN */
	if (atomic_read(&lo->plh_outstanding) != 0)
		return false;
	if (test_and_set_bit(NFS_LAYOUT_RETURN_LOCK, &lo->plh_flags))
		return false;
	set_bit(NFS_LAYOUT_RETURN, &lo->plh_flags);
	pnfs_get_layout_hdr(lo);
	nfs4_stateid_copy(stateid, &lo->plh_stateid);
	*cred = get_cred(lo->plh_lc_cred);
	if (test_bit(NFS_LAYOUT_RETURN_REQUESTED, &lo->plh_flags)) {
		if (lo->plh_return_seq != 0)
			stateid->seqid = cpu_to_be32(lo->plh_return_seq);
		if (iomode != NULL)
			*iomode = lo->plh_return_iomode;
		pnfs_clear_layoutreturn_info(lo);
	} else if (iomode != NULL)
		*iomode = IOMODE_ANY;
	pnfs_barrier_update(lo, be32_to_cpu(stateid->seqid));
	return true;
}

static void
pnfs_init_layoutreturn_args(struct nfs4_layoutreturn_args *args,
		struct pnfs_layout_hdr *lo,
		const nfs4_stateid *stateid,
		enum pnfs_iomode iomode)
{
	struct inode *inode = lo->plh_inode;

	args->layout_type = NFS_SERVER(inode)->pnfs_curr_ld->id;
	args->inode = inode;
	args->range.iomode = iomode;
	args->range.offset = 0;
	args->range.length = NFS4_MAX_UINT64;
	args->layout = lo;
	nfs4_stateid_copy(&args->stateid, stateid);
}

static int
pnfs_send_layoutreturn(struct pnfs_layout_hdr *lo,
		       const nfs4_stateid *stateid,
		       const struct cred **pcred,
		       enum pnfs_iomode iomode,
		       bool sync)
{
	struct inode *ino = lo->plh_inode;
	struct pnfs_layoutdriver_type *ld = NFS_SERVER(ino)->pnfs_curr_ld;
	struct nfs4_layoutreturn *lrp;
	const struct cred *cred = *pcred;
	int status = 0;

	*pcred = NULL;
	lrp = kzalloc(sizeof(*lrp), GFP_NOFS);
	if (unlikely(lrp == NULL)) {
		status = -ENOMEM;
		spin_lock(&ino->i_lock);
		pnfs_clear_layoutreturn_waitbit(lo);
		spin_unlock(&ino->i_lock);
		put_cred(cred);
		pnfs_put_layout_hdr(lo);
		goto out;
	}

	pnfs_init_layoutreturn_args(&lrp->args, lo, stateid, iomode);
	lrp->args.ld_private = &lrp->ld_private;
	lrp->clp = NFS_SERVER(ino)->nfs_client;
	lrp->cred = cred;
	if (ld->prepare_layoutreturn)
		ld->prepare_layoutreturn(&lrp->args);

	status = nfs4_proc_layoutreturn(lrp, sync);
out:
	dprintk("<-- %s status: %d\n", __func__, status);
	return status;
}

static bool
pnfs_layout_segments_returnable(struct pnfs_layout_hdr *lo,
		enum pnfs_iomode iomode,
		u32 seq)
{
	struct pnfs_layout_range recall_range = {
		.length = NFS4_MAX_UINT64,
		.iomode = iomode,
	};
	return pnfs_mark_matching_lsegs_return(lo, &lo->plh_return_segs,
					       &recall_range, seq) != -EBUSY;
}

/* Return true if layoutreturn is needed */
static bool
pnfs_layout_need_return(struct pnfs_layout_hdr *lo)
{
	if (!test_bit(NFS_LAYOUT_RETURN_REQUESTED, &lo->plh_flags))
		return false;
	return pnfs_layout_segments_returnable(lo, lo->plh_return_iomode,
					       lo->plh_return_seq);
}

static void pnfs_layoutreturn_before_put_layout_hdr(struct pnfs_layout_hdr *lo)
{
	struct inode *inode = lo->plh_inode;

	if (!test_bit(NFS_LAYOUT_RETURN_REQUESTED, &lo->plh_flags))
		return;
	spin_lock(&inode->i_lock);
	if (pnfs_layout_need_return(lo)) {
		const struct cred *cred;
		nfs4_stateid stateid;
		enum pnfs_iomode iomode;
		bool send;

		send = pnfs_prepare_layoutreturn(lo, &stateid, &cred, &iomode);
		spin_unlock(&inode->i_lock);
		if (send) {
			/* Send an async layoutreturn so we dont deadlock */
			pnfs_send_layoutreturn(lo, &stateid, &cred, iomode, false);
		}
	} else
		spin_unlock(&inode->i_lock);
}
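
/*
 * Initiates a LAYOUTRETURN(FILE), and removes the pnfs_layout_hdr
 * when the layout segment list is empty.
 *
 * Note that a pnfs_layout_hdr can exist with an empty layout segment
 * list when LAYOUTGET has failed, or when LAYOUTGET succeeded, but the
 * deviceid is marked invalid.
 */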
int
_pnfs_return_layout(struct inode *ino)
{
	struct pnfs_layout_hdr *lo = NULL;
	struct nfs_inode *nfsi = NFS_I(ino);
	LIST_HEAD(tmp_list);
	const struct cred *cred;
	nfs4_stateid stateid;
	int status = 0;
	bool send, valid_layout;

	dprintk("NFS: %s for inode %lu\n", __func__, ino->i_ino);

	spin_lock(&ino->i_lock);
	lo = nfsi->layout;
	if (!lo) {
		spin_unlock(&ino->i_lock);
		dprintk("NFS: %s no layout to return\n", __func__);
		goto out;
	}
	/* Reference matched in nfs4_layoutreturn_release */
	pnfs_get_layout_hdr(lo);
	/* Is there an outstanding layoutreturn ? */
	if (test_bit(NFS_LAYOUT_RETURN_LOCK, &lo->plh_flags)) {
		spin_unlock(&ino->i_lock);
		if (wait_on_bit(&lo->plh_flags, NFS_LAYOUT_RETURN,
					TASK_UNINTERRUPTIBLE))
			goto out_put_layout_hdr;
		spin_lock(&ino->i_lock);
	}
	valid_layout = pnfs_layout_is_valid(lo);
	pnfs_clear_layoutcommit(ino, &tmp_list);
	pnfs_mark_matching_lsegs_invalid(lo, &tmp_list, NULL, 0);

	if (NFS_SERVER(ino)->pnfs_curr_ld->return_range) {
		struct pnfs_layout_range range = {
			.iomode		= IOMODE_ANY,
			.offset		= 0,
			.length		= NFS4_MAX_UINT64,
		};
		NFS_SERVER(ino)->pnfs_curr_ld->return_range(lo, &range);
	}

	/* Don't send a LAYOUTRETURN if list was initially empty */
	if (!test_bit(NFS_LAYOUT_RETURN_REQUESTED, &lo->plh_flags) ||
			!valid_layout) {
		spin_unlock(&ino->i_lock);
		dprintk("NFS: %s no layout segments to return\n", __func__);
		goto out_wait_layoutreturn;
	}

	send = pnfs_prepare_layoutreturn(lo, &stateid, &cred, NULL);
	spin_unlock(&ino->i_lock);
	if (send)
		status = pnfs_send_layoutreturn(lo, &stateid, &cred, IOMODE_ANY, true);
out_wait_layoutreturn:
	wait_on_bit(&lo->plh_flags, NFS_LAYOUT_RETURN, TASK_UNINTERRUPTIBLE);
out_put_layout_hdr:
	pnfs_free_lseg_list(&tmp_list);
	pnfs_put_layout_hdr(lo);
out:
	dprintk("<-- %s status: %d\n", __func__, status);
	return status;
}

int
pnfs_commit_and_return_layout(struct inode *inode)
{
	struct pnfs_layout_hdr *lo;
	int ret;

	spin_lock(&inode->i_lock);
	lo = NFS_I(inode)->layout;
	if (lo == NULL) {
		spin_unlock(&inode->i_lock);
		return 0;
	}
	pnfs_get_layout_hdr(lo);
	/* Block new layoutgets and read/write to ds */
	lo->plh_block_lgets++;
	spin_unlock(&inode->i_lock);
	filemap_fdatawait(inode->i_mapping);
	ret = pnfs_layoutcommit_inode(inode, true);
	if (ret == 0)
		ret = _pnfs_return_layout(inode);
	spin_lock(&inode->i_lock);
	lo->plh_block_lgets--;
	spin_unlock(&inode->i_lock);
	pnfs_put_layout_hdr(lo);
	return ret;
}

bool pnfs_roc(struct inode *ino,
		struct nfs4_layoutreturn_args *args,
		struct nfs4_layoutreturn_res *res,
		const struct cred *cred)
{
	struct nfs_inode *nfsi = NFS_I(ino);
	struct nfs_open_context *ctx;
	struct nfs4_state *state;
	struct pnfs_layout_hdr *lo;
	struct pnfs_layout_segment *lseg, *next;
	const struct cred *lc_cred;
	nfs4_stateid stateid;
	enum pnfs_iomode iomode = 0;
	bool layoutreturn = false, roc = false;
	bool skip_read = false;

	if (!nfs_have_layout(ino))
		return false;
retry:
	rcu_read_lock();
	spin_lock(&ino->i_lock);
	lo = nfsi->layout;
	if (!lo || !pnfs_layout_is_valid(lo) ||
	    test_bit(NFS_LAYOUT_BULK_RECALL, &lo->plh_flags)) {
		lo = NULL;
		goto out_noroc;
	}
	pnfs_get_layout_hdr(lo);
	if (test_bit(NFS_LAYOUT_RETURN_LOCK, &lo->plh_flags)) {
		spin_unlock(&ino->i_lock);
		rcu_read_unlock();
		wait_on_bit(&lo->plh_flags, NFS_LAYOUT_RETURN,
				TASK_UNINTERRUPTIBLE);
		pnfs_put_layout_hdr(lo);
		goto retry;
	}

	/* no roc if we hold a delegation */
	if (nfs4_check_delegation(ino, FMODE_READ)) {
		if (nfs4_check_delegation(ino, FMODE_WRITE))
			goto out_noroc;
		skip_read = true;
	}

	list_for_each_entry_rcu(ctx, &nfsi->open_files, list) {
		state = ctx->state;
		if (state == NULL)
			continue;
		/* Don't return layout if there is open file state */
		if (state->state & FMODE_WRITE)
			goto out_noroc;
		if (state->state & FMODE_READ)
			skip_read = true;
	}

	list_for_each_entry_safe(lseg, next, &lo->plh_segs, pls_list) {
		if (skip_read && lseg->pls_range.iomode == IOMODE_READ)
			continue;
		/* If we are sending layoutreturn, invalidate all valid lsegs */
		if (!test_and_clear_bit(NFS_LSEG_ROC, &lseg->pls_flags))
			continue;
		/*
		 * Note: mark lseg for return so pnfs_layout_remove_lseg
		 * doesn't invalidate the layout stateid
		 */
		set_bit(NFS_LSEG_LAYOUTRETURN, &lseg->pls_flags);
		if (!mark_lseg_invalid(lseg, &lo->plh_return_segs))
			continue;
		pnfs_set_plh_return_info(lo, lseg->pls_range.iomode, 0);
	}

	if (!test_bit(NFS_LAYOUT_RETURN_REQUESTED, &lo->plh_flags))
		goto out_noroc;

	/* ROC in two conditions:
	 * 1. there are ROC lsegs
	 * 2. we don't send layoutreturn
	 */
	/* lo ref dropped in pnfs_roc_release() */
	layoutreturn = pnfs_prepare_layoutreturn(lo, &stateid, &lc_cred, &iomode);
	/* If the creds don't match, we can't compound the layoutreturn */
	if (!layoutreturn || cred_fscmp(cred, lc_cred) != 0)
		goto out_noroc;

	roc = layoutreturn;
	pnfs_init_layoutreturn_args(args, lo, &stateid, iomode);
	res->lrs_present = 0;
	layoutreturn = false;
	put_cred(lc_cred);

out_noroc:
	spin_unlock(&ino->i_lock);
	rcu_read_unlock();
	pnfs_layoutcommit_inode(ino, true);
	if (roc) {
		struct pnfs_layoutdriver_type *ld = NFS_SERVER(ino)->pnfs_curr_ld;
		if (ld->prepare_layoutreturn)
			ld->prepare_layoutreturn(args);
		pnfs_put_layout_hdr(lo);
		return true;
	}
	if (layoutreturn)
		pnfs_send_layoutreturn(lo, &stateid, &lc_cred, iomode, true);
	pnfs_put_layout_hdr(lo);
	return false;
}

int pnfs_roc_done(struct rpc_task *task, struct nfs4_layoutreturn_args **argpp,
		  struct nfs4_layoutreturn_res **respp, int *ret)
{
	struct nfs4_layoutreturn_args *arg = *argpp;
	int retval = -EAGAIN;

	if (!arg)
		return 0;
	/* Handle Layoutreturn errors */
	switch (*ret) {
	case 0:
		retval = 0;
		break;
	case -NFS4ERR_NOMATCHING_LAYOUT:
		/* Was there an RPC level error? If not, retry */
		if (task->tk_rpc_status == 0)
			break;
		/* If the call was not sent, let caller handle it */
		if (!RPC_WAS_SENT(task))
			return 0;
		/*
		 * Otherwise, assume the call succeeded and
		 * that we need to release the layout
		 */
		*ret = 0;
		(*respp)->lrs_present = 0;
		retval = 0;
		break;
	case -NFS4ERR_DELAY:
		/* Let the caller handle the retry */
		*ret = -NFS4ERR_NOMATCHING_LAYOUT;
		return 0;
	case -NFS4ERR_OLD_STATEID:
		if (!nfs4_layout_refresh_old_stateid(&arg->stateid,
						     &arg->range, arg->inode))
			break;
		*ret = -NFS4ERR_NOMATCHING_LAYOUT;
		return -EAGAIN;
	}
	*argpp = NULL;
	*respp = NULL;
	return retval;
}

void pnfs_roc_release(struct nfs4_layoutreturn_args *args,
		struct nfs4_layoutreturn_res *res,
		int ret)
{
	struct pnfs_layout_hdr *lo = args->layout;
	struct inode *inode = args->inode;
	const nfs4_stateid *res_stateid = NULL;
	struct nfs4_xdr_opaque_data *ld_private = args->ld_private;

	switch (ret) {
	case -NFS4ERR_NOMATCHING_LAYOUT:
		spin_lock(&inode->i_lock);
		if (pnfs_layout_is_valid(lo) &&
		    nfs4_stateid_match_other(&args->stateid, &lo->plh_stateid))
			pnfs_set_plh_return_info(lo, args->range.iomode, 0);
		pnfs_clear_layoutreturn_waitbit(lo);
		spin_unlock(&inode->i_lock);
		break;
	case 0:
		if (res->lrs_present)
			res_stateid = &res->stateid;
		fallthrough;
	default:
		pnfs_layoutreturn_free_lsegs(lo, &args->stateid, &args->range,
					     res_stateid);
	}
	trace_nfs4_layoutreturn_on_close(args->inode, &args->stateid, ret);
	if (ld_private && ld_private->ops && ld_private->ops->free)
		ld_private->ops->free(ld_private);
	pnfs_put_layout_hdr(lo);
}

bool pnfs_wait_on_layoutreturn(struct inode *ino, struct rpc_task *task)
{
	struct nfs_inode *nfsi = NFS_I(ino);
	struct pnfs_layout_hdr *lo;
	bool sleep = false;

	/* we might not have grabbed lo reference. so need to check inside
	 * i_lock */
	spin_lock(&ino->i_lock);
	lo = nfsi->layout;
	if (lo && test_bit(NFS_LAYOUT_RETURN, &lo->plh_flags)) {
		rpc_sleep_on(&NFS_SERVER(ino)->roc_rpcwaitq, task, NULL);
		sleep = true;
	}
	spin_unlock(&ino->i_lock);
	return sleep;
}
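
/*
 * Compare two layout segments for sorting into layout cache.
 * We want to preferentially return RW over RO layouts, so ensure those
 * are seen first.
 */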
static s64
pnfs_lseg_range_cmp(const struct pnfs_layout_range *l1,
		    const struct pnfs_layout_range *l2)
{
	s64 d;

	/* high offset > low offset */
	d = l1->offset - l2->offset;
	if (d)
		return d;

	/* short length > long length */
	d = l2->length - l1->length;
	if (d)
		return d;

	/* read > read/write */
	return (int)(l1->iomode == IOMODE_READ) - (int)(l2->iomode == IOMODE_READ);
}

static bool
pnfs_lseg_range_is_after(const struct pnfs_layout_range *l1,
			 const struct pnfs_layout_range *l2)
{
	return pnfs_lseg_range_cmp(l1, l2) > 0;
}

static bool
pnfs_lseg_no_merge(struct pnfs_layout_segment *lseg,
		struct pnfs_layout_segment *old)
{
	return false;
}

void
pnfs_generic_layout_insert_lseg(struct pnfs_layout_hdr *lo,
		   struct pnfs_layout_segment *lseg,
		   bool (*is_after)(const struct pnfs_layout_range *,
			   const struct pnfs_layout_range *),
		   bool (*do_merge)(struct pnfs_layout_segment *,
			   struct pnfs_layout_segment *),
		   struct list_head *free_me)
{
	struct pnfs_layout_segment *lp, *tmp;

	dprintk("%s:Begin\n", __func__);

	list_for_each_entry_safe(lp, tmp, &lo->plh_segs, pls_list) {
		if (test_bit(NFS_LSEG_VALID, &lp->pls_flags) == 0)
			continue;
		if (do_merge(lseg, lp)) {
			mark_lseg_invalid(lp, free_me);
			continue;
		}
		if (is_after(&lseg->pls_range, &lp->pls_range))
			continue;
		list_add_tail(&lseg->pls_list, &lp->pls_list);
		dprintk("%s: inserted lseg %p "
			"iomode %d offset %llu length %llu before "
			"lp %p iomode %d offset %llu length %llu\n",
			__func__, lseg, lseg->pls_range.iomode,
			lseg->pls_range.offset, lseg->pls_range.length,
			lp, lp->pls_range.iomode, lp->pls_range.offset,
			lp->pls_range.length);
		goto out;
	}
	list_add_tail(&lseg->pls_list, &lo->plh_segs);
	dprintk("%s: inserted lseg %p "
		"iomode %d offset %llu length %llu at tail\n",
		__func__, lseg, lseg->pls_range.iomode,
		lseg->pls_range.offset, lseg->pls_range.length);
out:
	pnfs_get_layout_hdr(lo);

	dprintk("%s:Return\n", __func__);
}
EXPORT_SYMBOL_GPL(pnfs_generic_layout_insert_lseg);

static void
pnfs_layout_insert_lseg(struct pnfs_layout_hdr *lo,
		   struct pnfs_layout_segment *lseg,
		   struct list_head *free_me)
{
	struct inode *inode = lo->plh_inode;
	struct pnfs_layoutdriver_type *ld = NFS_SERVER(inode)->pnfs_curr_ld;

	if (ld->add_lseg != NULL)
		ld->add_lseg(lo, lseg, free_me);
	else
		pnfs_generic_layout_insert_lseg(lo, lseg,
				pnfs_lseg_range_is_after,
				pnfs_lseg_no_merge,
				free_me);
}

static struct pnfs_layout_hdr *
alloc_init_layout_hdr(struct inode *ino,
		      struct nfs_open_context *ctx,
		      gfp_t gfp_flags)
{
	struct pnfs_layout_hdr *lo;

	lo = pnfs_alloc_layout_hdr(ino, gfp_flags);
	if (!lo)
		return NULL;
	refcount_set(&lo->plh_refcount, 1);
	INIT_LIST_HEAD(&lo->plh_layouts);
	INIT_LIST_HEAD(&lo->plh_segs);
	INIT_LIST_HEAD(&lo->plh_return_segs);
	INIT_LIST_HEAD(&lo->plh_bulk_destroy);
	lo->plh_inode = ino;
	lo->plh_lc_cred = get_cred(ctx->cred);
	lo->plh_flags |= 1 << NFS_LAYOUT_INVALID_STID;
	return lo;
}

static struct pnfs_layout_hdr *
pnfs_find_alloc_layout(struct inode *ino,
		       struct nfs_open_context *ctx,
		       gfp_t gfp_flags)
	__releases(&ino->i_lock)
	__acquires(&ino->i_lock)
{
	struct nfs_inode *nfsi = NFS_I(ino);
	struct pnfs_layout_hdr *new = NULL;

	dprintk("%s Begin ino=%p layout=%p\n", __func__, ino, nfsi->layout);

	if (nfsi->layout != NULL)
		goto out_existing;
	spin_unlock(&ino->i_lock);
	new = alloc_init_layout_hdr(ino, ctx, gfp_flags);
	spin_lock(&ino->i_lock);

	if (likely(nfsi->layout == NULL)) {	/* Won the race? */
		nfsi->layout = new;
		return new;
	} else if (new != NULL)
		pnfs_free_layout_hdr(new);
out_existing:
	pnfs_get_layout_hdr(nfsi->layout);
	return nfsi->layout;
}
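
/*
 * iomode matching rules:
 * iomode	lseg	strict match
 *		iomode
 * -----	-----	------ -----
 * ANY		READ	N/A    true
 * ANY		RW	N/A    true
 * RW		READ	N/A    false
 * RW		RW	N/A    true
 * READ		READ	N/A    true
 * READ		RW	true   false
 * READ		RW	false  true
 */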
static bool
pnfs_lseg_range_match(const struct pnfs_layout_range *ls_range,
		 const struct pnfs_layout_range *range,
		 bool strict_iomode)
{
	struct pnfs_layout_range range1;

	if ((range->iomode == IOMODE_RW &&
	     ls_range->iomode != IOMODE_RW) ||
	    (range->iomode != ls_range->iomode &&
	     strict_iomode) ||
	    !pnfs_lseg_range_intersecting(ls_range, range))
		return false;

	/* range1 covers only the first byte in the range */
	range1 = *range;
	range1.length = 1;
	return pnfs_lseg_range_contained(ls_range, &range1);
}

/*
 * lookup range in layout
 */
static struct pnfs_layout_segment *
pnfs_find_lseg(struct pnfs_layout_hdr *lo,
		struct pnfs_layout_range *range,
		bool strict_iomode)
{
	struct pnfs_layout_segment *lseg, *ret = NULL;

	dprintk("%s:Begin\n", __func__);

	list_for_each_entry(lseg, &lo->plh_segs, pls_list) {
		if (test_bit(NFS_LSEG_VALID, &lseg->pls_flags) &&
		    pnfs_lseg_range_match(&lseg->pls_range, range,
					  strict_iomode)) {
			ret = pnfs_get_lseg(lseg);
			break;
		}
	}

	dprintk("%s:Return lseg %p ref %d\n",
		__func__, ret, ret ? refcount_read(&ret->pls_refcount) : 0);
	return ret;
}
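
/*
 * Use mdsthreshold hints set at each OPEN to determine if
 * I/O should go to the MDS or the pNFS layout driver.
 *
 * The hints compare the file size against the read/write size
 * thresholds, and the amount of I/O issued so far against the
 * read/write I/O thresholds. If both a size and an I/O threshold
 * are set, both must be satisfied before I/O is directed to the MDS.
 */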
static bool pnfs_within_mdsthreshold(struct nfs_open_context *ctx,
				     struct inode *ino, int iomode)
{
	struct nfs4_threshold *t = ctx->mdsthreshold;
	struct nfs_inode *nfsi = NFS_I(ino);
	loff_t fsize = i_size_read(ino);
	bool size = false, size_set = false, io = false, io_set = false, ret = false;

	if (t == NULL)
		return ret;

	dprintk("%s bm=0x%x rd_sz=%llu wr_sz=%llu rd_io=%llu wr_io=%llu\n",
		__func__, t->bm, t->rd_sz, t->wr_sz, t->rd_io_sz, t->wr_io_sz);

	switch (iomode) {
	case IOMODE_READ:
		if (t->bm & THRESHOLD_RD) {
			dprintk("%s fsize %llu\n", __func__, fsize);
			size_set = true;
			if (fsize < t->rd_sz)
				size = true;
		}
		if (t->bm & THRESHOLD_RD_IO) {
			dprintk("%s nfsi->read_io %llu\n", __func__,
				nfsi->read_io);
			io_set = true;
			if (nfsi->read_io < t->rd_io_sz)
				io = true;
		}
		break;
	case IOMODE_RW:
		if (t->bm & THRESHOLD_WR) {
			dprintk("%s fsize %llu\n", __func__, fsize);
			size_set = true;
			if (fsize < t->wr_sz)
				size = true;
		}
		if (t->bm & THRESHOLD_WR_IO) {
			dprintk("%s nfsi->write_io %llu\n", __func__,
				nfsi->write_io);
			io_set = true;
			if (nfsi->write_io < t->wr_io_sz)
				io = true;
		}
		break;
	}
	if (size_set && io_set) {
		if (size && io)
			ret = true;
	} else if (size || io)
		ret = true;

	dprintk("<-- %s size %d io %d ret %d\n", __func__, size, io, ret);
	return ret;
}

static int pnfs_prepare_to_retry_layoutget(struct pnfs_layout_hdr *lo)
{
	/*
	 * send layoutcommit as it can hold up layoutreturn due to lseg
	 * reference
	 */
	pnfs_layoutcommit_inode(lo->plh_inode, false);
	return wait_on_bit_action(&lo->plh_flags, NFS_LAYOUT_RETURN,
				  nfs_wait_bit_killable,
				  TASK_KILLABLE);
}

static void nfs_layoutget_begin(struct pnfs_layout_hdr *lo)
{
	atomic_inc(&lo->plh_outstanding);
}

static void nfs_layoutget_end(struct pnfs_layout_hdr *lo)
{
	if (atomic_dec_and_test(&lo->plh_outstanding))
		wake_up_var(&lo->plh_outstanding);
}

static bool pnfs_is_first_layoutget(struct pnfs_layout_hdr *lo)
{
	return test_bit(NFS_LAYOUT_FIRST_LAYOUTGET, &lo->plh_flags);
}

static void pnfs_clear_first_layoutget(struct pnfs_layout_hdr *lo)
{
	unsigned long *bitlock = &lo->plh_flags;

	clear_bit_unlock(NFS_LAYOUT_FIRST_LAYOUTGET, bitlock);
	smp_mb__after_atomic();
	wake_up_bit(bitlock, NFS_LAYOUT_FIRST_LAYOUTGET);
}

static void _add_to_server_list(struct pnfs_layout_hdr *lo,
				struct nfs_server *server)
{
	if (!test_and_set_bit(NFS_LAYOUT_HASHED, &lo->plh_flags)) {
		struct nfs_client *clp = server->nfs_client;

		/* The lo must be on the clp list if there is any
		 * chance of a CB_LAYOUTRECALL(FILE) coming in.
		 */
		spin_lock(&clp->cl_lock);
		list_add_tail_rcu(&lo->plh_layouts, &server->layouts);
		spin_unlock(&clp->cl_lock);
	}
}
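
/*
 * Layout segment is retrieved from the server if not cached.
 * The appropriate layout segment is referenced and returned to the caller.
 */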
struct pnfs_layout_segment *
pnfs_update_layout(struct inode *ino,
		   struct nfs_open_context *ctx,
		   loff_t pos,
		   u64 count,
		   enum pnfs_iomode iomode,
		   bool strict_iomode,
		   gfp_t gfp_flags)
{
	struct pnfs_layout_range arg = {
		.iomode = iomode,
		.offset = pos,
		.length = count,
	};
	unsigned pg_offset;
	struct nfs_server *server = NFS_SERVER(ino);
	struct nfs_client *clp = server->nfs_client;
	struct pnfs_layout_hdr *lo = NULL;
	struct pnfs_layout_segment *lseg = NULL;
	struct nfs4_layoutget *lgp;
	nfs4_stateid stateid;
	long timeout = 0;
	unsigned long giveup = jiffies + (clp->cl_lease_time << 1);
	bool first;

	if (!pnfs_enabled_sb(NFS_SERVER(ino))) {
		trace_pnfs_update_layout(ino, pos, count, iomode, lo, lseg,
					 PNFS_UPDATE_LAYOUT_NO_PNFS);
		goto out;
	}

	if (pnfs_within_mdsthreshold(ctx, ino, iomode)) {
		trace_pnfs_update_layout(ino, pos, count, iomode, lo, lseg,
					 PNFS_UPDATE_LAYOUT_MDSTHRESH);
		goto out;
	}

lookup_again:
	lseg = ERR_PTR(nfs4_client_recover_expired_lease(clp));
	if (IS_ERR(lseg))
		goto out;
	first = false;
	spin_lock(&ino->i_lock);
	lo = pnfs_find_alloc_layout(ino, ctx, gfp_flags);
	if (lo == NULL) {
		spin_unlock(&ino->i_lock);
		trace_pnfs_update_layout(ino, pos, count, iomode, lo, lseg,
					 PNFS_UPDATE_LAYOUT_NOMEM);
		goto out;
	}

	/* Do we even need to bother with this? */
	if (test_bit(NFS_LAYOUT_BULK_RECALL, &lo->plh_flags)) {
		trace_pnfs_update_layout(ino, pos, count, iomode, lo, lseg,
					 PNFS_UPDATE_LAYOUT_BULK_RECALL);
		dprintk("%s matches recall, use MDS\n", __func__);
		goto out_unlock;
	}

	/* if LAYOUTGET already failed once we don't try again */
	if (pnfs_layout_io_test_failed(lo, iomode)) {
		trace_pnfs_update_layout(ino, pos, count, iomode, lo, lseg,
					 PNFS_UPDATE_LAYOUT_IO_TEST_FAIL);
		goto out_unlock;
	}

	/*
	 * If the layout segment list is empty, but there are outstanding
	 * layoutget calls, then they might be subject to a layoutrecall.
	 */
	if (list_empty(&lo->plh_segs) &&
	    atomic_read(&lo->plh_outstanding) != 0) {
		spin_unlock(&ino->i_lock);
		lseg = ERR_PTR(wait_var_event_killable(&lo->plh_outstanding,
					!atomic_read(&lo->plh_outstanding)));
		if (IS_ERR(lseg))
			goto out_put_layout_hdr;
		pnfs_put_layout_hdr(lo);
		goto lookup_again;
	}

	/*
	 * Because we free lsegs when sending LAYOUTRETURN, we need to wait
	 * for LAYOUTRETURN.
	 */
	if (test_bit(NFS_LAYOUT_RETURN, &lo->plh_flags)) {
		spin_unlock(&ino->i_lock);
		dprintk("%s wait for layoutreturn\n", __func__);
		lseg = ERR_PTR(pnfs_prepare_to_retry_layoutget(lo));
		if (!IS_ERR(lseg)) {
			pnfs_put_layout_hdr(lo);
			dprintk("%s retrying\n", __func__);
			trace_pnfs_update_layout(ino, pos, count, iomode, lo,
						 lseg,
						 PNFS_UPDATE_LAYOUT_RETRY);
			goto lookup_again;
		}
		trace_pnfs_update_layout(ino, pos, count, iomode, lo, lseg,
					 PNFS_UPDATE_LAYOUT_RETURN);
		goto out_put_layout_hdr;
	}

	lseg = pnfs_find_lseg(lo, &arg, strict_iomode);
	if (lseg) {
		trace_pnfs_update_layout(ino, pos, count, iomode, lo, lseg,
				PNFS_UPDATE_LAYOUT_FOUND_CACHED);
		goto out_unlock;
	}

	/*
	 * Choose a stateid for the LAYOUTGET. If we don't have a layout
	 * stateid, or it has been invalidated, then we must use the open
	 * stateid.
	 */
	if (test_bit(NFS_LAYOUT_INVALID_STID, &lo->plh_flags)) {
		int status;

		/*
		 * The first layoutget for the file. Need to serialize per
		 * RFC 5661 Errata 3208.
		 */
		if (test_and_set_bit(NFS_LAYOUT_FIRST_LAYOUTGET,
				     &lo->plh_flags)) {
			spin_unlock(&ino->i_lock);
			lseg = ERR_PTR(wait_on_bit(&lo->plh_flags,
						NFS_LAYOUT_FIRST_LAYOUTGET,
						TASK_KILLABLE));
			if (IS_ERR(lseg))
				goto out_put_layout_hdr;
			pnfs_put_layout_hdr(lo);
			dprintk("%s retrying\n", __func__);
			goto lookup_again;
		}

		spin_unlock(&ino->i_lock);
		first = true;
		status = nfs4_select_rw_stateid(ctx->state,
					iomode == IOMODE_RW ? FMODE_WRITE : FMODE_READ,
					NULL, &stateid, NULL);
		if (status != 0) {
			lseg = ERR_PTR(status);
			trace_pnfs_update_layout(ino, pos, count,
					iomode, lo, lseg,
					PNFS_UPDATE_LAYOUT_INVALID_OPEN);
			nfs4_schedule_stateid_recovery(server, ctx->state);
			pnfs_clear_first_layoutget(lo);
			pnfs_put_layout_hdr(lo);
			goto lookup_again;
		}
		spin_lock(&ino->i_lock);
	} else {
		nfs4_stateid_copy(&stateid, &lo->plh_stateid);
	}

	if (pnfs_layoutgets_blocked(lo)) {
		trace_pnfs_update_layout(ino, pos, count, iomode, lo, lseg,
				PNFS_UPDATE_LAYOUT_BLOCKED);
		goto out_unlock;
	}
	nfs_layoutget_begin(lo);
	spin_unlock(&ino->i_lock);

	_add_to_server_list(lo, server);

	pg_offset = arg.offset & ~PAGE_MASK;
	if (pg_offset) {
		arg.offset -= pg_offset;
		arg.length += pg_offset;
	}
	if (arg.length != NFS4_MAX_UINT64)
		arg.length = PAGE_ALIGN(arg.length);

	lgp = pnfs_alloc_init_layoutget_args(ino, ctx, &stateid, &arg, gfp_flags);
	if (!lgp) {
		trace_pnfs_update_layout(ino, pos, count, iomode, lo, NULL,
					 PNFS_UPDATE_LAYOUT_NOMEM);
		nfs_layoutget_end(lo);
		goto out_put_layout_hdr;
	}

	lseg = nfs4_proc_layoutget(lgp, &timeout);
	trace_pnfs_update_layout(ino, pos, count, iomode, lo, lseg,
				 PNFS_UPDATE_LAYOUT_SEND_LAYOUTGET);
	nfs_layoutget_end(lo);
	if (IS_ERR(lseg)) {
		switch (PTR_ERR(lseg)) {
		case -EBUSY:
			if (time_after(jiffies, giveup))
				lseg = NULL;
			break;
		case -ERECALLCONFLICT:
		case -EAGAIN:
			break;
		default:
			if (!nfs_error_is_fatal(PTR_ERR(lseg))) {
				pnfs_layout_clear_fail_bit(lo, pnfs_iomode_to_fail_bit(iomode));
				lseg = NULL;
			}
			goto out_put_layout_hdr;
		}
		if (lseg) {
			if (first)
				pnfs_clear_first_layoutget(lo);
			trace_pnfs_update_layout(ino, pos, count,
				iomode, lo, lseg, PNFS_UPDATE_LAYOUT_RETRY);
			pnfs_put_layout_hdr(lo);
			goto lookup_again;
		}
	} else {
		pnfs_layout_clear_fail_bit(lo, pnfs_iomode_to_fail_bit(iomode));
	}

out_put_layout_hdr:
	if (first)
		pnfs_clear_first_layoutget(lo);
	trace_pnfs_update_layout(ino, pos, count, iomode, lo, lseg,
				 PNFS_UPDATE_LAYOUT_EXIT);
	pnfs_put_layout_hdr(lo);
out:
	dprintk("%s: inode %s/%llu pNFS layout segment %s for "
			"(%s, offset: %llu, length: %llu)\n",
			__func__, ino->i_sb->s_id,
			(unsigned long long)NFS_FILEID(ino),
			IS_ERR_OR_NULL(lseg) ? "not found" : "found",
			iomode == IOMODE_RW ? "read/write" : "read-only",
			(unsigned long long)pos,
			(unsigned long long)count);
	return lseg;
out_unlock:
	spin_unlock(&ino->i_lock);
	goto out_put_layout_hdr;
}
EXPORT_SYMBOL_GPL(pnfs_update_layout);
2181
static bool
pnfs_sanity_check_layout_range(struct pnfs_layout_range *range)
{
	switch (range->iomode) {
	case IOMODE_READ:
	case IOMODE_RW:
		break;
	default:
		return false;
	}
	if (range->offset == NFS4_MAX_UINT64)
		return false;
	if (range->length == 0)
		return false;
	if (range->length != NFS4_MAX_UINT64 &&
	    range->length > NFS4_MAX_UINT64 - range->offset)
		return false;
	return true;
}

static struct pnfs_layout_hdr *
_pnfs_grab_empty_layout(struct inode *ino, struct nfs_open_context *ctx)
{
	struct pnfs_layout_hdr *lo;

	spin_lock(&ino->i_lock);
	lo = pnfs_find_alloc_layout(ino, ctx, GFP_KERNEL);
	if (!lo)
		goto out_unlock;
	if (!test_bit(NFS_LAYOUT_INVALID_STID, &lo->plh_flags))
		goto out_unlock;
	if (test_bit(NFS_LAYOUT_RETURN, &lo->plh_flags))
		goto out_unlock;
	if (pnfs_layoutgets_blocked(lo))
		goto out_unlock;
	if (test_and_set_bit(NFS_LAYOUT_FIRST_LAYOUTGET, &lo->plh_flags))
		goto out_unlock;
	nfs_layoutget_begin(lo);
	spin_unlock(&ino->i_lock);
	_add_to_server_list(lo, NFS_SERVER(ino));
	return lo;

out_unlock:
	spin_unlock(&ino->i_lock);
	pnfs_put_layout_hdr(lo);
	return NULL;
}

static void _lgopen_prepare_attached(struct nfs4_opendata *data,
				     struct nfs_open_context *ctx)
{
	struct inode *ino = data->dentry->d_inode;
	struct pnfs_layout_range rng = {
		.iomode = (data->o_arg.fmode & FMODE_WRITE) ?
			  IOMODE_RW : IOMODE_READ,
		.offset = 0,
		.length = NFS4_MAX_UINT64,
	};
	struct nfs4_layoutget *lgp;
	struct pnfs_layout_hdr *lo;

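	/* Heuristic: don't send layoutget if we have cached data */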
	if (rng.iomode == IOMODE_READ &&
	    (i_size_read(ino) == 0 || ino->i_mapping->nrpages != 0))
		return;

	lo = _pnfs_grab_empty_layout(ino, ctx);
	if (!lo)
		return;
	lgp = pnfs_alloc_init_layoutget_args(ino, ctx, &current_stateid,
					     &rng, GFP_KERNEL);
	if (!lgp) {
		pnfs_clear_first_layoutget(lo);
		nfs_layoutget_end(lo);
		pnfs_put_layout_hdr(lo);
		return;
	}
	data->lgp = lgp;
	data->o_arg.lg_args = &lgp->args;
	data->o_res.lg_res = &lgp->res;
}

static void _lgopen_prepare_floating(struct nfs4_opendata *data,
				     struct nfs_open_context *ctx)
{
	struct pnfs_layout_range rng = {
		.iomode = (data->o_arg.fmode & FMODE_WRITE) ?
			  IOMODE_RW : IOMODE_READ,
		.offset = 0,
		.length = NFS4_MAX_UINT64,
	};
	struct nfs4_layoutget *lgp;

	lgp = pnfs_alloc_init_layoutget_args(NULL, ctx, &current_stateid,
					     &rng, GFP_KERNEL);
	if (!lgp)
		return;
	data->lgp = lgp;
	data->o_arg.lg_args = &lgp->args;
	data->o_res.lg_res = &lgp->res;
}

void pnfs_lgopen_prepare(struct nfs4_opendata *data,
			 struct nfs_open_context *ctx)
{
	struct nfs_server *server = NFS_SERVER(data->dir->d_inode);

	if (!(pnfs_enabled_sb(server) &&
	      server->pnfs_curr_ld->flags & PNFS_LAYOUTGET_ON_OPEN))
		return;
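	/* Could check on max_ops, but currently hardcoded high enough */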
	if (!nfs_server_capable(data->dir->d_inode, NFS_CAP_LGOPEN))
		return;
	if (data->state)
		_lgopen_prepare_attached(data, ctx);
	else
		_lgopen_prepare_floating(data, ctx);
}

void pnfs_parse_lgopen(struct inode *ino, struct nfs4_layoutget *lgp,
		       struct nfs_open_context *ctx)
{
	struct pnfs_layout_hdr *lo;
	struct pnfs_layout_segment *lseg;
	struct nfs_server *srv = NFS_SERVER(ino);
	u32 iomode;

	if (!lgp)
		return;
	dprintk("%s: entered with status %i\n", __func__, lgp->res.status);
	if (lgp->res.status) {
		switch (lgp->res.status) {
		default:
			break;
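		/*
		 * Halt lgopen attempts if the server doesn't recognise
		 * the "current stateid" value, the layout type, or the
		 * layoutget operation as being valid.
		 * Also if it complains about too many ops in the compound
		 * or of the request/reply being too big.
		 */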
		case -NFS4ERR_BAD_STATEID:
		case -NFS4ERR_NOTSUPP:
		case -NFS4ERR_REP_TOO_BIG:
		case -NFS4ERR_REP_TOO_BIG_TO_CACHE:
		case -NFS4ERR_REQ_TOO_BIG:
		case -NFS4ERR_TOO_MANY_OPS:
		case -NFS4ERR_UNKNOWN_LAYOUTTYPE:
			srv->caps &= ~NFS_CAP_LGOPEN;
		}
		return;
	}
	if (!lgp->args.inode) {
		lo = _pnfs_grab_empty_layout(ino, ctx);
		if (!lo)
			return;
		lgp->args.inode = ino;
	} else
		lo = NFS_I(lgp->args.inode)->layout;

	lseg = pnfs_layout_process(lgp);
	if (!IS_ERR(lseg)) {
		iomode = lgp->args.range.iomode;
		pnfs_layout_clear_fail_bit(lo, pnfs_iomode_to_fail_bit(iomode));
		pnfs_put_lseg(lseg);
	}
}

void nfs4_lgopen_release(struct nfs4_layoutget *lgp)
{
	if (lgp != NULL) {
		struct inode *inode = lgp->args.inode;

		if (inode) {
			struct pnfs_layout_hdr *lo = NFS_I(inode)->layout;

			pnfs_clear_first_layoutget(lo);
			nfs_layoutget_end(lo);
		}
		pnfs_layoutget_free(lgp);
	}
}

struct pnfs_layout_segment *
pnfs_layout_process(struct nfs4_layoutget *lgp)
{
	struct pnfs_layout_hdr *lo = NFS_I(lgp->args.inode)->layout;
	struct nfs4_layoutget_res *res = &lgp->res;
	struct pnfs_layout_segment *lseg;
	struct inode *ino = lo->plh_inode;
	LIST_HEAD(free_me);

	if (!pnfs_sanity_check_layout_range(&res->range))
		return ERR_PTR(-EINVAL);

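	/* Inject layout blob into I/O device driver */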
	lseg = NFS_SERVER(ino)->pnfs_curr_ld->alloc_lseg(lo, res, lgp->gfp_flags);
	if (IS_ERR_OR_NULL(lseg)) {
		if (!lseg)
			lseg = ERR_PTR(-ENOMEM);

		dprintk("%s: Could not allocate layout: error %ld\n",
			__func__, PTR_ERR(lseg));
		return lseg;
	}

	pnfs_init_lseg(lo, lseg, &res->range, &res->stateid);

	spin_lock(&ino->i_lock);
	if (pnfs_layoutgets_blocked(lo)) {
		dprintk("%s forget reply due to state\n", __func__);
		goto out_forget;
	}

	if (nfs4_stateid_match_other(&lo->plh_stateid, &res->stateid)) {
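		/* existing state ID, make sure the sequence number matches. */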
		if (pnfs_layout_stateid_blocked(lo, &res->stateid)) {
			if (!pnfs_layout_is_valid(lo) &&
			    pnfs_is_first_layoutget(lo))
				lo->plh_barrier = 0;
			dprintk("%s forget reply due to sequence\n", __func__);
			goto out_forget;
		}
		pnfs_set_layout_stateid(lo, &res->stateid, lgp->cred, false);
	} else if (pnfs_layout_is_valid(lo)) {
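		/*
		 * We got an entirely new state ID.  Mark all segments for the
		 * inode invalid, and retry the layoutget.
		 */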
		struct pnfs_layout_range range = {
			.iomode = IOMODE_ANY,
			.length = NFS4_MAX_UINT64,
		};
		pnfs_set_plh_return_info(lo, IOMODE_ANY, 0);
		pnfs_mark_matching_lsegs_return(lo, &lo->plh_return_segs,
						&range, 0);
		goto out_forget;
	} else {
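		/* We have a completely new layout */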
		if (!pnfs_is_first_layoutget(lo))
			goto out_forget;
		pnfs_set_layout_stateid(lo, &res->stateid, lgp->cred, true);
	}

	pnfs_get_lseg(lseg);
	pnfs_layout_insert_lseg(lo, lseg, &free_me);

	if (res->return_on_close)
		set_bit(NFS_LSEG_ROC, &lseg->pls_flags);

	spin_unlock(&ino->i_lock);
	pnfs_free_lseg_list(&free_me);
	return lseg;

out_forget:
	spin_unlock(&ino->i_lock);
	lseg->pls_layout = lo;
	NFS_SERVER(ino)->pnfs_curr_ld->free_lseg(lseg);
	return ERR_PTR(-EAGAIN);
}

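/**
 * pnfs_mark_matching_lsegs_return - Free or return matching layout segments
 * @lo: pointer to layout header
 * @tmp_list: list header to be used with pnfs_free_lseg_list()
 * @return_range: describe layout segment ranges to be returned
 * @seq: stateid seqid to match
 *
 * This function is mainly intended for use by layoutrecall. It attempts
 * to free the layout segment immediately, or else to mark it for return
 * as soon as its reference count drops to zero.
 *
 * Returns
 * - 0: a layoutreturn needs to be scheduled.
 * - EBUSY: there are layout segments that are still in use.
 * - ENOENT: there are no layout segments that need to be returned.
 */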
int
pnfs_mark_matching_lsegs_return(struct pnfs_layout_hdr *lo,
				struct list_head *tmp_list,
				const struct pnfs_layout_range *return_range,
				u32 seq)
{
	struct pnfs_layout_segment *lseg, *next;
	int remaining = 0;

	dprintk("%s:Begin lo %p\n", __func__, lo);

	assert_spin_locked(&lo->plh_inode->i_lock);

	list_for_each_entry_safe(lseg, next, &lo->plh_segs, pls_list)
		if (pnfs_match_lseg_recall(lseg, return_range, seq)) {
			dprintk("%s: marking lseg %p iomode %d "
				"offset %llu length %llu\n", __func__,
				lseg, lseg->pls_range.iomode,
				lseg->pls_range.offset,
				lseg->pls_range.length);
			if (mark_lseg_invalid(lseg, tmp_list))
				continue;
			remaining++;
			set_bit(NFS_LSEG_LAYOUTRETURN, &lseg->pls_flags);
		}

	if (remaining) {
		pnfs_set_plh_return_info(lo, return_range->iomode, seq);
		return -EBUSY;
	}

	if (!list_empty(&lo->plh_return_segs)) {
		pnfs_set_plh_return_info(lo, return_range->iomode, seq);
		return 0;
	}

	return -ENOENT;
}

static void
pnfs_mark_layout_for_return(struct inode *inode,
			    const struct pnfs_layout_range *range)
{
	struct pnfs_layout_hdr *lo;
	bool return_now = false;

	spin_lock(&inode->i_lock);
	lo = NFS_I(inode)->layout;
	if (!pnfs_layout_is_valid(lo)) {
		spin_unlock(&inode->i_lock);
		return;
	}
	pnfs_set_plh_return_info(lo, range->iomode, 0);
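	/*
	 * mark all matching lsegs so that we are sure to have no live
	 * segments at hand when sending layoutreturn. See pnfs_put_lseg()
	 * for how it works.
	 */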
	if (pnfs_mark_matching_lsegs_return(lo, &lo->plh_return_segs, range, 0) != -EBUSY) {
		const struct cred *cred;
		nfs4_stateid stateid;
		enum pnfs_iomode iomode;

		return_now = pnfs_prepare_layoutreturn(lo, &stateid, &cred, &iomode);
		spin_unlock(&inode->i_lock);
		if (return_now)
			pnfs_send_layoutreturn(lo, &stateid, &cred, iomode, false);
	} else {
		spin_unlock(&inode->i_lock);
		nfs_commit_inode(inode, 0);
	}
}

void pnfs_error_mark_layout_for_return(struct inode *inode,
				       struct pnfs_layout_segment *lseg)
{
	struct pnfs_layout_range range = {
		.iomode = lseg->pls_range.iomode,
		.offset = 0,
		.length = NFS4_MAX_UINT64,
	};

	pnfs_mark_layout_for_return(inode, &range);
}
EXPORT_SYMBOL_GPL(pnfs_error_mark_layout_for_return);

static bool
pnfs_layout_can_be_returned(struct pnfs_layout_hdr *lo)
{
	return pnfs_layout_is_valid(lo) &&
		!test_bit(NFS_LAYOUT_INODE_FREEING, &lo->plh_flags) &&
		!test_bit(NFS_LAYOUT_RETURN, &lo->plh_flags);
}

static struct pnfs_layout_segment *
pnfs_find_first_lseg(struct pnfs_layout_hdr *lo,
		     const struct pnfs_layout_range *range,
		     enum pnfs_iomode iomode)
{
	struct pnfs_layout_segment *lseg;

	list_for_each_entry(lseg, &lo->plh_segs, pls_list) {
		if (!test_bit(NFS_LSEG_VALID, &lseg->pls_flags))
			continue;
		if (test_bit(NFS_LSEG_LAYOUTRETURN, &lseg->pls_flags))
			continue;
		if (lseg->pls_range.iomode != iomode && iomode != IOMODE_ANY)
			continue;
		if (pnfs_lseg_range_intersecting(&lseg->pls_range, range))
			return lseg;
	}
	return NULL;
}

static bool
pnfs_should_return_unused_layout(struct pnfs_layout_hdr *lo,
				 const struct pnfs_layout_range *range)
{
	struct list_head *head;
	struct nfs_open_context *ctx;
	fmode_t mode = 0;

	if (!pnfs_layout_can_be_returned(lo) ||
	    !pnfs_find_first_lseg(lo, range, range->iomode))
		return false;

	head = &NFS_I(lo->plh_inode)->open_files;
	list_for_each_entry_rcu(ctx, head, list) {
		if (ctx->state)
			mode |= ctx->state->state & (FMODE_READ|FMODE_WRITE);
	}

	switch (range->iomode) {
	default:
		break;
	case IOMODE_READ:
		mode &= ~FMODE_WRITE;
		break;
	case IOMODE_RW:
		if (pnfs_find_first_lseg(lo, range, IOMODE_READ))
			mode &= ~FMODE_READ;
	}
	return mode == 0;
}

static int
pnfs_layout_return_unused_byserver(struct nfs_server *server, void *data)
{
	const struct pnfs_layout_range *range = data;
	struct pnfs_layout_hdr *lo;
	struct inode *inode;
restart:
	rcu_read_lock();
	list_for_each_entry_rcu(lo, &server->layouts, plh_layouts) {
		if (!pnfs_layout_can_be_returned(lo) ||
		    test_bit(NFS_LAYOUT_RETURN_REQUESTED, &lo->plh_flags))
			continue;
		inode = lo->plh_inode;
		spin_lock(&inode->i_lock);
		if (!pnfs_should_return_unused_layout(lo, range)) {
			spin_unlock(&inode->i_lock);
			continue;
		}
		spin_unlock(&inode->i_lock);
		inode = pnfs_grab_inode_layout_hdr(lo);
		if (!inode)
			continue;
		rcu_read_unlock();
		pnfs_mark_layout_for_return(inode, range);
		iput(inode);
		cond_resched();
		goto restart;
	}
	rcu_read_unlock();
	return 0;
}

void
pnfs_layout_return_unused_byclid(struct nfs_client *clp,
				 enum pnfs_iomode iomode)
{
	struct pnfs_layout_range range = {
		.iomode = iomode,
		.offset = 0,
		.length = NFS4_MAX_UINT64,
	};

	nfs_client_for_each_server(clp, pnfs_layout_return_unused_byserver,
				   &range);
}

void
pnfs_generic_pg_check_layout(struct nfs_pageio_descriptor *pgio)
{
	if (pgio->pg_lseg == NULL ||
	    test_bit(NFS_LSEG_VALID, &pgio->pg_lseg->pls_flags))
		return;
	pnfs_put_lseg(pgio->pg_lseg);
	pgio->pg_lseg = NULL;
}
EXPORT_SYMBOL_GPL(pnfs_generic_pg_check_layout);

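/*
 * Check for any intersection between the request and the pgio->pg_lseg,
 * and if none, put this pgio->pg_lseg away.
 */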
void
pnfs_generic_pg_check_range(struct nfs_pageio_descriptor *pgio, struct nfs_page *req)
{
	if (pgio->pg_lseg && !pnfs_lseg_request_intersecting(pgio->pg_lseg, req)) {
		pnfs_put_lseg(pgio->pg_lseg);
		pgio->pg_lseg = NULL;
	}
}
EXPORT_SYMBOL_GPL(pnfs_generic_pg_check_range);

void
pnfs_generic_pg_init_read(struct nfs_pageio_descriptor *pgio, struct nfs_page *req)
{
	u64 rd_size = req->wb_bytes;

	pnfs_generic_pg_check_layout(pgio);
	pnfs_generic_pg_check_range(pgio, req);
	if (pgio->pg_lseg == NULL) {
		if (pgio->pg_dreq == NULL)
			rd_size = i_size_read(pgio->pg_inode) - req_offset(req);
		else
			rd_size = nfs_dreq_bytes_left(pgio->pg_dreq);

		pgio->pg_lseg = pnfs_update_layout(pgio->pg_inode,
						   nfs_req_openctx(req),
						   req_offset(req),
						   rd_size,
						   IOMODE_READ,
						   false,
						   GFP_KERNEL);
		if (IS_ERR(pgio->pg_lseg)) {
			pgio->pg_error = PTR_ERR(pgio->pg_lseg);
			pgio->pg_lseg = NULL;
			return;
		}
	}
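	/* If no lseg, fall back to read through mds */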
	if (pgio->pg_lseg == NULL)
		nfs_pageio_reset_read_mds(pgio);
}
EXPORT_SYMBOL_GPL(pnfs_generic_pg_init_read);

void
pnfs_generic_pg_init_write(struct nfs_pageio_descriptor *pgio,
			   struct nfs_page *req, u64 wb_size)
{
	pnfs_generic_pg_check_layout(pgio);
	pnfs_generic_pg_check_range(pgio, req);
	if (pgio->pg_lseg == NULL) {
		pgio->pg_lseg = pnfs_update_layout(pgio->pg_inode,
						   nfs_req_openctx(req),
						   req_offset(req),
						   wb_size,
						   IOMODE_RW,
						   false,
						   GFP_KERNEL);
		if (IS_ERR(pgio->pg_lseg)) {
			pgio->pg_error = PTR_ERR(pgio->pg_lseg);
			pgio->pg_lseg = NULL;
			return;
		}
	}
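	/* If no lseg, fall back to write through mds */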
	if (pgio->pg_lseg == NULL)
		nfs_pageio_reset_write_mds(pgio);
}
EXPORT_SYMBOL_GPL(pnfs_generic_pg_init_write);

void
pnfs_generic_pg_cleanup(struct nfs_pageio_descriptor *desc)
{
	if (desc->pg_lseg) {
		pnfs_put_lseg(desc->pg_lseg);
		desc->pg_lseg = NULL;
	}
}
EXPORT_SYMBOL_GPL(pnfs_generic_pg_cleanup);

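/*
 * Return 0 if @req cannot be coalesced into @pgio, otherwise return the number
 * of bytes (maximum @req->wb_bytes) that can be coalesced.
 */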
size_t
pnfs_generic_pg_test(struct nfs_pageio_descriptor *pgio,
		     struct nfs_page *prev, struct nfs_page *req)
{
	unsigned int size;
	u64 seg_end, req_start, seg_left;

	size = nfs_generic_pg_test(pgio, prev, req);
	if (!size)
		return 0;

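	/*
	 * 'size' contains the number of bytes left in the current page (up
	 * to the original size asked for in @req->wb_bytes).
	 *
	 * Calculate how many bytes are left in the layout segment and
	 * if there are fewer bytes than 'size', return that instead.
	 *
	 * Note that 'seg_end' is actually the offset of the first byte
	 * that lies outside the pnfs_layout_range.
	 */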
	if (pgio->pg_lseg) {
		seg_end = pnfs_end_offset(pgio->pg_lseg->pls_range.offset,
					  pgio->pg_lseg->pls_range.length);
		req_start = req_offset(req);

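		/* start of request is past the last byte of this segment */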
		if (req_start >= seg_end)
			return 0;

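		/* adjust 'size' iff there are fewer bytes left in the
		 * segment than what nfs_generic_pg_test returned */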
		seg_left = seg_end - req_start;
		if (seg_left < size)
			size = (unsigned int)seg_left;
	}

	return size;
}
EXPORT_SYMBOL_GPL(pnfs_generic_pg_test);

int pnfs_write_done_resend_to_mds(struct nfs_pgio_header *hdr)
{
	struct nfs_pageio_descriptor pgio;

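	/* Resend all requests through the MDS */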
	nfs_pageio_init_write(&pgio, hdr->inode, FLUSH_STABLE, true,
			      hdr->completion_ops);
	set_bit(NFS_CONTEXT_RESEND_WRITES, &hdr->args.context->flags);
	return nfs_pageio_resend(&pgio, hdr);
}
EXPORT_SYMBOL_GPL(pnfs_write_done_resend_to_mds);

static void pnfs_ld_handle_write_error(struct nfs_pgio_header *hdr)
{
	dprintk("pnfs write error = %d\n", hdr->pnfs_error);
	if (NFS_SERVER(hdr->inode)->pnfs_curr_ld->flags &
	    PNFS_LAYOUTRET_ON_ERROR) {
		pnfs_return_layout(hdr->inode);
	}
	if (!test_and_set_bit(NFS_IOHDR_REDO, &hdr->flags))
		hdr->task.tk_status = pnfs_write_done_resend_to_mds(hdr);
}

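/*
 * Called by non rpc-based layout drivers
 */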
void pnfs_ld_write_done(struct nfs_pgio_header *hdr)
{
	if (likely(!hdr->pnfs_error)) {
		pnfs_set_layoutcommit(hdr->inode, hdr->lseg,
				      hdr->mds_offset + hdr->res.count);
		hdr->mds_ops->rpc_call_done(&hdr->task, hdr);
	}
	trace_nfs4_pnfs_write(hdr, hdr->pnfs_error);
	if (unlikely(hdr->pnfs_error))
		pnfs_ld_handle_write_error(hdr);
	hdr->mds_ops->rpc_release(hdr);
}
EXPORT_SYMBOL_GPL(pnfs_ld_write_done);

static void
pnfs_write_through_mds(struct nfs_pageio_descriptor *desc,
		       struct nfs_pgio_header *hdr)
{
	struct nfs_pgio_mirror *mirror = nfs_pgio_current_mirror(desc);

	if (!test_and_set_bit(NFS_IOHDR_REDO, &hdr->flags)) {
		list_splice_tail_init(&hdr->pages, &mirror->pg_list);
		nfs_pageio_reset_write_mds(desc);
		mirror->pg_recoalesce = 1;
	}
	hdr->completion_ops->completion(hdr);
}

static enum pnfs_try_status
pnfs_try_to_write_data(struct nfs_pgio_header *hdr,
		       const struct rpc_call_ops *call_ops,
		       struct pnfs_layout_segment *lseg,
		       int how)
{
	struct inode *inode = hdr->inode;
	enum pnfs_try_status trypnfs;
	struct nfs_server *nfss = NFS_SERVER(inode);

	hdr->mds_ops = call_ops;

	dprintk("%s: Writing ino:%lu %u@%llu (how %d)\n", __func__,
		inode->i_ino, hdr->args.count, hdr->args.offset, how);
	trypnfs = nfss->pnfs_curr_ld->write_pagelist(hdr, how);
	if (trypnfs != PNFS_NOT_ATTEMPTED)
		nfs_inc_stats(inode, NFSIOS_PNFS_WRITE);
	dprintk("%s End (trypnfs:%d)\n", __func__, trypnfs);
	return trypnfs;
}

static void
pnfs_do_write(struct nfs_pageio_descriptor *desc,
	      struct nfs_pgio_header *hdr, int how)
{
	const struct rpc_call_ops *call_ops = desc->pg_rpc_callops;
	struct pnfs_layout_segment *lseg = desc->pg_lseg;
	enum pnfs_try_status trypnfs;

	trypnfs = pnfs_try_to_write_data(hdr, call_ops, lseg, how);
	switch (trypnfs) {
	case PNFS_NOT_ATTEMPTED:
		pnfs_write_through_mds(desc, hdr);
		break;
	case PNFS_ATTEMPTED:
		break;
	case PNFS_TRY_AGAIN:
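		/* cleanup hdr and prepare to redo pnfs */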
		if (!test_and_set_bit(NFS_IOHDR_REDO, &hdr->flags)) {
			struct nfs_pgio_mirror *mirror = nfs_pgio_current_mirror(desc);

			list_splice_init(&hdr->pages, &mirror->pg_list);
			mirror->pg_recoalesce = 1;
		}
		hdr->mds_ops->rpc_release(hdr);
	}
}

static void pnfs_writehdr_free(struct nfs_pgio_header *hdr)
{
	pnfs_put_lseg(hdr->lseg);
	nfs_pgio_header_free(hdr);
}

int
pnfs_generic_pg_writepages(struct nfs_pageio_descriptor *desc)
{
	struct nfs_pgio_header *hdr;
	int ret;

	hdr = nfs_pgio_header_alloc(desc->pg_rw_ops);
	if (!hdr) {
		desc->pg_error = -ENOMEM;
		return desc->pg_error;
	}
	nfs_pgheader_init(desc, hdr, pnfs_writehdr_free);

	hdr->lseg = pnfs_get_lseg(desc->pg_lseg);
	ret = nfs_generic_pgio(desc, hdr);
	if (!ret)
		pnfs_do_write(desc, hdr, desc->pg_ioflags);

	return ret;
}
EXPORT_SYMBOL_GPL(pnfs_generic_pg_writepages);

int pnfs_read_done_resend_to_mds(struct nfs_pgio_header *hdr)
{
	struct nfs_pageio_descriptor pgio;

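	/* Resend all requests through the MDS */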
	nfs_pageio_init_read(&pgio, hdr->inode, true, hdr->completion_ops);
	return nfs_pageio_resend(&pgio, hdr);
}
EXPORT_SYMBOL_GPL(pnfs_read_done_resend_to_mds);

static void pnfs_ld_handle_read_error(struct nfs_pgio_header *hdr)
{
	dprintk("pnfs read error = %d\n", hdr->pnfs_error);
	if (NFS_SERVER(hdr->inode)->pnfs_curr_ld->flags &
	    PNFS_LAYOUTRET_ON_ERROR) {
		pnfs_return_layout(hdr->inode);
	}
	if (!test_and_set_bit(NFS_IOHDR_REDO, &hdr->flags))
		hdr->task.tk_status = pnfs_read_done_resend_to_mds(hdr);
}

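/*
 * Called by non rpc-based layout drivers
 */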
void pnfs_ld_read_done(struct nfs_pgio_header *hdr)
{
	if (likely(!hdr->pnfs_error))
		hdr->mds_ops->rpc_call_done(&hdr->task, hdr);
	trace_nfs4_pnfs_read(hdr, hdr->pnfs_error);
	if (unlikely(hdr->pnfs_error))
		pnfs_ld_handle_read_error(hdr);
	hdr->mds_ops->rpc_release(hdr);
}
EXPORT_SYMBOL_GPL(pnfs_ld_read_done);

static void
pnfs_read_through_mds(struct nfs_pageio_descriptor *desc,
		      struct nfs_pgio_header *hdr)
{
	struct nfs_pgio_mirror *mirror = nfs_pgio_current_mirror(desc);

	if (!test_and_set_bit(NFS_IOHDR_REDO, &hdr->flags)) {
		list_splice_tail_init(&hdr->pages, &mirror->pg_list);
		nfs_pageio_reset_read_mds(desc);
		mirror->pg_recoalesce = 1;
	}
	hdr->completion_ops->completion(hdr);
}

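/*
 * Call the appropriate parallel I/O subsystem read function.
 */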
static enum pnfs_try_status
pnfs_try_to_read_data(struct nfs_pgio_header *hdr,
		      const struct rpc_call_ops *call_ops,
		      struct pnfs_layout_segment *lseg)
{
	struct inode *inode = hdr->inode;
	struct nfs_server *nfss = NFS_SERVER(inode);
	enum pnfs_try_status trypnfs;

	hdr->mds_ops = call_ops;

	dprintk("%s: Reading ino:%lu %u@%llu\n",
		__func__, inode->i_ino, hdr->args.count, hdr->args.offset);

	trypnfs = nfss->pnfs_curr_ld->read_pagelist(hdr);
	if (trypnfs != PNFS_NOT_ATTEMPTED)
		nfs_inc_stats(inode, NFSIOS_PNFS_READ);
	dprintk("%s End (trypnfs:%d)\n", __func__, trypnfs);
	return trypnfs;
}

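/* Resend all requests through pnfs. */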
void pnfs_read_resend_pnfs(struct nfs_pgio_header *hdr,
			   unsigned int mirror_idx)
{
	struct nfs_pageio_descriptor pgio;

	if (!test_and_set_bit(NFS_IOHDR_REDO, &hdr->flags)) {
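		/* Prevent deadlocks with layoutreturn! */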
		pnfs_put_lseg(hdr->lseg);
		hdr->lseg = NULL;

		nfs_pageio_init_read(&pgio, hdr->inode, false,
				     hdr->completion_ops);
		pgio.pg_mirror_idx = mirror_idx;
		hdr->task.tk_status = nfs_pageio_resend(&pgio, hdr);
	}
}
EXPORT_SYMBOL_GPL(pnfs_read_resend_pnfs);

static void
pnfs_do_read(struct nfs_pageio_descriptor *desc, struct nfs_pgio_header *hdr)
{
	const struct rpc_call_ops *call_ops = desc->pg_rpc_callops;
	struct pnfs_layout_segment *lseg = desc->pg_lseg;
	enum pnfs_try_status trypnfs;

	trypnfs = pnfs_try_to_read_data(hdr, call_ops, lseg);
	switch (trypnfs) {
	case PNFS_NOT_ATTEMPTED:
		pnfs_read_through_mds(desc, hdr);
		break;
	case PNFS_ATTEMPTED:
		break;
	case PNFS_TRY_AGAIN:
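		/* cleanup hdr and prepare to redo pnfs */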
		if (!test_and_set_bit(NFS_IOHDR_REDO, &hdr->flags)) {
			struct nfs_pgio_mirror *mirror = nfs_pgio_current_mirror(desc);

			list_splice_init(&hdr->pages, &mirror->pg_list);
			mirror->pg_recoalesce = 1;
		}
		hdr->mds_ops->rpc_release(hdr);
	}
}

static void pnfs_readhdr_free(struct nfs_pgio_header *hdr)
{
	pnfs_put_lseg(hdr->lseg);
	nfs_pgio_header_free(hdr);
}

int
pnfs_generic_pg_readpages(struct nfs_pageio_descriptor *desc)
{
	struct nfs_pgio_header *hdr;
	int ret;

	hdr = nfs_pgio_header_alloc(desc->pg_rw_ops);
	if (!hdr) {
		desc->pg_error = -ENOMEM;
		return desc->pg_error;
	}
	nfs_pgheader_init(desc, hdr, pnfs_readhdr_free);
	hdr->lseg = pnfs_get_lseg(desc->pg_lseg);
	ret = nfs_generic_pgio(desc, hdr);
	if (!ret)
		pnfs_do_read(desc, hdr);
	return ret;
}
EXPORT_SYMBOL_GPL(pnfs_generic_pg_readpages);

static void pnfs_clear_layoutcommitting(struct inode *inode)
{
	unsigned long *bitlock = &NFS_I(inode)->flags;

	clear_bit_unlock(NFS_INO_LAYOUTCOMMITTING, bitlock);
	smp_mb__after_atomic();
	wake_up_bit(bitlock, NFS_INO_LAYOUTCOMMITTING);
}

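/*
 * There can be multiple RW segments.
 */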
static void pnfs_list_write_lseg(struct inode *inode, struct list_head *listp)
{
	struct pnfs_layout_segment *lseg;

	list_for_each_entry(lseg, &NFS_I(inode)->layout->plh_segs, pls_list) {
		if (lseg->pls_range.iomode == IOMODE_RW &&
		    test_and_clear_bit(NFS_LSEG_LAYOUTCOMMIT, &lseg->pls_flags))
			list_add(&lseg->pls_lc_list, listp);
	}
}

static void pnfs_list_write_lseg_done(struct inode *inode, struct list_head *listp)
{
	struct pnfs_layout_segment *lseg, *tmp;

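	/* Matched by references in pnfs_set_layoutcommit */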
	list_for_each_entry_safe(lseg, tmp, listp, pls_lc_list) {
		list_del_init(&lseg->pls_lc_list);
		pnfs_put_lseg(lseg);
	}

	pnfs_clear_layoutcommitting(inode);
}

void pnfs_set_lo_fail(struct pnfs_layout_segment *lseg)
{
	pnfs_layout_io_set_failed(lseg->pls_layout, lseg->pls_range.iomode);
}
EXPORT_SYMBOL_GPL(pnfs_set_lo_fail);

void
pnfs_set_layoutcommit(struct inode *inode, struct pnfs_layout_segment *lseg,
		      loff_t end_pos)
{
	struct nfs_inode *nfsi = NFS_I(inode);
	bool mark_as_dirty = false;

	spin_lock(&inode->i_lock);
	if (!test_and_set_bit(NFS_INO_LAYOUTCOMMIT, &nfsi->flags)) {
		nfsi->layout->plh_lwb = end_pos;
		mark_as_dirty = true;
		dprintk("%s: Set layoutcommit for inode %lu ",
			__func__, inode->i_ino);
	} else if (end_pos > nfsi->layout->plh_lwb)
		nfsi->layout->plh_lwb = end_pos;
	if (!test_and_set_bit(NFS_LSEG_LAYOUTCOMMIT, &lseg->pls_flags)) {
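		/* references matched in nfs4_layoutcommit_release */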
		pnfs_get_lseg(lseg);
	}
	spin_unlock(&inode->i_lock);
	dprintk("%s: lseg %p end_pos %llu\n",
		__func__, lseg, nfsi->layout->plh_lwb);

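	/* if pnfs_layoutcommit_inode() runs between inode locks, the next one
	 * will be a noop because NFS_INO_LAYOUTCOMMIT will not be set */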
	if (mark_as_dirty)
		mark_inode_dirty_sync(inode);
}
EXPORT_SYMBOL_GPL(pnfs_set_layoutcommit);

void pnfs_cleanup_layoutcommit(struct nfs4_layoutcommit_data *data)
{
	struct nfs_server *nfss = NFS_SERVER(data->args.inode);

	if (nfss->pnfs_curr_ld->cleanup_layoutcommit)
		nfss->pnfs_curr_ld->cleanup_layoutcommit(data);
	pnfs_list_write_lseg_done(data->args.inode, &data->lseg_list);
}

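/*
 * For the LAYOUT4_NFSV4_1_FILES layout type, NFS_DATA_SYNC WRITEs and
 * NFS_UNSTABLE WRITEs with a COMMIT to data servers must not be followed
 * by a COMMIT to the MDS.  Instead, a LAYOUTCOMMIT is sent to the MDS to
 * commit the layout state and the last byte written.
 * LAYOUTCOMMIT is only needed when the NFL4_UFLG_COMMIT_THRU_MDS flag
 * is off and a COMMIT is sent to a data server, or when WRITEs to a
 * data server return NFS_DATA_SYNC.
 */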
int
pnfs_layoutcommit_inode(struct inode *inode, bool sync)
{
	struct pnfs_layoutdriver_type *ld = NFS_SERVER(inode)->pnfs_curr_ld;
	struct nfs4_layoutcommit_data *data;
	struct nfs_inode *nfsi = NFS_I(inode);
	loff_t end_pos;
	int status;

	if (!pnfs_layoutcommit_outstanding(inode))
		return 0;

	dprintk("--> %s inode %lu\n", __func__, inode->i_ino);

	status = -EAGAIN;
	if (test_and_set_bit(NFS_INO_LAYOUTCOMMITTING, &nfsi->flags)) {
		if (!sync)
			goto out;
		status = wait_on_bit_lock_action(&nfsi->flags,
				NFS_INO_LAYOUTCOMMITTING,
				nfs_wait_bit_killable,
				TASK_KILLABLE);
		if (status)
			goto out;
	}

	status = -ENOMEM;
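	/* Note kzalloc ensures data->res.seq_res.sr_slot == NULL */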
	data = kzalloc(sizeof(*data), GFP_NOFS);
	if (!data)
		goto clear_layoutcommitting;

	status = 0;
	spin_lock(&inode->i_lock);
	if (!test_and_clear_bit(NFS_INO_LAYOUTCOMMIT, &nfsi->flags))
		goto out_unlock;

	INIT_LIST_HEAD(&data->lseg_list);
	pnfs_list_write_lseg(inode, &data->lseg_list);

	end_pos = nfsi->layout->plh_lwb;

	nfs4_stateid_copy(&data->args.stateid, &nfsi->layout->plh_stateid);
	data->cred = get_cred(nfsi->layout->plh_lc_cred);
	spin_unlock(&inode->i_lock);

	data->args.inode = inode;
	nfs_fattr_init(&data->fattr);
	data->args.bitmask = NFS_SERVER(inode)->cache_consistency_bitmask;
	data->res.fattr = &data->fattr;
	if (end_pos != 0)
		data->args.lastbytewritten = end_pos - 1;
	else
		data->args.lastbytewritten = U64_MAX;
	data->res.server = NFS_SERVER(inode);

	if (ld->prepare_layoutcommit) {
		status = ld->prepare_layoutcommit(&data->args);
		if (status) {
			put_cred(data->cred);
			spin_lock(&inode->i_lock);
			set_bit(NFS_INO_LAYOUTCOMMIT, &nfsi->flags);
			if (end_pos > nfsi->layout->plh_lwb)
				nfsi->layout->plh_lwb = end_pos;
			goto out_unlock;
		}
	}

	status = nfs4_proc_layoutcommit(data, sync);
out:
	if (status)
		mark_inode_dirty_sync(inode);
	dprintk("<-- %s status %d\n", __func__, status);
	return status;
out_unlock:
	spin_unlock(&inode->i_lock);
	kfree(data);
clear_layoutcommitting:
	pnfs_clear_layoutcommitting(inode);
	goto out;
}
EXPORT_SYMBOL_GPL(pnfs_layoutcommit_inode);

int
pnfs_generic_sync(struct inode *inode, bool datasync)
{
	return pnfs_layoutcommit_inode(inode, true);
}
EXPORT_SYMBOL_GPL(pnfs_generic_sync);

struct nfs4_threshold *pnfs_mdsthreshold_alloc(void)
{
	struct nfs4_threshold *thp;

	thp = kzalloc(sizeof(*thp), GFP_NOFS);
	if (!thp) {
		dprintk("%s mdsthreshold allocation failed\n", __func__);
		return NULL;
	}
	return thp;
}

#if IS_ENABLED(CONFIG_NFS_V4_2)
int
pnfs_report_layoutstat(struct inode *inode, gfp_t gfp_flags)
{
	struct pnfs_layoutdriver_type *ld = NFS_SERVER(inode)->pnfs_curr_ld;
	struct nfs_server *server = NFS_SERVER(inode);
	struct nfs_inode *nfsi = NFS_I(inode);
	struct nfs42_layoutstat_data *data;
	struct pnfs_layout_hdr *hdr;
	int status = 0;

	if (!pnfs_enabled_sb(server) || !ld->prepare_layoutstats)
		goto out;

	if (!nfs_server_capable(inode, NFS_CAP_LAYOUTSTATS))
		goto out;

	if (test_and_set_bit(NFS_INO_LAYOUTSTATS, &nfsi->flags))
		goto out;

	spin_lock(&inode->i_lock);
	if (!NFS_I(inode)->layout) {
		spin_unlock(&inode->i_lock);
		goto out_clear_layoutstats;
	}
	hdr = NFS_I(inode)->layout;
	pnfs_get_layout_hdr(hdr);
	spin_unlock(&inode->i_lock);

	data = kzalloc(sizeof(*data), gfp_flags);
	if (!data) {
		status = -ENOMEM;
		goto out_put;
	}

	data->args.fh = NFS_FH(inode);
	data->args.inode = inode;
	status = ld->prepare_layoutstats(&data->args);
	if (status)
		goto out_free;

	status = nfs42_proc_layoutstats_generic(NFS_SERVER(inode), data);

out:
	dprintk("%s returns %d\n", __func__, status);
	return status;

out_free:
	kfree(data);
out_put:
	pnfs_put_layout_hdr(hdr);
out_clear_layoutstats:
	smp_mb__before_atomic();
	clear_bit(NFS_INO_LAYOUTSTATS, &nfsi->flags);
	smp_mb__after_atomic();
	goto out;
}
EXPORT_SYMBOL_GPL(pnfs_report_layoutstat);
#endif

unsigned int layoutstats_timer;
module_param(layoutstats_timer, uint, 0644);
EXPORT_SYMBOL_GPL(layoutstats_timer);