1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41#include <linux/kernel.h>
42#include <linux/slab.h>
43#include <linux/fs.h>
44#include <linux/nfs_fs.h>
45#include <linux/kthread.h>
46#include <linux/module.h>
47#include <linux/random.h>
48#include <linux/ratelimit.h>
49#include <linux/workqueue.h>
50#include <linux/bitops.h>
51#include <linux/jiffies.h>
52
53#include <linux/sunrpc/clnt.h>
54
55#include "nfs4_fs.h"
56#include "callback.h"
57#include "delegation.h"
58#include "internal.h"
59#include "nfs4idmap.h"
60#include "nfs4session.h"
61#include "pnfs.h"
62#include "netns.h"
63
64#define NFSDBG_FACILITY NFSDBG_STATE
65
66#define OPENOWNER_POOL_SIZE 8
67
/* The all-zeroes "anonymous" special stateid. */
const nfs4_stateid zero_stateid = {
	{ .data = { 0 } },
	.type = NFS4_SPECIAL_STATEID_TYPE,
};
/* Sentinel used to mark a stateid that is no longer valid. */
const nfs4_stateid invalid_stateid = {
	{
		/* Funky initialiser keeps older gcc versions happy */
		.data = { 0xff, 0xff, 0xff, 0xff, 0 },
	},
	.type = NFS4_INVALID_STATEID_TYPE,
};
/* The "current stateid" special stateid: seqid 1, all-zero "other" field. */
const nfs4_stateid current_stateid = {
	{
		/* Funky initialiser keeps older gcc versions happy */
		.data = { 0x0, 0x0, 0x0, 0x1, 0 },
	},
	.type = NFS4_SPECIAL_STATEID_TYPE,
};
87
88static DEFINE_MUTEX(nfs_clid_init_mutex);
89
90static int nfs4_setup_state_renewal(struct nfs_client *clp)
91{
92 int status;
93 struct nfs_fsinfo fsinfo;
94 unsigned long now;
95
96 if (!test_bit(NFS_CS_CHECK_LEASE_TIME, &clp->cl_res_state)) {
97 nfs4_schedule_state_renewal(clp);
98 return 0;
99 }
100
101 now = jiffies;
102 status = nfs4_proc_get_lease_time(clp, &fsinfo);
103 if (status == 0) {
104 nfs4_set_lease_period(clp, fsinfo.lease_time * HZ, now);
105 nfs4_schedule_state_renewal(clp);
106 }
107
108 return status;
109}
110
/*
 * Establish and confirm the client ID for NFSv4.0 via SETCLIENTID
 * followed by SETCLIENTID_CONFIRM, then start lease renewal.
 * Returns zero on success, or a negative errno/NFS4ERR status.
 */
int nfs4_init_clientid(struct nfs_client *clp, const struct cred *cred)
{
	struct nfs4_setclientid_res clid = {
		.clientid = clp->cl_clientid,
		.confirm = clp->cl_confirm,
	};
	unsigned short port;
	int status;
	struct nfs_net *nn = net_generic(clp->cl_net, nfs_net_id);

	/* A previous SETCLIENTID already succeeded; only the confirm
	 * step remains. */
	if (test_bit(NFS4CLNT_LEASE_CONFIRM, &clp->cl_state))
		goto do_confirm;
	/* Advertise the callback port matching the address family. */
	port = nn->nfs_callback_tcpport;
	if (clp->cl_addr.ss_family == AF_INET6)
		port = nn->nfs_callback_tcpport6;

	status = nfs4_proc_setclientid(clp, NFS4_CALLBACK, port, cred, &clid);
	if (status != 0)
		goto out;
	clp->cl_clientid = clid.clientid;
	clp->cl_confirm = clid.confirm;
	/* Record that confirmation is pending so a retry after a
	 * failure below skips straight to the confirm step. */
	set_bit(NFS4CLNT_LEASE_CONFIRM, &clp->cl_state);
do_confirm:
	status = nfs4_proc_setclientid_confirm(clp, &clid, cred);
	if (status != 0)
		goto out;
	clear_bit(NFS4CLNT_LEASE_CONFIRM, &clp->cl_state);
	nfs4_setup_state_renewal(clp);
out:
	return status;
}
142
/**
 * nfs40_discover_server_trunking - Detect server IP address trunking (mv0)
 *
 * @clp: nfs_client under test
 * @result: OUT: previous instance of this nfs_client, if one exists
 * @cred: credential to use for trunking test
 *
 * Returns zero if the test succeeded (*result is filled in), otherwise
 * a negative errno or NFS4ERR status code.
 */
int nfs40_discover_server_trunking(struct nfs_client *clp,
		struct nfs_client **result,
		const struct cred *cred)
{
	struct nfs4_setclientid_res clid = {
		.clientid = clp->cl_clientid,
		.confirm = clp->cl_confirm,
	};
	struct nfs_net *nn = net_generic(clp->cl_net, nfs_net_id);
	unsigned short port;
	int status;

	/* Advertise the callback port matching the address family. */
	port = nn->nfs_callback_tcpport;
	if (clp->cl_addr.ss_family == AF_INET6)
		port = nn->nfs_callback_tcpport6;

	status = nfs4_proc_setclientid(clp, NFS4_CALLBACK, port, cred, &clid);
	if (status != 0)
		goto out;
	clp->cl_clientid = clid.clientid;
	clp->cl_confirm = clid.confirm;

	status = nfs40_walk_client_list(clp, result, cred);
	if (status == 0) {
		/* Sustain the lease, even if it's contained
		 * in another server's trunk group */
		nfs4_schedule_state_renewal(*result);

		/* If client state needs recovery, kick the manager. */
		if (clp->cl_state)
			nfs4_schedule_state_manager(clp);
	}
out:
	return status;
}
191
/*
 * Return a referenced copy of the RPC machine credential.
 * Note: @clp is currently unused by this implementation.
 */
const struct cred *nfs4_get_machine_cred(struct nfs_client *clp)
{
	return get_cred(rpc_machine_cred());
}
196
static void nfs4_root_machine_cred(struct nfs_client *clp)
{
	/* Drop any stored principal so subsequent RPCs fall back to the
	 * default (machine/root) credential. */
	clp->cl_principal = NULL;
	clp->cl_rpcclient->cl_principal = NULL;
}
204
/*
 * Return a referenced cred of the first state owner on @server that
 * still holds open state, or NULL.  Caller must hold clp->cl_lock
 * (hence the _locked suffix).
 */
static const struct cred *
nfs4_get_renew_cred_server_locked(struct nfs_server *server)
{
	const struct cred *cred = NULL;
	struct nfs4_state_owner *sp;
	struct rb_node *pos;

	for (pos = rb_first(&server->state_owners);
	     pos != NULL;
	     pos = rb_next(pos)) {
		sp = rb_entry(pos, struct nfs4_state_owner, so_server_node);
		/* Skip owners with no open state. */
		if (list_empty(&sp->so_states))
			continue;
		cred = get_cred(sp->so_cred);
		break;
	}
	return cred;
}
223
/**
 * nfs4_get_renew_cred - Acquire credential for a renew operation
 * @clp: client state handle
 *
 * Returns a cred with reference count bumped, or NULL.
 */
const struct cred *nfs4_get_renew_cred(struct nfs_client *clp)
{
	const struct cred *cred = NULL;
	struct nfs_server *server;

	/* Prefer the machine credential when one is available. */
	cred = nfs4_get_machine_cred(clp);
	if (cred != NULL)
		goto out;

	/* Otherwise fall back to the cred of any state owner that still
	 * holds open state on one of this client's servers. */
	spin_lock(&clp->cl_lock);
	rcu_read_lock();
	list_for_each_entry_rcu(server, &clp->cl_superblocks, client_link) {
		cred = nfs4_get_renew_cred_server_locked(server);
		if (cred != NULL)
			break;
	}
	rcu_read_unlock();
	spin_unlock(&clp->cl_lock);

out:
	return cred;
}
254
/* Stop draining @tbl and wake up any tasks waiting for a slot. */
static void nfs4_end_drain_slot_table(struct nfs4_slot_table *tbl)
{
	if (test_and_clear_bit(NFS4_SLOT_TBL_DRAINING, &tbl->slot_tbl_state)) {
		spin_lock(&tbl->slot_tbl_lock);
		nfs41_wake_slot_table(tbl);
		spin_unlock(&tbl->slot_tbl_lock);
	}
}
263
/* End draining of all of this client's slot tables. */
static void nfs4_end_drain_session(struct nfs_client *clp)
{
	struct nfs4_session *ses = clp->cl_session;

	/* A standalone slot table (sessionless case) takes precedence. */
	if (clp->cl_slot_tbl) {
		nfs4_end_drain_slot_table(clp->cl_slot_tbl);
		return;
	}

	if (ses != NULL) {
		nfs4_end_drain_slot_table(&ses->bc_slot_table);
		nfs4_end_drain_slot_table(&ses->fc_slot_table);
	}
}
278
/*
 * Mark @tbl as draining and wait until all outstanding slots have been
 * released.  Returns 0, or the (negative) result of an interrupted wait.
 */
static int nfs4_drain_slot_tbl(struct nfs4_slot_table *tbl)
{
	set_bit(NFS4_SLOT_TBL_DRAINING, &tbl->slot_tbl_state);
	spin_lock(&tbl->slot_tbl_lock);
	if (tbl->highest_used_slotid != NFS4_NO_SLOT) {
		/* Slots still in use: wait for the last one to complete. */
		reinit_completion(&tbl->complete);
		spin_unlock(&tbl->slot_tbl_lock);
		return wait_for_completion_interruptible(&tbl->complete);
	}
	spin_unlock(&tbl->slot_tbl_lock);
	return 0;
}
291
/* Drain all of this client's slot tables (e.g. prior to state recovery). */
static int nfs4_begin_drain_session(struct nfs_client *clp)
{
	struct nfs4_session *ses = clp->cl_session;
	int ret;

	if (clp->cl_slot_tbl)
		return nfs4_drain_slot_tbl(clp->cl_slot_tbl);

	/* back channel */
	ret = nfs4_drain_slot_tbl(&ses->bc_slot_table);
	if (ret)
		return ret;
	/* fore channel */
	return nfs4_drain_slot_tbl(&ses->fc_slot_table);
}
307
308#if defined(CONFIG_NFS_V4_1)
309
/* Complete a session reset: clear the recovery bits and restart renewal. */
static void nfs41_finish_session_reset(struct nfs_client *clp)
{
	clear_bit(NFS4CLNT_LEASE_CONFIRM, &clp->cl_state);
	clear_bit(NFS4CLNT_SESSION_RESET, &clp->cl_state);
	/* create_session negotiated new slot table */
	clear_bit(NFS4CLNT_BIND_CONN_TO_SESSION, &clp->cl_state);
	nfs4_setup_state_renewal(clp);
}
318
/*
 * Establish the client ID for NFSv4.1+: EXCHANGE_ID followed by
 * CREATE_SESSION.  Returns zero on success.
 */
int nfs41_init_clientid(struct nfs_client *clp, const struct cred *cred)
{
	int status;

	/* EXCHANGE_ID already succeeded; only session creation remains. */
	if (test_bit(NFS4CLNT_LEASE_CONFIRM, &clp->cl_state))
		goto do_confirm;
	status = nfs4_proc_exchange_id(clp, cred);
	if (status != 0)
		goto out;
	set_bit(NFS4CLNT_LEASE_CONFIRM, &clp->cl_state);
do_confirm:
	status = nfs4_proc_create_session(clp, cred);
	if (status != 0)
		goto out;
	nfs41_finish_session_reset(clp);
	nfs_mark_client_ready(clp, NFS_CS_READY);
out:
	return status;
}
338
/**
 * nfs41_discover_server_trunking - Detect server IP address trunking (mv1)
 *
 * @clp: nfs_client under test
 * @result: OUT: previous instance of this nfs_client, if one exists
 * @cred: credential to use for trunking test
 *
 * Returns NFS4_OK, a negative errno, or a negative NFS4ERR status.
 * On success a referenced nfs_client pointer is planted in *@result.
 */
int nfs41_discover_server_trunking(struct nfs_client *clp,
		struct nfs_client **result,
		const struct cred *cred)
{
	int status;

	status = nfs4_proc_exchange_id(clp, cred);
	if (status != NFS4_OK)
		return status;

	status = nfs41_walk_client_list(clp, result, cred);
	if (status < 0)
		return status;
	/* An existing client instance matched: nothing more to do here. */
	if (clp != *result)
		return 0;

	/*
	 * Purge state if the client id was established in a prior
	 * instance and the client id could not have arrived on the
	 * server via Transparent State Migration.
	 */
	if (clp->cl_exchange_flags & EXCHGID4_FLAG_CONFIRMED_R) {
		if (!test_bit(NFS_CS_TSM_POSSIBLE, &clp->cl_flags))
			set_bit(NFS4CLNT_PURGE_STATE, &clp->cl_state);
		else
			set_bit(NFS4CLNT_LEASE_CONFIRM, &clp->cl_state);
	}
	nfs4_schedule_state_manager(clp);
	status = nfs_wait_client_init_complete(clp);
	if (status < 0)
		nfs_put_client(clp);
	return status;
}
385
386#endif
387
/**
 * nfs4_get_clid_cred - Acquire credential for a setclientid operation
 * @clp: client state handle
 *
 * Returns a cred with reference count bumped, or NULL.
 */
const struct cred *nfs4_get_clid_cred(struct nfs_client *clp)
{
	const struct cred *cred;

	cred = nfs4_get_machine_cred(clp);
	return cred;
}
401
/*
 * Look up the state owner matching @cred in @server's red-black tree.
 * On a hit, the owner is taken off the LRU and its refcount is bumped.
 * Caller must hold clp->cl_lock (hence the _locked suffix).
 */
static struct nfs4_state_owner *
nfs4_find_state_owner_locked(struct nfs_server *server, const struct cred *cred)
{
	struct rb_node **p = &server->state_owners.rb_node,
		       *parent = NULL;
	struct nfs4_state_owner *sp;
	int cmp;

	while (*p != NULL) {
		parent = *p;
		sp = rb_entry(parent, struct nfs4_state_owner, so_server_node);
		cmp = cred_fscmp(cred, sp->so_cred);

		if (cmp < 0)
			p = &parent->rb_left;
		else if (cmp > 0)
			p = &parent->rb_right;
		else {
			/* Found: take it off the free-owner LRU. */
			if (!list_empty(&sp->so_lru))
				list_del_init(&sp->so_lru);
			atomic_inc(&sp->so_count);
			return sp;
		}
	}
	return NULL;
}
428
/*
 * Insert @new into its server's state-owner tree.  If an owner with the
 * same cred already exists, return the existing owner (refcount bumped,
 * removed from the LRU) and leave @new untouched; otherwise link @new in
 * and return it.  Caller must hold clp->cl_lock.
 */
static struct nfs4_state_owner *
nfs4_insert_state_owner_locked(struct nfs4_state_owner *new)
{
	struct nfs_server *server = new->so_server;
	struct rb_node **p = &server->state_owners.rb_node,
		       *parent = NULL;
	struct nfs4_state_owner *sp;
	int cmp;

	while (*p != NULL) {
		parent = *p;
		sp = rb_entry(parent, struct nfs4_state_owner, so_server_node);
		cmp = cred_fscmp(new->so_cred, sp->so_cred);

		if (cmp < 0)
			p = &parent->rb_left;
		else if (cmp > 0)
			p = &parent->rb_right;
		else {
			/* Duplicate: reuse the existing owner. */
			if (!list_empty(&sp->so_lru))
				list_del_init(&sp->so_lru);
			atomic_inc(&sp->so_count);
			return sp;
		}
	}
	rb_link_node(&new->so_server_node, parent, p);
	rb_insert_color(&new->so_server_node, &server->state_owners);
	return new;
}
458
/* Unlink @sp from its server's tree.  Caller must hold clp->cl_lock. */
static void
nfs4_remove_state_owner_locked(struct nfs4_state_owner *sp)
{
	struct nfs_server *server = sp->so_server;

	if (!RB_EMPTY_NODE(&sp->so_server_node))
		rb_erase(&sp->so_server_node, &server->state_owners);
}
467
/* Initialise a sequence-id counter used for OPEN/LOCK seqid tracking. */
static void
nfs4_init_seqid_counter(struct nfs_seqid_counter *sc)
{
	sc->create_time = ktime_get();
	sc->flags = 0;
	sc->counter = 0;
	spin_lock_init(&sc->lock);
	INIT_LIST_HEAD(&sc->list);
	rpc_init_wait_queue(&sc->wait, "Seqid_waitqueue");
}
478
/* Tear down the RPC wait queue of a sequence-id counter. */
static void
nfs4_destroy_seqid_counter(struct nfs_seqid_counter *sc)
{
	rpc_destroy_wait_queue(&sc->wait);
}
484
/*
 * nfs4_alloc_state_owner(): this is called on the OPEN or CREATE path to
 * create a new state_owner.  Returns NULL on allocation failure.
 */
static struct nfs4_state_owner *
nfs4_alloc_state_owner(struct nfs_server *server,
		const struct cred *cred,
		gfp_t gfp_flags)
{
	struct nfs4_state_owner *sp;

	sp = kzalloc(sizeof(*sp), gfp_flags);
	if (!sp)
		return NULL;
	/* Allocate a unique open-owner id for the on-the-wire owner name. */
	sp->so_seqid.owner_id = ida_simple_get(&server->openowner_id, 0, 0,
			gfp_flags);
	if (sp->so_seqid.owner_id < 0) {
		kfree(sp);
		return NULL;
	}
	sp->so_server = server;
	sp->so_cred = get_cred(cred);
	spin_lock_init(&sp->so_lock);
	INIT_LIST_HEAD(&sp->so_states);
	nfs4_init_seqid_counter(&sp->so_seqid);
	atomic_set(&sp->so_count, 1);
	INIT_LIST_HEAD(&sp->so_lru);
	seqcount_init(&sp->so_reclaim_seqcount);
	mutex_init(&sp->so_delegreturn_mutex);
	return sp;
}
517
static void
nfs4_reset_state_owner(struct nfs4_state_owner *sp)
{
	/* This state_owner is no longer usable, but must
	 * remain in place so that state recovery can find it
	 * and the opens associated with it.
	 * It may also be used for new 'open' requests to
	 * return a delegation to the server.
	 * So update the 'create_time' so that it looks like
	 * a new state_owner.  This causes a new owner name to
	 * be generated for subsequent requests.
	 */
	sp->so_seqid.create_time = ktime_get();
}
532
/* Free @sp, releasing its cred reference and its open-owner id. */
static void nfs4_free_state_owner(struct nfs4_state_owner *sp)
{
	nfs4_destroy_seqid_counter(&sp->so_seqid);
	put_cred(sp->so_cred);
	ida_simple_remove(&sp->so_server->openowner_id, sp->so_seqid.owner_id);
	kfree(sp);
}
540
/*
 * Garbage-collect state owners that have sat unused on the server's LRU
 * for longer than one lease period.
 */
static void nfs4_gc_state_owners(struct nfs_server *server)
{
	struct nfs_client *clp = server->nfs_client;
	struct nfs4_state_owner *sp, *tmp;
	unsigned long time_min, time_max;
	LIST_HEAD(doomed);

	spin_lock(&clp->cl_lock);
	time_max = jiffies;
	time_min = (long)time_max - (long)clp->cl_lease_time;
	list_for_each_entry_safe(sp, tmp, &server->state_owners_lru, so_lru) {
		/* LRU is ordered oldest-first: stop at the first entry
		 * still within the lease window. */
		if (time_in_range(sp->so_expires, time_min, time_max))
			break;
		list_move(&sp->so_lru, &doomed);
		nfs4_remove_state_owner_locked(sp);
	}
	spin_unlock(&clp->cl_lock);

	/* Free outside the spinlock. */
	list_for_each_entry_safe(sp, tmp, &doomed, so_lru) {
		list_del(&sp->so_lru);
		nfs4_free_state_owner(sp);
	}
}
565
/**
 * nfs4_get_state_owner - Look up a state owner given a credential
 * @server: nfs_server to search
 * @cred: RPC credential to match
 * @gfp_flags: allocation mode
 *
 * Returns a pointer to an instantiated nfs4_state_owner struct, or NULL.
 */
struct nfs4_state_owner *nfs4_get_state_owner(struct nfs_server *server,
		const struct cred *cred,
		gfp_t gfp_flags)
{
	struct nfs_client *clp = server->nfs_client;
	struct nfs4_state_owner *sp, *new;

	spin_lock(&clp->cl_lock);
	sp = nfs4_find_state_owner_locked(server, cred);
	spin_unlock(&clp->cl_lock);
	if (sp != NULL)
		goto out;
	new = nfs4_alloc_state_owner(server, cred, gfp_flags);
	if (new == NULL)
		goto out;
	/* Re-run the lookup under the lock: another task may have
	 * inserted an owner for this cred in the meantime. */
	spin_lock(&clp->cl_lock);
	sp = nfs4_insert_state_owner_locked(new);
	spin_unlock(&clp->cl_lock);
	if (sp != new)
		nfs4_free_state_owner(new);
out:
	nfs4_gc_state_owners(server);
	return sp;
}
598
/**
 * nfs4_put_state_owner - Release a nfs4_state_owner
 * @sp: state owner data to release
 *
 * Note that released state owners are parked on an LRU list rather
 * than freed immediately: this caches them for reuse, and pins their
 * uniquifier for a while so dropped owner names are not reused too
 * soon.  nfs4_gc_state_owners() frees them once they expire.
 */
void nfs4_put_state_owner(struct nfs4_state_owner *sp)
{
	struct nfs_server *server = sp->so_server;
	struct nfs_client *clp = server->nfs_client;

	if (!atomic_dec_and_lock(&sp->so_count, &clp->cl_lock))
		return;

	/* Last reference: park the owner on the LRU with a timestamp. */
	sp->so_expires = jiffies;
	list_add_tail(&sp->so_lru, &server->state_owners_lru);
	spin_unlock(&clp->cl_lock);
}
623
/**
 * nfs4_purge_state_owners - Release all cached state owners
 * @server: nfs_server with cached state owners to release
 * @head: resulting list of state owners
 *
 * The state owners are not freed here, but are moved onto @head,
 * which can later be passed to nfs4_free_state_owners().
 */
void nfs4_purge_state_owners(struct nfs_server *server, struct list_head *head)
{
	struct nfs_client *clp = server->nfs_client;
	struct nfs4_state_owner *sp, *tmp;

	spin_lock(&clp->cl_lock);
	list_for_each_entry_safe(sp, tmp, &server->state_owners_lru, so_lru) {
		list_move(&sp->so_lru, head);
		nfs4_remove_state_owner_locked(sp);
	}
	spin_unlock(&clp->cl_lock);
}
647
/**
 * nfs4_free_state_owners - Release all cached state owners
 * @head: resulting list of state owners
 *
 * Frees a list of state owners that was generated by
 * nfs4_purge_state_owners().
 */
void nfs4_free_state_owners(struct list_head *head)
{
	struct nfs4_state_owner *sp, *tmp;

	list_for_each_entry_safe(sp, tmp, head, so_lru) {
		list_del(&sp->so_lru);
		nfs4_free_state_owner(sp);
	}
}
664
665static struct nfs4_state *
666nfs4_alloc_open_state(void)
667{
668 struct nfs4_state *state;
669
670 state = kzalloc(sizeof(*state), GFP_NOFS);
671 if (!state)
672 return NULL;
673 refcount_set(&state->count, 1);
674 INIT_LIST_HEAD(&state->lock_states);
675 spin_lock_init(&state->state_lock);
676 seqlock_init(&state->seqlock);
677 init_waitqueue_head(&state->waitq);
678 return state;
679}
680
/*
 * Update the open mode of @state and re-sort it within its owner's
 * so_states list.  Caller must hold the owner's so_lock (hence the
 * _locked suffix).
 */
void
nfs4_state_set_mode_locked(struct nfs4_state *state, fmode_t fmode)
{
	if (state->state == fmode)
		return;
	/* Keep states that are open for write at the head of the list. */
	if ((fmode & FMODE_WRITE) != (state->state & FMODE_WRITE)) {
		if (fmode & FMODE_WRITE)
			list_move(&state->open_states, &state->owner->so_states);
		else
			list_move_tail(&state->open_states, &state->owner->so_states);
	}
	state->state = fmode;
}
695
/*
 * Find the open state for (@inode, @owner).  Skips states whose open
 * stateid is no longer valid, and only returns a state whose refcount
 * could be raised.  Callers in this file invoke it either under
 * rcu_read_lock() or with the inode/owner locks held.
 */
static struct nfs4_state *
__nfs4_find_state_byowner(struct inode *inode, struct nfs4_state_owner *owner)
{
	struct nfs_inode *nfsi = NFS_I(inode);
	struct nfs4_state *state;

	list_for_each_entry_rcu(state, &nfsi->open_states, inode_states) {
		if (state->owner != owner)
			continue;
		if (!nfs4_valid_open_stateid(state))
			continue;
		/* Guard against a state that is concurrently being freed. */
		if (refcount_inc_not_zero(&state->count))
			return state;
	}
	return NULL;
}
712
/* Free an open state after an RCU grace period. */
static void
nfs4_free_open_state(struct nfs4_state *state)
{
	kfree_rcu(state, rcu_head);
}
718
/*
 * Return the open state for (@inode, @owner), creating one if needed.
 * The returned state carries a reference.  Returns NULL if a new state
 * was required but could not be allocated.
 */
struct nfs4_state *
nfs4_get_open_state(struct inode *inode, struct nfs4_state_owner *owner)
{
	struct nfs4_state *state, *new;
	struct nfs_inode *nfsi = NFS_I(inode);

	/* Fast path: lockless lookup under RCU. */
	rcu_read_lock();
	state = __nfs4_find_state_byowner(inode, owner);
	rcu_read_unlock();
	if (state)
		goto out;
	new = nfs4_alloc_open_state();
	/* Recheck under the locks in case another task beat us to it. */
	spin_lock(&owner->so_lock);
	spin_lock(&inode->i_lock);
	state = __nfs4_find_state_byowner(inode, owner);
	if (state == NULL && new != NULL) {
		state = new;
		state->owner = owner;
		atomic_inc(&owner->so_count);
		list_add_rcu(&state->inode_states, &nfsi->open_states);
		ihold(inode);
		state->inode = inode;
		spin_unlock(&inode->i_lock);
		/* Note: The reclaim code dictates that we add stateless
		 * and read-only stateids to the end of the list */
		list_add_tail(&state->open_states, &owner->so_states);
		spin_unlock(&owner->so_lock);
	} else {
		spin_unlock(&inode->i_lock);
		spin_unlock(&owner->so_lock);
		if (new)
			nfs4_free_open_state(new);
	}
out:
	return state;
}
755
/*
 * Drop a reference to an open state.  On the final put, unlink it from
 * the inode and owner lists and free it.
 */
void nfs4_put_open_state(struct nfs4_state *state)
{
	struct inode *inode = state->inode;
	struct nfs4_state_owner *owner = state->owner;

	if (!refcount_dec_and_lock(&state->count, &owner->so_lock))
		return;
	spin_lock(&inode->i_lock);
	list_del_rcu(&state->inode_states);
	list_del(&state->open_states);
	spin_unlock(&inode->i_lock);
	spin_unlock(&owner->so_lock);
	iput(inode);
	nfs4_free_open_state(state);
	nfs4_put_state_owner(owner);
}
772
/*
 * Close the current file.
 */
static void __nfs4_close(struct nfs4_state *state,
		fmode_t fmode, gfp_t gfp_mask, int wait)
{
	struct nfs4_state_owner *owner = state->owner;
	int call_close = 0;
	fmode_t newstate;

	/* Pin the owner for the duration of the close. */
	atomic_inc(&owner->so_count);
	spin_lock(&owner->so_lock);
	/* Drop the open count corresponding to @fmode. */
	switch (fmode & (FMODE_READ | FMODE_WRITE)) {
	case FMODE_READ:
		state->n_rdonly--;
		break;
	case FMODE_WRITE:
		state->n_wronly--;
		break;
	case FMODE_READ|FMODE_WRITE:
		state->n_rdwr--;
	}
	/* Work out the new open mode and whether an on-the-wire
	 * CLOSE/OPEN_DOWNGRADE is required. */
	newstate = FMODE_READ|FMODE_WRITE;
	if (state->n_rdwr == 0) {
		if (state->n_rdonly == 0) {
			newstate &= ~FMODE_READ;
			call_close |= test_bit(NFS_O_RDONLY_STATE, &state->flags);
			call_close |= test_bit(NFS_O_RDWR_STATE, &state->flags);
		}
		if (state->n_wronly == 0) {
			newstate &= ~FMODE_WRITE;
			call_close |= test_bit(NFS_O_WRONLY_STATE, &state->flags);
			call_close |= test_bit(NFS_O_RDWR_STATE, &state->flags);
		}
		if (newstate == 0)
			clear_bit(NFS_DELEGATED_STATE, &state->flags);
	}
	nfs4_state_set_mode_locked(state, newstate);
	spin_unlock(&owner->so_lock);

	if (!call_close) {
		/* No RPC needed: just drop our references. */
		nfs4_put_open_state(state);
		nfs4_put_state_owner(owner);
	} else
		nfs4_do_close(state, gfp_mask, wait);
}
820
/* Close @state without waiting for the CLOSE RPC to complete. */
void nfs4_close_state(struct nfs4_state *state, fmode_t fmode)
{
	__nfs4_close(state, fmode, GFP_NOFS, 0);
}
825
/* Close @state and wait synchronously for the CLOSE to complete. */
void nfs4_close_sync(struct nfs4_state *state, fmode_t fmode)
{
	__nfs4_close(state, fmode, GFP_KERNEL, 1);
}
830
/*
 * Search state->lock_states for a lock state compatible with either of
 * the given owners.  An exact match on @fl_owner wins immediately;
 * otherwise the last entry matching @fl_owner2 is used.  On success the
 * lock state's refcount is bumped.  Caller must hold state->state_lock.
 */
static struct nfs4_lock_state *
__nfs4_find_lock_state(struct nfs4_state *state,
		fl_owner_t fl_owner, fl_owner_t fl_owner2)
{
	struct nfs4_lock_state *pos, *ret = NULL;
	list_for_each_entry(pos, &state->lock_states, ls_locks) {
		if (pos->ls_owner == fl_owner) {
			ret = pos;
			break;
		}
		if (pos->ls_owner == fl_owner2)
			ret = pos;
	}
	if (ret)
		refcount_inc(&ret->ls_count);
	return ret;
}
861
/*
 * Allocate and initialise a new lock state for (@state, @fl_owner).
 * Returns NULL on allocation failure.
 */
static struct nfs4_lock_state *nfs4_alloc_lock_state(struct nfs4_state *state, fl_owner_t fl_owner)
{
	struct nfs4_lock_state *lsp;
	struct nfs_server *server = state->owner->so_server;

	lsp = kzalloc(sizeof(*lsp), GFP_NOFS);
	if (lsp == NULL)
		return NULL;
	nfs4_init_seqid_counter(&lsp->ls_seqid);
	refcount_set(&lsp->ls_count, 1);
	lsp->ls_state = state;
	lsp->ls_owner = fl_owner;
	/* Allocate the unique on-the-wire lock-owner id. */
	lsp->ls_seqid.owner_id = ida_simple_get(&server->lockowner_id, 0, 0, GFP_NOFS);
	if (lsp->ls_seqid.owner_id < 0)
		goto out_free;
	INIT_LIST_HEAD(&lsp->ls_locks);
	return lsp;
out_free:
	kfree(lsp);
	return NULL;
}
888
/* Free a lock state and return its lock-owner id to the server's pool. */
void nfs4_free_lock_state(struct nfs_server *server, struct nfs4_lock_state *lsp)
{
	ida_simple_remove(&server->lockowner_id, lsp->ls_seqid.owner_id);
	nfs4_destroy_seqid_counter(&lsp->ls_seqid);
	kfree(lsp);
}
895
/*
 * Return a compatible lock_state for @owner on @state, creating one if
 * none exists.  Returns NULL on allocation failure.
 */
static struct nfs4_lock_state *nfs4_get_lock_state(struct nfs4_state *state, fl_owner_t owner)
{
	struct nfs4_lock_state *lsp, *new = NULL;

	for(;;) {
		spin_lock(&state->state_lock);
		lsp = __nfs4_find_lock_state(state, owner, NULL);
		if (lsp != NULL)
			break;
		/* Second pass: install the state we allocated below. */
		if (new != NULL) {
			list_add(&new->ls_locks, &state->lock_states);
			set_bit(LK_STATE_IN_USE, &state->flags);
			lsp = new;
			new = NULL;
			break;
		}
		spin_unlock(&state->state_lock);
		/* Allocate outside the spinlock, then retry the search. */
		new = nfs4_alloc_lock_state(state, owner);
		if (new == NULL)
			return NULL;
	}
	spin_unlock(&state->state_lock);
	/* Someone raced us in: drop the unused allocation. */
	if (new != NULL)
		nfs4_free_lock_state(state->owner->so_server, new);
	return lsp;
}
927
/*
 * Release reference to lock_state, and free it if we see that
 * it is no longer in use
 */
void nfs4_put_lock_state(struct nfs4_lock_state *lsp)
{
	struct nfs_server *server;
	struct nfs4_state *state;

	if (lsp == NULL)
		return;
	state = lsp->ls_state;
	if (!refcount_dec_and_lock(&lsp->ls_count, &state->state_lock))
		return;
	list_del(&lsp->ls_locks);
	if (list_empty(&state->lock_states))
		clear_bit(LK_STATE_IN_USE, &state->flags);
	spin_unlock(&state->state_lock);
	server = state->owner->so_server;
	if (test_bit(NFS_LOCK_INITIALIZED, &lsp->ls_flags)) {
		struct nfs_client *clp = server->nfs_client;

		/* The server still knows this lock state: let the
		 * minor-version-specific code release it. */
		clp->cl_mvops->free_lock_state(server, lsp);
	} else
		nfs4_free_lock_state(server, lsp);
}
954
/* file_lock copy callback: share the nfs4 lock state with the copy. */
static void nfs4_fl_copy_lock(struct file_lock *dst, struct file_lock *src)
{
	struct nfs4_lock_state *lsp = src->fl_u.nfs4_fl.owner;

	dst->fl_u.nfs4_fl.owner = lsp;
	refcount_inc(&lsp->ls_count);
}
962
/* file_lock release callback: drop our reference to the lock state. */
static void nfs4_fl_release_lock(struct file_lock *fl)
{
	nfs4_put_lock_state(fl->fl_u.nfs4_fl.owner);
}
967
/* Lock operations attached to every NFSv4-managed file_lock. */
static const struct file_lock_operations nfs4_fl_lock_ops = {
	.fl_copy_lock = nfs4_fl_copy_lock,
	.fl_release_private = nfs4_fl_release_lock,
};
972
/*
 * Attach an nfs4 lock state to @fl, creating one for its owner if
 * necessary.  Returns 0 on success or -ENOMEM.
 */
int nfs4_set_lock_state(struct nfs4_state *state, struct file_lock *fl)
{
	struct nfs4_lock_state *lsp;

	/* Already set up? */
	if (fl->fl_ops != NULL)
		return 0;
	lsp = nfs4_get_lock_state(state, fl->fl_owner);
	if (lsp == NULL)
		return -ENOMEM;
	fl->fl_u.nfs4_fl.owner = lsp;
	fl->fl_ops = &nfs4_fl_lock_ops;
	return 0;
}
986
/*
 * Try to fill @dst with the lock stateid matching @l_ctx's owners.
 * Returns 0 on success, -EIO if the lock was lost, or -ENOENT if no
 * usable lock stateid exists.
 */
static int nfs4_copy_lock_stateid(nfs4_stateid *dst,
		struct nfs4_state *state,
		const struct nfs_lock_context *l_ctx)
{
	struct nfs4_lock_state *lsp;
	fl_owner_t fl_owner, fl_flock_owner;
	int ret = -ENOENT;

	if (l_ctx == NULL)
		goto out;

	/* Fast path: no lock states at all on this open state. */
	if (test_bit(LK_STATE_IN_USE, &state->flags) == 0)
		goto out;

	fl_owner = l_ctx->lockowner;
	fl_flock_owner = l_ctx->open_context->flock_owner;

	spin_lock(&state->state_lock);
	lsp = __nfs4_find_lock_state(state, fl_owner, fl_flock_owner);
	if (lsp && test_bit(NFS_LOCK_LOST, &lsp->ls_flags))
		ret = -EIO;
	else if (lsp != NULL && test_bit(NFS_LOCK_INITIALIZED, &lsp->ls_flags) != 0) {
		nfs4_stateid_copy(dst, &lsp->ls_stateid);
		ret = 0;
	}
	spin_unlock(&state->state_lock);
	nfs4_put_lock_state(lsp);
out:
	return ret;
}
1017
/*
 * Refresh the seqid of @dst from the current open stateid, provided the
 * "other" fields still match.  Returns true if @dst was updated.  The
 * seqlock retry loop guards against concurrent stateid updates.
 */
bool nfs4_refresh_open_stateid(nfs4_stateid *dst, struct nfs4_state *state)
{
	bool ret;
	int seq;

	do {
		ret = false;
		seq = read_seqbegin(&state->seqlock);
		if (nfs4_state_match_open_stateid_other(state, dst)) {
			dst->seqid = state->open_stateid.seqid;
			ret = true;
		}
	} while (read_seqretry(&state->seqlock, seq));
	return ret;
}
1033
/*
 * Copy the open stateid into @dst, falling back to the zero stateid
 * when no open state exists.  Returns true if an open stateid was
 * copied, false if the zero stateid was used.
 */
bool nfs4_copy_open_stateid(nfs4_stateid *dst, struct nfs4_state *state)
{
	bool ret;
	const nfs4_stateid *src;
	int seq;

	do {
		ret = false;
		src = &zero_stateid;
		seq = read_seqbegin(&state->seqlock);
		if (test_bit(NFS_OPEN_STATE, &state->flags)) {
			src = &state->open_stateid;
			ret = true;
		}
		nfs4_stateid_copy(dst, src);
	} while (read_seqretry(&state->seqlock, seq));
	return ret;
}
1052
/*
 * Byte-range lock aware utility to initialize the stateid of read/write
 * requests.  Preference order: lock stateid, delegation stateid, open
 * stateid.
 */
int nfs4_select_rw_stateid(struct nfs4_state *state,
		fmode_t fmode, const struct nfs_lock_context *l_ctx,
		nfs4_stateid *dst, const struct cred **cred)
{
	int ret;

	if (!nfs4_valid_open_stateid(state))
		return -EIO;
	if (cred != NULL)
		*cred = NULL;
	ret = nfs4_copy_lock_stateid(dst, state, l_ctx);
	if (ret == -EIO)
		/* A lost lock - don't even consider delegations */
		goto out;
	/* returns true if a delegation stateid was found and copied */
	if (nfs4_copy_delegation_stateid(state->inode, fmode, dst, cred)) {
		ret = 0;
		goto out;
	}
	if (ret != -ENOENT)
		/* nfs4_copy_lock_stateid() found a lock stateid and
		 * nfs4_copy_delegation_stateid() didn't overwrite dst,
		 * so dst still holds the lock stateid */
		goto out;
	ret = nfs4_copy_open_stateid(dst, state) ? 0 : -EAGAIN;
out:
	/* NFSv4.1+ servers accept seqid 0 ("most recent") stateids. */
	if (nfs_server_capable(state->inode, NFS_CAP_STATEID_NFSV41))
		dst->seqid = 0;
	return ret;
}
1088
1089struct nfs_seqid *nfs_alloc_seqid(struct nfs_seqid_counter *counter, gfp_t gfp_mask)
1090{
1091 struct nfs_seqid *new;
1092
1093 new = kmalloc(sizeof(*new), gfp_mask);
1094 if (new == NULL)
1095 return ERR_PTR(-ENOMEM);
1096 new->sequence = counter;
1097 INIT_LIST_HEAD(&new->list);
1098 new->task = NULL;
1099 return new;
1100}
1101
/*
 * Remove @seqid from its counter's queue and wake the next queued
 * waiter, if any.  Safe to call if the seqid was never queued.
 */
void nfs_release_seqid(struct nfs_seqid *seqid)
{
	struct nfs_seqid_counter *sequence;

	if (seqid == NULL || list_empty(&seqid->list))
		return;
	sequence = seqid->sequence;
	spin_lock(&sequence->lock);
	list_del_init(&seqid->list);
	if (!list_empty(&sequence->list)) {
		struct nfs_seqid *next;

		/* Hand the sequence to the next waiter in FIFO order. */
		next = list_first_entry(&sequence->list,
				struct nfs_seqid, list);
		rpc_wake_up_queued_task(&sequence->wait, next->task);
	}
	spin_unlock(&sequence->lock);
}
1120
/* Dequeue and free a seqid. */
void nfs_free_seqid(struct nfs_seqid *seqid)
{
	nfs_release_seqid(seqid);
	kfree(seqid);
}
1126
1127
1128
1129
1130
1131
1132static void nfs_increment_seqid(int status, struct nfs_seqid *seqid)
1133{
1134 switch (status) {
1135 case 0:
1136 break;
1137 case -NFS4ERR_BAD_SEQID:
1138 if (seqid->sequence->flags & NFS_SEQID_CONFIRMED)
1139 return;
1140 pr_warn_ratelimited("NFS: v4 server returned a bad"
1141 " sequence-id error on an"
1142 " unconfirmed sequence %p!\n",
1143 seqid->sequence);
1144 case -NFS4ERR_STALE_CLIENTID:
1145 case -NFS4ERR_STALE_STATEID:
1146 case -NFS4ERR_BAD_STATEID:
1147 case -NFS4ERR_BADXDR:
1148 case -NFS4ERR_RESOURCE:
1149 case -NFS4ERR_NOFILEHANDLE:
1150 case -NFS4ERR_MOVED:
1151
1152 return;
1153 };
1154
1155
1156
1157
1158 seqid->sequence->counter++;
1159}
1160
/*
 * Handle the result of an operation using an open seqid: reset the
 * state owner on a bad seqid, and (only when no session is in use,
 * i.e. minor version 0) bump the sequence counter.
 */
void nfs_increment_open_seqid(int status, struct nfs_seqid *seqid)
{
	struct nfs4_state_owner *sp;

	if (seqid == NULL)
		return;

	sp = container_of(seqid->sequence, struct nfs4_state_owner, so_seqid);
	if (status == -NFS4ERR_BAD_SEQID)
		nfs4_reset_state_owner(sp);
	if (!nfs4_has_session(sp->so_server->nfs_client))
		nfs_increment_seqid(status, seqid);
}
1174
/*
 * Increment the lock seqid if the LOCK/LOCKU succeeded, or failed with
 * a seqid-incrementing error (see nfs_increment_seqid()).
 */
void nfs_increment_lock_seqid(int status, struct nfs_seqid *seqid)
{
	if (seqid != NULL)
		nfs_increment_seqid(status, seqid);
}
1185
/*
 * Serialise operations sharing a sequence counter: queue @seqid and
 * return 0 if it is at the head of the queue, otherwise put @task to
 * sleep on the counter's wait queue and return -EAGAIN.
 */
int nfs_wait_on_sequence(struct nfs_seqid *seqid, struct rpc_task *task)
{
	struct nfs_seqid_counter *sequence;
	int status = 0;

	if (seqid == NULL)
		goto out;
	sequence = seqid->sequence;
	spin_lock(&sequence->lock);
	seqid->task = task;
	if (list_empty(&seqid->list))
		list_add_tail(&seqid->list, &sequence->list);
	if (list_first_entry(&sequence->list, struct nfs_seqid, list) == seqid)
		goto unlock;
	rpc_sleep_on(&sequence->wait, task, NULL);
	status = -EAGAIN;
unlock:
	spin_unlock(&sequence->lock);
out:
	return status;
}
1207
1208static int nfs4_run_state_manager(void *);
1209
/*
 * Clear the "state manager running" bit and wake anyone waiting on it.
 * The barriers order the bit clear against the wakeups for waiters
 * such as nfs4_wait_clnt_recover().
 */
static void nfs4_clear_state_manager_bit(struct nfs_client *clp)
{
	smp_mb__before_atomic();
	clear_bit(NFS4CLNT_MANAGER_RUNNING, &clp->cl_state);
	smp_mb__after_atomic();
	wake_up_bit(&clp->cl_state, NFS4CLNT_MANAGER_RUNNING);
	rpc_wake_up(&clp->cl_rpcwaitq);
}
1218
/*
 * Schedule the nfs4 state management thread, unless one is already
 * running for this client.
 */
void nfs4_schedule_state_manager(struct nfs_client *clp)
{
	struct task_struct *task;
	char buf[INET6_ADDRSTRLEN + sizeof("-manager") + 1];

	set_bit(NFS4CLNT_RUN_MANAGER, &clp->cl_state);
	if (test_and_set_bit(NFS4CLNT_MANAGER_RUNNING, &clp->cl_state) != 0)
		return;
	/* The manager thread owns a module and a client reference;
	 * presumably released by the thread itself — on kthread_run()
	 * failure they are dropped below. */
	__module_get(THIS_MODULE);
	refcount_inc(&clp->cl_count);

	/* rcu_read_lock() protects the peer address string while we
	 * format the thread name. */
	rcu_read_lock();
	snprintf(buf, sizeof(buf), "%s-manager",
			rpc_peeraddr2str(clp->cl_rpcclient, RPC_DISPLAY_ADDR));
	rcu_read_unlock();
	task = kthread_run(nfs4_run_state_manager, clp, "%s", buf);
	if (IS_ERR(task)) {
		printk(KERN_ERR "%s: kthread_run: %ld\n",
			__func__, PTR_ERR(task));
		nfs4_clear_state_manager_bit(clp);
		nfs_put_client(clp);
		module_put(THIS_MODULE);
	}
}
1249
/*
 * Schedule a lease recovery attempt
 */
void nfs4_schedule_lease_recovery(struct nfs_client *clp)
{
	if (!clp)
		return;
	/* Only request a lease check if the lease is not already known
	 * to have expired. */
	if (!test_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state))
		set_bit(NFS4CLNT_CHECK_LEASE, &clp->cl_state);
	dprintk("%s: scheduling lease recovery for server %s\n", __func__,
			clp->cl_hostname);
	nfs4_schedule_state_manager(clp);
}
EXPORT_SYMBOL_GPL(nfs4_schedule_lease_recovery);
1264
/**
 * nfs4_schedule_migration_recovery - trigger migration recovery
 * @server: FSID that is migrating
 *
 * Returns zero if recovery has started, otherwise a negative NFS4ERR
 * value is returned.
 */
int nfs4_schedule_migration_recovery(const struct nfs_server *server)
{
	struct nfs_client *clp = server->nfs_client;

	if (server->fh_expire_type != NFS4_FH_PERSISTENT) {
		pr_err("NFS: volatile file handles not supported (server %s)\n",
				clp->cl_hostname);
		return -NFS4ERR_IO;
	}

	if (test_bit(NFS_MIG_FAILED, &server->mig_status))
		return -NFS4ERR_IO;

	dprintk("%s: scheduling migration recovery for (%llx:%llx) on %s\n",
			__func__,
			(unsigned long long)server->fsid.major,
			(unsigned long long)server->fsid.minor,
			clp->cl_hostname);

	/* Cast away const: mig_status is mutable client-side state. */
	set_bit(NFS_MIG_IN_TRANSITION,
			&((struct nfs_server *)server)->mig_status);
	set_bit(NFS4CLNT_MOVED, &clp->cl_state);

	nfs4_schedule_state_manager(clp);
	return 0;
}
EXPORT_SYMBOL_GPL(nfs4_schedule_migration_recovery);
1300
/**
 * nfs4_schedule_lease_moved_recovery - start lease-moved recovery
 * @clp: server to check for moved leases
 */
void nfs4_schedule_lease_moved_recovery(struct nfs_client *clp)
{
	dprintk("%s: scheduling lease-moved recovery for client ID %llx on %s\n",
			__func__, clp->cl_clientid, clp->cl_hostname);

	set_bit(NFS4CLNT_LEASE_MOVED, &clp->cl_state);
	nfs4_schedule_state_manager(clp);
}
EXPORT_SYMBOL_GPL(nfs4_schedule_lease_moved_recovery);
1316
/*
 * Wait (killably) for any running state manager thread to finish.
 * Returns 0, a negative errno if the wait was interrupted, or the
 * client's negative initialisation status, if any.
 */
int nfs4_wait_clnt_recover(struct nfs_client *clp)
{
	int res;

	might_sleep();

	/* Pin the client while we sleep. */
	refcount_inc(&clp->cl_count);
	res = wait_on_bit_action(&clp->cl_state, NFS4CLNT_MANAGER_RUNNING,
			nfs_wait_bit_killable, TASK_KILLABLE);
	if (res)
		goto out;
	if (clp->cl_cons_state < 0)
		res = clp->cl_cons_state;
out:
	nfs_put_client(clp);
	return res;
}
1334
/*
 * Wait for an expired lease to be recovered, retrying a bounded number
 * of times.  Returns 0 once neither lease-expired nor check-lease is
 * pending, or a negative errno.
 */
int nfs4_client_recover_expired_lease(struct nfs_client *clp)
{
	unsigned int loop;
	int ret;

	for (loop = NFS4_MAX_LOOP_ON_RECOVER; loop != 0; loop--) {
		ret = nfs4_wait_clnt_recover(clp);
		if (ret != 0)
			break;
		if (!test_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state) &&
		    !test_bit(NFS4CLNT_CHECK_LEASE,&clp->cl_state))
			break;
		/* Recovery is still needed: kick the manager again. */
		nfs4_schedule_state_manager(clp);
		ret = -EIO;
	}
	return ret;
}
1352
/*
 * nfs40_handle_cb_pathdown - handle NFS4ERR_CB_PATH_DOWN (minor version 0)
 * @clp: client to process
 *
 * Set the NFS4CLNT_LEASE_EXPIRED state in order to force lease
 * re-establishment (and hence a new callback channel), then expire
 * all existing delegations.
 */
static void nfs40_handle_cb_pathdown(struct nfs_client *clp)
{
	set_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state);
	nfs_expire_all_delegations(clp);
	dprintk("%s: handling CB_PATHDOWN recovery for server %s\n", __func__,
			clp->cl_hostname);
}
1368
/* Recover from a callback path-down condition. */
void nfs4_schedule_path_down_recovery(struct nfs_client *clp)
{
	nfs40_handle_cb_pathdown(clp);
	nfs4_schedule_state_manager(clp);
}
1374
/*
 * Mark @state for reclaim after a server reboot.  Returns 1 if
 * recovery was scheduled, 0 if the stateid is invalid or a no-grace
 * reclaim is already pending (which takes precedence).
 */
static int nfs4_state_mark_reclaim_reboot(struct nfs_client *clp, struct nfs4_state *state)
{
	if (!nfs4_valid_open_stateid(state))
		return 0;
	set_bit(NFS_STATE_RECLAIM_REBOOT, &state->flags);
	/* A no-grace reclaim overrides a reboot reclaim. */
	if (test_bit(NFS_STATE_RECLAIM_NOGRACE, &state->flags)) {
		clear_bit(NFS_STATE_RECLAIM_REBOOT, &state->flags);
		return 0;
	}
	set_bit(NFS_OWNER_RECLAIM_REBOOT, &state->owner->so_flags);
	set_bit(NFS4CLNT_RECLAIM_REBOOT, &clp->cl_state);
	return 1;
}
1390
/*
 * Mark @state for no-grace recovery (re-open outside the grace
 * period), overriding any pending reboot reclaim.  Returns 1 if
 * marked, 0 if the stateid is no longer valid.
 */
int nfs4_state_mark_reclaim_nograce(struct nfs_client *clp, struct nfs4_state *state)
{
	if (!nfs4_valid_open_stateid(state))
		return 0;
	set_bit(NFS_STATE_RECLAIM_NOGRACE, &state->flags);
	clear_bit(NFS_STATE_RECLAIM_REBOOT, &state->flags);
	set_bit(NFS_OWNER_RECLAIM_NOGRACE, &state->owner->so_flags);
	set_bit(NFS4CLNT_RECLAIM_NOGRACE, &clp->cl_state);
	return 1;
}
1401
/*
 * nfs4_schedule_stateid_recovery - recover a single open stateid
 * @server: server owning the state
 * @state: the open state to recover
 *
 * Marks the state for no-grace recovery, queues any matching
 * delegation for recovery too, and wakes the state manager.
 * Returns 0, or -EBADF if the stateid could not be marked.
 */
int nfs4_schedule_stateid_recovery(const struct nfs_server *server, struct nfs4_state *state)
{
	struct nfs_client *clp = server->nfs_client;

	if (!nfs4_state_mark_reclaim_nograce(clp, state))
		return -EBADF;
	nfs_inode_find_delegation_state_and_recover(state->inode,
			&state->stateid);
	dprintk("%s: scheduling stateid recovery for server %s\n", __func__,
			clp->cl_hostname);
	nfs4_schedule_state_manager(clp);
	return 0;
}
EXPORT_SYMBOL_GPL(nfs4_schedule_stateid_recovery);
1416
1417static struct nfs4_lock_state *
1418nfs_state_find_lock_state_by_stateid(struct nfs4_state *state,
1419 const nfs4_stateid *stateid)
1420{
1421 struct nfs4_lock_state *pos;
1422
1423 list_for_each_entry(pos, &state->lock_states, ls_locks) {
1424 if (!test_bit(NFS_LOCK_INITIALIZED, &pos->ls_flags))
1425 continue;
1426 if (nfs4_stateid_match_other(&pos->ls_stateid, stateid))
1427 return pos;
1428 }
1429 return NULL;
1430}
1431
1432static bool nfs_state_lock_state_matches_stateid(struct nfs4_state *state,
1433 const nfs4_stateid *stateid)
1434{
1435 bool found = false;
1436
1437 if (test_bit(LK_STATE_IN_USE, &state->flags)) {
1438 spin_lock(&state->state_lock);
1439 if (nfs_state_find_lock_state_by_stateid(state, stateid))
1440 found = true;
1441 spin_unlock(&state->state_lock);
1442 }
1443 return found;
1444}
1445
/*
 * nfs_inode_find_state_and_recover - recover state matching a stateid
 * @inode: inode whose open contexts are searched
 * @stateid: stateid the server reported as bad/revoked
 *
 * Walks the inode's open files under RCU and marks for no-grace
 * recovery every open state whose current, open, or lock stateid
 * shares @stateid's "other" field.  Delegations are checked as well.
 * The state manager is only woken if something was marked.
 */
void nfs_inode_find_state_and_recover(struct inode *inode,
		const nfs4_stateid *stateid)
{
	struct nfs_client *clp = NFS_SERVER(inode)->nfs_client;
	struct nfs_inode *nfsi = NFS_I(inode);
	struct nfs_open_context *ctx;
	struct nfs4_state *state;
	bool found = false;

	rcu_read_lock();
	list_for_each_entry_rcu(ctx, &nfsi->open_files, list) {
		state = ctx->state;
		if (state == NULL)
			continue;
		if (nfs4_stateid_match_other(&state->stateid, stateid) &&
		    nfs4_state_mark_reclaim_nograce(clp, state)) {
			found = true;
			continue;
		}
		if (nfs4_stateid_match_other(&state->open_stateid, stateid) &&
		    nfs4_state_mark_reclaim_nograce(clp, state)) {
			found = true;
			continue;
		}
		if (nfs_state_lock_state_matches_stateid(state, stateid) &&
		    nfs4_state_mark_reclaim_nograce(clp, state))
			found = true;
	}
	rcu_read_unlock();

	nfs_inode_find_delegation_state_and_recover(inode, stateid);
	if (found)
		nfs4_schedule_state_manager(clp);
}
1480
/*
 * Mark every open context that references @state as bad, so that
 * subsequent I/O through those file descriptors fails instead of
 * using unrecoverable state.  Logs one warning per affected file.
 */
static void nfs4_state_mark_open_context_bad(struct nfs4_state *state, int err)
{
	struct inode *inode = state->inode;
	struct nfs_inode *nfsi = NFS_I(inode);
	struct nfs_open_context *ctx;

	rcu_read_lock();
	list_for_each_entry_rcu(ctx, &nfsi->open_files, list) {
		if (ctx->state != state)
			continue;
		set_bit(NFS_CONTEXT_BAD, &ctx->flags);
		pr_warn("NFSv4: state recovery failed for open file %pd2, "
				"error = %d\n", ctx->dentry, err);
	}
	rcu_read_unlock();
}
1497
/* Record permanent recovery failure on @state and poison its contexts. */
static void nfs4_state_mark_recovery_failed(struct nfs4_state *state, int error)
{
	set_bit(NFS_STATE_RECOVERY_FAILED, &state->flags);
	nfs4_state_mark_open_context_bad(state, error);
}
1503
1504
/*
 * nfs4_reclaim_locks - re-establish all file locks held on @state
 * @state: the open state whose locks are reclaimed
 * @ops: recovery operations (reboot or no-grace variants)
 *
 * Walks first the POSIX then the flock lists of the inode's lock
 * context, re-sending each lock via ops->recover_lock().  Session
 * and lease-level errors abort the reclaim and propagate upward;
 * per-lock failures mark the individual lock as lost and continue.
 *
 * Returns 0, or a negative error requiring higher-level recovery.
 */
static int nfs4_reclaim_locks(struct nfs4_state *state, const struct nfs4_state_recovery_ops *ops)
{
	struct inode *inode = state->inode;
	struct nfs_inode *nfsi = NFS_I(inode);
	struct file_lock *fl;
	struct nfs4_lock_state *lsp;
	int status = 0;
	struct file_lock_context *flctx = inode->i_flctx;
	struct list_head *list;

	if (flctx == NULL)
		return 0;

	list = &flctx->flc_posix;

	/* Guard against delegation returns and new lock/unlock calls */
	down_write(&nfsi->rwsem);
	spin_lock(&flctx->flc_lock);
restart:
	list_for_each_entry(fl, list, fl_list) {
		if (nfs_file_open_context(fl->fl_file)->state != state)
			continue;
		/* flc_lock cannot be held across the recovery RPC */
		spin_unlock(&flctx->flc_lock);
		status = ops->recover_lock(state, fl);
		switch (status) {
		case 0:
			break;
		case -ETIMEDOUT:
		case -ESTALE:
		case -NFS4ERR_ADMIN_REVOKED:
		case -NFS4ERR_STALE_STATEID:
		case -NFS4ERR_BAD_STATEID:
		case -NFS4ERR_EXPIRED:
		case -NFS4ERR_NO_GRACE:
		case -NFS4ERR_STALE_CLIENTID:
		case -NFS4ERR_BADSESSION:
		case -NFS4ERR_BADSLOT:
		case -NFS4ERR_BAD_HIGH_SLOT:
		case -NFS4ERR_CONN_NOT_BOUND_TO_SESSION:
			/* Recovery must be retried at a higher level */
			goto out;
		default:
			pr_err("NFS: %s: unhandled error %d\n",
					__func__, status);
			/* fall through */
		case -ENOMEM:
		case -NFS4ERR_DENIED:
		case -NFS4ERR_RECLAIM_BAD:
		case -NFS4ERR_RECLAIM_CONFLICT:
			/* This lock cannot be reclaimed: mark it lost */
			lsp = fl->fl_u.nfs4_fl.owner;
			if (lsp)
				set_bit(NFS_LOCK_LOST, &lsp->ls_flags);
			status = 0;
		}
		spin_lock(&flctx->flc_lock);
	}
	if (list == &flctx->flc_posix) {
		list = &flctx->flc_flock;
		goto restart;
	}
	spin_unlock(&flctx->flc_lock);
out:
	up_write(&nfsi->rwsem);
	return status;
}
1569
#ifdef CONFIG_NFS_V4_2
/*
 * An inter-server NFSv4.2 COPY may be parked waiting for its
 * destination open state to be recovered.  Wake up the first copy
 * whose parent open stateid matches @state.
 *
 * Note: the original text was corrupted by an HTML-entity mangling
 * ("&copy" rendered as the copyright sign); the pointer accesses
 * below restore the intended code.
 */
static void nfs42_complete_copies(struct nfs4_state_owner *sp, struct nfs4_state *state)
{
	struct nfs4_copy_state *copy;

	if (!test_bit(NFS_CLNT_DST_SSC_COPY_STATE, &state->flags))
		return;

	spin_lock(&sp->so_server->nfs_client->cl_lock);
	list_for_each_entry(copy, &sp->so_server->ss_copies, copies) {
		if (!nfs4_stateid_match_other(&state->stateid,
					      &copy->parent_state->stateid))
			continue;
		copy->flags = 1;
		complete(&copy->completion);
		break;
	}
	spin_unlock(&sp->so_server->nfs_client->cl_lock);
}
#else /* !CONFIG_NFS_V4_2 */
/* No server-side copy support: nothing to complete. */
static inline void nfs42_complete_copies(struct nfs4_state_owner *sp,
					 struct nfs4_state *state)
{
}
#endif /* CONFIG_NFS_V4_2 */
1594
/*
 * Reclaim one open state: re-open via ops->recover_open(), then
 * reclaim its locks.  On success, warn about any locks that could
 * not be re-established (unless the state is covered by a
 * delegation), wake any waiting server-to-server copies, and clear
 * the no-grace flag.  Returns 0 or a negative error.
 */
static int __nfs4_reclaim_open_state(struct nfs4_state_owner *sp, struct nfs4_state *state,
				     const struct nfs4_state_recovery_ops *ops)
{
	struct nfs4_lock_state *lock;
	int status;

	status = ops->recover_open(sp, state);
	if (status < 0)
		return status;

	status = nfs4_reclaim_locks(state, ops);
	if (status < 0)
		return status;

	/* A delegation implicitly covers the locks; otherwise check them */
	if (!test_bit(NFS_DELEGATED_STATE, &state->flags)) {
		spin_lock(&state->state_lock);
		list_for_each_entry(lock, &state->lock_states, ls_locks) {
			if (!test_bit(NFS_LOCK_INITIALIZED, &lock->ls_flags))
				pr_warn_ratelimited("NFS: %s: Lock reclaim failed!\n", __func__);
		}
		spin_unlock(&state->state_lock);
	}

	nfs42_complete_copies(sp, state);
	clear_bit(NFS_STATE_RECLAIM_NOGRACE, &state->flags);
	return status;
}
1622
/*
 * nfs4_reclaim_open_state - reclaim all open state for one state owner
 * @sp: the state owner
 * @ops: recovery operations (reboot or no-grace variants)
 *
 * Repeatedly scans sp->so_states for entries flagged with
 * ops->state_flag_bit.  so_lock is dropped around each reclaim RPC
 * and the scan restarts from the top afterwards, since the list may
 * have changed.  The write seqcount lets readers detect that a
 * reclaim pass is in progress.
 *
 * Returns 0 when no flagged state remains, or a negative error if
 * recovery must be retried at a higher level.
 */
static int nfs4_reclaim_open_state(struct nfs4_state_owner *sp, const struct nfs4_state_recovery_ops *ops)
{
	struct nfs4_state *state;
	unsigned int loop = 0;
	int status = 0;

	/*
	 * NOTE(review): the seqcount is begun under so_lock and ended
	 * after re-taking it; the spin_unlock/spin_lock dance around
	 * each __nfs4_reclaim_open_state() call is what makes the
	 * "goto restart" rescans necessary.
	 */
	spin_lock(&sp->so_lock);
	raw_write_seqcount_begin(&sp->so_reclaim_seqcount);
restart:
	list_for_each_entry(state, &sp->so_states, open_states) {
		if (!test_and_clear_bit(ops->state_flag_bit, &state->flags))
			continue;
		if (!nfs4_valid_open_stateid(state))
			continue;
		if (state->state == 0)
			continue;
		/* Pin the state before dropping so_lock for the RPC */
		refcount_inc(&state->count);
		spin_unlock(&sp->so_lock);
		status = __nfs4_reclaim_open_state(sp, state, ops);

		switch (status) {
		default:
			if (status >= 0) {
				loop = 0;
				break;
			}
			printk(KERN_ERR "NFS: %s: unhandled error %d\n", __func__, status);
			/* fall through */
		case -ENOENT:
		case -ENOMEM:
		case -EACCES:
		case -EROFS:
		case -EIO:
		case -ESTALE:
			/* Open state on this file cannot be recovered */
			nfs4_state_mark_recovery_failed(state, status);
			break;
		case -EAGAIN:
			ssleep(1);
			if (loop++ < 10) {
				set_bit(ops->state_flag_bit, &state->flags);
				break;
			}
			/* fall through */
		case -NFS4ERR_ADMIN_REVOKED:
		case -NFS4ERR_STALE_STATEID:
		case -NFS4ERR_OLD_STATEID:
		case -NFS4ERR_BAD_STATEID:
		case -NFS4ERR_RECLAIM_BAD:
		case -NFS4ERR_RECLAIM_CONFLICT:
			nfs4_state_mark_reclaim_nograce(sp->so_server->nfs_client, state);
			break;
		case -NFS4ERR_EXPIRED:
		case -NFS4ERR_NO_GRACE:
			nfs4_state_mark_reclaim_nograce(sp->so_server->nfs_client, state);
			/* fall through */
		case -NFS4ERR_STALE_CLIENTID:
		case -NFS4ERR_BADSESSION:
		case -NFS4ERR_BADSLOT:
		case -NFS4ERR_BAD_HIGH_SLOT:
		case -NFS4ERR_CONN_NOT_BOUND_TO_SESSION:
		case -ETIMEDOUT:
			goto out_err;
		}
		nfs4_put_open_state(state);
		spin_lock(&sp->so_lock);
		goto restart;
	}
	raw_write_seqcount_end(&sp->so_reclaim_seqcount);
	spin_unlock(&sp->so_lock);
	return 0;
out_err:
	nfs4_put_open_state(state);
	spin_lock(&sp->so_lock);
	raw_write_seqcount_end(&sp->so_reclaim_seqcount);
	spin_unlock(&sp->so_lock);
	return status;
}
1709
/*
 * Reset the cached open/delegation mode bits on @state and
 * de-initialize its lock states, so everything must be re-established
 * with the server during recovery.
 */
static void nfs4_clear_open_state(struct nfs4_state *state)
{
	struct nfs4_lock_state *lock;

	clear_bit(NFS_DELEGATED_STATE, &state->flags);
	clear_bit(NFS_O_RDONLY_STATE, &state->flags);
	clear_bit(NFS_O_WRONLY_STATE, &state->flags);
	clear_bit(NFS_O_RDWR_STATE, &state->flags);
	spin_lock(&state->state_lock);
	list_for_each_entry(lock, &state->lock_states, ls_locks) {
		lock->ls_seqid.flags = 0;
		clear_bit(NFS_LOCK_INITIALIZED, &lock->ls_flags);
	}
	spin_unlock(&state->state_lock);
}
1725
/*
 * For every state owner on @server: reset its open seqid flags and
 * run @mark_reclaim over each of its open states, clearing the cached
 * open state when the callback marks it for recovery.
 */
static void nfs4_reset_seqids(struct nfs_server *server,
		int (*mark_reclaim)(struct nfs_client *clp, struct nfs4_state *state))
{
	struct nfs_client *clp = server->nfs_client;
	struct nfs4_state_owner *sp;
	struct rb_node *pos;
	struct nfs4_state *state;

	spin_lock(&clp->cl_lock);
	for (pos = rb_first(&server->state_owners);
	     pos != NULL;
	     pos = rb_next(pos)) {
		sp = rb_entry(pos, struct nfs4_state_owner, so_server_node);
		sp->so_seqid.flags = 0;
		spin_lock(&sp->so_lock);
		list_for_each_entry(state, &sp->so_states, open_states) {
			if (mark_reclaim(clp, state))
				nfs4_clear_open_state(state);
		}
		spin_unlock(&sp->so_lock);
	}
	spin_unlock(&clp->cl_lock);
}
1749
/* Apply nfs4_reset_seqids() to every superblock sharing client @clp. */
static void nfs4_state_mark_reclaim_helper(struct nfs_client *clp,
		int (*mark_reclaim)(struct nfs_client *clp, struct nfs4_state *state))
{
	struct nfs_server *server;

	rcu_read_lock();
	list_for_each_entry_rcu(server, &clp->cl_superblocks, client_link)
		nfs4_reset_seqids(server, mark_reclaim);
	rcu_read_unlock();
}
1760
/* Mark all delegations and open state for reboot (grace) reclaim. */
static void nfs4_state_start_reclaim_reboot(struct nfs_client *clp)
{
	/* Mark all delegations for reclaim */
	nfs_delegation_mark_reclaim(clp);
	nfs4_state_mark_reclaim_helper(clp, nfs4_state_mark_reclaim_reboot);
}
1767
1768static int nfs4_reclaim_complete(struct nfs_client *clp,
1769 const struct nfs4_state_recovery_ops *ops,
1770 const struct cred *cred)
1771{
1772
1773 if (ops->reclaim_complete)
1774 return ops->reclaim_complete(clp, cred);
1775 return 0;
1776}
1777
/*
 * After the grace period: any open state on @server still flagged for
 * reboot reclaim was not recovered in time, so requeue it for
 * no-grace recovery instead.
 */
static void nfs4_clear_reclaim_server(struct nfs_server *server)
{
	struct nfs_client *clp = server->nfs_client;
	struct nfs4_state_owner *sp;
	struct rb_node *pos;
	struct nfs4_state *state;

	spin_lock(&clp->cl_lock);
	for (pos = rb_first(&server->state_owners);
	     pos != NULL;
	     pos = rb_next(pos)) {
		sp = rb_entry(pos, struct nfs4_state_owner, so_server_node);
		spin_lock(&sp->so_lock);
		list_for_each_entry(state, &sp->so_states, open_states) {
			if (!test_and_clear_bit(NFS_STATE_RECLAIM_REBOOT,
						&state->flags))
				continue;
			nfs4_state_mark_reclaim_nograce(clp, state);
		}
		spin_unlock(&sp->so_lock);
	}
	spin_unlock(&clp->cl_lock);
}
1801
/*
 * End the reboot-reclaim phase for @clp: requeue leftover state for
 * no-grace recovery and reap unclaimed delegations.  Returns 1 if a
 * reclaim phase was actually in progress, 0 otherwise.
 */
static int nfs4_state_clear_reclaim_reboot(struct nfs_client *clp)
{
	struct nfs_server *server;

	if (!test_and_clear_bit(NFS4CLNT_RECLAIM_REBOOT, &clp->cl_state))
		return 0;

	rcu_read_lock();
	list_for_each_entry_rcu(server, &clp->cl_superblocks, client_link)
		nfs4_clear_reclaim_server(server);
	rcu_read_unlock();

	nfs_delegation_reap_unclaimed(clp);
	return 1;
}
1817
/*
 * Finish reboot recovery: clear the reclaim phase and send
 * RECLAIM_COMPLETE to the server.  If the session connection was not
 * bound, re-arm the reclaim so it is retried after rebinding.
 */
static void nfs4_state_end_reclaim_reboot(struct nfs_client *clp)
{
	const struct nfs4_state_recovery_ops *ops;
	const struct cred *cred;
	int err;

	if (!nfs4_state_clear_reclaim_reboot(clp))
		return;
	ops = clp->cl_mvops->reboot_recovery_ops;
	cred = nfs4_get_clid_cred(clp);
	err = nfs4_reclaim_complete(clp, ops, cred);
	put_cred(cred);
	if (err == -NFS4ERR_CONN_NOT_BOUND_TO_SESSION)
		set_bit(NFS4CLNT_RECLAIM_REBOOT, &clp->cl_state);
}
1833
/* Queue all delegations and open state for no-grace recovery. */
static void nfs4_state_start_reclaim_nograce(struct nfs_client *clp)
{
	nfs_mark_test_expired_all_delegations(clp);
	nfs4_state_mark_reclaim_helper(clp, nfs4_state_mark_reclaim_nograce);
}
1839
/*
 * nfs4_recovery_handle_error - convert an error into recovery actions
 * @clp: client that received the error
 * @error: 0 or a negative errno/NFS4ERR code
 *
 * Sets the appropriate recovery bits for errors the state manager
 * knows how to repair, and returns 0 for those.  Unknown errors are
 * returned unchanged so the caller can abort.
 */
static int nfs4_recovery_handle_error(struct nfs_client *clp, int error)
{
	switch (error) {
	case 0:
		break;
	case -NFS4ERR_CB_PATH_DOWN:
		nfs40_handle_cb_pathdown(clp);
		break;
	case -NFS4ERR_NO_GRACE:
		nfs4_state_end_reclaim_reboot(clp);
		break;
	case -NFS4ERR_STALE_CLIENTID:
		set_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state);
		nfs4_state_start_reclaim_reboot(clp);
		break;
	case -NFS4ERR_EXPIRED:
		set_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state);
		nfs4_state_start_reclaim_nograce(clp);
		break;
	case -NFS4ERR_BADSESSION:
	case -NFS4ERR_BADSLOT:
	case -NFS4ERR_BAD_HIGH_SLOT:
	case -NFS4ERR_DEADSESSION:
	case -NFS4ERR_SEQ_FALSE_RETRY:
	case -NFS4ERR_SEQ_MISORDERED:
		set_bit(NFS4CLNT_SESSION_RESET, &clp->cl_state);
		/* Zero session reset errors */
		break;
	case -NFS4ERR_CONN_NOT_BOUND_TO_SESSION:
		set_bit(NFS4CLNT_BIND_CONN_TO_SESSION, &clp->cl_state);
		break;
	default:
		dprintk("%s: failed to handle error %d for server %s\n",
				__func__, error, clp->cl_hostname);
		return error;
	}
	dprintk("%s: handled error %d for server %s\n", __func__, error,
			clp->cl_hostname);
	return 0;
}
1880
/*
 * nfs4_do_reclaim - reclaim open state for every flagged state owner
 * @clp: the client being recovered
 * @ops: recovery operations (reboot or no-grace variants)
 *
 * Iterates all superblocks and their state-owner trees, reclaiming
 * owners whose ops->owner_flag_bit is set.  Both the RCU read lock
 * and cl_lock are dropped around each reclaim, so the walk restarts
 * from scratch afterwards.  Returns 0, or a negative error /-EAGAIN
 * if the caller must re-run recovery.
 */
static int nfs4_do_reclaim(struct nfs_client *clp, const struct nfs4_state_recovery_ops *ops)
{
	struct nfs4_state_owner *sp;
	struct nfs_server *server;
	struct rb_node *pos;
	LIST_HEAD(freeme);
	int status = 0;

restart:
	rcu_read_lock();
	list_for_each_entry_rcu(server, &clp->cl_superblocks, client_link) {
		nfs4_purge_state_owners(server, &freeme);
		spin_lock(&clp->cl_lock);
		for (pos = rb_first(&server->state_owners);
		     pos != NULL;
		     pos = rb_next(pos)) {
			sp = rb_entry(pos,
				struct nfs4_state_owner, so_server_node);
			if (!test_and_clear_bit(ops->owner_flag_bit,
							&sp->so_flags))
				continue;
			/* Skip owners that are being torn down */
			if (!atomic_inc_not_zero(&sp->so_count))
				continue;
			/* Cannot reclaim (sleep) under spinlock/RCU */
			spin_unlock(&clp->cl_lock);
			rcu_read_unlock();

			status = nfs4_reclaim_open_state(sp, ops);
			if (status < 0) {
				/* Re-arm this owner before bailing out */
				set_bit(ops->owner_flag_bit, &sp->so_flags);
				nfs4_put_state_owner(sp);
				status = nfs4_recovery_handle_error(clp, status);
				return (status != 0) ? status : -EAGAIN;
			}

			nfs4_put_state_owner(sp);
			goto restart;
		}
		spin_unlock(&clp->cl_lock);
	}
	rcu_read_unlock();
	nfs4_free_state_owners(&freeme);
	return 0;
}
1924
/*
 * nfs4_check_lease - verify the lease is still valid on the server
 * @clp: client to check
 *
 * Sends a lease renewal using the renewal credential (falling back
 * to the client-ID credential).  A timeout re-arms the lease check;
 * other errors are fed through nfs4_recovery_handle_error().
 */
static int nfs4_check_lease(struct nfs_client *clp)
{
	const struct cred *cred;
	const struct nfs4_state_maintenance_ops *ops =
		clp->cl_mvops->state_renewal_ops;
	int status;

	/* Is the client already known to have an expired lease? */
	if (test_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state))
		return 0;
	cred = ops->get_state_renewal_cred(clp);
	if (cred == NULL) {
		cred = nfs4_get_clid_cred(clp);
		status = -ENOKEY;
		if (cred == NULL)
			goto out;
	}
	status = ops->renew_lease(clp, cred);
	put_cred(cred);
	if (status == -ETIMEDOUT) {
		set_bit(NFS4CLNT_CHECK_LEASE, &clp->cl_state);
		return 0;
	}
out:
	return nfs4_recovery_handle_error(clp, status);
}
1951
1952
1953
1954
/*
 * Handle errors from an attempt to establish/confirm the client's
 * lease.  Retryable errors set NFS4CLNT_LEASE_EXPIRED and return 0
 * so the state manager tries again; fatal errors are returned (and
 * may mark the client permanently unusable).
 */
static int nfs4_handle_reclaim_lease_error(struct nfs_client *clp, int status)
{
	switch (status) {
	case -NFS4ERR_SEQ_MISORDERED:
		if (test_and_set_bit(NFS4CLNT_PURGE_STATE, &clp->cl_state))
			return -ESERVERFAULT;
		/* Lease confirmation error: retry after purging the lease */
		ssleep(1);
		clear_bit(NFS4CLNT_LEASE_CONFIRM, &clp->cl_state);
		break;
	case -NFS4ERR_STALE_CLIENTID:
		clear_bit(NFS4CLNT_LEASE_CONFIRM, &clp->cl_state);
		nfs4_state_start_reclaim_reboot(clp);
		break;
	case -NFS4ERR_CLID_INUSE:
		pr_err("NFS: Server %s reports our clientid is in use\n",
			clp->cl_hostname);
		nfs_mark_client_ready(clp, -EPERM);
		clear_bit(NFS4CLNT_LEASE_CONFIRM, &clp->cl_state);
		return -EPERM;
	case -EACCES:
	case -NFS4ERR_DELAY:
	case -EAGAIN:
		ssleep(1);
		break;

	case -NFS4ERR_MINOR_VERS_MISMATCH:
		if (clp->cl_cons_state == NFS_CS_SESSION_INITING)
			nfs_mark_client_ready(clp, -EPROTONOSUPPORT);
		dprintk("%s: exit with error %d for server %s\n",
				__func__, -EPROTONOSUPPORT, clp->cl_hostname);
		return -EPROTONOSUPPORT;
	case -NFS4ERR_NOT_SAME: /* FixMe: implement recovery
				 * in nfs4_exchange_id */
	default:
		dprintk("%s: exit with error %d for server %s\n", __func__,
				status, clp->cl_hostname);
		return status;
	}
	set_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state);
	dprintk("%s: handled error %d for server %s\n", __func__, status,
			clp->cl_hostname);
	return 0;
}
1999
/*
 * Drain the session and establish a (new) client ID/lease with the
 * server, then tear down all pNFS layouts, which are invalidated by
 * the new lease.  Returns 0 or a negative error.
 */
static int nfs4_establish_lease(struct nfs_client *clp)
{
	const struct cred *cred;
	const struct nfs4_state_recovery_ops *ops =
		clp->cl_mvops->reboot_recovery_ops;
	int status;

	status = nfs4_begin_drain_session(clp);
	if (status != 0)
		return status;
	cred = nfs4_get_clid_cred(clp);
	if (cred == NULL)
		return -ENOENT;
	status = ops->establish_clid(clp, cred);
	put_cred(cred);
	if (status != 0)
		return status;
	pnfs_destroy_all_layouts(clp);
	return 0;
}
2020
2021
2022
2023
2024
/*
 * Re-establish the lease after expiry or server reboot, then arm the
 * appropriate reclaim phase: reboot reclaim unless no-grace recovery
 * is already pending (e.g. after a server-scope change).
 */
static int nfs4_reclaim_lease(struct nfs_client *clp)
{
	int status;

	status = nfs4_establish_lease(clp);
	if (status < 0)
		return nfs4_handle_reclaim_lease_error(clp, status);
	if (test_and_clear_bit(NFS4CLNT_SERVER_SCOPE_MISMATCH, &clp->cl_state))
		nfs4_state_start_reclaim_nograce(clp);
	if (!test_bit(NFS4CLNT_RECLAIM_NOGRACE, &clp->cl_state))
		set_bit(NFS4CLNT_RECLAIM_REBOOT, &clp->cl_state);
	clear_bit(NFS4CLNT_CHECK_LEASE, &clp->cl_state);
	clear_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state);
	return 0;
}
2040
/*
 * Discard all state and obtain a brand-new lease, then queue every
 * piece of state for no-grace recovery against the new lease.
 */
static int nfs4_purge_lease(struct nfs_client *clp)
{
	int status;

	status = nfs4_establish_lease(clp);
	if (status < 0)
		return nfs4_handle_reclaim_lease_error(clp, status);
	clear_bit(NFS4CLNT_PURGE_STATE, &clp->cl_state);
	set_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state);
	nfs4_state_start_reclaim_nograce(clp);
	return 0;
}
2053
2054
2055
2056
2057
2058
2059
2060
2061static int nfs4_try_migration(struct nfs_server *server, const struct cred *cred)
2062{
2063 struct nfs_client *clp = server->nfs_client;
2064 struct nfs4_fs_locations *locations = NULL;
2065 struct inode *inode;
2066 struct page *page;
2067 int status, result;
2068
2069 dprintk("--> %s: FSID %llx:%llx on \"%s\"\n", __func__,
2070 (unsigned long long)server->fsid.major,
2071 (unsigned long long)server->fsid.minor,
2072 clp->cl_hostname);
2073
2074 result = 0;
2075 page = alloc_page(GFP_KERNEL);
2076 locations = kmalloc(sizeof(struct nfs4_fs_locations), GFP_KERNEL);
2077 if (page == NULL || locations == NULL) {
2078 dprintk("<-- %s: no memory\n", __func__);
2079 goto out;
2080 }
2081
2082 inode = d_inode(server->super->s_root);
2083 result = nfs4_proc_get_locations(inode, locations, page, cred);
2084 if (result) {
2085 dprintk("<-- %s: failed to retrieve fs_locations: %d\n",
2086 __func__, result);
2087 goto out;
2088 }
2089
2090 result = -NFS4ERR_NXIO;
2091 if (!(locations->fattr.valid & NFS_ATTR_FATTR_V4_LOCATIONS)) {
2092 dprintk("<-- %s: No fs_locations data, migration skipped\n",
2093 __func__);
2094 goto out;
2095 }
2096
2097 status = nfs4_begin_drain_session(clp);
2098 if (status != 0)
2099 return status;
2100
2101 status = nfs4_replace_transport(server, locations);
2102 if (status != 0) {
2103 dprintk("<-- %s: failed to replace transport: %d\n",
2104 __func__, status);
2105 goto out;
2106 }
2107
2108 result = 0;
2109 dprintk("<-- %s: migration succeeded\n", __func__);
2110
2111out:
2112 if (page != NULL)
2113 __free_page(page);
2114 kfree(locations);
2115 if (result) {
2116 pr_err("NFS: migration recovery failed (server %s)\n",
2117 clp->cl_hostname);
2118 set_bit(NFS_MIG_FAILED, &server->mig_status);
2119 }
2120 return result;
2121}
2122
2123
2124
2125
/*
 * nfs4_handle_migration - process NFS4ERR_MOVED reports
 * @clp: client that saw the error
 *
 * Walks all superblocks sharing @clp and migrates each one flagged
 * NFS_MIG_IN_TRANSITION.  The per-server mig_gen generation number
 * prevents re-visiting servers after the RCU walk restarts.
 * Returns 0 or a negative error.
 */
static int nfs4_handle_migration(struct nfs_client *clp)
{
	const struct nfs4_state_maintenance_ops *ops =
				clp->cl_mvops->state_renewal_ops;
	struct nfs_server *server;
	const struct cred *cred;

	dprintk("%s: migration reported on \"%s\"\n", __func__,
			clp->cl_hostname);

	cred = ops->get_state_renewal_cred(clp);
	if (cred == NULL)
		return -NFS4ERR_NOENT;

	clp->cl_mig_gen++;
restart:
	rcu_read_lock();
	list_for_each_entry_rcu(server, &clp->cl_superblocks, client_link) {
		int status;

		if (server->mig_gen == clp->cl_mig_gen)
			continue;
		server->mig_gen = clp->cl_mig_gen;

		if (!test_and_clear_bit(NFS_MIG_IN_TRANSITION,
						&server->mig_status))
			continue;

		/* nfs4_try_migration() sleeps: cannot stay in RCU */
		rcu_read_unlock();
		status = nfs4_try_migration(server, cred);
		if (status < 0) {
			put_cred(cred);
			return status;
		}
		goto restart;
	}
	rcu_read_unlock();
	put_cred(cred);
	return 0;
}
2166
2167
2168
2169
2170
2171
/*
 * nfs4_handle_lease_moved - process NFS4ERR_LEASE_MOVED reports
 * @clp: client that saw the error
 *
 * Probes each superblock with FSID_PRESENT to find which filesystem
 * has moved, then migrates it.  Stops after the first successful
 * migration unless the server still reports LEASE_MOVED, in which
 * case the scan continues.  Always returns 0.
 */
static int nfs4_handle_lease_moved(struct nfs_client *clp)
{
	const struct nfs4_state_maintenance_ops *ops =
				clp->cl_mvops->state_renewal_ops;
	struct nfs_server *server;
	const struct cred *cred;

	dprintk("%s: lease moved reported on \"%s\"\n", __func__,
			clp->cl_hostname);

	cred = ops->get_state_renewal_cred(clp);
	if (cred == NULL)
		return -NFS4ERR_NOENT;

	clp->cl_mig_gen++;
restart:
	rcu_read_lock();
	list_for_each_entry_rcu(server, &clp->cl_superblocks, client_link) {
		struct inode *inode;
		int status;

		/* mig_gen guards against re-probing after a restart */
		if (server->mig_gen == clp->cl_mig_gen)
			continue;
		server->mig_gen = clp->cl_mig_gen;

		/* RPC below sleeps: leave the RCU read section first */
		rcu_read_unlock();

		inode = d_inode(server->super->s_root);
		status = nfs4_proc_fsid_present(inode, cred);
		if (status != -NFS4ERR_MOVED)
			goto restart;	/* wasn't this one */
		if (nfs4_try_migration(server, cred) == -NFS4ERR_LEASE_MOVED)
			goto restart;	/* there are more to migrate */
		goto out;
	}
	rcu_read_unlock();

out:
	put_cred(cred);
	return 0;
}
2213
2214
2215
2216
2217
2218
2219
2220
2221
2222
2223
2224
2225
2226
/*
 * nfs4_discover_server_trunking - detect trunked server addresses
 * @clp: the client to test
 * @result: OUT: set to the nfs_client that owns the lease (may be an
 *	already-known client if trunking is detected)
 *
 * Runs the minor-version-specific trunking probe under the global
 * nfs_clid_init_mutex, retrying with different credentials or auth
 * flavors as the server's errors dictate.  Returns 0 on success or a
 * negative errno.
 */
int nfs4_discover_server_trunking(struct nfs_client *clp,
				  struct nfs_client **result)
{
	const struct nfs4_state_recovery_ops *ops =
				clp->cl_mvops->reboot_recovery_ops;
	struct rpc_clnt *clnt;
	const struct cred *cred;
	int i, status;

	dprintk("NFS: %s: testing '%s'\n", __func__, clp->cl_hostname);

	clnt = clp->cl_rpcclient;
	i = 0;

	mutex_lock(&nfs_clid_init_mutex);
again:
	status  = -ENOENT;
	cred = nfs4_get_clid_cred(clp);
	if (cred == NULL)
		goto out_unlock;

	status = ops->detect_trunking(clp, result, cred);
	put_cred(cred);
	switch (status) {
	case 0:
	case -EINTR:
	case -ERESTARTSYS:
		break;
	case -ETIMEDOUT:
		if (clnt->cl_softrtry)
			break;
		/* fall through: hard mounts keep retrying */
	case -NFS4ERR_DELAY:
	case -EAGAIN:
		ssleep(1);
		/* fall through */
	case -NFS4ERR_STALE_CLIENTID:
		dprintk("NFS: %s after status %d, retrying\n",
			__func__, status);
		goto again;
	case -EACCES:
		/* First try the machine root credential once */
		if (i++ == 0) {
			nfs4_root_machine_cred(clp);
			goto again;
		}
		if (clnt->cl_auth->au_flavor == RPC_AUTH_UNIX)
			break;
		/* fall through: downgrade the auth flavor */
	case -NFS4ERR_CLID_INUSE:
	case -NFS4ERR_WRONGSEC:
		/* No point in retrying if we already used RPC_AUTH_UNIX */
		if (clnt->cl_auth->au_flavor == RPC_AUTH_UNIX) {
			status = -EPERM;
			break;
		}
		clnt = rpc_clone_client_set_auth(clnt, RPC_AUTH_UNIX);
		if (IS_ERR(clnt)) {
			status = PTR_ERR(clnt);
			break;
		}
		/*
		 * Swap in the AUTH_UNIX client and shut down the old
		 * one, then retry the probe with the new transport.
		 */
		clnt = xchg(&clp->cl_rpcclient, clnt);
		rpc_shutdown_client(clnt);
		clnt = clp->cl_rpcclient;
		goto again;

	case -NFS4ERR_MINOR_VERS_MISMATCH:
		status = -EPROTONOSUPPORT;
		break;

	case -EKEYEXPIRED:
	case -NFS4ERR_NOT_SAME: /* FixMe: implement recovery
				 * in nfs4_exchange_id */
		status = -EKEYEXPIRED;
		break;
	default:
		pr_warn("NFS: %s unhandled error %d. Exiting with error EIO\n",
				__func__, status);
		status = -EIO;
	}

out_unlock:
	mutex_unlock(&nfs_clid_init_mutex);
	dprintk("NFS: %s: status = %d\n", __func__, status);
	return status;
}
2316
2317#ifdef CONFIG_NFS_V4_1
/*
 * nfs4_schedule_session_recovery - flag session recovery work
 * @session: the NFSv4.1+ session that saw the error
 * @err: the triggering error code
 *
 * CONN_NOT_BOUND_TO_SESSION only needs the connection rebound; any
 * other error forces a full session reset.
 */
void nfs4_schedule_session_recovery(struct nfs4_session *session, int err)
{
	struct nfs_client *clp = session->clp;

	switch (err) {
	default:
		set_bit(NFS4CLNT_SESSION_RESET, &clp->cl_state);
		break;
	case -NFS4ERR_CONN_NOT_BOUND_TO_SESSION:
		set_bit(NFS4CLNT_BIND_CONN_TO_SESSION, &clp->cl_state);
	}
	nfs4_schedule_state_manager(clp);
}
EXPORT_SYMBOL_GPL(nfs4_schedule_session_recovery);
2332
/* Use CHECK_LEASE to ping the server via the state manager thread. */
void nfs41_notify_server(struct nfs_client *clp)
{
	/* Use CHECK_LEASE to ping the server with a SEQUENCE op */
	set_bit(NFS4CLNT_CHECK_LEASE, &clp->cl_state);
	nfs4_schedule_state_manager(clp);
}
2339
/*
 * Purge the lease and rebuild all state from scratch.  A no-op if
 * lease recovery is already in progress (LEASE_EXPIRED already set).
 */
static void nfs4_reset_all_state(struct nfs_client *clp)
{
	if (test_and_set_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state) == 0) {
		set_bit(NFS4CLNT_PURGE_STATE, &clp->cl_state);
		clear_bit(NFS4CLNT_LEASE_CONFIRM, &clp->cl_state);
		nfs4_state_start_reclaim_nograce(clp);
		dprintk("%s: scheduling reset of all state for server %s!\n",
				__func__, clp->cl_hostname);
		nfs4_schedule_state_manager(clp);
	}
}
2351
/*
 * Server reboot detected via SEQUENCE status flags: start reboot
 * reclaim unless lease recovery is already underway.
 */
static void nfs41_handle_server_reboot(struct nfs_client *clp)
{
	if (test_and_set_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state) == 0) {
		nfs4_state_start_reclaim_reboot(clp);
		dprintk("%s: server %s rebooted!\n", __func__,
				clp->cl_hostname);
		nfs4_schedule_state_manager(clp);
	}
}
2361
/* Server revoked everything: reset all client state. */
static void nfs41_handle_all_state_revoked(struct nfs_client *clp)
{
	nfs4_reset_all_state(clp);
	dprintk("%s: state revoked on server %s\n", __func__, clp->cl_hostname);
}
2367
/* Some state revoked: re-test/recover everything without grace. */
static void nfs41_handle_some_state_revoked(struct nfs_client *clp)
{
	nfs4_state_start_reclaim_nograce(clp);
	nfs4_schedule_state_manager(clp);

	dprintk("%s: state revoked on server %s\n", __func__, clp->cl_hostname);
}
2375
/*
 * Recallable state (layouts, delegations) revoked: drop all layouts
 * and schedule testing of every delegation.
 */
static void nfs41_handle_recallable_state_revoked(struct nfs_client *clp)
{
	/* This will need to handle layouts too */
	pnfs_destroy_all_layouts(clp);
	nfs_test_expired_all_delegations(clp);
	dprintk("%s: Recallable state revoked on server %s!\n", __func__,
			clp->cl_hostname);
}
2384
/* Backchannel fault: only a full session reset can repair it. */
static void nfs41_handle_backchannel_fault(struct nfs_client *clp)
{
	set_bit(NFS4CLNT_SESSION_RESET, &clp->cl_state);
	nfs4_schedule_state_manager(clp);

	dprintk("%s: server %s declared a backchannel fault\n", __func__,
			clp->cl_hostname);
}
2393
/*
 * Callback path down (NFSv4.1): rebinding the connection to the
 * session is sufficient; only schedule if not already pending.
 */
static void nfs41_handle_cb_path_down(struct nfs_client *clp)
{
	if (test_and_set_bit(NFS4CLNT_BIND_CONN_TO_SESSION,
		&clp->cl_state) == 0)
		nfs4_schedule_state_manager(clp);
}
2400
/*
 * nfs41_handle_sequence_flag_errors - act on SEQ4_STATUS flags
 * @clp: client that received the SEQUENCE reply
 * @flags: the sr_status_flags word from the reply
 * @recovery: true if this reply arrived during recovery itself
 *
 * Dispatches each set flag to the matching recovery handler.  While
 * recovery is already running, state-revocation flags are skipped
 * (handling them would recurse); only backchannel/callback-path
 * flags are still processed.
 */
void nfs41_handle_sequence_flag_errors(struct nfs_client *clp, u32 flags,
		bool recovery)
{
	if (!flags)
		return;

	dprintk("%s: \"%s\" (client ID %llx) flags=0x%08x\n",
		__func__, clp->cl_hostname, clp->cl_clientid, flags);
	/*
	 * If we're called from the state manager thread, then assume we're
	 * already handling the RECLAIM_NEEDED and/or STATE_REVOKED flags.
	 */
	if (recovery)
		goto out_recovery;

	if (flags & SEQ4_STATUS_RESTART_RECLAIM_NEEDED)
		nfs41_handle_server_reboot(clp);
	if (flags & (SEQ4_STATUS_EXPIRED_ALL_STATE_REVOKED))
		nfs41_handle_all_state_revoked(clp);
	if (flags & (SEQ4_STATUS_EXPIRED_SOME_STATE_REVOKED |
			    SEQ4_STATUS_ADMIN_STATE_REVOKED))
		nfs41_handle_some_state_revoked(clp);
	if (flags & SEQ4_STATUS_LEASE_MOVED)
		nfs4_schedule_lease_moved_recovery(clp);
	if (flags & SEQ4_STATUS_RECALLABLE_STATE_REVOKED)
		nfs41_handle_recallable_state_revoked(clp);
out_recovery:
	if (flags & SEQ4_STATUS_BACKCHANNEL_FAULT)
		nfs41_handle_backchannel_fault(clp);
	else if (flags & (SEQ4_STATUS_CB_PATH_DOWN |
				SEQ4_STATUS_CB_PATH_DOWN_SESSION))
		nfs41_handle_cb_path_down(clp);
}
2436
/*
 * nfs4_reset_session - destroy and recreate the NFSv4.1+ session
 * @clp: client whose session is being reset
 *
 * Drains the session, destroys it on the server (tolerating
 * already-dead sessions), then creates a fresh one.  DELAY/busy
 * errors re-arm the reset and retry later.  Returns 0 or a negative
 * error after feeding it through the recovery error handlers.
 */
static int nfs4_reset_session(struct nfs_client *clp)
{
	const struct cred *cred;
	int status;

	if (!nfs4_has_session(clp))
		return 0;
	status = nfs4_begin_drain_session(clp);
	if (status != 0)
		return status;
	cred = nfs4_get_clid_cred(clp);
	status = nfs4_proc_destroy_session(clp->cl_session, cred);
	switch (status) {
	case 0:
	case -NFS4ERR_BADSESSION:
	case -NFS4ERR_DEADSESSION:
		break;
	case -NFS4ERR_BACK_CHAN_BUSY:
	case -NFS4ERR_DELAY:
		/* Server needs time; retry the reset later */
		set_bit(NFS4CLNT_SESSION_RESET, &clp->cl_state);
		status = 0;
		ssleep(1);
		goto out;
	default:
		status = nfs4_recovery_handle_error(clp, status);
		goto out;
	}

	/* The old session id must not be reused for CREATE_SESSION */
	memset(clp->cl_session->sess_id.data, 0, NFS4_MAX_SESSIONID_LEN);
	status = nfs4_proc_create_session(clp, cred);
	if (status) {
		dprintk("%s: session reset failed with status %d for server %s!\n",
			__func__, status, clp->cl_hostname);
		status = nfs4_handle_reclaim_lease_error(clp, status);
		goto out;
	}
	nfs41_finish_session_reset(clp);
	dprintk("%s: session reset was successful for server %s!\n",
			__func__, clp->cl_hostname);
out:
	put_cred(cred);
	return status;
}
2480
/*
 * nfs4_bind_conn_to_session - rebind the transport to the session
 * @clp: client whose connection needs binding
 *
 * Drains the session, then issues BIND_CONN_TO_SESSION.  A DELAY
 * response re-arms the bind for a later retry; other errors go
 * through nfs4_recovery_handle_error().  Returns 0 or negative error.
 */
static int nfs4_bind_conn_to_session(struct nfs_client *clp)
{
	const struct cred *cred;
	int ret;

	if (!nfs4_has_session(clp))
		return 0;
	ret = nfs4_begin_drain_session(clp);
	if (ret != 0)
		return ret;
	cred = nfs4_get_clid_cred(clp);
	ret = nfs4_proc_bind_conn_to_session(clp, cred);
	put_cred(cred);
	clear_bit(NFS4CLNT_BIND_CONN_TO_SESSION, &clp->cl_state);
	switch (ret) {
	case 0:
		dprintk("%s: bind_conn_to_session was successful for server %s!\n",
			__func__, clp->cl_hostname);
		break;
	case -NFS4ERR_DELAY:
		ssleep(1);
		set_bit(NFS4CLNT_BIND_CONN_TO_SESSION, &clp->cl_state);
		break;
	default:
		return nfs4_recovery_handle_error(clp, ret);
	}
	return 0;
}
2509#else
static int nfs4_reset_session(struct nfs_client *clp) { return 0; } /* no sessions before v4.1 */
2511
/* No sessions before NFSv4.1: nothing to bind. */
static int nfs4_bind_conn_to_session(struct nfs_client *clp)
{
	return 0;
}
2516#endif
2517
/*
 * nfs4_state_manager - main loop of the NFSv4 state recovery thread.
 *
 * Processes the recovery tasks flagged in clp->cl_state in strict
 * priority order: lease purge/reclaim first, then session repair, then
 * lease checks, migration, and finally open/lock state reclaim and
 * delegation work.  Note the control flow is deliberate: some handlers
 * "continue" so the whole flag set is re-evaluated from the top, while
 * others fall through to the next stage.  The loop exits when no work
 * remains, the client reference count drops, or a SIGKILL is pending.
 * On a fatal handler error, a rate-limited warning naming the failed
 * stage is logged before draining the session and stopping.
 */
static void nfs4_state_manager(struct nfs_client *clp)
{
	int status = 0;
	const char *section = "", *section_sep = "";

	/* Keep running as long as new work flags keep being raised. */
	do {
		clear_bit(NFS4CLNT_RUN_MANAGER, &clp->cl_state);
		if (test_bit(NFS4CLNT_PURGE_STATE, &clp->cl_state)) {
			section = "purge state";
			status = nfs4_purge_lease(clp);
			if (status < 0)
				goto out_error;
			continue;
		}

		if (test_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state)) {
			section = "lease expired";
			/* Re-establish our lease with the server. */
			status = nfs4_reclaim_lease(clp);
			if (status < 0)
				goto out_error;
			continue;
		}

		/* Tear down and recreate the session (sessions only). */
		if (test_and_clear_bit(NFS4CLNT_SESSION_RESET, &clp->cl_state)) {
			section = "reset session";
			status = nfs4_reset_session(clp);
			/* Reset may expose an expired lease; restart from the top. */
			if (test_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state))
				continue;
			if (status < 0)
				goto out_error;
		}

		/* Re-bind this transport connection to the session. */
		if (test_and_clear_bit(NFS4CLNT_BIND_CONN_TO_SESSION,
				&clp->cl_state)) {
			section = "bind conn to session";
			status = nfs4_bind_conn_to_session(clp);
			if (status < 0)
				goto out_error;
			continue;
		}

		if (test_and_clear_bit(NFS4CLNT_CHECK_LEASE, &clp->cl_state)) {
			section = "check lease";
			status = nfs4_check_lease(clp);
			if (status < 0)
				goto out_error;
			continue;
		}

		if (test_and_clear_bit(NFS4CLNT_MOVED, &clp->cl_state)) {
			section = "migration";
			status = nfs4_handle_migration(clp);
			if (status < 0)
				goto out_error;
		}

		if (test_and_clear_bit(NFS4CLNT_LEASE_MOVED, &clp->cl_state)) {
			section = "lease moved";
			status = nfs4_handle_lease_moved(clp);
			if (status < 0)
				goto out_error;
		}

		/* Reclaim state held before a server reboot first. */
		if (test_bit(NFS4CLNT_RECLAIM_REBOOT, &clp->cl_state)) {
			section = "reclaim reboot";
			status = nfs4_do_reclaim(clp,
				clp->cl_mvops->reboot_recovery_ops);
			if (status == -EAGAIN)
				continue;
			if (status < 0)
				goto out_error;
			nfs4_state_end_reclaim_reboot(clp);
		}

		/* Reap delegations that the server has let expire. */
		if (test_and_clear_bit(NFS4CLNT_DELEGATION_EXPIRED, &clp->cl_state)) {
			section = "detect expired delegations";
			nfs_reap_expired_delegations(clp);
			continue;
		}

		/* Recover state outside any grace period. */
		if (test_bit(NFS4CLNT_RECLAIM_NOGRACE, &clp->cl_state)) {
			section = "reclaim nograce";
			status = nfs4_do_reclaim(clp,
				clp->cl_mvops->nograce_recovery_ops);
			if (status == -EAGAIN)
				continue;
			if (status < 0)
				goto out_error;
			clear_bit(NFS4CLNT_RECLAIM_NOGRACE, &clp->cl_state);
		}

		nfs4_end_drain_session(clp);
		nfs4_clear_state_manager_bit(clp);

		/* Return marked delegations unless another thread already is. */
		if (!test_and_set_bit(NFS4CLNT_DELEGRETURN_RUNNING, &clp->cl_state)) {
			if (test_and_clear_bit(NFS4CLNT_DELEGRETURN, &clp->cl_state)) {
				nfs_client_return_marked_delegations(clp);
				/* Delegation return may have queued more work. */
				set_bit(NFS4CLNT_RUN_MANAGER, &clp->cl_state);
			}
			clear_bit(NFS4CLNT_DELEGRETURN_RUNNING, &clp->cl_state);
		}

		/* Did we race with an attempt to give us more work? */
		if (!test_bit(NFS4CLNT_RUN_MANAGER, &clp->cl_state))
			return;
		/* Re-claim the manager role; bail if someone else took it. */
		if (test_and_set_bit(NFS4CLNT_MANAGER_RUNNING, &clp->cl_state) != 0)
			return;
	} while (refcount_read(&clp->cl_count) > 1 && !signalled());
	goto out_drain;

out_error:
	if (strlen(section))
		section_sep = ": ";
	pr_warn_ratelimited("NFS: state manager%s%s failed on NFSv4 server %s"
			" with error %d\n", section_sep, section,
			clp->cl_hostname, -status);
	ssleep(1);
out_drain:
	nfs4_end_drain_session(clp);
	nfs4_clear_state_manager_bit(clp);
}
2646
2647static int nfs4_run_state_manager(void *ptr)
2648{
2649 struct nfs_client *clp = ptr;
2650
2651 allow_signal(SIGKILL);
2652 nfs4_state_manager(clp);
2653 nfs_put_client(clp);
2654 module_put_and_exit(0);
2655 return 0;
2656}
2657
2658
2659
2660
2661
2662
2663