1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49#define DEBUG_SUBSYSTEM S_SEC
50#include <linux/init.h>
51#include <linux/module.h>
52#include <linux/slab.h>
53#include <linux/dcache.h>
54#include <linux/fs.h>
55#include <linux/mutex.h>
56#include <linux/crypto.h>
57#include <asm/atomic.h>
58struct rpc_clnt;
59#include <linux/sunrpc/rpc_pipe_fs.h>
60
61#include <obd.h>
62#include <obd_class.h>
63#include <obd_support.h>
64#include <lustre/lustre_idl.h>
65#include <lustre_sec.h>
66#include <lustre_net.h>
67#include <lustre_import.h>
68
69#include "gss_err.h"
70#include "gss_internal.h"
71#include "gss_api.h"
72
/* Forward declarations: the policy/ops tables are defined at the bottom of
 * this file but referenced by the helpers above them. */
static struct ptlrpc_sec_policy gss_policy_pipefs;
static struct ptlrpc_ctx_ops gss_pipefs_ctxops;

static int gss_cli_ctx_refresh_pf(struct ptlrpc_cli_ctx *ctx);
77
/* Per-sec upcall setup hook.  The pipefs upcall channel is global (created
 * once in gss_init_pipefs_upcall()), so there is nothing per-sec to do. */
static int gss_sec_pipe_upcall_init(struct gss_sec *gsec)
{
	return 0;
}
82
/* Per-sec upcall teardown hook; counterpart of gss_sec_pipe_upcall_init(),
 * intentionally empty for the same reason. */
static void gss_sec_pipe_upcall_fini(struct gss_sec *gsec)
{
}
86
87
88
89
90
91static
92struct ptlrpc_cli_ctx *ctx_create_pf(struct ptlrpc_sec *sec,
93 struct vfs_cred *vcred)
94{
95 struct gss_cli_ctx *gctx;
96 int rc;
97
98 OBD_ALLOC_PTR(gctx);
99 if (gctx == NULL)
100 return NULL;
101
102 rc = gss_cli_ctx_init_common(sec, &gctx->gc_base,
103 &gss_pipefs_ctxops, vcred);
104 if (rc) {
105 OBD_FREE_PTR(gctx);
106 return NULL;
107 }
108
109 return &gctx->gc_base;
110}
111
/*
 * Free a context whose last reference is gone.  The common fini is run
 * first; a non-zero return from it means the common layer kept ownership
 * (NOTE(review): exact semantics live in gss_cli_ctx_fini_common — confirm),
 * in which case we must not free here.  Otherwise release the memory and
 * drop this context's accounting/reference on the owning sec.
 */
static
void ctx_destroy_pf(struct ptlrpc_sec *sec, struct ptlrpc_cli_ctx *ctx)
{
	struct gss_cli_ctx *gctx = ctx2gctx(ctx);

	if (gss_cli_ctx_fini_common(sec, ctx))
		return;

	OBD_FREE_PTR(gctx);

	/* the sec held one count per cached context plus a reference */
	atomic_dec(&sec->ps_nctx);
	sptlrpc_sec_put(sec);
}
125
/* Insert @ctx into cache bucket @hash.  The cache takes its own reference
 * and marks the context CACHED.  Caller must hold the sec's ps_lock. */
static
void ctx_enhash_pf(struct ptlrpc_cli_ctx *ctx, struct hlist_head *hash)
{
	set_bit(PTLRPC_CTX_CACHED_BIT, &ctx->cc_flags);
	atomic_inc(&ctx->cc_refcount);
	hlist_add_head(&ctx->cc_cache, hash);
}
133
134
135
136
/*
 * Remove @ctx from the context cache, dropping the cache's reference.
 * Caller holds ps_lock.  If the cache held the last reference, the context
 * is parked on @freelist so the caller can destroy it after dropping the
 * spinlock (destruction may sleep); otherwise it is simply unhashed and
 * whoever holds the remaining references will free it on final put.
 */
static
void ctx_unhash_pf(struct ptlrpc_cli_ctx *ctx, struct hlist_head *freelist)
{
	LASSERT(spin_is_locked(&ctx->cc_sec->ps_lock));
	LASSERT(atomic_read(&ctx->cc_refcount) > 0);
	LASSERT(test_bit(PTLRPC_CTX_CACHED_BIT, &ctx->cc_flags));
	LASSERT(!hlist_unhashed(&ctx->cc_cache));

	clear_bit(PTLRPC_CTX_CACHED_BIT, &ctx->cc_flags);

	if (atomic_dec_and_test(&ctx->cc_refcount)) {
		/* refcount reached zero: reuse cc_cache linkage to queue the
		 * context for out-of-lock destruction */
		__hlist_del(&ctx->cc_cache);
		hlist_add_head(&ctx->cc_cache, freelist);
	} else {
		hlist_del_init(&ctx->cc_cache);
	}
}
154
155
156
157
158static
159int ctx_check_death_pf(struct ptlrpc_cli_ctx *ctx,
160 struct hlist_head *freelist)
161{
162 if (cli_ctx_check_death(ctx)) {
163 if (freelist)
164 ctx_unhash_pf(ctx, freelist);
165 return 1;
166 }
167
168 return 0;
169}
170
/* Death check for a context known to be in the cache; asserts the caller's
 * locking/refcount expectations before delegating to ctx_check_death_pf(). */
static inline
int ctx_check_death_locked_pf(struct ptlrpc_cli_ctx *ctx,
			      struct hlist_head *freelist)
{
	LASSERT(ctx->cc_sec);
	LASSERT(atomic_read(&ctx->cc_refcount) > 0);
	LASSERT(test_bit(PTLRPC_CTX_CACHED_BIT, &ctx->cc_flags));

	return ctx_check_death_pf(ctx, freelist);
}
181
182static inline
183int ctx_match_pf(struct ptlrpc_cli_ctx *ctx, struct vfs_cred *vcred)
184{
185
186 if (!ctx->cc_ops->match)
187 return 1;
188
189 return ctx->cc_ops->match(ctx, vcred);
190}
191
/*
 * Destroy every context parked on @head by ctx_unhash_pf().  Entries here
 * must already have refcount zero and be un-CACHED; called without ps_lock
 * held since destruction may sleep.
 */
static
void ctx_list_destroy_pf(struct hlist_head *head)
{
	struct ptlrpc_cli_ctx *ctx;

	while (!hlist_empty(head)) {
		ctx = hlist_entry(head->first, struct ptlrpc_cli_ctx,
				  cc_cache);

		LASSERT(atomic_read(&ctx->cc_refcount) == 0);
		LASSERT(test_bit(PTLRPC_CTX_CACHED_BIT,
				 &ctx->cc_flags) == 0);

		hlist_del_init(&ctx->cc_cache);
		ctx_destroy_pf(ctx->cc_sec, ctx);
	}
}
209
210
211
212
213
214static
215int gss_cli_ctx_validate_pf(struct ptlrpc_cli_ctx *ctx)
216{
217 if (ctx_check_death_pf(ctx, NULL))
218 return 1;
219 if (cli_ctx_is_ready(ctx))
220 return 0;
221 return 1;
222}
223
/*
 * ->die hook: expire @ctx and, under ps_lock, pull it out of the context
 * cache if it is still there, dropping the cache's reference.  The cache
 * reference can never be the last one here (the caller holds at least one),
 * hence the LBUG if the count were to hit zero.
 */
static
void gss_cli_ctx_die_pf(struct ptlrpc_cli_ctx *ctx, int grace)
{
	LASSERT(ctx->cc_sec);
	LASSERT(atomic_read(&ctx->cc_refcount) > 0);

	cli_ctx_expire(ctx);

	spin_lock(&ctx->cc_sec->ps_lock);

	if (test_and_clear_bit(PTLRPC_CTX_CACHED_BIT, &ctx->cc_flags)) {
		LASSERT(!hlist_unhashed(&ctx->cc_cache));
		LASSERT(atomic_read(&ctx->cc_refcount) > 1);

		hlist_del_init(&ctx->cc_cache);
		if (atomic_dec_and_test(&ctx->cc_refcount))
			LBUG();
	}

	spin_unlock(&ctx->cc_sec->ps_lock);
}
245
246
247
248
249
250static inline
251unsigned int ctx_hash_index(int hashsize, __u64 key)
252{
253 return (unsigned int) (key & ((__u64) hashsize - 1));
254}
255
/*
 * Install freshly-built context @new into the sec's context cache,
 * evicting (expire + unhash) any cached context matching the same
 * credential.  An evicted entry whose last reference was the cache's is
 * destroyed after the spinlock is dropped, via the local freelist.
 */
static
void gss_sec_ctx_replace_pf(struct gss_sec *gsec,
			    struct ptlrpc_cli_ctx *new)
{
	struct gss_sec_pipefs *gsec_pf;
	struct ptlrpc_cli_ctx *ctx;
	struct hlist_node *next;
	HLIST_HEAD(freelist);
	unsigned int hash;

	gsec_pf = container_of(gsec, struct gss_sec_pipefs, gsp_base);

	hash = ctx_hash_index(gsec_pf->gsp_chash_size,
			      (__u64) new->cc_vcred.vc_uid);
	LASSERT(hash < gsec_pf->gsp_chash_size);

	spin_lock(&gsec->gs_base.ps_lock);

	/* at most one cached context can match a given credential */
	hlist_for_each_entry_safe(ctx, next,
				  &gsec_pf->gsp_chash[hash], cc_cache) {
		if (!ctx_match_pf(ctx, &new->cc_vcred))
			continue;

		cli_ctx_expire(ctx);
		ctx_unhash_pf(ctx, &freelist);
		break;
	}

	ctx_enhash_pf(new, &gsec_pf->gsp_chash[hash]);

	spin_unlock(&gsec->gs_base.ps_lock);

	/* may sleep: must run outside ps_lock */
	ctx_list_destroy_pf(&freelist);
}
290
291static
292int gss_install_rvs_cli_ctx_pf(struct gss_sec *gsec,
293 struct ptlrpc_svc_ctx *svc_ctx)
294{
295 struct vfs_cred vcred;
296 struct ptlrpc_cli_ctx *cli_ctx;
297 int rc;
298
299 vcred.vc_uid = 0;
300 vcred.vc_gid = 0;
301
302 cli_ctx = ctx_create_pf(&gsec->gs_base, &vcred);
303 if (!cli_ctx)
304 return -ENOMEM;
305
306 rc = gss_copy_rvc_cli_ctx(cli_ctx, svc_ctx);
307 if (rc) {
308 ctx_destroy_pf(cli_ctx->cc_sec, cli_ctx);
309 return rc;
310 }
311
312 gss_sec_ctx_replace_pf(gsec, cli_ctx);
313 return 0;
314}
315
/*
 * Garbage-collect the context cache: walk every bucket and move dead
 * contexts whose last reference is the cache's onto @freelist (destroyed
 * by the caller outside the lock).  Caller holds ps_lock.  Also re-arms
 * the next GC deadline.
 */
static
void gss_ctx_cache_gc_pf(struct gss_sec_pipefs *gsec_pf,
			 struct hlist_head *freelist)
{
	struct ptlrpc_sec *sec;
	struct ptlrpc_cli_ctx *ctx;
	struct hlist_node *next;
	int i;

	sec = &gsec_pf->gsp_base.gs_base;

	CDEBUG(D_SEC, "do gc on sec %s@%p\n", sec->ps_policy->sp_name, sec);

	for (i = 0; i < gsec_pf->gsp_chash_size; i++) {
		hlist_for_each_entry_safe(ctx, next,
					  &gsec_pf->gsp_chash[i], cc_cache)
			ctx_check_death_locked_pf(ctx, freelist);
	}

	sec->ps_gc_next = cfs_time_current_sec() + sec->ps_gc_interval;
}
337
/*
 * ->create_sec hook: allocate a pipefs gss_sec with its context hash table
 * appended in the same allocation.
 *
 * Root-only and reverse secs only ever hold one (root) context, so their
 * hash collapses to a single bucket.  A reverse sec (@ctx != NULL) gets
 * its reverse client context installed immediately instead of the upcall
 * channel init.  Returns the embedded ptlrpc_sec or NULL on failure.
 */
static
struct ptlrpc_sec* gss_sec_create_pf(struct obd_import *imp,
				     struct ptlrpc_svc_ctx *ctx,
				     struct sptlrpc_flavor *sf)
{
	struct gss_sec_pipefs *gsec_pf;
	int alloc_size, hash_size, i;

#define GSS_SEC_PIPEFS_CTX_HASH_SIZE (32)

	if (ctx ||
	    sf->sf_flags & (PTLRPC_SEC_FL_ROOTONLY | PTLRPC_SEC_FL_REVERSE))
		hash_size = 1;
	else
		hash_size = GSS_SEC_PIPEFS_CTX_HASH_SIZE;

	/* hash buckets live in the flexible tail of the struct */
	alloc_size = sizeof(*gsec_pf) +
		     sizeof(struct hlist_head) * hash_size;

	OBD_ALLOC(gsec_pf, alloc_size);
	if (!gsec_pf)
		return NULL;

	gsec_pf->gsp_chash_size = hash_size;
	for (i = 0; i < hash_size; i++)
		INIT_HLIST_HEAD(&gsec_pf->gsp_chash[i]);

	if (gss_sec_create_common(&gsec_pf->gsp_base, &gss_policy_pipefs,
				  imp, ctx, sf))
		goto err_free;

	if (ctx == NULL) {
		if (gss_sec_pipe_upcall_init(&gsec_pf->gsp_base))
			goto err_destroy;
	} else {
		if (gss_install_rvs_cli_ctx_pf(&gsec_pf->gsp_base, ctx))
			goto err_destroy;
	}

	return &gsec_pf->gsp_base.gs_base;

err_destroy:
	gss_sec_destroy_common(&gsec_pf->gsp_base);
err_free:
	OBD_FREE(gsec_pf, alloc_size);
	return NULL;
}
385
/*
 * ->destroy_sec hook: tear down the upcall state and common sec state,
 * then free the combined allocation (struct + trailing hash buckets) made
 * by gss_sec_create_pf().  All contexts must already be gone.
 */
static
void gss_sec_destroy_pf(struct ptlrpc_sec *sec)
{
	struct gss_sec_pipefs *gsec_pf;
	struct gss_sec *gsec;

	CWARN("destroy %s@%p\n", sec->ps_policy->sp_name, sec);

	gsec = container_of(sec, struct gss_sec, gs_base);
	gsec_pf = container_of(gsec, struct gss_sec_pipefs, gsp_base);

	LASSERT(gsec_pf->gsp_chash);
	LASSERT(gsec_pf->gsp_chash_size);

	gss_sec_pipe_upcall_fini(gsec);

	gss_sec_destroy_common(gsec);

	/* size must match the alloc_size used in gss_sec_create_pf() */
	OBD_FREE(gsec, sizeof(*gsec_pf) +
		       sizeof(struct hlist_head) * gsec_pf->gsp_chash_size);
}
407
/*
 * ->lookup_ctx hook: find (or optionally create) the cached context
 * matching @vcred.
 *
 * Under ps_lock: run opportunistic GC if the deadline passed, scan the
 * credential's bucket skipping (and optionally reaping) dead entries, and
 * move a hit to the bucket head (MRU).  On a miss with @create set, the
 * lock is dropped, a context is allocated, and the whole lookup retried —
 * a racing creator may have installed one meanwhile, in which case our
 * spare is discarded via the freelist.  Reverse secs never create here.
 *
 * Returns the context with a reference held, or NULL.  A newly created
 * context has its refresh upcall kicked off before returning.
 */
static
struct ptlrpc_cli_ctx * gss_sec_lookup_ctx_pf(struct ptlrpc_sec *sec,
					      struct vfs_cred *vcred,
					      int create, int remove_dead)
{
	struct gss_sec *gsec;
	struct gss_sec_pipefs *gsec_pf;
	struct ptlrpc_cli_ctx *ctx = NULL, *new = NULL;
	struct hlist_head *hash_head;
	struct hlist_node *next;
	HLIST_HEAD(freelist);
	unsigned int hash, gc = 0, found = 0;

	might_sleep();

	gsec = container_of(sec, struct gss_sec, gs_base);
	gsec_pf = container_of(gsec, struct gss_sec_pipefs, gsp_base);

	hash = ctx_hash_index(gsec_pf->gsp_chash_size,
			      (__u64) vcred->vc_uid);
	hash_head = &gsec_pf->gsp_chash[hash];
	LASSERT(hash < gsec_pf->gsp_chash_size);

retry:
	spin_lock(&sec->ps_lock);

	/* gc_next == 0 means never do gc */
	if (remove_dead && sec->ps_gc_next &&
	    cfs_time_after(cfs_time_current_sec(), sec->ps_gc_next)) {
		gss_ctx_cache_gc_pf(gsec_pf, &freelist);
		gc = 1;
	}

	hlist_for_each_entry_safe(ctx, next, hash_head, cc_cache) {
		/* GC above already culled the dead; don't check twice */
		if (gc == 0 &&
		    ctx_check_death_locked_pf(ctx,
					      remove_dead ? &freelist : NULL))
			continue;

		if (ctx_match_pf(ctx, vcred)) {
			found = 1;
			break;
		}
	}

	if (found) {
		if (new && new != ctx) {
			/* lost the race: ditch the spare we allocated.
			 * Its refcount is zero, so the freelist path frees it. */
			hlist_add_head(&new->cc_cache, &freelist);
			new = NULL;
		}

		/* hot node goes to the bucket head (MRU ordering) */
		if (hash_head->first != &ctx->cc_cache) {
			__hlist_del(&ctx->cc_cache);
			hlist_add_head(&ctx->cc_cache, hash_head);
		}
	} else {
		/* don't allocate for reverse sec */
		if (sec_is_reverse(sec)) {
			spin_unlock(&sec->ps_lock);
			return NULL;
		}

		if (new) {
			ctx_enhash_pf(new, hash_head);
			ctx = new;
		} else if (create) {
			spin_unlock(&sec->ps_lock);
			new = ctx_create_pf(sec, vcred);
			if (new) {
				clear_bit(PTLRPC_CTX_NEW_BIT, &new->cc_flags);
				goto retry;
			}
		} else {
			ctx = NULL;
		}
	}

	/* hold a ref for the caller */
	if (ctx)
		atomic_inc(&ctx->cc_refcount);

	spin_unlock(&sec->ps_lock);

	/* a freshly installed context needs its upcall started */
	if (new) {
		LASSERT(new == ctx);
		gss_cli_ctx_refresh_pf(new);
	}

	ctx_list_destroy_pf(&freelist);
	return ctx;
}
502
/*
 * ->release_ctx hook: final put of an already-unhashed context.  When the
 * release is not synchronous the UPTODATE flag is cleared so teardown is
 * treated as a context-destroy rather than a graceful logout
 * (NOTE(review): inferred from flag usage elsewhere — confirm).
 */
static
void gss_sec_release_ctx_pf(struct ptlrpc_sec *sec,
			    struct ptlrpc_cli_ctx *ctx,
			    int sync)
{
	LASSERT(test_bit(PTLRPC_CTX_CACHED_BIT, &ctx->cc_flags) == 0);
	LASSERT(hlist_unhashed(&ctx->cc_cache));

	if (!sync)
		clear_bit(PTLRPC_CTX_UPTODATE_BIT, &ctx->cc_flags);

	ctx_destroy_pf(sec, ctx);
}
519
520
521
522
523
524
525
526
527
528
529
/*
 * ->flush_ctx_cache hook: evict cached contexts belonging to @uid
 * (uid == -1 flushes every uid).  A context still referenced elsewhere
 * counts as busy and is only evicted when @force is set.  Evicted entries
 * are marked DEAD (and, without @grace, stripped of UPTODATE so no
 * graceful logout is attempted) and destroyed outside the lock.
 *
 * Returns the number of busy contexts encountered.
 */
static
int gss_sec_flush_ctx_cache_pf(struct ptlrpc_sec *sec,
			       uid_t uid,
			       int grace, int force)
{
	struct gss_sec *gsec;
	struct gss_sec_pipefs *gsec_pf;
	struct ptlrpc_cli_ctx *ctx;
	struct hlist_node *next;
	HLIST_HEAD(freelist);
	int i, busy = 0;

	might_sleep_if(grace);

	gsec = container_of(sec, struct gss_sec, gs_base);
	gsec_pf = container_of(gsec, struct gss_sec_pipefs, gsp_base);

	spin_lock(&sec->ps_lock);
	for (i = 0; i < gsec_pf->gsp_chash_size; i++) {
		hlist_for_each_entry_safe(ctx, next,
					  &gsec_pf->gsp_chash[i],
					  cc_cache) {
			LASSERT(atomic_read(&ctx->cc_refcount) > 0);

			if (uid != -1 && uid != ctx->cc_vcred.vc_uid)
				continue;

			if (atomic_read(&ctx->cc_refcount) > 1) {
				/* someone else still uses it */
				busy++;
				if (!force)
					continue;

				CWARN("flush busy(%d) ctx %p(%u->%s) by force, "
				      "grace %d\n",
				      atomic_read(&ctx->cc_refcount),
				      ctx, ctx->cc_vcred.vc_uid,
				      sec2target_str(ctx->cc_sec), grace);
			}
			ctx_unhash_pf(ctx, &freelist);

			set_bit(PTLRPC_CTX_DEAD_BIT, &ctx->cc_flags);
			if (!grace)
				clear_bit(PTLRPC_CTX_UPTODATE_BIT,
					  &ctx->cc_flags);
		}
	}
	spin_unlock(&sec->ps_lock);

	/* may sleep: must run outside ps_lock */
	ctx_list_destroy_pf(&freelist);
	return busy;
}
581
582
583
584
585
/* Server-side ->accept hook: delegate to the generic GSS accept with this
 * file's policy so replies are built under gss.pipefs. */
static
int gss_svc_accept_pf(struct ptlrpc_request *req)
{
	return gss_svc_accept(&gss_policy_pipefs, req);
}
591
/*
 * Server-side ->install_rctx hook: install the reverse client context for
 * @ctx into the import's current sec.  Takes and releases a sec reference
 * around the installation.  Returns 0 or negative errno.
 */
static
int gss_svc_install_rctx_pf(struct obd_import *imp,
			    struct ptlrpc_svc_ctx *ctx)
{
	struct ptlrpc_sec *sec = sptlrpc_import_sec_ref(imp);
	int rc;

	LASSERT(sec);

	rc = gss_install_rvs_cli_ctx_pf(sec2gsec(sec), ctx);
	sptlrpc_sec_put(sec);

	return rc;
}
606
607
608
609
610
/* rpc_pipefs mount points for the GSS upcall pipes */
#define LUSTRE_PIPE_ROOT        "/lustre"
#define LUSTRE_PIPE_KRB5        LUSTRE_PIPE_ROOT"/krb5"

/* Wire payload handed to lgssd through the pipe.  Layout must stay in
 * sync with the user-space daemon. */
struct gss_upcall_msg_data {
	__u32 gum_seq;		/* sequence number matching the downcall */
	__u32 gum_uid;		/* credential uid to establish a ctx for */
	__u32 gum_gid;		/* currently unused, always 0 */
	__u32 gum_svc;		/* MDS/OSS... */
	__u64 gum_nid;		/* peer NID of the target */
	__u8 gum_obd[64];	/* client obd name (may be unterminated) */
};
622
/* In-kernel tracking of one pending upcall; lives on upcall_lists[] until
 * the matching downcall (or pipe teardown) removes it. */
struct gss_upcall_msg {
	struct rpc_pipe_msg gum_base;		/* what rpc_pipefs sees */
	atomic_t gum_refcount;			/* freed by gss_release_msg() */
	struct list_head gum_list;		/* link on upcall_lists[idx] */
	__u32 gum_mechidx;			/* index into per-mech tables */
	struct gss_sec *gum_gsec;		/* owning sec */
	struct gss_cli_ctx *gum_gctx;		/* ctx being refreshed (ref held) */
	struct gss_upcall_msg_data gum_data;	/* payload sent to lgssd */
};
632
/* Monotonic sequence source used to pair upcalls with their downcalls. */
static atomic_t upcall_seq = ATOMIC_INIT(0);

/* Next upcall sequence number; wraps at 2^32, which is harmless since only
 * concurrently-pending upcalls must be distinct. */
static inline
__u32 upcall_get_sequence(void)
{
	return (__u32) atomic_inc_return(&upcall_seq);
}
640
/* Indices into the per-mechanism pipe/list/lock tables below.  Only krb5
 * is implemented. */
enum mech_idx_t {
	MECH_KRB5 = 0,
	MECH_MAX
};

/* Translate a mechanism name to its table index; asserts on anything but
 * "krb5" since that is the only supported mech. */
static inline
__u32 mech_name2idx(const char *name)
{
	LASSERT(!strcmp(name, "krb5"));
	return MECH_KRB5;
}
652
653
/* Per-mechanism upcall state, indexed by enum mech_idx_t: the pipe dentry,
 * the list of pending upcall messages, and the lock guarding that list. */
static struct dentry *de_pipes[MECH_MAX] = { NULL, };

static struct list_head upcall_lists[MECH_MAX];

static spinlock_t upcall_locks[MECH_MAX];
659
/* Acquire the pending-upcall list lock for mechanism @idx. */
static inline
void upcall_list_lock(int idx)
{
	spin_lock(&upcall_locks[idx]);
}
665
/* Release the pending-upcall list lock for mechanism @idx. */
static inline
void upcall_list_unlock(int idx)
{
	spin_unlock(&upcall_locks[idx]);
}
671
/* Link @msg onto its mechanism's pending-upcall list so the downcall can
 * find it by sequence number. */
static
void upcall_msg_enlist(struct gss_upcall_msg *msg)
{
	__u32 idx = msg->gum_mechidx;

	upcall_list_lock(idx);
	list_add(&msg->gum_list, &upcall_lists[idx]);
	upcall_list_unlock(idx);
}
681
/* Unlink @msg from its mechanism's pending-upcall list (safe if already
 * delisted: list_del_init on an initialized-empty entry is a no-op only
 * when gum_list was re-initialized — here the caller guarantees it is
 * still linked). */
static
void upcall_msg_delist(struct gss_upcall_msg *msg)
{
	__u32 idx = msg->gum_mechidx;

	upcall_list_lock(idx);
	list_del_init(&msg->gum_list);
	upcall_list_unlock(idx);
}
691
692
693
694
695
/*
 * Drop a reference on @gmsg; on the final put, wake any waiters on the
 * associated context, release the context reference taken at creation,
 * and free the message.  The message must already be off both the pending
 * list and the pipe queue.
 */
static
void gss_release_msg(struct gss_upcall_msg *gmsg)
{
	LASSERT(atomic_read(&gmsg->gum_refcount) > 0);

	if (!atomic_dec_and_test(&gmsg->gum_refcount)) {
		return;
	}

	if (gmsg->gum_gctx) {
		sptlrpc_cli_ctx_wakeup(&gmsg->gum_gctx->gc_base);
		sptlrpc_cli_ctx_put(&gmsg->gum_gctx->gc_base, 1);
		gmsg->gum_gctx = NULL;
	}

	LASSERT(list_empty(&gmsg->gum_list));
	LASSERT(list_empty(&gmsg->gum_base.list));
	OBD_FREE_PTR(gmsg);
}
715
/*
 * Remove @gmsg from its pending list and drop the list's reference.
 * Caller holds the corresponding upcall lock.  A message already delisted
 * is left alone, which makes concurrent downcall/teardown races safe.
 * The list reference can never be the last one (caller holds its own).
 */
static
void gss_unhash_msg_nolock(struct gss_upcall_msg *gmsg)
{
	__u32 idx = gmsg->gum_mechidx;

	LASSERT(idx < MECH_MAX);
	LASSERT(spin_is_locked(&upcall_locks[idx]));

	if (list_empty(&gmsg->gum_list))
		return;

	list_del_init(&gmsg->gum_list);
	LASSERT(atomic_read(&gmsg->gum_refcount) > 1);
	atomic_dec(&gmsg->gum_refcount);
}
731
/* Locked wrapper around gss_unhash_msg_nolock(). */
static
void gss_unhash_msg(struct gss_upcall_msg *gmsg)
{
	__u32 idx = gmsg->gum_mechidx;

	LASSERT(idx < MECH_MAX);
	upcall_list_lock(idx);
	gss_unhash_msg_nolock(gmsg);
	upcall_list_unlock(idx);
}
742
/* Mark the context behind a failed upcall as expired with a fatal error,
 * so waiters give up instead of retrying forever. */
static
void gss_msg_fail_ctx(struct gss_upcall_msg *gmsg)
{
	if (gmsg->gum_gctx) {
		struct ptlrpc_cli_ctx *ctx = &gmsg->gum_gctx->gc_base;

		LASSERT(atomic_read(&ctx->cc_refcount) > 0);
		sptlrpc_cli_ctx_expire(ctx);
		set_bit(PTLRPC_CTX_ERROR_BIT, &ctx->cc_flags);
	}
}
754
/*
 * Look up the pending upcall with sequence @seq on mechanism @mechidx.
 * Returns the message with an extra reference held (caller releases via
 * gss_release_msg()), or NULL if no upcall with that sequence is pending.
 */
static
struct gss_upcall_msg * gss_find_upcall(__u32 mechidx, __u32 seq)
{
	struct gss_upcall_msg *gmsg;

	upcall_list_lock(mechidx);
	list_for_each_entry(gmsg, &upcall_lists[mechidx], gum_list) {
		if (gmsg->gum_data.gum_seq != seq)
			continue;

		LASSERT(atomic_read(&gmsg->gum_refcount) > 0);
		LASSERT(gmsg->gum_mechidx == mechidx);

		atomic_inc(&gmsg->gum_refcount);
		upcall_list_unlock(mechidx);
		return gmsg;
	}
	upcall_list_unlock(mechidx);
	return NULL;
}
775
776static
777int simple_get_bytes(char **buf, __u32 *buflen, void *res, __u32 reslen)
778{
779 if (*buflen < reslen) {
780 CERROR("buflen %u < %u\n", *buflen, reslen);
781 return -EINVAL;
782 }
783
784 memcpy(res, *buf, reslen);
785 *buf += reslen;
786 *buflen -= reslen;
787 return 0;
788}
789
790
791
792
793
794static
795ssize_t gss_pipe_upcall(struct file *filp, struct rpc_pipe_msg *msg,
796 char *dst, size_t buflen)
797{
798 char *data = (char *)msg->data + msg->copied;
799 ssize_t mlen = msg->len;
800 ssize_t left;
801
802 if (mlen > buflen)
803 mlen = buflen;
804 left = copy_to_user(dst, data, mlen);
805 if (left < 0) {
806 msg->errno = left;
807 return left;
808 }
809 mlen -= left;
810 msg->copied += mlen;
811 msg->errno = 0;
812 return mlen;
813}
814
/*
 * rpc_pipe ->downcall: parse lgssd's reply and complete the matching
 * upcall.
 *
 * Reply layout (all parsed with simple_get_bytes / rawobj_extract_local):
 *   seq, timeout, window size, then
 *     - if window == 0 (negotiation failed): rpc error, gss error
 *     - else: context handle rawobj, exported security context rawobj
 * On success the context is marked uptodate; otherwise it is expired and,
 * unless the error is a retryable -ERESTART, flagged as a fatal error.
 *
 * Note: the return value is always @mlen (see the assignment after
 * out_free) — the pipe must consume the whole write whether or not
 * parsing succeeded, otherwise lgssd's stream position would be wrong.
 */
static
ssize_t gss_pipe_downcall(struct file *filp, const char *src, size_t mlen)
{
	struct rpc_inode *rpci = RPC_I(filp->f_dentry->d_inode);
	struct gss_upcall_msg *gss_msg;
	struct ptlrpc_cli_ctx *ctx;
	struct gss_cli_ctx *gctx = NULL;
	char *buf, *data;
	int datalen;
	int timeout, rc;
	__u32 mechidx, seq, gss_err;

	/* the mech index was stashed in the pipe inode's private pointer
	 * when the pipe was created */
	mechidx = (__u32) (long) rpci->private;
	LASSERT(mechidx < MECH_MAX);

	OBD_ALLOC(buf, mlen);
	if (!buf)
		return -ENOMEM;

	if (copy_from_user(buf, src, mlen)) {
		CERROR("failed copy user space data\n");
		GOTO(out_free, rc = -EFAULT);
	}
	data = buf;
	datalen = mlen;

	/* sequence number first: it pairs this downcall with its upcall */
	if (simple_get_bytes(&data, &datalen, &seq, sizeof(seq))) {
		CERROR("fail to get seq\n");
		GOTO(out_free, rc = -EFAULT);
	}

	gss_msg = gss_find_upcall(mechidx, seq);
	if (!gss_msg) {
		CERROR("upcall %u has aborted earlier\n", seq);
		GOTO(out_free, rc = -EINVAL);
	}

	/* take the message off the pending list; we now hold the ref from
	 * gss_find_upcall() until out_msg */
	gss_unhash_msg(gss_msg);
	gctx = gss_msg->gum_gctx;
	LASSERT(gctx);
	LASSERT(atomic_read(&gctx->gc_base.cc_refcount) > 0);

	/* timeout is currently parsed but otherwise unused */
	if (simple_get_bytes(&data, &datalen, &timeout, sizeof(timeout)))
		GOTO(out_msg, rc = -EFAULT);

	/* lgssd reports the negotiated sequence window here */
	if (simple_get_bytes(&data, &datalen, &gctx->gc_win,
			     sizeof(gctx->gc_win)))
		GOTO(out_msg, rc = -EFAULT);

	if (gctx->gc_win == 0) {
		/* negotiation failed: the rest of the message is the rpc
		 * error followed by the gss error */
		if (simple_get_bytes(&data, &datalen, &rc, sizeof(rc)))
			GOTO(out_msg, rc = -EFAULT);
		if (simple_get_bytes(&data, &datalen, &gss_err,sizeof(gss_err)))
			GOTO(out_msg, rc = -EFAULT);

		if (rc == 0 && gss_err == GSS_S_COMPLETE) {
			CWARN("both rpc & gss error code not set\n");
			rc = -EPERM;
		}
	} else {
		rawobj_t tmpobj;

		/* handle that identifies this context on the server */
		if (rawobj_extract_local(&tmpobj, (__u32 **) &data, &datalen))
			GOTO(out_msg, rc = -EFAULT);
		if (rawobj_dup(&gctx->gc_handle, &tmpobj))
			GOTO(out_msg, rc = -ENOMEM);

		/* exported mech context blob, imported in place */
		if (rawobj_extract_local(&tmpobj, (__u32 **) &data, &datalen))
			GOTO(out_msg, rc = -EFAULT);
		gss_err = lgss_import_sec_context(&tmpobj,
						  gss_msg->gum_gsec->gs_mech,
						  &gctx->gc_mechctx);
		rc = 0;
	}

	if (likely(rc == 0 && gss_err == GSS_S_COMPLETE)) {
		gss_cli_ctx_uptodate(gctx);
	} else {
		ctx = &gctx->gc_base;
		sptlrpc_cli_ctx_expire(ctx);
		/* -ERESTART with a clean gss status is retryable; anything
		 * else is a hard failure */
		if (rc != -ERESTART || gss_err != GSS_S_COMPLETE)
			set_bit(PTLRPC_CTX_ERROR_BIT, &ctx->cc_flags);

		CERROR("refresh ctx %p(uid %d) failed: %d/0x%08x: %s\n",
		       ctx, ctx->cc_vcred.vc_uid, rc, gss_err,
		       test_bit(PTLRPC_CTX_ERROR_BIT, &ctx->cc_flags) ?
		       "fatal error" : "non-fatal");
	}

	rc = mlen;

out_msg:
	gss_release_msg(gss_msg);

out_free:
	OBD_FREE(buf, mlen);

	/* hack pipefs: always report the full length consumed, even on a
	 * parse error, so lgssd's write position stays consistent */
	rc = mlen;
	return rc;
}
932
/*
 * rpc_pipe ->destroy_msg: called when the pipe layer discards an upcall
 * message.  A non-negative errno means normal completion (the downcall
 * already handled it) — nothing to do.  On error, delist the message,
 * fail its context, warn (rate-limited) when lgssd appears to be absent,
 * and drop the pending-list reference.
 */
static
void gss_pipe_destroy_msg(struct rpc_pipe_msg *msg)
{
	struct gss_upcall_msg *gmsg;
	struct gss_upcall_msg_data *gumd;
	static cfs_time_t ratelimit = 0;

	LASSERT(list_empty(&msg->list));

	/* normal path: downcall already consumed the message */
	if (msg->errno >= 0) {
		return;
	}

	gmsg = container_of(msg, struct gss_upcall_msg, gum_base);
	gumd = &gmsg->gum_data;
	LASSERT(atomic_read(&gmsg->gum_refcount) > 0);

	CERROR("failed msg %p (seq %u, uid %u, svc %u, nid "LPX64", obd %.*s): "
	       "errno %d\n", msg, gumd->gum_seq, gumd->gum_uid, gumd->gum_svc,
	       gumd->gum_nid, (int) sizeof(gumd->gum_obd),
	       gumd->gum_obd, msg->errno);

	/* hold a temporary ref so the unhash's put cannot be the last one
	 * while we are still using gmsg */
	atomic_inc(&gmsg->gum_refcount);
	gss_unhash_msg(gmsg);
	if (msg->errno == -ETIMEDOUT || msg->errno == -EPIPE) {
		cfs_time_t now = cfs_time_current_sec();

		/* warn at most once per 15 seconds */
		if (cfs_time_after(now, ratelimit)) {
			CWARN("upcall timed out, is lgssd running?\n");
			ratelimit = now + 15;
		}
	}
	gss_msg_fail_ctx(gmsg);
	gss_release_msg(gmsg);
}
969
/*
 * rpc_pipe ->release_pipe: the pipe was closed (lgssd exited).  Fail and
 * drain every upcall still pending on this mechanism's list.  The list
 * lock is dropped around gss_release_msg() because the final put may
 * sleep; the loop re-checks emptiness each iteration, so concurrent
 * removals are safe.
 */
static
void gss_pipe_release(struct inode *inode)
{
	struct rpc_inode *rpci = RPC_I(inode);
	__u32 idx;

	idx = (__u32) (long) rpci->private;
	LASSERT(idx < MECH_MAX);

	upcall_list_lock(idx);
	while (!list_empty(&upcall_lists[idx])) {
		struct gss_upcall_msg *gmsg;
		struct gss_upcall_msg_data *gumd;

		gmsg = list_entry(upcall_lists[idx].next,
				  struct gss_upcall_msg, gum_list);
		gumd = &gmsg->gum_data;
		LASSERT(list_empty(&gmsg->gum_base.list));

		CERROR("failing remaining msg %p:seq %u, uid %u, svc %u, "
		       "nid "LPX64", obd %.*s\n", gmsg,
		       gumd->gum_seq, gumd->gum_uid, gumd->gum_svc,
		       gumd->gum_nid, (int) sizeof(gumd->gum_obd),
		       gumd->gum_obd);

		gmsg->gum_base.errno = -EPIPE;
		/* temporary ref so delisting's put is not the last one */
		atomic_inc(&gmsg->gum_refcount);
		gss_unhash_msg_nolock(gmsg);

		gss_msg_fail_ctx(gmsg);

		upcall_list_unlock(idx);
		gss_release_msg(gmsg);
		upcall_list_lock(idx);
	}
	upcall_list_unlock(idx);
}
1007
/* rpc_pipefs callbacks wiring the krb5 pipe to the handlers above. */
static struct rpc_pipe_ops gss_upcall_ops = {
	.upcall = gss_pipe_upcall,
	.downcall = gss_pipe_downcall,
	.destroy_msg = gss_pipe_destroy_msg,
	.release_pipe = gss_pipe_release,
};
1014
1015
1016
1017
1018
1019static
1020int gss_ctx_refresh_pf(struct ptlrpc_cli_ctx *ctx)
1021{
1022 struct obd_import *imp;
1023 struct gss_sec *gsec;
1024 struct gss_upcall_msg *gmsg;
1025 int rc = 0;
1026
1027 might_sleep();
1028
1029 LASSERT(ctx->cc_sec);
1030 LASSERT(ctx->cc_sec->ps_import);
1031 LASSERT(ctx->cc_sec->ps_import->imp_obd);
1032
1033 imp = ctx->cc_sec->ps_import;
1034 if (!imp->imp_connection) {
1035 CERROR("import has no connection set\n");
1036 return -EINVAL;
1037 }
1038
1039 gsec = container_of(ctx->cc_sec, struct gss_sec, gs_base);
1040
1041 OBD_ALLOC_PTR(gmsg);
1042 if (!gmsg)
1043 return -ENOMEM;
1044
1045
1046 INIT_LIST_HEAD(&gmsg->gum_base.list);
1047 gmsg->gum_base.data = &gmsg->gum_data;
1048 gmsg->gum_base.len = sizeof(gmsg->gum_data);
1049 gmsg->gum_base.copied = 0;
1050 gmsg->gum_base.errno = 0;
1051
1052
1053 atomic_set(&gmsg->gum_refcount, 1);
1054 gmsg->gum_mechidx = mech_name2idx(gsec->gs_mech->gm_name);
1055 gmsg->gum_gsec = gsec;
1056 gmsg->gum_gctx = container_of(sptlrpc_cli_ctx_get(ctx),
1057 struct gss_cli_ctx, gc_base);
1058 gmsg->gum_data.gum_seq = upcall_get_sequence();
1059 gmsg->gum_data.gum_uid = ctx->cc_vcred.vc_uid;
1060 gmsg->gum_data.gum_gid = 0;
1061 gmsg->gum_data.gum_svc = import_to_gss_svc(imp);
1062 gmsg->gum_data.gum_nid = imp->imp_connection->c_peer.nid;
1063 strncpy(gmsg->gum_data.gum_obd, imp->imp_obd->obd_name,
1064 sizeof(gmsg->gum_data.gum_obd));
1065
1066
1067
1068 if (ctx->cc_flags & PTLRPC_CTX_STATUS_MASK) {
1069 CWARN("ctx %p(%u->%s) was set flags %lx unexpectedly\n",
1070 ctx, ctx->cc_vcred.vc_uid, sec2target_str(ctx->cc_sec),
1071 ctx->cc_flags);
1072
1073 LASSERT(!(ctx->cc_flags & PTLRPC_CTX_UPTODATE));
1074 ctx->cc_flags |= PTLRPC_CTX_DEAD | PTLRPC_CTX_ERROR;
1075
1076 rc = -EIO;
1077 goto err_free;
1078 }
1079
1080 upcall_msg_enlist(gmsg);
1081
1082 rc = rpc_queue_upcall(de_pipes[gmsg->gum_mechidx]->d_inode,
1083 &gmsg->gum_base);
1084 if (rc) {
1085 CERROR("rpc_queue_upcall failed: %d\n", rc);
1086
1087 upcall_msg_delist(gmsg);
1088 goto err_free;
1089 }
1090
1091 return 0;
1092err_free:
1093 OBD_FREE_PTR(gmsg);
1094 return rc;
1095}
1096
1097static
1098int gss_cli_ctx_refresh_pf(struct ptlrpc_cli_ctx *ctx)
1099{
1100
1101
1102 if (ctx->cc_vcred.vc_uid == 0) {
1103 struct gss_sec *gsec;
1104
1105 gsec = container_of(ctx->cc_sec, struct gss_sec, gs_base);
1106 gsec->gs_rvs_hdl = gss_get_next_ctx_index();
1107 }
1108
1109 return gss_ctx_refresh_pf(ctx);
1110}
1111
1112
1113
1114
1115
/* Context operations for pipefs contexts: refresh/validate/die are the
 * pipefs-specific implementations above; crypto ops are the shared GSS
 * implementations. */
static struct ptlrpc_ctx_ops gss_pipefs_ctxops = {
	.match = gss_cli_ctx_match,
	.refresh = gss_cli_ctx_refresh_pf,
	.validate = gss_cli_ctx_validate_pf,
	.die = gss_cli_ctx_die_pf,
	.sign = gss_cli_ctx_sign,
	.verify = gss_cli_ctx_verify,
	.seal = gss_cli_ctx_seal,
	.unseal = gss_cli_ctx_unseal,
	.wrap_bulk = gss_cli_ctx_wrap_bulk,
	.unwrap_bulk = gss_cli_ctx_unwrap_bulk,
};
1128
/* Client-side sec operations: lifecycle and context-cache management are
 * pipefs-specific; buffer management is the shared GSS implementation. */
static struct ptlrpc_sec_cops gss_sec_pipefs_cops = {
	.create_sec = gss_sec_create_pf,
	.destroy_sec = gss_sec_destroy_pf,
	.kill_sec = gss_sec_kill,
	.lookup_ctx = gss_sec_lookup_ctx_pf,
	.release_ctx = gss_sec_release_ctx_pf,
	.flush_ctx_cache = gss_sec_flush_ctx_cache_pf,
	.install_rctx = gss_sec_install_rctx,
	.alloc_reqbuf = gss_alloc_reqbuf,
	.free_reqbuf = gss_free_reqbuf,
	.alloc_repbuf = gss_alloc_repbuf,
	.free_repbuf = gss_free_repbuf,
	.enlarge_reqbuf = gss_enlarge_reqbuf,
};
1143
/* Server-side sec operations; only accept and install_rctx need the
 * pipefs-specific wrappers. */
static struct ptlrpc_sec_sops gss_sec_pipefs_sops = {
	.accept = gss_svc_accept_pf,
	.invalidate_ctx = gss_svc_invalidate_ctx,
	.alloc_rs = gss_svc_alloc_rs,
	.authorize = gss_svc_authorize,
	.free_rs = gss_svc_free_rs,
	.free_ctx = gss_svc_free_ctx,
	.unwrap_bulk = gss_svc_unwrap_bulk,
	.wrap_bulk = gss_svc_wrap_bulk,
	.install_rctx = gss_svc_install_rctx_pf,
};
1155
/* The gss.pipefs security policy registered with sptlrpc at module init. */
static struct ptlrpc_sec_policy gss_policy_pipefs = {
	.sp_owner = THIS_MODULE,
	.sp_name = "gss.pipefs",
	.sp_policy = SPTLRPC_POLICY_GSS_PIPEFS,
	.sp_cops = &gss_sec_pipefs_cops,
	.sp_sops = &gss_sec_pipefs_sops,
};
1163
1164static
1165int __init gss_init_pipefs_upcall(void)
1166{
1167 struct dentry *de;
1168
1169
1170 de = rpc_mkdir(LUSTRE_PIPE_ROOT, NULL);
1171 if (IS_ERR(de) && PTR_ERR(de) != -EEXIST) {
1172 CERROR("Failed to create gss pipe dir: %ld\n", PTR_ERR(de));
1173 return PTR_ERR(de);
1174 }
1175
1176
1177
1178
1179
1180 de = rpc_mkpipe(LUSTRE_PIPE_KRB5, (void *) MECH_KRB5, &gss_upcall_ops,
1181 RPC_PIPE_WAIT_FOR_OPEN);
1182 if (!de || IS_ERR(de)) {
1183 CERROR("failed to make rpc_pipe %s: %ld\n",
1184 LUSTRE_PIPE_KRB5, PTR_ERR(de));
1185 rpc_rmdir(LUSTRE_PIPE_ROOT);
1186 return PTR_ERR(de);
1187 }
1188
1189 de_pipes[MECH_KRB5] = de;
1190 INIT_LIST_HEAD(&upcall_lists[MECH_KRB5]);
1191 spin_lock_init(&upcall_locks[MECH_KRB5]);
1192
1193 return 0;
1194}
1195
/*
 * Tear down the upcall pipes created by gss_init_pipefs_upcall().  All
 * pending upcalls must already be drained (asserted per mechanism) since
 * no new upcalls can be issued once the policy is being unloaded.
 */
static
void __exit gss_exit_pipefs_upcall(void)
{
	__u32 i;

	for (i = 0; i < MECH_MAX; i++) {
		LASSERT(list_empty(&upcall_lists[i]));

		de_pipes[i] = NULL;
	}

	rpc_unlink(LUSTRE_PIPE_KRB5);
	rpc_rmdir(LUSTRE_PIPE_ROOT);
}
1211
1212int __init gss_init_pipefs(void)
1213{
1214 int rc;
1215
1216 rc = gss_init_pipefs_upcall();
1217 if (rc)
1218 return rc;
1219
1220 rc = sptlrpc_register_policy(&gss_policy_pipefs);
1221 if (rc) {
1222 gss_exit_pipefs_upcall();
1223 return rc;
1224 }
1225
1226 return 0;
1227}
1228
/* Module exit point: tear down the upcall pipes, then unregister the
 * policy from sptlrpc. */
void __exit gss_exit_pipefs(void)
{
	gss_exit_pipefs_upcall();
	sptlrpc_unregister_policy(&gss_policy_pipefs);
}
1234