linux/drivers/staging/lustre/lustre/ptlrpc/gss/gss_svc_upcall.c
/*
 * Modifications for Lustre
 *
 * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
 *
 * Copyright (c) 2011, 2012, Intel Corporation.
 *
 * Author: Eric Mei <ericm@clusterfs.com>
 */

/*
 * Neil Brown <neilb@cse.unsw.edu.au>
 * J. Bruce Fields <bfields@umich.edu>
 * Andy Adamson <andros@umich.edu>
 * Dug Song <dugsong@monkey.org>
 *
 * RPCSEC_GSS server authentication.
 * This implements RPCSEC_GSS as defined in rfc2203 (rpcsec_gss) and rfc2078
 * (gssapi)
 *
 * RPCSEC_GSS involves three stages:
 *  1/ context creation
 *  2/ data exchange
 *  3/ context destruction
 *
 * Context creation is handled largely by upcalls to user-space.
 *  In particular, GSS_Accept_sec_context is handled by an upcall.
 * Data exchange is handled entirely within the kernel.
 *  In particular, GSS_GetMIC, GSS_VerifyMIC, GSS_Seal and GSS_Unseal are in-kernel.
 * Context destruction is handled in-kernel.
 *  GSS_Delete_sec_context is in-kernel.
 *
 * Context creation is initiated by an RPCSEC_GSS_INIT request arriving.
 * The context handle and gss_token are used as a key into the rpcsec_init cache.
 * The content of this cache includes some of the outputs of GSS_Accept_sec_context,
 * namely major_status, minor_status, context_handle and reply_token.
 * These are sent back to the client.
 * Sequence window management is handled by the kernel.  The window size is
 * currently a compile-time constant.
 *
 * When user-space is happy that a context is established, it places an entry
 * in the rpcsec_context cache. The key for this cache is the context_handle.
 * The content includes:
 *   uid/gidlist - for determining access rights
 *   mechanism type
 *   mechanism specific information, such as a key
 *
 */

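/*
 * Two sunrpc caches are registered below: "auth.sptlrpc.init" (struct rsi)
 * carries the RPCSEC_GSS_INIT negotiation through an upcall/downcall pipe to
 * the user-space gss daemon (lsvcgssd), and "auth.sptlrpc.context"
 * (struct rsc) holds the established server-side contexts that the daemon
 * installs afterwards.
 */
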
#define DEBUG_SUBSYSTEM S_SEC
#include <linux/types.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/hash.h>
#include <linux/mutex.h>
#include <linux/sunrpc/cache.h>

#include <obd.h>
#include <obd_class.h>
#include <obd_support.h>
#include <lustre/lustre_idl.h>
#include <lustre_net.h>
#include <lustre_import.h>
#include <lustre_sec.h>

#include "gss_err.h"
#include "gss_internal.h"
#include "gss_api.h"

#define GSS_SVC_UPCALL_TIMEOUT  (20)

static spinlock_t __ctx_index_lock;
static __u64 __ctx_index;

__u64 gss_get_next_ctx_index(void)
{
        __u64 idx;

        spin_lock(&__ctx_index_lock);
        idx = __ctx_index++;
        spin_unlock(&__ctx_index_lock);

        return idx;
}

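/*
 * Hash an arbitrary memory buffer down to @bits bits: bytes are packed into
 * an unsigned long which is folded into the hash with cfs_hash_long() every
 * sizeof(long) bytes; the buffer length is mixed in as a final byte so that
 * buffers differing only in length hash differently.
 */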
static inline unsigned long hash_mem(char *buf, int length, int bits)
{
        unsigned long hash = 0;
        unsigned long l = 0;
        int len = 0;
        unsigned char c;

        do {
                if (len == length) {
                        c = (char) len;
                        len = -1;
                } else
                        c = *buf++;

                l = (l << 8) | c;
                len++;

                if ((len & (BITS_PER_LONG/8-1)) == 0)
                        hash = cfs_hash_long(hash^l, BITS_PER_LONG);
        } while (len);

        return hash >> (BITS_PER_LONG - bits);
}

/****************************************
 * rsi cache                            *
 ****************************************/

#define RSI_HASHBITS    (6)
#define RSI_HASHMAX     (1 << RSI_HASHBITS)
#define RSI_HASHMASK    (RSI_HASHMAX - 1)

struct rsi {
        struct cache_head       h;
        __u32                   lustre_svc;
        __u64                   nid;
        wait_queue_head_t       waitq;
        rawobj_t                in_handle, in_token;
        rawobj_t                out_handle, out_token;
        int                     major_status, minor_status;
};

static struct cache_head *rsi_table[RSI_HASHMAX];
static struct cache_detail rsi_cache;
static struct rsi *rsi_update(struct rsi *new, struct rsi *old);
static struct rsi *rsi_lookup(struct rsi *item);

static inline int rsi_hash(struct rsi *item)
{
        return hash_mem((char *)item->in_handle.data, item->in_handle.len,
                        RSI_HASHBITS) ^
               hash_mem((char *)item->in_token.data, item->in_token.len,
                        RSI_HASHBITS);
}

static inline int __rsi_match(struct rsi *item, struct rsi *tmp)
{
        return (rawobj_equal(&item->in_handle, &tmp->in_handle) &&
                rawobj_equal(&item->in_token, &tmp->in_token));
}

static void rsi_free(struct rsi *rsi)
{
        rawobj_free(&rsi->in_handle);
        rawobj_free(&rsi->in_token);
        rawobj_free(&rsi->out_handle);
        rawobj_free(&rsi->out_token);
}

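/*
 * Format of the upcall line handed to the user-space daemon through the
 * "auth.sptlrpc.init" cache channel (each field hex-encoded by
 * qword_addhex):
 *
 *   lustre_svc nid ctx_index in_handle in_token\n
 *
 * ctx_index is a kernel-suggested context index, non-zero only when the
 * client sent an empty in_handle.
 */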
static void rsi_request(struct cache_detail *cd,
                        struct cache_head *h,
                        char **bpp, int *blen)
{
        struct rsi *rsi = container_of(h, struct rsi, h);
        __u64 index = 0;

        /* if in_handle is null, provide kernel suggestion */
        if (rsi->in_handle.len == 0)
                index = gss_get_next_ctx_index();

        qword_addhex(bpp, blen, (char *) &rsi->lustre_svc,
                     sizeof(rsi->lustre_svc));
        qword_addhex(bpp, blen, (char *) &rsi->nid, sizeof(rsi->nid));
        qword_addhex(bpp, blen, (char *) &index, sizeof(index));
        qword_addhex(bpp, blen, rsi->in_handle.data, rsi->in_handle.len);
        qword_addhex(bpp, blen, rsi->in_token.data, rsi->in_token.len);
        (*bpp)[-1] = '\n';
}

static int rsi_upcall(struct cache_detail *cd, struct cache_head *h)
{
        return sunrpc_cache_pipe_upcall(cd, h, rsi_request);
}

static inline void __rsi_init(struct rsi *new, struct rsi *item)
{
        new->out_handle = RAWOBJ_EMPTY;
        new->out_token = RAWOBJ_EMPTY;

        new->in_handle = item->in_handle;
        item->in_handle = RAWOBJ_EMPTY;
        new->in_token = item->in_token;
        item->in_token = RAWOBJ_EMPTY;

        new->lustre_svc = item->lustre_svc;
        new->nid = item->nid;
        init_waitqueue_head(&new->waitq);
}

static inline void __rsi_update(struct rsi *new, struct rsi *item)
{
        LASSERT(new->out_handle.len == 0);
        LASSERT(new->out_token.len == 0);

        new->out_handle = item->out_handle;
        item->out_handle = RAWOBJ_EMPTY;
        new->out_token = item->out_token;
        item->out_token = RAWOBJ_EMPTY;

        new->major_status = item->major_status;
        new->minor_status = item->minor_status;
}

static void rsi_put(struct kref *ref)
{
        struct rsi *rsi = container_of(ref, struct rsi, h.ref);

        LASSERT(rsi->h.next == NULL);
        rsi_free(rsi);
        OBD_FREE_PTR(rsi);
}

static int rsi_match(struct cache_head *a, struct cache_head *b)
{
        struct rsi *item = container_of(a, struct rsi, h);
        struct rsi *tmp = container_of(b, struct rsi, h);

        return __rsi_match(item, tmp);
}

static void rsi_init(struct cache_head *cnew, struct cache_head *citem)
{
        struct rsi *new = container_of(cnew, struct rsi, h);
        struct rsi *item = container_of(citem, struct rsi, h);

        __rsi_init(new, item);
}

static void update_rsi(struct cache_head *cnew, struct cache_head *citem)
{
        struct rsi *new = container_of(cnew, struct rsi, h);
        struct rsi *item = container_of(citem, struct rsi, h);

        __rsi_update(new, item);
}

static struct cache_head *rsi_alloc(void)
{
        struct rsi *rsi;

        OBD_ALLOC_PTR(rsi);
        if (rsi)
                return &rsi->h;
        else
                return NULL;
}

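/*
 * Parse the downcall written back by the user-space daemon for an init
 * request.  The fields, in order, are: in_handle, in_token, expiry,
 * major_status, minor_status, out_handle, out_token.
 */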
static int rsi_parse(struct cache_detail *cd, char *mesg, int mlen)
{
        char            *buf = mesg;
        char            *ep;
        int             len;
        struct rsi      rsii, *rsip = NULL;
        time_t          expiry;
        int             status = -EINVAL;

        memset(&rsii, 0, sizeof(rsii));

        /* handle */
        len = qword_get(&mesg, buf, mlen);
        if (len < 0)
                goto out;
        if (rawobj_alloc(&rsii.in_handle, buf, len)) {
                status = -ENOMEM;
                goto out;
        }

        /* token */
        len = qword_get(&mesg, buf, mlen);
        if (len < 0)
                goto out;
        if (rawobj_alloc(&rsii.in_token, buf, len)) {
                status = -ENOMEM;
                goto out;
        }

        rsip = rsi_lookup(&rsii);
        if (!rsip)
                goto out;

        rsii.h.flags = 0;
        /* expiry */
        expiry = get_expiry(&mesg);
        if (expiry == 0)
                goto out;

        len = qword_get(&mesg, buf, mlen);
        if (len <= 0)
                goto out;

        /* major */
        rsii.major_status = simple_strtol(buf, &ep, 10);
        if (*ep)
                goto out;

        /* minor */
        len = qword_get(&mesg, buf, mlen);
        if (len <= 0)
                goto out;
        rsii.minor_status = simple_strtol(buf, &ep, 10);
        if (*ep)
                goto out;

        /* out_handle */
        len = qword_get(&mesg, buf, mlen);
        if (len < 0)
                goto out;
        if (rawobj_alloc(&rsii.out_handle, buf, len)) {
                status = -ENOMEM;
                goto out;
        }

        /* out_token */
        len = qword_get(&mesg, buf, mlen);
        if (len < 0)
                goto out;
        if (rawobj_alloc(&rsii.out_token, buf, len)) {
                status = -ENOMEM;
                goto out;
        }

        rsii.h.expiry_time = expiry;
        rsip = rsi_update(&rsii, rsip);
        status = 0;
out:
        rsi_free(&rsii);
        if (rsip) {
                wake_up_all(&rsip->waitq);
                cache_put(&rsip->h, &rsi_cache);
        } else {
                status = -ENOMEM;
        }

        if (status)
                CERROR("rsi parse error %d\n", status);
        return status;
}

static struct cache_detail rsi_cache = {
        .hash_size      = RSI_HASHMAX,
        .hash_table     = rsi_table,
        .name           = "auth.sptlrpc.init",
        .cache_put      = rsi_put,
        .cache_upcall   = rsi_upcall,
        .cache_parse    = rsi_parse,
        .match          = rsi_match,
        .init           = rsi_init,
        .update         = update_rsi,
        .alloc          = rsi_alloc,
};

static struct rsi *rsi_lookup(struct rsi *item)
{
        struct cache_head *ch;
        int hash = rsi_hash(item);

        ch = sunrpc_cache_lookup(&rsi_cache, &item->h, hash);
        if (ch)
                return container_of(ch, struct rsi, h);
        else
                return NULL;
}

static struct rsi *rsi_update(struct rsi *new, struct rsi *old)
{
        struct cache_head *ch;
        int hash = rsi_hash(new);

        ch = sunrpc_cache_update(&rsi_cache, &new->h, &old->h, hash);
        if (ch)
                return container_of(ch, struct rsi, h);
        else
                return NULL;
}

/****************************************
 * rsc cache                            *
 ****************************************/

#define RSC_HASHBITS    (10)
#define RSC_HASHMAX     (1 << RSC_HASHBITS)
#define RSC_HASHMASK    (RSC_HASHMAX - 1)

struct rsc {
        struct cache_head       h;
        struct obd_device      *target;
        rawobj_t                handle;
        struct gss_svc_ctx      ctx;
};

static struct cache_head *rsc_table[RSC_HASHMAX];
static struct cache_detail rsc_cache;
static struct rsc *rsc_update(struct rsc *new, struct rsc *old);
static struct rsc *rsc_lookup(struct rsc *item);

static void rsc_free(struct rsc *rsci)
{
        rawobj_free(&rsci->handle);
        rawobj_free(&rsci->ctx.gsc_rvs_hdl);
        lgss_delete_sec_context(&rsci->ctx.gsc_mechctx);
}

static inline int rsc_hash(struct rsc *rsci)
{
        return hash_mem((char *)rsci->handle.data,
                        rsci->handle.len, RSC_HASHBITS);
}

static inline int __rsc_match(struct rsc *new, struct rsc *tmp)
{
        return rawobj_equal(&new->handle, &tmp->handle);
}

static inline void __rsc_init(struct rsc *new, struct rsc *tmp)
{
        new->handle = tmp->handle;
        tmp->handle = RAWOBJ_EMPTY;

        new->target = NULL;
        memset(&new->ctx, 0, sizeof(new->ctx));
        new->ctx.gsc_rvs_hdl = RAWOBJ_EMPTY;
}

static inline void __rsc_update(struct rsc *new, struct rsc *tmp)
{
        new->ctx = tmp->ctx;
        tmp->ctx.gsc_rvs_hdl = RAWOBJ_EMPTY;
        tmp->ctx.gsc_mechctx = NULL;

        memset(&new->ctx.gsc_seqdata, 0, sizeof(new->ctx.gsc_seqdata));
        spin_lock_init(&new->ctx.gsc_seqdata.ssd_lock);
}

static void rsc_put(struct kref *ref)
{
        struct rsc *rsci = container_of(ref, struct rsc, h.ref);

        LASSERT(rsci->h.next == NULL);
        rsc_free(rsci);
        OBD_FREE_PTR(rsci);
}

static int rsc_match(struct cache_head *a, struct cache_head *b)
{
        struct rsc *new = container_of(a, struct rsc, h);
        struct rsc *tmp = container_of(b, struct rsc, h);

        return __rsc_match(new, tmp);
}

static void rsc_init(struct cache_head *cnew, struct cache_head *ctmp)
{
        struct rsc *new = container_of(cnew, struct rsc, h);
        struct rsc *tmp = container_of(ctmp, struct rsc, h);

        __rsc_init(new, tmp);
}

static void update_rsc(struct cache_head *cnew, struct cache_head *ctmp)
{
        struct rsc *new = container_of(cnew, struct rsc, h);
        struct rsc *tmp = container_of(ctmp, struct rsc, h);

        __rsc_update(new, tmp);
}

static struct cache_head *rsc_alloc(void)
{
        struct rsc *rsc;

        OBD_ALLOC_PTR(rsc);
        if (rsc)
                return &rsc->h;
        else
                return NULL;
}

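/*
 * Parse a context downcall from the user-space daemon.  The fields, in
 * order, are: handle, expiry, remote flag, root user flag, mds user flag,
 * oss user flag, mapped uid, uid.  If the uid is absent the entry is made
 * negative; otherwise gid, mech name and mech-specific context data follow.
 */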
static int rsc_parse(struct cache_detail *cd, char *mesg, int mlen)
{
        char                *buf = mesg;
        int                 len, rv, tmp_int;
        struct rsc          rsci, *rscp = NULL;
        time_t              expiry;
        int                 status = -EINVAL;
        struct gss_api_mech *gm = NULL;

        memset(&rsci, 0, sizeof(rsci));

        /* context handle */
        len = qword_get(&mesg, buf, mlen);
        if (len < 0)
                goto out;
        status = -ENOMEM;
        if (rawobj_alloc(&rsci.handle, buf, len))
                goto out;

        rsci.h.flags = 0;
        /* expiry */
        expiry = get_expiry(&mesg);
        status = -EINVAL;
        if (expiry == 0)
                goto out;

        /* remote flag */
        rv = get_int(&mesg, &tmp_int);
        if (rv) {
                CERROR("fail to get remote flag\n");
                goto out;
        }
        rsci.ctx.gsc_remote = (tmp_int != 0);

        /* root user flag */
        rv = get_int(&mesg, &tmp_int);
        if (rv) {
                CERROR("fail to get root user flag\n");
                goto out;
        }
        rsci.ctx.gsc_usr_root = (tmp_int != 0);

        /* mds user flag */
        rv = get_int(&mesg, &tmp_int);
        if (rv) {
                CERROR("fail to get mds user flag\n");
                goto out;
        }
        rsci.ctx.gsc_usr_mds = (tmp_int != 0);

        /* oss user flag */
        rv = get_int(&mesg, &tmp_int);
        if (rv) {
                CERROR("fail to get oss user flag\n");
                goto out;
        }
        rsci.ctx.gsc_usr_oss = (tmp_int != 0);

        /* mapped uid */
        rv = get_int(&mesg, (int *) &rsci.ctx.gsc_mapped_uid);
        if (rv) {
                CERROR("fail to get mapped uid\n");
                goto out;
        }

        rscp = rsc_lookup(&rsci);
        if (!rscp)
                goto out;

        /* uid, or NEGATIVE */
        rv = get_int(&mesg, (int *) &rsci.ctx.gsc_uid);
        if (rv == -EINVAL)
                goto out;
        if (rv == -ENOENT) {
                CERROR("NOENT? set rsc entry negative\n");
                set_bit(CACHE_NEGATIVE, &rsci.h.flags);
        } else {
                rawobj_t tmp_buf;
                unsigned long ctx_expiry;

                /* gid */
                if (get_int(&mesg, (int *) &rsci.ctx.gsc_gid))
                        goto out;

                /* mech name */
                len = qword_get(&mesg, buf, mlen);
                if (len < 0)
                        goto out;
                gm = lgss_name_to_mech(buf);
                status = -EOPNOTSUPP;
                if (!gm)
                        goto out;

                status = -EINVAL;
                /* mech-specific data: */
                len = qword_get(&mesg, buf, mlen);
                if (len < 0)
                        goto out;

                tmp_buf.len = len;
                tmp_buf.data = (unsigned char *)buf;
                if (lgss_import_sec_context(&tmp_buf, gm,
                                            &rsci.ctx.gsc_mechctx))
                        goto out;

                /* currently the expiry time passed down from user-space
                 * is invalid, so retrieve it from the mech instead. */
                if (lgss_inquire_context(rsci.ctx.gsc_mechctx, &ctx_expiry)) {
                        CERROR("unable to get expire time, drop it\n");
                        goto out;
                }
                expiry = (time_t) ctx_expiry;
        }

        rsci.h.expiry_time = expiry;
        rscp = rsc_update(&rsci, rscp);
        status = 0;
out:
        if (gm)
                lgss_mech_put(gm);
        rsc_free(&rsci);
        if (rscp)
                cache_put(&rscp->h, &rsc_cache);
        else
                status = -ENOMEM;

        if (status)
                CERROR("parse rsc error %d\n", status);
        return status;
}

static struct cache_detail rsc_cache = {
        .hash_size      = RSC_HASHMAX,
        .hash_table     = rsc_table,
        .name           = "auth.sptlrpc.context",
        .cache_put      = rsc_put,
        .cache_parse    = rsc_parse,
        .match          = rsc_match,
        .init           = rsc_init,
        .update         = update_rsc,
        .alloc          = rsc_alloc,
};

static struct rsc *rsc_lookup(struct rsc *item)
{
        struct cache_head *ch;
        int hash = rsc_hash(item);

        ch = sunrpc_cache_lookup(&rsc_cache, &item->h, hash);
        if (ch)
                return container_of(ch, struct rsc, h);
        else
                return NULL;
}

static struct rsc *rsc_update(struct rsc *new, struct rsc *old)
{
        struct cache_head *ch;
        int hash = rsc_hash(new);

        ch = sunrpc_cache_update(&rsc_cache, &new->h, &old->h, hash);
        if (ch)
                return container_of(ch, struct rsc, h);
        else
                return NULL;
}

#define COMPAT_RSC_PUT(item, cd)        cache_put((item), (cd))

/****************************************
 * rsc cache flush                      *
 ****************************************/

typedef int rsc_entry_match(struct rsc *rscp, long data);

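/*
 * Walk the whole rsc hash table and forcibly drop every entry for which
 * @match returns true: the entry is unhashed and marked CACHE_NEGATIVE so it
 * can no longer be found, regardless of its expiry time.
 */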
static void rsc_flush(rsc_entry_match *match, long data)
{
        struct cache_head **ch;
        struct rsc *rscp;
        int n;

        write_lock(&rsc_cache.hash_lock);
        for (n = 0; n < RSC_HASHMAX; n++) {
                for (ch = &rsc_cache.hash_table[n]; *ch;) {
                        rscp = container_of(*ch, struct rsc, h);

                        if (!match(rscp, data)) {
                                ch = &((*ch)->next);
                                continue;
                        }

                        /* it seems simply setting NEGATIVE doesn't work */
                        *ch = (*ch)->next;
                        rscp->h.next = NULL;
                        cache_get(&rscp->h);
                        set_bit(CACHE_NEGATIVE, &rscp->h.flags);
                        COMPAT_RSC_PUT(&rscp->h, &rsc_cache);
                        rsc_cache.entries--;
                }
        }
        write_unlock(&rsc_cache.hash_lock);
}

static int match_uid(struct rsc *rscp, long uid)
{
        if ((int) uid == -1)
                return 1;
        return ((int) rscp->ctx.gsc_uid == (int) uid);
}

static int match_target(struct rsc *rscp, long target)
{
        return (rscp->target == (struct obd_device *) target);
}

static inline void rsc_flush_uid(int uid)
{
        if (uid == -1)
                CWARN("flush all gss contexts...\n");

        rsc_flush(match_uid, (long) uid);
}

static inline void rsc_flush_target(struct obd_device *target)
{
        rsc_flush(match_target, (long) target);
}

void gss_secsvc_flush(struct obd_device *target)
{
        rsc_flush_target(target);
}
EXPORT_SYMBOL(gss_secsvc_flush);

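/*
 * Look up an established context by its wire handle.  On success a
 * referenced rsc is returned and the caller must drop it with
 * COMPAT_RSC_PUT(); NULL is returned if no valid, non-expired entry exists.
 */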
static struct rsc *gss_svc_searchbyctx(rawobj_t *handle)
{
        struct rsc  rsci;
        struct rsc *found;

        memset(&rsci, 0, sizeof(rsci));
        if (rawobj_dup(&rsci.handle, handle))
                return NULL;

        found = rsc_lookup(&rsci);
        rsc_free(&rsci);
        if (!found)
                return NULL;
        if (cache_check(&rsc_cache, &found->h, NULL))
                return NULL;
        return found;
}

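/*
 * Install a reverse server-side context for an import: the client-side mech
 * context in @gctx is copied into the rsc cache under the handle
 * gsec->gs_rvs_hdl, so requests arriving with that handle find an already
 * established context without going through a new init upcall.
 */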
int gss_svc_upcall_install_rvs_ctx(struct obd_import *imp,
                                   struct gss_sec *gsec,
                                   struct gss_cli_ctx *gctx)
{
        struct rsc      rsci, *rscp = NULL;
        unsigned long   ctx_expiry;
        __u32           major;
        int             rc;

        memset(&rsci, 0, sizeof(rsci));

        if (rawobj_alloc(&rsci.handle, (char *) &gsec->gs_rvs_hdl,
                         sizeof(gsec->gs_rvs_hdl)))
                GOTO(out, rc = -ENOMEM);

        rscp = rsc_lookup(&rsci);
        if (rscp == NULL)
                GOTO(out, rc = -ENOMEM);

        major = lgss_copy_reverse_context(gctx->gc_mechctx,
                                          &rsci.ctx.gsc_mechctx);
        if (major != GSS_S_COMPLETE)
                GOTO(out, rc = -ENOMEM);

        if (lgss_inquire_context(rsci.ctx.gsc_mechctx, &ctx_expiry)) {
                CERROR("unable to get expire time, drop it\n");
                GOTO(out, rc = -EINVAL);
        }
        rsci.h.expiry_time = (time_t) ctx_expiry;

        if (strcmp(imp->imp_obd->obd_type->typ_name, LUSTRE_MDC_NAME) == 0)
                rsci.ctx.gsc_usr_mds = 1;
        else if (strcmp(imp->imp_obd->obd_type->typ_name, LUSTRE_OSC_NAME) == 0)
                rsci.ctx.gsc_usr_oss = 1;
        else
                rsci.ctx.gsc_usr_root = 1;

        rscp = rsc_update(&rsci, rscp);
        if (rscp == NULL)
                GOTO(out, rc = -ENOMEM);

        rscp->target = imp->imp_obd;
        rawobj_dup(&gctx->gc_svc_handle, &rscp->handle);

        CWARN("create reverse svc ctx %p to %s: idx "LPX64"\n",
              &rscp->ctx, obd2cli_tgt(imp->imp_obd), gsec->gs_rvs_hdl);
        rc = 0;
out:
        if (rscp)
                cache_put(&rscp->h, &rsc_cache);
        rsc_free(&rsci);

        if (rc)
                CERROR("create reverse svc ctx: idx "LPX64", rc %d\n",
                       gsec->gs_rvs_hdl, rc);
        return rc;
}

int gss_svc_upcall_expire_rvs_ctx(rawobj_t *handle)
{
        const cfs_time_t        expire = 20;
        struct rsc             *rscp;

        rscp = gss_svc_searchbyctx(handle);
        if (rscp) {
                CDEBUG(D_SEC, "reverse svcctx %p (rsc %p) expire soon\n",
                       &rscp->ctx, rscp);

                rscp->h.expiry_time = cfs_time_current_sec() + expire;
                COMPAT_RSC_PUT(&rscp->h, &rsc_cache);
        }
        return 0;
}

int gss_svc_upcall_dup_handle(rawobj_t *handle, struct gss_svc_ctx *ctx)
{
        struct rsc *rscp = container_of(ctx, struct rsc, ctx);

        return rawobj_dup(handle, &rscp->handle);
}

int gss_svc_upcall_update_sequence(rawobj_t *handle, __u32 seq)
{
        struct rsc             *rscp;

        rscp = gss_svc_searchbyctx(handle);
        if (rscp) {
                CDEBUG(D_SEC, "reverse svcctx %p (rsc %p) update seq to %u\n",
                       &rscp->ctx, rscp, seq + 1);

                rscp->ctx.gsc_rvs_seq = seq + 1;
                COMPAT_RSC_PUT(&rscp->h, &rsc_cache);
        }
        return 0;
}

static struct cache_deferred_req *cache_upcall_defer(struct cache_req *req)
{
        return NULL;
}
static struct cache_req cache_upcall_chandle = { cache_upcall_defer };

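/*
 * Handle an incoming RPCSEC_GSS init request: queue an rsi entry (which
 * triggers the upcall to the user-space daemon), wait up to
 * GSS_SVC_UPCALL_TIMEOUT seconds for the daemon's reply, then locate the
 * freshly installed rsc context by out_handle and pack the negotiation
 * reply (gss header plus out_token) back to the client.
 */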
int gss_svc_upcall_handle_init(struct ptlrpc_request *req,
                               struct gss_svc_reqctx *grctx,
                               struct gss_wire_ctx *gw,
                               struct obd_device *target,
                               __u32 lustre_svc,
                               rawobj_t *rvs_hdl,
                               rawobj_t *in_token)
{
        struct ptlrpc_reply_state *rs;
        struct rsc                *rsci = NULL;
        struct rsi                *rsip = NULL, rsikey;
        wait_queue_t               wait;
        int                        replen = sizeof(struct ptlrpc_body);
        struct gss_rep_header     *rephdr;
        int                        first_check = 1;
        int                        rc = SECSVC_DROP;

        memset(&rsikey, 0, sizeof(rsikey));
        rsikey.lustre_svc = lustre_svc;
        rsikey.nid = (__u64) req->rq_peer.nid;

        /* duplicate context handle. for INIT it is always 0 */
        if (rawobj_dup(&rsikey.in_handle, &gw->gw_handle)) {
                CERROR("fail to dup context handle\n");
                GOTO(out, rc);
        }

        if (rawobj_dup(&rsikey.in_token, in_token)) {
                CERROR("can't duplicate token\n");
                rawobj_free(&rsikey.in_handle);
                GOTO(out, rc);
        }

        rsip = rsi_lookup(&rsikey);
        rsi_free(&rsikey);
        if (!rsip) {
                CERROR("error in rsi_lookup.\n");

                if (!gss_pack_err_notify(req, GSS_S_FAILURE, 0))
                        rc = SECSVC_COMPLETE;

                GOTO(out, rc);
        }

        cache_get(&rsip->h); /* take an extra ref */
        init_waitqueue_head(&rsip->waitq);
        init_waitqueue_entry_current(&wait);
        add_wait_queue(&rsip->waitq, &wait);

cache_check:
        /* Note: each time cache_check() returns non-zero it drops a
         * reference. We hold an extra reference on the initial rsip, but
         * must take care of the subsequent calls ourselves. */
        rc = cache_check(&rsi_cache, &rsip->h, &cache_upcall_chandle);
        switch (rc) {
        case -EAGAIN: {
                int valid;

                if (first_check) {
                        first_check = 0;

                        read_lock(&rsi_cache.hash_lock);
                        valid = test_bit(CACHE_VALID, &rsip->h.flags);
                        if (valid == 0)
                                set_current_state(TASK_INTERRUPTIBLE);
                        read_unlock(&rsi_cache.hash_lock);

                        if (valid == 0)
                                schedule_timeout(GSS_SVC_UPCALL_TIMEOUT *
                                                 HZ);

                        cache_get(&rsip->h);
                        goto cache_check;
                }
                CWARN("waited %ds timeout, drop\n", GSS_SVC_UPCALL_TIMEOUT);
                break;
        }
        case -ENOENT:
                CWARN("cache_check return ENOENT, drop\n");
                break;
        case 0:
                /* if this is not the first check, we have to release the
                 * extra reference we just added on it. */
                if (!first_check)
                        cache_put(&rsip->h, &rsi_cache);
                CDEBUG(D_SEC, "cache_check is good\n");
                break;
        }

        remove_wait_queue(&rsip->waitq, &wait);
        cache_put(&rsip->h, &rsi_cache);

        if (rc)
                GOTO(out, rc = SECSVC_DROP);

        rc = SECSVC_DROP;
        rsci = gss_svc_searchbyctx(&rsip->out_handle);
        if (!rsci) {
                CERROR("authentication failed\n");

                if (!gss_pack_err_notify(req, GSS_S_FAILURE, 0))
                        rc = SECSVC_COMPLETE;

                GOTO(out, rc);
        } else {
                cache_get(&rsci->h);
                grctx->src_ctx = &rsci->ctx;
        }

        if (rawobj_dup(&rsci->ctx.gsc_rvs_hdl, rvs_hdl)) {
                CERROR("failed to duplicate reverse handle\n");
                GOTO(out, rc);
        }

        rsci->target = target;

        CDEBUG(D_SEC, "server create rsc %p(%u->%s)\n",
               rsci, rsci->ctx.gsc_uid, libcfs_nid2str(req->rq_peer.nid));

        if (rsip->out_handle.len > PTLRPC_GSS_MAX_HANDLE_SIZE) {
                CERROR("handle size %u too large\n", rsip->out_handle.len);
                GOTO(out, rc = SECSVC_DROP);
        }

        grctx->src_init = 1;
        grctx->src_reserve_len = cfs_size_round4(rsip->out_token.len);

        rc = lustre_pack_reply_v2(req, 1, &replen, NULL, 0);
        if (rc) {
                CERROR("failed to pack reply: %d\n", rc);
                GOTO(out, rc = SECSVC_DROP);
        }

        rs = req->rq_reply_state;
        LASSERT(rs->rs_repbuf->lm_bufcount == 3);
        LASSERT(rs->rs_repbuf->lm_buflens[0] >=
                sizeof(*rephdr) + rsip->out_handle.len);
        LASSERT(rs->rs_repbuf->lm_buflens[2] >= rsip->out_token.len);

        rephdr = lustre_msg_buf(rs->rs_repbuf, 0, 0);
        rephdr->gh_version = PTLRPC_GSS_VERSION;
        rephdr->gh_flags = 0;
        rephdr->gh_proc = PTLRPC_GSS_PROC_ERR;
        rephdr->gh_major = rsip->major_status;
        rephdr->gh_minor = rsip->minor_status;
        rephdr->gh_seqwin = GSS_SEQ_WIN;
        rephdr->gh_handle.len = rsip->out_handle.len;
        memcpy(rephdr->gh_handle.data, rsip->out_handle.data,
               rsip->out_handle.len);

        memcpy(lustre_msg_buf(rs->rs_repbuf, 2, 0), rsip->out_token.data,
               rsip->out_token.len);

        rs->rs_repdata_len = lustre_shrink_msg(rs->rs_repbuf, 2,
                                               rsip->out_token.len, 0);

        rc = SECSVC_OK;

out:
        /* it looks like we should put rsip here as well, but that messes up
         * the NFS cache management code... FIXME */
#if 0
        if (rsip)
                rsi_put(&rsip->h, &rsi_cache);
#endif

        if (rsci) {
                /* if anything went wrong, don't keep the context either */
                if (rc != SECSVC_OK)
                        set_bit(CACHE_NEGATIVE, &rsci->h.flags);
                else
                        CDEBUG(D_SEC, "create rsc with idx "LPX64"\n",
                               gss_handle_to_u64(&rsci->handle));

                COMPAT_RSC_PUT(&rsci->h, &rsc_cache);
        }
        return rc;
}

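/*
 * Typical caller pattern for an established context (sketch):
 *
 *      ctx = gss_svc_upcall_get_ctx(req, gw);
 *      if (ctx) {
 *              ... verify/unseal the request using ctx ...
 *              gss_svc_upcall_put_ctx(ctx);
 *      }
 *
 * gss_svc_upcall_get_ctx() returns the context with a cache reference held;
 * gss_svc_upcall_put_ctx() releases it.
 */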
struct gss_svc_ctx *gss_svc_upcall_get_ctx(struct ptlrpc_request *req,
                                           struct gss_wire_ctx *gw)
{
        struct rsc *rsc;

        rsc = gss_svc_searchbyctx(&gw->gw_handle);
        if (!rsc) {
                CWARN("Invalid gss ctx idx "LPX64" from %s\n",
                      gss_handle_to_u64(&gw->gw_handle),
                      libcfs_nid2str(req->rq_peer.nid));
                return NULL;
        }

        return &rsc->ctx;
}

void gss_svc_upcall_put_ctx(struct gss_svc_ctx *ctx)
{
        struct rsc *rsc = container_of(ctx, struct rsc, ctx);

        COMPAT_RSC_PUT(&rsc->h, &rsc_cache);
}

void gss_svc_upcall_destroy_ctx(struct gss_svc_ctx *ctx)
{
        struct rsc *rsc = container_of(ctx, struct rsc, ctx);

        /* can't be found */
        set_bit(CACHE_NEGATIVE, &rsc->h.flags);
        /* to be removed at next scan */
        rsc->h.expiry_time = 1;
}

int __init gss_init_svc_upcall(void)
{
        int i;

        spin_lock_init(&__ctx_index_lock);
        /*
         * This helps reduce context index collisions. After a server reboot,
         * conflicting requests from clients might be filtered out by the
         * initial sequence number check, leaving no chance to send an error
         * notification back to the clients.
         */
        cfs_get_random_bytes(&__ctx_index, sizeof(__ctx_index));

        cache_register(&rsi_cache);
        cache_register(&rsc_cache);

        /* FIXME: this looks stupid. We intend to give lsvcgssd a chance to
         * open the init upcall channel; otherwise there is a good chance the
         * first upcall is issued before the channel is open, in which case
         * the nfsv4 cache code drops the request directly, leading to
         * unnecessary recovery time. Here we wait at most 1.5 seconds. */
        for (i = 0; i < 6; i++) {
                if (atomic_read(&rsi_cache.readers) > 0)
                        break;
                set_current_state(TASK_UNINTERRUPTIBLE);
                LASSERT(HZ >= 4);
                schedule_timeout(HZ / 4);
        }

        if (atomic_read(&rsi_cache.readers) == 0)
                CWARN("Init channel is not opened by lsvcgssd, following "
                      "requests might be dropped until lsvcgssd is active\n");

        return 0;
}

void __exit gss_exit_svc_upcall(void)
{
        cache_purge(&rsi_cache);
        cache_unregister(&rsi_cache);

        cache_purge(&rsc_cache);
        cache_unregister(&rsc_cache);
}