linux/drivers/staging/lustre/lustre/ptlrpc/sec_plain.c
<<
>>
Prefs
   1/*
   2 * GPL HEADER START
   3 *
   4 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   5 *
   6 * This program is free software; you can redistribute it and/or modify
   7 * it under the terms of the GNU General Public License version 2 only,
   8 * as published by the Free Software Foundation.
   9 *
  10 * This program is distributed in the hope that it will be useful, but
  11 * WITHOUT ANY WARRANTY; without even the implied warranty of
  12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
  13 * General Public License version 2 for more details (a copy is included
  14 * in the LICENSE file that accompanied this code).
  15 *
  16 * You should have received a copy of the GNU General Public License
  17 * version 2 along with this program; If not, see
  18 * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
  19 *
  20 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
  21 * CA 95054 USA or visit www.sun.com if you need additional information or
  22 * have any questions.
  23 *
  24 * GPL HEADER END
  25 */
  26/*
  27 * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
  28 * Use is subject to license terms.
  29 *
  30 * Copyright (c) 2011, 2012, Intel Corporation.
  31 */
  32/*
  33 * This file is part of Lustre, http://www.lustre.org/
  34 * Lustre is a trademark of Sun Microsystems, Inc.
  35 *
  36 * lustre/ptlrpc/sec_plain.c
  37 *
  38 * Author: Eric Mei <ericm@clusterfs.com>
  39 */
  40
  41#define DEBUG_SUBSYSTEM S_SEC
  42
  43
  44#include <obd_support.h>
  45#include <obd_cksum.h>
  46#include <obd_class.h>
  47#include <lustre_net.h>
  48#include <lustre_sec.h>
  49
/*
 * Plain-policy security instance: the generic ptlrpc_sec plus a cache of
 * the single shared client context, protected by a rwlock.
 */
struct plain_sec {
	struct ptlrpc_sec	pls_base;	/* embedded generic sec */
	rwlock_t		pls_lock;	/* protects pls_ctx */
	struct ptlrpc_cli_ctx  *pls_ctx;	/* cached context, or NULL */
};
  55
/* Map an embedded ptlrpc_sec back to its containing plain_sec. */
static inline struct plain_sec *sec2plsec(struct ptlrpc_sec *sec)
{
	return container_of(sec, struct plain_sec, pls_base);
}
  60
/* forward declarations; the definitions live at the bottom of this file */
static struct ptlrpc_sec_policy plain_policy;
static struct ptlrpc_ctx_ops    plain_ctx_ops;
static struct ptlrpc_svc_ctx    plain_svc_ctx;

/* reply-buffer offset reserved for early replies (set at policy init) */
static unsigned int plain_at_offset;
  66
/*
 * for simplicity, plain policy rpc use fixed layout.
 */
#define PLAIN_PACK_SEGMENTS          (4)

#define PLAIN_PACK_HDR_OFF            (0)	/* struct plain_header */
#define PLAIN_PACK_MSG_OFF            (1)	/* the embedded lustre_msg */
#define PLAIN_PACK_USER_OFF          (2)	/* optional user descriptor */
#define PLAIN_PACK_BULK_OFF          (3)	/* optional bulk sec descriptor */

/* ph_flags bits */
#define PLAIN_FL_USER              (0x01)	/* user descriptor packed */
#define PLAIN_FL_BULK              (0x02)	/* bulk checksum packed */

/* on-wire header carried in segment PLAIN_PACK_HDR_OFF of every plain RPC */
struct plain_header {
	__u8        ph_ver;         /* 0 */
	__u8        ph_flags;
	__u8        ph_sp;           /* source */
	__u8        ph_bulk_hash_alg;  /* complete flavor desc */
	__u8        ph_pad[4];
};

/* fixed-size bulk checksum stored inside the bulk security descriptor */
struct plain_bulk_token {
	__u8        pbt_hash[8];
};

#define PLAIN_BSD_SIZE \
	(sizeof(struct ptlrpc_bulk_sec_desc) + sizeof(struct plain_bulk_token))
  95/****************************************
  96 * bulk checksum helpers                *
  97 ****************************************/
  98
/*
 * Swab and sanity-check the bulk security descriptor in segment
 * PLAIN_PACK_BULK_OFF of @msg.
 *
 * Returns 0 on success, -EPROTO if the descriptor is short, malformed,
 * or requests a bulk service the plain policy does not support.
 */
static int plain_unpack_bsd(struct lustre_msg *msg, int swabbed)
{
	struct ptlrpc_bulk_sec_desc *bsd;

	if (bulk_sec_desc_unpack(msg, PLAIN_PACK_BULK_OFF, swabbed))
		return -EPROTO;

	/* re-fetch with the full expected size to validate the length */
	bsd = lustre_msg_buf(msg, PLAIN_PACK_BULK_OFF, PLAIN_BSD_SIZE);
	if (bsd == NULL) {
		CERROR("bulk sec desc has short size %d\n",
		       lustre_msg_buflen(msg, PLAIN_PACK_BULK_OFF));
		return -EPROTO;
	}

	/* plain policy only supports NULL and INTG bulk services */
	if (bsd->bsd_svc != SPTLRPC_BULK_SVC_NULL &&
	    bsd->bsd_svc != SPTLRPC_BULK_SVC_INTG) {
		CERROR("invalid bulk svc %u\n", bsd->bsd_svc);
		return -EPROTO;
	}

	return 0;
}
 121
/*
 * Compute the bulk checksum of @desc with @hash_alg into @token.
 * A no-op (returns 0) for BULK_HASH_ALG_NULL.  Returns the error from
 * sptlrpc_get_bulk_checksum() on failure.
 */
static int plain_generate_bulk_csum(struct ptlrpc_bulk_desc *desc,
				    __u8 hash_alg,
				    struct plain_bulk_token *token)
{
	if (hash_alg == BULK_HASH_ALG_NULL)
		return 0;

	/* zero first so unused tail bytes are deterministic on the wire */
	memset(token->pbt_hash, 0, sizeof(token->pbt_hash));
	return sptlrpc_get_bulk_checksum(desc, hash_alg, token->pbt_hash,
					 sizeof(token->pbt_hash));
}
 133
 134static int plain_verify_bulk_csum(struct ptlrpc_bulk_desc *desc,
 135                                  __u8 hash_alg,
 136                                  struct plain_bulk_token *tokenr)
 137{
 138        struct plain_bulk_token tokenv;
 139        int                  rc;
 140
 141        if (hash_alg == BULK_HASH_ALG_NULL)
 142                return 0;
 143
 144        memset(&tokenv.pbt_hash, 0, sizeof(tokenv.pbt_hash));
 145        rc = sptlrpc_get_bulk_checksum(desc, hash_alg, tokenv.pbt_hash,
 146                                       sizeof(tokenv.pbt_hash));
 147        if (rc)
 148                return rc;
 149
 150        if (memcmp(tokenr->pbt_hash, tokenv.pbt_hash, sizeof(tokenr->pbt_hash)))
 151                return -EACCES;
 152        return 0;
 153}
 154
/*
 * Fault-injection helper: flip one bit in the first non-empty page of
 * @desc so that checksum verification on the peer will fail.
 */
static void corrupt_bulk_data(struct ptlrpc_bulk_desc *desc)
{
	char       *ptr;
	unsigned int    off, i;

	for (i = 0; i < desc->bd_iov_count; i++) {
		if (desc->bd_iov[i].kiov_len == 0)
			continue;

		ptr = kmap(desc->bd_iov[i].kiov_page);
		off = desc->bd_iov[i].kiov_offset & ~CFS_PAGE_MASK;
		ptr[off] ^= 0x1;	/* a single bit flip is enough */
		kunmap(desc->bd_iov[i].kiov_page);
		return;			/* corrupt only the first page */
	}
}
 171
 172/****************************************
 173 * cli_ctx apis                  *
 174 ****************************************/
 175
/*
 * Plain contexts are always valid and never need refreshing; reaching
 * this entry point indicates a logic error elsewhere.
 */
static
int plain_ctx_refresh(struct ptlrpc_cli_ctx *ctx)
{
	/* should never reach here */
	LBUG();
	return 0;
}
 183
/* Plain contexts never expire, so validation always succeeds. */
static
int plain_ctx_validate(struct ptlrpc_cli_ctx *ctx)
{
	return 0;
}
 189
/*
 * Client side: finalize an outgoing plain request.  Stamps the wire
 * security flavor, fills in the plain_header in segment 0, and records
 * the total on-wire request length.  Always returns 0.
 */
static
int plain_ctx_sign(struct ptlrpc_cli_ctx *ctx, struct ptlrpc_request *req)
{
	struct lustre_msg   *msg = req->rq_reqbuf;
	struct plain_header *phdr;

	msg->lm_secflvr = req->rq_flvr.sf_rpc;

	/* header segment was reserved by plain_alloc_reqbuf() */
	phdr = lustre_msg_buf(msg, PLAIN_PACK_HDR_OFF, 0);
	phdr->ph_ver = 0;
	phdr->ph_flags = 0;
	phdr->ph_sp = ctx->cc_sec->ps_part;
	phdr->ph_bulk_hash_alg = req->rq_flvr.u_bulk.hash.hash_alg;

	if (req->rq_pack_udesc)
		phdr->ph_flags |= PLAIN_FL_USER;
	if (req->rq_pack_bulk)
		phdr->ph_flags |= PLAIN_FL_BULK;

	req->rq_reqdata_len = lustre_msg_size_v2(msg->lm_bufcount,
						 msg->lm_buflens);
	return 0;
}
 213
/*
 * Client side: verify an incoming plain reply.  Validates the fixed
 * 4-segment layout and the plain_header, checks the CRC32 protecting
 * early replies, and unpacks the bulk descriptor when present.  On
 * success points rq_repmsg/rq_replen at the embedded message.
 *
 * Returns 0, -EPROTO on malformed replies, -EINVAL on early-reply
 * checksum mismatch.
 */
static
int plain_ctx_verify(struct ptlrpc_cli_ctx *ctx, struct ptlrpc_request *req)
{
	struct lustre_msg   *msg = req->rq_repdata;
	struct plain_header *phdr;
	__u32           cksum;
	int               swabbed;

	if (msg->lm_bufcount != PLAIN_PACK_SEGMENTS) {
		CERROR("unexpected reply buf count %u\n", msg->lm_bufcount);
		return -EPROTO;
	}

	swabbed = ptlrpc_rep_need_swab(req);

	phdr = lustre_msg_buf(msg, PLAIN_PACK_HDR_OFF, sizeof(*phdr));
	if (phdr == NULL) {
		CERROR("missing plain header\n");
		return -EPROTO;
	}

	if (phdr->ph_ver != 0) {
		CERROR("Invalid header version\n");
		return -EPROTO;
	}

	/* expect no user desc in reply */
	if (phdr->ph_flags & PLAIN_FL_USER) {
		CERROR("Unexpected udesc flag in reply\n");
		return -EPROTO;
	}

	/* reply must use the same bulk hash algorithm we requested */
	if (phdr->ph_bulk_hash_alg != req->rq_flvr.u_bulk.hash.hash_alg) {
		CERROR("reply bulk flavor %u != %u\n", phdr->ph_bulk_hash_alg,
		       req->rq_flvr.u_bulk.hash.hash_alg);
		return -EPROTO;
	}

	if (unlikely(req->rq_early)) {
		/* early replies are protected only by a CRC32 of the
		 * message segment, carried in lm_cksum */
		unsigned int hsize = 4;

		cfs_crypto_hash_digest(CFS_HASH_ALG_CRC32,
				lustre_msg_buf(msg, PLAIN_PACK_MSG_OFF, 0),
				lustre_msg_buflen(msg, PLAIN_PACK_MSG_OFF),
				NULL, 0, (unsigned char *)&cksum, &hsize);
		if (cksum != msg->lm_cksum) {
			CDEBUG(D_SEC,
			       "early reply checksum mismatch: %08x != %08x\n",
			       cpu_to_le32(cksum), msg->lm_cksum);
			return -EINVAL;
		}
	} else {
		/* whether we sent with bulk or not, we expect the same
		 * in reply, except for early reply */
		if (!req->rq_early &&
		    !equi(req->rq_pack_bulk == 1,
			  phdr->ph_flags & PLAIN_FL_BULK)) {
			CERROR("%s bulk checksum in reply\n",
			       req->rq_pack_bulk ? "Missing" : "Unexpected");
			return -EPROTO;
		}

		if (phdr->ph_flags & PLAIN_FL_BULK) {
			if (plain_unpack_bsd(msg, swabbed))
				return -EPROTO;
		}
	}

	req->rq_repmsg = lustre_msg_buf(msg, PLAIN_PACK_MSG_OFF, 0);
	req->rq_replen = lustre_msg_buflen(msg, PLAIN_PACK_MSG_OFF);
	return 0;
}
 286
/*
 * Client side: fill the bulk security descriptor of an outgoing request
 * and, for bulk writes with an integrity service, compute the data
 * checksum into the descriptor's token.  Bulk reads are checksummed on
 * receive, not here.  Returns 0 or a checksum-computation error.
 */
static
int plain_cli_wrap_bulk(struct ptlrpc_cli_ctx *ctx,
			struct ptlrpc_request *req,
			struct ptlrpc_bulk_desc *desc)
{
	struct ptlrpc_bulk_sec_desc *bsd;
	struct plain_bulk_token     *token;
	int                       rc;

	LASSERT(req->rq_pack_bulk);
	LASSERT(req->rq_reqbuf->lm_bufcount == PLAIN_PACK_SEGMENTS);

	bsd = lustre_msg_buf(req->rq_reqbuf, PLAIN_PACK_BULK_OFF, 0);
	token = (struct plain_bulk_token *) bsd->bsd_data;

	bsd->bsd_version = 0;
	bsd->bsd_flags = 0;
	bsd->bsd_type = SPTLRPC_BULK_DEFAULT;
	bsd->bsd_svc = SPTLRPC_FLVR_BULK_SVC(req->rq_flvr.sf_rpc);

	/* NULL bulk service: no checksum to produce */
	if (bsd->bsd_svc == SPTLRPC_BULK_SVC_NULL)
		return 0;

	/* read data arrives later; the server checksums it in its reply */
	if (req->rq_bulk_read)
		return 0;

	rc = plain_generate_bulk_csum(desc, req->rq_flvr.u_bulk.hash.hash_alg,
				      token);
	if (rc) {
		CERROR("bulk write: failed to compute checksum: %d\n", rc);
	} else {
		/*
		 * for sending we only compute the wrong checksum instead
		 * of corrupting the data so it is still correct on a redo
		 */
		if (OBD_FAIL_CHECK(OBD_FAIL_OSC_CHECKSUM_SEND) &&
		    req->rq_flvr.u_bulk.hash.hash_alg != BULK_HASH_ALG_NULL)
			token->pbt_hash[0] ^= 0x1;
	}

	return rc;
}
 329
/*
 * Client side: verify bulk data after the transfer completes.  For bulk
 * writes, only check the server's error flag; for bulk reads, trim the
 * iov to the bytes actually transferred and verify the checksum from the
 * server's reply token.  Returns 0, -EIO, or a verification error.
 */
static
int plain_cli_unwrap_bulk(struct ptlrpc_cli_ctx *ctx,
			  struct ptlrpc_request *req,
			  struct ptlrpc_bulk_desc *desc)
{
	struct ptlrpc_bulk_sec_desc *bsdv;
	struct plain_bulk_token     *tokenv;
	int                       rc;
	int                       i, nob;

	LASSERT(req->rq_pack_bulk);
	LASSERT(req->rq_reqbuf->lm_bufcount == PLAIN_PACK_SEGMENTS);
	LASSERT(req->rq_repdata->lm_bufcount == PLAIN_PACK_SEGMENTS);

	bsdv = lustre_msg_buf(req->rq_repdata, PLAIN_PACK_BULK_OFF, 0);
	tokenv = (struct plain_bulk_token *) bsdv->bsd_data;

	if (req->rq_bulk_write) {
		/* the server verified our write; we only relay its verdict */
		if (bsdv->bsd_flags & BSD_FL_ERR)
			return -EIO;
		return 0;
	}

	/* fix the actual data size */
	for (i = 0, nob = 0; i < desc->bd_iov_count; i++) {
		/* clamp each fragment so the total never exceeds what was
		 * actually transferred (a short read leaves stale lengths) */
		if (desc->bd_iov[i].kiov_len + nob > desc->bd_nob_transferred) {
			desc->bd_iov[i].kiov_len =
				desc->bd_nob_transferred - nob;
		}
		nob += desc->bd_iov[i].kiov_len;
	}

	rc = plain_verify_bulk_csum(desc, req->rq_flvr.u_bulk.hash.hash_alg,
				    tokenv);
	if (rc)
		CERROR("bulk read: client verify failed: %d\n", rc);

	return rc;
}
 369
 370/****************************************
 371 * sec apis                          *
 372 ****************************************/
 373
/*
 * Return the cached client context of @plsec, installing a freshly
 * allocated one if the cache is empty.  The returned context carries a
 * reference for the caller.  Returns NULL only when the cache is empty
 * and allocation failed.
 */
static
struct ptlrpc_cli_ctx *plain_sec_install_ctx(struct plain_sec *plsec)
{
	struct ptlrpc_cli_ctx  *ctx, *ctx_new;

	/* allocate speculatively before taking the lock */
	OBD_ALLOC_PTR(ctx_new);

	write_lock(&plsec->pls_lock);

	ctx = plsec->pls_ctx;
	if (ctx) {
		atomic_inc(&ctx->cc_refcount);

		/* lost the race against a concurrent installer */
		if (ctx_new)
			OBD_FREE_PTR(ctx_new);
	} else if (ctx_new) {
		ctx = ctx_new;

		atomic_set(&ctx->cc_refcount, 1); /* for cache */
		ctx->cc_sec = &plsec->pls_base;
		ctx->cc_ops = &plain_ctx_ops;
		ctx->cc_expire = 0;	/* never expires */
		ctx->cc_flags = PTLRPC_CTX_CACHED | PTLRPC_CTX_UPTODATE;
		ctx->cc_vcred.vc_uid = 0;
		spin_lock_init(&ctx->cc_lock);
		INIT_LIST_HEAD(&ctx->cc_req_list);
		INIT_LIST_HEAD(&ctx->cc_gc_chain);

		plsec->pls_ctx = ctx;
		atomic_inc(&plsec->pls_base.ps_nctx);
		atomic_inc(&plsec->pls_base.ps_refcount);

		atomic_inc(&ctx->cc_refcount); /* for caller */
	}

	write_unlock(&plsec->pls_lock);

	return ctx;
}
 413
/*
 * Final teardown of a plain sec.  All references and contexts must be
 * gone by now; drops the import reference and frees the plain_sec.
 */
static
void plain_destroy_sec(struct ptlrpc_sec *sec)
{
	struct plain_sec       *plsec = sec2plsec(sec);

	LASSERT(sec->ps_policy == &plain_policy);
	LASSERT(sec->ps_import);
	LASSERT(atomic_read(&sec->ps_refcount) == 0);
	LASSERT(atomic_read(&sec->ps_nctx) == 0);
	LASSERT(plsec->pls_ctx == NULL);

	class_import_put(sec->ps_import);

	OBD_FREE_PTR(plsec);
}
 429
/* Mark @sec dying; plain secs hold nothing else that needs revoking. */
static
void plain_kill_sec(struct ptlrpc_sec *sec)
{
	sec->ps_dying = 1;
}
 435
/*
 * Create a plain security instance for import @imp with flavor @sf.
 * For a reverse sec (@svc_ctx non-NULL) the client context is installed
 * immediately.  Returns the new sec or NULL on allocation failure.
 */
static
struct ptlrpc_sec *plain_create_sec(struct obd_import *imp,
				    struct ptlrpc_svc_ctx *svc_ctx,
				    struct sptlrpc_flavor *sf)
{
	struct plain_sec       *plsec;
	struct ptlrpc_sec      *sec;
	struct ptlrpc_cli_ctx  *ctx;

	LASSERT(SPTLRPC_FLVR_POLICY(sf->sf_rpc) == SPTLRPC_POLICY_PLAIN);

	OBD_ALLOC_PTR(plsec);
	if (plsec == NULL)
		return NULL;

	/*
	 * initialize plain_sec
	 */
	rwlock_init(&plsec->pls_lock);
	plsec->pls_ctx = NULL;

	sec = &plsec->pls_base;
	sec->ps_policy = &plain_policy;
	atomic_set(&sec->ps_refcount, 0);
	atomic_set(&sec->ps_nctx, 0);
	sec->ps_id = sptlrpc_get_next_secid();
	sec->ps_import = class_import_get(imp);
	sec->ps_flvr = *sf;
	spin_lock_init(&sec->ps_lock);
	INIT_LIST_HEAD(&sec->ps_gc_list);
	sec->ps_gc_interval = 0;	/* no garbage collection needed */
	sec->ps_gc_next = 0;

	/* install ctx immediately if this is a reverse sec */
	if (svc_ctx) {
		ctx = plain_sec_install_ctx(plsec);
		if (ctx == NULL) {
			plain_destroy_sec(sec);
			return NULL;
		}
		sptlrpc_cli_ctx_put(ctx, 1);
	}

	return sec;
}
 481
/*
 * Look up the (single, shared) client context of @sec; @vcred, @create
 * and @remove_dead are ignored since plain has no per-user contexts.
 * Installs a context on first use.  The returned context carries a
 * reference for the caller; NULL only on allocation failure.
 */
static
struct ptlrpc_cli_ctx *plain_lookup_ctx(struct ptlrpc_sec *sec,
					struct vfs_cred *vcred,
					int create, int remove_dead)
{
	struct plain_sec       *plsec = sec2plsec(sec);
	struct ptlrpc_cli_ctx  *ctx;

	read_lock(&plsec->pls_lock);
	ctx = plsec->pls_ctx;
	if (ctx)
		atomic_inc(&ctx->cc_refcount);
	read_unlock(&plsec->pls_lock);

	/* first lookup: take the write path and install one */
	if (unlikely(ctx == NULL))
		ctx = plain_sec_install_ctx(plsec);

	return ctx;
}
 501
/*
 * Free a context whose last reference was dropped, and release the
 * sec reference it pinned.  @sync is unused for plain contexts.
 */
static
void plain_release_ctx(struct ptlrpc_sec *sec,
		       struct ptlrpc_cli_ctx *ctx, int sync)
{
	LASSERT(atomic_read(&sec->ps_refcount) > 0);
	LASSERT(atomic_read(&sec->ps_nctx) > 0);
	LASSERT(atomic_read(&ctx->cc_refcount) == 0);
	LASSERT(ctx->cc_sec == sec);

	OBD_FREE_PTR(ctx);

	atomic_dec(&sec->ps_nctx);
	sptlrpc_sec_put(sec);
}
 516
/*
 * Flush the context cache of @sec.  Since all users share one context,
 * per-uid flushes are meaningless; only a flush-all (@uid == -1) drops
 * the cached context.  Always returns 0.
 */
static
int plain_flush_ctx_cache(struct ptlrpc_sec *sec,
			  uid_t uid, int grace, int force)
{
	struct plain_sec       *plsec = sec2plsec(sec);
	struct ptlrpc_cli_ctx  *ctx;

	/* do nothing unless caller want to flush for 'all' */
	if (uid != -1)
		return 0;

	/* detach under the lock, drop the reference outside it */
	write_lock(&plsec->pls_lock);
	ctx = plsec->pls_ctx;
	plsec->pls_ctx = NULL;
	write_unlock(&plsec->pls_lock);

	if (ctx)
		sptlrpc_cli_ctx_put(ctx, 1);
	return 0;
}
 537
/*
 * Allocate (or reuse a pooled) request buffer laid out with the fixed
 * 4-segment plain format, sized for a @msgsize payload plus optional
 * user descriptor and bulk descriptor segments.  Initializes the
 * segment headers and packs the user descriptor if requested.
 * Returns 0 or -ENOMEM.
 */
static
int plain_alloc_reqbuf(struct ptlrpc_sec *sec,
		       struct ptlrpc_request *req,
		       int msgsize)
{
	__u32 buflens[PLAIN_PACK_SEGMENTS] = { 0, };
	int   alloc_len;

	buflens[PLAIN_PACK_HDR_OFF] = sizeof(struct plain_header);
	buflens[PLAIN_PACK_MSG_OFF] = msgsize;

	if (req->rq_pack_udesc)
		buflens[PLAIN_PACK_USER_OFF] = sptlrpc_current_user_desc_size();

	if (req->rq_pack_bulk) {
		LASSERT(req->rq_bulk_read || req->rq_bulk_write);
		buflens[PLAIN_PACK_BULK_OFF] = PLAIN_BSD_SIZE;
	}

	alloc_len = lustre_msg_size_v2(PLAIN_PACK_SEGMENTS, buflens);

	if (!req->rq_reqbuf) {
		LASSERT(!req->rq_pool);

		/* round up so an enlarged resend can reuse the buffer */
		alloc_len = size_roundup_power2(alloc_len);
		OBD_ALLOC_LARGE(req->rq_reqbuf, alloc_len);
		if (!req->rq_reqbuf)
			return -ENOMEM;

		req->rq_reqbuf_len = alloc_len;
	} else {
		/* pooled buffers are pre-sized; just clear the used part */
		LASSERT(req->rq_pool);
		LASSERT(req->rq_reqbuf_len >= alloc_len);
		memset(req->rq_reqbuf, 0, alloc_len);
	}

	lustre_init_msg_v2(req->rq_reqbuf, PLAIN_PACK_SEGMENTS, buflens, NULL);
	req->rq_reqmsg = lustre_msg_buf(req->rq_reqbuf, PLAIN_PACK_MSG_OFF, 0);

	if (req->rq_pack_udesc)
		sptlrpc_pack_user_desc(req->rq_reqbuf, PLAIN_PACK_USER_OFF);

	return 0;
}
 582
 583static
 584void plain_free_reqbuf(struct ptlrpc_sec *sec,
 585                       struct ptlrpc_request *req)
 586{
 587        if (!req->rq_pool) {
 588                OBD_FREE_LARGE(req->rq_reqbuf, req->rq_reqbuf_len);
 589                req->rq_reqbuf = NULL;
 590                req->rq_reqbuf_len = 0;
 591        }
 592}
 593
/*
 * Allocate the reply buffer for @req in the fixed plain layout, with
 * extra room for an early reply in front of the final one.
 * Returns 0 or -ENOMEM.
 */
static
int plain_alloc_repbuf(struct ptlrpc_sec *sec,
		       struct ptlrpc_request *req,
		       int msgsize)
{
	__u32 buflens[PLAIN_PACK_SEGMENTS] = { 0, };
	int alloc_len;

	buflens[PLAIN_PACK_HDR_OFF] = sizeof(struct plain_header);
	buflens[PLAIN_PACK_MSG_OFF] = msgsize;

	if (req->rq_pack_bulk) {
		LASSERT(req->rq_bulk_read || req->rq_bulk_write);
		buflens[PLAIN_PACK_BULK_OFF] = PLAIN_BSD_SIZE;
	}

	alloc_len = lustre_msg_size_v2(PLAIN_PACK_SEGMENTS, buflens);

	/* add space for early reply */
	alloc_len += plain_at_offset;

	alloc_len = size_roundup_power2(alloc_len);

	OBD_ALLOC_LARGE(req->rq_repbuf, alloc_len);
	if (!req->rq_repbuf)
		return -ENOMEM;

	req->rq_repbuf_len = alloc_len;
	return 0;
}
 624
 625static
 626void plain_free_repbuf(struct ptlrpc_sec *sec,
 627                       struct ptlrpc_request *req)
 628{
 629        OBD_FREE_LARGE(req->rq_repbuf, req->rq_repbuf_len);
 630        req->rq_repbuf = NULL;
 631        req->rq_repbuf_len = 0;
 632}
 633
/*
 * Grow segment @segment of the embedded request message to @newsize,
 * reallocating the wrapper buffer if it is too small.  Pooled buffers
 * must already be large enough.  Returns 0 or -ENOMEM.
 */
static
int plain_enlarge_reqbuf(struct ptlrpc_sec *sec,
			 struct ptlrpc_request *req,
			 int segment, int newsize)
{
	struct lustre_msg      *newbuf;
	int                  oldsize;
	int                  newmsg_size, newbuf_size;

	LASSERT(req->rq_reqbuf);
	LASSERT(req->rq_reqbuf_len >= req->rq_reqlen);
	LASSERT(lustre_msg_buf(req->rq_reqbuf, PLAIN_PACK_MSG_OFF, 0) ==
		req->rq_reqmsg);

	/* compute new embedded msg size: temporarily patch the segment
	 * length, measure, then restore  */
	oldsize = req->rq_reqmsg->lm_buflens[segment];
	req->rq_reqmsg->lm_buflens[segment] = newsize;
	newmsg_size = lustre_msg_size_v2(req->rq_reqmsg->lm_bufcount,
					 req->rq_reqmsg->lm_buflens);
	req->rq_reqmsg->lm_buflens[segment] = oldsize;

	/* compute new wrapper msg size the same way  */
	oldsize = req->rq_reqbuf->lm_buflens[PLAIN_PACK_MSG_OFF];
	req->rq_reqbuf->lm_buflens[PLAIN_PACK_MSG_OFF] = newmsg_size;
	newbuf_size = lustre_msg_size_v2(req->rq_reqbuf->lm_bufcount,
					 req->rq_reqbuf->lm_buflens);
	req->rq_reqbuf->lm_buflens[PLAIN_PACK_MSG_OFF] = oldsize;

	/* request from pool should always have enough buffer */
	LASSERT(!req->rq_pool || req->rq_reqbuf_len >= newbuf_size);

	if (req->rq_reqbuf_len < newbuf_size) {
		newbuf_size = size_roundup_power2(newbuf_size);

		OBD_ALLOC_LARGE(newbuf, newbuf_size);
		if (newbuf == NULL)
			return -ENOMEM;

		memcpy(newbuf, req->rq_reqbuf, req->rq_reqbuf_len);

		OBD_FREE_LARGE(req->rq_reqbuf, req->rq_reqbuf_len);
		req->rq_reqbuf = newbuf;
		req->rq_reqbuf_len = newbuf_size;
		/* rq_reqmsg must be re-derived from the new buffer */
		req->rq_reqmsg = lustre_msg_buf(req->rq_reqbuf,
						PLAIN_PACK_MSG_OFF, 0);
	}

	/* shift trailing segments outward, outer message first */
	_sptlrpc_enlarge_msg_inplace(req->rq_reqbuf, PLAIN_PACK_MSG_OFF,
				     newmsg_size);
	_sptlrpc_enlarge_msg_inplace(req->rq_reqmsg, segment, newsize);

	req->rq_reqlen = newmsg_size;
	return 0;
}
 688
 689/****************************************
 690 * service apis                  *
 691 ****************************************/
 692
/* single shared service context attached to every accepted plain request */
static struct ptlrpc_svc_ctx plain_svc_ctx = {
	.sc_refcount    = ATOMIC_INIT(1),
	.sc_policy      = &plain_policy,
};
 697
 698static
 699int plain_accept(struct ptlrpc_request *req)
 700{
 701        struct lustre_msg   *msg = req->rq_reqbuf;
 702        struct plain_header *phdr;
 703        int               swabbed;
 704
 705        LASSERT(SPTLRPC_FLVR_POLICY(req->rq_flvr.sf_rpc) ==
 706                SPTLRPC_POLICY_PLAIN);
 707
 708        if (SPTLRPC_FLVR_BASE(req->rq_flvr.sf_rpc) !=
 709            SPTLRPC_FLVR_BASE(SPTLRPC_FLVR_PLAIN) ||
 710            SPTLRPC_FLVR_BULK_TYPE(req->rq_flvr.sf_rpc) !=
 711            SPTLRPC_FLVR_BULK_TYPE(SPTLRPC_FLVR_PLAIN)) {
 712                CERROR("Invalid rpc flavor %x\n", req->rq_flvr.sf_rpc);
 713                return SECSVC_DROP;
 714        }
 715
 716        if (msg->lm_bufcount < PLAIN_PACK_SEGMENTS) {
 717                CERROR("unexpected request buf count %u\n", msg->lm_bufcount);
 718                return SECSVC_DROP;
 719        }
 720
 721        swabbed = ptlrpc_req_need_swab(req);
 722
 723        phdr = lustre_msg_buf(msg, PLAIN_PACK_HDR_OFF, sizeof(*phdr));
 724        if (phdr == NULL) {
 725                CERROR("missing plain header\n");
 726                return -EPROTO;
 727        }
 728
 729        if (phdr->ph_ver != 0) {
 730                CERROR("Invalid header version\n");
 731                return -EPROTO;
 732        }
 733
 734        if (phdr->ph_bulk_hash_alg >= BULK_HASH_ALG_MAX) {
 735                CERROR("invalid hash algorithm: %u\n", phdr->ph_bulk_hash_alg);
 736                return -EPROTO;
 737        }
 738
 739        req->rq_sp_from = phdr->ph_sp;
 740        req->rq_flvr.u_bulk.hash.hash_alg = phdr->ph_bulk_hash_alg;
 741
 742        if (phdr->ph_flags & PLAIN_FL_USER) {
 743                if (sptlrpc_unpack_user_desc(msg, PLAIN_PACK_USER_OFF,
 744                                             swabbed)) {
 745                        CERROR("Mal-formed user descriptor\n");
 746                        return SECSVC_DROP;
 747                }
 748
 749                req->rq_pack_udesc = 1;
 750                req->rq_user_desc = lustre_msg_buf(msg, PLAIN_PACK_USER_OFF, 0);
 751        }
 752
 753        if (phdr->ph_flags & PLAIN_FL_BULK) {
 754                if (plain_unpack_bsd(msg, swabbed))
 755                        return SECSVC_DROP;
 756
 757                req->rq_pack_bulk = 1;
 758        }
 759
 760        req->rq_reqmsg = lustre_msg_buf(msg, PLAIN_PACK_MSG_OFF, 0);
 761        req->rq_reqlen = msg->lm_buflens[PLAIN_PACK_MSG_OFF];
 762
 763        req->rq_svc_ctx = &plain_svc_ctx;
 764        atomic_inc(&req->rq_svc_ctx->sc_refcount);
 765
 766        return SECSVC_OK;
 767}
 768
/*
 * Allocate (or reuse a pre-allocated) reply state for @req with room for
 * a @msgsize reply message in the fixed plain layout, then initialize
 * its embedded reply buffer.  Returns 0 or -ENOMEM.
 */
static
int plain_alloc_rs(struct ptlrpc_request *req, int msgsize)
{
	struct ptlrpc_reply_state   *rs;
	__u32                   buflens[PLAIN_PACK_SEGMENTS] = { 0, };
	int                       rs_size = sizeof(*rs);

	LASSERT(msgsize % 8 == 0);

	buflens[PLAIN_PACK_HDR_OFF] = sizeof(struct plain_header);
	buflens[PLAIN_PACK_MSG_OFF] = msgsize;

	if (req->rq_pack_bulk && (req->rq_bulk_read || req->rq_bulk_write))
		buflens[PLAIN_PACK_BULK_OFF] = PLAIN_BSD_SIZE;

	/* the lustre_msg is carved out of the same allocation as the rs */
	rs_size += lustre_msg_size_v2(PLAIN_PACK_SEGMENTS, buflens);

	rs = req->rq_reply_state;

	if (rs) {
		/* pre-allocated */
		LASSERT(rs->rs_size >= rs_size);
	} else {
		OBD_ALLOC_LARGE(rs, rs_size);
		if (rs == NULL)
			return -ENOMEM;

		rs->rs_size = rs_size;
	}

	rs->rs_svc_ctx = req->rq_svc_ctx;
	atomic_inc(&req->rq_svc_ctx->sc_refcount);
	/* reply buffer lives immediately after the reply state */
	rs->rs_repbuf = (struct lustre_msg *) (rs + 1);
	rs->rs_repbuf_len = rs_size - sizeof(*rs);

	lustre_init_msg_v2(rs->rs_repbuf, PLAIN_PACK_SEGMENTS, buflens, NULL);
	rs->rs_msg = lustre_msg_buf_v2(rs->rs_repbuf, PLAIN_PACK_MSG_OFF, 0);

	req->rq_reply_state = rs;
	return 0;
}
 810
/*
 * Release a reply state: drop its service-context reference and free it
 * unless it came from the pre-allocated pool.
 */
static
void plain_free_rs(struct ptlrpc_reply_state *rs)
{
	LASSERT(atomic_read(&rs->rs_svc_ctx->sc_refcount) > 1);
	atomic_dec(&rs->rs_svc_ctx->sc_refcount);

	if (!rs->rs_prealloc)
		OBD_FREE_LARGE(rs, rs->rs_size);
}
 820
/*
 * Server side: finalize an outgoing reply.  Shrinks the message segment
 * to the actual reply length, fills in the plain_header, and either sets
 * the early-reply offset (final reply) or computes the CRC32 protecting
 * an early reply into lm_cksum.  Always returns 0.
 */
static
int plain_authorize(struct ptlrpc_request *req)
{
	struct ptlrpc_reply_state *rs = req->rq_reply_state;
	struct lustre_msg_v2      *msg = rs->rs_repbuf;
	struct plain_header       *phdr;
	int                     len;

	LASSERT(rs);
	LASSERT(msg);

	if (req->rq_replen != msg->lm_buflens[PLAIN_PACK_MSG_OFF])
		len = lustre_shrink_msg(msg, PLAIN_PACK_MSG_OFF,
					req->rq_replen, 1);
	else
		len = lustre_msg_size_v2(msg->lm_bufcount, msg->lm_buflens);

	msg->lm_secflvr = req->rq_flvr.sf_rpc;

	phdr = lustre_msg_buf(msg, PLAIN_PACK_HDR_OFF, 0);
	phdr->ph_ver = 0;
	phdr->ph_flags = 0;
	phdr->ph_bulk_hash_alg = req->rq_flvr.u_bulk.hash.hash_alg;

	if (req->rq_pack_bulk)
		phdr->ph_flags |= PLAIN_FL_BULK;

	rs->rs_repdata_len = len;

	if (likely(req->rq_packed_final)) {
		/* final reply: tell the client where to expect it relative
		 * to any early reply */
		if (lustre_msghdr_get_flags(req->rq_reqmsg) & MSGHDR_AT_SUPPORT)
			req->rq_reply_off = plain_at_offset;
		else
			req->rq_reply_off = 0;
	} else {
		/* early reply: protect the message with a CRC32 checksum,
		 * verified by plain_ctx_verify() on the client */
		unsigned int hsize = 4;

		cfs_crypto_hash_digest(CFS_HASH_ALG_CRC32,
			lustre_msg_buf(msg, PLAIN_PACK_MSG_OFF, 0),
			lustre_msg_buflen(msg, PLAIN_PACK_MSG_OFF),
			NULL, 0, (unsigned char *)&msg->lm_cksum, &hsize);
		req->rq_reply_off = 0;
	}

	return 0;
}
 867
/*
 * Server side, bulk write: verify the client's bulk checksum against the
 * received data and record the verdict in the reply's bulk descriptor
 * (BSD_FL_ERR on failure so the client can relay -EIO).
 * Returns 0 or the verification error.
 */
static
int plain_svc_unwrap_bulk(struct ptlrpc_request *req,
			  struct ptlrpc_bulk_desc *desc)
{
	struct ptlrpc_reply_state   *rs = req->rq_reply_state;
	struct ptlrpc_bulk_sec_desc *bsdr, *bsdv;
	struct plain_bulk_token     *tokenr;
	int                       rc;

	LASSERT(req->rq_bulk_write);
	LASSERT(req->rq_pack_bulk);

	bsdr = lustre_msg_buf(req->rq_reqbuf, PLAIN_PACK_BULK_OFF, 0);
	tokenr = (struct plain_bulk_token *) bsdr->bsd_data;
	bsdv = lustre_msg_buf(rs->rs_repbuf, PLAIN_PACK_BULK_OFF, 0);

	/* echo the request's bulk service back in the reply descriptor */
	bsdv->bsd_version = 0;
	bsdv->bsd_type = SPTLRPC_BULK_DEFAULT;
	bsdv->bsd_svc = bsdr->bsd_svc;
	bsdv->bsd_flags = 0;

	if (bsdr->bsd_svc == SPTLRPC_BULK_SVC_NULL)
		return 0;

	rc = plain_verify_bulk_csum(desc, req->rq_flvr.u_bulk.hash.hash_alg,
				    tokenr);
	if (rc) {
		bsdv->bsd_flags |= BSD_FL_ERR;
		CERROR("bulk write: server verify failed: %d\n", rc);
	}

	return rc;
}
 901
 902static
 903int plain_svc_wrap_bulk(struct ptlrpc_request *req,
 904                        struct ptlrpc_bulk_desc *desc)
 905{
 906        struct ptlrpc_reply_state   *rs = req->rq_reply_state;
 907        struct ptlrpc_bulk_sec_desc *bsdr, *bsdv;
 908        struct plain_bulk_token     *tokenv;
 909        int                       rc;
 910
 911        LASSERT(req->rq_bulk_read);
 912        LASSERT(req->rq_pack_bulk);
 913
 914        bsdr = lustre_msg_buf(req->rq_reqbuf, PLAIN_PACK_BULK_OFF, 0);
 915        bsdv = lustre_msg_buf(rs->rs_repbuf, PLAIN_PACK_BULK_OFF, 0);
 916        tokenv = (struct plain_bulk_token *) bsdv->bsd_data;
 917
 918        bsdv->bsd_version = 0;
 919        bsdv->bsd_type = SPTLRPC_BULK_DEFAULT;
 920        bsdv->bsd_svc = bsdr->bsd_svc;
 921        bsdv->bsd_flags = 0;
 922
 923        if (bsdr->bsd_svc == SPTLRPC_BULK_SVC_NULL)
 924                return 0;
 925
 926        rc = plain_generate_bulk_csum(desc, req->rq_flvr.u_bulk.hash.hash_alg,
 927                                      tokenv);
 928        if (rc) {
 929                CERROR("bulk read: server failed to compute "
 930                       "checksum: %d\n", rc);
 931        } else {
 932                if (OBD_FAIL_CHECK(OBD_FAIL_OSC_CHECKSUM_RECEIVE))
 933                        corrupt_bulk_data(desc);
 934        }
 935
 936        return rc;
 937}
 938
 939static struct ptlrpc_ctx_ops plain_ctx_ops = {
 940        .refresh                = plain_ctx_refresh,
 941        .validate              = plain_ctx_validate,
 942        .sign              = plain_ctx_sign,
 943        .verify          = plain_ctx_verify,
 944        .wrap_bulk            = plain_cli_wrap_bulk,
 945        .unwrap_bulk        = plain_cli_unwrap_bulk,
 946};
 947
 948static struct ptlrpc_sec_cops plain_sec_cops = {
 949        .create_sec          = plain_create_sec,
 950        .destroy_sec        = plain_destroy_sec,
 951        .kill_sec              = plain_kill_sec,
 952        .lookup_ctx          = plain_lookup_ctx,
 953        .release_ctx        = plain_release_ctx,
 954        .flush_ctx_cache        = plain_flush_ctx_cache,
 955        .alloc_reqbuf      = plain_alloc_reqbuf,
 956        .free_reqbuf        = plain_free_reqbuf,
 957        .alloc_repbuf      = plain_alloc_repbuf,
 958        .free_repbuf        = plain_free_repbuf,
 959        .enlarge_reqbuf  = plain_enlarge_reqbuf,
 960};
 961
 962static struct ptlrpc_sec_sops plain_sec_sops = {
 963        .accept          = plain_accept,
 964        .alloc_rs              = plain_alloc_rs,
 965        .authorize            = plain_authorize,
 966        .free_rs                = plain_free_rs,
 967        .unwrap_bulk        = plain_svc_unwrap_bulk,
 968        .wrap_bulk            = plain_svc_wrap_bulk,
 969};
 970
 971static struct ptlrpc_sec_policy plain_policy = {
 972        .sp_owner              = THIS_MODULE,
 973        .sp_name                = "plain",
 974        .sp_policy            = SPTLRPC_POLICY_PLAIN,
 975        .sp_cops                = &plain_sec_cops,
 976        .sp_sops                = &plain_sec_sops,
 977};
 978
 979int sptlrpc_plain_init(void)
 980{
 981        __u32 buflens[PLAIN_PACK_SEGMENTS] = { 0, };
 982        int rc;
 983
 984        buflens[PLAIN_PACK_MSG_OFF] = lustre_msg_early_size();
 985        plain_at_offset = lustre_msg_size_v2(PLAIN_PACK_SEGMENTS, buflens);
 986
 987        rc = sptlrpc_register_policy(&plain_policy);
 988        if (rc)
 989                CERROR("failed to register: %d\n", rc);
 990
 991        return rc;
 992}
 993
 994void sptlrpc_plain_fini(void)
 995{
 996        int rc;
 997
 998        rc = sptlrpc_unregister_policy(&plain_policy);
 999        if (rc)
1000                CERROR("cannot unregister: %d\n", rc);
1001}
1002