linux/drivers/staging/lustre/lustre/ptlrpc/sec_plain.c
/*
 * GPL HEADER START
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 * GPL HEADER END
 */
/*
 * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 *
 * Copyright (c) 2011, 2015, Intel Corporation.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 * Lustre is a trademark of Sun Microsystems, Inc.
 *
 * lustre/ptlrpc/sec_plain.c
 *
 * Author: Eric Mei <ericm@clusterfs.com>
 */

#define DEBUG_SUBSYSTEM S_SEC

#include "../include/obd_support.h"
#include "../include/obd_cksum.h"
#include "../include/obd_class.h"
#include "../include/lustre_net.h"
#include "../include/lustre_sec.h"
#include "ptlrpc_internal.h"

struct plain_sec {
        struct ptlrpc_sec      pls_base;
        rwlock_t               pls_lock;
        struct ptlrpc_cli_ctx *pls_ctx;
};

static inline struct plain_sec *sec2plsec(struct ptlrpc_sec *sec)
{
        return container_of(sec, struct plain_sec, pls_base);
}

static struct ptlrpc_sec_policy plain_policy;
static struct ptlrpc_ctx_ops    plain_ctx_ops;
static struct ptlrpc_svc_ctx    plain_svc_ctx;

static unsigned int plain_at_offset;

/*
 * For simplicity, plain-policy RPCs use a fixed four-segment layout.
 */
#define PLAIN_PACK_SEGMENTS     (4)

#define PLAIN_PACK_HDR_OFF      (0)
#define PLAIN_PACK_MSG_OFF      (1)
#define PLAIN_PACK_USER_OFF     (2)
#define PLAIN_PACK_BULK_OFF     (3)

#define PLAIN_FL_USER           (0x01)
#define PLAIN_FL_BULK           (0x02)

struct plain_header {
        __u8 ph_ver;            /* 0 */
        __u8 ph_flags;
        __u8 ph_sp;             /* source */
        __u8 ph_bulk_hash_alg;  /* complete flavor desc */
        __u8 ph_pad[4];
};

struct plain_bulk_token {
        __u8 pbt_hash[8];
};

#define PLAIN_BSD_SIZE \
        (sizeof(struct ptlrpc_bulk_sec_desc) + sizeof(struct plain_bulk_token))

/****************************************
 * bulk checksum helpers                *
 ****************************************/

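/*
 * Locate the bulk security descriptor in @msg and sanity-check it: it must
 * be present, at least PLAIN_BSD_SIZE bytes long, and must request either
 * the NULL or the integrity (checksum) bulk service.
 */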
static int plain_unpack_bsd(struct lustre_msg *msg, int swabbed)
{
        struct ptlrpc_bulk_sec_desc *bsd;

        if (bulk_sec_desc_unpack(msg, PLAIN_PACK_BULK_OFF, swabbed))
                return -EPROTO;

        bsd = lustre_msg_buf(msg, PLAIN_PACK_BULK_OFF, PLAIN_BSD_SIZE);
        if (!bsd) {
                CERROR("bulk sec desc has short size %d\n",
                       lustre_msg_buflen(msg, PLAIN_PACK_BULK_OFF));
                return -EPROTO;
        }

        if (bsd->bsd_svc != SPTLRPC_BULK_SVC_NULL &&
            bsd->bsd_svc != SPTLRPC_BULK_SVC_INTG) {
                CERROR("invalid bulk svc %u\n", bsd->bsd_svc);
                return -EPROTO;
        }

        return 0;
}

static int plain_generate_bulk_csum(struct ptlrpc_bulk_desc *desc,
                                    __u8 hash_alg,
                                    struct plain_bulk_token *token)
{
        if (hash_alg == BULK_HASH_ALG_NULL)
                return 0;

        memset(token->pbt_hash, 0, sizeof(token->pbt_hash));
        return sptlrpc_get_bulk_checksum(desc, hash_alg, token->pbt_hash,
                                         sizeof(token->pbt_hash));
}

static int plain_verify_bulk_csum(struct ptlrpc_bulk_desc *desc,
                                  __u8 hash_alg,
                                  struct plain_bulk_token *tokenr)
{
        struct plain_bulk_token tokenv;
        int rc;

        if (hash_alg == BULK_HASH_ALG_NULL)
                return 0;

        memset(tokenv.pbt_hash, 0, sizeof(tokenv.pbt_hash));
        rc = sptlrpc_get_bulk_checksum(desc, hash_alg, tokenv.pbt_hash,
                                       sizeof(tokenv.pbt_hash));
        if (rc)
                return rc;

        if (memcmp(tokenr->pbt_hash, tokenv.pbt_hash, sizeof(tokenr->pbt_hash)))
                return -EACCES;
        return 0;
}

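/*
 * Fault-injection helper: flip one bit in the first non-empty page of the
 * bulk descriptor so that checksum verification on the receiving side fails.
 */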
static void corrupt_bulk_data(struct ptlrpc_bulk_desc *desc)
{
        char *ptr;
        unsigned int off, i;

        for (i = 0; i < desc->bd_iov_count; i++) {
                if (desc->bd_iov[i].kiov_len == 0)
                        continue;

                ptr = kmap(desc->bd_iov[i].kiov_page);
                off = desc->bd_iov[i].kiov_offset & ~CFS_PAGE_MASK;
                ptr[off] ^= 0x1;
                kunmap(desc->bd_iov[i].kiov_page);
                return;
        }
}

/****************************************
 * cli_ctx apis                         *
 ****************************************/

static
int plain_ctx_refresh(struct ptlrpc_cli_ctx *ctx)
{
        /* should never reach here */
        LBUG();
        return 0;
}

static
int plain_ctx_validate(struct ptlrpc_cli_ctx *ctx)
{
        return 0;
}

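/*
 * "Sign" an outgoing request.  The plain flavor does no cryptography here;
 * it only fills in the plain_header segment and records the total wire
 * length of the request buffer.
 */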
static
int plain_ctx_sign(struct ptlrpc_cli_ctx *ctx, struct ptlrpc_request *req)
{
        struct lustre_msg *msg = req->rq_reqbuf;
        struct plain_header *phdr;

        msg->lm_secflvr = req->rq_flvr.sf_rpc;

        phdr = lustre_msg_buf(msg, PLAIN_PACK_HDR_OFF, 0);
        phdr->ph_ver = 0;
        phdr->ph_flags = 0;
        phdr->ph_sp = ctx->cc_sec->ps_part;
        phdr->ph_bulk_hash_alg = req->rq_flvr.u_bulk.hash.hash_alg;

        if (req->rq_pack_udesc)
                phdr->ph_flags |= PLAIN_FL_USER;
        if (req->rq_pack_bulk)
                phdr->ph_flags |= PLAIN_FL_BULK;

        req->rq_reqdata_len = lustre_msg_size_v2(msg->lm_bufcount,
                                                 msg->lm_buflens);
        return 0;
}

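/*
 * Verify an incoming reply: check the segment count and the plain header.
 * An early reply is verified against the CRC32 stored in lm_cksum; a full
 * reply must carry a bulk descriptor iff the request packed one.
 */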
static
int plain_ctx_verify(struct ptlrpc_cli_ctx *ctx, struct ptlrpc_request *req)
{
        struct lustre_msg *msg = req->rq_repdata;
        struct plain_header *phdr;
        __u32 cksum;
        int swabbed;

        if (msg->lm_bufcount != PLAIN_PACK_SEGMENTS) {
                CERROR("unexpected reply buf count %u\n", msg->lm_bufcount);
                return -EPROTO;
        }

        swabbed = ptlrpc_rep_need_swab(req);

        phdr = lustre_msg_buf(msg, PLAIN_PACK_HDR_OFF, sizeof(*phdr));
        if (!phdr) {
                CERROR("missing plain header\n");
                return -EPROTO;
        }

        if (phdr->ph_ver != 0) {
                CERROR("Invalid header version\n");
                return -EPROTO;
        }

        /* expect no user desc in reply */
        if (phdr->ph_flags & PLAIN_FL_USER) {
                CERROR("Unexpected udesc flag in reply\n");
                return -EPROTO;
        }

        if (phdr->ph_bulk_hash_alg != req->rq_flvr.u_bulk.hash.hash_alg) {
                CERROR("reply bulk flavor %u != %u\n", phdr->ph_bulk_hash_alg,
                       req->rq_flvr.u_bulk.hash.hash_alg);
                return -EPROTO;
        }

        if (unlikely(req->rq_early)) {
                unsigned int hsize = 4;

                cfs_crypto_hash_digest(CFS_HASH_ALG_CRC32,
                                lustre_msg_buf(msg, PLAIN_PACK_MSG_OFF, 0),
                                lustre_msg_buflen(msg, PLAIN_PACK_MSG_OFF),
                                NULL, 0, (unsigned char *)&cksum, &hsize);
                if (cksum != msg->lm_cksum) {
                        CDEBUG(D_SEC,
                               "early reply checksum mismatch: %08x != %08x\n",
                               cpu_to_le32(cksum), msg->lm_cksum);
                        return -EINVAL;
                }
        } else {
                /* Whether or not we sent with bulk, we expect the same in
                 * the reply; only early replies are exempt.
                 */
                if (!req->rq_early &&
                    !equi(req->rq_pack_bulk == 1,
                          phdr->ph_flags & PLAIN_FL_BULK)) {
                        CERROR("%s bulk checksum in reply\n",
                               req->rq_pack_bulk ? "Missing" : "Unexpected");
                        return -EPROTO;
                }

                if (phdr->ph_flags & PLAIN_FL_BULK) {
                        if (plain_unpack_bsd(msg, swabbed))
                                return -EPROTO;
                }
        }

        req->rq_repmsg = lustre_msg_buf(msg, PLAIN_PACK_MSG_OFF, 0);
        req->rq_replen = lustre_msg_buflen(msg, PLAIN_PACK_MSG_OFF);
        return 0;
}

static
int plain_cli_wrap_bulk(struct ptlrpc_cli_ctx *ctx,
                        struct ptlrpc_request *req,
                        struct ptlrpc_bulk_desc *desc)
{
        struct ptlrpc_bulk_sec_desc *bsd;
        struct plain_bulk_token *token;
        int rc;

        LASSERT(req->rq_pack_bulk);
        LASSERT(req->rq_reqbuf->lm_bufcount == PLAIN_PACK_SEGMENTS);

        bsd = lustre_msg_buf(req->rq_reqbuf, PLAIN_PACK_BULK_OFF, 0);
        token = (struct plain_bulk_token *)bsd->bsd_data;

        bsd->bsd_version = 0;
        bsd->bsd_flags = 0;
        bsd->bsd_type = SPTLRPC_BULK_DEFAULT;
        bsd->bsd_svc = SPTLRPC_FLVR_BULK_SVC(req->rq_flvr.sf_rpc);

        if (bsd->bsd_svc == SPTLRPC_BULK_SVC_NULL)
                return 0;

        if (req->rq_bulk_read)
                return 0;

        rc = plain_generate_bulk_csum(desc, req->rq_flvr.u_bulk.hash.hash_alg,
                                      token);
        if (rc) {
                CERROR("bulk write: failed to compute checksum: %d\n", rc);
        } else {
                /*
                 * On send we only corrupt the checksum, not the data itself,
                 * so the data is still correct if the request is redone.
                 */
                if (OBD_FAIL_CHECK(OBD_FAIL_OSC_CHECKSUM_SEND) &&
                    req->rq_flvr.u_bulk.hash.hash_alg != BULK_HASH_ALG_NULL)
                        token->pbt_hash[0] ^= 0x1;
        }

        return rc;
}

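/*
 * Client-side bulk unwrap.  For bulk writes only the server's error flag is
 * checked; for bulk reads the kiov lengths are first trimmed down to the
 * bytes actually transferred, then the data is verified against the
 * checksum token in the reply.
 */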
static
int plain_cli_unwrap_bulk(struct ptlrpc_cli_ctx *ctx,
                          struct ptlrpc_request *req,
                          struct ptlrpc_bulk_desc *desc)
{
        struct ptlrpc_bulk_sec_desc *bsdv;
        struct plain_bulk_token *tokenv;
        int rc;
        int i, nob;

        LASSERT(req->rq_pack_bulk);
        LASSERT(req->rq_reqbuf->lm_bufcount == PLAIN_PACK_SEGMENTS);
        LASSERT(req->rq_repdata->lm_bufcount == PLAIN_PACK_SEGMENTS);

        bsdv = lustre_msg_buf(req->rq_repdata, PLAIN_PACK_BULK_OFF, 0);
        tokenv = (struct plain_bulk_token *)bsdv->bsd_data;

        if (req->rq_bulk_write) {
                if (bsdv->bsd_flags & BSD_FL_ERR)
                        return -EIO;
                return 0;
        }

        /* trim kiov lengths to the number of bytes actually transferred */
        for (i = 0, nob = 0; i < desc->bd_iov_count; i++) {
                if (desc->bd_iov[i].kiov_len + nob > desc->bd_nob_transferred) {
                        desc->bd_iov[i].kiov_len =
                                desc->bd_nob_transferred - nob;
                }
                nob += desc->bd_iov[i].kiov_len;
        }

        rc = plain_verify_bulk_csum(desc, req->rq_flvr.u_bulk.hash.hash_alg,
                                    tokenv);
        if (rc)
                CERROR("bulk read: client verify failed: %d\n", rc);

        return rc;
}

/****************************************
 * sec apis                             *
 ****************************************/

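/*
 * The plain policy shares a single client context per plain_sec.  Install
 * it under pls_lock; if another thread won the race, reuse the context
 * already installed.  Returns the context with a reference held for the
 * caller, or NULL on allocation failure.
 */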
static
struct ptlrpc_cli_ctx *plain_sec_install_ctx(struct plain_sec *plsec)
{
        struct ptlrpc_cli_ctx *ctx, *ctx_new;

        ctx_new = kzalloc(sizeof(*ctx_new), GFP_NOFS);

        write_lock(&plsec->pls_lock);

        ctx = plsec->pls_ctx;
        if (ctx) {
                atomic_inc(&ctx->cc_refcount);

                kfree(ctx_new);
        } else if (ctx_new) {
                ctx = ctx_new;

                atomic_set(&ctx->cc_refcount, 1); /* for cache */
                ctx->cc_sec = &plsec->pls_base;
                ctx->cc_ops = &plain_ctx_ops;
                ctx->cc_expire = 0;
                ctx->cc_flags = PTLRPC_CTX_CACHED | PTLRPC_CTX_UPTODATE;
                ctx->cc_vcred.vc_uid = 0;
                spin_lock_init(&ctx->cc_lock);
                INIT_LIST_HEAD(&ctx->cc_req_list);
                INIT_LIST_HEAD(&ctx->cc_gc_chain);

                plsec->pls_ctx = ctx;
                atomic_inc(&plsec->pls_base.ps_nctx);
                atomic_inc(&plsec->pls_base.ps_refcount);

                atomic_inc(&ctx->cc_refcount); /* for caller */
        }

        write_unlock(&plsec->pls_lock);

        return ctx;
}

static
void plain_destroy_sec(struct ptlrpc_sec *sec)
{
        struct plain_sec *plsec = sec2plsec(sec);

        LASSERT(sec->ps_policy == &plain_policy);
        LASSERT(sec->ps_import);
        LASSERT(atomic_read(&sec->ps_refcount) == 0);
        LASSERT(atomic_read(&sec->ps_nctx) == 0);
        LASSERT(!plsec->pls_ctx);

        class_import_put(sec->ps_import);

        kfree(plsec);
}

static
void plain_kill_sec(struct ptlrpc_sec *sec)
{
        sec->ps_dying = 1;
}

static
struct ptlrpc_sec *plain_create_sec(struct obd_import *imp,
                                    struct ptlrpc_svc_ctx *svc_ctx,
                                    struct sptlrpc_flavor *sf)
{
        struct plain_sec *plsec;
        struct ptlrpc_sec *sec;
        struct ptlrpc_cli_ctx *ctx;

        LASSERT(SPTLRPC_FLVR_POLICY(sf->sf_rpc) == SPTLRPC_POLICY_PLAIN);

        plsec = kzalloc(sizeof(*plsec), GFP_NOFS);
        if (!plsec)
                return NULL;

        /*
         * initialize plain_sec
         */
        rwlock_init(&plsec->pls_lock);
        plsec->pls_ctx = NULL;

        sec = &plsec->pls_base;
        sec->ps_policy = &plain_policy;
        atomic_set(&sec->ps_refcount, 0);
        atomic_set(&sec->ps_nctx, 0);
        sec->ps_id = sptlrpc_get_next_secid();
        sec->ps_import = class_import_get(imp);
        sec->ps_flvr = *sf;
        spin_lock_init(&sec->ps_lock);
        INIT_LIST_HEAD(&sec->ps_gc_list);
        sec->ps_gc_interval = 0;
        sec->ps_gc_next = 0;

        /* install ctx immediately if this is a reverse sec */
        if (svc_ctx) {
                ctx = plain_sec_install_ctx(plsec);
                if (!ctx) {
                        plain_destroy_sec(sec);
                        return NULL;
                }
                sptlrpc_cli_ctx_put(ctx, 1);
        }

        return sec;
}

static
struct ptlrpc_cli_ctx *plain_lookup_ctx(struct ptlrpc_sec *sec,
                                        struct vfs_cred *vcred,
                                        int create, int remove_dead)
{
        struct plain_sec *plsec = sec2plsec(sec);
        struct ptlrpc_cli_ctx *ctx;

        read_lock(&plsec->pls_lock);
        ctx = plsec->pls_ctx;
        if (ctx)
                atomic_inc(&ctx->cc_refcount);
        read_unlock(&plsec->pls_lock);

        if (unlikely(!ctx))
                ctx = plain_sec_install_ctx(plsec);

        return ctx;
}

static
void plain_release_ctx(struct ptlrpc_sec *sec,
                       struct ptlrpc_cli_ctx *ctx, int sync)
{
        LASSERT(atomic_read(&sec->ps_refcount) > 0);
        LASSERT(atomic_read(&sec->ps_nctx) > 0);
        LASSERT(atomic_read(&ctx->cc_refcount) == 0);
        LASSERT(ctx->cc_sec == sec);

        kfree(ctx);

        atomic_dec(&sec->ps_nctx);
        sptlrpc_sec_put(sec);
}

static
int plain_flush_ctx_cache(struct ptlrpc_sec *sec,
                          uid_t uid, int grace, int force)
{
        struct plain_sec *plsec = sec2plsec(sec);
        struct ptlrpc_cli_ctx *ctx;

        /* do nothing unless the caller wants to flush for 'all' */
        if (uid != -1)
                return 0;

        write_lock(&plsec->pls_lock);
        ctx = plsec->pls_ctx;
        plsec->pls_ctx = NULL;
        write_unlock(&plsec->pls_lock);

        if (ctx)
                sptlrpc_cli_ctx_put(ctx, 1);
        return 0;
}

static
int plain_alloc_reqbuf(struct ptlrpc_sec *sec,
                       struct ptlrpc_request *req,
                       int msgsize)
{
        __u32 buflens[PLAIN_PACK_SEGMENTS] = { 0, };
        int alloc_len;

        buflens[PLAIN_PACK_HDR_OFF] = sizeof(struct plain_header);
        buflens[PLAIN_PACK_MSG_OFF] = msgsize;

        if (req->rq_pack_udesc)
                buflens[PLAIN_PACK_USER_OFF] = sptlrpc_current_user_desc_size();

        if (req->rq_pack_bulk) {
                LASSERT(req->rq_bulk_read || req->rq_bulk_write);
                buflens[PLAIN_PACK_BULK_OFF] = PLAIN_BSD_SIZE;
        }

        alloc_len = lustre_msg_size_v2(PLAIN_PACK_SEGMENTS, buflens);

        if (!req->rq_reqbuf) {
                LASSERT(!req->rq_pool);

                alloc_len = size_roundup_power2(alloc_len);
                req->rq_reqbuf = libcfs_kvzalloc(alloc_len, GFP_NOFS);
                if (!req->rq_reqbuf)
                        return -ENOMEM;

                req->rq_reqbuf_len = alloc_len;
        } else {
                LASSERT(req->rq_pool);
                LASSERT(req->rq_reqbuf_len >= alloc_len);
                memset(req->rq_reqbuf, 0, alloc_len);
        }

        lustre_init_msg_v2(req->rq_reqbuf, PLAIN_PACK_SEGMENTS, buflens, NULL);
        req->rq_reqmsg = lustre_msg_buf(req->rq_reqbuf, PLAIN_PACK_MSG_OFF, 0);

        if (req->rq_pack_udesc)
                sptlrpc_pack_user_desc(req->rq_reqbuf, PLAIN_PACK_USER_OFF);

        return 0;
}

static
void plain_free_reqbuf(struct ptlrpc_sec *sec,
                       struct ptlrpc_request *req)
{
        if (!req->rq_pool) {
                kvfree(req->rq_reqbuf);
                req->rq_reqbuf = NULL;
                req->rq_reqbuf_len = 0;
        }
}

static
int plain_alloc_repbuf(struct ptlrpc_sec *sec,
                       struct ptlrpc_request *req,
                       int msgsize)
{
        __u32 buflens[PLAIN_PACK_SEGMENTS] = { 0, };
        int alloc_len;

        buflens[PLAIN_PACK_HDR_OFF] = sizeof(struct plain_header);
        buflens[PLAIN_PACK_MSG_OFF] = msgsize;

        if (req->rq_pack_bulk) {
                LASSERT(req->rq_bulk_read || req->rq_bulk_write);
                buflens[PLAIN_PACK_BULK_OFF] = PLAIN_BSD_SIZE;
        }

        alloc_len = lustre_msg_size_v2(PLAIN_PACK_SEGMENTS, buflens);

        /* add space for early reply */
        alloc_len += plain_at_offset;

        alloc_len = size_roundup_power2(alloc_len);

        req->rq_repbuf = libcfs_kvzalloc(alloc_len, GFP_NOFS);
        if (!req->rq_repbuf)
                return -ENOMEM;

        req->rq_repbuf_len = alloc_len;
        return 0;
}

static
void plain_free_repbuf(struct ptlrpc_sec *sec,
                       struct ptlrpc_request *req)
{
        kvfree(req->rq_repbuf);
        req->rq_repbuf = NULL;
        req->rq_repbuf_len = 0;
}

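/*
 * Grow one segment of the embedded request message.  Both the embedded
 * message size and the wrapper size are recomputed; if the current buffer
 * is too small a bigger one is allocated and the old contents copied over
 * before the in-place enlargement is applied.
 */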
static
int plain_enlarge_reqbuf(struct ptlrpc_sec *sec,
                         struct ptlrpc_request *req,
                         int segment, int newsize)
{
        struct lustre_msg *newbuf;
        int oldsize;
        int newmsg_size, newbuf_size;

        LASSERT(req->rq_reqbuf);
        LASSERT(req->rq_reqbuf_len >= req->rq_reqlen);
        LASSERT(lustre_msg_buf(req->rq_reqbuf, PLAIN_PACK_MSG_OFF, 0) ==
                req->rq_reqmsg);

        /* compute new embedded msg size */
        oldsize = req->rq_reqmsg->lm_buflens[segment];
        req->rq_reqmsg->lm_buflens[segment] = newsize;
        newmsg_size = lustre_msg_size_v2(req->rq_reqmsg->lm_bufcount,
                                         req->rq_reqmsg->lm_buflens);
        req->rq_reqmsg->lm_buflens[segment] = oldsize;

        /* compute new wrapper msg size */
        oldsize = req->rq_reqbuf->lm_buflens[PLAIN_PACK_MSG_OFF];
        req->rq_reqbuf->lm_buflens[PLAIN_PACK_MSG_OFF] = newmsg_size;
        newbuf_size = lustre_msg_size_v2(req->rq_reqbuf->lm_bufcount,
                                         req->rq_reqbuf->lm_buflens);
        req->rq_reqbuf->lm_buflens[PLAIN_PACK_MSG_OFF] = oldsize;

        /* a request from the pool should always have a big enough buffer */
        LASSERT(!req->rq_pool || req->rq_reqbuf_len >= newbuf_size);

        if (req->rq_reqbuf_len < newbuf_size) {
                newbuf_size = size_roundup_power2(newbuf_size);

                newbuf = libcfs_kvzalloc(newbuf_size, GFP_NOFS);
                if (!newbuf)
                        return -ENOMEM;

                /* Must take imp_lock so that the otherwise unprotected
                 * change of rq_reqmsg does not race with threads traversing
                 * imp_replay_list in parallel.  See LU-3333.
                 * This is a band-aid at best; we really need to deal with
                 * this in the request-enlarging code before unpacking that
                 * is already there.
                 */
                if (req->rq_import)
                        spin_lock(&req->rq_import->imp_lock);

                memcpy(newbuf, req->rq_reqbuf, req->rq_reqbuf_len);

                kvfree(req->rq_reqbuf);
                req->rq_reqbuf = newbuf;
                req->rq_reqbuf_len = newbuf_size;
                req->rq_reqmsg = lustre_msg_buf(req->rq_reqbuf,
                                                PLAIN_PACK_MSG_OFF, 0);

                if (req->rq_import)
                        spin_unlock(&req->rq_import->imp_lock);
        }

        _sptlrpc_enlarge_msg_inplace(req->rq_reqbuf, PLAIN_PACK_MSG_OFF,
                                     newmsg_size);
        _sptlrpc_enlarge_msg_inplace(req->rq_reqmsg, segment, newsize);

        req->rq_reqlen = newmsg_size;
        return 0;
}

/****************************************
 * service apis                         *
 ****************************************/

static struct ptlrpc_svc_ctx plain_svc_ctx = {
        .sc_refcount    = ATOMIC_INIT(1),
        .sc_policy      = &plain_policy,
};

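/*
 * Server-side acceptance of an incoming request: verify that the flavor is
 * plain, validate the plain header, then unpack the optional user
 * descriptor and bulk descriptor segments as indicated by ph_flags.
 */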
static
int plain_accept(struct ptlrpc_request *req)
{
        struct lustre_msg *msg = req->rq_reqbuf;
        struct plain_header *phdr;
        int swabbed;

        LASSERT(SPTLRPC_FLVR_POLICY(req->rq_flvr.sf_rpc) ==
                SPTLRPC_POLICY_PLAIN);

        if (SPTLRPC_FLVR_BASE(req->rq_flvr.sf_rpc) !=
            SPTLRPC_FLVR_BASE(SPTLRPC_FLVR_PLAIN) ||
            SPTLRPC_FLVR_BULK_TYPE(req->rq_flvr.sf_rpc) !=
            SPTLRPC_FLVR_BULK_TYPE(SPTLRPC_FLVR_PLAIN)) {
                CERROR("Invalid rpc flavor %x\n", req->rq_flvr.sf_rpc);
                return SECSVC_DROP;
        }

        if (msg->lm_bufcount < PLAIN_PACK_SEGMENTS) {
                CERROR("unexpected request buf count %u\n", msg->lm_bufcount);
                return SECSVC_DROP;
        }

        swabbed = ptlrpc_req_need_swab(req);

        phdr = lustre_msg_buf(msg, PLAIN_PACK_HDR_OFF, sizeof(*phdr));
        if (!phdr) {
                CERROR("missing plain header\n");
                return -EPROTO;
        }

        if (phdr->ph_ver != 0) {
                CERROR("Invalid header version\n");
                return -EPROTO;
        }

        if (phdr->ph_bulk_hash_alg >= BULK_HASH_ALG_MAX) {
                CERROR("invalid hash algorithm: %u\n", phdr->ph_bulk_hash_alg);
                return -EPROTO;
        }

        req->rq_sp_from = phdr->ph_sp;
        req->rq_flvr.u_bulk.hash.hash_alg = phdr->ph_bulk_hash_alg;

        if (phdr->ph_flags & PLAIN_FL_USER) {
                if (sptlrpc_unpack_user_desc(msg, PLAIN_PACK_USER_OFF,
                                             swabbed)) {
                        CERROR("Mal-formed user descriptor\n");
                        return SECSVC_DROP;
                }

                req->rq_pack_udesc = 1;
                req->rq_user_desc = lustre_msg_buf(msg, PLAIN_PACK_USER_OFF, 0);
        }

        if (phdr->ph_flags & PLAIN_FL_BULK) {
                if (plain_unpack_bsd(msg, swabbed))
                        return SECSVC_DROP;

                req->rq_pack_bulk = 1;
        }

        req->rq_reqmsg = lustre_msg_buf(msg, PLAIN_PACK_MSG_OFF, 0);
        req->rq_reqlen = msg->lm_buflens[PLAIN_PACK_MSG_OFF];

        req->rq_svc_ctx = &plain_svc_ctx;
        atomic_inc(&req->rq_svc_ctx->sc_refcount);

        return SECSVC_OK;
}

static
int plain_alloc_rs(struct ptlrpc_request *req, int msgsize)
{
        struct ptlrpc_reply_state *rs;
        __u32 buflens[PLAIN_PACK_SEGMENTS] = { 0, };
        int rs_size = sizeof(*rs);

        LASSERT(msgsize % 8 == 0);

        buflens[PLAIN_PACK_HDR_OFF] = sizeof(struct plain_header);
        buflens[PLAIN_PACK_MSG_OFF] = msgsize;

        if (req->rq_pack_bulk && (req->rq_bulk_read || req->rq_bulk_write))
                buflens[PLAIN_PACK_BULK_OFF] = PLAIN_BSD_SIZE;

        rs_size += lustre_msg_size_v2(PLAIN_PACK_SEGMENTS, buflens);

        rs = req->rq_reply_state;

        if (rs) {
                /* pre-allocated */
                LASSERT(rs->rs_size >= rs_size);
        } else {
                rs = libcfs_kvzalloc(rs_size, GFP_NOFS);
                if (!rs)
                        return -ENOMEM;

                rs->rs_size = rs_size;
        }

        rs->rs_svc_ctx = req->rq_svc_ctx;
        atomic_inc(&req->rq_svc_ctx->sc_refcount);
        rs->rs_repbuf = (struct lustre_msg *)(rs + 1);
        rs->rs_repbuf_len = rs_size - sizeof(*rs);

        lustre_init_msg_v2(rs->rs_repbuf, PLAIN_PACK_SEGMENTS, buflens, NULL);
        rs->rs_msg = lustre_msg_buf_v2(rs->rs_repbuf, PLAIN_PACK_MSG_OFF, 0);

        req->rq_reply_state = rs;
        return 0;
}

static
void plain_free_rs(struct ptlrpc_reply_state *rs)
{
        LASSERT(atomic_read(&rs->rs_svc_ctx->sc_refcount) > 1);
        atomic_dec(&rs->rs_svc_ctx->sc_refcount);

        if (!rs->rs_prealloc)
                kvfree(rs);
}

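/*
 * Pack the server reply: shrink the message segment to the actual reply
 * length if needed and fill in the plain header.  For replies not packed
 * in the final buffer (early replies), a CRC32 of the message segment is
 * stored in lm_cksum for the client to verify.
 */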
static
int plain_authorize(struct ptlrpc_request *req)
{
        struct ptlrpc_reply_state *rs = req->rq_reply_state;
        struct lustre_msg_v2 *msg = rs->rs_repbuf;
        struct plain_header *phdr;
        int len;

        LASSERT(rs);
        LASSERT(msg);

        if (req->rq_replen != msg->lm_buflens[PLAIN_PACK_MSG_OFF])
                len = lustre_shrink_msg(msg, PLAIN_PACK_MSG_OFF,
                                        req->rq_replen, 1);
        else
                len = lustre_msg_size_v2(msg->lm_bufcount, msg->lm_buflens);

        msg->lm_secflvr = req->rq_flvr.sf_rpc;

        phdr = lustre_msg_buf(msg, PLAIN_PACK_HDR_OFF, 0);
        phdr->ph_ver = 0;
        phdr->ph_flags = 0;
        phdr->ph_bulk_hash_alg = req->rq_flvr.u_bulk.hash.hash_alg;

        if (req->rq_pack_bulk)
                phdr->ph_flags |= PLAIN_FL_BULK;

        rs->rs_repdata_len = len;

        if (likely(req->rq_packed_final)) {
                if (lustre_msghdr_get_flags(req->rq_reqmsg) & MSGHDR_AT_SUPPORT)
                        req->rq_reply_off = plain_at_offset;
                else
                        req->rq_reply_off = 0;
        } else {
                unsigned int hsize = 4;

                cfs_crypto_hash_digest(CFS_HASH_ALG_CRC32,
                        lustre_msg_buf(msg, PLAIN_PACK_MSG_OFF, 0),
                        lustre_msg_buflen(msg, PLAIN_PACK_MSG_OFF),
                        NULL, 0, (unsigned char *)&msg->lm_cksum, &hsize);
                req->rq_reply_off = 0;
        }

        return 0;
}

static
int plain_svc_unwrap_bulk(struct ptlrpc_request *req,
                          struct ptlrpc_bulk_desc *desc)
{
        struct ptlrpc_reply_state *rs = req->rq_reply_state;
        struct ptlrpc_bulk_sec_desc *bsdr, *bsdv;
        struct plain_bulk_token *tokenr;
        int rc;

        LASSERT(req->rq_bulk_write);
        LASSERT(req->rq_pack_bulk);

        bsdr = lustre_msg_buf(req->rq_reqbuf, PLAIN_PACK_BULK_OFF, 0);
        tokenr = (struct plain_bulk_token *)bsdr->bsd_data;
        bsdv = lustre_msg_buf(rs->rs_repbuf, PLAIN_PACK_BULK_OFF, 0);

        bsdv->bsd_version = 0;
        bsdv->bsd_type = SPTLRPC_BULK_DEFAULT;
        bsdv->bsd_svc = bsdr->bsd_svc;
        bsdv->bsd_flags = 0;

        if (bsdr->bsd_svc == SPTLRPC_BULK_SVC_NULL)
                return 0;

        rc = plain_verify_bulk_csum(desc, req->rq_flvr.u_bulk.hash.hash_alg,
                                    tokenr);
        if (rc) {
                bsdv->bsd_flags |= BSD_FL_ERR;
                CERROR("bulk write: server verify failed: %d\n", rc);
        }

        return rc;
}

static
int plain_svc_wrap_bulk(struct ptlrpc_request *req,
                        struct ptlrpc_bulk_desc *desc)
{
        struct ptlrpc_reply_state *rs = req->rq_reply_state;
        struct ptlrpc_bulk_sec_desc *bsdr, *bsdv;
        struct plain_bulk_token *tokenv;
        int rc;

        LASSERT(req->rq_bulk_read);
        LASSERT(req->rq_pack_bulk);

        bsdr = lustre_msg_buf(req->rq_reqbuf, PLAIN_PACK_BULK_OFF, 0);
        bsdv = lustre_msg_buf(rs->rs_repbuf, PLAIN_PACK_BULK_OFF, 0);
        tokenv = (struct plain_bulk_token *)bsdv->bsd_data;

        bsdv->bsd_version = 0;
        bsdv->bsd_type = SPTLRPC_BULK_DEFAULT;
        bsdv->bsd_svc = bsdr->bsd_svc;
        bsdv->bsd_flags = 0;

        if (bsdr->bsd_svc == SPTLRPC_BULK_SVC_NULL)
                return 0;

        rc = plain_generate_bulk_csum(desc, req->rq_flvr.u_bulk.hash.hash_alg,
                                      tokenv);
        if (rc) {
                CERROR("bulk read: server failed to compute checksum: %d\n",
                       rc);
        } else {
                if (OBD_FAIL_CHECK(OBD_FAIL_OSC_CHECKSUM_RECEIVE))
                        corrupt_bulk_data(desc);
        }

        return rc;
}

static struct ptlrpc_ctx_ops plain_ctx_ops = {
        .refresh        = plain_ctx_refresh,
        .validate       = plain_ctx_validate,
        .sign           = plain_ctx_sign,
        .verify         = plain_ctx_verify,
        .wrap_bulk      = plain_cli_wrap_bulk,
        .unwrap_bulk    = plain_cli_unwrap_bulk,
};

static struct ptlrpc_sec_cops plain_sec_cops = {
        .create_sec     = plain_create_sec,
        .destroy_sec    = plain_destroy_sec,
        .kill_sec       = plain_kill_sec,
        .lookup_ctx     = plain_lookup_ctx,
        .release_ctx    = plain_release_ctx,
        .flush_ctx_cache = plain_flush_ctx_cache,
        .alloc_reqbuf   = plain_alloc_reqbuf,
        .free_reqbuf    = plain_free_reqbuf,
        .alloc_repbuf   = plain_alloc_repbuf,
        .free_repbuf    = plain_free_repbuf,
        .enlarge_reqbuf = plain_enlarge_reqbuf,
};

static struct ptlrpc_sec_sops plain_sec_sops = {
        .accept         = plain_accept,
        .alloc_rs       = plain_alloc_rs,
        .authorize      = plain_authorize,
        .free_rs        = plain_free_rs,
        .unwrap_bulk    = plain_svc_unwrap_bulk,
        .wrap_bulk      = plain_svc_wrap_bulk,
};

static struct ptlrpc_sec_policy plain_policy = {
        .sp_owner       = THIS_MODULE,
        .sp_name        = "plain",
        .sp_policy      = SPTLRPC_POLICY_PLAIN,
        .sp_cops        = &plain_sec_cops,
        .sp_sops        = &plain_sec_sops,
};

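/*
 * Module setup: compute plain_at_offset, the size of an early reply (used
 * both to reserve extra space in reply buffers and as the offset of the
 * final reply), then register the plain policy with sptlrpc.
 */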
int sptlrpc_plain_init(void)
{
        __u32 buflens[PLAIN_PACK_SEGMENTS] = { 0, };
        int rc;

        buflens[PLAIN_PACK_MSG_OFF] = lustre_msg_early_size();
        plain_at_offset = lustre_msg_size_v2(PLAIN_PACK_SEGMENTS, buflens);

        rc = sptlrpc_register_policy(&plain_policy);
        if (rc)
                CERROR("failed to register: %d\n", rc);

        return rc;
}

void sptlrpc_plain_fini(void)
{
        int rc;

        rc = sptlrpc_unregister_policy(&plain_policy);
        if (rc)
                CERROR("cannot unregister: %d\n", rc);
}