linux/drivers/staging/lustre/lustre/ptlrpc/gss/gss_krb5_mech.c
<<
>>
Prefs
   1/*
   2 * Modifications for Lustre
   3 *
   4 * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
   5 *
   6 * Copyright (c) 2011, 2012, Intel Corporation.
   7 *
   8 * Author: Eric Mei <ericm@clusterfs.com>
   9 */
  10
  11/*
  12 *  linux/net/sunrpc/gss_krb5_mech.c
  13 *  linux/net/sunrpc/gss_krb5_crypto.c
  14 *  linux/net/sunrpc/gss_krb5_seal.c
  15 *  linux/net/sunrpc/gss_krb5_seqnum.c
  16 *  linux/net/sunrpc/gss_krb5_unseal.c
  17 *
  18 *  Copyright (c) 2001 The Regents of the University of Michigan.
  19 *  All rights reserved.
  20 *
  21 *  Andy Adamson <andros@umich.edu>
  22 *  J. Bruce Fields <bfields@umich.edu>
  23 *
  24 *  Redistribution and use in source and binary forms, with or without
  25 *  modification, are permitted provided that the following conditions
  26 *  are met:
  27 *
  28 *  1. Redistributions of source code must retain the above copyright
  29 *     notice, this list of conditions and the following disclaimer.
  30 *  2. Redistributions in binary form must reproduce the above copyright
  31 *     notice, this list of conditions and the following disclaimer in the
  32 *     documentation and/or other materials provided with the distribution.
  33 *  3. Neither the name of the University nor the names of its
  34 *     contributors may be used to endorse or promote products derived
  35 *     from this software without specific prior written permission.
  36 *
  37 *  THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
  38 *  WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
  39 *  MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
  40 *  DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
  41 *  FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
  42 *  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
  43 *  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
  44 *  BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
  45 *  LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
  46 *  NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
  47 *  SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  48 *
  49 */
  50
  51#define DEBUG_SUBSYSTEM S_SEC
  52#include <linux/init.h>
  53#include <linux/module.h>
  54#include <linux/slab.h>
  55#include <linux/crypto.h>
  56#include <linux/mutex.h>
  57#include <linux/crypto.h>
  58
  59#include <obd.h>
  60#include <obd_class.h>
  61#include <obd_support.h>
  62#include <lustre/lustre_idl.h>
  63#include <lustre_net.h>
  64#include <lustre_import.h>
  65#include <lustre_sec.h>
  66
  67#include "gss_err.h"
  68#include "gss_internal.h"
  69#include "gss_api.h"
  70#include "gss_asn1.h"
  71#include "gss_krb5.h"
  72
  73static spinlock_t krb5_seq_lock;
  74
/*
 * per-enctype parameters: maps a kerberos enctype to the linux crypto
 * tfm names and sizes used to drive it.  instances live in the
 * enctypes[] table below, indexed by ENCTYPE_* value.
 */
struct krb5_enctype {
        char       *ke_dispname;            /* display name for logging */
        char       *ke_enc_name;            /* linux tfm name */
        char       *ke_hash_name;          /* linux tfm name */
        int          ke_enc_mode;           /* linux tfm mode */
        int          ke_hash_size;         /* checksum size */
        int          ke_conf_size;         /* confounder size */
        unsigned int    ke_hash_hmac:1;  /* is hmac? */
};
  84
  85/*
  86 * NOTE: for aes128-cts and aes256-cts, MIT implementation use CTS encryption.
  87 * but currently we simply CBC with padding, because linux doesn't support CTS
  88 * yet. this need to be fixed in the future.
  89 */
  90static struct krb5_enctype enctypes[] = {
  91        [ENCTYPE_DES_CBC_RAW] = {              /* des-cbc-md5 */
  92                "des-cbc-md5",
  93                "cbc(des)",
  94                "md5",
  95                0,
  96                16,
  97                8,
  98                0,
  99        },
 100        [ENCTYPE_DES3_CBC_RAW] = {            /* des3-hmac-sha1 */
 101                "des3-hmac-sha1",
 102                "cbc(des3_ede)",
 103                "hmac(sha1)",
 104                0,
 105                20,
 106                8,
 107                1,
 108        },
 109        [ENCTYPE_AES128_CTS_HMAC_SHA1_96] = {   /* aes128-cts */
 110                "aes128-cts-hmac-sha1-96",
 111                "cbc(aes)",
 112                "hmac(sha1)",
 113                0,
 114                12,
 115                16,
 116                1,
 117        },
 118        [ENCTYPE_AES256_CTS_HMAC_SHA1_96] = {   /* aes256-cts */
 119                "aes256-cts-hmac-sha1-96",
 120                "cbc(aes)",
 121                "hmac(sha1)",
 122                0,
 123                12,
 124                16,
 125                1,
 126        },
 127        [ENCTYPE_ARCFOUR_HMAC] = {            /* arcfour-hmac-md5 */
 128                "arcfour-hmac-md5",
 129                "ecb(arc4)",
 130                "hmac(md5)",
 131                0,
 132                16,
 133                8,
 134                1,
 135        },
 136};
 137
 138#define MAX_ENCTYPES    sizeof(enctypes)/sizeof(struct krb5_enctype)
 139
 140static const char * enctype2str(__u32 enctype)
 141{
 142        if (enctype < MAX_ENCTYPES && enctypes[enctype].ke_dispname)
 143                return enctypes[enctype].ke_dispname;
 144
 145        return "unknown";
 146}
 147
 148static
 149int keyblock_init(struct krb5_keyblock *kb, char *alg_name, int alg_mode)
 150{
 151        kb->kb_tfm = crypto_alloc_blkcipher(alg_name, alg_mode, 0);
 152        if (IS_ERR(kb->kb_tfm)) {
 153                CERROR("failed to alloc tfm: %s, mode %d\n",
 154                       alg_name, alg_mode);
 155                return -1;
 156        }
 157
 158        if (crypto_blkcipher_setkey(kb->kb_tfm, kb->kb_key.data, kb->kb_key.len)) {
 159                CERROR("failed to set %s key, len %d\n",
 160                       alg_name, kb->kb_key.len);
 161                return -1;
 162        }
 163
 164        return 0;
 165}
 166
/*
 * allocate and key the long-lived cipher tfms of an imported context
 * according to its enctype.  a zero ke_hash_size marks a hole in
 * enctypes[] (unsupported enctype).  returns 0 on success, -1 on error.
 */
static
int krb5_init_keys(struct krb5_ctx *kctx)
{
        struct krb5_enctype *ke;

        if (kctx->kc_enctype >= MAX_ENCTYPES ||
            enctypes[kctx->kc_enctype].ke_hash_size == 0) {
                CERROR("unsupported enctype %x\n", kctx->kc_enctype);
                return -1;
        }

        ke = &enctypes[kctx->kc_enctype];

        /* tfm arc4 is stateful, user should alloc-use-free by his own */
        if (kctx->kc_enctype != ENCTYPE_ARCFOUR_HMAC &&
            keyblock_init(&kctx->kc_keye, ke->ke_enc_name, ke->ke_enc_mode))
                return -1;

        /* tfm hmac is stateful, user should alloc-use-free by his own */
        /* keyi/keyc only get a cipher tfm for keyed-hash ("norm")
         * enctypes; hmac enctypes key the hash tfm per use instead */
        if (ke->ke_hash_hmac == 0 &&
            keyblock_init(&kctx->kc_keyi, ke->ke_enc_name, ke->ke_enc_mode))
                return -1;
        if (ke->ke_hash_hmac == 0 &&
            keyblock_init(&kctx->kc_keyc, ke->ke_enc_name, ke->ke_enc_mode))
                return -1;

        return 0;
}
 195
/*
 * release a keyblock: the raw key bytes, plus the cipher tfm if
 * keyblock_init() allocated one.
 */
static
void keyblock_free(struct krb5_keyblock *kb)
{
        rawobj_free(&kb->kb_key);
        if (kb->kb_tfm)
                crypto_free_blkcipher(kb->kb_tfm);
}
 203
/*
 * copy only the raw key material into @new; no tfm is duplicated -
 * callers re-create tfms on the copy via krb5_init_keys() when needed.
 */
static
int keyblock_dup(struct krb5_keyblock *new, struct krb5_keyblock *kb)
{
        return rawobj_dup(&new->kb_key, &kb->kb_key);
}
 209
 210static
 211int get_bytes(char **ptr, const char *end, void *res, int len)
 212{
 213        char *p, *q;
 214        p = *ptr;
 215        q = p + len;
 216        if (q > end || q < p)
 217                return -1;
 218        memcpy(res, p, len);
 219        *ptr = q;
 220        return 0;
 221}
 222
 223static
 224int get_rawobj(char **ptr, const char *end, rawobj_t *res)
 225{
 226        char   *p, *q;
 227        __u32   len;
 228
 229        p = *ptr;
 230        if (get_bytes(&p, end, &len, sizeof(len)))
 231                return -1;
 232
 233        q = p + len;
 234        if (q > end || q < p)
 235                return -1;
 236
 237        OBD_ALLOC_LARGE(res->data, len);
 238        if (!res->data)
 239                return -1;
 240
 241        res->len = len;
 242        memcpy(res->data, p, len);
 243        *ptr = q;
 244        return 0;
 245}
 246
 247static
 248int get_keyblock(char **ptr, const char *end,
 249                 struct krb5_keyblock *kb, __u32 keysize)
 250{
 251        char *buf;
 252
 253        OBD_ALLOC_LARGE(buf, keysize);
 254        if (buf == NULL)
 255                return -1;
 256
 257        if (get_bytes(ptr, end, buf, keysize)) {
 258                OBD_FREE_LARGE(buf, keysize);
 259                return -1;
 260        }
 261
 262        kb->kb_key.len = keysize;
 263        kb->kb_key.data = buf;
 264        return 0;
 265}
 266
/*
 * free everything hanging off a krb5_ctx (mech oid and the three
 * keyblocks); the krb5_ctx struct itself is freed by the caller.
 */
static
void delete_context_kerberos(struct krb5_ctx *kctx)
{
        rawobj_free(&kctx->kc_mech_used);

        keyblock_free(&kctx->kc_keye);
        keyblock_free(&kctx->kc_keyi);
        keyblock_free(&kctx->kc_keyc);
}
 276
/*
 * parse an rfc1964 (version 0/1) context blob produced by the user-space
 * gss daemon and fill @kctx from it.  the blob is a sequence of u32
 * fields and length-prefixed byte strings bounded by @p..@end.
 *
 * returns 0 on success, GSS_S_FAILURE on parse error; partially
 * allocated members are left in @kctx for the caller to release via
 * delete_context_kerberos().
 */
static
__u32 import_context_rfc1964(struct krb5_ctx *kctx, char *p, char *end)
{
        unsigned int    tmp_uint, keysize;

        /* seed_init flag */
        if (get_bytes(&p, end, &tmp_uint, sizeof(tmp_uint)))
                goto out_err;
        kctx->kc_seed_init = (tmp_uint != 0);

        /* seed */
        if (get_bytes(&p, end, kctx->kc_seed, sizeof(kctx->kc_seed)))
                goto out_err;

        /* sign/seal algorithm, not really used now */
        if (get_bytes(&p, end, &tmp_uint, sizeof(tmp_uint)) ||
            get_bytes(&p, end, &tmp_uint, sizeof(tmp_uint)))
                goto out_err;

        /* end time */
        if (get_bytes(&p, end, &kctx->kc_endtime, sizeof(kctx->kc_endtime)))
                goto out_err;

        /* seq send */
        if (get_bytes(&p, end, &tmp_uint, sizeof(tmp_uint)))
                goto out_err;
        kctx->kc_seq_send = tmp_uint;

        /* mech oid */
        if (get_rawobj(&p, end, &kctx->kc_mech_used))
                goto out_err;

        /* old style enc/seq keys in format:
         *   - enctype (u32)
         *   - keysize (u32)
         *   - keydata
         * we decompose them to fit into the new context
         */

        /* enc key */
        if (get_bytes(&p, end, &kctx->kc_enctype, sizeof(kctx->kc_enctype)))
                goto out_err;

        if (get_bytes(&p, end, &keysize, sizeof(keysize)))
                goto out_err;

        if (get_keyblock(&p, end, &kctx->kc_keye, keysize))
                goto out_err;

        /* seq key: must carry the same enctype and keysize as enc key */
        if (get_bytes(&p, end, &tmp_uint, sizeof(tmp_uint)) ||
            tmp_uint != kctx->kc_enctype)
                goto out_err;

        if (get_bytes(&p, end, &tmp_uint, sizeof(tmp_uint)) ||
            tmp_uint != keysize)
                goto out_err;

        if (get_keyblock(&p, end, &kctx->kc_keyc, keysize))
                goto out_err;

        /* old style fallback: rfc1964 blobs carry no separate integrity
         * key, so reuse the seq key material for kc_keyi */
        if (keyblock_dup(&kctx->kc_keyi, &kctx->kc_keyc))
                goto out_err;

        /* the blob must be fully consumed */
        if (p != end)
                goto out_err;

        CDEBUG(D_SEC, "successfully imported rfc1964 context\n");
        return 0;
out_err:
        return GSS_S_FAILURE;
}
 350
/* Flags for version 2 context flags */
#define KRB5_CTX_FLAG_INITIATOR         0x00000001
#define KRB5_CTX_FLAG_CFX               0x00000002
#define KRB5_CTX_FLAG_ACCEPTOR_SUBKEY   0x00000004

/*
 * parse an rfc4121 (version 2) context blob and fill @kctx.  layout:
 * end time, flag word, 64-bit send sequence, enctype, per-key size,
 * key count (must be 3), then the three raw keys ke/ki/kc.
 * returns 0 on success, GSS_S_FAILURE on parse error.
 * NOTE(review): unlike import_context_rfc1964() there is no final
 * p == end check, so trailing garbage is silently accepted.
 */
static
__u32 import_context_rfc4121(struct krb5_ctx *kctx, char *p, char *end)
{
        unsigned int    tmp_uint, keysize;

        /* end time */
        if (get_bytes(&p, end, &kctx->kc_endtime, sizeof(kctx->kc_endtime)))
                goto out_err;

        /* flags */
        if (get_bytes(&p, end, &tmp_uint, sizeof(tmp_uint)))
                goto out_err;

        if (tmp_uint & KRB5_CTX_FLAG_INITIATOR)
                kctx->kc_initiate = 1;
        if (tmp_uint & KRB5_CTX_FLAG_CFX)
                kctx->kc_cfx = 1;
        if (tmp_uint & KRB5_CTX_FLAG_ACCEPTOR_SUBKEY)
                kctx->kc_have_acceptor_subkey = 1;

        /* seq send */
        if (get_bytes(&p, end, &kctx->kc_seq_send, sizeof(kctx->kc_seq_send)))
                goto out_err;

        /* enctype */
        if (get_bytes(&p, end, &kctx->kc_enctype, sizeof(kctx->kc_enctype)))
                goto out_err;

        /* size of each key */
        if (get_bytes(&p, end, &keysize, sizeof(keysize)))
                goto out_err;

        /* number of keys - should always be 3 */
        if (get_bytes(&p, end, &tmp_uint, sizeof(tmp_uint)))
                goto out_err;

        if (tmp_uint != 3) {
                CERROR("Invalid number of keys: %u\n", tmp_uint);
                goto out_err;
        }

        /* ke */
        if (get_keyblock(&p, end, &kctx->kc_keye, keysize))
                goto out_err;
        /* ki */
        if (get_keyblock(&p, end, &kctx->kc_keyi, keysize))
                goto out_err;
        /* kc */
        if (get_keyblock(&p, end, &kctx->kc_keyc, keysize))
                goto out_err;

        CDEBUG(D_SEC, "successfully imported v2 context\n");
        return 0;
out_err:
        return GSS_S_FAILURE;
}
 412
/*
 * The whole purpose here is trying to keep user level gss context parsing
 * from nfs-utils unchanged as possible as we can, they are not quite mature
 * yet, and many stuff still not clear, like heimdal etc.
 */
/*
 * import a context blob handed down from user space: read the version
 * word, dispatch to the rfc1964 (version 0/1) or rfc4121 (version 2)
 * parser, then create the cipher tfms via krb5_init_keys().  on success
 * the new krb5_ctx is stored in @gctx->internal_ctx_id.
 */
static
__u32 gss_import_sec_context_kerberos(rawobj_t *inbuf,
                                      struct gss_ctx *gctx)
{
        struct krb5_ctx *kctx;
        char        *p = (char *) inbuf->data;
        char        *end = (char *) (inbuf->data + inbuf->len);
        unsigned int     tmp_uint, rc;

        if (get_bytes(&p, end, &tmp_uint, sizeof(tmp_uint))) {
                CERROR("Fail to read version\n");
                return GSS_S_FAILURE;
        }

        /* only support versions 0/1 (rfc1964) and 2 (rfc4121) */
        if (tmp_uint > 2) {
                CERROR("Invalid version %u\n", tmp_uint);
                return GSS_S_FAILURE;
        }

        OBD_ALLOC_PTR(kctx);
        if (!kctx)
                return GSS_S_FAILURE;

        if (tmp_uint == 0 || tmp_uint == 1) {
                /* for old versions the initiate flag rides on the version */
                kctx->kc_initiate = tmp_uint;
                rc = import_context_rfc1964(kctx, p, end);
        } else {
                rc = import_context_rfc4121(kctx, p, end);
        }

        if (rc == 0)
                rc = krb5_init_keys(kctx);

        if (rc) {
                /* free whatever the parser managed to allocate */
                delete_context_kerberos(kctx);
                OBD_FREE_PTR(kctx);

                return GSS_S_FAILURE;
        }

        gctx->internal_ctx_id = kctx;
        return GSS_S_COMPLETE;
}
 462
/*
 * build the reverse (opposite-direction) context of @gctx into
 * @gctx_new: the initiator flag is flipped, send/recv sequence numbers
 * are swapped, all key material is duplicated and the cipher tfms are
 * re-created via krb5_init_keys().
 */
static
__u32 gss_copy_reverse_context_kerberos(struct gss_ctx *gctx,
                                        struct gss_ctx *gctx_new)
{
        struct krb5_ctx *kctx = gctx->internal_ctx_id;
        struct krb5_ctx *knew;

        OBD_ALLOC_PTR(knew);
        if (!knew)
                return GSS_S_FAILURE;

        /* the reverse context speaks in the opposite direction */
        knew->kc_initiate = kctx->kc_initiate ? 0 : 1;
        knew->kc_cfx = kctx->kc_cfx;
        knew->kc_seed_init = kctx->kc_seed_init;
        knew->kc_have_acceptor_subkey = kctx->kc_have_acceptor_subkey;
        knew->kc_endtime = kctx->kc_endtime;

        memcpy(knew->kc_seed, kctx->kc_seed, sizeof(kctx->kc_seed));
        /* what we send is what the peer receives, and vice versa */
        knew->kc_seq_send = kctx->kc_seq_recv;
        knew->kc_seq_recv = kctx->kc_seq_send;
        knew->kc_enctype = kctx->kc_enctype;

        if (rawobj_dup(&knew->kc_mech_used, &kctx->kc_mech_used))
                goto out_err;

        if (keyblock_dup(&knew->kc_keye, &kctx->kc_keye))
                goto out_err;
        if (keyblock_dup(&knew->kc_keyi, &kctx->kc_keyi))
                goto out_err;
        if (keyblock_dup(&knew->kc_keyc, &kctx->kc_keyc))
                goto out_err;
        if (krb5_init_keys(knew))
                goto out_err;

        gctx_new->internal_ctx_id = knew;
        CDEBUG(D_SEC, "successfully copied reverse context\n");
        return GSS_S_COMPLETE;

out_err:
        delete_context_kerberos(knew);
        OBD_FREE_PTR(knew);
        return GSS_S_FAILURE;
}
 506
/*
 * gss_api inquire hook: report the context expiry time.
 * NOTE(review): kc_endtime is squeezed through __u32 before widening to
 * unsigned long - presumably matching a 32-bit wire field; confirm
 * before relying on post-2038 end times.
 */
static
__u32 gss_inquire_context_kerberos(struct gss_ctx *gctx,
                                   unsigned long  *endtime)
{
        struct krb5_ctx *kctx = gctx->internal_ctx_id;

        *endtime = (unsigned long) ((__u32) kctx->kc_endtime);
        return GSS_S_COMPLETE;
}
 516
/*
 * gss_api hook: destroy the mech-private context allocated by
 * gss_import_sec_context_kerberos() or
 * gss_copy_reverse_context_kerberos().
 */
static
void gss_delete_sec_context_kerberos(void *internal_ctx)
{
        struct krb5_ctx *kctx = internal_ctx;

        delete_context_kerberos(kctx);
        OBD_FREE_PTR(kctx);
}
 525
 526static
 527void buf_to_sg(struct scatterlist *sg, void *ptr, int len)
 528{
 529        sg_set_buf(sg, ptr, len);
 530}
 531
/*
 * in-place encrypt/decrypt helper: copies @in to @out, then runs @tfm
 * over @out through a single scatterlist entry.  @decrypt selects the
 * direction.  @iv may be NULL for a zero IV; it is copied into a local
 * buffer so the caller's IV is never modified.  @length must be a whole
 * number of cipher blocks.  returns 0 on success, nonzero on error.
 */
static
__u32 krb5_encrypt(struct crypto_blkcipher *tfm,
                   int decrypt,
                   void * iv,
                   void * in,
                   void * out,
                   int length)
{
        struct blkcipher_desc desc;
        struct scatterlist    sg;
        __u8 local_iv[16] = {0};
        __u32 ret = -EINVAL;

        LASSERT(tfm);
        desc.tfm  = tfm;
        desc.info = local_iv;
        desc.flags= 0;

        if (length % crypto_blkcipher_blocksize(tfm) != 0) {
                CERROR("output length %d mismatch blocksize %d\n",
                       length, crypto_blkcipher_blocksize(tfm));
                goto out;
        }

        /* local_iv is only 16 bytes; refuse larger IVs */
        if (crypto_blkcipher_ivsize(tfm) > 16) {
                CERROR("iv size too large %d\n", crypto_blkcipher_ivsize(tfm));
                goto out;
        }

        if (iv)
                memcpy(local_iv, iv, crypto_blkcipher_ivsize(tfm));

        /* copy the input into place, then transform in-place */
        memcpy(out, in, length);
        buf_to_sg(&sg, out, length);

        if (decrypt)
                ret = crypto_blkcipher_decrypt_iv(&desc, &sg, &sg, length);
        else
                ret = crypto_blkcipher_encrypt_iv(&desc, &sg, &sg, length);

out:
        return(ret);
}
 575
 576
/*
 * keyed (HMAC) checksum for hmac-style enctypes: digest msgs[], then
 * the bulk pages in iovs[], then (if non-NULL) the krb5 token header
 * @khdr.  @cksum->data must already be sized to the tfm digest size
 * (done by krb5_make_checksum()).
 * NOTE(review): return codes of crypto_hash_setkey/init/update are
 * ignored; only crypto_hash_final()'s result is propagated.
 */
static inline
int krb5_digest_hmac(struct crypto_hash *tfm,
                     rawobj_t *key,
                     struct krb5_header *khdr,
                     int msgcnt, rawobj_t *msgs,
                     int iovcnt, lnet_kiov_t *iovs,
                     rawobj_t *cksum)
{
        struct hash_desc   desc;
        struct scatterlist sg[1];
        int             i;

        crypto_hash_setkey(tfm, key->data, key->len);
        desc.tfm  = tfm;
        desc.flags= 0;

        crypto_hash_init(&desc);

        /* flat message buffers first */
        for (i = 0; i < msgcnt; i++) {
                if (msgs[i].len == 0)
                        continue;
                buf_to_sg(sg, (char *) msgs[i].data, msgs[i].len);
                crypto_hash_update(&desc, sg, msgs[i].len);
        }

        /* then the bulk i/o pages */
        for (i = 0; i < iovcnt; i++) {
                if (iovs[i].kiov_len == 0)
                        continue;

                sg_set_page(&sg[0], iovs[i].kiov_page, iovs[i].kiov_len,
                            iovs[i].kiov_offset);
                crypto_hash_update(&desc, sg, iovs[i].kiov_len);
        }

        /* finally the token header, so the checksum covers it too */
        if (khdr) {
                buf_to_sg(sg, (char *) khdr, sizeof(*khdr));
                crypto_hash_update(&desc, sg, sizeof(*khdr));
        }

        return crypto_hash_final(&desc, cksum->data);
}
 618
 619
/*
 * checksum for non-hmac enctypes: a plain digest over the messages,
 * pages and token header, then encrypted in place with the keyblock's
 * cipher tfm to bind it to the key (hash-then-encrypt).
 * NOTE(review): intermediate crypto_hash_* return codes are ignored;
 * only the final krb5_encrypt() result is returned.
 */
static inline
int krb5_digest_norm(struct crypto_hash *tfm,
                     struct krb5_keyblock *kb,
                     struct krb5_header *khdr,
                     int msgcnt, rawobj_t *msgs,
                     int iovcnt, lnet_kiov_t *iovs,
                     rawobj_t *cksum)
{
        struct hash_desc   desc;
        struct scatterlist sg[1];
        int             i;

        LASSERT(kb->kb_tfm);
        desc.tfm  = tfm;
        desc.flags= 0;

        crypto_hash_init(&desc);

        for (i = 0; i < msgcnt; i++) {
                if (msgs[i].len == 0)
                        continue;
                buf_to_sg(sg, (char *) msgs[i].data, msgs[i].len);
                crypto_hash_update(&desc, sg, msgs[i].len);
        }

        for (i = 0; i < iovcnt; i++) {
                if (iovs[i].kiov_len == 0)
                        continue;

                sg_set_page(&sg[0], iovs[i].kiov_page, iovs[i].kiov_len,
                            iovs[i].kiov_offset);
                crypto_hash_update(&desc, sg, iovs[i].kiov_len);
        }

        if (khdr) {
                buf_to_sg(sg, (char *) khdr, sizeof(*khdr));
                crypto_hash_update(&desc, sg, sizeof(*khdr));
        }

        crypto_hash_final(&desc, cksum->data);

        /* encrypt the digest in place with the checksum key */
        return krb5_encrypt(kb->kb_tfm, 0, NULL, cksum->data,
                            cksum->data, cksum->len);
}
 664
 665/*
 666 * compute (keyed/keyless) checksum against the plain text which appended
 667 * with krb5 wire token header.
 668 */
 669static
 670__s32 krb5_make_checksum(__u32 enctype,
 671                         struct krb5_keyblock *kb,
 672                         struct krb5_header *khdr,
 673                         int msgcnt, rawobj_t *msgs,
 674                         int iovcnt, lnet_kiov_t *iovs,
 675                         rawobj_t *cksum)
 676{
 677        struct krb5_enctype   *ke = &enctypes[enctype];
 678        struct crypto_hash *tfm;
 679        __u32             code = GSS_S_FAILURE;
 680        int                 rc;
 681
 682        if (!(tfm = ll_crypto_alloc_hash(ke->ke_hash_name, 0, 0))) {
 683                CERROR("failed to alloc TFM: %s\n", ke->ke_hash_name);
 684                return GSS_S_FAILURE;
 685        }
 686
 687        cksum->len = crypto_hash_digestsize(tfm);
 688        OBD_ALLOC_LARGE(cksum->data, cksum->len);
 689        if (!cksum->data) {
 690                cksum->len = 0;
 691                goto out_tfm;
 692        }
 693
 694        if (ke->ke_hash_hmac)
 695                rc = krb5_digest_hmac(tfm, &kb->kb_key,
 696                                      khdr, msgcnt, msgs, iovcnt, iovs, cksum);
 697        else
 698                rc = krb5_digest_norm(tfm, kb,
 699                                      khdr, msgcnt, msgs, iovcnt, iovs, cksum);
 700
 701        if (rc == 0)
 702                code = GSS_S_COMPLETE;
 703out_tfm:
 704        crypto_free_hash(tfm);
 705        return code;
 706}
 707
 708static void fill_krb5_header(struct krb5_ctx *kctx,
 709                             struct krb5_header *khdr,
 710                             int privacy)
 711{
 712        unsigned char acceptor_flag;
 713
 714        acceptor_flag = kctx->kc_initiate ? 0 : FLAG_SENDER_IS_ACCEPTOR;
 715
 716        if (privacy) {
 717                khdr->kh_tok_id = cpu_to_be16(KG_TOK_WRAP_MSG);
 718                khdr->kh_flags = acceptor_flag | FLAG_WRAP_CONFIDENTIAL;
 719                khdr->kh_ec = cpu_to_be16(0);
 720                khdr->kh_rrc = cpu_to_be16(0);
 721        } else {
 722                khdr->kh_tok_id = cpu_to_be16(KG_TOK_MIC_MSG);
 723                khdr->kh_flags = acceptor_flag;
 724                khdr->kh_ec = cpu_to_be16(0xffff);
 725                khdr->kh_rrc = cpu_to_be16(0xffff);
 726        }
 727
 728        khdr->kh_filler = 0xff;
 729        spin_lock(&krb5_seq_lock);
 730        khdr->kh_seq = cpu_to_be64(kctx->kc_seq_send++);
 731        spin_unlock(&krb5_seq_lock);
 732}
 733
 734static __u32 verify_krb5_header(struct krb5_ctx *kctx,
 735                                struct krb5_header *khdr,
 736                                int privacy)
 737{
 738        unsigned char acceptor_flag;
 739        __u16    tok_id, ec_rrc;
 740
 741        acceptor_flag = kctx->kc_initiate ? FLAG_SENDER_IS_ACCEPTOR : 0;
 742
 743        if (privacy) {
 744                tok_id = KG_TOK_WRAP_MSG;
 745                ec_rrc = 0x0;
 746        } else {
 747                tok_id = KG_TOK_MIC_MSG;
 748                ec_rrc = 0xffff;
 749        }
 750
 751        /* sanity checks */
 752        if (be16_to_cpu(khdr->kh_tok_id) != tok_id) {
 753                CERROR("bad token id\n");
 754                return GSS_S_DEFECTIVE_TOKEN;
 755        }
 756        if ((khdr->kh_flags & FLAG_SENDER_IS_ACCEPTOR) != acceptor_flag) {
 757                CERROR("bad direction flag\n");
 758                return GSS_S_BAD_SIG;
 759        }
 760        if (privacy && (khdr->kh_flags & FLAG_WRAP_CONFIDENTIAL) == 0) {
 761                CERROR("missing confidential flag\n");
 762                return GSS_S_BAD_SIG;
 763        }
 764        if (khdr->kh_filler != 0xff) {
 765                CERROR("bad filler\n");
 766                return GSS_S_DEFECTIVE_TOKEN;
 767        }
 768        if (be16_to_cpu(khdr->kh_ec) != ec_rrc ||
 769            be16_to_cpu(khdr->kh_rrc) != ec_rrc) {
 770                CERROR("bad EC or RRC\n");
 771                return GSS_S_DEFECTIVE_TOKEN;
 772        }
 773        return GSS_S_COMPLETE;
 774}
 775
/*
 * gss_api get_mic: emit a MIC token into @token - a krb5 header followed
 * by the trailing ke_hash_size bytes of the keyed checksum (kc_keyc)
 * over msgs/iovs.  the caller must supply at least sizeof(krb5_header) +
 * ke_hash_size bytes of buffer; token->len is set to the final size.
 */
static
__u32 gss_get_mic_kerberos(struct gss_ctx *gctx,
                           int msgcnt,
                           rawobj_t *msgs,
                           int iovcnt,
                           lnet_kiov_t *iovs,
                           rawobj_t *token)
{
        struct krb5_ctx     *kctx = gctx->internal_ctx_id;
        struct krb5_enctype *ke = &enctypes[kctx->kc_enctype];
        struct krb5_header  *khdr;
        rawobj_t             cksum = RAWOBJ_EMPTY;

        /* fill krb5 header */
        LASSERT(token->len >= sizeof(*khdr));
        khdr = (struct krb5_header *) token->data;
        fill_krb5_header(kctx, khdr, 0);

        /* checksum covers messages, pages and the header itself */
        if (krb5_make_checksum(kctx->kc_enctype, &kctx->kc_keyc,
                               khdr, msgcnt, msgs, iovcnt, iovs, &cksum))
                return GSS_S_FAILURE;

        /* only the last ke_hash_size digest bytes go on the wire */
        LASSERT(cksum.len >= ke->ke_hash_size);
        LASSERT(token->len >= sizeof(*khdr) + ke->ke_hash_size);
        memcpy(khdr + 1, cksum.data + cksum.len - ke->ke_hash_size,
               ke->ke_hash_size);

        token->len = sizeof(*khdr) + ke->ke_hash_size;
        rawobj_free(&cksum);
        return GSS_S_COMPLETE;
}
 808
/*
 * gss_api verify_mic: check @token (as built by gss_get_mic_kerberos)
 * against msgs/iovs - validate the krb5 header, recompute the keyed
 * checksum and compare its trailing ke_hash_size bytes.
 * NOTE(review): memcmp() is not constant-time; a timing-safe compare
 * would be preferable for MAC verification.
 */
static
__u32 gss_verify_mic_kerberos(struct gss_ctx *gctx,
                              int msgcnt,
                              rawobj_t *msgs,
                              int iovcnt,
                              lnet_kiov_t *iovs,
                              rawobj_t *token)
{
        struct krb5_ctx     *kctx = gctx->internal_ctx_id;
        struct krb5_enctype *ke = &enctypes[kctx->kc_enctype];
        struct krb5_header  *khdr;
        rawobj_t             cksum = RAWOBJ_EMPTY;
        __u32           major;

        if (token->len < sizeof(*khdr)) {
                CERROR("short signature: %u\n", token->len);
                return GSS_S_DEFECTIVE_TOKEN;
        }

        khdr = (struct krb5_header *) token->data;

        major = verify_krb5_header(kctx, khdr, 0);
        if (major != GSS_S_COMPLETE) {
                CERROR("bad krb5 header\n");
                return major;
        }

        if (token->len < sizeof(*khdr) + ke->ke_hash_size) {
                CERROR("short signature: %u, require %d\n",
                       token->len, (int) sizeof(*khdr) + ke->ke_hash_size);
                return GSS_S_FAILURE;
        }

        /* recompute the checksum over the same inputs the sender used */
        if (krb5_make_checksum(kctx->kc_enctype, &kctx->kc_keyc,
                               khdr, msgcnt, msgs, iovcnt, iovs, &cksum)) {
                CERROR("failed to make checksum\n");
                return GSS_S_FAILURE;
        }

        LASSERT(cksum.len >= ke->ke_hash_size);
        if (memcmp(khdr + 1, cksum.data + cksum.len - ke->ke_hash_size,
                   ke->ke_hash_size)) {
                CERROR("checksum mismatch\n");
                rawobj_free(&cksum);
                return GSS_S_BAD_SIG;
        }

        rawobj_free(&cksum);
        return GSS_S_COMPLETE;
}
 859
 860static
 861int add_padding(rawobj_t *msg, int msg_buflen, int blocksize)
 862{
 863        int padding;
 864
 865        padding = (blocksize - (msg->len & (blocksize - 1))) &
 866                  (blocksize - 1);
 867        if (!padding)
 868                return 0;
 869
 870        if (msg->len + padding > msg_buflen) {
 871                CERROR("bufsize %u too small: datalen %u, padding %u\n",
 872                        msg_buflen, msg->len, padding);
 873                return -EINVAL;
 874        }
 875
 876        memset(msg->data + msg->len, padding, padding);
 877        msg->len += padding;
 878        return 0;
 879}
 880
/*
 * run the blkcipher @tfm over the concatenation of inobjs[], writing the
 * result contiguously into the caller-provided buffer @outobj->data
 * (@enc selects encrypt vs decrypt).  @mode_ecb selects the non-IV
 * entry points; otherwise a zero IV in local_iv is carried across the
 * whole sequence.  on success outobj->len is trimmed to the total bytes
 * produced.  returns 0 or the crypto-layer error code.
 */
static
int krb5_encrypt_rawobjs(struct crypto_blkcipher *tfm,
                         int mode_ecb,
                         int inobj_cnt,
                         rawobj_t *inobjs,
                         rawobj_t *outobj,
                         int enc)
{
        struct blkcipher_desc desc;
        struct scatterlist    src, dst;
        __u8              local_iv[16] = {0}, *buf;
        __u32            datalen = 0;
        int                i, rc;

        buf = outobj->data;
        desc.tfm  = tfm;
        desc.info = local_iv;
        desc.flags = 0;

        for (i = 0; i < inobj_cnt; i++) {
                /* caller must size outobj to hold every input object */
                LASSERT(buf + inobjs[i].len <= outobj->data + outobj->len);

                buf_to_sg(&src, inobjs[i].data, inobjs[i].len);
                buf_to_sg(&dst, buf, outobj->len - datalen);

                if (mode_ecb) {
                        if (enc)
                                rc = crypto_blkcipher_encrypt(
                                        &desc, &dst, &src, src.length);
                        else
                                rc = crypto_blkcipher_decrypt(
                                        &desc, &dst, &src, src.length);
                } else {
                        if (enc)
                                rc = crypto_blkcipher_encrypt_iv(
                                        &desc, &dst, &src, src.length);
                        else
                                rc = crypto_blkcipher_decrypt_iv(
                                        &desc, &dst, &src, src.length);
                }

                if (rc) {
                        CERROR("encrypt error %d\n", rc);
                        return rc;
                }

                datalen += inobjs[i].len;
                buf += inobjs[i].len;
        }

        outobj->len = datalen;
        return 0;
}
 934
/*
 * Encrypt a bulk descriptor: first the confounder, then every bulk page,
 * then the krb5 header itself, all chained through a single CBC stream
 * started from a zero IV.  The encrypted confounder and encrypted header
 * are stored in cipher->data; the encrypted pages go to desc->bd_enc_iov[].
 * Because the IV chains through every call, the trailing encrypted header
 * depends on all page contents.
 *
 * if adj_nob != 0, we adjust desc->bd_nob to the actual cipher text size.
 *
 * Returns 0 on success or the crypto layer's negative error code.
 */
static
int krb5_encrypt_bulk(struct crypto_blkcipher *tfm,
                      struct krb5_header *khdr,
                      char *confounder,
                      struct ptlrpc_bulk_desc *desc,
                      rawobj_t *cipher,
                      int adj_nob)
{
        struct blkcipher_desc   ciph_desc;
        __u8                local_iv[16] = {0};
        struct scatterlist      src, dst;
        int                  blocksize, i, rc, nob = 0;

        LASSERT(desc->bd_iov_count);
        LASSERT(desc->bd_enc_iov);

        blocksize = crypto_blkcipher_blocksize(tfm);
        LASSERT(blocksize > 1);
        /* caller must provide exactly room for confounder + krb5 header */
        LASSERT(cipher->len == blocksize + sizeof(*khdr));

        ciph_desc.tfm  = tfm;
        ciph_desc.info = local_iv;
        ciph_desc.flags = 0;

        /* encrypt confounder */
        buf_to_sg(&src, confounder, blocksize);
        buf_to_sg(&dst, cipher->data, blocksize);

        rc = crypto_blkcipher_encrypt_iv(&ciph_desc, &dst, &src, blocksize);
        if (rc) {
                CERROR("error to encrypt confounder: %d\n", rc);
                return rc;
        }

        /* encrypt clear pages */
        for (i = 0; i < desc->bd_iov_count; i++) {
                /* round each fragment up to a whole cipher block */
                sg_set_page(&src, desc->bd_iov[i].kiov_page,
                            (desc->bd_iov[i].kiov_len + blocksize - 1) &
                            (~(blocksize - 1)),
                            desc->bd_iov[i].kiov_offset);
                if (adj_nob)
                        nob += src.length;
                sg_set_page(&dst, desc->bd_enc_iov[i].kiov_page, src.length,
                            src.offset);

                /* publish the (rounded) geometry of the encrypted page */
                desc->bd_enc_iov[i].kiov_offset = dst.offset;
                desc->bd_enc_iov[i].kiov_len = dst.length;

                rc = crypto_blkcipher_encrypt_iv(&ciph_desc, &dst, &src,
                                                    src.length);
                if (rc) {
                        CERROR("error to encrypt page: %d\n", rc);
                        return rc;
                }
        }

        /* encrypt krb5 header last, so the chained IV ties it to the
         * whole cipher stream */
        buf_to_sg(&src, khdr, sizeof(*khdr));
        buf_to_sg(&dst, cipher->data + blocksize, sizeof(*khdr));

        rc = crypto_blkcipher_encrypt_iv(&ciph_desc,
                                            &dst, &src, sizeof(*khdr));
        if (rc) {
                CERROR("error to encrypt krb5 header: %d\n", rc);
                return rc;
        }

        if (adj_nob)
                desc->bd_nob = nob;

        return 0;
}
1010
/*
 * Decrypt a bulk transfer: confounder, pages, then trailing krb5 header,
 * all chained through one CBC stream (zero IV), mirroring
 * krb5_encrypt_bulk().  The decrypted trailing header must match @khdr,
 * which proves the cipher stream was not truncated or reordered.
 *
 * desc->bd_nob_transferred is the size of cipher text received.
 * desc->bd_nob is the target size of plain text supposed to be.
 *
 * if adj_nob != 0, we adjust each page's kiov_len to the actual
 * plain text size.
 * - for client read: we don't know data size for each page, so
 *   bd_iov[]->kiov_len is set to PAGE_SIZE, but actual data received might
 *   be smaller, so we need to adjust it according to bd_enc_iov[]->kiov_len.
 *   this means we DO NOT support the situation that server send an odd size
 *   data in a page which is not the last one.
 * - for server write: we knows exactly data size for each page being expected,
 *   thus kiov_len is accurate already, so we should not adjust it at all.
 *   and bd_enc_iov[]->kiov_len should be round_up(bd_iov[]->kiov_len) which
 *   should have been done by prep_bulk().
 */
static
int krb5_decrypt_bulk(struct crypto_blkcipher *tfm,
                      struct krb5_header *khdr,
                      struct ptlrpc_bulk_desc *desc,
                      rawobj_t *cipher,
                      rawobj_t *plain,
                      int adj_nob)
{
        struct blkcipher_desc   ciph_desc;
        __u8                local_iv[16] = {0};
        struct scatterlist      src, dst;
        int                  ct_nob = 0, pt_nob = 0;
        int                  blocksize, i, rc;

        LASSERT(desc->bd_iov_count);
        LASSERT(desc->bd_enc_iov);
        LASSERT(desc->bd_nob_transferred);

        blocksize = crypto_blkcipher_blocksize(tfm);
        LASSERT(blocksize > 1);
        LASSERT(cipher->len == blocksize + sizeof(*khdr));

        ciph_desc.tfm  = tfm;
        ciph_desc.info = local_iv;
        ciph_desc.flags = 0;

        /* CBC can only decrypt whole blocks */
        if (desc->bd_nob_transferred % blocksize) {
                CERROR("odd transferred nob: %d\n", desc->bd_nob_transferred);
                return -EPROTO;
        }

        /* decrypt head (confounder) */
        buf_to_sg(&src, cipher->data, blocksize);
        buf_to_sg(&dst, plain->data, blocksize);

        rc = crypto_blkcipher_decrypt_iv(&ciph_desc, &dst, &src, blocksize);
        if (rc) {
                CERROR("error to decrypt confounder: %d\n", rc);
                return rc;
        }

        for (i = 0; i < desc->bd_iov_count && ct_nob < desc->bd_nob_transferred;
             i++) {
                /* every encrypted fragment must be block-aligned */
                if (desc->bd_enc_iov[i].kiov_offset % blocksize != 0 ||
                    desc->bd_enc_iov[i].kiov_len % blocksize != 0) {
                        CERROR("page %d: odd offset %u len %u, blocksize %d\n",
                               i, desc->bd_enc_iov[i].kiov_offset,
                               desc->bd_enc_iov[i].kiov_len, blocksize);
                        return -EFAULT;
                }

                if (adj_nob) {
                        /* clamp cipher fragment to what was transferred,
                         * and plain fragment to the expected total */
                        if (ct_nob + desc->bd_enc_iov[i].kiov_len >
                            desc->bd_nob_transferred)
                                desc->bd_enc_iov[i].kiov_len =
                                        desc->bd_nob_transferred - ct_nob;

                        desc->bd_iov[i].kiov_len = desc->bd_enc_iov[i].kiov_len;
                        if (pt_nob + desc->bd_enc_iov[i].kiov_len >desc->bd_nob)
                                desc->bd_iov[i].kiov_len = desc->bd_nob -pt_nob;
                } else {
                        /* this should be guaranteed by LNET */
                        LASSERT(ct_nob + desc->bd_enc_iov[i].kiov_len <=
                                desc->bd_nob_transferred);
                        LASSERT(desc->bd_iov[i].kiov_len <=
                                desc->bd_enc_iov[i].kiov_len);
                }

                if (desc->bd_enc_iov[i].kiov_len == 0)
                        continue;

                sg_set_page(&src, desc->bd_enc_iov[i].kiov_page,
                            desc->bd_enc_iov[i].kiov_len,
                            desc->bd_enc_iov[i].kiov_offset);
                dst = src;
                /* full blocks decrypt straight into the plain page;
                 * a partial tail decrypts in place in the enc page and
                 * is copied out below */
                if (desc->bd_iov[i].kiov_len % blocksize == 0)
                        sg_assign_page(&dst, desc->bd_iov[i].kiov_page);

                rc = crypto_blkcipher_decrypt_iv(&ciph_desc, &dst, &src,
                                                    src.length);
                if (rc) {
                        CERROR("error to decrypt page: %d\n", rc);
                        return rc;
                }

                if (desc->bd_iov[i].kiov_len % blocksize != 0) {
                        /* NOTE(review): the source offset here is
                         * bd_iov[i].kiov_offset; this assumes
                         * bd_enc_iov[i].kiov_offset equals it (as set by
                         * prep_bulk / encrypt_bulk) — confirm for the
                         * client-read path */
                        memcpy(page_address(desc->bd_iov[i].kiov_page) +
                               desc->bd_iov[i].kiov_offset,
                               page_address(desc->bd_enc_iov[i].kiov_page) +
                               desc->bd_iov[i].kiov_offset,
                               desc->bd_iov[i].kiov_len);
                }

                ct_nob += desc->bd_enc_iov[i].kiov_len;
                pt_nob += desc->bd_iov[i].kiov_len;
        }

        if (unlikely(ct_nob != desc->bd_nob_transferred)) {
                CERROR("%d cipher text transferred but only %d decrypted\n",
                       desc->bd_nob_transferred, ct_nob);
                return -EFAULT;
        }

        if (unlikely(!adj_nob && pt_nob != desc->bd_nob)) {
                CERROR("%d plain text expected but only %d received\n",
                       desc->bd_nob, pt_nob);
                return -EFAULT;
        }

        /* if needed, clear up the rest unused iovs */
        if (adj_nob)
                while (i < desc->bd_iov_count)
                        desc->bd_iov[i++].kiov_len = 0;

        /* decrypt tail (krb5 header) in place in the token buffer */
        buf_to_sg(&src, cipher->data + blocksize, sizeof(*khdr));
        buf_to_sg(&dst, cipher->data + blocksize, sizeof(*khdr));

        rc = crypto_blkcipher_decrypt_iv(&ciph_desc,
                                            &dst, &src, sizeof(*khdr));
        if (rc) {
                CERROR("error to decrypt tail: %d\n", rc);
                return rc;
        }

        /* the decrypted trailing header must equal the plain one sent
         * up front, otherwise the stream was tampered with */
        if (memcmp(cipher->data + blocksize, khdr, sizeof(*khdr))) {
                CERROR("krb5 header doesn't match\n");
                return -EACCES;
        }

        return 0;
}
1159
/*
 * Wrap (encrypt + integrity-protect) a message.
 *
 * \param gctx       gss security context
 * \param gsshdr     gss wire header, covered by the checksum only
 * \param msg        plain message; padded in place to the cipher blocksize
 * \param msg_buflen capacity of msg->data, must allow for the padding
 * \param token      output; receives | krb5 header | cipher | checksum |
 *
 * Returns GSS_S_COMPLETE on success, GSS_S_FAILURE otherwise.
 */
static
__u32 gss_wrap_kerberos(struct gss_ctx *gctx,
                        rawobj_t *gsshdr,
                        rawobj_t *msg,
                        int msg_buflen,
                        rawobj_t *token)
{
        struct krb5_ctx     *kctx = gctx->internal_ctx_id;
        struct krb5_enctype *ke = &enctypes[kctx->kc_enctype];
        struct krb5_header  *khdr;
        int               blocksize;
        rawobj_t             cksum = RAWOBJ_EMPTY;
        rawobj_t             data_desc[3], cipher;
        __u8             conf[GSS_MAX_CIPHER_BLOCK];
        int               rc = 0;

        LASSERT(ke);
        LASSERT(ke->ke_conf_size <= GSS_MAX_CIPHER_BLOCK);
        LASSERT(kctx->kc_keye.kb_tfm == NULL ||
                ke->ke_conf_size >=
                crypto_blkcipher_blocksize(kctx->kc_keye.kb_tfm));

        /*
         * final token format:
         * ---------------------------------------------------
         * | krb5 header | cipher text | checksum (16 bytes) |
         * ---------------------------------------------------
         */

        /* fill krb5 header */
        LASSERT(token->len >= sizeof(*khdr));
        khdr = (struct krb5_header *) token->data;
        fill_krb5_header(kctx, khdr, 1);

        /* generate confounder */
        cfs_get_random_bytes(conf, ke->ke_conf_size);

        /* get encryption blocksize. note kc_keye might not associated with
         * a tfm, currently only for arcfour-hmac */
        if (kctx->kc_enctype == ENCTYPE_ARCFOUR_HMAC) {
                LASSERT(kctx->kc_keye.kb_tfm == NULL);
                blocksize = 1;
        } else {
                LASSERT(kctx->kc_keye.kb_tfm);
                blocksize = crypto_blkcipher_blocksize(kctx->kc_keye.kb_tfm);
        }
        LASSERT(blocksize <= ke->ke_conf_size);

        /* padding the message */
        if (add_padding(msg, msg_buflen, blocksize))
                return GSS_S_FAILURE;

        /*
         * clear text layout for checksum:
         * ------------------------------------------------------
         * | confounder | gss header | clear msgs | krb5 header |
         * ------------------------------------------------------
         */
        data_desc[0].data = conf;
        data_desc[0].len = ke->ke_conf_size;
        data_desc[1].data = gsshdr->data;
        data_desc[1].len = gsshdr->len;
        data_desc[2].data = msg->data;
        data_desc[2].len = msg->len;

        /* compute checksum */
        if (krb5_make_checksum(kctx->kc_enctype, &kctx->kc_keyi,
                               khdr, 3, data_desc, 0, NULL, &cksum))
                return GSS_S_FAILURE;
        LASSERT(cksum.len >= ke->ke_hash_size);

        /*
         * clear text layout for encryption (gss header is NOT encrypted,
         * it is only covered by the checksum above):
         * -----------------------------------------
         * | confounder | clear msgs | krb5 header |
         * -----------------------------------------
         */
        data_desc[0].data = conf;
        data_desc[0].len = ke->ke_conf_size;
        data_desc[1].data = msg->data;
        data_desc[1].len = msg->len;
        data_desc[2].data = (__u8 *) khdr;
        data_desc[2].len = sizeof(*khdr);

        /* cipher text will be directly inplace */
        cipher.data = (__u8 *) (khdr + 1);
        cipher.len = token->len - sizeof(*khdr);
        LASSERT(cipher.len >= ke->ke_conf_size + msg->len + sizeof(*khdr));

        if (kctx->kc_enctype == ENCTYPE_ARCFOUR_HMAC) {
                /* arcfour-hmac derives a per-message RC4 key from the
                 * checksum, then encrypts with ECB (rc4 is a stream
                 * cipher, so ECB here just means "no IV") */
                rawobj_t                 arc4_keye;
                struct crypto_blkcipher *arc4_tfm;

                if (krb5_make_checksum(ENCTYPE_ARCFOUR_HMAC, &kctx->kc_keyi,
                                       NULL, 1, &cksum, 0, NULL, &arc4_keye)) {
                        CERROR("failed to obtain arc4 enc key\n");
                        GOTO(arc4_out, rc = -EACCES);
                }

                arc4_tfm = crypto_alloc_blkcipher("ecb(arc4)", 0, 0);
                if (IS_ERR(arc4_tfm)) {
                        CERROR("failed to alloc tfm arc4 in ECB mode\n");
                        GOTO(arc4_out_key, rc = -EACCES);
                }

                if (crypto_blkcipher_setkey(arc4_tfm, arc4_keye.data,
                                               arc4_keye.len)) {
                        CERROR("failed to set arc4 key, len %d\n",
                               arc4_keye.len);
                        GOTO(arc4_out_tfm, rc = -EACCES);
                }

                rc = krb5_encrypt_rawobjs(arc4_tfm, 1,
                                          3, data_desc, &cipher, 1);
arc4_out_tfm:
                crypto_free_blkcipher(arc4_tfm);
arc4_out_key:
                rawobj_free(&arc4_keye);
arc4_out:
                do {} while(0); /* just to avoid compile warning */
        } else {
                rc = krb5_encrypt_rawobjs(kctx->kc_keye.kb_tfm, 0,
                                          3, data_desc, &cipher, 1);
        }

        if (rc != 0) {
                rawobj_free(&cksum);
                return GSS_S_FAILURE;
        }

        /* fill in checksum: only the last ke_hash_size bytes are kept */
        LASSERT(token->len >= sizeof(*khdr) + cipher.len + ke->ke_hash_size);
        memcpy((char *)(khdr + 1) + cipher.len,
               cksum.data + cksum.len - ke->ke_hash_size,
               ke->ke_hash_size);
        rawobj_free(&cksum);

        /* final token length */
        token->len = sizeof(*khdr) + cipher.len + ke->ke_hash_size;
        return GSS_S_COMPLETE;
}
1301
1302static
1303__u32 gss_prep_bulk_kerberos(struct gss_ctx *gctx,
1304                             struct ptlrpc_bulk_desc *desc)
1305{
1306        struct krb5_ctx     *kctx = gctx->internal_ctx_id;
1307        int               blocksize, i;
1308
1309        LASSERT(desc->bd_iov_count);
1310        LASSERT(desc->bd_enc_iov);
1311        LASSERT(kctx->kc_keye.kb_tfm);
1312
1313        blocksize = crypto_blkcipher_blocksize(kctx->kc_keye.kb_tfm);
1314
1315        for (i = 0; i < desc->bd_iov_count; i++) {
1316                LASSERT(desc->bd_enc_iov[i].kiov_page);
1317                /*
1318                 * offset should always start at page boundary of either
1319                 * client or server side.
1320                 */
1321                if (desc->bd_iov[i].kiov_offset & blocksize) {
1322                        CERROR("odd offset %d in page %d\n",
1323                               desc->bd_iov[i].kiov_offset, i);
1324                        return GSS_S_FAILURE;
1325                }
1326
1327                desc->bd_enc_iov[i].kiov_offset = desc->bd_iov[i].kiov_offset;
1328                desc->bd_enc_iov[i].kiov_len = (desc->bd_iov[i].kiov_len +
1329                                                blocksize - 1) & (~(blocksize - 1));
1330        }
1331
1332        return GSS_S_COMPLETE;
1333}
1334
/*
 * Wrap a bulk transfer: the pages themselves are encrypted in place into
 * desc->bd_enc_iov[] by krb5_encrypt_bulk(); the token only carries the
 * krb5 header, the encrypted confounder + trailing header, and the
 * checksum over | confounder | clear pages | krb5 header |.
 *
 * \param adj_nob  if non-zero, desc->bd_nob is adjusted to the actual
 *                 cipher text size (see krb5_encrypt_bulk()).
 *
 * Returns GSS_S_COMPLETE on success, GSS_S_FAILURE otherwise.
 */
static
__u32 gss_wrap_bulk_kerberos(struct gss_ctx *gctx,
                             struct ptlrpc_bulk_desc *desc,
                             rawobj_t *token, int adj_nob)
{
        struct krb5_ctx     *kctx = gctx->internal_ctx_id;
        struct krb5_enctype *ke = &enctypes[kctx->kc_enctype];
        struct krb5_header  *khdr;
        int               blocksize;
        rawobj_t             cksum = RAWOBJ_EMPTY;
        rawobj_t             data_desc[1], cipher;
        __u8             conf[GSS_MAX_CIPHER_BLOCK];
        int               rc = 0;

        LASSERT(ke);
        LASSERT(ke->ke_conf_size <= GSS_MAX_CIPHER_BLOCK);

        /*
         * final token format:
         * --------------------------------------------------
         * | krb5 header | head/tail cipher text | checksum |
         * --------------------------------------------------
         */

        /* fill krb5 header */
        LASSERT(token->len >= sizeof(*khdr));
        khdr = (struct krb5_header *) token->data;
        fill_krb5_header(kctx, khdr, 1);

        /* generate confounder */
        cfs_get_random_bytes(conf, ke->ke_conf_size);

        /* get encryption blocksize. note kc_keye might not associated with
         * a tfm, currently only for arcfour-hmac */
        if (kctx->kc_enctype == ENCTYPE_ARCFOUR_HMAC) {
                LASSERT(kctx->kc_keye.kb_tfm == NULL);
                blocksize = 1;
        } else {
                LASSERT(kctx->kc_keye.kb_tfm);
                blocksize = crypto_blkcipher_blocksize(kctx->kc_keye.kb_tfm);
        }

        /*
         * we assume the size of krb5_header (16 bytes) must be n * blocksize.
         * the bulk token size would be exactly (sizeof(krb5_header) +
         * blocksize + sizeof(krb5_header) + hashsize)
         *
         * NOTE(review): the literal 16 below presumably stands for the
         * maximum hash size — confirm against ke_hash_size definitions.
         */
        LASSERT(blocksize <= ke->ke_conf_size);
        LASSERT(sizeof(*khdr) >= blocksize && sizeof(*khdr) % blocksize == 0);
        LASSERT(token->len >= sizeof(*khdr) + blocksize + sizeof(*khdr) + 16);

        /*
         * clear text layout for checksum:
         * ------------------------------------------
         * | confounder | clear pages | krb5 header |
         * ------------------------------------------
         */
        data_desc[0].data = conf;
        data_desc[0].len = ke->ke_conf_size;

        /* compute checksum over the confounder plus every bulk page */
        if (krb5_make_checksum(kctx->kc_enctype, &kctx->kc_keyi,
                               khdr, 1, data_desc,
                               desc->bd_iov_count, desc->bd_iov,
                               &cksum))
                return GSS_S_FAILURE;
        LASSERT(cksum.len >= ke->ke_hash_size);

        /*
         * clear text layout for encryption:
         * ------------------------------------------
         * | confounder | clear pages | krb5 header |
         * ------------------------------------------
         *      |             |      |
         *      ----------  (cipher pages)   |
         * result token:   |               |
         * -------------------------------------------
         * | krb5 header | cipher text | cipher text |
         * -------------------------------------------
         */
        data_desc[0].data = conf;
        data_desc[0].len = ke->ke_conf_size;

        /* token carries only the head (confounder) and tail (header)
         * cipher text; the encrypted pages travel via bd_enc_iov[] */
        cipher.data = (__u8 *) (khdr + 1);
        cipher.len = blocksize + sizeof(*khdr);

        if (kctx->kc_enctype == ENCTYPE_ARCFOUR_HMAC) {
                /* arcfour-hmac bulk wrap is not supported */
                LBUG();
                rc = 0;
        } else {
                rc = krb5_encrypt_bulk(kctx->kc_keye.kb_tfm, khdr,
                                       conf, desc, &cipher, adj_nob);
        }

        if (rc != 0) {
                rawobj_free(&cksum);
                return GSS_S_FAILURE;
        }

        /* fill in checksum: keep only the last ke_hash_size bytes */
        LASSERT(token->len >= sizeof(*khdr) + cipher.len + ke->ke_hash_size);
        memcpy((char *)(khdr + 1) + cipher.len,
               cksum.data + cksum.len - ke->ke_hash_size,
               ke->ke_hash_size);
        rawobj_free(&cksum);

        /* final token length */
        token->len = sizeof(*khdr) + cipher.len + ke->ke_hash_size;
        return GSS_S_COMPLETE;
}
1445
1446static
1447__u32 gss_unwrap_kerberos(struct gss_ctx  *gctx,
1448                          rawobj_t      *gsshdr,
1449                          rawobj_t      *token,
1450                          rawobj_t      *msg)
1451{
1452        struct krb5_ctx     *kctx = gctx->internal_ctx_id;
1453        struct krb5_enctype *ke = &enctypes[kctx->kc_enctype];
1454        struct krb5_header  *khdr;
1455        unsigned char       *tmpbuf;
1456        int               blocksize, bodysize;
1457        rawobj_t             cksum = RAWOBJ_EMPTY;
1458        rawobj_t             cipher_in, plain_out;
1459        rawobj_t             hash_objs[3];
1460        int               rc = 0;
1461        __u32           major;
1462
1463        LASSERT(ke);
1464
1465        if (token->len < sizeof(*khdr)) {
1466                CERROR("short signature: %u\n", token->len);
1467                return GSS_S_DEFECTIVE_TOKEN;
1468        }
1469
1470        khdr = (struct krb5_header *) token->data;
1471
1472        major = verify_krb5_header(kctx, khdr, 1);
1473        if (major != GSS_S_COMPLETE) {
1474                CERROR("bad krb5 header\n");
1475                return major;
1476        }
1477
1478        /* block size */
1479        if (kctx->kc_enctype == ENCTYPE_ARCFOUR_HMAC) {
1480                LASSERT(kctx->kc_keye.kb_tfm == NULL);
1481                blocksize = 1;
1482        } else {
1483                LASSERT(kctx->kc_keye.kb_tfm);
1484                blocksize = crypto_blkcipher_blocksize(kctx->kc_keye.kb_tfm);
1485        }
1486
1487        /* expected token layout:
1488         * ----------------------------------------
1489         * | krb5 header | cipher text | checksum |
1490         * ----------------------------------------
1491         */
1492        bodysize = token->len - sizeof(*khdr) - ke->ke_hash_size;
1493
1494        if (bodysize % blocksize) {
1495                CERROR("odd bodysize %d\n", bodysize);
1496                return GSS_S_DEFECTIVE_TOKEN;
1497        }
1498
1499        if (bodysize <= ke->ke_conf_size + sizeof(*khdr)) {
1500                CERROR("incomplete token: bodysize %d\n", bodysize);
1501                return GSS_S_DEFECTIVE_TOKEN;
1502        }
1503
1504        if (msg->len < bodysize - ke->ke_conf_size - sizeof(*khdr)) {
1505                CERROR("buffer too small: %u, require %d\n",
1506                       msg->len, bodysize - ke->ke_conf_size);
1507                return GSS_S_FAILURE;
1508        }
1509
1510        /* decrypting */
1511        OBD_ALLOC_LARGE(tmpbuf, bodysize);
1512        if (!tmpbuf)
1513                return GSS_S_FAILURE;
1514
1515        major = GSS_S_FAILURE;
1516
1517        cipher_in.data = (__u8 *) (khdr + 1);
1518        cipher_in.len = bodysize;
1519        plain_out.data = tmpbuf;
1520        plain_out.len = bodysize;
1521
1522        if (kctx->kc_enctype == ENCTYPE_ARCFOUR_HMAC) {
1523                rawobj_t                 arc4_keye;
1524                struct crypto_blkcipher *arc4_tfm;
1525
1526                cksum.data = token->data + token->len - ke->ke_hash_size;
1527                cksum.len = ke->ke_hash_size;
1528
1529                if (krb5_make_checksum(ENCTYPE_ARCFOUR_HMAC, &kctx->kc_keyi,
1530                                       NULL, 1, &cksum, 0, NULL, &arc4_keye)) {
1531                        CERROR("failed to obtain arc4 enc key\n");
1532                        GOTO(arc4_out, rc = -EACCES);
1533                }
1534
1535                arc4_tfm = crypto_alloc_blkcipher("ecb(arc4)", 0, 0);
1536                if (IS_ERR(arc4_tfm)) {
1537                        CERROR("failed to alloc tfm arc4 in ECB mode\n");
1538                        GOTO(arc4_out_key, rc = -EACCES);
1539                }
1540
1541                if (crypto_blkcipher_setkey(arc4_tfm,
1542                                         arc4_keye.data, arc4_keye.len)) {
1543                        CERROR("failed to set arc4 key, len %d\n",
1544                               arc4_keye.len);
1545                        GOTO(arc4_out_tfm, rc = -EACCES);
1546                }
1547
1548                rc = krb5_encrypt_rawobjs(arc4_tfm, 1,
1549                                          1, &cipher_in, &plain_out, 0);
1550arc4_out_tfm:
1551                crypto_free_blkcipher(arc4_tfm);
1552arc4_out_key:
1553                rawobj_free(&arc4_keye);
1554arc4_out:
1555                cksum = RAWOBJ_EMPTY;
1556        } else {
1557                rc = krb5_encrypt_rawobjs(kctx->kc_keye.kb_tfm, 0,
1558                                          1, &cipher_in, &plain_out, 0);
1559        }
1560
1561        if (rc != 0) {
1562                CERROR("error decrypt\n");
1563                goto out_free;
1564        }
1565        LASSERT(plain_out.len == bodysize);
1566
1567        /* expected clear text layout:
1568         * -----------------------------------------
1569         * | confounder | clear msgs | krb5 header |
1570         * -----------------------------------------
1571         */
1572
1573        /* verify krb5 header in token is not modified */
1574        if (memcmp(khdr, plain_out.data + plain_out.len - sizeof(*khdr),
1575                   sizeof(*khdr))) {
1576                CERROR("decrypted krb5 header mismatch\n");
1577                goto out_free;
1578        }
1579
1580        /* verify checksum, compose clear text as layout:
1581         * ------------------------------------------------------
1582         * | confounder | gss header | clear msgs | krb5 header |
1583         * ------------------------------------------------------
1584         */
1585        hash_objs[0].len = ke->ke_conf_size;
1586        hash_objs[0].data = plain_out.data;
1587        hash_objs[1].len = gsshdr->len;
1588        hash_objs[1].data = gsshdr->data;
1589        hash_objs[2].len = plain_out.len - ke->ke_conf_size - sizeof(*khdr);
1590        hash_objs[2].data = plain_out.data + ke->ke_conf_size;
1591        if (krb5_make_checksum(kctx->kc_enctype, &kctx->kc_keyi,
1592                               khdr, 3, hash_objs, 0, NULL, &cksum))
1593                goto out_free;
1594
1595        LASSERT(cksum.len >= ke->ke_hash_size);
1596        if (memcmp((char *)(khdr + 1) + bodysize,
1597                   cksum.data + cksum.len - ke->ke_hash_size,
1598                   ke->ke_hash_size)) {
1599                CERROR("checksum mismatch\n");
1600                goto out_free;
1601        }
1602
1603        msg->len =  bodysize - ke->ke_conf_size - sizeof(*khdr);
1604        memcpy(msg->data, tmpbuf + ke->ke_conf_size, msg->len);
1605
1606        major = GSS_S_COMPLETE;
1607out_free:
1608        OBD_FREE_LARGE(tmpbuf, bodysize);
1609        rawobj_free(&cksum);
1610        return major;
1611}
1612
/*
 * Unwrap a bulk transfer: decrypt the head/tail cipher text from the
 * token and the bulk pages (via krb5_decrypt_bulk(), which also verifies
 * the embedded trailing header), then verify the checksum over
 * | confounder | clear pages | krb5 header |.
 *
 * \param adj_nob  passed through to krb5_decrypt_bulk() to adjust each
 *                 page's kiov_len to the actual plain text size.
 *
 * Returns GSS_S_COMPLETE, GSS_S_DEFECTIVE_TOKEN, GSS_S_FAILURE or
 * GSS_S_BAD_SIG.
 */
static
__u32 gss_unwrap_bulk_kerberos(struct gss_ctx *gctx,
                               struct ptlrpc_bulk_desc *desc,
                               rawobj_t *token, int adj_nob)
{
        struct krb5_ctx     *kctx = gctx->internal_ctx_id;
        struct krb5_enctype *ke = &enctypes[kctx->kc_enctype];
        struct krb5_header  *khdr;
        int               blocksize;
        rawobj_t             cksum = RAWOBJ_EMPTY;
        rawobj_t             cipher, plain;
        rawobj_t             data_desc[1];
        int               rc;
        __u32           major;

        LASSERT(ke);

        if (token->len < sizeof(*khdr)) {
                CERROR("short signature: %u\n", token->len);
                return GSS_S_DEFECTIVE_TOKEN;
        }

        khdr = (struct krb5_header *) token->data;

        major = verify_krb5_header(kctx, khdr, 1);
        if (major != GSS_S_COMPLETE) {
                CERROR("bad krb5 header\n");
                return major;
        }

        /* block size; arcfour-hmac bulk is not supported */
        if (kctx->kc_enctype == ENCTYPE_ARCFOUR_HMAC) {
                LASSERT(kctx->kc_keye.kb_tfm == NULL);
                blocksize = 1;
                LBUG();
        } else {
                LASSERT(kctx->kc_keye.kb_tfm);
                blocksize = crypto_blkcipher_blocksize(kctx->kc_keye.kb_tfm);
        }
        LASSERT(sizeof(*khdr) >= blocksize && sizeof(*khdr) % blocksize == 0);

        /*
         * token format is expected as:
         * -----------------------------------------------
         * | krb5 header | head/tail cipher text | cksum |
         * -----------------------------------------------
         */
        if (token->len < sizeof(*khdr) + blocksize + sizeof(*khdr) +
                         ke->ke_hash_size) {
                CERROR("short token size: %u\n", token->len);
                return GSS_S_DEFECTIVE_TOKEN;
        }

        /* decrypt the head/tail cipher text in place in the token */
        cipher.data = (__u8 *) (khdr + 1);
        cipher.len = blocksize + sizeof(*khdr);
        plain.data = cipher.data;
        plain.len = cipher.len;

        rc = krb5_decrypt_bulk(kctx->kc_keye.kb_tfm, khdr,
                               desc, &cipher, &plain, adj_nob);
        if (rc)
                return GSS_S_DEFECTIVE_TOKEN;

        /*
         * verify checksum, compose clear text as layout:
         * ------------------------------------------
         * | confounder | clear pages | krb5 header |
         * ------------------------------------------
         */
        data_desc[0].data = plain.data;
        data_desc[0].len = blocksize;

        if (krb5_make_checksum(kctx->kc_enctype, &kctx->kc_keyi,
                               khdr, 1, data_desc,
                               desc->bd_iov_count, desc->bd_iov,
                               &cksum))
                return GSS_S_FAILURE;
        LASSERT(cksum.len >= ke->ke_hash_size);

        /* compare the transmitted hash (after the head/tail cipher text)
         * with the last ke_hash_size bytes of our checksum */
        if (memcmp(plain.data + blocksize + sizeof(*khdr),
                   cksum.data + cksum.len - ke->ke_hash_size,
                   ke->ke_hash_size)) {
                CERROR("checksum mismatch\n");
                rawobj_free(&cksum);
                return GSS_S_BAD_SIG;
        }

        rawobj_free(&cksum);
        return GSS_S_COMPLETE;
}
1703
1704int gss_display_kerberos(struct gss_ctx *ctx,
1705                         char             *buf,
1706                         int                bufsize)
1707{
1708        struct krb5_ctx    *kctx = ctx->internal_ctx_id;
1709        int              written;
1710
1711        written = snprintf(buf, bufsize, "krb5 (%s)",
1712                           enctype2str(kctx->kc_enctype));
1713        return written;
1714}
1715
1716static struct gss_api_ops gss_kerberos_ops = {
1717        .gss_import_sec_context     = gss_import_sec_context_kerberos,
1718        .gss_copy_reverse_context   = gss_copy_reverse_context_kerberos,
1719        .gss_inquire_context    = gss_inquire_context_kerberos,
1720        .gss_get_mic            = gss_get_mic_kerberos,
1721        .gss_verify_mic      = gss_verify_mic_kerberos,
1722        .gss_wrap                  = gss_wrap_kerberos,
1723        .gss_unwrap              = gss_unwrap_kerberos,
1724        .gss_prep_bulk        = gss_prep_bulk_kerberos,
1725        .gss_wrap_bulk        = gss_wrap_bulk_kerberos,
1726        .gss_unwrap_bulk            = gss_unwrap_bulk_kerberos,
1727        .gss_delete_sec_context     = gss_delete_sec_context_kerberos,
1728        .gss_display            = gss_display_kerberos,
1729};
1730
1731static struct subflavor_desc gss_kerberos_sfs[] = {
1732        {
1733                .sf_subflavor   = SPTLRPC_SUBFLVR_KRB5N,
1734                .sf_qop  = 0,
1735                .sf_service     = SPTLRPC_SVC_NULL,
1736                .sf_name        = "krb5n"
1737        },
1738        {
1739                .sf_subflavor   = SPTLRPC_SUBFLVR_KRB5A,
1740                .sf_qop  = 0,
1741                .sf_service     = SPTLRPC_SVC_AUTH,
1742                .sf_name        = "krb5a"
1743        },
1744        {
1745                .sf_subflavor   = SPTLRPC_SUBFLVR_KRB5I,
1746                .sf_qop  = 0,
1747                .sf_service     = SPTLRPC_SVC_INTG,
1748                .sf_name        = "krb5i"
1749        },
1750        {
1751                .sf_subflavor   = SPTLRPC_SUBFLVR_KRB5P,
1752                .sf_qop  = 0,
1753                .sf_service     = SPTLRPC_SVC_PRIV,
1754                .sf_name        = "krb5p"
1755        },
1756};
1757
1758/*
1759 * currently we leave module owner NULL
1760 */
1761static struct gss_api_mech gss_kerberos_mech = {
1762        .gm_owner       = NULL, /*THIS_MODULE, */
1763        .gm_name        = "krb5",
1764        .gm_oid  = (rawobj_t)
1765                                {9, "\052\206\110\206\367\022\001\002\002"},
1766        .gm_ops  = &gss_kerberos_ops,
1767        .gm_sf_num      = 4,
1768        .gm_sfs  = gss_kerberos_sfs,
1769};
1770
1771int __init init_kerberos_module(void)
1772{
1773        int status;
1774
1775        spin_lock_init(&krb5_seq_lock);
1776
1777        status = lgss_mech_register(&gss_kerberos_mech);
1778        if (status)
1779                CERROR("Failed to register kerberos gss mechanism!\n");
1780        return status;
1781}
1782
/*
 * Module exit hook: unregister the krb5 mechanism from the GSS framework
 * so no new security contexts can be created with it.
 */
void __exit cleanup_kerberos_module(void)
{
	lgss_mech_unregister(&gss_kerberos_mech);
}
1787