linux/net/sunrpc/auth_gss/gss_krb5_wrap.c
/*
 * COPYRIGHT (c) 2008
 * The Regents of the University of Michigan
 * ALL RIGHTS RESERVED
 *
 * Permission is granted to use, copy, create derivative works
 * and redistribute this software and such derivative works
 * for any purpose, so long as the name of The University of
 * Michigan is not used in any advertising or publicity
 * pertaining to the use of distribution of this software
 * without specific, written prior authorization.  If the
 * above copyright notice or any other identification of the
 * University of Michigan is included in any copy of any
 * portion of this software, then the disclaimer below must
 * also be included.
 *
 * THIS SOFTWARE IS PROVIDED AS IS, WITHOUT REPRESENTATION
 * FROM THE UNIVERSITY OF MICHIGAN AS TO ITS FITNESS FOR ANY
 * PURPOSE, AND WITHOUT WARRANTY BY THE UNIVERSITY OF
 * MICHIGAN OF ANY KIND, EITHER EXPRESS OR IMPLIED, INCLUDING
 * WITHOUT LIMITATION THE IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE
 * REGENTS OF THE UNIVERSITY OF MICHIGAN SHALL NOT BE LIABLE
 * FOR ANY DAMAGES, INCLUDING SPECIAL, INDIRECT, INCIDENTAL, OR
 * CONSEQUENTIAL DAMAGES, WITH RESPECT TO ANY CLAIM ARISING
 * OUT OF OR IN CONNECTION WITH THE USE OF THE SOFTWARE, EVEN
 * IF IT HAS BEEN OR IS HEREAFTER ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGES.
 */

#include <linux/types.h>
#include <linux/jiffies.h>
#include <linux/sunrpc/gss_krb5.h>
#include <linux/random.h>
#include <linux/pagemap.h>
#include <linux/crypto.h>

#ifdef RPC_DEBUG
# define RPCDBG_FACILITY        RPCDBG_AUTH
#endif

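/*
 * Number of pad bytes needed to bring @length up to a multiple of
 * @blocksize.  Note the result is never zero: an already-aligned
 * length gets a full block of padding.
 */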
static inline int
gss_krb5_padding(int blocksize, int length)
{
        return blocksize - (length % blocksize);
}

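/*
 * Append the pad bytes to the cleartext, each byte holding the pad
 * length itself.  They go at the end of the tail if the buffer has
 * page or tail data, otherwise directly after the head.  The caller
 * must have left room in the target iovec.
 */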
static inline void
gss_krb5_add_padding(struct xdr_buf *buf, int offset, int blocksize)
{
        int padding = gss_krb5_padding(blocksize, buf->len - offset);
        char *p;
        struct kvec *iov;

        if (buf->page_len || buf->tail[0].iov_len)
                iov = &buf->tail[0];
        else
                iov = &buf->head[0];
        p = iov->iov_base + iov->iov_len;
        iov->iov_len += padding;
        buf->len += padding;
        memset(p, padding, padding);
}

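/*
 * Read the pad length from the last cleartext byte--which may live in
 * the head, the pages, or the tail of @buf--and shrink buf->len by
 * that amount.  Returns -EINVAL for an implausible pad byte; see the
 * note below on why the page and tail lengths are left alone.
 */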
static inline int
gss_krb5_remove_padding(struct xdr_buf *buf, int blocksize)
{
        u8 *ptr;
        u8 pad;
        size_t len = buf->len;

        if (len <= buf->head[0].iov_len) {
                pad = *(u8 *)(buf->head[0].iov_base + len - 1);
                if (pad > buf->head[0].iov_len)
                        return -EINVAL;
                buf->head[0].iov_len -= pad;
                goto out;
        } else
                len -= buf->head[0].iov_len;
        if (len <= buf->page_len) {
                unsigned int last = (buf->page_base + len - 1)
                                        >> PAGE_CACHE_SHIFT;
                unsigned int offset = (buf->page_base + len - 1)
                                        & (PAGE_CACHE_SIZE - 1);
                ptr = kmap_atomic(buf->pages[last], KM_USER0);
                pad = *(ptr + offset);
                kunmap_atomic(ptr, KM_USER0);
                goto out;
        } else
                len -= buf->page_len;
        BUG_ON(len > buf->tail[0].iov_len);
        pad = *(u8 *)(buf->tail[0].iov_base + len - 1);
out:
        /* XXX: NOTE: we do not adjust the page lengths--they represent
         * a range of data in the real filesystem page cache, and we need
         * to know that range so the xdr code can properly place read data.
         * However adjusting the head length, as we do above, is harmless.
         * In the case of a request that fits into a single page, the server
         * also uses length and head length together to determine the original
         * start of the request to copy the request for deferral; so it's
         * easier on the server if we adjust head and tail length in tandem.
         * It's not really a problem that we don't fool with the page and
         * tail lengths, though--at worst badly formed xdr might lead the
         * server to attempt to parse the padding.
         * XXX: Document all these weird requirements for gss mechanism
         * wrap/unwrap functions. */
        if (pad > blocksize)
                return -EINVAL;
        if (buf->len > pad)
                buf->len -= pad;
        else
                return -EINVAL;
        return 0;
}

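/*
 * Fill @p with @conflen (8 or 16) bytes of confounder, taken from a
 * 64-bit counter seeded once from random32().  As the comment below
 * explains, uniqueness rather than randomness is what matters here,
 * and even that is only best-effort.
 */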
void
gss_krb5_make_confounder(char *p, u32 conflen)
{
        static u64 i = 0;
        u64 *q = (u64 *)p;

        /* rfc1964 claims this should be "random".  But all that's really
         * necessary is that it be unique.  And not even that is necessary in
         * our case since our "gssapi" implementation exists only to support
         * rpcsec_gss, so we know that the only buffers we will ever encrypt
         * already begin with a unique sequence number.  Just to hedge my bets
         * I'll make a half-hearted attempt at something unique, but ensuring
         * uniqueness would mean worrying about atomicity and rollover, and I
         * don't care enough. */

        /* initialize to random value */
        if (i == 0) {
                i = random32();
                i = (i << 32) | random32();
        }

        switch (conflen) {
        case 16:
                *q++ = i++;
                /* fall through */
        case 8:
                *q++ = i++;
                break;
        default:
                BUG();
        }
}

/* Assumptions: the head and tail of inbuf are ours to play with.
 * The pages, however, may be real pages in the page cache and we replace
 * them with scratch pages from **pages before writing to them. */
/* XXX: obviously the above should be documentation of wrap interface,
 * and shouldn't be in this kerberos-specific file. */

/* XXX factor out common code with seal/unseal. */

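/*
 * Build an rfc1964-style wrap token in place:
 *
 *   DER-wrapped mech OID | TOK_ID (0x02 0x01) | SGN_ALG | SEAL_ALG |
 *   0xff 0xff filler | SND_SEQ (8 bytes) | checksum | confounder |
 *   encrypted data | pad
 *
 * Both the checksum and the encryption cover the confounder plus the
 * padded cleartext.
 */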
static u32
gss_wrap_kerberos_v1(struct krb5_ctx *kctx, int offset,
                struct xdr_buf *buf, struct page **pages)
{
        char                    cksumdata[GSS_KRB5_MAX_CKSUM_LEN];
        struct xdr_netobj       md5cksum = {.len = sizeof(cksumdata),
                                            .data = cksumdata};
        int                     blocksize = 0, plainlen;
        unsigned char           *ptr, *msg_start;
        s32                     now;
        int                     headlen;
        struct page             **tmp_pages;
        u32                     seq_send;
        u8                      *cksumkey;
        u32                     conflen = kctx->gk5e->conflen;

        dprintk("RPC:       %s\n", __func__);

        now = get_seconds();

        blocksize = crypto_blkcipher_blocksize(kctx->enc);
        gss_krb5_add_padding(buf, offset, blocksize);
        BUG_ON((buf->len - offset) % blocksize);
        plainlen = conflen + buf->len - offset;

        headlen = g_token_size(&kctx->mech_used,
                GSS_KRB5_TOK_HDR_LEN + kctx->gk5e->cksumlength + plainlen) -
                (buf->len - offset);

        ptr = buf->head[0].iov_base + offset;
        /* shift data to make room for header. */
        xdr_extend_head(buf, offset, headlen);

        /* XXX Would be cleverer to encrypt while copying. */
        BUG_ON((buf->len - offset - headlen) % blocksize);

        g_make_token_header(&kctx->mech_used,
                                GSS_KRB5_TOK_HDR_LEN +
                                kctx->gk5e->cksumlength + plainlen, &ptr);


        /* ptr now at header described in rfc 1964, section 1.2.1: */
        ptr[0] = (unsigned char) ((KG_TOK_WRAP_MSG >> 8) & 0xff);
        ptr[1] = (unsigned char) (KG_TOK_WRAP_MSG & 0xff);

        msg_start = ptr + GSS_KRB5_TOK_HDR_LEN + kctx->gk5e->cksumlength;

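        /*
         * SGN_ALG and SEAL_ALG are stored least-significant-byte first,
         * matching the reads in gss_unwrap_kerberos_v1(); ptr[6..7] keep
         * the 0xff filler after SEAL_ALG overwrites ptr[4..5].
         */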
        *(__le16 *)(ptr + 2) = cpu_to_le16(kctx->gk5e->signalg);
        memset(ptr + 4, 0xff, 4);
        *(__le16 *)(ptr + 4) = cpu_to_le16(kctx->gk5e->sealalg);

        gss_krb5_make_confounder(msg_start, conflen);

        if (kctx->gk5e->keyed_cksum)
                cksumkey = kctx->cksum;
        else
                cksumkey = NULL;

        /* XXXJBF: UGH!: */
        tmp_pages = buf->pages;
        buf->pages = pages;
        if (make_checksum(kctx, ptr, 8, buf, offset + headlen - conflen,
                                        cksumkey, KG_USAGE_SEAL, &md5cksum))
                return GSS_S_FAILURE;
        buf->pages = tmp_pages;

        memcpy(ptr + GSS_KRB5_TOK_HDR_LEN, md5cksum.data, md5cksum.len);

        spin_lock(&krb5_seq_lock);
        seq_send = kctx->seq_send++;
        spin_unlock(&krb5_seq_lock);

        /* XXX would probably be more efficient to compute checksum
         * and encrypt at the same time: */
        if ((krb5_make_seq_num(kctx, kctx->seq, kctx->initiate ? 0 : 0xff,
                               seq_send, ptr + GSS_KRB5_TOK_HDR_LEN, ptr + 8)))
                return GSS_S_FAILURE;

        if (kctx->enctype == ENCTYPE_ARCFOUR_HMAC) {
                struct crypto_blkcipher *cipher;
                int err;
                cipher = crypto_alloc_blkcipher(kctx->gk5e->encrypt_name, 0,
                                                CRYPTO_ALG_ASYNC);
                if (IS_ERR(cipher))
                        return GSS_S_FAILURE;

                krb5_rc4_setup_enc_key(kctx, cipher, seq_send);

                err = gss_encrypt_xdr_buf(cipher, buf,
                                          offset + headlen - conflen, pages);
                crypto_free_blkcipher(cipher);
                if (err)
                        return GSS_S_FAILURE;
        } else {
                if (gss_encrypt_xdr_buf(kctx->enc, buf,
                                        offset + headlen - conflen, pages))
                        return GSS_S_FAILURE;
        }

        return (kctx->endtime < now) ? GSS_S_CONTEXT_EXPIRED : GSS_S_COMPLETE;
}

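/*
 * Undo gss_wrap_kerberos_v1(): verify the token header and the
 * signing/sealing algorithms, check the sealed sequence number,
 * decrypt in place, verify the checksum, then slide the cleartext
 * back to @offset and strip the blocksize padding.
 */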
static u32
gss_unwrap_kerberos_v1(struct krb5_ctx *kctx, int offset, struct xdr_buf *buf)
{
        int                     signalg;
        int                     sealalg;
        char                    cksumdata[GSS_KRB5_MAX_CKSUM_LEN];
        struct xdr_netobj       md5cksum = {.len = sizeof(cksumdata),
                                            .data = cksumdata};
        s32                     now;
        int                     direction;
        s32                     seqnum;
        unsigned char           *ptr;
        int                     bodysize;
        void                    *data_start, *orig_start;
        int                     data_len;
        int                     blocksize;
        u32                     conflen = kctx->gk5e->conflen;
        int                     crypt_offset;
        u8                      *cksumkey;

        dprintk("RPC:       %s\n", __func__);

        ptr = (u8 *)buf->head[0].iov_base + offset;
        if (g_verify_token_header(&kctx->mech_used, &bodysize, &ptr,
                                        buf->len - offset))
                return GSS_S_DEFECTIVE_TOKEN;

        if ((ptr[0] != ((KG_TOK_WRAP_MSG >> 8) & 0xff)) ||
            (ptr[1] !=  (KG_TOK_WRAP_MSG & 0xff)))
                return GSS_S_DEFECTIVE_TOKEN;

        /* XXX sanity-check bodysize?? */

        /* get the sign and seal algorithms */

        signalg = ptr[2] + (ptr[3] << 8);
        if (signalg != kctx->gk5e->signalg)
                return GSS_S_DEFECTIVE_TOKEN;

        sealalg = ptr[4] + (ptr[5] << 8);
        if (sealalg != kctx->gk5e->sealalg)
                return GSS_S_DEFECTIVE_TOKEN;

        if ((ptr[6] != 0xff) || (ptr[7] != 0xff))
                return GSS_S_DEFECTIVE_TOKEN;

        /*
         * Data starts after token header and checksum.  ptr points
         * to the beginning of the token header
         */
        crypt_offset = ptr + (GSS_KRB5_TOK_HDR_LEN + kctx->gk5e->cksumlength) -
                                        (unsigned char *)buf->head[0].iov_base;

        /*
         * Need plaintext seqnum to derive encryption key for arcfour-hmac
         */
        if (krb5_get_seq_num(kctx, ptr + GSS_KRB5_TOK_HDR_LEN,
                             ptr + 8, &direction, &seqnum))
                return GSS_S_BAD_SIG;

        if ((kctx->initiate && direction != 0xff) ||
            (!kctx->initiate && direction != 0))
                return GSS_S_BAD_SIG;

        if (kctx->enctype == ENCTYPE_ARCFOUR_HMAC) {
                struct crypto_blkcipher *cipher;
                int err;

                cipher = crypto_alloc_blkcipher(kctx->gk5e->encrypt_name, 0,
                                                CRYPTO_ALG_ASYNC);
                if (IS_ERR(cipher))
                        return GSS_S_FAILURE;

                krb5_rc4_setup_enc_key(kctx, cipher, seqnum);

                err = gss_decrypt_xdr_buf(cipher, buf, crypt_offset);
                crypto_free_blkcipher(cipher);
                if (err)
                        return GSS_S_DEFECTIVE_TOKEN;
        } else {
                if (gss_decrypt_xdr_buf(kctx->enc, buf, crypt_offset))
                        return GSS_S_DEFECTIVE_TOKEN;
        }

        if (kctx->gk5e->keyed_cksum)
                cksumkey = kctx->cksum;
        else
                cksumkey = NULL;

        if (make_checksum(kctx, ptr, 8, buf, crypt_offset,
                                        cksumkey, KG_USAGE_SEAL, &md5cksum))
                return GSS_S_FAILURE;

        if (memcmp(md5cksum.data, ptr + GSS_KRB5_TOK_HDR_LEN,
                                                kctx->gk5e->cksumlength))
                return GSS_S_BAD_SIG;

        /* it got through unscathed.  Make sure the context is unexpired */

        now = get_seconds();

        if (now > kctx->endtime)
                return GSS_S_CONTEXT_EXPIRED;

        /* do sequencing checks */

        /* Copy the data back to the right position.  XXX: Would probably be
         * better to copy and encrypt at the same time. */

        blocksize = crypto_blkcipher_blocksize(kctx->enc);
        data_start = ptr + (GSS_KRB5_TOK_HDR_LEN + kctx->gk5e->cksumlength) +
                                        conflen;
        orig_start = buf->head[0].iov_base + offset;
        data_len = (buf->head[0].iov_base + buf->head[0].iov_len) - data_start;
        memmove(orig_start, data_start, data_len);
        buf->head[0].iov_len -= (data_start - orig_start);
        buf->len -= (data_start - orig_start);

        if (gss_krb5_remove_padding(buf, blocksize))
                return GSS_S_DEFECTIVE_TOKEN;

        return GSS_S_COMPLETE;
}

/*
 * We cannot currently handle tokens with rotated data.  We need a
 * generalized routine to rotate the data in place.  It is anticipated
 * that we won't encounter rotated data in the general case.
 */
static u32
rotate_left(struct krb5_ctx *kctx, u32 offset, struct xdr_buf *buf, u16 rrc)
{
        unsigned int realrrc = rrc % (buf->len - offset - GSS_KRB5_TOK_HDR_LEN);

        if (realrrc == 0)
                return 0;

        dprintk("%s: cannot process token with rotated data: "
                "rrc %u, realrrc %u\n", __func__, rrc, realrrc);
        return 1;
}

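/*
 * Build an rfc4121 wrap token: a 16-byte header (TOK_ID 0x05 0x04,
 * flags, 0xff filler, EC, RRC, 64-bit SND_SEQ) followed by the
 * encryption, via the enctype's encrypt_v2 routine, of the
 * confounder, the cleartext, and a copy of the header.  We always
 * emit RRC 0; rotated tokens are never produced.
 */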
static u32
gss_wrap_kerberos_v2(struct krb5_ctx *kctx, u32 offset,
                     struct xdr_buf *buf, struct page **pages)
{
        int             blocksize;
        u8              *ptr, *plainhdr;
        s32             now;
        u8              flags = 0x00;
        __be16          *be16ptr;
        __be64          *be64ptr;
        u16             ec = 0;
        u32             err;

        dprintk("RPC:       %s\n", __func__);

        if (kctx->gk5e->encrypt_v2 == NULL)
                return GSS_S_FAILURE;

        /* make room for gss token header */
        if (xdr_extend_head(buf, offset, GSS_KRB5_TOK_HDR_LEN))
                return GSS_S_FAILURE;

        /* construct gss token header */
        ptr = plainhdr = buf->head[0].iov_base + offset;
        *ptr++ = (unsigned char) ((KG2_TOK_WRAP>>8) & 0xff);
        *ptr++ = (unsigned char) (KG2_TOK_WRAP & 0xff);

        if ((kctx->flags & KRB5_CTX_FLAG_INITIATOR) == 0)
                flags |= KG2_TOKEN_FLAG_SENTBYACCEPTOR;
        if ((kctx->flags & KRB5_CTX_FLAG_ACCEPTOR_SUBKEY) != 0)
                flags |= KG2_TOKEN_FLAG_ACCEPTORSUBKEY;
        /* We always do confidentiality in wrap tokens */
        flags |= KG2_TOKEN_FLAG_SEALED;

        *ptr++ = flags;
        *ptr++ = 0xff;
        be16ptr = (__be16 *)ptr;

        blocksize = crypto_blkcipher_blocksize(kctx->acceptor_enc);
        *be16ptr++ = cpu_to_be16(ec);
        /* "inner" token header always uses 0 for RRC */
        *be16ptr++ = cpu_to_be16(0);

        be64ptr = (__be64 *)be16ptr;
        spin_lock(&krb5_seq_lock);
        *be64ptr = cpu_to_be64(kctx->seq_send64++);
        spin_unlock(&krb5_seq_lock);

        err = (*kctx->gk5e->encrypt_v2)(kctx, offset, buf, ec, pages);
        if (err)
                return err;

        now = get_seconds();
        return (kctx->endtime < now) ? GSS_S_CONTEXT_EXPIRED : GSS_S_COMPLETE;
}

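/*
 * Undo gss_wrap_kerberos_v2(): sanity-check the token header, refuse
 * rotated tokens (RRC != 0), decrypt, and verify that the decrypted
 * copy of the header matches the one on the wire (the RRC bytes are
 * skipped, since they legitimately differ).  The cleartext is then
 * moved back down to @offset.
 */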
static u32
gss_unwrap_kerberos_v2(struct krb5_ctx *kctx, int offset, struct xdr_buf *buf)
{
        s32             now;
        u64             seqnum;
        u8              *ptr;
        u8              flags = 0x00;
        u16             ec, rrc;
        int             err;
        u32             headskip, tailskip;
        u8              decrypted_hdr[GSS_KRB5_TOK_HDR_LEN];
        unsigned int    movelen;


        dprintk("RPC:       %s\n", __func__);

        if (kctx->gk5e->decrypt_v2 == NULL)
                return GSS_S_FAILURE;

        ptr = buf->head[0].iov_base + offset;

        if (be16_to_cpu(*((__be16 *)ptr)) != KG2_TOK_WRAP)
                return GSS_S_DEFECTIVE_TOKEN;

        flags = ptr[2];
        if ((!kctx->initiate && (flags & KG2_TOKEN_FLAG_SENTBYACCEPTOR)) ||
            (kctx->initiate && !(flags & KG2_TOKEN_FLAG_SENTBYACCEPTOR)))
                return GSS_S_BAD_SIG;

        if ((flags & KG2_TOKEN_FLAG_SEALED) == 0) {
                dprintk("%s: token missing expected sealed flag\n", __func__);
                return GSS_S_DEFECTIVE_TOKEN;
        }

        if (ptr[3] != 0xff)
                return GSS_S_DEFECTIVE_TOKEN;

        ec = be16_to_cpup((__be16 *)(ptr + 4));
        rrc = be16_to_cpup((__be16 *)(ptr + 6));

        seqnum = be64_to_cpup((__be64 *)(ptr + 8));

        if (rrc != 0) {
                err = rotate_left(kctx, offset, buf, rrc);
                if (err)
                        return GSS_S_FAILURE;
        }

        err = (*kctx->gk5e->decrypt_v2)(kctx, offset, buf,
                                        &headskip, &tailskip);
        if (err)
                return GSS_S_FAILURE;

        /*
         * Retrieve the decrypted gss token header and verify
         * it against the original
         */
        err = read_bytes_from_xdr_buf(buf,
                                buf->len - GSS_KRB5_TOK_HDR_LEN - tailskip,
                                decrypted_hdr, GSS_KRB5_TOK_HDR_LEN);
        if (err) {
                dprintk("%s: error %u getting decrypted_hdr\n", __func__, err);
                return GSS_S_FAILURE;
        }
        if (memcmp(ptr, decrypted_hdr, 6)
                                || memcmp(ptr + 8, decrypted_hdr + 8, 8)) {
                dprintk("%s: token hdr, plaintext hdr mismatch!\n", __func__);
                return GSS_S_FAILURE;
        }

        /* do sequencing checks */

        /* it got through unscathed.  Make sure the context is unexpired */
        now = get_seconds();
        if (now > kctx->endtime)
                return GSS_S_CONTEXT_EXPIRED;

        /*
         * Move the head data back to the right position in xdr_buf.
         * We ignore any "ec" data since it might be in the head or
         * the tail, and we really don't need to deal with it.
         * Note that buf->head[0].iov_len may indicate the available
         * head buffer space rather than that actually occupied.
         */
        movelen = min_t(unsigned int, buf->head[0].iov_len, buf->len);
        movelen -= offset + GSS_KRB5_TOK_HDR_LEN + headskip;
        BUG_ON(offset + GSS_KRB5_TOK_HDR_LEN + headskip + movelen >
                                                        buf->head[0].iov_len);
        memmove(ptr, ptr + GSS_KRB5_TOK_HDR_LEN + headskip, movelen);
        buf->head[0].iov_len -= GSS_KRB5_TOK_HDR_LEN + headskip;
        buf->len -= GSS_KRB5_TOK_HDR_LEN + headskip;

        return GSS_S_COMPLETE;
}

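/*
 * The entry points below are wired up as the Kerberos mech's
 * gss_wrap/gss_unwrap operations (gss_kerberos_ops in
 * gss_krb5_mech.c).  They simply dispatch on the context's enctype:
 * the raw DES/DES3 and arcfour-hmac enctypes use v1 (rfc1964-style)
 * tokens, while the AES enctypes use v2 (rfc4121) tokens.
 */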
u32
gss_wrap_kerberos(struct gss_ctx *gctx, int offset,
                  struct xdr_buf *buf, struct page **pages)
{
        struct krb5_ctx *kctx = gctx->internal_ctx_id;

        switch (kctx->enctype) {
        default:
                BUG();
        case ENCTYPE_DES_CBC_RAW:
        case ENCTYPE_DES3_CBC_RAW:
        case ENCTYPE_ARCFOUR_HMAC:
                return gss_wrap_kerberos_v1(kctx, offset, buf, pages);
        case ENCTYPE_AES128_CTS_HMAC_SHA1_96:
        case ENCTYPE_AES256_CTS_HMAC_SHA1_96:
                return gss_wrap_kerberos_v2(kctx, offset, buf, pages);
        }
}

u32
gss_unwrap_kerberos(struct gss_ctx *gctx, int offset, struct xdr_buf *buf)
{
        struct krb5_ctx *kctx = gctx->internal_ctx_id;

        switch (kctx->enctype) {
        default:
                BUG();
        case ENCTYPE_DES_CBC_RAW:
        case ENCTYPE_DES3_CBC_RAW:
        case ENCTYPE_ARCFOUR_HMAC:
                return gss_unwrap_kerberos_v1(kctx, offset, buf);
        case ENCTYPE_AES128_CTS_HMAC_SHA1_96:
        case ENCTYPE_AES256_CTS_HMAC_SHA1_96:
                return gss_unwrap_kerberos_v2(kctx, offset, buf);
        }
}