linux/net/sunrpc/auth_gss/gss_krb5_wrap.c
/*
 * COPYRIGHT (c) 2008
 * The Regents of the University of Michigan
 * ALL RIGHTS RESERVED
 *
 * Permission is granted to use, copy, create derivative works
 * and redistribute this software and such derivative works
 * for any purpose, so long as the name of The University of
 * Michigan is not used in any advertising or publicity
 * pertaining to the use of distribution of this software
 * without specific, written prior authorization.  If the
 * above copyright notice or any other identification of the
 * University of Michigan is included in any copy of any
 * portion of this software, then the disclaimer below must
 * also be included.
 *
 * THIS SOFTWARE IS PROVIDED AS IS, WITHOUT REPRESENTATION
 * FROM THE UNIVERSITY OF MICHIGAN AS TO ITS FITNESS FOR ANY
 * PURPOSE, AND WITHOUT WARRANTY BY THE UNIVERSITY OF
 * MICHIGAN OF ANY KIND, EITHER EXPRESS OR IMPLIED, INCLUDING
 * WITHOUT LIMITATION THE IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE
 * REGENTS OF THE UNIVERSITY OF MICHIGAN SHALL NOT BE LIABLE
 * FOR ANY DAMAGES, INCLUDING SPECIAL, INDIRECT, INCIDENTAL, OR
 * CONSEQUENTIAL DAMAGES, WITH RESPECT TO ANY CLAIM ARISING
 * OUT OF OR IN CONNECTION WITH THE USE OF THE SOFTWARE, EVEN
 * IF IT HAS BEEN OR IS HEREAFTER ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGES.
 */

#include <crypto/skcipher.h>
#include <linux/types.h>
#include <linux/jiffies.h>
#include <linux/sunrpc/gss_krb5.h>
#include <linux/random.h>
#include <linux/pagemap.h>

#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
# define RPCDBG_FACILITY        RPCDBG_AUTH
#endif

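/*
 * RFC 1964-style padding: the plaintext is padded out to a full cipher
 * block with 'padding' bytes, each holding the value 'padding' itself.
 * Note that an already block-aligned buffer still gains a full block of
 * padding, since gss_krb5_padding() never returns zero.
 */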
static inline int
gss_krb5_padding(int blocksize, int length)
{
        return blocksize - (length % blocksize);
}

static inline void
gss_krb5_add_padding(struct xdr_buf *buf, int offset, int blocksize)
{
        int padding = gss_krb5_padding(blocksize, buf->len - offset);
        char *p;
        struct kvec *iov;

        if (buf->page_len || buf->tail[0].iov_len)
                iov = &buf->tail[0];
        else
                iov = &buf->head[0];
        p = iov->iov_base + iov->iov_len;
        iov->iov_len += padding;
        buf->len += padding;
        memset(p, padding, padding);
}

static inline int
gss_krb5_remove_padding(struct xdr_buf *buf, int blocksize)
{
        u8 *ptr;
        u8 pad;
        size_t len = buf->len;

        if (len <= buf->head[0].iov_len) {
                pad = *(u8 *)(buf->head[0].iov_base + len - 1);
                if (pad > buf->head[0].iov_len)
                        return -EINVAL;
                buf->head[0].iov_len -= pad;
                goto out;
        } else
                len -= buf->head[0].iov_len;
        if (len <= buf->page_len) {
                unsigned int last = (buf->page_base + len - 1)
                                        >> PAGE_SHIFT;
                unsigned int offset = (buf->page_base + len - 1)
                                        & (PAGE_SIZE - 1);
                ptr = kmap_atomic(buf->pages[last]);
                pad = *(ptr + offset);
                kunmap_atomic(ptr);
                goto out;
        } else
                len -= buf->page_len;
        BUG_ON(len > buf->tail[0].iov_len);
        pad = *(u8 *)(buf->tail[0].iov_base + len - 1);
out:
        /* XXX: NOTE: we do not adjust the page lengths--they represent
         * a range of data in the real filesystem page cache, and we need
         * to know that range so the xdr code can properly place read data.
         * However adjusting the head length, as we do above, is harmless.
         * In the case of a request that fits into a single page, the server
         * also uses length and head length together to determine the original
         * start of the request to copy the request for deferral; so it's
         * easier on the server if we adjust head and tail length in tandem.
         * It's not really a problem that we don't fool with the page and
         * tail lengths, though--at worst badly formed xdr might lead the
         * server to attempt to parse the padding.
         * XXX: Document all these weird requirements for gss mechanism
         * wrap/unwrap functions. */
        if (pad > blocksize)
                return -EINVAL;
        if (buf->len > pad)
                buf->len -= pad;
        else
                return -EINVAL;
        return 0;
}

void
gss_krb5_make_confounder(char *p, u32 conflen)
{
        static u64 i = 0;
        u64 *q = (u64 *)p;

        /* rfc1964 claims this should be "random".  But all that's really
         * necessary is that it be unique.  And not even that is necessary in
         * our case since our "gssapi" implementation exists only to support
         * rpcsec_gss, so we know that the only buffers we will ever encrypt
         * already begin with a unique sequence number.  Just to hedge my bets
         * I'll make a half-hearted attempt at something unique, but ensuring
         * uniqueness would mean worrying about atomicity and rollover, and I
         * don't care enough. */

        /* initialize to random value */
        if (i == 0) {
                i = prandom_u32();
                i = (i << 32) | prandom_u32();
        }

        switch (conflen) {
        case 16:
                *q++ = i++;
                fallthrough;
        case 8:
                *q++ = i++;
                break;
        default:
                BUG();
        }
}

/* Assumptions: the head and tail of inbuf are ours to play with.
 * The pages, however, may be real pages in the page cache and we replace
 * them with scratch pages from **pages before writing to them. */
/* XXX: obviously the above should be documentation of wrap interface,
 * and shouldn't be in this kerberos-specific file. */

/* XXX factor out common code with seal/unseal. */

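/*
 * The v1 (RFC 1964) wrap token built below has the layout
 *
 *   TOK_ID (0x02 0x01) | SGN_ALG | SEAL_ALG | 0xff 0xff |
 *   SND_SEQ (8 bytes)  | SGN_CKSUM (cksumlength bytes)  |
 *   confounder | plaintext | pad
 *
 * wrapped in the generic GSS-API token framing emitted by
 * g_make_token_header().  Everything from the confounder onward is
 * encrypted; SND_SEQ is encrypted separately by krb5_make_seq_num().
 */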
static u32
gss_wrap_kerberos_v1(struct krb5_ctx *kctx, int offset,
                struct xdr_buf *buf, struct page **pages)
{
        char                    cksumdata[GSS_KRB5_MAX_CKSUM_LEN];
        struct xdr_netobj       md5cksum = {.len = sizeof(cksumdata),
                                            .data = cksumdata};
        int                     blocksize = 0, plainlen;
        unsigned char           *ptr, *msg_start;
        time64_t                now;
        int                     headlen;
        struct page             **tmp_pages;
        u32                     seq_send;
        u8                      *cksumkey;
        u32                     conflen = kctx->gk5e->conflen;

        dprintk("RPC:       %s\n", __func__);

        now = ktime_get_real_seconds();

        blocksize = crypto_sync_skcipher_blocksize(kctx->enc);
        gss_krb5_add_padding(buf, offset, blocksize);
        BUG_ON((buf->len - offset) % blocksize);
        plainlen = conflen + buf->len - offset;

        headlen = g_token_size(&kctx->mech_used,
                GSS_KRB5_TOK_HDR_LEN + kctx->gk5e->cksumlength + plainlen) -
                (buf->len - offset);

        ptr = buf->head[0].iov_base + offset;
        /* shift data to make room for header. */
        xdr_extend_head(buf, offset, headlen);

        /* XXX Would be cleverer to encrypt while copying. */
        BUG_ON((buf->len - offset - headlen) % blocksize);

        g_make_token_header(&kctx->mech_used,
                                GSS_KRB5_TOK_HDR_LEN +
                                kctx->gk5e->cksumlength + plainlen, &ptr);


        /* ptr now at header described in rfc 1964, section 1.2.1: */
        ptr[0] = (unsigned char) ((KG_TOK_WRAP_MSG >> 8) & 0xff);
        ptr[1] = (unsigned char) (KG_TOK_WRAP_MSG & 0xff);

        msg_start = ptr + GSS_KRB5_TOK_HDR_LEN + kctx->gk5e->cksumlength;

        /*
         * signalg and sealalg are stored as if they were converted from LE
         * to host endian, even though they're opaque pairs of bytes according
         * to the RFC.
         */
        *(__le16 *)(ptr + 2) = cpu_to_le16(kctx->gk5e->signalg);
        *(__le16 *)(ptr + 4) = cpu_to_le16(kctx->gk5e->sealalg);
        ptr[6] = 0xff;
        ptr[7] = 0xff;

        gss_krb5_make_confounder(msg_start, conflen);

        if (kctx->gk5e->keyed_cksum)
                cksumkey = kctx->cksum;
        else
                cksumkey = NULL;

        /* XXXJBF: UGH!: */
        tmp_pages = buf->pages;
        buf->pages = pages;
        if (make_checksum(kctx, ptr, 8, buf, offset + headlen - conflen,
                                        cksumkey, KG_USAGE_SEAL, &md5cksum))
                return GSS_S_FAILURE;
        buf->pages = tmp_pages;

        memcpy(ptr + GSS_KRB5_TOK_HDR_LEN, md5cksum.data, md5cksum.len);

        seq_send = atomic_fetch_inc(&kctx->seq_send);

        /* XXX would probably be more efficient to compute checksum
         * and encrypt at the same time: */
        if ((krb5_make_seq_num(kctx, kctx->seq, kctx->initiate ? 0 : 0xff,
                               seq_send, ptr + GSS_KRB5_TOK_HDR_LEN, ptr + 8)))
                return GSS_S_FAILURE;

        if (kctx->enctype == ENCTYPE_ARCFOUR_HMAC) {
                struct crypto_sync_skcipher *cipher;
                int err;
                cipher = crypto_alloc_sync_skcipher(kctx->gk5e->encrypt_name,
                                                    0, 0);
                if (IS_ERR(cipher))
                        return GSS_S_FAILURE;

                krb5_rc4_setup_enc_key(kctx, cipher, seq_send);

                err = gss_encrypt_xdr_buf(cipher, buf,
                                          offset + headlen - conflen, pages);
                crypto_free_sync_skcipher(cipher);
                if (err)
                        return GSS_S_FAILURE;
        } else {
                if (gss_encrypt_xdr_buf(kctx->enc, buf,
                                        offset + headlen - conflen, pages))
                        return GSS_S_FAILURE;
        }

        return (kctx->endtime < now) ? GSS_S_CONTEXT_EXPIRED : GSS_S_COMPLETE;
}

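/*
 * Undo gss_wrap_kerberos_v1(): verify the token header, decrypt in
 * place, check the checksum, then slide the plaintext back over the
 * GSS header and strip the confounder and the RFC 1964 padding.
 */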
static u32
gss_unwrap_kerberos_v1(struct krb5_ctx *kctx, int offset, int len,
                       struct xdr_buf *buf, unsigned int *slack,
                       unsigned int *align)
{
        int                     signalg;
        int                     sealalg;
        char                    cksumdata[GSS_KRB5_MAX_CKSUM_LEN];
        struct xdr_netobj       md5cksum = {.len = sizeof(cksumdata),
                                            .data = cksumdata};
        time64_t                now;
        int                     direction;
        s32                     seqnum;
        unsigned char           *ptr;
        int                     bodysize;
        void                    *data_start, *orig_start;
        int                     data_len;
        int                     blocksize;
        u32                     conflen = kctx->gk5e->conflen;
        int                     crypt_offset;
        u8                      *cksumkey;
        unsigned int            saved_len = buf->len;

        dprintk("RPC:       gss_unwrap_kerberos\n");

        ptr = (u8 *)buf->head[0].iov_base + offset;
        if (g_verify_token_header(&kctx->mech_used, &bodysize, &ptr,
                                        len - offset))
                return GSS_S_DEFECTIVE_TOKEN;

        if ((ptr[0] != ((KG_TOK_WRAP_MSG >> 8) & 0xff)) ||
            (ptr[1] !=  (KG_TOK_WRAP_MSG & 0xff)))
                return GSS_S_DEFECTIVE_TOKEN;

        /* XXX sanity-check bodysize?? */

        /* get the sign and seal algorithms */

        signalg = ptr[2] + (ptr[3] << 8);
        if (signalg != kctx->gk5e->signalg)
                return GSS_S_DEFECTIVE_TOKEN;

        sealalg = ptr[4] + (ptr[5] << 8);
        if (sealalg != kctx->gk5e->sealalg)
                return GSS_S_DEFECTIVE_TOKEN;

        if ((ptr[6] != 0xff) || (ptr[7] != 0xff))
                return GSS_S_DEFECTIVE_TOKEN;

        /*
         * Data starts after token header and checksum.  ptr points
         * to the beginning of the token header
         */
        crypt_offset = ptr + (GSS_KRB5_TOK_HDR_LEN + kctx->gk5e->cksumlength) -
                                        (unsigned char *)buf->head[0].iov_base;

        /*
         * Need plaintext seqnum to derive encryption key for arcfour-hmac
         */
        if (krb5_get_seq_num(kctx, ptr + GSS_KRB5_TOK_HDR_LEN,
                             ptr + 8, &direction, &seqnum))
                return GSS_S_BAD_SIG;

        if ((kctx->initiate && direction != 0xff) ||
            (!kctx->initiate && direction != 0))
                return GSS_S_BAD_SIG;

        buf->len = len;
        if (kctx->enctype == ENCTYPE_ARCFOUR_HMAC) {
                struct crypto_sync_skcipher *cipher;
                int err;

                cipher = crypto_alloc_sync_skcipher(kctx->gk5e->encrypt_name,
                                                    0, 0);
                if (IS_ERR(cipher))
                        return GSS_S_FAILURE;

                krb5_rc4_setup_enc_key(kctx, cipher, seqnum);

                err = gss_decrypt_xdr_buf(cipher, buf, crypt_offset);
                crypto_free_sync_skcipher(cipher);
                if (err)
                        return GSS_S_DEFECTIVE_TOKEN;
        } else {
                if (gss_decrypt_xdr_buf(kctx->enc, buf, crypt_offset))
                        return GSS_S_DEFECTIVE_TOKEN;
        }

        if (kctx->gk5e->keyed_cksum)
                cksumkey = kctx->cksum;
        else
                cksumkey = NULL;

        if (make_checksum(kctx, ptr, 8, buf, crypt_offset,
                                        cksumkey, KG_USAGE_SEAL, &md5cksum))
                return GSS_S_FAILURE;

        if (memcmp(md5cksum.data, ptr + GSS_KRB5_TOK_HDR_LEN,
                                                kctx->gk5e->cksumlength))
                return GSS_S_BAD_SIG;

        /* it got through unscathed.  Make sure the context is unexpired */

        now = ktime_get_real_seconds();

        if (now > kctx->endtime)
                return GSS_S_CONTEXT_EXPIRED;

        /* do sequencing checks */

        /* Copy the data back to the right position.  XXX: Would probably be
         * better to copy and encrypt at the same time. */

        blocksize = crypto_sync_skcipher_blocksize(kctx->enc);
        data_start = ptr + (GSS_KRB5_TOK_HDR_LEN + kctx->gk5e->cksumlength) +
                                        conflen;
        orig_start = buf->head[0].iov_base + offset;
        data_len = (buf->head[0].iov_base + buf->head[0].iov_len) - data_start;
        memmove(orig_start, data_start, data_len);
        buf->head[0].iov_len -= (data_start - orig_start);
        buf->len = len - (data_start - orig_start);

        if (gss_krb5_remove_padding(buf, blocksize))
                return GSS_S_DEFECTIVE_TOKEN;

        /* slack must include room for krb5 padding */
        *slack = XDR_QUADLEN(saved_len - buf->len);
        /* The GSS blob always precedes the RPC message payload */
        *align = *slack;
        return GSS_S_COMPLETE;
}

/*
 * We can shift data by up to LOCAL_BUF_LEN bytes in a pass.  If we need
 * to do more than that, we shift repeatedly.  Kevin Coffman reports
 * seeing 28 bytes as the value used by Microsoft clients and servers
 * with AES, so this constant is chosen to allow handling 28 in one pass
 * without using too much stack space.
 *
 * If that proves to be a problem perhaps we could use a more clever
 * algorithm.
 */
#define LOCAL_BUF_LEN 32u

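/*
 * rotate_buf_a_little() rotates the xdr_buf left by 'shift' bytes: the
 * first 'shift' bytes are saved, everything else slides toward the
 * front, and the saved bytes are appended at the end.  rotate_left()
 * below applies this to the region starting at 'base'; it is used to
 * undo the RRC (right rotation count) of an incoming v2 wrap token.
 */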
static void rotate_buf_a_little(struct xdr_buf *buf, unsigned int shift)
{
        char head[LOCAL_BUF_LEN];
        char tmp[LOCAL_BUF_LEN];
        unsigned int this_len, i;

        BUG_ON(shift > LOCAL_BUF_LEN);

        read_bytes_from_xdr_buf(buf, 0, head, shift);
        for (i = 0; i + shift < buf->len; i += LOCAL_BUF_LEN) {
                this_len = min(LOCAL_BUF_LEN, buf->len - (i + shift));
                read_bytes_from_xdr_buf(buf, i+shift, tmp, this_len);
                write_bytes_to_xdr_buf(buf, i, tmp, this_len);
        }
        write_bytes_to_xdr_buf(buf, buf->len - shift, head, shift);
}

static void _rotate_left(struct xdr_buf *buf, unsigned int shift)
{
        int shifted = 0;
        int this_shift;

        shift %= buf->len;
        while (shifted < shift) {
                this_shift = min(shift - shifted, LOCAL_BUF_LEN);
                rotate_buf_a_little(buf, this_shift);
                shifted += this_shift;
        }
}

static void rotate_left(u32 base, struct xdr_buf *buf, unsigned int shift)
{
        struct xdr_buf subbuf;

        xdr_buf_subsegment(buf, &subbuf, base, buf->len - base);
        _rotate_left(&subbuf, shift);
}

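/*
 * The v2 (RFC 4121) wrap token header built below is 16 bytes:
 *
 *   TOK_ID (0x05 0x04) | Flags (1) | Filler (0xff) |
 *   EC (2) | RRC (2) | SND_SEQ (8)
 *
 * EC and RRC are both written as zero here; encryption of the payload
 * is delegated to the per-enctype ->encrypt_v2() handler.
 */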
static u32
gss_wrap_kerberos_v2(struct krb5_ctx *kctx, u32 offset,
                     struct xdr_buf *buf, struct page **pages)
{
        u8              *ptr, *plainhdr;
        time64_t        now;
        u8              flags = 0x00;
        __be16          *be16ptr;
        __be64          *be64ptr;
        u32             err;

        dprintk("RPC:       %s\n", __func__);

        if (kctx->gk5e->encrypt_v2 == NULL)
                return GSS_S_FAILURE;

        /* make room for gss token header */
        if (xdr_extend_head(buf, offset, GSS_KRB5_TOK_HDR_LEN))
                return GSS_S_FAILURE;

        /* construct gss token header */
        ptr = plainhdr = buf->head[0].iov_base + offset;
        *ptr++ = (unsigned char) ((KG2_TOK_WRAP>>8) & 0xff);
        *ptr++ = (unsigned char) (KG2_TOK_WRAP & 0xff);

        if ((kctx->flags & KRB5_CTX_FLAG_INITIATOR) == 0)
                flags |= KG2_TOKEN_FLAG_SENTBYACCEPTOR;
        if ((kctx->flags & KRB5_CTX_FLAG_ACCEPTOR_SUBKEY) != 0)
                flags |= KG2_TOKEN_FLAG_ACCEPTORSUBKEY;
        /* We always do confidentiality in wrap tokens */
        flags |= KG2_TOKEN_FLAG_SEALED;

        *ptr++ = flags;
        *ptr++ = 0xff;
        be16ptr = (__be16 *)ptr;

        *be16ptr++ = 0;
        /* "inner" token header always uses 0 for RRC */
        *be16ptr++ = 0;

        be64ptr = (__be64 *)be16ptr;
        *be64ptr = cpu_to_be64(atomic64_fetch_inc(&kctx->seq_send64));

        err = (*kctx->gk5e->encrypt_v2)(kctx, offset, buf, pages);
        if (err)
                return err;

        now = ktime_get_real_seconds();
        return (kctx->endtime < now) ? GSS_S_CONTEXT_EXPIRED : GSS_S_COMPLETE;
}

static u32
gss_unwrap_kerberos_v2(struct krb5_ctx *kctx, int offset, int len,
                       struct xdr_buf *buf, unsigned int *slack,
                       unsigned int *align)
{
        time64_t        now;
        u8              *ptr;
        u8              flags = 0x00;
        u16             ec, rrc;
        int             err;
        u32             headskip, tailskip;
        u8              decrypted_hdr[GSS_KRB5_TOK_HDR_LEN];
        unsigned int    movelen;


        dprintk("RPC:       %s\n", __func__);

        if (kctx->gk5e->decrypt_v2 == NULL)
                return GSS_S_FAILURE;

        ptr = buf->head[0].iov_base + offset;

        if (be16_to_cpu(*((__be16 *)ptr)) != KG2_TOK_WRAP)
                return GSS_S_DEFECTIVE_TOKEN;

        flags = ptr[2];
        if ((!kctx->initiate && (flags & KG2_TOKEN_FLAG_SENTBYACCEPTOR)) ||
            (kctx->initiate && !(flags & KG2_TOKEN_FLAG_SENTBYACCEPTOR)))
                return GSS_S_BAD_SIG;

        if ((flags & KG2_TOKEN_FLAG_SEALED) == 0) {
                dprintk("%s: token missing expected sealed flag\n", __func__);
                return GSS_S_DEFECTIVE_TOKEN;
        }

        if (ptr[3] != 0xff)
                return GSS_S_DEFECTIVE_TOKEN;

        ec = be16_to_cpup((__be16 *)(ptr + 4));
        rrc = be16_to_cpup((__be16 *)(ptr + 6));

        /*
         * NOTE: the sequence number at ptr + 8 is skipped, rpcsec_gss
         * doesn't want it checked; see page 6 of rfc 2203.
         */

        if (rrc != 0)
                rotate_left(offset + 16, buf, rrc);

        err = (*kctx->gk5e->decrypt_v2)(kctx, offset, len, buf,
                                        &headskip, &tailskip);
        if (err)
                return GSS_S_FAILURE;

        /*
         * Retrieve the decrypted gss token header and verify
         * it against the original
         */
        err = read_bytes_from_xdr_buf(buf,
                                len - GSS_KRB5_TOK_HDR_LEN - tailskip,
                                decrypted_hdr, GSS_KRB5_TOK_HDR_LEN);
        if (err) {
                dprintk("%s: error %u getting decrypted_hdr\n", __func__, err);
                return GSS_S_FAILURE;
        }
        if (memcmp(ptr, decrypted_hdr, 6)
                                || memcmp(ptr + 8, decrypted_hdr + 8, 8)) {
                dprintk("%s: token hdr, plaintext hdr mismatch!\n", __func__);
                return GSS_S_FAILURE;
        }

        /* do sequencing checks */

        /* it got through unscathed.  Make sure the context is unexpired */
        now = ktime_get_real_seconds();
        if (now > kctx->endtime)
                return GSS_S_CONTEXT_EXPIRED;

        /*
         * Move the head data back to the right position in xdr_buf.
         * We ignore any "ec" data since it might be in the head or
         * the tail, and we really don't need to deal with it.
         * Note that buf->head[0].iov_len may indicate the available
         * head buffer space rather than that actually occupied.
         */
        movelen = min_t(unsigned int, buf->head[0].iov_len, len);
        movelen -= offset + GSS_KRB5_TOK_HDR_LEN + headskip;
        BUG_ON(offset + GSS_KRB5_TOK_HDR_LEN + headskip + movelen >
                                                        buf->head[0].iov_len);
        memmove(ptr, ptr + GSS_KRB5_TOK_HDR_LEN + headskip, movelen);
        buf->head[0].iov_len -= GSS_KRB5_TOK_HDR_LEN + headskip;
        buf->len = len - (GSS_KRB5_TOK_HDR_LEN + headskip);

        /* Trim off the trailing "extra count" and checksum blob */
        xdr_buf_trim(buf, ec + GSS_KRB5_TOK_HDR_LEN + tailskip);

        *align = XDR_QUADLEN(GSS_KRB5_TOK_HDR_LEN + headskip);
        *slack = *align + XDR_QUADLEN(ec + GSS_KRB5_TOK_HDR_LEN + tailskip);
        return GSS_S_COMPLETE;
}

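/*
 * Entry points from the gss_krb5 mech: DES, DES3 and RC4 contexts use
 * the v1 (RFC 1964) token format, while the AES enctypes use the
 * v2 (RFC 4121) format.
 */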
u32
gss_wrap_kerberos(struct gss_ctx *gctx, int offset,
                  struct xdr_buf *buf, struct page **pages)
{
        struct krb5_ctx *kctx = gctx->internal_ctx_id;

        switch (kctx->enctype) {
        default:
                BUG();
        case ENCTYPE_DES_CBC_RAW:
        case ENCTYPE_DES3_CBC_RAW:
        case ENCTYPE_ARCFOUR_HMAC:
                return gss_wrap_kerberos_v1(kctx, offset, buf, pages);
        case ENCTYPE_AES128_CTS_HMAC_SHA1_96:
        case ENCTYPE_AES256_CTS_HMAC_SHA1_96:
                return gss_wrap_kerberos_v2(kctx, offset, buf, pages);
        }
}

u32
gss_unwrap_kerberos(struct gss_ctx *gctx, int offset,
                    int len, struct xdr_buf *buf)
{
        struct krb5_ctx *kctx = gctx->internal_ctx_id;

        switch (kctx->enctype) {
        default:
                BUG();
        case ENCTYPE_DES_CBC_RAW:
        case ENCTYPE_DES3_CBC_RAW:
        case ENCTYPE_ARCFOUR_HMAC:
                return gss_unwrap_kerberos_v1(kctx, offset, len, buf,
                                              &gctx->slack, &gctx->align);
        case ENCTYPE_AES128_CTS_HMAC_SHA1_96:
        case ENCTYPE_AES256_CTS_HMAC_SHA1_96:
                return gss_unwrap_kerberos_v2(kctx, offset, len, buf,
                                              &gctx->slack, &gctx->align);
        }
}