1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37#include <crypto/algapi.h>
38#include <linux/err.h>
39#include <linux/types.h>
40#include <linux/mm.h>
41#include <linux/scatterlist.h>
42#include <linux/crypto.h>
43#include <linux/highmem.h>
44#include <linux/pagemap.h>
45#include <linux/random.h>
46#include <linux/sunrpc/gss_krb5.h>
47#include <linux/sunrpc/xdr.h>
48
49#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
50# define RPCDBG_FACILITY RPCDBG_AUTH
51#endif
52
53u32
54krb5_encrypt(
55 struct crypto_blkcipher *tfm,
56 void * iv,
57 void * in,
58 void * out,
59 int length)
60{
61 u32 ret = -EINVAL;
62 struct scatterlist sg[1];
63 u8 local_iv[GSS_KRB5_MAX_BLOCKSIZE] = {0};
64 struct blkcipher_desc desc = { .tfm = tfm, .info = local_iv };
65
66 if (length % crypto_blkcipher_blocksize(tfm) != 0)
67 goto out;
68
69 if (crypto_blkcipher_ivsize(tfm) > GSS_KRB5_MAX_BLOCKSIZE) {
70 dprintk("RPC: gss_k5encrypt: tfm iv size too large %d\n",
71 crypto_blkcipher_ivsize(tfm));
72 goto out;
73 }
74
75 if (iv)
76 memcpy(local_iv, iv, crypto_blkcipher_ivsize(tfm));
77
78 memcpy(out, in, length);
79 sg_init_one(sg, out, length);
80
81 ret = crypto_blkcipher_encrypt_iv(&desc, sg, sg, length);
82out:
83 dprintk("RPC: krb5_encrypt returns %d\n", ret);
84 return ret;
85}
86
87u32
88krb5_decrypt(
89 struct crypto_blkcipher *tfm,
90 void * iv,
91 void * in,
92 void * out,
93 int length)
94{
95 u32 ret = -EINVAL;
96 struct scatterlist sg[1];
97 u8 local_iv[GSS_KRB5_MAX_BLOCKSIZE] = {0};
98 struct blkcipher_desc desc = { .tfm = tfm, .info = local_iv };
99
100 if (length % crypto_blkcipher_blocksize(tfm) != 0)
101 goto out;
102
103 if (crypto_blkcipher_ivsize(tfm) > GSS_KRB5_MAX_BLOCKSIZE) {
104 dprintk("RPC: gss_k5decrypt: tfm iv size too large %d\n",
105 crypto_blkcipher_ivsize(tfm));
106 goto out;
107 }
108 if (iv)
109 memcpy(local_iv,iv, crypto_blkcipher_ivsize(tfm));
110
111 memcpy(out, in, length);
112 sg_init_one(sg, out, length);
113
114 ret = crypto_blkcipher_decrypt_iv(&desc, sg, sg, length);
115out:
116 dprintk("RPC: gss_k5decrypt returns %d\n",ret);
117 return ret;
118}
119
120static int
121checksummer(struct scatterlist *sg, void *data)
122{
123 struct hash_desc *desc = data;
124
125 return crypto_hash_update(desc, sg, sg->length);
126}
127
128static int
129arcfour_hmac_md5_usage_to_salt(unsigned int usage, u8 salt[4])
130{
131 unsigned int ms_usage;
132
133 switch (usage) {
134 case KG_USAGE_SIGN:
135 ms_usage = 15;
136 break;
137 case KG_USAGE_SEAL:
138 ms_usage = 13;
139 break;
140 default:
141 return -EINVAL;
142 }
143 salt[0] = (ms_usage >> 0) & 0xff;
144 salt[1] = (ms_usage >> 8) & 0xff;
145 salt[2] = (ms_usage >> 16) & 0xff;
146 salt[3] = (ms_usage >> 24) & 0xff;
147
148 return 0;
149}
150
/*
 * make_checksum_hmac_md5 - arcfour-hmac-md5 (RC4-HMAC) checksum
 *
 * Computes HMAC-MD5(cksumkey, MD5(salt | header | body[body_offset..])),
 * where the 4-byte salt is derived from @usage by
 * arcfour_hmac_md5_usage_to_salt().
 *
 * @kctx:        context supplying the gk5e profile (key/checksum lengths)
 * @header:      token header bytes mixed into the inner MD5
 * @hdrlen:      length of @header
 * @body:        payload to checksum
 * @body_offset: offset into @body at which hashing starts
 * @cksumkey:    HMAC key; must be non-NULL for this checksum type
 * @usage:       key-usage number; only sign/seal values are accepted
 * @cksumout:    out: receives gk5e->cksumlength bytes of the final HMAC
 *
 * Returns 0 on success, GSS_S_FAILURE on any error.
 */
static u32
make_checksum_hmac_md5(struct krb5_ctx *kctx, char *header, int hdrlen,
		       struct xdr_buf *body, int body_offset, u8 *cksumkey,
		       unsigned int usage, struct xdr_netobj *cksumout)
{
	struct hash_desc desc;		/* reused for both md5 and hmac-md5 */
	struct scatterlist sg[1];
	int err = -1;			/* non-zero until the first stage succeeds */
	u8 *checksumdata;
	u8 rc4salt[4];
	struct crypto_hash *md5;
	struct crypto_hash *hmac_md5;

	/* this checksum type is always keyed */
	if (cksumkey == NULL)
		return GSS_S_FAILURE;

	if (cksumout->len < kctx->gk5e->cksumlength) {
		dprintk("%s: checksum buffer length, %u, too small for %s\n",
			__func__, cksumout->len, kctx->gk5e->name);
		return GSS_S_FAILURE;
	}

	if (arcfour_hmac_md5_usage_to_salt(usage, rc4salt)) {
		dprintk("%s: invalid usage value %u\n", __func__, usage);
		return GSS_S_FAILURE;
	}

	checksumdata = kmalloc(GSS_KRB5_MAX_CKSUM_LEN, GFP_NOFS);
	if (!checksumdata)
		return GSS_S_FAILURE;

	md5 = crypto_alloc_hash("md5", 0, CRYPTO_ALG_ASYNC);
	if (IS_ERR(md5))
		goto out_free_cksum;	/* err still -1 -> GSS_S_FAILURE */

	hmac_md5 = crypto_alloc_hash(kctx->gk5e->cksum_name, 0,
				     CRYPTO_ALG_ASYNC);
	if (IS_ERR(hmac_md5))
		goto out_free_md5;

	/* Stage 1: inner digest MD5(salt | header | body) into checksumdata */
	desc.tfm = md5;
	desc.flags = CRYPTO_TFM_REQ_MAY_SLEEP;

	err = crypto_hash_init(&desc);
	if (err)
		goto out;
	sg_init_one(sg, rc4salt, 4);
	err = crypto_hash_update(&desc, sg, 4);
	if (err)
		goto out;

	sg_init_one(sg, header, hdrlen);
	err = crypto_hash_update(&desc, sg, hdrlen);
	if (err)
		goto out;
	err = xdr_process_buf(body, body_offset, body->len - body_offset,
			      checksummer, &desc);
	if (err)
		goto out;
	err = crypto_hash_final(&desc, checksumdata);
	if (err)
		goto out;

	/* Stage 2: HMAC the inner digest with cksumkey */
	desc.tfm = hmac_md5;
	desc.flags = CRYPTO_TFM_REQ_MAY_SLEEP;

	err = crypto_hash_init(&desc);
	if (err)
		goto out;
	err = crypto_hash_setkey(hmac_md5, cksumkey, kctx->gk5e->keylength);
	if (err)
		goto out;

	sg_init_one(sg, checksumdata, crypto_hash_digestsize(md5));
	err = crypto_hash_digest(&desc, sg, crypto_hash_digestsize(md5),
				 checksumdata);
	if (err)
		goto out;

	/* hand back only the profile's checksum length */
	memcpy(cksumout->data, checksumdata, kctx->gk5e->cksumlength);
	cksumout->len = kctx->gk5e->cksumlength;
out:
	crypto_free_hash(hmac_md5);
out_free_md5:
	crypto_free_hash(md5);
out_free_cksum:
	kfree(checksumdata);
	return err ? GSS_S_FAILURE : 0;
}
240
241
242
243
244
245
/*
 * make_checksum - checksum a token header plus an xdr_buf payload
 *
 * Dispatches arcfour-hmac-md5 contexts to make_checksum_hmac_md5();
 * otherwise runs the enctype's hash (gk5e->cksum_name) over
 * header | body[body_offset..], optionally keyed with @cksumkey, and
 * post-processes the digest according to gk5e->ctype:
 *
 *   CKSUMTYPE_RSA_MD5:       digest is encrypted with kctx->seq via
 *                            gk5e->encrypt(); the trailing cksumlength
 *                            bytes of the result become the checksum.
 *   CKSUMTYPE_HMAC_SHA1_DES3: digest is truncated to cksumlength.
 *
 * Returns 0 on success, GSS_S_FAILURE on any error.
 */
u32
make_checksum(struct krb5_ctx *kctx, char *header, int hdrlen,
	      struct xdr_buf *body, int body_offset, u8 *cksumkey,
	      unsigned int usage, struct xdr_netobj *cksumout)
{
	struct hash_desc desc;
	struct scatterlist sg[1];
	int err = -1;		/* non-zero until the hash alloc succeeds */
	u8 *checksumdata;
	unsigned int checksumlen;

	/* arcfour uses a completely different two-stage construction */
	if (kctx->gk5e->ctype == CKSUMTYPE_HMAC_MD5_ARCFOUR)
		return make_checksum_hmac_md5(kctx, header, hdrlen,
					      body, body_offset,
					      cksumkey, usage, cksumout);

	if (cksumout->len < kctx->gk5e->cksumlength) {
		dprintk("%s: checksum buffer length, %u, too small for %s\n",
			__func__, cksumout->len, kctx->gk5e->name);
		return GSS_S_FAILURE;
	}

	checksumdata = kmalloc(GSS_KRB5_MAX_CKSUM_LEN, GFP_NOFS);
	if (checksumdata == NULL)
		return GSS_S_FAILURE;

	desc.tfm = crypto_alloc_hash(kctx->gk5e->cksum_name, 0, CRYPTO_ALG_ASYNC);
	if (IS_ERR(desc.tfm))
		goto out_free_cksum;	/* err still -1 -> GSS_S_FAILURE */
	desc.flags = CRYPTO_TFM_REQ_MAY_SLEEP;

	checksumlen = crypto_hash_digestsize(desc.tfm);

	/* key the hash only when a key was supplied (hmac vs. plain hash) */
	if (cksumkey != NULL) {
		err = crypto_hash_setkey(desc.tfm, cksumkey,
					 kctx->gk5e->keylength);
		if (err)
			goto out;
	}

	/* hash header first, then the body region */
	err = crypto_hash_init(&desc);
	if (err)
		goto out;
	sg_init_one(sg, header, hdrlen);
	err = crypto_hash_update(&desc, sg, hdrlen);
	if (err)
		goto out;
	err = xdr_process_buf(body, body_offset, body->len - body_offset,
			      checksummer, &desc);
	if (err)
		goto out;
	err = crypto_hash_final(&desc, checksumdata);
	if (err)
		goto out;

	switch (kctx->gk5e->ctype) {
	case CKSUMTYPE_RSA_MD5:
		/* encrypt the digest in place, keep its tail */
		err = kctx->gk5e->encrypt(kctx->seq, NULL, checksumdata,
					  checksumdata, checksumlen);
		if (err)
			goto out;
		memcpy(cksumout->data,
		       checksumdata + checksumlen - kctx->gk5e->cksumlength,
		       kctx->gk5e->cksumlength);
		break;
	case CKSUMTYPE_HMAC_SHA1_DES3:
		memcpy(cksumout->data, checksumdata, kctx->gk5e->cksumlength);
		break;
	default:
		BUG();	/* no other ctypes reach this function */
		break;
	}
	cksumout->len = kctx->gk5e->cksumlength;
out:
	crypto_free_hash(desc.tfm);
out_free_cksum:
	kfree(checksumdata);
	return err ? GSS_S_FAILURE : 0;
}
325
326
327
328
329
330
331
332
/*
 * make_checksum_v2 - keyed checksum for the newer (AES) enctypes
 *
 * Runs the keyed hash gk5e->cksum_name over body[body_offset..] FIRST
 * and the optional @header LAST — note this is the reverse of
 * make_checksum()'s order.  The digest is truncated to
 * gk5e->cksumlength bytes in @cksumout.
 *
 * Requires a keyed checksum profile and a non-NULL @cksumkey.
 * Returns 0 on success, GSS_S_FAILURE on any error.
 */
u32
make_checksum_v2(struct krb5_ctx *kctx, char *header, int hdrlen,
		 struct xdr_buf *body, int body_offset, u8 *cksumkey,
		 unsigned int usage, struct xdr_netobj *cksumout)
{
	struct hash_desc desc;
	struct scatterlist sg[1];
	int err = -1;		/* non-zero until the hash alloc succeeds */
	u8 *checksumdata;
	unsigned int checksumlen;

	if (kctx->gk5e->keyed_cksum == 0) {
		dprintk("%s: expected keyed hash for %s\n",
			__func__, kctx->gk5e->name);
		return GSS_S_FAILURE;
	}
	if (cksumkey == NULL) {
		dprintk("%s: no key supplied for %s\n",
			__func__, kctx->gk5e->name);
		return GSS_S_FAILURE;
	}

	checksumdata = kmalloc(GSS_KRB5_MAX_CKSUM_LEN, GFP_NOFS);
	if (!checksumdata)
		return GSS_S_FAILURE;

	desc.tfm = crypto_alloc_hash(kctx->gk5e->cksum_name, 0,
				     CRYPTO_ALG_ASYNC);
	if (IS_ERR(desc.tfm))
		goto out_free_cksum;	/* err still -1 -> GSS_S_FAILURE */
	checksumlen = crypto_hash_digestsize(desc.tfm);
	desc.flags = CRYPTO_TFM_REQ_MAY_SLEEP;

	err = crypto_hash_setkey(desc.tfm, cksumkey, kctx->gk5e->keylength);
	if (err)
		goto out;

	/* body first ... */
	err = crypto_hash_init(&desc);
	if (err)
		goto out;
	err = xdr_process_buf(body, body_offset, body->len - body_offset,
			      checksummer, &desc);
	if (err)
		goto out;
	/* ... then the (optional) header */
	if (header != NULL) {
		sg_init_one(sg, header, hdrlen);
		err = crypto_hash_update(&desc, sg, hdrlen);
		if (err)
			goto out;
	}
	err = crypto_hash_final(&desc, checksumdata);
	if (err)
		goto out;

	cksumout->len = kctx->gk5e->cksumlength;

	switch (kctx->gk5e->ctype) {
	case CKSUMTYPE_HMAC_SHA1_96_AES128:
	case CKSUMTYPE_HMAC_SHA1_96_AES256:
		/* truncate the digest to the profile's checksum length */
		memcpy(cksumout->data, checksumdata, kctx->gk5e->cksumlength);
		break;
	default:
		BUG();	/* only the AES ctypes use the v2 checksum */
		break;
	}
out:
	crypto_free_hash(desc.tfm);
out_free_cksum:
	kfree(checksumdata);
	return err ? GSS_S_FAILURE : 0;
}
405
/*
 * State carried across encryptor() callbacks while walking an xdr_buf:
 * up to four scatterlist fragments are batched and encrypted whenever a
 * whole number of cipher blocks has accumulated.
 */
struct encryptor_desc {
	u8 iv[GSS_KRB5_MAX_BLOCKSIZE];	/* running CBC IV */
	struct blkcipher_desc desc;	/* cipher handle; .info points at iv */
	int pos;			/* current byte offset into outbuf */
	struct xdr_buf *outbuf;		/* buffer receiving the ciphertext */
	struct page **pages;		/* plaintext source pages for the page area */
	struct scatterlist infrags[4];	/* pending input fragments */
	struct scatterlist outfrags[4];	/* pending output fragments */
	int fragno;			/* number of pending fragments */
	int fraglen;			/* bytes pending (incl. partial block) */
};
417
/*
 * encryptor - xdr_process_buf() callback that CBC-encrypts in place
 *
 * Accumulates scatterlist fragments in desc->infrags/outfrags and encrypts
 * once a whole number of cipher blocks is available; any trailing partial
 * block is carried over into the next invocation.  When the current
 * fragment lies within the xdr_buf's page area, the input is read from
 * the parallel desc->pages array while the output still goes to the
 * buffer's own page, so the buffer ends up holding ciphertext.
 */
static int
encryptor(struct scatterlist *sg, void *data)
{
	struct encryptor_desc *desc = data;
	struct xdr_buf *outbuf = desc->outbuf;
	struct page *in_page;
	int thislen = desc->fraglen + sg->length;
	int fraglen, ret;
	int page_pos;

	/* at most 4 fragments may be pending at once */
	BUG_ON(desc->fragno > 3);

	/* position of this fragment relative to the page area */
	page_pos = desc->pos - outbuf->head[0].iov_len;
	if (page_pos >= 0 && page_pos < outbuf->page_len) {
		/* read input from the substitute page array */
		int i = (page_pos + outbuf->page_base) >> PAGE_CACHE_SHIFT;
		in_page = desc->pages[i];
	} else {
		in_page = sg_page(sg);
	}
	sg_set_page(&desc->infrags[desc->fragno], in_page, sg->length,
		    sg->offset);
	sg_set_page(&desc->outfrags[desc->fragno], sg_page(sg), sg->length,
		    sg->offset);
	desc->fragno++;
	desc->fraglen += sg->length;
	desc->pos += sg->length;

	/* split off the trailing partial cipher block, if any */
	fraglen = thislen & (crypto_blkcipher_blocksize(desc->desc.tfm) - 1);
	thislen -= fraglen;

	if (thislen == 0)
		return 0;	/* nothing encryptable yet; keep batching */

	sg_mark_end(&desc->infrags[desc->fragno - 1]);
	sg_mark_end(&desc->outfrags[desc->fragno - 1]);

	ret = crypto_blkcipher_encrypt_iv(&desc->desc, desc->outfrags,
					  desc->infrags, thislen);
	if (ret)
		return ret;

	sg_init_table(desc->infrags, 4);
	sg_init_table(desc->outfrags, 4);

	if (fraglen) {
		/* carry the partial block into the next call */
		sg_set_page(&desc->outfrags[0], sg_page(sg), fraglen,
			    sg->offset + sg->length - fraglen);
		desc->infrags[0] = desc->outfrags[0];
		sg_assign_page(&desc->infrags[0], in_page);
		desc->fragno = 1;
		desc->fraglen = fraglen;
	} else {
		desc->fragno = 0;
		desc->fraglen = 0;
	}
	return 0;
}
478
479int
480gss_encrypt_xdr_buf(struct crypto_blkcipher *tfm, struct xdr_buf *buf,
481 int offset, struct page **pages)
482{
483 int ret;
484 struct encryptor_desc desc;
485
486 BUG_ON((buf->len - offset) % crypto_blkcipher_blocksize(tfm) != 0);
487
488 memset(desc.iv, 0, sizeof(desc.iv));
489 desc.desc.tfm = tfm;
490 desc.desc.info = desc.iv;
491 desc.desc.flags = 0;
492 desc.pos = offset;
493 desc.outbuf = buf;
494 desc.pages = pages;
495 desc.fragno = 0;
496 desc.fraglen = 0;
497
498 sg_init_table(desc.infrags, 4);
499 sg_init_table(desc.outfrags, 4);
500
501 ret = xdr_process_buf(buf, offset, buf->len - offset, encryptor, &desc);
502 return ret;
503}
504
/*
 * State carried across decryptor() callbacks; same batching scheme as
 * encryptor_desc but decryption is always in place, so a single
 * fragment list suffices.
 */
struct decryptor_desc {
	u8 iv[GSS_KRB5_MAX_BLOCKSIZE];	/* running CBC IV */
	struct blkcipher_desc desc;	/* cipher handle; .info points at iv */
	struct scatterlist frags[4];	/* pending in-place fragments */
	int fragno;			/* number of pending fragments */
	int fraglen;			/* bytes pending (incl. partial block) */
};
512
/*
 * decryptor - xdr_process_buf() callback that CBC-decrypts in place
 *
 * Accumulates scatterlist fragments and decrypts once a whole number of
 * cipher blocks is available; a trailing partial block is carried over
 * into the next invocation.
 */
static int
decryptor(struct scatterlist *sg, void *data)
{
	struct decryptor_desc *desc = data;
	int thislen = desc->fraglen + sg->length;
	int fraglen, ret;

	/* at most 4 fragments may be pending at once */
	BUG_ON(desc->fragno > 3);
	sg_set_page(&desc->frags[desc->fragno], sg_page(sg), sg->length,
		    sg->offset);
	desc->fragno++;
	desc->fraglen += sg->length;

	/* split off the trailing partial cipher block, if any */
	fraglen = thislen & (crypto_blkcipher_blocksize(desc->desc.tfm) - 1);
	thislen -= fraglen;

	if (thislen == 0)
		return 0;	/* nothing decryptable yet; keep batching */

	sg_mark_end(&desc->frags[desc->fragno - 1]);

	ret = crypto_blkcipher_decrypt_iv(&desc->desc, desc->frags,
					  desc->frags, thislen);
	if (ret)
		return ret;

	sg_init_table(desc->frags, 4);

	if (fraglen) {
		/* carry the partial block into the next call */
		sg_set_page(&desc->frags[0], sg_page(sg), fraglen,
			    sg->offset + sg->length - fraglen);
		desc->fragno = 1;
		desc->fraglen = fraglen;
	} else {
		desc->fragno = 0;
		desc->fraglen = 0;
	}
	return 0;
}
554
555int
556gss_decrypt_xdr_buf(struct crypto_blkcipher *tfm, struct xdr_buf *buf,
557 int offset)
558{
559 struct decryptor_desc desc;
560
561
562 BUG_ON((buf->len - offset) % crypto_blkcipher_blocksize(tfm) != 0);
563
564 memset(desc.iv, 0, sizeof(desc.iv));
565 desc.desc.tfm = tfm;
566 desc.desc.info = desc.iv;
567 desc.desc.flags = 0;
568 desc.fragno = 0;
569 desc.fraglen = 0;
570
571 sg_init_table(desc.frags, 4);
572
573 return xdr_process_buf(buf, offset, buf->len - offset, decryptor, &desc);
574}
575
576
577
578
579
580
581
582
583
584
585
586
587
588
589
590
591
592
593int
594xdr_extend_head(struct xdr_buf *buf, unsigned int base, unsigned int shiftlen)
595{
596 u8 *p;
597
598 if (shiftlen == 0)
599 return 0;
600
601 BUILD_BUG_ON(GSS_KRB5_MAX_SLACK_NEEDED > RPC_MAX_AUTH_SIZE);
602 BUG_ON(shiftlen > RPC_MAX_AUTH_SIZE);
603
604 p = buf->head[0].iov_base + base;
605
606 memmove(p + shiftlen, p, buf->head[0].iov_len - base);
607
608 buf->head[0].iov_len += shiftlen;
609 buf->len += shiftlen;
610
611 return 0;
612}
613
614static u32
615gss_krb5_cts_crypt(struct crypto_blkcipher *cipher, struct xdr_buf *buf,
616 u32 offset, u8 *iv, struct page **pages, int encrypt)
617{
618 u32 ret;
619 struct scatterlist sg[1];
620 struct blkcipher_desc desc = { .tfm = cipher, .info = iv };
621 u8 *data;
622 struct page **save_pages;
623 u32 len = buf->len - offset;
624
625 if (len > GSS_KRB5_MAX_BLOCKSIZE * 2) {
626 WARN_ON(0);
627 return -ENOMEM;
628 }
629 data = kmalloc(GSS_KRB5_MAX_BLOCKSIZE * 2, GFP_NOFS);
630 if (!data)
631 return -ENOMEM;
632
633
634
635
636
637
638 save_pages = buf->pages;
639 if (encrypt)
640 buf->pages = pages;
641
642 ret = read_bytes_from_xdr_buf(buf, offset, data, len);
643 buf->pages = save_pages;
644 if (ret)
645 goto out;
646
647 sg_init_one(sg, data, len);
648
649 if (encrypt)
650 ret = crypto_blkcipher_encrypt_iv(&desc, sg, sg, len);
651 else
652 ret = crypto_blkcipher_decrypt_iv(&desc, sg, sg, len);
653
654 if (ret)
655 goto out;
656
657 ret = write_bytes_to_xdr_buf(buf, offset, data, len);
658
659out:
660 kfree(data);
661 return ret;
662}
663
/*
 * gss_krb5_aes_encrypt - wrap-token encryption for the AES enctypes
 *
 * Steps, as implemented below:
 *   1. pick the per-direction cipher, aux (CBC) cipher, integrity key
 *      and usage number from @kctx;
 *   2. insert a random confounder into the head just after the token
 *      header at @offset;
 *   3. copy the plaintext token header to the end of the tail (it is
 *      encrypted along with the payload);
 *   4. compute the HMAC over confounder|plaintext|header — with
 *      buf->pages temporarily swapped for @pages so the checksum reads
 *      the caller's plaintext pages — and stage it after the tail;
 *   5. CBC-encrypt everything except the last two cipher blocks with
 *      the aux cipher, then run the final blocks through
 *      gss_krb5_cts_crypt() using the carried-over IV;
 *   6. extend the tail/len to cover the appended checksum.
 *
 * Returns 0 on success or GSS_S_FAILURE.
 */
u32
gss_krb5_aes_encrypt(struct krb5_ctx *kctx, u32 offset,
		     struct xdr_buf *buf, struct page **pages)
{
	u32 err;
	struct xdr_netobj hmac;
	u8 *cksumkey;
	u8 *ecptr;		/* where the header copy lands in the tail */
	struct crypto_blkcipher *cipher, *aux_cipher;
	int blocksize;
	struct page **save_pages;
	int nblocks, nbytes;
	struct encryptor_desc desc;
	u32 cbcbytes;		/* bytes handled by the plain CBC pass */
	unsigned int usage;

	if (kctx->initiate) {
		cipher = kctx->initiator_enc;
		aux_cipher = kctx->initiator_enc_aux;
		cksumkey = kctx->initiator_integ;
		usage = KG_USAGE_INITIATOR_SEAL;
	} else {
		cipher = kctx->acceptor_enc;
		aux_cipher = kctx->acceptor_enc_aux;
		cksumkey = kctx->acceptor_integ;
		usage = KG_USAGE_ACCEPTOR_SEAL;
	}
	blocksize = crypto_blkcipher_blocksize(cipher);

	/* insert the confounder right after the token header */
	offset += GSS_KRB5_TOK_HDR_LEN;
	if (xdr_extend_head(buf, offset, kctx->gk5e->conflen))
		return GSS_S_FAILURE;
	gss_krb5_make_confounder(buf->head[0].iov_base + offset, kctx->gk5e->conflen);
	offset -= GSS_KRB5_TOK_HDR_LEN;

	if (buf->tail[0].iov_base != NULL) {
		ecptr = buf->tail[0].iov_base + buf->tail[0].iov_len;
	} else {
		/* no tail yet: start one just past the head */
		buf->tail[0].iov_base = buf->head[0].iov_base
							+ buf->head[0].iov_len;
		buf->tail[0].iov_len = 0;
		ecptr = buf->tail[0].iov_base;
	}

	/* copy the plaintext token header into the tail; it gets encrypted */
	memcpy(ecptr, buf->head[0].iov_base + offset, GSS_KRB5_TOK_HDR_LEN);
	buf->tail[0].iov_len += GSS_KRB5_TOK_HDR_LEN;
	buf->len += GSS_KRB5_TOK_HDR_LEN;

	/* the HMAC will be staged right after the tail contents */
	hmac.len = GSS_KRB5_MAX_CKSUM_LEN;
	hmac.data = buf->tail[0].iov_base + buf->tail[0].iov_len;

	/*
	 * Checksum the plaintext: swap in the caller's plaintext pages
	 * for the duration of the checksum, then restore buf->pages.
	 */
	save_pages = buf->pages;
	buf->pages = pages;

	err = make_checksum_v2(kctx, NULL, 0, buf,
			       offset + GSS_KRB5_TOK_HDR_LEN,
			       cksumkey, usage, &hmac);
	buf->pages = save_pages;
	if (err)
		return GSS_S_FAILURE;

	/* everything except the final two blocks goes through plain CBC */
	nbytes = buf->len - offset - GSS_KRB5_TOK_HDR_LEN;
	nblocks = (nbytes + blocksize - 1) / blocksize;
	cbcbytes = 0;
	if (nblocks > 2)
		cbcbytes = (nblocks - 2) * blocksize;

	memset(desc.iv, 0, sizeof(desc.iv));

	if (cbcbytes) {
		desc.pos = offset + GSS_KRB5_TOK_HDR_LEN;
		desc.fragno = 0;
		desc.fraglen = 0;
		desc.pages = pages;
		desc.outbuf = buf;
		desc.desc.info = desc.iv;
		desc.desc.flags = 0;
		desc.desc.tfm = aux_cipher;

		sg_init_table(desc.infrags, 4);
		sg_init_table(desc.outfrags, 4);

		err = xdr_process_buf(buf, offset + GSS_KRB5_TOK_HDR_LEN,
				      cbcbytes, encryptor, &desc);
		if (err)
			goto out_err;
	}

	/* finish the last two blocks with CTS, chaining the CBC IV */
	err = gss_krb5_cts_crypt(cipher, buf,
				 offset + GSS_KRB5_TOK_HDR_LEN + cbcbytes,
				 desc.iv, pages, 1);
	if (err) {
		err = GSS_S_FAILURE;
		goto out_err;
	}

	/* account for the checksum appended after the ciphertext */
	buf->tail[0].iov_len += kctx->gk5e->cksumlength;
	buf->len += kctx->gk5e->cksumlength;

out_err:
	if (err)
		err = GSS_S_FAILURE;
	return err;
}
780
/*
 * gss_krb5_aes_decrypt - wrap-token decryption for the AES enctypes
 *
 * Steps, as implemented below:
 *   1. pick the peer's cipher, aux (CBC) cipher, integrity key and
 *      usage number from @kctx (reverse of the encrypt side);
 *   2. carve a subbuf covering the ciphertext only — everything after
 *      the token header, minus the trailing checksum;
 *   3. CBC-decrypt all but the final two cipher blocks with the aux
 *      cipher, then run the final blocks through gss_krb5_cts_crypt()
 *      with the carried-over IV;
 *   4. recompute the HMAC over the decrypted subbuf and compare it
 *      (constant-time, via crypto_memneq) against the packet's
 *      trailing checksum;
 *   5. report how many leading (confounder) and trailing (checksum)
 *      bytes the caller should skip.
 *
 * Returns 0 on success, GSS_S_BAD_SIG on checksum mismatch, or
 * GSS_S_FAILURE on any other error.
 */
u32
gss_krb5_aes_decrypt(struct krb5_ctx *kctx, u32 offset, struct xdr_buf *buf,
		     u32 *headskip, u32 *tailskip)
{
	struct xdr_buf subbuf;
	u32 ret = 0;
	u8 *cksum_key;
	struct crypto_blkcipher *cipher, *aux_cipher;
	struct xdr_netobj our_hmac_obj;
	u8 our_hmac[GSS_KRB5_MAX_CKSUM_LEN];
	u8 pkt_hmac[GSS_KRB5_MAX_CKSUM_LEN];
	int nblocks, blocksize, cbcbytes;
	struct decryptor_desc desc;
	unsigned int usage;

	/* use the peer's keys: we decrypt what the other side sealed */
	if (kctx->initiate) {
		cipher = kctx->acceptor_enc;
		aux_cipher = kctx->acceptor_enc_aux;
		cksum_key = kctx->acceptor_integ;
		usage = KG_USAGE_ACCEPTOR_SEAL;
	} else {
		cipher = kctx->initiator_enc;
		aux_cipher = kctx->initiator_enc_aux;
		cksum_key = kctx->initiator_integ;
		usage = KG_USAGE_INITIATOR_SEAL;
	}
	blocksize = crypto_blkcipher_blocksize(cipher);

	/* subbuf = ciphertext only: skip token header, drop the checksum */
	xdr_buf_subsegment(buf, &subbuf, offset + GSS_KRB5_TOK_HDR_LEN,
			   (buf->len - offset - GSS_KRB5_TOK_HDR_LEN -
			    kctx->gk5e->cksumlength));

	nblocks = (subbuf.len + blocksize - 1) / blocksize;

	/* all but the final two blocks go through plain CBC */
	cbcbytes = 0;
	if (nblocks > 2)
		cbcbytes = (nblocks - 2) * blocksize;

	memset(desc.iv, 0, sizeof(desc.iv));

	if (cbcbytes) {
		desc.fragno = 0;
		desc.fraglen = 0;
		desc.desc.info = desc.iv;
		desc.desc.flags = 0;
		desc.desc.tfm = aux_cipher;

		sg_init_table(desc.frags, 4);

		ret = xdr_process_buf(&subbuf, 0, cbcbytes, decryptor, &desc);
		if (ret)
			goto out_err;
	}

	/* finish the last two blocks with CTS, chaining the CBC IV */
	ret = gss_krb5_cts_crypt(cipher, &subbuf, cbcbytes, desc.iv, NULL, 0);
	if (ret)
		goto out_err;

	/* recompute the checksum over the decrypted data */
	our_hmac_obj.len = sizeof(our_hmac);
	our_hmac_obj.data = our_hmac;

	ret = make_checksum_v2(kctx, NULL, 0, &subbuf, 0,
			       cksum_key, usage, &our_hmac_obj);
	if (ret)
		goto out_err;

	/* pull the packet's trailing checksum out of the full buffer */
	ret = read_bytes_from_xdr_buf(buf, buf->len - kctx->gk5e->cksumlength,
				      pkt_hmac, kctx->gk5e->cksumlength);
	if (ret)
		goto out_err;

	/* constant-time compare to avoid a timing oracle */
	if (crypto_memneq(pkt_hmac, our_hmac, kctx->gk5e->cksumlength) != 0) {
		ret = GSS_S_BAD_SIG;
		goto out_err;
	}
	*headskip = kctx->gk5e->conflen;
	*tailskip = kctx->gk5e->cksumlength;
out_err:
	if (ret && ret != GSS_S_BAD_SIG)
		ret = GSS_S_FAILURE;
	return ret;
}
869
870
871
872
873
/*
 * krb5_rc4_setup_seq_key - derive and install the RC4 sequence-number key
 *
 * Two-step derivation, as coded below:
 *   Kseq  = HMAC(Ksess, 0x00000000)     (four zero bytes)
 *   Kseq' = HMAC(Kseq, checksum[0..7])  (first 8 bytes of @cksum)
 * then Kseq' is set as @cipher's key.
 *
 * Returns 0 on success or a negative errno.
 */
int
krb5_rc4_setup_seq_key(struct krb5_ctx *kctx, struct crypto_blkcipher *cipher,
		       unsigned char *cksum)
{
	struct crypto_hash *hmac;
	struct hash_desc desc;
	struct scatterlist sg[1];
	u8 Kseq[GSS_KRB5_MAX_KEYLEN];	/* NOTE(review): holds key material;
					 * not zeroized before return —
					 * consider wiping on exit */
	u32 zeroconstant = 0;
	int err;

	dprintk("%s: entered\n", __func__);

	hmac = crypto_alloc_hash(kctx->gk5e->cksum_name, 0, CRYPTO_ALG_ASYNC);
	if (IS_ERR(hmac)) {
		dprintk("%s: error %ld, allocating hash '%s'\n",
			__func__, PTR_ERR(hmac), kctx->gk5e->cksum_name);
		return PTR_ERR(hmac);
	}

	desc.tfm = hmac;
	desc.flags = 0;

	err = crypto_hash_init(&desc);
	if (err)
		goto out_err;

	/* step 1: Kseq = HMAC(session key, 4 zero bytes) */
	err = crypto_hash_setkey(hmac, kctx->Ksess, kctx->gk5e->keylength);
	if (err)
		goto out_err;

	sg_init_one(sg, &zeroconstant, 4);
	err = crypto_hash_digest(&desc, sg, 4, Kseq);
	if (err)
		goto out_err;

	/* step 2: Kseq = HMAC(Kseq, first 8 bytes of the checksum) */
	err = crypto_hash_setkey(hmac, Kseq, kctx->gk5e->keylength);
	if (err)
		goto out_err;

	sg_set_buf(sg, cksum, 8);

	err = crypto_hash_digest(&desc, sg, 8, Kseq);
	if (err)
		goto out_err;

	/* install the derived key into the caller's cipher */
	err = crypto_blkcipher_setkey(cipher, Kseq, kctx->gk5e->keylength);
	if (err)
		goto out_err;

	err = 0;

out_err:
	crypto_free_hash(hmac);
	dprintk("%s: returning %d\n", __func__, err);
	return err;
}
933
934
935
936
937
/*
 * krb5_rc4_setup_enc_key - derive and install the RC4 message-encryption key
 *
 * Two-step derivation, as coded below:
 *   Kcrypt  = HMAC(Ksess ^ 0xf0f0..., 0x00000000)  (4 zero bytes)
 *   Kcrypt' = HMAC(Kcrypt, seqnum)                 (big-endian 4 bytes)
 * then Kcrypt' is set as @cipher's key.
 *
 * Returns 0 on success or a negative errno.
 */
int
krb5_rc4_setup_enc_key(struct krb5_ctx *kctx, struct crypto_blkcipher *cipher,
		       s32 seqnum)
{
	struct crypto_hash *hmac;
	struct hash_desc desc;
	struct scatterlist sg[1];
	u8 Kcrypt[GSS_KRB5_MAX_KEYLEN];	/* NOTE(review): holds key material;
					 * not zeroized before return —
					 * consider wiping on exit */
	u8 zeroconstant[4] = {0};
	u8 seqnumarray[4];
	int err, i;

	/* NOTE(review): seqnum is s32 but printed with %u — a negative
	 * value would be misreported; harmless otherwise */
	dprintk("%s: entered, seqnum %u\n", __func__, seqnum);

	hmac = crypto_alloc_hash(kctx->gk5e->cksum_name, 0, CRYPTO_ALG_ASYNC);
	if (IS_ERR(hmac)) {
		dprintk("%s: error %ld, allocating hash '%s'\n",
			__func__, PTR_ERR(hmac), kctx->gk5e->cksum_name);
		return PTR_ERR(hmac);
	}

	desc.tfm = hmac;
	desc.flags = 0;

	err = crypto_hash_init(&desc);
	if (err)
		goto out_err;

	/* step 1 key: XOR every session-key byte with 0xf0 */
	for (i = 0; i < kctx->gk5e->keylength; i++)
		Kcrypt[i] = kctx->Ksess[i] ^ 0xf0;

	err = crypto_hash_setkey(hmac, Kcrypt, kctx->gk5e->keylength);
	if (err)
		goto out_err;

	sg_init_one(sg, zeroconstant, 4);
	err = crypto_hash_digest(&desc, sg, 4, Kcrypt);
	if (err)
		goto out_err;

	/* step 2: re-key with the intermediate and HMAC the seqnum */
	err = crypto_hash_setkey(hmac, Kcrypt, kctx->gk5e->keylength);
	if (err)
		goto out_err;

	/* big-endian serialization of the sequence number */
	seqnumarray[0] = (unsigned char) ((seqnum >> 24) & 0xff);
	seqnumarray[1] = (unsigned char) ((seqnum >> 16) & 0xff);
	seqnumarray[2] = (unsigned char) ((seqnum >> 8) & 0xff);
	seqnumarray[3] = (unsigned char) ((seqnum >> 0) & 0xff);

	sg_set_buf(sg, seqnumarray, 4);

	err = crypto_hash_digest(&desc, sg, 4, Kcrypt);
	if (err)
		goto out_err;

	/* install the derived key into the caller's cipher */
	err = crypto_blkcipher_setkey(cipher, Kcrypt, kctx->gk5e->keylength);
	if (err)
		goto out_err;

	err = 0;

out_err:
	crypto_free_hash(hmac);
	dprintk("%s: returning %d\n", __func__, err);
	return err;
}
1006
1007