#include <crypto/skcipher.h>
#include <linux/types.h>
#include <linux/jiffies.h>
#include <linux/sunrpc/gss_krb5.h>
#include <linux/random.h>
#include <linux/pagemap.h>

#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
# define RPCDBG_FACILITY RPCDBG_AUTH
#endif

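/*
 * gss_krb5_padding - number of pad bytes needed to bring "length" up to
 * a multiple of "blocksize".  The result is always in [1, blocksize]:
 * an already-aligned message still gets a full block of padding, so the
 * final byte (which records the pad length) is always present.
 */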
static inline int
gss_krb5_padding(int blocksize, int length)
{
	return blocksize - (length % blocksize);
}

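/*
 * gss_krb5_add_padding - append self-describing pad bytes (each byte
 * holds the pad length, per rfc 1964) after the tail if the buffer has
 * page or tail data, otherwise directly after the head.
 */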
static inline void
gss_krb5_add_padding(struct xdr_buf *buf, int offset, int blocksize)
{
	int padding = gss_krb5_padding(blocksize, buf->len - offset);
	char *p;
	struct kvec *iov;

	if (buf->page_len || buf->tail[0].iov_len)
		iov = &buf->tail[0];
	else
		iov = &buf->head[0];
	p = iov->iov_base + iov->iov_len;
	iov->iov_len += padding;
	buf->len += padding;
	memset(p, padding, padding);
}

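/*
 * gss_krb5_remove_padding - the last plaintext byte holds the pad
 * length; it may live in the head, the page data, or the tail of the
 * xdr_buf.  Find it, validate it, and trim the pad off buf->len.
 */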
static inline int
gss_krb5_remove_padding(struct xdr_buf *buf, int blocksize)
{
	u8 *ptr;
	u8 pad;
	size_t len = buf->len;

	if (len <= buf->head[0].iov_len) {
		pad = *(u8 *)(buf->head[0].iov_base + len - 1);
		if (pad > buf->head[0].iov_len)
			return -EINVAL;
		buf->head[0].iov_len -= pad;
		goto out;
	} else
		len -= buf->head[0].iov_len;
	if (len <= buf->page_len) {
		unsigned int last = (buf->page_base + len - 1)
					>> PAGE_SHIFT;
		unsigned int offset = (buf->page_base + len - 1)
					& (PAGE_SIZE - 1);
		ptr = kmap_atomic(buf->pages[last]);
		pad = *(ptr + offset);
		kunmap_atomic(ptr);
		goto out;
	} else
		len -= buf->page_len;
	BUG_ON(len > buf->tail[0].iov_len);
	pad = *(u8 *)(buf->tail[0].iov_base + len - 1);
out:
	/*
	 * Note that we do not adjust the page lengths: they describe a
	 * range of data in the real filesystem page cache, and the xdr
	 * code needs that range intact to place read data correctly.
	 * Adjusting the head length (above) and the total length (below)
	 * is sufficient, and harmless.
	 */
	if (pad > blocksize)
		return -EINVAL;
	if (buf->len > pad)
		buf->len -= pad;
	else
		return -EINVAL;
	return 0;
}

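/*
 * Fill the first conflen bytes at p with a per-message confounder;
 * conflen is 8 or 16 depending on the enctype.
 */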
void
gss_krb5_make_confounder(char *p, u32 conflen)
{
	static u64 i = 0;
	u64 *q = (u64 *)p;

	/*
	 * rfc1964 claims this should be "random".  But all that's really
	 * necessary is that it be unique.  And not even that is strictly
	 * necessary here, since this "gssapi" implementation exists only
	 * to support rpcsec_gss: every buffer we encrypt already begins
	 * with a unique sequence number.  A randomly seeded counter gives
	 * us uniqueness without drawing fresh randomness on every call.
	 */
	if (i == 0) {
		i = prandom_u32();
		i = (i << 32) | prandom_u32();
	}

	switch (conflen) {
	case 16:
		*q++ = i++;
		/* fall through */
	case 8:
		*q++ = i++;
		break;
	default:
		BUG();
	}
}

/*
 * Assumptions: the head and tail of the input buffer are ours to play
 * with.  The pages, however, may be real pages in the page cache, so
 * the caller supplies scratch pages in **pages and ciphertext is
 * written there rather than over the originals.
 */

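/*
 * Layout of the v1 wrap token emitted below (rfc 1964, section 1.2.2),
 * following the generic header written by g_make_token_header():
 *
 *	TOK_ID (2) | SGN_ALG (2) | SEAL_ALG (2) | filler 0xff 0xff (2) |
 *	SND_SEQ (8) | SGN_CKSUM (cksumlength) | confounder | data | pad
 */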
static u32
gss_wrap_kerberos_v1(struct krb5_ctx *kctx, int offset,
		struct xdr_buf *buf, struct page **pages)
{
	char cksumdata[GSS_KRB5_MAX_CKSUM_LEN];
	struct xdr_netobj md5cksum = {.len = sizeof(cksumdata),
				      .data = cksumdata};
	int blocksize = 0, plainlen;
	unsigned char *ptr, *msg_start;
	s32 now;
	int headlen;
	struct page **tmp_pages;
	u32 seq_send;
	u8 *cksumkey;
	u32 conflen = kctx->gk5e->conflen;

	dprintk("RPC: %s\n", __func__);

	now = get_seconds();

	blocksize = crypto_skcipher_blocksize(kctx->enc);
	gss_krb5_add_padding(buf, offset, blocksize);
	BUG_ON((buf->len - offset) % blocksize);
	plainlen = conflen + buf->len - offset;

	headlen = g_token_size(&kctx->mech_used,
		GSS_KRB5_TOK_HDR_LEN + kctx->gk5e->cksumlength + plainlen) -
		(buf->len - offset);

	ptr = buf->head[0].iov_base + offset;

	/* shift data to make room for the header */
	xdr_extend_head(buf, offset, headlen);

	BUG_ON((buf->len - offset - headlen) % blocksize);

	g_make_token_header(&kctx->mech_used,
				GSS_KRB5_TOK_HDR_LEN +
				kctx->gk5e->cksumlength + plainlen, &ptr);

	/* ptr is now at the header described in rfc 1964, section 1.2.1 */
	ptr[0] = (unsigned char) ((KG_TOK_WRAP_MSG >> 8) & 0xff);
	ptr[1] = (unsigned char) (KG_TOK_WRAP_MSG & 0xff);

	msg_start = ptr + GSS_KRB5_TOK_HDR_LEN + kctx->gk5e->cksumlength;

	/*
	 * signalg and sealalg are stored as if they were converted from LE
	 * to host endian, even though they're opaque pairs of bytes
	 * according to the RFC.
	 */
	*(__le16 *)(ptr + 2) = cpu_to_le16(kctx->gk5e->signalg);
	*(__le16 *)(ptr + 4) = cpu_to_le16(kctx->gk5e->sealalg);
	ptr[6] = 0xff;
	ptr[7] = 0xff;

	gss_krb5_make_confounder(msg_start, conflen);

	if (kctx->gk5e->keyed_cksum)
		cksumkey = kctx->cksum;
	else
		cksumkey = NULL;

	/*
	 * On entry, buf->pages points at the scratch pages that will
	 * receive ciphertext; "pages" holds the original plaintext
	 * pages.  Swap those back in so the checksum covers plaintext.
	 */
	tmp_pages = buf->pages;
	buf->pages = pages;
	if (make_checksum(kctx, ptr, 8, buf, offset + headlen - conflen,
					cksumkey, KG_USAGE_SEAL, &md5cksum))
		return GSS_S_FAILURE;
	buf->pages = tmp_pages;

	memcpy(ptr + GSS_KRB5_TOK_HDR_LEN, md5cksum.data, md5cksum.len);

	spin_lock(&krb5_seq_lock);
	seq_send = kctx->seq_send++;
	spin_unlock(&krb5_seq_lock);

	/* compute and store the encrypted sequence number (SND_SEQ) */
	if ((krb5_make_seq_num(kctx, kctx->seq, kctx->initiate ? 0 : 0xff,
			       seq_send, ptr + GSS_KRB5_TOK_HDR_LEN, ptr + 8)))
		return GSS_S_FAILURE;

	if (kctx->enctype == ENCTYPE_ARCFOUR_HMAC) {
		struct crypto_skcipher *cipher;
		int err;

		cipher = crypto_alloc_skcipher(kctx->gk5e->encrypt_name, 0,
					       CRYPTO_ALG_ASYNC);
		if (IS_ERR(cipher))
			return GSS_S_FAILURE;

		krb5_rc4_setup_enc_key(kctx, cipher, seq_send);

		err = gss_encrypt_xdr_buf(cipher, buf,
					  offset + headlen - conflen, pages);
		crypto_free_skcipher(cipher);
		if (err)
			return GSS_S_FAILURE;
	} else {
		if (gss_encrypt_xdr_buf(kctx->enc, buf,
					offset + headlen - conflen, pages))
			return GSS_S_FAILURE;
	}

	return (kctx->endtime < now) ? GSS_S_CONTEXT_EXPIRED : GSS_S_COMPLETE;
}

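/*
 * gss_unwrap_kerberos_v1 - inverse of gss_wrap_kerberos_v1: verify the
 * token header, decrypt, check the checksum and sequence number, then
 * slide the plaintext back to where the caller expects it and strip
 * the confounder and padding.
 */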
static u32
gss_unwrap_kerberos_v1(struct krb5_ctx *kctx, int offset, struct xdr_buf *buf)
{
	int signalg;
	int sealalg;
	char cksumdata[GSS_KRB5_MAX_CKSUM_LEN];
	struct xdr_netobj md5cksum = {.len = sizeof(cksumdata),
				      .data = cksumdata};
	s32 now;
	int direction;
	s32 seqnum;
	unsigned char *ptr;
	int bodysize;
	void *data_start, *orig_start;
	int data_len;
	int blocksize;
	u32 conflen = kctx->gk5e->conflen;
	int crypt_offset;
	u8 *cksumkey;

	dprintk("RPC: gss_unwrap_kerberos\n");

	ptr = (u8 *)buf->head[0].iov_base + offset;
	if (g_verify_token_header(&kctx->mech_used, &bodysize, &ptr,
					buf->len - offset))
		return GSS_S_DEFECTIVE_TOKEN;

	if ((ptr[0] != ((KG_TOK_WRAP_MSG >> 8) & 0xff)) ||
	    (ptr[1] != (KG_TOK_WRAP_MSG & 0xff)))
		return GSS_S_DEFECTIVE_TOKEN;

	/* get the sign and seal algorithms */
	signalg = ptr[2] + (ptr[3] << 8);
	if (signalg != kctx->gk5e->signalg)
		return GSS_S_DEFECTIVE_TOKEN;

	sealalg = ptr[4] + (ptr[5] << 8);
	if (sealalg != kctx->gk5e->sealalg)
		return GSS_S_DEFECTIVE_TOKEN;

	/* the filler bytes must be 0xff, 0xff */
	if ((ptr[6] != 0xff) || (ptr[7] != 0xff))
		return GSS_S_DEFECTIVE_TOKEN;

	/*
	 * Data starts after token header and checksum.  ptr points
	 * to the token header.
	 */
	crypt_offset = ptr + (GSS_KRB5_TOK_HDR_LEN + kctx->gk5e->cksumlength) -
					(unsigned char *)buf->head[0].iov_base;

	/*
	 * The plaintext sequence number is needed to derive the
	 * arcfour-hmac encryption key.
	 */
	if (krb5_get_seq_num(kctx, ptr + GSS_KRB5_TOK_HDR_LEN,
			     ptr + 8, &direction, &seqnum))
		return GSS_S_BAD_SIG;

	if ((kctx->initiate && direction != 0xff) ||
	    (!kctx->initiate && direction != 0))
		return GSS_S_BAD_SIG;

	if (kctx->enctype == ENCTYPE_ARCFOUR_HMAC) {
		struct crypto_skcipher *cipher;
		int err;

		cipher = crypto_alloc_skcipher(kctx->gk5e->encrypt_name, 0,
					       CRYPTO_ALG_ASYNC);
		if (IS_ERR(cipher))
			return GSS_S_FAILURE;

		krb5_rc4_setup_enc_key(kctx, cipher, seqnum);

		err = gss_decrypt_xdr_buf(cipher, buf, crypt_offset);
		crypto_free_skcipher(cipher);
		if (err)
			return GSS_S_DEFECTIVE_TOKEN;
	} else {
		if (gss_decrypt_xdr_buf(kctx->enc, buf, crypt_offset))
			return GSS_S_DEFECTIVE_TOKEN;
	}

	if (kctx->gk5e->keyed_cksum)
		cksumkey = kctx->cksum;
	else
		cksumkey = NULL;

	if (make_checksum(kctx, ptr, 8, buf, crypt_offset,
			  cksumkey, KG_USAGE_SEAL, &md5cksum))
		return GSS_S_FAILURE;

	if (memcmp(md5cksum.data, ptr + GSS_KRB5_TOK_HDR_LEN,
						kctx->gk5e->cksumlength))
		return GSS_S_BAD_SIG;

	/* it got through unscathed.  Make sure the context is unexpired. */
	now = get_seconds();

	if (now > kctx->endtime)
		return GSS_S_CONTEXT_EXPIRED;

	/*
	 * Move the plaintext (which starts conflen bytes past the
	 * checksum) back to where the caller expects it, then strip
	 * the confounder and block padding.
	 */
	blocksize = crypto_skcipher_blocksize(kctx->enc);
	data_start = ptr + (GSS_KRB5_TOK_HDR_LEN + kctx->gk5e->cksumlength) +
					conflen;
	orig_start = buf->head[0].iov_base + offset;
	data_len = (buf->head[0].iov_base + buf->head[0].iov_len) - data_start;
	memmove(orig_start, data_start, data_len);
	buf->head[0].iov_len -= (data_start - orig_start);
	buf->len -= (data_start - orig_start);

	if (gss_krb5_remove_padding(buf, blocksize))
		return GSS_S_DEFECTIVE_TOKEN;

	return GSS_S_COMPLETE;
}


/*
 * RFC 4121 wrap tokens may arrive with a nonzero "right rotation count"
 * (RRC): the sender may rotate the token body right by that many bytes.
 * The helpers below undo this by rotating left, moving at most
 * LOCAL_BUF_LEN bytes per pass so only small on-stack buffers are
 * needed.
 */
#define LOCAL_BUF_LEN 32u

static void rotate_buf_a_little(struct xdr_buf *buf, unsigned int shift)
{
	char head[LOCAL_BUF_LEN];
	char tmp[LOCAL_BUF_LEN];
	unsigned int this_len, i;

	BUG_ON(shift > LOCAL_BUF_LEN);

	/* save the first "shift" bytes, slide the rest down, then
	 * put the saved bytes at the end */
	read_bytes_from_xdr_buf(buf, 0, head, shift);
	for (i = 0; i + shift < buf->len; i += LOCAL_BUF_LEN) {
		this_len = min(LOCAL_BUF_LEN, buf->len - (i + shift));
		read_bytes_from_xdr_buf(buf, i + shift, tmp, this_len);
		write_bytes_to_xdr_buf(buf, i, tmp, this_len);
	}
	write_bytes_to_xdr_buf(buf, buf->len - shift, head, shift);
}

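/* rotate an xdr_buf left by "shift" bytes, LOCAL_BUF_LEN at a time */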
static void _rotate_left(struct xdr_buf *buf, unsigned int shift)
{
	int shifted = 0;
	int this_shift;

	shift %= buf->len;
	while (shifted < shift) {
		this_shift = min(shift - shifted, LOCAL_BUF_LEN);
		rotate_buf_a_little(buf, this_shift);
		shifted += this_shift;
	}
}

static void rotate_left(u32 base, struct xdr_buf *buf, unsigned int shift)
{
	struct xdr_buf subbuf;

	/* only rotate the part of the buffer past "base" */
	xdr_buf_subsegment(buf, &subbuf, base, buf->len - base);
	_rotate_left(&subbuf, shift);
}

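/*
 * gss_wrap_kerberos_v2 - emit an rfc 4121 wrap token.  The 16-byte
 * header built below is:
 *
 *	TOK_ID 0x0504 (2) | Flags (1) | filler 0xff (1) |
 *	EC (2) | RRC (2) | SND_SEQ (8)
 *
 * This implementation always sends EC = 0 and RRC = 0; a peer may use
 * a nonzero RRC, which gss_unwrap_kerberos_v2() undoes via rotate_left().
 */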
static u32
gss_wrap_kerberos_v2(struct krb5_ctx *kctx, u32 offset,
		     struct xdr_buf *buf, struct page **pages)
{
	int blocksize;
	u8 *ptr, *plainhdr;
	s32 now;
	u8 flags = 0x00;
	__be16 *be16ptr;
	__be64 *be64ptr;
	u32 err;

	dprintk("RPC: %s\n", __func__);

	if (kctx->gk5e->encrypt_v2 == NULL)
		return GSS_S_FAILURE;

	/* make room for the gss token header */
	if (xdr_extend_head(buf, offset, GSS_KRB5_TOK_HDR_LEN))
		return GSS_S_FAILURE;

	/* construct the gss token header */
	ptr = plainhdr = buf->head[0].iov_base + offset;
	*ptr++ = (unsigned char) ((KG2_TOK_WRAP >> 8) & 0xff);
	*ptr++ = (unsigned char) (KG2_TOK_WRAP & 0xff);

	if ((kctx->flags & KRB5_CTX_FLAG_INITIATOR) == 0)
		flags |= KG2_TOKEN_FLAG_SENTBYACCEPTOR;
	if ((kctx->flags & KRB5_CTX_FLAG_ACCEPTOR_SUBKEY) != 0)
		flags |= KG2_TOKEN_FLAG_ACCEPTORSUBKEY;
	/* we always do confidentiality in wrap tokens */
	flags |= KG2_TOKEN_FLAG_SEALED;

	*ptr++ = flags;
	*ptr++ = 0xff;
	be16ptr = (__be16 *)ptr;

	blocksize = crypto_skcipher_blocksize(kctx->acceptor_enc);
	*be16ptr++ = 0;		/* EC: no extra count */
	/* the "inner" token header always uses 0 for RRC */
	*be16ptr++ = 0;

	be64ptr = (__be64 *)be16ptr;
	spin_lock(&krb5_seq_lock);
	*be64ptr = cpu_to_be64(kctx->seq_send64++);
	spin_unlock(&krb5_seq_lock);

	err = (*kctx->gk5e->encrypt_v2)(kctx, offset, buf, pages);
	if (err)
		return err;

	now = get_seconds();
	return (kctx->endtime < now) ? GSS_S_CONTEXT_EXPIRED : GSS_S_COMPLETE;
}

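/*
 * gss_unwrap_kerberos_v2 - accept an rfc 4121 wrap token: validate the
 * header, undo any right rotation (RRC), decrypt, compare the decrypted
 * copy of the header against the one on the wire, then move the
 * plaintext back to "offset" and trim the trailing EC bytes, header
 * copy, and checksum.
 */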
static u32
gss_unwrap_kerberos_v2(struct krb5_ctx *kctx, int offset, struct xdr_buf *buf)
{
	s32 now;
	u8 *ptr;
	u8 flags = 0x00;
	u16 ec, rrc;
	int err;
	u32 headskip, tailskip;
	u8 decrypted_hdr[GSS_KRB5_TOK_HDR_LEN];
	unsigned int movelen;

	dprintk("RPC: %s\n", __func__);

	if (kctx->gk5e->decrypt_v2 == NULL)
		return GSS_S_FAILURE;

	ptr = buf->head[0].iov_base + offset;

	if (be16_to_cpu(*((__be16 *)ptr)) != KG2_TOK_WRAP)
		return GSS_S_DEFECTIVE_TOKEN;

	flags = ptr[2];
	if ((!kctx->initiate && (flags & KG2_TOKEN_FLAG_SENTBYACCEPTOR)) ||
	    (kctx->initiate && !(flags & KG2_TOKEN_FLAG_SENTBYACCEPTOR)))
		return GSS_S_BAD_SIG;

	if ((flags & KG2_TOKEN_FLAG_SEALED) == 0) {
		dprintk("%s: token missing expected sealed flag\n", __func__);
		return GSS_S_DEFECTIVE_TOKEN;
	}

	if (ptr[3] != 0xff)
		return GSS_S_DEFECTIVE_TOKEN;

	ec = be16_to_cpup((__be16 *)(ptr + 4));
	rrc = be16_to_cpup((__be16 *)(ptr + 6));

	/*
	 * NOTE: the sequence number at ptr + 8 is skipped; rpcsec_gss
	 * does not want it checked (see rfc 2203).
	 */

	if (rrc != 0)
		rotate_left(offset + 16, buf, rrc);

540
541 err = (*kctx->gk5e->decrypt_v2)(kctx, offset, buf,
542 &headskip, &tailskip);
543 if (err)
544 return GSS_S_FAILURE;
545
	/*
	 * Retrieve the decrypted gss token header and verify
	 * it against the original.
	 */
	err = read_bytes_from_xdr_buf(buf,
				buf->len - GSS_KRB5_TOK_HDR_LEN - tailskip,
				decrypted_hdr, GSS_KRB5_TOK_HDR_LEN);
	if (err) {
		dprintk("%s: error %u getting decrypted_hdr\n", __func__, err);
		return GSS_S_FAILURE;
	}
	/* compare all fields except the RRC (bytes 6-7), which the
	 * sender may legitimately have changed by rotating the token */
	if (memcmp(ptr, decrypted_hdr, 6) ||
	    memcmp(ptr + 8, decrypted_hdr + 8, 8)) {
		dprintk("%s: token hdr, plaintext hdr mismatch!\n", __func__);
		return GSS_S_FAILURE;
	}

	/* it got through unscathed.  Make sure the context is unexpired. */
	now = get_seconds();
	if (now > kctx->endtime)
		return GSS_S_CONTEXT_EXPIRED;

	/*
	 * Move the head data back to the right position in the xdr_buf.
	 * We ignore any "ec" data since it might be in the head or
	 * the tail, and we really don't need to deal with it.
	 * Note that buf->head[0].iov_len may indicate the available
	 * head buffer space rather than that actually occupied.
	 */
	movelen = min_t(unsigned int, buf->head[0].iov_len, buf->len);
	movelen -= offset + GSS_KRB5_TOK_HDR_LEN + headskip;
	BUG_ON(offset + GSS_KRB5_TOK_HDR_LEN + headskip + movelen >
	       buf->head[0].iov_len);
	memmove(ptr, ptr + GSS_KRB5_TOK_HDR_LEN + headskip, movelen);
	buf->head[0].iov_len -= GSS_KRB5_TOK_HDR_LEN + headskip;
	buf->len -= GSS_KRB5_TOK_HDR_LEN + headskip;

	/* trim off the trailing "extra count" (ec) and checksum blob */
	xdr_buf_trim(buf, ec + GSS_KRB5_TOK_HDR_LEN + tailskip);

	return GSS_S_COMPLETE;
}

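/*
 * Dispatch on enctype: DES, DES3 and RC4 contexts use the v1 (rfc 1964)
 * token format; the AES contexts use the v2 (rfc 4121) format.
 */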
u32
gss_wrap_kerberos(struct gss_ctx *gctx, int offset,
		  struct xdr_buf *buf, struct page **pages)
{
	struct krb5_ctx *kctx = gctx->internal_ctx_id;

	switch (kctx->enctype) {
	default:
		BUG();
	case ENCTYPE_DES_CBC_RAW:
	case ENCTYPE_DES3_CBC_RAW:
	case ENCTYPE_ARCFOUR_HMAC:
		return gss_wrap_kerberos_v1(kctx, offset, buf, pages);
	case ENCTYPE_AES128_CTS_HMAC_SHA1_96:
	case ENCTYPE_AES256_CTS_HMAC_SHA1_96:
		return gss_wrap_kerberos_v2(kctx, offset, buf, pages);
	}
}

u32
gss_unwrap_kerberos(struct gss_ctx *gctx, int offset, struct xdr_buf *buf)
{
	struct krb5_ctx *kctx = gctx->internal_ctx_id;

	switch (kctx->enctype) {
	default:
		BUG();
	case ENCTYPE_DES_CBC_RAW:
	case ENCTYPE_DES3_CBC_RAW:
	case ENCTYPE_ARCFOUR_HMAC:
		return gss_unwrap_kerberos_v1(kctx, offset, buf);
	case ENCTYPE_AES128_CTS_HMAC_SHA1_96:
	case ENCTYPE_AES256_CTS_HMAC_SHA1_96:
		return gss_unwrap_kerberos_v2(kctx, offset, buf);
	}
}