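/*
 * ima_crypto.c - IMA crypto support: calculates file, buffer, template-data
 * and boot-aggregate hashes using the kernel crypto shash/ahash APIs.
 */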
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/moduleparam.h>
#include <linux/ratelimit.h>
#include <linux/file.h>
#include <linux/crypto.h>
#include <linux/scatterlist.h>
#include <linux/err.h>
#include <linux/slab.h>
#include <crypto/hash.h>

#include "ima.h"

static unsigned long ima_ahash_minsize;
module_param_named(ahash_minsize, ima_ahash_minsize, ulong, 0644);
MODULE_PARM_DESC(ahash_minsize, "Minimum file size for ahash use");

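/* ima_maxorder defaults to 0, i.e. ima_bufsize defaults to a single page */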
static int ima_maxorder;
static unsigned int ima_bufsize = PAGE_SIZE;

static int param_set_bufsize(const char *val, const struct kernel_param *kp)
{
	unsigned long long size;
	int order;

	size = memparse(val, NULL);
	order = get_order(size);
	if (order >= MAX_ORDER)
		return -EINVAL;
	ima_maxorder = order;
	ima_bufsize = PAGE_SIZE << order;
	return 0;
}

static const struct kernel_param_ops param_ops_bufsize = {
	.set = param_set_bufsize,
	.get = param_get_uint,
};
#define param_check_bufsize(name, p) __param_check(name, p, unsigned int)

module_param_named(ahash_bufsize, ima_bufsize, bufsize, 0644);
MODULE_PARM_DESC(ahash_bufsize, "Maximum ahash buffer size");
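
/*
 * Note: with IMA built into the kernel, both parameters can also be set on
 * the kernel command line, e.g. "ima.ahash_minsize=1048576 ima.ahash_bufsize=8M"
 * (ahash_bufsize is parsed by memparse(), so K/M/G suffixes are accepted;
 * ahash_minsize is a plain ulong, given in bytes).
 */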

static struct crypto_shash *ima_shash_tfm;
static struct crypto_ahash *ima_ahash_tfm;

int __init ima_init_crypto(void)
{
	long rc;

	ima_shash_tfm = crypto_alloc_shash(hash_algo_name[ima_hash_algo], 0, 0);
	if (IS_ERR(ima_shash_tfm)) {
		rc = PTR_ERR(ima_shash_tfm);
		pr_err("Can not allocate %s (reason: %ld)\n",
		       hash_algo_name[ima_hash_algo], rc);
		return rc;
	}
	pr_info("Allocated hash algorithm: %s\n",
		hash_algo_name[ima_hash_algo]);
	return 0;
}

static struct crypto_shash *ima_alloc_tfm(enum hash_algo algo)
{
	struct crypto_shash *tfm = ima_shash_tfm;
	int rc;

	if (algo < 0 || algo >= HASH_ALGO__LAST)
		algo = ima_hash_algo;

	if (algo != ima_hash_algo) {
		tfm = crypto_alloc_shash(hash_algo_name[algo], 0, 0);
		if (IS_ERR(tfm)) {
			rc = PTR_ERR(tfm);
			pr_err("Can not allocate %s (reason: %d)\n",
			       hash_algo_name[algo], rc);
		}
	}
	return tfm;
}

static void ima_free_tfm(struct crypto_shash *tfm)
{
	if (tfm != ima_shash_tfm)
		crypto_free_shash(tfm);
}
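
/*
 * ima_alloc_pages() - Allocate contiguous pages.
 * @max_size:       maximum amount of memory needed
 * @allocated_size: returned size of the actual allocation
 * @last_warn:      whether the final order-0 attempt may emit a warning
 *
 * Opportunistically try higher-order allocations first, starting at
 * ima_maxorder (capped at the order needed for @max_size) and working
 * downwards, without allocation warnings or retries.  As a last resort,
 * fall back to a single order-0 GFP_KERNEL page, warning only when
 * @last_warn is set.  Returns the buffer, or NULL on failure.
 */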
static void *ima_alloc_pages(loff_t max_size, size_t *allocated_size,
			     int last_warn)
{
	void *ptr;
	int order = ima_maxorder;
	gfp_t gfp_mask = __GFP_RECLAIM | __GFP_NOWARN | __GFP_NORETRY;

	if (order)
		order = min(get_order(max_size), order);

	for (; order; order--) {
		ptr = (void *)__get_free_pages(gfp_mask, order);
		if (ptr) {
			*allocated_size = PAGE_SIZE << order;
			return ptr;
		}
	}

	/* order is zero - fall back to a single page */
	gfp_mask = GFP_KERNEL;

	if (!last_warn)
		gfp_mask |= __GFP_NOWARN;

	ptr = (void *)__get_free_pages(gfp_mask, 0);
	if (ptr) {
		*allocated_size = PAGE_SIZE;
		return ptr;
	}

	*allocated_size = 0;
	return NULL;
}
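
/*
 * ima_free_pages() - Free pages allocated by ima_alloc_pages().
 * @ptr:  pointer to the allocated buffer
 * @size: size of the allocated buffer
 */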
static void ima_free_pages(void *ptr, size_t size)
{
	if (!ptr)
		return;
	free_pages((unsigned long)ptr, get_order(size));
}

static struct crypto_ahash *ima_alloc_atfm(enum hash_algo algo)
{
	struct crypto_ahash *tfm = ima_ahash_tfm;
	int rc;

	if (algo < 0 || algo >= HASH_ALGO__LAST)
		algo = ima_hash_algo;

	if (algo != ima_hash_algo || !tfm) {
		tfm = crypto_alloc_ahash(hash_algo_name[algo], 0, 0);
		if (!IS_ERR(tfm)) {
			if (algo == ima_hash_algo)
				ima_ahash_tfm = tfm;
		} else {
			rc = PTR_ERR(tfm);
			pr_err("Can not allocate %s (reason: %d)\n",
			       hash_algo_name[algo], rc);
		}
	}
	return tfm;
}

static void ima_free_atfm(struct crypto_ahash *tfm)
{
	if (tfm != ima_ahash_tfm)
		crypto_free_ahash(tfm);
}

static inline int ahash_wait(int err, struct crypto_wait *wait)
{
	err = crypto_wait_req(err, wait);

	if (err)
		pr_crit_ratelimited("ahash calculation failed: err: %d\n", err);

	return err;
}
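
/*
 * Hash a file with the async-hash API, double-buffering the reads where
 * possible so that file I/O and the (potentially hardware-offloaded)
 * hash update can overlap.
 */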
static int ima_calc_file_hash_atfm(struct file *file,
				   struct ima_digest_data *hash,
				   struct crypto_ahash *tfm)
{
	loff_t i_size, offset;
	char *rbuf[2] = { NULL, };
	int rc, rbuf_len, active = 0, ahash_rc = 0;
	struct ahash_request *req;
	struct scatterlist sg[1];
	struct crypto_wait wait;
	size_t rbuf_size[2];

	hash->length = crypto_ahash_digestsize(tfm);

	req = ahash_request_alloc(tfm, GFP_KERNEL);
	if (!req)
		return -ENOMEM;

	crypto_init_wait(&wait);
	ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG |
				   CRYPTO_TFM_REQ_MAY_SLEEP,
				   crypto_req_done, &wait);

	rc = ahash_wait(crypto_ahash_init(req), &wait);
	if (rc)
		goto out1;

	i_size = i_size_read(file_inode(file));

	if (i_size == 0)
		goto out2;

	/*
	 * Try to allocate enough memory for the whole file; fail only if
	 * not even a single page can be allocated.
	 */
	rbuf[0] = ima_alloc_pages(i_size, &rbuf_size[0], 1);
	if (!rbuf[0]) {
		rc = -ENOMEM;
		goto out1;
	}

	if (i_size > rbuf_size[0]) {
		/*
		 * Try to allocate a second buffer for double buffering;
		 * if that fails, fall back to reading into a single
		 * buffer and waiting for each update to complete.
		 */
		rbuf[1] = ima_alloc_pages(i_size - rbuf_size[0],
					  &rbuf_size[1], 0);
	}

	for (offset = 0; offset < i_size; offset += rbuf_len) {
		if (!rbuf[1] && offset) {
			/*
			 * Single buffer: wait for the previous
			 * ahash_update() to finish before reusing it.
			 */
			rc = ahash_wait(ahash_rc, &wait);
			if (rc)
				goto out3;
		}

		rbuf_len = min_t(loff_t, i_size - offset, rbuf_size[active]);
		rc = integrity_kernel_read(file, offset, rbuf[active],
					   rbuf_len);
		if (rc != rbuf_len) {
			if (rc >= 0)
				rc = -EINVAL;
			/*
			 * Make sure the outstanding ahash_update() has
			 * completed before the buffers are freed below.
			 */
			ahash_wait(ahash_rc, &wait);
			goto out3;
		}

		if (rbuf[1] && offset) {
			/*
			 * Double buffering: wait for the update on the
			 * other buffer before submitting this one.
			 */
			rc = ahash_wait(ahash_rc, &wait);
			if (rc)
				goto out3;
		}

		sg_init_one(&sg[0], rbuf[active], rbuf_len);
		ahash_request_set_crypt(req, sg, NULL, rbuf_len);

		ahash_rc = crypto_ahash_update(req);

		if (rbuf[1])
			active = !active;
	}

	rc = ahash_wait(ahash_rc, &wait);
out3:
	ima_free_pages(rbuf[0], rbuf_size[0]);
	ima_free_pages(rbuf[1], rbuf_size[1]);
out2:
	if (!rc) {
		ahash_request_set_crypt(req, NULL, hash->digest, 0);
		rc = ahash_wait(crypto_ahash_final(req), &wait);
	}
out1:
	ahash_request_free(req);
	return rc;
}

static int ima_calc_file_ahash(struct file *file, struct ima_digest_data *hash)
{
	struct crypto_ahash *tfm;
	int rc;

	tfm = ima_alloc_atfm(hash->algo);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	rc = ima_calc_file_hash_atfm(file, hash, tfm);

	ima_free_atfm(tfm);

	return rc;
}

static int ima_calc_file_hash_tfm(struct file *file,
				  struct ima_digest_data *hash,
				  struct crypto_shash *tfm)
{
	loff_t i_size, offset = 0;
	char *rbuf;
	int rc;
	SHASH_DESC_ON_STACK(shash, tfm);

	shash->tfm = tfm;
	shash->flags = 0;

	hash->length = crypto_shash_digestsize(tfm);

	rc = crypto_shash_init(shash);
	if (rc != 0)
		return rc;

	i_size = i_size_read(file_inode(file));

	if (i_size == 0)
		goto out;

	rbuf = kzalloc(PAGE_SIZE, GFP_KERNEL);
	if (!rbuf)
		return -ENOMEM;

	while (offset < i_size) {
		int rbuf_len;

		rbuf_len = integrity_kernel_read(file, offset, rbuf, PAGE_SIZE);
		if (rbuf_len < 0) {
			rc = rbuf_len;
			break;
		}
		if (rbuf_len == 0)
			break;
		offset += rbuf_len;

		rc = crypto_shash_update(shash, rbuf, rbuf_len);
		if (rc)
			break;
	}
	kfree(rbuf);
out:
	if (!rc)
		rc = crypto_shash_final(shash, hash->digest);
	return rc;
}

static int ima_calc_file_shash(struct file *file, struct ima_digest_data *hash)
{
	struct crypto_shash *tfm;
	int rc;

	tfm = ima_alloc_tfm(hash->algo);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	rc = ima_calc_file_hash_tfm(file, hash, tfm);

	ima_free_tfm(tfm);

	return rc;
}
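
/*
 * ima_calc_file_hash - calculate the file hash
 *
 * Asynchronous hash (ahash) allows using hardware acceleration for
 * calculating a hash.  ahash performance varies for different data sizes
 * on different crypto accelerators, and shash may be faster for smaller
 * files, so the 'ahash_minsize' module parameter selects the minimum file
 * size for which ahash is used.  If it is unset, or if the ahash
 * calculation fails, fall back to shash.
 */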
int ima_calc_file_hash(struct file *file, struct ima_digest_data *hash)
{
	loff_t i_size;
	int rc;
	struct file *f = file;
	bool new_file_instance = false, modified_mode = false;

	/*
	 * For consistency, fail files opened with the O_DIRECT flag on
	 * filesystems mounted with or without the DAX option.
	 */
	if (file->f_flags & O_DIRECT) {
		hash->length = hash_digest_size[ima_hash_algo];
		hash->algo = ima_hash_algo;
		return -EINVAL;
	}

	/* Open a new file instance in O_RDONLY if we cannot read */
	if (!(file->f_mode & FMODE_READ)) {
		int flags = file->f_flags & ~(O_WRONLY | O_APPEND |
					      O_TRUNC | O_CREAT | O_NOCTTY |
					      O_EXCL);
		flags |= O_RDONLY;
		f = dentry_open(&file->f_path, flags, file->f_cred);
		if (IS_ERR(f)) {
			/*
			 * Cannot open the file again, so temporarily set
			 * FMODE_READ on the original instance and continue.
			 */
			pr_info_ratelimited("Unable to reopen file for reading.\n");
			f = file;
			f->f_mode |= FMODE_READ;
			modified_mode = true;
		} else {
			new_file_instance = true;
		}
	}

	i_size = i_size_read(file_inode(f));

	if (ima_ahash_minsize && i_size >= ima_ahash_minsize) {
		rc = ima_calc_file_ahash(f, hash);
		if (!rc)
			goto out;
	}

	rc = ima_calc_file_shash(f, hash);
out:
	if (new_file_instance)
		fput(f);
	else if (modified_mode)
		f->f_mode &= ~FMODE_READ;
	return rc;
}
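
/*
 * Calculate the hash of template data.
 */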
static int ima_calc_field_array_hash_tfm(struct ima_field_data *field_data,
					 struct ima_template_desc *td,
					 int num_fields,
					 struct ima_digest_data *hash,
					 struct crypto_shash *tfm)
{
	SHASH_DESC_ON_STACK(shash, tfm);
	int rc, i;

	shash->tfm = tfm;
	shash->flags = 0;

	hash->length = crypto_shash_digestsize(tfm);

	rc = crypto_shash_init(shash);
	if (rc != 0)
		return rc;

	for (i = 0; i < num_fields; i++) {
		u8 buffer[IMA_EVENT_NAME_LEN_MAX + 1] = { 0 };
		u8 *data_to_hash = field_data[i].data;
		u32 datalen = field_data[i].len;
		u32 datalen_to_hash =
		    !ima_canonical_fmt ? datalen : cpu_to_le32(datalen);

		if (strcmp(td->name, IMA_TEMPLATE_IMA_NAME) != 0) {
			rc = crypto_shash_update(shash,
						(const u8 *) &datalen_to_hash,
						sizeof(datalen_to_hash));
			if (rc)
				break;
		} else if (strcmp(td->fields[i]->field_id, "n") == 0) {
			/*
			 * The original 'ima' template hashes the event
			 * name in a fixed-size buffer.
			 */
			memcpy(buffer, data_to_hash, datalen);
			data_to_hash = buffer;
			datalen = IMA_EVENT_NAME_LEN_MAX + 1;
		}
		rc = crypto_shash_update(shash, data_to_hash, datalen);
		if (rc)
			break;
	}

	if (!rc)
		rc = crypto_shash_final(shash, hash->digest);

	return rc;
}

int ima_calc_field_array_hash(struct ima_field_data *field_data,
			      struct ima_template_desc *desc, int num_fields,
			      struct ima_digest_data *hash)
{
	struct crypto_shash *tfm;
	int rc;

	tfm = ima_alloc_tfm(hash->algo);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	rc = ima_calc_field_array_hash_tfm(field_data, desc, num_fields,
					   hash, tfm);

	ima_free_tfm(tfm);

	return rc;
}
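
/*
 * Hash an in-memory buffer with the async-hash API.  The buffer must be
 * addressable by sg_init_one(), i.e. linear kernel memory rather than a
 * vmalloc'ed buffer.
 */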
static int calc_buffer_ahash_atfm(const void *buf, loff_t len,
				  struct ima_digest_data *hash,
				  struct crypto_ahash *tfm)
{
	struct ahash_request *req;
	struct scatterlist sg;
	struct crypto_wait wait;
	int rc, ahash_rc = 0;

	hash->length = crypto_ahash_digestsize(tfm);

	req = ahash_request_alloc(tfm, GFP_KERNEL);
	if (!req)
		return -ENOMEM;

	crypto_init_wait(&wait);
	ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG |
				   CRYPTO_TFM_REQ_MAY_SLEEP,
				   crypto_req_done, &wait);

	rc = ahash_wait(crypto_ahash_init(req), &wait);
	if (rc)
		goto out;

	sg_init_one(&sg, buf, len);
	ahash_request_set_crypt(req, &sg, NULL, len);

	ahash_rc = crypto_ahash_update(req);

	/* wait for the update request to complete */
	rc = ahash_wait(ahash_rc, &wait);
	if (!rc) {
		ahash_request_set_crypt(req, NULL, hash->digest, 0);
		rc = ahash_wait(crypto_ahash_final(req), &wait);
	}
out:
	ahash_request_free(req);
	return rc;
}

static int calc_buffer_ahash(const void *buf, loff_t len,
			     struct ima_digest_data *hash)
{
	struct crypto_ahash *tfm;
	int rc;

	tfm = ima_alloc_atfm(hash->algo);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	rc = calc_buffer_ahash_atfm(buf, len, hash, tfm);

	ima_free_atfm(tfm);

	return rc;
}

static int calc_buffer_shash_tfm(const void *buf, loff_t size,
				 struct ima_digest_data *hash,
				 struct crypto_shash *tfm)
{
	SHASH_DESC_ON_STACK(shash, tfm);
	unsigned int len;
	int rc;

	shash->tfm = tfm;
	shash->flags = 0;

	hash->length = crypto_shash_digestsize(tfm);

	rc = crypto_shash_init(shash);
	if (rc != 0)
		return rc;

	while (size) {
		len = size < PAGE_SIZE ? size : PAGE_SIZE;
		rc = crypto_shash_update(shash, buf, len);
		if (rc)
			break;
		buf += len;
		size -= len;
	}

	if (!rc)
		rc = crypto_shash_final(shash, hash->digest);
	return rc;
}

static int calc_buffer_shash(const void *buf, loff_t len,
			     struct ima_digest_data *hash)
{
	struct crypto_shash *tfm;
	int rc;

	tfm = ima_alloc_tfm(hash->algo);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	rc = calc_buffer_shash_tfm(buf, len, hash, tfm);

	ima_free_tfm(tfm);
	return rc;
}

int ima_calc_buffer_hash(const void *buf, loff_t len,
			 struct ima_digest_data *hash)
{
	int rc;

	if (ima_ahash_minsize && len >= ima_ahash_minsize) {
		rc = calc_buffer_ahash(buf, len, hash);
		if (!rc)
			return 0;
	}

	return calc_buffer_shash(buf, len, hash);
}
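
/* Read PCR @idx into @d; an error is logged but otherwise ignored. */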
static void ima_pcrread(u32 idx, struct tpm_digest *d)
{
	if (!ima_tpm_chip)
		return;

	if (tpm_pcr_read(ima_tpm_chip, idx, d) != 0)
		pr_err("Error Communicating to TPM chip\n");
}
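
/*
 * The boot aggregate is a cumulative digest over TPM PCRs 0-7.  With
 * TPM 2.0 hash agility, a chip may have several active PCR banks, so the
 * bank (@alg_id) used for reading the PCRs must match the hash algorithm
 * used for the boot aggregate stored in the measurement list.
 */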
static int ima_calc_boot_aggregate_tfm(char *digest, u16 alg_id,
				       struct crypto_shash *tfm)
{
	struct tpm_digest d = { .alg_id = alg_id, .digest = {0} };
	int rc;
	u32 i;
	SHASH_DESC_ON_STACK(shash, tfm);

	shash->tfm = tfm;
	shash->flags = 0;

	pr_devel("calculating the boot-aggregate based on TPM bank: %04x\n",
		 d.alg_id);

	rc = crypto_shash_init(shash);
	if (rc != 0)
		return rc;

	/* cumulative digest over TPM registers 0-7 */
	for (i = TPM_PCR0; i < TPM_PCR8; i++) {
		ima_pcrread(i, &d);
		/* now accumulate with the current aggregate */
		rc = crypto_shash_update(shash, d.digest,
					 crypto_shash_digestsize(tfm));
	}
	if (!rc)
		crypto_shash_final(shash, digest);
	return rc;
}
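
/*
 * Compute the boot aggregate in the TPM bank matching hash->algo when
 * available; otherwise prefer a SHA256 bank, falling back to SHA1.
 */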
int ima_calc_boot_aggregate(struct ima_digest_data *hash)
{
	struct crypto_shash *tfm;
	u16 crypto_id, alg_id;
	int rc, i, bank_idx = -1;

	for (i = 0; i < ima_tpm_chip->nr_allocated_banks; i++) {
		crypto_id = ima_tpm_chip->allocated_banks[i].crypto_id;
		if (crypto_id == hash->algo) {
			bank_idx = i;
			break;
		}

		if (crypto_id == HASH_ALGO_SHA256)
			bank_idx = i;

		if (bank_idx == -1 && crypto_id == HASH_ALGO_SHA1)
			bank_idx = i;
	}

	if (bank_idx == -1) {
		pr_err("No suitable TPM algorithm for boot aggregate\n");
		return 0;
	}

	hash->algo = ima_tpm_chip->allocated_banks[bank_idx].crypto_id;

	tfm = ima_alloc_tfm(hash->algo);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	hash->length = crypto_shash_digestsize(tfm);
	alg_id = ima_tpm_chip->allocated_banks[bank_idx].alg_id;
	rc = ima_calc_boot_aggregate_tfm(hash->digest, alg_id, tfm);

	ima_free_tfm(tfm);

	return rc;
}