/* linux/security/integrity/ima/ima_crypto.c */
   1// SPDX-License-Identifier: GPL-2.0-only
   2/*
   3 * Copyright (C) 2005,2006,2007,2008 IBM Corporation
   4 *
   5 * Authors:
   6 * Mimi Zohar <zohar@us.ibm.com>
   7 * Kylene Hall <kjhall@us.ibm.com>
   8 *
   9 * File: ima_crypto.c
  10 *      Calculates md5/sha1 file hash, template hash, boot-aggreate hash
  11 */
  12
  13#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
  14
  15#include <linux/kernel.h>
  16#include <linux/moduleparam.h>
  17#include <linux/ratelimit.h>
  18#include <linux/file.h>
  19#include <linux/crypto.h>
  20#include <linux/scatterlist.h>
  21#include <linux/err.h>
  22#include <linux/slab.h>
  23#include <crypto/hash.h>
  24
  25#include "ima.h"
  26
/* minimum file size for ahash use */
static unsigned long ima_ahash_minsize;
module_param_named(ahash_minsize, ima_ahash_minsize, ulong, 0644);
MODULE_PARM_DESC(ahash_minsize, "Minimum file size for ahash use");

/* default is 0 - 1 page. */
static int ima_maxorder;	/* page order cap for the ahash read buffer */
static unsigned int ima_bufsize = PAGE_SIZE;	/* ahash read buffer size, bytes */
  35
  36static int param_set_bufsize(const char *val, const struct kernel_param *kp)
  37{
  38        unsigned long long size;
  39        int order;
  40
  41        size = memparse(val, NULL);
  42        order = get_order(size);
  43        if (order >= MAX_ORDER)
  44                return -EINVAL;
  45        ima_maxorder = order;
  46        ima_bufsize = PAGE_SIZE << order;
  47        return 0;
  48}
  49
/* Module-parameter plumbing for "ahash_bufsize": custom setter, uint getter. */
static const struct kernel_param_ops param_ops_bufsize = {
	.set = param_set_bufsize,
	.get = param_get_uint,
};
#define param_check_bufsize(name, p) __param_check(name, p, unsigned int)

module_param_named(ahash_bufsize, ima_bufsize, bufsize, 0644);
MODULE_PARM_DESC(ahash_bufsize, "Maximum ahash buffer size");

/* Transforms for the default IMA hash algorithm, shared across callers. */
static struct crypto_shash *ima_shash_tfm;
static struct crypto_ahash *ima_ahash_tfm;	/* allocated lazily on first ahash use */
  61
  62int __init ima_init_crypto(void)
  63{
  64        long rc;
  65
  66        ima_shash_tfm = crypto_alloc_shash(hash_algo_name[ima_hash_algo], 0, 0);
  67        if (IS_ERR(ima_shash_tfm)) {
  68                rc = PTR_ERR(ima_shash_tfm);
  69                pr_err("Can not allocate %s (reason: %ld)\n",
  70                       hash_algo_name[ima_hash_algo], rc);
  71                return rc;
  72        }
  73        pr_info("Allocated hash algorithm: %s\n",
  74                hash_algo_name[ima_hash_algo]);
  75        return 0;
  76}
  77
  78static struct crypto_shash *ima_alloc_tfm(enum hash_algo algo)
  79{
  80        struct crypto_shash *tfm = ima_shash_tfm;
  81        int rc;
  82
  83        if (algo < 0 || algo >= HASH_ALGO__LAST)
  84                algo = ima_hash_algo;
  85
  86        if (algo != ima_hash_algo) {
  87                tfm = crypto_alloc_shash(hash_algo_name[algo], 0, 0);
  88                if (IS_ERR(tfm)) {
  89                        rc = PTR_ERR(tfm);
  90                        pr_err("Can not allocate %s (reason: %d)\n",
  91                               hash_algo_name[algo], rc);
  92                }
  93        }
  94        return tfm;
  95}
  96
  97static void ima_free_tfm(struct crypto_shash *tfm)
  98{
  99        if (tfm != ima_shash_tfm)
 100                crypto_free_shash(tfm);
 101}
 102
 103/**
 104 * ima_alloc_pages() - Allocate contiguous pages.
 105 * @max_size:       Maximum amount of memory to allocate.
 106 * @allocated_size: Returned size of actual allocation.
 107 * @last_warn:      Should the min_size allocation warn or not.
 108 *
 109 * Tries to do opportunistic allocation for memory first trying to allocate
 110 * max_size amount of memory and then splitting that until zero order is
 111 * reached. Allocation is tried without generating allocation warnings unless
 112 * last_warn is set. Last_warn set affects only last allocation of zero order.
 113 *
 114 * By default, ima_maxorder is 0 and it is equivalent to kmalloc(GFP_KERNEL)
 115 *
 116 * Return pointer to allocated memory, or NULL on failure.
 117 */
 118static void *ima_alloc_pages(loff_t max_size, size_t *allocated_size,
 119                             int last_warn)
 120{
 121        void *ptr;
 122        int order = ima_maxorder;
 123        gfp_t gfp_mask = __GFP_RECLAIM | __GFP_NOWARN | __GFP_NORETRY;
 124
 125        if (order)
 126                order = min(get_order(max_size), order);
 127
 128        for (; order; order--) {
 129                ptr = (void *)__get_free_pages(gfp_mask, order);
 130                if (ptr) {
 131                        *allocated_size = PAGE_SIZE << order;
 132                        return ptr;
 133                }
 134        }
 135
 136        /* order is zero - one page */
 137
 138        gfp_mask = GFP_KERNEL;
 139
 140        if (!last_warn)
 141                gfp_mask |= __GFP_NOWARN;
 142
 143        ptr = (void *)__get_free_pages(gfp_mask, 0);
 144        if (ptr) {
 145                *allocated_size = PAGE_SIZE;
 146                return ptr;
 147        }
 148
 149        *allocated_size = 0;
 150        return NULL;
 151}
 152
 153/**
 154 * ima_free_pages() - Free pages allocated by ima_alloc_pages().
 155 * @ptr:  Pointer to allocated pages.
 156 * @size: Size of allocated buffer.
 157 */
 158static void ima_free_pages(void *ptr, size_t size)
 159{
 160        if (!ptr)
 161                return;
 162        free_pages((unsigned long)ptr, get_order(size));
 163}
 164
 165static struct crypto_ahash *ima_alloc_atfm(enum hash_algo algo)
 166{
 167        struct crypto_ahash *tfm = ima_ahash_tfm;
 168        int rc;
 169
 170        if (algo < 0 || algo >= HASH_ALGO__LAST)
 171                algo = ima_hash_algo;
 172
 173        if (algo != ima_hash_algo || !tfm) {
 174                tfm = crypto_alloc_ahash(hash_algo_name[algo], 0, 0);
 175                if (!IS_ERR(tfm)) {
 176                        if (algo == ima_hash_algo)
 177                                ima_ahash_tfm = tfm;
 178                } else {
 179                        rc = PTR_ERR(tfm);
 180                        pr_err("Can not allocate %s (reason: %d)\n",
 181                               hash_algo_name[algo], rc);
 182                }
 183        }
 184        return tfm;
 185}
 186
 187static void ima_free_atfm(struct crypto_ahash *tfm)
 188{
 189        if (tfm != ima_ahash_tfm)
 190                crypto_free_ahash(tfm);
 191}
 192
 193static inline int ahash_wait(int err, struct crypto_wait *wait)
 194{
 195
 196        err = crypto_wait_req(err, wait);
 197
 198        if (err)
 199                pr_crit_ratelimited("ahash calculation failed: err: %d\n", err);
 200
 201        return err;
 202}
 203
 204static int ima_calc_file_hash_atfm(struct file *file,
 205                                   struct ima_digest_data *hash,
 206                                   struct crypto_ahash *tfm)
 207{
 208        loff_t i_size, offset;
 209        char *rbuf[2] = { NULL, };
 210        int rc, rbuf_len, active = 0, ahash_rc = 0;
 211        struct ahash_request *req;
 212        struct scatterlist sg[1];
 213        struct crypto_wait wait;
 214        size_t rbuf_size[2];
 215
 216        hash->length = crypto_ahash_digestsize(tfm);
 217
 218        req = ahash_request_alloc(tfm, GFP_KERNEL);
 219        if (!req)
 220                return -ENOMEM;
 221
 222        crypto_init_wait(&wait);
 223        ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG |
 224                                   CRYPTO_TFM_REQ_MAY_SLEEP,
 225                                   crypto_req_done, &wait);
 226
 227        rc = ahash_wait(crypto_ahash_init(req), &wait);
 228        if (rc)
 229                goto out1;
 230
 231        i_size = i_size_read(file_inode(file));
 232
 233        if (i_size == 0)
 234                goto out2;
 235
 236        /*
 237         * Try to allocate maximum size of memory.
 238         * Fail if even a single page cannot be allocated.
 239         */
 240        rbuf[0] = ima_alloc_pages(i_size, &rbuf_size[0], 1);
 241        if (!rbuf[0]) {
 242                rc = -ENOMEM;
 243                goto out1;
 244        }
 245
 246        /* Only allocate one buffer if that is enough. */
 247        if (i_size > rbuf_size[0]) {
 248                /*
 249                 * Try to allocate secondary buffer. If that fails fallback to
 250                 * using single buffering. Use previous memory allocation size
 251                 * as baseline for possible allocation size.
 252                 */
 253                rbuf[1] = ima_alloc_pages(i_size - rbuf_size[0],
 254                                          &rbuf_size[1], 0);
 255        }
 256
 257        for (offset = 0; offset < i_size; offset += rbuf_len) {
 258                if (!rbuf[1] && offset) {
 259                        /* Not using two buffers, and it is not the first
 260                         * read/request, wait for the completion of the
 261                         * previous ahash_update() request.
 262                         */
 263                        rc = ahash_wait(ahash_rc, &wait);
 264                        if (rc)
 265                                goto out3;
 266                }
 267                /* read buffer */
 268                rbuf_len = min_t(loff_t, i_size - offset, rbuf_size[active]);
 269                rc = integrity_kernel_read(file, offset, rbuf[active],
 270                                           rbuf_len);
 271                if (rc != rbuf_len)
 272                        goto out3;
 273
 274                if (rbuf[1] && offset) {
 275                        /* Using two buffers, and it is not the first
 276                         * read/request, wait for the completion of the
 277                         * previous ahash_update() request.
 278                         */
 279                        rc = ahash_wait(ahash_rc, &wait);
 280                        if (rc)
 281                                goto out3;
 282                }
 283
 284                sg_init_one(&sg[0], rbuf[active], rbuf_len);
 285                ahash_request_set_crypt(req, sg, NULL, rbuf_len);
 286
 287                ahash_rc = crypto_ahash_update(req);
 288
 289                if (rbuf[1])
 290                        active = !active; /* swap buffers, if we use two */
 291        }
 292        /* wait for the last update request to complete */
 293        rc = ahash_wait(ahash_rc, &wait);
 294out3:
 295        ima_free_pages(rbuf[0], rbuf_size[0]);
 296        ima_free_pages(rbuf[1], rbuf_size[1]);
 297out2:
 298        if (!rc) {
 299                ahash_request_set_crypt(req, NULL, hash->digest, 0);
 300                rc = ahash_wait(crypto_ahash_final(req), &wait);
 301        }
 302out1:
 303        ahash_request_free(req);
 304        return rc;
 305}
 306
 307static int ima_calc_file_ahash(struct file *file, struct ima_digest_data *hash)
 308{
 309        struct crypto_ahash *tfm;
 310        int rc;
 311
 312        tfm = ima_alloc_atfm(hash->algo);
 313        if (IS_ERR(tfm))
 314                return PTR_ERR(tfm);
 315
 316        rc = ima_calc_file_hash_atfm(file, hash, tfm);
 317
 318        ima_free_atfm(tfm);
 319
 320        return rc;
 321}
 322
 323static int ima_calc_file_hash_tfm(struct file *file,
 324                                  struct ima_digest_data *hash,
 325                                  struct crypto_shash *tfm)
 326{
 327        loff_t i_size, offset = 0;
 328        char *rbuf;
 329        int rc;
 330        SHASH_DESC_ON_STACK(shash, tfm);
 331
 332        shash->tfm = tfm;
 333
 334        hash->length = crypto_shash_digestsize(tfm);
 335
 336        rc = crypto_shash_init(shash);
 337        if (rc != 0)
 338                return rc;
 339
 340        i_size = i_size_read(file_inode(file));
 341
 342        if (i_size == 0)
 343                goto out;
 344
 345        rbuf = kzalloc(PAGE_SIZE, GFP_KERNEL);
 346        if (!rbuf)
 347                return -ENOMEM;
 348
 349        while (offset < i_size) {
 350                int rbuf_len;
 351
 352                rbuf_len = integrity_kernel_read(file, offset, rbuf, PAGE_SIZE);
 353                if (rbuf_len < 0) {
 354                        rc = rbuf_len;
 355                        break;
 356                }
 357                if (rbuf_len == 0)
 358                        break;
 359                offset += rbuf_len;
 360
 361                rc = crypto_shash_update(shash, rbuf, rbuf_len);
 362                if (rc)
 363                        break;
 364        }
 365        kfree(rbuf);
 366out:
 367        if (!rc)
 368                rc = crypto_shash_final(shash, hash->digest);
 369        return rc;
 370}
 371
 372static int ima_calc_file_shash(struct file *file, struct ima_digest_data *hash)
 373{
 374        struct crypto_shash *tfm;
 375        int rc;
 376
 377        tfm = ima_alloc_tfm(hash->algo);
 378        if (IS_ERR(tfm))
 379                return PTR_ERR(tfm);
 380
 381        rc = ima_calc_file_hash_tfm(file, hash, tfm);
 382
 383        ima_free_tfm(tfm);
 384
 385        return rc;
 386}
 387
 388/*
 389 * ima_calc_file_hash - calculate file hash
 390 *
 391 * Asynchronous hash (ahash) allows using HW acceleration for calculating
 392 * a hash. ahash performance varies for different data sizes on different
 393 * crypto accelerators. shash performance might be better for smaller files.
 394 * The 'ima.ahash_minsize' module parameter allows specifying the best
 395 * minimum file size for using ahash on the system.
 396 *
 397 * If the ima.ahash_minsize parameter is not specified, this function uses
 398 * shash for the hash calculation.  If ahash fails, it falls back to using
 399 * shash.
 400 */
 401int ima_calc_file_hash(struct file *file, struct ima_digest_data *hash)
 402{
 403        loff_t i_size;
 404        int rc;
 405        struct file *f = file;
 406        bool new_file_instance = false, modified_flags = false;
 407
 408        /*
 409         * For consistency, fail file's opened with the O_DIRECT flag on
 410         * filesystems mounted with/without DAX option.
 411         */
 412        if (file->f_flags & O_DIRECT) {
 413                hash->length = hash_digest_size[ima_hash_algo];
 414                hash->algo = ima_hash_algo;
 415                return -EINVAL;
 416        }
 417
 418        /* Open a new file instance in O_RDONLY if we cannot read */
 419        if (!(file->f_mode & FMODE_READ)) {
 420                int flags = file->f_flags & ~(O_WRONLY | O_APPEND |
 421                                O_TRUNC | O_CREAT | O_NOCTTY | O_EXCL);
 422                flags |= O_RDONLY;
 423                f = dentry_open(&file->f_path, flags, file->f_cred);
 424                if (IS_ERR(f)) {
 425                        /*
 426                         * Cannot open the file again, lets modify f_flags
 427                         * of original and continue
 428                         */
 429                        pr_info_ratelimited("Unable to reopen file for reading.\n");
 430                        f = file;
 431                        f->f_flags |= FMODE_READ;
 432                        modified_flags = true;
 433                } else {
 434                        new_file_instance = true;
 435                }
 436        }
 437
 438        i_size = i_size_read(file_inode(f));
 439
 440        if (ima_ahash_minsize && i_size >= ima_ahash_minsize) {
 441                rc = ima_calc_file_ahash(f, hash);
 442                if (!rc)
 443                        goto out;
 444        }
 445
 446        rc = ima_calc_file_shash(f, hash);
 447out:
 448        if (new_file_instance)
 449                fput(f);
 450        else if (modified_flags)
 451                f->f_flags &= ~FMODE_READ;
 452        return rc;
 453}
 454
 455/*
 456 * Calculate the hash of template data
 457 */
/*
 * Hash one template entry: for every field, hash the field length
 * followed by the field data — except for the original "ima" template,
 * whose "n" (event name) field is hashed as a fixed-size, zero-padded
 * buffer with no length prefix (the legacy on-disk/PCR format).
 */
static int ima_calc_field_array_hash_tfm(struct ima_field_data *field_data,
					 struct ima_template_desc *td,
					 int num_fields,
					 struct ima_digest_data *hash,
					 struct crypto_shash *tfm)
{
	SHASH_DESC_ON_STACK(shash, tfm);
	int rc, i;

	shash->tfm = tfm;

	hash->length = crypto_shash_digestsize(tfm);

	rc = crypto_shash_init(shash);
	if (rc != 0)
		return rc;

	for (i = 0; i < num_fields; i++) {
		u8 buffer[IMA_EVENT_NAME_LEN_MAX + 1] = { 0 };
		u8 *data_to_hash = field_data[i].data;
		u32 datalen = field_data[i].len;
		/* little-endian length when a canonical (arch-neutral) log is requested */
		u32 datalen_to_hash =
		    !ima_canonical_fmt ? datalen : cpu_to_le32(datalen);

		if (strcmp(td->name, IMA_TEMPLATE_IMA_NAME) != 0) {
			/* non-legacy templates prefix each field with its length */
			rc = crypto_shash_update(shash,
						(const u8 *) &datalen_to_hash,
						sizeof(datalen_to_hash));
			if (rc)
				break;
		} else if (strcmp(td->fields[i]->field_id, "n") == 0) {
			/*
			 * Legacy "ima" template event name: copy into a
			 * fixed-size zero-padded buffer.  NOTE(review):
			 * assumes datalen <= IMA_EVENT_NAME_LEN_MAX + 1 —
			 * confirm callers bound the "n" field.
			 */
			memcpy(buffer, data_to_hash, datalen);
			data_to_hash = buffer;
			datalen = IMA_EVENT_NAME_LEN_MAX + 1;
		}
		rc = crypto_shash_update(shash, data_to_hash, datalen);
		if (rc)
			break;
	}

	if (!rc)
		rc = crypto_shash_final(shash, hash->digest);

	return rc;
}
 503
 504int ima_calc_field_array_hash(struct ima_field_data *field_data,
 505                              struct ima_template_desc *desc, int num_fields,
 506                              struct ima_digest_data *hash)
 507{
 508        struct crypto_shash *tfm;
 509        int rc;
 510
 511        tfm = ima_alloc_tfm(hash->algo);
 512        if (IS_ERR(tfm))
 513                return PTR_ERR(tfm);
 514
 515        rc = ima_calc_field_array_hash_tfm(field_data, desc, num_fields,
 516                                           hash, tfm);
 517
 518        ima_free_tfm(tfm);
 519
 520        return rc;
 521}
 522
 523static int calc_buffer_ahash_atfm(const void *buf, loff_t len,
 524                                  struct ima_digest_data *hash,
 525                                  struct crypto_ahash *tfm)
 526{
 527        struct ahash_request *req;
 528        struct scatterlist sg;
 529        struct crypto_wait wait;
 530        int rc, ahash_rc = 0;
 531
 532        hash->length = crypto_ahash_digestsize(tfm);
 533
 534        req = ahash_request_alloc(tfm, GFP_KERNEL);
 535        if (!req)
 536                return -ENOMEM;
 537
 538        crypto_init_wait(&wait);
 539        ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG |
 540                                   CRYPTO_TFM_REQ_MAY_SLEEP,
 541                                   crypto_req_done, &wait);
 542
 543        rc = ahash_wait(crypto_ahash_init(req), &wait);
 544        if (rc)
 545                goto out;
 546
 547        sg_init_one(&sg, buf, len);
 548        ahash_request_set_crypt(req, &sg, NULL, len);
 549
 550        ahash_rc = crypto_ahash_update(req);
 551
 552        /* wait for the update request to complete */
 553        rc = ahash_wait(ahash_rc, &wait);
 554        if (!rc) {
 555                ahash_request_set_crypt(req, NULL, hash->digest, 0);
 556                rc = ahash_wait(crypto_ahash_final(req), &wait);
 557        }
 558out:
 559        ahash_request_free(req);
 560        return rc;
 561}
 562
 563static int calc_buffer_ahash(const void *buf, loff_t len,
 564                             struct ima_digest_data *hash)
 565{
 566        struct crypto_ahash *tfm;
 567        int rc;
 568
 569        tfm = ima_alloc_atfm(hash->algo);
 570        if (IS_ERR(tfm))
 571                return PTR_ERR(tfm);
 572
 573        rc = calc_buffer_ahash_atfm(buf, len, hash, tfm);
 574
 575        ima_free_atfm(tfm);
 576
 577        return rc;
 578}
 579
 580static int calc_buffer_shash_tfm(const void *buf, loff_t size,
 581                                struct ima_digest_data *hash,
 582                                struct crypto_shash *tfm)
 583{
 584        SHASH_DESC_ON_STACK(shash, tfm);
 585        unsigned int len;
 586        int rc;
 587
 588        shash->tfm = tfm;
 589
 590        hash->length = crypto_shash_digestsize(tfm);
 591
 592        rc = crypto_shash_init(shash);
 593        if (rc != 0)
 594                return rc;
 595
 596        while (size) {
 597                len = size < PAGE_SIZE ? size : PAGE_SIZE;
 598                rc = crypto_shash_update(shash, buf, len);
 599                if (rc)
 600                        break;
 601                buf += len;
 602                size -= len;
 603        }
 604
 605        if (!rc)
 606                rc = crypto_shash_final(shash, hash->digest);
 607        return rc;
 608}
 609
 610static int calc_buffer_shash(const void *buf, loff_t len,
 611                             struct ima_digest_data *hash)
 612{
 613        struct crypto_shash *tfm;
 614        int rc;
 615
 616        tfm = ima_alloc_tfm(hash->algo);
 617        if (IS_ERR(tfm))
 618                return PTR_ERR(tfm);
 619
 620        rc = calc_buffer_shash_tfm(buf, len, hash, tfm);
 621
 622        ima_free_tfm(tfm);
 623        return rc;
 624}
 625
 626int ima_calc_buffer_hash(const void *buf, loff_t len,
 627                         struct ima_digest_data *hash)
 628{
 629        int rc;
 630
 631        if (ima_ahash_minsize && len >= ima_ahash_minsize) {
 632                rc = calc_buffer_ahash(buf, len, hash);
 633                if (!rc)
 634                        return 0;
 635        }
 636
 637        return calc_buffer_shash(buf, len, hash);
 638}
 639
 640static void __init ima_pcrread(u32 idx, struct tpm_digest *d)
 641{
 642        if (!ima_tpm_chip)
 643                return;
 644
 645        if (tpm_pcr_read(ima_tpm_chip, idx, d) != 0)
 646                pr_err("Error Communicating to TPM chip\n");
 647}
 648
 649/*
 650 * Calculate the boot aggregate hash
 651 */
 652static int __init ima_calc_boot_aggregate_tfm(char *digest,
 653                                              struct crypto_shash *tfm)
 654{
 655        struct tpm_digest d = { .alg_id = TPM_ALG_SHA1, .digest = {0} };
 656        int rc;
 657        u32 i;
 658        SHASH_DESC_ON_STACK(shash, tfm);
 659
 660        shash->tfm = tfm;
 661
 662        rc = crypto_shash_init(shash);
 663        if (rc != 0)
 664                return rc;
 665
 666        /* cumulative sha1 over tpm registers 0-7 */
 667        for (i = TPM_PCR0; i < TPM_PCR8; i++) {
 668                ima_pcrread(i, &d);
 669                /* now accumulate with current aggregate */
 670                rc = crypto_shash_update(shash, d.digest, TPM_DIGEST_SIZE);
 671        }
 672        if (!rc)
 673                crypto_shash_final(shash, digest);
 674        return rc;
 675}
 676
 677int __init ima_calc_boot_aggregate(struct ima_digest_data *hash)
 678{
 679        struct crypto_shash *tfm;
 680        int rc;
 681
 682        tfm = ima_alloc_tfm(hash->algo);
 683        if (IS_ERR(tfm))
 684                return PTR_ERR(tfm);
 685
 686        hash->length = crypto_shash_digestsize(tfm);
 687        rc = ima_calc_boot_aggregate_tfm(hash->digest, tfm);
 688
 689        ima_free_tfm(tfm);
 690
 691        return rc;
 692}
 693