linux/block/blk-crypto-fallback.c
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright 2019 Google LLC
 */

/*
 * Refer to Documentation/block/inline-encryption.rst for detailed explanation.
 */

#define pr_fmt(fmt) "blk-crypto-fallback: " fmt

#include <crypto/skcipher.h>
#include <linux/blk-cgroup.h>
#include <linux/blk-crypto.h>
#include <linux/blkdev.h>
#include <linux/crypto.h>
#include <linux/keyslot-manager.h>
#include <linux/mempool.h>
#include <linux/module.h>
#include <linux/random.h>

#include "blk-crypto-internal.h"

static unsigned int num_prealloc_bounce_pg = 32;
module_param(num_prealloc_bounce_pg, uint, 0);
MODULE_PARM_DESC(num_prealloc_bounce_pg,
                 "Number of preallocated bounce pages for the blk-crypto crypto API fallback");

static unsigned int blk_crypto_num_keyslots = 100;
module_param_named(num_keyslots, blk_crypto_num_keyslots, uint, 0);
MODULE_PARM_DESC(num_keyslots,
                 "Number of keyslots for the blk-crypto crypto API fallback");

static unsigned int num_prealloc_fallback_crypt_ctxs = 128;
module_param(num_prealloc_fallback_crypt_ctxs, uint, 0);
MODULE_PARM_DESC(num_prealloc_fallback_crypt_ctxs,
                 "Number of preallocated bio fallback crypto contexts for blk-crypto to use during crypto API fallback");

struct bio_fallback_crypt_ctx {
        struct bio_crypt_ctx crypt_ctx;
        /*
         * Copy of the bvec_iter when this bio was submitted.
         * We only want to en/decrypt the part of the bio as described by the
         * bvec_iter upon submission, because the bio might be split before
         * being resubmitted.
         */
        struct bvec_iter crypt_iter;
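        /*
         * For reads, the original bi_private and bi_end_io are saved here
         * while the bio is in flight; once the bio completes, the same space
         * is reused for the work item that performs the decryption.
         */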
        union {
                struct {
                        struct work_struct work;
                        struct bio *bio;
                };
                struct {
                        void *bi_private_orig;
                        bio_end_io_t *bi_end_io_orig;
                };
        };
};

static struct kmem_cache *bio_fallback_crypt_ctx_cache;
static mempool_t *bio_fallback_crypt_ctx_pool;

/*
 * Allocating a crypto tfm during I/O can deadlock, so we have to preallocate
 * all of a mode's tfms when that mode starts being used. Since each mode may
 * need all the keyslots at some point, each mode needs its own tfm for each
 * keyslot; thus, a keyslot may contain tfms for multiple modes.  However, to
 * match the behavior of real inline encryption hardware (which only supports a
 * single encryption context per keyslot), we only allow one tfm per keyslot to
 * be used at a time - the rest of the unused tfms have their keys cleared.
 */
static DEFINE_MUTEX(tfms_init_lock);
static bool tfms_inited[BLK_ENCRYPTION_MODE_MAX];

static struct blk_crypto_keyslot {
        enum blk_crypto_mode_num crypto_mode;
        struct crypto_skcipher *tfms[BLK_ENCRYPTION_MODE_MAX];
} *blk_crypto_keyslots;

static struct blk_keyslot_manager blk_crypto_ksm;
static struct workqueue_struct *blk_crypto_wq;
static mempool_t *blk_crypto_bounce_page_pool;
static struct bio_set crypto_bio_split;

/*
 * This is the key we set when evicting a keyslot. This *should* be the all 0's
 * key, but AES-XTS rejects that key, so we use some random bytes instead.
 */
static u8 blank_key[BLK_CRYPTO_MAX_KEY_SIZE];

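/* Wipe the key in @slot by programming the blank (random) key into its tfm. */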
static void blk_crypto_evict_keyslot(unsigned int slot)
{
        struct blk_crypto_keyslot *slotp = &blk_crypto_keyslots[slot];
        enum blk_crypto_mode_num crypto_mode = slotp->crypto_mode;
        int err;

        WARN_ON(slotp->crypto_mode == BLK_ENCRYPTION_MODE_INVALID);

        /* Clear the key in the skcipher */
        err = crypto_skcipher_setkey(slotp->tfms[crypto_mode], blank_key,
                                     blk_crypto_modes[crypto_mode].keysize);
        WARN_ON(err);
        slotp->crypto_mode = BLK_ENCRYPTION_MODE_INVALID;
}

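/*
 * Program @key into @slot's tfm for the key's crypto mode, first evicting any
 * key of a different mode that the slot currently holds.
 */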
static int blk_crypto_keyslot_program(struct blk_keyslot_manager *ksm,
                                      const struct blk_crypto_key *key,
                                      unsigned int slot)
{
        struct blk_crypto_keyslot *slotp = &blk_crypto_keyslots[slot];
        const enum blk_crypto_mode_num crypto_mode =
                                                key->crypto_cfg.crypto_mode;
        int err;

        if (crypto_mode != slotp->crypto_mode &&
            slotp->crypto_mode != BLK_ENCRYPTION_MODE_INVALID)
                blk_crypto_evict_keyslot(slot);

        slotp->crypto_mode = crypto_mode;
        err = crypto_skcipher_setkey(slotp->tfms[crypto_mode], key->raw,
                                     key->size);
        if (err) {
                blk_crypto_evict_keyslot(slot);
                return err;
        }
        return 0;
}

static int blk_crypto_keyslot_evict(struct blk_keyslot_manager *ksm,
                                    const struct blk_crypto_key *key,
                                    unsigned int slot)
{
        blk_crypto_evict_keyslot(slot);
        return 0;
}

/*
 * The crypto API fallback KSM ops - only used for a bio when it specifies a
 * blk_crypto_key that was not supported by the device's inline encryption
 * hardware.
 */
static const struct blk_ksm_ll_ops blk_crypto_ksm_ll_ops = {
        .keyslot_program        = blk_crypto_keyslot_program,
        .keyslot_evict          = blk_crypto_keyslot_evict,
};

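/*
 * Endio for the bounce bio used for encryption: free the bounce pages, copy
 * the I/O status to the original bio, and end it.
 */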
static void blk_crypto_fallback_encrypt_endio(struct bio *enc_bio)
{
        struct bio *src_bio = enc_bio->bi_private;
        int i;

        for (i = 0; i < enc_bio->bi_vcnt; i++)
                mempool_free(enc_bio->bi_io_vec[i].bv_page,
                             blk_crypto_bounce_page_pool);

        src_bio->bi_status = enc_bio->bi_status;

        bio_put(enc_bio);
        bio_endio(src_bio);
}

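/*
 * Allocate a bounce bio that mirrors @bio_src. The cloned bvecs initially
 * still point at the source pages; the encryption path replaces them with
 * bounce pages holding the ciphertext.
 */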
static struct bio *blk_crypto_clone_bio(struct bio *bio_src)
{
        struct bvec_iter iter;
        struct bio_vec bv;
        struct bio *bio;

        bio = bio_kmalloc(GFP_NOIO, bio_segments(bio_src));
        if (!bio)
                return NULL;
        bio->bi_bdev            = bio_src->bi_bdev;
        if (bio_flagged(bio_src, BIO_REMAPPED))
                bio_set_flag(bio, BIO_REMAPPED);
        bio->bi_opf             = bio_src->bi_opf;
        bio->bi_ioprio          = bio_src->bi_ioprio;
        bio->bi_write_hint      = bio_src->bi_write_hint;
        bio->bi_iter.bi_sector  = bio_src->bi_iter.bi_sector;
        bio->bi_iter.bi_size    = bio_src->bi_iter.bi_size;

        bio_for_each_segment(bv, bio_src, iter)
                bio->bi_io_vec[bio->bi_vcnt++] = bv;

        bio_clone_blkg_association(bio, bio_src);
        blkcg_bio_issue_init(bio);

        return bio;
}

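/*
 * Allocate an skcipher_request for the tfm that's programmed into @slot, and
 * set it up to signal completion through @wait.
 */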
static bool blk_crypto_alloc_cipher_req(struct blk_ksm_keyslot *slot,
                                        struct skcipher_request **ciph_req_ret,
                                        struct crypto_wait *wait)
{
        struct skcipher_request *ciph_req;
        const struct blk_crypto_keyslot *slotp;
        int keyslot_idx = blk_ksm_get_slot_idx(slot);

        slotp = &blk_crypto_keyslots[keyslot_idx];
        ciph_req = skcipher_request_alloc(slotp->tfms[slotp->crypto_mode],
                                          GFP_NOIO);
        if (!ciph_req)
                return false;

        skcipher_request_set_callback(ciph_req,
                                      CRYPTO_TFM_REQ_MAY_BACKLOG |
                                      CRYPTO_TFM_REQ_MAY_SLEEP,
                                      crypto_req_done, wait);
        *ciph_req_ret = ciph_req;

        return true;
}

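/*
 * The bounce bio can hold at most BIO_MAX_VECS bvecs, so if @bio needs more
 * segments than that, split off a front part that fits, update *bio_ptr to
 * point to it, and resubmit the chained remainder.
 */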
static bool blk_crypto_split_bio_if_needed(struct bio **bio_ptr)
{
        struct bio *bio = *bio_ptr;
        unsigned int i = 0;
        unsigned int num_sectors = 0;
        struct bio_vec bv;
        struct bvec_iter iter;

        bio_for_each_segment(bv, bio, iter) {
                num_sectors += bv.bv_len >> SECTOR_SHIFT;
                if (++i == BIO_MAX_VECS)
                        break;
        }
        if (num_sectors < bio_sectors(bio)) {
                struct bio *split_bio;

                split_bio = bio_split(bio, num_sectors, GFP_NOIO,
                                      &crypto_bio_split);
                if (!split_bio) {
                        bio->bi_status = BLK_STS_RESOURCE;
                        return false;
                }
                bio_chain(split_bio, bio);
                submit_bio_noacct(bio);
                *bio_ptr = split_bio;
        }

        return true;
}

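/*
 * The IV passed to the skcipher: the DUN as little-endian 64-bit words,
 * accessible as raw bytes.
 */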
union blk_crypto_iv {
        __le64 dun[BLK_CRYPTO_DUN_ARRAY_SIZE];
        u8 bytes[BLK_CRYPTO_MAX_IV_SIZE];
};

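/* Pack the DUN into the IV as little-endian 64-bit words. */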
static void blk_crypto_dun_to_iv(const u64 dun[BLK_CRYPTO_DUN_ARRAY_SIZE],
                                 union blk_crypto_iv *iv)
{
        int i;

        for (i = 0; i < BLK_CRYPTO_DUN_ARRAY_SIZE; i++)
                iv->dun[i] = cpu_to_le64(dun[i]);
}

/*
 * The crypto API fallback's encryption routine.
 * Allocate a bounce bio for encryption, encrypt the input bio using the crypto
 * API, and replace *bio_ptr with the bounce bio. May split the input bio if
 * it's too large. Returns true on success. Returns false and sets
 * bio->bi_status on error.
 */
static bool blk_crypto_fallback_encrypt_bio(struct bio **bio_ptr)
{
        struct bio *src_bio, *enc_bio;
        struct bio_crypt_ctx *bc;
        struct blk_ksm_keyslot *slot;
        int data_unit_size;
        struct skcipher_request *ciph_req = NULL;
        DECLARE_CRYPTO_WAIT(wait);
        u64 curr_dun[BLK_CRYPTO_DUN_ARRAY_SIZE];
        struct scatterlist src, dst;
        union blk_crypto_iv iv;
        unsigned int i, j;
        bool ret = false;
        blk_status_t blk_st;

        /* Split the bio if it's too big for single page bvec */
        if (!blk_crypto_split_bio_if_needed(bio_ptr))
                return false;

        src_bio = *bio_ptr;
        bc = src_bio->bi_crypt_context;
        data_unit_size = bc->bc_key->crypto_cfg.data_unit_size;

        /* Allocate bounce bio for encryption */
        enc_bio = blk_crypto_clone_bio(src_bio);
        if (!enc_bio) {
                src_bio->bi_status = BLK_STS_RESOURCE;
                return false;
        }

        /*
         * Use the crypto API fallback keyslot manager to get a crypto_skcipher
         * for the algorithm and key specified for this bio.
         */
        blk_st = blk_ksm_get_slot_for_key(&blk_crypto_ksm, bc->bc_key, &slot);
        if (blk_st != BLK_STS_OK) {
                src_bio->bi_status = blk_st;
                goto out_put_enc_bio;
        }

        /* and then allocate an skcipher_request for it */
        if (!blk_crypto_alloc_cipher_req(slot, &ciph_req, &wait)) {
                src_bio->bi_status = BLK_STS_RESOURCE;
                goto out_release_keyslot;
        }

        memcpy(curr_dun, bc->bc_dun, sizeof(curr_dun));
        sg_init_table(&src, 1);
        sg_init_table(&dst, 1);

        skcipher_request_set_crypt(ciph_req, &src, &dst, data_unit_size,
                                   iv.bytes);

        /* Encrypt each page in the bounce bio */
        for (i = 0; i < enc_bio->bi_vcnt; i++) {
                struct bio_vec *enc_bvec = &enc_bio->bi_io_vec[i];
                struct page *plaintext_page = enc_bvec->bv_page;
                struct page *ciphertext_page =
                        mempool_alloc(blk_crypto_bounce_page_pool, GFP_NOIO);

                enc_bvec->bv_page = ciphertext_page;

                if (!ciphertext_page) {
                        src_bio->bi_status = BLK_STS_RESOURCE;
                        goto out_free_bounce_pages;
                }

                sg_set_page(&src, plaintext_page, data_unit_size,
                            enc_bvec->bv_offset);
                sg_set_page(&dst, ciphertext_page, data_unit_size,
                            enc_bvec->bv_offset);

                /* Encrypt each data unit in this page */
                for (j = 0; j < enc_bvec->bv_len; j += data_unit_size) {
                        blk_crypto_dun_to_iv(curr_dun, &iv);
                        if (crypto_wait_req(crypto_skcipher_encrypt(ciph_req),
                                            &wait)) {
                                i++;
                                src_bio->bi_status = BLK_STS_IOERR;
                                goto out_free_bounce_pages;
                        }
                        bio_crypt_dun_increment(curr_dun, 1);
                        src.offset += data_unit_size;
                        dst.offset += data_unit_size;
                }
        }

        enc_bio->bi_private = src_bio;
        enc_bio->bi_end_io = blk_crypto_fallback_encrypt_endio;
        *bio_ptr = enc_bio;
        ret = true;

        enc_bio = NULL;
        goto out_free_ciph_req;

out_free_bounce_pages:
        while (i > 0)
                mempool_free(enc_bio->bi_io_vec[--i].bv_page,
                             blk_crypto_bounce_page_pool);
out_free_ciph_req:
        skcipher_request_free(ciph_req);
out_release_keyslot:
        blk_ksm_put_slot(slot);
out_put_enc_bio:
        if (enc_bio)
                bio_put(enc_bio);

        return ret;
}

/*
 * The crypto API fallback's main decryption routine.
 * Decrypts the input bio in place, then calls bio_endio() on it.
 */
static void blk_crypto_fallback_decrypt_bio(struct work_struct *work)
{
        struct bio_fallback_crypt_ctx *f_ctx =
                container_of(work, struct bio_fallback_crypt_ctx, work);
        struct bio *bio = f_ctx->bio;
        struct bio_crypt_ctx *bc = &f_ctx->crypt_ctx;
        struct blk_ksm_keyslot *slot;
        struct skcipher_request *ciph_req = NULL;
        DECLARE_CRYPTO_WAIT(wait);
        u64 curr_dun[BLK_CRYPTO_DUN_ARRAY_SIZE];
        union blk_crypto_iv iv;
        struct scatterlist sg;
        struct bio_vec bv;
        struct bvec_iter iter;
        const int data_unit_size = bc->bc_key->crypto_cfg.data_unit_size;
        unsigned int i;
        blk_status_t blk_st;

        /*
         * Use the crypto API fallback keyslot manager to get a crypto_skcipher
         * for the algorithm and key specified for this bio.
         */
        blk_st = blk_ksm_get_slot_for_key(&blk_crypto_ksm, bc->bc_key, &slot);
        if (blk_st != BLK_STS_OK) {
                bio->bi_status = blk_st;
                goto out_no_keyslot;
        }

        /* and then allocate an skcipher_request for it */
        if (!blk_crypto_alloc_cipher_req(slot, &ciph_req, &wait)) {
                bio->bi_status = BLK_STS_RESOURCE;
                goto out;
        }

        memcpy(curr_dun, bc->bc_dun, sizeof(curr_dun));
        sg_init_table(&sg, 1);
        skcipher_request_set_crypt(ciph_req, &sg, &sg, data_unit_size,
                                   iv.bytes);

        /* Decrypt each segment in the bio */
        __bio_for_each_segment(bv, bio, iter, f_ctx->crypt_iter) {
                struct page *page = bv.bv_page;

                sg_set_page(&sg, page, data_unit_size, bv.bv_offset);

                /* Decrypt each data unit in the segment */
                for (i = 0; i < bv.bv_len; i += data_unit_size) {
                        blk_crypto_dun_to_iv(curr_dun, &iv);
                        if (crypto_wait_req(crypto_skcipher_decrypt(ciph_req),
                                            &wait)) {
                                bio->bi_status = BLK_STS_IOERR;
                                goto out;
                        }
                        bio_crypt_dun_increment(curr_dun, 1);
                        sg.offset += data_unit_size;
                }
        }

out:
        skcipher_request_free(ciph_req);
        blk_ksm_put_slot(slot);
out_no_keyslot:
        mempool_free(f_ctx, bio_fallback_crypt_ctx_pool);
        bio_endio(bio);
}

/**
 * blk_crypto_fallback_decrypt_endio - queue bio for fallback decryption
 *
 * @bio: the bio to queue
 *
 * Restore bi_private and bi_end_io, and queue the bio for decryption on a
 * workqueue, since this function will be called from an atomic context.
 */
static void blk_crypto_fallback_decrypt_endio(struct bio *bio)
{
        struct bio_fallback_crypt_ctx *f_ctx = bio->bi_private;

        bio->bi_private = f_ctx->bi_private_orig;
        bio->bi_end_io = f_ctx->bi_end_io_orig;

        /* If there was an IO error, don't queue for decrypt. */
        if (bio->bi_status) {
                mempool_free(f_ctx, bio_fallback_crypt_ctx_pool);
                bio_endio(bio);
                return;
        }

        INIT_WORK(&f_ctx->work, blk_crypto_fallback_decrypt_bio);
        f_ctx->bio = bio;
        queue_work(blk_crypto_wq, &f_ctx->work);
}

/**
 * blk_crypto_fallback_bio_prep - Prepare a bio to use fallback en/decryption
 *
 * @bio_ptr: pointer to the bio to prepare
 *
 * If the bio is doing a WRITE operation, this splits the bio into two parts if
 * it's too big (see blk_crypto_split_bio_if_needed()). It then allocates a
 * bounce bio for the first part, encrypts it, and updates *bio_ptr to point to
 * the bounce bio.
 *
 * For a READ operation, we mark the bio for decryption by using bi_private and
 * bi_end_io.
 *
 * In either case, this function will make the bio look like a regular bio (i.e.
 * as if no encryption context was ever specified) for the purposes of the rest
 * of the stack except for blk-integrity (blk-integrity and blk-crypto are not
 * currently supported together).
 *
 * Return: true on success. Sets bio->bi_status and returns false on error.
 */
bool blk_crypto_fallback_bio_prep(struct bio **bio_ptr)
{
        struct bio *bio = *bio_ptr;
        struct bio_crypt_ctx *bc = bio->bi_crypt_context;
        struct bio_fallback_crypt_ctx *f_ctx;

        if (WARN_ON_ONCE(!tfms_inited[bc->bc_key->crypto_cfg.crypto_mode])) {
                /* User didn't call blk_crypto_start_using_key() first */
                bio->bi_status = BLK_STS_IOERR;
                return false;
        }

        if (!blk_ksm_crypto_cfg_supported(&blk_crypto_ksm,
                                          &bc->bc_key->crypto_cfg)) {
                bio->bi_status = BLK_STS_NOTSUPP;
                return false;
        }

        if (bio_data_dir(bio) == WRITE)
                return blk_crypto_fallback_encrypt_bio(bio_ptr);

        /*
         * bio READ case: Set up a f_ctx in the bio's bi_private and set the
         * bi_end_io appropriately to trigger decryption when the bio is ended.
         */
        f_ctx = mempool_alloc(bio_fallback_crypt_ctx_pool, GFP_NOIO);
        f_ctx->crypt_ctx = *bc;
        f_ctx->crypt_iter = bio->bi_iter;
        f_ctx->bi_private_orig = bio->bi_private;
        f_ctx->bi_end_io_orig = bio->bi_end_io;
        bio->bi_private = (void *)f_ctx;
        bio->bi_end_io = blk_crypto_fallback_decrypt_endio;
        bio_crypt_free_ctx(bio);

        return true;
}

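/* Evict @key from the fallback's keyslot manager. */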
int blk_crypto_fallback_evict_key(const struct blk_crypto_key *key)
{
        return blk_ksm_evict_key(&blk_crypto_ksm, key);
}

static bool blk_crypto_fallback_inited;
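/*
 * One-time setup of the fallback's shared resources (keyslot manager,
 * workqueue, keyslot array, bounce page pool, and crypt context pool),
 * done lazily the first time a crypto mode starts being used.
 */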
static int blk_crypto_fallback_init(void)
{
        int i;
        int err;

        if (blk_crypto_fallback_inited)
                return 0;

        prandom_bytes(blank_key, BLK_CRYPTO_MAX_KEY_SIZE);

        err = bioset_init(&crypto_bio_split, 64, 0, 0);
        if (err)
                goto out;

        err = blk_ksm_init(&blk_crypto_ksm, blk_crypto_num_keyslots);
        if (err)
                goto fail_free_bioset;
        err = -ENOMEM;

        blk_crypto_ksm.ksm_ll_ops = blk_crypto_ksm_ll_ops;
        blk_crypto_ksm.max_dun_bytes_supported = BLK_CRYPTO_MAX_IV_SIZE;

        /* All blk-crypto modes have a crypto API fallback. */
        for (i = 0; i < BLK_ENCRYPTION_MODE_MAX; i++)
                blk_crypto_ksm.crypto_modes_supported[i] = 0xFFFFFFFF;
        blk_crypto_ksm.crypto_modes_supported[BLK_ENCRYPTION_MODE_INVALID] = 0;

        blk_crypto_wq = alloc_workqueue("blk_crypto_wq",
                                        WQ_UNBOUND | WQ_HIGHPRI |
                                        WQ_MEM_RECLAIM, num_online_cpus());
        if (!blk_crypto_wq)
                goto fail_free_ksm;

        blk_crypto_keyslots = kcalloc(blk_crypto_num_keyslots,
                                      sizeof(blk_crypto_keyslots[0]),
                                      GFP_KERNEL);
        if (!blk_crypto_keyslots)
                goto fail_free_wq;

        blk_crypto_bounce_page_pool =
                mempool_create_page_pool(num_prealloc_bounce_pg, 0);
        if (!blk_crypto_bounce_page_pool)
                goto fail_free_keyslots;

        bio_fallback_crypt_ctx_cache = KMEM_CACHE(bio_fallback_crypt_ctx, 0);
        if (!bio_fallback_crypt_ctx_cache)
                goto fail_free_bounce_page_pool;

        bio_fallback_crypt_ctx_pool =
                mempool_create_slab_pool(num_prealloc_fallback_crypt_ctxs,
                                         bio_fallback_crypt_ctx_cache);
        if (!bio_fallback_crypt_ctx_pool)
                goto fail_free_crypt_ctx_cache;

        blk_crypto_fallback_inited = true;

        return 0;
fail_free_crypt_ctx_cache:
        kmem_cache_destroy(bio_fallback_crypt_ctx_cache);
fail_free_bounce_page_pool:
        mempool_destroy(blk_crypto_bounce_page_pool);
fail_free_keyslots:
        kfree(blk_crypto_keyslots);
fail_free_wq:
        destroy_workqueue(blk_crypto_wq);
fail_free_ksm:
        blk_ksm_destroy(&blk_crypto_ksm);
fail_free_bioset:
        bioset_exit(&crypto_bio_split);
out:
        return err;
}

/*
 * Prepare blk-crypto-fallback for the specified crypto mode.
 * Returns -ENOPKG if the needed crypto API support is missing.
 */
int blk_crypto_fallback_start_using_mode(enum blk_crypto_mode_num mode_num)
{
        const char *cipher_str = blk_crypto_modes[mode_num].cipher_str;
        struct blk_crypto_keyslot *slotp;
        unsigned int i;
        int err = 0;

        /*
         * Fast path
         * Ensure that updates to blk_crypto_keyslots[i].tfms[mode_num]
         * for each i are visible before we try to access them.
         */
        if (likely(smp_load_acquire(&tfms_inited[mode_num])))
                return 0;

        mutex_lock(&tfms_init_lock);
        if (tfms_inited[mode_num])
                goto out;

        err = blk_crypto_fallback_init();
        if (err)
                goto out;

        for (i = 0; i < blk_crypto_num_keyslots; i++) {
                slotp = &blk_crypto_keyslots[i];
                slotp->tfms[mode_num] = crypto_alloc_skcipher(cipher_str, 0, 0);
                if (IS_ERR(slotp->tfms[mode_num])) {
                        err = PTR_ERR(slotp->tfms[mode_num]);
                        if (err == -ENOENT) {
                                pr_warn_once("Missing crypto API support for \"%s\"\n",
                                             cipher_str);
                                err = -ENOPKG;
                        }
                        slotp->tfms[mode_num] = NULL;
                        goto out_free_tfms;
                }

                crypto_skcipher_set_flags(slotp->tfms[mode_num],
                                          CRYPTO_TFM_REQ_FORBID_WEAK_KEYS);
        }

        /*
         * Ensure that updates to blk_crypto_keyslots[i].tfms[mode_num]
         * for each i are visible before we set tfms_inited[mode_num].
         */
        smp_store_release(&tfms_inited[mode_num], true);
        goto out;

out_free_tfms:
        for (i = 0; i < blk_crypto_num_keyslots; i++) {
                slotp = &blk_crypto_keyslots[i];
                crypto_free_skcipher(slotp->tfms[mode_num]);
                slotp->tfms[mode_num] = NULL;
        }
out:
        mutex_unlock(&tfms_init_lock);
        return err;
}