linux/fs/crypto/crypto.c
/*
 * This contains encryption functions for per-file encryption.
 *
 * Copyright (C) 2015, Google, Inc.
 * Copyright (C) 2015, Motorola Mobility
 *
 * Written by Michael Halcrow, 2014.
 *
 * Filename encryption additions
 *	Uday Savagaonkar, 2014
 * Encryption policy handling additions
 *	Ildar Muslukhov, 2014
 * Add fscrypt_pullback_bio_page()
 *	Jaegeuk Kim, 2015.
 *
 * This has not yet undergone a rigorous security audit.
 *
 * The usage of AES-XTS should conform to recommendations in NIST
 * Special Publication 800-38E and IEEE P1619/D16.
 */

#include <linux/pagemap.h>
#include <linux/mempool.h>
#include <linux/module.h>
#include <linux/scatterlist.h>
#include <linux/ratelimit.h>
#include <linux/dcache.h>
#include <linux/namei.h>
#include <crypto/aes.h>
#include <crypto/skcipher.h>
#include "fscrypt_private.h"

static unsigned int num_prealloc_crypto_pages = 32;
static unsigned int num_prealloc_crypto_ctxs = 128;

module_param(num_prealloc_crypto_pages, uint, 0444);
MODULE_PARM_DESC(num_prealloc_crypto_pages,
		"Number of crypto pages to preallocate");
module_param(num_prealloc_crypto_ctxs, uint, 0444);
MODULE_PARM_DESC(num_prealloc_crypto_ctxs,
		"Number of crypto contexts to preallocate");

static mempool_t *fscrypt_bounce_page_pool = NULL;

static LIST_HEAD(fscrypt_free_ctxs);
static DEFINE_SPINLOCK(fscrypt_ctx_lock);

static struct workqueue_struct *fscrypt_read_workqueue;
static DEFINE_MUTEX(fscrypt_init_mutex);

static struct kmem_cache *fscrypt_ctx_cachep;
struct kmem_cache *fscrypt_info_cachep;

void fscrypt_enqueue_decrypt_work(struct work_struct *work)
{
	queue_work(fscrypt_read_workqueue, work);
}
EXPORT_SYMBOL(fscrypt_enqueue_decrypt_work);
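
/*
 * Illustrative sketch (not from this file): how a filesystem's read-completion
 * path might hand CPU-heavy decryption off to the unbound, high-priority
 * fscrypt read workqueue instead of doing it in bio-completion context.  The
 * struct and function names below are hypothetical.
 */
struct example_post_read_ctx {
	struct work_struct work;	/* runs on fscrypt_read_workqueue */
	/* ... per-I/O state: the bio, page list, etc. ... */
};

static void example_decrypt_work(struct work_struct *work)
{
	/* Decrypt each page of the completed read here, then unlock pages. */
}

static void __maybe_unused example_read_endio(struct example_post_read_ctx *ctx)
{
	/* Defer decryption out of the (possibly atomic) completion context. */
	INIT_WORK(&ctx->work, example_decrypt_work);
	fscrypt_enqueue_decrypt_work(&ctx->work);
}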

/**
 * fscrypt_release_ctx() - Releases an encryption context
 * @ctx: The encryption context to release.
 *
 * If the encryption context was allocated from the pre-allocated pool, returns
 * it to that pool. Else, frees it.
 *
 * If there's a bounce page in the context, this frees that.
 */
void fscrypt_release_ctx(struct fscrypt_ctx *ctx)
{
	unsigned long flags;

	if (ctx->flags & FS_CTX_HAS_BOUNCE_BUFFER_FL && ctx->w.bounce_page) {
		mempool_free(ctx->w.bounce_page, fscrypt_bounce_page_pool);
		ctx->w.bounce_page = NULL;
	}
	ctx->w.control_page = NULL;
	if (ctx->flags & FS_CTX_REQUIRES_FREE_ENCRYPT_FL) {
		kmem_cache_free(fscrypt_ctx_cachep, ctx);
	} else {
		spin_lock_irqsave(&fscrypt_ctx_lock, flags);
		list_add(&ctx->free_list, &fscrypt_free_ctxs);
		spin_unlock_irqrestore(&fscrypt_ctx_lock, flags);
	}
}
EXPORT_SYMBOL(fscrypt_release_ctx);

/**
 * fscrypt_get_ctx() - Gets an encryption context
 * @inode:       The inode for which we are doing the crypto
 * @gfp_flags:   The gfp flag for memory allocation
 *
 * Allocates and initializes an encryption context.
 *
 * Return: An allocated and initialized encryption context on success; an
 * ERR_PTR() value otherwise.
 */
struct fscrypt_ctx *fscrypt_get_ctx(const struct inode *inode, gfp_t gfp_flags)
{
	struct fscrypt_ctx *ctx = NULL;
	struct fscrypt_info *ci = inode->i_crypt_info;
	unsigned long flags;

	if (ci == NULL)
		return ERR_PTR(-ENOKEY);

	/*
	 * We first try getting the ctx from a free list because in
	 * the common case the ctx will have an allocated and
	 * initialized crypto tfm, so it's probably a worthwhile
	 * optimization. For the bounce page, we first try getting it
	 * from the kernel allocator because that's just about as fast
	 * as getting it from a list and because a cache of free pages
	 * should generally be a "last resort" option for a filesystem
	 * to be able to do its job.
	 */
	spin_lock_irqsave(&fscrypt_ctx_lock, flags);
	ctx = list_first_entry_or_null(&fscrypt_free_ctxs,
					struct fscrypt_ctx, free_list);
	if (ctx)
		list_del(&ctx->free_list);
	spin_unlock_irqrestore(&fscrypt_ctx_lock, flags);
	if (!ctx) {
		ctx = kmem_cache_zalloc(fscrypt_ctx_cachep, gfp_flags);
		if (!ctx)
			return ERR_PTR(-ENOMEM);
		ctx->flags |= FS_CTX_REQUIRES_FREE_ENCRYPT_FL;
	} else {
		ctx->flags &= ~FS_CTX_REQUIRES_FREE_ENCRYPT_FL;
	}
	ctx->flags &= ~FS_CTX_HAS_BOUNCE_BUFFER_FL;
	return ctx;
}
EXPORT_SYMBOL(fscrypt_get_ctx);
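
/*
 * Illustrative sketch: the expected pairing of fscrypt_get_ctx() and
 * fscrypt_release_ctx().  The helper name is hypothetical; in practice the
 * ctx is held across an I/O operation (e.g. to carry a bounce page) rather
 * than released immediately.
 */
static int __maybe_unused example_ctx_round_trip(const struct inode *inode)
{
	struct fscrypt_ctx *ctx;

	ctx = fscrypt_get_ctx(inode, GFP_NOFS);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);	/* -ENOKEY or -ENOMEM */

	/* ... use ctx, e.g. to track a bounce page across a write ... */

	fscrypt_release_ctx(ctx);
	return 0;
}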

int fscrypt_do_page_crypto(const struct inode *inode, fscrypt_direction_t rw,
			   u64 lblk_num, struct page *src_page,
			   struct page *dest_page, unsigned int len,
			   unsigned int offs, gfp_t gfp_flags)
{
	struct {
		__le64 index;
		u8 padding[FS_IV_SIZE - sizeof(__le64)];
	} iv;
	struct skcipher_request *req = NULL;
	DECLARE_CRYPTO_WAIT(wait);
	struct scatterlist dst, src;
	struct fscrypt_info *ci = inode->i_crypt_info;
	struct crypto_skcipher *tfm = ci->ci_ctfm;
	int res = 0;

	BUG_ON(len == 0);

	BUILD_BUG_ON(sizeof(iv) != FS_IV_SIZE);
	BUILD_BUG_ON(AES_BLOCK_SIZE != FS_IV_SIZE);
	iv.index = cpu_to_le64(lblk_num);
	memset(iv.padding, 0, sizeof(iv.padding));

	if (ci->ci_essiv_tfm != NULL) {
		crypto_cipher_encrypt_one(ci->ci_essiv_tfm, (u8 *)&iv,
					  (u8 *)&iv);
	}

	req = skcipher_request_alloc(tfm, gfp_flags);
	if (!req)
		return -ENOMEM;

	skcipher_request_set_callback(
		req, CRYPTO_TFM_REQ_MAY_BACKLOG | CRYPTO_TFM_REQ_MAY_SLEEP,
		crypto_req_done, &wait);

	sg_init_table(&dst, 1);
	sg_set_page(&dst, dest_page, len, offs);
	sg_init_table(&src, 1);
	sg_set_page(&src, src_page, len, offs);
	skcipher_request_set_crypt(req, &src, &dst, len, &iv);
	if (rw == FS_DECRYPT)
		res = crypto_wait_req(crypto_skcipher_decrypt(req), &wait);
	else
		res = crypto_wait_req(crypto_skcipher_encrypt(req), &wait);
	skcipher_request_free(req);
	if (res) {
		fscrypt_err(inode->i_sb,
			    "%scryption failed for inode %lu, block %llu: %d",
			    (rw == FS_DECRYPT ? "de" : "en"),
			    inode->i_ino, lblk_num, res);
		return res;
	}
	return 0;
}

struct page *fscrypt_alloc_bounce_page(struct fscrypt_ctx *ctx,
				       gfp_t gfp_flags)
{
	ctx->w.bounce_page = mempool_alloc(fscrypt_bounce_page_pool, gfp_flags);
	if (ctx->w.bounce_page == NULL)
		return ERR_PTR(-ENOMEM);
	ctx->flags |= FS_CTX_HAS_BOUNCE_BUFFER_FL;
	return ctx->w.bounce_page;
}

/**
 * fscrypt_encrypt_page() - Encrypts a page
 * @inode:     The inode for which the encryption should take place
 * @page:      The page to encrypt. Must be locked for bounce-page
 *             encryption.
 * @len:       Length of data to encrypt in @page and encrypted
 *             data in returned page.
 * @offs:      Offset of data within @page and returned
 *             page holding encrypted data.
 * @lblk_num:  Logical block number. This must be unique for multiple
 *             calls with same inode, except when overwriting
 *             previously written data.
 * @gfp_flags: The gfp flag for memory allocation
 *
 * Encrypts @page using the ctx encryption context. Performs encryption
 * either in-place or into a newly allocated bounce page.
 * Called on the page write path.
 *
 * Bounce page allocation is the default.
 * In this case, the contents of @page are encrypted and stored in an
 * allocated bounce page. @page has to be locked and the caller must call
 * fscrypt_restore_control_page() on the returned ciphertext page to
 * release the bounce buffer and the encryption context.
 *
 * In-place encryption is used by setting the FS_CFLG_OWN_PAGES flag in
 * fscrypt_operations. Here, the input-page is returned with its content
 * encrypted.
 *
 * Return: A page with the encrypted content on success. Else, an
 * ERR_PTR() value.
 */
struct page *fscrypt_encrypt_page(const struct inode *inode,
				struct page *page,
				unsigned int len,
				unsigned int offs,
				u64 lblk_num, gfp_t gfp_flags)
{
	struct fscrypt_ctx *ctx;
	struct page *ciphertext_page = page;
	int err;

	BUG_ON(len % FS_CRYPTO_BLOCK_SIZE != 0);

	if (inode->i_sb->s_cop->flags & FS_CFLG_OWN_PAGES) {
		/* with inplace-encryption we just encrypt the page */
		err = fscrypt_do_page_crypto(inode, FS_ENCRYPT, lblk_num, page,
					     ciphertext_page, len, offs,
					     gfp_flags);
		if (err)
			return ERR_PTR(err);

		return ciphertext_page;
	}

	BUG_ON(!PageLocked(page));

	ctx = fscrypt_get_ctx(inode, gfp_flags);
	if (IS_ERR(ctx))
		return (struct page *)ctx;

	/* The encryption operation will require a bounce page. */
	ciphertext_page = fscrypt_alloc_bounce_page(ctx, gfp_flags);
	if (IS_ERR(ciphertext_page))
		goto errout;

	ctx->w.control_page = page;
	err = fscrypt_do_page_crypto(inode, FS_ENCRYPT, lblk_num,
				     page, ciphertext_page, len, offs,
				     gfp_flags);
	if (err) {
		ciphertext_page = ERR_PTR(err);
		goto errout;
	}
	SetPagePrivate(ciphertext_page);
	set_page_private(ciphertext_page, (unsigned long)ctx);
	lock_page(ciphertext_page);
	return ciphertext_page;

errout:
	fscrypt_release_ctx(ctx);
	return ciphertext_page;
}
EXPORT_SYMBOL(fscrypt_encrypt_page);
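
/*
 * Illustrative sketch of the bounce-page write path described above: encrypt
 * a locked pagecache page, submit the returned ciphertext page for I/O, and
 * release everything once the write completes.  The helper name and the
 * PAGE_SIZE/offset-0 unit are assumptions; real filesystems derive @lblk_num
 * from their own block mapping.
 */
static int __maybe_unused example_write_encrypted_page(const struct inode *inode,
						       struct page *page,
						       u64 lblk_num)
{
	struct page *ciphertext_page;

	/* @page must already be locked here (bounce-page mode). */
	ciphertext_page = fscrypt_encrypt_page(inode, page, PAGE_SIZE, 0,
					       lblk_num, GFP_NOFS);
	if (IS_ERR(ciphertext_page))
		return PTR_ERR(ciphertext_page);

	/* ... submit ciphertext_page for write-out and wait for it ... */

	/* Unlock the original page, free the bounce page, release the ctx. */
	fscrypt_restore_control_page(ciphertext_page);
	return 0;
}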

/**
 * fscrypt_decrypt_page() - Decrypts a page in-place
 * @inode:     The corresponding inode for the page to decrypt.
 * @page:      The page to decrypt. Must be locked in case
 *             it is a writeback page (FS_CFLG_OWN_PAGES unset).
 * @len:       Number of bytes in @page to be decrypted.
 * @offs:      Start of data in @page.
 * @lblk_num:  Logical block number.
 *
 * Decrypts page in-place using the ctx encryption context.
 *
 * Called from the read completion callback.
 *
 * Return: Zero on success, non-zero otherwise.
 */
int fscrypt_decrypt_page(const struct inode *inode, struct page *page,
			unsigned int len, unsigned int offs, u64 lblk_num)
{
	if (!(inode->i_sb->s_cop->flags & FS_CFLG_OWN_PAGES))
		BUG_ON(!PageLocked(page));

	return fscrypt_do_page_crypto(inode, FS_DECRYPT, lblk_num, page, page,
				      len, offs, GFP_NOFS);
}
EXPORT_SYMBOL(fscrypt_decrypt_page);
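
/*
 * Illustrative sketch: decrypting a freshly-read page in place from a read
 * completion path (typically from work queued via
 * fscrypt_enqueue_decrypt_work()).  The helper name and the full-page unit
 * are assumptions.
 */
static int __maybe_unused example_decrypt_read_page(const struct inode *inode,
						    struct page *page,
						    u64 lblk_num)
{
	/* Decrypt the whole page in place, starting at offset 0. */
	return fscrypt_decrypt_page(inode, page, PAGE_SIZE, 0, lblk_num);
}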

/*
 * Validate dentries for encrypted directories to make sure we aren't
 * potentially caching stale data after a key has been added or
 * removed.
 */
static int fscrypt_d_revalidate(struct dentry *dentry, unsigned int flags)
{
	struct dentry *dir;
	int dir_has_key, cached_with_key;

	if (flags & LOOKUP_RCU)
		return -ECHILD;

	dir = dget_parent(dentry);
	if (!IS_ENCRYPTED(d_inode(dir))) {
		dput(dir);
		return 0;
	}

	spin_lock(&dentry->d_lock);
	cached_with_key = dentry->d_flags & DCACHE_ENCRYPTED_WITH_KEY;
	spin_unlock(&dentry->d_lock);
	dir_has_key = (d_inode(dir)->i_crypt_info != NULL);
	dput(dir);

	/*
	 * If the dentry was cached without the key, and it is a
	 * negative dentry, it might be a valid name.  We can't check
	 * if the key has since been made available due to locking
	 * reasons, so we fail the validation so ext4_lookup() can do
	 * this check.
	 *
	 * We also fail the validation if the dentry was created with
	 * the key present, but we no longer have the key, or vice versa.
	 */
	if ((!cached_with_key && d_is_negative(dentry)) ||
	    (!cached_with_key && dir_has_key) ||
	    (cached_with_key && !dir_has_key))
		return 0;
	return 1;
}

const struct dentry_operations fscrypt_d_ops = {
	.d_revalidate = fscrypt_d_revalidate,
};
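
/*
 * Illustrative sketch of how a filesystem's lookup hook might attach these
 * dentry_operations, similar in spirit to the fscrypt_prepare_lookup() hook;
 * the helper name is hypothetical.  Recording whether the directory key was
 * present at lookup time is what lets fscrypt_d_revalidate() detect a later
 * key add or remove.
 */
static void __maybe_unused example_prepare_lookup(struct inode *dir,
						  struct dentry *dentry)
{
	if (dir->i_crypt_info) {
		/* Key was available when this name was looked up. */
		spin_lock(&dentry->d_lock);
		dentry->d_flags |= DCACHE_ENCRYPTED_WITH_KEY;
		spin_unlock(&dentry->d_lock);
	}
	/* Have the dcache call fscrypt_d_revalidate() on later cache hits. */
	d_set_d_op(dentry, &fscrypt_d_ops);
}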

void fscrypt_restore_control_page(struct page *page)
{
	struct fscrypt_ctx *ctx;

	ctx = (struct fscrypt_ctx *)page_private(page);
	set_page_private(page, (unsigned long)NULL);
	ClearPagePrivate(page);
	unlock_page(page);
	fscrypt_release_ctx(ctx);
}
EXPORT_SYMBOL(fscrypt_restore_control_page);

static void fscrypt_destroy(void)
{
	struct fscrypt_ctx *pos, *n;

	list_for_each_entry_safe(pos, n, &fscrypt_free_ctxs, free_list)
		kmem_cache_free(fscrypt_ctx_cachep, pos);
	INIT_LIST_HEAD(&fscrypt_free_ctxs);
	mempool_destroy(fscrypt_bounce_page_pool);
	fscrypt_bounce_page_pool = NULL;
}

/**
 * fscrypt_initialize() - allocate major buffers for fs encryption.
 * @cop_flags:  fscrypt operations flags
 *
 * We only call this when we start accessing encrypted files, since it
 * results in memory getting allocated that wouldn't otherwise be used.
 *
 * Return: Zero on success, non-zero otherwise.
 */
int fscrypt_initialize(unsigned int cop_flags)
{
	int i, res = -ENOMEM;

	/* No need to allocate a bounce page pool if this FS won't use it. */
	if (cop_flags & FS_CFLG_OWN_PAGES)
		return 0;

	mutex_lock(&fscrypt_init_mutex);
	if (fscrypt_bounce_page_pool)
		goto already_initialized;

	for (i = 0; i < num_prealloc_crypto_ctxs; i++) {
		struct fscrypt_ctx *ctx;

		ctx = kmem_cache_zalloc(fscrypt_ctx_cachep, GFP_NOFS);
		if (!ctx)
			goto fail;
		list_add(&ctx->free_list, &fscrypt_free_ctxs);
	}

	fscrypt_bounce_page_pool =
		mempool_create_page_pool(num_prealloc_crypto_pages, 0);
	if (!fscrypt_bounce_page_pool)
		goto fail;

already_initialized:
	mutex_unlock(&fscrypt_init_mutex);
	return 0;
fail:
	fscrypt_destroy();
	mutex_unlock(&fscrypt_init_mutex);
	return res;
}
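
/*
 * Illustrative sketch: a filesystem typically calls fscrypt_initialize() once,
 * before its first crypto operation on an inode, so that the free-ctx list
 * and bounce-page pool exist.  The helper name is hypothetical and the rest
 * of the key-setup path is elided.
 */
static int __maybe_unused example_setup_crypt(struct inode *inode)
{
	int err;

	err = fscrypt_initialize(inode->i_sb->s_cop->flags);
	if (err)
		return err;

	/* ... then look up the master key and set up inode->i_crypt_info ... */
	return 0;
}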

void fscrypt_msg(struct super_block *sb, const char *level,
		 const char *fmt, ...)
{
	static DEFINE_RATELIMIT_STATE(rs, DEFAULT_RATELIMIT_INTERVAL,
				      DEFAULT_RATELIMIT_BURST);
	struct va_format vaf;
	va_list args;

	if (!__ratelimit(&rs))
		return;

	va_start(args, fmt);
	vaf.fmt = fmt;
	vaf.va = &args;
	if (sb)
		printk("%sfscrypt (%s): %pV\n", level, sb->s_id, &vaf);
	else
		printk("%sfscrypt: %pV\n", level, &vaf);
	va_end(args);
}
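
/*
 * Illustrative sketch: callers are expected to use the fscrypt_warn()/
 * fscrypt_err() wrappers from fscrypt_private.h (as fscrypt_do_page_crypto()
 * does above) rather than calling fscrypt_msg() directly.  The helper name
 * and message here are made up.
 */
static void __maybe_unused example_report_problem(struct super_block *sb,
						  unsigned long ino)
{
	fscrypt_warn(sb, "inode %lu uses an unsupported encryption mode", ino);
}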

/**
 * fscrypt_init() - Set up for fs encryption.
 */
static int __init fscrypt_init(void)
{
	/*
	 * Use an unbound workqueue to allow bios to be decrypted in parallel
	 * even when they happen to complete on the same CPU.  This sacrifices
	 * locality, but it's worthwhile since decryption is CPU-intensive.
	 *
	 * Also use a high-priority workqueue to prioritize decryption work,
	 * which blocks reads from completing, over regular application tasks.
	 */
	fscrypt_read_workqueue = alloc_workqueue("fscrypt_read_queue",
						 WQ_UNBOUND | WQ_HIGHPRI,
						 num_online_cpus());
	if (!fscrypt_read_workqueue)
		goto fail;

	fscrypt_ctx_cachep = KMEM_CACHE(fscrypt_ctx, SLAB_RECLAIM_ACCOUNT);
	if (!fscrypt_ctx_cachep)
		goto fail_free_queue;

	fscrypt_info_cachep = KMEM_CACHE(fscrypt_info, SLAB_RECLAIM_ACCOUNT);
	if (!fscrypt_info_cachep)
		goto fail_free_ctx;

	return 0;

fail_free_ctx:
	kmem_cache_destroy(fscrypt_ctx_cachep);
fail_free_queue:
	destroy_workqueue(fscrypt_read_workqueue);
fail:
	return -ENOMEM;
}
module_init(fscrypt_init)

/**
 * fscrypt_exit() - Shutdown the fs encryption system
 */
static void __exit fscrypt_exit(void)
{
	fscrypt_destroy();

	if (fscrypt_read_workqueue)
		destroy_workqueue(fscrypt_read_workqueue);
	kmem_cache_destroy(fscrypt_ctx_cachep);
	kmem_cache_destroy(fscrypt_info_cachep);

	fscrypt_essiv_cleanup();
}
module_exit(fscrypt_exit);

MODULE_LICENSE("GPL");