linux/fs/crypto/bio.c
/*
 * This contains encryption functions for per-file encryption.
 *
 * Copyright (C) 2015, Google, Inc.
 * Copyright (C) 2015, Motorola Mobility
 *
 * Written by Michael Halcrow, 2014.
 *
 * Filename encryption additions
 *	Uday Savagaonkar, 2014
 * Encryption policy handling additions
 *	Ildar Muslukhov, 2014
 * Add fscrypt_pullback_bio_page()
 *	Jaegeuk Kim, 2015.
 *
 * This has not yet undergone a rigorous security audit.
 *
 * The usage of AES-XTS should conform to recommendations in NIST
 * Special Publication 800-38E and IEEE P1619/D16.
 */

#include <linux/pagemap.h>
#include <linux/module.h>
#include <linux/bio.h>
#include <linux/namei.h>
#include "fscrypt_private.h"

/*
 * Call fscrypt_decrypt_page on every single page, reusing the encryption
 * context.
 */
static void completion_pages(struct work_struct *work)
{
	struct fscrypt_ctx *ctx =
		container_of(work, struct fscrypt_ctx, r.work);
	struct bio *bio = ctx->r.bio;
	struct bio_vec *bv;
	int i;

	bio_for_each_segment_all(bv, bio, i) {
		struct page *page = bv->bv_page;
		int ret = fscrypt_decrypt_page(page->mapping->host, page,
				PAGE_SIZE, 0, page->index);

		if (ret) {
			WARN_ON_ONCE(1);
			SetPageError(page);
		} else {
			SetPageUptodate(page);
		}
		unlock_page(page);
	}
	fscrypt_release_ctx(ctx);
	bio_put(bio);
}

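/**
 * fscrypt_decrypt_bio_pages() - decrypt the pages of a read bio off the I/O path
 * @ctx: the fscrypt context reserved for this read
 * @bio: the bio whose pages were just read from disk
 *
 * Hand the bio off to fscrypt_read_workqueue; completion_pages() then
 * decrypts each page in place, marks it uptodate (or sets its error flag
 * on failure), unlocks it, and finally releases @ctx and puts @bio.
 */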
void fscrypt_decrypt_bio_pages(struct fscrypt_ctx *ctx, struct bio *bio)
{
	INIT_WORK(&ctx->r.work, completion_pages);
	ctx->r.bio = bio;
	queue_work(fscrypt_read_workqueue, &ctx->r.work);
}
EXPORT_SYMBOL(fscrypt_decrypt_bio_pages);

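/**
 * fscrypt_pullback_bio_page() - swap a bounce page back for its pagecache page
 * @page: in/out; on entry may point to an encrypted bounce page
 * @restore: if true, also release the bounce page via
 *	     fscrypt_restore_control_page()
 *
 * Bounce pages have no mapping, so if *page has one it is left untouched.
 * Otherwise *page is replaced with the control (pagecache) page recorded
 * in the bounce page's fscrypt context.
 */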
void fscrypt_pullback_bio_page(struct page **page, bool restore)
{
	struct fscrypt_ctx *ctx;
	struct page *bounce_page;

	/* Only bounce pages are unmapped; anything else is left untouched. */
	if ((*page)->mapping)
		return;

	/* The bounce page's fscrypt context is stashed in page_private. */
	bounce_page = *page;
	ctx = (struct fscrypt_ctx *)page_private(bounce_page);

	/* Hand the original (control) pagecache page back to the caller. */
	*page = ctx->w.control_page;

	if (restore)
		fscrypt_restore_control_page(bounce_page);
}
EXPORT_SYMBOL(fscrypt_pullback_bio_page);

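/**
 * fscrypt_zeroout_range() - write encrypted zeroes to a range of blocks
 * @inode: the encrypted inode that owns the blocks
 * @lblk: logical block number of the first block, used for the IVs
 * @pblk: physical (on-disk) block number of the first block
 * @len: number of blocks to zero out
 *
 * For each block in the range, encrypt a page of zeroes with @inode's key
 * and the current logical block number, then write the result
 * synchronously to the corresponding physical block.  The filesystem
 * block size must equal PAGE_SIZE.
 *
 * Return: 0 on success, or a negative errno on failure.
 */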
int fscrypt_zeroout_range(const struct inode *inode, pgoff_t lblk,
				sector_t pblk, unsigned int len)
{
	struct fscrypt_ctx *ctx;
	struct page *ciphertext_page = NULL;
	struct bio *bio;
	int ret, err = 0;

	BUG_ON(inode->i_sb->s_blocksize != PAGE_SIZE);

	ctx = fscrypt_get_ctx(inode, GFP_NOFS);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	ciphertext_page = fscrypt_alloc_bounce_page(ctx, GFP_NOWAIT);
	if (IS_ERR(ciphertext_page)) {
		err = PTR_ERR(ciphertext_page);
		goto errout;
	}

	while (len--) {
		err = fscrypt_do_page_crypto(inode, FS_ENCRYPT, lblk,
					     ZERO_PAGE(0), ciphertext_page,
					     PAGE_SIZE, 0, GFP_NOFS);
		if (err)
			goto errout;

		bio = bio_alloc(GFP_NOWAIT, 1);
		if (!bio) {
			err = -ENOMEM;
			goto errout;
		}
		bio->bi_bdev = inode->i_sb->s_bdev;
		bio->bi_iter.bi_sector =
			pblk << (inode->i_sb->s_blocksize_bits - 9);
		bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
		ret = bio_add_page(bio, ciphertext_page,
					inode->i_sb->s_blocksize, 0);
		if (ret != inode->i_sb->s_blocksize) {
			/* should never happen! */
			WARN_ON(1);
			bio_put(bio);
			err = -EIO;
			goto errout;
		}
		err = submit_bio_wait(bio);
		if (err == 0 && bio->bi_status)
			err = -EIO;
		bio_put(bio);
		if (err)
			goto errout;
		lblk++;
		pblk++;
	}
	err = 0;
errout:
	fscrypt_release_ctx(ctx);
	return err;
}
EXPORT_SYMBOL(fscrypt_zeroout_range);

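/*
 * Usage sketch (illustrative only; not part of the original file): a
 * filesystem that must zero freshly allocated blocks of an encrypted file
 * cannot simply issue a block-layer zeroout, because literal zeroes on
 * disk would decrypt to garbage rather than to zeroes.  A caller in such
 * a path (the variable names below are hypothetical) would instead do
 * something like:
 *
 *	err = fscrypt_zeroout_range(inode, lblk, pblk, nr_blocks);
 *	if (err)
 *		return err;
 */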