linux/net/ceph/crypto.c
// SPDX-License-Identifier: GPL-2.0

#include <linux/ceph/ceph_debug.h>

#include <linux/err.h>
#include <linux/scatterlist.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <crypto/aes.h>
#include <crypto/skcipher.h>
#include <linux/key-type.h>
#include <linux/sched/mm.h>

#include <keys/ceph-type.h>
#include <keys/user-type.h>
#include <linux/ceph/decode.h>
#include "crypto.h"

/*
 * Set ->key and ->tfm.  The rest of the key should be filled in before
 * this function is called.
 */
static int set_secret(struct ceph_crypto_key *key, void *buf)
{
        unsigned int noio_flag;
        int ret;

        key->key = NULL;
        key->tfm = NULL;

        switch (key->type) {
        case CEPH_CRYPTO_NONE:
                return 0; /* nothing to do */
        case CEPH_CRYPTO_AES:
                break;
        default:
                return -ENOTSUPP;
        }

        if (!key->len)
                return -EINVAL;

        key->key = kmemdup(buf, key->len, GFP_NOIO);
        if (!key->key) {
                ret = -ENOMEM;
                goto fail;
        }

        /* crypto_alloc_sync_skcipher() allocates with GFP_KERNEL */
        noio_flag = memalloc_noio_save();
        key->tfm = crypto_alloc_sync_skcipher("cbc(aes)", 0, 0);
        memalloc_noio_restore(noio_flag);
        if (IS_ERR(key->tfm)) {
                ret = PTR_ERR(key->tfm);
                key->tfm = NULL;
                goto fail;
        }

        ret = crypto_sync_skcipher_setkey(key->tfm, key->key, key->len);
        if (ret)
                goto fail;

        return 0;

fail:
        ceph_crypto_key_destroy(key);
        return ret;
}

int ceph_crypto_key_clone(struct ceph_crypto_key *dst,
                          const struct ceph_crypto_key *src)
{
        memcpy(dst, src, sizeof(struct ceph_crypto_key));
        return set_secret(dst, src->key);
}

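/*
 * Encoded key layout, as written by ceph_crypto_key_encode() and read
 * back by ceph_crypto_key_decode() below (integers are little-endian):
 *
 *      u16                     type     CEPH_CRYPTO_NONE or CEPH_CRYPTO_AES
 *      struct ceph_timespec    created
 *      u16                     len
 *      u8                      secret[len]
 */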
int ceph_crypto_key_encode(struct ceph_crypto_key *key, void **p, void *end)
{
        if (*p + sizeof(u16) + sizeof(key->created) +
            sizeof(u16) + key->len > end)
                return -ERANGE;
        ceph_encode_16(p, key->type);
        ceph_encode_copy(p, &key->created, sizeof(key->created));
        ceph_encode_16(p, key->len);
        ceph_encode_copy(p, key->key, key->len);
        return 0;
}

int ceph_crypto_key_decode(struct ceph_crypto_key *key, void **p, void *end)
{
        int ret;

        ceph_decode_need(p, end, 2*sizeof(u16) + sizeof(key->created), bad);
        key->type = ceph_decode_16(p);
        ceph_decode_copy(p, &key->created, sizeof(key->created));
        key->len = ceph_decode_16(p);
        ceph_decode_need(p, end, key->len, bad);
        ret = set_secret(key, *p);
        memzero_explicit(*p, key->len);
        *p += key->len;
        return ret;

bad:
        dout("failed to decode crypto key\n");
        return -EINVAL;
}

int ceph_crypto_key_unarmor(struct ceph_crypto_key *key, const char *inkey)
{
        int inlen = strlen(inkey);
        int blen = inlen * 3 / 4;
        void *buf, *p;
        int ret;

        dout("crypto_key_unarmor %s\n", inkey);
        buf = kmalloc(blen, GFP_NOFS);
        if (!buf)
                return -ENOMEM;
        blen = ceph_unarmor(buf, inkey, inkey+inlen);
        if (blen < 0) {
                kfree(buf);
                return blen;
        }

        p = buf;
        ret = ceph_crypto_key_decode(key, &p, p + blen);
        kfree(buf);
        if (ret)
                return ret;
        dout("crypto_key_unarmor key %p type %d len %d\n", key,
             key->type, key->len);
        return 0;
}
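
/*
 * Illustrative caller pattern (assumed, not part of this file): the
 * typical user hands in a base64 "secret=..." string and must destroy
 * the resulting key when done with it:
 *
 *      struct ceph_crypto_key key;
 *      int err;
 *
 *      err = ceph_crypto_key_unarmor(&key, secret_str);
 *      if (err)
 *              return err;
 *      ...
 *      ceph_crypto_key_destroy(&key);
 */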

void ceph_crypto_key_destroy(struct ceph_crypto_key *key)
{
        if (key) {
                kfree_sensitive(key->key);
                key->key = NULL;
                if (key->tfm) {
                        crypto_free_sync_skcipher(key->tfm);
                        key->tfm = NULL;
                }
        }
}

static const u8 *aes_iv = (u8 *)CEPH_AES_IV;

/*
 * Should be used for buffers allocated with ceph_kvmalloc().
 * Currently these are encrypt out-buffer (ceph_buffer) and decrypt
 * in-buffer (msg front).
 *
 * Dispose of @sgt with teardown_sgtable().
 *
 * @prealloc_sg is to avoid memory allocation inside sg_alloc_table()
 * in cases where a single sg is sufficient.  No attempt to reduce the
 * number of sgs by squeezing physically contiguous pages together is
 * made though, for simplicity.
 */
static int setup_sgtable(struct sg_table *sgt, struct scatterlist *prealloc_sg,
                         const void *buf, unsigned int buf_len)
{
        struct scatterlist *sg;
        const bool is_vmalloc = is_vmalloc_addr(buf);
        unsigned int off = offset_in_page(buf);
        unsigned int chunk_cnt = 1;
        unsigned int chunk_len = PAGE_ALIGN(off + buf_len);
        int i;
        int ret;

        if (buf_len == 0) {
                memset(sgt, 0, sizeof(*sgt));
                return -EINVAL;
        }

        if (is_vmalloc) {
                chunk_cnt = chunk_len >> PAGE_SHIFT;
                chunk_len = PAGE_SIZE;
        }

        if (chunk_cnt > 1) {
                ret = sg_alloc_table(sgt, chunk_cnt, GFP_NOFS);
                if (ret)
                        return ret;
        } else {
                WARN_ON(chunk_cnt != 1);
                sg_init_table(prealloc_sg, 1);
                sgt->sgl = prealloc_sg;
                sgt->nents = sgt->orig_nents = 1;
        }

        for_each_sg(sgt->sgl, sg, sgt->orig_nents, i) {
                struct page *page;
                unsigned int len = min(chunk_len - off, buf_len);

                if (is_vmalloc)
                        page = vmalloc_to_page(buf);
                else
                        page = virt_to_page(buf);

                sg_set_page(sg, page, len, off);

                off = 0;
                buf += len;
                buf_len -= len;
        }
        WARN_ON(buf_len != 0);

        return 0;
}

static void teardown_sgtable(struct sg_table *sgt)
{
        if (sgt->orig_nents > 1)
                sg_free_table(sgt);
}
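
/*
 * Illustrative pairing only (this mirrors what ceph_aes_crypt() below
 * does, it is not an additional API):
 *
 *      struct sg_table sgt;
 *      struct scatterlist prealloc_sg;
 *
 *      ret = setup_sgtable(&sgt, &prealloc_sg, buf, buf_len);
 *      if (ret)
 *              return ret;
 *      ...hand sgt.sgl to the crypto API...
 *      teardown_sgtable(&sgt);
 */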

static int ceph_aes_crypt(const struct ceph_crypto_key *key, bool encrypt,
                          void *buf, int buf_len, int in_len, int *pout_len)
{
        SYNC_SKCIPHER_REQUEST_ON_STACK(req, key->tfm);
        struct sg_table sgt;
        struct scatterlist prealloc_sg;
        char iv[AES_BLOCK_SIZE] __aligned(8);
        int pad_byte = AES_BLOCK_SIZE - (in_len & (AES_BLOCK_SIZE - 1));
        int crypt_len = encrypt ? in_len + pad_byte : in_len;
        int ret;

        WARN_ON(crypt_len > buf_len);
        if (encrypt)
                memset(buf + in_len, pad_byte, pad_byte);
        ret = setup_sgtable(&sgt, &prealloc_sg, buf, crypt_len);
        if (ret)
                return ret;

        memcpy(iv, aes_iv, AES_BLOCK_SIZE);
        skcipher_request_set_sync_tfm(req, key->tfm);
        skcipher_request_set_callback(req, 0, NULL, NULL);
        skcipher_request_set_crypt(req, sgt.sgl, sgt.sgl, crypt_len, iv);

        /*
        print_hex_dump(KERN_ERR, "key: ", DUMP_PREFIX_NONE, 16, 1,
                       key->key, key->len, 1);
        print_hex_dump(KERN_ERR, " in: ", DUMP_PREFIX_NONE, 16, 1,
                       buf, crypt_len, 1);
        */
        if (encrypt)
                ret = crypto_skcipher_encrypt(req);
        else
                ret = crypto_skcipher_decrypt(req);
        skcipher_request_zero(req);
        if (ret) {
                pr_err("%s %scrypt failed: %d\n", __func__,
                       encrypt ? "en" : "de", ret);
                goto out_sgt;
        }
        /*
        print_hex_dump(KERN_ERR, "out: ", DUMP_PREFIX_NONE, 16, 1,
                       buf, crypt_len, 1);
        */

        if (encrypt) {
                *pout_len = crypt_len;
        } else {
                pad_byte = *(char *)(buf + in_len - 1);
                if (pad_byte > 0 && pad_byte <= AES_BLOCK_SIZE &&
                    in_len >= pad_byte) {
                        *pout_len = in_len - pad_byte;
                } else {
                        pr_err("%s got bad padding %d on in_len %d\n",
                               __func__, pad_byte, in_len);
                        ret = -EPERM;
                        goto out_sgt;
                }
        }

out_sgt:
        teardown_sgtable(&sgt);
        return ret;
}

int ceph_crypt(const struct ceph_crypto_key *key, bool encrypt,
               void *buf, int buf_len, int in_len, int *pout_len)
{
        switch (key->type) {
        case CEPH_CRYPTO_NONE:
                *pout_len = in_len;
                return 0;
        case CEPH_CRYPTO_AES:
                return ceph_aes_crypt(key, encrypt, buf, buf_len, in_len,
                                      pout_len);
        default:
                return -ENOTSUPP;
        }
}
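
/*
 * Illustrative use of ceph_crypt() (assumed caller, not from this file).
 * Encryption is done in place and the PKCS#7-style padding always adds
 * at least one byte, so @buf must have room for @in_len rounded up to
 * the next AES_BLOCK_SIZE boundary:
 *
 *      int out_len;
 *      int err;
 *
 *      err = ceph_crypt(key, true, buf,
 *                       round_up(in_len + 1, AES_BLOCK_SIZE),
 *                       in_len, &out_len);
 *
 * On success out_len is the padded ciphertext length; on decryption it
 * is the plaintext length with the padding stripped.
 */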

static int ceph_key_preparse(struct key_preparsed_payload *prep)
{
        struct ceph_crypto_key *ckey;
        size_t datalen = prep->datalen;
        int ret;
        void *p;

        ret = -EINVAL;
        if (datalen <= 0 || datalen > 32767 || !prep->data)
                goto err;

        ret = -ENOMEM;
        ckey = kmalloc(sizeof(*ckey), GFP_KERNEL);
        if (!ckey)
                goto err;

        /* TODO ceph_crypto_key_decode should really take const input */
        p = (void *)prep->data;
        ret = ceph_crypto_key_decode(ckey, &p, (char*)prep->data+datalen);
        if (ret < 0)
                goto err_ckey;

        prep->payload.data[0] = ckey;
        prep->quotalen = datalen;
        return 0;

err_ckey:
        kfree(ckey);
err:
        return ret;
}

static void ceph_key_free_preparse(struct key_preparsed_payload *prep)
{
        struct ceph_crypto_key *ckey = prep->payload.data[0];
        ceph_crypto_key_destroy(ckey);
        kfree(ckey);
}

static void ceph_key_destroy(struct key *key)
{
        struct ceph_crypto_key *ckey = key->payload.data[0];

        ceph_crypto_key_destroy(ckey);
        kfree(ckey);
}

struct key_type key_type_ceph = {
        .name           = "ceph",
        .preparse       = ceph_key_preparse,
        .free_preparse  = ceph_key_free_preparse,
        .instantiate    = generic_key_instantiate,
        .destroy        = ceph_key_destroy,
};
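
/*
 * Illustrative only: keys of this type are typically added from
 * userspace with add_key(2), the payload being the binary encoding
 * consumed by ceph_crypto_key_decode() above, e.g. (hypothetical
 * values):
 *
 *      add_key("ceph", "client.admin", payload, payload_len,
 *              KEY_SPEC_PROCESS_KEYRING);
 *
 * and later looked up by name via request_key().
 */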

int __init ceph_crypto_init(void)
{
        return register_key_type(&key_type_ceph);
}

void ceph_crypto_shutdown(void)
{
        unregister_key_type(&key_type_ceph);
}
 362