linux/net/ceph/crypto.c
// SPDX-License-Identifier: GPL-2.0

#include <linux/ceph/ceph_debug.h>

#include <linux/err.h>
#include <linux/scatterlist.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <crypto/aes.h>
#include <crypto/skcipher.h>
#include <linux/key-type.h>
#include <linux/sched/mm.h>

#include <keys/ceph-type.h>
#include <keys/user-type.h>
#include <linux/ceph/decode.h>
#include "crypto.h"

/*
 * Set ->key and ->tfm.  The rest of the key should be filled in before
 * this function is called.
 */
static int set_secret(struct ceph_crypto_key *key, void *buf)
{
        unsigned int noio_flag;
        int ret;

        key->key = NULL;
        key->tfm = NULL;

        switch (key->type) {
        case CEPH_CRYPTO_NONE:
                return 0; /* nothing to do */
        case CEPH_CRYPTO_AES:
                break;
        default:
                return -ENOTSUPP;
        }

        if (!key->len)
                return -EINVAL;

        key->key = kmemdup(buf, key->len, GFP_NOIO);
        if (!key->key) {
                ret = -ENOMEM;
                goto fail;
        }

        /* crypto_alloc_sync_skcipher() allocates with GFP_KERNEL */
        noio_flag = memalloc_noio_save();
        key->tfm = crypto_alloc_sync_skcipher("cbc(aes)", 0, 0);
        memalloc_noio_restore(noio_flag);
        if (IS_ERR(key->tfm)) {
                ret = PTR_ERR(key->tfm);
                key->tfm = NULL;
                goto fail;
        }

        ret = crypto_sync_skcipher_setkey(key->tfm, key->key, key->len);
        if (ret)
                goto fail;

        return 0;

fail:
        ceph_crypto_key_destroy(key);
        return ret;
}

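/*
 * The memcpy() below copies src's ->key and ->tfm pointers as well, but
 * set_secret() immediately resets them and allocates a fresh secret
 * buffer and transform for dst, so dst never shares state with src.
 */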
int ceph_crypto_key_clone(struct ceph_crypto_key *dst,
                          const struct ceph_crypto_key *src)
{
        memcpy(dst, src, sizeof(struct ceph_crypto_key));
        return set_secret(dst, src->key);
}

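/*
 * Encoded key layout, as produced and consumed by the two functions
 * below (u16 fields are little-endian):
 *
 *      u16 type        - CEPH_CRYPTO_NONE or CEPH_CRYPTO_AES
 *      ->created       - creation timestamp, copied verbatim
 *      u16 len         - length of the secret in bytes
 *      u8 key[len]     - the secret itself
 */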
int ceph_crypto_key_encode(struct ceph_crypto_key *key, void **p, void *end)
{
        if (*p + sizeof(u16) + sizeof(key->created) +
            sizeof(u16) + key->len > end)
                return -ERANGE;
        ceph_encode_16(p, key->type);
        ceph_encode_copy(p, &key->created, sizeof(key->created));
        ceph_encode_16(p, key->len);
        ceph_encode_copy(p, key->key, key->len);
        return 0;
}

int ceph_crypto_key_decode(struct ceph_crypto_key *key, void **p, void *end)
{
        int ret;

        ceph_decode_need(p, end, 2*sizeof(u16) + sizeof(key->created), bad);
        key->type = ceph_decode_16(p);
        ceph_decode_copy(p, &key->created, sizeof(key->created));
        key->len = ceph_decode_16(p);
        ceph_decode_need(p, end, key->len, bad);
        ret = set_secret(key, *p);
        *p += key->len;
        return ret;

bad:
        dout("failed to decode crypto key\n");
        return -EINVAL;
}

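/*
 * Decode a base64-armored key string into a temporary binary blob and
 * parse it with ceph_crypto_key_decode().  Base64 encodes 3 bytes as 4
 * characters, so inlen * 3 / 4 is an upper bound on the decoded size.
 */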
int ceph_crypto_key_unarmor(struct ceph_crypto_key *key, const char *inkey)
{
        int inlen = strlen(inkey);
        int blen = inlen * 3 / 4;
        void *buf, *p;
        int ret;

        dout("crypto_key_unarmor %s\n", inkey);
        buf = kmalloc(blen, GFP_NOFS);
        if (!buf)
                return -ENOMEM;
        blen = ceph_unarmor(buf, inkey, inkey+inlen);
        if (blen < 0) {
                kfree(buf);
                return blen;
        }

        p = buf;
        ret = ceph_crypto_key_decode(key, &p, p + blen);
        kfree(buf);
        if (ret)
                return ret;
        dout("crypto_key_unarmor key %p type %d len %d\n", key,
             key->type, key->len);
        return 0;
}

void ceph_crypto_key_destroy(struct ceph_crypto_key *key)
{
        if (key) {
                kfree(key->key);
                key->key = NULL;
                crypto_free_sync_skcipher(key->tfm);
                key->tfm = NULL;
        }
}

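/*
 * All AES-CBC operations in this file use a single fixed IV
 * (CEPH_AES_IV, defined in crypto.h) rather than a per-message IV.
 */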
static const u8 *aes_iv = (u8 *)CEPH_AES_IV;

/*
 * Should be used for buffers allocated with ceph_kvmalloc().
 * Currently these are encrypt out-buffer (ceph_buffer) and decrypt
 * in-buffer (msg front).
 *
 * Dispose of @sgt with teardown_sgtable().
 *
 * @prealloc_sg is to avoid memory allocation inside sg_alloc_table()
 * in cases where a single sg is sufficient.  For simplicity, no attempt
 * is made to reduce the number of sgs by squeezing physically
 * contiguous pages together.
 */
static int setup_sgtable(struct sg_table *sgt, struct scatterlist *prealloc_sg,
                         const void *buf, unsigned int buf_len)
{
        struct scatterlist *sg;
        const bool is_vmalloc = is_vmalloc_addr(buf);
        unsigned int off = offset_in_page(buf);
        unsigned int chunk_cnt = 1;
        unsigned int chunk_len = PAGE_ALIGN(off + buf_len);
        int i;
        int ret;

        if (buf_len == 0) {
                memset(sgt, 0, sizeof(*sgt));
                return -EINVAL;
        }

        if (is_vmalloc) {
                chunk_cnt = chunk_len >> PAGE_SHIFT;
                chunk_len = PAGE_SIZE;
        }

        if (chunk_cnt > 1) {
                ret = sg_alloc_table(sgt, chunk_cnt, GFP_NOFS);
                if (ret)
                        return ret;
        } else {
                WARN_ON(chunk_cnt != 1);
                sg_init_table(prealloc_sg, 1);
                sgt->sgl = prealloc_sg;
                sgt->nents = sgt->orig_nents = 1;
        }

        for_each_sg(sgt->sgl, sg, sgt->orig_nents, i) {
                struct page *page;
                unsigned int len = min(chunk_len - off, buf_len);

                if (is_vmalloc)
                        page = vmalloc_to_page(buf);
                else
                        page = virt_to_page(buf);

                sg_set_page(sg, page, len, off);

                off = 0;
                buf += len;
                buf_len -= len;
        }
        WARN_ON(buf_len != 0);

        return 0;
}

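/*
 * Only tables that setup_sgtable() actually allocated with
 * sg_alloc_table() (the multi-entry case) need freeing; a single-entry
 * table lives in the caller's prealloc_sg.
 */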
static void teardown_sgtable(struct sg_table *sgt)
{
        if (sgt->orig_nents > 1)
                sg_free_table(sgt);
}

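/*
 * AES-CBC with PKCS#7-style padding: on encryption, pad_byte (1..16)
 * bytes of value pad_byte are appended so that crypt_len is a multiple
 * of AES_BLOCK_SIZE; on decryption, the last plaintext byte tells how
 * much padding to strip.
 *
 * Worked example: in_len = 20 gives pad_byte = 12 and crypt_len = 32,
 * so @buf must hold at least 32 bytes.
 */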
static int ceph_aes_crypt(const struct ceph_crypto_key *key, bool encrypt,
                          void *buf, int buf_len, int in_len, int *pout_len)
{
        SYNC_SKCIPHER_REQUEST_ON_STACK(req, key->tfm);
        struct sg_table sgt;
        struct scatterlist prealloc_sg;
        char iv[AES_BLOCK_SIZE] __aligned(8);
        int pad_byte = AES_BLOCK_SIZE - (in_len & (AES_BLOCK_SIZE - 1));
        int crypt_len = encrypt ? in_len + pad_byte : in_len;
        int ret;

        WARN_ON(crypt_len > buf_len);
        if (encrypt)
                memset(buf + in_len, pad_byte, pad_byte);
        ret = setup_sgtable(&sgt, &prealloc_sg, buf, crypt_len);
        if (ret)
                return ret;

        memcpy(iv, aes_iv, AES_BLOCK_SIZE);
        skcipher_request_set_sync_tfm(req, key->tfm);
        skcipher_request_set_callback(req, 0, NULL, NULL);
        skcipher_request_set_crypt(req, sgt.sgl, sgt.sgl, crypt_len, iv);

        /*
        print_hex_dump(KERN_ERR, "key: ", DUMP_PREFIX_NONE, 16, 1,
                       key->key, key->len, 1);
        print_hex_dump(KERN_ERR, " in: ", DUMP_PREFIX_NONE, 16, 1,
                       buf, crypt_len, 1);
        */
        if (encrypt)
                ret = crypto_skcipher_encrypt(req);
        else
                ret = crypto_skcipher_decrypt(req);
        skcipher_request_zero(req);
        if (ret) {
                pr_err("%s %scrypt failed: %d\n", __func__,
                       encrypt ? "en" : "de", ret);
                goto out_sgt;
        }
        /*
        print_hex_dump(KERN_ERR, "out: ", DUMP_PREFIX_NONE, 16, 1,
                       buf, crypt_len, 1);
        */

        if (encrypt) {
                *pout_len = crypt_len;
        } else {
                pad_byte = *(char *)(buf + in_len - 1);
                if (pad_byte > 0 && pad_byte <= AES_BLOCK_SIZE &&
                    in_len >= pad_byte) {
                        *pout_len = in_len - pad_byte;
                } else {
                        pr_err("%s got bad padding %d on in_len %d\n",
                               __func__, pad_byte, in_len);
                        ret = -EPERM;
                        goto out_sgt;
                }
        }

out_sgt:
        teardown_sgtable(&sgt);
        return ret;
}

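/*
 * In-place encrypt/decrypt.  For encryption the caller must leave room
 * in @buf for up to AES_BLOCK_SIZE bytes of padding beyond @in_len.
 *
 * Minimal usage sketch, assuming a key prepared with
 * ceph_crypto_key_decode() or ceph_crypto_key_unarmor():
 *
 *      int out_len;
 *      int ret = ceph_crypt(key, true, buf, in_len + AES_BLOCK_SIZE,
 *                           in_len, &out_len);
 */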
int ceph_crypt(const struct ceph_crypto_key *key, bool encrypt,
               void *buf, int buf_len, int in_len, int *pout_len)
{
        switch (key->type) {
        case CEPH_CRYPTO_NONE:
                *pout_len = in_len;
                return 0;
        case CEPH_CRYPTO_AES:
                return ceph_aes_crypt(key, encrypt, buf, buf_len, in_len,
                                      pout_len);
        default:
                return -ENOTSUPP;
        }
}

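/*
 * Keyring glue: the "ceph" key type registered below lets userspace
 * hand secrets to the kernel via the keyring (e.g. add_key(2)); the key
 * payload is a binary-encoded ceph_crypto_key, parsed here with
 * ceph_crypto_key_decode().
 */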
static int ceph_key_preparse(struct key_preparsed_payload *prep)
{
        struct ceph_crypto_key *ckey;
        size_t datalen = prep->datalen;
        int ret;
        void *p;

        ret = -EINVAL;
        if (datalen <= 0 || datalen > 32767 || !prep->data)
                goto err;

        ret = -ENOMEM;
        ckey = kmalloc(sizeof(*ckey), GFP_KERNEL);
        if (!ckey)
                goto err;

        /* TODO ceph_crypto_key_decode should really take const input */
        p = (void *)prep->data;
        ret = ceph_crypto_key_decode(ckey, &p, (char*)prep->data+datalen);
        if (ret < 0)
                goto err_ckey;

        prep->payload.data[0] = ckey;
        prep->quotalen = datalen;
        return 0;

err_ckey:
        kfree(ckey);
err:
        return ret;
}

static void ceph_key_free_preparse(struct key_preparsed_payload *prep)
{
        struct ceph_crypto_key *ckey = prep->payload.data[0];
        ceph_crypto_key_destroy(ckey);
        kfree(ckey);
}

static void ceph_key_destroy(struct key *key)
{
        struct ceph_crypto_key *ckey = key->payload.data[0];

        ceph_crypto_key_destroy(ckey);
        kfree(ckey);
}

struct key_type key_type_ceph = {
        .name           = "ceph",
        .preparse       = ceph_key_preparse,
        .free_preparse  = ceph_key_free_preparse,
        .instantiate    = generic_key_instantiate,
        .destroy        = ceph_key_destroy,
};

int __init ceph_crypto_init(void)
{
        return register_key_type(&key_type_ceph);
}

void ceph_crypto_shutdown(void)
{
        unregister_key_type(&key_type_ceph);
}