linux/net/ceph/crypto.c
// SPDX-License-Identifier: GPL-2.0

#include <linux/ceph/ceph_debug.h>

#include <linux/err.h>
#include <linux/scatterlist.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <crypto/aes.h>
#include <crypto/skcipher.h>
#include <linux/key-type.h>
#include <linux/sched/mm.h>

#include <keys/ceph-type.h>
#include <keys/user-type.h>
#include <linux/ceph/decode.h>
#include "crypto.h"

/*
 * Set ->key and ->tfm.  The rest of the key should be filled in before
 * this function is called.
 */
static int set_secret(struct ceph_crypto_key *key, void *buf)
{
        unsigned int noio_flag;
        int ret;

        key->key = NULL;
        key->tfm = NULL;

        switch (key->type) {
        case CEPH_CRYPTO_NONE:
                return 0; /* nothing to do */
        case CEPH_CRYPTO_AES:
                break;
        default:
                return -ENOTSUPP;
        }

        if (!key->len)
                return -EINVAL;

        key->key = kmemdup(buf, key->len, GFP_NOIO);
        if (!key->key) {
                ret = -ENOMEM;
                goto fail;
        }

        /* crypto_alloc_sync_skcipher() allocates with GFP_KERNEL */
        noio_flag = memalloc_noio_save();
        key->tfm = crypto_alloc_sync_skcipher("cbc(aes)", 0, 0);
        memalloc_noio_restore(noio_flag);
        if (IS_ERR(key->tfm)) {
                ret = PTR_ERR(key->tfm);
                key->tfm = NULL;
                goto fail;
        }

        ret = crypto_sync_skcipher_setkey(key->tfm, key->key, key->len);
        if (ret)
                goto fail;

        return 0;

fail:
        ceph_crypto_key_destroy(key);
        return ret;
}

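/*
 * Duplicate @src into @dst: the secret bytes are copied into a fresh
 * allocation and a new tfm is set up via set_secret(), so @dst owns
 * its own resources.
 */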
int ceph_crypto_key_clone(struct ceph_crypto_key *dst,
                          const struct ceph_crypto_key *src)
{
        memcpy(dst, src, sizeof(struct ceph_crypto_key));
        return set_secret(dst, src->key);
}

int ceph_crypto_key_encode(struct ceph_crypto_key *key, void **p, void *end)
{
        if (*p + sizeof(u16) + sizeof(key->created) +
            sizeof(u16) + key->len > end)
                return -ERANGE;
        ceph_encode_16(p, key->type);
        ceph_encode_copy(p, &key->created, sizeof(key->created));
        ceph_encode_16(p, key->len);
        ceph_encode_copy(p, key->key, key->len);
        return 0;
}

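/*
 * Decode a key from its wire format: 16-bit type, creation time stamp,
 * 16-bit secret length, then the secret bytes (the counterpart of
 * ceph_crypto_key_encode() above).  ->key and ->tfm are allocated via
 * set_secret().
 */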
int ceph_crypto_key_decode(struct ceph_crypto_key *key, void **p, void *end)
{
        int ret;

        ceph_decode_need(p, end, 2*sizeof(u16) + sizeof(key->created), bad);
        key->type = ceph_decode_16(p);
        ceph_decode_copy(p, &key->created, sizeof(key->created));
        key->len = ceph_decode_16(p);
        ceph_decode_need(p, end, key->len, bad);
        ret = set_secret(key, *p);
        *p += key->len;
        return ret;

bad:
        dout("failed to decode crypto key\n");
        return -EINVAL;
}

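/*
 * Convert a base64-armored key string (as found in a Ceph keyring file)
 * to its binary form and decode it.  inlen * 3 / 4 is an upper bound on
 * the decoded size; ceph_unarmor() returns the actual length or a
 * negative error.
 */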
int ceph_crypto_key_unarmor(struct ceph_crypto_key *key, const char *inkey)
{
        int inlen = strlen(inkey);
        int blen = inlen * 3 / 4;
        void *buf, *p;
        int ret;

        dout("crypto_key_unarmor %s\n", inkey);
        buf = kmalloc(blen, GFP_NOFS);
        if (!buf)
                return -ENOMEM;
        blen = ceph_unarmor(buf, inkey, inkey+inlen);
        if (blen < 0) {
                kfree(buf);
                return blen;
        }

        p = buf;
        ret = ceph_crypto_key_decode(key, &p, p + blen);
        kfree(buf);
        if (ret)
                return ret;
        dout("crypto_key_unarmor key %p type %d len %d\n", key,
             key->type, key->len);
        return 0;
}

void ceph_crypto_key_destroy(struct ceph_crypto_key *key)
{
        if (key) {
                kfree(key->key);
                key->key = NULL;
                if (key->tfm) {
                        crypto_free_sync_skcipher(key->tfm);
                        key->tfm = NULL;
                }
        }
}

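/*
 * Ceph uses a single fixed, well-known IV (CEPH_AES_IV) for all AES-CBC
 * operations; the kernel and userspace sides must agree on it.
 */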
static const u8 *aes_iv = (u8 *)CEPH_AES_IV;

/*
 * Should be used for buffers allocated with ceph_kvmalloc().
 * Currently these are encrypt out-buffer (ceph_buffer) and decrypt
 * in-buffer (msg front).
 *
 * Dispose of @sgt with teardown_sgtable().
 *
 * @prealloc_sg is to avoid memory allocation inside sg_alloc_table()
 * in cases where a single sg is sufficient.  No attempt to reduce the
 * number of sgs by squeezing physically contiguous pages together is
 * made though, for simplicity.
 */
static int setup_sgtable(struct sg_table *sgt, struct scatterlist *prealloc_sg,
                         const void *buf, unsigned int buf_len)
{
        struct scatterlist *sg;
        const bool is_vmalloc = is_vmalloc_addr(buf);
        unsigned int off = offset_in_page(buf);
        unsigned int chunk_cnt = 1;
        unsigned int chunk_len = PAGE_ALIGN(off + buf_len);
        int i;
        int ret;

        if (buf_len == 0) {
                memset(sgt, 0, sizeof(*sgt));
                return -EINVAL;
        }

        if (is_vmalloc) {
                chunk_cnt = chunk_len >> PAGE_SHIFT;
                chunk_len = PAGE_SIZE;
        }

        if (chunk_cnt > 1) {
                ret = sg_alloc_table(sgt, chunk_cnt, GFP_NOFS);
                if (ret)
                        return ret;
        } else {
                WARN_ON(chunk_cnt != 1);
                sg_init_table(prealloc_sg, 1);
                sgt->sgl = prealloc_sg;
                sgt->nents = sgt->orig_nents = 1;
        }

        for_each_sg(sgt->sgl, sg, sgt->orig_nents, i) {
                struct page *page;
                unsigned int len = min(chunk_len - off, buf_len);

                if (is_vmalloc)
                        page = vmalloc_to_page(buf);
                else
                        page = virt_to_page(buf);

                sg_set_page(sg, page, len, off);

                off = 0;
                buf += len;
                buf_len -= len;
        }
        WARN_ON(buf_len != 0);

        return 0;
}

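/*
 * Only free the table if setup_sgtable() actually allocated one; the
 * single preallocated sg case needs no cleanup.
 */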
static void teardown_sgtable(struct sg_table *sgt)
{
        if (sgt->orig_nents > 1)
                sg_free_table(sgt);
}

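/*
 * En/decrypt @buf in place with AES-CBC.  PKCS#7-style padding is used:
 * on encrypt, 1..AES_BLOCK_SIZE pad bytes (each holding the pad length)
 * are appended, so @buf_len must leave room for @in_len rounded up to
 * the next block boundary; on decrypt, the pad length is read from the
 * last byte, validated, and excluded from *pout_len.
 */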
static int ceph_aes_crypt(const struct ceph_crypto_key *key, bool encrypt,
                          void *buf, int buf_len, int in_len, int *pout_len)
{
        SYNC_SKCIPHER_REQUEST_ON_STACK(req, key->tfm);
        struct sg_table sgt;
        struct scatterlist prealloc_sg;
        char iv[AES_BLOCK_SIZE] __aligned(8);
        int pad_byte = AES_BLOCK_SIZE - (in_len & (AES_BLOCK_SIZE - 1));
        int crypt_len = encrypt ? in_len + pad_byte : in_len;
        int ret;

        WARN_ON(crypt_len > buf_len);
        if (encrypt)
                memset(buf + in_len, pad_byte, pad_byte);
        ret = setup_sgtable(&sgt, &prealloc_sg, buf, crypt_len);
        if (ret)
                return ret;

        memcpy(iv, aes_iv, AES_BLOCK_SIZE);
        skcipher_request_set_sync_tfm(req, key->tfm);
        skcipher_request_set_callback(req, 0, NULL, NULL);
        skcipher_request_set_crypt(req, sgt.sgl, sgt.sgl, crypt_len, iv);

        /*
        print_hex_dump(KERN_ERR, "key: ", DUMP_PREFIX_NONE, 16, 1,
                       key->key, key->len, 1);
        print_hex_dump(KERN_ERR, " in: ", DUMP_PREFIX_NONE, 16, 1,
                       buf, crypt_len, 1);
        */
        if (encrypt)
                ret = crypto_skcipher_encrypt(req);
        else
                ret = crypto_skcipher_decrypt(req);
        skcipher_request_zero(req);
        if (ret) {
                pr_err("%s %scrypt failed: %d\n", __func__,
                       encrypt ? "en" : "de", ret);
                goto out_sgt;
        }
        /*
        print_hex_dump(KERN_ERR, "out: ", DUMP_PREFIX_NONE, 16, 1,
                       buf, crypt_len, 1);
        */

        if (encrypt) {
                *pout_len = crypt_len;
        } else {
                pad_byte = *(char *)(buf + in_len - 1);
                if (pad_byte > 0 && pad_byte <= AES_BLOCK_SIZE &&
                    in_len >= pad_byte) {
                        *pout_len = in_len - pad_byte;
                } else {
                        pr_err("%s got bad padding %d on in_len %d\n",
                               __func__, pad_byte, in_len);
                        ret = -EPERM;
                        goto out_sgt;
                }
        }

out_sgt:
        teardown_sgtable(&sgt);
        return ret;
}

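/*
 * Public entry point: dispatch on the key type.  CEPH_CRYPTO_NONE
 * passes the data through untouched.
 */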
int ceph_crypt(const struct ceph_crypto_key *key, bool encrypt,
               void *buf, int buf_len, int in_len, int *pout_len)
{
        switch (key->type) {
        case CEPH_CRYPTO_NONE:
                *pout_len = in_len;
                return 0;
        case CEPH_CRYPTO_AES:
                return ceph_aes_crypt(key, encrypt, buf, buf_len, in_len,
                                      pout_len);
        default:
                return -ENOTSUPP;
        }
}

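/*
 * Keyring glue for the "ceph" key type.  The payload handed in from
 * userspace is expected to be the binary key encoding understood by
 * ceph_crypto_key_decode(), i.e. the unarmored form of a keyring secret.
 */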
static int ceph_key_preparse(struct key_preparsed_payload *prep)
{
        struct ceph_crypto_key *ckey;
        size_t datalen = prep->datalen;
        int ret;
        void *p;

        ret = -EINVAL;
        if (datalen <= 0 || datalen > 32767 || !prep->data)
                goto err;

        ret = -ENOMEM;
        ckey = kmalloc(sizeof(*ckey), GFP_KERNEL);
        if (!ckey)
                goto err;

        /* TODO ceph_crypto_key_decode should really take const input */
        p = (void *)prep->data;
        ret = ceph_crypto_key_decode(ckey, &p, (char *)prep->data + datalen);
        if (ret < 0)
                goto err_ckey;

        prep->payload.data[0] = ckey;
        prep->quotalen = datalen;
        return 0;

err_ckey:
        kfree(ckey);
err:
        return ret;
}

static void ceph_key_free_preparse(struct key_preparsed_payload *prep)
{
        struct ceph_crypto_key *ckey = prep->payload.data[0];

        ceph_crypto_key_destroy(ckey);
        kfree(ckey);
}

static void ceph_key_destroy(struct key *key)
{
        struct ceph_crypto_key *ckey = key->payload.data[0];

        ceph_crypto_key_destroy(ckey);
        kfree(ckey);
}

struct key_type key_type_ceph = {
        .name           = "ceph",
        .preparse       = ceph_key_preparse,
        .free_preparse  = ceph_key_free_preparse,
        .instantiate    = generic_key_instantiate,
        .destroy        = ceph_key_destroy,
};

int __init ceph_crypto_init(void)
{
        return register_key_type(&key_type_ceph);
}

void ceph_crypto_shutdown(void)
{
        unregister_key_type(&key_type_ceph);
}