/* linux/drivers/crypto/qce/ablkcipher.c */
/*
 * Copyright (c) 2010-2014, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */
  13
#include <linux/device.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/types.h>

#include <crypto/aes.h>
#include <crypto/algapi.h>
#include <crypto/des.h>

#include "cipher.h"
  22
/* All ablkcipher templates registered by this driver, linked via tmpl->entry. */
static LIST_HEAD(ablkcipher_algs);
  24
  25static void qce_ablkcipher_done(void *data)
  26{
  27        struct crypto_async_request *async_req = data;
  28        struct ablkcipher_request *req = ablkcipher_request_cast(async_req);
  29        struct qce_cipher_reqctx *rctx = ablkcipher_request_ctx(req);
  30        struct qce_alg_template *tmpl = to_cipher_tmpl(async_req->tfm);
  31        struct qce_device *qce = tmpl->qce;
  32        enum dma_data_direction dir_src, dir_dst;
  33        u32 status;
  34        int error;
  35        bool diff_dst;
  36
  37        diff_dst = (req->src != req->dst) ? true : false;
  38        dir_src = diff_dst ? DMA_TO_DEVICE : DMA_BIDIRECTIONAL;
  39        dir_dst = diff_dst ? DMA_FROM_DEVICE : DMA_BIDIRECTIONAL;
  40
  41        error = qce_dma_terminate_all(&qce->dma);
  42        if (error)
  43                dev_dbg(qce->dev, "ablkcipher dma termination error (%d)\n",
  44                        error);
  45
  46        if (diff_dst)
  47                dma_unmap_sg(qce->dev, rctx->src_sg, rctx->src_nents, dir_src);
  48        dma_unmap_sg(qce->dev, rctx->dst_sg, rctx->dst_nents, dir_dst);
  49
  50        sg_free_table(&rctx->dst_tbl);
  51
  52        error = qce_check_status(qce, &status);
  53        if (error < 0)
  54                dev_dbg(qce->dev, "ablkcipher operation error (%x)\n", status);
  55
  56        qce->async_req_done(tmpl->qce, error);
  57}
  58
  59static int
  60qce_ablkcipher_async_req_handle(struct crypto_async_request *async_req)
  61{
  62        struct ablkcipher_request *req = ablkcipher_request_cast(async_req);
  63        struct qce_cipher_reqctx *rctx = ablkcipher_request_ctx(req);
  64        struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
  65        struct qce_alg_template *tmpl = to_cipher_tmpl(async_req->tfm);
  66        struct qce_device *qce = tmpl->qce;
  67        enum dma_data_direction dir_src, dir_dst;
  68        struct scatterlist *sg;
  69        bool diff_dst;
  70        gfp_t gfp;
  71        int ret;
  72
  73        rctx->iv = req->info;
  74        rctx->ivsize = crypto_ablkcipher_ivsize(ablkcipher);
  75        rctx->cryptlen = req->nbytes;
  76
  77        diff_dst = (req->src != req->dst) ? true : false;
  78        dir_src = diff_dst ? DMA_TO_DEVICE : DMA_BIDIRECTIONAL;
  79        dir_dst = diff_dst ? DMA_FROM_DEVICE : DMA_BIDIRECTIONAL;
  80
  81        rctx->src_nents = sg_nents_for_len(req->src, req->nbytes);
  82        if (diff_dst)
  83                rctx->dst_nents = sg_nents_for_len(req->dst, req->nbytes);
  84        else
  85                rctx->dst_nents = rctx->src_nents;
  86        if (rctx->src_nents < 0) {
  87                dev_err(qce->dev, "Invalid numbers of src SG.\n");
  88                return rctx->src_nents;
  89        }
  90        if (rctx->dst_nents < 0) {
  91                dev_err(qce->dev, "Invalid numbers of dst SG.\n");
  92                return -rctx->dst_nents;
  93        }
  94
  95        rctx->dst_nents += 1;
  96
  97        gfp = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
  98                                                GFP_KERNEL : GFP_ATOMIC;
  99
 100        ret = sg_alloc_table(&rctx->dst_tbl, rctx->dst_nents, gfp);
 101        if (ret)
 102                return ret;
 103
 104        sg_init_one(&rctx->result_sg, qce->dma.result_buf, QCE_RESULT_BUF_SZ);
 105
 106        sg = qce_sgtable_add(&rctx->dst_tbl, req->dst);
 107        if (IS_ERR(sg)) {
 108                ret = PTR_ERR(sg);
 109                goto error_free;
 110        }
 111
 112        sg = qce_sgtable_add(&rctx->dst_tbl, &rctx->result_sg);
 113        if (IS_ERR(sg)) {
 114                ret = PTR_ERR(sg);
 115                goto error_free;
 116        }
 117
 118        sg_mark_end(sg);
 119        rctx->dst_sg = rctx->dst_tbl.sgl;
 120
 121        ret = dma_map_sg(qce->dev, rctx->dst_sg, rctx->dst_nents, dir_dst);
 122        if (ret < 0)
 123                goto error_free;
 124
 125        if (diff_dst) {
 126                ret = dma_map_sg(qce->dev, req->src, rctx->src_nents, dir_src);
 127                if (ret < 0)
 128                        goto error_unmap_dst;
 129                rctx->src_sg = req->src;
 130        } else {
 131                rctx->src_sg = rctx->dst_sg;
 132        }
 133
 134        ret = qce_dma_prep_sgs(&qce->dma, rctx->src_sg, rctx->src_nents,
 135                               rctx->dst_sg, rctx->dst_nents,
 136                               qce_ablkcipher_done, async_req);
 137        if (ret)
 138                goto error_unmap_src;
 139
 140        qce_dma_issue_pending(&qce->dma);
 141
 142        ret = qce_start(async_req, tmpl->crypto_alg_type, req->nbytes, 0);
 143        if (ret)
 144                goto error_terminate;
 145
 146        return 0;
 147
 148error_terminate:
 149        qce_dma_terminate_all(&qce->dma);
 150error_unmap_src:
 151        if (diff_dst)
 152                dma_unmap_sg(qce->dev, req->src, rctx->src_nents, dir_src);
 153error_unmap_dst:
 154        dma_unmap_sg(qce->dev, rctx->dst_sg, rctx->dst_nents, dir_dst);
 155error_free:
 156        sg_free_table(&rctx->dst_tbl);
 157        return ret;
 158}
 159
 160static int qce_ablkcipher_setkey(struct crypto_ablkcipher *ablk, const u8 *key,
 161                                 unsigned int keylen)
 162{
 163        struct crypto_tfm *tfm = crypto_ablkcipher_tfm(ablk);
 164        struct qce_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
 165        unsigned long flags = to_cipher_tmpl(tfm)->alg_flags;
 166        int ret;
 167
 168        if (!key || !keylen)
 169                return -EINVAL;
 170
 171        if (IS_AES(flags)) {
 172                switch (keylen) {
 173                case AES_KEYSIZE_128:
 174                case AES_KEYSIZE_256:
 175                        break;
 176                default:
 177                        goto fallback;
 178                }
 179        } else if (IS_DES(flags)) {
 180                u32 tmp[DES_EXPKEY_WORDS];
 181
 182                ret = des_ekey(tmp, key);
 183                if (!ret && crypto_ablkcipher_get_flags(ablk) &
 184                    CRYPTO_TFM_REQ_WEAK_KEY)
 185                        goto weakkey;
 186        }
 187
 188        ctx->enc_keylen = keylen;
 189        memcpy(ctx->enc_key, key, keylen);
 190        return 0;
 191fallback:
 192        ret = crypto_ablkcipher_setkey(ctx->fallback, key, keylen);
 193        if (!ret)
 194                ctx->enc_keylen = keylen;
 195        return ret;
 196weakkey:
 197        crypto_ablkcipher_set_flags(ablk, CRYPTO_TFM_RES_WEAK_KEY);
 198        return -EINVAL;
 199}
 200
 201static int qce_ablkcipher_crypt(struct ablkcipher_request *req, int encrypt)
 202{
 203        struct crypto_tfm *tfm =
 204                        crypto_ablkcipher_tfm(crypto_ablkcipher_reqtfm(req));
 205        struct qce_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
 206        struct qce_cipher_reqctx *rctx = ablkcipher_request_ctx(req);
 207        struct qce_alg_template *tmpl = to_cipher_tmpl(tfm);
 208        int ret;
 209
 210        rctx->flags = tmpl->alg_flags;
 211        rctx->flags |= encrypt ? QCE_ENCRYPT : QCE_DECRYPT;
 212
 213        if (IS_AES(rctx->flags) && ctx->enc_keylen != AES_KEYSIZE_128 &&
 214            ctx->enc_keylen != AES_KEYSIZE_256) {
 215                ablkcipher_request_set_tfm(req, ctx->fallback);
 216                ret = encrypt ? crypto_ablkcipher_encrypt(req) :
 217                                crypto_ablkcipher_decrypt(req);
 218                ablkcipher_request_set_tfm(req, __crypto_ablkcipher_cast(tfm));
 219                return ret;
 220        }
 221
 222        return tmpl->qce->async_req_enqueue(tmpl->qce, &req->base);
 223}
 224
/* .encrypt hook: run the common path in encrypt mode. */
static int qce_ablkcipher_encrypt(struct ablkcipher_request *req)
{
	return qce_ablkcipher_crypt(req, 1);
}
 229
/* .decrypt hook: run the common path in decrypt mode. */
static int qce_ablkcipher_decrypt(struct ablkcipher_request *req)
{
	return qce_ablkcipher_crypt(req, 0);
}
 234
 235static int qce_ablkcipher_init(struct crypto_tfm *tfm)
 236{
 237        struct qce_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
 238
 239        memset(ctx, 0, sizeof(*ctx));
 240        tfm->crt_ablkcipher.reqsize = sizeof(struct qce_cipher_reqctx);
 241
 242        ctx->fallback = crypto_alloc_ablkcipher(crypto_tfm_alg_name(tfm),
 243                                                CRYPTO_ALG_TYPE_ABLKCIPHER,
 244                                                CRYPTO_ALG_ASYNC |
 245                                                CRYPTO_ALG_NEED_FALLBACK);
 246        if (IS_ERR(ctx->fallback))
 247                return PTR_ERR(ctx->fallback);
 248
 249        return 0;
 250}
 251
/* tfm exit: release the software fallback allocated in init. */
static void qce_ablkcipher_exit(struct crypto_tfm *tfm)
{
	struct qce_cipher_ctx *ctx = crypto_tfm_ctx(tfm);

	crypto_free_ablkcipher(ctx->fallback);
}
 258
/* Static description of one hardware cipher algorithm to register. */
struct qce_ablkcipher_def {
	unsigned long flags;		/* QCE_ALG_* | QCE_MODE_* bits */
	const char *name;		/* generic algorithm name, e.g. "cbc(aes)" */
	const char *drv_name;		/* driver-specific name, e.g. "cbc-aes-qce" */
	unsigned int blocksize;		/* cipher block size in bytes */
	unsigned int ivsize;		/* IV size in bytes (0 when no IV) */
	unsigned int min_keysize;	/* smallest accepted key, bytes */
	unsigned int max_keysize;	/* largest accepted key, bytes */
};
 268
/*
 * All cipher/mode combinations offered by the engine: AES in ECB/CBC/CTR/XTS
 * and DES/3DES in ECB/CBC.
 *
 * NOTE(review): ecb(aes) advertises ivsize = AES_BLOCK_SIZE while the
 * ecb(des)/ecb(des3_ede) entries use 0; ECB has no IV, so confirm whether
 * the AES entry is intentional (hardware requirement) or should be 0.
 */
static const struct qce_ablkcipher_def ablkcipher_def[] = {
	{
		.flags		= QCE_ALG_AES | QCE_MODE_ECB,
		.name		= "ecb(aes)",
		.drv_name	= "ecb-aes-qce",
		.blocksize	= AES_BLOCK_SIZE,
		.ivsize		= AES_BLOCK_SIZE,
		.min_keysize	= AES_MIN_KEY_SIZE,
		.max_keysize	= AES_MAX_KEY_SIZE,
	},
	{
		.flags		= QCE_ALG_AES | QCE_MODE_CBC,
		.name		= "cbc(aes)",
		.drv_name	= "cbc-aes-qce",
		.blocksize	= AES_BLOCK_SIZE,
		.ivsize		= AES_BLOCK_SIZE,
		.min_keysize	= AES_MIN_KEY_SIZE,
		.max_keysize	= AES_MAX_KEY_SIZE,
	},
	{
		.flags		= QCE_ALG_AES | QCE_MODE_CTR,
		.name		= "ctr(aes)",
		.drv_name	= "ctr-aes-qce",
		.blocksize	= AES_BLOCK_SIZE,
		.ivsize		= AES_BLOCK_SIZE,
		.min_keysize	= AES_MIN_KEY_SIZE,
		.max_keysize	= AES_MAX_KEY_SIZE,
	},
	{
		.flags		= QCE_ALG_AES | QCE_MODE_XTS,
		.name		= "xts(aes)",
		.drv_name	= "xts-aes-qce",
		.blocksize	= AES_BLOCK_SIZE,
		.ivsize		= AES_BLOCK_SIZE,
		.min_keysize	= AES_MIN_KEY_SIZE,
		.max_keysize	= AES_MAX_KEY_SIZE,
	},
	{
		.flags		= QCE_ALG_DES | QCE_MODE_ECB,
		.name		= "ecb(des)",
		.drv_name	= "ecb-des-qce",
		.blocksize	= DES_BLOCK_SIZE,
		.ivsize		= 0,
		.min_keysize	= DES_KEY_SIZE,
		.max_keysize	= DES_KEY_SIZE,
	},
	{
		.flags		= QCE_ALG_DES | QCE_MODE_CBC,
		.name		= "cbc(des)",
		.drv_name	= "cbc-des-qce",
		.blocksize	= DES_BLOCK_SIZE,
		.ivsize		= DES_BLOCK_SIZE,
		.min_keysize	= DES_KEY_SIZE,
		.max_keysize	= DES_KEY_SIZE,
	},
	{
		.flags		= QCE_ALG_3DES | QCE_MODE_ECB,
		.name		= "ecb(des3_ede)",
		.drv_name	= "ecb-3des-qce",
		.blocksize	= DES3_EDE_BLOCK_SIZE,
		.ivsize		= 0,
		.min_keysize	= DES3_EDE_KEY_SIZE,
		.max_keysize	= DES3_EDE_KEY_SIZE,
	},
	{
		.flags		= QCE_ALG_3DES | QCE_MODE_CBC,
		.name		= "cbc(des3_ede)",
		.drv_name	= "cbc-3des-qce",
		.blocksize	= DES3_EDE_BLOCK_SIZE,
		.ivsize		= DES3_EDE_BLOCK_SIZE,
		.min_keysize	= DES3_EDE_KEY_SIZE,
		.max_keysize	= DES3_EDE_KEY_SIZE,
	},
};
 343
 344static int qce_ablkcipher_register_one(const struct qce_ablkcipher_def *def,
 345                                       struct qce_device *qce)
 346{
 347        struct qce_alg_template *tmpl;
 348        struct crypto_alg *alg;
 349        int ret;
 350
 351        tmpl = kzalloc(sizeof(*tmpl), GFP_KERNEL);
 352        if (!tmpl)
 353                return -ENOMEM;
 354
 355        alg = &tmpl->alg.crypto;
 356
 357        snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s", def->name);
 358        snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
 359                 def->drv_name);
 360
 361        alg->cra_blocksize = def->blocksize;
 362        alg->cra_ablkcipher.ivsize = def->ivsize;
 363        alg->cra_ablkcipher.min_keysize = def->min_keysize;
 364        alg->cra_ablkcipher.max_keysize = def->max_keysize;
 365        alg->cra_ablkcipher.setkey = qce_ablkcipher_setkey;
 366        alg->cra_ablkcipher.encrypt = qce_ablkcipher_encrypt;
 367        alg->cra_ablkcipher.decrypt = qce_ablkcipher_decrypt;
 368
 369        alg->cra_priority = 300;
 370        alg->cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC |
 371                         CRYPTO_ALG_NEED_FALLBACK;
 372        alg->cra_ctxsize = sizeof(struct qce_cipher_ctx);
 373        alg->cra_alignmask = 0;
 374        alg->cra_type = &crypto_ablkcipher_type;
 375        alg->cra_module = THIS_MODULE;
 376        alg->cra_init = qce_ablkcipher_init;
 377        alg->cra_exit = qce_ablkcipher_exit;
 378        INIT_LIST_HEAD(&alg->cra_list);
 379
 380        INIT_LIST_HEAD(&tmpl->entry);
 381        tmpl->crypto_alg_type = CRYPTO_ALG_TYPE_ABLKCIPHER;
 382        tmpl->alg_flags = def->flags;
 383        tmpl->qce = qce;
 384
 385        ret = crypto_register_alg(alg);
 386        if (ret) {
 387                kfree(tmpl);
 388                dev_err(qce->dev, "%s registration failed\n", alg->cra_name);
 389                return ret;
 390        }
 391
 392        list_add_tail(&tmpl->entry, &ablkcipher_algs);
 393        dev_dbg(qce->dev, "%s is registered\n", alg->cra_name);
 394        return 0;
 395}
 396
 397static void qce_ablkcipher_unregister(struct qce_device *qce)
 398{
 399        struct qce_alg_template *tmpl, *n;
 400
 401        list_for_each_entry_safe(tmpl, n, &ablkcipher_algs, entry) {
 402                crypto_unregister_alg(&tmpl->alg.crypto);
 403                list_del(&tmpl->entry);
 404                kfree(tmpl);
 405        }
 406}
 407
 408static int qce_ablkcipher_register(struct qce_device *qce)
 409{
 410        int ret, i;
 411
 412        for (i = 0; i < ARRAY_SIZE(ablkcipher_def); i++) {
 413                ret = qce_ablkcipher_register_one(&ablkcipher_def[i], qce);
 414                if (ret)
 415                        goto err;
 416        }
 417
 418        return 0;
 419err:
 420        qce_ablkcipher_unregister(qce);
 421        return ret;
 422}
 423
/* Ops exported to the qce core: registration hooks and the request handler. */
const struct qce_algo_ops ablkcipher_ops = {
	.type = CRYPTO_ALG_TYPE_ABLKCIPHER,
	.register_algs = qce_ablkcipher_register,
	.unregister_algs = qce_ablkcipher_unregister,
	.async_req_handle = qce_ablkcipher_async_req_handle,
};
 430