linux/drivers/crypto/omap-aes.c
   1/*
   2 * Cryptographic API.
   3 *
   4 * Support for OMAP AES HW acceleration.
   5 *
   6 * Copyright (c) 2010 Nokia Corporation
   7 * Author: Dmitry Kasatkin <dmitry.kasatkin@nokia.com>
   8 * Copyright (c) 2011 Texas Instruments Incorporated
   9 *
  10 * This program is free software; you can redistribute it and/or modify
  11 * it under the terms of the GNU General Public License version 2 as published
  12 * by the Free Software Foundation.
  13 *
  14 */
  15
  16#define pr_fmt(fmt) "%20s: " fmt, __func__
  17#define prn(num) pr_debug(#num "=%d\n", num)
  18#define prx(num) pr_debug(#num "=%x\n", num)
  19
  20#include <linux/err.h>
  21#include <linux/module.h>
  22#include <linux/init.h>
  23#include <linux/errno.h>
  24#include <linux/kernel.h>
  25#include <linux/platform_device.h>
  26#include <linux/scatterlist.h>
  27#include <linux/dma-mapping.h>
  28#include <linux/dmaengine.h>
  29#include <linux/pm_runtime.h>
  30#include <linux/of.h>
  31#include <linux/of_device.h>
  32#include <linux/of_address.h>
  33#include <linux/io.h>
  34#include <linux/crypto.h>
  35#include <linux/interrupt.h>
  36#include <crypto/scatterwalk.h>
  37#include <crypto/aes.h>
  38#include <crypto/gcm.h>
  39#include <crypto/engine.h>
  40#include <crypto/internal/skcipher.h>
  41#include <crypto/internal/aead.h>
  42
  43#include "omap-crypto.h"
  44#include "omap-aes.h"
  45
  46/* keep registered devices' data here */
  47static LIST_HEAD(dev_list);
  48static DEFINE_SPINLOCK(list_lock);
  49
  50static int aes_fallback_sz = 200;
  51
  52#ifdef DEBUG
  53#define omap_aes_read(dd, offset)                               \
  54({                                                              \
  55        int _read_ret;                                          \
  56        _read_ret = __raw_readl(dd->io_base + offset);          \
  57        pr_debug("omap_aes_read(" #offset "=%#x)= %#x\n",       \
  58                 offset, _read_ret);                            \
  59        _read_ret;                                              \
  60})
  61#else
  62inline u32 omap_aes_read(struct omap_aes_dev *dd, u32 offset)
  63{
  64        return __raw_readl(dd->io_base + offset);
  65}
  66#endif
  67
  68#ifdef DEBUG
  69#define omap_aes_write(dd, offset, value)                               \
  70        do {                                                            \
  71                pr_debug("omap_aes_write(" #offset "=%#x) value=%#x\n", \
  72                         offset, value);                                \
  73                __raw_writel(value, dd->io_base + offset);              \
  74        } while (0)
  75#else
  76inline void omap_aes_write(struct omap_aes_dev *dd, u32 offset,
  77                                  u32 value)
  78{
  79        __raw_writel(value, dd->io_base + offset);
  80}
  81#endif
  82
  83static inline void omap_aes_write_mask(struct omap_aes_dev *dd, u32 offset,
  84                                        u32 value, u32 mask)
  85{
  86        u32 val;
  87
  88        val = omap_aes_read(dd, offset);
  89        val &= ~mask;
  90        val |= value;
  91        omap_aes_write(dd, offset, val);
  92}
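
/*
 * A minimal illustration of the read-modify-write helper above, using
 * macros this file already relies on: every bit covered by the mask is
 * cleared first, then the new value is OR-ed in, so e.g.
 *
 *	omap_aes_write_mask(dd, AES_REG_CTRL(dd),
 *			    AES_REG_CTRL_CBC | AES_REG_CTRL_DIRECTION,
 *			    AES_REG_CTRL_MASK);
 *
 * programs CBC encryption without disturbing CTRL bits outside the mask.
 */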
  93
  94static void omap_aes_write_n(struct omap_aes_dev *dd, u32 offset,
  95                                        u32 *value, int count)
  96{
  97        for (; count--; value++, offset += 4)
  98                omap_aes_write(dd, offset, *value);
  99}
 100
 101static int omap_aes_hw_init(struct omap_aes_dev *dd)
 102{
 103        int err;
 104
 105        if (!(dd->flags & FLAGS_INIT)) {
 106                dd->flags |= FLAGS_INIT;
 107                dd->err = 0;
 108        }
 109
 110        err = pm_runtime_get_sync(dd->dev);
 111        if (err < 0) {
 112                dev_err(dd->dev, "failed to get sync: %d\n", err);
 113                return err;
 114        }
 115
 116        return 0;
 117}
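
/*
 * Note that pm_runtime_get_sync() bumps the device usage count even
 * when it returns an error, so a caller that wants the count balanced
 * on failure pairs the error path with pm_runtime_put_noidle(), along
 * the lines of:
 *
 *	if (pm_runtime_get_sync(dev) < 0)
 *		pm_runtime_put_noidle(dev);
 */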
 118
 119void omap_aes_clear_copy_flags(struct omap_aes_dev *dd)
 120{
 121        dd->flags &= ~(OMAP_CRYPTO_COPY_MASK << FLAGS_IN_DATA_ST_SHIFT);
 122        dd->flags &= ~(OMAP_CRYPTO_COPY_MASK << FLAGS_OUT_DATA_ST_SHIFT);
 123        dd->flags &= ~(OMAP_CRYPTO_COPY_MASK << FLAGS_ASSOC_DATA_ST_SHIFT);
 124}
 125
 126int omap_aes_write_ctrl(struct omap_aes_dev *dd)
 127{
 128        struct omap_aes_reqctx *rctx;
 129        unsigned int key32;
 130        int i, err;
 131        u32 val;
 132
 133        err = omap_aes_hw_init(dd);
 134        if (err)
 135                return err;
 136
 137        key32 = dd->ctx->keylen / sizeof(u32);
 138
 139        /* Reset the key store so a previous GCM hash key cannot affect this run */
 140        if (dd->flags & FLAGS_GCM)
 141                for (i = 0; i < 0x40; i += 4)
 142                        omap_aes_write(dd, i, 0x0);
 143
 144        for (i = 0; i < key32; i++) {
 145                omap_aes_write(dd, AES_REG_KEY(dd, i),
 146                        __le32_to_cpu(dd->ctx->key[i]));
 147        }
 148
 149        if ((dd->flags & (FLAGS_CBC | FLAGS_CTR)) && dd->req->info)
 150                omap_aes_write_n(dd, AES_REG_IV(dd, 0), dd->req->info, 4);
 151
 152        if ((dd->flags & (FLAGS_GCM)) && dd->aead_req->iv) {
 153                rctx = aead_request_ctx(dd->aead_req);
 154                omap_aes_write_n(dd, AES_REG_IV(dd, 0), (u32 *)rctx->iv, 4);
 155        }
 156
 157        val = FLD_VAL(((dd->ctx->keylen >> 3) - 1), 4, 3);
 158        if (dd->flags & FLAGS_CBC)
 159                val |= AES_REG_CTRL_CBC;
 160
 161        if (dd->flags & (FLAGS_CTR | FLAGS_GCM))
 162                val |= AES_REG_CTRL_CTR | AES_REG_CTRL_CTR_WIDTH_128;
 163
 164        if (dd->flags & FLAGS_GCM)
 165                val |= AES_REG_CTRL_GCM;
 166
 167        if (dd->flags & FLAGS_ENCRYPT)
 168                val |= AES_REG_CTRL_DIRECTION;
 169
 170        omap_aes_write_mask(dd, AES_REG_CTRL(dd), val, AES_REG_CTRL_MASK);
 171
 172        return 0;
 173}
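
/*
 * Worked example for the FLD_VAL() line above: a 256-bit key has
 * keylen == 32, so (32 >> 3) - 1 == 3 and, going by the (4, 3)
 * arguments, FLD_VAL() places 0b11 into CTRL bits 4:3; a 128-bit key
 * (keylen == 16) yields 0b01 in the same field.
 */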
 174
 175static void omap_aes_dma_trigger_omap2(struct omap_aes_dev *dd, int length)
 176{
 177        u32 mask, val;
 178
 179        val = dd->pdata->dma_start;
 180
 181        if (dd->dma_lch_out != NULL)
 182                val |= dd->pdata->dma_enable_out;
 183        if (dd->dma_lch_in != NULL)
 184                val |= dd->pdata->dma_enable_in;
 185
 186        mask = dd->pdata->dma_enable_out | dd->pdata->dma_enable_in |
 187               dd->pdata->dma_start;
 188
 189        omap_aes_write_mask(dd, AES_REG_MASK(dd), val, mask);
 190
 191}
 192
 193static void omap_aes_dma_trigger_omap4(struct omap_aes_dev *dd, int length)
 194{
 195        omap_aes_write(dd, AES_REG_LENGTH_N(0), length);
 196        omap_aes_write(dd, AES_REG_LENGTH_N(1), 0);
 197        if (dd->flags & FLAGS_GCM)
 198                omap_aes_write(dd, AES_REG_A_LEN, dd->assoc_len);
 199
 200        omap_aes_dma_trigger_omap2(dd, length);
 201}
 202
 203static void omap_aes_dma_stop(struct omap_aes_dev *dd)
 204{
 205        u32 mask;
 206
 207        mask = dd->pdata->dma_enable_out | dd->pdata->dma_enable_in |
 208               dd->pdata->dma_start;
 209
 210        omap_aes_write_mask(dd, AES_REG_MASK(dd), 0, mask);
 211}
 212
 213struct omap_aes_dev *omap_aes_find_dev(struct omap_aes_reqctx *rctx)
 214{
 215        struct omap_aes_dev *dd;
 216
 217        spin_lock_bh(&list_lock);
 218        dd = list_first_entry(&dev_list, struct omap_aes_dev, list);
 219        list_move_tail(&dd->list, &dev_list);
 220        rctx->dd = dd;
 221        spin_unlock_bh(&list_lock);
 222
 223        return dd;
 224}
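
/*
 * The list_move_tail() above implements a simple round-robin policy:
 * the device just handed out moves to the back of dev_list, so on a
 * SoC with several AES cores the requests are spread across them.
 */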
 225
 226static void omap_aes_dma_out_callback(void *data)
 227{
 228        struct omap_aes_dev *dd = data;
 229
 230        /* dma_lch_out - completed */
 231        tasklet_schedule(&dd->done_task);
 232}
 233
 234static int omap_aes_dma_init(struct omap_aes_dev *dd)
 235{
 236        int err;
 237
 238        dd->dma_lch_out = NULL;
 239        dd->dma_lch_in = NULL;
 240
 241        dd->dma_lch_in = dma_request_chan(dd->dev, "rx");
 242        if (IS_ERR(dd->dma_lch_in)) {
 243                dev_err(dd->dev, "Unable to request in DMA channel\n");
 244                return PTR_ERR(dd->dma_lch_in);
 245        }
 246
 247        dd->dma_lch_out = dma_request_chan(dd->dev, "tx");
 248        if (IS_ERR(dd->dma_lch_out)) {
 249                dev_err(dd->dev, "Unable to request out DMA channel\n");
 250                err = PTR_ERR(dd->dma_lch_out);
 251                goto err_dma_out;
 252        }
 253
 254        return 0;
 255
 256err_dma_out:
 257        dma_release_channel(dd->dma_lch_in);
 258
 259        return err;
 260}
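
/*
 * The "rx" and "tx" names above are resolved against the dma-names
 * property of the device node, so a matching binding would look
 * roughly like this (channel numbers purely illustrative):
 *
 *	dmas = <&sdma 65>, <&sdma 66>;
 *	dma-names = "tx", "rx";
 */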
 261
 262static void omap_aes_dma_cleanup(struct omap_aes_dev *dd)
 263{
 264        if (dd->pio_only)
 265                return;
 266
 267        dma_release_channel(dd->dma_lch_out);
 268        dma_release_channel(dd->dma_lch_in);
 269}
 270
 271static int omap_aes_crypt_dma(struct omap_aes_dev *dd,
 272                              struct scatterlist *in_sg,
 273                              struct scatterlist *out_sg,
 274                              int in_sg_len, int out_sg_len)
 275{
 276        struct dma_async_tx_descriptor *tx_in, *tx_out;
 277        struct dma_slave_config cfg;
 278        int ret;
 279
 280        if (dd->pio_only) {
 281                scatterwalk_start(&dd->in_walk, dd->in_sg);
 282                scatterwalk_start(&dd->out_walk, dd->out_sg);
 283
 284        /* Enable the DATAIN interrupt and let the PIO
 285         * interrupt handler take care of the rest. */
 286                omap_aes_write(dd, AES_REG_IRQ_ENABLE(dd), 0x2);
 287                return 0;
 288        }
 289
 290        dma_sync_sg_for_device(dd->dev, dd->in_sg, in_sg_len, DMA_TO_DEVICE);
 291
 292        memset(&cfg, 0, sizeof(cfg));
 293
 294        cfg.src_addr = dd->phys_base + AES_REG_DATA_N(dd, 0);
 295        cfg.dst_addr = dd->phys_base + AES_REG_DATA_N(dd, 0);
 296        cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
 297        cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
 298        cfg.src_maxburst = DST_MAXBURST;
 299        cfg.dst_maxburst = DST_MAXBURST;
 300
 301        /* IN */
 302        ret = dmaengine_slave_config(dd->dma_lch_in, &cfg);
 303        if (ret) {
 304                dev_err(dd->dev, "can't configure IN dmaengine slave: %d\n",
 305                        ret);
 306                return ret;
 307        }
 308
 309        tx_in = dmaengine_prep_slave_sg(dd->dma_lch_in, in_sg, in_sg_len,
 310                                        DMA_MEM_TO_DEV,
 311                                        DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
 312        if (!tx_in) {
 313                dev_err(dd->dev, "IN prep_slave_sg() failed\n");
 314                return -EINVAL;
 315        }
 316
 317        /* No completion callback is needed on the IN side */
 318        tx_in->callback_param = dd;
 319
 320        /* OUT */
 321        ret = dmaengine_slave_config(dd->dma_lch_out, &cfg);
 322        if (ret) {
 323                dev_err(dd->dev, "can't configure OUT dmaengine slave: %d\n",
 324                        ret);
 325                return ret;
 326        }
 327
 328        tx_out = dmaengine_prep_slave_sg(dd->dma_lch_out, out_sg, out_sg_len,
 329                                        DMA_DEV_TO_MEM,
 330                                        DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
 331        if (!tx_out) {
 332                dev_err(dd->dev, "OUT prep_slave_sg() failed\n");
 333                return -EINVAL;
 334        }
 335
 336        if (dd->flags & FLAGS_GCM)
 337                tx_out->callback = omap_aes_gcm_dma_out_callback;
 338        else
 339                tx_out->callback = omap_aes_dma_out_callback;
 340        tx_out->callback_param = dd;
 341
 342        dmaengine_submit(tx_in);
 343        dmaengine_submit(tx_out);
 344
 345        dma_async_issue_pending(dd->dma_lch_in);
 346        dma_async_issue_pending(dd->dma_lch_out);
 347
 348        /* start DMA */
 349        dd->pdata->trigger(dd, dd->total);
 350
 351        return 0;
 352}
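
/*
 * Summary of the DMA path above: both channels target the same DATA
 * register window, the IN and OUT descriptors are submitted and armed
 * with dma_async_issue_pending(), and only then does pdata->trigger()
 * start the accelerator, so the engine never runs ahead of the DMA
 * setup.
 */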
 353
 354int omap_aes_crypt_dma_start(struct omap_aes_dev *dd)
 355{
 356        int err;
 357
 358        pr_debug("total: %d\n", dd->total);
 359
 360        if (!dd->pio_only) {
 361                err = dma_map_sg(dd->dev, dd->in_sg, dd->in_sg_len,
 362                                 DMA_TO_DEVICE);
 363                if (!err) {
 364                        dev_err(dd->dev, "dma_map_sg() error\n");
 365                        return -EINVAL;
 366                }
 367
 368                err = dma_map_sg(dd->dev, dd->out_sg, dd->out_sg_len,
 369                                 DMA_FROM_DEVICE);
 370                if (!err) {
 371                        dev_err(dd->dev, "dma_map_sg() error\n");
 372                        return -EINVAL;
 373                }
 374        }
 375
 376        err = omap_aes_crypt_dma(dd, dd->in_sg, dd->out_sg, dd->in_sg_len,
 377                                 dd->out_sg_len);
 378        if (err && !dd->pio_only) {
 379                dma_unmap_sg(dd->dev, dd->in_sg, dd->in_sg_len, DMA_TO_DEVICE);
 380                dma_unmap_sg(dd->dev, dd->out_sg, dd->out_sg_len,
 381                             DMA_FROM_DEVICE);
 382        }
 383
 384        return err;
 385}
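
/*
 * Note that dma_map_sg() returns the number of mapped entries and 0 on
 * failure, so the "!err" tests above check for a failed mapping rather
 * than a negative errno.
 */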
 386
 387static void omap_aes_finish_req(struct omap_aes_dev *dd, int err)
 388{
 389        struct ablkcipher_request *req = dd->req;
 390
 391        pr_debug("err: %d\n", err);
 392
 393        crypto_finalize_ablkcipher_request(dd->engine, req, err);
 394
 395        pm_runtime_mark_last_busy(dd->dev);
 396        pm_runtime_put_autosuspend(dd->dev);
 397}
 398
 399int omap_aes_crypt_dma_stop(struct omap_aes_dev *dd)
 400{
 401        pr_debug("total: %d\n", dd->total);
 402
 403        omap_aes_dma_stop(dd);
 404
 405
 406        return 0;
 407}
 408
 409static int omap_aes_handle_queue(struct omap_aes_dev *dd,
 410                                 struct ablkcipher_request *req)
 411{
 412        if (req)
 413                return crypto_transfer_ablkcipher_request_to_engine(dd->engine, req);
 414
 415        return 0;
 416}
 417
 418static int omap_aes_prepare_req(struct crypto_engine *engine,
 419                                void *areq)
 420{
 421        struct ablkcipher_request *req = container_of(areq, struct ablkcipher_request, base);
 422        struct omap_aes_ctx *ctx = crypto_ablkcipher_ctx(
 423                        crypto_ablkcipher_reqtfm(req));
 424        struct omap_aes_reqctx *rctx = ablkcipher_request_ctx(req);
 425        struct omap_aes_dev *dd = rctx->dd;
 426        int ret;
 427        u16 flags;
 428
 429        if (!dd)
 430                return -ENODEV;
 431
 432        /* assign new request to device */
 433        dd->req = req;
 434        dd->total = req->nbytes;
 435        dd->total_save = req->nbytes;
 436        dd->in_sg = req->src;
 437        dd->out_sg = req->dst;
 438        dd->orig_out = req->dst;
 439
 440        flags = OMAP_CRYPTO_COPY_DATA;
 441        if (req->src == req->dst)
 442                flags |= OMAP_CRYPTO_FORCE_COPY;
 443
 444        ret = omap_crypto_align_sg(&dd->in_sg, dd->total, AES_BLOCK_SIZE,
 445                                   dd->in_sgl, flags,
 446                                   FLAGS_IN_DATA_ST_SHIFT, &dd->flags);
 447        if (ret)
 448                return ret;
 449
 450        ret = omap_crypto_align_sg(&dd->out_sg, dd->total, AES_BLOCK_SIZE,
 451                                   &dd->out_sgl, 0,
 452                                   FLAGS_OUT_DATA_ST_SHIFT, &dd->flags);
 453        if (ret)
 454                return ret;
 455
 456        dd->in_sg_len = sg_nents_for_len(dd->in_sg, dd->total);
 457        if (dd->in_sg_len < 0)
 458                return dd->in_sg_len;
 459
 460        dd->out_sg_len = sg_nents_for_len(dd->out_sg, dd->total);
 461        if (dd->out_sg_len < 0)
 462                return dd->out_sg_len;
 463
 464        rctx->mode &= FLAGS_MODE_MASK;
 465        dd->flags = (dd->flags & ~FLAGS_MODE_MASK) | rctx->mode;
 466
 467        dd->ctx = ctx;
 468        rctx->dd = dd;
 469
 470        return omap_aes_write_ctrl(dd);
 471}
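
/*
 * omap_crypto_align_sg() may swap in a bounce buffer when a scatterlist
 * is not suitably aligned for the hardware; OMAP_CRYPTO_FORCE_COPY is
 * set for in-place requests (src == dst), presumably so the output DMA
 * cannot clobber input data that is still being consumed.
 */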
 472
 473static int omap_aes_crypt_req(struct crypto_engine *engine,
 474                              void *areq)
 475{
 476        struct ablkcipher_request *req = container_of(areq, struct ablkcipher_request, base);
 477        struct omap_aes_reqctx *rctx = ablkcipher_request_ctx(req);
 478        struct omap_aes_dev *dd = rctx->dd;
 479
 480        if (!dd)
 481                return -ENODEV;
 482
 483        return omap_aes_crypt_dma_start(dd);
 484}
 485
 486static void omap_aes_done_task(unsigned long data)
 487{
 488        struct omap_aes_dev *dd = (struct omap_aes_dev *)data;
 489
 490        pr_debug("enter done_task\n");
 491
 492        if (!dd->pio_only) {
 493                dma_sync_sg_for_device(dd->dev, dd->out_sg, dd->out_sg_len,
 494                                       DMA_FROM_DEVICE);
 495                dma_unmap_sg(dd->dev, dd->in_sg, dd->in_sg_len, DMA_TO_DEVICE);
 496                dma_unmap_sg(dd->dev, dd->out_sg, dd->out_sg_len,
 497                             DMA_FROM_DEVICE);
 498                omap_aes_crypt_dma_stop(dd);
 499        }
 500
 501        omap_crypto_cleanup(dd->in_sgl, NULL, 0, dd->total_save,
 502                            FLAGS_IN_DATA_ST_SHIFT, dd->flags);
 503
 504        omap_crypto_cleanup(&dd->out_sgl, dd->orig_out, 0, dd->total_save,
 505                            FLAGS_OUT_DATA_ST_SHIFT, dd->flags);
 506
 507        omap_aes_finish_req(dd, 0);
 508
 509        pr_debug("exit\n");
 510}
 511
 512static int omap_aes_crypt(struct ablkcipher_request *req, unsigned long mode)
 513{
 514        struct omap_aes_ctx *ctx = crypto_ablkcipher_ctx(
 515                        crypto_ablkcipher_reqtfm(req));
 516        struct omap_aes_reqctx *rctx = ablkcipher_request_ctx(req);
 517        struct omap_aes_dev *dd;
 518        int ret;
 519
 520        pr_debug("nbytes: %d, enc: %d, cbc: %d\n", req->nbytes,
 521                  !!(mode & FLAGS_ENCRYPT),
 522                  !!(mode & FLAGS_CBC));
 523
 524        if (req->nbytes < aes_fallback_sz) {
 525                SKCIPHER_REQUEST_ON_STACK(subreq, ctx->fallback);
 526
 527                skcipher_request_set_tfm(subreq, ctx->fallback);
 528                skcipher_request_set_callback(subreq, req->base.flags, NULL,
 529                                              NULL);
 530                skcipher_request_set_crypt(subreq, req->src, req->dst,
 531                                           req->nbytes, req->info);
 532
 533                if (mode & FLAGS_ENCRYPT)
 534                        ret = crypto_skcipher_encrypt(subreq);
 535                else
 536                        ret = crypto_skcipher_decrypt(subreq);
 537
 538                skcipher_request_zero(subreq);
 539                return ret;
 540        }
 541        dd = omap_aes_find_dev(rctx);
 542        if (!dd)
 543                return -ENODEV;
 544
 545        rctx->mode = mode;
 546
 547        return omap_aes_handle_queue(dd, req);
 548}
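
/*
 * Requests shorter than aes_fallback_sz (200 bytes by default, tunable
 * via the "fallback" sysfs attribute below) are handed to the software
 * skcipher: for small buffers the DMA and engine setup cost outweighs
 * the accelerator's throughput advantage.
 */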
 549
 550/* ********************** ALG API ************************************ */
 551
 552static int omap_aes_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
 553                           unsigned int keylen)
 554{
 555        struct omap_aes_ctx *ctx = crypto_ablkcipher_ctx(tfm);
 556        int ret;
 557
 558        if (keylen != AES_KEYSIZE_128 && keylen != AES_KEYSIZE_192 &&
 559                   keylen != AES_KEYSIZE_256)
 560                return -EINVAL;
 561
 562        pr_debug("enter, keylen: %d\n", keylen);
 563
 564        memcpy(ctx->key, key, keylen);
 565        ctx->keylen = keylen;
 566
 567        crypto_skcipher_clear_flags(ctx->fallback, CRYPTO_TFM_REQ_MASK);
 568        crypto_skcipher_set_flags(ctx->fallback, tfm->base.crt_flags &
 569                                                 CRYPTO_TFM_REQ_MASK);
 570
 571        ret = crypto_skcipher_setkey(ctx->fallback, key, keylen);
 572        if (ret)
 573                return ret;
 574
 575        return 0;
 576}
 577
 578static int omap_aes_ecb_encrypt(struct ablkcipher_request *req)
 579{
 580        return omap_aes_crypt(req, FLAGS_ENCRYPT);
 581}
 582
 583static int omap_aes_ecb_decrypt(struct ablkcipher_request *req)
 584{
 585        return omap_aes_crypt(req, 0);
 586}
 587
 588static int omap_aes_cbc_encrypt(struct ablkcipher_request *req)
 589{
 590        return omap_aes_crypt(req, FLAGS_ENCRYPT | FLAGS_CBC);
 591}
 592
 593static int omap_aes_cbc_decrypt(struct ablkcipher_request *req)
 594{
 595        return omap_aes_crypt(req, FLAGS_CBC);
 596}
 597
 598static int omap_aes_ctr_encrypt(struct ablkcipher_request *req)
 599{
 600        return omap_aes_crypt(req, FLAGS_ENCRYPT | FLAGS_CTR);
 601}
 602
 603static int omap_aes_ctr_decrypt(struct ablkcipher_request *req)
 604{
 605        return omap_aes_crypt(req, FLAGS_CTR);
 606}
 607
 608static int omap_aes_prepare_req(struct crypto_engine *engine,
 609                                void *req);
 610static int omap_aes_crypt_req(struct crypto_engine *engine,
 611                              void *req);
 612
 613static int omap_aes_cra_init(struct crypto_tfm *tfm)
 614{
 615        const char *name = crypto_tfm_alg_name(tfm);
 616        const u32 flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK;
 617        struct omap_aes_ctx *ctx = crypto_tfm_ctx(tfm);
 618        struct crypto_skcipher *blk;
 619
 620        blk = crypto_alloc_skcipher(name, 0, flags);
 621        if (IS_ERR(blk))
 622                return PTR_ERR(blk);
 623
 624        ctx->fallback = blk;
 625
 626        tfm->crt_ablkcipher.reqsize = sizeof(struct omap_aes_reqctx);
 627
 628        ctx->enginectx.op.prepare_request = omap_aes_prepare_req;
 629        ctx->enginectx.op.unprepare_request = NULL;
 630        ctx->enginectx.op.do_one_request = omap_aes_crypt_req;
 631
 632        return 0;
 633}
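
/*
 * The enginectx hooks registered above are what the crypto engine core
 * invokes per queued request: omap_aes_prepare_req() maps the buffers
 * and programs the hardware, omap_aes_crypt_req() kicks off the DMA,
 * and completion is reported asynchronously from the done tasklet via
 * crypto_finalize_ablkcipher_request().
 */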
 634
 635static int omap_aes_gcm_cra_init(struct crypto_aead *tfm)
 636{
 637        struct omap_aes_dev *dd = NULL;
 638        struct omap_aes_ctx *ctx = crypto_aead_ctx(tfm);
 639        int err;
 640
 641        /* Find an AES device; currently just picks the first one registered */
 642        spin_lock_bh(&list_lock);
 643        dd = list_first_entry_or_null(&dev_list, struct omap_aes_dev, list);
 644        spin_unlock_bh(&list_lock);
 645        if (!dd)
 646                return -ENODEV;
 647
 648        err = pm_runtime_get_sync(dd->dev);
 649        if (err < 0) {
 650                dev_err(dd->dev, "%s: failed to get_sync(%d)\n",
 651                        __func__, err);
 652                return err;
 653        }
 654
 655        tfm->reqsize = sizeof(struct omap_aes_reqctx);
 656        ctx->ctr = crypto_alloc_skcipher("ecb(aes)", 0, 0);
 657        if (IS_ERR(ctx->ctr)) {
 658                pr_warn("could not load aes driver for encrypting IV\n");
 659                return PTR_ERR(ctx->ctr);
 660        }
 661
 662        return 0;
 663}
 664
 665static void omap_aes_cra_exit(struct crypto_tfm *tfm)
 666{
 667        struct omap_aes_ctx *ctx = crypto_tfm_ctx(tfm);
 668
 669        if (ctx->fallback)
 670                crypto_free_skcipher(ctx->fallback);
 671
 672        ctx->fallback = NULL;
 673}
 674
 675static void omap_aes_gcm_cra_exit(struct crypto_aead *tfm)
 676{
 677        struct omap_aes_ctx *ctx = crypto_aead_ctx(tfm);
 678
 679        omap_aes_cra_exit(crypto_aead_tfm(tfm));
 680
 681        if (ctx->ctr)
 682                crypto_free_skcipher(ctx->ctr);
 683}
 684
 685/* ********************** ALGS ************************************ */
 686
 687static struct crypto_alg algs_ecb_cbc[] = {
 688{
 689        .cra_name               = "ecb(aes)",
 690        .cra_driver_name        = "ecb-aes-omap",
 691        .cra_priority           = 300,
 692        .cra_flags              = CRYPTO_ALG_TYPE_ABLKCIPHER |
 693                                  CRYPTO_ALG_KERN_DRIVER_ONLY |
 694                                  CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK,
 695        .cra_blocksize          = AES_BLOCK_SIZE,
 696        .cra_ctxsize            = sizeof(struct omap_aes_ctx),
 697        .cra_alignmask          = 0,
 698        .cra_type               = &crypto_ablkcipher_type,
 699        .cra_module             = THIS_MODULE,
 700        .cra_init               = omap_aes_cra_init,
 701        .cra_exit               = omap_aes_cra_exit,
 702        .cra_u.ablkcipher = {
 703                .min_keysize    = AES_MIN_KEY_SIZE,
 704                .max_keysize    = AES_MAX_KEY_SIZE,
 705                .setkey         = omap_aes_setkey,
 706                .encrypt        = omap_aes_ecb_encrypt,
 707                .decrypt        = omap_aes_ecb_decrypt,
 708        }
 709},
 710{
 711        .cra_name               = "cbc(aes)",
 712        .cra_driver_name        = "cbc-aes-omap",
 713        .cra_priority           = 300,
 714        .cra_flags              = CRYPTO_ALG_TYPE_ABLKCIPHER |
 715                                  CRYPTO_ALG_KERN_DRIVER_ONLY |
 716                                  CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK,
 717        .cra_blocksize          = AES_BLOCK_SIZE,
 718        .cra_ctxsize            = sizeof(struct omap_aes_ctx),
 719        .cra_alignmask          = 0,
 720        .cra_type               = &crypto_ablkcipher_type,
 721        .cra_module             = THIS_MODULE,
 722        .cra_init               = omap_aes_cra_init,
 723        .cra_exit               = omap_aes_cra_exit,
 724        .cra_u.ablkcipher = {
 725                .min_keysize    = AES_MIN_KEY_SIZE,
 726                .max_keysize    = AES_MAX_KEY_SIZE,
 727                .ivsize         = AES_BLOCK_SIZE,
 728                .setkey         = omap_aes_setkey,
 729                .encrypt        = omap_aes_cbc_encrypt,
 730                .decrypt        = omap_aes_cbc_decrypt,
 731        }
 732}
 733};
 734
 735static struct crypto_alg algs_ctr[] = {
 736{
 737        .cra_name               = "ctr(aes)",
 738        .cra_driver_name        = "ctr-aes-omap",
 739        .cra_priority           = 300,
 740        .cra_flags              = CRYPTO_ALG_TYPE_ABLKCIPHER |
 741                                  CRYPTO_ALG_KERN_DRIVER_ONLY |
 742                                  CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK,
 743        .cra_blocksize          = AES_BLOCK_SIZE,
 744        .cra_ctxsize            = sizeof(struct omap_aes_ctx),
 745        .cra_alignmask          = 0,
 746        .cra_type               = &crypto_ablkcipher_type,
 747        .cra_module             = THIS_MODULE,
 748        .cra_init               = omap_aes_cra_init,
 749        .cra_exit               = omap_aes_cra_exit,
 750        .cra_u.ablkcipher = {
 751                .min_keysize    = AES_MIN_KEY_SIZE,
 752                .max_keysize    = AES_MAX_KEY_SIZE,
 753                .geniv          = "eseqiv",
 754                .ivsize         = AES_BLOCK_SIZE,
 755                .setkey         = omap_aes_setkey,
 756                .encrypt        = omap_aes_ctr_encrypt,
 757                .decrypt        = omap_aes_ctr_decrypt,
 758        }
 759},
 760};
 761
 762static struct omap_aes_algs_info omap_aes_algs_info_ecb_cbc[] = {
 763        {
 764                .algs_list      = algs_ecb_cbc,
 765                .size           = ARRAY_SIZE(algs_ecb_cbc),
 766        },
 767};
 768
 769static struct aead_alg algs_aead_gcm[] = {
 770{
 771        .base = {
 772                .cra_name               = "gcm(aes)",
 773                .cra_driver_name        = "gcm-aes-omap",
 774                .cra_priority           = 300,
 775                .cra_flags              = CRYPTO_ALG_ASYNC |
 776                                          CRYPTO_ALG_KERN_DRIVER_ONLY,
 777                .cra_blocksize          = 1,
 778                .cra_ctxsize            = sizeof(struct omap_aes_ctx),
 779                .cra_alignmask          = 0xf,
 780                .cra_module             = THIS_MODULE,
 781        },
 782        .init           = omap_aes_gcm_cra_init,
 783        .exit           = omap_aes_gcm_cra_exit,
 784        .ivsize         = GCM_AES_IV_SIZE,
 785        .maxauthsize    = AES_BLOCK_SIZE,
 786        .setkey         = omap_aes_gcm_setkey,
 787        .encrypt        = omap_aes_gcm_encrypt,
 788        .decrypt        = omap_aes_gcm_decrypt,
 789},
 790{
 791        .base = {
 792                .cra_name               = "rfc4106(gcm(aes))",
 793                .cra_driver_name        = "rfc4106-gcm-aes-omap",
 794                .cra_priority           = 300,
 795                .cra_flags              = CRYPTO_ALG_ASYNC |
 796                                          CRYPTO_ALG_KERN_DRIVER_ONLY,
 797                .cra_blocksize          = 1,
 798                .cra_ctxsize            = sizeof(struct omap_aes_ctx),
 799                .cra_alignmask          = 0xf,
 800                .cra_module             = THIS_MODULE,
 801        },
 802        .init           = omap_aes_gcm_cra_init,
 803        .exit           = omap_aes_gcm_cra_exit,
 804        .maxauthsize    = AES_BLOCK_SIZE,
 805        .ivsize         = GCM_RFC4106_IV_SIZE,
 806        .setkey         = omap_aes_4106gcm_setkey,
 807        .encrypt        = omap_aes_4106gcm_encrypt,
 808        .decrypt        = omap_aes_4106gcm_decrypt,
 809},
 810};
 811
 812static struct omap_aes_aead_algs omap_aes_aead_info = {
 813        .algs_list      =       algs_aead_gcm,
 814        .size           =       ARRAY_SIZE(algs_aead_gcm),
 815};
 816
 817static const struct omap_aes_pdata omap_aes_pdata_omap2 = {
 818        .algs_info      = omap_aes_algs_info_ecb_cbc,
 819        .algs_info_size = ARRAY_SIZE(omap_aes_algs_info_ecb_cbc),
 820        .trigger        = omap_aes_dma_trigger_omap2,
 821        .key_ofs        = 0x1c,
 822        .iv_ofs         = 0x20,
 823        .ctrl_ofs       = 0x30,
 824        .data_ofs       = 0x34,
 825        .rev_ofs        = 0x44,
 826        .mask_ofs       = 0x48,
 827        .dma_enable_in  = BIT(2),
 828        .dma_enable_out = BIT(3),
 829        .dma_start      = BIT(5),
 830        .major_mask     = 0xf0,
 831        .major_shift    = 4,
 832        .minor_mask     = 0x0f,
 833        .minor_shift    = 0,
 834};
 835
 836#ifdef CONFIG_OF
 837static struct omap_aes_algs_info omap_aes_algs_info_ecb_cbc_ctr[] = {
 838        {
 839                .algs_list      = algs_ecb_cbc,
 840                .size           = ARRAY_SIZE(algs_ecb_cbc),
 841        },
 842        {
 843                .algs_list      = algs_ctr,
 844                .size           = ARRAY_SIZE(algs_ctr),
 845        },
 846};
 847
 848static const struct omap_aes_pdata omap_aes_pdata_omap3 = {
 849        .algs_info      = omap_aes_algs_info_ecb_cbc_ctr,
 850        .algs_info_size = ARRAY_SIZE(omap_aes_algs_info_ecb_cbc_ctr),
 851        .trigger        = omap_aes_dma_trigger_omap2,
 852        .key_ofs        = 0x1c,
 853        .iv_ofs         = 0x20,
 854        .ctrl_ofs       = 0x30,
 855        .data_ofs       = 0x34,
 856        .rev_ofs        = 0x44,
 857        .mask_ofs       = 0x48,
 858        .dma_enable_in  = BIT(2),
 859        .dma_enable_out = BIT(3),
 860        .dma_start      = BIT(5),
 861        .major_mask     = 0xf0,
 862        .major_shift    = 4,
 863        .minor_mask     = 0x0f,
 864        .minor_shift    = 0,
 865};
 866
 867static const struct omap_aes_pdata omap_aes_pdata_omap4 = {
 868        .algs_info      = omap_aes_algs_info_ecb_cbc_ctr,
 869        .algs_info_size = ARRAY_SIZE(omap_aes_algs_info_ecb_cbc_ctr),
 870        .aead_algs_info = &omap_aes_aead_info,
 871        .trigger        = omap_aes_dma_trigger_omap4,
 872        .key_ofs        = 0x3c,
 873        .iv_ofs         = 0x40,
 874        .ctrl_ofs       = 0x50,
 875        .data_ofs       = 0x60,
 876        .rev_ofs        = 0x80,
 877        .mask_ofs       = 0x84,
 878        .irq_status_ofs = 0x8c,
 879        .irq_enable_ofs = 0x90,
 880        .dma_enable_in  = BIT(5),
 881        .dma_enable_out = BIT(6),
 882        .major_mask     = 0x0700,
 883        .major_shift    = 8,
 884        .minor_mask     = 0x003f,
 885        .minor_shift    = 0,
 886};
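
/*
 * These pdata tables double as a register map: the AES_REG_*(dd) macros
 * are expected to add the per-SoC offsets above to dd->io_base, which
 * is how one driver body serves both the OMAP2/3 layout (CTRL at 0x30,
 * no IRQ registers) and the OMAP4+ layout (CTRL at 0x50, with IRQ
 * status/enable registers for PIO mode).
 */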
 887
 888static irqreturn_t omap_aes_irq(int irq, void *dev_id)
 889{
 890        struct omap_aes_dev *dd = dev_id;
 891        u32 status, i;
 892        u32 *src, *dst;
 893
 894        status = omap_aes_read(dd, AES_REG_IRQ_STATUS(dd));
 895        if (status & AES_REG_IRQ_DATA_IN) {
 896                omap_aes_write(dd, AES_REG_IRQ_ENABLE(dd), 0x0);
 897
 898                BUG_ON(!dd->in_sg);
 899
 900                BUG_ON(_calc_walked(in) > dd->in_sg->length);
 901
 902                src = sg_virt(dd->in_sg) + _calc_walked(in);
 903
 904                for (i = 0; i < AES_BLOCK_WORDS; i++) {
 905                        omap_aes_write(dd, AES_REG_DATA_N(dd, i), *src);
 906
 907                        scatterwalk_advance(&dd->in_walk, 4);
 908                        if (dd->in_sg->length == _calc_walked(in)) {
 909                                dd->in_sg = sg_next(dd->in_sg);
 910                                if (dd->in_sg) {
 911                                        scatterwalk_start(&dd->in_walk,
 912                                                          dd->in_sg);
 913                                        src = sg_virt(dd->in_sg) +
 914                                              _calc_walked(in);
 915                                }
 916                        } else {
 917                                src++;
 918                        }
 919                }
 920
 921                /* Clear IRQ status */
 922                status &= ~AES_REG_IRQ_DATA_IN;
 923                omap_aes_write(dd, AES_REG_IRQ_STATUS(dd), status);
 924
 925                /* Enable DATA_OUT interrupt */
 926                omap_aes_write(dd, AES_REG_IRQ_ENABLE(dd), 0x4);
 927
 928        } else if (status & AES_REG_IRQ_DATA_OUT) {
 929                omap_aes_write(dd, AES_REG_IRQ_ENABLE(dd), 0x0);
 930
 931                BUG_ON(!dd->out_sg);
 932
 933                BUG_ON(_calc_walked(out) > dd->out_sg->length);
 934
 935                dst = sg_virt(dd->out_sg) + _calc_walked(out);
 936
 937                for (i = 0; i < AES_BLOCK_WORDS; i++) {
 938                        *dst = omap_aes_read(dd, AES_REG_DATA_N(dd, i));
 939                        scatterwalk_advance(&dd->out_walk, 4);
 940                        if (dd->out_sg->length == _calc_walked(out)) {
 941                                dd->out_sg = sg_next(dd->out_sg);
 942                                if (dd->out_sg) {
 943                                        scatterwalk_start(&dd->out_walk,
 944                                                          dd->out_sg);
 945                                        dst = sg_virt(dd->out_sg) +
 946                                              _calc_walked(out);
 947                                }
 948                        } else {
 949                                dst++;
 950                        }
 951                }
 952
 953                dd->total -= min_t(size_t, AES_BLOCK_SIZE, dd->total);
 954
 955                /* Clear IRQ status */
 956                status &= ~AES_REG_IRQ_DATA_OUT;
 957                omap_aes_write(dd, AES_REG_IRQ_STATUS(dd), status);
 958
 959                if (!dd->total)
 960                        /* All bytes read! */
 961                        tasklet_schedule(&dd->done_task);
 962                else
 963                        /* Enable DATA_IN interrupt for next block */
 964                        omap_aes_write(dd, AES_REG_IRQ_ENABLE(dd), 0x2);
 965        }
 966
 967        return IRQ_HANDLED;
 968}
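
/*
 * PIO mode ping-pongs between the two interrupt sources above: DATA_IN
 * fires when the engine can accept the next four-word block, which the
 * handler feeds before enabling DATA_OUT (0x4); DATA_OUT fires when a
 * result block is ready, which the handler drains before re-enabling
 * DATA_IN (0x2), until dd->total reaches zero and the done tasklet
 * completes the request.
 */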
 969
 970static const struct of_device_id omap_aes_of_match[] = {
 971        {
 972                .compatible     = "ti,omap2-aes",
 973                .data           = &omap_aes_pdata_omap2,
 974        },
 975        {
 976                .compatible     = "ti,omap3-aes",
 977                .data           = &omap_aes_pdata_omap3,
 978        },
 979        {
 980                .compatible     = "ti,omap4-aes",
 981                .data           = &omap_aes_pdata_omap4,
 982        },
 983        {},
 984};
 985MODULE_DEVICE_TABLE(of, omap_aes_of_match);
 986
 987static int omap_aes_get_res_of(struct omap_aes_dev *dd,
 988                struct device *dev, struct resource *res)
 989{
 990        struct device_node *node = dev->of_node;
 991        int err = 0;
 992
 993        dd->pdata = of_device_get_match_data(dev);
 994        if (!dd->pdata) {
 995                dev_err(dev, "no compatible OF match\n");
 996                err = -EINVAL;
 997                goto err;
 998        }
 999
1000        err = of_address_to_resource(node, 0, res);
1001        if (err < 0) {
1002                dev_err(dev, "can't translate OF node address\n");
1003                err = -EINVAL;
1004                goto err;
1005        }
1006
1007err:
1008        return err;
1009}
1010#else
1011static const struct of_device_id omap_aes_of_match[] = {
1012        {},
1013};
1014
1015static int omap_aes_get_res_of(struct omap_aes_dev *dd,
1016                struct device *dev, struct resource *res)
1017{
1018        return -EINVAL;
1019}
1020#endif
1021
1022static int omap_aes_get_res_pdev(struct omap_aes_dev *dd,
1023                struct platform_device *pdev, struct resource *res)
1024{
1025        struct device *dev = &pdev->dev;
1026        struct resource *r;
1027        int err = 0;
1028
1029        /* Get the base address */
1030        r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1031        if (!r) {
1032                dev_err(dev, "no MEM resource info\n");
1033                err = -ENODEV;
1034                goto err;
1035        }
1036        memcpy(res, r, sizeof(*res));
1037
1038        /* Only OMAP2/3 can be non-DT */
1039        dd->pdata = &omap_aes_pdata_omap2;
1040
1041err:
1042        return err;
1043}
1044
1045static ssize_t fallback_show(struct device *dev, struct device_attribute *attr,
1046                             char *buf)
1047{
1048        return sprintf(buf, "%d\n", aes_fallback_sz);
1049}
1050
1051static ssize_t fallback_store(struct device *dev, struct device_attribute *attr,
1052                              const char *buf, size_t size)
1053{
1054        ssize_t status;
1055        long value;
1056
1057        status = kstrtol(buf, 0, &value);
1058        if (status)
1059                return status;
1060
1061        /* the HW accelerator only works with buffers of at least 9 bytes */
1062        if (value < 9) {
1063                dev_err(dev, "minimum fallback size 9\n");
1064                return -EINVAL;
1065        }
1066
1067        aes_fallback_sz = value;
1068
1069        return size;
1070}
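
/*
 * From user space the threshold can be inspected and tuned through
 * sysfs, e.g. (device path elided, it depends on the platform):
 *
 *	# cat /sys/devices/.../fallback
 *	200
 *	# echo 16 > /sys/devices/.../fallback
 */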
1071
1072static ssize_t queue_len_show(struct device *dev, struct device_attribute *attr,
1073                              char *buf)
1074{
1075        struct omap_aes_dev *dd = dev_get_drvdata(dev);
1076
1077        return sprintf(buf, "%d\n", dd->engine->queue.max_qlen);
1078}
1079
1080static ssize_t queue_len_store(struct device *dev,
1081                               struct device_attribute *attr, const char *buf,
1082                               size_t size)
1083{
1084        struct omap_aes_dev *dd;
1085        ssize_t status;
1086        long value;
1087        unsigned long flags;
1088
1089        status = kstrtol(buf, 0, &value);
1090        if (status)
1091                return status;
1092
1093        if (value < 1)
1094                return -EINVAL;
1095
1096        /*
1097         * Changing the queue size on the fly is safe: if the new size is
1098         * smaller than the current size, the queue simply stops accepting
1099         * new entries until it has shrunk enough.
1100         */
1101        spin_lock_bh(&list_lock);
1102        list_for_each_entry(dd, &dev_list, list) {
1103                spin_lock_irqsave(&dd->lock, flags);
1104                dd->engine->queue.max_qlen = value;
1105                dd->aead_queue.base.max_qlen = value;
1106                spin_unlock_irqrestore(&dd->lock, flags);
1107        }
1108        spin_unlock_bh(&list_lock);
1109
1110        return size;
1111}
1112
1113static DEVICE_ATTR_RW(queue_len);
1114static DEVICE_ATTR_RW(fallback);
1115
1116static struct attribute *omap_aes_attrs[] = {
1117        &dev_attr_queue_len.attr,
1118        &dev_attr_fallback.attr,
1119        NULL,
1120};
1121
1122static struct attribute_group omap_aes_attr_group = {
1123        .attrs = omap_aes_attrs,
1124};
1125
1126static int omap_aes_probe(struct platform_device *pdev)
1127{
1128        struct device *dev = &pdev->dev;
1129        struct omap_aes_dev *dd;
1130        struct crypto_alg *algp;
1131        struct aead_alg *aalg;
1132        struct resource res;
1133        int err = -ENOMEM, i, j, irq = -1;
1134        u32 reg;
1135
1136        dd = devm_kzalloc(dev, sizeof(struct omap_aes_dev), GFP_KERNEL);
1137        if (dd == NULL) {
1138                dev_err(dev, "unable to alloc data struct.\n");
1139                goto err_data;
1140        }
1141        dd->dev = dev;
1142        platform_set_drvdata(pdev, dd);
1143
1144        aead_init_queue(&dd->aead_queue, OMAP_AES_QUEUE_LENGTH);
1145
1146        err = (dev->of_node) ? omap_aes_get_res_of(dd, dev, &res) :
1147                               omap_aes_get_res_pdev(dd, pdev, &res);
1148        if (err)
1149                goto err_res;
1150
1151        dd->io_base = devm_ioremap_resource(dev, &res);
1152        if (IS_ERR(dd->io_base)) {
1153                err = PTR_ERR(dd->io_base);
1154                goto err_res;
1155        }
1156        dd->phys_base = res.start;
1157
1158        pm_runtime_use_autosuspend(dev);
1159        pm_runtime_set_autosuspend_delay(dev, DEFAULT_AUTOSUSPEND_DELAY);
1160
1161        pm_runtime_enable(dev);
1162        err = pm_runtime_get_sync(dev);
1163        if (err < 0) {
1164                dev_err(dev, "%s: failed to get_sync(%d)\n",
1165                        __func__, err);
1166                goto err_res;
1167        }
1168
1169        omap_aes_dma_stop(dd);
1170
1171        reg = omap_aes_read(dd, AES_REG_REV(dd));
1172
1173        pm_runtime_put_sync(dev);
1174
1175        dev_info(dev, "OMAP AES hw accel rev: %u.%u\n",
1176                 (reg & dd->pdata->major_mask) >> dd->pdata->major_shift,
1177                 (reg & dd->pdata->minor_mask) >> dd->pdata->minor_shift);
1178
1179        tasklet_init(&dd->done_task, omap_aes_done_task, (unsigned long)dd);
1180
1181        err = omap_aes_dma_init(dd);
1182        if (err == -EPROBE_DEFER) {
1183                goto err_irq;
1184        } else if (err && AES_REG_IRQ_STATUS(dd) && AES_REG_IRQ_ENABLE(dd)) {
1185                dd->pio_only = 1;
1186
1187                irq = platform_get_irq(pdev, 0);
1188                if (irq < 0) {
1189                        dev_err(dev, "can't get IRQ resource\n");
1190                        err = irq;
1191                        goto err_irq;
1192                }
1193
1194                err = devm_request_irq(dev, irq, omap_aes_irq, 0,
1195                                dev_name(dev), dd);
1196                if (err) {
1197                        dev_err(dev, "Unable to grab omap-aes IRQ\n");
1198                        goto err_irq;
1199                }
1200        }
1201
1202        spin_lock_init(&dd->lock);
1203
1204        INIT_LIST_HEAD(&dd->list);
1205        spin_lock(&list_lock);
1206        list_add_tail(&dd->list, &dev_list);
1207        spin_unlock(&list_lock);
1208
1209        /* Initialize crypto engine */
1210        dd->engine = crypto_engine_alloc_init(dev, 1);
1211        if (!dd->engine) {
1212                err = -ENOMEM;
1213                goto err_engine;
1214        }
1215
1216        err = crypto_engine_start(dd->engine);
1217        if (err)
1218                goto err_engine;
1219
1220        for (i = 0; i < dd->pdata->algs_info_size; i++) {
1221                if (!dd->pdata->algs_info[i].registered) {
1222                        for (j = 0; j < dd->pdata->algs_info[i].size; j++) {
1223                                algp = &dd->pdata->algs_info[i].algs_list[j];
1224
1225                                pr_debug("reg alg: %s\n", algp->cra_name);
1226                                INIT_LIST_HEAD(&algp->cra_list);
1227
1228                                err = crypto_register_alg(algp);
1229                                if (err)
1230                                        goto err_algs;
1231
1232                                dd->pdata->algs_info[i].registered++;
1233                        }
1234                }
1235        }
1236
1237        if (dd->pdata->aead_algs_info &&
1238            !dd->pdata->aead_algs_info->registered) {
1239                for (i = 0; i < dd->pdata->aead_algs_info->size; i++) {
1240                        aalg = &dd->pdata->aead_algs_info->algs_list[i];
1241                        algp = &aalg->base;
1242
1243                        pr_debug("reg alg: %s\n", algp->cra_name);
1244                        INIT_LIST_HEAD(&algp->cra_list);
1245
1246                        err = crypto_register_aead(aalg);
1247                        if (err)
1248                                goto err_aead_algs;
1249
1250                        dd->pdata->aead_algs_info->registered++;
1251                }
1252        }
1253
1254        err = sysfs_create_group(&dev->kobj, &omap_aes_attr_group);
1255        if (err) {
1256                dev_err(dev, "could not create sysfs device attrs\n");
1257                goto err_aead_algs;
1258        }
1259
1260        return 0;
1261err_aead_algs:
1262        for (i = dd->pdata->aead_algs_info->registered - 1; i >= 0; i--) {
1263                aalg = &dd->pdata->aead_algs_info->algs_list[i];
1264                crypto_unregister_aead(aalg);
1265        }
1266err_algs:
1267        for (i = dd->pdata->algs_info_size - 1; i >= 0; i--)
1268                for (j = dd->pdata->algs_info[i].registered - 1; j >= 0; j--)
1269                        crypto_unregister_alg(
1270                                        &dd->pdata->algs_info[i].algs_list[j]);
1271
1272err_engine:
1273        if (dd->engine)
1274                crypto_engine_exit(dd->engine);
1275
1276        omap_aes_dma_cleanup(dd);
1277err_irq:
1278        tasklet_kill(&dd->done_task);
1279        pm_runtime_disable(dev);
1280err_res:
1281        dd = NULL;
1282err_data:
1283        dev_err(dev, "initialization failed.\n");
1284        return err;
1285}
1286
1287static int omap_aes_remove(struct platform_device *pdev)
1288{
1289        struct omap_aes_dev *dd = platform_get_drvdata(pdev);
1290        struct aead_alg *aalg;
1291        int i, j;
1292
1293        if (!dd)
1294                return -ENODEV;
1295
1296        spin_lock(&list_lock);
1297        list_del(&dd->list);
1298        spin_unlock(&list_lock);
1299
1300        for (i = dd->pdata->algs_info_size - 1; i >= 0; i--)
1301                for (j = dd->pdata->algs_info[i].registered - 1; j >= 0; j--)
1302                        crypto_unregister_alg(
1303                                        &dd->pdata->algs_info[i].algs_list[j]);
1304
1305        if (dd->pdata->aead_algs_info)
1306                for (i = dd->pdata->aead_algs_info->registered - 1; i >= 0; i--) {
1307                        aalg = &dd->pdata->aead_algs_info->algs_list[i];
1308                        crypto_unregister_aead(aalg);
1309                }
1310        crypto_engine_exit(dd->engine);
1311
1312        tasklet_kill(&dd->done_task);
1313        omap_aes_dma_cleanup(dd);
1314        pm_runtime_disable(dd->dev);
1315        dd = NULL;
1316
1317        return 0;
1318}
1319
1320#ifdef CONFIG_PM_SLEEP
1321static int omap_aes_suspend(struct device *dev)
1322{
1323        pm_runtime_put_sync(dev);
1324        return 0;
1325}
1326
1327static int omap_aes_resume(struct device *dev)
1328{
1329        pm_runtime_get_sync(dev);
1330        return 0;
1331}
1332#endif
1333
1334static SIMPLE_DEV_PM_OPS(omap_aes_pm_ops, omap_aes_suspend, omap_aes_resume);
1335
1336static struct platform_driver omap_aes_driver = {
1337        .probe  = omap_aes_probe,
1338        .remove = omap_aes_remove,
1339        .driver = {
1340                .name   = "omap-aes",
1341                .pm     = &omap_aes_pm_ops,
1342                .of_match_table = omap_aes_of_match,
1343        },
1344};
1345
1346module_platform_driver(omap_aes_driver);
1347
1348MODULE_DESCRIPTION("OMAP AES hw acceleration support.");
1349MODULE_LICENSE("GPL v2");
1350MODULE_AUTHOR("Dmitry Kasatkin");
1351
1352