linux/drivers/crypto/atmel-aes.c
   1/*
   2 * Cryptographic API.
   3 *
   4 * Support for ATMEL AES HW acceleration.
   5 *
   6 * Copyright (c) 2012 Eukréa Electromatique - ATMEL
   7 * Author: Nicolas Royer <nicolas@eukrea.com>
   8 *
   9 * This program is free software; you can redistribute it and/or modify
  10 * it under the terms of the GNU General Public License version 2 as published
  11 * by the Free Software Foundation.
  12 *
  13 * Some ideas are from omap-aes.c driver.
  14 */
  15
  16
  17#include <linux/kernel.h>
  18#include <linux/module.h>
  19#include <linux/slab.h>
  20#include <linux/err.h>
  21#include <linux/clk.h>
  22#include <linux/io.h>
  23#include <linux/hw_random.h>
  24#include <linux/platform_device.h>
  25
  26#include <linux/device.h>
  27#include <linux/init.h>
  28#include <linux/errno.h>
  29#include <linux/interrupt.h>
  30#include <linux/irq.h>
  31#include <linux/scatterlist.h>
  32#include <linux/dma-mapping.h>
  33#include <linux/of_device.h>
  34#include <linux/delay.h>
  35#include <linux/crypto.h>
  36#include <crypto/scatterwalk.h>
  37#include <crypto/algapi.h>
  38#include <crypto/aes.h>
  39#include <crypto/internal/aead.h>
  40#include <linux/platform_data/crypto-atmel.h>
  41#include <dt-bindings/dma/at91.h>
  42#include "atmel-aes-regs.h"
  43
  44#define ATMEL_AES_PRIORITY      300
  45
  46#define ATMEL_AES_BUFFER_ORDER  2
  47#define ATMEL_AES_BUFFER_SIZE   (PAGE_SIZE << ATMEL_AES_BUFFER_ORDER)
  48
  49#define CFB8_BLOCK_SIZE         1
  50#define CFB16_BLOCK_SIZE        2
  51#define CFB32_BLOCK_SIZE        4
  52#define CFB64_BLOCK_SIZE        8
  53
  54#define SIZE_IN_WORDS(x)        ((x) >> 2)
  55
  56/* AES flags */
  57/* Reserve bits [18:16] [14:12] [1:0] for mode (same as for AES_MR) */
  58#define AES_FLAGS_ENCRYPT       AES_MR_CYPHER_ENC
  59#define AES_FLAGS_GTAGEN        AES_MR_GTAGEN
  60#define AES_FLAGS_OPMODE_MASK   (AES_MR_OPMOD_MASK | AES_MR_CFBS_MASK)
  61#define AES_FLAGS_ECB           AES_MR_OPMOD_ECB
  62#define AES_FLAGS_CBC           AES_MR_OPMOD_CBC
  63#define AES_FLAGS_OFB           AES_MR_OPMOD_OFB
  64#define AES_FLAGS_CFB128        (AES_MR_OPMOD_CFB | AES_MR_CFBS_128b)
  65#define AES_FLAGS_CFB64         (AES_MR_OPMOD_CFB | AES_MR_CFBS_64b)
  66#define AES_FLAGS_CFB32         (AES_MR_OPMOD_CFB | AES_MR_CFBS_32b)
  67#define AES_FLAGS_CFB16         (AES_MR_OPMOD_CFB | AES_MR_CFBS_16b)
  68#define AES_FLAGS_CFB8          (AES_MR_OPMOD_CFB | AES_MR_CFBS_8b)
  69#define AES_FLAGS_CTR           AES_MR_OPMOD_CTR
  70#define AES_FLAGS_GCM           AES_MR_OPMOD_GCM
  71
  72#define AES_FLAGS_MODE_MASK     (AES_FLAGS_OPMODE_MASK |        \
  73                                 AES_FLAGS_ENCRYPT |            \
  74                                 AES_FLAGS_GTAGEN)
  75
  76#define AES_FLAGS_INIT          BIT(2)
  77#define AES_FLAGS_BUSY          BIT(3)
  78#define AES_FLAGS_DUMP_REG      BIT(4)
  79
  80#define AES_FLAGS_PERSISTENT    (AES_FLAGS_INIT | AES_FLAGS_BUSY)
  81
  82#define ATMEL_AES_QUEUE_LENGTH  50
  83
  84#define ATMEL_AES_DMA_THRESHOLD         256
  85
  86
  87struct atmel_aes_caps {
  88        bool                    has_dualbuff;
  89        bool                    has_cfb64;
  90        bool                    has_ctr32;
  91        bool                    has_gcm;
  92        u32                     max_burst_size;
  93};
  94
  95struct atmel_aes_dev;
  96
  97
  98typedef int (*atmel_aes_fn_t)(struct atmel_aes_dev *);
  99
 100
 101struct atmel_aes_base_ctx {
 102        struct atmel_aes_dev    *dd;
 103        atmel_aes_fn_t          start;
 104        int                     keylen;
 105        u32                     key[AES_KEYSIZE_256 / sizeof(u32)];
 106        u16                     block_size;
 107};
 108
 109struct atmel_aes_ctx {
 110        struct atmel_aes_base_ctx       base;
 111};
 112
 113struct atmel_aes_ctr_ctx {
 114        struct atmel_aes_base_ctx       base;
 115
 116        u32                     iv[AES_BLOCK_SIZE / sizeof(u32)];
 117        size_t                  offset;
 118        struct scatterlist      src[2];
 119        struct scatterlist      dst[2];
 120};
 121
 122struct atmel_aes_gcm_ctx {
 123        struct atmel_aes_base_ctx       base;
 124
 125        struct scatterlist      src[2];
 126        struct scatterlist      dst[2];
 127
 128        u32                     j0[AES_BLOCK_SIZE / sizeof(u32)];
 129        u32                     tag[AES_BLOCK_SIZE / sizeof(u32)];
 130        u32                     ghash[AES_BLOCK_SIZE / sizeof(u32)];
 131        size_t                  textlen;
 132
 133        const u32               *ghash_in;
 134        u32                     *ghash_out;
 135        atmel_aes_fn_t          ghash_resume;
 136};
 137
 138struct atmel_aes_reqctx {
 139        unsigned long           mode;
 140};
 141
 142struct atmel_aes_dma {
 143        struct dma_chan         *chan;
 144        struct scatterlist      *sg;
 145        int                     nents;
 146        unsigned int            remainder;
 147        unsigned int            sg_len;
 148};
 149
 150struct atmel_aes_dev {
 151        struct list_head        list;
 152        unsigned long           phys_base;
 153        void __iomem            *io_base;
 154
 155        struct crypto_async_request     *areq;
 156        struct atmel_aes_base_ctx       *ctx;
 157
 158        bool                    is_async;
 159        atmel_aes_fn_t          resume;
 160        atmel_aes_fn_t          cpu_transfer_complete;
 161
 162        struct device           *dev;
 163        struct clk              *iclk;
 164        int                     irq;
 165
 166        unsigned long           flags;
 167
 168        spinlock_t              lock;
 169        struct crypto_queue     queue;
 170
 171        struct tasklet_struct   done_task;
 172        struct tasklet_struct   queue_task;
 173
 174        size_t                  total;
 175        size_t                  datalen;
 176        u32                     *data;
 177
 178        struct atmel_aes_dma    src;
 179        struct atmel_aes_dma    dst;
 180
 181        size_t                  buflen;
 182        void                    *buf;
 183        struct scatterlist      aligned_sg;
 184        struct scatterlist      *real_dst;
 185
 186        struct atmel_aes_caps   caps;
 187
 188        u32                     hw_version;
 189};
 190
 191struct atmel_aes_drv {
 192        struct list_head        dev_list;
 193        spinlock_t              lock;
 194};
 195
 196static struct atmel_aes_drv atmel_aes = {
 197        .dev_list = LIST_HEAD_INIT(atmel_aes.dev_list),
 198        .lock = __SPIN_LOCK_UNLOCKED(atmel_aes.lock),
 199};
 200
 201#ifdef VERBOSE_DEBUG
 202static const char *atmel_aes_reg_name(u32 offset, char *tmp, size_t sz)
 203{
 204        switch (offset) {
 205        case AES_CR:
 206                return "CR";
 207
 208        case AES_MR:
 209                return "MR";
 210
 211        case AES_ISR:
 212                return "ISR";
 213
 214        case AES_IMR:
 215                return "IMR";
 216
 217        case AES_IER:
 218                return "IER";
 219
 220        case AES_IDR:
 221                return "IDR";
 222
 223        case AES_KEYWR(0):
 224        case AES_KEYWR(1):
 225        case AES_KEYWR(2):
 226        case AES_KEYWR(3):
 227        case AES_KEYWR(4):
 228        case AES_KEYWR(5):
 229        case AES_KEYWR(6):
 230        case AES_KEYWR(7):
 231                snprintf(tmp, sz, "KEYWR[%u]", (offset - AES_KEYWR(0)) >> 2);
 232                break;
 233
 234        case AES_IDATAR(0):
 235        case AES_IDATAR(1):
 236        case AES_IDATAR(2):
 237        case AES_IDATAR(3):
 238                snprintf(tmp, sz, "IDATAR[%u]", (offset - AES_IDATAR(0)) >> 2);
 239                break;
 240
 241        case AES_ODATAR(0):
 242        case AES_ODATAR(1):
 243        case AES_ODATAR(2):
 244        case AES_ODATAR(3):
 245                snprintf(tmp, sz, "ODATAR[%u]", (offset - AES_ODATAR(0)) >> 2);
 246                break;
 247
 248        case AES_IVR(0):
 249        case AES_IVR(1):
 250        case AES_IVR(2):
 251        case AES_IVR(3):
 252                snprintf(tmp, sz, "IVR[%u]", (offset - AES_IVR(0)) >> 2);
 253                break;
 254
 255        case AES_AADLENR:
 256                return "AADLENR";
 257
 258        case AES_CLENR:
 259                return "CLENR";
 260
 261        case AES_GHASHR(0):
 262        case AES_GHASHR(1):
 263        case AES_GHASHR(2):
 264        case AES_GHASHR(3):
 265                snprintf(tmp, sz, "GHASHR[%u]", (offset - AES_GHASHR(0)) >> 2);
 266                break;
 267
 268        case AES_TAGR(0):
 269        case AES_TAGR(1):
 270        case AES_TAGR(2):
 271        case AES_TAGR(3):
 272                snprintf(tmp, sz, "TAGR[%u]", (offset - AES_TAGR(0)) >> 2);
 273                break;
 274
 275        case AES_CTRR:
 276                return "CTRR";
 277
 278        case AES_GCMHR(0):
 279        case AES_GCMHR(1):
 280        case AES_GCMHR(2):
 281        case AES_GCMHR(3):
 282                snprintf(tmp, sz, "GCMHR[%u]", (offset - AES_GCMHR(0)) >> 2);
 283                break;
 284
 285        default:
 286                snprintf(tmp, sz, "0x%02x", offset);
 287                break;
 288        }
 289
 290        return tmp;
 291}
 292#endif /* VERBOSE_DEBUG */
 293
 294/* Shared functions */
 295
 296static inline u32 atmel_aes_read(struct atmel_aes_dev *dd, u32 offset)
 297{
 298        u32 value = readl_relaxed(dd->io_base + offset);
 299
 300#ifdef VERBOSE_DEBUG
 301        if (dd->flags & AES_FLAGS_DUMP_REG) {
 302                char tmp[16];
 303
 304                dev_vdbg(dd->dev, "read 0x%08x from %s\n", value,
 305                         atmel_aes_reg_name(offset, tmp, sizeof(tmp)));
 306        }
 307#endif /* VERBOSE_DEBUG */
 308
 309        return value;
 310}
 311
 312static inline void atmel_aes_write(struct atmel_aes_dev *dd,
 313                                        u32 offset, u32 value)
 314{
 315#ifdef VERBOSE_DEBUG
 316        if (dd->flags & AES_FLAGS_DUMP_REG) {
 317                char tmp[16];
 318
 319                dev_vdbg(dd->dev, "write 0x%08x into %s\n", value,
  320                         atmel_aes_reg_name(offset, tmp, sizeof(tmp)));
 321        }
 322#endif /* VERBOSE_DEBUG */
 323
 324        writel_relaxed(value, dd->io_base + offset);
 325}
 326
 327static void atmel_aes_read_n(struct atmel_aes_dev *dd, u32 offset,
 328                                        u32 *value, int count)
 329{
 330        for (; count--; value++, offset += 4)
 331                *value = atmel_aes_read(dd, offset);
 332}
 333
 334static void atmel_aes_write_n(struct atmel_aes_dev *dd, u32 offset,
 335                              const u32 *value, int count)
 336{
 337        for (; count--; value++, offset += 4)
 338                atmel_aes_write(dd, offset, *value);
 339}
 340
 341static inline void atmel_aes_read_block(struct atmel_aes_dev *dd, u32 offset,
 342                                        u32 *value)
 343{
 344        atmel_aes_read_n(dd, offset, value, SIZE_IN_WORDS(AES_BLOCK_SIZE));
 345}
 346
 347static inline void atmel_aes_write_block(struct atmel_aes_dev *dd, u32 offset,
 348                                         const u32 *value)
 349{
 350        atmel_aes_write_n(dd, offset, value, SIZE_IN_WORDS(AES_BLOCK_SIZE));
 351}
 352
 353static inline int atmel_aes_wait_for_data_ready(struct atmel_aes_dev *dd,
 354                                                atmel_aes_fn_t resume)
 355{
 356        u32 isr = atmel_aes_read(dd, AES_ISR);
 357
 358        if (unlikely(isr & AES_INT_DATARDY))
 359                return resume(dd);
 360
 361        dd->resume = resume;
 362        atmel_aes_write(dd, AES_IER, AES_INT_DATARDY);
 363        return -EINPROGRESS;
 364}
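/*
 * The helper above implements the resume-callback pattern used throughout
 * this driver: a processing step either continues immediately when
 * AES_ISR.DATARDY is already set, or records its continuation in dd->resume,
 * unmasks the Data Ready interrupt and returns -EINPROGRESS, to be resumed
 * later from the done tasklet.  A minimal sketch of such a step (hypothetical,
 * for illustration only):
 */
#if 0
static int example_step_done(struct atmel_aes_dev *dd);

static int example_step(struct atmel_aes_dev *dd)
{
        /* Feed one block, then continue in example_step_done() once ready. */
        atmel_aes_write_block(dd, AES_IDATAR(0), dd->data);
        return atmel_aes_wait_for_data_ready(dd, example_step_done);
}
#endif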
 365
 366static inline size_t atmel_aes_padlen(size_t len, size_t block_size)
 367{
 368        len &= block_size - 1;
 369        return len ? block_size - len : 0;
 370}
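/*
 * Worked example: atmel_aes_padlen() returns how many bytes are needed to
 * round len up to a multiple of block_size, which must be a power of two
 * since the computation relies on len & (block_size - 1):
 *
 *   atmel_aes_padlen(20, 16) == 12   (20 + 12 == 32)
 *   atmel_aes_padlen(32, 16) ==  0   (already aligned)
 *   atmel_aes_padlen(13,  8) ==  3
 */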
 371
 372static struct atmel_aes_dev *atmel_aes_find_dev(struct atmel_aes_base_ctx *ctx)
 373{
 374        struct atmel_aes_dev *aes_dd = NULL;
 375        struct atmel_aes_dev *tmp;
 376
 377        spin_lock_bh(&atmel_aes.lock);
 378        if (!ctx->dd) {
 379                list_for_each_entry(tmp, &atmel_aes.dev_list, list) {
 380                        aes_dd = tmp;
 381                        break;
 382                }
 383                ctx->dd = aes_dd;
 384        } else {
 385                aes_dd = ctx->dd;
 386        }
 387
 388        spin_unlock_bh(&atmel_aes.lock);
 389
 390        return aes_dd;
 391}
 392
 393static int atmel_aes_hw_init(struct atmel_aes_dev *dd)
 394{
 395        int err;
 396
 397        err = clk_enable(dd->iclk);
 398        if (err)
 399                return err;
 400
 401        if (!(dd->flags & AES_FLAGS_INIT)) {
 402                atmel_aes_write(dd, AES_CR, AES_CR_SWRST);
 403                atmel_aes_write(dd, AES_MR, 0xE << AES_MR_CKEY_OFFSET);
 404                dd->flags |= AES_FLAGS_INIT;
 405        }
 406
 407        return 0;
 408}
 409
 410static inline unsigned int atmel_aes_get_version(struct atmel_aes_dev *dd)
 411{
 412        return atmel_aes_read(dd, AES_HW_VERSION) & 0x00000fff;
 413}
 414
 415static int atmel_aes_hw_version_init(struct atmel_aes_dev *dd)
 416{
 417        int err;
 418
 419        err = atmel_aes_hw_init(dd);
 420        if (err)
 421                return err;
 422
 423        dd->hw_version = atmel_aes_get_version(dd);
 424
 425        dev_info(dd->dev, "version: 0x%x\n", dd->hw_version);
 426
 427        clk_disable(dd->iclk);
 428        return 0;
 429}
 430
 431static inline void atmel_aes_set_mode(struct atmel_aes_dev *dd,
 432                                      const struct atmel_aes_reqctx *rctx)
 433{
 434        /* Clear all but persistent flags and set request flags. */
 435        dd->flags = (dd->flags & AES_FLAGS_PERSISTENT) | rctx->mode;
 436}
 437
 438static inline bool atmel_aes_is_encrypt(const struct atmel_aes_dev *dd)
 439{
 440        return (dd->flags & AES_FLAGS_ENCRYPT);
 441}
 442
 443static inline int atmel_aes_complete(struct atmel_aes_dev *dd, int err)
 444{
 445        clk_disable(dd->iclk);
 446        dd->flags &= ~AES_FLAGS_BUSY;
 447
 448        if (dd->is_async)
 449                dd->areq->complete(dd->areq, err);
 450
 451        tasklet_schedule(&dd->queue_task);
 452
 453        return err;
 454}
 455
 456static void atmel_aes_write_ctrl(struct atmel_aes_dev *dd, bool use_dma,
 457                                 const u32 *iv)
 458{
 459        u32 valmr = 0;
 460
 461        /* MR register must be set before IV registers */
 462        if (dd->ctx->keylen == AES_KEYSIZE_128)
 463                valmr |= AES_MR_KEYSIZE_128;
 464        else if (dd->ctx->keylen == AES_KEYSIZE_192)
 465                valmr |= AES_MR_KEYSIZE_192;
 466        else
 467                valmr |= AES_MR_KEYSIZE_256;
 468
 469        valmr |= dd->flags & AES_FLAGS_MODE_MASK;
 470
 471        if (use_dma) {
 472                valmr |= AES_MR_SMOD_IDATAR0;
 473                if (dd->caps.has_dualbuff)
 474                        valmr |= AES_MR_DUALBUFF;
 475        } else {
 476                valmr |= AES_MR_SMOD_AUTO;
 477        }
 478
 479        atmel_aes_write(dd, AES_MR, valmr);
 480
 481        atmel_aes_write_n(dd, AES_KEYWR(0), dd->ctx->key,
 482                          SIZE_IN_WORDS(dd->ctx->keylen));
 483
 484        if (iv && (valmr & AES_MR_OPMOD_MASK) != AES_MR_OPMOD_ECB)
 485                atmel_aes_write_block(dd, AES_IVR(0), iv);
 486}
 487
 488
 489/* CPU transfer */
 490
 491static int atmel_aes_cpu_transfer(struct atmel_aes_dev *dd)
 492{
 493        int err = 0;
 494        u32 isr;
 495
 496        for (;;) {
 497                atmel_aes_read_block(dd, AES_ODATAR(0), dd->data);
 498                dd->data += 4;
 499                dd->datalen -= AES_BLOCK_SIZE;
 500
 501                if (dd->datalen < AES_BLOCK_SIZE)
 502                        break;
 503
 504                atmel_aes_write_block(dd, AES_IDATAR(0), dd->data);
 505
 506                isr = atmel_aes_read(dd, AES_ISR);
 507                if (!(isr & AES_INT_DATARDY)) {
 508                        dd->resume = atmel_aes_cpu_transfer;
 509                        atmel_aes_write(dd, AES_IER, AES_INT_DATARDY);
 510                        return -EINPROGRESS;
 511                }
 512        }
 513
 514        if (!sg_copy_from_buffer(dd->real_dst, sg_nents(dd->real_dst),
 515                                 dd->buf, dd->total))
 516                err = -EINVAL;
 517
 518        if (err)
 519                return atmel_aes_complete(dd, err);
 520
 521        return dd->cpu_transfer_complete(dd);
 522}
 523
 524static int atmel_aes_cpu_start(struct atmel_aes_dev *dd,
 525                               struct scatterlist *src,
 526                               struct scatterlist *dst,
 527                               size_t len,
 528                               atmel_aes_fn_t resume)
 529{
 530        size_t padlen = atmel_aes_padlen(len, AES_BLOCK_SIZE);
 531
 532        if (unlikely(len == 0))
 533                return -EINVAL;
 534
 535        sg_copy_to_buffer(src, sg_nents(src), dd->buf, len);
 536
 537        dd->total = len;
 538        dd->real_dst = dst;
 539        dd->cpu_transfer_complete = resume;
 540        dd->datalen = len + padlen;
 541        dd->data = (u32 *)dd->buf;
 542        atmel_aes_write_block(dd, AES_IDATAR(0), dd->data);
 543        return atmel_aes_wait_for_data_ready(dd, atmel_aes_cpu_transfer);
 544}
 545
 546
 547/* DMA transfer */
 548
 549static void atmel_aes_dma_callback(void *data);
 550
 551static bool atmel_aes_check_aligned(struct atmel_aes_dev *dd,
 552                                    struct scatterlist *sg,
 553                                    size_t len,
 554                                    struct atmel_aes_dma *dma)
 555{
 556        int nents;
 557
 558        if (!IS_ALIGNED(len, dd->ctx->block_size))
 559                return false;
 560
 561        for (nents = 0; sg; sg = sg_next(sg), ++nents) {
 562                if (!IS_ALIGNED(sg->offset, sizeof(u32)))
 563                        return false;
 564
 565                if (len <= sg->length) {
 566                        if (!IS_ALIGNED(len, dd->ctx->block_size))
 567                                return false;
 568
 569                        dma->nents = nents+1;
 570                        dma->remainder = sg->length - len;
 571                        sg->length = len;
 572                        return true;
 573                }
 574
 575                if (!IS_ALIGNED(sg->length, dd->ctx->block_size))
 576                        return false;
 577
 578                len -= sg->length;
 579        }
 580
 581        return false;
 582}
 583
 584static inline void atmel_aes_restore_sg(const struct atmel_aes_dma *dma)
 585{
 586        struct scatterlist *sg = dma->sg;
 587        int nents = dma->nents;
 588
 589        if (!dma->remainder)
 590                return;
 591
 592        while (--nents > 0 && sg)
 593                sg = sg_next(sg);
 594
 595        if (!sg)
 596                return;
 597
 598        sg->length += dma->remainder;
 599}
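/*
 * Worked example of the trim/restore pair above: with a single 4096-byte
 * scatterlist entry and len == 4080 (block_size == 16),
 * atmel_aes_check_aligned() sets dma->nents = 1, dma->remainder = 16 and
 * shrinks sg->length to 4080 so that exactly len bytes are DMA-mapped;
 * atmel_aes_restore_sg() later adds the 16-byte remainder back, restoring
 * sg->length to 4096.
 */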
 600
 601static int atmel_aes_map(struct atmel_aes_dev *dd,
 602                         struct scatterlist *src,
 603                         struct scatterlist *dst,
 604                         size_t len)
 605{
 606        bool src_aligned, dst_aligned;
 607        size_t padlen;
 608
 609        dd->total = len;
 610        dd->src.sg = src;
 611        dd->dst.sg = dst;
 612        dd->real_dst = dst;
 613
 614        src_aligned = atmel_aes_check_aligned(dd, src, len, &dd->src);
 615        if (src == dst)
 616                dst_aligned = src_aligned;
 617        else
 618                dst_aligned = atmel_aes_check_aligned(dd, dst, len, &dd->dst);
 619        if (!src_aligned || !dst_aligned) {
 620                padlen = atmel_aes_padlen(len, dd->ctx->block_size);
 621
 622                if (dd->buflen < len + padlen)
 623                        return -ENOMEM;
 624
 625                if (!src_aligned) {
 626                        sg_copy_to_buffer(src, sg_nents(src), dd->buf, len);
 627                        dd->src.sg = &dd->aligned_sg;
 628                        dd->src.nents = 1;
 629                        dd->src.remainder = 0;
 630                }
 631
 632                if (!dst_aligned) {
 633                        dd->dst.sg = &dd->aligned_sg;
 634                        dd->dst.nents = 1;
 635                        dd->dst.remainder = 0;
 636                }
 637
 638                sg_init_table(&dd->aligned_sg, 1);
 639                sg_set_buf(&dd->aligned_sg, dd->buf, len + padlen);
 640        }
 641
 642        if (dd->src.sg == dd->dst.sg) {
 643                dd->src.sg_len = dma_map_sg(dd->dev, dd->src.sg, dd->src.nents,
 644                                            DMA_BIDIRECTIONAL);
 645                dd->dst.sg_len = dd->src.sg_len;
 646                if (!dd->src.sg_len)
 647                        return -EFAULT;
 648        } else {
 649                dd->src.sg_len = dma_map_sg(dd->dev, dd->src.sg, dd->src.nents,
 650                                            DMA_TO_DEVICE);
 651                if (!dd->src.sg_len)
 652                        return -EFAULT;
 653
 654                dd->dst.sg_len = dma_map_sg(dd->dev, dd->dst.sg, dd->dst.nents,
 655                                            DMA_FROM_DEVICE);
 656                if (!dd->dst.sg_len) {
 657                        dma_unmap_sg(dd->dev, dd->src.sg, dd->src.nents,
 658                                     DMA_TO_DEVICE);
 659                        return -EFAULT;
 660                }
 661        }
 662
 663        return 0;
 664}
 665
 666static void atmel_aes_unmap(struct atmel_aes_dev *dd)
 667{
 668        if (dd->src.sg == dd->dst.sg) {
 669                dma_unmap_sg(dd->dev, dd->src.sg, dd->src.nents,
 670                             DMA_BIDIRECTIONAL);
 671
 672                if (dd->src.sg != &dd->aligned_sg)
 673                        atmel_aes_restore_sg(&dd->src);
 674        } else {
 675                dma_unmap_sg(dd->dev, dd->dst.sg, dd->dst.nents,
 676                             DMA_FROM_DEVICE);
 677
 678                if (dd->dst.sg != &dd->aligned_sg)
 679                        atmel_aes_restore_sg(&dd->dst);
 680
 681                dma_unmap_sg(dd->dev, dd->src.sg, dd->src.nents,
 682                             DMA_TO_DEVICE);
 683
 684                if (dd->src.sg != &dd->aligned_sg)
 685                        atmel_aes_restore_sg(&dd->src);
 686        }
 687
 688        if (dd->dst.sg == &dd->aligned_sg)
 689                sg_copy_from_buffer(dd->real_dst, sg_nents(dd->real_dst),
 690                                    dd->buf, dd->total);
 691}
 692
 693static int atmel_aes_dma_transfer_start(struct atmel_aes_dev *dd,
 694                                        enum dma_slave_buswidth addr_width,
 695                                        enum dma_transfer_direction dir,
 696                                        u32 maxburst)
 697{
 698        struct dma_async_tx_descriptor *desc;
 699        struct dma_slave_config config;
 700        dma_async_tx_callback callback;
 701        struct atmel_aes_dma *dma;
 702        int err;
 703
 704        memset(&config, 0, sizeof(config));
 705        config.direction = dir;
 706        config.src_addr_width = addr_width;
 707        config.dst_addr_width = addr_width;
 708        config.src_maxburst = maxburst;
 709        config.dst_maxburst = maxburst;
 710
 711        switch (dir) {
 712        case DMA_MEM_TO_DEV:
 713                dma = &dd->src;
 714                callback = NULL;
 715                config.dst_addr = dd->phys_base + AES_IDATAR(0);
 716                break;
 717
 718        case DMA_DEV_TO_MEM:
 719                dma = &dd->dst;
 720                callback = atmel_aes_dma_callback;
 721                config.src_addr = dd->phys_base + AES_ODATAR(0);
 722                break;
 723
 724        default:
 725                return -EINVAL;
 726        }
 727
 728        err = dmaengine_slave_config(dma->chan, &config);
 729        if (err)
 730                return err;
 731
 732        desc = dmaengine_prep_slave_sg(dma->chan, dma->sg, dma->sg_len, dir,
 733                                       DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
 734        if (!desc)
 735                return -ENOMEM;
 736
 737        desc->callback = callback;
 738        desc->callback_param = dd;
 739        dmaengine_submit(desc);
 740        dma_async_issue_pending(dma->chan);
 741
 742        return 0;
 743}
 744
 745static void atmel_aes_dma_transfer_stop(struct atmel_aes_dev *dd,
 746                                        enum dma_transfer_direction dir)
 747{
 748        struct atmel_aes_dma *dma;
 749
 750        switch (dir) {
 751        case DMA_MEM_TO_DEV:
 752                dma = &dd->src;
 753                break;
 754
 755        case DMA_DEV_TO_MEM:
 756                dma = &dd->dst;
 757                break;
 758
 759        default:
 760                return;
 761        }
 762
 763        dmaengine_terminate_all(dma->chan);
 764}
 765
 766static int atmel_aes_dma_start(struct atmel_aes_dev *dd,
 767                               struct scatterlist *src,
 768                               struct scatterlist *dst,
 769                               size_t len,
 770                               atmel_aes_fn_t resume)
 771{
 772        enum dma_slave_buswidth addr_width;
 773        u32 maxburst;
 774        int err;
 775
 776        switch (dd->ctx->block_size) {
 777        case CFB8_BLOCK_SIZE:
 778                addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
 779                maxburst = 1;
 780                break;
 781
 782        case CFB16_BLOCK_SIZE:
 783                addr_width = DMA_SLAVE_BUSWIDTH_2_BYTES;
 784                maxburst = 1;
 785                break;
 786
 787        case CFB32_BLOCK_SIZE:
 788        case CFB64_BLOCK_SIZE:
 789                addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
 790                maxburst = 1;
 791                break;
 792
 793        case AES_BLOCK_SIZE:
 794                addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
 795                maxburst = dd->caps.max_burst_size;
 796                break;
 797
 798        default:
 799                err = -EINVAL;
 800                goto exit;
 801        }
 802
 803        err = atmel_aes_map(dd, src, dst, len);
 804        if (err)
 805                goto exit;
 806
 807        dd->resume = resume;
 808
 809        /* Set output DMA transfer first */
 810        err = atmel_aes_dma_transfer_start(dd, addr_width, DMA_DEV_TO_MEM,
 811                                           maxburst);
 812        if (err)
 813                goto unmap;
 814
 815        /* Then set input DMA transfer */
 816        err = atmel_aes_dma_transfer_start(dd, addr_width, DMA_MEM_TO_DEV,
 817                                           maxburst);
 818        if (err)
 819                goto output_transfer_stop;
 820
 821        return -EINPROGRESS;
 822
 823output_transfer_stop:
 824        atmel_aes_dma_transfer_stop(dd, DMA_DEV_TO_MEM);
 825unmap:
 826        atmel_aes_unmap(dd);
 827exit:
 828        return atmel_aes_complete(dd, err);
 829}
 830
 831static void atmel_aes_dma_stop(struct atmel_aes_dev *dd)
 832{
 833        atmel_aes_dma_transfer_stop(dd, DMA_MEM_TO_DEV);
 834        atmel_aes_dma_transfer_stop(dd, DMA_DEV_TO_MEM);
 835        atmel_aes_unmap(dd);
 836}
 837
 838static void atmel_aes_dma_callback(void *data)
 839{
 840        struct atmel_aes_dev *dd = data;
 841
 842        atmel_aes_dma_stop(dd);
 843        dd->is_async = true;
 844        (void)dd->resume(dd);
 845}
 846
 847static int atmel_aes_handle_queue(struct atmel_aes_dev *dd,
 848                                  struct crypto_async_request *new_areq)
 849{
 850        struct crypto_async_request *areq, *backlog;
 851        struct atmel_aes_base_ctx *ctx;
 852        unsigned long flags;
 853        int err, ret = 0;
 854
 855        spin_lock_irqsave(&dd->lock, flags);
 856        if (new_areq)
 857                ret = crypto_enqueue_request(&dd->queue, new_areq);
 858        if (dd->flags & AES_FLAGS_BUSY) {
 859                spin_unlock_irqrestore(&dd->lock, flags);
 860                return ret;
 861        }
 862        backlog = crypto_get_backlog(&dd->queue);
 863        areq = crypto_dequeue_request(&dd->queue);
 864        if (areq)
 865                dd->flags |= AES_FLAGS_BUSY;
 866        spin_unlock_irqrestore(&dd->lock, flags);
 867
 868        if (!areq)
 869                return ret;
 870
 871        if (backlog)
 872                backlog->complete(backlog, -EINPROGRESS);
 873
 874        ctx = crypto_tfm_ctx(areq->tfm);
 875
 876        dd->areq = areq;
 877        dd->ctx = ctx;
 878        dd->is_async = (areq != new_areq);
 879
 880        err = ctx->start(dd);
 881        return (dd->is_async) ? ret : err;
 882}
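/*
 * Return-value convention of atmel_aes_handle_queue(): when the request that
 * was just enqueued is also the one dequeued and started, dd->is_async stays
 * false and the caller gets ctx->start()'s result directly (typically
 * -EINPROGRESS once a transfer has been kicked off).  Otherwise the enqueue
 * status is returned (-EINPROGRESS, or -EBUSY for a backlogged request) and
 * completion is signalled later through dd->areq->complete() from
 * atmel_aes_complete().
 */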
 883
 884
 885/* AES async block ciphers */
 886
 887static int atmel_aes_transfer_complete(struct atmel_aes_dev *dd)
 888{
 889        return atmel_aes_complete(dd, 0);
 890}
 891
 892static int atmel_aes_start(struct atmel_aes_dev *dd)
 893{
 894        struct ablkcipher_request *req = ablkcipher_request_cast(dd->areq);
 895        struct atmel_aes_reqctx *rctx = ablkcipher_request_ctx(req);
 896        bool use_dma = (req->nbytes >= ATMEL_AES_DMA_THRESHOLD ||
 897                        dd->ctx->block_size != AES_BLOCK_SIZE);
 898        int err;
 899
 900        atmel_aes_set_mode(dd, rctx);
 901
 902        err = atmel_aes_hw_init(dd);
 903        if (err)
 904                return atmel_aes_complete(dd, err);
 905
 906        atmel_aes_write_ctrl(dd, use_dma, req->info);
 907        if (use_dma)
 908                return atmel_aes_dma_start(dd, req->src, req->dst, req->nbytes,
 909                                           atmel_aes_transfer_complete);
 910
 911        return atmel_aes_cpu_start(dd, req->src, req->dst, req->nbytes,
 912                                   atmel_aes_transfer_complete);
 913}
 914
 915static inline struct atmel_aes_ctr_ctx *
 916atmel_aes_ctr_ctx_cast(struct atmel_aes_base_ctx *ctx)
 917{
 918        return container_of(ctx, struct atmel_aes_ctr_ctx, base);
 919}
 920
 921static int atmel_aes_ctr_transfer(struct atmel_aes_dev *dd)
 922{
 923        struct atmel_aes_ctr_ctx *ctx = atmel_aes_ctr_ctx_cast(dd->ctx);
 924        struct ablkcipher_request *req = ablkcipher_request_cast(dd->areq);
 925        struct scatterlist *src, *dst;
 926        u32 ctr, blocks;
 927        size_t datalen;
 928        bool use_dma, fragmented = false;
 929
 930        /* Check for transfer completion. */
 931        ctx->offset += dd->total;
 932        if (ctx->offset >= req->nbytes)
 933                return atmel_aes_transfer_complete(dd);
 934
 935        /* Compute data length. */
 936        datalen = req->nbytes - ctx->offset;
 937        blocks = DIV_ROUND_UP(datalen, AES_BLOCK_SIZE);
 938        ctr = be32_to_cpu(ctx->iv[3]);
 939        if (dd->caps.has_ctr32) {
 940                /* Check 32bit counter overflow. */
 941                u32 start = ctr;
 942                u32 end = start + blocks - 1;
 943
 944                if (end < start) {
 945                        ctr |= 0xffffffff;
 946                        datalen = AES_BLOCK_SIZE * -start;
 947                        fragmented = true;
 948                }
 949        } else {
 950                /* Check 16bit counter overflow. */
 951                u16 start = ctr & 0xffff;
 952                u16 end = start + (u16)blocks - 1;
 953
 954                if (blocks >> 16 || end < start) {
 955                        ctr |= 0xffff;
 956                        datalen = AES_BLOCK_SIZE * (0x10000-start);
 957                        fragmented = true;
 958                }
 959        }
 960        use_dma = (datalen >= ATMEL_AES_DMA_THRESHOLD);
 961
 962        /* Jump to offset. */
 963        src = scatterwalk_ffwd(ctx->src, req->src, ctx->offset);
 964        dst = ((req->src == req->dst) ? src :
 965               scatterwalk_ffwd(ctx->dst, req->dst, ctx->offset));
 966
 967        /* Configure hardware. */
 968        atmel_aes_write_ctrl(dd, use_dma, ctx->iv);
 969        if (unlikely(fragmented)) {
 970                /*
 971                 * Increment the counter manually to cope with the hardware
 972                 * counter overflow.
 973                 */
 974                ctx->iv[3] = cpu_to_be32(ctr);
 975                crypto_inc((u8 *)ctx->iv, AES_BLOCK_SIZE);
 976        }
 977
 978        if (use_dma)
 979                return atmel_aes_dma_start(dd, src, dst, datalen,
 980                                           atmel_aes_ctr_transfer);
 981
 982        return atmel_aes_cpu_start(dd, src, dst, datalen,
 983                                   atmel_aes_ctr_transfer);
 984}
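/*
 * Worked example of the 16-bit counter overflow handling above (hardware
 * without the has_ctr32 capability): if the counter word iv[3] ends in
 * 0xFFFE and the request spans 5 blocks, then start = 0xFFFE and
 * end = 0x0002 < start, so the transfer is fragmented and
 * datalen = AES_BLOCK_SIZE * (0x10000 - 0xFFFE) = 32 bytes are processed in
 * this pass.  The low 16 bits of the counter are then forced to 0xFFFF and
 * crypto_inc() wraps them to zero while carrying into the upper bits, before
 * atmel_aes_ctr_transfer() is re-entered for the remaining blocks.
 */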
 985
 986static int atmel_aes_ctr_start(struct atmel_aes_dev *dd)
 987{
 988        struct atmel_aes_ctr_ctx *ctx = atmel_aes_ctr_ctx_cast(dd->ctx);
 989        struct ablkcipher_request *req = ablkcipher_request_cast(dd->areq);
 990        struct atmel_aes_reqctx *rctx = ablkcipher_request_ctx(req);
 991        int err;
 992
 993        atmel_aes_set_mode(dd, rctx);
 994
 995        err = atmel_aes_hw_init(dd);
 996        if (err)
 997                return atmel_aes_complete(dd, err);
 998
 999        memcpy(ctx->iv, req->info, AES_BLOCK_SIZE);
1000        ctx->offset = 0;
1001        dd->total = 0;
1002        return atmel_aes_ctr_transfer(dd);
1003}
1004
1005static int atmel_aes_crypt(struct ablkcipher_request *req, unsigned long mode)
1006{
1007        struct atmel_aes_base_ctx *ctx;
1008        struct atmel_aes_reqctx *rctx;
1009        struct atmel_aes_dev *dd;
1010
1011        ctx = crypto_ablkcipher_ctx(crypto_ablkcipher_reqtfm(req));
1012        switch (mode & AES_FLAGS_OPMODE_MASK) {
1013        case AES_FLAGS_CFB8:
1014                ctx->block_size = CFB8_BLOCK_SIZE;
1015                break;
1016
1017        case AES_FLAGS_CFB16:
1018                ctx->block_size = CFB16_BLOCK_SIZE;
1019                break;
1020
1021        case AES_FLAGS_CFB32:
1022                ctx->block_size = CFB32_BLOCK_SIZE;
1023                break;
1024
1025        case AES_FLAGS_CFB64:
1026                ctx->block_size = CFB64_BLOCK_SIZE;
1027                break;
1028
1029        default:
1030                ctx->block_size = AES_BLOCK_SIZE;
1031                break;
1032        }
1033
1034        dd = atmel_aes_find_dev(ctx);
1035        if (!dd)
1036                return -ENODEV;
1037
1038        rctx = ablkcipher_request_ctx(req);
1039        rctx->mode = mode;
1040
1041        return atmel_aes_handle_queue(dd, &req->base);
1042}
1043
1044static int atmel_aes_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
1045                           unsigned int keylen)
1046{
1047        struct atmel_aes_base_ctx *ctx = crypto_ablkcipher_ctx(tfm);
1048
1049        if (keylen != AES_KEYSIZE_128 &&
1050            keylen != AES_KEYSIZE_192 &&
1051            keylen != AES_KEYSIZE_256) {
1052                crypto_ablkcipher_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
1053                return -EINVAL;
1054        }
1055
1056        memcpy(ctx->key, key, keylen);
1057        ctx->keylen = keylen;
1058
1059        return 0;
1060}
1061
1062static int atmel_aes_ecb_encrypt(struct ablkcipher_request *req)
1063{
1064        return atmel_aes_crypt(req, AES_FLAGS_ECB | AES_FLAGS_ENCRYPT);
1065}
1066
1067static int atmel_aes_ecb_decrypt(struct ablkcipher_request *req)
1068{
1069        return atmel_aes_crypt(req, AES_FLAGS_ECB);
1070}
1071
1072static int atmel_aes_cbc_encrypt(struct ablkcipher_request *req)
1073{
1074        return atmel_aes_crypt(req, AES_FLAGS_CBC | AES_FLAGS_ENCRYPT);
1075}
1076
1077static int atmel_aes_cbc_decrypt(struct ablkcipher_request *req)
1078{
1079        return atmel_aes_crypt(req, AES_FLAGS_CBC);
1080}
1081
1082static int atmel_aes_ofb_encrypt(struct ablkcipher_request *req)
1083{
1084        return atmel_aes_crypt(req, AES_FLAGS_OFB | AES_FLAGS_ENCRYPT);
1085}
1086
1087static int atmel_aes_ofb_decrypt(struct ablkcipher_request *req)
1088{
1089        return atmel_aes_crypt(req, AES_FLAGS_OFB);
1090}
1091
1092static int atmel_aes_cfb_encrypt(struct ablkcipher_request *req)
1093{
1094        return atmel_aes_crypt(req, AES_FLAGS_CFB128 | AES_FLAGS_ENCRYPT);
1095}
1096
1097static int atmel_aes_cfb_decrypt(struct ablkcipher_request *req)
1098{
1099        return atmel_aes_crypt(req, AES_FLAGS_CFB128);
1100}
1101
1102static int atmel_aes_cfb64_encrypt(struct ablkcipher_request *req)
1103{
1104        return atmel_aes_crypt(req, AES_FLAGS_CFB64 | AES_FLAGS_ENCRYPT);
1105}
1106
1107static int atmel_aes_cfb64_decrypt(struct ablkcipher_request *req)
1108{
1109        return atmel_aes_crypt(req, AES_FLAGS_CFB64);
1110}
1111
1112static int atmel_aes_cfb32_encrypt(struct ablkcipher_request *req)
1113{
1114        return atmel_aes_crypt(req, AES_FLAGS_CFB32 | AES_FLAGS_ENCRYPT);
1115}
1116
1117static int atmel_aes_cfb32_decrypt(struct ablkcipher_request *req)
1118{
1119        return atmel_aes_crypt(req, AES_FLAGS_CFB32);
1120}
1121
1122static int atmel_aes_cfb16_encrypt(struct ablkcipher_request *req)
1123{
1124        return atmel_aes_crypt(req, AES_FLAGS_CFB16 | AES_FLAGS_ENCRYPT);
1125}
1126
1127static int atmel_aes_cfb16_decrypt(struct ablkcipher_request *req)
1128{
1129        return atmel_aes_crypt(req, AES_FLAGS_CFB16);
1130}
1131
1132static int atmel_aes_cfb8_encrypt(struct ablkcipher_request *req)
1133{
1134        return atmel_aes_crypt(req, AES_FLAGS_CFB8 | AES_FLAGS_ENCRYPT);
1135}
1136
1137static int atmel_aes_cfb8_decrypt(struct ablkcipher_request *req)
1138{
1139        return atmel_aes_crypt(req, AES_FLAGS_CFB8);
1140}
1141
1142static int atmel_aes_ctr_encrypt(struct ablkcipher_request *req)
1143{
1144        return atmel_aes_crypt(req, AES_FLAGS_CTR | AES_FLAGS_ENCRYPT);
1145}
1146
1147static int atmel_aes_ctr_decrypt(struct ablkcipher_request *req)
1148{
1149        return atmel_aes_crypt(req, AES_FLAGS_CTR);
1150}
1151
1152static int atmel_aes_cra_init(struct crypto_tfm *tfm)
1153{
1154        struct atmel_aes_ctx *ctx = crypto_tfm_ctx(tfm);
1155
1156        tfm->crt_ablkcipher.reqsize = sizeof(struct atmel_aes_reqctx);
1157        ctx->base.start = atmel_aes_start;
1158
1159        return 0;
1160}
1161
1162static int atmel_aes_ctr_cra_init(struct crypto_tfm *tfm)
1163{
1164        struct atmel_aes_ctx *ctx = crypto_tfm_ctx(tfm);
1165
1166        tfm->crt_ablkcipher.reqsize = sizeof(struct atmel_aes_reqctx);
1167        ctx->base.start = atmel_aes_ctr_start;
1168
1169        return 0;
1170}
1171
1172static void atmel_aes_cra_exit(struct crypto_tfm *tfm)
1173{
1174}
1175
1176static struct crypto_alg aes_algs[] = {
1177{
1178        .cra_name               = "ecb(aes)",
1179        .cra_driver_name        = "atmel-ecb-aes",
1180        .cra_priority           = ATMEL_AES_PRIORITY,
1181        .cra_flags              = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
1182        .cra_blocksize          = AES_BLOCK_SIZE,
1183        .cra_ctxsize            = sizeof(struct atmel_aes_ctx),
1184        .cra_alignmask          = 0xf,
1185        .cra_type               = &crypto_ablkcipher_type,
1186        .cra_module             = THIS_MODULE,
1187        .cra_init               = atmel_aes_cra_init,
1188        .cra_exit               = atmel_aes_cra_exit,
1189        .cra_u.ablkcipher = {
1190                .min_keysize    = AES_MIN_KEY_SIZE,
1191                .max_keysize    = AES_MAX_KEY_SIZE,
1192                .setkey         = atmel_aes_setkey,
1193                .encrypt        = atmel_aes_ecb_encrypt,
1194                .decrypt        = atmel_aes_ecb_decrypt,
1195        }
1196},
1197{
1198        .cra_name               = "cbc(aes)",
1199        .cra_driver_name        = "atmel-cbc-aes",
1200        .cra_priority           = ATMEL_AES_PRIORITY,
1201        .cra_flags              = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
1202        .cra_blocksize          = AES_BLOCK_SIZE,
1203        .cra_ctxsize            = sizeof(struct atmel_aes_ctx),
1204        .cra_alignmask          = 0xf,
1205        .cra_type               = &crypto_ablkcipher_type,
1206        .cra_module             = THIS_MODULE,
1207        .cra_init               = atmel_aes_cra_init,
1208        .cra_exit               = atmel_aes_cra_exit,
1209        .cra_u.ablkcipher = {
1210                .min_keysize    = AES_MIN_KEY_SIZE,
1211                .max_keysize    = AES_MAX_KEY_SIZE,
1212                .ivsize         = AES_BLOCK_SIZE,
1213                .setkey         = atmel_aes_setkey,
1214                .encrypt        = atmel_aes_cbc_encrypt,
1215                .decrypt        = atmel_aes_cbc_decrypt,
1216        }
1217},
1218{
1219        .cra_name               = "ofb(aes)",
1220        .cra_driver_name        = "atmel-ofb-aes",
1221        .cra_priority           = ATMEL_AES_PRIORITY,
1222        .cra_flags              = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
1223        .cra_blocksize          = AES_BLOCK_SIZE,
1224        .cra_ctxsize            = sizeof(struct atmel_aes_ctx),
1225        .cra_alignmask          = 0xf,
1226        .cra_type               = &crypto_ablkcipher_type,
1227        .cra_module             = THIS_MODULE,
1228        .cra_init               = atmel_aes_cra_init,
1229        .cra_exit               = atmel_aes_cra_exit,
1230        .cra_u.ablkcipher = {
1231                .min_keysize    = AES_MIN_KEY_SIZE,
1232                .max_keysize    = AES_MAX_KEY_SIZE,
1233                .ivsize         = AES_BLOCK_SIZE,
1234                .setkey         = atmel_aes_setkey,
1235                .encrypt        = atmel_aes_ofb_encrypt,
1236                .decrypt        = atmel_aes_ofb_decrypt,
1237        }
1238},
1239{
1240        .cra_name               = "cfb(aes)",
1241        .cra_driver_name        = "atmel-cfb-aes",
1242        .cra_priority           = ATMEL_AES_PRIORITY,
1243        .cra_flags              = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
1244        .cra_blocksize          = AES_BLOCK_SIZE,
1245        .cra_ctxsize            = sizeof(struct atmel_aes_ctx),
1246        .cra_alignmask          = 0xf,
1247        .cra_type               = &crypto_ablkcipher_type,
1248        .cra_module             = THIS_MODULE,
1249        .cra_init               = atmel_aes_cra_init,
1250        .cra_exit               = atmel_aes_cra_exit,
1251        .cra_u.ablkcipher = {
1252                .min_keysize    = AES_MIN_KEY_SIZE,
1253                .max_keysize    = AES_MAX_KEY_SIZE,
1254                .ivsize         = AES_BLOCK_SIZE,
1255                .setkey         = atmel_aes_setkey,
1256                .encrypt        = atmel_aes_cfb_encrypt,
1257                .decrypt        = atmel_aes_cfb_decrypt,
1258        }
1259},
1260{
1261        .cra_name               = "cfb32(aes)",
1262        .cra_driver_name        = "atmel-cfb32-aes",
1263        .cra_priority           = ATMEL_AES_PRIORITY,
1264        .cra_flags              = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
1265        .cra_blocksize          = CFB32_BLOCK_SIZE,
1266        .cra_ctxsize            = sizeof(struct atmel_aes_ctx),
1267        .cra_alignmask          = 0x3,
1268        .cra_type               = &crypto_ablkcipher_type,
1269        .cra_module             = THIS_MODULE,
1270        .cra_init               = atmel_aes_cra_init,
1271        .cra_exit               = atmel_aes_cra_exit,
1272        .cra_u.ablkcipher = {
1273                .min_keysize    = AES_MIN_KEY_SIZE,
1274                .max_keysize    = AES_MAX_KEY_SIZE,
1275                .ivsize         = AES_BLOCK_SIZE,
1276                .setkey         = atmel_aes_setkey,
1277                .encrypt        = atmel_aes_cfb32_encrypt,
1278                .decrypt        = atmel_aes_cfb32_decrypt,
1279        }
1280},
1281{
1282        .cra_name               = "cfb16(aes)",
1283        .cra_driver_name        = "atmel-cfb16-aes",
1284        .cra_priority           = ATMEL_AES_PRIORITY,
1285        .cra_flags              = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
1286        .cra_blocksize          = CFB16_BLOCK_SIZE,
1287        .cra_ctxsize            = sizeof(struct atmel_aes_ctx),
1288        .cra_alignmask          = 0x1,
1289        .cra_type               = &crypto_ablkcipher_type,
1290        .cra_module             = THIS_MODULE,
1291        .cra_init               = atmel_aes_cra_init,
1292        .cra_exit               = atmel_aes_cra_exit,
1293        .cra_u.ablkcipher = {
1294                .min_keysize    = AES_MIN_KEY_SIZE,
1295                .max_keysize    = AES_MAX_KEY_SIZE,
1296                .ivsize         = AES_BLOCK_SIZE,
1297                .setkey         = atmel_aes_setkey,
1298                .encrypt        = atmel_aes_cfb16_encrypt,
1299                .decrypt        = atmel_aes_cfb16_decrypt,
1300        }
1301},
1302{
1303        .cra_name               = "cfb8(aes)",
1304        .cra_driver_name        = "atmel-cfb8-aes",
1305        .cra_priority           = ATMEL_AES_PRIORITY,
1306        .cra_flags              = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
1307        .cra_blocksize          = CFB8_BLOCK_SIZE,
1308        .cra_ctxsize            = sizeof(struct atmel_aes_ctx),
1309        .cra_alignmask          = 0x0,
1310        .cra_type               = &crypto_ablkcipher_type,
1311        .cra_module             = THIS_MODULE,
1312        .cra_init               = atmel_aes_cra_init,
1313        .cra_exit               = atmel_aes_cra_exit,
1314        .cra_u.ablkcipher = {
1315                .min_keysize    = AES_MIN_KEY_SIZE,
1316                .max_keysize    = AES_MAX_KEY_SIZE,
1317                .ivsize         = AES_BLOCK_SIZE,
1318                .setkey         = atmel_aes_setkey,
1319                .encrypt        = atmel_aes_cfb8_encrypt,
1320                .decrypt        = atmel_aes_cfb8_decrypt,
1321        }
1322},
1323{
1324        .cra_name               = "ctr(aes)",
1325        .cra_driver_name        = "atmel-ctr-aes",
1326        .cra_priority           = ATMEL_AES_PRIORITY,
1327        .cra_flags              = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
1328        .cra_blocksize          = 1,
1329        .cra_ctxsize            = sizeof(struct atmel_aes_ctr_ctx),
1330        .cra_alignmask          = 0xf,
1331        .cra_type               = &crypto_ablkcipher_type,
1332        .cra_module             = THIS_MODULE,
1333        .cra_init               = atmel_aes_ctr_cra_init,
1334        .cra_exit               = atmel_aes_cra_exit,
1335        .cra_u.ablkcipher = {
1336                .min_keysize    = AES_MIN_KEY_SIZE,
1337                .max_keysize    = AES_MAX_KEY_SIZE,
1338                .ivsize         = AES_BLOCK_SIZE,
1339                .setkey         = atmel_aes_setkey,
1340                .encrypt        = atmel_aes_ctr_encrypt,
1341                .decrypt        = atmel_aes_ctr_decrypt,
1342        }
1343},
1344};
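/*
 * A minimal sketch of how an in-kernel caller of this era's ablkcipher API
 * could exercise the algorithms registered above (illustration only: the
 * function and its error handling are hypothetical and abbreviated, and
 * "cbc(aes)" resolves to "atmel-cbc-aes" only when this driver is the best
 * match).
 */
#if 0
#include <linux/completion.h>	/* for the completion helpers used below */

static void atmel_aes_sketch_done(struct crypto_async_request *areq, int err)
{
        complete(areq->data);
}

static int atmel_aes_sketch_encrypt(struct scatterlist *src,
                                    struct scatterlist *dst,
                                    unsigned int nbytes,
                                    const u8 *key, u8 *iv)
{
        struct crypto_ablkcipher *tfm;
        struct ablkcipher_request *req;
        DECLARE_COMPLETION_ONSTACK(done);
        int err;

        tfm = crypto_alloc_ablkcipher("cbc(aes)", 0, 0);
        if (IS_ERR(tfm))
                return PTR_ERR(tfm);

        err = crypto_ablkcipher_setkey(tfm, key, AES_KEYSIZE_128);
        if (err)
                goto out_free_tfm;

        req = ablkcipher_request_alloc(tfm, GFP_KERNEL);
        if (!req) {
                err = -ENOMEM;
                goto out_free_tfm;
        }

        ablkcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
                                        atmel_aes_sketch_done, &done);
        ablkcipher_request_set_crypt(req, src, dst, nbytes, iv);

        err = crypto_ablkcipher_encrypt(req);
        if (err == -EINPROGRESS || err == -EBUSY) {
                wait_for_completion(&done);
                err = 0;	/* a real caller would recheck the final status */
        }

        ablkcipher_request_free(req);
out_free_tfm:
        crypto_free_ablkcipher(tfm);
        return err;
}
#endif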
1345
1346static struct crypto_alg aes_cfb64_alg = {
1347        .cra_name               = "cfb64(aes)",
1348        .cra_driver_name        = "atmel-cfb64-aes",
1349        .cra_priority           = ATMEL_AES_PRIORITY,
1350        .cra_flags              = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
1351        .cra_blocksize          = CFB64_BLOCK_SIZE,
1352        .cra_ctxsize            = sizeof(struct atmel_aes_ctx),
1353        .cra_alignmask          = 0x7,
1354        .cra_type               = &crypto_ablkcipher_type,
1355        .cra_module             = THIS_MODULE,
1356        .cra_init               = atmel_aes_cra_init,
1357        .cra_exit               = atmel_aes_cra_exit,
1358        .cra_u.ablkcipher = {
1359                .min_keysize    = AES_MIN_KEY_SIZE,
1360                .max_keysize    = AES_MAX_KEY_SIZE,
1361                .ivsize         = AES_BLOCK_SIZE,
1362                .setkey         = atmel_aes_setkey,
1363                .encrypt        = atmel_aes_cfb64_encrypt,
1364                .decrypt        = atmel_aes_cfb64_decrypt,
1365        }
1366};
1367
1368
1369/* gcm aead functions */
1370
1371static int atmel_aes_gcm_ghash(struct atmel_aes_dev *dd,
1372                               const u32 *data, size_t datalen,
1373                               const u32 *ghash_in, u32 *ghash_out,
1374                               atmel_aes_fn_t resume);
1375static int atmel_aes_gcm_ghash_init(struct atmel_aes_dev *dd);
1376static int atmel_aes_gcm_ghash_finalize(struct atmel_aes_dev *dd);
1377
1378static int atmel_aes_gcm_start(struct atmel_aes_dev *dd);
1379static int atmel_aes_gcm_process(struct atmel_aes_dev *dd);
1380static int atmel_aes_gcm_length(struct atmel_aes_dev *dd);
1381static int atmel_aes_gcm_data(struct atmel_aes_dev *dd);
1382static int atmel_aes_gcm_tag_init(struct atmel_aes_dev *dd);
1383static int atmel_aes_gcm_tag(struct atmel_aes_dev *dd);
1384static int atmel_aes_gcm_finalize(struct atmel_aes_dev *dd);
1385
1386static inline struct atmel_aes_gcm_ctx *
1387atmel_aes_gcm_ctx_cast(struct atmel_aes_base_ctx *ctx)
1388{
1389        return container_of(ctx, struct atmel_aes_gcm_ctx, base);
1390}
1391
1392static int atmel_aes_gcm_ghash(struct atmel_aes_dev *dd,
1393                               const u32 *data, size_t datalen,
1394                               const u32 *ghash_in, u32 *ghash_out,
1395                               atmel_aes_fn_t resume)
1396{
1397        struct atmel_aes_gcm_ctx *ctx = atmel_aes_gcm_ctx_cast(dd->ctx);
1398
1399        dd->data = (u32 *)data;
1400        dd->datalen = datalen;
1401        ctx->ghash_in = ghash_in;
1402        ctx->ghash_out = ghash_out;
1403        ctx->ghash_resume = resume;
1404
1405        atmel_aes_write_ctrl(dd, false, NULL);
1406        return atmel_aes_wait_for_data_ready(dd, atmel_aes_gcm_ghash_init);
1407}
1408
1409static int atmel_aes_gcm_ghash_init(struct atmel_aes_dev *dd)
1410{
1411        struct atmel_aes_gcm_ctx *ctx = atmel_aes_gcm_ctx_cast(dd->ctx);
1412
1413        /* Set the data length. */
1414        atmel_aes_write(dd, AES_AADLENR, dd->total);
1415        atmel_aes_write(dd, AES_CLENR, 0);
1416
1417        /* If needed, overwrite the GCM Intermediate Hash Word Registers */
1418        if (ctx->ghash_in)
1419                atmel_aes_write_block(dd, AES_GHASHR(0), ctx->ghash_in);
1420
1421        return atmel_aes_gcm_ghash_finalize(dd);
1422}
1423
1424static int atmel_aes_gcm_ghash_finalize(struct atmel_aes_dev *dd)
1425{
1426        struct atmel_aes_gcm_ctx *ctx = atmel_aes_gcm_ctx_cast(dd->ctx);
1427        u32 isr;
1428
1429        /* Write data into the Input Data Registers. */
1430        while (dd->datalen > 0) {
1431                atmel_aes_write_block(dd, AES_IDATAR(0), dd->data);
1432                dd->data += 4;
1433                dd->datalen -= AES_BLOCK_SIZE;
1434
1435                isr = atmel_aes_read(dd, AES_ISR);
1436                if (!(isr & AES_INT_DATARDY)) {
1437                        dd->resume = atmel_aes_gcm_ghash_finalize;
1438                        atmel_aes_write(dd, AES_IER, AES_INT_DATARDY);
1439                        return -EINPROGRESS;
1440                }
1441        }
1442
1443        /* Read the computed hash from GHASHRx. */
1444        atmel_aes_read_block(dd, AES_GHASHR(0), ctx->ghash_out);
1445
1446        return ctx->ghash_resume(dd);
1447}
1448
1449
1450static int atmel_aes_gcm_start(struct atmel_aes_dev *dd)
1451{
1452        struct atmel_aes_gcm_ctx *ctx = atmel_aes_gcm_ctx_cast(dd->ctx);
1453        struct aead_request *req = aead_request_cast(dd->areq);
1454        struct crypto_aead *tfm = crypto_aead_reqtfm(req);
1455        struct atmel_aes_reqctx *rctx = aead_request_ctx(req);
1456        size_t ivsize = crypto_aead_ivsize(tfm);
1457        size_t datalen, padlen;
1458        const void *iv = req->iv;
1459        u8 *data = dd->buf;
1460        int err;
1461
1462        atmel_aes_set_mode(dd, rctx);
1463
1464        err = atmel_aes_hw_init(dd);
1465        if (err)
1466                return atmel_aes_complete(dd, err);
1467
1468        if (likely(ivsize == 12)) {
1469                memcpy(ctx->j0, iv, ivsize);
1470                ctx->j0[3] = cpu_to_be32(1);
1471                return atmel_aes_gcm_process(dd);
1472        }
1473
1474        padlen = atmel_aes_padlen(ivsize, AES_BLOCK_SIZE);
1475        datalen = ivsize + padlen + AES_BLOCK_SIZE;
1476        if (datalen > dd->buflen)
1477                return atmel_aes_complete(dd, -EINVAL);
1478
1479        memcpy(data, iv, ivsize);
1480        memset(data + ivsize, 0, padlen + sizeof(u64));
1481        ((u64 *)(data + datalen))[-1] = cpu_to_be64(ivsize * 8);
1482
1483        return atmel_aes_gcm_ghash(dd, (const u32 *)data, datalen,
1484                                   NULL, ctx->j0, atmel_aes_gcm_process);
1485}
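/*
 * Worked example of the J0 computation above, following the GCM
 * specification:
 *
 *   ivsize == 12:  J0 = IV || 0x00000001             (fast path, no GHASH)
 *   ivsize == 16:  padlen = 0, datalen = 16 + 0 + 16 = 32, and the buffer
 *                  handed to atmel_aes_gcm_ghash() is
 *                  IV (16 bytes) || eight zero bytes || [128]64
 *                  (the 64-bit big-endian bit length of the IV),
 *                  so J0 = GHASH_H(buffer).
 */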
1486
1487static int atmel_aes_gcm_process(struct atmel_aes_dev *dd)
1488{
1489        struct atmel_aes_gcm_ctx *ctx = atmel_aes_gcm_ctx_cast(dd->ctx);
1490        struct aead_request *req = aead_request_cast(dd->areq);
1491        struct crypto_aead *tfm = crypto_aead_reqtfm(req);
1492        bool enc = atmel_aes_is_encrypt(dd);
1493        u32 authsize;
1494
1495        /* Compute text length. */
1496        authsize = crypto_aead_authsize(tfm);
1497        ctx->textlen = req->cryptlen - (enc ? 0 : authsize);
1498
1499        /*
1500         * According to tcrypt test suite, the GCM Automatic Tag Generation
1501         * fails when both the message and its associated data are empty.
1502         */
1503        if (likely(req->assoclen != 0 || ctx->textlen != 0))
1504                dd->flags |= AES_FLAGS_GTAGEN;
1505
1506        atmel_aes_write_ctrl(dd, false, NULL);
1507        return atmel_aes_wait_for_data_ready(dd, atmel_aes_gcm_length);
1508}
1509
1510static int atmel_aes_gcm_length(struct atmel_aes_dev *dd)
1511{
1512        struct atmel_aes_gcm_ctx *ctx = atmel_aes_gcm_ctx_cast(dd->ctx);
1513        struct aead_request *req = aead_request_cast(dd->areq);
1514        u32 j0_lsw, *j0 = ctx->j0;
1515        size_t padlen;
1516
1517        /* Write incr32(J0) into IV. */
1518        j0_lsw = j0[3];
1519        j0[3] = cpu_to_be32(be32_to_cpu(j0[3]) + 1);
1520        atmel_aes_write_block(dd, AES_IVR(0), j0);
1521        j0[3] = j0_lsw;
1522
1523        /* Set aad and text lengths. */
1524        atmel_aes_write(dd, AES_AADLENR, req->assoclen);
1525        atmel_aes_write(dd, AES_CLENR, ctx->textlen);
1526
1527        /* Check whether AAD are present. */
1528        if (unlikely(req->assoclen == 0)) {
1529                dd->datalen = 0;
1530                return atmel_aes_gcm_data(dd);
1531        }
1532
1533        /* Copy assoc data and add padding. */
1534        padlen = atmel_aes_padlen(req->assoclen, AES_BLOCK_SIZE);
1535        if (unlikely(req->assoclen + padlen > dd->buflen))
1536                return atmel_aes_complete(dd, -EINVAL);
1537        sg_copy_to_buffer(req->src, sg_nents(req->src), dd->buf, req->assoclen);
1538
1539        /* Write assoc data into the Input Data register. */
1540        dd->data = (u32 *)dd->buf;
1541        dd->datalen = req->assoclen + padlen;
1542        return atmel_aes_gcm_data(dd);
1543}
1544
1545static int atmel_aes_gcm_data(struct atmel_aes_dev *dd)
1546{
1547        struct atmel_aes_gcm_ctx *ctx = atmel_aes_gcm_ctx_cast(dd->ctx);
1548        struct aead_request *req = aead_request_cast(dd->areq);
1549        bool use_dma = (ctx->textlen >= ATMEL_AES_DMA_THRESHOLD);
1550        struct scatterlist *src, *dst;
1551        u32 isr, mr;
1552
1553        /* Write AAD first. */
1554        while (dd->datalen > 0) {
1555                atmel_aes_write_block(dd, AES_IDATAR(0), dd->data);
1556                dd->data += 4;
1557                dd->datalen -= AES_BLOCK_SIZE;
1558
1559                isr = atmel_aes_read(dd, AES_ISR);
1560                if (!(isr & AES_INT_DATARDY)) {
1561                        dd->resume = atmel_aes_gcm_data;
1562                        atmel_aes_write(dd, AES_IER, AES_INT_DATARDY);
1563                        return -EINPROGRESS;
1564                }
1565        }
1566
1567        /* GMAC only. */
1568        if (unlikely(ctx->textlen == 0))
1569                return atmel_aes_gcm_tag_init(dd);
1570
1571        /* Prepare src and dst scatter lists to transfer cipher/plain texts */
1572        src = scatterwalk_ffwd(ctx->src, req->src, req->assoclen);
1573        dst = ((req->src == req->dst) ? src :
1574               scatterwalk_ffwd(ctx->dst, req->dst, req->assoclen));
1575
1576        if (use_dma) {
1577                /* Update the Mode Register for DMA transfers. */
1578                mr = atmel_aes_read(dd, AES_MR);
1579                mr &= ~(AES_MR_SMOD_MASK | AES_MR_DUALBUFF);
1580                mr |= AES_MR_SMOD_IDATAR0;
1581                if (dd->caps.has_dualbuff)
1582                        mr |= AES_MR_DUALBUFF;
1583                atmel_aes_write(dd, AES_MR, mr);
1584
1585                return atmel_aes_dma_start(dd, src, dst, ctx->textlen,
1586                                           atmel_aes_gcm_tag_init);
1587        }
1588
1589        return atmel_aes_cpu_start(dd, src, dst, ctx->textlen,
1590                                   atmel_aes_gcm_tag_init);
1591}
1592
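    /*
     * With hardware tag generation (GTAGEN) enabled, wait for TAGRDY and
     * read the tag in atmel_aes_gcm_finalize(). Otherwise, finish the GHASH
     * over the length block and derive the tag in CTR mode through
     * atmel_aes_gcm_tag().
     */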
1593static int atmel_aes_gcm_tag_init(struct atmel_aes_dev *dd)
1594{
1595        struct atmel_aes_gcm_ctx *ctx = atmel_aes_gcm_ctx_cast(dd->ctx);
1596        struct aead_request *req = aead_request_cast(dd->areq);
1597        __be64 *data = dd->buf;
1598
1599        if (likely(dd->flags & AES_FLAGS_GTAGEN)) {
1600                if (!(atmel_aes_read(dd, AES_ISR) & AES_INT_TAGRDY)) {
1601                        dd->resume = atmel_aes_gcm_tag_init;
1602                        atmel_aes_write(dd, AES_IER, AES_INT_TAGRDY);
1603                        return -EINPROGRESS;
1604                }
1605
1606                return atmel_aes_gcm_finalize(dd);
1607        }
1608
1609        /* Read the GCM Intermediate Hash Word Registers. */
1610        atmel_aes_read_block(dd, AES_GHASHR(0), ctx->ghash);
1611
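            /*
             * Length block: the 64-bit big-endian bit counts of the AAD and
             * of the text, as required by the GCM specification.
             */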
1612        data[0] = cpu_to_be64(req->assoclen * 8);
1613        data[1] = cpu_to_be64(ctx->textlen * 8);
1614
1615        return atmel_aes_gcm_ghash(dd, (const u32 *)data, AES_BLOCK_SIZE,
1616                                   ctx->ghash, ctx->ghash, atmel_aes_gcm_tag);
1617}
1618
1619static int atmel_aes_gcm_tag(struct atmel_aes_dev *dd)
1620{
1621        struct atmel_aes_gcm_ctx *ctx = atmel_aes_gcm_ctx_cast(dd->ctx);
1622        unsigned long flags;
1623
1624        /*
1625         * Change mode to CTR to complete the tag generation.
1626         * Use J0 as Initialization Vector.
1627         */
1628        flags = dd->flags;
1629        dd->flags &= ~(AES_FLAGS_OPMODE_MASK | AES_FLAGS_GTAGEN);
1630        dd->flags |= AES_FLAGS_CTR;
1631        atmel_aes_write_ctrl(dd, false, ctx->j0);
1632        dd->flags = flags;
1633
1634        atmel_aes_write_block(dd, AES_IDATAR(0), ctx->ghash);
1635        return atmel_aes_wait_for_data_ready(dd, atmel_aes_gcm_finalize);
1636}
1637
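    /*
     * On encryption, append the computed tag to the destination buffer. On
     * decryption, compare it against the tag found at the end of the source
     * buffer using the constant-time crypto_memneq().
     */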
1638static int atmel_aes_gcm_finalize(struct atmel_aes_dev *dd)
1639{
1640        struct atmel_aes_gcm_ctx *ctx = atmel_aes_gcm_ctx_cast(dd->ctx);
1641        struct aead_request *req = aead_request_cast(dd->areq);
1642        struct crypto_aead *tfm = crypto_aead_reqtfm(req);
1643        bool enc = atmel_aes_is_encrypt(dd);
1644        u32 offset, authsize, itag[4], *otag = ctx->tag;
1645        int err;
1646
1647        /* Read the computed tag. */
1648        if (likely(dd->flags & AES_FLAGS_GTAGEN))
1649                atmel_aes_read_block(dd, AES_TAGR(0), ctx->tag);
1650        else
1651                atmel_aes_read_block(dd, AES_ODATAR(0), ctx->tag);
1652
1653        offset = req->assoclen + ctx->textlen;
1654        authsize = crypto_aead_authsize(tfm);
1655        if (enc) {
1656                scatterwalk_map_and_copy(otag, req->dst, offset, authsize, 1);
1657                err = 0;
1658        } else {
1659                scatterwalk_map_and_copy(itag, req->src, offset, authsize, 0);
1660                err = crypto_memneq(itag, otag, authsize) ? -EBADMSG : 0;
1661        }
1662
1663        return atmel_aes_complete(dd, err);
1664}
1665
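    /*
     * Common entry point for GCM encryption and decryption: pick an AES
     * device, record the requested mode flags and queue the request.
     */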
1666static int atmel_aes_gcm_crypt(struct aead_request *req,
1667                               unsigned long mode)
1668{
1669        struct atmel_aes_base_ctx *ctx;
1670        struct atmel_aes_reqctx *rctx;
1671        struct atmel_aes_dev *dd;
1672
1673        ctx = crypto_aead_ctx(crypto_aead_reqtfm(req));
1674        ctx->block_size = AES_BLOCK_SIZE;
1675
1676        dd = atmel_aes_find_dev(ctx);
1677        if (!dd)
1678                return -ENODEV;
1679
1680        rctx = aead_request_ctx(req);
1681        rctx->mode = AES_FLAGS_GCM | mode;
1682
1683        return atmel_aes_handle_queue(dd, &req->base);
1684}
1685
1686static int atmel_aes_gcm_setkey(struct crypto_aead *tfm, const u8 *key,
1687                                unsigned int keylen)
1688{
1689        struct atmel_aes_base_ctx *ctx = crypto_aead_ctx(tfm);
1690
1691        if (keylen != AES_KEYSIZE_256 &&
1692            keylen != AES_KEYSIZE_192 &&
1693            keylen != AES_KEYSIZE_128) {
1694                crypto_aead_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
1695                return -EINVAL;
1696        }
1697
1698        memcpy(ctx->key, key, keylen);
1699        ctx->keylen = keylen;
1700
1701        return 0;
1702}
1703
1704static int atmel_aes_gcm_setauthsize(struct crypto_aead *tfm,
1705                                     unsigned int authsize)
1706{
1707        /* Same as crypto_gcm_authsize() from crypto/gcm.c */
1708        switch (authsize) {
1709        case 4:
1710        case 8:
1711        case 12:
1712        case 13:
1713        case 14:
1714        case 15:
1715        case 16:
1716                break;
1717        default:
1718                return -EINVAL;
1719        }
1720
1721        return 0;
1722}
1723
1724static int atmel_aes_gcm_encrypt(struct aead_request *req)
1725{
1726        return atmel_aes_gcm_crypt(req, AES_FLAGS_ENCRYPT);
1727}
1728
1729static int atmel_aes_gcm_decrypt(struct aead_request *req)
1730{
1731        return atmel_aes_gcm_crypt(req, 0);
1732}
1733
1734static int atmel_aes_gcm_init(struct crypto_aead *tfm)
1735{
1736        struct atmel_aes_gcm_ctx *ctx = crypto_aead_ctx(tfm);
1737
1738        crypto_aead_set_reqsize(tfm, sizeof(struct atmel_aes_reqctx));
1739        ctx->base.start = atmel_aes_gcm_start;
1740
1741        return 0;
1742}
1743
1744static void atmel_aes_gcm_exit(struct crypto_aead *tfm)
1745{
1746
1747}
1748
1749static struct aead_alg aes_gcm_alg = {
1750        .setkey         = atmel_aes_gcm_setkey,
1751        .setauthsize    = atmel_aes_gcm_setauthsize,
1752        .encrypt        = atmel_aes_gcm_encrypt,
1753        .decrypt        = atmel_aes_gcm_decrypt,
1754        .init           = atmel_aes_gcm_init,
1755        .exit           = atmel_aes_gcm_exit,
1756        .ivsize         = 12,
1757        .maxauthsize    = AES_BLOCK_SIZE,
1758
1759        .base = {
1760                .cra_name               = "gcm(aes)",
1761                .cra_driver_name        = "atmel-gcm-aes",
1762                .cra_priority           = ATMEL_AES_PRIORITY,
1763                .cra_flags              = CRYPTO_ALG_ASYNC,
1764                .cra_blocksize          = 1,
1765                .cra_ctxsize            = sizeof(struct atmel_aes_gcm_ctx),
1766                .cra_alignmask          = 0xf,
1767                .cra_module             = THIS_MODULE,
1768        },
1769};
1770
1771
1772/* Probe functions */
1773
1774static int atmel_aes_buff_init(struct atmel_aes_dev *dd)
1775{
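            /*
             * Bounce buffer used for CPU transfers and for staging padded
             * associated data; its length is kept a whole number of AES
             * blocks.
             */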
1776        dd->buf = (void *)__get_free_pages(GFP_KERNEL, ATMEL_AES_BUFFER_ORDER);
1777        dd->buflen = ATMEL_AES_BUFFER_SIZE;
1778        dd->buflen &= ~(AES_BLOCK_SIZE - 1);
1779
1780        if (!dd->buf) {
1781                dev_err(dd->dev, "unable to alloc pages.\n");
1782                return -ENOMEM;
1783        }
1784
1785        return 0;
1786}
1787
1788static void atmel_aes_buff_cleanup(struct atmel_aes_dev *dd)
1789{
1790        free_pages((unsigned long)dd->buf, ATMEL_AES_BUFFER_ORDER);
1791}
1792
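    /*
     * dmaengine filter used by dma_request_slave_channel_compat() when no
     * DT channel is found: match the channel against the at_dma_slave data
     * provided through the platform data.
     */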
1793static bool atmel_aes_filter(struct dma_chan *chan, void *slave)
1794{
1795        struct at_dma_slave     *sl = slave;
1796
1797        if (sl && sl->dma_dev == chan->device->dev) {
1798                chan->private = sl;
1799                return true;
1800        } else {
1801                return false;
1802        }
1803}
1804
1805static int atmel_aes_dma_init(struct atmel_aes_dev *dd,
1806                              struct crypto_platform_data *pdata)
1807{
1808        struct at_dma_slave *slave;
1809        int err = -ENOMEM;
1810        dma_cap_mask_t mask;
1811
1812        dma_cap_zero(mask);
1813        dma_cap_set(DMA_SLAVE, mask);
1814
1815        /* Try to grab 2 DMA channels */
1816        slave = &pdata->dma_slave->rxdata;
1817        dd->src.chan = dma_request_slave_channel_compat(mask, atmel_aes_filter,
1818                                                        slave, dd->dev, "tx");
1819        if (!dd->src.chan)
1820                goto err_dma_in;
1821
1822        slave = &pdata->dma_slave->txdata;
1823        dd->dst.chan = dma_request_slave_channel_compat(mask, atmel_aes_filter,
1824                                                        slave, dd->dev, "rx");
1825        if (!dd->dst.chan)
1826                goto err_dma_out;
1827
1828        return 0;
1829
1830err_dma_out:
1831        dma_release_channel(dd->src.chan);
1832err_dma_in:
1833        dev_warn(dd->dev, "no DMA channel available\n");
1834        return err;
1835}
1836
1837static void atmel_aes_dma_cleanup(struct atmel_aes_dev *dd)
1838{
1839        dma_release_channel(dd->dst.chan);
1840        dma_release_channel(dd->src.chan);
1841}
1842
1843static void atmel_aes_queue_task(unsigned long data)
1844{
1845        struct atmel_aes_dev *dd = (struct atmel_aes_dev *)data;
1846
1847        atmel_aes_handle_queue(dd, NULL);
1848}
1849
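    /*
     * Tasklet scheduled from the IRQ handler: mark the completion as
     * asynchronous and resume the driver state machine.
     */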
1850static void atmel_aes_done_task(unsigned long data)
1851{
1852        struct atmel_aes_dev *dd = (struct atmel_aes_dev *)data;
1853
1854        dd->is_async = true;
1855        (void)dd->resume(dd);
1856}
1857
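    /*
     * Interrupt handler: disable the reported interrupt sources (AES_IDR)
     * and defer the actual processing to the done tasklet. Interrupts
     * raised while no request is in flight are only logged.
     */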
1858static irqreturn_t atmel_aes_irq(int irq, void *dev_id)
1859{
1860        struct atmel_aes_dev *aes_dd = dev_id;
1861        u32 reg;
1862
1863        reg = atmel_aes_read(aes_dd, AES_ISR);
1864        if (reg & atmel_aes_read(aes_dd, AES_IMR)) {
1865                atmel_aes_write(aes_dd, AES_IDR, reg);
1866                if (AES_FLAGS_BUSY & aes_dd->flags)
1867                        tasklet_schedule(&aes_dd->done_task);
1868                else
1869                        dev_warn(aes_dd->dev, "AES interrupt while no request is active.\n");
1870                return IRQ_HANDLED;
1871        }
1872
1873        return IRQ_NONE;
1874}
1875
1876static void atmel_aes_unregister_algs(struct atmel_aes_dev *dd)
1877{
1878        int i;
1879
1880        if (dd->caps.has_gcm)
1881                crypto_unregister_aead(&aes_gcm_alg);
1882
1883        if (dd->caps.has_cfb64)
1884                crypto_unregister_alg(&aes_cfb64_alg);
1885
1886        for (i = 0; i < ARRAY_SIZE(aes_algs); i++)
1887                crypto_unregister_alg(&aes_algs[i]);
1888}
1889
1890static int atmel_aes_register_algs(struct atmel_aes_dev *dd)
1891{
1892        int err, i, j;
1893
1894        for (i = 0; i < ARRAY_SIZE(aes_algs); i++) {
1895                err = crypto_register_alg(&aes_algs[i]);
1896                if (err)
1897                        goto err_aes_algs;
1898        }
1899
1900        if (dd->caps.has_cfb64) {
1901                err = crypto_register_alg(&aes_cfb64_alg);
1902                if (err)
1903                        goto err_aes_cfb64_alg;
1904        }
1905
1906        if (dd->caps.has_gcm) {
1907                err = crypto_register_aead(&aes_gcm_alg);
1908                if (err)
1909                        goto err_aes_gcm_alg;
1910        }
1911
1912        return 0;
1913
1914err_aes_gcm_alg:
1915        crypto_unregister_alg(&aes_cfb64_alg);
1916err_aes_cfb64_alg:
1917        i = ARRAY_SIZE(aes_algs);
1918err_aes_algs:
1919        for (j = 0; j < i; j++)
1920                crypto_unregister_alg(&aes_algs[j]);
1921
1922        return err;
1923}
1924
1925static void atmel_aes_get_cap(struct atmel_aes_dev *dd)
1926{
1927        dd->caps.has_dualbuff = 0;
1928        dd->caps.has_cfb64 = 0;
1929        dd->caps.has_ctr32 = 0;
1930        dd->caps.has_gcm = 0;
1931        dd->caps.max_burst_size = 1;
1932
1933        /* keep only major version number */
1934        switch (dd->hw_version & 0xff0) {
1935        case 0x500:
1936                dd->caps.has_dualbuff = 1;
1937                dd->caps.has_cfb64 = 1;
1938                dd->caps.has_ctr32 = 1;
1939                dd->caps.has_gcm = 1;
1940                dd->caps.max_burst_size = 4;
1941                break;
1942        case 0x200:
1943                dd->caps.has_dualbuff = 1;
1944                dd->caps.has_cfb64 = 1;
1945                dd->caps.has_ctr32 = 1;
1946                dd->caps.has_gcm = 1;
1947                dd->caps.max_burst_size = 4;
1948                break;
1949        case 0x130:
1950                dd->caps.has_dualbuff = 1;
1951                dd->caps.has_cfb64 = 1;
1952                dd->caps.max_burst_size = 4;
1953                break;
1954        case 0x120:
1955                break;
1956        default:
1957                dev_warn(dd->dev,
1958                                "Unmanaged AES version, setting minimum capabilities\n");
1959                break;
1960        }
1961}
1962
1963#if defined(CONFIG_OF)
1964static const struct of_device_id atmel_aes_dt_ids[] = {
1965        { .compatible = "atmel,at91sam9g46-aes" },
1966        { /* sentinel */ }
1967};
1968MODULE_DEVICE_TABLE(of, atmel_aes_dt_ids);
1969
1970static struct crypto_platform_data *atmel_aes_of_init(struct platform_device *pdev)
1971{
1972        struct device_node *np = pdev->dev.of_node;
1973        struct crypto_platform_data *pdata;
1974
1975        if (!np) {
1976                dev_err(&pdev->dev, "device node not found\n");
1977                return ERR_PTR(-EINVAL);
1978        }
1979
1980        pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
1981        if (!pdata) {
1982                dev_err(&pdev->dev, "could not allocate memory for pdata\n");
1983                return ERR_PTR(-ENOMEM);
1984        }
1985
1986        pdata->dma_slave = devm_kzalloc(&pdev->dev,
1987                                        sizeof(*(pdata->dma_slave)),
1988                                        GFP_KERNEL);
1989        if (!pdata->dma_slave) {
1990                dev_err(&pdev->dev, "could not allocate memory for dma_slave\n");
1991                devm_kfree(&pdev->dev, pdata);
1992                return ERR_PTR(-ENOMEM);
1993        }
1994
1995        return pdata;
1996}
1997#else
1998static inline struct crypto_platform_data *atmel_aes_of_init(struct platform_device *pdev)
1999{
2000        return ERR_PTR(-EINVAL);
2001}
2002#endif
2003
2004static int atmel_aes_probe(struct platform_device *pdev)
2005{
2006        struct atmel_aes_dev *aes_dd;
2007        struct crypto_platform_data *pdata;
2008        struct device *dev = &pdev->dev;
2009        struct resource *aes_res;
2010        int err;
2011
2012        pdata = pdev->dev.platform_data;
2013        if (!pdata) {
2014                pdata = atmel_aes_of_init(pdev);
2015                if (IS_ERR(pdata)) {
2016                        err = PTR_ERR(pdata);
2017                        goto aes_dd_err;
2018                }
2019        }
2020
2021        if (!pdata->dma_slave) {
2022                err = -ENXIO;
2023                goto aes_dd_err;
2024        }
2025
2026        aes_dd = devm_kzalloc(&pdev->dev, sizeof(*aes_dd), GFP_KERNEL);
2027        if (!aes_dd) {
2028                dev_err(dev, "unable to alloc data struct.\n");
2029                err = -ENOMEM;
2030                goto aes_dd_err;
2031        }
2032
2033        aes_dd->dev = dev;
2034
2035        platform_set_drvdata(pdev, aes_dd);
2036
2037        INIT_LIST_HEAD(&aes_dd->list);
2038        spin_lock_init(&aes_dd->lock);
2039
2040        tasklet_init(&aes_dd->done_task, atmel_aes_done_task,
2041                                        (unsigned long)aes_dd);
2042        tasklet_init(&aes_dd->queue_task, atmel_aes_queue_task,
2043                                        (unsigned long)aes_dd);
2044
2045        crypto_init_queue(&aes_dd->queue, ATMEL_AES_QUEUE_LENGTH);
2046
2047        aes_dd->irq = -1;
2048
2049        /* Get the base address */
2050        aes_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
2051        if (!aes_res) {
2052                dev_err(dev, "no MEM resource info\n");
2053                err = -ENODEV;
2054                goto res_err;
2055        }
2056        aes_dd->phys_base = aes_res->start;
2057
2058        /* Get the IRQ */
2059        aes_dd->irq = platform_get_irq(pdev, 0);
2060        if (aes_dd->irq < 0) {
2061                dev_err(dev, "no IRQ resource info\n");
2062                err = aes_dd->irq;
2063                goto res_err;
2064        }
2065
2066        err = devm_request_irq(&pdev->dev, aes_dd->irq, atmel_aes_irq,
2067                               IRQF_SHARED, "atmel-aes", aes_dd);
2068        if (err) {
2069                dev_err(dev, "unable to request aes irq.\n");
2070                goto res_err;
2071        }
2072
2073        /* Initializing the clock */
2074        aes_dd->iclk = devm_clk_get(&pdev->dev, "aes_clk");
2075        if (IS_ERR(aes_dd->iclk)) {
2076                dev_err(dev, "clock initialization failed.\n");
2077                err = PTR_ERR(aes_dd->iclk);
2078                goto res_err;
2079        }
2080
2081        aes_dd->io_base = devm_ioremap_resource(&pdev->dev, aes_res);
2082        if (IS_ERR(aes_dd->io_base)) {
2083                dev_err(dev, "can't ioremap\n");
2084                err = PTR_ERR(aes_dd->io_base);
2085                goto res_err;
2086        }
2087
2088        err = clk_prepare(aes_dd->iclk);
2089        if (err)
2090                goto res_err;
2091
2092        err = atmel_aes_hw_version_init(aes_dd);
2093        if (err)
2094                goto iclk_unprepare;
2095
2096        atmel_aes_get_cap(aes_dd);
2097
2098        err = atmel_aes_buff_init(aes_dd);
2099        if (err)
2100                goto err_aes_buff;
2101
2102        err = atmel_aes_dma_init(aes_dd, pdata);
2103        if (err)
2104                goto err_aes_dma;
2105
2106        spin_lock(&atmel_aes.lock);
2107        list_add_tail(&aes_dd->list, &atmel_aes.dev_list);
2108        spin_unlock(&atmel_aes.lock);
2109
2110        err = atmel_aes_register_algs(aes_dd);
2111        if (err)
2112                goto err_algs;
2113
2114        dev_info(dev, "Atmel AES - Using %s, %s for DMA transfers\n",
2115                        dma_chan_name(aes_dd->src.chan),
2116                        dma_chan_name(aes_dd->dst.chan));
2117
2118        return 0;
2119
2120err_algs:
2121        spin_lock(&atmel_aes.lock);
2122        list_del(&aes_dd->list);
2123        spin_unlock(&atmel_aes.lock);
2124        atmel_aes_dma_cleanup(aes_dd);
2125err_aes_dma:
2126        atmel_aes_buff_cleanup(aes_dd);
2127err_aes_buff:
2128iclk_unprepare:
2129        clk_unprepare(aes_dd->iclk);
2130res_err:
2131        tasklet_kill(&aes_dd->done_task);
2132        tasklet_kill(&aes_dd->queue_task);
2133aes_dd_err:
2134        dev_err(dev, "initialization failed.\n");
2135
2136        return err;
2137}
2138
2139static int atmel_aes_remove(struct platform_device *pdev)
2140{
2141        struct atmel_aes_dev *aes_dd;
2142
2143        aes_dd = platform_get_drvdata(pdev);
2144        if (!aes_dd)
2145                return -ENODEV;
2146        spin_lock(&atmel_aes.lock);
2147        list_del(&aes_dd->list);
2148        spin_unlock(&atmel_aes.lock);
2149
2150        atmel_aes_unregister_algs(aes_dd);
2151
2152        tasklet_kill(&aes_dd->done_task);
2153        tasklet_kill(&aes_dd->queue_task);
2154
2155        atmel_aes_dma_cleanup(aes_dd);
2156        atmel_aes_buff_cleanup(aes_dd);
2157
2158        clk_unprepare(aes_dd->iclk);
2159
2160        return 0;
2161}
2162
2163static struct platform_driver atmel_aes_driver = {
2164        .probe          = atmel_aes_probe,
2165        .remove         = atmel_aes_remove,
2166        .driver         = {
2167                .name   = "atmel_aes",
2168                .of_match_table = of_match_ptr(atmel_aes_dt_ids),
2169        },
2170};
2171
2172module_platform_driver(atmel_aes_driver);
2173
2174MODULE_DESCRIPTION("Atmel AES hw acceleration support.");
2175MODULE_LICENSE("GPL v2");
2176MODULE_AUTHOR("Nicolas Royer - Eukréa Electromatique");
2177