linux/drivers/crypto/atmel-aes.c
/*
 * Cryptographic API.
 *
 * Support for ATMEL AES HW acceleration.
 *
 * Copyright (c) 2012 Eukréa Electromatique - ATMEL
 * Author: Nicolas Royer <nicolas@eukrea.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation.
 *
 * Some ideas are from omap-aes.c driver.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/clk.h>
#include <linux/io.h>
#include <linux/hw_random.h>
#include <linux/platform_device.h>

#include <linux/device.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/scatterlist.h>
#include <linux/dma-mapping.h>
#include <linux/of_device.h>
#include <linux/delay.h>
#include <linux/crypto.h>
#include <linux/cryptohash.h>
#include <crypto/scatterwalk.h>
#include <crypto/algapi.h>
#include <crypto/aes.h>
#include <crypto/hash.h>
#include <crypto/internal/hash.h>
#include <linux/platform_data/crypto-atmel.h>
#include <dt-bindings/dma/at91.h>
#include "atmel-aes-regs.h"

#define CFB8_BLOCK_SIZE		1
#define CFB16_BLOCK_SIZE	2
#define CFB32_BLOCK_SIZE	4
#define CFB64_BLOCK_SIZE	8

/* AES flags */
#define AES_FLAGS_MODE_MASK	0x03ff
#define AES_FLAGS_ENCRYPT	BIT(0)
#define AES_FLAGS_CBC		BIT(1)
#define AES_FLAGS_CFB		BIT(2)
#define AES_FLAGS_CFB8		BIT(3)
#define AES_FLAGS_CFB16		BIT(4)
#define AES_FLAGS_CFB32		BIT(5)
#define AES_FLAGS_CFB64		BIT(6)
#define AES_FLAGS_CFB128	BIT(7)
#define AES_FLAGS_OFB		BIT(8)
#define AES_FLAGS_CTR		BIT(9)

#define AES_FLAGS_INIT		BIT(16)
#define AES_FLAGS_DMA		BIT(17)
#define AES_FLAGS_BUSY		BIT(18)
#define AES_FLAGS_FAST		BIT(19)

#define ATMEL_AES_QUEUE_LENGTH	50

#define ATMEL_AES_DMA_THRESHOLD		16
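/*
 * Requests of at most ATMEL_AES_DMA_THRESHOLD bytes are written to the
 * IDATAR registers by the CPU (see atmel_aes_crypt_cpu_start()); anything
 * larger is handed to the two DMA channels.
 */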

struct atmel_aes_caps {
	bool	has_dualbuff;
	bool	has_cfb64;
	u32	max_burst_size;
};

struct atmel_aes_dev;

struct atmel_aes_ctx {
	struct atmel_aes_dev *dd;

	int	keylen;
	u32	key[AES_KEYSIZE_256 / sizeof(u32)];

	u16	block_size;
};

struct atmel_aes_reqctx {
	unsigned long mode;
};

struct atmel_aes_dma {
	struct dma_chan		*chan;
	struct dma_slave_config	dma_conf;
};
struct atmel_aes_dev {
	struct list_head	list;
	unsigned long		phys_base;
	void __iomem		*io_base;

	struct atmel_aes_ctx	*ctx;
	struct device		*dev;
	struct clk		*iclk;
	int			irq;

	unsigned long		flags;
	int			err;

	spinlock_t		lock;
	struct crypto_queue	queue;

	struct tasklet_struct	done_task;
	struct tasklet_struct	queue_task;

	struct ablkcipher_request	*req;
	size_t			total;

	struct scatterlist	*in_sg;
	unsigned int		nb_in_sg;
	size_t			in_offset;
	struct scatterlist	*out_sg;
	unsigned int		nb_out_sg;
	size_t			out_offset;

	size_t			bufcnt;
	size_t			buflen;
	size_t			dma_size;

	void			*buf_in;
	int			dma_in;
	dma_addr_t		dma_addr_in;
	struct atmel_aes_dma	dma_lch_in;

	void			*buf_out;
	int			dma_out;
	dma_addr_t		dma_addr_out;
	struct atmel_aes_dma	dma_lch_out;

	struct atmel_aes_caps	caps;

	u32			hw_version;
};

struct atmel_aes_drv {
	struct list_head	dev_list;
	spinlock_t		lock;
};

static struct atmel_aes_drv atmel_aes = {
	.dev_list = LIST_HEAD_INIT(atmel_aes.dev_list),
	.lock = __SPIN_LOCK_UNLOCKED(atmel_aes.lock),
};

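/*
 * Count the scatterlist entries needed to cover req->nbytes, walking the
 * list until the request length is exhausted or the list ends.
 */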
static int atmel_aes_sg_length(struct ablkcipher_request *req,
			struct scatterlist *sg)
{
	unsigned int total = req->nbytes;
	unsigned int len;
	int sg_nb = 0;
	struct scatterlist *sg_list = sg;

	while (total) {
		len = min(sg_list->length, total);

		sg_nb++;
		total -= len;

		sg_list = sg_next(sg_list);
		if (!sg_list)
			total = 0;
	}

	return sg_nb;
}

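/*
 * Copy up to min(buflen, total) bytes between the linear buffer @buf and
 * the scatterlist, advancing *sg and *offset as entries are consumed.
 * @out selects the direction (0: sg -> buf, 1: buf -> sg).  Returns the
 * number of bytes actually copied.
 */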
static int atmel_aes_sg_copy(struct scatterlist **sg, size_t *offset,
			void *buf, size_t buflen, size_t total, int out)
{
	unsigned int count, off = 0;

	while (buflen && total) {
		count = min((*sg)->length - *offset, total);
		count = min(count, buflen);

		if (!count)
			return off;

		scatterwalk_map_and_copy(buf + off, *sg, *offset, count, out);

		off += count;
		buflen -= count;
		*offset += count;
		total -= count;

		if (*offset == (*sg)->length) {
			*sg = sg_next(*sg);
			if (*sg)
				*offset = 0;
			else
				total = 0;
		}
	}

	return off;
}

static inline u32 atmel_aes_read(struct atmel_aes_dev *dd, u32 offset)
{
	return readl_relaxed(dd->io_base + offset);
}

static inline void atmel_aes_write(struct atmel_aes_dev *dd,
					u32 offset, u32 value)
{
	writel_relaxed(value, dd->io_base + offset);
}

static void atmel_aes_read_n(struct atmel_aes_dev *dd, u32 offset,
					u32 *value, int count)
{
	for (; count--; value++, offset += 4)
		*value = atmel_aes_read(dd, offset);
}

static void atmel_aes_write_n(struct atmel_aes_dev *dd, u32 offset,
					u32 *value, int count)
{
	for (; count--; value++, offset += 4)
		atmel_aes_write(dd, offset, *value);
}

static struct atmel_aes_dev *atmel_aes_find_dev(struct atmel_aes_ctx *ctx)
{
	struct atmel_aes_dev *aes_dd = NULL;
	struct atmel_aes_dev *tmp;

	spin_lock_bh(&atmel_aes.lock);
	if (!ctx->dd) {
		list_for_each_entry(tmp, &atmel_aes.dev_list, list) {
			aes_dd = tmp;
			break;
		}
		ctx->dd = aes_dd;
	} else {
		aes_dd = ctx->dd;
	}

	spin_unlock_bh(&atmel_aes.lock);

	return aes_dd;
}

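/*
 * Enable the peripheral clock and, on first use after reset, soft-reset
 * the engine and program the CKEY field of the Mode Register.
 */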
static int atmel_aes_hw_init(struct atmel_aes_dev *dd)
{
	int err;

	err = clk_prepare_enable(dd->iclk);
	if (err)
		return err;

	if (!(dd->flags & AES_FLAGS_INIT)) {
		atmel_aes_write(dd, AES_CR, AES_CR_SWRST);
		atmel_aes_write(dd, AES_MR, 0xE << AES_MR_CKEY_OFFSET);
		dd->flags |= AES_FLAGS_INIT;
		dd->err = 0;
	}

	return 0;
}

static inline unsigned int atmel_aes_get_version(struct atmel_aes_dev *dd)
{
	return atmel_aes_read(dd, AES_HW_VERSION) & 0x00000fff;
}

static void atmel_aes_hw_version_init(struct atmel_aes_dev *dd)
{
	atmel_aes_hw_init(dd);

	dd->hw_version = atmel_aes_get_version(dd);

	dev_info(dd->dev, "version: 0x%x\n", dd->hw_version);

	clk_disable_unprepare(dd->iclk);
}

static void atmel_aes_finish_req(struct atmel_aes_dev *dd, int err)
{
	struct ablkcipher_request *req = dd->req;

	clk_disable_unprepare(dd->iclk);
	dd->flags &= ~AES_FLAGS_BUSY;

	req->base.complete(&req->base, err);
}

static void atmel_aes_dma_callback(void *data)
{
	struct atmel_aes_dev *dd = data;

	/* dma_lch_out - completed */
	tasklet_schedule(&dd->done_task);
}

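/*
 * Program both DMA channels for one chunk: memory -> IDATAR on the "in"
 * channel and ODATAR -> memory on the "out" channel.  The bus width and
 * burst size follow the CFB variant in use, since CFB8/16/32/64 segments
 * are narrower than the 32-bit data registers.  Completion is signalled
 * by the out-channel callback.
 */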
static int atmel_aes_crypt_dma(struct atmel_aes_dev *dd,
		dma_addr_t dma_addr_in, dma_addr_t dma_addr_out, int length)
{
	struct scatterlist sg[2];
	struct dma_async_tx_descriptor	*in_desc, *out_desc;

	dd->dma_size = length;

	dma_sync_single_for_device(dd->dev, dma_addr_in, length,
				   DMA_TO_DEVICE);
	dma_sync_single_for_device(dd->dev, dma_addr_out, length,
				   DMA_FROM_DEVICE);

	if (dd->flags & AES_FLAGS_CFB8) {
		dd->dma_lch_in.dma_conf.dst_addr_width =
			DMA_SLAVE_BUSWIDTH_1_BYTE;
		dd->dma_lch_out.dma_conf.src_addr_width =
			DMA_SLAVE_BUSWIDTH_1_BYTE;
	} else if (dd->flags & AES_FLAGS_CFB16) {
		dd->dma_lch_in.dma_conf.dst_addr_width =
			DMA_SLAVE_BUSWIDTH_2_BYTES;
		dd->dma_lch_out.dma_conf.src_addr_width =
			DMA_SLAVE_BUSWIDTH_2_BYTES;
	} else {
		dd->dma_lch_in.dma_conf.dst_addr_width =
			DMA_SLAVE_BUSWIDTH_4_BYTES;
		dd->dma_lch_out.dma_conf.src_addr_width =
			DMA_SLAVE_BUSWIDTH_4_BYTES;
	}

	if (dd->flags & (AES_FLAGS_CFB8 | AES_FLAGS_CFB16 |
			AES_FLAGS_CFB32 | AES_FLAGS_CFB64)) {
		dd->dma_lch_in.dma_conf.src_maxburst = 1;
		dd->dma_lch_in.dma_conf.dst_maxburst = 1;
		dd->dma_lch_out.dma_conf.src_maxburst = 1;
		dd->dma_lch_out.dma_conf.dst_maxburst = 1;
	} else {
		dd->dma_lch_in.dma_conf.src_maxburst = dd->caps.max_burst_size;
		dd->dma_lch_in.dma_conf.dst_maxburst = dd->caps.max_burst_size;
		dd->dma_lch_out.dma_conf.src_maxburst = dd->caps.max_burst_size;
		dd->dma_lch_out.dma_conf.dst_maxburst = dd->caps.max_burst_size;
	}

	dmaengine_slave_config(dd->dma_lch_in.chan, &dd->dma_lch_in.dma_conf);
	dmaengine_slave_config(dd->dma_lch_out.chan, &dd->dma_lch_out.dma_conf);

	dd->flags |= AES_FLAGS_DMA;

	sg_init_table(&sg[0], 1);
	sg_dma_address(&sg[0]) = dma_addr_in;
	sg_dma_len(&sg[0]) = length;

	sg_init_table(&sg[1], 1);
	sg_dma_address(&sg[1]) = dma_addr_out;
	sg_dma_len(&sg[1]) = length;

	in_desc = dmaengine_prep_slave_sg(dd->dma_lch_in.chan, &sg[0],
				1, DMA_MEM_TO_DEV,
				DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!in_desc)
		return -EINVAL;

	out_desc = dmaengine_prep_slave_sg(dd->dma_lch_out.chan, &sg[1],
				1, DMA_DEV_TO_MEM,
				DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!out_desc)
		return -EINVAL;

	out_desc->callback = atmel_aes_dma_callback;
	out_desc->callback_param = dd;

	dmaengine_submit(out_desc);
	dma_async_issue_pending(dd->dma_lch_out.chan);

	dmaengine_submit(in_desc);
	dma_async_issue_pending(dd->dma_lch_in.chan);

	return 0;
}

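/*
 * Programmed-I/O path for short requests: gather the input into the
 * bounce buffer, enable the data-ready interrupt and write the whole
 * chunk to the IDATAR registers; the IRQ/tasklet pair reads the result
 * back in atmel_aes_done_task().
 */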
static int atmel_aes_crypt_cpu_start(struct atmel_aes_dev *dd)
{
	dd->flags &= ~AES_FLAGS_DMA;

	dma_sync_single_for_cpu(dd->dev, dd->dma_addr_in,
				dd->dma_size, DMA_TO_DEVICE);
	dma_sync_single_for_cpu(dd->dev, dd->dma_addr_out,
				dd->dma_size, DMA_FROM_DEVICE);

	/* use cache buffers */
	dd->nb_in_sg = atmel_aes_sg_length(dd->req, dd->in_sg);
	if (!dd->nb_in_sg)
		return -EINVAL;

	dd->nb_out_sg = atmel_aes_sg_length(dd->req, dd->out_sg);
	if (!dd->nb_out_sg)
		return -EINVAL;

	dd->bufcnt = sg_copy_to_buffer(dd->in_sg, dd->nb_in_sg,
					dd->buf_in, dd->total);

	if (!dd->bufcnt)
		return -EINVAL;

	dd->total -= dd->bufcnt;

	atmel_aes_write(dd, AES_IER, AES_INT_DATARDY);
	atmel_aes_write_n(dd, AES_IDATAR(0), (u32 *) dd->buf_in,
				dd->bufcnt >> 2);

	return 0;
}

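/*
 * Start one DMA transfer.  When both scatterlists are suitably aligned
 * and of matching length they are mapped and used directly (the "fast"
 * path); otherwise the input is staged through the pre-mapped bounce
 * buffers and copied back on completion.
 */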
static int atmel_aes_crypt_dma_start(struct atmel_aes_dev *dd)
{
	int err, fast = 0, in, out;
	size_t count;
	dma_addr_t addr_in, addr_out;

	if ((!dd->in_offset) && (!dd->out_offset)) {
		/* check for alignment */
		in = IS_ALIGNED((u32)dd->in_sg->offset, sizeof(u32)) &&
			IS_ALIGNED(dd->in_sg->length, dd->ctx->block_size);
		out = IS_ALIGNED((u32)dd->out_sg->offset, sizeof(u32)) &&
			IS_ALIGNED(dd->out_sg->length, dd->ctx->block_size);
		fast = in && out;

		if (sg_dma_len(dd->in_sg) != sg_dma_len(dd->out_sg))
			fast = 0;
	}

	if (fast) {
		count = min(dd->total, sg_dma_len(dd->in_sg));
		count = min(count, sg_dma_len(dd->out_sg));

		err = dma_map_sg(dd->dev, dd->in_sg, 1, DMA_TO_DEVICE);
		if (!err) {
			dev_err(dd->dev, "dma_map_sg() error\n");
			return -EINVAL;
		}

		err = dma_map_sg(dd->dev, dd->out_sg, 1,
				DMA_FROM_DEVICE);
		if (!err) {
			dev_err(dd->dev, "dma_map_sg() error\n");
			dma_unmap_sg(dd->dev, dd->in_sg, 1,
				DMA_TO_DEVICE);
			return -EINVAL;
		}

		addr_in = sg_dma_address(dd->in_sg);
		addr_out = sg_dma_address(dd->out_sg);

		dd->flags |= AES_FLAGS_FAST;

	} else {
		dma_sync_single_for_cpu(dd->dev, dd->dma_addr_in,
					dd->dma_size, DMA_TO_DEVICE);

		/* use cache buffers */
		count = atmel_aes_sg_copy(&dd->in_sg, &dd->in_offset,
				dd->buf_in, dd->buflen, dd->total, 0);

		addr_in = dd->dma_addr_in;
		addr_out = dd->dma_addr_out;

		dd->flags &= ~AES_FLAGS_FAST;
	}

	dd->total -= count;

	err = atmel_aes_crypt_dma(dd, addr_in, addr_out, count);

	if (err && (dd->flags & AES_FLAGS_FAST)) {
		dma_unmap_sg(dd->dev, dd->in_sg, 1, DMA_TO_DEVICE);
		/* out_sg was mapped DMA_FROM_DEVICE; unmap it the same way */
		dma_unmap_sg(dd->dev, dd->out_sg, 1, DMA_FROM_DEVICE);
	}

	return err;
}

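/*
 * Program the Mode Register for the current request: key size, cipher
 * direction, operating mode (and CFB segment size), then load the key
 * and, for the chained modes, the IV.  MR must be written before the
 * IV registers, as noted below.
 */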
static int atmel_aes_write_ctrl(struct atmel_aes_dev *dd)
{
	int err;
	u32 valcr = 0, valmr = 0;

	err = atmel_aes_hw_init(dd);
	if (err)
		return err;

	/* MR register must be set before IV registers */
	if (dd->ctx->keylen == AES_KEYSIZE_128)
		valmr |= AES_MR_KEYSIZE_128;
	else if (dd->ctx->keylen == AES_KEYSIZE_192)
		valmr |= AES_MR_KEYSIZE_192;
	else
		valmr |= AES_MR_KEYSIZE_256;

	if (dd->flags & AES_FLAGS_CBC) {
		valmr |= AES_MR_OPMOD_CBC;
	} else if (dd->flags & AES_FLAGS_CFB) {
		valmr |= AES_MR_OPMOD_CFB;
		if (dd->flags & AES_FLAGS_CFB8)
			valmr |= AES_MR_CFBS_8b;
		else if (dd->flags & AES_FLAGS_CFB16)
			valmr |= AES_MR_CFBS_16b;
		else if (dd->flags & AES_FLAGS_CFB32)
			valmr |= AES_MR_CFBS_32b;
		else if (dd->flags & AES_FLAGS_CFB64)
			valmr |= AES_MR_CFBS_64b;
		else if (dd->flags & AES_FLAGS_CFB128)
			valmr |= AES_MR_CFBS_128b;
	} else if (dd->flags & AES_FLAGS_OFB) {
		valmr |= AES_MR_OPMOD_OFB;
	} else if (dd->flags & AES_FLAGS_CTR) {
		valmr |= AES_MR_OPMOD_CTR;
	} else {
		valmr |= AES_MR_OPMOD_ECB;
	}

	if (dd->flags & AES_FLAGS_ENCRYPT)
		valmr |= AES_MR_CYPHER_ENC;

	if (dd->total > ATMEL_AES_DMA_THRESHOLD) {
		valmr |= AES_MR_SMOD_IDATAR0;
		if (dd->caps.has_dualbuff)
			valmr |= AES_MR_DUALBUFF;
	} else {
		valmr |= AES_MR_SMOD_AUTO;
	}

	atmel_aes_write(dd, AES_CR, valcr);
	atmel_aes_write(dd, AES_MR, valmr);

	atmel_aes_write_n(dd, AES_KEYWR(0), dd->ctx->key,
						dd->ctx->keylen >> 2);

	if (((dd->flags & AES_FLAGS_CBC) || (dd->flags & AES_FLAGS_CFB) ||
	   (dd->flags & AES_FLAGS_OFB) || (dd->flags & AES_FLAGS_CTR)) &&
	   dd->req->info) {
		atmel_aes_write_n(dd, AES_IVR(0), dd->req->info, 4);
	}

	return 0;
}

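/*
 * Enqueue @req (if any) and, when the engine is idle, dequeue the next
 * request, mark the device busy and kick off the transfer.  Called both
 * from the crypto API entry points and from the queue tasklet.
 */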
static int atmel_aes_handle_queue(struct atmel_aes_dev *dd,
			       struct ablkcipher_request *req)
{
	struct crypto_async_request *async_req, *backlog;
	struct atmel_aes_ctx *ctx;
	struct atmel_aes_reqctx *rctx;
	unsigned long flags;
	int err, ret = 0;

	spin_lock_irqsave(&dd->lock, flags);
	if (req)
		ret = ablkcipher_enqueue_request(&dd->queue, req);
	if (dd->flags & AES_FLAGS_BUSY) {
		spin_unlock_irqrestore(&dd->lock, flags);
		return ret;
	}
	backlog = crypto_get_backlog(&dd->queue);
	async_req = crypto_dequeue_request(&dd->queue);
	if (async_req)
		dd->flags |= AES_FLAGS_BUSY;
	spin_unlock_irqrestore(&dd->lock, flags);

	if (!async_req)
		return ret;

	if (backlog)
		backlog->complete(backlog, -EINPROGRESS);

	req = ablkcipher_request_cast(async_req);

	/* assign new request to device */
	dd->req = req;
	dd->total = req->nbytes;
	dd->in_offset = 0;
	dd->in_sg = req->src;
	dd->out_offset = 0;
	dd->out_sg = req->dst;

	rctx = ablkcipher_request_ctx(req);
	ctx = crypto_ablkcipher_ctx(crypto_ablkcipher_reqtfm(req));
	rctx->mode &= AES_FLAGS_MODE_MASK;
	dd->flags = (dd->flags & ~AES_FLAGS_MODE_MASK) | rctx->mode;
	dd->ctx = ctx;
	ctx->dd = dd;

	err = atmel_aes_write_ctrl(dd);
	if (!err) {
		if (dd->total > ATMEL_AES_DMA_THRESHOLD)
			err = atmel_aes_crypt_dma_start(dd);
		else
			err = atmel_aes_crypt_cpu_start(dd);
	}
	if (err) {
		/* aes_task will not finish it, so do it here */
		atmel_aes_finish_req(dd, err);
		tasklet_schedule(&dd->queue_task);
	}

	return ret;
}

static int atmel_aes_crypt_dma_stop(struct atmel_aes_dev *dd)
{
	int err = -EINVAL;
	size_t count;

	if (dd->flags & AES_FLAGS_DMA) {
		err = 0;
		if (dd->flags & AES_FLAGS_FAST) {
			dma_unmap_sg(dd->dev, dd->out_sg, 1, DMA_FROM_DEVICE);
			dma_unmap_sg(dd->dev, dd->in_sg, 1, DMA_TO_DEVICE);
		} else {
			dma_sync_single_for_cpu(dd->dev, dd->dma_addr_out,
				dd->dma_size, DMA_FROM_DEVICE);

			/* copy data */
			count = atmel_aes_sg_copy(&dd->out_sg, &dd->out_offset,
				dd->buf_out, dd->buflen, dd->dma_size, 1);
			if (count != dd->dma_size) {
				err = -EINVAL;
				pr_err("not all data converted: %zu\n", count);
			}
		}
	}

	return err;
}

static int atmel_aes_buff_init(struct atmel_aes_dev *dd)
{
	int err = -ENOMEM;

	dd->buf_in = (void *)__get_free_pages(GFP_KERNEL, 0);
	dd->buf_out = (void *)__get_free_pages(GFP_KERNEL, 0);
	dd->buflen = PAGE_SIZE;
	dd->buflen &= ~(AES_BLOCK_SIZE - 1);

	if (!dd->buf_in || !dd->buf_out) {
		dev_err(dd->dev, "unable to alloc pages.\n");
		goto err_alloc;
	}

	/* MAP here */
	dd->dma_addr_in = dma_map_single(dd->dev, dd->buf_in,
					dd->buflen, DMA_TO_DEVICE);
	if (dma_mapping_error(dd->dev, dd->dma_addr_in)) {
		dev_err(dd->dev, "dma %zu bytes error\n", dd->buflen);
		err = -EINVAL;
		goto err_map_in;
	}

	dd->dma_addr_out = dma_map_single(dd->dev, dd->buf_out,
					dd->buflen, DMA_FROM_DEVICE);
	if (dma_mapping_error(dd->dev, dd->dma_addr_out)) {
		dev_err(dd->dev, "dma %zu bytes error\n", dd->buflen);
		err = -EINVAL;
		goto err_map_out;
	}

	return 0;

err_map_out:
	dma_unmap_single(dd->dev, dd->dma_addr_in, dd->buflen,
		DMA_TO_DEVICE);
err_map_in:
err_alloc:
	free_page((unsigned long)dd->buf_out);
	free_page((unsigned long)dd->buf_in);
	if (err)
		pr_err("error: %d\n", err);
	return err;
}

static void atmel_aes_buff_cleanup(struct atmel_aes_dev *dd)
{
	dma_unmap_single(dd->dev, dd->dma_addr_out, dd->buflen,
			 DMA_FROM_DEVICE);
	dma_unmap_single(dd->dev, dd->dma_addr_in, dd->buflen,
		DMA_TO_DEVICE);
	free_page((unsigned long)dd->buf_out);
	free_page((unsigned long)dd->buf_in);
}

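/*
 * Common entry point for all modes: validate that the request length is
 * a whole number of blocks for the selected mode, record the mode flags
 * in the request context and hand the request to the queue.
 */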
static int atmel_aes_crypt(struct ablkcipher_request *req, unsigned long mode)
{
	struct atmel_aes_ctx *ctx = crypto_ablkcipher_ctx(
			crypto_ablkcipher_reqtfm(req));
	struct atmel_aes_reqctx *rctx = ablkcipher_request_ctx(req);
	struct atmel_aes_dev *dd;

	if (mode & AES_FLAGS_CFB8) {
		if (!IS_ALIGNED(req->nbytes, CFB8_BLOCK_SIZE)) {
			pr_err("request size is not an exact number of CFB8 blocks\n");
			return -EINVAL;
		}
		ctx->block_size = CFB8_BLOCK_SIZE;
	} else if (mode & AES_FLAGS_CFB16) {
		if (!IS_ALIGNED(req->nbytes, CFB16_BLOCK_SIZE)) {
			pr_err("request size is not an exact number of CFB16 blocks\n");
			return -EINVAL;
		}
		ctx->block_size = CFB16_BLOCK_SIZE;
	} else if (mode & AES_FLAGS_CFB32) {
		if (!IS_ALIGNED(req->nbytes, CFB32_BLOCK_SIZE)) {
			pr_err("request size is not an exact number of CFB32 blocks\n");
			return -EINVAL;
		}
		ctx->block_size = CFB32_BLOCK_SIZE;
	} else if (mode & AES_FLAGS_CFB64) {
		if (!IS_ALIGNED(req->nbytes, CFB64_BLOCK_SIZE)) {
			pr_err("request size is not an exact number of CFB64 blocks\n");
			return -EINVAL;
		}
		ctx->block_size = CFB64_BLOCK_SIZE;
	} else {
		if (!IS_ALIGNED(req->nbytes, AES_BLOCK_SIZE)) {
			pr_err("request size is not an exact number of AES blocks\n");
			return -EINVAL;
		}
		ctx->block_size = AES_BLOCK_SIZE;
	}

	dd = atmel_aes_find_dev(ctx);
	if (!dd)
		return -ENODEV;

	rctx->mode = mode;

	return atmel_aes_handle_queue(dd, req);
}

static bool atmel_aes_filter(struct dma_chan *chan, void *slave)
{
	struct at_dma_slave	*sl = slave;

	if (sl && sl->dma_dev == chan->device->dev) {
		chan->private = sl;
		return true;
	} else {
		return false;
	}
}

static int atmel_aes_dma_init(struct atmel_aes_dev *dd,
	struct crypto_platform_data *pdata)
{
	int err = -ENOMEM;
	dma_cap_mask_t mask;

	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);

	/* Try to grab 2 DMA channels */
	dd->dma_lch_in.chan = dma_request_slave_channel_compat(mask,
			atmel_aes_filter, &pdata->dma_slave->rxdata, dd->dev, "tx");
	if (!dd->dma_lch_in.chan)
		goto err_dma_in;

	dd->dma_lch_in.dma_conf.direction = DMA_MEM_TO_DEV;
	dd->dma_lch_in.dma_conf.dst_addr = dd->phys_base +
		AES_IDATAR(0);
	dd->dma_lch_in.dma_conf.src_maxburst = dd->caps.max_burst_size;
	dd->dma_lch_in.dma_conf.src_addr_width =
		DMA_SLAVE_BUSWIDTH_4_BYTES;
	dd->dma_lch_in.dma_conf.dst_maxburst = dd->caps.max_burst_size;
	dd->dma_lch_in.dma_conf.dst_addr_width =
		DMA_SLAVE_BUSWIDTH_4_BYTES;
	dd->dma_lch_in.dma_conf.device_fc = false;

	dd->dma_lch_out.chan = dma_request_slave_channel_compat(mask,
			atmel_aes_filter, &pdata->dma_slave->txdata, dd->dev, "rx");
	if (!dd->dma_lch_out.chan)
		goto err_dma_out;

	dd->dma_lch_out.dma_conf.direction = DMA_DEV_TO_MEM;
	dd->dma_lch_out.dma_conf.src_addr = dd->phys_base +
		AES_ODATAR(0);
	dd->dma_lch_out.dma_conf.src_maxburst = dd->caps.max_burst_size;
	dd->dma_lch_out.dma_conf.src_addr_width =
		DMA_SLAVE_BUSWIDTH_4_BYTES;
	dd->dma_lch_out.dma_conf.dst_maxburst = dd->caps.max_burst_size;
	dd->dma_lch_out.dma_conf.dst_addr_width =
		DMA_SLAVE_BUSWIDTH_4_BYTES;
	dd->dma_lch_out.dma_conf.device_fc = false;

	return 0;

err_dma_out:
	dma_release_channel(dd->dma_lch_in.chan);
err_dma_in:
	dev_warn(dd->dev, "no DMA channel available\n");
	return err;
}

static void atmel_aes_dma_cleanup(struct atmel_aes_dev *dd)
{
	dma_release_channel(dd->dma_lch_in.chan);
	dma_release_channel(dd->dma_lch_out.chan);
}

static int atmel_aes_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
			   unsigned int keylen)
{
	struct atmel_aes_ctx *ctx = crypto_ablkcipher_ctx(tfm);

	if (keylen != AES_KEYSIZE_128 && keylen != AES_KEYSIZE_192 &&
		   keylen != AES_KEYSIZE_256) {
		crypto_ablkcipher_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
		return -EINVAL;
	}

	memcpy(ctx->key, key, keylen);
	ctx->keylen = keylen;

	return 0;
}

static int atmel_aes_ecb_encrypt(struct ablkcipher_request *req)
{
	return atmel_aes_crypt(req, AES_FLAGS_ENCRYPT);
}

static int atmel_aes_ecb_decrypt(struct ablkcipher_request *req)
{
	return atmel_aes_crypt(req, 0);
}

static int atmel_aes_cbc_encrypt(struct ablkcipher_request *req)
{
	return atmel_aes_crypt(req, AES_FLAGS_ENCRYPT | AES_FLAGS_CBC);
}

static int atmel_aes_cbc_decrypt(struct ablkcipher_request *req)
{
	return atmel_aes_crypt(req, AES_FLAGS_CBC);
}

static int atmel_aes_ofb_encrypt(struct ablkcipher_request *req)
{
	return atmel_aes_crypt(req, AES_FLAGS_ENCRYPT | AES_FLAGS_OFB);
}

static int atmel_aes_ofb_decrypt(struct ablkcipher_request *req)
{
	return atmel_aes_crypt(req, AES_FLAGS_OFB);
}

static int atmel_aes_cfb_encrypt(struct ablkcipher_request *req)
{
	return atmel_aes_crypt(req,
		AES_FLAGS_ENCRYPT | AES_FLAGS_CFB | AES_FLAGS_CFB128);
}

static int atmel_aes_cfb_decrypt(struct ablkcipher_request *req)
{
	return atmel_aes_crypt(req, AES_FLAGS_CFB | AES_FLAGS_CFB128);
}

static int atmel_aes_cfb64_encrypt(struct ablkcipher_request *req)
{
	return atmel_aes_crypt(req,
		AES_FLAGS_ENCRYPT | AES_FLAGS_CFB | AES_FLAGS_CFB64);
}

static int atmel_aes_cfb64_decrypt(struct ablkcipher_request *req)
{
	return atmel_aes_crypt(req, AES_FLAGS_CFB | AES_FLAGS_CFB64);
}

static int atmel_aes_cfb32_encrypt(struct ablkcipher_request *req)
{
	return atmel_aes_crypt(req,
		AES_FLAGS_ENCRYPT | AES_FLAGS_CFB | AES_FLAGS_CFB32);
}

static int atmel_aes_cfb32_decrypt(struct ablkcipher_request *req)
{
	return atmel_aes_crypt(req, AES_FLAGS_CFB | AES_FLAGS_CFB32);
}

static int atmel_aes_cfb16_encrypt(struct ablkcipher_request *req)
{
	return atmel_aes_crypt(req,
		AES_FLAGS_ENCRYPT | AES_FLAGS_CFB | AES_FLAGS_CFB16);
}

static int atmel_aes_cfb16_decrypt(struct ablkcipher_request *req)
{
	return atmel_aes_crypt(req, AES_FLAGS_CFB | AES_FLAGS_CFB16);
}

static int atmel_aes_cfb8_encrypt(struct ablkcipher_request *req)
{
	return atmel_aes_crypt(req,
		AES_FLAGS_ENCRYPT | AES_FLAGS_CFB | AES_FLAGS_CFB8);
}

static int atmel_aes_cfb8_decrypt(struct ablkcipher_request *req)
{
	return atmel_aes_crypt(req, AES_FLAGS_CFB | AES_FLAGS_CFB8);
}

static int atmel_aes_ctr_encrypt(struct ablkcipher_request *req)
{
	return atmel_aes_crypt(req, AES_FLAGS_ENCRYPT | AES_FLAGS_CTR);
}

static int atmel_aes_ctr_decrypt(struct ablkcipher_request *req)
{
	return atmel_aes_crypt(req, AES_FLAGS_CTR);
}

static int atmel_aes_cra_init(struct crypto_tfm *tfm)
{
	tfm->crt_ablkcipher.reqsize = sizeof(struct atmel_aes_reqctx);

	return 0;
}

static void atmel_aes_cra_exit(struct crypto_tfm *tfm)
{
}

static struct crypto_alg aes_algs[] = {
{
	.cra_name		= "ecb(aes)",
	.cra_driver_name	= "atmel-ecb-aes",
	.cra_priority		= 100,
	.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
	.cra_blocksize		= AES_BLOCK_SIZE,
	.cra_ctxsize		= sizeof(struct atmel_aes_ctx),
	.cra_alignmask		= 0xf,
	.cra_type		= &crypto_ablkcipher_type,
	.cra_module		= THIS_MODULE,
	.cra_init		= atmel_aes_cra_init,
	.cra_exit		= atmel_aes_cra_exit,
	.cra_u.ablkcipher = {
		.min_keysize	= AES_MIN_KEY_SIZE,
		.max_keysize	= AES_MAX_KEY_SIZE,
		.setkey		= atmel_aes_setkey,
		.encrypt	= atmel_aes_ecb_encrypt,
		.decrypt	= atmel_aes_ecb_decrypt,
	}
},
{
	.cra_name		= "cbc(aes)",
	.cra_driver_name	= "atmel-cbc-aes",
	.cra_priority		= 100,
	.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
	.cra_blocksize		= AES_BLOCK_SIZE,
	.cra_ctxsize		= sizeof(struct atmel_aes_ctx),
	.cra_alignmask		= 0xf,
	.cra_type		= &crypto_ablkcipher_type,
	.cra_module		= THIS_MODULE,
	.cra_init		= atmel_aes_cra_init,
	.cra_exit		= atmel_aes_cra_exit,
	.cra_u.ablkcipher = {
		.min_keysize	= AES_MIN_KEY_SIZE,
		.max_keysize	= AES_MAX_KEY_SIZE,
		.ivsize		= AES_BLOCK_SIZE,
		.setkey		= atmel_aes_setkey,
		.encrypt	= atmel_aes_cbc_encrypt,
		.decrypt	= atmel_aes_cbc_decrypt,
	}
},
{
	.cra_name		= "ofb(aes)",
	.cra_driver_name	= "atmel-ofb-aes",
	.cra_priority		= 100,
	.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
	.cra_blocksize		= AES_BLOCK_SIZE,
	.cra_ctxsize		= sizeof(struct atmel_aes_ctx),
	.cra_alignmask		= 0xf,
	.cra_type		= &crypto_ablkcipher_type,
	.cra_module		= THIS_MODULE,
	.cra_init		= atmel_aes_cra_init,
	.cra_exit		= atmel_aes_cra_exit,
	.cra_u.ablkcipher = {
		.min_keysize	= AES_MIN_KEY_SIZE,
		.max_keysize	= AES_MAX_KEY_SIZE,
		.ivsize		= AES_BLOCK_SIZE,
		.setkey		= atmel_aes_setkey,
		.encrypt	= atmel_aes_ofb_encrypt,
		.decrypt	= atmel_aes_ofb_decrypt,
	}
},
{
	.cra_name		= "cfb(aes)",
	.cra_driver_name	= "atmel-cfb-aes",
	.cra_priority		= 100,
	.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
	.cra_blocksize		= AES_BLOCK_SIZE,
	.cra_ctxsize		= sizeof(struct atmel_aes_ctx),
	.cra_alignmask		= 0xf,
	.cra_type		= &crypto_ablkcipher_type,
	.cra_module		= THIS_MODULE,
	.cra_init		= atmel_aes_cra_init,
	.cra_exit		= atmel_aes_cra_exit,
	.cra_u.ablkcipher = {
		.min_keysize	= AES_MIN_KEY_SIZE,
		.max_keysize	= AES_MAX_KEY_SIZE,
		.ivsize		= AES_BLOCK_SIZE,
		.setkey		= atmel_aes_setkey,
		.encrypt	= atmel_aes_cfb_encrypt,
		.decrypt	= atmel_aes_cfb_decrypt,
	}
},
{
	.cra_name		= "cfb32(aes)",
	.cra_driver_name	= "atmel-cfb32-aes",
	.cra_priority		= 100,
	.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
	.cra_blocksize		= CFB32_BLOCK_SIZE,
	.cra_ctxsize		= sizeof(struct atmel_aes_ctx),
	.cra_alignmask		= 0x3,
	.cra_type		= &crypto_ablkcipher_type,
	.cra_module		= THIS_MODULE,
	.cra_init		= atmel_aes_cra_init,
	.cra_exit		= atmel_aes_cra_exit,
	.cra_u.ablkcipher = {
		.min_keysize	= AES_MIN_KEY_SIZE,
		.max_keysize	= AES_MAX_KEY_SIZE,
		.ivsize		= AES_BLOCK_SIZE,
		.setkey		= atmel_aes_setkey,
		.encrypt	= atmel_aes_cfb32_encrypt,
		.decrypt	= atmel_aes_cfb32_decrypt,
	}
},
{
	.cra_name		= "cfb16(aes)",
	.cra_driver_name	= "atmel-cfb16-aes",
	.cra_priority		= 100,
	.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
	.cra_blocksize		= CFB16_BLOCK_SIZE,
	.cra_ctxsize		= sizeof(struct atmel_aes_ctx),
	.cra_alignmask		= 0x1,
	.cra_type		= &crypto_ablkcipher_type,
	.cra_module		= THIS_MODULE,
	.cra_init		= atmel_aes_cra_init,
	.cra_exit		= atmel_aes_cra_exit,
	.cra_u.ablkcipher = {
		.min_keysize	= AES_MIN_KEY_SIZE,
		.max_keysize	= AES_MAX_KEY_SIZE,
		.ivsize		= AES_BLOCK_SIZE,
		.setkey		= atmel_aes_setkey,
		.encrypt	= atmel_aes_cfb16_encrypt,
		.decrypt	= atmel_aes_cfb16_decrypt,
	}
},
{
	.cra_name		= "cfb8(aes)",
	.cra_driver_name	= "atmel-cfb8-aes",
	.cra_priority		= 100,
	.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
	.cra_blocksize		= CFB8_BLOCK_SIZE,
	.cra_ctxsize		= sizeof(struct atmel_aes_ctx),
	.cra_alignmask		= 0x0,
	.cra_type		= &crypto_ablkcipher_type,
	.cra_module		= THIS_MODULE,
	.cra_init		= atmel_aes_cra_init,
	.cra_exit		= atmel_aes_cra_exit,
	.cra_u.ablkcipher = {
		.min_keysize	= AES_MIN_KEY_SIZE,
		.max_keysize	= AES_MAX_KEY_SIZE,
		.ivsize		= AES_BLOCK_SIZE,
		.setkey		= atmel_aes_setkey,
		.encrypt	= atmel_aes_cfb8_encrypt,
		.decrypt	= atmel_aes_cfb8_decrypt,
	}
},
{
	.cra_name		= "ctr(aes)",
	.cra_driver_name	= "atmel-ctr-aes",
	.cra_priority		= 100,
	.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
	.cra_blocksize		= AES_BLOCK_SIZE,
	.cra_ctxsize		= sizeof(struct atmel_aes_ctx),
	.cra_alignmask		= 0xf,
	.cra_type		= &crypto_ablkcipher_type,
	.cra_module		= THIS_MODULE,
	.cra_init		= atmel_aes_cra_init,
	.cra_exit		= atmel_aes_cra_exit,
	.cra_u.ablkcipher = {
		.min_keysize	= AES_MIN_KEY_SIZE,
		.max_keysize	= AES_MAX_KEY_SIZE,
		.ivsize		= AES_BLOCK_SIZE,
		.setkey		= atmel_aes_setkey,
		.encrypt	= atmel_aes_ctr_encrypt,
		.decrypt	= atmel_aes_ctr_decrypt,
	}
},
};

static struct crypto_alg aes_cfb64_alg = {
	.cra_name		= "cfb64(aes)",
	.cra_driver_name	= "atmel-cfb64-aes",
	.cra_priority		= 100,
	.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
	.cra_blocksize		= CFB64_BLOCK_SIZE,
	.cra_ctxsize		= sizeof(struct atmel_aes_ctx),
	.cra_alignmask		= 0x7,
	.cra_type		= &crypto_ablkcipher_type,
	.cra_module		= THIS_MODULE,
	.cra_init		= atmel_aes_cra_init,
	.cra_exit		= atmel_aes_cra_exit,
	.cra_u.ablkcipher = {
		.min_keysize	= AES_MIN_KEY_SIZE,
		.max_keysize	= AES_MAX_KEY_SIZE,
		.ivsize		= AES_BLOCK_SIZE,
		.setkey		= atmel_aes_setkey,
		.encrypt	= atmel_aes_cfb64_encrypt,
		.decrypt	= atmel_aes_cfb64_decrypt,
	}
};

static void atmel_aes_queue_task(unsigned long data)
{
	struct atmel_aes_dev *dd = (struct atmel_aes_dev *)data;

	atmel_aes_handle_queue(dd, NULL);
}

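/*
 * Completion tasklet: read back the result of a PIO transfer, or tear
 * down the finished DMA chunk; start the next chunk if data remains,
 * otherwise complete the request and poke the queue.
 */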
static void atmel_aes_done_task(unsigned long data)
{
	struct atmel_aes_dev *dd = (struct atmel_aes_dev *) data;
	int err;

	if (!(dd->flags & AES_FLAGS_DMA)) {
		atmel_aes_read_n(dd, AES_ODATAR(0), (u32 *) dd->buf_out,
				dd->bufcnt >> 2);

		if (sg_copy_from_buffer(dd->out_sg, dd->nb_out_sg,
			dd->buf_out, dd->bufcnt))
			err = 0;
		else
			err = -EINVAL;

		goto cpu_end;
	}

	err = atmel_aes_crypt_dma_stop(dd);

	err = dd->err ? : err;

	if (dd->total && !err) {
		if (dd->flags & AES_FLAGS_FAST) {
			dd->in_sg = sg_next(dd->in_sg);
			dd->out_sg = sg_next(dd->out_sg);
			if (!dd->in_sg || !dd->out_sg)
				err = -EINVAL;
		}
		if (!err)
			err = atmel_aes_crypt_dma_start(dd);
		if (!err)
			return; /* DMA started. Not finishing. */
	}

cpu_end:
	atmel_aes_finish_req(dd, err);
	atmel_aes_handle_queue(dd, NULL);
}

static irqreturn_t atmel_aes_irq(int irq, void *dev_id)
{
	struct atmel_aes_dev *aes_dd = dev_id;
	u32 reg;

	reg = atmel_aes_read(aes_dd, AES_ISR);
	if (reg & atmel_aes_read(aes_dd, AES_IMR)) {
		atmel_aes_write(aes_dd, AES_IDR, reg);
		if (AES_FLAGS_BUSY & aes_dd->flags)
			tasklet_schedule(&aes_dd->done_task);
		else
			dev_warn(aes_dd->dev, "AES interrupt when no active requests.\n");
		return IRQ_HANDLED;
	}

	return IRQ_NONE;
}

static void atmel_aes_unregister_algs(struct atmel_aes_dev *dd)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(aes_algs); i++)
		crypto_unregister_alg(&aes_algs[i]);
	if (dd->caps.has_cfb64)
		crypto_unregister_alg(&aes_cfb64_alg);
}

static int atmel_aes_register_algs(struct atmel_aes_dev *dd)
{
	int err, i, j;

	for (i = 0; i < ARRAY_SIZE(aes_algs); i++) {
		err = crypto_register_alg(&aes_algs[i]);
		if (err)
			goto err_aes_algs;
	}

	if (dd->caps.has_cfb64) {
		err = crypto_register_alg(&aes_cfb64_alg);
		if (err)
			goto err_aes_cfb64_alg;
	}

	return 0;

err_aes_cfb64_alg:
	i = ARRAY_SIZE(aes_algs);
err_aes_algs:
	for (j = 0; j < i; j++)
		crypto_unregister_alg(&aes_algs[j]);

	return err;
}

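/*
 * Derive optional features from the IP version read at probe time:
 * 0x120 has neither dual-buffer mode nor CFB64; 0x130 and 0x200 have
 * both and support 4-beat DMA bursts.
 */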
static void atmel_aes_get_cap(struct atmel_aes_dev *dd)
{
	dd->caps.has_dualbuff = 0;
	dd->caps.has_cfb64 = 0;
	dd->caps.max_burst_size = 1;

	/* keep only major version number */
	switch (dd->hw_version & 0xff0) {
	case 0x200:
		dd->caps.has_dualbuff = 1;
		dd->caps.has_cfb64 = 1;
		dd->caps.max_burst_size = 4;
		break;
	case 0x130:
		dd->caps.has_dualbuff = 1;
		dd->caps.has_cfb64 = 1;
		dd->caps.max_burst_size = 4;
		break;
	case 0x120:
		break;
	default:
		dev_warn(dd->dev,
				"unknown AES hardware version, using minimum capabilities\n");
		break;
	}
}

#if defined(CONFIG_OF)
static const struct of_device_id atmel_aes_dt_ids[] = {
	{ .compatible = "atmel,at91sam9g46-aes" },
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, atmel_aes_dt_ids);

static struct crypto_platform_data *atmel_aes_of_init(struct platform_device *pdev)
{
	struct device_node *np = pdev->dev.of_node;
	struct crypto_platform_data *pdata;

	if (!np) {
		dev_err(&pdev->dev, "device node not found\n");
		return ERR_PTR(-EINVAL);
	}

	pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
	if (!pdata) {
		dev_err(&pdev->dev, "could not allocate memory for pdata\n");
		return ERR_PTR(-ENOMEM);
	}

	pdata->dma_slave = devm_kzalloc(&pdev->dev,
					sizeof(*(pdata->dma_slave)),
					GFP_KERNEL);
	if (!pdata->dma_slave) {
		dev_err(&pdev->dev, "could not allocate memory for dma_slave\n");
		devm_kfree(&pdev->dev, pdata);
		return ERR_PTR(-ENOMEM);
	}

	return pdata;
}
#else
static inline struct crypto_platform_data *atmel_aes_of_init(struct platform_device *pdev)
{
	return ERR_PTR(-EINVAL);
}
#endif

static int atmel_aes_probe(struct platform_device *pdev)
{
	struct atmel_aes_dev *aes_dd;
	struct crypto_platform_data *pdata;
	struct device *dev = &pdev->dev;
	struct resource *aes_res;
	unsigned long aes_phys_size;
	int err;

	pdata = pdev->dev.platform_data;
	if (!pdata) {
		pdata = atmel_aes_of_init(pdev);
		if (IS_ERR(pdata)) {
			err = PTR_ERR(pdata);
			goto aes_dd_err;
		}
	}

	if (!pdata->dma_slave) {
		err = -ENXIO;
		goto aes_dd_err;
	}

	aes_dd = kzalloc(sizeof(struct atmel_aes_dev), GFP_KERNEL);
	if (aes_dd == NULL) {
		dev_err(dev, "unable to alloc data struct.\n");
		err = -ENOMEM;
		goto aes_dd_err;
	}

	aes_dd->dev = dev;

	platform_set_drvdata(pdev, aes_dd);

	INIT_LIST_HEAD(&aes_dd->list);
	spin_lock_init(&aes_dd->lock);

	tasklet_init(&aes_dd->done_task, atmel_aes_done_task,
					(unsigned long)aes_dd);
	tasklet_init(&aes_dd->queue_task, atmel_aes_queue_task,
					(unsigned long)aes_dd);

	crypto_init_queue(&aes_dd->queue, ATMEL_AES_QUEUE_LENGTH);

	aes_dd->irq = -1;

	/* Get the base address */
	aes_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!aes_res) {
		dev_err(dev, "no MEM resource info\n");
		err = -ENODEV;
		goto res_err;
	}
	aes_dd->phys_base = aes_res->start;
	aes_phys_size = resource_size(aes_res);

	/* Get the IRQ */
	aes_dd->irq = platform_get_irq(pdev, 0);
	if (aes_dd->irq < 0) {
		dev_err(dev, "no IRQ resource info\n");
		err = aes_dd->irq;
		goto aes_irq_err;
	}

	err = request_irq(aes_dd->irq, atmel_aes_irq, IRQF_SHARED, "atmel-aes",
						aes_dd);
	if (err) {
		dev_err(dev, "unable to request aes irq.\n");
		goto aes_irq_err;
	}

	/* Initializing the clock */
	aes_dd->iclk = clk_get(&pdev->dev, "aes_clk");
	if (IS_ERR(aes_dd->iclk)) {
		dev_err(dev, "clock initialization failed.\n");
		err = PTR_ERR(aes_dd->iclk);
		goto clk_err;
	}

	aes_dd->io_base = ioremap(aes_dd->phys_base, aes_phys_size);
	if (!aes_dd->io_base) {
		dev_err(dev, "can't ioremap\n");
		err = -ENOMEM;
		goto aes_io_err;
	}

	atmel_aes_hw_version_init(aes_dd);

	atmel_aes_get_cap(aes_dd);

	err = atmel_aes_buff_init(aes_dd);
	if (err)
		goto err_aes_buff;

	err = atmel_aes_dma_init(aes_dd, pdata);
	if (err)
		goto err_aes_dma;

	spin_lock(&atmel_aes.lock);
	list_add_tail(&aes_dd->list, &atmel_aes.dev_list);
	spin_unlock(&atmel_aes.lock);

	err = atmel_aes_register_algs(aes_dd);
	if (err)
		goto err_algs;

	dev_info(dev, "Atmel AES - Using %s, %s for DMA transfers\n",
			dma_chan_name(aes_dd->dma_lch_in.chan),
			dma_chan_name(aes_dd->dma_lch_out.chan));

	return 0;

err_algs:
	spin_lock(&atmel_aes.lock);
	list_del(&aes_dd->list);
	spin_unlock(&atmel_aes.lock);
	atmel_aes_dma_cleanup(aes_dd);
err_aes_dma:
	atmel_aes_buff_cleanup(aes_dd);
err_aes_buff:
	iounmap(aes_dd->io_base);
aes_io_err:
	clk_put(aes_dd->iclk);
clk_err:
	free_irq(aes_dd->irq, aes_dd);
aes_irq_err:
res_err:
	tasklet_kill(&aes_dd->done_task);
	tasklet_kill(&aes_dd->queue_task);
	kfree(aes_dd);
aes_dd_err:
	dev_err(dev, "initialization failed.\n");

	return err;
}

static int atmel_aes_remove(struct platform_device *pdev)
{
	struct atmel_aes_dev *aes_dd;

	aes_dd = platform_get_drvdata(pdev);
	if (!aes_dd)
		return -ENODEV;
	spin_lock(&atmel_aes.lock);
	list_del(&aes_dd->list);
	spin_unlock(&atmel_aes.lock);

	atmel_aes_unregister_algs(aes_dd);

	tasklet_kill(&aes_dd->done_task);
	tasklet_kill(&aes_dd->queue_task);

	atmel_aes_dma_cleanup(aes_dd);

	iounmap(aes_dd->io_base);

	clk_put(aes_dd->iclk);

	if (aes_dd->irq > 0)
		free_irq(aes_dd->irq, aes_dd);

	kfree(aes_dd);

	return 0;
}

static struct platform_driver atmel_aes_driver = {
	.probe		= atmel_aes_probe,
	.remove		= atmel_aes_remove,
	.driver		= {
		.name	= "atmel_aes",
		.of_match_table = of_match_ptr(atmel_aes_dt_ids),
	},
};

module_platform_driver(atmel_aes_driver);

MODULE_DESCRIPTION("Atmel AES hw acceleration support.");
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Nicolas Royer - Eukréa Electromatique");

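/*
 * Illustrative sketch (not part of this driver): how a kernel client of
 * the same era could exercise the "cbc(aes)" ablkcipher registered above.
 * The buffer, key and IV contents are placeholders, error handling is
 * trimmed, and the caller would additionally need <linux/completion.h>.
 */
#if 0
static void example_done(struct crypto_async_request *req, int err)
{
	if (err != -EINPROGRESS)
		complete(req->data);
}

static int example_cbc_aes_encrypt(void *buf, unsigned int len,
				   const u8 *key, u8 *iv)
{
	struct crypto_ablkcipher *tfm;
	struct ablkcipher_request *req;
	struct scatterlist sg;
	DECLARE_COMPLETION_ONSTACK(done);
	int ret;

	tfm = crypto_alloc_ablkcipher("cbc(aes)", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	ret = crypto_ablkcipher_setkey(tfm, key, AES_KEYSIZE_128);
	if (ret)
		goto out_tfm;

	req = ablkcipher_request_alloc(tfm, GFP_KERNEL);
	if (!req) {
		ret = -ENOMEM;
		goto out_tfm;
	}

	sg_init_one(&sg, buf, len);	/* len must be a multiple of 16 */
	ablkcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
					example_done, &done);
	ablkcipher_request_set_crypt(req, &sg, &sg, len, iv);

	ret = crypto_ablkcipher_encrypt(req);
	if (ret == -EINPROGRESS || ret == -EBUSY) {
		/* the driver completes asynchronously via the tasklet */
		wait_for_completion(&done);
		ret = 0;
	}

	ablkcipher_request_free(req);
out_tfm:
	crypto_free_ablkcipher(tfm);
	return ret;
}
#endif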