linux/drivers/crypto/atmel-sha.c
   1// SPDX-License-Identifier: GPL-2.0
   2/*
   3 * Cryptographic API.
   4 *
   5 * Support for ATMEL SHA1/SHA256 HW acceleration.
   6 *
   7 * Copyright (c) 2012 Eukréa Electromatique - ATMEL
   8 * Author: Nicolas Royer <nicolas@eukrea.com>
   9 *
   10 * Some ideas are from the omap-sham.c driver.
  11 */
  12
  13
  14#include <linux/kernel.h>
  15#include <linux/module.h>
  16#include <linux/slab.h>
  17#include <linux/err.h>
  18#include <linux/clk.h>
  19#include <linux/io.h>
  20#include <linux/hw_random.h>
  21#include <linux/platform_device.h>
  22
  23#include <linux/device.h>
  24#include <linux/dmaengine.h>
  25#include <linux/init.h>
  26#include <linux/errno.h>
  27#include <linux/interrupt.h>
  28#include <linux/irq.h>
  29#include <linux/scatterlist.h>
  30#include <linux/dma-mapping.h>
  31#include <linux/of_device.h>
  32#include <linux/delay.h>
  33#include <linux/crypto.h>
  34#include <crypto/scatterwalk.h>
  35#include <crypto/algapi.h>
  36#include <crypto/sha1.h>
  37#include <crypto/sha2.h>
  38#include <crypto/hash.h>
  39#include <crypto/internal/hash.h>
  40#include "atmel-sha-regs.h"
  41#include "atmel-authenc.h"
  42
  43#define ATMEL_SHA_PRIORITY      300
  44
  45/* SHA flags */
  46#define SHA_FLAGS_BUSY                  BIT(0)
  47#define SHA_FLAGS_FINAL                 BIT(1)
  48#define SHA_FLAGS_DMA_ACTIVE    BIT(2)
  49#define SHA_FLAGS_OUTPUT_READY  BIT(3)
  50#define SHA_FLAGS_INIT                  BIT(4)
  51#define SHA_FLAGS_CPU                   BIT(5)
  52#define SHA_FLAGS_DMA_READY             BIT(6)
  53#define SHA_FLAGS_DUMP_REG      BIT(7)
  54
  55/* bits[11:8] are reserved. */
  56
  57#define SHA_FLAGS_FINUP         BIT(16)
  58#define SHA_FLAGS_SG            BIT(17)
  59#define SHA_FLAGS_ERROR         BIT(23)
  60#define SHA_FLAGS_PAD           BIT(24)
  61#define SHA_FLAGS_RESTORE       BIT(25)
  62#define SHA_FLAGS_IDATAR0       BIT(26)
  63#define SHA_FLAGS_WAIT_DATARDY  BIT(27)
  64
  65#define SHA_OP_INIT     0
  66#define SHA_OP_UPDATE   1
  67#define SHA_OP_FINAL    2
  68#define SHA_OP_DIGEST   3
  69
  70#define SHA_BUFFER_LEN          (PAGE_SIZE / 16)
  71
  72#define ATMEL_SHA_DMA_THRESHOLD         56
  73
  74struct atmel_sha_caps {
  75        bool    has_dma;
  76        bool    has_dualbuff;
  77        bool    has_sha224;
  78        bool    has_sha_384_512;
  79        bool    has_uihv;
  80        bool    has_hmac;
  81};
  82
  83struct atmel_sha_dev;
  84
  85/*
  86 * .statesize = sizeof(struct atmel_sha_reqctx) must be <= PAGE_SIZE / 8 as
  87 * tested by the ahash_prepare_alg() function.
  88 */
  89struct atmel_sha_reqctx {
  90        struct atmel_sha_dev    *dd;
  91        unsigned long   flags;
  92        unsigned long   op;
  93
  94        u8      digest[SHA512_DIGEST_SIZE] __aligned(sizeof(u32));
  95        u64     digcnt[2];
  96        size_t  bufcnt;
  97        size_t  buflen;
  98        dma_addr_t      dma_addr;
  99
 100        /* walk state */
 101        struct scatterlist      *sg;
 102        unsigned int    offset; /* offset in current sg */
 103        unsigned int    total;  /* total request */
 104
 105        size_t block_size;
 106        size_t hash_size;
 107
 108        u8 buffer[SHA_BUFFER_LEN + SHA512_BLOCK_SIZE] __aligned(sizeof(u32));
 109};
 110
 111typedef int (*atmel_sha_fn_t)(struct atmel_sha_dev *);
 112
 113struct atmel_sha_ctx {
 114        struct atmel_sha_dev    *dd;
 115        atmel_sha_fn_t          start;
 116
 117        unsigned long           flags;
 118};
 119
 120#define ATMEL_SHA_QUEUE_LENGTH  50
 121
 122struct atmel_sha_dma {
 123        struct dma_chan                 *chan;
 124        struct dma_slave_config dma_conf;
 125        struct scatterlist      *sg;
 126        int                     nents;
 127        unsigned int            last_sg_length;
 128};
 129
 130struct atmel_sha_dev {
 131        struct list_head        list;
 132        unsigned long           phys_base;
 133        struct device           *dev;
 134        struct clk                      *iclk;
 135        int                                     irq;
 136        void __iomem            *io_base;
 137
 138        spinlock_t              lock;
 139        struct tasklet_struct   done_task;
 140        struct tasklet_struct   queue_task;
 141
 142        unsigned long           flags;
 143        struct crypto_queue     queue;
 144        struct ahash_request    *req;
 145        bool                    is_async;
 146        bool                    force_complete;
 147        atmel_sha_fn_t          resume;
 148        atmel_sha_fn_t          cpu_transfer_complete;
 149
 150        struct atmel_sha_dma    dma_lch_in;
 151
 152        struct atmel_sha_caps   caps;
 153
 154        struct scatterlist      tmp;
 155
 156        u32     hw_version;
 157};
 158
 159struct atmel_sha_drv {
 160        struct list_head        dev_list;
 161        spinlock_t              lock;
 162};
 163
 164static struct atmel_sha_drv atmel_sha = {
 165        .dev_list = LIST_HEAD_INIT(atmel_sha.dev_list),
 166        .lock = __SPIN_LOCK_UNLOCKED(atmel_sha.lock),
 167};
 168
 169#ifdef VERBOSE_DEBUG
 170static const char *atmel_sha_reg_name(u32 offset, char *tmp, size_t sz, bool wr)
 171{
 172        switch (offset) {
 173        case SHA_CR:
 174                return "CR";
 175
 176        case SHA_MR:
 177                return "MR";
 178
 179        case SHA_IER:
 180                return "IER";
 181
 182        case SHA_IDR:
 183                return "IDR";
 184
 185        case SHA_IMR:
 186                return "IMR";
 187
 188        case SHA_ISR:
 189                return "ISR";
 190
 191        case SHA_MSR:
 192                return "MSR";
 193
 194        case SHA_BCR:
 195                return "BCR";
 196
 197        case SHA_REG_DIN(0):
 198        case SHA_REG_DIN(1):
 199        case SHA_REG_DIN(2):
 200        case SHA_REG_DIN(3):
 201        case SHA_REG_DIN(4):
 202        case SHA_REG_DIN(5):
 203        case SHA_REG_DIN(6):
 204        case SHA_REG_DIN(7):
 205        case SHA_REG_DIN(8):
 206        case SHA_REG_DIN(9):
 207        case SHA_REG_DIN(10):
 208        case SHA_REG_DIN(11):
 209        case SHA_REG_DIN(12):
 210        case SHA_REG_DIN(13):
 211        case SHA_REG_DIN(14):
 212        case SHA_REG_DIN(15):
 213                snprintf(tmp, sz, "IDATAR[%u]", (offset - SHA_REG_DIN(0)) >> 2);
 214                break;
 215
 216        case SHA_REG_DIGEST(0):
 217        case SHA_REG_DIGEST(1):
 218        case SHA_REG_DIGEST(2):
 219        case SHA_REG_DIGEST(3):
 220        case SHA_REG_DIGEST(4):
 221        case SHA_REG_DIGEST(5):
 222        case SHA_REG_DIGEST(6):
 223        case SHA_REG_DIGEST(7):
 224        case SHA_REG_DIGEST(8):
 225        case SHA_REG_DIGEST(9):
 226        case SHA_REG_DIGEST(10):
 227        case SHA_REG_DIGEST(11):
 228        case SHA_REG_DIGEST(12):
 229        case SHA_REG_DIGEST(13):
 230        case SHA_REG_DIGEST(14):
 231        case SHA_REG_DIGEST(15):
 232                if (wr)
 233                        snprintf(tmp, sz, "IDATAR[%u]",
 234                                 16u + ((offset - SHA_REG_DIGEST(0)) >> 2));
 235                else
 236                        snprintf(tmp, sz, "ODATAR[%u]",
 237                                 (offset - SHA_REG_DIGEST(0)) >> 2);
 238                break;
 239
 240        case SHA_HW_VERSION:
 241                return "HWVER";
 242
 243        default:
 244                snprintf(tmp, sz, "0x%02x", offset);
 245                break;
 246        }
 247
 248        return tmp;
 249}
 250
 251#endif /* VERBOSE_DEBUG */
 252
 253static inline u32 atmel_sha_read(struct atmel_sha_dev *dd, u32 offset)
 254{
 255        u32 value = readl_relaxed(dd->io_base + offset);
 256
 257#ifdef VERBOSE_DEBUG
 258        if (dd->flags & SHA_FLAGS_DUMP_REG) {
 259                char tmp[16];
 260
 261                dev_vdbg(dd->dev, "read 0x%08x from %s\n", value,
 262                         atmel_sha_reg_name(offset, tmp, sizeof(tmp), false));
 263        }
 264#endif /* VERBOSE_DEBUG */
 265
 266        return value;
 267}
 268
 269static inline void atmel_sha_write(struct atmel_sha_dev *dd,
 270                                        u32 offset, u32 value)
 271{
 272#ifdef VERBOSE_DEBUG
 273        if (dd->flags & SHA_FLAGS_DUMP_REG) {
 274                char tmp[16];
 275
 276                dev_vdbg(dd->dev, "write 0x%08x into %s\n", value,
 277                         atmel_sha_reg_name(offset, tmp, sizeof(tmp), true));
 278        }
 279#endif /* VERBOSE_DEBUG */
 280
 281        writel_relaxed(value, dd->io_base + offset);
 282}
 283
 284static inline int atmel_sha_complete(struct atmel_sha_dev *dd, int err)
 285{
 286        struct ahash_request *req = dd->req;
 287
 288        dd->flags &= ~(SHA_FLAGS_BUSY | SHA_FLAGS_FINAL | SHA_FLAGS_CPU |
 289                       SHA_FLAGS_DMA_READY | SHA_FLAGS_OUTPUT_READY |
 290                       SHA_FLAGS_DUMP_REG);
 291
 292        clk_disable(dd->iclk);
 293
 294        if ((dd->is_async || dd->force_complete) && req->base.complete)
 295                req->base.complete(&req->base, err);
 296
 297        /* handle new request */
 298        tasklet_schedule(&dd->queue_task);
 299
 300        return err;
 301}
 302
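/*
 * Copy as much data as possible from the request scatterlist into the
 * internal buffer: stop when the buffer is full or when the remaining
 * request byte count (ctx->total) reaches zero, advancing the sg walk
 * state (ctx->sg, ctx->offset) along the way.
 */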
 303static size_t atmel_sha_append_sg(struct atmel_sha_reqctx *ctx)
 304{
 305        size_t count;
 306
 307        while ((ctx->bufcnt < ctx->buflen) && ctx->total) {
 308                count = min(ctx->sg->length - ctx->offset, ctx->total);
 309                count = min(count, ctx->buflen - ctx->bufcnt);
 310
 311                if (count <= 0) {
  312                        /*
  313                         * count may be <= 0 because the buffer is full or
  314                         * because the sg length is 0. In the latter case,
  315                         * check if there is another sg in the list: a zero
  316                         * length sg doesn't necessarily end the sg list.
  317                         */
 318                        if ((ctx->sg->length == 0) && !sg_is_last(ctx->sg)) {
 319                                ctx->sg = sg_next(ctx->sg);
 320                                continue;
 321                        } else {
 322                                break;
 323                        }
 324                }
 325
 326                scatterwalk_map_and_copy(ctx->buffer + ctx->bufcnt, ctx->sg,
 327                        ctx->offset, count, 0);
 328
 329                ctx->bufcnt += count;
 330                ctx->offset += count;
 331                ctx->total -= count;
 332
 333                if (ctx->offset == ctx->sg->length) {
 334                        ctx->sg = sg_next(ctx->sg);
 335                        if (ctx->sg)
 336                                ctx->offset = 0;
 337                        else
 338                                ctx->total = 0;
 339                }
 340        }
 341
 342        return 0;
 343}
 344
  345/*
  346 * The purpose of this padding is to ensure that the padded message is a
  347 * multiple of 512 bits (SHA1/SHA224/SHA256) or 1024 bits (SHA384/SHA512).
  348 * The bit "1" is appended at the end of the message, followed by
  349 * "padlen-1" zero bits. Then a 64-bit block (SHA1/SHA224/SHA256) or a
  350 * 128-bit block (SHA384/SHA512) holding the message length in bits is
  351 * appended.
  352 *
  353 * For SHA1/SHA224/SHA256, padlen is calculated as follows:
  354 *  - if message length < 56 bytes then padlen = 56 - message length
  355 *  - else padlen = 64 + 56 - message length
  356 *
  357 * For SHA384/SHA512, padlen is calculated as follows:
  358 *  - if message length < 112 bytes then padlen = 112 - message length
  359 *  - else padlen = 128 + 112 - message length
  360 */
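/*
 * Illustrative example (not part of the original comment): with SHA-256
 * and 3 bytes pending in the buffer, index = 3, so padlen = 56 - 3 = 53.
 * The 0x80 byte, 52 zero bytes and the 8-byte big-endian bit length are
 * appended, giving a padded buffer of exactly one 64-byte block.
 */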
 361static void atmel_sha_fill_padding(struct atmel_sha_reqctx *ctx, int length)
 362{
 363        unsigned int index, padlen;
 364        __be64 bits[2];
 365        u64 size[2];
 366
 367        size[0] = ctx->digcnt[0];
 368        size[1] = ctx->digcnt[1];
 369
 370        size[0] += ctx->bufcnt;
 371        if (size[0] < ctx->bufcnt)
 372                size[1]++;
 373
 374        size[0] += length;
 375        if (size[0]  < length)
 376                size[1]++;
 377
 378        bits[1] = cpu_to_be64(size[0] << 3);
 379        bits[0] = cpu_to_be64(size[1] << 3 | size[0] >> 61);
 380
 381        switch (ctx->flags & SHA_FLAGS_ALGO_MASK) {
 382        case SHA_FLAGS_SHA384:
 383        case SHA_FLAGS_SHA512:
 384                index = ctx->bufcnt & 0x7f;
 385                padlen = (index < 112) ? (112 - index) : ((128+112) - index);
 386                *(ctx->buffer + ctx->bufcnt) = 0x80;
 387                memset(ctx->buffer + ctx->bufcnt + 1, 0, padlen-1);
 388                memcpy(ctx->buffer + ctx->bufcnt + padlen, bits, 16);
 389                ctx->bufcnt += padlen + 16;
 390                ctx->flags |= SHA_FLAGS_PAD;
 391                break;
 392
 393        default:
 394                index = ctx->bufcnt & 0x3f;
 395                padlen = (index < 56) ? (56 - index) : ((64+56) - index);
 396                *(ctx->buffer + ctx->bufcnt) = 0x80;
 397                memset(ctx->buffer + ctx->bufcnt + 1, 0, padlen-1);
 398                memcpy(ctx->buffer + ctx->bufcnt + padlen, &bits[1], 8);
 399                ctx->bufcnt += padlen + 8;
 400                ctx->flags |= SHA_FLAGS_PAD;
 401                break;
 402        }
 403}
 404
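/*
 * Pick the SHA device for this transform: reuse the one already bound to
 * the tfm context if any, otherwise take the first registered device and
 * remember it in tctx->dd.
 */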
 405static struct atmel_sha_dev *atmel_sha_find_dev(struct atmel_sha_ctx *tctx)
 406{
 407        struct atmel_sha_dev *dd = NULL;
 408        struct atmel_sha_dev *tmp;
 409
 410        spin_lock_bh(&atmel_sha.lock);
 411        if (!tctx->dd) {
 412                list_for_each_entry(tmp, &atmel_sha.dev_list, list) {
 413                        dd = tmp;
 414                        break;
 415                }
 416                tctx->dd = dd;
 417        } else {
 418                dd = tctx->dd;
 419        }
 420
 421        spin_unlock_bh(&atmel_sha.lock);
 422
 423        return dd;
 424}
 425
 426static int atmel_sha_init(struct ahash_request *req)
 427{
 428        struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
 429        struct atmel_sha_ctx *tctx = crypto_ahash_ctx(tfm);
 430        struct atmel_sha_reqctx *ctx = ahash_request_ctx(req);
 431        struct atmel_sha_dev *dd = atmel_sha_find_dev(tctx);
 432
 433        ctx->dd = dd;
 434
 435        ctx->flags = 0;
 436
 437        dev_dbg(dd->dev, "init: digest size: %u\n",
 438                crypto_ahash_digestsize(tfm));
 439
 440        switch (crypto_ahash_digestsize(tfm)) {
 441        case SHA1_DIGEST_SIZE:
 442                ctx->flags |= SHA_FLAGS_SHA1;
 443                ctx->block_size = SHA1_BLOCK_SIZE;
 444                break;
 445        case SHA224_DIGEST_SIZE:
 446                ctx->flags |= SHA_FLAGS_SHA224;
 447                ctx->block_size = SHA224_BLOCK_SIZE;
 448                break;
 449        case SHA256_DIGEST_SIZE:
 450                ctx->flags |= SHA_FLAGS_SHA256;
 451                ctx->block_size = SHA256_BLOCK_SIZE;
 452                break;
 453        case SHA384_DIGEST_SIZE:
 454                ctx->flags |= SHA_FLAGS_SHA384;
 455                ctx->block_size = SHA384_BLOCK_SIZE;
 456                break;
 457        case SHA512_DIGEST_SIZE:
 458                ctx->flags |= SHA_FLAGS_SHA512;
 459                ctx->block_size = SHA512_BLOCK_SIZE;
 460                break;
 461        default:
 462                return -EINVAL;
 463        }
 464
 465        ctx->bufcnt = 0;
 466        ctx->digcnt[0] = 0;
 467        ctx->digcnt[1] = 0;
 468        ctx->buflen = SHA_BUFFER_LEN;
 469
 470        return 0;
 471}
 472
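/*
 * Program the Mode Register and the matching interrupt enable for the
 * current request: PDC or auto mode, dual buffering when available and
 * the selected hash algorithm. When resuming a previous update on
 * hardware with UIHV support, the saved intermediate digest is reloaded
 * first.
 */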
 473static void atmel_sha_write_ctrl(struct atmel_sha_dev *dd, int dma)
 474{
 475        struct atmel_sha_reqctx *ctx = ahash_request_ctx(dd->req);
 476        u32 valmr = SHA_MR_MODE_AUTO;
 477        unsigned int i, hashsize = 0;
 478
 479        if (likely(dma)) {
 480                if (!dd->caps.has_dma)
 481                        atmel_sha_write(dd, SHA_IER, SHA_INT_TXBUFE);
 482                valmr = SHA_MR_MODE_PDC;
 483                if (dd->caps.has_dualbuff)
 484                        valmr |= SHA_MR_DUALBUFF;
 485        } else {
 486                atmel_sha_write(dd, SHA_IER, SHA_INT_DATARDY);
 487        }
 488
 489        switch (ctx->flags & SHA_FLAGS_ALGO_MASK) {
 490        case SHA_FLAGS_SHA1:
 491                valmr |= SHA_MR_ALGO_SHA1;
 492                hashsize = SHA1_DIGEST_SIZE;
 493                break;
 494
 495        case SHA_FLAGS_SHA224:
 496                valmr |= SHA_MR_ALGO_SHA224;
 497                hashsize = SHA256_DIGEST_SIZE;
 498                break;
 499
 500        case SHA_FLAGS_SHA256:
 501                valmr |= SHA_MR_ALGO_SHA256;
 502                hashsize = SHA256_DIGEST_SIZE;
 503                break;
 504
 505        case SHA_FLAGS_SHA384:
 506                valmr |= SHA_MR_ALGO_SHA384;
 507                hashsize = SHA512_DIGEST_SIZE;
 508                break;
 509
 510        case SHA_FLAGS_SHA512:
 511                valmr |= SHA_MR_ALGO_SHA512;
 512                hashsize = SHA512_DIGEST_SIZE;
 513                break;
 514
 515        default:
 516                break;
 517        }
 518
 519        /* Setting CR_FIRST only for the first iteration */
 520        if (!(ctx->digcnt[0] || ctx->digcnt[1])) {
 521                atmel_sha_write(dd, SHA_CR, SHA_CR_FIRST);
 522        } else if (dd->caps.has_uihv && (ctx->flags & SHA_FLAGS_RESTORE)) {
 523                const u32 *hash = (const u32 *)ctx->digest;
 524
 525                /*
 526                 * Restore the hardware context: update the User Initialize
 527                 * Hash Value (UIHV) with the value saved when the latest
 528                 * 'update' operation completed on this very same crypto
 529                 * request.
 530                 */
 531                ctx->flags &= ~SHA_FLAGS_RESTORE;
 532                atmel_sha_write(dd, SHA_CR, SHA_CR_WUIHV);
 533                for (i = 0; i < hashsize / sizeof(u32); ++i)
 534                        atmel_sha_write(dd, SHA_REG_DIN(i), hash[i]);
 535                atmel_sha_write(dd, SHA_CR, SHA_CR_FIRST);
 536                valmr |= SHA_MR_UIHV;
 537        }
 538        /*
 539         * WARNING: If the UIHV feature is not available, the hardware CANNOT
 540         * process concurrent requests: the internal registers used to store
 541         * the hash/digest are still set to the partial digest output values
 542         * computed during the latest round.
 543         */
 544
 545        atmel_sha_write(dd, SHA_MR, valmr);
 546}
 547
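/*
 * If the hardware already reports DATARDY, run the resume callback right
 * away; otherwise record it in dd->resume, enable the DATARDY interrupt
 * and return -EINPROGRESS so the done tasklet resumes the job later.
 */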
 548static inline int atmel_sha_wait_for_data_ready(struct atmel_sha_dev *dd,
 549                                                atmel_sha_fn_t resume)
 550{
 551        u32 isr = atmel_sha_read(dd, SHA_ISR);
 552
 553        if (unlikely(isr & SHA_INT_DATARDY))
 554                return resume(dd);
 555
 556        dd->resume = resume;
 557        atmel_sha_write(dd, SHA_IER, SHA_INT_DATARDY);
 558        return -EINPROGRESS;
 559}
 560
 561static int atmel_sha_xmit_cpu(struct atmel_sha_dev *dd, const u8 *buf,
 562                              size_t length, int final)
 563{
 564        struct atmel_sha_reqctx *ctx = ahash_request_ctx(dd->req);
 565        int count, len32;
 566        const u32 *buffer = (const u32 *)buf;
 567
 568        dev_dbg(dd->dev, "xmit_cpu: digcnt: 0x%llx 0x%llx, length: %zd, final: %d\n",
 569                ctx->digcnt[1], ctx->digcnt[0], length, final);
 570
 571        atmel_sha_write_ctrl(dd, 0);
 572
 573        /* should be non-zero before next lines to disable clocks later */
 574        ctx->digcnt[0] += length;
 575        if (ctx->digcnt[0] < length)
 576                ctx->digcnt[1]++;
 577
 578        if (final)
 579                dd->flags |= SHA_FLAGS_FINAL; /* catch last interrupt */
 580
 581        len32 = DIV_ROUND_UP(length, sizeof(u32));
 582
 583        dd->flags |= SHA_FLAGS_CPU;
 584
 585        for (count = 0; count < len32; count++)
 586                atmel_sha_write(dd, SHA_REG_DIN(count), buffer[count]);
 587
 588        return -EINPROGRESS;
 589}
 590
 591static int atmel_sha_xmit_pdc(struct atmel_sha_dev *dd, dma_addr_t dma_addr1,
 592                size_t length1, dma_addr_t dma_addr2, size_t length2, int final)
 593{
 594        struct atmel_sha_reqctx *ctx = ahash_request_ctx(dd->req);
 595        int len32;
 596
 597        dev_dbg(dd->dev, "xmit_pdc: digcnt: 0x%llx 0x%llx, length: %zd, final: %d\n",
 598                ctx->digcnt[1], ctx->digcnt[0], length1, final);
 599
 600        len32 = DIV_ROUND_UP(length1, sizeof(u32));
 601        atmel_sha_write(dd, SHA_PTCR, SHA_PTCR_TXTDIS);
 602        atmel_sha_write(dd, SHA_TPR, dma_addr1);
 603        atmel_sha_write(dd, SHA_TCR, len32);
 604
 605        len32 = DIV_ROUND_UP(length2, sizeof(u32));
 606        atmel_sha_write(dd, SHA_TNPR, dma_addr2);
 607        atmel_sha_write(dd, SHA_TNCR, len32);
 608
 609        atmel_sha_write_ctrl(dd, 1);
 610
 611        /* should be non-zero before next lines to disable clocks later */
 612        ctx->digcnt[0] += length1;
 613        if (ctx->digcnt[0] < length1)
 614                ctx->digcnt[1]++;
 615
 616        if (final)
 617                dd->flags |= SHA_FLAGS_FINAL; /* catch last interrupt */
 618
 619        dd->flags |=  SHA_FLAGS_DMA_ACTIVE;
 620
 621        /* Start DMA transfer */
 622        atmel_sha_write(dd, SHA_PTCR, SHA_PTCR_TXTEN);
 623
 624        return -EINPROGRESS;
 625}
 626
 627static void atmel_sha_dma_callback(void *data)
 628{
 629        struct atmel_sha_dev *dd = data;
 630
 631        dd->is_async = true;
 632
 633        /* dma_lch_in - completed - wait DATRDY */
 634        atmel_sha_write(dd, SHA_IER, SHA_INT_DATARDY);
 635}
 636
 637static int atmel_sha_xmit_dma(struct atmel_sha_dev *dd, dma_addr_t dma_addr1,
 638                size_t length1, dma_addr_t dma_addr2, size_t length2, int final)
 639{
 640        struct atmel_sha_reqctx *ctx = ahash_request_ctx(dd->req);
 641        struct dma_async_tx_descriptor  *in_desc;
 642        struct scatterlist sg[2];
 643
 644        dev_dbg(dd->dev, "xmit_dma: digcnt: 0x%llx 0x%llx, length: %zd, final: %d\n",
 645                ctx->digcnt[1], ctx->digcnt[0], length1, final);
 646
 647        dd->dma_lch_in.dma_conf.src_maxburst = 16;
 648        dd->dma_lch_in.dma_conf.dst_maxburst = 16;
 649
 650        dmaengine_slave_config(dd->dma_lch_in.chan, &dd->dma_lch_in.dma_conf);
 651
 652        if (length2) {
 653                sg_init_table(sg, 2);
 654                sg_dma_address(&sg[0]) = dma_addr1;
 655                sg_dma_len(&sg[0]) = length1;
 656                sg_dma_address(&sg[1]) = dma_addr2;
 657                sg_dma_len(&sg[1]) = length2;
 658                in_desc = dmaengine_prep_slave_sg(dd->dma_lch_in.chan, sg, 2,
 659                        DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
 660        } else {
 661                sg_init_table(sg, 1);
 662                sg_dma_address(&sg[0]) = dma_addr1;
 663                sg_dma_len(&sg[0]) = length1;
 664                in_desc = dmaengine_prep_slave_sg(dd->dma_lch_in.chan, sg, 1,
 665                        DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
 666        }
 667        if (!in_desc)
 668                return atmel_sha_complete(dd, -EINVAL);
 669
 670        in_desc->callback = atmel_sha_dma_callback;
 671        in_desc->callback_param = dd;
 672
 673        atmel_sha_write_ctrl(dd, 1);
 674
 675        /* should be non-zero before next lines to disable clocks later */
 676        ctx->digcnt[0] += length1;
 677        if (ctx->digcnt[0] < length1)
 678                ctx->digcnt[1]++;
 679
 680        if (final)
 681                dd->flags |= SHA_FLAGS_FINAL; /* catch last interrupt */
 682
 683        dd->flags |=  SHA_FLAGS_DMA_ACTIVE;
 684
 685        /* Start DMA transfer */
 686        dmaengine_submit(in_desc);
 687        dma_async_issue_pending(dd->dma_lch_in.chan);
 688
 689        return -EINPROGRESS;
 690}
 691
 692static int atmel_sha_xmit_start(struct atmel_sha_dev *dd, dma_addr_t dma_addr1,
 693                size_t length1, dma_addr_t dma_addr2, size_t length2, int final)
 694{
 695        if (dd->caps.has_dma)
 696                return atmel_sha_xmit_dma(dd, dma_addr1, length1,
 697                                dma_addr2, length2, final);
 698        else
 699                return atmel_sha_xmit_pdc(dd, dma_addr1, length1,
 700                                dma_addr2, length2, final);
 701}
 702
 703static int atmel_sha_update_cpu(struct atmel_sha_dev *dd)
 704{
 705        struct atmel_sha_reqctx *ctx = ahash_request_ctx(dd->req);
 706        int bufcnt;
 707
 708        atmel_sha_append_sg(ctx);
 709        atmel_sha_fill_padding(ctx, 0);
 710        bufcnt = ctx->bufcnt;
 711        ctx->bufcnt = 0;
 712
 713        return atmel_sha_xmit_cpu(dd, ctx->buffer, bufcnt, 1);
 714}
 715
 716static int atmel_sha_xmit_dma_map(struct atmel_sha_dev *dd,
 717                                        struct atmel_sha_reqctx *ctx,
 718                                        size_t length, int final)
 719{
 720        ctx->dma_addr = dma_map_single(dd->dev, ctx->buffer,
 721                                ctx->buflen + ctx->block_size, DMA_TO_DEVICE);
 722        if (dma_mapping_error(dd->dev, ctx->dma_addr)) {
 723                dev_err(dd->dev, "dma %zu bytes error\n", ctx->buflen +
 724                                ctx->block_size);
 725                return atmel_sha_complete(dd, -EINVAL);
 726        }
 727
 728        ctx->flags &= ~SHA_FLAGS_SG;
 729
  730        /* the next call never fails, so there is no error path needing an unmap */
 731        return atmel_sha_xmit_start(dd, ctx->dma_addr, length, 0, 0, final);
 732}
 733
 734static int atmel_sha_update_dma_slow(struct atmel_sha_dev *dd)
 735{
 736        struct atmel_sha_reqctx *ctx = ahash_request_ctx(dd->req);
 737        unsigned int final;
 738        size_t count;
 739
 740        atmel_sha_append_sg(ctx);
 741
 742        final = (ctx->flags & SHA_FLAGS_FINUP) && !ctx->total;
 743
 744        dev_dbg(dd->dev, "slow: bufcnt: %zu, digcnt: 0x%llx 0x%llx, final: %d\n",
 745                 ctx->bufcnt, ctx->digcnt[1], ctx->digcnt[0], final);
 746
 747        if (final)
 748                atmel_sha_fill_padding(ctx, 0);
 749
 750        if (final || (ctx->bufcnt == ctx->buflen)) {
 751                count = ctx->bufcnt;
 752                ctx->bufcnt = 0;
 753                return atmel_sha_xmit_dma_map(dd, ctx, count, final);
 754        }
 755
 756        return 0;
 757}
 758
 759static int atmel_sha_update_dma_start(struct atmel_sha_dev *dd)
 760{
 761        struct atmel_sha_reqctx *ctx = ahash_request_ctx(dd->req);
 762        unsigned int length, final, tail;
 763        struct scatterlist *sg;
 764        unsigned int count;
 765
 766        if (!ctx->total)
 767                return 0;
 768
 769        if (ctx->bufcnt || ctx->offset)
 770                return atmel_sha_update_dma_slow(dd);
 771
 772        dev_dbg(dd->dev, "fast: digcnt: 0x%llx 0x%llx, bufcnt: %zd, total: %u\n",
 773                ctx->digcnt[1], ctx->digcnt[0], ctx->bufcnt, ctx->total);
 774
 775        sg = ctx->sg;
 776
 777        if (!IS_ALIGNED(sg->offset, sizeof(u32)))
 778                return atmel_sha_update_dma_slow(dd);
 779
 780        if (!sg_is_last(sg) && !IS_ALIGNED(sg->length, ctx->block_size))
 781                /* size is not ctx->block_size aligned */
 782                return atmel_sha_update_dma_slow(dd);
 783
 784        length = min(ctx->total, sg->length);
 785
 786        if (sg_is_last(sg)) {
 787                if (!(ctx->flags & SHA_FLAGS_FINUP)) {
 788                        /* not last sg must be ctx->block_size aligned */
 789                        tail = length & (ctx->block_size - 1);
 790                        length -= tail;
 791                }
 792        }
 793
 794        ctx->total -= length;
 795        ctx->offset = length; /* offset where to start slow */
 796
 797        final = (ctx->flags & SHA_FLAGS_FINUP) && !ctx->total;
 798
 799        /* Add padding */
 800        if (final) {
 801                tail = length & (ctx->block_size - 1);
 802                length -= tail;
 803                ctx->total += tail;
 804                ctx->offset = length; /* offset where to start slow */
 805
 806                sg = ctx->sg;
 807                atmel_sha_append_sg(ctx);
 808
 809                atmel_sha_fill_padding(ctx, length);
 810
 811                ctx->dma_addr = dma_map_single(dd->dev, ctx->buffer,
 812                        ctx->buflen + ctx->block_size, DMA_TO_DEVICE);
 813                if (dma_mapping_error(dd->dev, ctx->dma_addr)) {
 814                        dev_err(dd->dev, "dma %zu bytes error\n",
 815                                ctx->buflen + ctx->block_size);
 816                        return atmel_sha_complete(dd, -EINVAL);
 817                }
 818
 819                if (length == 0) {
 820                        ctx->flags &= ~SHA_FLAGS_SG;
 821                        count = ctx->bufcnt;
 822                        ctx->bufcnt = 0;
 823                        return atmel_sha_xmit_start(dd, ctx->dma_addr, count, 0,
 824                                        0, final);
 825                } else {
 826                        ctx->sg = sg;
 827                        if (!dma_map_sg(dd->dev, ctx->sg, 1,
 828                                DMA_TO_DEVICE)) {
  829                                        dev_err(dd->dev, "dma_map_sg error\n");
 830                                        return atmel_sha_complete(dd, -EINVAL);
 831                        }
 832
 833                        ctx->flags |= SHA_FLAGS_SG;
 834
 835                        count = ctx->bufcnt;
 836                        ctx->bufcnt = 0;
 837                        return atmel_sha_xmit_start(dd, sg_dma_address(ctx->sg),
 838                                        length, ctx->dma_addr, count, final);
 839                }
 840        }
 841
 842        if (!dma_map_sg(dd->dev, ctx->sg, 1, DMA_TO_DEVICE)) {
  843                dev_err(dd->dev, "dma_map_sg error\n");
 844                return atmel_sha_complete(dd, -EINVAL);
 845        }
 846
 847        ctx->flags |= SHA_FLAGS_SG;
 848
  849        /* the next call never fails, so there is no error path needing an unmap */
 850        return atmel_sha_xmit_start(dd, sg_dma_address(ctx->sg), length, 0,
 851                                                                0, final);
 852}
 853
 854static void atmel_sha_update_dma_stop(struct atmel_sha_dev *dd)
 855{
 856        struct atmel_sha_reqctx *ctx = ahash_request_ctx(dd->req);
 857
 858        if (ctx->flags & SHA_FLAGS_SG) {
 859                dma_unmap_sg(dd->dev, ctx->sg, 1, DMA_TO_DEVICE);
 860                if (ctx->sg->length == ctx->offset) {
 861                        ctx->sg = sg_next(ctx->sg);
 862                        if (ctx->sg)
 863                                ctx->offset = 0;
 864                }
 865                if (ctx->flags & SHA_FLAGS_PAD) {
 866                        dma_unmap_single(dd->dev, ctx->dma_addr,
 867                                ctx->buflen + ctx->block_size, DMA_TO_DEVICE);
 868                }
 869        } else {
 870                dma_unmap_single(dd->dev, ctx->dma_addr, ctx->buflen +
 871                                                ctx->block_size, DMA_TO_DEVICE);
 872        }
 873}
 874
 875static int atmel_sha_update_req(struct atmel_sha_dev *dd)
 876{
 877        struct ahash_request *req = dd->req;
 878        struct atmel_sha_reqctx *ctx = ahash_request_ctx(req);
 879        int err;
 880
 881        dev_dbg(dd->dev, "update_req: total: %u, digcnt: 0x%llx 0x%llx\n",
 882                ctx->total, ctx->digcnt[1], ctx->digcnt[0]);
 883
 884        if (ctx->flags & SHA_FLAGS_CPU)
 885                err = atmel_sha_update_cpu(dd);
 886        else
 887                err = atmel_sha_update_dma_start(dd);
 888
  889        /* wait for DMA completion before taking more data */
  890        dev_dbg(dd->dev, "update: err: %d, digcnt: 0x%llx 0x%llx\n",
 891                        err, ctx->digcnt[1], ctx->digcnt[0]);
 892
 893        return err;
 894}
 895
 896static int atmel_sha_final_req(struct atmel_sha_dev *dd)
 897{
 898        struct ahash_request *req = dd->req;
 899        struct atmel_sha_reqctx *ctx = ahash_request_ctx(req);
 900        int err = 0;
 901        int count;
 902
 903        if (ctx->bufcnt >= ATMEL_SHA_DMA_THRESHOLD) {
 904                atmel_sha_fill_padding(ctx, 0);
 905                count = ctx->bufcnt;
 906                ctx->bufcnt = 0;
 907                err = atmel_sha_xmit_dma_map(dd, ctx, count, 1);
 908        }
 909        /* faster to handle last block with cpu */
 910        else {
 911                atmel_sha_fill_padding(ctx, 0);
 912                count = ctx->bufcnt;
 913                ctx->bufcnt = 0;
 914                err = atmel_sha_xmit_cpu(dd, ctx->buffer, count, 1);
 915        }
 916
 917        dev_dbg(dd->dev, "final_req: err: %d\n", err);
 918
 919        return err;
 920}
 921
 922static void atmel_sha_copy_hash(struct ahash_request *req)
 923{
 924        struct atmel_sha_reqctx *ctx = ahash_request_ctx(req);
 925        u32 *hash = (u32 *)ctx->digest;
 926        unsigned int i, hashsize;
 927
 928        switch (ctx->flags & SHA_FLAGS_ALGO_MASK) {
 929        case SHA_FLAGS_SHA1:
 930                hashsize = SHA1_DIGEST_SIZE;
 931                break;
 932
 933        case SHA_FLAGS_SHA224:
 934        case SHA_FLAGS_SHA256:
 935                hashsize = SHA256_DIGEST_SIZE;
 936                break;
 937
 938        case SHA_FLAGS_SHA384:
 939        case SHA_FLAGS_SHA512:
 940                hashsize = SHA512_DIGEST_SIZE;
 941                break;
 942
 943        default:
 944                /* Should not happen... */
 945                return;
 946        }
 947
 948        for (i = 0; i < hashsize / sizeof(u32); ++i)
 949                hash[i] = atmel_sha_read(ctx->dd, SHA_REG_DIGEST(i));
 950        ctx->flags |= SHA_FLAGS_RESTORE;
 951}
 952
 953static void atmel_sha_copy_ready_hash(struct ahash_request *req)
 954{
 955        struct atmel_sha_reqctx *ctx = ahash_request_ctx(req);
 956
 957        if (!req->result)
 958                return;
 959
 960        switch (ctx->flags & SHA_FLAGS_ALGO_MASK) {
 961        default:
 962        case SHA_FLAGS_SHA1:
 963                memcpy(req->result, ctx->digest, SHA1_DIGEST_SIZE);
 964                break;
 965
 966        case SHA_FLAGS_SHA224:
 967                memcpy(req->result, ctx->digest, SHA224_DIGEST_SIZE);
 968                break;
 969
 970        case SHA_FLAGS_SHA256:
 971                memcpy(req->result, ctx->digest, SHA256_DIGEST_SIZE);
 972                break;
 973
 974        case SHA_FLAGS_SHA384:
 975                memcpy(req->result, ctx->digest, SHA384_DIGEST_SIZE);
 976                break;
 977
 978        case SHA_FLAGS_SHA512:
 979                memcpy(req->result, ctx->digest, SHA512_DIGEST_SIZE);
 980                break;
 981        }
 982}
 983
 984static int atmel_sha_finish(struct ahash_request *req)
 985{
 986        struct atmel_sha_reqctx *ctx = ahash_request_ctx(req);
 987        struct atmel_sha_dev *dd = ctx->dd;
 988
 989        if (ctx->digcnt[0] || ctx->digcnt[1])
 990                atmel_sha_copy_ready_hash(req);
 991
 992        dev_dbg(dd->dev, "digcnt: 0x%llx 0x%llx, bufcnt: %zd\n", ctx->digcnt[1],
 993                ctx->digcnt[0], ctx->bufcnt);
 994
 995        return 0;
 996}
 997
 998static void atmel_sha_finish_req(struct ahash_request *req, int err)
 999{
1000        struct atmel_sha_reqctx *ctx = ahash_request_ctx(req);
1001        struct atmel_sha_dev *dd = ctx->dd;
1002
1003        if (!err) {
1004                atmel_sha_copy_hash(req);
1005                if (SHA_FLAGS_FINAL & dd->flags)
1006                        err = atmel_sha_finish(req);
1007        } else {
1008                ctx->flags |= SHA_FLAGS_ERROR;
1009        }
1010
1011        /* atomic operation is not needed here */
1012        (void)atmel_sha_complete(dd, err);
1013}
1014
1015static int atmel_sha_hw_init(struct atmel_sha_dev *dd)
1016{
1017        int err;
1018
1019        err = clk_enable(dd->iclk);
1020        if (err)
1021                return err;
1022
1023        if (!(SHA_FLAGS_INIT & dd->flags)) {
1024                atmel_sha_write(dd, SHA_CR, SHA_CR_SWRST);
1025                dd->flags |= SHA_FLAGS_INIT;
1026        }
1027
1028        return 0;
1029}
1030
1031static inline unsigned int atmel_sha_get_version(struct atmel_sha_dev *dd)
1032{
1033        return atmel_sha_read(dd, SHA_HW_VERSION) & 0x00000fff;
1034}
1035
1036static int atmel_sha_hw_version_init(struct atmel_sha_dev *dd)
1037{
1038        int err;
1039
1040        err = atmel_sha_hw_init(dd);
1041        if (err)
1042                return err;
1043
1044        dd->hw_version = atmel_sha_get_version(dd);
1045
1046        dev_info(dd->dev,
1047                        "version: 0x%x\n", dd->hw_version);
1048
1049        clk_disable(dd->iclk);
1050
1051        return 0;
1052}
1053
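/*
 * Enqueue the request (if any) and, when the hardware is idle, dequeue
 * the next pending request, mark the device busy and run its
 * ctx->start() handler. The enqueue status is returned for requests
 * started asynchronously; the start() result is returned when the
 * dequeued request is the one that was just submitted.
 */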
1054static int atmel_sha_handle_queue(struct atmel_sha_dev *dd,
1055                                  struct ahash_request *req)
1056{
1057        struct crypto_async_request *async_req, *backlog;
1058        struct atmel_sha_ctx *ctx;
1059        unsigned long flags;
1060        bool start_async;
1061        int err = 0, ret = 0;
1062
1063        spin_lock_irqsave(&dd->lock, flags);
1064        if (req)
1065                ret = ahash_enqueue_request(&dd->queue, req);
1066
1067        if (SHA_FLAGS_BUSY & dd->flags) {
1068                spin_unlock_irqrestore(&dd->lock, flags);
1069                return ret;
1070        }
1071
1072        backlog = crypto_get_backlog(&dd->queue);
1073        async_req = crypto_dequeue_request(&dd->queue);
1074        if (async_req)
1075                dd->flags |= SHA_FLAGS_BUSY;
1076
1077        spin_unlock_irqrestore(&dd->lock, flags);
1078
1079        if (!async_req)
1080                return ret;
1081
1082        if (backlog)
1083                backlog->complete(backlog, -EINPROGRESS);
1084
1085        ctx = crypto_tfm_ctx(async_req->tfm);
1086
1087        dd->req = ahash_request_cast(async_req);
1088        start_async = (dd->req != req);
1089        dd->is_async = start_async;
1090        dd->force_complete = false;
1091
1092        /* WARNING: ctx->start() MAY change dd->is_async. */
1093        err = ctx->start(dd);
1094        return (start_async) ? ret : err;
1095}
1096
1097static int atmel_sha_done(struct atmel_sha_dev *dd);
1098
1099static int atmel_sha_start(struct atmel_sha_dev *dd)
1100{
1101        struct ahash_request *req = dd->req;
1102        struct atmel_sha_reqctx *ctx = ahash_request_ctx(req);
1103        int err;
1104
1105        dev_dbg(dd->dev, "handling new req, op: %lu, nbytes: %u\n",
1106                                                ctx->op, req->nbytes);
1107
1108        err = atmel_sha_hw_init(dd);
1109        if (err)
1110                return atmel_sha_complete(dd, err);
1111
1112        /*
1113         * atmel_sha_update_req() and atmel_sha_final_req() can return either:
1114         *  -EINPROGRESS: the hardware is busy and the SHA driver will resume
1115         *                its job later in the done_task.
1116         *                This is the main path.
1117         *
1118         * 0: the SHA driver can continue its job then release the hardware
1119         *    later, if needed, with atmel_sha_finish_req().
1120         *    This is the alternate path.
1121         *
1122         * < 0: an error has occurred so atmel_sha_complete(dd, err) has already
1123         *      been called, hence the hardware has been released.
1124         *      The SHA driver must stop its job without calling
1125         *      atmel_sha_finish_req(), otherwise atmel_sha_complete() would be
1126         *      called a second time.
1127         *
1128         * Please note that currently, atmel_sha_final_req() never returns 0.
1129         */
1130
1131        dd->resume = atmel_sha_done;
1132        if (ctx->op == SHA_OP_UPDATE) {
1133                err = atmel_sha_update_req(dd);
1134                if (!err && (ctx->flags & SHA_FLAGS_FINUP))
1135                        /* no final() after finup() */
1136                        err = atmel_sha_final_req(dd);
1137        } else if (ctx->op == SHA_OP_FINAL) {
1138                err = atmel_sha_final_req(dd);
1139        }
1140
1141        if (!err)
1142                /* done_task will not finish it, so do it here */
1143                atmel_sha_finish_req(req, err);
1144
1145        dev_dbg(dd->dev, "exit, err: %d\n", err);
1146
1147        return err;
1148}
1149
1150static int atmel_sha_enqueue(struct ahash_request *req, unsigned int op)
1151{
1152        struct atmel_sha_reqctx *ctx = ahash_request_ctx(req);
1153        struct atmel_sha_ctx *tctx = crypto_tfm_ctx(req->base.tfm);
1154        struct atmel_sha_dev *dd = tctx->dd;
1155
1156        ctx->op = op;
1157
1158        return atmel_sha_handle_queue(dd, req);
1159}
1160
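/*
 * Non-final updates that still fit in the internal buffer are simply
 * appended and return immediately. With FINUP set, requests whose
 * buffered plus new data stay below ATMEL_SHA_DMA_THRESHOLD are flagged
 * for CPU I/O since short transfers are faster without DMA; every other
 * case is queued as a SHA_OP_UPDATE operation.
 */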
1161static int atmel_sha_update(struct ahash_request *req)
1162{
1163        struct atmel_sha_reqctx *ctx = ahash_request_ctx(req);
1164
1165        if (!req->nbytes)
1166                return 0;
1167
1168        ctx->total = req->nbytes;
1169        ctx->sg = req->src;
1170        ctx->offset = 0;
1171
1172        if (ctx->flags & SHA_FLAGS_FINUP) {
1173                if (ctx->bufcnt + ctx->total < ATMEL_SHA_DMA_THRESHOLD)
1174                        /* faster to use CPU for short transfers */
1175                        ctx->flags |= SHA_FLAGS_CPU;
1176        } else if (ctx->bufcnt + ctx->total < ctx->buflen) {
1177                atmel_sha_append_sg(ctx);
1178                return 0;
1179        }
1180        return atmel_sha_enqueue(req, SHA_OP_UPDATE);
1181}
1182
1183static int atmel_sha_final(struct ahash_request *req)
1184{
1185        struct atmel_sha_reqctx *ctx = ahash_request_ctx(req);
1186
1187        ctx->flags |= SHA_FLAGS_FINUP;
1188
1189        if (ctx->flags & SHA_FLAGS_ERROR)
1190                return 0; /* uncompleted hash is not needed */
1191
1192        if (ctx->flags & SHA_FLAGS_PAD)
1193                /* copy ready hash (+ finalize hmac) */
1194                return atmel_sha_finish(req);
1195
1196        return atmel_sha_enqueue(req, SHA_OP_FINAL);
1197}
1198
1199static int atmel_sha_finup(struct ahash_request *req)
1200{
1201        struct atmel_sha_reqctx *ctx = ahash_request_ctx(req);
1202        int err1, err2;
1203
1204        ctx->flags |= SHA_FLAGS_FINUP;
1205
1206        err1 = atmel_sha_update(req);
1207        if (err1 == -EINPROGRESS ||
1208            (err1 == -EBUSY && (ahash_request_flags(req) &
1209                                CRYPTO_TFM_REQ_MAY_BACKLOG)))
1210                return err1;
1211
 1212        /*
 1213         * final() must always be called to clean up resources,
 1214         * even if update() failed, except in the -EINPROGRESS case.
 1215         */
1216        err2 = atmel_sha_final(req);
1217
1218        return err1 ?: err2;
1219}
1220
1221static int atmel_sha_digest(struct ahash_request *req)
1222{
1223        return atmel_sha_init(req) ?: atmel_sha_finup(req);
1224}
1225
1226
1227static int atmel_sha_export(struct ahash_request *req, void *out)
1228{
1229        const struct atmel_sha_reqctx *ctx = ahash_request_ctx(req);
1230
1231        memcpy(out, ctx, sizeof(*ctx));
1232        return 0;
1233}
1234
1235static int atmel_sha_import(struct ahash_request *req, const void *in)
1236{
1237        struct atmel_sha_reqctx *ctx = ahash_request_ctx(req);
1238
1239        memcpy(ctx, in, sizeof(*ctx));
1240        return 0;
1241}
1242
1243static int atmel_sha_cra_init(struct crypto_tfm *tfm)
1244{
1245        struct atmel_sha_ctx *ctx = crypto_tfm_ctx(tfm);
1246
1247        crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
1248                                 sizeof(struct atmel_sha_reqctx));
1249        ctx->start = atmel_sha_start;
1250
1251        return 0;
1252}
1253
1254static void atmel_sha_alg_init(struct ahash_alg *alg)
1255{
1256        alg->halg.base.cra_priority = ATMEL_SHA_PRIORITY;
1257        alg->halg.base.cra_flags = CRYPTO_ALG_ASYNC;
1258        alg->halg.base.cra_ctxsize = sizeof(struct atmel_sha_ctx);
1259        alg->halg.base.cra_module = THIS_MODULE;
1260        alg->halg.base.cra_init = atmel_sha_cra_init;
1261
1262        alg->halg.statesize = sizeof(struct atmel_sha_reqctx);
1263
1264        alg->init = atmel_sha_init;
1265        alg->update = atmel_sha_update;
1266        alg->final = atmel_sha_final;
1267        alg->finup = atmel_sha_finup;
1268        alg->digest = atmel_sha_digest;
1269        alg->export = atmel_sha_export;
1270        alg->import = atmel_sha_import;
1271}
1272
1273static struct ahash_alg sha_1_256_algs[] = {
1274{
1275        .halg.base.cra_name             = "sha1",
1276        .halg.base.cra_driver_name      = "atmel-sha1",
1277        .halg.base.cra_blocksize        = SHA1_BLOCK_SIZE,
1278
1279        .halg.digestsize = SHA1_DIGEST_SIZE,
1280},
1281{
1282        .halg.base.cra_name             = "sha256",
1283        .halg.base.cra_driver_name      = "atmel-sha256",
1284        .halg.base.cra_blocksize        = SHA256_BLOCK_SIZE,
1285
1286        .halg.digestsize = SHA256_DIGEST_SIZE,
1287},
1288};
1289
1290static struct ahash_alg sha_224_alg = {
1291        .halg.base.cra_name             = "sha224",
1292        .halg.base.cra_driver_name      = "atmel-sha224",
1293        .halg.base.cra_blocksize        = SHA224_BLOCK_SIZE,
1294
1295        .halg.digestsize = SHA224_DIGEST_SIZE,
1296};
1297
1298static struct ahash_alg sha_384_512_algs[] = {
1299{
1300        .halg.base.cra_name             = "sha384",
1301        .halg.base.cra_driver_name      = "atmel-sha384",
1302        .halg.base.cra_blocksize        = SHA384_BLOCK_SIZE,
1303        .halg.base.cra_alignmask        = 0x3,
1304
1305        .halg.digestsize = SHA384_DIGEST_SIZE,
1306},
1307{
1308        .halg.base.cra_name             = "sha512",
1309        .halg.base.cra_driver_name      = "atmel-sha512",
1310        .halg.base.cra_blocksize        = SHA512_BLOCK_SIZE,
1311        .halg.base.cra_alignmask        = 0x3,
1312
1313        .halg.digestsize = SHA512_DIGEST_SIZE,
1314},
1315};
1316
1317static void atmel_sha_queue_task(unsigned long data)
1318{
1319        struct atmel_sha_dev *dd = (struct atmel_sha_dev *)data;
1320
1321        atmel_sha_handle_queue(dd, NULL);
1322}
1323
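/*
 * Resume handler run from the done tasklet once the hardware signals
 * completion: CPU transfers are finished directly, while DMA transfers
 * are unmapped and either restarted for the remaining data or finished.
 */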
1324static int atmel_sha_done(struct atmel_sha_dev *dd)
1325{
1326        int err = 0;
1327
1328        if (SHA_FLAGS_CPU & dd->flags) {
1329                if (SHA_FLAGS_OUTPUT_READY & dd->flags) {
1330                        dd->flags &= ~SHA_FLAGS_OUTPUT_READY;
1331                        goto finish;
1332                }
1333        } else if (SHA_FLAGS_DMA_READY & dd->flags) {
1334                if (SHA_FLAGS_DMA_ACTIVE & dd->flags) {
1335                        dd->flags &= ~SHA_FLAGS_DMA_ACTIVE;
1336                        atmel_sha_update_dma_stop(dd);
1337                }
1338                if (SHA_FLAGS_OUTPUT_READY & dd->flags) {
1339                        /* hash or semi-hash ready */
1340                        dd->flags &= ~(SHA_FLAGS_DMA_READY |
1341                                                SHA_FLAGS_OUTPUT_READY);
1342                        err = atmel_sha_update_dma_start(dd);
1343                        if (err != -EINPROGRESS)
1344                                goto finish;
1345                }
1346        }
1347        return err;
1348
1349finish:
 1350        /* finish the current request */
1351        atmel_sha_finish_req(dd->req, err);
1352
1353        return err;
1354}
1355
1356static void atmel_sha_done_task(unsigned long data)
1357{
1358        struct atmel_sha_dev *dd = (struct atmel_sha_dev *)data;
1359
1360        dd->is_async = true;
1361        (void)dd->resume(dd);
1362}
1363
1364static irqreturn_t atmel_sha_irq(int irq, void *dev_id)
1365{
1366        struct atmel_sha_dev *sha_dd = dev_id;
1367        u32 reg;
1368
1369        reg = atmel_sha_read(sha_dd, SHA_ISR);
1370        if (reg & atmel_sha_read(sha_dd, SHA_IMR)) {
1371                atmel_sha_write(sha_dd, SHA_IDR, reg);
1372                if (SHA_FLAGS_BUSY & sha_dd->flags) {
1373                        sha_dd->flags |= SHA_FLAGS_OUTPUT_READY;
1374                        if (!(SHA_FLAGS_CPU & sha_dd->flags))
1375                                sha_dd->flags |= SHA_FLAGS_DMA_READY;
1376                        tasklet_schedule(&sha_dd->done_task);
1377                } else {
1378                        dev_warn(sha_dd->dev, "SHA interrupt when no active requests.\n");
1379                }
1380                return IRQ_HANDLED;
1381        }
1382
1383        return IRQ_NONE;
1384}
1385
1386
1387/* DMA transfer functions */
1388
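/*
 * Check that the scatterlist can be handed to the DMA engine as-is:
 * every entry must start on a 32-bit boundary and all but the last must
 * have a block-size aligned length. On success, record the number of
 * entries and round the last entry's length up to a 32-bit multiple;
 * the original length is saved so the DMA callback can restore it.
 */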
1389static bool atmel_sha_dma_check_aligned(struct atmel_sha_dev *dd,
1390                                        struct scatterlist *sg,
1391                                        size_t len)
1392{
1393        struct atmel_sha_dma *dma = &dd->dma_lch_in;
1394        struct ahash_request *req = dd->req;
1395        struct atmel_sha_reqctx *ctx = ahash_request_ctx(req);
1396        size_t bs = ctx->block_size;
1397        int nents;
1398
1399        for (nents = 0; sg; sg = sg_next(sg), ++nents) {
1400                if (!IS_ALIGNED(sg->offset, sizeof(u32)))
1401                        return false;
1402
1403                /*
1404                 * This is the last sg, the only one that is allowed to
1405                 * have an unaligned length.
1406                 */
1407                if (len <= sg->length) {
1408                        dma->nents = nents + 1;
1409                        dma->last_sg_length = sg->length;
1410                        sg->length = ALIGN(len, sizeof(u32));
1411                        return true;
1412                }
1413
1414                /* All other sg lengths MUST be aligned to the block size. */
1415                if (!IS_ALIGNED(sg->length, bs))
1416                        return false;
1417
1418                len -= sg->length;
1419        }
1420
1421        return false;
1422}
1423
1424static void atmel_sha_dma_callback2(void *data)
1425{
1426        struct atmel_sha_dev *dd = data;
1427        struct atmel_sha_dma *dma = &dd->dma_lch_in;
1428        struct scatterlist *sg;
1429        int nents;
1430
1431        dma_unmap_sg(dd->dev, dma->sg, dma->nents, DMA_TO_DEVICE);
1432
1433        sg = dma->sg;
1434        for (nents = 0; nents < dma->nents - 1; ++nents)
1435                sg = sg_next(sg);
1436        sg->length = dma->last_sg_length;
1437
1438        dd->is_async = true;
1439        (void)atmel_sha_wait_for_data_ready(dd, dd->resume);
1440}
1441
1442static int atmel_sha_dma_start(struct atmel_sha_dev *dd,
1443                               struct scatterlist *src,
1444                               size_t len,
1445                               atmel_sha_fn_t resume)
1446{
1447        struct atmel_sha_dma *dma = &dd->dma_lch_in;
1448        struct dma_slave_config *config = &dma->dma_conf;
1449        struct dma_chan *chan = dma->chan;
1450        struct dma_async_tx_descriptor *desc;
1451        dma_cookie_t cookie;
1452        unsigned int sg_len;
1453        int err;
1454
1455        dd->resume = resume;
1456
1457        /*
1458         * dma->nents has already been initialized by
1459         * atmel_sha_dma_check_aligned().
1460         */
1461        dma->sg = src;
1462        sg_len = dma_map_sg(dd->dev, dma->sg, dma->nents, DMA_TO_DEVICE);
1463        if (!sg_len) {
1464                err = -ENOMEM;
1465                goto exit;
1466        }
1467
1468        config->src_maxburst = 16;
1469        config->dst_maxburst = 16;
1470        err = dmaengine_slave_config(chan, config);
1471        if (err)
1472                goto unmap_sg;
1473
1474        desc = dmaengine_prep_slave_sg(chan, dma->sg, sg_len, DMA_MEM_TO_DEV,
1475                                       DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
1476        if (!desc) {
1477                err = -ENOMEM;
1478                goto unmap_sg;
1479        }
1480
1481        desc->callback = atmel_sha_dma_callback2;
1482        desc->callback_param = dd;
1483        cookie = dmaengine_submit(desc);
1484        err = dma_submit_error(cookie);
1485        if (err)
1486                goto unmap_sg;
1487
1488        dma_async_issue_pending(chan);
1489
1490        return -EINPROGRESS;
1491
1492unmap_sg:
1493        dma_unmap_sg(dd->dev, dma->sg, dma->nents, DMA_TO_DEVICE);
1494exit:
1495        return atmel_sha_complete(dd, err);
1496}
1497
1498
1499/* CPU transfer functions */
1500
1501static int atmel_sha_cpu_transfer(struct atmel_sha_dev *dd)
1502{
1503        struct ahash_request *req = dd->req;
1504        struct atmel_sha_reqctx *ctx = ahash_request_ctx(req);
1505        const u32 *words = (const u32 *)ctx->buffer;
1506        size_t i, num_words;
1507        u32 isr, din, din_inc;
1508
1509        din_inc = (ctx->flags & SHA_FLAGS_IDATAR0) ? 0 : 1;
1510        for (;;) {
1511                /* Write data into the Input Data Registers. */
1512                num_words = DIV_ROUND_UP(ctx->bufcnt, sizeof(u32));
1513                for (i = 0, din = 0; i < num_words; ++i, din += din_inc)
1514                        atmel_sha_write(dd, SHA_REG_DIN(din), words[i]);
1515
1516                ctx->offset += ctx->bufcnt;
1517                ctx->total -= ctx->bufcnt;
1518
1519                if (!ctx->total)
1520                        break;
1521
1522                /*
1523                 * Prepare next block:
1524                 * Fill ctx->buffer now with the next data to be written into
1525                 * IDATARx: it gives time for the SHA hardware to process
1526                 * the current data so the SHA_INT_DATARDY flag might be set
1527                 * in SHA_ISR when polling this register at the beginning of
1528                 * the next loop.
1529                 */
1530                ctx->bufcnt = min_t(size_t, ctx->block_size, ctx->total);
1531                scatterwalk_map_and_copy(ctx->buffer, ctx->sg,
1532                                         ctx->offset, ctx->bufcnt, 0);
1533
1534                /* Wait for hardware to be ready again. */
1535                isr = atmel_sha_read(dd, SHA_ISR);
1536                if (!(isr & SHA_INT_DATARDY)) {
1537                        /* Not ready yet. */
1538                        dd->resume = atmel_sha_cpu_transfer;
1539                        atmel_sha_write(dd, SHA_IER, SHA_INT_DATARDY);
1540                        return -EINPROGRESS;
1541                }
1542        }
1543
1544        if (unlikely(!(ctx->flags & SHA_FLAGS_WAIT_DATARDY)))
1545                return dd->cpu_transfer_complete(dd);
1546
1547        return atmel_sha_wait_for_data_ready(dd, dd->cpu_transfer_complete);
1548}
1549
1550static int atmel_sha_cpu_start(struct atmel_sha_dev *dd,
1551                               struct scatterlist *sg,
1552                               unsigned int len,
1553                               bool idatar0_only,
1554                               bool wait_data_ready,
1555                               atmel_sha_fn_t resume)
1556{
1557        struct ahash_request *req = dd->req;
1558        struct atmel_sha_reqctx *ctx = ahash_request_ctx(req);
1559
1560        if (!len)
1561                return resume(dd);
1562
1563        ctx->flags &= ~(SHA_FLAGS_IDATAR0 | SHA_FLAGS_WAIT_DATARDY);
1564
1565        if (idatar0_only)
1566                ctx->flags |= SHA_FLAGS_IDATAR0;
1567
1568        if (wait_data_ready)
1569                ctx->flags |= SHA_FLAGS_WAIT_DATARDY;
1570
1571        ctx->sg = sg;
1572        ctx->total = len;
1573        ctx->offset = 0;
1574
1575        /* Prepare the first block to be written. */
1576        ctx->bufcnt = min_t(size_t, ctx->block_size, ctx->total);
1577        scatterwalk_map_and_copy(ctx->buffer, ctx->sg,
1578                                 ctx->offset, ctx->bufcnt, 0);
1579
1580        dd->cpu_transfer_complete = resume;
1581        return atmel_sha_cpu_transfer(dd);
1582}
1583
1584static int atmel_sha_cpu_hash(struct atmel_sha_dev *dd,
1585                              const void *data, unsigned int datalen,
1586                              bool auto_padding,
1587                              atmel_sha_fn_t resume)
1588{
1589        struct ahash_request *req = dd->req;
1590        struct atmel_sha_reqctx *ctx = ahash_request_ctx(req);
1591        u32 msglen = (auto_padding) ? datalen : 0;
1592        u32 mr = SHA_MR_MODE_AUTO;
1593
1594        if (!(IS_ALIGNED(datalen, ctx->block_size) || auto_padding))
1595                return atmel_sha_complete(dd, -EINVAL);
1596
1597        mr |= (ctx->flags & SHA_FLAGS_ALGO_MASK);
1598        atmel_sha_write(dd, SHA_MR, mr);
1599        atmel_sha_write(dd, SHA_MSR, msglen);
1600        atmel_sha_write(dd, SHA_BCR, msglen);
1601        atmel_sha_write(dd, SHA_CR, SHA_CR_FIRST);
1602
1603        sg_init_one(&dd->tmp, data, datalen);
1604        return atmel_sha_cpu_start(dd, &dd->tmp, datalen, false, true, resume);
1605}
1606
1607
1608/* hmac functions */
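    /*
     * HMAC(K, m) = SHA((K' + opad) | SHA((K' + ipad) | m))   (RFC 2104)
     * where K' is the key zero-padded (or first hashed, when longer) to the
     * block size, ipad/opad are the repeated 0x36/0x5c bytes and '+' denotes
     * XOR.  Rather than hashing the ipad/opad blocks for every request, the
     * driver computes their intermediate digests once at setup time and
     * later reloads them as initial hash values (SHA_CR_WUIHV/SHA_CR_WUIEHV).
     */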
1609
1610struct atmel_sha_hmac_key {
1611        bool                    valid;
1612        unsigned int            keylen;
1613        u8                      buffer[SHA512_BLOCK_SIZE];
1614        u8                      *keydup;
1615};
1616
1617static inline void atmel_sha_hmac_key_init(struct atmel_sha_hmac_key *hkey)
1618{
1619        memset(hkey, 0, sizeof(*hkey));
1620}
1621
1622static inline void atmel_sha_hmac_key_release(struct atmel_sha_hmac_key *hkey)
1623{
1624        kfree(hkey->keydup);
1625        memset(hkey, 0, sizeof(*hkey));
1626}
1627
1628static inline int atmel_sha_hmac_key_set(struct atmel_sha_hmac_key *hkey,
1629                                         const u8 *key,
1630                                         unsigned int keylen)
1631{
1632        atmel_sha_hmac_key_release(hkey);
1633
1634        if (keylen > sizeof(hkey->buffer)) {
1635                hkey->keydup = kmemdup(key, keylen, GFP_KERNEL);
1636                if (!hkey->keydup)
1637                        return -ENOMEM;
1638
1639        } else {
1640                memcpy(hkey->buffer, key, keylen);
1641        }
1642
1643        hkey->valid = true;
1644        hkey->keylen = keylen;
1645        return 0;
1646}
1647
1648static inline bool atmel_sha_hmac_key_get(const struct atmel_sha_hmac_key *hkey,
1649                                          const u8 **key,
1650                                          unsigned int *keylen)
1651{
1652        if (!hkey->valid)
1653                return false;
1654
1655        *keylen = hkey->keylen;
1656        *key = (hkey->keydup) ? hkey->keydup : hkey->buffer;
1657        return true;
1658}
1659
1660
1661struct atmel_sha_hmac_ctx {
1662        struct atmel_sha_ctx    base;
1663
1664        struct atmel_sha_hmac_key       hkey;
1665        u32                     ipad[SHA512_BLOCK_SIZE / sizeof(u32)];
1666        u32                     opad[SHA512_BLOCK_SIZE / sizeof(u32)];
1667        atmel_sha_fn_t          resume;
1668};
1669
1670static int atmel_sha_hmac_setup(struct atmel_sha_dev *dd,
1671                                atmel_sha_fn_t resume);
1672static int atmel_sha_hmac_prehash_key(struct atmel_sha_dev *dd,
1673                                      const u8 *key, unsigned int keylen);
1674static int atmel_sha_hmac_prehash_key_done(struct atmel_sha_dev *dd);
1675static int atmel_sha_hmac_compute_ipad_hash(struct atmel_sha_dev *dd);
1676static int atmel_sha_hmac_compute_opad_hash(struct atmel_sha_dev *dd);
1677static int atmel_sha_hmac_setup_done(struct atmel_sha_dev *dd);
1678
1679static int atmel_sha_hmac_init_done(struct atmel_sha_dev *dd);
1680static int atmel_sha_hmac_final(struct atmel_sha_dev *dd);
1681static int atmel_sha_hmac_final_done(struct atmel_sha_dev *dd);
1682static int atmel_sha_hmac_digest2(struct atmel_sha_dev *dd);
1683
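    /*
     * Derive K' from the key stored by atmel_sha_hmac_setkey(): keys longer
     * than the block size are first reduced with a plain hash, shorter keys
     * are zero-padded.  The ipad/opad intermediate digests are then computed
     * and @resume is called.  If no key was set, @resume is called directly.
     */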
1684static int atmel_sha_hmac_setup(struct atmel_sha_dev *dd,
1685                                atmel_sha_fn_t resume)
1686{
1687        struct ahash_request *req = dd->req;
1688        struct atmel_sha_reqctx *ctx = ahash_request_ctx(req);
1689        struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
1690        struct atmel_sha_hmac_ctx *hmac = crypto_ahash_ctx(tfm);
1691        unsigned int keylen;
1692        const u8 *key;
1693        size_t bs;
1694
1695        hmac->resume = resume;
1696        switch (ctx->flags & SHA_FLAGS_ALGO_MASK) {
1697        case SHA_FLAGS_SHA1:
1698                ctx->block_size = SHA1_BLOCK_SIZE;
1699                ctx->hash_size = SHA1_DIGEST_SIZE;
1700                break;
1701
1702        case SHA_FLAGS_SHA224:
1703                ctx->block_size = SHA224_BLOCK_SIZE;
1704                ctx->hash_size = SHA256_DIGEST_SIZE;
1705                break;
1706
1707        case SHA_FLAGS_SHA256:
1708                ctx->block_size = SHA256_BLOCK_SIZE;
1709                ctx->hash_size = SHA256_DIGEST_SIZE;
1710                break;
1711
1712        case SHA_FLAGS_SHA384:
1713                ctx->block_size = SHA384_BLOCK_SIZE;
1714                ctx->hash_size = SHA512_DIGEST_SIZE;
1715                break;
1716
1717        case SHA_FLAGS_SHA512:
1718                ctx->block_size = SHA512_BLOCK_SIZE;
1719                ctx->hash_size = SHA512_DIGEST_SIZE;
1720                break;
1721
1722        default:
1723                return atmel_sha_complete(dd, -EINVAL);
1724        }
1725        bs = ctx->block_size;
1726
1727        if (likely(!atmel_sha_hmac_key_get(&hmac->hkey, &key, &keylen)))
1728                return resume(dd);
1729
1730        /* Compute K' from K. */
1731        if (unlikely(keylen > bs))
1732                return atmel_sha_hmac_prehash_key(dd, key, keylen);
1733
1734        /* Prepare ipad. */
1735        memcpy((u8 *)hmac->ipad, key, keylen);
1736        memset((u8 *)hmac->ipad + keylen, 0, bs - keylen);
1737        return atmel_sha_hmac_compute_ipad_hash(dd);
1738}
1739
1740static int atmel_sha_hmac_prehash_key(struct atmel_sha_dev *dd,
1741                                      const u8 *key, unsigned int keylen)
1742{
1743        return atmel_sha_cpu_hash(dd, key, keylen, true,
1744                                  atmel_sha_hmac_prehash_key_done);
1745}
1746
1747static int atmel_sha_hmac_prehash_key_done(struct atmel_sha_dev *dd)
1748{
1749        struct ahash_request *req = dd->req;
1750        struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
1751        struct atmel_sha_hmac_ctx *hmac = crypto_ahash_ctx(tfm);
1752        struct atmel_sha_reqctx *ctx = ahash_request_ctx(req);
1753        size_t ds = crypto_ahash_digestsize(tfm);
1754        size_t bs = ctx->block_size;
1755        size_t i, num_words = ds / sizeof(u32);
1756
1757        /* Prepare ipad. */
1758        for (i = 0; i < num_words; ++i)
1759                hmac->ipad[i] = atmel_sha_read(dd, SHA_REG_DIGEST(i));
1760        memset((u8 *)hmac->ipad + ds, 0, bs - ds);
1761        return atmel_sha_hmac_compute_ipad_hash(dd);
1762}
1763
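    /*
     * XOR K' with the repeated 0x36 (ipad) and 0x5c (opad) bytes, then hash
     * the ipad block; its intermediate digest is collected in
     * atmel_sha_hmac_compute_opad_hash().
     */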
1764static int atmel_sha_hmac_compute_ipad_hash(struct atmel_sha_dev *dd)
1765{
1766        struct ahash_request *req = dd->req;
1767        struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
1768        struct atmel_sha_hmac_ctx *hmac = crypto_ahash_ctx(tfm);
1769        struct atmel_sha_reqctx *ctx = ahash_request_ctx(req);
1770        size_t bs = ctx->block_size;
1771        size_t i, num_words = bs / sizeof(u32);
1772
1773        memcpy(hmac->opad, hmac->ipad, bs);
1774        for (i = 0; i < num_words; ++i) {
1775                hmac->ipad[i] ^= 0x36363636;
1776                hmac->opad[i] ^= 0x5c5c5c5c;
1777        }
1778
1779        return atmel_sha_cpu_hash(dd, hmac->ipad, bs, false,
1780                                  atmel_sha_hmac_compute_opad_hash);
1781}
1782
1783static int atmel_sha_hmac_compute_opad_hash(struct atmel_sha_dev *dd)
1784{
1785        struct ahash_request *req = dd->req;
1786        struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
1787        struct atmel_sha_hmac_ctx *hmac = crypto_ahash_ctx(tfm);
1788        struct atmel_sha_reqctx *ctx = ahash_request_ctx(req);
1789        size_t bs = ctx->block_size;
1790        size_t hs = ctx->hash_size;
1791        size_t i, num_words = hs / sizeof(u32);
1792
1793        for (i = 0; i < num_words; ++i)
1794                hmac->ipad[i] = atmel_sha_read(dd, SHA_REG_DIGEST(i));
1795        return atmel_sha_cpu_hash(dd, hmac->opad, bs, false,
1796                                  atmel_sha_hmac_setup_done);
1797}
1798
1799static int atmel_sha_hmac_setup_done(struct atmel_sha_dev *dd)
1800{
1801        struct ahash_request *req = dd->req;
1802        struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
1803        struct atmel_sha_hmac_ctx *hmac = crypto_ahash_ctx(tfm);
1804        struct atmel_sha_reqctx *ctx = ahash_request_ctx(req);
1805        size_t hs = ctx->hash_size;
1806        size_t i, num_words = hs / sizeof(u32);
1807
1808        for (i = 0; i < num_words; ++i)
1809                hmac->opad[i] = atmel_sha_read(dd, SHA_REG_DIGEST(i));
1810        atmel_sha_hmac_key_release(&hmac->hkey);
1811        return hmac->resume(dd);
1812}
1813
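    /*
     * Entry point called from atmel_sha_handle_queue() for HMAC transforms:
     * run the key setup phase first for INIT and DIGEST operations, or
     * resume the regular update/final paths with HMAC-specific completion
     * handlers.
     */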
1814static int atmel_sha_hmac_start(struct atmel_sha_dev *dd)
1815{
1816        struct ahash_request *req = dd->req;
1817        struct atmel_sha_reqctx *ctx = ahash_request_ctx(req);
1818        int err;
1819
1820        err = atmel_sha_hw_init(dd);
1821        if (err)
1822                return atmel_sha_complete(dd, err);
1823
1824        switch (ctx->op) {
1825        case SHA_OP_INIT:
1826                err = atmel_sha_hmac_setup(dd, atmel_sha_hmac_init_done);
1827                break;
1828
1829        case SHA_OP_UPDATE:
1830                dd->resume = atmel_sha_done;
1831                err = atmel_sha_update_req(dd);
1832                break;
1833
1834        case SHA_OP_FINAL:
1835                dd->resume = atmel_sha_hmac_final;
1836                err = atmel_sha_final_req(dd);
1837                break;
1838
1839        case SHA_OP_DIGEST:
1840                err = atmel_sha_hmac_setup(dd, atmel_sha_hmac_digest2);
1841                break;
1842
1843        default:
1844                return atmel_sha_complete(dd, -EINVAL);
1845        }
1846
1847        return err;
1848}
1849
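    /*
     * Only a copy of the key is stored here; K' and the ipad/opad digests
     * are computed later in atmel_sha_hmac_setup(), since that step needs
     * the SHA hardware.
     */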
1850static int atmel_sha_hmac_setkey(struct crypto_ahash *tfm, const u8 *key,
1851                                 unsigned int keylen)
1852{
1853        struct atmel_sha_hmac_ctx *hmac = crypto_ahash_ctx(tfm);
1854
1855        return atmel_sha_hmac_key_set(&hmac->hkey, key, keylen);
1856}
1857
1858static int atmel_sha_hmac_init(struct ahash_request *req)
1859{
1860        int err;
1861
1862        err = atmel_sha_init(req);
1863        if (err)
1864                return err;
1865
1866        return atmel_sha_enqueue(req, SHA_OP_INIT);
1867}
1868
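    /*
     * The ipad digest becomes the initial state of the inner hash: store it
     * in ctx->digest with SHA_FLAGS_RESTORE set so the next operation
     * reloads it as initial hash value, and start digcnt at one block size
     * to account for the already-consumed ipad block.
     */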
1869static int atmel_sha_hmac_init_done(struct atmel_sha_dev *dd)
1870{
1871        struct ahash_request *req = dd->req;
1872        struct atmel_sha_reqctx *ctx = ahash_request_ctx(req);
1873        struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
1874        struct atmel_sha_hmac_ctx *hmac = crypto_ahash_ctx(tfm);
1875        size_t bs = ctx->block_size;
1876        size_t hs = ctx->hash_size;
1877
1878        ctx->bufcnt = 0;
1879        ctx->digcnt[0] = bs;
1880        ctx->digcnt[1] = 0;
1881        ctx->flags |= SHA_FLAGS_RESTORE;
1882        memcpy(ctx->digest, hmac->ipad, hs);
1883        return atmel_sha_complete(dd, 0);
1884}
1885
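    /*
     * Finish an init/update/final sequence: save the inner digest
     * d = SHA((K' + ipad) | msg), restore the precomputed opad state through
     * the UIHV, then hash d to obtain SHA((K' + opad) | d).
     */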
1886static int atmel_sha_hmac_final(struct atmel_sha_dev *dd)
1887{
1888        struct ahash_request *req = dd->req;
1889        struct atmel_sha_reqctx *ctx = ahash_request_ctx(req);
1890        struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
1891        struct atmel_sha_hmac_ctx *hmac = crypto_ahash_ctx(tfm);
1892        u32 *digest = (u32 *)ctx->digest;
1893        size_t ds = crypto_ahash_digestsize(tfm);
1894        size_t bs = ctx->block_size;
1895        size_t hs = ctx->hash_size;
1896        size_t i, num_words;
1897        u32 mr;
1898
1899        /* Save d = SHA((K' + ipad) | msg). */
1900        num_words = ds / sizeof(u32);
1901        for (i = 0; i < num_words; ++i)
1902                digest[i] = atmel_sha_read(dd, SHA_REG_DIGEST(i));
1903
1904        /* Restore context to finish computing SHA((K' + opad) | d). */
1905        atmel_sha_write(dd, SHA_CR, SHA_CR_WUIHV);
1906        num_words = hs / sizeof(u32);
1907        for (i = 0; i < num_words; ++i)
1908                atmel_sha_write(dd, SHA_REG_DIN(i), hmac->opad[i]);
1909
1910        mr = SHA_MR_MODE_AUTO | SHA_MR_UIHV;
1911        mr |= (ctx->flags & SHA_FLAGS_ALGO_MASK);
1912        atmel_sha_write(dd, SHA_MR, mr);
1913        atmel_sha_write(dd, SHA_MSR, bs + ds);
1914        atmel_sha_write(dd, SHA_BCR, ds);
1915        atmel_sha_write(dd, SHA_CR, SHA_CR_FIRST);
1916
1917        sg_init_one(&dd->tmp, digest, ds);
1918        return atmel_sha_cpu_start(dd, &dd->tmp, ds, false, true,
1919                                   atmel_sha_hmac_final_done);
1920}
1921
1922static int atmel_sha_hmac_final_done(struct atmel_sha_dev *dd)
1923{
1924        /*
1925         * req->result might not be sizeof(u32) aligned, so copy the
1926         * digest into ctx->digest[] before memcpy() the data into
1927         * req->result.
1928         */
1929        atmel_sha_copy_hash(dd->req);
1930        atmel_sha_copy_ready_hash(dd->req);
1931        return atmel_sha_complete(dd, 0);
1932}
1933
1934static int atmel_sha_hmac_digest(struct ahash_request *req)
1935{
1936        int err;
1937
1938        err = atmel_sha_init(req);
1939        if (err)
1940                return err;
1941
1942        return atmel_sha_enqueue(req, SHA_OP_DIGEST);
1943}
1944
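    /*
     * One-shot HMAC: load the precomputed ipad/opad digests as initial hash
     * values, switch the IP to its hardware HMAC mode (SHA_MR_HMAC) and hash
     * the whole request in a single pass, by DMA when the data is large and
     * aligned enough, by CPU writes otherwise.
     */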
1945static int atmel_sha_hmac_digest2(struct atmel_sha_dev *dd)
1946{
1947        struct ahash_request *req = dd->req;
1948        struct atmel_sha_reqctx *ctx = ahash_request_ctx(req);
1949        struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
1950        struct atmel_sha_hmac_ctx *hmac = crypto_ahash_ctx(tfm);
1951        size_t hs = ctx->hash_size;
1952        size_t i, num_words = hs / sizeof(u32);
1953        bool use_dma = false;
1954        u32 mr;
1955
1956        /* Special case for empty message. */
1957        if (!req->nbytes)
1958                return atmel_sha_complete(dd, -EINVAL); /* TODO: handle empty messages */
1959
1960        /* Check DMA threshold and alignment. */
1961        if (req->nbytes > ATMEL_SHA_DMA_THRESHOLD &&
1962            atmel_sha_dma_check_aligned(dd, req->src, req->nbytes))
1963                use_dma = true;
1964
1965        /* Write both initial hash values to compute an HMAC. */
1966        atmel_sha_write(dd, SHA_CR, SHA_CR_WUIHV);
1967        for (i = 0; i < num_words; ++i)
1968                atmel_sha_write(dd, SHA_REG_DIN(i), hmac->ipad[i]);
1969
1970        atmel_sha_write(dd, SHA_CR, SHA_CR_WUIEHV);
1971        for (i = 0; i < num_words; ++i)
1972                atmel_sha_write(dd, SHA_REG_DIN(i), hmac->opad[i]);
1973
1974        /* Write the Mode, Message Size, Bytes Count then Control Registers. */
1975        mr = (SHA_MR_HMAC | SHA_MR_DUALBUFF);
1976        mr |= ctx->flags & SHA_FLAGS_ALGO_MASK;
1977        if (use_dma)
1978                mr |= SHA_MR_MODE_IDATAR0;
1979        else
1980                mr |= SHA_MR_MODE_AUTO;
1981        atmel_sha_write(dd, SHA_MR, mr);
1982
1983        atmel_sha_write(dd, SHA_MSR, req->nbytes);
1984        atmel_sha_write(dd, SHA_BCR, req->nbytes);
1985
1986        atmel_sha_write(dd, SHA_CR, SHA_CR_FIRST);
1987
1988        /* Process data. */
1989        if (use_dma)
1990                return atmel_sha_dma_start(dd, req->src, req->nbytes,
1991                                           atmel_sha_hmac_final_done);
1992
1993        return atmel_sha_cpu_start(dd, req->src, req->nbytes, false, true,
1994                                   atmel_sha_hmac_final_done);
1995}
1996
1997static int atmel_sha_hmac_cra_init(struct crypto_tfm *tfm)
1998{
1999        struct atmel_sha_hmac_ctx *hmac = crypto_tfm_ctx(tfm);
2000
2001        crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
2002                                 sizeof(struct atmel_sha_reqctx));
2003        hmac->base.start = atmel_sha_hmac_start;
2004        atmel_sha_hmac_key_init(&hmac->hkey);
2005
2006        return 0;
2007}
2008
2009static void atmel_sha_hmac_cra_exit(struct crypto_tfm *tfm)
2010{
2011        struct atmel_sha_hmac_ctx *hmac = crypto_tfm_ctx(tfm);
2012
2013        atmel_sha_hmac_key_release(&hmac->hkey);
2014}
2015
2016static void atmel_sha_hmac_alg_init(struct ahash_alg *alg)
2017{
2018        alg->halg.base.cra_priority = ATMEL_SHA_PRIORITY;
2019        alg->halg.base.cra_flags = CRYPTO_ALG_ASYNC;
2020        alg->halg.base.cra_ctxsize = sizeof(struct atmel_sha_hmac_ctx);
2021        alg->halg.base.cra_module = THIS_MODULE;
2022        alg->halg.base.cra_init = atmel_sha_hmac_cra_init;
2023        alg->halg.base.cra_exit = atmel_sha_hmac_cra_exit;
2024
2025        alg->halg.statesize = sizeof(struct atmel_sha_reqctx);
2026
2027        alg->init = atmel_sha_hmac_init;
2028        alg->update = atmel_sha_update;
2029        alg->final = atmel_sha_final;
2030        alg->digest = atmel_sha_hmac_digest;
2031        alg->setkey = atmel_sha_hmac_setkey;
2032        alg->export = atmel_sha_export;
2033        alg->import = atmel_sha_import;
2034}
2035
2036static struct ahash_alg sha_hmac_algs[] = {
2037{
2038        .halg.base.cra_name             = "hmac(sha1)",
2039        .halg.base.cra_driver_name      = "atmel-hmac-sha1",
2040        .halg.base.cra_blocksize        = SHA1_BLOCK_SIZE,
2041
2042        .halg.digestsize = SHA1_DIGEST_SIZE,
2043},
2044{
2045        .halg.base.cra_name             = "hmac(sha224)",
2046        .halg.base.cra_driver_name      = "atmel-hmac-sha224",
2047        .halg.base.cra_blocksize        = SHA224_BLOCK_SIZE,
2048
2049        .halg.digestsize = SHA224_DIGEST_SIZE,
2050},
2051{
2052        .halg.base.cra_name             = "hmac(sha256)",
2053        .halg.base.cra_driver_name      = "atmel-hmac-sha256",
2054        .halg.base.cra_blocksize        = SHA256_BLOCK_SIZE,
2055
2056        .halg.digestsize = SHA256_DIGEST_SIZE,
2057},
2058{
2059        .halg.base.cra_name             = "hmac(sha384)",
2060        .halg.base.cra_driver_name      = "atmel-hmac-sha384",
2061        .halg.base.cra_blocksize        = SHA384_BLOCK_SIZE,
2062
2063        .halg.digestsize = SHA384_DIGEST_SIZE,
2064},
2065{
2066        .halg.base.cra_name             = "hmac(sha512)",
2067        .halg.base.cra_driver_name      = "atmel-hmac-sha512",
2068        .halg.base.cra_blocksize        = SHA512_BLOCK_SIZE,
2069
2070        .halg.digestsize = SHA512_DIGEST_SIZE,
2071},
2072};
2073
2074#if IS_ENABLED(CONFIG_CRYPTO_DEV_ATMEL_AUTHENC)
2075/* authenc functions */
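    /*
     * The functions below are exported for use by the Atmel AES driver
     * (atmel-aes.c) to implement authenc() algorithms.  A rough sketch of
     * the expected call sequence, with purely illustrative variable names:
     *
     *   auth = atmel_sha_authenc_spawn(SHA_FLAGS_HMAC_SHA256);
     *   atmel_sha_authenc_setkey(auth, key, keylen, flags);
     *   ...
     *   atmel_sha_authenc_schedule(req, auth, cb, aes_dev);
     *     -> cb(): atmel_sha_authenc_init(req, assoc, assoclen, textlen,
     *                                     cb, aes_dev);
     *     -> cb(): the caller is expected to feed the remaining textlen
     *              bytes to the SHA IP (only the associated data is hashed
     *              here), then call either
     *   atmel_sha_authenc_final(req, digest, digestlen, cb, aes_dev);
     *   or atmel_sha_authenc_abort(req) on error.
     */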
2076
2077static int atmel_sha_authenc_init2(struct atmel_sha_dev *dd);
2078static int atmel_sha_authenc_init_done(struct atmel_sha_dev *dd);
2079static int atmel_sha_authenc_final_done(struct atmel_sha_dev *dd);
2080
2081
2082struct atmel_sha_authenc_ctx {
2083        struct crypto_ahash     *tfm;
2084};
2085
2086struct atmel_sha_authenc_reqctx {
2087        struct atmel_sha_reqctx base;
2088
2089        atmel_aes_authenc_fn_t  cb;
2090        struct atmel_aes_dev    *aes_dev;
2091
2092        /* _init() parameters. */
2093        struct scatterlist      *assoc;
2094        u32                     assoclen;
2095        u32                     textlen;
2096
2097        /* _final() parameters. */
2098        u32                     *digest;
2099        unsigned int            digestlen;
2100};
2101
2102static void atmel_sha_authenc_complete(struct crypto_async_request *areq,
2103                                       int err)
2104{
2105        struct ahash_request *req = areq->data;
2106        struct atmel_sha_authenc_reqctx *authctx = ahash_request_ctx(req);
2107
2108        authctx->cb(authctx->aes_dev, err, authctx->base.dd->is_async);
2109}
2110
2111static int atmel_sha_authenc_start(struct atmel_sha_dev *dd)
2112{
2113        struct ahash_request *req = dd->req;
2114        struct atmel_sha_authenc_reqctx *authctx = ahash_request_ctx(req);
2115        int err;
2116
2117        /*
2118         * Force atmel_sha_complete() to call req->base.complete(), i.e.
2119         * atmel_sha_authenc_complete(), which in turn calls authctx->cb().
2120         */
2121        dd->force_complete = true;
2122
2123        err = atmel_sha_hw_init(dd);
2124        return authctx->cb(authctx->aes_dev, err, dd->is_async);
2125}
2126
2127bool atmel_sha_authenc_is_ready(void)
2128{
2129        struct atmel_sha_ctx dummy;
2130
2131        dummy.dd = NULL;
2132        return (atmel_sha_find_dev(&dummy) != NULL);
2133}
2134EXPORT_SYMBOL_GPL(atmel_sha_authenc_is_ready);
2135
2136unsigned int atmel_sha_authenc_get_reqsize(void)
2137{
2138        return sizeof(struct atmel_sha_authenc_reqctx);
2139}
2140EXPORT_SYMBOL_GPL(atmel_sha_authenc_get_reqsize);
2141
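    /*
     * Allocate a child "atmel-hmac-sha*" ahash transform matching @mode and
     * wrap it in an atmel_sha_authenc_ctx for the AES driver to use.
     */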
2142struct atmel_sha_authenc_ctx *atmel_sha_authenc_spawn(unsigned long mode)
2143{
2144        struct atmel_sha_authenc_ctx *auth;
2145        struct crypto_ahash *tfm;
2146        struct atmel_sha_ctx *tctx;
2147        const char *name;
2148        int err = -EINVAL;
2149
2150        switch (mode & SHA_FLAGS_MODE_MASK) {
2151        case SHA_FLAGS_HMAC_SHA1:
2152                name = "atmel-hmac-sha1";
2153                break;
2154
2155        case SHA_FLAGS_HMAC_SHA224:
2156                name = "atmel-hmac-sha224";
2157                break;
2158
2159        case SHA_FLAGS_HMAC_SHA256:
2160                name = "atmel-hmac-sha256";
2161                break;
2162
2163        case SHA_FLAGS_HMAC_SHA384:
2164                name = "atmel-hmac-sha384";
2165                break;
2166
2167        case SHA_FLAGS_HMAC_SHA512:
2168                name = "atmel-hmac-sha512";
2169                break;
2170
2171        default:
2172                goto error;
2173        }
2174
2175        tfm = crypto_alloc_ahash(name, 0, 0);
2176        if (IS_ERR(tfm)) {
2177                err = PTR_ERR(tfm);
2178                goto error;
2179        }
2180        tctx = crypto_ahash_ctx(tfm);
2181        tctx->start = atmel_sha_authenc_start;
2182        tctx->flags = mode;
2183
2184        auth = kzalloc(sizeof(*auth), GFP_KERNEL);
2185        if (!auth) {
2186                err = -ENOMEM;
2187                goto err_free_ahash;
2188        }
2189        auth->tfm = tfm;
2190
2191        return auth;
2192
2193err_free_ahash:
2194        crypto_free_ahash(tfm);
2195error:
2196        return ERR_PTR(err);
2197}
2198EXPORT_SYMBOL_GPL(atmel_sha_authenc_spawn);
2199
2200void atmel_sha_authenc_free(struct atmel_sha_authenc_ctx *auth)
2201{
2202        if (auth)
2203                crypto_free_ahash(auth->tfm);
2204        kfree(auth);
2205}
2206EXPORT_SYMBOL_GPL(atmel_sha_authenc_free);
2207
2208int atmel_sha_authenc_setkey(struct atmel_sha_authenc_ctx *auth,
2209                             const u8 *key, unsigned int keylen, u32 flags)
2210{
2211        struct crypto_ahash *tfm = auth->tfm;
2212
2213        crypto_ahash_clear_flags(tfm, CRYPTO_TFM_REQ_MASK);
2214        crypto_ahash_set_flags(tfm, flags & CRYPTO_TFM_REQ_MASK);
2215        return crypto_ahash_setkey(tfm, key, keylen);
2216}
2217EXPORT_SYMBOL_GPL(atmel_sha_authenc_setkey);
2218
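    /*
     * Reset the request context and queue @req on the SHA device; @cb is
     * invoked, possibly asynchronously, once the hardware has been grabbed
     * and initialized (see atmel_sha_authenc_start()).
     */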
2219int atmel_sha_authenc_schedule(struct ahash_request *req,
2220                               struct atmel_sha_authenc_ctx *auth,
2221                               atmel_aes_authenc_fn_t cb,
2222                               struct atmel_aes_dev *aes_dev)
2223{
2224        struct atmel_sha_authenc_reqctx *authctx = ahash_request_ctx(req);
2225        struct atmel_sha_reqctx *ctx = &authctx->base;
2226        struct crypto_ahash *tfm = auth->tfm;
2227        struct atmel_sha_ctx *tctx = crypto_ahash_ctx(tfm);
2228        struct atmel_sha_dev *dd;
2229
2230        /* Reset request context (MUST be done first). */
2231        memset(authctx, 0, sizeof(*authctx));
2232
2233        /* Get SHA device. */
2234        dd = atmel_sha_find_dev(tctx);
2235        if (!dd)
2236                return cb(aes_dev, -ENODEV, false);
2237
2238        /* Init request context. */
2239        ctx->dd = dd;
2240        ctx->buflen = SHA_BUFFER_LEN;
2241        authctx->cb = cb;
2242        authctx->aes_dev = aes_dev;
2243        ahash_request_set_tfm(req, tfm);
2244        ahash_request_set_callback(req, 0, atmel_sha_authenc_complete, req);
2245
2246        return atmel_sha_handle_queue(dd, req);
2247}
2248EXPORT_SYMBOL_GPL(atmel_sha_authenc_schedule);
2249
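    /*
     * Program the IP for an HMAC over @assoclen + @textlen bytes, load the
     * precomputed ipad/opad digests and hash the associated data.  @assoclen
     * must be a multiple of 4 bytes.  Only the associated data is consumed
     * here; the remaining @textlen bytes are expected to be fed to the IP by
     * the caller before atmel_sha_authenc_final() is called.
     */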
2250int atmel_sha_authenc_init(struct ahash_request *req,
2251                           struct scatterlist *assoc, unsigned int assoclen,
2252                           unsigned int textlen,
2253                           atmel_aes_authenc_fn_t cb,
2254                           struct atmel_aes_dev *aes_dev)
2255{
2256        struct atmel_sha_authenc_reqctx *authctx = ahash_request_ctx(req);
2257        struct atmel_sha_reqctx *ctx = &authctx->base;
2258        struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
2259        struct atmel_sha_hmac_ctx *hmac = crypto_ahash_ctx(tfm);
2260        struct atmel_sha_dev *dd = ctx->dd;
2261
2262        if (unlikely(!IS_ALIGNED(assoclen, sizeof(u32))))
2263                return atmel_sha_complete(dd, -EINVAL);
2264
2265        authctx->cb = cb;
2266        authctx->aes_dev = aes_dev;
2267        authctx->assoc = assoc;
2268        authctx->assoclen = assoclen;
2269        authctx->textlen = textlen;
2270
2271        ctx->flags = hmac->base.flags;
2272        return atmel_sha_hmac_setup(dd, atmel_sha_authenc_init2);
2273}
2274EXPORT_SYMBOL_GPL(atmel_sha_authenc_init);
2275
2276static int atmel_sha_authenc_init2(struct atmel_sha_dev *dd)
2277{
2278        struct ahash_request *req = dd->req;
2279        struct atmel_sha_authenc_reqctx *authctx = ahash_request_ctx(req);
2280        struct atmel_sha_reqctx *ctx = &authctx->base;
2281        struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
2282        struct atmel_sha_hmac_ctx *hmac = crypto_ahash_ctx(tfm);
2283        size_t hs = ctx->hash_size;
2284        size_t i, num_words = hs / sizeof(u32);
2285        u32 mr, msg_size;
2286
2287        atmel_sha_write(dd, SHA_CR, SHA_CR_WUIHV);
2288        for (i = 0; i < num_words; ++i)
2289                atmel_sha_write(dd, SHA_REG_DIN(i), hmac->ipad[i]);
2290
2291        atmel_sha_write(dd, SHA_CR, SHA_CR_WUIEHV);
2292        for (i = 0; i < num_words; ++i)
2293                atmel_sha_write(dd, SHA_REG_DIN(i), hmac->opad[i]);
2294
2295        mr = (SHA_MR_MODE_IDATAR0 |
2296              SHA_MR_HMAC |
2297              SHA_MR_DUALBUFF);
2298        mr |= ctx->flags & SHA_FLAGS_ALGO_MASK;
2299        atmel_sha_write(dd, SHA_MR, mr);
2300
2301        msg_size = authctx->assoclen + authctx->textlen;
2302        atmel_sha_write(dd, SHA_MSR, msg_size);
2303        atmel_sha_write(dd, SHA_BCR, msg_size);
2304
2305        atmel_sha_write(dd, SHA_CR, SHA_CR_FIRST);
2306
2307        /* Process assoc data. */
2308        return atmel_sha_cpu_start(dd, authctx->assoc, authctx->assoclen,
2309                                   true, false,
2310                                   atmel_sha_authenc_init_done);
2311}
2312
2313static int atmel_sha_authenc_init_done(struct atmel_sha_dev *dd)
2314{
2315        struct ahash_request *req = dd->req;
2316        struct atmel_sha_authenc_reqctx *authctx = ahash_request_ctx(req);
2317
2318        return authctx->cb(authctx->aes_dev, 0, dd->is_async);
2319}
2320
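    /*
     * Wait for the last block to be processed, then copy up to @digestlen
     * bytes of the computed HMAC into @digest and release the SHA device.
     */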
2321int atmel_sha_authenc_final(struct ahash_request *req,
2322                            u32 *digest, unsigned int digestlen,
2323                            atmel_aes_authenc_fn_t cb,
2324                            struct atmel_aes_dev *aes_dev)
2325{
2326        struct atmel_sha_authenc_reqctx *authctx = ahash_request_ctx(req);
2327        struct atmel_sha_reqctx *ctx = &authctx->base;
2328        struct atmel_sha_dev *dd = ctx->dd;
2329
2330        switch (ctx->flags & SHA_FLAGS_ALGO_MASK) {
2331        case SHA_FLAGS_SHA1:
2332                authctx->digestlen = SHA1_DIGEST_SIZE;
2333                break;
2334
2335        case SHA_FLAGS_SHA224:
2336                authctx->digestlen = SHA224_DIGEST_SIZE;
2337                break;
2338
2339        case SHA_FLAGS_SHA256:
2340                authctx->digestlen = SHA256_DIGEST_SIZE;
2341                break;
2342
2343        case SHA_FLAGS_SHA384:
2344                authctx->digestlen = SHA384_DIGEST_SIZE;
2345                break;
2346
2347        case SHA_FLAGS_SHA512:
2348                authctx->digestlen = SHA512_DIGEST_SIZE;
2349                break;
2350
2351        default:
2352                return atmel_sha_complete(dd, -EINVAL);
2353        }
2354        if (authctx->digestlen > digestlen)
2355                authctx->digestlen = digestlen;
2356
2357        authctx->cb = cb;
2358        authctx->aes_dev = aes_dev;
2359        authctx->digest = digest;
2360        return atmel_sha_wait_for_data_ready(dd,
2361                                             atmel_sha_authenc_final_done);
2362}
2363EXPORT_SYMBOL_GPL(atmel_sha_authenc_final);
2364
2365static int atmel_sha_authenc_final_done(struct atmel_sha_dev *dd)
2366{
2367        struct ahash_request *req = dd->req;
2368        struct atmel_sha_authenc_reqctx *authctx = ahash_request_ctx(req);
2369        size_t i, num_words = authctx->digestlen / sizeof(u32);
2370
2371        for (i = 0; i < num_words; ++i)
2372                authctx->digest[i] = atmel_sha_read(dd, SHA_REG_DIGEST(i));
2373
2374        return atmel_sha_complete(dd, 0);
2375}
2376
2377void atmel_sha_authenc_abort(struct ahash_request *req)
2378{
2379        struct atmel_sha_authenc_reqctx *authctx = ahash_request_ctx(req);
2380        struct atmel_sha_reqctx *ctx = &authctx->base;
2381        struct atmel_sha_dev *dd = ctx->dd;
2382
2383        /* Prevent atmel_sha_complete() from calling req->base.complete(). */
2384        dd->is_async = false;
2385        dd->force_complete = false;
2386        (void)atmel_sha_complete(dd, 0);
2387}
2388EXPORT_SYMBOL_GPL(atmel_sha_authenc_abort);
2389
2390#endif /* CONFIG_CRYPTO_DEV_ATMEL_AUTHENC */
2391
2392
2393static void atmel_sha_unregister_algs(struct atmel_sha_dev *dd)
2394{
2395        int i;
2396
2397        if (dd->caps.has_hmac)
2398                for (i = 0; i < ARRAY_SIZE(sha_hmac_algs); i++)
2399                        crypto_unregister_ahash(&sha_hmac_algs[i]);
2400
2401        for (i = 0; i < ARRAY_SIZE(sha_1_256_algs); i++)
2402                crypto_unregister_ahash(&sha_1_256_algs[i]);
2403
2404        if (dd->caps.has_sha224)
2405                crypto_unregister_ahash(&sha_224_alg);
2406
2407        if (dd->caps.has_sha_384_512) {
2408                for (i = 0; i < ARRAY_SIZE(sha_384_512_algs); i++)
2409                        crypto_unregister_ahash(&sha_384_512_algs[i]);
2410        }
2411}
2412
2413static int atmel_sha_register_algs(struct atmel_sha_dev *dd)
2414{
2415        int err, i, j;
2416
2417        for (i = 0; i < ARRAY_SIZE(sha_1_256_algs); i++) {
2418                atmel_sha_alg_init(&sha_1_256_algs[i]);
2419
2420                err = crypto_register_ahash(&sha_1_256_algs[i]);
2421                if (err)
2422                        goto err_sha_1_256_algs;
2423        }
2424
2425        if (dd->caps.has_sha224) {
2426                atmel_sha_alg_init(&sha_224_alg);
2427
2428                err = crypto_register_ahash(&sha_224_alg);
2429                if (err)
2430                        goto err_sha_224_algs;
2431        }
2432
2433        if (dd->caps.has_sha_384_512) {
2434                for (i = 0; i < ARRAY_SIZE(sha_384_512_algs); i++) {
2435                        atmel_sha_alg_init(&sha_384_512_algs[i]);
2436
2437                        err = crypto_register_ahash(&sha_384_512_algs[i]);
2438                        if (err)
2439                                goto err_sha_384_512_algs;
2440                }
2441        }
2442
2443        if (dd->caps.has_hmac) {
2444                for (i = 0; i < ARRAY_SIZE(sha_hmac_algs); i++) {
2445                        atmel_sha_hmac_alg_init(&sha_hmac_algs[i]);
2446
2447                        err = crypto_register_ahash(&sha_hmac_algs[i]);
2448                        if (err)
2449                                goto err_sha_hmac_algs;
2450                }
2451        }
2452
2453        return 0;
2454
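            /*
             * Error unwinding: each label below falls through to the next
             * one so that everything registered before the failure gets
             * unregistered; i is preloaded with the size of the array
             * handled by the following label before falling through.
             */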
2455        /* i = ARRAY_SIZE(sha_hmac_algs); not needed: never reached by fall-through */
2456err_sha_hmac_algs:
2457        for (j = 0; j < i; j++)
2458                crypto_unregister_ahash(&sha_hmac_algs[j]);
2459        i = ARRAY_SIZE(sha_384_512_algs);
2460err_sha_384_512_algs:
2461        for (j = 0; j < i; j++)
2462                crypto_unregister_ahash(&sha_384_512_algs[j]);
2463        crypto_unregister_ahash(&sha_224_alg);
2464err_sha_224_algs:
2465        i = ARRAY_SIZE(sha_1_256_algs);
2466err_sha_1_256_algs:
2467        for (j = 0; j < i; j++)
2468                crypto_unregister_ahash(&sha_1_256_algs[j]);
2469
2470        return err;
2471}
2472
2473static int atmel_sha_dma_init(struct atmel_sha_dev *dd)
2474{
2475        dd->dma_lch_in.chan = dma_request_chan(dd->dev, "tx");
2476        if (IS_ERR(dd->dma_lch_in.chan)) {
2477                dev_err(dd->dev, "DMA channel is not available\n");
2478                return PTR_ERR(dd->dma_lch_in.chan);
2479        }
2480
2481        dd->dma_lch_in.dma_conf.dst_addr = dd->phys_base +
2482                SHA_REG_DIN(0);
2483        dd->dma_lch_in.dma_conf.src_maxburst = 1;
2484        dd->dma_lch_in.dma_conf.src_addr_width =
2485                DMA_SLAVE_BUSWIDTH_4_BYTES;
2486        dd->dma_lch_in.dma_conf.dst_maxburst = 1;
2487        dd->dma_lch_in.dma_conf.dst_addr_width =
2488                DMA_SLAVE_BUSWIDTH_4_BYTES;
2489        dd->dma_lch_in.dma_conf.device_fc = false;
2490
2491        return 0;
2492}
2493
2494static void atmel_sha_dma_cleanup(struct atmel_sha_dev *dd)
2495{
2496        dma_release_channel(dd->dma_lch_in.chan);
2497}
2498
2499static void atmel_sha_get_cap(struct atmel_sha_dev *dd)
2500{
2501
2502        dd->caps.has_dma = 0;
2503        dd->caps.has_dualbuff = 0;
2504        dd->caps.has_sha224 = 0;
2505        dd->caps.has_sha_384_512 = 0;
2506        dd->caps.has_uihv = 0;
2507        dd->caps.has_hmac = 0;
2508
2509        /* keep only major version number */
2510        switch (dd->hw_version & 0xff0) {
2511        case 0x510:
2512                dd->caps.has_dma = 1;
2513                dd->caps.has_dualbuff = 1;
2514                dd->caps.has_sha224 = 1;
2515                dd->caps.has_sha_384_512 = 1;
2516                dd->caps.has_uihv = 1;
2517                dd->caps.has_hmac = 1;
2518                break;
2519        case 0x420:
2520                dd->caps.has_dma = 1;
2521                dd->caps.has_dualbuff = 1;
2522                dd->caps.has_sha224 = 1;
2523                dd->caps.has_sha_384_512 = 1;
2524                dd->caps.has_uihv = 1;
2525                break;
2526        case 0x410:
2527                dd->caps.has_dma = 1;
2528                dd->caps.has_dualbuff = 1;
2529                dd->caps.has_sha224 = 1;
2530                dd->caps.has_sha_384_512 = 1;
2531                break;
2532        case 0x400:
2533                dd->caps.has_dma = 1;
2534                dd->caps.has_dualbuff = 1;
2535                dd->caps.has_sha224 = 1;
2536                break;
2537        case 0x320:
2538                break;
2539        default:
2540                dev_warn(dd->dev,
2541                                "Unhandled SHA version, assuming minimum capabilities\n");
2542                break;
2543        }
2544}
2545
2546#if defined(CONFIG_OF)
2547static const struct of_device_id atmel_sha_dt_ids[] = {
2548        { .compatible = "atmel,at91sam9g46-sha" },
2549        { /* sentinel */ }
2550};
2551
2552MODULE_DEVICE_TABLE(of, atmel_sha_dt_ids);
2553#endif
2554
2555static int atmel_sha_probe(struct platform_device *pdev)
2556{
2557        struct atmel_sha_dev *sha_dd;
2558        struct device *dev = &pdev->dev;
2559        struct resource *sha_res;
2560        int err;
2561
2562        sha_dd = devm_kzalloc(&pdev->dev, sizeof(*sha_dd), GFP_KERNEL);
2563        if (!sha_dd)
2564                return -ENOMEM;
2565
2566        sha_dd->dev = dev;
2567
2568        platform_set_drvdata(pdev, sha_dd);
2569
2570        INIT_LIST_HEAD(&sha_dd->list);
2571        spin_lock_init(&sha_dd->lock);
2572
2573        tasklet_init(&sha_dd->done_task, atmel_sha_done_task,
2574                                        (unsigned long)sha_dd);
2575        tasklet_init(&sha_dd->queue_task, atmel_sha_queue_task,
2576                                        (unsigned long)sha_dd);
2577
2578        crypto_init_queue(&sha_dd->queue, ATMEL_SHA_QUEUE_LENGTH);
2579
2580        /* Get the base address */
2581        sha_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
2582        if (!sha_res) {
2583                dev_err(dev, "no MEM resource info\n");
2584                err = -ENODEV;
2585                goto err_tasklet_kill;
2586        }
2587        sha_dd->phys_base = sha_res->start;
2588
2589        /* Get the IRQ */
2590        sha_dd->irq = platform_get_irq(pdev, 0);
2591        if (sha_dd->irq < 0) {
2592                err = sha_dd->irq;
2593                goto err_tasklet_kill;
2594        }
2595
2596        err = devm_request_irq(&pdev->dev, sha_dd->irq, atmel_sha_irq,
2597                               IRQF_SHARED, "atmel-sha", sha_dd);
2598        if (err) {
2599                dev_err(dev, "unable to request sha irq.\n");
2600                goto err_tasklet_kill;
2601        }
2602
2603        /* Initializing the clock */
2604        sha_dd->iclk = devm_clk_get(&pdev->dev, "sha_clk");
2605        if (IS_ERR(sha_dd->iclk)) {
2606                dev_err(dev, "clock initialization failed.\n");
2607                err = PTR_ERR(sha_dd->iclk);
2608                goto err_tasklet_kill;
2609        }
2610
2611        sha_dd->io_base = devm_ioremap_resource(&pdev->dev, sha_res);
2612        if (IS_ERR(sha_dd->io_base)) {
2613                dev_err(dev, "can't ioremap\n");
2614                err = PTR_ERR(sha_dd->io_base);
2615                goto err_tasklet_kill;
2616        }
2617
2618        err = clk_prepare(sha_dd->iclk);
2619        if (err)
2620                goto err_tasklet_kill;
2621
2622        err = atmel_sha_hw_version_init(sha_dd);
2623        if (err)
2624                goto err_iclk_unprepare;
2625
2626        atmel_sha_get_cap(sha_dd);
2627
2628        if (sha_dd->caps.has_dma) {
2629                err = atmel_sha_dma_init(sha_dd);
2630                if (err)
2631                        goto err_iclk_unprepare;
2632
2633                dev_info(dev, "using %s for DMA transfers\n",
2634                                dma_chan_name(sha_dd->dma_lch_in.chan));
2635        }
2636
2637        spin_lock(&atmel_sha.lock);
2638        list_add_tail(&sha_dd->list, &atmel_sha.dev_list);
2639        spin_unlock(&atmel_sha.lock);
2640
2641        err = atmel_sha_register_algs(sha_dd);
2642        if (err)
2643                goto err_algs;
2644
2645        dev_info(dev, "Atmel SHA1/SHA256%s%s\n",
2646                        sha_dd->caps.has_sha224 ? "/SHA224" : "",
2647                        sha_dd->caps.has_sha_384_512 ? "/SHA384/SHA512" : "");
2648
2649        return 0;
2650
2651err_algs:
2652        spin_lock(&atmel_sha.lock);
2653        list_del(&sha_dd->list);
2654        spin_unlock(&atmel_sha.lock);
2655        if (sha_dd->caps.has_dma)
2656                atmel_sha_dma_cleanup(sha_dd);
2657err_iclk_unprepare:
2658        clk_unprepare(sha_dd->iclk);
2659err_tasklet_kill:
2660        tasklet_kill(&sha_dd->queue_task);
2661        tasklet_kill(&sha_dd->done_task);
2662
2663        return err;
2664}
2665
2666static int atmel_sha_remove(struct platform_device *pdev)
2667{
2668        struct atmel_sha_dev *sha_dd;
2669
2670        sha_dd = platform_get_drvdata(pdev);
2671        if (!sha_dd)
2672                return -ENODEV;
2673        spin_lock(&atmel_sha.lock);
2674        list_del(&sha_dd->list);
2675        spin_unlock(&atmel_sha.lock);
2676
2677        atmel_sha_unregister_algs(sha_dd);
2678
2679        tasklet_kill(&sha_dd->queue_task);
2680        tasklet_kill(&sha_dd->done_task);
2681
2682        if (sha_dd->caps.has_dma)
2683                atmel_sha_dma_cleanup(sha_dd);
2684
2685        clk_unprepare(sha_dd->iclk);
2686
2687        return 0;
2688}
2689
2690static struct platform_driver atmel_sha_driver = {
2691        .probe          = atmel_sha_probe,
2692        .remove         = atmel_sha_remove,
2693        .driver         = {
2694                .name   = "atmel_sha",
2695                .of_match_table = of_match_ptr(atmel_sha_dt_ids),
2696        },
2697};
2698
2699module_platform_driver(atmel_sha_driver);
2700
2701MODULE_DESCRIPTION("Atmel SHA (1/256/224/384/512) hw acceleration support.");
2702MODULE_LICENSE("GPL v2");
2703MODULE_AUTHOR("Nicolas Royer - Eukréa Electromatique");
2704