linux/drivers/crypto/atmel-sha.c
// SPDX-License-Identifier: GPL-2.0
/*
 * Cryptographic API.
 *
 * Support for ATMEL SHA1/SHA256 HW acceleration.
 *
 * Copyright (c) 2012 Eukréa Electromatique - ATMEL
 * Author: Nicolas Royer <nicolas@eukrea.com>
 *
 * Some ideas are from the omap-sham.c driver.
 */


#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/clk.h>
#include <linux/io.h>
#include <linux/hw_random.h>
#include <linux/platform_device.h>

#include <linux/device.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/scatterlist.h>
#include <linux/dma-mapping.h>
#include <linux/of_device.h>
#include <linux/delay.h>
#include <linux/crypto.h>
#include <linux/cryptohash.h>
#include <crypto/scatterwalk.h>
#include <crypto/algapi.h>
#include <crypto/sha.h>
#include <crypto/hash.h>
#include <crypto/internal/hash.h>
#include <linux/platform_data/crypto-atmel.h>
#include "atmel-sha-regs.h"
#include "atmel-authenc.h"

/* SHA flags */
#define SHA_FLAGS_BUSY                  BIT(0)
#define SHA_FLAGS_FINAL                 BIT(1)
#define SHA_FLAGS_DMA_ACTIVE            BIT(2)
#define SHA_FLAGS_OUTPUT_READY          BIT(3)
#define SHA_FLAGS_INIT                  BIT(4)
#define SHA_FLAGS_CPU                   BIT(5)
#define SHA_FLAGS_DMA_READY             BIT(6)
#define SHA_FLAGS_DUMP_REG              BIT(7)

/* bits[11:8] are reserved. */

#define SHA_FLAGS_FINUP                 BIT(16)
#define SHA_FLAGS_SG                    BIT(17)
#define SHA_FLAGS_ERROR                 BIT(23)
#define SHA_FLAGS_PAD                   BIT(24)
#define SHA_FLAGS_RESTORE               BIT(25)
#define SHA_FLAGS_IDATAR0               BIT(26)
#define SHA_FLAGS_WAIT_DATARDY          BIT(27)

#define SHA_OP_INIT     0
#define SHA_OP_UPDATE   1
#define SHA_OP_FINAL    2
#define SHA_OP_DIGEST   3

#define SHA_BUFFER_LEN          (PAGE_SIZE / 16)

#define ATMEL_SHA_DMA_THRESHOLD         56

struct atmel_sha_caps {
        bool    has_dma;
        bool    has_dualbuff;
        bool    has_sha224;
        bool    has_sha_384_512;
        bool    has_uihv;
        bool    has_hmac;
};

struct atmel_sha_dev;

/*
 * .statesize = sizeof(struct atmel_sha_reqctx) must be <= PAGE_SIZE / 8 as
 * tested by the ahash_prepare_alg() function.
 */
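/*
 * A rough sanity check of that bound, assuming 4 KiB pages: PAGE_SIZE / 8 is
 * 512 bytes, while the largest members below are the 384-byte buffer[]
 * (SHA_BUFFER_LEN + SHA512_BLOCK_SIZE = 256 + 128) and the 64-byte digest[],
 * which leaves room for the remaining bookkeeping fields.
 */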
struct atmel_sha_reqctx {
        struct atmel_sha_dev    *dd;
        unsigned long   flags;
        unsigned long   op;

        u8      digest[SHA512_DIGEST_SIZE] __aligned(sizeof(u32));
        u64     digcnt[2];
        size_t  bufcnt;
        size_t  buflen;
        dma_addr_t      dma_addr;

        /* walk state */
        struct scatterlist      *sg;
        unsigned int    offset; /* offset in current sg */
        unsigned int    total;  /* total request */

        size_t block_size;
        size_t hash_size;

        u8 buffer[SHA_BUFFER_LEN + SHA512_BLOCK_SIZE] __aligned(sizeof(u32));
};

typedef int (*atmel_sha_fn_t)(struct atmel_sha_dev *);

struct atmel_sha_ctx {
        struct atmel_sha_dev    *dd;
        atmel_sha_fn_t          start;

        unsigned long           flags;
};

#define ATMEL_SHA_QUEUE_LENGTH  50

struct atmel_sha_dma {
        struct dma_chan         *chan;
        struct dma_slave_config dma_conf;
        struct scatterlist      *sg;
        int                     nents;
        unsigned int            last_sg_length;
};

struct atmel_sha_dev {
        struct list_head        list;
        unsigned long           phys_base;
        struct device           *dev;
        struct clk              *iclk;
        int                     irq;
        void __iomem            *io_base;

        spinlock_t              lock;
        int                     err;
        struct tasklet_struct   done_task;
        struct tasklet_struct   queue_task;

        unsigned long           flags;
        struct crypto_queue     queue;
        struct ahash_request    *req;
        bool                    is_async;
        bool                    force_complete;
        atmel_sha_fn_t          resume;
        atmel_sha_fn_t          cpu_transfer_complete;

        struct atmel_sha_dma    dma_lch_in;

        struct atmel_sha_caps   caps;

        struct scatterlist      tmp;

        u32     hw_version;
};

struct atmel_sha_drv {
        struct list_head        dev_list;
        spinlock_t              lock;
};

static struct atmel_sha_drv atmel_sha = {
        .dev_list = LIST_HEAD_INIT(atmel_sha.dev_list),
        .lock = __SPIN_LOCK_UNLOCKED(atmel_sha.lock),
};

#ifdef VERBOSE_DEBUG
static const char *atmel_sha_reg_name(u32 offset, char *tmp, size_t sz, bool wr)
{
        switch (offset) {
        case SHA_CR:
                return "CR";

        case SHA_MR:
                return "MR";

        case SHA_IER:
                return "IER";

        case SHA_IDR:
                return "IDR";

        case SHA_IMR:
                return "IMR";

        case SHA_ISR:
                return "ISR";

        case SHA_MSR:
                return "MSR";

        case SHA_BCR:
                return "BCR";

        case SHA_REG_DIN(0):
        case SHA_REG_DIN(1):
        case SHA_REG_DIN(2):
        case SHA_REG_DIN(3):
        case SHA_REG_DIN(4):
        case SHA_REG_DIN(5):
        case SHA_REG_DIN(6):
        case SHA_REG_DIN(7):
        case SHA_REG_DIN(8):
        case SHA_REG_DIN(9):
        case SHA_REG_DIN(10):
        case SHA_REG_DIN(11):
        case SHA_REG_DIN(12):
        case SHA_REG_DIN(13):
        case SHA_REG_DIN(14):
        case SHA_REG_DIN(15):
                snprintf(tmp, sz, "IDATAR[%u]", (offset - SHA_REG_DIN(0)) >> 2);
                break;

        case SHA_REG_DIGEST(0):
        case SHA_REG_DIGEST(1):
        case SHA_REG_DIGEST(2):
        case SHA_REG_DIGEST(3):
        case SHA_REG_DIGEST(4):
        case SHA_REG_DIGEST(5):
        case SHA_REG_DIGEST(6):
        case SHA_REG_DIGEST(7):
        case SHA_REG_DIGEST(8):
        case SHA_REG_DIGEST(9):
        case SHA_REG_DIGEST(10):
        case SHA_REG_DIGEST(11):
        case SHA_REG_DIGEST(12):
        case SHA_REG_DIGEST(13):
        case SHA_REG_DIGEST(14):
        case SHA_REG_DIGEST(15):
                if (wr)
                        snprintf(tmp, sz, "IDATAR[%u]",
                                 16u + ((offset - SHA_REG_DIGEST(0)) >> 2));
                else
                        snprintf(tmp, sz, "ODATAR[%u]",
                                 (offset - SHA_REG_DIGEST(0)) >> 2);
                break;

        case SHA_HW_VERSION:
                return "HWVER";

        default:
                snprintf(tmp, sz, "0x%02x", offset);
                break;
        }

        return tmp;
}

#endif /* VERBOSE_DEBUG */

static inline u32 atmel_sha_read(struct atmel_sha_dev *dd, u32 offset)
{
        u32 value = readl_relaxed(dd->io_base + offset);

#ifdef VERBOSE_DEBUG
        if (dd->flags & SHA_FLAGS_DUMP_REG) {
                char tmp[16];

                dev_vdbg(dd->dev, "read 0x%08x from %s\n", value,
                         atmel_sha_reg_name(offset, tmp, sizeof(tmp), false));
        }
#endif /* VERBOSE_DEBUG */

        return value;
}

static inline void atmel_sha_write(struct atmel_sha_dev *dd,
                                        u32 offset, u32 value)
{
#ifdef VERBOSE_DEBUG
        if (dd->flags & SHA_FLAGS_DUMP_REG) {
                char tmp[16];

                dev_vdbg(dd->dev, "write 0x%08x into %s\n", value,
                         atmel_sha_reg_name(offset, tmp, sizeof(tmp), true));
        }
#endif /* VERBOSE_DEBUG */

        writel_relaxed(value, dd->io_base + offset);
}

static inline int atmel_sha_complete(struct atmel_sha_dev *dd, int err)
{
        struct ahash_request *req = dd->req;

        dd->flags &= ~(SHA_FLAGS_BUSY | SHA_FLAGS_FINAL | SHA_FLAGS_CPU |
                       SHA_FLAGS_DMA_READY | SHA_FLAGS_OUTPUT_READY |
                       SHA_FLAGS_DUMP_REG);

        clk_disable(dd->iclk);

        if ((dd->is_async || dd->force_complete) && req->base.complete)
                req->base.complete(&req->base, err);

        /* handle new request */
        tasklet_schedule(&dd->queue_task);

        return err;
}

static size_t atmel_sha_append_sg(struct atmel_sha_reqctx *ctx)
{
        size_t count;

        while ((ctx->bufcnt < ctx->buflen) && ctx->total) {
                count = min(ctx->sg->length - ctx->offset, ctx->total);
                count = min(count, ctx->buflen - ctx->bufcnt);

                if (count <= 0) {
                        /*
                         * Check if count <= 0 because the buffer is full or
                         * because the sg length is 0. In the latter case,
                         * check if there is another sg in the list; a
                         * zero-length sg doesn't necessarily mean the end
                         * of the sg list.
                         */
                        if ((ctx->sg->length == 0) && !sg_is_last(ctx->sg)) {
                                ctx->sg = sg_next(ctx->sg);
                                continue;
                        } else {
                                break;
                        }
                }

                scatterwalk_map_and_copy(ctx->buffer + ctx->bufcnt, ctx->sg,
                        ctx->offset, count, 0);

                ctx->bufcnt += count;
                ctx->offset += count;
                ctx->total -= count;

                if (ctx->offset == ctx->sg->length) {
                        ctx->sg = sg_next(ctx->sg);
                        if (ctx->sg)
                                ctx->offset = 0;
                        else
                                ctx->total = 0;
                }
        }

        return 0;
}

/*
 * The purpose of this padding is to ensure that the padded message is a
 * multiple of 512 bits (SHA1/SHA224/SHA256) or 1024 bits (SHA384/SHA512).
 * The bit "1" is appended at the end of the message, followed by
 * "padlen-1" zero bits. Then a 64-bit block (SHA1/SHA224/SHA256) or a
 * 128-bit block (SHA384/SHA512) equal to the message length in bits
 * is appended.
 *
 * For SHA1/SHA224/SHA256, padlen is calculated as follows:
 *  - if message length < 56 bytes then padlen = 56 - message length
 *  - else padlen = 64 + 56 - message length
 *
 * For SHA384/SHA512, padlen is calculated as follows:
 *  - if message length < 112 bytes then padlen = 112 - message length
 *  - else padlen = 128 + 112 - message length
 */
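/*
 * Worked example (illustrative only): for SHA256 with a total message length
 * of 100 bytes, index = 100 & 0x3f = 36, hence padlen = 56 - 36 = 20. The
 * final block then ends with 0x80, 19 zero bytes and the 64-bit big-endian
 * bit length 800 (100 * 8), for a padded total of 128 bytes, i.e. two
 * 512-bit blocks.
 */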
static void atmel_sha_fill_padding(struct atmel_sha_reqctx *ctx, int length)
{
        unsigned int index, padlen;
        u64 bits[2];
        u64 size[2];

        size[0] = ctx->digcnt[0];
        size[1] = ctx->digcnt[1];

        size[0] += ctx->bufcnt;
        if (size[0] < ctx->bufcnt)
                size[1]++;

        size[0] += length;
        if (size[0] < length)
                size[1]++;

        bits[1] = cpu_to_be64(size[0] << 3);
        bits[0] = cpu_to_be64(size[1] << 3 | size[0] >> 61);

        switch (ctx->flags & SHA_FLAGS_ALGO_MASK) {
        case SHA_FLAGS_SHA384:
        case SHA_FLAGS_SHA512:
                index = ctx->bufcnt & 0x7f;
                padlen = (index < 112) ? (112 - index) : ((128+112) - index);
                *(ctx->buffer + ctx->bufcnt) = 0x80;
                memset(ctx->buffer + ctx->bufcnt + 1, 0, padlen-1);
                memcpy(ctx->buffer + ctx->bufcnt + padlen, bits, 16);
                ctx->bufcnt += padlen + 16;
                ctx->flags |= SHA_FLAGS_PAD;
                break;

        default:
                index = ctx->bufcnt & 0x3f;
                padlen = (index < 56) ? (56 - index) : ((64+56) - index);
                *(ctx->buffer + ctx->bufcnt) = 0x80;
                memset(ctx->buffer + ctx->bufcnt + 1, 0, padlen-1);
                memcpy(ctx->buffer + ctx->bufcnt + padlen, &bits[1], 8);
                ctx->bufcnt += padlen + 8;
                ctx->flags |= SHA_FLAGS_PAD;
                break;
        }
}

static struct atmel_sha_dev *atmel_sha_find_dev(struct atmel_sha_ctx *tctx)
{
        struct atmel_sha_dev *dd = NULL;
        struct atmel_sha_dev *tmp;

        spin_lock_bh(&atmel_sha.lock);
        if (!tctx->dd) {
                list_for_each_entry(tmp, &atmel_sha.dev_list, list) {
                        dd = tmp;
                        break;
                }
                tctx->dd = dd;
        } else {
                dd = tctx->dd;
        }

        spin_unlock_bh(&atmel_sha.lock);

        return dd;
}

static int atmel_sha_init(struct ahash_request *req)
{
        struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
        struct atmel_sha_ctx *tctx = crypto_ahash_ctx(tfm);
        struct atmel_sha_reqctx *ctx = ahash_request_ctx(req);
        struct atmel_sha_dev *dd = atmel_sha_find_dev(tctx);

        ctx->dd = dd;

        ctx->flags = 0;

        dev_dbg(dd->dev, "init: digest size: %d\n",
                crypto_ahash_digestsize(tfm));

        switch (crypto_ahash_digestsize(tfm)) {
        case SHA1_DIGEST_SIZE:
                ctx->flags |= SHA_FLAGS_SHA1;
                ctx->block_size = SHA1_BLOCK_SIZE;
                break;
        case SHA224_DIGEST_SIZE:
                ctx->flags |= SHA_FLAGS_SHA224;
                ctx->block_size = SHA224_BLOCK_SIZE;
                break;
        case SHA256_DIGEST_SIZE:
                ctx->flags |= SHA_FLAGS_SHA256;
                ctx->block_size = SHA256_BLOCK_SIZE;
                break;
        case SHA384_DIGEST_SIZE:
                ctx->flags |= SHA_FLAGS_SHA384;
                ctx->block_size = SHA384_BLOCK_SIZE;
                break;
        case SHA512_DIGEST_SIZE:
                ctx->flags |= SHA_FLAGS_SHA512;
                ctx->block_size = SHA512_BLOCK_SIZE;
                break;
        default:
                return -EINVAL;
        }

        ctx->bufcnt = 0;
        ctx->digcnt[0] = 0;
        ctx->digcnt[1] = 0;
        ctx->buflen = SHA_BUFFER_LEN;

        return 0;
}

static void atmel_sha_write_ctrl(struct atmel_sha_dev *dd, int dma)
{
        struct atmel_sha_reqctx *ctx = ahash_request_ctx(dd->req);
        u32 valmr = SHA_MR_MODE_AUTO;
        unsigned int i, hashsize = 0;

        if (likely(dma)) {
                if (!dd->caps.has_dma)
                        atmel_sha_write(dd, SHA_IER, SHA_INT_TXBUFE);
                valmr = SHA_MR_MODE_PDC;
                if (dd->caps.has_dualbuff)
                        valmr |= SHA_MR_DUALBUFF;
        } else {
                atmel_sha_write(dd, SHA_IER, SHA_INT_DATARDY);
        }

        switch (ctx->flags & SHA_FLAGS_ALGO_MASK) {
        case SHA_FLAGS_SHA1:
                valmr |= SHA_MR_ALGO_SHA1;
                hashsize = SHA1_DIGEST_SIZE;
                break;

        case SHA_FLAGS_SHA224:
                valmr |= SHA_MR_ALGO_SHA224;
                hashsize = SHA256_DIGEST_SIZE;
                break;

        case SHA_FLAGS_SHA256:
                valmr |= SHA_MR_ALGO_SHA256;
                hashsize = SHA256_DIGEST_SIZE;
                break;

        case SHA_FLAGS_SHA384:
                valmr |= SHA_MR_ALGO_SHA384;
                hashsize = SHA512_DIGEST_SIZE;
                break;

        case SHA_FLAGS_SHA512:
                valmr |= SHA_MR_ALGO_SHA512;
                hashsize = SHA512_DIGEST_SIZE;
                break;

        default:
                break;
        }

        /* Setting CR_FIRST only for the first iteration */
        if (!(ctx->digcnt[0] || ctx->digcnt[1])) {
                atmel_sha_write(dd, SHA_CR, SHA_CR_FIRST);
        } else if (dd->caps.has_uihv && (ctx->flags & SHA_FLAGS_RESTORE)) {
                const u32 *hash = (const u32 *)ctx->digest;

                /*
                 * Restore the hardware context: update the User Initialize
                 * Hash Value (UIHV) with the value saved when the latest
                 * 'update' operation completed on this very same crypto
                 * request.
                 */
                ctx->flags &= ~SHA_FLAGS_RESTORE;
                atmel_sha_write(dd, SHA_CR, SHA_CR_WUIHV);
                for (i = 0; i < hashsize / sizeof(u32); ++i)
                        atmel_sha_write(dd, SHA_REG_DIN(i), hash[i]);
                atmel_sha_write(dd, SHA_CR, SHA_CR_FIRST);
                valmr |= SHA_MR_UIHV;
        }
        /*
         * WARNING: If the UIHV feature is not available, the hardware CANNOT
         * process concurrent requests: the internal registers used to store
         * the hash/digest are still set to the partial digest output values
         * computed during the latest round.
         */

        atmel_sha_write(dd, SHA_MR, valmr);
}

static inline int atmel_sha_wait_for_data_ready(struct atmel_sha_dev *dd,
                                                atmel_sha_fn_t resume)
{
        u32 isr = atmel_sha_read(dd, SHA_ISR);

        if (unlikely(isr & SHA_INT_DATARDY))
                return resume(dd);

        dd->resume = resume;
        atmel_sha_write(dd, SHA_IER, SHA_INT_DATARDY);
        return -EINPROGRESS;
}

static int atmel_sha_xmit_cpu(struct atmel_sha_dev *dd, const u8 *buf,
                              size_t length, int final)
{
        struct atmel_sha_reqctx *ctx = ahash_request_ctx(dd->req);
        int count, len32;
        const u32 *buffer = (const u32 *)buf;

        dev_dbg(dd->dev, "xmit_cpu: digcnt: 0x%llx 0x%llx, length: %zd, final: %d\n",
                ctx->digcnt[1], ctx->digcnt[0], length, final);

        atmel_sha_write_ctrl(dd, 0);

        /* update digcnt first: it must be valid before clocks are disabled */
        ctx->digcnt[0] += length;
        if (ctx->digcnt[0] < length)
                ctx->digcnt[1]++;

        if (final)
                dd->flags |= SHA_FLAGS_FINAL; /* catch last interrupt */

        len32 = DIV_ROUND_UP(length, sizeof(u32));

        dd->flags |= SHA_FLAGS_CPU;

        for (count = 0; count < len32; count++)
                atmel_sha_write(dd, SHA_REG_DIN(count), buffer[count]);

        return -EINPROGRESS;
}

static int atmel_sha_xmit_pdc(struct atmel_sha_dev *dd, dma_addr_t dma_addr1,
                size_t length1, dma_addr_t dma_addr2, size_t length2, int final)
{
        struct atmel_sha_reqctx *ctx = ahash_request_ctx(dd->req);
        int len32;

        dev_dbg(dd->dev, "xmit_pdc: digcnt: 0x%llx 0x%llx, length: %zd, final: %d\n",
                ctx->digcnt[1], ctx->digcnt[0], length1, final);

        len32 = DIV_ROUND_UP(length1, sizeof(u32));
        atmel_sha_write(dd, SHA_PTCR, SHA_PTCR_TXTDIS);
        atmel_sha_write(dd, SHA_TPR, dma_addr1);
        atmel_sha_write(dd, SHA_TCR, len32);

        len32 = DIV_ROUND_UP(length2, sizeof(u32));
        atmel_sha_write(dd, SHA_TNPR, dma_addr2);
        atmel_sha_write(dd, SHA_TNCR, len32);

        atmel_sha_write_ctrl(dd, 1);

        /* update digcnt first: it must be valid before clocks are disabled */
        ctx->digcnt[0] += length1;
        if (ctx->digcnt[0] < length1)
                ctx->digcnt[1]++;

        if (final)
                dd->flags |= SHA_FLAGS_FINAL; /* catch last interrupt */

        dd->flags |= SHA_FLAGS_DMA_ACTIVE;

        /* Start DMA transfer */
        atmel_sha_write(dd, SHA_PTCR, SHA_PTCR_TXTEN);

        return -EINPROGRESS;
}

static void atmel_sha_dma_callback(void *data)
{
        struct atmel_sha_dev *dd = data;

        dd->is_async = true;

        /* dma_lch_in - completed - wait DATRDY */
        atmel_sha_write(dd, SHA_IER, SHA_INT_DATARDY);
}

static int atmel_sha_xmit_dma(struct atmel_sha_dev *dd, dma_addr_t dma_addr1,
                size_t length1, dma_addr_t dma_addr2, size_t length2, int final)
{
        struct atmel_sha_reqctx *ctx = ahash_request_ctx(dd->req);
        struct dma_async_tx_descriptor  *in_desc;
        struct scatterlist sg[2];

        dev_dbg(dd->dev, "xmit_dma: digcnt: 0x%llx 0x%llx, length: %zd, final: %d\n",
                ctx->digcnt[1], ctx->digcnt[0], length1, final);

        dd->dma_lch_in.dma_conf.src_maxburst = 16;
        dd->dma_lch_in.dma_conf.dst_maxburst = 16;

        dmaengine_slave_config(dd->dma_lch_in.chan, &dd->dma_lch_in.dma_conf);

        if (length2) {
                sg_init_table(sg, 2);
                sg_dma_address(&sg[0]) = dma_addr1;
                sg_dma_len(&sg[0]) = length1;
                sg_dma_address(&sg[1]) = dma_addr2;
                sg_dma_len(&sg[1]) = length2;
                in_desc = dmaengine_prep_slave_sg(dd->dma_lch_in.chan, sg, 2,
                        DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
        } else {
                sg_init_table(sg, 1);
                sg_dma_address(&sg[0]) = dma_addr1;
                sg_dma_len(&sg[0]) = length1;
                in_desc = dmaengine_prep_slave_sg(dd->dma_lch_in.chan, sg, 1,
                        DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
        }
        if (!in_desc)
                return atmel_sha_complete(dd, -EINVAL);

        in_desc->callback = atmel_sha_dma_callback;
        in_desc->callback_param = dd;

        atmel_sha_write_ctrl(dd, 1);

        /* update digcnt first: it must be valid before clocks are disabled */
        ctx->digcnt[0] += length1;
        if (ctx->digcnt[0] < length1)
                ctx->digcnt[1]++;

        if (final)
                dd->flags |= SHA_FLAGS_FINAL; /* catch last interrupt */

        dd->flags |= SHA_FLAGS_DMA_ACTIVE;

        /* Start DMA transfer */
        dmaengine_submit(in_desc);
        dma_async_issue_pending(dd->dma_lch_in.chan);

        return -EINPROGRESS;
}

static int atmel_sha_xmit_start(struct atmel_sha_dev *dd, dma_addr_t dma_addr1,
                size_t length1, dma_addr_t dma_addr2, size_t length2, int final)
{
        if (dd->caps.has_dma)
                return atmel_sha_xmit_dma(dd, dma_addr1, length1,
                                dma_addr2, length2, final);
        else
                return atmel_sha_xmit_pdc(dd, dma_addr1, length1,
                                dma_addr2, length2, final);
}

static int atmel_sha_update_cpu(struct atmel_sha_dev *dd)
{
        struct atmel_sha_reqctx *ctx = ahash_request_ctx(dd->req);
        int bufcnt;

        atmel_sha_append_sg(ctx);
        atmel_sha_fill_padding(ctx, 0);
        bufcnt = ctx->bufcnt;
        ctx->bufcnt = 0;

        return atmel_sha_xmit_cpu(dd, ctx->buffer, bufcnt, 1);
}

static int atmel_sha_xmit_dma_map(struct atmel_sha_dev *dd,
                                        struct atmel_sha_reqctx *ctx,
                                        size_t length, int final)
{
        ctx->dma_addr = dma_map_single(dd->dev, ctx->buffer,
                                ctx->buflen + ctx->block_size, DMA_TO_DEVICE);
        if (dma_mapping_error(dd->dev, ctx->dma_addr)) {
                dev_err(dd->dev, "dma %zu bytes error\n", ctx->buflen +
                                ctx->block_size);
                return atmel_sha_complete(dd, -EINVAL);
        }

        ctx->flags &= ~SHA_FLAGS_SG;

        /* the next call does not fail, so no unmap is needed on error */
        return atmel_sha_xmit_start(dd, ctx->dma_addr, length, 0, 0, final);
}

static int atmel_sha_update_dma_slow(struct atmel_sha_dev *dd)
{
        struct atmel_sha_reqctx *ctx = ahash_request_ctx(dd->req);
        unsigned int final;
        size_t count;

        atmel_sha_append_sg(ctx);

        final = (ctx->flags & SHA_FLAGS_FINUP) && !ctx->total;

        dev_dbg(dd->dev, "slow: bufcnt: %zu, digcnt: 0x%llx 0x%llx, final: %d\n",
                 ctx->bufcnt, ctx->digcnt[1], ctx->digcnt[0], final);

        if (final)
                atmel_sha_fill_padding(ctx, 0);

        if (final || (ctx->bufcnt == ctx->buflen)) {
                count = ctx->bufcnt;
                ctx->bufcnt = 0;
                return atmel_sha_xmit_dma_map(dd, ctx, count, final);
        }

        return 0;
}

static int atmel_sha_update_dma_start(struct atmel_sha_dev *dd)
{
        struct atmel_sha_reqctx *ctx = ahash_request_ctx(dd->req);
        unsigned int length, final, tail;
        struct scatterlist *sg;
        unsigned int count;

        if (!ctx->total)
                return 0;

        if (ctx->bufcnt || ctx->offset)
                return atmel_sha_update_dma_slow(dd);

        dev_dbg(dd->dev, "fast: digcnt: 0x%llx 0x%llx, bufcnt: %zd, total: %u\n",
                ctx->digcnt[1], ctx->digcnt[0], ctx->bufcnt, ctx->total);

        sg = ctx->sg;

        if (!IS_ALIGNED(sg->offset, sizeof(u32)))
                return atmel_sha_update_dma_slow(dd);

        if (!sg_is_last(sg) && !IS_ALIGNED(sg->length, ctx->block_size))
                /* size is not ctx->block_size aligned */
                return atmel_sha_update_dma_slow(dd);

        length = min(ctx->total, sg->length);

        if (sg_is_last(sg)) {
                if (!(ctx->flags & SHA_FLAGS_FINUP)) {
                        /* not the final data: length must stay ctx->block_size aligned */
                        tail = length & (ctx->block_size - 1);
                        length -= tail;
                }
        }

        ctx->total -= length;
        ctx->offset = length; /* offset where to start slow */

        final = (ctx->flags & SHA_FLAGS_FINUP) && !ctx->total;

        /* Add padding */
        if (final) {
                tail = length & (ctx->block_size - 1);
                length -= tail;
                ctx->total += tail;
                ctx->offset = length; /* offset where to start slow */

                sg = ctx->sg;
                atmel_sha_append_sg(ctx);

                atmel_sha_fill_padding(ctx, length);

                ctx->dma_addr = dma_map_single(dd->dev, ctx->buffer,
                        ctx->buflen + ctx->block_size, DMA_TO_DEVICE);
                if (dma_mapping_error(dd->dev, ctx->dma_addr)) {
                        dev_err(dd->dev, "dma %zu bytes error\n",
                                ctx->buflen + ctx->block_size);
                        return atmel_sha_complete(dd, -EINVAL);
                }

                if (length == 0) {
                        ctx->flags &= ~SHA_FLAGS_SG;
                        count = ctx->bufcnt;
                        ctx->bufcnt = 0;
                        return atmel_sha_xmit_start(dd, ctx->dma_addr, count, 0,
                                        0, final);
                } else {
                        ctx->sg = sg;
                        if (!dma_map_sg(dd->dev, ctx->sg, 1,
                                        DMA_TO_DEVICE)) {
                                dev_err(dd->dev, "dma_map_sg error\n");
                                return atmel_sha_complete(dd, -EINVAL);
                        }

                        ctx->flags |= SHA_FLAGS_SG;

                        count = ctx->bufcnt;
                        ctx->bufcnt = 0;
                        return atmel_sha_xmit_start(dd, sg_dma_address(ctx->sg),
                                        length, ctx->dma_addr, count, final);
                }
        }

        if (!dma_map_sg(dd->dev, ctx->sg, 1, DMA_TO_DEVICE)) {
                dev_err(dd->dev, "dma_map_sg error\n");
                return atmel_sha_complete(dd, -EINVAL);
        }

        ctx->flags |= SHA_FLAGS_SG;

        /* the next call does not fail, so no unmap is needed on error */
        return atmel_sha_xmit_start(dd, sg_dma_address(ctx->sg), length, 0,
                                                                0, final);
}

static int atmel_sha_update_dma_stop(struct atmel_sha_dev *dd)
{
        struct atmel_sha_reqctx *ctx = ahash_request_ctx(dd->req);

        if (ctx->flags & SHA_FLAGS_SG) {
                dma_unmap_sg(dd->dev, ctx->sg, 1, DMA_TO_DEVICE);
                if (ctx->sg->length == ctx->offset) {
                        ctx->sg = sg_next(ctx->sg);
                        if (ctx->sg)
                                ctx->offset = 0;
                }
                if (ctx->flags & SHA_FLAGS_PAD) {
                        dma_unmap_single(dd->dev, ctx->dma_addr,
                                ctx->buflen + ctx->block_size, DMA_TO_DEVICE);
                }
        } else {
                dma_unmap_single(dd->dev, ctx->dma_addr, ctx->buflen +
                                                ctx->block_size, DMA_TO_DEVICE);
        }

        return 0;
}

static int atmel_sha_update_req(struct atmel_sha_dev *dd)
{
        struct ahash_request *req = dd->req;
        struct atmel_sha_reqctx *ctx = ahash_request_ctx(req);
        int err;

        dev_dbg(dd->dev, "update_req: total: %u, digcnt: 0x%llx 0x%llx\n",
                ctx->total, ctx->digcnt[1], ctx->digcnt[0]);

        if (ctx->flags & SHA_FLAGS_CPU)
                err = atmel_sha_update_cpu(dd);
        else
                err = atmel_sha_update_dma_start(dd);

        /* wait for DMA completion before taking more data */
        dev_dbg(dd->dev, "update: err: %d, digcnt: 0x%llx 0x%llx\n",
                        err, ctx->digcnt[1], ctx->digcnt[0]);

        return err;
}

static int atmel_sha_final_req(struct atmel_sha_dev *dd)
{
        struct ahash_request *req = dd->req;
        struct atmel_sha_reqctx *ctx = ahash_request_ctx(req);
        int err = 0;
        int count;

        if (ctx->bufcnt >= ATMEL_SHA_DMA_THRESHOLD) {
                atmel_sha_fill_padding(ctx, 0);
                count = ctx->bufcnt;
                ctx->bufcnt = 0;
                err = atmel_sha_xmit_dma_map(dd, ctx, count, 1);
        } else {
                /* faster to handle last block with cpu */
                atmel_sha_fill_padding(ctx, 0);
                count = ctx->bufcnt;
                ctx->bufcnt = 0;
                err = atmel_sha_xmit_cpu(dd, ctx->buffer, count, 1);
        }

        dev_dbg(dd->dev, "final_req: err: %d\n", err);

        return err;
}

static void atmel_sha_copy_hash(struct ahash_request *req)
{
        struct atmel_sha_reqctx *ctx = ahash_request_ctx(req);
        u32 *hash = (u32 *)ctx->digest;
        unsigned int i, hashsize;

        switch (ctx->flags & SHA_FLAGS_ALGO_MASK) {
        case SHA_FLAGS_SHA1:
                hashsize = SHA1_DIGEST_SIZE;
                break;

        case SHA_FLAGS_SHA224:
        case SHA_FLAGS_SHA256:
                hashsize = SHA256_DIGEST_SIZE;
                break;

        case SHA_FLAGS_SHA384:
        case SHA_FLAGS_SHA512:
                hashsize = SHA512_DIGEST_SIZE;
                break;

        default:
                /* Should not happen... */
                return;
        }

        for (i = 0; i < hashsize / sizeof(u32); ++i)
                hash[i] = atmel_sha_read(ctx->dd, SHA_REG_DIGEST(i));
        ctx->flags |= SHA_FLAGS_RESTORE;
}

static void atmel_sha_copy_ready_hash(struct ahash_request *req)
{
        struct atmel_sha_reqctx *ctx = ahash_request_ctx(req);

        if (!req->result)
                return;

        switch (ctx->flags & SHA_FLAGS_ALGO_MASK) {
        default:
        case SHA_FLAGS_SHA1:
                memcpy(req->result, ctx->digest, SHA1_DIGEST_SIZE);
                break;

        case SHA_FLAGS_SHA224:
                memcpy(req->result, ctx->digest, SHA224_DIGEST_SIZE);
                break;

        case SHA_FLAGS_SHA256:
                memcpy(req->result, ctx->digest, SHA256_DIGEST_SIZE);
                break;

        case SHA_FLAGS_SHA384:
                memcpy(req->result, ctx->digest, SHA384_DIGEST_SIZE);
                break;

        case SHA_FLAGS_SHA512:
                memcpy(req->result, ctx->digest, SHA512_DIGEST_SIZE);
                break;
        }
}

static int atmel_sha_finish(struct ahash_request *req)
{
        struct atmel_sha_reqctx *ctx = ahash_request_ctx(req);
        struct atmel_sha_dev *dd = ctx->dd;

        if (ctx->digcnt[0] || ctx->digcnt[1])
                atmel_sha_copy_ready_hash(req);

        dev_dbg(dd->dev, "digcnt: 0x%llx 0x%llx, bufcnt: %zd\n", ctx->digcnt[1],
                ctx->digcnt[0], ctx->bufcnt);

        return 0;
}

static void atmel_sha_finish_req(struct ahash_request *req, int err)
{
        struct atmel_sha_reqctx *ctx = ahash_request_ctx(req);
        struct atmel_sha_dev *dd = ctx->dd;

        if (!err) {
                atmel_sha_copy_hash(req);
                if (SHA_FLAGS_FINAL & dd->flags)
                        err = atmel_sha_finish(req);
        } else {
                ctx->flags |= SHA_FLAGS_ERROR;
        }

        /* atomic operation is not needed here */
        (void)atmel_sha_complete(dd, err);
}

static int atmel_sha_hw_init(struct atmel_sha_dev *dd)
{
        int err;

        err = clk_enable(dd->iclk);
        if (err)
                return err;

        if (!(SHA_FLAGS_INIT & dd->flags)) {
                atmel_sha_write(dd, SHA_CR, SHA_CR_SWRST);
                dd->flags |= SHA_FLAGS_INIT;
                dd->err = 0;
        }

        return 0;
}

static inline unsigned int atmel_sha_get_version(struct atmel_sha_dev *dd)
{
        return atmel_sha_read(dd, SHA_HW_VERSION) & 0x00000fff;
}

static void atmel_sha_hw_version_init(struct atmel_sha_dev *dd)
{
        atmel_sha_hw_init(dd);

        dd->hw_version = atmel_sha_get_version(dd);

        dev_info(dd->dev, "version: 0x%x\n", dd->hw_version);

        clk_disable(dd->iclk);
}

static int atmel_sha_handle_queue(struct atmel_sha_dev *dd,
                                  struct ahash_request *req)
{
        struct crypto_async_request *async_req, *backlog;
        struct atmel_sha_ctx *ctx;
        unsigned long flags;
        bool start_async;
        int err = 0, ret = 0;

        spin_lock_irqsave(&dd->lock, flags);
        if (req)
                ret = ahash_enqueue_request(&dd->queue, req);

        if (SHA_FLAGS_BUSY & dd->flags) {
                spin_unlock_irqrestore(&dd->lock, flags);
                return ret;
        }

        backlog = crypto_get_backlog(&dd->queue);
        async_req = crypto_dequeue_request(&dd->queue);
        if (async_req)
                dd->flags |= SHA_FLAGS_BUSY;

        spin_unlock_irqrestore(&dd->lock, flags);

        if (!async_req)
                return ret;

        if (backlog)
                backlog->complete(backlog, -EINPROGRESS);

        ctx = crypto_tfm_ctx(async_req->tfm);

        dd->req = ahash_request_cast(async_req);
        start_async = (dd->req != req);
        dd->is_async = start_async;
        dd->force_complete = false;

        /* WARNING: ctx->start() MAY change dd->is_async. */
        err = ctx->start(dd);
        return (start_async) ? ret : err;
}

static int atmel_sha_done(struct atmel_sha_dev *dd);

static int atmel_sha_start(struct atmel_sha_dev *dd)
{
        struct ahash_request *req = dd->req;
        struct atmel_sha_reqctx *ctx = ahash_request_ctx(req);
        int err;

        dev_dbg(dd->dev, "handling new req, op: %lu, nbytes: %d\n",
                                                ctx->op, req->nbytes);

        err = atmel_sha_hw_init(dd);
        if (err)
                return atmel_sha_complete(dd, err);

        /*
         * atmel_sha_update_req() and atmel_sha_final_req() can return either:
         *  -EINPROGRESS: the hardware is busy and the SHA driver will resume
         *                its job later in the done_task.
         *                This is the main path.
         *
         *  0: the SHA driver can continue its job then release the hardware
         *     later, if needed, with atmel_sha_finish_req().
         *     This is the alternate path.
         *
         *  < 0: an error has occurred so atmel_sha_complete(dd, err) has
         *       already been called, hence the hardware has been released.
         *       The SHA driver must stop its job without calling
         *       atmel_sha_finish_req(), otherwise atmel_sha_complete() would
         *       be called a second time.
         *
         * Please note that currently, atmel_sha_final_req() never returns 0.
         */

        dd->resume = atmel_sha_done;
        if (ctx->op == SHA_OP_UPDATE) {
                err = atmel_sha_update_req(dd);
                if (!err && (ctx->flags & SHA_FLAGS_FINUP))
                        /* no final() after finup() */
                        err = atmel_sha_final_req(dd);
        } else if (ctx->op == SHA_OP_FINAL) {
                err = atmel_sha_final_req(dd);
        }

        if (!err)
                /* done_task will not finish it, so do it here */
                atmel_sha_finish_req(req, err);

        dev_dbg(dd->dev, "exit, err: %d\n", err);

        return err;
}

static int atmel_sha_enqueue(struct ahash_request *req, unsigned int op)
{
        struct atmel_sha_reqctx *ctx = ahash_request_ctx(req);
        struct atmel_sha_ctx *tctx = crypto_tfm_ctx(req->base.tfm);
        struct atmel_sha_dev *dd = tctx->dd;

        ctx->op = op;

        return atmel_sha_handle_queue(dd, req);
}

static int atmel_sha_update(struct ahash_request *req)
{
        struct atmel_sha_reqctx *ctx = ahash_request_ctx(req);

        if (!req->nbytes)
                return 0;

        ctx->total = req->nbytes;
        ctx->sg = req->src;
        ctx->offset = 0;

        if (ctx->flags & SHA_FLAGS_FINUP) {
                if (ctx->bufcnt + ctx->total < ATMEL_SHA_DMA_THRESHOLD)
                        /* faster to use CPU for short transfers */
                        ctx->flags |= SHA_FLAGS_CPU;
        } else if (ctx->bufcnt + ctx->total < ctx->buflen) {
                atmel_sha_append_sg(ctx);
                return 0;
        }
        return atmel_sha_enqueue(req, SHA_OP_UPDATE);
}

static int atmel_sha_final(struct ahash_request *req)
{
        struct atmel_sha_reqctx *ctx = ahash_request_ctx(req);

        ctx->flags |= SHA_FLAGS_FINUP;

        if (ctx->flags & SHA_FLAGS_ERROR)
                return 0; /* uncompleted hash is not needed */

        if (ctx->flags & SHA_FLAGS_PAD)
                /* copy ready hash (+ finalize hmac) */
                return atmel_sha_finish(req);

        return atmel_sha_enqueue(req, SHA_OP_FINAL);
}

static int atmel_sha_finup(struct ahash_request *req)
{
        struct atmel_sha_reqctx *ctx = ahash_request_ctx(req);
        int err1, err2;

        ctx->flags |= SHA_FLAGS_FINUP;

        err1 = atmel_sha_update(req);
        if (err1 == -EINPROGRESS ||
            (err1 == -EBUSY && (ahash_request_flags(req) &
                                CRYPTO_TFM_REQ_MAY_BACKLOG)))
                return err1;

        /*
         * final() always has to be called to clean up resources even if
         * update() failed, except when it returned -EINPROGRESS
         */
        err2 = atmel_sha_final(req);

        return err1 ?: err2;
}

static int atmel_sha_digest(struct ahash_request *req)
{
        return atmel_sha_init(req) ?: atmel_sha_finup(req);
}


static int atmel_sha_export(struct ahash_request *req, void *out)
{
        const struct atmel_sha_reqctx *ctx = ahash_request_ctx(req);

        memcpy(out, ctx, sizeof(*ctx));
        return 0;
}

static int atmel_sha_import(struct ahash_request *req, const void *in)
{
        struct atmel_sha_reqctx *ctx = ahash_request_ctx(req);

        memcpy(ctx, in, sizeof(*ctx));
        return 0;
}

static int atmel_sha_cra_init(struct crypto_tfm *tfm)
{
        struct atmel_sha_ctx *ctx = crypto_tfm_ctx(tfm);

        crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
                                 sizeof(struct atmel_sha_reqctx));
        ctx->start = atmel_sha_start;

        return 0;
}

static struct ahash_alg sha_1_256_algs[] = {
{
        .init           = atmel_sha_init,
        .update         = atmel_sha_update,
        .final          = atmel_sha_final,
        .finup          = atmel_sha_finup,
        .digest         = atmel_sha_digest,
        .export         = atmel_sha_export,
        .import         = atmel_sha_import,
        .halg = {
                .digestsize     = SHA1_DIGEST_SIZE,
                .statesize      = sizeof(struct atmel_sha_reqctx),
                .base   = {
                        .cra_name               = "sha1",
                        .cra_driver_name        = "atmel-sha1",
                        .cra_priority           = 100,
                        .cra_flags              = CRYPTO_ALG_ASYNC,
                        .cra_blocksize          = SHA1_BLOCK_SIZE,
                        .cra_ctxsize            = sizeof(struct atmel_sha_ctx),
                        .cra_alignmask          = 0,
                        .cra_module             = THIS_MODULE,
                        .cra_init               = atmel_sha_cra_init,
                }
        }
},
{
        .init           = atmel_sha_init,
        .update         = atmel_sha_update,
        .final          = atmel_sha_final,
        .finup          = atmel_sha_finup,
        .digest         = atmel_sha_digest,
        .export         = atmel_sha_export,
        .import         = atmel_sha_import,
        .halg = {
                .digestsize     = SHA256_DIGEST_SIZE,
                .statesize      = sizeof(struct atmel_sha_reqctx),
                .base   = {
                        .cra_name               = "sha256",
                        .cra_driver_name        = "atmel-sha256",
                        .cra_priority           = 100,
                        .cra_flags              = CRYPTO_ALG_ASYNC,
                        .cra_blocksize          = SHA256_BLOCK_SIZE,
                        .cra_ctxsize            = sizeof(struct atmel_sha_ctx),
                        .cra_alignmask          = 0,
                        .cra_module             = THIS_MODULE,
                        .cra_init               = atmel_sha_cra_init,
                }
        }
},
};

static struct ahash_alg sha_224_alg = {
        .init           = atmel_sha_init,
        .update         = atmel_sha_update,
        .final          = atmel_sha_final,
        .finup          = atmel_sha_finup,
        .digest         = atmel_sha_digest,
        .export         = atmel_sha_export,
        .import         = atmel_sha_import,
        .halg = {
                .digestsize     = SHA224_DIGEST_SIZE,
                .statesize      = sizeof(struct atmel_sha_reqctx),
                .base   = {
                        .cra_name               = "sha224",
                        .cra_driver_name        = "atmel-sha224",
                        .cra_priority           = 100,
                        .cra_flags              = CRYPTO_ALG_ASYNC,
                        .cra_blocksize          = SHA224_BLOCK_SIZE,
                        .cra_ctxsize            = sizeof(struct atmel_sha_ctx),
                        .cra_alignmask          = 0,
                        .cra_module             = THIS_MODULE,
                        .cra_init               = atmel_sha_cra_init,
                }
        }
};

static struct ahash_alg sha_384_512_algs[] = {
{
        .init           = atmel_sha_init,
        .update         = atmel_sha_update,
        .final          = atmel_sha_final,
        .finup          = atmel_sha_finup,
        .digest         = atmel_sha_digest,
        .export         = atmel_sha_export,
        .import         = atmel_sha_import,
        .halg = {
                .digestsize     = SHA384_DIGEST_SIZE,
                .statesize      = sizeof(struct atmel_sha_reqctx),
                .base   = {
                        .cra_name               = "sha384",
                        .cra_driver_name        = "atmel-sha384",
                        .cra_priority           = 100,
                        .cra_flags              = CRYPTO_ALG_ASYNC,
                        .cra_blocksize          = SHA384_BLOCK_SIZE,
                        .cra_ctxsize            = sizeof(struct atmel_sha_ctx),
                        .cra_alignmask          = 0x3,
                        .cra_module             = THIS_MODULE,
                        .cra_init               = atmel_sha_cra_init,
                }
        }
},
{
        .init           = atmel_sha_init,
        .update         = atmel_sha_update,
        .final          = atmel_sha_final,
        .finup          = atmel_sha_finup,
        .digest         = atmel_sha_digest,
        .export         = atmel_sha_export,
        .import         = atmel_sha_import,
        .halg = {
                .digestsize     = SHA512_DIGEST_SIZE,
                .statesize      = sizeof(struct atmel_sha_reqctx),
                .base   = {
                        .cra_name               = "sha512",
                        .cra_driver_name        = "atmel-sha512",
                        .cra_priority           = 100,
                        .cra_flags              = CRYPTO_ALG_ASYNC,
                        .cra_blocksize          = SHA512_BLOCK_SIZE,
                        .cra_ctxsize            = sizeof(struct atmel_sha_ctx),
                        .cra_alignmask          = 0x3,
                        .cra_module             = THIS_MODULE,
                        .cra_init               = atmel_sha_cra_init,
                }
        }
},
};
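
/*
 * These ahash algorithms are not called directly; they are consumed through
 * the generic crypto API once registered. A minimal sketch of a kernel-side
 * user, assuming the driver has probed successfully ("my_complete", "my_ctx",
 * the scatterlist and the buffers are placeholders; error handling trimmed):
 *
 *      struct crypto_ahash *tfm = crypto_alloc_ahash("sha256", 0, 0);
 *      struct ahash_request *req = ahash_request_alloc(tfm, GFP_KERNEL);
 *
 *      ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
 *                                 my_complete, my_ctx);
 *      ahash_request_set_crypt(req, src_sg, digest_buf, src_len);
 *      crypto_ahash_digest(req); // may return -EINPROGRESS (CRYPTO_ALG_ASYNC)
 *
 * The crypto core picks this driver when "atmel-sha256" wins on cra_priority
 * among the registered implementations of "sha256".
 */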

static void atmel_sha_queue_task(unsigned long data)
{
        struct atmel_sha_dev *dd = (struct atmel_sha_dev *)data;

        atmel_sha_handle_queue(dd, NULL);
}

static int atmel_sha_done(struct atmel_sha_dev *dd)
{
        int err = 0;

        if (SHA_FLAGS_CPU & dd->flags) {
                if (SHA_FLAGS_OUTPUT_READY & dd->flags) {
                        dd->flags &= ~SHA_FLAGS_OUTPUT_READY;
                        goto finish;
                }
        } else if (SHA_FLAGS_DMA_READY & dd->flags) {
                if (SHA_FLAGS_DMA_ACTIVE & dd->flags) {
                        dd->flags &= ~SHA_FLAGS_DMA_ACTIVE;
                        atmel_sha_update_dma_stop(dd);
                        if (dd->err) {
                                err = dd->err;
                                goto finish;
                        }
                }
                if (SHA_FLAGS_OUTPUT_READY & dd->flags) {
                        /* hash or semi-hash ready */
                        dd->flags &= ~(SHA_FLAGS_DMA_READY |
                                                SHA_FLAGS_OUTPUT_READY);
                        err = atmel_sha_update_dma_start(dd);
                        if (err != -EINPROGRESS)
                                goto finish;
                }
        }
        return err;

finish:
        /* finish current request */
        atmel_sha_finish_req(dd->req, err);

        return err;
}

static void atmel_sha_done_task(unsigned long data)
{
        struct atmel_sha_dev *dd = (struct atmel_sha_dev *)data;

        dd->is_async = true;
        (void)dd->resume(dd);
}

static irqreturn_t atmel_sha_irq(int irq, void *dev_id)
{
        struct atmel_sha_dev *sha_dd = dev_id;
        u32 reg;

        reg = atmel_sha_read(sha_dd, SHA_ISR);
        if (reg & atmel_sha_read(sha_dd, SHA_IMR)) {
                atmel_sha_write(sha_dd, SHA_IDR, reg);
                if (SHA_FLAGS_BUSY & sha_dd->flags) {
                        sha_dd->flags |= SHA_FLAGS_OUTPUT_READY;
                        if (!(SHA_FLAGS_CPU & sha_dd->flags))
                                sha_dd->flags |= SHA_FLAGS_DMA_READY;
                        tasklet_schedule(&sha_dd->done_task);
                } else {
                        dev_warn(sha_dd->dev, "SHA interrupt when no active requests.\n");
                }
                return IRQ_HANDLED;
        }

        return IRQ_NONE;
}


/* DMA transfer functions */

static bool atmel_sha_dma_check_aligned(struct atmel_sha_dev *dd,
                                        struct scatterlist *sg,
                                        size_t len)
{
        struct atmel_sha_dma *dma = &dd->dma_lch_in;
        struct ahash_request *req = dd->req;
        struct atmel_sha_reqctx *ctx = ahash_request_ctx(req);
        size_t bs = ctx->block_size;
        int nents;

        for (nents = 0; sg; sg = sg_next(sg), ++nents) {
                if (!IS_ALIGNED(sg->offset, sizeof(u32)))
                        return false;

                /*
                 * This is the last sg, the only one that is allowed to
                 * have an unaligned length.
                 */
                if (len <= sg->length) {
                        dma->nents = nents + 1;
                        dma->last_sg_length = sg->length;
                        sg->length = ALIGN(len, sizeof(u32));
                        return true;
                }

                /* All other sg lengths MUST be aligned to the block size. */
                if (!IS_ALIGNED(sg->length, bs))
                        return false;

                len -= sg->length;
        }

        return false;
}

static void atmel_sha_dma_callback2(void *data)
{
        struct atmel_sha_dev *dd = data;
        struct atmel_sha_dma *dma = &dd->dma_lch_in;
        struct scatterlist *sg;
        int nents;

        dmaengine_terminate_all(dma->chan);
        dma_unmap_sg(dd->dev, dma->sg, dma->nents, DMA_TO_DEVICE);

        sg = dma->sg;
        for (nents = 0; nents < dma->nents - 1; ++nents)
                sg = sg_next(sg);
        sg->length = dma->last_sg_length;

        dd->is_async = true;
        (void)atmel_sha_wait_for_data_ready(dd, dd->resume);
}

static int atmel_sha_dma_start(struct atmel_sha_dev *dd,
                               struct scatterlist *src,
                               size_t len,
                               atmel_sha_fn_t resume)
{
        struct atmel_sha_dma *dma = &dd->dma_lch_in;
        struct dma_slave_config *config = &dma->dma_conf;
        struct dma_chan *chan = dma->chan;
        struct dma_async_tx_descriptor *desc;
        dma_cookie_t cookie;
        unsigned int sg_len;
        int err;

        dd->resume = resume;

        /*
         * dma->nents has already been initialized by
         * atmel_sha_dma_check_aligned().
         */
        dma->sg = src;
        sg_len = dma_map_sg(dd->dev, dma->sg, dma->nents, DMA_TO_DEVICE);
        if (!sg_len) {
                err = -ENOMEM;
                goto exit;
        }

        config->src_maxburst = 16;
        config->dst_maxburst = 16;
        err = dmaengine_slave_config(chan, config);
        if (err)
                goto unmap_sg;

        desc = dmaengine_prep_slave_sg(chan, dma->sg, sg_len, DMA_MEM_TO_DEV,
                                       DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
        if (!desc) {
                err = -ENOMEM;
                goto unmap_sg;
        }

        desc->callback = atmel_sha_dma_callback2;
        desc->callback_param = dd;
        cookie = dmaengine_submit(desc);
        err = dma_submit_error(cookie);
        if (err)
                goto unmap_sg;

        dma_async_issue_pending(chan);

        return -EINPROGRESS;

unmap_sg:
        dma_unmap_sg(dd->dev, dma->sg, dma->nents, DMA_TO_DEVICE);
exit:
        return atmel_sha_complete(dd, err);
}

1564
1565/* CPU transfer functions */
1566
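/*
 * PIO transfer loop: write the buffered block into the Input Data Registers,
 * prefetch the next block from the scatterlist, then either keep going or
 * enable the DATRDY interrupt when the hardware is not ready yet.
 */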
static int atmel_sha_cpu_transfer(struct atmel_sha_dev *dd)
{
        struct ahash_request *req = dd->req;
        struct atmel_sha_reqctx *ctx = ahash_request_ctx(req);
        const u32 *words = (const u32 *)ctx->buffer;
        size_t i, num_words;
        u32 isr, din, din_inc;

        din_inc = (ctx->flags & SHA_FLAGS_IDATAR0) ? 0 : 1;
        for (;;) {
                /* Write data into the Input Data Registers. */
                num_words = DIV_ROUND_UP(ctx->bufcnt, sizeof(u32));
                for (i = 0, din = 0; i < num_words; ++i, din += din_inc)
                        atmel_sha_write(dd, SHA_REG_DIN(din), words[i]);

                ctx->offset += ctx->bufcnt;
                ctx->total -= ctx->bufcnt;

                if (!ctx->total)
                        break;

                /*
                 * Prepare next block:
                 * Fill ctx->buffer now with the next data to be written into
                 * IDATARx: it gives time for the SHA hardware to process
                 * the current data so the SHA_INT_DATARDY flag might be set
                 * in SHA_ISR when polling this register at the beginning of
                 * the next loop.
                 */
                ctx->bufcnt = min_t(size_t, ctx->block_size, ctx->total);
                scatterwalk_map_and_copy(ctx->buffer, ctx->sg,
                                         ctx->offset, ctx->bufcnt, 0);

                /* Wait for hardware to be ready again. */
                isr = atmel_sha_read(dd, SHA_ISR);
                if (!(isr & SHA_INT_DATARDY)) {
                        /* Not ready yet. */
                        dd->resume = atmel_sha_cpu_transfer;
                        atmel_sha_write(dd, SHA_IER, SHA_INT_DATARDY);
                        return -EINPROGRESS;
                }
        }

        if (unlikely(!(ctx->flags & SHA_FLAGS_WAIT_DATARDY)))
                return dd->cpu_transfer_complete(dd);

        return atmel_sha_wait_for_data_ready(dd, dd->cpu_transfer_complete);
}

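/*
 * Start a CPU (PIO) transfer of 'len' bytes from 'sg'. 'idatar0_only' forces
 * every word into IDATAR0 instead of consecutive registers, while
 * 'wait_data_ready' makes the transfer wait for DATRDY before calling
 * 'resume'.
 */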
static int atmel_sha_cpu_start(struct atmel_sha_dev *dd,
                               struct scatterlist *sg,
                               unsigned int len,
                               bool idatar0_only,
                               bool wait_data_ready,
                               atmel_sha_fn_t resume)
{
        struct ahash_request *req = dd->req;
        struct atmel_sha_reqctx *ctx = ahash_request_ctx(req);

        if (!len)
                return resume(dd);

        ctx->flags &= ~(SHA_FLAGS_IDATAR0 | SHA_FLAGS_WAIT_DATARDY);

        if (idatar0_only)
                ctx->flags |= SHA_FLAGS_IDATAR0;

        if (wait_data_ready)
                ctx->flags |= SHA_FLAGS_WAIT_DATARDY;

        ctx->sg = sg;
        ctx->total = len;
        ctx->offset = 0;

        /* Prepare the first block to be written. */
        ctx->bufcnt = min_t(size_t, ctx->block_size, ctx->total);
        scatterwalk_map_and_copy(ctx->buffer, ctx->sg,
                                 ctx->offset, ctx->bufcnt, 0);

        dd->cpu_transfer_complete = resume;
        return atmel_sha_cpu_transfer(dd);
}

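/*
 * Hash a small in-memory buffer by PIO, using the automatic padding of the
 * SHA hardware when 'auto_padding' is set. Without automatic padding the
 * data length must be a multiple of the block size.
 */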
static int atmel_sha_cpu_hash(struct atmel_sha_dev *dd,
                              const void *data, unsigned int datalen,
                              bool auto_padding,
                              atmel_sha_fn_t resume)
{
        struct ahash_request *req = dd->req;
        struct atmel_sha_reqctx *ctx = ahash_request_ctx(req);
        u32 msglen = (auto_padding) ? datalen : 0;
        u32 mr = SHA_MR_MODE_AUTO;

        if (!(IS_ALIGNED(datalen, ctx->block_size) || auto_padding))
                return atmel_sha_complete(dd, -EINVAL);

        mr |= (ctx->flags & SHA_FLAGS_ALGO_MASK);
        atmel_sha_write(dd, SHA_MR, mr);
        atmel_sha_write(dd, SHA_MSR, msglen);
        atmel_sha_write(dd, SHA_BCR, msglen);
        atmel_sha_write(dd, SHA_CR, SHA_CR_FIRST);

        sg_init_one(&dd->tmp, data, datalen);
        return atmel_sha_cpu_start(dd, &dd->tmp, datalen, false, true, resume);
}


/* hmac functions */

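/*
 * HMAC key holder: keys up to one SHA-512 block are stored inline in
 * 'buffer', longer keys are duplicated into 'keydup';
 * atmel_sha_hmac_setup() later hashes any key longer than the block size.
 */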
struct atmel_sha_hmac_key {
        bool                    valid;
        unsigned int            keylen;
        u8                      buffer[SHA512_BLOCK_SIZE];
        u8                      *keydup;
};

static inline void atmel_sha_hmac_key_init(struct atmel_sha_hmac_key *hkey)
{
        memset(hkey, 0, sizeof(*hkey));
}

static inline void atmel_sha_hmac_key_release(struct atmel_sha_hmac_key *hkey)
{
        kfree(hkey->keydup);
        memset(hkey, 0, sizeof(*hkey));
}

static inline int atmel_sha_hmac_key_set(struct atmel_sha_hmac_key *hkey,
                                         const u8 *key,
                                         unsigned int keylen)
{
        atmel_sha_hmac_key_release(hkey);

        if (keylen > sizeof(hkey->buffer)) {
                hkey->keydup = kmemdup(key, keylen, GFP_KERNEL);
                if (!hkey->keydup)
                        return -ENOMEM;

        } else {
                memcpy(hkey->buffer, key, keylen);
        }

        hkey->valid = true;
        hkey->keylen = keylen;
        return 0;
}

static inline bool atmel_sha_hmac_key_get(const struct atmel_sha_hmac_key *hkey,
                                          const u8 **key,
                                          unsigned int *keylen)
{
        if (!hkey->valid)
                return false;

        *keylen = hkey->keylen;
        *key = (hkey->keydup) ? hkey->keydup : hkey->buffer;
        return true;
}


struct atmel_sha_hmac_ctx {
        struct atmel_sha_ctx    base;

        struct atmel_sha_hmac_key       hkey;
        u32                     ipad[SHA512_BLOCK_SIZE / sizeof(u32)];
        u32                     opad[SHA512_BLOCK_SIZE / sizeof(u32)];
        atmel_sha_fn_t          resume;
};

static int atmel_sha_hmac_setup(struct atmel_sha_dev *dd,
                                atmel_sha_fn_t resume);
static int atmel_sha_hmac_prehash_key(struct atmel_sha_dev *dd,
                                      const u8 *key, unsigned int keylen);
static int atmel_sha_hmac_prehash_key_done(struct atmel_sha_dev *dd);
static int atmel_sha_hmac_compute_ipad_hash(struct atmel_sha_dev *dd);
static int atmel_sha_hmac_compute_opad_hash(struct atmel_sha_dev *dd);
static int atmel_sha_hmac_setup_done(struct atmel_sha_dev *dd);

static int atmel_sha_hmac_init_done(struct atmel_sha_dev *dd);
static int atmel_sha_hmac_final(struct atmel_sha_dev *dd);
static int atmel_sha_hmac_final_done(struct atmel_sha_dev *dd);
static int atmel_sha_hmac_digest2(struct atmel_sha_dev *dd);

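/*
 * Precompute the HMAC ipad/opad hashes: derive K' from the key (hashing it
 * first when it is longer than the block size), then hash K' + ipad and
 * K' + opad so their intermediate digests can be reloaded as initial hash
 * values later.
 */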
static int atmel_sha_hmac_setup(struct atmel_sha_dev *dd,
                                atmel_sha_fn_t resume)
{
        struct ahash_request *req = dd->req;
        struct atmel_sha_reqctx *ctx = ahash_request_ctx(req);
        struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
        struct atmel_sha_hmac_ctx *hmac = crypto_ahash_ctx(tfm);
        unsigned int keylen;
        const u8 *key;
        size_t bs;

        hmac->resume = resume;
        switch (ctx->flags & SHA_FLAGS_ALGO_MASK) {
        case SHA_FLAGS_SHA1:
                ctx->block_size = SHA1_BLOCK_SIZE;
                ctx->hash_size = SHA1_DIGEST_SIZE;
                break;

        case SHA_FLAGS_SHA224:
                ctx->block_size = SHA224_BLOCK_SIZE;
                ctx->hash_size = SHA256_DIGEST_SIZE;
                break;

        case SHA_FLAGS_SHA256:
                ctx->block_size = SHA256_BLOCK_SIZE;
                ctx->hash_size = SHA256_DIGEST_SIZE;
                break;

        case SHA_FLAGS_SHA384:
                ctx->block_size = SHA384_BLOCK_SIZE;
                ctx->hash_size = SHA512_DIGEST_SIZE;
                break;

        case SHA_FLAGS_SHA512:
                ctx->block_size = SHA512_BLOCK_SIZE;
                ctx->hash_size = SHA512_DIGEST_SIZE;
                break;

        default:
                return atmel_sha_complete(dd, -EINVAL);
        }
        bs = ctx->block_size;

        if (likely(!atmel_sha_hmac_key_get(&hmac->hkey, &key, &keylen)))
                return resume(dd);

        /* Compute K' from K. */
        if (unlikely(keylen > bs))
                return atmel_sha_hmac_prehash_key(dd, key, keylen);

        /* Prepare ipad. */
        memcpy((u8 *)hmac->ipad, key, keylen);
        memset((u8 *)hmac->ipad + keylen, 0, bs - keylen);
        return atmel_sha_hmac_compute_ipad_hash(dd);
}

static int atmel_sha_hmac_prehash_key(struct atmel_sha_dev *dd,
                                      const u8 *key, unsigned int keylen)
{
        return atmel_sha_cpu_hash(dd, key, keylen, true,
                                  atmel_sha_hmac_prehash_key_done);
}

static int atmel_sha_hmac_prehash_key_done(struct atmel_sha_dev *dd)
{
        struct ahash_request *req = dd->req;
        struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
        struct atmel_sha_hmac_ctx *hmac = crypto_ahash_ctx(tfm);
        struct atmel_sha_reqctx *ctx = ahash_request_ctx(req);
        size_t ds = crypto_ahash_digestsize(tfm);
        size_t bs = ctx->block_size;
        size_t i, num_words = ds / sizeof(u32);

        /* Prepare ipad. */
        for (i = 0; i < num_words; ++i)
                hmac->ipad[i] = atmel_sha_read(dd, SHA_REG_DIGEST(i));
        memset((u8 *)hmac->ipad + ds, 0, bs - ds);
        return atmel_sha_hmac_compute_ipad_hash(dd);
}

static int atmel_sha_hmac_compute_ipad_hash(struct atmel_sha_dev *dd)
{
        struct ahash_request *req = dd->req;
        struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
        struct atmel_sha_hmac_ctx *hmac = crypto_ahash_ctx(tfm);
        struct atmel_sha_reqctx *ctx = ahash_request_ctx(req);
        size_t bs = ctx->block_size;
        size_t i, num_words = bs / sizeof(u32);

        memcpy(hmac->opad, hmac->ipad, bs);
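        /* XOR K' with the ipad/opad constants defined by RFC 2104. */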
        for (i = 0; i < num_words; ++i) {
                hmac->ipad[i] ^= 0x36363636;
                hmac->opad[i] ^= 0x5c5c5c5c;
        }

        return atmel_sha_cpu_hash(dd, hmac->ipad, bs, false,
                                  atmel_sha_hmac_compute_opad_hash);
}

static int atmel_sha_hmac_compute_opad_hash(struct atmel_sha_dev *dd)
{
        struct ahash_request *req = dd->req;
        struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
        struct atmel_sha_hmac_ctx *hmac = crypto_ahash_ctx(tfm);
        struct atmel_sha_reqctx *ctx = ahash_request_ctx(req);
        size_t bs = ctx->block_size;
        size_t hs = ctx->hash_size;
        size_t i, num_words = hs / sizeof(u32);

        for (i = 0; i < num_words; ++i)
                hmac->ipad[i] = atmel_sha_read(dd, SHA_REG_DIGEST(i));
        return atmel_sha_cpu_hash(dd, hmac->opad, bs, false,
                                  atmel_sha_hmac_setup_done);
}

static int atmel_sha_hmac_setup_done(struct atmel_sha_dev *dd)
{
        struct ahash_request *req = dd->req;
        struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
        struct atmel_sha_hmac_ctx *hmac = crypto_ahash_ctx(tfm);
        struct atmel_sha_reqctx *ctx = ahash_request_ctx(req);
        size_t hs = ctx->hash_size;
        size_t i, num_words = hs / sizeof(u32);

        for (i = 0; i < num_words; ++i)
                hmac->opad[i] = atmel_sha_read(dd, SHA_REG_DIGEST(i));
        atmel_sha_hmac_key_release(&hmac->hkey);
        return hmac->resume(dd);
}

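/*
 * Entry point of the HMAC state machine: dispatch the queued operation once
 * the hardware has been initialized.
 */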
static int atmel_sha_hmac_start(struct atmel_sha_dev *dd)
{
        struct ahash_request *req = dd->req;
        struct atmel_sha_reqctx *ctx = ahash_request_ctx(req);
        int err;

        err = atmel_sha_hw_init(dd);
        if (err)
                return atmel_sha_complete(dd, err);

        switch (ctx->op) {
        case SHA_OP_INIT:
                err = atmel_sha_hmac_setup(dd, atmel_sha_hmac_init_done);
                break;

        case SHA_OP_UPDATE:
                dd->resume = atmel_sha_done;
                err = atmel_sha_update_req(dd);
                break;

        case SHA_OP_FINAL:
                dd->resume = atmel_sha_hmac_final;
                err = atmel_sha_final_req(dd);
                break;

        case SHA_OP_DIGEST:
                err = atmel_sha_hmac_setup(dd, atmel_sha_hmac_digest2);
                break;

        default:
                return atmel_sha_complete(dd, -EINVAL);
        }

        return err;
}

static int atmel_sha_hmac_setkey(struct crypto_ahash *tfm, const u8 *key,
                                 unsigned int keylen)
{
        struct atmel_sha_hmac_ctx *hmac = crypto_ahash_ctx(tfm);

        if (atmel_sha_hmac_key_set(&hmac->hkey, key, keylen)) {
                crypto_ahash_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
                return -EINVAL;
        }

        return 0;
}

static int atmel_sha_hmac_init(struct ahash_request *req)
{
        int err;

        err = atmel_sha_init(req);
        if (err)
                return err;

        return atmel_sha_enqueue(req, SHA_OP_INIT);
}

static int atmel_sha_hmac_init_done(struct atmel_sha_dev *dd)
{
        struct ahash_request *req = dd->req;
        struct atmel_sha_reqctx *ctx = ahash_request_ctx(req);
        struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
        struct atmel_sha_hmac_ctx *hmac = crypto_ahash_ctx(tfm);
        size_t bs = ctx->block_size;
        size_t hs = ctx->hash_size;

        ctx->bufcnt = 0;
        ctx->digcnt[0] = bs;
        ctx->digcnt[1] = 0;
        ctx->flags |= SHA_FLAGS_RESTORE;
        memcpy(ctx->digest, hmac->ipad, hs);
        return atmel_sha_complete(dd, 0);
}

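/*
 * Finish the HMAC computation: save d = SHA((K' + ipad) | msg), reload the
 * precomputed opad digest as User Initial Hash Value, then hash d again to
 * produce SHA((K' + opad) | d).
 */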
static int atmel_sha_hmac_final(struct atmel_sha_dev *dd)
{
        struct ahash_request *req = dd->req;
        struct atmel_sha_reqctx *ctx = ahash_request_ctx(req);
        struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
        struct atmel_sha_hmac_ctx *hmac = crypto_ahash_ctx(tfm);
        u32 *digest = (u32 *)ctx->digest;
        size_t ds = crypto_ahash_digestsize(tfm);
        size_t bs = ctx->block_size;
        size_t hs = ctx->hash_size;
        size_t i, num_words;
        u32 mr;

        /* Save d = SHA((K' + ipad) | msg). */
        num_words = ds / sizeof(u32);
        for (i = 0; i < num_words; ++i)
                digest[i] = atmel_sha_read(dd, SHA_REG_DIGEST(i));

        /* Restore context to finish computing SHA((K' + opad) | d). */
        atmel_sha_write(dd, SHA_CR, SHA_CR_WUIHV);
        num_words = hs / sizeof(u32);
        for (i = 0; i < num_words; ++i)
                atmel_sha_write(dd, SHA_REG_DIN(i), hmac->opad[i]);

        mr = SHA_MR_MODE_AUTO | SHA_MR_UIHV;
        mr |= (ctx->flags & SHA_FLAGS_ALGO_MASK);
        atmel_sha_write(dd, SHA_MR, mr);
        atmel_sha_write(dd, SHA_MSR, bs + ds);
        atmel_sha_write(dd, SHA_BCR, ds);
        atmel_sha_write(dd, SHA_CR, SHA_CR_FIRST);

        sg_init_one(&dd->tmp, digest, ds);
        return atmel_sha_cpu_start(dd, &dd->tmp, ds, false, true,
                                   atmel_sha_hmac_final_done);
}

static int atmel_sha_hmac_final_done(struct atmel_sha_dev *dd)
{
        /*
         * req->result might not be sizeof(u32) aligned, so copy the
         * digest into ctx->digest[] first, then memcpy() the data into
         * req->result.
         */
        atmel_sha_copy_hash(dd->req);
        atmel_sha_copy_ready_hash(dd->req);
        return atmel_sha_complete(dd, 0);
}

static int atmel_sha_hmac_digest(struct ahash_request *req)
{
        int err;

        err = atmel_sha_init(req);
        if (err)
                return err;

        return atmel_sha_enqueue(req, SHA_OP_DIGEST);
}

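/*
 * One-shot HMAC: load the precomputed ipad/opad digests into the User
 * Initial (Expected) Hash Value registers and let the hardware HMAC mode
 * process the whole message, by DMA when the request is large and aligned
 * enough.
 */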
static int atmel_sha_hmac_digest2(struct atmel_sha_dev *dd)
{
        struct ahash_request *req = dd->req;
        struct atmel_sha_reqctx *ctx = ahash_request_ctx(req);
        struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
        struct atmel_sha_hmac_ctx *hmac = crypto_ahash_ctx(tfm);
        size_t hs = ctx->hash_size;
        size_t i, num_words = hs / sizeof(u32);
        bool use_dma = false;
        u32 mr;

        /* Special case for empty message: not supported yet, so reject it. */
        if (!req->nbytes)
                return atmel_sha_complete(dd, -EINVAL);

        /* Check DMA threshold and alignment. */
        if (req->nbytes > ATMEL_SHA_DMA_THRESHOLD &&
            atmel_sha_dma_check_aligned(dd, req->src, req->nbytes))
                use_dma = true;

        /* Write both initial hash values to compute a HMAC. */
        atmel_sha_write(dd, SHA_CR, SHA_CR_WUIHV);
        for (i = 0; i < num_words; ++i)
                atmel_sha_write(dd, SHA_REG_DIN(i), hmac->ipad[i]);

        atmel_sha_write(dd, SHA_CR, SHA_CR_WUIEHV);
        for (i = 0; i < num_words; ++i)
                atmel_sha_write(dd, SHA_REG_DIN(i), hmac->opad[i]);

        /* Write the Mode, Message Size, Bytes Count then Control Registers. */
        mr = (SHA_MR_HMAC | SHA_MR_DUALBUFF);
        mr |= ctx->flags & SHA_FLAGS_ALGO_MASK;
        if (use_dma)
                mr |= SHA_MR_MODE_IDATAR0;
        else
                mr |= SHA_MR_MODE_AUTO;
        atmel_sha_write(dd, SHA_MR, mr);

        atmel_sha_write(dd, SHA_MSR, req->nbytes);
        atmel_sha_write(dd, SHA_BCR, req->nbytes);

        atmel_sha_write(dd, SHA_CR, SHA_CR_FIRST);

        /* Process data. */
        if (use_dma)
                return atmel_sha_dma_start(dd, req->src, req->nbytes,
                                           atmel_sha_hmac_final_done);

        return atmel_sha_cpu_start(dd, req->src, req->nbytes, false, true,
                                   atmel_sha_hmac_final_done);
}

static int atmel_sha_hmac_cra_init(struct crypto_tfm *tfm)
{
        struct atmel_sha_hmac_ctx *hmac = crypto_tfm_ctx(tfm);

        crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
                                 sizeof(struct atmel_sha_reqctx));
        hmac->base.start = atmel_sha_hmac_start;
        atmel_sha_hmac_key_init(&hmac->hkey);

        return 0;
}

static void atmel_sha_hmac_cra_exit(struct crypto_tfm *tfm)
{
        struct atmel_sha_hmac_ctx *hmac = crypto_tfm_ctx(tfm);

        atmel_sha_hmac_key_release(&hmac->hkey);
}

static struct ahash_alg sha_hmac_algs[] = {
{
        .init           = atmel_sha_hmac_init,
        .update         = atmel_sha_update,
        .final          = atmel_sha_final,
        .digest         = atmel_sha_hmac_digest,
        .setkey         = atmel_sha_hmac_setkey,
        .export         = atmel_sha_export,
        .import         = atmel_sha_import,
        .halg = {
                .digestsize     = SHA1_DIGEST_SIZE,
                .statesize      = sizeof(struct atmel_sha_reqctx),
                .base   = {
                        .cra_name               = "hmac(sha1)",
                        .cra_driver_name        = "atmel-hmac-sha1",
                        .cra_priority           = 100,
                        .cra_flags              = CRYPTO_ALG_ASYNC,
                        .cra_blocksize          = SHA1_BLOCK_SIZE,
                        .cra_ctxsize            = sizeof(struct atmel_sha_hmac_ctx),
                        .cra_alignmask          = 0,
                        .cra_module             = THIS_MODULE,
                        .cra_init               = atmel_sha_hmac_cra_init,
                        .cra_exit               = atmel_sha_hmac_cra_exit,
                }
        }
},
{
        .init           = atmel_sha_hmac_init,
        .update         = atmel_sha_update,
        .final          = atmel_sha_final,
        .digest         = atmel_sha_hmac_digest,
        .setkey         = atmel_sha_hmac_setkey,
        .export         = atmel_sha_export,
        .import         = atmel_sha_import,
        .halg = {
                .digestsize     = SHA224_DIGEST_SIZE,
                .statesize      = sizeof(struct atmel_sha_reqctx),
                .base   = {
                        .cra_name               = "hmac(sha224)",
                        .cra_driver_name        = "atmel-hmac-sha224",
                        .cra_priority           = 100,
                        .cra_flags              = CRYPTO_ALG_ASYNC,
                        .cra_blocksize          = SHA224_BLOCK_SIZE,
                        .cra_ctxsize            = sizeof(struct atmel_sha_hmac_ctx),
                        .cra_alignmask          = 0,
                        .cra_module             = THIS_MODULE,
                        .cra_init               = atmel_sha_hmac_cra_init,
                        .cra_exit               = atmel_sha_hmac_cra_exit,
                }
        }
},
{
        .init           = atmel_sha_hmac_init,
        .update         = atmel_sha_update,
        .final          = atmel_sha_final,
        .digest         = atmel_sha_hmac_digest,
        .setkey         = atmel_sha_hmac_setkey,
        .export         = atmel_sha_export,
        .import         = atmel_sha_import,
        .halg = {
                .digestsize     = SHA256_DIGEST_SIZE,
                .statesize      = sizeof(struct atmel_sha_reqctx),
                .base   = {
                        .cra_name               = "hmac(sha256)",
                        .cra_driver_name        = "atmel-hmac-sha256",
                        .cra_priority           = 100,
                        .cra_flags              = CRYPTO_ALG_ASYNC,
                        .cra_blocksize          = SHA256_BLOCK_SIZE,
                        .cra_ctxsize            = sizeof(struct atmel_sha_hmac_ctx),
                        .cra_alignmask          = 0,
                        .cra_module             = THIS_MODULE,
                        .cra_init               = atmel_sha_hmac_cra_init,
                        .cra_exit               = atmel_sha_hmac_cra_exit,
                }
        }
},
{
        .init           = atmel_sha_hmac_init,
        .update         = atmel_sha_update,
        .final          = atmel_sha_final,
        .digest         = atmel_sha_hmac_digest,
        .setkey         = atmel_sha_hmac_setkey,
        .export         = atmel_sha_export,
        .import         = atmel_sha_import,
        .halg = {
                .digestsize     = SHA384_DIGEST_SIZE,
                .statesize      = sizeof(struct atmel_sha_reqctx),
                .base   = {
                        .cra_name               = "hmac(sha384)",
                        .cra_driver_name        = "atmel-hmac-sha384",
                        .cra_priority           = 100,
                        .cra_flags              = CRYPTO_ALG_ASYNC,
                        .cra_blocksize          = SHA384_BLOCK_SIZE,
                        .cra_ctxsize            = sizeof(struct atmel_sha_hmac_ctx),
                        .cra_alignmask          = 0,
                        .cra_module             = THIS_MODULE,
                        .cra_init               = atmel_sha_hmac_cra_init,
                        .cra_exit               = atmel_sha_hmac_cra_exit,
                }
        }
},
{
        .init           = atmel_sha_hmac_init,
        .update         = atmel_sha_update,
        .final          = atmel_sha_final,
        .digest         = atmel_sha_hmac_digest,
        .setkey         = atmel_sha_hmac_setkey,
        .export         = atmel_sha_export,
        .import         = atmel_sha_import,
        .halg = {
                .digestsize     = SHA512_DIGEST_SIZE,
                .statesize      = sizeof(struct atmel_sha_reqctx),
                .base   = {
                        .cra_name               = "hmac(sha512)",
                        .cra_driver_name        = "atmel-hmac-sha512",
                        .cra_priority           = 100,
                        .cra_flags              = CRYPTO_ALG_ASYNC,
                        .cra_blocksize          = SHA512_BLOCK_SIZE,
                        .cra_ctxsize            = sizeof(struct atmel_sha_hmac_ctx),
                        .cra_alignmask          = 0,
                        .cra_module             = THIS_MODULE,
                        .cra_init               = atmel_sha_hmac_cra_init,
                        .cra_exit               = atmel_sha_hmac_cra_exit,
                }
        }
},
};

#ifdef CONFIG_CRYPTO_DEV_ATMEL_AUTHENC
/* authenc functions */

static int atmel_sha_authenc_init2(struct atmel_sha_dev *dd);
static int atmel_sha_authenc_init_done(struct atmel_sha_dev *dd);
static int atmel_sha_authenc_final_done(struct atmel_sha_dev *dd);


struct atmel_sha_authenc_ctx {
        struct crypto_ahash     *tfm;
};

struct atmel_sha_authenc_reqctx {
        struct atmel_sha_reqctx base;

        atmel_aes_authenc_fn_t  cb;
        struct atmel_aes_dev    *aes_dev;

        /* _init() parameters. */
        struct scatterlist      *assoc;
        u32                     assoclen;
        u32                     textlen;

        /* _final() parameters. */
        u32                     *digest;
        unsigned int            digestlen;
};

static void atmel_sha_authenc_complete(struct crypto_async_request *areq,
                                       int err)
{
        struct ahash_request *req = areq->data;
        struct atmel_sha_authenc_reqctx *authctx = ahash_request_ctx(req);

        authctx->cb(authctx->aes_dev, err, authctx->base.dd->is_async);
}

static int atmel_sha_authenc_start(struct atmel_sha_dev *dd)
{
        struct ahash_request *req = dd->req;
        struct atmel_sha_authenc_reqctx *authctx = ahash_request_ctx(req);
        int err;

        /*
         * Force atmel_sha_complete() to call req->base.complete(), ie
         * atmel_sha_authenc_complete(), which in turn calls authctx->cb().
         */
        dd->force_complete = true;

        err = atmel_sha_hw_init(dd);
        return authctx->cb(authctx->aes_dev, err, dd->is_async);
}

bool atmel_sha_authenc_is_ready(void)
{
        struct atmel_sha_ctx dummy;

        dummy.dd = NULL;
        return (atmel_sha_find_dev(&dummy) != NULL);
}
EXPORT_SYMBOL_GPL(atmel_sha_authenc_is_ready);

unsigned int atmel_sha_authenc_get_reqsize(void)
{
        return sizeof(struct atmel_sha_authenc_reqctx);
}
EXPORT_SYMBOL_GPL(atmel_sha_authenc_get_reqsize);

struct atmel_sha_authenc_ctx *atmel_sha_authenc_spawn(unsigned long mode)
{
        struct atmel_sha_authenc_ctx *auth;
        struct crypto_ahash *tfm;
        struct atmel_sha_ctx *tctx;
        const char *name;
        int err = -EINVAL;

        switch (mode & SHA_FLAGS_MODE_MASK) {
        case SHA_FLAGS_HMAC_SHA1:
                name = "atmel-hmac-sha1";
                break;

        case SHA_FLAGS_HMAC_SHA224:
                name = "atmel-hmac-sha224";
                break;

        case SHA_FLAGS_HMAC_SHA256:
                name = "atmel-hmac-sha256";
                break;

        case SHA_FLAGS_HMAC_SHA384:
                name = "atmel-hmac-sha384";
                break;

        case SHA_FLAGS_HMAC_SHA512:
                name = "atmel-hmac-sha512";
                break;

        default:
                goto error;
        }

        tfm = crypto_alloc_ahash(name, 0, 0);
        if (IS_ERR(tfm)) {
                err = PTR_ERR(tfm);
                goto error;
        }
        tctx = crypto_ahash_ctx(tfm);
        tctx->start = atmel_sha_authenc_start;
        tctx->flags = mode;

        auth = kzalloc(sizeof(*auth), GFP_KERNEL);
        if (!auth) {
                err = -ENOMEM;
                goto err_free_ahash;
        }
        auth->tfm = tfm;

        return auth;

err_free_ahash:
        crypto_free_ahash(tfm);
error:
        return ERR_PTR(err);
}
EXPORT_SYMBOL_GPL(atmel_sha_authenc_spawn);

void atmel_sha_authenc_free(struct atmel_sha_authenc_ctx *auth)
{
        if (auth)
                crypto_free_ahash(auth->tfm);
        kfree(auth);
}
EXPORT_SYMBOL_GPL(atmel_sha_authenc_free);

int atmel_sha_authenc_setkey(struct atmel_sha_authenc_ctx *auth,
                             const u8 *key, unsigned int keylen,
                             u32 *flags)
{
        struct crypto_ahash *tfm = auth->tfm;
        int err;

        crypto_ahash_clear_flags(tfm, CRYPTO_TFM_REQ_MASK);
        crypto_ahash_set_flags(tfm, *flags & CRYPTO_TFM_REQ_MASK);
        err = crypto_ahash_setkey(tfm, key, keylen);
        *flags = crypto_ahash_get_flags(tfm);

        return err;
}
EXPORT_SYMBOL_GPL(atmel_sha_authenc_setkey);

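/*
 * Called by the atmel-aes driver to queue the hash part of an authenc
 * request on the SHA hardware; 'cb' is invoked once the request reaches
 * the head of the queue.
 */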
int atmel_sha_authenc_schedule(struct ahash_request *req,
                               struct atmel_sha_authenc_ctx *auth,
                               atmel_aes_authenc_fn_t cb,
                               struct atmel_aes_dev *aes_dev)
{
        struct atmel_sha_authenc_reqctx *authctx = ahash_request_ctx(req);
        struct atmel_sha_reqctx *ctx = &authctx->base;
        struct crypto_ahash *tfm = auth->tfm;
        struct atmel_sha_ctx *tctx = crypto_ahash_ctx(tfm);
        struct atmel_sha_dev *dd;

        /* Reset request context (MUST be done first). */
        memset(authctx, 0, sizeof(*authctx));

        /* Get SHA device. */
        dd = atmel_sha_find_dev(tctx);
        if (!dd)
                return cb(aes_dev, -ENODEV, false);

        /* Init request context. */
        ctx->dd = dd;
        ctx->buflen = SHA_BUFFER_LEN;
        authctx->cb = cb;
        authctx->aes_dev = aes_dev;
        ahash_request_set_tfm(req, tfm);
        ahash_request_set_callback(req, 0, atmel_sha_authenc_complete, req);

        return atmel_sha_handle_queue(dd, req);
}
EXPORT_SYMBOL_GPL(atmel_sha_authenc_schedule);

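/*
 * Run the HMAC precomputation then hash the associated data; 'assoclen'
 * must be a multiple of sizeof(u32) since the assoc scatterlist is fed to
 * the hardware by PIO.
 */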
int atmel_sha_authenc_init(struct ahash_request *req,
                           struct scatterlist *assoc, unsigned int assoclen,
                           unsigned int textlen,
                           atmel_aes_authenc_fn_t cb,
                           struct atmel_aes_dev *aes_dev)
{
        struct atmel_sha_authenc_reqctx *authctx = ahash_request_ctx(req);
        struct atmel_sha_reqctx *ctx = &authctx->base;
        struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
        struct atmel_sha_hmac_ctx *hmac = crypto_ahash_ctx(tfm);
        struct atmel_sha_dev *dd = ctx->dd;

        if (unlikely(!IS_ALIGNED(assoclen, sizeof(u32))))
                return atmel_sha_complete(dd, -EINVAL);

        authctx->cb = cb;
        authctx->aes_dev = aes_dev;
        authctx->assoc = assoc;
        authctx->assoclen = assoclen;
        authctx->textlen = textlen;

        ctx->flags = hmac->base.flags;
        return atmel_sha_hmac_setup(dd, atmel_sha_authenc_init2);
}
EXPORT_SYMBOL_GPL(atmel_sha_authenc_init);

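/*
 * Resume callback of atmel_sha_authenc_init(): load the ipad/opad digests,
 * program the mode and message size registers, then start feeding the
 * associated data to IDATAR0.
 */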
static int atmel_sha_authenc_init2(struct atmel_sha_dev *dd)
{
        struct ahash_request *req = dd->req;
        struct atmel_sha_authenc_reqctx *authctx = ahash_request_ctx(req);
        struct atmel_sha_reqctx *ctx = &authctx->base;
        struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
        struct atmel_sha_hmac_ctx *hmac = crypto_ahash_ctx(tfm);
        size_t hs = ctx->hash_size;
        size_t i, num_words = hs / sizeof(u32);
        u32 mr, msg_size;

        atmel_sha_write(dd, SHA_CR, SHA_CR_WUIHV);
        for (i = 0; i < num_words; ++i)
                atmel_sha_write(dd, SHA_REG_DIN(i), hmac->ipad[i]);

        atmel_sha_write(dd, SHA_CR, SHA_CR_WUIEHV);
        for (i = 0; i < num_words; ++i)
                atmel_sha_write(dd, SHA_REG_DIN(i), hmac->opad[i]);

        mr = (SHA_MR_MODE_IDATAR0 |
              SHA_MR_HMAC |
              SHA_MR_DUALBUFF);
        mr |= ctx->flags & SHA_FLAGS_ALGO_MASK;
        atmel_sha_write(dd, SHA_MR, mr);

        msg_size = authctx->assoclen + authctx->textlen;
        atmel_sha_write(dd, SHA_MSR, msg_size);
        atmel_sha_write(dd, SHA_BCR, msg_size);

        atmel_sha_write(dd, SHA_CR, SHA_CR_FIRST);

        /* Process assoc data. */
        return atmel_sha_cpu_start(dd, authctx->assoc, authctx->assoclen,
                                   true, false,
                                   atmel_sha_authenc_init_done);
}

static int atmel_sha_authenc_init_done(struct atmel_sha_dev *dd)
{
        struct ahash_request *req = dd->req;
        struct atmel_sha_authenc_reqctx *authctx = ahash_request_ctx(req);

        return authctx->cb(authctx->aes_dev, 0, dd->is_async);
}

int atmel_sha_authenc_final(struct ahash_request *req,
                            u32 *digest, unsigned int digestlen,
                            atmel_aes_authenc_fn_t cb,
                            struct atmel_aes_dev *aes_dev)
{
        struct atmel_sha_authenc_reqctx *authctx = ahash_request_ctx(req);
        struct atmel_sha_reqctx *ctx = &authctx->base;
        struct atmel_sha_dev *dd = ctx->dd;

        switch (ctx->flags & SHA_FLAGS_ALGO_MASK) {
        case SHA_FLAGS_SHA1:
                authctx->digestlen = SHA1_DIGEST_SIZE;
                break;

        case SHA_FLAGS_SHA224:
                authctx->digestlen = SHA224_DIGEST_SIZE;
                break;

        case SHA_FLAGS_SHA256:
                authctx->digestlen = SHA256_DIGEST_SIZE;
                break;

        case SHA_FLAGS_SHA384:
                authctx->digestlen = SHA384_DIGEST_SIZE;
                break;

        case SHA_FLAGS_SHA512:
                authctx->digestlen = SHA512_DIGEST_SIZE;
                break;

        default:
                return atmel_sha_complete(dd, -EINVAL);
        }
        if (authctx->digestlen > digestlen)
                authctx->digestlen = digestlen;

        authctx->cb = cb;
        authctx->aes_dev = aes_dev;
        authctx->digest = digest;
        return atmel_sha_wait_for_data_ready(dd,
                                             atmel_sha_authenc_final_done);
}
EXPORT_SYMBOL_GPL(atmel_sha_authenc_final);

static int atmel_sha_authenc_final_done(struct atmel_sha_dev *dd)
{
        struct ahash_request *req = dd->req;
        struct atmel_sha_authenc_reqctx *authctx = ahash_request_ctx(req);
        size_t i, num_words = authctx->digestlen / sizeof(u32);

        for (i = 0; i < num_words; ++i)
                authctx->digest[i] = atmel_sha_read(dd, SHA_REG_DIGEST(i));

        return atmel_sha_complete(dd, 0);
}

void atmel_sha_authenc_abort(struct ahash_request *req)
{
        struct atmel_sha_authenc_reqctx *authctx = ahash_request_ctx(req);
        struct atmel_sha_reqctx *ctx = &authctx->base;
        struct atmel_sha_dev *dd = ctx->dd;

        /* Prevent atmel_sha_complete() from calling req->base.complete(). */
        dd->is_async = false;
        dd->force_complete = false;
        (void)atmel_sha_complete(dd, 0);
}
EXPORT_SYMBOL_GPL(atmel_sha_authenc_abort);

#endif /* CONFIG_CRYPTO_DEV_ATMEL_AUTHENC */


static void atmel_sha_unregister_algs(struct atmel_sha_dev *dd)
{
        int i;

        if (dd->caps.has_hmac)
                for (i = 0; i < ARRAY_SIZE(sha_hmac_algs); i++)
                        crypto_unregister_ahash(&sha_hmac_algs[i]);

        for (i = 0; i < ARRAY_SIZE(sha_1_256_algs); i++)
                crypto_unregister_ahash(&sha_1_256_algs[i]);

        if (dd->caps.has_sha224)
                crypto_unregister_ahash(&sha_224_alg);

        if (dd->caps.has_sha_384_512) {
                for (i = 0; i < ARRAY_SIZE(sha_384_512_algs); i++)
                        crypto_unregister_ahash(&sha_384_512_algs[i]);
        }
}

static int atmel_sha_register_algs(struct atmel_sha_dev *dd)
{
        int err, i, j;

        for (i = 0; i < ARRAY_SIZE(sha_1_256_algs); i++) {
                err = crypto_register_ahash(&sha_1_256_algs[i]);
                if (err)
                        goto err_sha_1_256_algs;
        }

        if (dd->caps.has_sha224) {
                err = crypto_register_ahash(&sha_224_alg);
                if (err)
                        goto err_sha_224_algs;
        }

        if (dd->caps.has_sha_384_512) {
                for (i = 0; i < ARRAY_SIZE(sha_384_512_algs); i++) {
                        err = crypto_register_ahash(&sha_384_512_algs[i]);
                        if (err)
                                goto err_sha_384_512_algs;
                }
        }

        if (dd->caps.has_hmac) {
                for (i = 0; i < ARRAY_SIZE(sha_hmac_algs); i++) {
                        err = crypto_register_ahash(&sha_hmac_algs[i]);
                        if (err)
                                goto err_sha_hmac_algs;
                }
        }

        return 0;

        /*i = ARRAY_SIZE(sha_hmac_algs);*/
err_sha_hmac_algs:
        for (j = 0; j < i; j++)
                crypto_unregister_ahash(&sha_hmac_algs[j]);
        i = ARRAY_SIZE(sha_384_512_algs);
err_sha_384_512_algs:
        for (j = 0; j < i; j++)
                crypto_unregister_ahash(&sha_384_512_algs[j]);
        crypto_unregister_ahash(&sha_224_alg);
err_sha_224_algs:
        i = ARRAY_SIZE(sha_1_256_algs);
err_sha_1_256_algs:
        for (j = 0; j < i; j++)
                crypto_unregister_ahash(&sha_1_256_algs[j]);

        return err;
}

static bool atmel_sha_filter(struct dma_chan *chan, void *slave)
{
        struct at_dma_slave     *sl = slave;

        if (sl && sl->dma_dev == chan->device->dev) {
                chan->private = sl;
                return true;
        } else {
                return false;
        }
}

static int atmel_sha_dma_init(struct atmel_sha_dev *dd,
                                struct crypto_platform_data *pdata)
{
        dma_cap_mask_t mask_in;

        /* Try to grab DMA channel */
        dma_cap_zero(mask_in);
        dma_cap_set(DMA_SLAVE, mask_in);

        dd->dma_lch_in.chan = dma_request_slave_channel_compat(mask_in,
                        atmel_sha_filter, &pdata->dma_slave->rxdata, dd->dev, "tx");
        if (!dd->dma_lch_in.chan) {
                dev_warn(dd->dev, "no DMA channel available\n");
                return -ENODEV;
        }

        dd->dma_lch_in.dma_conf.direction = DMA_MEM_TO_DEV;
        dd->dma_lch_in.dma_conf.dst_addr = dd->phys_base +
                SHA_REG_DIN(0);
        dd->dma_lch_in.dma_conf.src_maxburst = 1;
        dd->dma_lch_in.dma_conf.src_addr_width =
                DMA_SLAVE_BUSWIDTH_4_BYTES;
        dd->dma_lch_in.dma_conf.dst_maxburst = 1;
        dd->dma_lch_in.dma_conf.dst_addr_width =
                DMA_SLAVE_BUSWIDTH_4_BYTES;
        dd->dma_lch_in.dma_conf.device_fc = false;

        return 0;
}

static void atmel_sha_dma_cleanup(struct atmel_sha_dev *dd)
{
        dma_release_channel(dd->dma_lch_in.chan);
}

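/*
 * Capabilities depend on the IP revision: only the major version number is
 * checked, and unknown revisions fall back to the base SHA1/SHA256 feature
 * set.
 */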
static void atmel_sha_get_cap(struct atmel_sha_dev *dd)
{
        dd->caps.has_dma = 0;
        dd->caps.has_dualbuff = 0;
        dd->caps.has_sha224 = 0;
        dd->caps.has_sha_384_512 = 0;
        dd->caps.has_uihv = 0;
        dd->caps.has_hmac = 0;

        /* keep only major version number */
        switch (dd->hw_version & 0xff0) {
        case 0x510:
                dd->caps.has_dma = 1;
                dd->caps.has_dualbuff = 1;
                dd->caps.has_sha224 = 1;
                dd->caps.has_sha_384_512 = 1;
                dd->caps.has_uihv = 1;
                dd->caps.has_hmac = 1;
                break;
        case 0x420:
                dd->caps.has_dma = 1;
                dd->caps.has_dualbuff = 1;
                dd->caps.has_sha224 = 1;
                dd->caps.has_sha_384_512 = 1;
                dd->caps.has_uihv = 1;
                break;
        case 0x410:
                dd->caps.has_dma = 1;
                dd->caps.has_dualbuff = 1;
                dd->caps.has_sha224 = 1;
                dd->caps.has_sha_384_512 = 1;
                break;
        case 0x400:
                dd->caps.has_dma = 1;
                dd->caps.has_dualbuff = 1;
                dd->caps.has_sha224 = 1;
                break;
        case 0x320:
                break;
        default:
                dev_warn(dd->dev,
                                "unhandled SHA hardware version, using minimum capabilities\n");
                break;
        }
}

#if defined(CONFIG_OF)
static const struct of_device_id atmel_sha_dt_ids[] = {
        { .compatible = "atmel,at91sam9g46-sha" },
        { /* sentinel */ }
};

MODULE_DEVICE_TABLE(of, atmel_sha_dt_ids);

static struct crypto_platform_data *atmel_sha_of_init(struct platform_device *pdev)
{
        struct device_node *np = pdev->dev.of_node;
        struct crypto_platform_data *pdata;

        if (!np) {
                dev_err(&pdev->dev, "device node not found\n");
                return ERR_PTR(-EINVAL);
        }

        pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
        if (!pdata)
                return ERR_PTR(-ENOMEM);

        pdata->dma_slave = devm_kzalloc(&pdev->dev,
                                        sizeof(*(pdata->dma_slave)),
                                        GFP_KERNEL);
        if (!pdata->dma_slave)
                return ERR_PTR(-ENOMEM);

        return pdata;
}
#else /* CONFIG_OF */
static inline struct crypto_platform_data *atmel_sha_of_init(struct platform_device *dev)
{
        return ERR_PTR(-EINVAL);
}
#endif

static int atmel_sha_probe(struct platform_device *pdev)
{
        struct atmel_sha_dev *sha_dd;
        struct crypto_platform_data     *pdata;
        struct device *dev = &pdev->dev;
        struct resource *sha_res;
        int err;

        sha_dd = devm_kzalloc(&pdev->dev, sizeof(*sha_dd), GFP_KERNEL);
        if (!sha_dd) {
                err = -ENOMEM;
                goto sha_dd_err;
        }

        sha_dd->dev = dev;

        platform_set_drvdata(pdev, sha_dd);

        INIT_LIST_HEAD(&sha_dd->list);
        spin_lock_init(&sha_dd->lock);

        tasklet_init(&sha_dd->done_task, atmel_sha_done_task,
                                        (unsigned long)sha_dd);
        tasklet_init(&sha_dd->queue_task, atmel_sha_queue_task,
                                        (unsigned long)sha_dd);

        crypto_init_queue(&sha_dd->queue, ATMEL_SHA_QUEUE_LENGTH);

        /* Get the base address */
        sha_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        if (!sha_res) {
                dev_err(dev, "no MEM resource info\n");
                err = -ENODEV;
                goto res_err;
        }
        sha_dd->phys_base = sha_res->start;

        /* Get the IRQ */
        sha_dd->irq = platform_get_irq(pdev, 0);
        if (sha_dd->irq < 0) {
                dev_err(dev, "no IRQ resource info\n");
                err = sha_dd->irq;
                goto res_err;
        }

        err = devm_request_irq(&pdev->dev, sha_dd->irq, atmel_sha_irq,
                               IRQF_SHARED, "atmel-sha", sha_dd);
        if (err) {
                dev_err(dev, "unable to request sha irq.\n");
                goto res_err;
        }

        /* Initializing the clock */
        sha_dd->iclk = devm_clk_get(&pdev->dev, "sha_clk");
        if (IS_ERR(sha_dd->iclk)) {
                dev_err(dev, "clock initialization failed.\n");
                err = PTR_ERR(sha_dd->iclk);
                goto res_err;
        }

        sha_dd->io_base = devm_ioremap_resource(&pdev->dev, sha_res);
        if (IS_ERR(sha_dd->io_base)) {
                dev_err(dev, "can't ioremap\n");
                err = PTR_ERR(sha_dd->io_base);
                goto res_err;
        }

        err = clk_prepare(sha_dd->iclk);
        if (err)
                goto res_err;

        atmel_sha_hw_version_init(sha_dd);

        atmel_sha_get_cap(sha_dd);

        if (sha_dd->caps.has_dma) {
                pdata = pdev->dev.platform_data;
                if (!pdata) {
                        pdata = atmel_sha_of_init(pdev);
                        if (IS_ERR(pdata)) {
                                dev_err(&pdev->dev, "platform data not available\n");
                                err = PTR_ERR(pdata);
                                goto iclk_unprepare;
                        }
                }
                if (!pdata->dma_slave) {
                        err = -ENXIO;
                        goto iclk_unprepare;
                }
                err = atmel_sha_dma_init(sha_dd, pdata);
                if (err)
                        goto err_sha_dma;

                dev_info(dev, "using %s for DMA transfers\n",
                                dma_chan_name(sha_dd->dma_lch_in.chan));
        }

        spin_lock(&atmel_sha.lock);
        list_add_tail(&sha_dd->list, &atmel_sha.dev_list);
        spin_unlock(&atmel_sha.lock);

        err = atmel_sha_register_algs(sha_dd);
        if (err)
                goto err_algs;

        dev_info(dev, "Atmel SHA1/SHA256%s%s\n",
                        sha_dd->caps.has_sha224 ? "/SHA224" : "",
                        sha_dd->caps.has_sha_384_512 ? "/SHA384/SHA512" : "");

        return 0;

err_algs:
        spin_lock(&atmel_sha.lock);
        list_del(&sha_dd->list);
        spin_unlock(&atmel_sha.lock);
        if (sha_dd->caps.has_dma)
                atmel_sha_dma_cleanup(sha_dd);
err_sha_dma:
iclk_unprepare:
        clk_unprepare(sha_dd->iclk);
res_err:
        tasklet_kill(&sha_dd->queue_task);
        tasklet_kill(&sha_dd->done_task);
sha_dd_err:
        dev_err(dev, "initialization failed.\n");

        return err;
}

static int atmel_sha_remove(struct platform_device *pdev)
{
        struct atmel_sha_dev *sha_dd;

        sha_dd = platform_get_drvdata(pdev);
        if (!sha_dd)
                return -ENODEV;
        spin_lock(&atmel_sha.lock);
        list_del(&sha_dd->list);
        spin_unlock(&atmel_sha.lock);

        atmel_sha_unregister_algs(sha_dd);

        tasklet_kill(&sha_dd->queue_task);
        tasklet_kill(&sha_dd->done_task);

        if (sha_dd->caps.has_dma)
                atmel_sha_dma_cleanup(sha_dd);

        clk_unprepare(sha_dd->iclk);

        return 0;
}

static struct platform_driver atmel_sha_driver = {
        .probe          = atmel_sha_probe,
        .remove         = atmel_sha_remove,
        .driver         = {
                .name   = "atmel_sha",
                .of_match_table = of_match_ptr(atmel_sha_dt_ids),
        },
};

module_platform_driver(atmel_sha_driver);

MODULE_DESCRIPTION("Atmel SHA (1/256/224/384/512) hw acceleration support.");
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Nicolas Royer - Eukréa Electromatique");