/*
 * Cryptographic API.
 *
 * Support for SAHARA cryptographic accelerator.
 *
 * Copyright (c) 2014 Steffen Trumtrar <s.trumtrar@pengutronix.de>
 * Copyright (c) 2013 Vista Silicon S.L.
 * Author: Javier Martin <javier.martin@vista-silicon.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation.
 *
 * Based on omap-aes.c and tegra-aes.c
 */

#include <crypto/algapi.h>
#include <crypto/aes.h>
#include <crypto/hash.h>
#include <crypto/internal/hash.h>
#include <crypto/scatterwalk.h>
#include <crypto/sha.h>

#include <linux/clk.h>
#include <linux/crypto.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/kernel.h>
#include <linux/kthread.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>

#define SHA_BUFFER_LEN          PAGE_SIZE
#define SAHARA_MAX_SHA_BLOCK_SIZE       SHA256_BLOCK_SIZE

#define SAHARA_NAME "sahara"
#define SAHARA_VERSION_3        3
#define SAHARA_VERSION_4        4
#define SAHARA_TIMEOUT_MS       1000
#define SAHARA_MAX_HW_DESC      2
#define SAHARA_MAX_HW_LINK      20

#define FLAGS_MODE_MASK         0x000f
#define FLAGS_ENCRYPT           BIT(0)
#define FLAGS_CBC               BIT(1)
#define FLAGS_NEW_KEY           BIT(3)

#define SAHARA_HDR_BASE                 0x00800000
#define SAHARA_HDR_SKHA_ALG_AES         0
#define SAHARA_HDR_SKHA_OP_ENC          (1 << 2)
#define SAHARA_HDR_SKHA_MODE_ECB        (0 << 3)
#define SAHARA_HDR_SKHA_MODE_CBC        (1 << 3)
#define SAHARA_HDR_FORM_DATA            (5 << 16)
#define SAHARA_HDR_FORM_KEY             (8 << 16)
#define SAHARA_HDR_LLO                  (1 << 24)
#define SAHARA_HDR_CHA_SKHA             (1 << 28)
#define SAHARA_HDR_CHA_MDHA             (2 << 28)
#define SAHARA_HDR_PARITY_BIT           (1 << 31)

#define SAHARA_HDR_MDHA_SET_MODE_MD_KEY 0x20880000
#define SAHARA_HDR_MDHA_SET_MODE_HASH   0x208D0000
#define SAHARA_HDR_MDHA_HASH            0xA0850000
#define SAHARA_HDR_MDHA_STORE_DIGEST    0x20820000
#define SAHARA_HDR_MDHA_ALG_SHA1        0
#define SAHARA_HDR_MDHA_ALG_MD5         1
#define SAHARA_HDR_MDHA_ALG_SHA256      2
#define SAHARA_HDR_MDHA_ALG_SHA224      3
#define SAHARA_HDR_MDHA_PDATA           (1 << 2)
#define SAHARA_HDR_MDHA_HMAC            (1 << 3)
#define SAHARA_HDR_MDHA_INIT            (1 << 5)
#define SAHARA_HDR_MDHA_IPAD            (1 << 6)
#define SAHARA_HDR_MDHA_OPAD            (1 << 7)
#define SAHARA_HDR_MDHA_SWAP            (1 << 8)
#define SAHARA_HDR_MDHA_MAC_FULL        (1 << 9)
#define SAHARA_HDR_MDHA_SSL             (1 << 10)

/* SAHARA can only process one request at a time */
#define SAHARA_QUEUE_LENGTH     1

#define SAHARA_REG_VERSION      0x00
#define SAHARA_REG_DAR          0x04
#define SAHARA_REG_CONTROL      0x08
#define         SAHARA_CONTROL_SET_THROTTLE(x)  (((x) & 0xff) << 24)
#define         SAHARA_CONTROL_SET_MAXBURST(x)  (((x) & 0xff) << 16)
#define         SAHARA_CONTROL_RNG_AUTORSD      (1 << 7)
#define         SAHARA_CONTROL_ENABLE_INT       (1 << 4)
#define SAHARA_REG_CMD          0x0C
#define         SAHARA_CMD_RESET                (1 << 0)
#define         SAHARA_CMD_CLEAR_INT            (1 << 8)
#define         SAHARA_CMD_CLEAR_ERR            (1 << 9)
#define         SAHARA_CMD_SINGLE_STEP          (1 << 10)
#define         SAHARA_CMD_MODE_BATCH           (1 << 16)
#define         SAHARA_CMD_MODE_DEBUG           (1 << 18)
#define SAHARA_REG_STATUS       0x10
#define         SAHARA_STATUS_GET_STATE(x)      ((x) & 0x7)
#define                 SAHARA_STATE_IDLE       0
#define                 SAHARA_STATE_BUSY       1
#define                 SAHARA_STATE_ERR        2
#define                 SAHARA_STATE_FAULT      3
#define                 SAHARA_STATE_COMPLETE   4
#define                 SAHARA_STATE_COMP_FLAG  (1 << 2)
#define         SAHARA_STATUS_DAR_FULL          (1 << 3)
#define         SAHARA_STATUS_ERROR             (1 << 4)
#define         SAHARA_STATUS_SECURE            (1 << 5)
#define         SAHARA_STATUS_FAIL              (1 << 6)
#define         SAHARA_STATUS_INIT              (1 << 7)
#define         SAHARA_STATUS_RNG_RESEED        (1 << 8)
#define         SAHARA_STATUS_ACTIVE_RNG        (1 << 9)
#define         SAHARA_STATUS_ACTIVE_MDHA       (1 << 10)
#define         SAHARA_STATUS_ACTIVE_SKHA       (1 << 11)
#define         SAHARA_STATUS_MODE_BATCH        (1 << 16)
#define         SAHARA_STATUS_MODE_DEDICATED    (1 << 17)
#define         SAHARA_STATUS_MODE_DEBUG        (1 << 18)
#define         SAHARA_STATUS_GET_ISTATE(x)     (((x) >> 24) & 0xff)
#define SAHARA_REG_ERRSTATUS    0x14
#define         SAHARA_ERRSTATUS_GET_SOURCE(x)  ((x) & 0xf)
#define                 SAHARA_ERRSOURCE_CHA    14
#define                 SAHARA_ERRSOURCE_DMA    15
#define         SAHARA_ERRSTATUS_DMA_DIR        (1 << 8)
#define         SAHARA_ERRSTATUS_GET_DMASZ(x)   (((x) >> 9) & 0x3)
#define         SAHARA_ERRSTATUS_GET_DMASRC(x)  (((x) >> 13) & 0x7)
#define         SAHARA_ERRSTATUS_GET_CHASRC(x)  (((x) >> 16) & 0xfff)
#define         SAHARA_ERRSTATUS_GET_CHAERR(x)  (((x) >> 28) & 0x3)
#define SAHARA_REG_FADDR        0x18
#define SAHARA_REG_CDAR         0x1C
#define SAHARA_REG_IDAR         0x20

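/*
 * Self-describing DMA structures handed to the SAHARA descriptor engine.
 * A descriptor carries two length/pointer pairs (len1/p1, len2/p2) and the
 * physical address of the next descriptor in the chain (0 terminates it).
 * As used by this driver, a pointer is either a direct buffer address
 * (headers with SAHARA_HDR_LLO set) or the address of a chain of
 * sahara_hw_link entries describing a scatter/gather list.
 */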
struct sahara_hw_desc {
        u32     hdr;
        u32     len1;
        u32     p1;
        u32     len2;
        u32     p2;
        u32     next;
};

struct sahara_hw_link {
        u32     len;
        u32     p;
        u32     next;
};

struct sahara_ctx {
        unsigned long flags;

        /* AES-specific context */
        int keylen;
        u8 key[AES_KEYSIZE_128];
        struct crypto_ablkcipher *fallback;

        /* SHA-specific context */
        struct crypto_shash *shash_fallback;
};

struct sahara_aes_reqctx {
        unsigned long mode;
};

/*
 * struct sahara_sha_reqctx - private data per request
 * @buf: holds data for requests smaller than block_size
 * @rembuf: used to prepare one block_size-aligned request
 * @context: hw-specific context for request. Digest is extracted from this
 * @mode: specifies what type of hw-descriptor needs to be built
 * @digest_size: length of digest for this request
 * @context_size: length of hw-context for this request.
 *                Always digest_size + 4
 * @buf_cnt: number of bytes saved in buf
 * @sg_in_idx: index of the descriptor owning the input links, 0 if none
 * @in_sg: scatterlist for input data
 * @in_sg_chain: scatterlists for chained input data
 * @total: total number of bytes for transfer
 * @last: is this the last block
 * @first: is this the first block
 * @active: inside a transfer
 */
struct sahara_sha_reqctx {
        u8                      buf[SAHARA_MAX_SHA_BLOCK_SIZE];
        u8                      rembuf[SAHARA_MAX_SHA_BLOCK_SIZE];
        u8                      context[SHA256_DIGEST_SIZE + 4];
        unsigned int            mode;
        unsigned int            digest_size;
        unsigned int            context_size;
        unsigned int            buf_cnt;
        unsigned int            sg_in_idx;
        struct scatterlist      *in_sg;
        struct scatterlist      in_sg_chain[2];
        size_t                  total;
        unsigned int            last;
        unsigned int            first;
        unsigned int            active;
};

struct sahara_dev {
        struct device           *device;
        unsigned int            version;
        void __iomem            *regs_base;
        struct clk              *clk_ipg;
        struct clk              *clk_ahb;
        struct mutex            queue_mutex;
        struct task_struct      *kthread;
        struct completion       dma_completion;

        struct sahara_ctx       *ctx;
        spinlock_t              lock;
        struct crypto_queue     queue;
        unsigned long           flags;

        struct sahara_hw_desc   *hw_desc[SAHARA_MAX_HW_DESC];
        dma_addr_t              hw_phys_desc[SAHARA_MAX_HW_DESC];

        u8                      *key_base;
        dma_addr_t              key_phys_base;

        u8                      *iv_base;
        dma_addr_t              iv_phys_base;

        u8                      *context_base;
        dma_addr_t              context_phys_base;

        struct sahara_hw_link   *hw_link[SAHARA_MAX_HW_LINK];
        dma_addr_t              hw_phys_link[SAHARA_MAX_HW_LINK];

        size_t                  total;
        struct scatterlist      *in_sg;
        int                     nb_in_sg;
        struct scatterlist      *out_sg;
        int                     nb_out_sg;

        u32                     error;
};

static struct sahara_dev *dev_ptr;

static inline void sahara_write(struct sahara_dev *dev, u32 data, u32 reg)
{
        writel(data, dev->regs_base + reg);
}

static inline unsigned int sahara_read(struct sahara_dev *dev, u32 reg)
{
        return readl(dev->regs_base + reg);
}

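/*
 * Descriptor headers carry a parity bit: the driver keeps the total number
 * of set bits in the header word odd, toggling SAHARA_HDR_PARITY_BIT
 * whenever another mode bit is ORed into a base header value (see also
 * sahara_sha_init_hdr()).
 */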
static u32 sahara_aes_key_hdr(struct sahara_dev *dev)
{
        u32 hdr = SAHARA_HDR_BASE | SAHARA_HDR_SKHA_ALG_AES |
                        SAHARA_HDR_FORM_KEY | SAHARA_HDR_LLO |
                        SAHARA_HDR_CHA_SKHA | SAHARA_HDR_PARITY_BIT;

        if (dev->flags & FLAGS_CBC) {
                hdr |= SAHARA_HDR_SKHA_MODE_CBC;
                hdr ^= SAHARA_HDR_PARITY_BIT;
        }

        if (dev->flags & FLAGS_ENCRYPT) {
                hdr |= SAHARA_HDR_SKHA_OP_ENC;
                hdr ^= SAHARA_HDR_PARITY_BIT;
        }

        return hdr;
}

static u32 sahara_aes_data_link_hdr(struct sahara_dev *dev)
{
        return SAHARA_HDR_BASE | SAHARA_HDR_FORM_DATA |
                        SAHARA_HDR_CHA_SKHA | SAHARA_HDR_PARITY_BIT;
}

static const char *sahara_err_src[16] = {
        "No error",
        "Header error",
        "Descriptor length error",
        "Descriptor length or pointer error",
        "Link length error",
        "Link pointer error",
        "Input buffer error",
        "Output buffer error",
        "Output buffer starvation",
        "Internal state fault",
        "General descriptor problem",
        "Reserved",
        "Descriptor address error",
        "Link address error",
        "CHA error",
        "DMA error"
};

static const char *sahara_err_dmasize[4] = {
        "Byte transfer",
        "Half-word transfer",
        "Word transfer",
        "Reserved"
};

static const char *sahara_err_dmasrc[8] = {
        "No error",
        "AHB bus error",
        "Internal IP bus error",
        "Parity error",
        "DMA crosses 256 byte boundary",
        "DMA is busy",
        "Reserved",
        "DMA HW error"
};

static const char *sahara_cha_errsrc[12] = {
        "Input buffer non-empty",
        "Illegal address",
        "Illegal mode",
        "Illegal data size",
        "Illegal key size",
        "Write during processing",
        "CTX read during processing",
        "HW error",
        "Input buffer disabled/underflow",
        "Output buffer disabled/overflow",
        "DES key parity error",
        "Reserved"
};

static const char *sahara_cha_err[4] = { "No error", "SKHA", "MDHA", "RNG" };

static void sahara_decode_error(struct sahara_dev *dev, unsigned int error)
{
        u8 source = SAHARA_ERRSTATUS_GET_SOURCE(error);
        /* CHASRC is a bit field; ffs() reports the lowest bit that is set */
        u16 chasrc = ffs(SAHARA_ERRSTATUS_GET_CHASRC(error));

        dev_err(dev->device, "%s: Error Register = 0x%08x\n", __func__, error);

        dev_err(dev->device, "  - %s.\n", sahara_err_src[source]);

        if (source == SAHARA_ERRSOURCE_DMA) {
                if (error & SAHARA_ERRSTATUS_DMA_DIR)
                        dev_err(dev->device, "          * DMA read.\n");
                else
                        dev_err(dev->device, "          * DMA write.\n");

                dev_err(dev->device, "          * %s.\n",
                       sahara_err_dmasize[SAHARA_ERRSTATUS_GET_DMASZ(error)]);
                dev_err(dev->device, "          * %s.\n",
                       sahara_err_dmasrc[SAHARA_ERRSTATUS_GET_DMASRC(error)]);
        } else if (source == SAHARA_ERRSOURCE_CHA) {
                dev_err(dev->device, "          * %s.\n",
                        sahara_cha_errsrc[chasrc]);
                dev_err(dev->device, "          * %s.\n",
                       sahara_cha_err[SAHARA_ERRSTATUS_GET_CHAERR(error)]);
        }
        dev_err(dev->device, "\n");
}

static const char *sahara_state[4] = { "Idle", "Busy", "Error", "HW Fault" };

static void sahara_decode_status(struct sahara_dev *dev, unsigned int status)
{
        u8 state;

        if (!IS_ENABLED(DEBUG))
                return;

        state = SAHARA_STATUS_GET_STATE(status);

        dev_dbg(dev->device, "%s: Status Register = 0x%08x\n",
                __func__, status);

        dev_dbg(dev->device, "  - State = %d:\n", state);
        if (state & SAHARA_STATE_COMP_FLAG)
                dev_dbg(dev->device, "          * Descriptor completed. IRQ pending.\n");

        dev_dbg(dev->device, "          * %s.\n",
               sahara_state[state & ~SAHARA_STATE_COMP_FLAG]);

        if (status & SAHARA_STATUS_DAR_FULL)
                dev_dbg(dev->device, "  - DAR Full.\n");
        if (status & SAHARA_STATUS_ERROR)
                dev_dbg(dev->device, "  - Error.\n");
        if (status & SAHARA_STATUS_SECURE)
                dev_dbg(dev->device, "  - Secure.\n");
        if (status & SAHARA_STATUS_FAIL)
                dev_dbg(dev->device, "  - Fail.\n");
        if (status & SAHARA_STATUS_RNG_RESEED)
                dev_dbg(dev->device, "  - RNG Reseed Request.\n");
        if (status & SAHARA_STATUS_ACTIVE_RNG)
                dev_dbg(dev->device, "  - RNG Active.\n");
        if (status & SAHARA_STATUS_ACTIVE_MDHA)
                dev_dbg(dev->device, "  - MDHA Active.\n");
        if (status & SAHARA_STATUS_ACTIVE_SKHA)
                dev_dbg(dev->device, "  - SKHA Active.\n");

        if (status & SAHARA_STATUS_MODE_BATCH)
                dev_dbg(dev->device, "  - Batch Mode.\n");
        else if (status & SAHARA_STATUS_MODE_DEDICATED)
                dev_dbg(dev->device, "  - Dedicated Mode.\n");
        else if (status & SAHARA_STATUS_MODE_DEBUG)
                dev_dbg(dev->device, "  - Debug Mode.\n");

        dev_dbg(dev->device, "  - Internal state = 0x%02x\n",
               SAHARA_STATUS_GET_ISTATE(status));

        dev_dbg(dev->device, "Current DAR: 0x%08x\n",
                sahara_read(dev, SAHARA_REG_CDAR));
        dev_dbg(dev->device, "Initial DAR: 0x%08x\n\n",
                sahara_read(dev, SAHARA_REG_IDAR));
}

static void sahara_dump_descriptors(struct sahara_dev *dev)
{
        int i;

        if (!IS_ENABLED(DEBUG))
                return;

        for (i = 0; i < SAHARA_MAX_HW_DESC; i++) {
                dev_dbg(dev->device, "Descriptor (%d) (%pad):\n",
                        i, &dev->hw_phys_desc[i]);
                dev_dbg(dev->device, "\thdr = 0x%08x\n", dev->hw_desc[i]->hdr);
                dev_dbg(dev->device, "\tlen1 = %u\n", dev->hw_desc[i]->len1);
                dev_dbg(dev->device, "\tp1 = 0x%08x\n", dev->hw_desc[i]->p1);
                dev_dbg(dev->device, "\tlen2 = %u\n", dev->hw_desc[i]->len2);
                dev_dbg(dev->device, "\tp2 = 0x%08x\n", dev->hw_desc[i]->p2);
                dev_dbg(dev->device, "\tnext = 0x%08x\n",
                        dev->hw_desc[i]->next);
        }
        dev_dbg(dev->device, "\n");
}

static void sahara_dump_links(struct sahara_dev *dev)
{
        int i;

        if (!IS_ENABLED(DEBUG))
                return;

        for (i = 0; i < SAHARA_MAX_HW_LINK; i++) {
                dev_dbg(dev->device, "Link (%d) (%pad):\n",
                        i, &dev->hw_phys_link[i]);
                dev_dbg(dev->device, "\tlen = %u\n", dev->hw_link[i]->len);
                dev_dbg(dev->device, "\tp = 0x%08x\n", dev->hw_link[i]->p);
                dev_dbg(dev->device, "\tnext = 0x%08x\n",
                        dev->hw_link[i]->next);
        }
        dev_dbg(dev->device, "\n");
}

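/*
 * Build the descriptor chain for one AES request. If a new key (and, for
 * CBC, a fresh IV) must be loaded, hw_desc[0] becomes a key/IV-load
 * descriptor chained to the data descriptor; otherwise the data descriptor
 * comes first. The data descriptor's p1/p2 point at chains of hw_link
 * entries covering the source and destination scatterlists. Writing the
 * first descriptor's address to the DAR register kicks off the transfer.
 */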
static int sahara_hw_descriptor_create(struct sahara_dev *dev)
{
        struct sahara_ctx *ctx = dev->ctx;
        struct scatterlist *sg;
        int ret;
        int i, j;
        int idx = 0;

        /* Copy new key if necessary */
        if (ctx->flags & FLAGS_NEW_KEY) {
                memcpy(dev->key_base, ctx->key, ctx->keylen);
                ctx->flags &= ~FLAGS_NEW_KEY;

                if (dev->flags & FLAGS_CBC) {
                        dev->hw_desc[idx]->len1 = AES_BLOCK_SIZE;
                        dev->hw_desc[idx]->p1 = dev->iv_phys_base;
                } else {
                        dev->hw_desc[idx]->len1 = 0;
                        dev->hw_desc[idx]->p1 = 0;
                }
                dev->hw_desc[idx]->len2 = ctx->keylen;
                dev->hw_desc[idx]->p2 = dev->key_phys_base;
                dev->hw_desc[idx]->next = dev->hw_phys_desc[1];

                dev->hw_desc[idx]->hdr = sahara_aes_key_hdr(dev);

                idx++;
        }

        dev->nb_in_sg = sg_nents_for_len(dev->in_sg, dev->total);
        if (dev->nb_in_sg < 0) {
                dev_err(dev->device, "Invalid numbers of src SG.\n");
                return dev->nb_in_sg;
        }
        dev->nb_out_sg = sg_nents_for_len(dev->out_sg, dev->total);
        if (dev->nb_out_sg < 0) {
                dev_err(dev->device, "Invalid numbers of dst SG.\n");
                return dev->nb_out_sg;
        }
        if ((dev->nb_in_sg + dev->nb_out_sg) > SAHARA_MAX_HW_LINK) {
                dev_err(dev->device, "not enough hw links (%d)\n",
                        dev->nb_in_sg + dev->nb_out_sg);
                return -EINVAL;
        }

        ret = dma_map_sg(dev->device, dev->in_sg, dev->nb_in_sg,
                         DMA_TO_DEVICE);
        if (ret != dev->nb_in_sg) {
                dev_err(dev->device, "couldn't map in sg\n");
                return -EINVAL;
        }
        ret = dma_map_sg(dev->device, dev->out_sg, dev->nb_out_sg,
                         DMA_FROM_DEVICE);
        if (ret != dev->nb_out_sg) {
                dev_err(dev->device, "couldn't map out sg\n");
                goto unmap_in;
        }

        /* Create input links */
        dev->hw_desc[idx]->p1 = dev->hw_phys_link[0];
        sg = dev->in_sg;
        for (i = 0; i < dev->nb_in_sg; i++) {
                dev->hw_link[i]->len = sg->length;
                dev->hw_link[i]->p = sg->dma_address;
                if (i == (dev->nb_in_sg - 1)) {
                        dev->hw_link[i]->next = 0;
                } else {
                        dev->hw_link[i]->next = dev->hw_phys_link[i + 1];
                        sg = sg_next(sg);
                }
        }

        /* Create output links */
        dev->hw_desc[idx]->p2 = dev->hw_phys_link[i];
        sg = dev->out_sg;
        for (j = i; j < dev->nb_out_sg + i; j++) {
                dev->hw_link[j]->len = sg->length;
                dev->hw_link[j]->p = sg->dma_address;
                if (j == (dev->nb_out_sg + i - 1)) {
                        dev->hw_link[j]->next = 0;
                } else {
                        dev->hw_link[j]->next = dev->hw_phys_link[j + 1];
                        sg = sg_next(sg);
                }
        }

        /* Fill remaining fields of hw_desc[1] */
        dev->hw_desc[idx]->hdr = sahara_aes_data_link_hdr(dev);
        dev->hw_desc[idx]->len1 = dev->total;
        dev->hw_desc[idx]->len2 = dev->total;
        dev->hw_desc[idx]->next = 0;

        sahara_dump_descriptors(dev);
        sahara_dump_links(dev);

        sahara_write(dev, dev->hw_phys_desc[0], SAHARA_REG_DAR);

        return 0;

unmap_in:
        dma_unmap_sg(dev->device, dev->in_sg, dev->nb_in_sg,
                DMA_TO_DEVICE);

        return -EINVAL;
}

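/*
 * Run one AES request to completion. This runs in the queue kthread, so it
 * may sleep: after programming the descriptor chain it blocks on
 * dma_completion, which sahara_irq_handler() signals once the hardware
 * reports the descriptors done (or an error).
 */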
static int sahara_aes_process(struct ablkcipher_request *req)
{
        struct sahara_dev *dev = dev_ptr;
        struct sahara_ctx *ctx;
        struct sahara_aes_reqctx *rctx;
        int ret;
        unsigned long timeout;

        /* Request is ready to be dispatched by the device */
        dev_dbg(dev->device,
                "dispatch request (nbytes=%d, src=%p, dst=%p)\n",
                req->nbytes, req->src, req->dst);

        /* assign new request to device */
        dev->total = req->nbytes;
        dev->in_sg = req->src;
        dev->out_sg = req->dst;

        rctx = ablkcipher_request_ctx(req);
        ctx = crypto_ablkcipher_ctx(crypto_ablkcipher_reqtfm(req));
        rctx->mode &= FLAGS_MODE_MASK;
        dev->flags = (dev->flags & ~FLAGS_MODE_MASK) | rctx->mode;

        if ((dev->flags & FLAGS_CBC) && req->info)
                memcpy(dev->iv_base, req->info, AES_BLOCK_SIZE);

        /* assign new context to device */
        dev->ctx = ctx;

        reinit_completion(&dev->dma_completion);

        ret = sahara_hw_descriptor_create(dev);
        if (ret)
                return -EINVAL;

        timeout = wait_for_completion_timeout(&dev->dma_completion,
                                msecs_to_jiffies(SAHARA_TIMEOUT_MS));
        if (!timeout) {
                dev_err(dev->device, "AES timeout\n");
                return -ETIMEDOUT;
        }

        dma_unmap_sg(dev->device, dev->out_sg, dev->nb_out_sg,
                DMA_FROM_DEVICE);
        dma_unmap_sg(dev->device, dev->in_sg, dev->nb_in_sg,
                DMA_TO_DEVICE);

        return 0;
}

static int sahara_aes_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
                             unsigned int keylen)
{
        struct sahara_ctx *ctx = crypto_ablkcipher_ctx(tfm);
        int ret;

        ctx->keylen = keylen;

        /* SAHARA only supports 128-bit keys */
        if (keylen == AES_KEYSIZE_128) {
                memcpy(ctx->key, key, keylen);
                ctx->flags |= FLAGS_NEW_KEY;
                return 0;
        }

        if (keylen != AES_KEYSIZE_192 && keylen != AES_KEYSIZE_256)
                return -EINVAL;

        /*
         * The requested key size is not supported by HW, do a fallback.
         */
        ctx->fallback->base.crt_flags &= ~CRYPTO_TFM_REQ_MASK;
        ctx->fallback->base.crt_flags |=
                (tfm->base.crt_flags & CRYPTO_TFM_REQ_MASK);

        ret = crypto_ablkcipher_setkey(ctx->fallback, key, keylen);
        if (ret) {
                struct crypto_tfm *tfm_aux = crypto_ablkcipher_tfm(tfm);

                tfm_aux->crt_flags &= ~CRYPTO_TFM_RES_MASK;
                tfm_aux->crt_flags |=
                        (ctx->fallback->base.crt_flags & CRYPTO_TFM_RES_MASK);
        }
        return ret;
}

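/*
 * Queue a request for the worker thread. Completion is reported
 * asynchronously through req->base.complete(); since SAHARA can only
 * process one request at a time, the queue depth is 1 and requests are
 * strictly serialized.
 */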
static int sahara_aes_crypt(struct ablkcipher_request *req, unsigned long mode)
{
        struct sahara_aes_reqctx *rctx = ablkcipher_request_ctx(req);
        struct sahara_dev *dev = dev_ptr;
        int err = 0;

        dev_dbg(dev->device, "nbytes: %d, enc: %d, cbc: %d\n",
                req->nbytes, !!(mode & FLAGS_ENCRYPT), !!(mode & FLAGS_CBC));

        if (!IS_ALIGNED(req->nbytes, AES_BLOCK_SIZE)) {
                dev_err(dev->device,
                        "request size is not a multiple of the AES block size\n");
                return -EINVAL;
        }

        rctx->mode = mode;

        mutex_lock(&dev->queue_mutex);
        err = ablkcipher_enqueue_request(&dev->queue, req);
        mutex_unlock(&dev->queue_mutex);

        wake_up_process(dev->kthread);

        return err;
}

static int sahara_aes_ecb_encrypt(struct ablkcipher_request *req)
{
        struct crypto_tfm *tfm =
                crypto_ablkcipher_tfm(crypto_ablkcipher_reqtfm(req));
        struct sahara_ctx *ctx = crypto_ablkcipher_ctx(
                crypto_ablkcipher_reqtfm(req));
        int err;

        if (unlikely(ctx->keylen != AES_KEYSIZE_128)) {
                ablkcipher_request_set_tfm(req, ctx->fallback);
                err = crypto_ablkcipher_encrypt(req);
                ablkcipher_request_set_tfm(req, __crypto_ablkcipher_cast(tfm));
                return err;
        }

        return sahara_aes_crypt(req, FLAGS_ENCRYPT);
}

static int sahara_aes_ecb_decrypt(struct ablkcipher_request *req)
{
        struct crypto_tfm *tfm =
                crypto_ablkcipher_tfm(crypto_ablkcipher_reqtfm(req));
        struct sahara_ctx *ctx = crypto_ablkcipher_ctx(
                crypto_ablkcipher_reqtfm(req));
        int err;

        if (unlikely(ctx->keylen != AES_KEYSIZE_128)) {
                ablkcipher_request_set_tfm(req, ctx->fallback);
                err = crypto_ablkcipher_decrypt(req);
                ablkcipher_request_set_tfm(req, __crypto_ablkcipher_cast(tfm));
                return err;
        }

        return sahara_aes_crypt(req, 0);
}

static int sahara_aes_cbc_encrypt(struct ablkcipher_request *req)
{
        struct crypto_tfm *tfm =
                crypto_ablkcipher_tfm(crypto_ablkcipher_reqtfm(req));
        struct sahara_ctx *ctx = crypto_ablkcipher_ctx(
                crypto_ablkcipher_reqtfm(req));
        int err;

        if (unlikely(ctx->keylen != AES_KEYSIZE_128)) {
                ablkcipher_request_set_tfm(req, ctx->fallback);
                err = crypto_ablkcipher_encrypt(req);
                ablkcipher_request_set_tfm(req, __crypto_ablkcipher_cast(tfm));
                return err;
        }

        return sahara_aes_crypt(req, FLAGS_ENCRYPT | FLAGS_CBC);
}

static int sahara_aes_cbc_decrypt(struct ablkcipher_request *req)
{
        struct crypto_tfm *tfm =
                crypto_ablkcipher_tfm(crypto_ablkcipher_reqtfm(req));
        struct sahara_ctx *ctx = crypto_ablkcipher_ctx(
                crypto_ablkcipher_reqtfm(req));
        int err;

        if (unlikely(ctx->keylen != AES_KEYSIZE_128)) {
                ablkcipher_request_set_tfm(req, ctx->fallback);
                err = crypto_ablkcipher_decrypt(req);
                ablkcipher_request_set_tfm(req, __crypto_ablkcipher_cast(tfm));
                return err;
        }

        return sahara_aes_crypt(req, FLAGS_CBC);
}

static int sahara_aes_cra_init(struct crypto_tfm *tfm)
{
        const char *name = crypto_tfm_alg_name(tfm);
        struct sahara_ctx *ctx = crypto_tfm_ctx(tfm);

        ctx->fallback = crypto_alloc_ablkcipher(name, 0,
                                CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK);
        if (IS_ERR(ctx->fallback)) {
                pr_err("Error allocating fallback algo %s\n", name);
                return PTR_ERR(ctx->fallback);
        }

        tfm->crt_ablkcipher.reqsize = sizeof(struct sahara_aes_reqctx);

        return 0;
}

static void sahara_aes_cra_exit(struct crypto_tfm *tfm)
{
        struct sahara_ctx *ctx = crypto_tfm_ctx(tfm);

        if (ctx->fallback)
                crypto_free_ablkcipher(ctx->fallback);
        ctx->fallback = NULL;
}

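/*
 * Build the MDHA descriptor header for the current request state. The first
 * pass of a digest selects "set mode, hash" plus MDHA_INIT so the hardware
 * starts from the standard initial values; later passes select "set mode,
 * message digest/key" so a previously saved context is reloaded instead.
 * MDHA_PDATA is set only on the final pass and presumably makes the
 * hardware pad the trailing partial block itself (this driver never pads
 * in software).
 */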
static u32 sahara_sha_init_hdr(struct sahara_dev *dev,
                              struct sahara_sha_reqctx *rctx)
{
        u32 hdr = 0;

        hdr = rctx->mode;

        if (rctx->first) {
                hdr |= SAHARA_HDR_MDHA_SET_MODE_HASH;
                hdr |= SAHARA_HDR_MDHA_INIT;
        } else {
                hdr |= SAHARA_HDR_MDHA_SET_MODE_MD_KEY;
        }

        if (rctx->last)
                hdr |= SAHARA_HDR_MDHA_PDATA;

        /* keep the number of set header bits odd */
        if (hweight_long(hdr) % 2 == 0)
                hdr |= SAHARA_HDR_PARITY_BIT;

        return hdr;
}

static int sahara_sha_hw_links_create(struct sahara_dev *dev,
                                       struct sahara_sha_reqctx *rctx,
                                       int start)
{
        struct scatterlist *sg;
        unsigned int i;
        int ret;

        dev->in_sg = rctx->in_sg;

        dev->nb_in_sg = sg_nents_for_len(dev->in_sg, rctx->total);
        if (dev->nb_in_sg < 0) {
                dev_err(dev->device, "Invalid numbers of src SG.\n");
                return dev->nb_in_sg;
        }
        if ((dev->nb_in_sg) > SAHARA_MAX_HW_LINK) {
                dev_err(dev->device, "not enough hw links (%d)\n",
                        dev->nb_in_sg);
                return -EINVAL;
        }

        sg = dev->in_sg;
        ret = dma_map_sg(dev->device, dev->in_sg, dev->nb_in_sg, DMA_TO_DEVICE);
        if (!ret)
                return -EFAULT;

        for (i = start; i < dev->nb_in_sg + start; i++) {
                dev->hw_link[i]->len = sg->length;
                dev->hw_link[i]->p = sg->dma_address;
                if (i == (dev->nb_in_sg + start - 1)) {
                        dev->hw_link[i]->next = 0;
                } else {
                        dev->hw_link[i]->next = dev->hw_phys_link[i + 1];
                        sg = sg_next(sg);
                }
        }

        return i;
}

static int sahara_sha_hw_data_descriptor_create(struct sahara_dev *dev,
                                                struct sahara_sha_reqctx *rctx,
                                                struct ahash_request *req,
                                                int index)
{
        unsigned result_len;
        int i = index;

        if (rctx->first)
                /* Create initial descriptor: #8 */
                dev->hw_desc[index]->hdr = sahara_sha_init_hdr(dev, rctx);
        else
                /* Create hash descriptor: #10. Must follow #6. */
                dev->hw_desc[index]->hdr = SAHARA_HDR_MDHA_HASH;

        dev->hw_desc[index]->len1 = rctx->total;
        if (dev->hw_desc[index]->len1 == 0) {
                /* if len1 is 0, p1 must be 0, too */
                dev->hw_desc[index]->p1 = 0;
                rctx->sg_in_idx = 0;
        } else {
                /* Create input links */
                dev->hw_desc[index]->p1 = dev->hw_phys_link[index];
                i = sahara_sha_hw_links_create(dev, rctx, index);

                rctx->sg_in_idx = index;
                if (i < 0)
                        return i;
        }

        dev->hw_desc[index]->p2 = dev->hw_phys_link[i];

        /* Save the context for the next operation */
        result_len = rctx->context_size;
        dev->hw_link[i]->p = dev->context_phys_base;

        dev->hw_link[i]->len = result_len;
        dev->hw_desc[index]->len2 = result_len;

        dev->hw_link[i]->next = 0;

        return 0;
}

/*
 * Load descriptor, aka #6
 *
 * Loads a previously saved context back into the MDHA unit
 *
 * p1: Saved Context
 * p2: NULL
 */
static int sahara_sha_hw_context_descriptor_create(struct sahara_dev *dev,
                                                struct sahara_sha_reqctx *rctx,
                                                struct ahash_request *req,
                                                int index)
{
        dev->hw_desc[index]->hdr = sahara_sha_init_hdr(dev, rctx);

        dev->hw_desc[index]->len1 = rctx->context_size;
        dev->hw_desc[index]->p1 = dev->hw_phys_link[index];
        dev->hw_desc[index]->len2 = 0;
        dev->hw_desc[index]->p2 = 0;

        dev->hw_link[index]->len = rctx->context_size;
        dev->hw_link[index]->p = dev->context_phys_base;
        dev->hw_link[index]->next = 0;

        return 0;
}

/*
 * Shorten the scatterlist so that it covers exactly nbytes: the entry in
 * which the data ends is trimmed and marked as the end of the list.
 */
static int sahara_walk_and_recalc(struct scatterlist *sg, unsigned int nbytes)
{
        if (!sg || !sg->length)
                return nbytes;

        while (nbytes && sg) {
                if (nbytes <= sg->length) {
                        sg->length = nbytes;
                        sg_mark_end(sg);
                        break;
                }
                nbytes -= sg->length;
                sg = sg_next(sg);
        }

        return nbytes;
}

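/*
 * Stage request data for the hardware. MDHA can only pad the very last
 * block of a digest, so intermediate updates must be block-aligned: short
 * requests are accumulated in rctx->buf, previously buffered bytes are
 * replayed via rctx->rembuf (chained in front of req->src when both have
 * data), and any unaligned tail is carried over to the next call. Returns
 * 0 if everything was buffered and no hardware pass is needed,
 * -EINPROGRESS if the request should be processed.
 */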
static int sahara_sha_prepare_request(struct ahash_request *req)
{
        struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
        struct sahara_sha_reqctx *rctx = ahash_request_ctx(req);
        unsigned int hash_later;
        unsigned int block_size;
        unsigned int len;

        block_size = crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm));

        /* append bytes from previous operation */
        len = rctx->buf_cnt + req->nbytes;

        /* only the last transfer can be padded in hardware */
        if (!rctx->last && (len < block_size)) {
                /* too little data, save it for the next operation */
                scatterwalk_map_and_copy(rctx->buf + rctx->buf_cnt, req->src,
                                         0, req->nbytes, 0);
                rctx->buf_cnt += req->nbytes;

                return 0;
        }

        /* add data from previous operation first */
        if (rctx->buf_cnt)
                memcpy(rctx->rembuf, rctx->buf, rctx->buf_cnt);

        /* data must always be a multiple of block_size */
        hash_later = rctx->last ? 0 : len & (block_size - 1);
        if (hash_later) {
                unsigned int offset = req->nbytes - hash_later;
                /* Save remaining bytes for later use */
                scatterwalk_map_and_copy(rctx->buf, req->src, offset,
                                        hash_later, 0);
        }

        /* nbytes is now block-aligned (the hardware pads the final transfer) */
        req->nbytes = req->nbytes - hash_later;

        sahara_walk_and_recalc(req->src, req->nbytes);

        /* have data from previous operation and current */
        if (rctx->buf_cnt && req->nbytes) {
                sg_init_table(rctx->in_sg_chain, 2);
                sg_set_buf(rctx->in_sg_chain, rctx->rembuf, rctx->buf_cnt);

                sg_chain(rctx->in_sg_chain, 2, req->src);

                rctx->total = req->nbytes + rctx->buf_cnt;
                rctx->in_sg = rctx->in_sg_chain;

                req->src = rctx->in_sg_chain;
        /* only data from previous operation */
        } else if (rctx->buf_cnt) {
                if (req->src)
                        rctx->in_sg = req->src;
                else
                        rctx->in_sg = rctx->in_sg_chain;
                /* buf was copied into rembuf above */
                sg_init_one(rctx->in_sg, rctx->rembuf, rctx->buf_cnt);
                rctx->total = rctx->buf_cnt;
        /* no data from previous operation */
        } else {
                rctx->in_sg = req->src;
                rctx->total = req->nbytes;
                req->src = rctx->in_sg;
        }

        /* on next call, we only have the remaining data in the buffer */
        rctx->buf_cnt = hash_later;

        return -EINPROGRESS;
}

static int sahara_sha_process(struct ahash_request *req)
{
        struct sahara_dev *dev = dev_ptr;
        struct sahara_sha_reqctx *rctx = ahash_request_ctx(req);
        int ret;
        unsigned long timeout;

        /* a return of 0 means the data was only buffered for later */
        ret = sahara_sha_prepare_request(req);
        if (!ret)
                return ret;

        if (rctx->first) {
                sahara_sha_hw_data_descriptor_create(dev, rctx, req, 0);
                dev->hw_desc[0]->next = 0;
                rctx->first = 0;
        } else {
                memcpy(dev->context_base, rctx->context, rctx->context_size);

                sahara_sha_hw_context_descriptor_create(dev, rctx, req, 0);
                dev->hw_desc[0]->next = dev->hw_phys_desc[1];
                sahara_sha_hw_data_descriptor_create(dev, rctx, req, 1);
                dev->hw_desc[1]->next = 0;
        }

        sahara_dump_descriptors(dev);
        sahara_dump_links(dev);

        reinit_completion(&dev->dma_completion);

        sahara_write(dev, dev->hw_phys_desc[0], SAHARA_REG_DAR);

        timeout = wait_for_completion_timeout(&dev->dma_completion,
                                msecs_to_jiffies(SAHARA_TIMEOUT_MS));
        if (!timeout) {
                dev_err(dev->device, "SHA timeout\n");
                return -ETIMEDOUT;
        }

        if (rctx->sg_in_idx)
                dma_unmap_sg(dev->device, dev->in_sg, dev->nb_in_sg,
                             DMA_TO_DEVICE);

        memcpy(rctx->context, dev->context_base, rctx->context_size);

        if (req->result)
                memcpy(req->result, rctx->context, rctx->digest_size);

        return 0;
}

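/*
 * Worker thread that feeds the hardware. Because SAHARA handles a single
 * descriptor chain at a time, all requests are serialized here: dequeue one
 * request, process it synchronously, complete it, and sleep while the queue
 * is empty.
 */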
static int sahara_queue_manage(void *data)
{
        struct sahara_dev *dev = (struct sahara_dev *)data;
        struct crypto_async_request *async_req;
        struct crypto_async_request *backlog;
        int ret = 0;

        do {
                __set_current_state(TASK_INTERRUPTIBLE);

                mutex_lock(&dev->queue_mutex);
                backlog = crypto_get_backlog(&dev->queue);
                async_req = crypto_dequeue_request(&dev->queue);
                mutex_unlock(&dev->queue_mutex);

                if (backlog)
                        backlog->complete(backlog, -EINPROGRESS);

                if (async_req) {
                        if (crypto_tfm_alg_type(async_req->tfm) ==
                            CRYPTO_ALG_TYPE_AHASH) {
                                struct ahash_request *req =
                                        ahash_request_cast(async_req);

                                ret = sahara_sha_process(req);
                        } else {
                                struct ablkcipher_request *req =
                                        ablkcipher_request_cast(async_req);

                                ret = sahara_aes_process(req);
                        }

                        async_req->complete(async_req, ret);

                        continue;
                }

                schedule();
        } while (!kthread_should_stop());

        return 0;
}

static int sahara_sha_enqueue(struct ahash_request *req, int last)
{
        struct sahara_sha_reqctx *rctx = ahash_request_ctx(req);
        struct sahara_dev *dev = dev_ptr;
        int ret;

        if (!req->nbytes && !last)
                return 0;

        rctx->last = last;

        if (!rctx->active) {
                rctx->active = 1;
                rctx->first = 1;
        }

        mutex_lock(&dev->queue_mutex);
        ret = crypto_enqueue_request(&dev->queue, &req->base);
        mutex_unlock(&dev->queue_mutex);

        wake_up_process(dev->kthread);

        return ret;
}

static int sahara_sha_init(struct ahash_request *req)
{
        struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
        struct sahara_sha_reqctx *rctx = ahash_request_ctx(req);

        memset(rctx, 0, sizeof(*rctx));

        switch (crypto_ahash_digestsize(tfm)) {
        case SHA1_DIGEST_SIZE:
                rctx->mode |= SAHARA_HDR_MDHA_ALG_SHA1;
                rctx->digest_size = SHA1_DIGEST_SIZE;
                break;
        case SHA256_DIGEST_SIZE:
                rctx->mode |= SAHARA_HDR_MDHA_ALG_SHA256;
                rctx->digest_size = SHA256_DIGEST_SIZE;
                break;
        default:
                return -EINVAL;
        }

        rctx->context_size = rctx->digest_size + 4;
        rctx->active = 0;

        return 0;
}

static int sahara_sha_update(struct ahash_request *req)
{
        return sahara_sha_enqueue(req, 0);
}

static int sahara_sha_final(struct ahash_request *req)
{
        req->nbytes = 0;
        return sahara_sha_enqueue(req, 1);
}

static int sahara_sha_finup(struct ahash_request *req)
{
        return sahara_sha_enqueue(req, 1);
}

static int sahara_sha_digest(struct ahash_request *req)
{
        sahara_sha_init(req);

        return sahara_sha_finup(req);
}

static int sahara_sha_export(struct ahash_request *req, void *out)
{
        struct sahara_sha_reqctx *rctx = ahash_request_ctx(req);

        memcpy(out, rctx, sizeof(struct sahara_sha_reqctx));

        return 0;
}

static int sahara_sha_import(struct ahash_request *req, const void *in)
{
        struct sahara_sha_reqctx *rctx = ahash_request_ctx(req);

        memcpy(rctx, in, sizeof(struct sahara_sha_reqctx));

        return 0;
}

static int sahara_sha_cra_init(struct crypto_tfm *tfm)
{
        const char *name = crypto_tfm_alg_name(tfm);
        struct sahara_ctx *ctx = crypto_tfm_ctx(tfm);

        ctx->shash_fallback = crypto_alloc_shash(name, 0,
                                        CRYPTO_ALG_NEED_FALLBACK);
        if (IS_ERR(ctx->shash_fallback)) {
                pr_err("Error allocating fallback algo %s\n", name);
                return PTR_ERR(ctx->shash_fallback);
        }
        crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
                                 sizeof(struct sahara_sha_reqctx) +
                                 SHA_BUFFER_LEN + SHA256_BLOCK_SIZE);

        return 0;
}

static void sahara_sha_cra_exit(struct crypto_tfm *tfm)
{
        struct sahara_ctx *ctx = crypto_tfm_ctx(tfm);

        crypto_free_shash(ctx->shash_fallback);
        ctx->shash_fallback = NULL;
}

static struct crypto_alg aes_algs[] = {
{
        .cra_name               = "ecb(aes)",
        .cra_driver_name        = "sahara-ecb-aes",
        .cra_priority           = 300,
        .cra_flags              = CRYPTO_ALG_TYPE_ABLKCIPHER |
                        CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK,
        .cra_blocksize          = AES_BLOCK_SIZE,
        .cra_ctxsize            = sizeof(struct sahara_ctx),
        .cra_alignmask          = 0x0,
        .cra_type               = &crypto_ablkcipher_type,
        .cra_module             = THIS_MODULE,
        .cra_init               = sahara_aes_cra_init,
        .cra_exit               = sahara_aes_cra_exit,
        .cra_u.ablkcipher = {
                .min_keysize    = AES_MIN_KEY_SIZE,
                .max_keysize    = AES_MAX_KEY_SIZE,
                .setkey         = sahara_aes_setkey,
                .encrypt        = sahara_aes_ecb_encrypt,
                .decrypt        = sahara_aes_ecb_decrypt,
        }
}, {
        .cra_name               = "cbc(aes)",
        .cra_driver_name        = "sahara-cbc-aes",
        .cra_priority           = 300,
        .cra_flags              = CRYPTO_ALG_TYPE_ABLKCIPHER |
                        CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK,
        .cra_blocksize          = AES_BLOCK_SIZE,
        .cra_ctxsize            = sizeof(struct sahara_ctx),
        .cra_alignmask          = 0x0,
        .cra_type               = &crypto_ablkcipher_type,
        .cra_module             = THIS_MODULE,
        .cra_init               = sahara_aes_cra_init,
        .cra_exit               = sahara_aes_cra_exit,
        .cra_u.ablkcipher = {
                .min_keysize    = AES_MIN_KEY_SIZE,
                .max_keysize    = AES_MAX_KEY_SIZE,
                .ivsize         = AES_BLOCK_SIZE,
                .setkey         = sahara_aes_setkey,
                .encrypt        = sahara_aes_cbc_encrypt,
                .decrypt        = sahara_aes_cbc_decrypt,
        }
}
};

static struct ahash_alg sha_v3_algs[] = {
{
        .init           = sahara_sha_init,
        .update         = sahara_sha_update,
        .final          = sahara_sha_final,
        .finup          = sahara_sha_finup,
        .digest         = sahara_sha_digest,
        .export         = sahara_sha_export,
        .import         = sahara_sha_import,
        .halg.digestsize        = SHA1_DIGEST_SIZE,
        .halg.statesize         = sizeof(struct sahara_sha_reqctx),
        .halg.base      = {
                .cra_name               = "sha1",
                .cra_driver_name        = "sahara-sha1",
                .cra_priority           = 300,
                .cra_flags              = CRYPTO_ALG_TYPE_AHASH |
                                                CRYPTO_ALG_ASYNC |
                                                CRYPTO_ALG_NEED_FALLBACK,
                .cra_blocksize          = SHA1_BLOCK_SIZE,
                .cra_ctxsize            = sizeof(struct sahara_ctx),
                .cra_alignmask          = 0,
                .cra_module             = THIS_MODULE,
                .cra_init               = sahara_sha_cra_init,
                .cra_exit               = sahara_sha_cra_exit,
        }
},
};

static struct ahash_alg sha_v4_algs[] = {
{
        .init           = sahara_sha_init,
        .update         = sahara_sha_update,
        .final          = sahara_sha_final,
        .finup          = sahara_sha_finup,
        .digest         = sahara_sha_digest,
        .export         = sahara_sha_export,
        .import         = sahara_sha_import,
        .halg.digestsize        = SHA256_DIGEST_SIZE,
        .halg.statesize         = sizeof(struct sahara_sha_reqctx),
        .halg.base      = {
                .cra_name               = "sha256",
                .cra_driver_name        = "sahara-sha256",
                .cra_priority           = 300,
                .cra_flags              = CRYPTO_ALG_TYPE_AHASH |
                                                CRYPTO_ALG_ASYNC |
                                                CRYPTO_ALG_NEED_FALLBACK,
                .cra_blocksize          = SHA256_BLOCK_SIZE,
                .cra_ctxsize            = sizeof(struct sahara_ctx),
                .cra_alignmask          = 0,
                .cra_module             = THIS_MODULE,
                .cra_init               = sahara_sha_cra_init,
                .cra_exit               = sahara_sha_cra_exit,
        }
},
};

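/*
 * Interrupt handler: acknowledge and clear the interrupt, decode the status
 * word and wake whoever is blocked in sahara_aes_process() or
 * sahara_sha_process(). A BUSY state means no descriptor has finished, so
 * the interrupt is not ours to handle.
 */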
static irqreturn_t sahara_irq_handler(int irq, void *data)
{
        struct sahara_dev *dev = (struct sahara_dev *)data;
        unsigned int stat = sahara_read(dev, SAHARA_REG_STATUS);
        unsigned int err = sahara_read(dev, SAHARA_REG_ERRSTATUS);

        sahara_write(dev, SAHARA_CMD_CLEAR_INT | SAHARA_CMD_CLEAR_ERR,
                     SAHARA_REG_CMD);

        sahara_decode_status(dev, stat);

        if (SAHARA_STATUS_GET_STATE(stat) == SAHARA_STATE_BUSY) {
                return IRQ_NONE;
        } else if (SAHARA_STATUS_GET_STATE(stat) == SAHARA_STATE_COMPLETE) {
                dev->error = 0;
        } else {
                sahara_decode_error(dev, err);
                dev->error = -EINVAL;
        }

        complete(&dev->dma_completion);

        return IRQ_HANDLED;
}

static int sahara_register_algs(struct sahara_dev *dev)
{
        int err;
        unsigned int i, j, k, l;

        for (i = 0; i < ARRAY_SIZE(aes_algs); i++) {
                INIT_LIST_HEAD(&aes_algs[i].cra_list);
                err = crypto_register_alg(&aes_algs[i]);
                if (err)
                        goto err_aes_algs;
        }

        for (k = 0; k < ARRAY_SIZE(sha_v3_algs); k++) {
                err = crypto_register_ahash(&sha_v3_algs[k]);
                if (err)
                        goto err_sha_v3_algs;
        }

        if (dev->version > SAHARA_VERSION_3)
                for (l = 0; l < ARRAY_SIZE(sha_v4_algs); l++) {
                        err = crypto_register_ahash(&sha_v4_algs[l]);
                        if (err)
                                goto err_sha_v4_algs;
                }

        return 0;

err_sha_v4_algs:
        for (j = 0; j < l; j++)
                crypto_unregister_ahash(&sha_v4_algs[j]);

err_sha_v3_algs:
        for (j = 0; j < k; j++)
                crypto_unregister_ahash(&sha_v3_algs[j]);

err_aes_algs:
        for (j = 0; j < i; j++)
                crypto_unregister_alg(&aes_algs[j]);

        return err;
}

static void sahara_unregister_algs(struct sahara_dev *dev)
{
        unsigned int i;

        for (i = 0; i < ARRAY_SIZE(aes_algs); i++)
                crypto_unregister_alg(&aes_algs[i]);

        for (i = 0; i < ARRAY_SIZE(sha_v3_algs); i++)
                crypto_unregister_ahash(&sha_v3_algs[i]);

        if (dev->version > SAHARA_VERSION_3)
                for (i = 0; i < ARRAY_SIZE(sha_v4_algs); i++)
                        crypto_unregister_ahash(&sha_v4_algs[i]);
}

static struct platform_device_id sahara_platform_ids[] = {
        { .name = "sahara-imx27" },
        { /* sentinel */ }
};
MODULE_DEVICE_TABLE(platform, sahara_platform_ids);

static struct of_device_id sahara_dt_ids[] = {
        { .compatible = "fsl,imx53-sahara" },
        { .compatible = "fsl,imx27-sahara" },
        { /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, sahara_dt_ids);

static int sahara_probe(struct platform_device *pdev)
{
        struct sahara_dev *dev;
        struct resource *res;
        u32 version;
        int irq;
        int err;
        int i;

        dev = devm_kzalloc(&pdev->dev, sizeof(struct sahara_dev), GFP_KERNEL);
        if (!dev) {
                dev_err(&pdev->dev, "unable to alloc data struct.\n");
                return -ENOMEM;
        }

        dev->device = &pdev->dev;
        platform_set_drvdata(pdev, dev);

        /* Get the base address */
        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        dev->regs_base = devm_ioremap_resource(&pdev->dev, res);
        if (IS_ERR(dev->regs_base))
                return PTR_ERR(dev->regs_base);

        /* Get the IRQ */
        irq = platform_get_irq(pdev, 0);
        if (irq < 0) {
                dev_err(&pdev->dev, "failed to get irq resource\n");
                return irq;
        }

        err = devm_request_irq(&pdev->dev, irq, sahara_irq_handler,
                               0, dev_name(&pdev->dev), dev);
        if (err) {
                dev_err(&pdev->dev, "failed to request irq\n");
                return err;
        }

        /* clocks */
        dev->clk_ipg = devm_clk_get(&pdev->dev, "ipg");
        if (IS_ERR(dev->clk_ipg)) {
                dev_err(&pdev->dev, "Could not get ipg clock\n");
                return PTR_ERR(dev->clk_ipg);
        }

        dev->clk_ahb = devm_clk_get(&pdev->dev, "ahb");
        if (IS_ERR(dev->clk_ahb)) {
                dev_err(&pdev->dev, "Could not get ahb clock\n");
                return PTR_ERR(dev->clk_ahb);
        }

        /* Allocate HW descriptors */
        dev->hw_desc[0] = dmam_alloc_coherent(&pdev->dev,
                        SAHARA_MAX_HW_DESC * sizeof(struct sahara_hw_desc),
                        &dev->hw_phys_desc[0], GFP_KERNEL);
        if (!dev->hw_desc[0]) {
                dev_err(&pdev->dev, "Could not allocate hw descriptors\n");
                return -ENOMEM;
        }
        dev->hw_desc[1] = dev->hw_desc[0] + 1;
        dev->hw_phys_desc[1] = dev->hw_phys_desc[0] +
                                sizeof(struct sahara_hw_desc);

        /* Allocate space for iv and key */
        dev->key_base = dmam_alloc_coherent(&pdev->dev, 2 * AES_KEYSIZE_128,
                                &dev->key_phys_base, GFP_KERNEL);
        if (!dev->key_base) {
                dev_err(&pdev->dev, "Could not allocate memory for key\n");
                return -ENOMEM;
        }
        dev->iv_base = dev->key_base + AES_KEYSIZE_128;
        dev->iv_phys_base = dev->key_phys_base + AES_KEYSIZE_128;

        /* Allocate space for context: largest digest + message length field */
        dev->context_base = dmam_alloc_coherent(&pdev->dev,
                                        SHA256_DIGEST_SIZE + 4,
                                        &dev->context_phys_base, GFP_KERNEL);
        if (!dev->context_base) {
                dev_err(&pdev->dev, "Could not allocate memory for MDHA context\n");
                return -ENOMEM;
        }

        /* Allocate space for HW links */
        dev->hw_link[0] = dmam_alloc_coherent(&pdev->dev,
                        SAHARA_MAX_HW_LINK * sizeof(struct sahara_hw_link),
                        &dev->hw_phys_link[0], GFP_KERNEL);
        if (!dev->hw_link[0]) {
                dev_err(&pdev->dev, "Could not allocate hw links\n");
                return -ENOMEM;
        }
        for (i = 1; i < SAHARA_MAX_HW_LINK; i++) {
                dev->hw_phys_link[i] = dev->hw_phys_link[i - 1] +
                                        sizeof(struct sahara_hw_link);
                dev->hw_link[i] = dev->hw_link[i - 1] + 1;
        }

        crypto_init_queue(&dev->queue, SAHARA_QUEUE_LENGTH);

        spin_lock_init(&dev->lock);
        mutex_init(&dev->queue_mutex);

        dev_ptr = dev;

        dev->kthread = kthread_run(sahara_queue_manage, dev, "sahara_crypto");
        if (IS_ERR(dev->kthread))
                return PTR_ERR(dev->kthread);

        init_completion(&dev->dma_completion);

        err = clk_prepare_enable(dev->clk_ipg);
        if (err)
                return err;
        err = clk_prepare_enable(dev->clk_ahb);
        if (err)
                goto clk_ipg_disable;

        version = sahara_read(dev, SAHARA_REG_VERSION);
        if (of_device_is_compatible(pdev->dev.of_node, "fsl,imx27-sahara")) {
                if (version != SAHARA_VERSION_3)
                        err = -ENODEV;
        } else if (of_device_is_compatible(pdev->dev.of_node,
                        "fsl,imx53-sahara")) {
                if (((version >> 8) & 0xff) != SAHARA_VERSION_4)
                        err = -ENODEV;
                version = (version >> 8) & 0xff;
        }
        if (err == -ENODEV) {
                dev_err(&pdev->dev, "SAHARA version %d not supported\n",
                                version);
                goto err_algs;
        }

        dev->version = version;

        sahara_write(dev, SAHARA_CMD_RESET | SAHARA_CMD_MODE_BATCH,
                     SAHARA_REG_CMD);
        sahara_write(dev, SAHARA_CONTROL_SET_THROTTLE(0) |
                        SAHARA_CONTROL_SET_MAXBURST(8) |
                        SAHARA_CONTROL_RNG_AUTORSD |
                        SAHARA_CONTROL_ENABLE_INT,
                        SAHARA_REG_CONTROL);

        err = sahara_register_algs(dev);
        if (err)
                goto err_algs;

        dev_info(&pdev->dev, "SAHARA version %d initialized\n", version);

        return 0;

err_algs:
        kthread_stop(dev->kthread);
        dev_ptr = NULL;
        clk_disable_unprepare(dev->clk_ahb);
clk_ipg_disable:
        clk_disable_unprepare(dev->clk_ipg);

        return err;
}

static int sahara_remove(struct platform_device *pdev)
{
        struct sahara_dev *dev = platform_get_drvdata(pdev);

        kthread_stop(dev->kthread);

        sahara_unregister_algs(dev);

        clk_disable_unprepare(dev->clk_ipg);
        clk_disable_unprepare(dev->clk_ahb);

        dev_ptr = NULL;

        return 0;
}

static struct platform_driver sahara_driver = {
        .probe          = sahara_probe,
        .remove         = sahara_remove,
        .driver         = {
                .name   = SAHARA_NAME,
                .of_match_table = sahara_dt_ids,
        },
        .id_table = sahara_platform_ids,
};

module_platform_driver(sahara_driver);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Javier Martin <javier.martin@vista-silicon.com>");
MODULE_AUTHOR("Steffen Trumtrar <s.trumtrar@pengutronix.de>");
MODULE_DESCRIPTION("SAHARA2 HW crypto accelerator");