linux/drivers/crypto/sahara.c
/*
 * Cryptographic API.
 *
 * Support for SAHARA cryptographic accelerator.
 *
 * Copyright (c) 2014 Steffen Trumtrar <s.trumtrar@pengutronix.de>
 * Copyright (c) 2013 Vista Silicon S.L.
 * Author: Javier Martin <javier.martin@vista-silicon.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation.
 *
 * Based on omap-aes.c and tegra-aes.c
 */

#include <crypto/aes.h>
#include <crypto/internal/hash.h>
#include <crypto/internal/skcipher.h>
#include <crypto/scatterwalk.h>
#include <crypto/sha.h>

#include <linux/clk.h>
#include <linux/crypto.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/kernel.h>
#include <linux/kthread.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>

#define SHA_BUFFER_LEN		PAGE_SIZE
#define SAHARA_MAX_SHA_BLOCK_SIZE	SHA256_BLOCK_SIZE

#define SAHARA_NAME "sahara"
#define SAHARA_VERSION_3	3
#define SAHARA_VERSION_4	4
#define SAHARA_TIMEOUT_MS	1000
#define SAHARA_MAX_HW_DESC	2
#define SAHARA_MAX_HW_LINK	20

#define FLAGS_MODE_MASK		0x000f
#define FLAGS_ENCRYPT		BIT(0)
#define FLAGS_CBC		BIT(1)
#define FLAGS_NEW_KEY		BIT(3)

#define SAHARA_HDR_BASE			0x00800000
#define SAHARA_HDR_SKHA_ALG_AES		0
#define SAHARA_HDR_SKHA_OP_ENC		(1 << 2)
#define SAHARA_HDR_SKHA_MODE_ECB	(0 << 3)
#define SAHARA_HDR_SKHA_MODE_CBC	(1 << 3)
#define SAHARA_HDR_FORM_DATA		(5 << 16)
#define SAHARA_HDR_FORM_KEY		(8 << 16)
#define SAHARA_HDR_LLO			(1 << 24)
#define SAHARA_HDR_CHA_SKHA		(1 << 28)
#define SAHARA_HDR_CHA_MDHA		(2 << 28)
#define SAHARA_HDR_PARITY_BIT		(1 << 31)

#define SAHARA_HDR_MDHA_SET_MODE_MD_KEY	0x20880000
#define SAHARA_HDR_MDHA_SET_MODE_HASH	0x208D0000
#define SAHARA_HDR_MDHA_HASH		0xA0850000
#define SAHARA_HDR_MDHA_STORE_DIGEST	0x20820000
#define SAHARA_HDR_MDHA_ALG_SHA1	0
#define SAHARA_HDR_MDHA_ALG_MD5		1
#define SAHARA_HDR_MDHA_ALG_SHA256	2
#define SAHARA_HDR_MDHA_ALG_SHA224	3
#define SAHARA_HDR_MDHA_PDATA		(1 << 2)
#define SAHARA_HDR_MDHA_HMAC		(1 << 3)
#define SAHARA_HDR_MDHA_INIT		(1 << 5)
#define SAHARA_HDR_MDHA_IPAD		(1 << 6)
#define SAHARA_HDR_MDHA_OPAD		(1 << 7)
#define SAHARA_HDR_MDHA_SWAP		(1 << 8)
#define SAHARA_HDR_MDHA_MAC_FULL	(1 << 9)
#define SAHARA_HDR_MDHA_SSL		(1 << 10)

/* SAHARA can only process one request at a time */
#define SAHARA_QUEUE_LENGTH	1

#define SAHARA_REG_VERSION	0x00
#define SAHARA_REG_DAR		0x04
#define SAHARA_REG_CONTROL	0x08
#define		SAHARA_CONTROL_SET_THROTTLE(x)	(((x) & 0xff) << 24)
#define		SAHARA_CONTROL_SET_MAXBURST(x)	(((x) & 0xff) << 16)
#define		SAHARA_CONTROL_RNG_AUTORSD	(1 << 7)
#define		SAHARA_CONTROL_ENABLE_INT	(1 << 4)
#define SAHARA_REG_CMD		0x0C
#define		SAHARA_CMD_RESET		(1 << 0)
#define		SAHARA_CMD_CLEAR_INT		(1 << 8)
#define		SAHARA_CMD_CLEAR_ERR		(1 << 9)
#define		SAHARA_CMD_SINGLE_STEP		(1 << 10)
#define		SAHARA_CMD_MODE_BATCH		(1 << 16)
#define		SAHARA_CMD_MODE_DEBUG		(1 << 18)
#define SAHARA_REG_STATUS	0x10
#define		SAHARA_STATUS_GET_STATE(x)	((x) & 0x7)
#define			SAHARA_STATE_IDLE	0
#define			SAHARA_STATE_BUSY	1
#define			SAHARA_STATE_ERR	2
#define			SAHARA_STATE_FAULT	3
#define			SAHARA_STATE_COMPLETE	4
#define			SAHARA_STATE_COMP_FLAG	(1 << 2)
#define		SAHARA_STATUS_DAR_FULL		(1 << 3)
#define		SAHARA_STATUS_ERROR		(1 << 4)
#define		SAHARA_STATUS_SECURE		(1 << 5)
#define		SAHARA_STATUS_FAIL		(1 << 6)
#define		SAHARA_STATUS_INIT		(1 << 7)
#define		SAHARA_STATUS_RNG_RESEED	(1 << 8)
#define		SAHARA_STATUS_ACTIVE_RNG	(1 << 9)
#define		SAHARA_STATUS_ACTIVE_MDHA	(1 << 10)
#define		SAHARA_STATUS_ACTIVE_SKHA	(1 << 11)
#define		SAHARA_STATUS_MODE_BATCH	(1 << 16)
#define		SAHARA_STATUS_MODE_DEDICATED	(1 << 17)
#define		SAHARA_STATUS_MODE_DEBUG	(1 << 18)
#define		SAHARA_STATUS_GET_ISTATE(x)	(((x) >> 24) & 0xff)
#define SAHARA_REG_ERRSTATUS	0x14
#define		SAHARA_ERRSTATUS_GET_SOURCE(x)	((x) & 0xf)
#define			SAHARA_ERRSOURCE_CHA	14
#define			SAHARA_ERRSOURCE_DMA	15
#define		SAHARA_ERRSTATUS_DMA_DIR	(1 << 8)
#define		SAHARA_ERRSTATUS_GET_DMASZ(x)	(((x) >> 9) & 0x3)
#define		SAHARA_ERRSTATUS_GET_DMASRC(x)	(((x) >> 13) & 0x7)
#define		SAHARA_ERRSTATUS_GET_CHASRC(x)	(((x) >> 16) & 0xfff)
#define		SAHARA_ERRSTATUS_GET_CHAERR(x)	(((x) >> 28) & 0x3)
#define SAHARA_REG_FADDR	0x18
#define SAHARA_REG_CDAR		0x1C
#define SAHARA_REG_IDAR		0x20

struct sahara_hw_desc {
	u32	hdr;
	u32	len1;
	u32	p1;
	u32	len2;
	u32	p2;
	u32	next;
};

struct sahara_hw_link {
	u32	len;
	u32	p;
	u32	next;
};

struct sahara_ctx {
	unsigned long flags;

	/* AES-specific context */
	int keylen;
	u8 key[AES_KEYSIZE_128];
	struct crypto_skcipher *fallback;
};

struct sahara_aes_reqctx {
	unsigned long mode;
};

/*
 * struct sahara_sha_reqctx - private data per request
 * @buf: holds data for requests smaller than block_size
 * @rembuf: used to prepare one block_size-aligned request
 * @context: hw-specific context for request. Digest is extracted from this
 * @mode: specifies what type of hw-descriptor needs to be built
 * @digest_size: length of digest for this request
 * @context_size: length of hw-context for this request.
 *                Always digest_size + 4
 * @buf_cnt: number of bytes saved in buf
 * @sg_in_idx: number of hw links
 * @in_sg: scatterlist for input data
 * @in_sg_chain: scatterlists for chained input data
 * @total: total number of bytes for transfer
 * @last: is this the last block
 * @first: is this the first block
 * @active: inside a transfer
 */
struct sahara_sha_reqctx {
	u8			buf[SAHARA_MAX_SHA_BLOCK_SIZE];
	u8			rembuf[SAHARA_MAX_SHA_BLOCK_SIZE];
	u8			context[SHA256_DIGEST_SIZE + 4];
	unsigned int		mode;
	unsigned int		digest_size;
	unsigned int		context_size;
	unsigned int		buf_cnt;
	unsigned int		sg_in_idx;
	struct scatterlist	*in_sg;
	struct scatterlist	in_sg_chain[2];
	size_t			total;
	unsigned int		last;
	unsigned int		first;
	unsigned int		active;
};

struct sahara_dev {
	struct device		*device;
	unsigned int		version;
	void __iomem		*regs_base;
	struct clk		*clk_ipg;
	struct clk		*clk_ahb;
	struct mutex		queue_mutex;
	struct task_struct	*kthread;
	struct completion	dma_completion;

	struct sahara_ctx	*ctx;
	spinlock_t		lock;
	struct crypto_queue	queue;
	unsigned long		flags;

	struct sahara_hw_desc	*hw_desc[SAHARA_MAX_HW_DESC];
	dma_addr_t		hw_phys_desc[SAHARA_MAX_HW_DESC];

	u8			*key_base;
	dma_addr_t		key_phys_base;

	u8			*iv_base;
	dma_addr_t		iv_phys_base;

	u8			*context_base;
	dma_addr_t		context_phys_base;

	struct sahara_hw_link	*hw_link[SAHARA_MAX_HW_LINK];
	dma_addr_t		hw_phys_link[SAHARA_MAX_HW_LINK];

	size_t			total;
	struct scatterlist	*in_sg;
	int			nb_in_sg;
	struct scatterlist	*out_sg;
	int			nb_out_sg;

	u32			error;
};

static struct sahara_dev *dev_ptr;

static inline void sahara_write(struct sahara_dev *dev, u32 data, u32 reg)
{
	writel(data, dev->regs_base + reg);
}

static inline unsigned int sahara_read(struct sahara_dev *dev, u32 reg)
{
	return readl(dev->regs_base + reg);
}

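/*
 * Build the header word of a key-load descriptor. SAHARA descriptor
 * headers carry a parity bit; each mode flag ORed in below also toggles
 * SAHARA_HDR_PARITY_BIT, so the header's overall bit parity is preserved.
 */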
static u32 sahara_aes_key_hdr(struct sahara_dev *dev)
{
	u32 hdr = SAHARA_HDR_BASE | SAHARA_HDR_SKHA_ALG_AES |
			SAHARA_HDR_FORM_KEY | SAHARA_HDR_LLO |
			SAHARA_HDR_CHA_SKHA | SAHARA_HDR_PARITY_BIT;

	if (dev->flags & FLAGS_CBC) {
		hdr |= SAHARA_HDR_SKHA_MODE_CBC;
		hdr ^= SAHARA_HDR_PARITY_BIT;
	}

	if (dev->flags & FLAGS_ENCRYPT) {
		hdr |= SAHARA_HDR_SKHA_OP_ENC;
		hdr ^= SAHARA_HDR_PARITY_BIT;
	}

	return hdr;
}

static u32 sahara_aes_data_link_hdr(struct sahara_dev *dev)
{
	return SAHARA_HDR_BASE | SAHARA_HDR_FORM_DATA |
			SAHARA_HDR_CHA_SKHA | SAHARA_HDR_PARITY_BIT;
}

static const char *sahara_err_src[16] = {
	"No error",
	"Header error",
	"Descriptor length error",
	"Descriptor length or pointer error",
	"Link length error",
	"Link pointer error",
	"Input buffer error",
	"Output buffer error",
	"Output buffer starvation",
	"Internal state fault",
	"General descriptor problem",
	"Reserved",
	"Descriptor address error",
	"Link address error",
	"CHA error",
	"DMA error"
};

static const char *sahara_err_dmasize[4] = {
	"Byte transfer",
	"Half-word transfer",
	"Word transfer",
	"Reserved"
};

static const char *sahara_err_dmasrc[8] = {
	"No error",
	"AHB bus error",
	"Internal IP bus error",
	"Parity error",
	"DMA crosses 256 byte boundary",
	"DMA is busy",
	"Reserved",
	"DMA HW error"
};

static const char *sahara_cha_errsrc[12] = {
	"Input buffer non-empty",
	"Illegal address",
	"Illegal mode",
	"Illegal data size",
	"Illegal key size",
	"Write during processing",
	"CTX read during processing",
	"HW error",
	"Input buffer disabled/underflow",
	"Output buffer disabled/overflow",
	"DES key parity error",
	"Reserved"
};

static const char *sahara_cha_err[4] = { "No error", "SKHA", "MDHA", "RNG" };

static void sahara_decode_error(struct sahara_dev *dev, unsigned int error)
{
	u8 source = SAHARA_ERRSTATUS_GET_SOURCE(error);
	u16 chasrc = ffs(SAHARA_ERRSTATUS_GET_CHASRC(error));

	dev_err(dev->device, "%s: Error Register = 0x%08x\n", __func__, error);

	dev_err(dev->device, "  - %s.\n", sahara_err_src[source]);

	if (source == SAHARA_ERRSOURCE_DMA) {
		if (error & SAHARA_ERRSTATUS_DMA_DIR)
			dev_err(dev->device, "          * DMA read.\n");
		else
			dev_err(dev->device, "          * DMA write.\n");

		dev_err(dev->device, "          * %s.\n",
			sahara_err_dmasize[SAHARA_ERRSTATUS_GET_DMASZ(error)]);
		dev_err(dev->device, "          * %s.\n",
			sahara_err_dmasrc[SAHARA_ERRSTATUS_GET_DMASRC(error)]);
	} else if (source == SAHARA_ERRSOURCE_CHA) {
		dev_err(dev->device, "          * %s.\n",
			sahara_cha_errsrc[chasrc]);
		dev_err(dev->device, "          * %s.\n",
			sahara_cha_err[SAHARA_ERRSTATUS_GET_CHAERR(error)]);
	}
	dev_err(dev->device, "\n");
}

static const char *sahara_state[4] = { "Idle", "Busy", "Error", "HW Fault" };

static void sahara_decode_status(struct sahara_dev *dev, unsigned int status)
{
	u8 state;

	if (!IS_ENABLED(DEBUG))
		return;

	state = SAHARA_STATUS_GET_STATE(status);

	dev_dbg(dev->device, "%s: Status Register = 0x%08x\n",
		__func__, status);

	dev_dbg(dev->device, "  - State = %d:\n", state);
	if (state & SAHARA_STATE_COMP_FLAG)
		dev_dbg(dev->device, "          * Descriptor completed. IRQ pending.\n");

	dev_dbg(dev->device, "          * %s.\n",
		sahara_state[state & ~SAHARA_STATE_COMP_FLAG]);

	if (status & SAHARA_STATUS_DAR_FULL)
		dev_dbg(dev->device, "  - DAR Full.\n");
	if (status & SAHARA_STATUS_ERROR)
		dev_dbg(dev->device, "  - Error.\n");
	if (status & SAHARA_STATUS_SECURE)
		dev_dbg(dev->device, "  - Secure.\n");
	if (status & SAHARA_STATUS_FAIL)
		dev_dbg(dev->device, "  - Fail.\n");
	if (status & SAHARA_STATUS_RNG_RESEED)
		dev_dbg(dev->device, "  - RNG Reseed Request.\n");
	if (status & SAHARA_STATUS_ACTIVE_RNG)
		dev_dbg(dev->device, "  - RNG Active.\n");
	if (status & SAHARA_STATUS_ACTIVE_MDHA)
		dev_dbg(dev->device, "  - MDHA Active.\n");
	if (status & SAHARA_STATUS_ACTIVE_SKHA)
		dev_dbg(dev->device, "  - SKHA Active.\n");

	if (status & SAHARA_STATUS_MODE_BATCH)
		dev_dbg(dev->device, "  - Batch Mode.\n");
	else if (status & SAHARA_STATUS_MODE_DEDICATED)
		dev_dbg(dev->device, "  - Dedicated Mode.\n");
	else if (status & SAHARA_STATUS_MODE_DEBUG)
		dev_dbg(dev->device, "  - Debug Mode.\n");

	dev_dbg(dev->device, "  - Internal state = 0x%02x\n",
		SAHARA_STATUS_GET_ISTATE(status));

	dev_dbg(dev->device, "Current DAR: 0x%08x\n",
		sahara_read(dev, SAHARA_REG_CDAR));
	dev_dbg(dev->device, "Initial DAR: 0x%08x\n\n",
		sahara_read(dev, SAHARA_REG_IDAR));
}

static void sahara_dump_descriptors(struct sahara_dev *dev)
{
	int i;

	if (!IS_ENABLED(DEBUG))
		return;

	for (i = 0; i < SAHARA_MAX_HW_DESC; i++) {
		dev_dbg(dev->device, "Descriptor (%d) (%pad):\n",
			i, &dev->hw_phys_desc[i]);
		dev_dbg(dev->device, "\thdr = 0x%08x\n", dev->hw_desc[i]->hdr);
		dev_dbg(dev->device, "\tlen1 = %u\n", dev->hw_desc[i]->len1);
		dev_dbg(dev->device, "\tp1 = 0x%08x\n", dev->hw_desc[i]->p1);
		dev_dbg(dev->device, "\tlen2 = %u\n", dev->hw_desc[i]->len2);
		dev_dbg(dev->device, "\tp2 = 0x%08x\n", dev->hw_desc[i]->p2);
		dev_dbg(dev->device, "\tnext = 0x%08x\n",
			dev->hw_desc[i]->next);
	}
	dev_dbg(dev->device, "\n");
}

static void sahara_dump_links(struct sahara_dev *dev)
{
	int i;

	if (!IS_ENABLED(DEBUG))
		return;

	for (i = 0; i < SAHARA_MAX_HW_LINK; i++) {
		dev_dbg(dev->device, "Link (%d) (%pad):\n",
			i, &dev->hw_phys_link[i]);
		dev_dbg(dev->device, "\tlen = %u\n", dev->hw_link[i]->len);
		dev_dbg(dev->device, "\tp = 0x%08x\n", dev->hw_link[i]->p);
		dev_dbg(dev->device, "\tnext = 0x%08x\n",
			dev->hw_link[i]->next);
	}
	dev_dbg(dev->device, "\n");
}

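/*
 * Build the descriptor chain for an AES request: an optional first
 * descriptor loads the key (and the IV for CBC mode) into the SKHA unit,
 * then a data descriptor points via p1/p2 at chains of hw links covering
 * the source and destination scatterlists. Writing the physical address
 * of the first descriptor to the DAR register starts the operation.
 */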
static int sahara_hw_descriptor_create(struct sahara_dev *dev)
{
	struct sahara_ctx *ctx = dev->ctx;
	struct scatterlist *sg;
	int ret;
	int i, j;
	int idx = 0;

	/* Copy new key if necessary */
	if (ctx->flags & FLAGS_NEW_KEY) {
		memcpy(dev->key_base, ctx->key, ctx->keylen);
		ctx->flags &= ~FLAGS_NEW_KEY;

		if (dev->flags & FLAGS_CBC) {
			dev->hw_desc[idx]->len1 = AES_BLOCK_SIZE;
			dev->hw_desc[idx]->p1 = dev->iv_phys_base;
		} else {
			dev->hw_desc[idx]->len1 = 0;
			dev->hw_desc[idx]->p1 = 0;
		}
		dev->hw_desc[idx]->len2 = ctx->keylen;
		dev->hw_desc[idx]->p2 = dev->key_phys_base;
		dev->hw_desc[idx]->next = dev->hw_phys_desc[1];

		dev->hw_desc[idx]->hdr = sahara_aes_key_hdr(dev);

		idx++;
	}

	dev->nb_in_sg = sg_nents_for_len(dev->in_sg, dev->total);
	if (dev->nb_in_sg < 0) {
		dev_err(dev->device, "Invalid number of src SG entries.\n");
		return dev->nb_in_sg;
	}
	dev->nb_out_sg = sg_nents_for_len(dev->out_sg, dev->total);
	if (dev->nb_out_sg < 0) {
		dev_err(dev->device, "Invalid number of dst SG entries.\n");
		return dev->nb_out_sg;
	}
	if ((dev->nb_in_sg + dev->nb_out_sg) > SAHARA_MAX_HW_LINK) {
		dev_err(dev->device, "not enough hw links (%d)\n",
			dev->nb_in_sg + dev->nb_out_sg);
		return -EINVAL;
	}

	ret = dma_map_sg(dev->device, dev->in_sg, dev->nb_in_sg,
			 DMA_TO_DEVICE);
	if (ret != dev->nb_in_sg) {
		dev_err(dev->device, "couldn't map in sg\n");
		return -EINVAL;
	}
	ret = dma_map_sg(dev->device, dev->out_sg, dev->nb_out_sg,
			 DMA_FROM_DEVICE);
	if (ret != dev->nb_out_sg) {
		dev_err(dev->device, "couldn't map out sg\n");
		goto unmap_in;
	}

	/* Create input links */
	dev->hw_desc[idx]->p1 = dev->hw_phys_link[0];
	sg = dev->in_sg;
	for (i = 0; i < dev->nb_in_sg; i++) {
		dev->hw_link[i]->len = sg->length;
		dev->hw_link[i]->p = sg->dma_address;
		if (i == (dev->nb_in_sg - 1)) {
			dev->hw_link[i]->next = 0;
		} else {
			dev->hw_link[i]->next = dev->hw_phys_link[i + 1];
			sg = sg_next(sg);
		}
	}

	/* Create output links */
	dev->hw_desc[idx]->p2 = dev->hw_phys_link[i];
	sg = dev->out_sg;
	for (j = i; j < dev->nb_out_sg + i; j++) {
		dev->hw_link[j]->len = sg->length;
		dev->hw_link[j]->p = sg->dma_address;
		if (j == (dev->nb_out_sg + i - 1)) {
			dev->hw_link[j]->next = 0;
		} else {
			dev->hw_link[j]->next = dev->hw_phys_link[j + 1];
			sg = sg_next(sg);
		}
	}

	/* Fill remaining fields of hw_desc[1] */
	dev->hw_desc[idx]->hdr = sahara_aes_data_link_hdr(dev);
	dev->hw_desc[idx]->len1 = dev->total;
	dev->hw_desc[idx]->len2 = dev->total;
	dev->hw_desc[idx]->next = 0;

	sahara_dump_descriptors(dev);
	sahara_dump_links(dev);

	sahara_write(dev, dev->hw_phys_desc[0], SAHARA_REG_DAR);

	return 0;

unmap_in:
	dma_unmap_sg(dev->device, dev->in_sg, dev->nb_in_sg,
		DMA_TO_DEVICE);

	return -EINVAL;
}

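/*
 * Process one AES request synchronously: program the descriptor chain and
 * sleep until the IRQ handler signals dma_completion, or give up after
 * SAHARA_TIMEOUT_MS.
 */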
static int sahara_aes_process(struct ablkcipher_request *req)
{
	struct sahara_dev *dev = dev_ptr;
	struct sahara_ctx *ctx;
	struct sahara_aes_reqctx *rctx;
	int ret;
	unsigned long timeout;

	/* Request is ready to be dispatched by the device */
	dev_dbg(dev->device,
		"dispatch request (nbytes=%d, src=%p, dst=%p)\n",
		req->nbytes, req->src, req->dst);

	/* assign new request to device */
	dev->total = req->nbytes;
	dev->in_sg = req->src;
	dev->out_sg = req->dst;

	rctx = ablkcipher_request_ctx(req);
	ctx = crypto_ablkcipher_ctx(crypto_ablkcipher_reqtfm(req));
	rctx->mode &= FLAGS_MODE_MASK;
	dev->flags = (dev->flags & ~FLAGS_MODE_MASK) | rctx->mode;

	if ((dev->flags & FLAGS_CBC) && req->info)
		memcpy(dev->iv_base, req->info, AES_BLOCK_SIZE);

	/* assign new context to device */
	dev->ctx = ctx;

	reinit_completion(&dev->dma_completion);

	ret = sahara_hw_descriptor_create(dev);
	if (ret)
		return -EINVAL;

	timeout = wait_for_completion_timeout(&dev->dma_completion,
				msecs_to_jiffies(SAHARA_TIMEOUT_MS));
	if (!timeout) {
		dev_err(dev->device, "AES timeout\n");
		return -ETIMEDOUT;
	}

	dma_unmap_sg(dev->device, dev->out_sg, dev->nb_out_sg,
		DMA_FROM_DEVICE);
	dma_unmap_sg(dev->device, dev->in_sg, dev->nb_in_sg,
		DMA_TO_DEVICE);

	return 0;
}

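/*
 * The hardware implements AES-128 only; 192- and 256-bit keys are kept
 * in the context and served through the software fallback tfm instead.
 */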
static int sahara_aes_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
			     unsigned int keylen)
{
	struct sahara_ctx *ctx = crypto_ablkcipher_ctx(tfm);
	int ret;

	ctx->keylen = keylen;

	/* SAHARA only supports 128-bit keys */
	if (keylen == AES_KEYSIZE_128) {
		memcpy(ctx->key, key, keylen);
		ctx->flags |= FLAGS_NEW_KEY;
		return 0;
	}

	if (keylen != AES_KEYSIZE_192 && keylen != AES_KEYSIZE_256)
		return -EINVAL;

	/*
	 * The requested key size is not supported by HW, do a fallback.
	 */
	crypto_skcipher_clear_flags(ctx->fallback, CRYPTO_TFM_REQ_MASK);
	crypto_skcipher_set_flags(ctx->fallback, tfm->base.crt_flags &
						 CRYPTO_TFM_REQ_MASK);

	ret = crypto_skcipher_setkey(ctx->fallback, key, keylen);

	tfm->base.crt_flags &= ~CRYPTO_TFM_RES_MASK;
	tfm->base.crt_flags |= crypto_skcipher_get_flags(ctx->fallback) &
			       CRYPTO_TFM_RES_MASK;
	return ret;
}

static int sahara_aes_crypt(struct ablkcipher_request *req, unsigned long mode)
{
	struct sahara_aes_reqctx *rctx = ablkcipher_request_ctx(req);
	struct sahara_dev *dev = dev_ptr;
	int err = 0;

	dev_dbg(dev->device, "nbytes: %d, enc: %d, cbc: %d\n",
		req->nbytes, !!(mode & FLAGS_ENCRYPT), !!(mode & FLAGS_CBC));

	if (!IS_ALIGNED(req->nbytes, AES_BLOCK_SIZE)) {
		dev_err(dev->device,
			"request size is not an exact number of AES blocks\n");
		return -EINVAL;
	}

	rctx->mode = mode;

	mutex_lock(&dev->queue_mutex);
	err = ablkcipher_enqueue_request(&dev->queue, req);
	mutex_unlock(&dev->queue_mutex);

	wake_up_process(dev->kthread);

	return err;
}

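/*
 * Each crypt entry point below forwards requests with key sizes the
 * hardware cannot handle to the fallback skcipher; everything else is
 * queued for the dispatcher kthread.
 */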
static int sahara_aes_ecb_encrypt(struct ablkcipher_request *req)
{
	struct sahara_ctx *ctx = crypto_ablkcipher_ctx(
		crypto_ablkcipher_reqtfm(req));
	int err;

	if (unlikely(ctx->keylen != AES_KEYSIZE_128)) {
		SKCIPHER_REQUEST_ON_STACK(subreq, ctx->fallback);

		skcipher_request_set_tfm(subreq, ctx->fallback);
		skcipher_request_set_callback(subreq, req->base.flags,
					      NULL, NULL);
		skcipher_request_set_crypt(subreq, req->src, req->dst,
					   req->nbytes, req->info);
		err = crypto_skcipher_encrypt(subreq);
		skcipher_request_zero(subreq);
		return err;
	}

	return sahara_aes_crypt(req, FLAGS_ENCRYPT);
}

static int sahara_aes_ecb_decrypt(struct ablkcipher_request *req)
{
	struct sahara_ctx *ctx = crypto_ablkcipher_ctx(
		crypto_ablkcipher_reqtfm(req));
	int err;

	if (unlikely(ctx->keylen != AES_KEYSIZE_128)) {
		SKCIPHER_REQUEST_ON_STACK(subreq, ctx->fallback);

		skcipher_request_set_tfm(subreq, ctx->fallback);
		skcipher_request_set_callback(subreq, req->base.flags,
					      NULL, NULL);
		skcipher_request_set_crypt(subreq, req->src, req->dst,
					   req->nbytes, req->info);
		err = crypto_skcipher_decrypt(subreq);
		skcipher_request_zero(subreq);
		return err;
	}

	return sahara_aes_crypt(req, 0);
}

static int sahara_aes_cbc_encrypt(struct ablkcipher_request *req)
{
	struct sahara_ctx *ctx = crypto_ablkcipher_ctx(
		crypto_ablkcipher_reqtfm(req));
	int err;

	if (unlikely(ctx->keylen != AES_KEYSIZE_128)) {
		SKCIPHER_REQUEST_ON_STACK(subreq, ctx->fallback);

		skcipher_request_set_tfm(subreq, ctx->fallback);
		skcipher_request_set_callback(subreq, req->base.flags,
					      NULL, NULL);
		skcipher_request_set_crypt(subreq, req->src, req->dst,
					   req->nbytes, req->info);
		err = crypto_skcipher_encrypt(subreq);
		skcipher_request_zero(subreq);
		return err;
	}

	return sahara_aes_crypt(req, FLAGS_ENCRYPT | FLAGS_CBC);
}

static int sahara_aes_cbc_decrypt(struct ablkcipher_request *req)
{
	struct sahara_ctx *ctx = crypto_ablkcipher_ctx(
		crypto_ablkcipher_reqtfm(req));
	int err;

	if (unlikely(ctx->keylen != AES_KEYSIZE_128)) {
		SKCIPHER_REQUEST_ON_STACK(subreq, ctx->fallback);

		skcipher_request_set_tfm(subreq, ctx->fallback);
		skcipher_request_set_callback(subreq, req->base.flags,
					      NULL, NULL);
		skcipher_request_set_crypt(subreq, req->src, req->dst,
					   req->nbytes, req->info);
		err = crypto_skcipher_decrypt(subreq);
		skcipher_request_zero(subreq);
		return err;
	}

	return sahara_aes_crypt(req, FLAGS_CBC);
}

static int sahara_aes_cra_init(struct crypto_tfm *tfm)
{
	const char *name = crypto_tfm_alg_name(tfm);
	struct sahara_ctx *ctx = crypto_tfm_ctx(tfm);

	ctx->fallback = crypto_alloc_skcipher(name, 0,
					      CRYPTO_ALG_ASYNC |
					      CRYPTO_ALG_NEED_FALLBACK);
	if (IS_ERR(ctx->fallback)) {
		pr_err("Error allocating fallback algo %s\n", name);
		return PTR_ERR(ctx->fallback);
	}

	tfm->crt_ablkcipher.reqsize = sizeof(struct sahara_aes_reqctx);

	return 0;
}

static void sahara_aes_cra_exit(struct crypto_tfm *tfm)
{
	struct sahara_ctx *ctx = crypto_tfm_ctx(tfm);

	crypto_free_skcipher(ctx->fallback);
}

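/*
 * Build the MDHA mode header for this request: on the first pass the hash
 * state is initialized, on later passes a previously saved context is
 * apparently reloaded first (SET_MODE_MD_KEY). The parity bit is added
 * whenever the header would otherwise have an even number of bits set.
 */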
static u32 sahara_sha_init_hdr(struct sahara_dev *dev,
			       struct sahara_sha_reqctx *rctx)
{
	u32 hdr = rctx->mode;

	if (rctx->first) {
		hdr |= SAHARA_HDR_MDHA_SET_MODE_HASH;
		hdr |= SAHARA_HDR_MDHA_INIT;
	} else {
		hdr |= SAHARA_HDR_MDHA_SET_MODE_MD_KEY;
	}

	if (rctx->last)
		hdr |= SAHARA_HDR_MDHA_PDATA;

	if (hweight_long(hdr) % 2 == 0)
		hdr |= SAHARA_HDR_PARITY_BIT;

	return hdr;
}

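/*
 * Map the input scatterlist for DMA and mirror it into hw links starting
 * at index @start. Returns the index right after the last link used, or
 * a negative errno.
 */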
static int sahara_sha_hw_links_create(struct sahara_dev *dev,
				      struct sahara_sha_reqctx *rctx,
				      int start)
{
	struct scatterlist *sg;
	unsigned int i;
	int ret;

	dev->in_sg = rctx->in_sg;

	dev->nb_in_sg = sg_nents_for_len(dev->in_sg, rctx->total);
	if (dev->nb_in_sg < 0) {
		dev_err(dev->device, "Invalid number of src SG entries.\n");
		return dev->nb_in_sg;
	}
	if (dev->nb_in_sg > SAHARA_MAX_HW_LINK) {
		dev_err(dev->device, "not enough hw links (%d)\n",
			dev->nb_in_sg);
		return -EINVAL;
	}

	sg = dev->in_sg;
	ret = dma_map_sg(dev->device, dev->in_sg, dev->nb_in_sg, DMA_TO_DEVICE);
	if (!ret)
		return -EFAULT;

	for (i = start; i < dev->nb_in_sg + start; i++) {
		dev->hw_link[i]->len = sg->length;
		dev->hw_link[i]->p = sg->dma_address;
		if (i == (dev->nb_in_sg + start - 1)) {
			dev->hw_link[i]->next = 0;
		} else {
			dev->hw_link[i]->next = dev->hw_phys_link[i + 1];
			sg = sg_next(sg);
		}
	}

	return i;
}

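/*
 * Build the hash data descriptor: p1 points at the chain of input links
 * (len1 bytes in total) and p2 at a single link through which the updated
 * context (digest plus message-length word) is stored after the pass.
 */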
static int sahara_sha_hw_data_descriptor_create(struct sahara_dev *dev,
						struct sahara_sha_reqctx *rctx,
						struct ahash_request *req,
						int index)
{
	unsigned int result_len;
	int i = index;

	if (rctx->first)
		/* Create initial descriptor: #8 */
		dev->hw_desc[index]->hdr = sahara_sha_init_hdr(dev, rctx);
	else
		/* Create hash descriptor: #10. Must follow #6. */
		dev->hw_desc[index]->hdr = SAHARA_HDR_MDHA_HASH;

	dev->hw_desc[index]->len1 = rctx->total;
	if (dev->hw_desc[index]->len1 == 0) {
		/* if len1 is 0, p1 must be 0, too */
		dev->hw_desc[index]->p1 = 0;
		rctx->sg_in_idx = 0;
	} else {
		/* Create input links */
		dev->hw_desc[index]->p1 = dev->hw_phys_link[index];
		i = sahara_sha_hw_links_create(dev, rctx, index);

		rctx->sg_in_idx = index;
		if (i < 0)
			return i;
	}

	dev->hw_desc[index]->p2 = dev->hw_phys_link[i];

	/* Save the context for the next operation */
	result_len = rctx->context_size;
	dev->hw_link[i]->p = dev->context_phys_base;

	dev->hw_link[i]->len = result_len;
	dev->hw_desc[index]->len2 = result_len;

	dev->hw_link[i]->next = 0;

	return 0;
}

/*
 * Load descriptor aka #6
 *
 * To load a previously saved context back to the MDHA unit
 *
 * p1: Saved Context
 * p2: NULL
 *
 */
static int sahara_sha_hw_context_descriptor_create(struct sahara_dev *dev,
						struct sahara_sha_reqctx *rctx,
						struct ahash_request *req,
						int index)
{
	dev->hw_desc[index]->hdr = sahara_sha_init_hdr(dev, rctx);

	dev->hw_desc[index]->len1 = rctx->context_size;
	dev->hw_desc[index]->p1 = dev->hw_phys_link[index];
	dev->hw_desc[index]->len2 = 0;
	dev->hw_desc[index]->p2 = 0;

	dev->hw_link[index]->len = rctx->context_size;
	dev->hw_link[index]->p = dev->context_phys_base;
	dev->hw_link[index]->next = 0;

	return 0;
}

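/*
 * Shorten a scatterlist so it covers exactly @nbytes: the entry in which
 * the byte count runs out is trimmed and marked as the end of the list.
 * The caller ignores the return value.
 */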
static int sahara_walk_and_recalc(struct scatterlist *sg, unsigned int nbytes)
{
	if (!sg || !sg->length)
		return nbytes;

	while (nbytes && sg) {
		if (nbytes <= sg->length) {
			sg->length = nbytes;
			sg_mark_end(sg);
			break;
		}
		nbytes -= sg->length;
		sg = sg_next(sg);
	}

	return nbytes;
}

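/*
 * Stage request data so the hardware only ever sees whole blocks: short
 * trailing data is parked in rctx->buf, bytes parked by a previous call
 * are replayed from rctx->rembuf, and both sources are chained in front
 * of req->src when needed. Returns 0 if everything was buffered for
 * later, -EINPROGRESS if there is data to process now.
 */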
static int sahara_sha_prepare_request(struct ahash_request *req)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct sahara_sha_reqctx *rctx = ahash_request_ctx(req);
	unsigned int hash_later;
	unsigned int block_size;
	unsigned int len;

	block_size = crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm));

	/* append bytes from previous operation */
	len = rctx->buf_cnt + req->nbytes;

	/* only the last transfer can be padded in hardware */
	if (!rctx->last && (len < block_size)) {
		/* too little data, save it for the next operation */
		scatterwalk_map_and_copy(rctx->buf + rctx->buf_cnt, req->src,
					 0, req->nbytes, 0);
		rctx->buf_cnt += req->nbytes;

		return 0;
	}

	/* add data from previous operation first */
	if (rctx->buf_cnt)
		memcpy(rctx->rembuf, rctx->buf, rctx->buf_cnt);

	/* data must always be a multiple of block_size */
	hash_later = rctx->last ? 0 : len & (block_size - 1);
	if (hash_later) {
		unsigned int offset = req->nbytes - hash_later;
		/* Save remaining bytes for later use */
		scatterwalk_map_and_copy(rctx->buf, req->src, offset,
					 hash_later, 0);
	}

	/* nbytes should now be a multiple of blocksize */
	req->nbytes = req->nbytes - hash_later;

	sahara_walk_and_recalc(req->src, req->nbytes);

	/* have data from previous operation and current */
	if (rctx->buf_cnt && req->nbytes) {
		sg_init_table(rctx->in_sg_chain, 2);
		sg_set_buf(rctx->in_sg_chain, rctx->rembuf, rctx->buf_cnt);

		sg_chain(rctx->in_sg_chain, 2, req->src);

		rctx->total = req->nbytes + rctx->buf_cnt;
		rctx->in_sg = rctx->in_sg_chain;

		req->src = rctx->in_sg_chain;
	/* only data from previous operation */
	} else if (rctx->buf_cnt) {
		if (req->src)
			rctx->in_sg = req->src;
		else
			rctx->in_sg = rctx->in_sg_chain;
		/* buf was copied into rembuf above */
		sg_init_one(rctx->in_sg, rctx->rembuf, rctx->buf_cnt);
		rctx->total = rctx->buf_cnt;
	/* no data from previous operation */
	} else {
		rctx->in_sg = req->src;
		rctx->total = req->nbytes;
		req->src = rctx->in_sg;
	}

	/* on the next call, we only have the remaining data in the buffer */
	rctx->buf_cnt = hash_later;

	return -EINPROGRESS;
}

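/*
 * Run one hash pass: build the descriptor chain (prefixed by a context
 * load descriptor on every pass but the first), kick the hardware, wait
 * for completion and save the updated context for the next pass.
 */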
static int sahara_sha_process(struct ahash_request *req)
{
	struct sahara_dev *dev = dev_ptr;
	struct sahara_sha_reqctx *rctx = ahash_request_ctx(req);
	int ret;
	unsigned long timeout;

	ret = sahara_sha_prepare_request(req);
	if (!ret)
		return ret;

	if (rctx->first) {
		sahara_sha_hw_data_descriptor_create(dev, rctx, req, 0);
		dev->hw_desc[0]->next = 0;
		rctx->first = 0;
	} else {
		memcpy(dev->context_base, rctx->context, rctx->context_size);

		sahara_sha_hw_context_descriptor_create(dev, rctx, req, 0);
		dev->hw_desc[0]->next = dev->hw_phys_desc[1];
		sahara_sha_hw_data_descriptor_create(dev, rctx, req, 1);
		dev->hw_desc[1]->next = 0;
	}

	sahara_dump_descriptors(dev);
	sahara_dump_links(dev);

	reinit_completion(&dev->dma_completion);

	sahara_write(dev, dev->hw_phys_desc[0], SAHARA_REG_DAR);

	timeout = wait_for_completion_timeout(&dev->dma_completion,
				msecs_to_jiffies(SAHARA_TIMEOUT_MS));
	if (!timeout) {
		dev_err(dev->device, "SHA timeout\n");
		return -ETIMEDOUT;
	}

	if (rctx->sg_in_idx)
		dma_unmap_sg(dev->device, dev->in_sg, dev->nb_in_sg,
			     DMA_TO_DEVICE);

	memcpy(rctx->context, dev->context_base, rctx->context_size);

	if (req->result)
		memcpy(req->result, rctx->context, rctx->digest_size);

	return 0;
}

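/*
 * Dispatcher kthread: requests are pulled off the crypto queue one at a
 * time, since the hardware processes a single request at once, and are
 * completed with the result of the AES or SHA processing.
 */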
static int sahara_queue_manage(void *data)
{
	struct sahara_dev *dev = (struct sahara_dev *)data;
	struct crypto_async_request *async_req;
	struct crypto_async_request *backlog;
	int ret = 0;

	do {
		__set_current_state(TASK_INTERRUPTIBLE);

		mutex_lock(&dev->queue_mutex);
		backlog = crypto_get_backlog(&dev->queue);
		async_req = crypto_dequeue_request(&dev->queue);
		mutex_unlock(&dev->queue_mutex);

		if (backlog)
			backlog->complete(backlog, -EINPROGRESS);

		if (async_req) {
			if (crypto_tfm_alg_type(async_req->tfm) ==
			    CRYPTO_ALG_TYPE_AHASH) {
				struct ahash_request *req =
					ahash_request_cast(async_req);

				ret = sahara_sha_process(req);
			} else {
				struct ablkcipher_request *req =
					ablkcipher_request_cast(async_req);

				ret = sahara_aes_process(req);
			}

			async_req->complete(async_req, ret);

			continue;
		}

		schedule();
	} while (!kthread_should_stop());

	return 0;
}

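/* Queue a hash request and wake the dispatcher kthread. */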
static int sahara_sha_enqueue(struct ahash_request *req, int last)
{
	struct sahara_sha_reqctx *rctx = ahash_request_ctx(req);
	struct sahara_dev *dev = dev_ptr;
	int ret;

	if (!req->nbytes && !last)
		return 0;

	rctx->last = last;

	if (!rctx->active) {
		rctx->active = 1;
		rctx->first = 1;
	}

	mutex_lock(&dev->queue_mutex);
	ret = crypto_enqueue_request(&dev->queue, &req->base);
	mutex_unlock(&dev->queue_mutex);

	wake_up_process(dev->kthread);

	return ret;
}

static int sahara_sha_init(struct ahash_request *req)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct sahara_sha_reqctx *rctx = ahash_request_ctx(req);

	memset(rctx, 0, sizeof(*rctx));

	switch (crypto_ahash_digestsize(tfm)) {
	case SHA1_DIGEST_SIZE:
		rctx->mode |= SAHARA_HDR_MDHA_ALG_SHA1;
		rctx->digest_size = SHA1_DIGEST_SIZE;
		break;
	case SHA256_DIGEST_SIZE:
		rctx->mode |= SAHARA_HDR_MDHA_ALG_SHA256;
		rctx->digest_size = SHA256_DIGEST_SIZE;
		break;
	default:
		return -EINVAL;
	}

	rctx->context_size = rctx->digest_size + 4;
	rctx->active = 0;

	return 0;
}

static int sahara_sha_update(struct ahash_request *req)
{
	return sahara_sha_enqueue(req, 0);
}

static int sahara_sha_final(struct ahash_request *req)
{
	req->nbytes = 0;
	return sahara_sha_enqueue(req, 1);
}

static int sahara_sha_finup(struct ahash_request *req)
{
	return sahara_sha_enqueue(req, 1);
}

static int sahara_sha_digest(struct ahash_request *req)
{
	sahara_sha_init(req);

	return sahara_sha_finup(req);
}

static int sahara_sha_export(struct ahash_request *req, void *out)
{
	struct sahara_sha_reqctx *rctx = ahash_request_ctx(req);

	memcpy(out, rctx, sizeof(struct sahara_sha_reqctx));

	return 0;
}

static int sahara_sha_import(struct ahash_request *req, const void *in)
{
	struct sahara_sha_reqctx *rctx = ahash_request_ctx(req);

	memcpy(rctx, in, sizeof(struct sahara_sha_reqctx));

	return 0;
}

static int sahara_sha_cra_init(struct crypto_tfm *tfm)
{
	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
				 sizeof(struct sahara_sha_reqctx) +
				 SHA_BUFFER_LEN + SHA256_BLOCK_SIZE);

	return 0;
}

static struct crypto_alg aes_algs[] = {
{
	.cra_name		= "ecb(aes)",
	.cra_driver_name	= "sahara-ecb-aes",
	.cra_priority		= 300,
	.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER |
				  CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK,
	.cra_blocksize		= AES_BLOCK_SIZE,
	.cra_ctxsize		= sizeof(struct sahara_ctx),
	.cra_alignmask		= 0x0,
	.cra_type		= &crypto_ablkcipher_type,
	.cra_module		= THIS_MODULE,
	.cra_init		= sahara_aes_cra_init,
	.cra_exit		= sahara_aes_cra_exit,
	.cra_u.ablkcipher = {
		.min_keysize	= AES_MIN_KEY_SIZE,
		.max_keysize	= AES_MAX_KEY_SIZE,
		.setkey		= sahara_aes_setkey,
		.encrypt	= sahara_aes_ecb_encrypt,
		.decrypt	= sahara_aes_ecb_decrypt,
	}
}, {
	.cra_name		= "cbc(aes)",
	.cra_driver_name	= "sahara-cbc-aes",
	.cra_priority		= 300,
	.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER |
				  CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK,
	.cra_blocksize		= AES_BLOCK_SIZE,
	.cra_ctxsize		= sizeof(struct sahara_ctx),
	.cra_alignmask		= 0x0,
	.cra_type		= &crypto_ablkcipher_type,
	.cra_module		= THIS_MODULE,
	.cra_init		= sahara_aes_cra_init,
	.cra_exit		= sahara_aes_cra_exit,
	.cra_u.ablkcipher = {
		.min_keysize	= AES_MIN_KEY_SIZE,
		.max_keysize	= AES_MAX_KEY_SIZE,
		.ivsize		= AES_BLOCK_SIZE,
		.setkey		= sahara_aes_setkey,
		.encrypt	= sahara_aes_cbc_encrypt,
		.decrypt	= sahara_aes_cbc_decrypt,
	}
}
};

static struct ahash_alg sha_v3_algs[] = {
{
	.init		= sahara_sha_init,
	.update		= sahara_sha_update,
	.final		= sahara_sha_final,
	.finup		= sahara_sha_finup,
	.digest		= sahara_sha_digest,
	.export		= sahara_sha_export,
	.import		= sahara_sha_import,
	.halg.digestsize	= SHA1_DIGEST_SIZE,
	.halg.statesize		= sizeof(struct sahara_sha_reqctx),
	.halg.base	= {
		.cra_name		= "sha1",
		.cra_driver_name	= "sahara-sha1",
		.cra_priority		= 300,
		.cra_flags		= CRYPTO_ALG_TYPE_AHASH |
						CRYPTO_ALG_ASYNC |
						CRYPTO_ALG_NEED_FALLBACK,
		.cra_blocksize		= SHA1_BLOCK_SIZE,
		.cra_ctxsize		= sizeof(struct sahara_ctx),
		.cra_alignmask		= 0,
		.cra_module		= THIS_MODULE,
		.cra_init		= sahara_sha_cra_init,
	}
},
};

static struct ahash_alg sha_v4_algs[] = {
{
	.init		= sahara_sha_init,
	.update		= sahara_sha_update,
	.final		= sahara_sha_final,
	.finup		= sahara_sha_finup,
	.digest		= sahara_sha_digest,
	.export		= sahara_sha_export,
	.import		= sahara_sha_import,
	.halg.digestsize	= SHA256_DIGEST_SIZE,
	.halg.statesize		= sizeof(struct sahara_sha_reqctx),
	.halg.base	= {
		.cra_name		= "sha256",
		.cra_driver_name	= "sahara-sha256",
		.cra_priority		= 300,
		.cra_flags		= CRYPTO_ALG_TYPE_AHASH |
						CRYPTO_ALG_ASYNC |
						CRYPTO_ALG_NEED_FALLBACK,
		.cra_blocksize		= SHA256_BLOCK_SIZE,
		.cra_ctxsize		= sizeof(struct sahara_ctx),
		.cra_alignmask		= 0,
		.cra_module		= THIS_MODULE,
		.cra_init		= sahara_sha_cra_init,
	}
},
};

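/*
 * Completion interrupt: acknowledge the IRQ, record whether the
 * descriptor chain finished cleanly and wake up the waiter. A BUSY state
 * presumably means the interrupt was not raised for a finished
 * descriptor, so it is not ours to handle.
 */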
static irqreturn_t sahara_irq_handler(int irq, void *data)
{
	struct sahara_dev *dev = (struct sahara_dev *)data;
	unsigned int stat = sahara_read(dev, SAHARA_REG_STATUS);
	unsigned int err = sahara_read(dev, SAHARA_REG_ERRSTATUS);

	sahara_write(dev, SAHARA_CMD_CLEAR_INT | SAHARA_CMD_CLEAR_ERR,
		     SAHARA_REG_CMD);

	sahara_decode_status(dev, stat);

	if (SAHARA_STATUS_GET_STATE(stat) == SAHARA_STATE_BUSY) {
		return IRQ_NONE;
	} else if (SAHARA_STATUS_GET_STATE(stat) == SAHARA_STATE_COMPLETE) {
		dev->error = 0;
	} else {
		sahara_decode_error(dev, err);
		dev->error = -EINVAL;
	}

	complete(&dev->dma_completion);

	return IRQ_HANDLED;
}

static int sahara_register_algs(struct sahara_dev *dev)
{
	int err;
	unsigned int i, j, k, l;

	for (i = 0; i < ARRAY_SIZE(aes_algs); i++) {
		INIT_LIST_HEAD(&aes_algs[i].cra_list);
		err = crypto_register_alg(&aes_algs[i]);
		if (err)
			goto err_aes_algs;
	}

	for (k = 0; k < ARRAY_SIZE(sha_v3_algs); k++) {
		err = crypto_register_ahash(&sha_v3_algs[k]);
		if (err)
			goto err_sha_v3_algs;
	}

	if (dev->version > SAHARA_VERSION_3)
		for (l = 0; l < ARRAY_SIZE(sha_v4_algs); l++) {
			err = crypto_register_ahash(&sha_v4_algs[l]);
			if (err)
				goto err_sha_v4_algs;
		}

	return 0;

err_sha_v4_algs:
	for (j = 0; j < l; j++)
		crypto_unregister_ahash(&sha_v4_algs[j]);

err_sha_v3_algs:
	for (j = 0; j < k; j++)
		crypto_unregister_ahash(&sha_v3_algs[j]);

err_aes_algs:
	for (j = 0; j < i; j++)
		crypto_unregister_alg(&aes_algs[j]);

	return err;
}

static void sahara_unregister_algs(struct sahara_dev *dev)
{
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(aes_algs); i++)
		crypto_unregister_alg(&aes_algs[i]);

	for (i = 0; i < ARRAY_SIZE(sha_v3_algs); i++)
		crypto_unregister_ahash(&sha_v3_algs[i]);

	if (dev->version > SAHARA_VERSION_3)
		for (i = 0; i < ARRAY_SIZE(sha_v4_algs); i++)
			crypto_unregister_ahash(&sha_v4_algs[i]);
}

static const struct platform_device_id sahara_platform_ids[] = {
	{ .name = "sahara-imx27" },
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(platform, sahara_platform_ids);

static const struct of_device_id sahara_dt_ids[] = {
	{ .compatible = "fsl,imx53-sahara" },
	{ .compatible = "fsl,imx27-sahara" },
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, sahara_dt_ids);

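/*
 * Probe: map the registers, request the IRQ and clocks, carve coherent
 * buffers for descriptors, links, key/IV and hash context out of DMA
 * memory, start the dispatcher kthread, check the IP version against the
 * compatible string and finally register the algorithms.
 */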
static int sahara_probe(struct platform_device *pdev)
{
	struct sahara_dev *dev;
	struct resource *res;
	u32 version;
	int irq;
	int err;
	int i;

	dev = devm_kzalloc(&pdev->dev, sizeof(struct sahara_dev), GFP_KERNEL);
	if (!dev) {
		dev_err(&pdev->dev, "unable to alloc data struct.\n");
		return -ENOMEM;
	}

	dev->device = &pdev->dev;
	platform_set_drvdata(pdev, dev);

	/* Get the base address */
	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	dev->regs_base = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(dev->regs_base))
		return PTR_ERR(dev->regs_base);

	/* Get the IRQ */
	irq = platform_get_irq(pdev, 0);
	if (irq < 0) {
		dev_err(&pdev->dev, "failed to get irq resource\n");
		return irq;
	}

	err = devm_request_irq(&pdev->dev, irq, sahara_irq_handler,
			       0, dev_name(&pdev->dev), dev);
	if (err) {
		dev_err(&pdev->dev, "failed to request irq\n");
		return err;
	}

	/* clocks */
	dev->clk_ipg = devm_clk_get(&pdev->dev, "ipg");
	if (IS_ERR(dev->clk_ipg)) {
		dev_err(&pdev->dev, "Could not get ipg clock\n");
		return PTR_ERR(dev->clk_ipg);
	}

	dev->clk_ahb = devm_clk_get(&pdev->dev, "ahb");
	if (IS_ERR(dev->clk_ahb)) {
		dev_err(&pdev->dev, "Could not get ahb clock\n");
		return PTR_ERR(dev->clk_ahb);
	}

	/* Allocate HW descriptors */
	dev->hw_desc[0] = dmam_alloc_coherent(&pdev->dev,
			SAHARA_MAX_HW_DESC * sizeof(struct sahara_hw_desc),
			&dev->hw_phys_desc[0], GFP_KERNEL);
	if (!dev->hw_desc[0]) {
		dev_err(&pdev->dev, "Could not allocate hw descriptors\n");
		return -ENOMEM;
	}
	dev->hw_desc[1] = dev->hw_desc[0] + 1;
	dev->hw_phys_desc[1] = dev->hw_phys_desc[0] +
				sizeof(struct sahara_hw_desc);

	/* Allocate space for iv and key */
	dev->key_base = dmam_alloc_coherent(&pdev->dev, 2 * AES_KEYSIZE_128,
				&dev->key_phys_base, GFP_KERNEL);
	if (!dev->key_base) {
		dev_err(&pdev->dev, "Could not allocate memory for key\n");
		return -ENOMEM;
	}
	dev->iv_base = dev->key_base + AES_KEYSIZE_128;
	dev->iv_phys_base = dev->key_phys_base + AES_KEYSIZE_128;

	/* Allocate space for context: largest digest + message length field */
	dev->context_base = dmam_alloc_coherent(&pdev->dev,
					SHA256_DIGEST_SIZE + 4,
					&dev->context_phys_base, GFP_KERNEL);
	if (!dev->context_base) {
		dev_err(&pdev->dev, "Could not allocate memory for MDHA context\n");
		return -ENOMEM;
	}

	/* Allocate space for HW links */
	dev->hw_link[0] = dmam_alloc_coherent(&pdev->dev,
			SAHARA_MAX_HW_LINK * sizeof(struct sahara_hw_link),
			&dev->hw_phys_link[0], GFP_KERNEL);
	if (!dev->hw_link[0]) {
		dev_err(&pdev->dev, "Could not allocate hw links\n");
		return -ENOMEM;
	}
	for (i = 1; i < SAHARA_MAX_HW_LINK; i++) {
		dev->hw_phys_link[i] = dev->hw_phys_link[i - 1] +
					sizeof(struct sahara_hw_link);
		dev->hw_link[i] = dev->hw_link[i - 1] + 1;
	}

	crypto_init_queue(&dev->queue, SAHARA_QUEUE_LENGTH);

	spin_lock_init(&dev->lock);
	mutex_init(&dev->queue_mutex);

	dev_ptr = dev;

	dev->kthread = kthread_run(sahara_queue_manage, dev, "sahara_crypto");
	if (IS_ERR(dev->kthread))
		return PTR_ERR(dev->kthread);

	init_completion(&dev->dma_completion);

	err = clk_prepare_enable(dev->clk_ipg);
	if (err)
		return err;
	err = clk_prepare_enable(dev->clk_ahb);
	if (err)
		goto clk_ipg_disable;

	version = sahara_read(dev, SAHARA_REG_VERSION);
	if (of_device_is_compatible(pdev->dev.of_node, "fsl,imx27-sahara")) {
		if (version != SAHARA_VERSION_3)
			err = -ENODEV;
	} else if (of_device_is_compatible(pdev->dev.of_node,
			"fsl,imx53-sahara")) {
		if (((version >> 8) & 0xff) != SAHARA_VERSION_4)
			err = -ENODEV;
		version = (version >> 8) & 0xff;
	}
	if (err == -ENODEV) {
		dev_err(&pdev->dev, "SAHARA version %d not supported\n",
			version);
		goto err_algs;
	}

	dev->version = version;

	sahara_write(dev, SAHARA_CMD_RESET | SAHARA_CMD_MODE_BATCH,
		     SAHARA_REG_CMD);
	sahara_write(dev, SAHARA_CONTROL_SET_THROTTLE(0) |
			SAHARA_CONTROL_SET_MAXBURST(8) |
			SAHARA_CONTROL_RNG_AUTORSD |
			SAHARA_CONTROL_ENABLE_INT,
			SAHARA_REG_CONTROL);

	err = sahara_register_algs(dev);
	if (err)
		goto err_algs;

	dev_info(&pdev->dev, "SAHARA version %d initialized\n", version);

	return 0;

err_algs:
	kthread_stop(dev->kthread);
	dev_ptr = NULL;
	clk_disable_unprepare(dev->clk_ahb);
clk_ipg_disable:
	clk_disable_unprepare(dev->clk_ipg);

	return err;
}

static int sahara_remove(struct platform_device *pdev)
{
	struct sahara_dev *dev = platform_get_drvdata(pdev);

	kthread_stop(dev->kthread);

	sahara_unregister_algs(dev);

	clk_disable_unprepare(dev->clk_ipg);
	clk_disable_unprepare(dev->clk_ahb);

	dev_ptr = NULL;

	return 0;
}

static struct platform_driver sahara_driver = {
	.probe		= sahara_probe,
	.remove		= sahara_remove,
	.driver		= {
		.name	= SAHARA_NAME,
		.of_match_table = sahara_dt_ids,
	},
	.id_table = sahara_platform_ids,
};

module_platform_driver(sahara_driver);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Javier Martin <javier.martin@vista-silicon.com>");
MODULE_AUTHOR("Steffen Trumtrar <s.trumtrar@pengutronix.de>");
MODULE_DESCRIPTION("SAHARA2 HW crypto accelerator");