linux/drivers/crypto/sahara.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Cryptographic API.
 *
 * Support for SAHARA cryptographic accelerator.
 *
 * Copyright (c) 2014 Steffen Trumtrar <s.trumtrar@pengutronix.de>
 * Copyright (c) 2013 Vista Silicon S.L.
 * Author: Javier Martin <javier.martin@vista-silicon.com>
 *
 * Based on omap-aes.c and tegra-aes.c
 */

#include <crypto/aes.h>
#include <crypto/internal/hash.h>
#include <crypto/internal/skcipher.h>
#include <crypto/scatterwalk.h>
#include <crypto/sha.h>

#include <linux/clk.h>
#include <linux/crypto.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/kernel.h>
#include <linux/kthread.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>

#define SHA_BUFFER_LEN          PAGE_SIZE
#define SAHARA_MAX_SHA_BLOCK_SIZE       SHA256_BLOCK_SIZE

#define SAHARA_NAME "sahara"
#define SAHARA_VERSION_3        3
#define SAHARA_VERSION_4        4
#define SAHARA_TIMEOUT_MS       1000
#define SAHARA_MAX_HW_DESC      2
#define SAHARA_MAX_HW_LINK      20

#define FLAGS_MODE_MASK         0x000f
#define FLAGS_ENCRYPT           BIT(0)
#define FLAGS_CBC               BIT(1)
#define FLAGS_NEW_KEY           BIT(3)

#define SAHARA_HDR_BASE                 0x00800000
#define SAHARA_HDR_SKHA_ALG_AES         0
#define SAHARA_HDR_SKHA_OP_ENC          (1 << 2)
#define SAHARA_HDR_SKHA_MODE_ECB        (0 << 3)
#define SAHARA_HDR_SKHA_MODE_CBC        (1 << 3)
#define SAHARA_HDR_FORM_DATA            (5 << 16)
#define SAHARA_HDR_FORM_KEY             (8 << 16)
#define SAHARA_HDR_LLO                  (1 << 24)
#define SAHARA_HDR_CHA_SKHA             (1 << 28)
#define SAHARA_HDR_CHA_MDHA             (2 << 28)
#define SAHARA_HDR_PARITY_BIT           (1 << 31)

#define SAHARA_HDR_MDHA_SET_MODE_MD_KEY 0x20880000
#define SAHARA_HDR_MDHA_SET_MODE_HASH   0x208D0000
#define SAHARA_HDR_MDHA_HASH            0xA0850000
#define SAHARA_HDR_MDHA_STORE_DIGEST    0x20820000
#define SAHARA_HDR_MDHA_ALG_SHA1        0
#define SAHARA_HDR_MDHA_ALG_MD5         1
#define SAHARA_HDR_MDHA_ALG_SHA256      2
#define SAHARA_HDR_MDHA_ALG_SHA224      3
#define SAHARA_HDR_MDHA_PDATA           (1 << 2)
#define SAHARA_HDR_MDHA_HMAC            (1 << 3)
#define SAHARA_HDR_MDHA_INIT            (1 << 5)
#define SAHARA_HDR_MDHA_IPAD            (1 << 6)
#define SAHARA_HDR_MDHA_OPAD            (1 << 7)
#define SAHARA_HDR_MDHA_SWAP            (1 << 8)
#define SAHARA_HDR_MDHA_MAC_FULL        (1 << 9)
#define SAHARA_HDR_MDHA_SSL             (1 << 10)

/* SAHARA can only process one request at a time */
#define SAHARA_QUEUE_LENGTH     1

#define SAHARA_REG_VERSION      0x00
#define SAHARA_REG_DAR          0x04
#define SAHARA_REG_CONTROL      0x08
#define         SAHARA_CONTROL_SET_THROTTLE(x)  (((x) & 0xff) << 24)
#define         SAHARA_CONTROL_SET_MAXBURST(x)  (((x) & 0xff) << 16)
#define         SAHARA_CONTROL_RNG_AUTORSD      (1 << 7)
#define         SAHARA_CONTROL_ENABLE_INT       (1 << 4)
#define SAHARA_REG_CMD          0x0C
#define         SAHARA_CMD_RESET                (1 << 0)
#define         SAHARA_CMD_CLEAR_INT            (1 << 8)
#define         SAHARA_CMD_CLEAR_ERR            (1 << 9)
#define         SAHARA_CMD_SINGLE_STEP          (1 << 10)
#define         SAHARA_CMD_MODE_BATCH           (1 << 16)
#define         SAHARA_CMD_MODE_DEBUG           (1 << 18)
#define SAHARA_REG_STATUS       0x10
#define         SAHARA_STATUS_GET_STATE(x)      ((x) & 0x7)
#define                 SAHARA_STATE_IDLE       0
#define                 SAHARA_STATE_BUSY       1
#define                 SAHARA_STATE_ERR        2
#define                 SAHARA_STATE_FAULT      3
#define                 SAHARA_STATE_COMPLETE   4
#define                 SAHARA_STATE_COMP_FLAG  (1 << 2)
#define         SAHARA_STATUS_DAR_FULL          (1 << 3)
#define         SAHARA_STATUS_ERROR             (1 << 4)
#define         SAHARA_STATUS_SECURE            (1 << 5)
#define         SAHARA_STATUS_FAIL              (1 << 6)
#define         SAHARA_STATUS_INIT              (1 << 7)
#define         SAHARA_STATUS_RNG_RESEED        (1 << 8)
#define         SAHARA_STATUS_ACTIVE_RNG        (1 << 9)
#define         SAHARA_STATUS_ACTIVE_MDHA       (1 << 10)
#define         SAHARA_STATUS_ACTIVE_SKHA       (1 << 11)
#define         SAHARA_STATUS_MODE_BATCH        (1 << 16)
#define         SAHARA_STATUS_MODE_DEDICATED    (1 << 17)
#define         SAHARA_STATUS_MODE_DEBUG        (1 << 18)
#define         SAHARA_STATUS_GET_ISTATE(x)     (((x) >> 24) & 0xff)
#define SAHARA_REG_ERRSTATUS    0x14
#define         SAHARA_ERRSTATUS_GET_SOURCE(x)  ((x) & 0xf)
#define                 SAHARA_ERRSOURCE_CHA    14
#define                 SAHARA_ERRSOURCE_DMA    15
#define         SAHARA_ERRSTATUS_DMA_DIR        (1 << 8)
#define         SAHARA_ERRSTATUS_GET_DMASZ(x)   (((x) >> 9) & 0x3)
#define         SAHARA_ERRSTATUS_GET_DMASRC(x)  (((x) >> 13) & 0x7)
#define         SAHARA_ERRSTATUS_GET_CHASRC(x)  (((x) >> 16) & 0xfff)
#define         SAHARA_ERRSTATUS_GET_CHAERR(x)  (((x) >> 28) & 0x3)
#define SAHARA_REG_FADDR        0x18
#define SAHARA_REG_CDAR         0x1C
#define SAHARA_REG_IDAR         0x20

struct sahara_hw_desc {
        u32     hdr;
        u32     len1;
        u32     p1;
        u32     len2;
        u32     p2;
        u32     next;
};

struct sahara_hw_link {
        u32     len;
        u32     p;
        u32     next;
};
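
/*
 * Descriptor/link layout as this driver uses it: the DAR register is
 * handed the physical address of the first sahara_hw_desc; descriptors
 * chain through ->next, and each one carries two pointer/length pairs
 * (p1/len1 and p2/len2). With SAHARA_HDR_LLO set (the key descriptor),
 * p1/p2 point at the data buffers directly; without it (the data
 * descriptor), they point at chains of sahara_hw_link entries, each
 * describing one scatterlist segment. Illustrative sketch:
 *
 *      DAR -> desc[0] (key/IV) -> desc[1] (data) -> 0
 *                                  p1 -> in  link[0] -> ... -> 0
 *                                  p2 -> out link[i] -> ... -> 0
 */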

struct sahara_ctx {
        unsigned long flags;

        /* AES-specific context */
        int keylen;
        u8 key[AES_KEYSIZE_128];
        struct crypto_sync_skcipher *fallback;
};

struct sahara_aes_reqctx {
        unsigned long mode;
};

/*
 * struct sahara_sha_reqctx - private data per request
 * @buf: holds data for requests smaller than block_size
 * @rembuf: used to prepare one block_size-aligned request
 * @context: hw-specific context for request. Digest is extracted from this
 * @mode: specifies what type of hw-descriptor needs to be built
 * @digest_size: length of digest for this request
 * @context_size: length of hw-context for this request.
 *                Always digest_size + 4
 * @buf_cnt: number of bytes saved in buf
 * @sg_in_idx: index of the hw descriptor carrying the input links
 * @in_sg: scatterlist for input data
 * @in_sg_chain: scatterlists for chained input data
 * @total: total number of bytes for transfer
 * @last: is this the last block
 * @first: is this the first block
 * @active: inside a transfer
 */
struct sahara_sha_reqctx {
        u8                      buf[SAHARA_MAX_SHA_BLOCK_SIZE];
        u8                      rembuf[SAHARA_MAX_SHA_BLOCK_SIZE];
        u8                      context[SHA256_DIGEST_SIZE + 4];
        unsigned int            mode;
        unsigned int            digest_size;
        unsigned int            context_size;
        unsigned int            buf_cnt;
        unsigned int            sg_in_idx;
        struct scatterlist      *in_sg;
        struct scatterlist      in_sg_chain[2];
        size_t                  total;
        unsigned int            last;
        unsigned int            first;
        unsigned int            active;
};

struct sahara_dev {
        struct device           *device;
        unsigned int            version;
        void __iomem            *regs_base;
        struct clk              *clk_ipg;
        struct clk              *clk_ahb;
        struct mutex            queue_mutex;
        struct task_struct      *kthread;
        struct completion       dma_completion;

        struct sahara_ctx       *ctx;
        struct crypto_queue     queue;
        unsigned long           flags;

        struct sahara_hw_desc   *hw_desc[SAHARA_MAX_HW_DESC];
        dma_addr_t              hw_phys_desc[SAHARA_MAX_HW_DESC];

        u8                      *key_base;
        dma_addr_t              key_phys_base;

        u8                      *iv_base;
        dma_addr_t              iv_phys_base;

        u8                      *context_base;
        dma_addr_t              context_phys_base;

        struct sahara_hw_link   *hw_link[SAHARA_MAX_HW_LINK];
        dma_addr_t              hw_phys_link[SAHARA_MAX_HW_LINK];

        size_t                  total;
        struct scatterlist      *in_sg;
        int                     nb_in_sg;
        struct scatterlist      *out_sg;
        int                     nb_out_sg;

        u32                     error;
};

static struct sahara_dev *dev_ptr;

static inline void sahara_write(struct sahara_dev *dev, u32 data, u32 reg)
{
        writel(data, dev->regs_base + reg);
}

static inline unsigned int sahara_read(struct sahara_dev *dev, u32 reg)
{
        return readl(dev->regs_base + reg);
}

static u32 sahara_aes_key_hdr(struct sahara_dev *dev)
{
        u32 hdr = SAHARA_HDR_BASE | SAHARA_HDR_SKHA_ALG_AES |
                        SAHARA_HDR_FORM_KEY | SAHARA_HDR_LLO |
                        SAHARA_HDR_CHA_SKHA | SAHARA_HDR_PARITY_BIT;

        if (dev->flags & FLAGS_CBC) {
                hdr |= SAHARA_HDR_SKHA_MODE_CBC;
                hdr ^= SAHARA_HDR_PARITY_BIT;
        }

        if (dev->flags & FLAGS_ENCRYPT) {
                hdr |= SAHARA_HDR_SKHA_OP_ENC;
                hdr ^= SAHARA_HDR_PARITY_BIT;
        }

        return hdr;
}
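
/*
 * Note on SAHARA_HDR_PARITY_BIT: the driver keeps descriptor headers at
 * odd bit parity. The base key header above already has an odd number of
 * bits set (including the parity bit itself), so every time a single mode
 * bit is added, the parity bit is toggled to restore odd weight. Example:
 * adding only the CBC mode bit (1 << 3) would make the weight even, hence
 * the XOR clears the parity bit again. The MDHA path in
 * sahara_sha_init_hdr() computes the same property with hweight_long().
 */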

static u32 sahara_aes_data_link_hdr(struct sahara_dev *dev)
{
        return SAHARA_HDR_BASE | SAHARA_HDR_FORM_DATA |
                        SAHARA_HDR_CHA_SKHA | SAHARA_HDR_PARITY_BIT;
}

static const char *sahara_err_src[16] = {
        "No error",
        "Header error",
        "Descriptor length error",
        "Descriptor length or pointer error",
        "Link length error",
        "Link pointer error",
        "Input buffer error",
        "Output buffer error",
        "Output buffer starvation",
        "Internal state fault",
        "General descriptor problem",
        "Reserved",
        "Descriptor address error",
        "Link address error",
        "CHA error",
        "DMA error"
};

static const char *sahara_err_dmasize[4] = {
        "Byte transfer",
        "Half-word transfer",
        "Word transfer",
        "Reserved"
};

static const char *sahara_err_dmasrc[8] = {
        "No error",
        "AHB bus error",
        "Internal IP bus error",
        "Parity error",
        "DMA crosses 256 byte boundary",
        "DMA is busy",
        "Reserved",
        "DMA HW error"
};

static const char *sahara_cha_errsrc[12] = {
        "Input buffer non-empty",
        "Illegal address",
        "Illegal mode",
        "Illegal data size",
        "Illegal key size",
        "Write during processing",
        "CTX read during processing",
        "HW error",
        "Input buffer disabled/underflow",
        "Output buffer disabled/overflow",
        "DES key parity error",
        "Reserved"
};

static const char *sahara_cha_err[4] = { "No error", "SKHA", "MDHA", "RNG" };

static void sahara_decode_error(struct sahara_dev *dev, unsigned int error)
{
        u8 source = SAHARA_ERRSTATUS_GET_SOURCE(error);
        u16 chasrc = ffs(SAHARA_ERRSTATUS_GET_CHASRC(error));

        dev_err(dev->device, "%s: Error Register = 0x%08x\n", __func__, error);

        dev_err(dev->device, "  - %s.\n", sahara_err_src[source]);

        if (source == SAHARA_ERRSOURCE_DMA) {
                if (error & SAHARA_ERRSTATUS_DMA_DIR)
                        dev_err(dev->device, "          * DMA read.\n");
                else
                        dev_err(dev->device, "          * DMA write.\n");

                dev_err(dev->device, "          * %s.\n",
                        sahara_err_dmasize[SAHARA_ERRSTATUS_GET_DMASZ(error)]);
                dev_err(dev->device, "          * %s.\n",
                        sahara_err_dmasrc[SAHARA_ERRSTATUS_GET_DMASRC(error)]);
        } else if (source == SAHARA_ERRSOURCE_CHA) {
                dev_err(dev->device, "          * %s.\n",
                        sahara_cha_errsrc[chasrc]);
                dev_err(dev->device, "          * %s.\n",
                        sahara_cha_err[SAHARA_ERRSTATUS_GET_CHAERR(error)]);
        }
        dev_err(dev->device, "\n");
}

static const char *sahara_state[4] = { "Idle", "Busy", "Error", "HW Fault" };

static void sahara_decode_status(struct sahara_dev *dev, unsigned int status)
{
        u8 state;

        if (!__is_defined(DEBUG))
                return;

        state = SAHARA_STATUS_GET_STATE(status);

        dev_dbg(dev->device, "%s: Status Register = 0x%08x\n",
                __func__, status);

        dev_dbg(dev->device, "  - State = %d:\n", state);
        if (state & SAHARA_STATE_COMP_FLAG)
                dev_dbg(dev->device, "          * Descriptor completed. IRQ pending.\n");

        dev_dbg(dev->device, "          * %s.\n",
                sahara_state[state & ~SAHARA_STATE_COMP_FLAG]);

        if (status & SAHARA_STATUS_DAR_FULL)
                dev_dbg(dev->device, "  - DAR Full.\n");
        if (status & SAHARA_STATUS_ERROR)
                dev_dbg(dev->device, "  - Error.\n");
        if (status & SAHARA_STATUS_SECURE)
                dev_dbg(dev->device, "  - Secure.\n");
        if (status & SAHARA_STATUS_FAIL)
                dev_dbg(dev->device, "  - Fail.\n");
        if (status & SAHARA_STATUS_RNG_RESEED)
                dev_dbg(dev->device, "  - RNG Reseed Request.\n");
        if (status & SAHARA_STATUS_ACTIVE_RNG)
                dev_dbg(dev->device, "  - RNG Active.\n");
        if (status & SAHARA_STATUS_ACTIVE_MDHA)
                dev_dbg(dev->device, "  - MDHA Active.\n");
        if (status & SAHARA_STATUS_ACTIVE_SKHA)
                dev_dbg(dev->device, "  - SKHA Active.\n");

        if (status & SAHARA_STATUS_MODE_BATCH)
                dev_dbg(dev->device, "  - Batch Mode.\n");
        else if (status & SAHARA_STATUS_MODE_DEDICATED)
                dev_dbg(dev->device, "  - Dedicated Mode.\n");
        else if (status & SAHARA_STATUS_MODE_DEBUG)
                dev_dbg(dev->device, "  - Debug Mode.\n");

        dev_dbg(dev->device, "  - Internal state = 0x%02x\n",
                SAHARA_STATUS_GET_ISTATE(status));

        dev_dbg(dev->device, "Current DAR: 0x%08x\n",
                sahara_read(dev, SAHARA_REG_CDAR));
        dev_dbg(dev->device, "Initial DAR: 0x%08x\n\n",
                sahara_read(dev, SAHARA_REG_IDAR));
}

static void sahara_dump_descriptors(struct sahara_dev *dev)
{
        int i;

        if (!__is_defined(DEBUG))
                return;

        for (i = 0; i < SAHARA_MAX_HW_DESC; i++) {
                dev_dbg(dev->device, "Descriptor (%d) (%pad):\n",
                        i, &dev->hw_phys_desc[i]);
                dev_dbg(dev->device, "\thdr = 0x%08x\n", dev->hw_desc[i]->hdr);
                dev_dbg(dev->device, "\tlen1 = %u\n", dev->hw_desc[i]->len1);
                dev_dbg(dev->device, "\tp1 = 0x%08x\n", dev->hw_desc[i]->p1);
                dev_dbg(dev->device, "\tlen2 = %u\n", dev->hw_desc[i]->len2);
                dev_dbg(dev->device, "\tp2 = 0x%08x\n", dev->hw_desc[i]->p2);
                dev_dbg(dev->device, "\tnext = 0x%08x\n",
                        dev->hw_desc[i]->next);
        }
        dev_dbg(dev->device, "\n");
}

static void sahara_dump_links(struct sahara_dev *dev)
{
        int i;

        if (!__is_defined(DEBUG))
                return;

        for (i = 0; i < SAHARA_MAX_HW_LINK; i++) {
                dev_dbg(dev->device, "Link (%d) (%pad):\n",
                        i, &dev->hw_phys_link[i]);
                dev_dbg(dev->device, "\tlen = %u\n", dev->hw_link[i]->len);
                dev_dbg(dev->device, "\tp = 0x%08x\n", dev->hw_link[i]->p);
                dev_dbg(dev->device, "\tnext = 0x%08x\n",
                        dev->hw_link[i]->next);
        }
        dev_dbg(dev->device, "\n");
}

static int sahara_hw_descriptor_create(struct sahara_dev *dev)
{
        struct sahara_ctx *ctx = dev->ctx;
        struct scatterlist *sg;
        int ret;
        int i, j;
        int idx = 0;

        /* Copy new key if necessary */
        if (ctx->flags & FLAGS_NEW_KEY) {
                memcpy(dev->key_base, ctx->key, ctx->keylen);
                ctx->flags &= ~FLAGS_NEW_KEY;

                if (dev->flags & FLAGS_CBC) {
                        dev->hw_desc[idx]->len1 = AES_BLOCK_SIZE;
                        dev->hw_desc[idx]->p1 = dev->iv_phys_base;
                } else {
                        dev->hw_desc[idx]->len1 = 0;
                        dev->hw_desc[idx]->p1 = 0;
                }
                dev->hw_desc[idx]->len2 = ctx->keylen;
                dev->hw_desc[idx]->p2 = dev->key_phys_base;
                dev->hw_desc[idx]->next = dev->hw_phys_desc[1];

                dev->hw_desc[idx]->hdr = sahara_aes_key_hdr(dev);

                idx++;
        }

        dev->nb_in_sg = sg_nents_for_len(dev->in_sg, dev->total);
        if (dev->nb_in_sg < 0) {
                dev_err(dev->device, "Invalid number of src SG entries.\n");
                return dev->nb_in_sg;
        }
        dev->nb_out_sg = sg_nents_for_len(dev->out_sg, dev->total);
        if (dev->nb_out_sg < 0) {
                dev_err(dev->device, "Invalid number of dst SG entries.\n");
                return dev->nb_out_sg;
        }
        if ((dev->nb_in_sg + dev->nb_out_sg) > SAHARA_MAX_HW_LINK) {
                dev_err(dev->device, "not enough hw links (%d)\n",
                        dev->nb_in_sg + dev->nb_out_sg);
                return -EINVAL;
        }

        ret = dma_map_sg(dev->device, dev->in_sg, dev->nb_in_sg,
                         DMA_TO_DEVICE);
        if (ret != dev->nb_in_sg) {
                dev_err(dev->device, "couldn't map in sg\n");
                return -EINVAL;
        }
        ret = dma_map_sg(dev->device, dev->out_sg, dev->nb_out_sg,
                         DMA_FROM_DEVICE);
        if (ret != dev->nb_out_sg) {
                dev_err(dev->device, "couldn't map out sg\n");
                goto unmap_in;
        }

        /* Create input links */
        dev->hw_desc[idx]->p1 = dev->hw_phys_link[0];
        sg = dev->in_sg;
        for (i = 0; i < dev->nb_in_sg; i++) {
                dev->hw_link[i]->len = sg->length;
                dev->hw_link[i]->p = sg->dma_address;
                if (i == (dev->nb_in_sg - 1)) {
                        dev->hw_link[i]->next = 0;
                } else {
                        dev->hw_link[i]->next = dev->hw_phys_link[i + 1];
                        sg = sg_next(sg);
                }
        }

        /* Create output links */
        dev->hw_desc[idx]->p2 = dev->hw_phys_link[i];
        sg = dev->out_sg;
        for (j = i; j < dev->nb_out_sg + i; j++) {
                dev->hw_link[j]->len = sg->length;
                dev->hw_link[j]->p = sg->dma_address;
                if (j == (dev->nb_out_sg + i - 1)) {
                        dev->hw_link[j]->next = 0;
                } else {
                        dev->hw_link[j]->next = dev->hw_phys_link[j + 1];
                        sg = sg_next(sg);
                }
        }

        /* Fill remaining fields of hw_desc[1] */
        dev->hw_desc[idx]->hdr = sahara_aes_data_link_hdr(dev);
        dev->hw_desc[idx]->len1 = dev->total;
        dev->hw_desc[idx]->len2 = dev->total;
        dev->hw_desc[idx]->next = 0;

        sahara_dump_descriptors(dev);
        sahara_dump_links(dev);

        /* Hand the chain to the DMA unit to start processing */
        sahara_write(dev, dev->hw_phys_desc[0], SAHARA_REG_DAR);

        return 0;

unmap_in:
        dma_unmap_sg(dev->device, dev->in_sg, dev->nb_in_sg,
                DMA_TO_DEVICE);

        return -EINVAL;
}
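
/*
 * Illustrative sketch (not executed) of what the function above builds for
 * a CBC request carrying a new key, with two-entry src/dst scatterlists:
 *
 *      desc[0]: hdr = key header, p1/len1 = IV, p2/len2 = key,
 *               next = desc[1]
 *      desc[1]: hdr = data header, len1 = len2 = total,
 *               p1 -> link[0] -> link[1]   (input)
 *               p2 -> link[2] -> link[3]   (output), next = 0
 *
 * Writing desc[0]'s physical address to SAHARA_REG_DAR starts the
 * transfer; completion is signalled through sahara_irq_handler() below.
 */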

static int sahara_aes_process(struct skcipher_request *req)
{
        struct sahara_dev *dev = dev_ptr;
        struct sahara_ctx *ctx;
        struct sahara_aes_reqctx *rctx;
        int ret;
        unsigned long timeout;

        /* Request is ready to be dispatched by the device */
        dev_dbg(dev->device,
                "dispatch request (nbytes=%d, src=%p, dst=%p)\n",
                req->cryptlen, req->src, req->dst);

        /* assign new request to device */
        dev->total = req->cryptlen;
        dev->in_sg = req->src;
        dev->out_sg = req->dst;

        rctx = skcipher_request_ctx(req);
        ctx = crypto_skcipher_ctx(crypto_skcipher_reqtfm(req));
        rctx->mode &= FLAGS_MODE_MASK;
        dev->flags = (dev->flags & ~FLAGS_MODE_MASK) | rctx->mode;

        if ((dev->flags & FLAGS_CBC) && req->iv)
                memcpy(dev->iv_base, req->iv, AES_BLOCK_SIZE);

        /* assign new context to device */
        dev->ctx = ctx;

        reinit_completion(&dev->dma_completion);

        ret = sahara_hw_descriptor_create(dev);
        if (ret)
                return -EINVAL;

        timeout = wait_for_completion_timeout(&dev->dma_completion,
                                msecs_to_jiffies(SAHARA_TIMEOUT_MS));

        /* unmap unconditionally so a timeout does not leak the mappings */
        dma_unmap_sg(dev->device, dev->out_sg, dev->nb_out_sg,
                DMA_FROM_DEVICE);
        dma_unmap_sg(dev->device, dev->in_sg, dev->nb_in_sg,
                DMA_TO_DEVICE);

        if (!timeout) {
                dev_err(dev->device, "AES timeout\n");
                return -ETIMEDOUT;
        }

        return 0;
}

static int sahara_aes_setkey(struct crypto_skcipher *tfm, const u8 *key,
                             unsigned int keylen)
{
        struct sahara_ctx *ctx = crypto_skcipher_ctx(tfm);

        ctx->keylen = keylen;

        /* SAHARA only supports 128-bit keys */
        if (keylen == AES_KEYSIZE_128) {
                memcpy(ctx->key, key, keylen);
                ctx->flags |= FLAGS_NEW_KEY;
                return 0;
        }

        if (keylen != AES_KEYSIZE_192 && keylen != AES_KEYSIZE_256)
                return -EINVAL;

        /*
         * The requested key size is not supported by HW, do a fallback.
         */
        crypto_sync_skcipher_clear_flags(ctx->fallback, CRYPTO_TFM_REQ_MASK);
        crypto_sync_skcipher_set_flags(ctx->fallback, tfm->base.crt_flags &
                                                 CRYPTO_TFM_REQ_MASK);
        return crypto_sync_skcipher_setkey(ctx->fallback, key, keylen);
}

static int sahara_aes_crypt(struct skcipher_request *req, unsigned long mode)
{
        struct sahara_aes_reqctx *rctx = skcipher_request_ctx(req);
        struct sahara_dev *dev = dev_ptr;
        int err = 0;

        dev_dbg(dev->device, "nbytes: %d, enc: %d, cbc: %d\n",
                req->cryptlen, !!(mode & FLAGS_ENCRYPT), !!(mode & FLAGS_CBC));

        if (!IS_ALIGNED(req->cryptlen, AES_BLOCK_SIZE)) {
                dev_err(dev->device,
                        "request size is not a multiple of the AES block size\n");
                return -EINVAL;
        }

        rctx->mode = mode;

        mutex_lock(&dev->queue_mutex);
        err = crypto_enqueue_request(&dev->queue, &req->base);
        mutex_unlock(&dev->queue_mutex);

        wake_up_process(dev->kthread);

        return err;
}
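
/*
 * Hypothetical consumer sketch (not part of this driver): a kernel user
 * reaches the queueing path above through the generic skcipher API, e.g.
 *
 *      struct crypto_skcipher *tfm = crypto_alloc_skcipher("cbc(aes)", 0, 0);
 *      struct skcipher_request *r = skcipher_request_alloc(tfm, GFP_KERNEL);
 *      DECLARE_CRYPTO_WAIT(wait);
 *
 *      crypto_skcipher_setkey(tfm, key, 16);  // 128-bit key, handled in HW
 *      skcipher_request_set_callback(r, CRYPTO_TFM_REQ_MAY_BACKLOG |
 *                                    CRYPTO_TFM_REQ_MAY_SLEEP,
 *                                    crypto_req_done, &wait);
 *      skcipher_request_set_crypt(r, src_sg, dst_sg, nbytes, iv);
 *      crypto_wait_req(crypto_skcipher_encrypt(r), &wait);
 *
 * The request is queued by sahara_aes_crypt() and completed asynchronously
 * by the kthread below; error handling is omitted in this sketch.
 */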

static int sahara_aes_ecb_encrypt(struct skcipher_request *req)
{
        struct sahara_ctx *ctx = crypto_skcipher_ctx(
                crypto_skcipher_reqtfm(req));
        int err;

        if (unlikely(ctx->keylen != AES_KEYSIZE_128)) {
                SYNC_SKCIPHER_REQUEST_ON_STACK(subreq, ctx->fallback);

                skcipher_request_set_sync_tfm(subreq, ctx->fallback);
                skcipher_request_set_callback(subreq, req->base.flags,
                                              NULL, NULL);
                skcipher_request_set_crypt(subreq, req->src, req->dst,
                                           req->cryptlen, req->iv);
                err = crypto_skcipher_encrypt(subreq);
                skcipher_request_zero(subreq);
                return err;
        }

        return sahara_aes_crypt(req, FLAGS_ENCRYPT);
}

static int sahara_aes_ecb_decrypt(struct skcipher_request *req)
{
        struct sahara_ctx *ctx = crypto_skcipher_ctx(
                crypto_skcipher_reqtfm(req));
        int err;

        if (unlikely(ctx->keylen != AES_KEYSIZE_128)) {
                SYNC_SKCIPHER_REQUEST_ON_STACK(subreq, ctx->fallback);

                skcipher_request_set_sync_tfm(subreq, ctx->fallback);
                skcipher_request_set_callback(subreq, req->base.flags,
                                              NULL, NULL);
                skcipher_request_set_crypt(subreq, req->src, req->dst,
                                           req->cryptlen, req->iv);
                err = crypto_skcipher_decrypt(subreq);
                skcipher_request_zero(subreq);
                return err;
        }

        return sahara_aes_crypt(req, 0);
}

static int sahara_aes_cbc_encrypt(struct skcipher_request *req)
{
        struct sahara_ctx *ctx = crypto_skcipher_ctx(
                crypto_skcipher_reqtfm(req));
        int err;

        if (unlikely(ctx->keylen != AES_KEYSIZE_128)) {
                SYNC_SKCIPHER_REQUEST_ON_STACK(subreq, ctx->fallback);

                skcipher_request_set_sync_tfm(subreq, ctx->fallback);
                skcipher_request_set_callback(subreq, req->base.flags,
                                              NULL, NULL);
                skcipher_request_set_crypt(subreq, req->src, req->dst,
                                           req->cryptlen, req->iv);
                err = crypto_skcipher_encrypt(subreq);
                skcipher_request_zero(subreq);
                return err;
        }

        return sahara_aes_crypt(req, FLAGS_ENCRYPT | FLAGS_CBC);
}

static int sahara_aes_cbc_decrypt(struct skcipher_request *req)
{
        struct sahara_ctx *ctx = crypto_skcipher_ctx(
                crypto_skcipher_reqtfm(req));
        int err;

        if (unlikely(ctx->keylen != AES_KEYSIZE_128)) {
                SYNC_SKCIPHER_REQUEST_ON_STACK(subreq, ctx->fallback);

                skcipher_request_set_sync_tfm(subreq, ctx->fallback);
                skcipher_request_set_callback(subreq, req->base.flags,
                                              NULL, NULL);
                skcipher_request_set_crypt(subreq, req->src, req->dst,
                                           req->cryptlen, req->iv);
                err = crypto_skcipher_decrypt(subreq);
                skcipher_request_zero(subreq);
                return err;
        }

        return sahara_aes_crypt(req, FLAGS_CBC);
}

static int sahara_aes_init_tfm(struct crypto_skcipher *tfm)
{
        const char *name = crypto_tfm_alg_name(&tfm->base);
        struct sahara_ctx *ctx = crypto_skcipher_ctx(tfm);

        ctx->fallback = crypto_alloc_sync_skcipher(name, 0,
                                              CRYPTO_ALG_NEED_FALLBACK);
        if (IS_ERR(ctx->fallback)) {
                pr_err("Error allocating fallback algo %s\n", name);
                return PTR_ERR(ctx->fallback);
        }

        crypto_skcipher_set_reqsize(tfm, sizeof(struct sahara_aes_reqctx));

        return 0;
}

static void sahara_aes_exit_tfm(struct crypto_skcipher *tfm)
{
        struct sahara_ctx *ctx = crypto_skcipher_ctx(tfm);

        crypto_free_sync_skcipher(ctx->fallback);
}

static u32 sahara_sha_init_hdr(struct sahara_dev *dev,
                              struct sahara_sha_reqctx *rctx)
{
        u32 hdr = 0;

        hdr = rctx->mode;

        if (rctx->first) {
                hdr |= SAHARA_HDR_MDHA_SET_MODE_HASH;
                hdr |= SAHARA_HDR_MDHA_INIT;
        } else {
                hdr |= SAHARA_HDR_MDHA_SET_MODE_MD_KEY;
        }

        if (rctx->last)
                hdr |= SAHARA_HDR_MDHA_PDATA;

        /* keep the header at odd bit parity, see the SKHA note above */
        if (hweight_long(hdr) % 2 == 0)
                hdr |= SAHARA_HDR_PARITY_BIT;

        return hdr;
}
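
/*
 * MDHA descriptor sequence: the "#6"/"#8"/"#10" references in the comments
 * below presumably follow the descriptor type numbering of the SAHARA
 * reference manual. The first block of a hash is started with an
 * init/set-mode descriptor (#8); every later block first reloads the
 * context saved in dev->context_base via a load descriptor (#6) and then
 * hashes with a hash descriptor (#10), chained in sahara_sha_process().
 */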

static int sahara_sha_hw_links_create(struct sahara_dev *dev,
                                       struct sahara_sha_reqctx *rctx,
                                       int start)
{
        struct scatterlist *sg;
        unsigned int i;
        int ret;

        dev->in_sg = rctx->in_sg;

        dev->nb_in_sg = sg_nents_for_len(dev->in_sg, rctx->total);
        if (dev->nb_in_sg < 0) {
                dev_err(dev->device, "Invalid number of src SG entries.\n");
                return dev->nb_in_sg;
        }
        if (dev->nb_in_sg > SAHARA_MAX_HW_LINK) {
                dev_err(dev->device, "not enough hw links (%d)\n",
                        dev->nb_in_sg);
                return -EINVAL;
        }

        sg = dev->in_sg;
        ret = dma_map_sg(dev->device, dev->in_sg, dev->nb_in_sg, DMA_TO_DEVICE);
        if (!ret)
                return -EFAULT;

        for (i = start; i < dev->nb_in_sg + start; i++) {
                dev->hw_link[i]->len = sg->length;
                dev->hw_link[i]->p = sg->dma_address;
                if (i == (dev->nb_in_sg + start - 1)) {
                        dev->hw_link[i]->next = 0;
                } else {
                        dev->hw_link[i]->next = dev->hw_phys_link[i + 1];
                        sg = sg_next(sg);
                }
        }

        return i;
}

static int sahara_sha_hw_data_descriptor_create(struct sahara_dev *dev,
                                                struct sahara_sha_reqctx *rctx,
                                                struct ahash_request *req,
                                                int index)
{
        unsigned int result_len;
        int i = index;

        if (rctx->first)
                /* Create initial descriptor: #8 */
                dev->hw_desc[index]->hdr = sahara_sha_init_hdr(dev, rctx);
        else
                /* Create hash descriptor: #10. Must follow #6. */
                dev->hw_desc[index]->hdr = SAHARA_HDR_MDHA_HASH;

        dev->hw_desc[index]->len1 = rctx->total;
        if (dev->hw_desc[index]->len1 == 0) {
                /* if len1 is 0, p1 must be 0, too */
                dev->hw_desc[index]->p1 = 0;
                rctx->sg_in_idx = 0;
        } else {
                /* Create input links */
                dev->hw_desc[index]->p1 = dev->hw_phys_link[index];
                i = sahara_sha_hw_links_create(dev, rctx, index);

                rctx->sg_in_idx = index;
                if (i < 0)
                        return i;
        }

        dev->hw_desc[index]->p2 = dev->hw_phys_link[i];

        /* Save the context for the next operation */
        result_len = rctx->context_size;
        dev->hw_link[i]->p = dev->context_phys_base;

        dev->hw_link[i]->len = result_len;
        dev->hw_desc[index]->len2 = result_len;

        dev->hw_link[i]->next = 0;

        return 0;
}

/*
 * Load descriptor aka #6
 *
 * To load a previously saved context back to the MDHA unit
 *
 * p1: Saved Context
 * p2: NULL
 *
 */
static int sahara_sha_hw_context_descriptor_create(struct sahara_dev *dev,
                                                struct sahara_sha_reqctx *rctx,
                                                struct ahash_request *req,
                                                int index)
{
        dev->hw_desc[index]->hdr = sahara_sha_init_hdr(dev, rctx);

        dev->hw_desc[index]->len1 = rctx->context_size;
        dev->hw_desc[index]->p1 = dev->hw_phys_link[index];
        dev->hw_desc[index]->len2 = 0;
        dev->hw_desc[index]->p2 = 0;

        dev->hw_link[index]->len = rctx->context_size;
        dev->hw_link[index]->p = dev->context_phys_base;
        dev->hw_link[index]->next = 0;

        return 0;
}

static int sahara_walk_and_recalc(struct scatterlist *sg, unsigned int nbytes)
{
        if (!sg || !sg->length)
                return nbytes;

        while (nbytes && sg) {
                if (nbytes <= sg->length) {
                        sg->length = nbytes;
                        sg_mark_end(sg);
                        break;
                }
                nbytes -= sg->length;
                sg = sg_next(sg);
        }

        return nbytes;
}

static int sahara_sha_prepare_request(struct ahash_request *req)
{
        struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
        struct sahara_sha_reqctx *rctx = ahash_request_ctx(req);
        unsigned int hash_later;
        unsigned int block_size;
        unsigned int len;

        block_size = crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm));

        /* append bytes from previous operation */
        len = rctx->buf_cnt + req->nbytes;

        /* only the last transfer can be padded in hardware */
        if (!rctx->last && (len < block_size)) {
                /* too little data, save it for the next operation */
                scatterwalk_map_and_copy(rctx->buf + rctx->buf_cnt, req->src,
                                         0, req->nbytes, 0);
                rctx->buf_cnt += req->nbytes;

                return 0;
        }

        /* add data from previous operation first */
        if (rctx->buf_cnt)
                memcpy(rctx->rembuf, rctx->buf, rctx->buf_cnt);

        /* data must always be a multiple of block_size */
        hash_later = rctx->last ? 0 : len & (block_size - 1);
        if (hash_later) {
                unsigned int offset = req->nbytes - hash_later;
                /* Save remaining bytes for later use */
                scatterwalk_map_and_copy(rctx->buf, req->src, offset,
                                        hash_later, 0);
        }

        /* nbytes should now be a multiple of blocksize */
        req->nbytes = req->nbytes - hash_later;

        sahara_walk_and_recalc(req->src, req->nbytes);

        /* have data from previous operation and current */
        if (rctx->buf_cnt && req->nbytes) {
                sg_init_table(rctx->in_sg_chain, 2);
                sg_set_buf(rctx->in_sg_chain, rctx->rembuf, rctx->buf_cnt);

                sg_chain(rctx->in_sg_chain, 2, req->src);

                rctx->total = req->nbytes + rctx->buf_cnt;
                rctx->in_sg = rctx->in_sg_chain;

                req->src = rctx->in_sg_chain;
        /* only data from previous operation */
        } else if (rctx->buf_cnt) {
                if (req->src)
                        rctx->in_sg = req->src;
                else
                        rctx->in_sg = rctx->in_sg_chain;
                /* buf was copied into rembuf above */
                sg_init_one(rctx->in_sg, rctx->rembuf, rctx->buf_cnt);
                rctx->total = rctx->buf_cnt;
        /* no data from previous operation */
        } else {
                rctx->in_sg = req->src;
                rctx->total = req->nbytes;
                req->src = rctx->in_sg;
        }

        /* on next call, we only have the remaining data in the buffer */
        rctx->buf_cnt = hash_later;

        return -EINPROGRESS;
}
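
/*
 * Worked example (illustrative numbers): for SHA-256 the block size is 64
 * bytes. With 20 bytes left over in rctx->buf from a previous update and a
 * new 100-byte update, len = 120, so hash_later = 120 & 63 = 56. The last
 * 56 bytes of req->src are stashed in rctx->buf, req->nbytes is trimmed to
 * 44, and rembuf (20 bytes) is chained in front of req->src, giving
 * rctx->total = 64, i.e. exactly one hardware block; buf_cnt becomes 56
 * for the next call.
 */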

static int sahara_sha_process(struct ahash_request *req)
{
        struct sahara_dev *dev = dev_ptr;
        struct sahara_sha_reqctx *rctx = ahash_request_ctx(req);
        int ret;
        unsigned long timeout;

        /* a zero return means the data was merely buffered for later */
        ret = sahara_sha_prepare_request(req);
        if (!ret)
                return ret;

        if (rctx->first) {
                sahara_sha_hw_data_descriptor_create(dev, rctx, req, 0);
                dev->hw_desc[0]->next = 0;
                rctx->first = 0;
        } else {
                memcpy(dev->context_base, rctx->context, rctx->context_size);

                sahara_sha_hw_context_descriptor_create(dev, rctx, req, 0);
                dev->hw_desc[0]->next = dev->hw_phys_desc[1];
                sahara_sha_hw_data_descriptor_create(dev, rctx, req, 1);
                dev->hw_desc[1]->next = 0;
        }

        sahara_dump_descriptors(dev);
        sahara_dump_links(dev);

        reinit_completion(&dev->dma_completion);

        sahara_write(dev, dev->hw_phys_desc[0], SAHARA_REG_DAR);

        timeout = wait_for_completion_timeout(&dev->dma_completion,
                                msecs_to_jiffies(SAHARA_TIMEOUT_MS));
        if (!timeout) {
                dev_err(dev->device, "SHA timeout\n");
                return -ETIMEDOUT;
        }

        if (rctx->sg_in_idx)
                dma_unmap_sg(dev->device, dev->in_sg, dev->nb_in_sg,
                             DMA_TO_DEVICE);

        memcpy(rctx->context, dev->context_base, rctx->context_size);

        if (req->result)
                memcpy(req->result, rctx->context, rctx->digest_size);

        return 0;
}

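/*
 * Single worker thread that serializes all requests: SAHARA processes one
 * descriptor chain at a time (see SAHARA_QUEUE_LENGTH), so AES and SHA
 * requests are pulled off the shared crypto_queue and run synchronously
 * here, with the result reported back through the async request callback.
 */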
static int sahara_queue_manage(void *data)
{
        struct sahara_dev *dev = (struct sahara_dev *)data;
        struct crypto_async_request *async_req;
        struct crypto_async_request *backlog;
        int ret = 0;

        do {
                __set_current_state(TASK_INTERRUPTIBLE);

                mutex_lock(&dev->queue_mutex);
                backlog = crypto_get_backlog(&dev->queue);
                async_req = crypto_dequeue_request(&dev->queue);
                mutex_unlock(&dev->queue_mutex);

                if (backlog)
                        backlog->complete(backlog, -EINPROGRESS);

                if (async_req) {
                        if (crypto_tfm_alg_type(async_req->tfm) ==
                            CRYPTO_ALG_TYPE_AHASH) {
                                struct ahash_request *req =
                                        ahash_request_cast(async_req);

                                ret = sahara_sha_process(req);
                        } else {
                                struct skcipher_request *req =
                                        skcipher_request_cast(async_req);

                                ret = sahara_aes_process(req);
                        }

                        async_req->complete(async_req, ret);

                        continue;
                }

                schedule();
        } while (!kthread_should_stop());

        return 0;
}

static int sahara_sha_enqueue(struct ahash_request *req, int last)
{
        struct sahara_sha_reqctx *rctx = ahash_request_ctx(req);
        struct sahara_dev *dev = dev_ptr;
        int ret;

        if (!req->nbytes && !last)
                return 0;

        rctx->last = last;

        if (!rctx->active) {
                rctx->active = 1;
                rctx->first = 1;
        }

        mutex_lock(&dev->queue_mutex);
        ret = crypto_enqueue_request(&dev->queue, &req->base);
        mutex_unlock(&dev->queue_mutex);

        wake_up_process(dev->kthread);

        return ret;
}

static int sahara_sha_init(struct ahash_request *req)
{
        struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
        struct sahara_sha_reqctx *rctx = ahash_request_ctx(req);

        memset(rctx, 0, sizeof(*rctx));

        switch (crypto_ahash_digestsize(tfm)) {
        case SHA1_DIGEST_SIZE:
                rctx->mode |= SAHARA_HDR_MDHA_ALG_SHA1;
                rctx->digest_size = SHA1_DIGEST_SIZE;
                break;
        case SHA256_DIGEST_SIZE:
                rctx->mode |= SAHARA_HDR_MDHA_ALG_SHA256;
                rctx->digest_size = SHA256_DIGEST_SIZE;
                break;
        default:
                return -EINVAL;
        }

        rctx->context_size = rctx->digest_size + 4;
        rctx->active = 0;

        return 0;
}

static int sahara_sha_update(struct ahash_request *req)
{
        return sahara_sha_enqueue(req, 0);
}

static int sahara_sha_final(struct ahash_request *req)
{
        req->nbytes = 0;
        return sahara_sha_enqueue(req, 1);
}

static int sahara_sha_finup(struct ahash_request *req)
{
        return sahara_sha_enqueue(req, 1);
}

static int sahara_sha_digest(struct ahash_request *req)
{
        sahara_sha_init(req);

        return sahara_sha_finup(req);
}

static int sahara_sha_export(struct ahash_request *req, void *out)
{
        struct sahara_sha_reqctx *rctx = ahash_request_ctx(req);

        memcpy(out, rctx, sizeof(struct sahara_sha_reqctx));

        return 0;
}

static int sahara_sha_import(struct ahash_request *req, const void *in)
{
        struct sahara_sha_reqctx *rctx = ahash_request_ctx(req);

        memcpy(rctx, in, sizeof(struct sahara_sha_reqctx));

        return 0;
}

static int sahara_sha_cra_init(struct crypto_tfm *tfm)
{
        crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
                                 sizeof(struct sahara_sha_reqctx) +
                                 SHA_BUFFER_LEN + SHA256_BLOCK_SIZE);

        return 0;
}

static struct skcipher_alg aes_algs[] = {
{
        .base.cra_name          = "ecb(aes)",
        .base.cra_driver_name   = "sahara-ecb-aes",
        .base.cra_priority      = 300,
        .base.cra_flags         = CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK,
        .base.cra_blocksize     = AES_BLOCK_SIZE,
        .base.cra_ctxsize       = sizeof(struct sahara_ctx),
        .base.cra_alignmask     = 0x0,
        .base.cra_module        = THIS_MODULE,

        .init                   = sahara_aes_init_tfm,
        .exit                   = sahara_aes_exit_tfm,
        .min_keysize            = AES_MIN_KEY_SIZE,
        .max_keysize            = AES_MAX_KEY_SIZE,
        .setkey                 = sahara_aes_setkey,
        .encrypt                = sahara_aes_ecb_encrypt,
        .decrypt                = sahara_aes_ecb_decrypt,
}, {
        .base.cra_name          = "cbc(aes)",
        .base.cra_driver_name   = "sahara-cbc-aes",
        .base.cra_priority      = 300,
        .base.cra_flags         = CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK,
        .base.cra_blocksize     = AES_BLOCK_SIZE,
        .base.cra_ctxsize       = sizeof(struct sahara_ctx),
        .base.cra_alignmask     = 0x0,
        .base.cra_module        = THIS_MODULE,

        .init                   = sahara_aes_init_tfm,
        .exit                   = sahara_aes_exit_tfm,
        .min_keysize            = AES_MIN_KEY_SIZE,
        .max_keysize            = AES_MAX_KEY_SIZE,
        .ivsize                 = AES_BLOCK_SIZE,
        .setkey                 = sahara_aes_setkey,
        .encrypt                = sahara_aes_cbc_encrypt,
        .decrypt                = sahara_aes_cbc_decrypt,
}
};
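
/*
 * Note: CRYPTO_ALG_NEED_FALLBACK is set because only 128-bit AES keys are
 * handled in hardware; 192/256-bit keys go through the sync skcipher
 * fallback allocated in sahara_aes_init_tfm(). Passing the same flag as
 * the mask there keeps the core from selecting an implementation that
 * itself needs a fallback (such as this one).
 */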

static struct ahash_alg sha_v3_algs[] = {
{
        .init           = sahara_sha_init,
        .update         = sahara_sha_update,
        .final          = sahara_sha_final,
        .finup          = sahara_sha_finup,
        .digest         = sahara_sha_digest,
        .export         = sahara_sha_export,
        .import         = sahara_sha_import,
        .halg.digestsize        = SHA1_DIGEST_SIZE,
        .halg.statesize         = sizeof(struct sahara_sha_reqctx),
        .halg.base      = {
                .cra_name               = "sha1",
                .cra_driver_name        = "sahara-sha1",
                .cra_priority           = 300,
                .cra_flags              = CRYPTO_ALG_ASYNC |
                                                CRYPTO_ALG_NEED_FALLBACK,
                .cra_blocksize          = SHA1_BLOCK_SIZE,
                .cra_ctxsize            = sizeof(struct sahara_ctx),
                .cra_alignmask          = 0,
                .cra_module             = THIS_MODULE,
                .cra_init               = sahara_sha_cra_init,
        }
},
};

static struct ahash_alg sha_v4_algs[] = {
{
        .init           = sahara_sha_init,
        .update         = sahara_sha_update,
        .final          = sahara_sha_final,
        .finup          = sahara_sha_finup,
        .digest         = sahara_sha_digest,
        .export         = sahara_sha_export,
        .import         = sahara_sha_import,
        .halg.digestsize        = SHA256_DIGEST_SIZE,
        .halg.statesize         = sizeof(struct sahara_sha_reqctx),
        .halg.base      = {
                .cra_name               = "sha256",
                .cra_driver_name        = "sahara-sha256",
                .cra_priority           = 300,
                .cra_flags              = CRYPTO_ALG_ASYNC |
                                                CRYPTO_ALG_NEED_FALLBACK,
                .cra_blocksize          = SHA256_BLOCK_SIZE,
                .cra_ctxsize            = sizeof(struct sahara_ctx),
                .cra_alignmask          = 0,
                .cra_module             = THIS_MODULE,
                .cra_init               = sahara_sha_cra_init,
        }
},
};

static irqreturn_t sahara_irq_handler(int irq, void *data)
{
        struct sahara_dev *dev = (struct sahara_dev *)data;
        unsigned int stat = sahara_read(dev, SAHARA_REG_STATUS);
        unsigned int err = sahara_read(dev, SAHARA_REG_ERRSTATUS);

        sahara_write(dev, SAHARA_CMD_CLEAR_INT | SAHARA_CMD_CLEAR_ERR,
                     SAHARA_REG_CMD);

        sahara_decode_status(dev, stat);

        if (SAHARA_STATUS_GET_STATE(stat) == SAHARA_STATE_BUSY) {
                return IRQ_NONE;
        } else if (SAHARA_STATUS_GET_STATE(stat) == SAHARA_STATE_COMPLETE) {
                dev->error = 0;
        } else {
                sahara_decode_error(dev, err);
                dev->error = -EINVAL;
        }

        complete(&dev->dma_completion);

        return IRQ_HANDLED;
}

static int sahara_register_algs(struct sahara_dev *dev)
{
        int err;
        unsigned int i, j, k, l;

        for (i = 0; i < ARRAY_SIZE(aes_algs); i++) {
                err = crypto_register_skcipher(&aes_algs[i]);
                if (err)
                        goto err_aes_algs;
        }

        for (k = 0; k < ARRAY_SIZE(sha_v3_algs); k++) {
                err = crypto_register_ahash(&sha_v3_algs[k]);
                if (err)
                        goto err_sha_v3_algs;
        }

        if (dev->version > SAHARA_VERSION_3)
                for (l = 0; l < ARRAY_SIZE(sha_v4_algs); l++) {
                        err = crypto_register_ahash(&sha_v4_algs[l]);
                        if (err)
                                goto err_sha_v4_algs;
                }

        return 0;

err_sha_v4_algs:
        for (j = 0; j < l; j++)
                crypto_unregister_ahash(&sha_v4_algs[j]);

err_sha_v3_algs:
        for (j = 0; j < k; j++)
                crypto_unregister_ahash(&sha_v3_algs[j]);

err_aes_algs:
        for (j = 0; j < i; j++)
                crypto_unregister_skcipher(&aes_algs[j]);

        return err;
}

static void sahara_unregister_algs(struct sahara_dev *dev)
{
        unsigned int i;

        for (i = 0; i < ARRAY_SIZE(aes_algs); i++)
                crypto_unregister_skcipher(&aes_algs[i]);

        for (i = 0; i < ARRAY_SIZE(sha_v3_algs); i++)
                crypto_unregister_ahash(&sha_v3_algs[i]);

        if (dev->version > SAHARA_VERSION_3)
                for (i = 0; i < ARRAY_SIZE(sha_v4_algs); i++)
                        crypto_unregister_ahash(&sha_v4_algs[i]);
}

static const struct platform_device_id sahara_platform_ids[] = {
        { .name = "sahara-imx27" },
        { /* sentinel */ }
};
MODULE_DEVICE_TABLE(platform, sahara_platform_ids);

static const struct of_device_id sahara_dt_ids[] = {
        { .compatible = "fsl,imx53-sahara" },
        { .compatible = "fsl,imx27-sahara" },
        { /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, sahara_dt_ids);
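
/*
 * Example (sketch) of a matching device tree node; the register address,
 * interrupt number and clock phandles are illustrative and belong in the
 * SoC dtsi, not here:
 *
 *      crypto@63ff8000 {
 *              compatible = "fsl,imx53-sahara";
 *              reg = <0x63ff8000 0x4000>;
 *              interrupts = <19>;
 *              clocks = <&clks IMX5_CLK_SAHARA_IPG_GATE>,
 *                       <&clks IMX5_CLK_SAHARA_IPG_GATE>;
 *              clock-names = "ipg", "ahb";
 *      };
 */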

static int sahara_probe(struct platform_device *pdev)
{
        struct sahara_dev *dev;
        u32 version;
        int irq;
        int err;
        int i;

        dev = devm_kzalloc(&pdev->dev, sizeof(*dev), GFP_KERNEL);
        if (!dev)
                return -ENOMEM;

        dev->device = &pdev->dev;
        platform_set_drvdata(pdev, dev);

        /* Get the base address */
        dev->regs_base = devm_platform_ioremap_resource(pdev, 0);
        if (IS_ERR(dev->regs_base))
                return PTR_ERR(dev->regs_base);

        /* Get the IRQ */
        irq = platform_get_irq(pdev, 0);
        if (irq < 0)
                return irq;

        err = devm_request_irq(&pdev->dev, irq, sahara_irq_handler,
                               0, dev_name(&pdev->dev), dev);
        if (err) {
                dev_err(&pdev->dev, "failed to request irq\n");
                return err;
        }

        /* clocks */
        dev->clk_ipg = devm_clk_get(&pdev->dev, "ipg");
        if (IS_ERR(dev->clk_ipg)) {
                dev_err(&pdev->dev, "Could not get ipg clock\n");
                return PTR_ERR(dev->clk_ipg);
        }

        dev->clk_ahb = devm_clk_get(&pdev->dev, "ahb");
        if (IS_ERR(dev->clk_ahb)) {
                dev_err(&pdev->dev, "Could not get ahb clock\n");
                return PTR_ERR(dev->clk_ahb);
        }

        /* Allocate HW descriptors */
        dev->hw_desc[0] = dmam_alloc_coherent(&pdev->dev,
                        SAHARA_MAX_HW_DESC * sizeof(struct sahara_hw_desc),
                        &dev->hw_phys_desc[0], GFP_KERNEL);
        if (!dev->hw_desc[0]) {
                dev_err(&pdev->dev, "Could not allocate hw descriptors\n");
                return -ENOMEM;
        }
        dev->hw_desc[1] = dev->hw_desc[0] + 1;
        dev->hw_phys_desc[1] = dev->hw_phys_desc[0] +
                                sizeof(struct sahara_hw_desc);

        /* Allocate space for iv and key */
        dev->key_base = dmam_alloc_coherent(&pdev->dev, 2 * AES_KEYSIZE_128,
                                &dev->key_phys_base, GFP_KERNEL);
        if (!dev->key_base) {
                dev_err(&pdev->dev, "Could not allocate memory for key\n");
                return -ENOMEM;
        }
        dev->iv_base = dev->key_base + AES_KEYSIZE_128;
        dev->iv_phys_base = dev->key_phys_base + AES_KEYSIZE_128;

        /* Allocate space for context: largest digest + message length field */
        dev->context_base = dmam_alloc_coherent(&pdev->dev,
                                        SHA256_DIGEST_SIZE + 4,
                                        &dev->context_phys_base, GFP_KERNEL);
        if (!dev->context_base) {
                dev_err(&pdev->dev, "Could not allocate memory for MDHA context\n");
                return -ENOMEM;
        }

        /* Allocate space for HW links */
        dev->hw_link[0] = dmam_alloc_coherent(&pdev->dev,
                        SAHARA_MAX_HW_LINK * sizeof(struct sahara_hw_link),
                        &dev->hw_phys_link[0], GFP_KERNEL);
        if (!dev->hw_link[0]) {
                dev_err(&pdev->dev, "Could not allocate hw links\n");
                return -ENOMEM;
        }
        for (i = 1; i < SAHARA_MAX_HW_LINK; i++) {
                dev->hw_phys_link[i] = dev->hw_phys_link[i - 1] +
                                        sizeof(struct sahara_hw_link);
                dev->hw_link[i] = dev->hw_link[i - 1] + 1;
        }

        crypto_init_queue(&dev->queue, SAHARA_QUEUE_LENGTH);

        mutex_init(&dev->queue_mutex);

        dev_ptr = dev;

        dev->kthread = kthread_run(sahara_queue_manage, dev, "sahara_crypto");
        if (IS_ERR(dev->kthread))
                return PTR_ERR(dev->kthread);

        init_completion(&dev->dma_completion);

        err = clk_prepare_enable(dev->clk_ipg);
        if (err)
                return err;
        err = clk_prepare_enable(dev->clk_ahb);
        if (err)
                goto clk_ipg_disable;

        version = sahara_read(dev, SAHARA_REG_VERSION);
        if (of_device_is_compatible(pdev->dev.of_node, "fsl,imx27-sahara")) {
                if (version != SAHARA_VERSION_3)
                        err = -ENODEV;
        } else if (of_device_is_compatible(pdev->dev.of_node,
                        "fsl,imx53-sahara")) {
                /* on i.MX53 the version lives in bits 15:8 of the register */
                if (((version >> 8) & 0xff) != SAHARA_VERSION_4)
                        err = -ENODEV;
                version = (version >> 8) & 0xff;
        }
        if (err == -ENODEV) {
                dev_err(&pdev->dev, "SAHARA version %d not supported\n",
                                version);
                goto err_algs;
        }

        dev->version = version;

        sahara_write(dev, SAHARA_CMD_RESET | SAHARA_CMD_MODE_BATCH,
                     SAHARA_REG_CMD);
        sahara_write(dev, SAHARA_CONTROL_SET_THROTTLE(0) |
                        SAHARA_CONTROL_SET_MAXBURST(8) |
                        SAHARA_CONTROL_RNG_AUTORSD |
                        SAHARA_CONTROL_ENABLE_INT,
                        SAHARA_REG_CONTROL);

        err = sahara_register_algs(dev);
        if (err)
                goto err_algs;

        dev_info(&pdev->dev, "SAHARA version %d initialized\n", version);

        return 0;

err_algs:
        kthread_stop(dev->kthread);
        dev_ptr = NULL;
        clk_disable_unprepare(dev->clk_ahb);
clk_ipg_disable:
        clk_disable_unprepare(dev->clk_ipg);

        return err;
}

static int sahara_remove(struct platform_device *pdev)
{
        struct sahara_dev *dev = platform_get_drvdata(pdev);

        kthread_stop(dev->kthread);

        sahara_unregister_algs(dev);

        clk_disable_unprepare(dev->clk_ipg);
        clk_disable_unprepare(dev->clk_ahb);

        dev_ptr = NULL;

        return 0;
}

static struct platform_driver sahara_driver = {
        .probe          = sahara_probe,
        .remove         = sahara_remove,
        .driver         = {
                .name   = SAHARA_NAME,
                .of_match_table = sahara_dt_ids,
        },
        .id_table = sahara_platform_ids,
};

module_platform_driver(sahara_driver);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Javier Martin <javier.martin@vista-silicon.com>");
MODULE_AUTHOR("Steffen Trumtrar <s.trumtrar@pengutronix.de>");
MODULE_DESCRIPTION("SAHARA2 HW crypto accelerator");