linux/drivers/crypto/sahara.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Cryptographic API.
 *
 * Support for SAHARA cryptographic accelerator.
 *
 * Copyright (c) 2014 Steffen Trumtrar <s.trumtrar@pengutronix.de>
 * Copyright (c) 2013 Vista Silicon S.L.
 * Author: Javier Martin <javier.martin@vista-silicon.com>
 *
 * Based on omap-aes.c and tegra-aes.c
 */

#include <crypto/aes.h>
#include <crypto/internal/hash.h>
#include <crypto/internal/skcipher.h>
#include <crypto/scatterwalk.h>
#include <crypto/sha.h>

#include <linux/clk.h>
#include <linux/dma-mapping.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/kernel.h>
#include <linux/kthread.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>

#define SHA_BUFFER_LEN          PAGE_SIZE
#define SAHARA_MAX_SHA_BLOCK_SIZE       SHA256_BLOCK_SIZE

#define SAHARA_NAME "sahara"
#define SAHARA_VERSION_3        3
#define SAHARA_VERSION_4        4
#define SAHARA_TIMEOUT_MS       1000
#define SAHARA_MAX_HW_DESC      2
#define SAHARA_MAX_HW_LINK      20

#define FLAGS_MODE_MASK         0x000f
#define FLAGS_ENCRYPT           BIT(0)
#define FLAGS_CBC               BIT(1)
#define FLAGS_NEW_KEY           BIT(3)

#define SAHARA_HDR_BASE                 0x00800000
#define SAHARA_HDR_SKHA_ALG_AES 0
#define SAHARA_HDR_SKHA_OP_ENC          (1 << 2)
#define SAHARA_HDR_SKHA_MODE_ECB        (0 << 3)
#define SAHARA_HDR_SKHA_MODE_CBC        (1 << 3)
#define SAHARA_HDR_FORM_DATA            (5 << 16)
#define SAHARA_HDR_FORM_KEY             (8 << 16)
#define SAHARA_HDR_LLO                  (1 << 24)
#define SAHARA_HDR_CHA_SKHA             (1 << 28)
#define SAHARA_HDR_CHA_MDHA             (2 << 28)
#define SAHARA_HDR_PARITY_BIT           (1 << 31)

#define SAHARA_HDR_MDHA_SET_MODE_MD_KEY 0x20880000
#define SAHARA_HDR_MDHA_SET_MODE_HASH   0x208D0000
#define SAHARA_HDR_MDHA_HASH            0xA0850000
#define SAHARA_HDR_MDHA_STORE_DIGEST    0x20820000
#define SAHARA_HDR_MDHA_ALG_SHA1        0
#define SAHARA_HDR_MDHA_ALG_MD5         1
#define SAHARA_HDR_MDHA_ALG_SHA256      2
#define SAHARA_HDR_MDHA_ALG_SHA224      3
#define SAHARA_HDR_MDHA_PDATA           (1 << 2)
#define SAHARA_HDR_MDHA_HMAC            (1 << 3)
#define SAHARA_HDR_MDHA_INIT            (1 << 5)
#define SAHARA_HDR_MDHA_IPAD            (1 << 6)
#define SAHARA_HDR_MDHA_OPAD            (1 << 7)
#define SAHARA_HDR_MDHA_SWAP            (1 << 8)
#define SAHARA_HDR_MDHA_MAC_FULL        (1 << 9)
#define SAHARA_HDR_MDHA_SSL             (1 << 10)

/* SAHARA can only process one request at a time */
#define SAHARA_QUEUE_LENGTH     1

#define SAHARA_REG_VERSION      0x00
#define SAHARA_REG_DAR          0x04
#define SAHARA_REG_CONTROL      0x08
#define         SAHARA_CONTROL_SET_THROTTLE(x)  (((x) & 0xff) << 24)
#define         SAHARA_CONTROL_SET_MAXBURST(x)  (((x) & 0xff) << 16)
#define         SAHARA_CONTROL_RNG_AUTORSD      (1 << 7)
#define         SAHARA_CONTROL_ENABLE_INT       (1 << 4)
#define SAHARA_REG_CMD          0x0C
#define         SAHARA_CMD_RESET                (1 << 0)
#define         SAHARA_CMD_CLEAR_INT            (1 << 8)
#define         SAHARA_CMD_CLEAR_ERR            (1 << 9)
#define         SAHARA_CMD_SINGLE_STEP          (1 << 10)
#define         SAHARA_CMD_MODE_BATCH           (1 << 16)
#define         SAHARA_CMD_MODE_DEBUG           (1 << 18)
#define SAHARA_REG_STATUS       0x10
#define         SAHARA_STATUS_GET_STATE(x)      ((x) & 0x7)
#define                 SAHARA_STATE_IDLE       0
#define                 SAHARA_STATE_BUSY       1
#define                 SAHARA_STATE_ERR        2
#define                 SAHARA_STATE_FAULT      3
#define                 SAHARA_STATE_COMPLETE   4
#define                 SAHARA_STATE_COMP_FLAG  (1 << 2)
#define         SAHARA_STATUS_DAR_FULL          (1 << 3)
#define         SAHARA_STATUS_ERROR             (1 << 4)
#define         SAHARA_STATUS_SECURE            (1 << 5)
#define         SAHARA_STATUS_FAIL              (1 << 6)
#define         SAHARA_STATUS_INIT              (1 << 7)
#define         SAHARA_STATUS_RNG_RESEED        (1 << 8)
#define         SAHARA_STATUS_ACTIVE_RNG        (1 << 9)
#define         SAHARA_STATUS_ACTIVE_MDHA       (1 << 10)
#define         SAHARA_STATUS_ACTIVE_SKHA       (1 << 11)
#define         SAHARA_STATUS_MODE_BATCH        (1 << 16)
#define         SAHARA_STATUS_MODE_DEDICATED    (1 << 17)
#define         SAHARA_STATUS_MODE_DEBUG        (1 << 18)
#define         SAHARA_STATUS_GET_ISTATE(x)     (((x) >> 24) & 0xff)
#define SAHARA_REG_ERRSTATUS    0x14
#define         SAHARA_ERRSTATUS_GET_SOURCE(x)  ((x) & 0xf)
#define                 SAHARA_ERRSOURCE_CHA    14
#define                 SAHARA_ERRSOURCE_DMA    15
#define         SAHARA_ERRSTATUS_DMA_DIR        (1 << 8)
#define         SAHARA_ERRSTATUS_GET_DMASZ(x)   (((x) >> 9) & 0x3)
#define         SAHARA_ERRSTATUS_GET_DMASRC(x)  (((x) >> 13) & 0x7)
#define         SAHARA_ERRSTATUS_GET_CHASRC(x)  (((x) >> 16) & 0xfff)
#define         SAHARA_ERRSTATUS_GET_CHAERR(x)  (((x) >> 28) & 0x3)
#define SAHARA_REG_FADDR        0x18
#define SAHARA_REG_CDAR         0x1C
#define SAHARA_REG_IDAR         0x20

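/*
 * DMA structures walked by the SAHARA unit. Each descriptor carries two
 * (pointer, length) pairs and a link to the next descriptor. As used by
 * this driver, a pointer refers either to a flat buffer (key/IV/context)
 * or, for scatter-gather data, to a chain of struct sahara_hw_link
 * entries.
 */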
struct sahara_hw_desc {
        u32     hdr;
        u32     len1;
        u32     p1;
        u32     len2;
        u32     p2;
        u32     next;
};

struct sahara_hw_link {
        u32     len;
        u32     p;
        u32     next;
};

struct sahara_ctx {
        unsigned long flags;

        /* AES-specific context */
        int keylen;
        u8 key[AES_KEYSIZE_128];
        struct crypto_skcipher *fallback;
};

struct sahara_aes_reqctx {
        unsigned long mode;
        struct skcipher_request fallback_req;   // keep at the end
};

/*
 * struct sahara_sha_reqctx - private data per request
 * @buf: holds data for requests smaller than block_size
 * @rembuf: used to prepare one block_size-aligned request
 * @context: hw-specific context for request. Digest is extracted from this
 * @mode: specifies what type of hw-descriptor needs to be built
 * @digest_size: length of digest for this request
 * @context_size: length of hw-context for this request.
 *                Always digest_size + 4
 * @buf_cnt: number of bytes saved in buf
 * @sg_in_idx: number of hw links
 * @in_sg: scatterlist for input data
 * @in_sg_chain: scatterlists for chained input data
 * @total: total number of bytes for transfer
 * @last: is this the last block
 * @first: is this the first block
 * @active: inside a transfer
 */
struct sahara_sha_reqctx {
        u8                      buf[SAHARA_MAX_SHA_BLOCK_SIZE];
        u8                      rembuf[SAHARA_MAX_SHA_BLOCK_SIZE];
        u8                      context[SHA256_DIGEST_SIZE + 4];
        unsigned int            mode;
        unsigned int            digest_size;
        unsigned int            context_size;
        unsigned int            buf_cnt;
        unsigned int            sg_in_idx;
        struct scatterlist      *in_sg;
        struct scatterlist      in_sg_chain[2];
        size_t                  total;
        unsigned int            last;
        unsigned int            first;
        unsigned int            active;
};

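/*
 * Per-device state. The driver keeps a single global instance (dev_ptr
 * below) and serializes all work through one kthread, so only one
 * request is in flight at a time (cf. SAHARA_QUEUE_LENGTH).
 */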
struct sahara_dev {
        struct device           *device;
        unsigned int            version;
        void __iomem            *regs_base;
        struct clk              *clk_ipg;
        struct clk              *clk_ahb;
        struct mutex            queue_mutex;
        struct task_struct      *kthread;
        struct completion       dma_completion;

        struct sahara_ctx       *ctx;
        struct crypto_queue     queue;
        unsigned long           flags;

        struct sahara_hw_desc   *hw_desc[SAHARA_MAX_HW_DESC];
        dma_addr_t              hw_phys_desc[SAHARA_MAX_HW_DESC];

        u8                      *key_base;
        dma_addr_t              key_phys_base;

        u8                      *iv_base;
        dma_addr_t              iv_phys_base;

        u8                      *context_base;
        dma_addr_t              context_phys_base;

        struct sahara_hw_link   *hw_link[SAHARA_MAX_HW_LINK];
        dma_addr_t              hw_phys_link[SAHARA_MAX_HW_LINK];

        size_t                  total;
        struct scatterlist      *in_sg;
        int             nb_in_sg;
        struct scatterlist      *out_sg;
        int             nb_out_sg;

        u32                     error;
};

static struct sahara_dev *dev_ptr;

static inline void sahara_write(struct sahara_dev *dev, u32 data, u32 reg)
{
        writel(data, dev->regs_base + reg);
}

static inline unsigned int sahara_read(struct sahara_dev *dev, u32 reg)
{
        return readl(dev->regs_base + reg);
}
static u32 sahara_aes_key_hdr(struct sahara_dev *dev)
{
        u32 hdr = SAHARA_HDR_BASE | SAHARA_HDR_SKHA_ALG_AES |
                        SAHARA_HDR_FORM_KEY | SAHARA_HDR_LLO |
                        SAHARA_HDR_CHA_SKHA | SAHARA_HDR_PARITY_BIT;

        if (dev->flags & FLAGS_CBC) {
                hdr |= SAHARA_HDR_SKHA_MODE_CBC;
                hdr ^= SAHARA_HDR_PARITY_BIT;
        }

        if (dev->flags & FLAGS_ENCRYPT) {
                hdr |= SAHARA_HDR_SKHA_OP_ENC;
                hdr ^= SAHARA_HDR_PARITY_BIT;
        }

        return hdr;
}

static u32 sahara_aes_data_link_hdr(struct sahara_dev *dev)
{
        return SAHARA_HDR_BASE | SAHARA_HDR_FORM_DATA |
                        SAHARA_HDR_CHA_SKHA | SAHARA_HDR_PARITY_BIT;
}

static const char *sahara_err_src[16] = {
        "No error",
        "Header error",
        "Descriptor length error",
        "Descriptor length or pointer error",
        "Link length error",
        "Link pointer error",
        "Input buffer error",
        "Output buffer error",
        "Output buffer starvation",
        "Internal state fault",
        "General descriptor problem",
        "Reserved",
        "Descriptor address error",
        "Link address error",
        "CHA error",
        "DMA error"
};

static const char *sahara_err_dmasize[4] = {
        "Byte transfer",
        "Half-word transfer",
        "Word transfer",
        "Reserved"
};

static const char *sahara_err_dmasrc[8] = {
        "No error",
        "AHB bus error",
        "Internal IP bus error",
        "Parity error",
        "DMA crosses 256 byte boundary",
        "DMA is busy",
        "Reserved",
        "DMA HW error"
};

static const char *sahara_cha_errsrc[12] = {
        "Input buffer non-empty",
        "Illegal address",
        "Illegal mode",
        "Illegal data size",
        "Illegal key size",
        "Write during processing",
        "CTX read during processing",
        "HW error",
        "Input buffer disabled/underflow",
        "Output buffer disabled/overflow",
        "DES key parity error",
        "Reserved"
};

static const char *sahara_cha_err[4] = { "No error", "SKHA", "MDHA", "RNG" };

static void sahara_decode_error(struct sahara_dev *dev, unsigned int error)
{
        u8 source = SAHARA_ERRSTATUS_GET_SOURCE(error);
        u16 chasrc = ffs(SAHARA_ERRSTATUS_GET_CHASRC(error));

        dev_err(dev->device, "%s: Error Register = 0x%08x\n", __func__, error);

        dev_err(dev->device, "  - %s.\n", sahara_err_src[source]);

        if (source == SAHARA_ERRSOURCE_DMA) {
                if (error & SAHARA_ERRSTATUS_DMA_DIR)
                        dev_err(dev->device, "          * DMA read.\n");
                else
                        dev_err(dev->device, "          * DMA write.\n");

                dev_err(dev->device, "          * %s.\n",
                       sahara_err_dmasize[SAHARA_ERRSTATUS_GET_DMASZ(error)]);
                dev_err(dev->device, "          * %s.\n",
                       sahara_err_dmasrc[SAHARA_ERRSTATUS_GET_DMASRC(error)]);
        } else if (source == SAHARA_ERRSOURCE_CHA) {
                dev_err(dev->device, "          * %s.\n",
                        sahara_cha_errsrc[chasrc]);
                dev_err(dev->device, "          * %s.\n",
                       sahara_cha_err[SAHARA_ERRSTATUS_GET_CHAERR(error)]);
        }
        dev_err(dev->device, "\n");
}

static const char *sahara_state[4] = { "Idle", "Busy", "Error", "HW Fault" };

static void sahara_decode_status(struct sahara_dev *dev, unsigned int status)
{
        u8 state;

        if (!__is_defined(DEBUG))
                return;

        state = SAHARA_STATUS_GET_STATE(status);

        dev_dbg(dev->device, "%s: Status Register = 0x%08x\n",
                __func__, status);

        dev_dbg(dev->device, "  - State = %d:\n", state);
        if (state & SAHARA_STATE_COMP_FLAG)
                dev_dbg(dev->device, "          * Descriptor completed. IRQ pending.\n");

        dev_dbg(dev->device, "          * %s.\n",
               sahara_state[state & ~SAHARA_STATE_COMP_FLAG]);

        if (status & SAHARA_STATUS_DAR_FULL)
                dev_dbg(dev->device, "  - DAR Full.\n");
        if (status & SAHARA_STATUS_ERROR)
                dev_dbg(dev->device, "  - Error.\n");
        if (status & SAHARA_STATUS_SECURE)
                dev_dbg(dev->device, "  - Secure.\n");
        if (status & SAHARA_STATUS_FAIL)
                dev_dbg(dev->device, "  - Fail.\n");
        if (status & SAHARA_STATUS_RNG_RESEED)
                dev_dbg(dev->device, "  - RNG Reseed Request.\n");
        if (status & SAHARA_STATUS_ACTIVE_RNG)
                dev_dbg(dev->device, "  - RNG Active.\n");
        if (status & SAHARA_STATUS_ACTIVE_MDHA)
                dev_dbg(dev->device, "  - MDHA Active.\n");
        if (status & SAHARA_STATUS_ACTIVE_SKHA)
                dev_dbg(dev->device, "  - SKHA Active.\n");

        if (status & SAHARA_STATUS_MODE_BATCH)
                dev_dbg(dev->device, "  - Batch Mode.\n");
        else if (status & SAHARA_STATUS_MODE_DEDICATED)
                dev_dbg(dev->device, "  - Dedicated Mode.\n");
        else if (status & SAHARA_STATUS_MODE_DEBUG)
                dev_dbg(dev->device, "  - Debug Mode.\n");

        dev_dbg(dev->device, "  - Internal state = 0x%02x\n",
               SAHARA_STATUS_GET_ISTATE(status));

        dev_dbg(dev->device, "Current DAR: 0x%08x\n",
                sahara_read(dev, SAHARA_REG_CDAR));
        dev_dbg(dev->device, "Initial DAR: 0x%08x\n\n",
                sahara_read(dev, SAHARA_REG_IDAR));
}

static void sahara_dump_descriptors(struct sahara_dev *dev)
{
        int i;

        if (!__is_defined(DEBUG))
                return;

        for (i = 0; i < SAHARA_MAX_HW_DESC; i++) {
                dev_dbg(dev->device, "Descriptor (%d) (%pad):\n",
                        i, &dev->hw_phys_desc[i]);
                dev_dbg(dev->device, "\thdr = 0x%08x\n", dev->hw_desc[i]->hdr);
                dev_dbg(dev->device, "\tlen1 = %u\n", dev->hw_desc[i]->len1);
                dev_dbg(dev->device, "\tp1 = 0x%08x\n", dev->hw_desc[i]->p1);
                dev_dbg(dev->device, "\tlen2 = %u\n", dev->hw_desc[i]->len2);
                dev_dbg(dev->device, "\tp2 = 0x%08x\n", dev->hw_desc[i]->p2);
                dev_dbg(dev->device, "\tnext = 0x%08x\n",
                        dev->hw_desc[i]->next);
        }
        dev_dbg(dev->device, "\n");
}

static void sahara_dump_links(struct sahara_dev *dev)
{
        int i;

        if (!__is_defined(DEBUG))
                return;

        for (i = 0; i < SAHARA_MAX_HW_LINK; i++) {
                dev_dbg(dev->device, "Link (%d) (%pad):\n",
                        i, &dev->hw_phys_link[i]);
                dev_dbg(dev->device, "\tlen = %u\n", dev->hw_link[i]->len);
                dev_dbg(dev->device, "\tp = 0x%08x\n", dev->hw_link[i]->p);
                dev_dbg(dev->device, "\tnext = 0x%08x\n",
                        dev->hw_link[i]->next);
        }
        dev_dbg(dev->device, "\n");
}

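/*
 * Build the descriptor chain for one AES request and kick it off by
 * writing the first descriptor's address to the DAR register. If a new
 * key (plus IV for CBC) has to be loaded, descriptor 0 becomes a key
 * descriptor chained to the data descriptor; the data descriptor then
 * scatter-gathers input and output through the hw link tables.
 */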
static int sahara_hw_descriptor_create(struct sahara_dev *dev)
{
        struct sahara_ctx *ctx = dev->ctx;
        struct scatterlist *sg;
        int ret;
        int i, j;
        int idx = 0;

        /* Copy new key if necessary */
        if (ctx->flags & FLAGS_NEW_KEY) {
                memcpy(dev->key_base, ctx->key, ctx->keylen);
                ctx->flags &= ~FLAGS_NEW_KEY;

                if (dev->flags & FLAGS_CBC) {
                        dev->hw_desc[idx]->len1 = AES_BLOCK_SIZE;
                        dev->hw_desc[idx]->p1 = dev->iv_phys_base;
                } else {
                        dev->hw_desc[idx]->len1 = 0;
                        dev->hw_desc[idx]->p1 = 0;
                }
                dev->hw_desc[idx]->len2 = ctx->keylen;
                dev->hw_desc[idx]->p2 = dev->key_phys_base;
                dev->hw_desc[idx]->next = dev->hw_phys_desc[1];

                dev->hw_desc[idx]->hdr = sahara_aes_key_hdr(dev);

                idx++;
        }

        dev->nb_in_sg = sg_nents_for_len(dev->in_sg, dev->total);
        if (dev->nb_in_sg < 0) {
                dev_err(dev->device, "Invalid number of src SG.\n");
                return dev->nb_in_sg;
        }
        dev->nb_out_sg = sg_nents_for_len(dev->out_sg, dev->total);
        if (dev->nb_out_sg < 0) {
                dev_err(dev->device, "Invalid number of dst SG.\n");
                return dev->nb_out_sg;
        }
        if ((dev->nb_in_sg + dev->nb_out_sg) > SAHARA_MAX_HW_LINK) {
                dev_err(dev->device, "not enough hw links (%d)\n",
                        dev->nb_in_sg + dev->nb_out_sg);
                return -EINVAL;
        }

        ret = dma_map_sg(dev->device, dev->in_sg, dev->nb_in_sg,
                         DMA_TO_DEVICE);
        if (ret != dev->nb_in_sg) {
                dev_err(dev->device, "couldn't map in sg\n");
                return -EINVAL;
        }
        ret = dma_map_sg(dev->device, dev->out_sg, dev->nb_out_sg,
                         DMA_FROM_DEVICE);
        if (ret != dev->nb_out_sg) {
                dev_err(dev->device, "couldn't map out sg\n");
                goto unmap_in;
        }

        /* Create input links */
        dev->hw_desc[idx]->p1 = dev->hw_phys_link[0];
        sg = dev->in_sg;
        for (i = 0; i < dev->nb_in_sg; i++) {
                dev->hw_link[i]->len = sg->length;
                dev->hw_link[i]->p = sg->dma_address;
                if (i == (dev->nb_in_sg - 1)) {
                        dev->hw_link[i]->next = 0;
                } else {
                        dev->hw_link[i]->next = dev->hw_phys_link[i + 1];
                        sg = sg_next(sg);
                }
        }

        /* Create output links */
        dev->hw_desc[idx]->p2 = dev->hw_phys_link[i];
        sg = dev->out_sg;
        for (j = i; j < dev->nb_out_sg + i; j++) {
                dev->hw_link[j]->len = sg->length;
                dev->hw_link[j]->p = sg->dma_address;
                if (j == (dev->nb_out_sg + i - 1)) {
                        dev->hw_link[j]->next = 0;
                } else {
                        dev->hw_link[j]->next = dev->hw_phys_link[j + 1];
                        sg = sg_next(sg);
                }
        }

        /* Fill remaining fields of hw_desc[1] */
        dev->hw_desc[idx]->hdr = sahara_aes_data_link_hdr(dev);
        dev->hw_desc[idx]->len1 = dev->total;
        dev->hw_desc[idx]->len2 = dev->total;
        dev->hw_desc[idx]->next = 0;

        sahara_dump_descriptors(dev);
        sahara_dump_links(dev);

        sahara_write(dev, dev->hw_phys_desc[0], SAHARA_REG_DAR);

        return 0;

unmap_in:
        dma_unmap_sg(dev->device, dev->in_sg, dev->nb_in_sg,
                DMA_TO_DEVICE);

        return -EINVAL;
}

static int sahara_aes_process(struct skcipher_request *req)
{
        struct sahara_dev *dev = dev_ptr;
        struct sahara_ctx *ctx;
        struct sahara_aes_reqctx *rctx;
        int ret;
        unsigned long timeout;

        /* Request is ready to be dispatched by the device */
        dev_dbg(dev->device,
                "dispatch request (nbytes=%d, src=%p, dst=%p)\n",
                req->cryptlen, req->src, req->dst);

        /* assign new request to device */
        dev->total = req->cryptlen;
        dev->in_sg = req->src;
        dev->out_sg = req->dst;

        rctx = skcipher_request_ctx(req);
        ctx = crypto_skcipher_ctx(crypto_skcipher_reqtfm(req));
        rctx->mode &= FLAGS_MODE_MASK;
        dev->flags = (dev->flags & ~FLAGS_MODE_MASK) | rctx->mode;

        if ((dev->flags & FLAGS_CBC) && req->iv)
                memcpy(dev->iv_base, req->iv, AES_BLOCK_SIZE);

        /* assign new context to device */
        dev->ctx = ctx;

        reinit_completion(&dev->dma_completion);

        ret = sahara_hw_descriptor_create(dev);
        if (ret)
                return -EINVAL;

        timeout = wait_for_completion_timeout(&dev->dma_completion,
                                msecs_to_jiffies(SAHARA_TIMEOUT_MS));
        if (!timeout) {
                dev_err(dev->device, "AES timeout\n");
                return -ETIMEDOUT;
        }

        dma_unmap_sg(dev->device, dev->out_sg, dev->nb_out_sg,
                DMA_FROM_DEVICE);
        dma_unmap_sg(dev->device, dev->in_sg, dev->nb_in_sg,
                DMA_TO_DEVICE);

        return 0;
}

static int sahara_aes_setkey(struct crypto_skcipher *tfm, const u8 *key,
                             unsigned int keylen)
{
        struct sahara_ctx *ctx = crypto_skcipher_ctx(tfm);

        ctx->keylen = keylen;

        /* SAHARA only supports 128-bit keys */
        if (keylen == AES_KEYSIZE_128) {
                memcpy(ctx->key, key, keylen);
                ctx->flags |= FLAGS_NEW_KEY;
                return 0;
        }

        if (keylen != AES_KEYSIZE_192 && keylen != AES_KEYSIZE_256)
                return -EINVAL;

        /*
         * The requested key size is not supported by HW, do a fallback.
         */
        crypto_skcipher_clear_flags(ctx->fallback, CRYPTO_TFM_REQ_MASK);
        crypto_skcipher_set_flags(ctx->fallback, tfm->base.crt_flags &
                                                 CRYPTO_TFM_REQ_MASK);
        return crypto_skcipher_setkey(ctx->fallback, key, keylen);
}

static int sahara_aes_crypt(struct skcipher_request *req, unsigned long mode)
{
        struct sahara_aes_reqctx *rctx = skcipher_request_ctx(req);
        struct sahara_dev *dev = dev_ptr;
        int err = 0;

        dev_dbg(dev->device, "nbytes: %d, enc: %d, cbc: %d\n",
                req->cryptlen, !!(mode & FLAGS_ENCRYPT), !!(mode & FLAGS_CBC));

        if (!IS_ALIGNED(req->cryptlen, AES_BLOCK_SIZE)) {
                dev_err(dev->device,
                        "request size is not an exact number of AES blocks\n");
                return -EINVAL;
        }

        rctx->mode = mode;

        mutex_lock(&dev->queue_mutex);
        err = crypto_enqueue_request(&dev->queue, &req->base);
        mutex_unlock(&dev->queue_mutex);

        wake_up_process(dev->kthread);

        return err;
}

static int sahara_aes_ecb_encrypt(struct skcipher_request *req)
{
        struct sahara_aes_reqctx *rctx = skcipher_request_ctx(req);
        struct sahara_ctx *ctx = crypto_skcipher_ctx(
                crypto_skcipher_reqtfm(req));

        if (unlikely(ctx->keylen != AES_KEYSIZE_128)) {
                skcipher_request_set_tfm(&rctx->fallback_req, ctx->fallback);
                skcipher_request_set_callback(&rctx->fallback_req,
                                              req->base.flags,
                                              req->base.complete,
                                              req->base.data);
                skcipher_request_set_crypt(&rctx->fallback_req, req->src,
                                           req->dst, req->cryptlen, req->iv);
                return crypto_skcipher_encrypt(&rctx->fallback_req);
        }

        return sahara_aes_crypt(req, FLAGS_ENCRYPT);
}

static int sahara_aes_ecb_decrypt(struct skcipher_request *req)
{
        struct sahara_aes_reqctx *rctx = skcipher_request_ctx(req);
        struct sahara_ctx *ctx = crypto_skcipher_ctx(
                crypto_skcipher_reqtfm(req));

        if (unlikely(ctx->keylen != AES_KEYSIZE_128)) {
                skcipher_request_set_tfm(&rctx->fallback_req, ctx->fallback);
                skcipher_request_set_callback(&rctx->fallback_req,
                                              req->base.flags,
                                              req->base.complete,
                                              req->base.data);
                skcipher_request_set_crypt(&rctx->fallback_req, req->src,
                                           req->dst, req->cryptlen, req->iv);
                return crypto_skcipher_decrypt(&rctx->fallback_req);
        }

        return sahara_aes_crypt(req, 0);
}

static int sahara_aes_cbc_encrypt(struct skcipher_request *req)
{
        struct sahara_aes_reqctx *rctx = skcipher_request_ctx(req);
        struct sahara_ctx *ctx = crypto_skcipher_ctx(
                crypto_skcipher_reqtfm(req));

        if (unlikely(ctx->keylen != AES_KEYSIZE_128)) {
                skcipher_request_set_tfm(&rctx->fallback_req, ctx->fallback);
                skcipher_request_set_callback(&rctx->fallback_req,
                                              req->base.flags,
                                              req->base.complete,
                                              req->base.data);
                skcipher_request_set_crypt(&rctx->fallback_req, req->src,
                                           req->dst, req->cryptlen, req->iv);
                return crypto_skcipher_encrypt(&rctx->fallback_req);
        }

        return sahara_aes_crypt(req, FLAGS_ENCRYPT | FLAGS_CBC);
}

static int sahara_aes_cbc_decrypt(struct skcipher_request *req)
{
        struct sahara_aes_reqctx *rctx = skcipher_request_ctx(req);
        struct sahara_ctx *ctx = crypto_skcipher_ctx(
                crypto_skcipher_reqtfm(req));

        if (unlikely(ctx->keylen != AES_KEYSIZE_128)) {
                skcipher_request_set_tfm(&rctx->fallback_req, ctx->fallback);
                skcipher_request_set_callback(&rctx->fallback_req,
                                              req->base.flags,
                                              req->base.complete,
                                              req->base.data);
                skcipher_request_set_crypt(&rctx->fallback_req, req->src,
                                           req->dst, req->cryptlen, req->iv);
                return crypto_skcipher_decrypt(&rctx->fallback_req);
        }

        return sahara_aes_crypt(req, FLAGS_CBC);
}

static int sahara_aes_init_tfm(struct crypto_skcipher *tfm)
{
        const char *name = crypto_tfm_alg_name(&tfm->base);
        struct sahara_ctx *ctx = crypto_skcipher_ctx(tfm);

        ctx->fallback = crypto_alloc_skcipher(name, 0,
                                              CRYPTO_ALG_NEED_FALLBACK);
        if (IS_ERR(ctx->fallback)) {
                pr_err("Error allocating fallback algo %s\n", name);
                return PTR_ERR(ctx->fallback);
        }

        crypto_skcipher_set_reqsize(tfm, sizeof(struct sahara_aes_reqctx) +
                                         crypto_skcipher_reqsize(ctx->fallback));

        return 0;
}

static void sahara_aes_exit_tfm(struct crypto_skcipher *tfm)
{
        struct sahara_ctx *ctx = crypto_skcipher_ctx(tfm);

        crypto_free_skcipher(ctx->fallback);
}

static u32 sahara_sha_init_hdr(struct sahara_dev *dev,
                              struct sahara_sha_reqctx *rctx)
{
        u32 hdr = 0;

        hdr = rctx->mode;

        if (rctx->first) {
                hdr |= SAHARA_HDR_MDHA_SET_MODE_HASH;
                hdr |= SAHARA_HDR_MDHA_INIT;
        } else {
                hdr |= SAHARA_HDR_MDHA_SET_MODE_MD_KEY;
        }

        if (rctx->last)
                hdr |= SAHARA_HDR_MDHA_PDATA;

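        /* the complete descriptor header must carry odd parity */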
        if (hweight_long(hdr) % 2 == 0)
                hdr |= SAHARA_HDR_PARITY_BIT;

        return hdr;
}

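/*
 * Populate hw links for the current input scatterlist, starting at link
 * index 'start'. Returns the index right after the last link used (so
 * the caller can append the context link) or a negative error code.
 */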
static int sahara_sha_hw_links_create(struct sahara_dev *dev,
                                       struct sahara_sha_reqctx *rctx,
                                       int start)
{
        struct scatterlist *sg;
        unsigned int i;
        int ret;

        dev->in_sg = rctx->in_sg;

        dev->nb_in_sg = sg_nents_for_len(dev->in_sg, rctx->total);
        if (dev->nb_in_sg < 0) {
                dev_err(dev->device, "Invalid number of src SG.\n");
                return dev->nb_in_sg;
        }
        if (dev->nb_in_sg > SAHARA_MAX_HW_LINK) {
                dev_err(dev->device, "not enough hw links (%d)\n",
                        dev->nb_in_sg);
                return -EINVAL;
        }

        sg = dev->in_sg;
        ret = dma_map_sg(dev->device, dev->in_sg, dev->nb_in_sg, DMA_TO_DEVICE);
        if (!ret)
                return -EFAULT;

        for (i = start; i < dev->nb_in_sg + start; i++) {
                dev->hw_link[i]->len = sg->length;
                dev->hw_link[i]->p = sg->dma_address;
                if (i == (dev->nb_in_sg + start - 1)) {
                        dev->hw_link[i]->next = 0;
                } else {
                        dev->hw_link[i]->next = dev->hw_phys_link[i + 1];
                        sg = sg_next(sg);
                }
        }

        return i;
}

static int sahara_sha_hw_data_descriptor_create(struct sahara_dev *dev,
                                                struct sahara_sha_reqctx *rctx,
                                                struct ahash_request *req,
                                                int index)
{
        unsigned int result_len;
        int i = index;

        if (rctx->first)
                /* Create initial descriptor: #8 */
                dev->hw_desc[index]->hdr = sahara_sha_init_hdr(dev, rctx);
        else
                /* Create hash descriptor: #10. Must follow #6. */
                dev->hw_desc[index]->hdr = SAHARA_HDR_MDHA_HASH;

        dev->hw_desc[index]->len1 = rctx->total;
        if (dev->hw_desc[index]->len1 == 0) {
                /* if len1 is 0, p1 must be 0, too */
                dev->hw_desc[index]->p1 = 0;
                rctx->sg_in_idx = 0;
        } else {
                /* Create input links */
                dev->hw_desc[index]->p1 = dev->hw_phys_link[index];
                i = sahara_sha_hw_links_create(dev, rctx, index);

                rctx->sg_in_idx = index;
                if (i < 0)
                        return i;
        }

        dev->hw_desc[index]->p2 = dev->hw_phys_link[i];

        /* Save the context for the next operation */
        result_len = rctx->context_size;
        dev->hw_link[i]->p = dev->context_phys_base;

        dev->hw_link[i]->len = result_len;
        dev->hw_desc[index]->len2 = result_len;

        dev->hw_link[i]->next = 0;

        return 0;
}

/*
 * Load descriptor aka #6
 *
 * To load a previously saved context back to the MDHA unit
 *
 * p1: Saved Context
 * p2: NULL
 *
 */
static int sahara_sha_hw_context_descriptor_create(struct sahara_dev *dev,
                                                struct sahara_sha_reqctx *rctx,
                                                struct ahash_request *req,
                                                int index)
{
        dev->hw_desc[index]->hdr = sahara_sha_init_hdr(dev, rctx);

        dev->hw_desc[index]->len1 = rctx->context_size;
        dev->hw_desc[index]->p1 = dev->hw_phys_link[index];
        dev->hw_desc[index]->len2 = 0;
        dev->hw_desc[index]->p2 = 0;

        dev->hw_link[index]->len = rctx->context_size;
        dev->hw_link[index]->p = dev->context_phys_base;
        dev->hw_link[index]->next = 0;

        return 0;
}

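/*
 * Trim a scatterlist so that it covers exactly nbytes: the entry where
 * the limit falls is shortened and marked as the end of the list.
 * Returns the number of bytes that did not fit into the list.
 */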
static int sahara_walk_and_recalc(struct scatterlist *sg, unsigned int nbytes)
{
        if (!sg || !sg->length)
                return nbytes;

        while (nbytes && sg) {
                if (nbytes <= sg->length) {
                        sg->length = nbytes;
                        sg_mark_end(sg);
                        break;
                }
                nbytes -= sg->length;
                sg = sg_next(sg);
        }

        return nbytes;
}

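/*
 * Carve the request into something the hardware can process: bytes are
 * accumulated in rctx->buf until at least one full block is available
 * (only the final transfer may be padded by the hardware); the
 * block-aligned part is hashed now and any remainder is saved for the
 * next call.
 */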
static int sahara_sha_prepare_request(struct ahash_request *req)
{
        struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
        struct sahara_sha_reqctx *rctx = ahash_request_ctx(req);
        unsigned int hash_later;
        unsigned int block_size;
        unsigned int len;

        block_size = crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm));

        /* append bytes from previous operation */
        len = rctx->buf_cnt + req->nbytes;

        /* only the last transfer can be padded in hardware */
        if (!rctx->last && (len < block_size)) {
                /* too little data, save it for the next operation */
                scatterwalk_map_and_copy(rctx->buf + rctx->buf_cnt, req->src,
                                         0, req->nbytes, 0);
                rctx->buf_cnt += req->nbytes;

                return 0;
        }

        /* add data from previous operation first */
        if (rctx->buf_cnt)
                memcpy(rctx->rembuf, rctx->buf, rctx->buf_cnt);

        /* data must always be a multiple of block_size */
        hash_later = rctx->last ? 0 : len & (block_size - 1);
        if (hash_later) {
                unsigned int offset = req->nbytes - hash_later;
                /* Save remaining bytes for later use */
                scatterwalk_map_and_copy(rctx->buf, req->src, offset,
                                        hash_later, 0);
        }

        /* nbytes should now be a multiple of blocksize */
        req->nbytes = req->nbytes - hash_later;

        sahara_walk_and_recalc(req->src, req->nbytes);

        /* have data from previous operation and current */
        if (rctx->buf_cnt && req->nbytes) {
                sg_init_table(rctx->in_sg_chain, 2);
                sg_set_buf(rctx->in_sg_chain, rctx->rembuf, rctx->buf_cnt);

                sg_chain(rctx->in_sg_chain, 2, req->src);

                rctx->total = req->nbytes + rctx->buf_cnt;
                rctx->in_sg = rctx->in_sg_chain;

                req->src = rctx->in_sg_chain;
        /* only data from previous operation */
        } else if (rctx->buf_cnt) {
                if (req->src)
                        rctx->in_sg = req->src;
                else
                        rctx->in_sg = rctx->in_sg_chain;
                /* buf was copied into rembuf above */
                sg_init_one(rctx->in_sg, rctx->rembuf, rctx->buf_cnt);
                rctx->total = rctx->buf_cnt;
        /* no data from previous operation */
        } else {
                rctx->in_sg = req->src;
                rctx->total = req->nbytes;
                req->src = rctx->in_sg;
        }

        /* on next call, we only have the remaining data in the buffer */
        rctx->buf_cnt = hash_later;

        return -EINPROGRESS;
}

static int sahara_sha_process(struct ahash_request *req)
{
        struct sahara_dev *dev = dev_ptr;
        struct sahara_sha_reqctx *rctx = ahash_request_ctx(req);
        int ret;
        unsigned long timeout;

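        /* 0 means the data was only buffered; -EINPROGRESS means hash now */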
        ret = sahara_sha_prepare_request(req);
        if (!ret)
                return ret;

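        /*
         * First operation: a single data descriptor (#8). Follow-up
         * operations: descriptor 0 reloads the saved context (#6),
         * descriptor 1 hashes the new data (#10).
         */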
        if (rctx->first) {
                sahara_sha_hw_data_descriptor_create(dev, rctx, req, 0);
                dev->hw_desc[0]->next = 0;
                rctx->first = 0;
        } else {
                memcpy(dev->context_base, rctx->context, rctx->context_size);

                sahara_sha_hw_context_descriptor_create(dev, rctx, req, 0);
                dev->hw_desc[0]->next = dev->hw_phys_desc[1];
                sahara_sha_hw_data_descriptor_create(dev, rctx, req, 1);
                dev->hw_desc[1]->next = 0;
        }

        sahara_dump_descriptors(dev);
        sahara_dump_links(dev);

        reinit_completion(&dev->dma_completion);

        sahara_write(dev, dev->hw_phys_desc[0], SAHARA_REG_DAR);

        timeout = wait_for_completion_timeout(&dev->dma_completion,
                                msecs_to_jiffies(SAHARA_TIMEOUT_MS));
        if (!timeout) {
                dev_err(dev->device, "SHA timeout\n");
                return -ETIMEDOUT;
        }

        if (rctx->sg_in_idx)
                dma_unmap_sg(dev->device, dev->in_sg, dev->nb_in_sg,
                             DMA_TO_DEVICE);

        memcpy(rctx->context, dev->context_base, rctx->context_size);

        if (req->result)
                memcpy(req->result, rctx->context, rctx->digest_size);

        return 0;
}

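/*
 * Worker thread that owns the hardware: it dequeues one request at a
 * time, runs it synchronously (the IRQ handler completes
 * dma_completion) and signals the result back to the crypto API.
 */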
static int sahara_queue_manage(void *data)
{
        struct sahara_dev *dev = (struct sahara_dev *)data;
        struct crypto_async_request *async_req;
        struct crypto_async_request *backlog;
        int ret = 0;

        do {
                __set_current_state(TASK_INTERRUPTIBLE);

                mutex_lock(&dev->queue_mutex);
                backlog = crypto_get_backlog(&dev->queue);
                async_req = crypto_dequeue_request(&dev->queue);
                mutex_unlock(&dev->queue_mutex);

                if (backlog)
                        backlog->complete(backlog, -EINPROGRESS);

                if (async_req) {
                        if (crypto_tfm_alg_type(async_req->tfm) ==
                            CRYPTO_ALG_TYPE_AHASH) {
                                struct ahash_request *req =
                                        ahash_request_cast(async_req);

                                ret = sahara_sha_process(req);
                        } else {
                                struct skcipher_request *req =
                                        skcipher_request_cast(async_req);

                                ret = sahara_aes_process(req);
                        }

                        async_req->complete(async_req, ret);

                        continue;
                }

                schedule();
        } while (!kthread_should_stop());

        return 0;
}

static int sahara_sha_enqueue(struct ahash_request *req, int last)
{
        struct sahara_sha_reqctx *rctx = ahash_request_ctx(req);
        struct sahara_dev *dev = dev_ptr;
        int ret;

        if (!req->nbytes && !last)
                return 0;

        rctx->last = last;

        if (!rctx->active) {
                rctx->active = 1;
                rctx->first = 1;
        }

        mutex_lock(&dev->queue_mutex);
        ret = crypto_enqueue_request(&dev->queue, &req->base);
        mutex_unlock(&dev->queue_mutex);

        wake_up_process(dev->kthread);

        return ret;
}

static int sahara_sha_init(struct ahash_request *req)
{
        struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
        struct sahara_sha_reqctx *rctx = ahash_request_ctx(req);

        memset(rctx, 0, sizeof(*rctx));

        switch (crypto_ahash_digestsize(tfm)) {
        case SHA1_DIGEST_SIZE:
                rctx->mode |= SAHARA_HDR_MDHA_ALG_SHA1;
                rctx->digest_size = SHA1_DIGEST_SIZE;
                break;
        case SHA256_DIGEST_SIZE:
                rctx->mode |= SAHARA_HDR_MDHA_ALG_SHA256;
                rctx->digest_size = SHA256_DIGEST_SIZE;
                break;
        default:
                return -EINVAL;
        }

        rctx->context_size = rctx->digest_size + 4;
        rctx->active = 0;

        return 0;
}

static int sahara_sha_update(struct ahash_request *req)
{
        return sahara_sha_enqueue(req, 0);
}

static int sahara_sha_final(struct ahash_request *req)
{
        req->nbytes = 0;
        return sahara_sha_enqueue(req, 1);
}

static int sahara_sha_finup(struct ahash_request *req)
{
        return sahara_sha_enqueue(req, 1);
}

static int sahara_sha_digest(struct ahash_request *req)
{
        sahara_sha_init(req);

        return sahara_sha_finup(req);
}

static int sahara_sha_export(struct ahash_request *req, void *out)
{
        struct sahara_sha_reqctx *rctx = ahash_request_ctx(req);

        memcpy(out, rctx, sizeof(struct sahara_sha_reqctx));

        return 0;
}

static int sahara_sha_import(struct ahash_request *req, const void *in)
{
        struct sahara_sha_reqctx *rctx = ahash_request_ctx(req);

        memcpy(rctx, in, sizeof(struct sahara_sha_reqctx));

        return 0;
}

static int sahara_sha_cra_init(struct crypto_tfm *tfm)
{
        crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
                                 sizeof(struct sahara_sha_reqctx) +
                                 SHA_BUFFER_LEN + SHA256_BLOCK_SIZE);

        return 0;
}

static struct skcipher_alg aes_algs[] = {
{
        .base.cra_name          = "ecb(aes)",
        .base.cra_driver_name   = "sahara-ecb-aes",
        .base.cra_priority      = 300,
        .base.cra_flags         = CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK,
        .base.cra_blocksize     = AES_BLOCK_SIZE,
        .base.cra_ctxsize       = sizeof(struct sahara_ctx),
        .base.cra_alignmask     = 0x0,
        .base.cra_module        = THIS_MODULE,

        .init                   = sahara_aes_init_tfm,
        .exit                   = sahara_aes_exit_tfm,
        .min_keysize            = AES_MIN_KEY_SIZE,
        .max_keysize            = AES_MAX_KEY_SIZE,
        .setkey                 = sahara_aes_setkey,
        .encrypt                = sahara_aes_ecb_encrypt,
        .decrypt                = sahara_aes_ecb_decrypt,
}, {
        .base.cra_name          = "cbc(aes)",
        .base.cra_driver_name   = "sahara-cbc-aes",
        .base.cra_priority      = 300,
        .base.cra_flags         = CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK,
        .base.cra_blocksize     = AES_BLOCK_SIZE,
        .base.cra_ctxsize       = sizeof(struct sahara_ctx),
        .base.cra_alignmask     = 0x0,
        .base.cra_module        = THIS_MODULE,

        .init                   = sahara_aes_init_tfm,
        .exit                   = sahara_aes_exit_tfm,
        .min_keysize            = AES_MIN_KEY_SIZE,
        .max_keysize            = AES_MAX_KEY_SIZE,
        .ivsize                 = AES_BLOCK_SIZE,
        .setkey                 = sahara_aes_setkey,
        .encrypt                = sahara_aes_cbc_encrypt,
        .decrypt                = sahara_aes_cbc_decrypt,
}
};
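
/*
 * A minimal usage sketch (not part of this driver; key, src_sg, dst_sg,
 * nbytes and iv are assumed to be set up by the caller). Once the algs
 * above are registered, they are reached through the generic crypto
 * API, e.g.:
 *
 *      DECLARE_CRYPTO_WAIT(wait);
 *      struct crypto_skcipher *tfm = crypto_alloc_skcipher("cbc(aes)", 0, 0);
 *      struct skcipher_request *req = skcipher_request_alloc(tfm, GFP_KERNEL);
 *
 *      crypto_skcipher_setkey(tfm, key, AES_KEYSIZE_128);
 *      skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
 *                                    crypto_req_done, &wait);
 *      skcipher_request_set_crypt(req, src_sg, dst_sg, nbytes, iv);
 *      crypto_wait_req(crypto_skcipher_encrypt(req), &wait);
 */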

static struct ahash_alg sha_v3_algs[] = {
{
        .init           = sahara_sha_init,
        .update         = sahara_sha_update,
        .final          = sahara_sha_final,
        .finup          = sahara_sha_finup,
        .digest         = sahara_sha_digest,
        .export         = sahara_sha_export,
        .import         = sahara_sha_import,
        .halg.digestsize        = SHA1_DIGEST_SIZE,
        .halg.statesize         = sizeof(struct sahara_sha_reqctx),
        .halg.base      = {
                .cra_name               = "sha1",
                .cra_driver_name        = "sahara-sha1",
                .cra_priority           = 300,
                .cra_flags              = CRYPTO_ALG_ASYNC |
                                                CRYPTO_ALG_NEED_FALLBACK,
                .cra_blocksize          = SHA1_BLOCK_SIZE,
                .cra_ctxsize            = sizeof(struct sahara_ctx),
                .cra_alignmask          = 0,
                .cra_module             = THIS_MODULE,
                .cra_init               = sahara_sha_cra_init,
        }
},
};

static struct ahash_alg sha_v4_algs[] = {
{
        .init           = sahara_sha_init,
        .update         = sahara_sha_update,
        .final          = sahara_sha_final,
        .finup          = sahara_sha_finup,
        .digest         = sahara_sha_digest,
        .export         = sahara_sha_export,
        .import         = sahara_sha_import,
        .halg.digestsize        = SHA256_DIGEST_SIZE,
        .halg.statesize         = sizeof(struct sahara_sha_reqctx),
        .halg.base      = {
                .cra_name               = "sha256",
                .cra_driver_name        = "sahara-sha256",
                .cra_priority           = 300,
                .cra_flags              = CRYPTO_ALG_ASYNC |
                                                CRYPTO_ALG_NEED_FALLBACK,
                .cra_blocksize          = SHA256_BLOCK_SIZE,
                .cra_ctxsize            = sizeof(struct sahara_ctx),
                .cra_alignmask          = 0,
                .cra_module             = THIS_MODULE,
                .cra_init               = sahara_sha_cra_init,
        }
},
};

static irqreturn_t sahara_irq_handler(int irq, void *data)
{
        struct sahara_dev *dev = (struct sahara_dev *)data;
        unsigned int stat = sahara_read(dev, SAHARA_REG_STATUS);
        unsigned int err = sahara_read(dev, SAHARA_REG_ERRSTATUS);

        sahara_write(dev, SAHARA_CMD_CLEAR_INT | SAHARA_CMD_CLEAR_ERR,
                     SAHARA_REG_CMD);

        sahara_decode_status(dev, stat);

        if (SAHARA_STATUS_GET_STATE(stat) == SAHARA_STATE_BUSY) {
                return IRQ_NONE;
        } else if (SAHARA_STATUS_GET_STATE(stat) == SAHARA_STATE_COMPLETE) {
                dev->error = 0;
        } else {
                sahara_decode_error(dev, err);
                dev->error = -EINVAL;
        }

        complete(&dev->dma_completion);

        return IRQ_HANDLED;
}

static int sahara_register_algs(struct sahara_dev *dev)
{
        int err;
        unsigned int i, j, k, l;

        for (i = 0; i < ARRAY_SIZE(aes_algs); i++) {
                err = crypto_register_skcipher(&aes_algs[i]);
                if (err)
                        goto err_aes_algs;
        }

        for (k = 0; k < ARRAY_SIZE(sha_v3_algs); k++) {
                err = crypto_register_ahash(&sha_v3_algs[k]);
                if (err)
                        goto err_sha_v3_algs;
        }

        if (dev->version > SAHARA_VERSION_3)
                for (l = 0; l < ARRAY_SIZE(sha_v4_algs); l++) {
                        err = crypto_register_ahash(&sha_v4_algs[l]);
                        if (err)
                                goto err_sha_v4_algs;
                }

        return 0;

err_sha_v4_algs:
        for (j = 0; j < l; j++)
                crypto_unregister_ahash(&sha_v4_algs[j]);

err_sha_v3_algs:
        for (j = 0; j < k; j++)
                crypto_unregister_ahash(&sha_v3_algs[j]);

err_aes_algs:
        for (j = 0; j < i; j++)
                crypto_unregister_skcipher(&aes_algs[j]);

        return err;
}

static void sahara_unregister_algs(struct sahara_dev *dev)
{
        unsigned int i;

        for (i = 0; i < ARRAY_SIZE(aes_algs); i++)
                crypto_unregister_skcipher(&aes_algs[i]);

        for (i = 0; i < ARRAY_SIZE(sha_v3_algs); i++)
                crypto_unregister_ahash(&sha_v3_algs[i]);

        if (dev->version > SAHARA_VERSION_3)
                for (i = 0; i < ARRAY_SIZE(sha_v4_algs); i++)
                        crypto_unregister_ahash(&sha_v4_algs[i]);
}

static const struct platform_device_id sahara_platform_ids[] = {
        { .name = "sahara-imx27" },
        { /* sentinel */ }
};
MODULE_DEVICE_TABLE(platform, sahara_platform_ids);

static const struct of_device_id sahara_dt_ids[] = {
        { .compatible = "fsl,imx53-sahara" },
        { .compatible = "fsl,imx27-sahara" },
        { /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, sahara_dt_ids);

static int sahara_probe(struct platform_device *pdev)
{
        struct sahara_dev *dev;
        u32 version;
        int irq;
        int err;
        int i;

        dev = devm_kzalloc(&pdev->dev, sizeof(*dev), GFP_KERNEL);
        if (!dev)
                return -ENOMEM;

        dev->device = &pdev->dev;
        platform_set_drvdata(pdev, dev);

        /* Get the base address */
        dev->regs_base = devm_platform_ioremap_resource(pdev, 0);
        if (IS_ERR(dev->regs_base))
                return PTR_ERR(dev->regs_base);

        /* Get the IRQ */
        irq = platform_get_irq(pdev, 0);
        if (irq < 0)
                return irq;

        err = devm_request_irq(&pdev->dev, irq, sahara_irq_handler,
                               0, dev_name(&pdev->dev), dev);
        if (err) {
                dev_err(&pdev->dev, "failed to request irq\n");
                return err;
        }

        /* clocks */
        dev->clk_ipg = devm_clk_get(&pdev->dev, "ipg");
        if (IS_ERR(dev->clk_ipg)) {
                dev_err(&pdev->dev, "Could not get ipg clock\n");
                return PTR_ERR(dev->clk_ipg);
        }

        dev->clk_ahb = devm_clk_get(&pdev->dev, "ahb");
        if (IS_ERR(dev->clk_ahb)) {
                dev_err(&pdev->dev, "Could not get ahb clock\n");
                return PTR_ERR(dev->clk_ahb);
        }

        /* Allocate HW descriptors */
        dev->hw_desc[0] = dmam_alloc_coherent(&pdev->dev,
                        SAHARA_MAX_HW_DESC * sizeof(struct sahara_hw_desc),
                        &dev->hw_phys_desc[0], GFP_KERNEL);
        if (!dev->hw_desc[0]) {
                dev_err(&pdev->dev, "Could not allocate hw descriptors\n");
                return -ENOMEM;
        }
        dev->hw_desc[1] = dev->hw_desc[0] + 1;
        dev->hw_phys_desc[1] = dev->hw_phys_desc[0] +
                                sizeof(struct sahara_hw_desc);

        /* Allocate space for iv and key */
        dev->key_base = dmam_alloc_coherent(&pdev->dev, 2 * AES_KEYSIZE_128,
                                &dev->key_phys_base, GFP_KERNEL);
        if (!dev->key_base) {
                dev_err(&pdev->dev, "Could not allocate memory for key\n");
                return -ENOMEM;
        }
        dev->iv_base = dev->key_base + AES_KEYSIZE_128;
        dev->iv_phys_base = dev->key_phys_base + AES_KEYSIZE_128;

        /* Allocate space for context: largest digest + message length field */
        dev->context_base = dmam_alloc_coherent(&pdev->dev,
                                        SHA256_DIGEST_SIZE + 4,
                                        &dev->context_phys_base, GFP_KERNEL);
        if (!dev->context_base) {
                dev_err(&pdev->dev, "Could not allocate memory for MDHA context\n");
                return -ENOMEM;
        }

        /* Allocate space for HW links */
        dev->hw_link[0] = dmam_alloc_coherent(&pdev->dev,
                        SAHARA_MAX_HW_LINK * sizeof(struct sahara_hw_link),
                        &dev->hw_phys_link[0], GFP_KERNEL);
        if (!dev->hw_link[0]) {
                dev_err(&pdev->dev, "Could not allocate hw links\n");
                return -ENOMEM;
        }
        for (i = 1; i < SAHARA_MAX_HW_LINK; i++) {
                dev->hw_phys_link[i] = dev->hw_phys_link[i - 1] +
                                        sizeof(struct sahara_hw_link);
                dev->hw_link[i] = dev->hw_link[i - 1] + 1;
        }

        crypto_init_queue(&dev->queue, SAHARA_QUEUE_LENGTH);

        mutex_init(&dev->queue_mutex);

        dev_ptr = dev;

        dev->kthread = kthread_run(sahara_queue_manage, dev, "sahara_crypto");
        if (IS_ERR(dev->kthread))
                return PTR_ERR(dev->kthread);

        init_completion(&dev->dma_completion);

        err = clk_prepare_enable(dev->clk_ipg);
        if (err)
                return err;
        err = clk_prepare_enable(dev->clk_ahb);
        if (err)
                goto clk_ipg_disable;

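        /*
         * Version discovery differs per SoC: i.MX27 (SAHARA v3) reports
         * its version in the low byte of the VERSION register, i.MX53
         * (v4) in bits 15:8.
         */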
        version = sahara_read(dev, SAHARA_REG_VERSION);
        if (of_device_is_compatible(pdev->dev.of_node, "fsl,imx27-sahara")) {
                if (version != SAHARA_VERSION_3)
                        err = -ENODEV;
        } else if (of_device_is_compatible(pdev->dev.of_node,
                        "fsl,imx53-sahara")) {
                if (((version >> 8) & 0xff) != SAHARA_VERSION_4)
                        err = -ENODEV;
                version = (version >> 8) & 0xff;
        }
        if (err == -ENODEV) {
                dev_err(&pdev->dev, "SAHARA version %d not supported\n",
                                version);
                goto err_algs;
        }

        dev->version = version;

        sahara_write(dev, SAHARA_CMD_RESET | SAHARA_CMD_MODE_BATCH,
                     SAHARA_REG_CMD);
        sahara_write(dev, SAHARA_CONTROL_SET_THROTTLE(0) |
                        SAHARA_CONTROL_SET_MAXBURST(8) |
                        SAHARA_CONTROL_RNG_AUTORSD |
                        SAHARA_CONTROL_ENABLE_INT,
                        SAHARA_REG_CONTROL);

        err = sahara_register_algs(dev);
        if (err)
                goto err_algs;

        dev_info(&pdev->dev, "SAHARA version %d initialized\n", version);

        return 0;

err_algs:
        kthread_stop(dev->kthread);
        dev_ptr = NULL;
        clk_disable_unprepare(dev->clk_ahb);
clk_ipg_disable:
        clk_disable_unprepare(dev->clk_ipg);

        return err;
}

static int sahara_remove(struct platform_device *pdev)
{
        struct sahara_dev *dev = platform_get_drvdata(pdev);

        kthread_stop(dev->kthread);

        sahara_unregister_algs(dev);

        clk_disable_unprepare(dev->clk_ipg);
        clk_disable_unprepare(dev->clk_ahb);

        dev_ptr = NULL;

        return 0;
}

static struct platform_driver sahara_driver = {
        .probe          = sahara_probe,
        .remove         = sahara_remove,
        .driver         = {
                .name   = SAHARA_NAME,
                .of_match_table = sahara_dt_ids,
        },
        .id_table = sahara_platform_ids,
};

module_platform_driver(sahara_driver);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Javier Martin <javier.martin@vista-silicon.com>");
MODULE_AUTHOR("Steffen Trumtrar <s.trumtrar@pengutronix.de>");
MODULE_DESCRIPTION("SAHARA2 HW crypto accelerator");