linux/drivers/crypto/mv_cesa.c
/*
 * Support for Marvell's crypto engine which can be found on some Orion5X
 * boards.
 *
 * Author: Sebastian Andrzej Siewior < sebastian at breakpoint dot cc >
 * License: GPLv2
 *
 */
#include <crypto/aes.h>
#include <crypto/algapi.h>
#include <linux/crypto.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/kthread.h>
#include <linux/platform_device.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/clk.h>
#include <crypto/internal/hash.h>
#include <crypto/sha.h>
#include <linux/of.h>
#include <linux/of_platform.h>
#include <linux/of_irq.h>

#include "mv_cesa.h"

#define MV_CESA "MV-CESA:"
#define MAX_HW_HASH_SIZE        0xFFFF
#define MV_CESA_EXPIRE          500 /* msec */

/*
 * STM:
 *   /---------------------------------------\
 *   |                                       | request complete
 *  \./                                      |
 * IDLE -> new request -> BUSY -> done -> DEQUEUE
 *                         /°\               |
 *                          |                | more scatter entries
 *                          \________________/
 */
enum engine_status {
        ENGINE_IDLE,
        ENGINE_BUSY,
        ENGINE_W_DEQUEUE,
};

/**
 * struct req_progress - used for every crypt request
 * @src_sg_it:          sg iterator for src
 * @dst_sg_it:          sg iterator for dst
 * @complete:           called when the whole request has been processed
 * @process:            called to program the next chunk into the engine
 * @sg_src_left:        bytes left in src to process (scatter list)
 * @src_start:          offset to add to src start position (scatter list)
 * @crypt_len:          length of current hw crypt/hash process
 * @hw_nbytes:          total bytes to process in hw for this request
 * @copy_back:          whether to copy data back (crypt) or not (hash)
 * @sg_dst_left:        bytes left in dst to process in this scatter list
 * @dst_start:          offset to add to dst start position (scatter list)
 * @hw_processed_bytes: number of bytes processed by hw (request).
 *
 * sg helpers are used to iterate over the scatterlist. Since the size of the
 * SRAM may be less than the scatter size, this struct is used to keep
 * track of progress within the current scatterlist.
 */
struct req_progress {
        struct sg_mapping_iter src_sg_it;
        struct sg_mapping_iter dst_sg_it;
        void (*complete) (void);
        void (*process) (int is_first);

        /* src mostly */
        int sg_src_left;
        int src_start;
        int crypt_len;
        int hw_nbytes;
        /* dst mostly */
        int copy_back;
        int sg_dst_left;
        int dst_start;
        int hw_processed_bytes;
};

struct crypto_priv {
        void __iomem *reg;
        void __iomem *sram;
        int irq;
        struct clk *clk;
        struct task_struct *queue_th;

        /* the lock protects queue and eng_st */
        spinlock_t lock;
        struct crypto_queue queue;
        enum engine_status eng_st;
        struct timer_list completion_timer;
        struct crypto_async_request *cur_req;
        struct req_progress p;
        int max_req_size;
        int sram_size;
        int has_sha1;
        int has_hmac_sha1;
};

static struct crypto_priv *cpg;

struct mv_ctx {
        u8 aes_enc_key[AES_KEY_LEN];
        u32 aes_dec_key[8];
        int key_len;
        u32 need_calc_aes_dkey;
};

enum crypto_op {
        COP_AES_ECB,
        COP_AES_CBC,
};

struct mv_req_ctx {
        enum crypto_op op;
        int decrypt;
};

enum hash_op {
        COP_SHA1,
        COP_HMAC_SHA1
};

struct mv_tfm_hash_ctx {
        struct crypto_shash *fallback;
        struct crypto_shash *base_hash;
        u32 ivs[2 * SHA1_DIGEST_SIZE / 4];
        int count_add;
        enum hash_op op;
};

struct mv_req_hash_ctx {
        u64 count;
        u32 state[SHA1_DIGEST_SIZE / 4];
        u8 buffer[SHA1_BLOCK_SIZE];
        int first_hash;         /* marks that we don't have previous state */
        int last_chunk;         /* marks that this is the 'final' request */
        int extra_bytes;        /* unprocessed bytes in buffer */
        enum hash_op op;
        int count_add;
};

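/*
 * Watchdog for a hardware operation: if the engine does not raise its
 * completion interrupt within MV_CESA_EXPIRE, force it off and hand the
 * current request back to the dequeue path.
 */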
static void mv_completion_timer_callback(unsigned long unused)
{
        int active = readl(cpg->reg + SEC_ACCEL_CMD) & SEC_CMD_EN_SEC_ACCL0;

        printk(KERN_ERR MV_CESA
               "completion timer expired (CESA %sactive), cleaning up.\n",
               active ? "" : "in");

        del_timer(&cpg->completion_timer);
        writel(SEC_CMD_DISABLE_SEC, cpg->reg + SEC_ACCEL_CMD);
        while (readl(cpg->reg + SEC_ACCEL_CMD) & SEC_CMD_DISABLE_SEC)
                printk(KERN_INFO MV_CESA "%s: waiting for the engine to finish\n", __func__);
        cpg->eng_st = ENGINE_W_DEQUEUE;
        wake_up_process(cpg->queue_th);
}

static void mv_setup_timer(void)
{
        setup_timer(&cpg->completion_timer, &mv_completion_timer_callback, 0);
        mod_timer(&cpg->completion_timer,
                        jiffies + msecs_to_jiffies(MV_CESA_EXPIRE));
}

static void compute_aes_dec_key(struct mv_ctx *ctx)
{
        struct crypto_aes_ctx gen_aes_key;
        int key_pos;

        if (!ctx->need_calc_aes_dkey)
                return;

        crypto_aes_expand_key(&gen_aes_key, ctx->aes_enc_key, ctx->key_len);

        key_pos = ctx->key_len + 24;
        memcpy(ctx->aes_dec_key, &gen_aes_key.key_enc[key_pos], 4 * 4);
        switch (ctx->key_len) {
        case AES_KEYSIZE_256:
                key_pos -= 2;
                /* fall through */
        case AES_KEYSIZE_192:
                key_pos -= 2;
                memcpy(&ctx->aes_dec_key[4], &gen_aes_key.key_enc[key_pos],
                                4 * 4);
                break;
        }
        ctx->need_calc_aes_dkey = 0;
}

static int mv_setkey_aes(struct crypto_ablkcipher *cipher, const u8 *key,
                unsigned int len)
{
        struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher);
        struct mv_ctx *ctx = crypto_tfm_ctx(tfm);

        switch (len) {
        case AES_KEYSIZE_128:
        case AES_KEYSIZE_192:
        case AES_KEYSIZE_256:
                break;
        default:
                crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
                return -EINVAL;
        }
        ctx->key_len = len;
        ctx->need_calc_aes_dkey = 1;

        memcpy(ctx->aes_enc_key, key, AES_KEY_LEN);
        return 0;
}

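/*
 * Copy @len bytes from the request's source scatterlist into @dbuf,
 * advancing the sg_miter state kept in @p across calls.
 */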
static void copy_src_to_buf(struct req_progress *p, char *dbuf, int len)
{
        int ret;
        void *sbuf;
        int copy_len;

        while (len) {
                if (!p->sg_src_left) {
                        ret = sg_miter_next(&p->src_sg_it);
                        BUG_ON(!ret);
                        p->sg_src_left = p->src_sg_it.length;
                        p->src_start = 0;
                }

                sbuf = p->src_sg_it.addr + p->src_start;

                copy_len = min(p->sg_src_left, len);
                memcpy(dbuf, sbuf, copy_len);

                p->src_start += copy_len;
                p->sg_src_left -= copy_len;

                len -= copy_len;
                dbuf += copy_len;
        }
}

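/*
 * Stage the next chunk of input in the engine's SRAM: fill the DATA_IN
 * area up to max_req_size bytes (or whatever is left of the request) and
 * record the resulting chunk length in p->crypt_len.
 */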
static void setup_data_in(void)
{
        struct req_progress *p = &cpg->p;
        int data_in_sram =
            min(p->hw_nbytes - p->hw_processed_bytes, cpg->max_req_size);
        copy_src_to_buf(p, cpg->sram + SRAM_DATA_IN_START + p->crypt_len,
                        data_in_sram - p->crypt_len);
        p->crypt_len = data_in_sram;
}

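/*
 * Program one AES chunk: build the security accelerator descriptor
 * (mode, direction, key, IV and data pointers in SRAM), stage the input
 * data and kick the engine. Called once per SRAM-sized chunk.
 */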
static void mv_process_current_q(int first_block)
{
        struct ablkcipher_request *req = ablkcipher_request_cast(cpg->cur_req);
        struct mv_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
        struct mv_req_ctx *req_ctx = ablkcipher_request_ctx(req);
        struct sec_accel_config op;

        switch (req_ctx->op) {
        case COP_AES_ECB:
                op.config = CFG_OP_CRYPT_ONLY | CFG_ENCM_AES | CFG_ENC_MODE_ECB;
                break;
        case COP_AES_CBC:
        default:
                op.config = CFG_OP_CRYPT_ONLY | CFG_ENCM_AES | CFG_ENC_MODE_CBC;
                op.enc_iv = ENC_IV_POINT(SRAM_DATA_IV) |
                        ENC_IV_BUF_POINT(SRAM_DATA_IV_BUF);
                if (first_block)
                        memcpy(cpg->sram + SRAM_DATA_IV, req->info, 16);
                break;
        }
        if (req_ctx->decrypt) {
                op.config |= CFG_DIR_DEC;
                memcpy(cpg->sram + SRAM_DATA_KEY_P, ctx->aes_dec_key,
                                AES_KEY_LEN);
        } else {
                op.config |= CFG_DIR_ENC;
                memcpy(cpg->sram + SRAM_DATA_KEY_P, ctx->aes_enc_key,
                                AES_KEY_LEN);
        }

        switch (ctx->key_len) {
        case AES_KEYSIZE_128:
                op.config |= CFG_AES_LEN_128;
                break;
        case AES_KEYSIZE_192:
                op.config |= CFG_AES_LEN_192;
                break;
        case AES_KEYSIZE_256:
                op.config |= CFG_AES_LEN_256;
                break;
        }
        op.enc_p = ENC_P_SRC(SRAM_DATA_IN_START) |
                ENC_P_DST(SRAM_DATA_OUT_START);
        op.enc_key_p = SRAM_DATA_KEY_P;

        setup_data_in();
        op.enc_len = cpg->p.crypt_len;
        memcpy(cpg->sram + SRAM_CONFIG, &op,
                        sizeof(struct sec_accel_config));

        /* GO */
        mv_setup_timer();
        writel(SEC_CMD_EN_SEC_ACCL0, cpg->reg + SEC_ACCEL_CMD);
}

static void mv_crypto_algo_completion(void)
{
        struct ablkcipher_request *req = ablkcipher_request_cast(cpg->cur_req);
        struct mv_req_ctx *req_ctx = ablkcipher_request_ctx(req);

        sg_miter_stop(&cpg->p.src_sg_it);
        sg_miter_stop(&cpg->p.dst_sg_it);

        if (req_ctx->op != COP_AES_CBC)
                return;

        memcpy(req->info, cpg->sram + SRAM_DATA_IV_BUF, 16);
}

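/*
 * Program one SHA1/HMAC-SHA1 chunk: select the fragment type
 * (first/mid/last/not-fragmented), restore the intermediate digest state
 * if needed, stage the input data and kick the engine.
 */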
static void mv_process_hash_current(int first_block)
{
        struct ahash_request *req = ahash_request_cast(cpg->cur_req);
        const struct mv_tfm_hash_ctx *tfm_ctx = crypto_tfm_ctx(req->base.tfm);
        struct mv_req_hash_ctx *req_ctx = ahash_request_ctx(req);
        struct req_progress *p = &cpg->p;
        struct sec_accel_config op = { 0 };
        int is_last;

        switch (req_ctx->op) {
        case COP_SHA1:
        default:
                op.config = CFG_OP_MAC_ONLY | CFG_MACM_SHA1;
                break;
        case COP_HMAC_SHA1:
                op.config = CFG_OP_MAC_ONLY | CFG_MACM_HMAC_SHA1;
                memcpy(cpg->sram + SRAM_HMAC_IV_IN,
                                tfm_ctx->ivs, sizeof(tfm_ctx->ivs));
                break;
        }

        op.mac_src_p = MAC_SRC_DATA_P(SRAM_DATA_IN_START) |
                MAC_SRC_TOTAL_LEN((u32)req_ctx->count);

        setup_data_in();

        op.mac_digest =
                MAC_DIGEST_P(SRAM_DIGEST_BUF) | MAC_FRAG_LEN(p->crypt_len);
        op.mac_iv =
                MAC_INNER_IV_P(SRAM_HMAC_IV_IN) |
                MAC_OUTER_IV_P(SRAM_HMAC_IV_OUT);

        is_last = req_ctx->last_chunk
                && (p->hw_processed_bytes + p->crypt_len >= p->hw_nbytes)
                && (req_ctx->count <= MAX_HW_HASH_SIZE);
        if (req_ctx->first_hash) {
                if (is_last)
                        op.config |= CFG_NOT_FRAG;
                else
                        op.config |= CFG_FIRST_FRAG;

                req_ctx->first_hash = 0;
        } else {
                if (is_last)
                        op.config |= CFG_LAST_FRAG;
                else
                        op.config |= CFG_MID_FRAG;

                if (first_block) {
                        writel(req_ctx->state[0], cpg->reg + DIGEST_INITIAL_VAL_A);
                        writel(req_ctx->state[1], cpg->reg + DIGEST_INITIAL_VAL_B);
                        writel(req_ctx->state[2], cpg->reg + DIGEST_INITIAL_VAL_C);
                        writel(req_ctx->state[3], cpg->reg + DIGEST_INITIAL_VAL_D);
                        writel(req_ctx->state[4], cpg->reg + DIGEST_INITIAL_VAL_E);
                }
        }

        memcpy(cpg->sram + SRAM_CONFIG, &op, sizeof(struct sec_accel_config));

        /* GO */
        mv_setup_timer();
        writel(SEC_CMD_EN_SEC_ACCL0, cpg->reg + SEC_ACCEL_CMD);
}

static inline int mv_hash_import_sha1_ctx(const struct mv_req_hash_ctx *ctx,
                                          struct shash_desc *desc)
{
        int i;
        struct sha1_state shash_state;

        shash_state.count = ctx->count + ctx->count_add;
        for (i = 0; i < 5; i++)
                shash_state.state[i] = ctx->state[i];
        memcpy(shash_state.buffer, ctx->buffer, sizeof(shash_state.buffer));
        return crypto_shash_import(desc, &shash_state);
}

static int mv_hash_final_fallback(struct ahash_request *req)
{
        const struct mv_tfm_hash_ctx *tfm_ctx = crypto_tfm_ctx(req->base.tfm);
        struct mv_req_hash_ctx *req_ctx = ahash_request_ctx(req);
        struct {
                struct shash_desc shash;
                char ctx[crypto_shash_descsize(tfm_ctx->fallback)];
        } desc;
        int rc;

        desc.shash.tfm = tfm_ctx->fallback;
        desc.shash.flags = CRYPTO_TFM_REQ_MAY_SLEEP;
        if (unlikely(req_ctx->first_hash)) {
                crypto_shash_init(&desc.shash);
                crypto_shash_update(&desc.shash, req_ctx->buffer,
                                    req_ctx->extra_bytes);
        } else {
                /* only SHA1 for now....
                 */
                rc = mv_hash_import_sha1_ctx(req_ctx, &desc.shash);
                if (rc)
                        goto out;
        }
        rc = crypto_shash_final(&desc.shash, req->result);
out:
        return rc;
}

static void mv_save_digest_state(struct mv_req_hash_ctx *ctx)
{
        ctx->state[0] = readl(cpg->reg + DIGEST_INITIAL_VAL_A);
        ctx->state[1] = readl(cpg->reg + DIGEST_INITIAL_VAL_B);
        ctx->state[2] = readl(cpg->reg + DIGEST_INITIAL_VAL_C);
        ctx->state[3] = readl(cpg->reg + DIGEST_INITIAL_VAL_D);
        ctx->state[4] = readl(cpg->reg + DIGEST_INITIAL_VAL_E);
}

static void mv_hash_algo_completion(void)
{
        struct ahash_request *req = ahash_request_cast(cpg->cur_req);
        struct mv_req_hash_ctx *ctx = ahash_request_ctx(req);

        if (ctx->extra_bytes)
                copy_src_to_buf(&cpg->p, ctx->buffer, ctx->extra_bytes);
        sg_miter_stop(&cpg->p.src_sg_it);

        if (likely(ctx->last_chunk)) {
                if (likely(ctx->count <= MAX_HW_HASH_SIZE)) {
                        memcpy(req->result, cpg->sram + SRAM_DIGEST_BUF,
                               crypto_ahash_digestsize(crypto_ahash_reqtfm
                                                       (req)));
                } else {
                        mv_save_digest_state(ctx);
                        mv_hash_final_fallback(req);
                }
        } else {
                mv_save_digest_state(ctx);
        }
}

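/*
 * Called from the queue thread once the engine has finished a chunk:
 * copy the output back to the destination scatterlist (for cipher
 * requests), then either program the next chunk or complete the request.
 */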
static void dequeue_complete_req(void)
{
        struct crypto_async_request *req = cpg->cur_req;
        void *buf;
        int ret;
        cpg->p.hw_processed_bytes += cpg->p.crypt_len;
        if (cpg->p.copy_back) {
                int need_copy_len = cpg->p.crypt_len;
                int sram_offset = 0;
                do {
                        int dst_copy;

                        if (!cpg->p.sg_dst_left) {
                                ret = sg_miter_next(&cpg->p.dst_sg_it);
                                BUG_ON(!ret);
                                cpg->p.sg_dst_left = cpg->p.dst_sg_it.length;
                                cpg->p.dst_start = 0;
                        }

                        buf = cpg->p.dst_sg_it.addr;
                        buf += cpg->p.dst_start;

                        dst_copy = min(need_copy_len, cpg->p.sg_dst_left);

                        memcpy(buf,
                               cpg->sram + SRAM_DATA_OUT_START + sram_offset,
                               dst_copy);
                        sram_offset += dst_copy;
                        cpg->p.sg_dst_left -= dst_copy;
                        need_copy_len -= dst_copy;
                        cpg->p.dst_start += dst_copy;
                } while (need_copy_len > 0);
        }

        cpg->p.crypt_len = 0;

        BUG_ON(cpg->eng_st != ENGINE_W_DEQUEUE);
        if (cpg->p.hw_processed_bytes < cpg->p.hw_nbytes) {
                /* process next scatter list entry */
                cpg->eng_st = ENGINE_BUSY;
                cpg->p.process(0);
        } else {
                cpg->p.complete();
                cpg->eng_st = ENGINE_IDLE;
                local_bh_disable();
                req->complete(req, 0);
                local_bh_enable();
        }
}

static int count_sgs(struct scatterlist *sl, unsigned int total_bytes)
{
        int i = 0;
        size_t cur_len;

        while (sl) {
                cur_len = sl[i].length;
                ++i;
                if (total_bytes > cur_len)
                        total_bytes -= cur_len;
                else
                        break;
        }

        return i;
}

static void mv_start_new_crypt_req(struct ablkcipher_request *req)
{
        struct req_progress *p = &cpg->p;
        int num_sgs;

        cpg->cur_req = &req->base;
        memset(p, 0, sizeof(struct req_progress));
        p->hw_nbytes = req->nbytes;
        p->complete = mv_crypto_algo_completion;
        p->process = mv_process_current_q;
        p->copy_back = 1;

        num_sgs = count_sgs(req->src, req->nbytes);
        sg_miter_start(&p->src_sg_it, req->src, num_sgs, SG_MITER_FROM_SG);

        num_sgs = count_sgs(req->dst, req->nbytes);
        sg_miter_start(&p->dst_sg_it, req->dst, num_sgs, SG_MITER_TO_SG);

        mv_process_current_q(1);
}

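/*
 * Start a hash request: bytes that are not a multiple of the block size
 * (or that the hardware cannot finalize) are kept back in ctx->buffer;
 * the block-aligned part is programmed into the engine, and requests with
 * nothing left for the hardware are completed immediately, using the
 * fallback shash for a final.
 */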
static void mv_start_new_hash_req(struct ahash_request *req)
{
        struct req_progress *p = &cpg->p;
        struct mv_req_hash_ctx *ctx = ahash_request_ctx(req);
        int num_sgs, hw_bytes, old_extra_bytes, rc;
        cpg->cur_req = &req->base;
        memset(p, 0, sizeof(struct req_progress));
        hw_bytes = req->nbytes + ctx->extra_bytes;
        old_extra_bytes = ctx->extra_bytes;

        ctx->extra_bytes = hw_bytes % SHA1_BLOCK_SIZE;
        if (ctx->extra_bytes != 0
            && (!ctx->last_chunk || ctx->count > MAX_HW_HASH_SIZE))
                hw_bytes -= ctx->extra_bytes;
        else
                ctx->extra_bytes = 0;

        num_sgs = count_sgs(req->src, req->nbytes);
        sg_miter_start(&p->src_sg_it, req->src, num_sgs, SG_MITER_FROM_SG);

        if (hw_bytes) {
                p->hw_nbytes = hw_bytes;
                p->complete = mv_hash_algo_completion;
                p->process = mv_process_hash_current;

                if (unlikely(old_extra_bytes)) {
                        memcpy(cpg->sram + SRAM_DATA_IN_START, ctx->buffer,
                               old_extra_bytes);
                        p->crypt_len = old_extra_bytes;
                }

                mv_process_hash_current(1);
        } else {
                copy_src_to_buf(p, ctx->buffer + old_extra_bytes,
                                ctx->extra_bytes - old_extra_bytes);
                sg_miter_stop(&p->src_sg_it);
                if (ctx->last_chunk)
                        rc = mv_hash_final_fallback(req);
                else
                        rc = 0;
                cpg->eng_st = ENGINE_IDLE;
                local_bh_disable();
                req->base.complete(&req->base, rc);
                local_bh_enable();
        }
}

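/*
 * Main worker thread: drains completed work, then pulls the next request
 * off the crypto queue and dispatches it to the cipher or hash path.
 */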
static int queue_manag(void *data)
{
        cpg->eng_st = ENGINE_IDLE;
        do {
                struct crypto_async_request *async_req = NULL;
                struct crypto_async_request *backlog = NULL;

                __set_current_state(TASK_INTERRUPTIBLE);

                if (cpg->eng_st == ENGINE_W_DEQUEUE)
                        dequeue_complete_req();

                spin_lock_irq(&cpg->lock);
                if (cpg->eng_st == ENGINE_IDLE) {
                        backlog = crypto_get_backlog(&cpg->queue);
                        async_req = crypto_dequeue_request(&cpg->queue);
                        if (async_req) {
                                BUG_ON(cpg->eng_st != ENGINE_IDLE);
                                cpg->eng_st = ENGINE_BUSY;
                        }
                }
                spin_unlock_irq(&cpg->lock);

                if (backlog) {
                        backlog->complete(backlog, -EINPROGRESS);
                        backlog = NULL;
                }

                if (async_req) {
                        if (async_req->tfm->__crt_alg->cra_type !=
                            &crypto_ahash_type) {
                                struct ablkcipher_request *req =
                                    ablkcipher_request_cast(async_req);
                                mv_start_new_crypt_req(req);
                        } else {
                                struct ahash_request *req =
                                    ahash_request_cast(async_req);
                                mv_start_new_hash_req(req);
                        }
                        async_req = NULL;
                }

                schedule();

        } while (!kthread_should_stop());
        return 0;
}

static int mv_handle_req(struct crypto_async_request *req)
{
        unsigned long flags;
        int ret;

        spin_lock_irqsave(&cpg->lock, flags);
        ret = crypto_enqueue_request(&cpg->queue, req);
        spin_unlock_irqrestore(&cpg->lock, flags);
        wake_up_process(cpg->queue_th);
        return ret;
}

static int mv_enc_aes_ecb(struct ablkcipher_request *req)
{
        struct mv_req_ctx *req_ctx = ablkcipher_request_ctx(req);

        req_ctx->op = COP_AES_ECB;
        req_ctx->decrypt = 0;

        return mv_handle_req(&req->base);
}

static int mv_dec_aes_ecb(struct ablkcipher_request *req)
{
        struct mv_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
        struct mv_req_ctx *req_ctx = ablkcipher_request_ctx(req);

        req_ctx->op = COP_AES_ECB;
        req_ctx->decrypt = 1;

        compute_aes_dec_key(ctx);
        return mv_handle_req(&req->base);
}

static int mv_enc_aes_cbc(struct ablkcipher_request *req)
{
        struct mv_req_ctx *req_ctx = ablkcipher_request_ctx(req);

        req_ctx->op = COP_AES_CBC;
        req_ctx->decrypt = 0;

        return mv_handle_req(&req->base);
}

static int mv_dec_aes_cbc(struct ablkcipher_request *req)
{
        struct mv_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
        struct mv_req_ctx *req_ctx = ablkcipher_request_ctx(req);

        req_ctx->op = COP_AES_CBC;
        req_ctx->decrypt = 1;

        compute_aes_dec_key(ctx);
        return mv_handle_req(&req->base);
}

static int mv_cra_init(struct crypto_tfm *tfm)
{
        tfm->crt_ablkcipher.reqsize = sizeof(struct mv_req_ctx);
        return 0;
}

static void mv_init_hash_req_ctx(struct mv_req_hash_ctx *ctx, int op,
                                 int is_last, unsigned int req_len,
                                 int count_add)
{
        memset(ctx, 0, sizeof(*ctx));
        ctx->op = op;
        ctx->count = req_len;
        ctx->first_hash = 1;
        ctx->last_chunk = is_last;
        ctx->count_add = count_add;
}

static void mv_update_hash_req_ctx(struct mv_req_hash_ctx *ctx, int is_last,
                                   unsigned req_len)
{
        ctx->last_chunk = is_last;
        ctx->count += req_len;
}

static int mv_hash_init(struct ahash_request *req)
{
        const struct mv_tfm_hash_ctx *tfm_ctx = crypto_tfm_ctx(req->base.tfm);
        mv_init_hash_req_ctx(ahash_request_ctx(req), tfm_ctx->op, 0, 0,
                             tfm_ctx->count_add);
        return 0;
}

static int mv_hash_update(struct ahash_request *req)
{
        if (!req->nbytes)
                return 0;

        mv_update_hash_req_ctx(ahash_request_ctx(req), 0, req->nbytes);
        return mv_handle_req(&req->base);
}

static int mv_hash_final(struct ahash_request *req)
{
        struct mv_req_hash_ctx *ctx = ahash_request_ctx(req);

        ahash_request_set_crypt(req, NULL, req->result, 0);
        mv_update_hash_req_ctx(ctx, 1, 0);
        return mv_handle_req(&req->base);
}

static int mv_hash_finup(struct ahash_request *req)
{
        mv_update_hash_req_ctx(ahash_request_ctx(req), 1, req->nbytes);
        return mv_handle_req(&req->base);
}

static int mv_hash_digest(struct ahash_request *req)
{
        const struct mv_tfm_hash_ctx *tfm_ctx = crypto_tfm_ctx(req->base.tfm);
        mv_init_hash_req_ctx(ahash_request_ctx(req), tfm_ctx->op, 1,
                             req->nbytes, tfm_ctx->count_add);
        return mv_handle_req(&req->base);
}

static void mv_hash_init_ivs(struct mv_tfm_hash_ctx *ctx, const void *istate,
                             const void *ostate)
{
        const struct sha1_state *isha1_state = istate, *osha1_state = ostate;
        int i;
        for (i = 0; i < 5; i++) {
                ctx->ivs[i] = cpu_to_be32(isha1_state->state[i]);
                ctx->ivs[i + 5] = cpu_to_be32(osha1_state->state[i]);
        }
}

static int mv_hash_setkey(struct crypto_ahash *tfm, const u8 *key,
                          unsigned int keylen)
{
        int rc;
        struct mv_tfm_hash_ctx *ctx = crypto_tfm_ctx(&tfm->base);
        int bs, ds, ss;

        if (!ctx->base_hash)
                return 0;

        rc = crypto_shash_setkey(ctx->fallback, key, keylen);
        if (rc)
                return rc;

        /* Can't see a way to extract the ipad/opad from the fallback tfm
           so I'm basically copying code from the hmac module */
        bs = crypto_shash_blocksize(ctx->base_hash);
        ds = crypto_shash_digestsize(ctx->base_hash);
        ss = crypto_shash_statesize(ctx->base_hash);

        {
                struct {
                        struct shash_desc shash;
                        char ctx[crypto_shash_descsize(ctx->base_hash)];
                } desc;
                unsigned int i;
                char ipad[ss];
                char opad[ss];

                desc.shash.tfm = ctx->base_hash;
                desc.shash.flags = crypto_shash_get_flags(ctx->base_hash) &
                    CRYPTO_TFM_REQ_MAY_SLEEP;

                if (keylen > bs) {
                        int err;

                        err = crypto_shash_digest(&desc.shash, key, keylen,
                                                  ipad);
                        if (err)
                                return err;

                        keylen = ds;
                } else {
                        memcpy(ipad, key, keylen);
                }

                memset(ipad + keylen, 0, bs - keylen);
                memcpy(opad, ipad, bs);

                for (i = 0; i < bs; i++) {
                        ipad[i] ^= 0x36;
                        opad[i] ^= 0x5c;
                }

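                /*
                 * Precompute the inner and outer partial hashes: run the
                 * base shash over key^ipad and key^opad for one block each
                 * and export the intermediate states, which are later
                 * loaded as the engine's HMAC IVs by mv_hash_init_ivs().
                 */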
                rc = crypto_shash_init(&desc.shash) ? :
                    crypto_shash_update(&desc.shash, ipad, bs) ? :
                    crypto_shash_export(&desc.shash, ipad) ? :
                    crypto_shash_init(&desc.shash) ? :
                    crypto_shash_update(&desc.shash, opad, bs) ? :
                    crypto_shash_export(&desc.shash, opad);

                if (rc == 0)
                        mv_hash_init_ivs(ctx, ipad, opad);

                return rc;
        }
}

static int mv_cra_hash_init(struct crypto_tfm *tfm, const char *base_hash_name,
                            enum hash_op op, int count_add)
{
        const char *fallback_driver_name = tfm->__crt_alg->cra_name;
        struct mv_tfm_hash_ctx *ctx = crypto_tfm_ctx(tfm);
        struct crypto_shash *fallback_tfm = NULL;
        struct crypto_shash *base_hash = NULL;
        int err = -ENOMEM;

        ctx->op = op;
        ctx->count_add = count_add;

        /* Allocate a fallback and abort if it failed. */
        fallback_tfm = crypto_alloc_shash(fallback_driver_name, 0,
                                          CRYPTO_ALG_NEED_FALLBACK);
        if (IS_ERR(fallback_tfm)) {
                printk(KERN_WARNING MV_CESA
                       "Fallback driver '%s' could not be loaded!\n",
                       fallback_driver_name);
                err = PTR_ERR(fallback_tfm);
                goto out;
        }
        ctx->fallback = fallback_tfm;

        if (base_hash_name) {
                /* Allocate a hash to compute the ipad/opad of hmac. */
                base_hash = crypto_alloc_shash(base_hash_name, 0,
                                               CRYPTO_ALG_NEED_FALLBACK);
                if (IS_ERR(base_hash)) {
                        printk(KERN_WARNING MV_CESA
                               "Base driver '%s' could not be loaded!\n",
                               base_hash_name);
                        err = PTR_ERR(base_hash);
                        goto err_bad_base;
                }
        }
        ctx->base_hash = base_hash;

        crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
                                 sizeof(struct mv_req_hash_ctx) +
                                 crypto_shash_descsize(ctx->fallback));
        return 0;
err_bad_base:
        crypto_free_shash(fallback_tfm);
out:
        return err;
}

static void mv_cra_hash_exit(struct crypto_tfm *tfm)
{
        struct mv_tfm_hash_ctx *ctx = crypto_tfm_ctx(tfm);

        crypto_free_shash(ctx->fallback);
        if (ctx->base_hash)
                crypto_free_shash(ctx->base_hash);
}

static int mv_cra_hash_sha1_init(struct crypto_tfm *tfm)
{
        return mv_cra_hash_init(tfm, NULL, COP_SHA1, 0);
}

static int mv_cra_hash_hmac_sha1_init(struct crypto_tfm *tfm)
{
        return mv_cra_hash_init(tfm, "sha1", COP_HMAC_SHA1, SHA1_BLOCK_SIZE);
}

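/*
 * Interrupt handler: acknowledge the accelerator-done interrupt, cancel
 * the completion watchdog and hand the request over to the queue thread.
 */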
irqreturn_t crypto_int(int irq, void *priv)
{
        u32 val;

        val = readl(cpg->reg + SEC_ACCEL_INT_STATUS);
        if (!(val & SEC_INT_ACCEL0_DONE))
                return IRQ_NONE;

        if (!del_timer(&cpg->completion_timer)) {
                printk(KERN_WARNING MV_CESA
                       "got an interrupt but no pending timer?\n");
        }
        val &= ~SEC_INT_ACCEL0_DONE;
        writel(val, cpg->reg + FPGA_INT_STATUS);
        writel(val, cpg->reg + SEC_ACCEL_INT_STATUS);
        BUG_ON(cpg->eng_st != ENGINE_BUSY);
        cpg->eng_st = ENGINE_W_DEQUEUE;
        wake_up_process(cpg->queue_th);
        return IRQ_HANDLED;
}

struct crypto_alg mv_aes_alg_ecb = {
        .cra_name               = "ecb(aes)",
        .cra_driver_name        = "mv-ecb-aes",
        .cra_priority   = 300,
        .cra_flags      = CRYPTO_ALG_TYPE_ABLKCIPHER |
                          CRYPTO_ALG_KERN_DRIVER_ONLY | CRYPTO_ALG_ASYNC,
        .cra_blocksize  = 16,
        .cra_ctxsize    = sizeof(struct mv_ctx),
        .cra_alignmask  = 0,
        .cra_type       = &crypto_ablkcipher_type,
        .cra_module     = THIS_MODULE,
        .cra_init       = mv_cra_init,
        .cra_u          = {
                .ablkcipher = {
                        .min_keysize    =       AES_MIN_KEY_SIZE,
                        .max_keysize    =       AES_MAX_KEY_SIZE,
                        .setkey         =       mv_setkey_aes,
                        .encrypt        =       mv_enc_aes_ecb,
                        .decrypt        =       mv_dec_aes_ecb,
                },
        },
};

struct crypto_alg mv_aes_alg_cbc = {
        .cra_name               = "cbc(aes)",
        .cra_driver_name        = "mv-cbc-aes",
        .cra_priority   = 300,
        .cra_flags      = CRYPTO_ALG_TYPE_ABLKCIPHER |
                          CRYPTO_ALG_KERN_DRIVER_ONLY | CRYPTO_ALG_ASYNC,
        .cra_blocksize  = AES_BLOCK_SIZE,
        .cra_ctxsize    = sizeof(struct mv_ctx),
        .cra_alignmask  = 0,
        .cra_type       = &crypto_ablkcipher_type,
        .cra_module     = THIS_MODULE,
        .cra_init       = mv_cra_init,
        .cra_u          = {
                .ablkcipher = {
                        .ivsize         =       AES_BLOCK_SIZE,
                        .min_keysize    =       AES_MIN_KEY_SIZE,
                        .max_keysize    =       AES_MAX_KEY_SIZE,
                        .setkey         =       mv_setkey_aes,
                        .encrypt        =       mv_enc_aes_cbc,
                        .decrypt        =       mv_dec_aes_cbc,
                },
        },
};

struct ahash_alg mv_sha1_alg = {
        .init = mv_hash_init,
        .update = mv_hash_update,
        .final = mv_hash_final,
        .finup = mv_hash_finup,
        .digest = mv_hash_digest,
        .halg = {
                 .digestsize = SHA1_DIGEST_SIZE,
                 .base = {
                          .cra_name = "sha1",
                          .cra_driver_name = "mv-sha1",
                          .cra_priority = 300,
                          .cra_flags =
                          CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY |
                          CRYPTO_ALG_NEED_FALLBACK,
                          .cra_blocksize = SHA1_BLOCK_SIZE,
                          .cra_ctxsize = sizeof(struct mv_tfm_hash_ctx),
                          .cra_init = mv_cra_hash_sha1_init,
                          .cra_exit = mv_cra_hash_exit,
                          .cra_module = THIS_MODULE,
                          }
                 }
};

struct ahash_alg mv_hmac_sha1_alg = {
        .init = mv_hash_init,
        .update = mv_hash_update,
        .final = mv_hash_final,
        .finup = mv_hash_finup,
        .digest = mv_hash_digest,
        .setkey = mv_hash_setkey,
        .halg = {
                 .digestsize = SHA1_DIGEST_SIZE,
                 .base = {
                          .cra_name = "hmac(sha1)",
                          .cra_driver_name = "mv-hmac-sha1",
                          .cra_priority = 300,
                          .cra_flags =
                          CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY |
                          CRYPTO_ALG_NEED_FALLBACK,
                          .cra_blocksize = SHA1_BLOCK_SIZE,
                          .cra_ctxsize = sizeof(struct mv_tfm_hash_ctx),
                          .cra_init = mv_cra_hash_hmac_sha1_init,
                          .cra_exit = mv_cra_hash_exit,
                          .cra_module = THIS_MODULE,
                          }
                 }
};

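/*
 * Probe: map the register and SRAM resources, start the queue thread,
 * hook up the interrupt and clock, then register the AES and SHA1/HMAC
 * algorithms with the crypto API.
 */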
static int mv_probe(struct platform_device *pdev)
{
        struct crypto_priv *cp;
        struct resource *res;
        int irq;
        int ret;

        if (cpg) {
                printk(KERN_ERR MV_CESA "Second crypto dev?\n");
                return -EEXIST;
        }

        res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "regs");
        if (!res)
                return -ENXIO;

        cp = kzalloc(sizeof(*cp), GFP_KERNEL);
        if (!cp)
                return -ENOMEM;

        spin_lock_init(&cp->lock);
        crypto_init_queue(&cp->queue, 50);
        cp->reg = ioremap(res->start, resource_size(res));
        if (!cp->reg) {
                ret = -ENOMEM;
                goto err;
        }

        res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "sram");
        if (!res) {
                ret = -ENXIO;
                goto err_unmap_reg;
        }
        cp->sram_size = resource_size(res);
        cp->max_req_size = cp->sram_size - SRAM_CFG_SPACE;
        cp->sram = ioremap(res->start, cp->sram_size);
        if (!cp->sram) {
                ret = -ENOMEM;
                goto err_unmap_reg;
        }

        if (pdev->dev.of_node)
                irq = irq_of_parse_and_map(pdev->dev.of_node, 0);
        else
                irq = platform_get_irq(pdev, 0);
        if (irq < 0 || irq == NO_IRQ) {
                ret = irq;
                goto err_unmap_sram;
        }
        cp->irq = irq;

        platform_set_drvdata(pdev, cp);
        cpg = cp;

        cp->queue_th = kthread_run(queue_manag, cp, "mv_crypto");
        if (IS_ERR(cp->queue_th)) {
                ret = PTR_ERR(cp->queue_th);
                goto err_unmap_sram;
        }

        ret = request_irq(irq, crypto_int, IRQF_DISABLED, dev_name(&pdev->dev),
                        cp);
        if (ret)
                goto err_thread;

        /* Not all platforms can gate the clock, so it is not
           an error if the clock does not exist. */
        cp->clk = clk_get(&pdev->dev, NULL);
        if (!IS_ERR(cp->clk))
                clk_prepare_enable(cp->clk);

        writel(0, cpg->reg + SEC_ACCEL_INT_STATUS);
        writel(SEC_INT_ACCEL0_DONE, cpg->reg + SEC_ACCEL_INT_MASK);
        writel(SEC_CFG_STOP_DIG_ERR, cpg->reg + SEC_ACCEL_CFG);
        writel(SRAM_CONFIG, cpg->reg + SEC_ACCEL_DESC_P0);

        ret = crypto_register_alg(&mv_aes_alg_ecb);
        if (ret) {
                printk(KERN_WARNING MV_CESA
                       "Could not register aes-ecb driver\n");
                goto err_irq;
        }

        ret = crypto_register_alg(&mv_aes_alg_cbc);
        if (ret) {
                printk(KERN_WARNING MV_CESA
                       "Could not register aes-cbc driver\n");
                goto err_unreg_ecb;
        }

        ret = crypto_register_ahash(&mv_sha1_alg);
        if (ret == 0)
                cpg->has_sha1 = 1;
        else
                printk(KERN_WARNING MV_CESA "Could not register sha1 driver\n");

        ret = crypto_register_ahash(&mv_hmac_sha1_alg);
        if (ret == 0) {
                cpg->has_hmac_sha1 = 1;
        } else {
                printk(KERN_WARNING MV_CESA
                       "Could not register hmac-sha1 driver\n");
        }

        return 0;
err_unreg_ecb:
        crypto_unregister_alg(&mv_aes_alg_ecb);
err_irq:
        free_irq(irq, cp);
        if (!IS_ERR(cp->clk)) {
                clk_disable_unprepare(cp->clk);
                clk_put(cp->clk);
        }
err_thread:
        kthread_stop(cp->queue_th);
err_unmap_sram:
        iounmap(cp->sram);
err_unmap_reg:
        iounmap(cp->reg);
err:
        kfree(cp);
        cpg = NULL;
        return ret;
}

static int mv_remove(struct platform_device *pdev)
{
        struct crypto_priv *cp = platform_get_drvdata(pdev);

        crypto_unregister_alg(&mv_aes_alg_ecb);
        crypto_unregister_alg(&mv_aes_alg_cbc);
        if (cp->has_sha1)
                crypto_unregister_ahash(&mv_sha1_alg);
        if (cp->has_hmac_sha1)
                crypto_unregister_ahash(&mv_hmac_sha1_alg);
        kthread_stop(cp->queue_th);
        free_irq(cp->irq, cp);
        memset(cp->sram, 0, cp->sram_size);
        iounmap(cp->sram);
        iounmap(cp->reg);

        if (!IS_ERR(cp->clk)) {
                clk_disable_unprepare(cp->clk);
                clk_put(cp->clk);
        }

        kfree(cp);
        cpg = NULL;
        return 0;
}

static const struct of_device_id mv_cesa_of_match_table[] = {
        { .compatible = "marvell,orion-crypto", },
        {}
};
MODULE_DEVICE_TABLE(of, mv_cesa_of_match_table);

static struct platform_driver marvell_crypto = {
        .probe          = mv_probe,
        .remove         = mv_remove,
        .driver         = {
                .owner  = THIS_MODULE,
                .name   = "mv_crypto",
                .of_match_table = of_match_ptr(mv_cesa_of_match_table),
        },
};
MODULE_ALIAS("platform:mv_crypto");

module_platform_driver(marvell_crypto);

MODULE_AUTHOR("Sebastian Andrzej Siewior <sebastian@breakpoint.cc>");
MODULE_DESCRIPTION("Support for Marvell's cryptographic engine");
MODULE_LICENSE("GPL");