/* linux/drivers/crypto/picoxcell_crypto.c */
   1// SPDX-License-Identifier: GPL-2.0-or-later
   2/*
   3 * Copyright (c) 2010-2011 Picochip Ltd., Jamie Iles
   4 */
   5#include <crypto/internal/aead.h>
   6#include <crypto/aes.h>
   7#include <crypto/algapi.h>
   8#include <crypto/authenc.h>
   9#include <crypto/internal/des.h>
  10#include <crypto/md5.h>
  11#include <crypto/sha1.h>
  12#include <crypto/sha2.h>
  13#include <crypto/internal/skcipher.h>
  14#include <linux/clk.h>
  15#include <linux/crypto.h>
  16#include <linux/delay.h>
  17#include <linux/dma-mapping.h>
  18#include <linux/dmapool.h>
  19#include <linux/err.h>
  20#include <linux/init.h>
  21#include <linux/interrupt.h>
  22#include <linux/io.h>
  23#include <linux/list.h>
  24#include <linux/module.h>
  25#include <linux/of.h>
  26#include <linux/platform_device.h>
  27#include <linux/pm.h>
  28#include <linux/rtnetlink.h>
  29#include <linux/scatterlist.h>
  30#include <linux/sched.h>
  31#include <linux/sizes.h>
  32#include <linux/slab.h>
  33#include <linux/timer.h>
  34
  35#include "picoxcell_crypto_regs.h"
  36
  37/*
  38 * The threshold for the number of entries in the CMD FIFO available before
  39 * the CMD0_CNT interrupt is raised. Increasing this value will reduce the
  40 * number of interrupts raised to the CPU.
  41 */
  42#define CMD0_IRQ_THRESHOLD   1
  43
  44/*
 * The timeout period (in jiffies) for a PDU. When the number of PDUs in
  46 * flight is greater than the STAT_IRQ_THRESHOLD or 0 the timer is disabled.
  47 * When there are packets in flight but lower than the threshold, we enable
  48 * the timer and at expiry, attempt to remove any processed packets from the
  49 * queue and if there are still packets left, schedule the timer again.
  50 */
  51#define PACKET_TIMEOUT      1
  52
  53/* The priority to register each algorithm with. */
  54#define SPACC_CRYPTO_ALG_PRIORITY       10000
  55
  56#define SPACC_CRYPTO_KASUMI_F8_KEY_LEN  16
  57#define SPACC_CRYPTO_IPSEC_CIPHER_PG_SZ 64
  58#define SPACC_CRYPTO_IPSEC_HASH_PG_SZ   64
  59#define SPACC_CRYPTO_IPSEC_MAX_CTXS     32
  60#define SPACC_CRYPTO_IPSEC_FIFO_SZ      32
  61#define SPACC_CRYPTO_L2_CIPHER_PG_SZ    64
  62#define SPACC_CRYPTO_L2_HASH_PG_SZ      64
  63#define SPACC_CRYPTO_L2_MAX_CTXS        128
  64#define SPACC_CRYPTO_L2_FIFO_SZ         128
  65
  66#define MAX_DDT_LEN                     16
  67
  68/* DDT format. This must match the hardware DDT format exactly. */
struct spacc_ddt {
	dma_addr_t	p;	/* DMA address of the data buffer */
	u32		len;	/* buffer length in bytes; a 0/0 entry terminates the list */
};
  73
  74/*
  75 * Asynchronous crypto request structure.
  76 *
  77 * This structure defines a request that is either queued for processing or
  78 * being processed.
  79 */
struct spacc_req {
	struct list_head		list;		/* membership of engine pending/in_progress/completed lists */
	struct spacc_engine		*engine;	/* engine this request is queued on */
	struct crypto_async_request	*req;		/* the originating crypto-API request */
	int				result;		/* completion status reported back to the caller */
	bool				is_encrypt;	/* true for encrypt, false for decrypt */
	unsigned			ctx_id;		/* hardware context page index loaded for this request */
	dma_addr_t			src_addr, dst_addr;	/* DMA addresses of the source/destination DDT lists */
	struct spacc_ddt		*src_ddt, *dst_ddt;	/* CPU pointers to the DDT lists */
	void				(*complete)(struct spacc_req *req);	/* per-type completion handler */
	struct skcipher_request		fallback_req;	// keep at the end
};
  92
/* SPACC definition of an AEAD algorithm. */
struct spacc_aead {
	unsigned long			ctrl_default;	/* default SPA_CTRL register value for this algorithm */
	unsigned long			type;		/* algorithm type flags passed to the setup path */
	struct aead_alg			alg;		/* the crypto-API algorithm we register */
	struct spacc_engine		*engine;	/* owning engine */
	struct list_head		entry;		/* membership of engine->registered_aeads */
	int				key_offs;	/* key offset within the hardware context page */
	int				iv_offs;	/* IV offset within the hardware context page */
};
 102
/* Per-device state for one SPAcc crypto engine. */
struct spacc_engine {
	void __iomem			*regs;		/* memory-mapped control registers */
	struct list_head		pending;	/* requests waiting for FIFO space */
	int				next_ctx;	/* next hardware context page to allocate */
	spinlock_t			hw_lock;	/* protects the request lists and hardware access */
	int				in_flight;	/* number of requests submitted to the hardware */
	struct list_head		completed;	/* requests finished by the hardware */
	struct list_head		in_progress;	/* requests currently being processed */
	struct tasklet_struct		complete;	/* completion bottom half */
	unsigned long			fifo_sz;	/* command FIFO depth (power of two: 32 or 128) */
	void __iomem			*cipher_ctx_base;	/* base of the cipher context pages */
	void __iomem			*hash_key_base;		/* base of the hash key pages */
	struct spacc_alg		*algs;		/* skcipher algorithm table */
	unsigned			num_algs;
	struct list_head		registered_algs;
	struct spacc_aead		*aeads;		/* AEAD algorithm table */
	unsigned			num_aeads;
	struct list_head		registered_aeads;
	size_t				cipher_pg_sz;	/* size of one cipher context page */
	size_t				hash_pg_sz;	/* size of one hash key page */
	const char			*name;
	struct clk			*clk;
	struct device			*dev;
	unsigned			max_ctxs;	/* number of hardware context pages */
	struct timer_list		packet_timeout;	/* see PACKET_TIMEOUT above */
	unsigned			stat_irq_thresh;
	struct dma_pool			*req_pool;	/* pool for DDT list allocations */
};
 131
 132/* Algorithm type mask. */
 133#define SPACC_CRYPTO_ALG_MASK           0x7
 134
 135/* SPACC definition of a crypto algorithm. */
/* SPACC definition of a crypto algorithm. */
struct spacc_alg {
	unsigned long			ctrl_default;	/* default SPA_CTRL register value for this algorithm */
	unsigned long			type;		/* algorithm type flags */
	struct skcipher_alg		alg;		/* the crypto-API algorithm we register */
	struct spacc_engine		*engine;	/* owning engine */
	struct list_head		entry;		/* membership of engine->registered_algs */
	int				key_offs;	/* key offset within the hardware context page */
	int				iv_offs;	/* IV offset within the hardware context page */
};
 145
 146/* Generic context structure for any algorithm type. */
/* Generic context structure for any algorithm type. */
struct spacc_generic_ctx {
	struct spacc_engine		*engine;	/* engine this transform runs on */
	int				flags;		/* algorithm type flags (copied from spacc_alg/aead) */
	int				key_offs;	/* key offset within the context page */
	int				iv_offs;	/* IV offset within the context page */
};
 153
 154/* Block cipher context. */
/* Block cipher context. */
struct spacc_ablk_ctx {
	struct spacc_generic_ctx	generic;
	u8				key[AES_MAX_KEY_SIZE];	/* raw cipher key */
	u8				key_len;		/* key length in bytes */
	/*
	 * The fallback cipher. If the operation can't be done in hardware,
	 * fallback to a software version.
	 */
	struct crypto_skcipher		*sw_cipher;
};
 165
 166/* AEAD cipher context. */
/* AEAD cipher context. */
struct spacc_aead_ctx {
	struct spacc_generic_ctx	generic;
	u8				cipher_key[AES_MAX_KEY_SIZE];	/* encryption key half of the authenc key */
	u8				hash_ctx[SPACC_CRYPTO_IPSEC_HASH_PG_SZ]; /* authentication key half */
	u8				cipher_key_len;
	u8				hash_key_len;
	struct crypto_aead		*sw_cipher;	/* software fallback for unsupported key sizes */
};
 175
 176static int spacc_ablk_submit(struct spacc_req *req);
 177
 178static inline struct spacc_alg *to_spacc_skcipher(struct skcipher_alg *alg)
 179{
 180        return alg ? container_of(alg, struct spacc_alg, alg) : NULL;
 181}
 182
 183static inline struct spacc_aead *to_spacc_aead(struct aead_alg *alg)
 184{
 185        return container_of(alg, struct spacc_aead, alg);
 186}
 187
 188static inline int spacc_fifo_cmd_full(struct spacc_engine *engine)
 189{
 190        u32 fifo_stat = readl(engine->regs + SPA_FIFO_STAT_REG_OFFSET);
 191
 192        return fifo_stat & SPA_FIFO_CMD_FULL;
 193}
 194
 195/*
 196 * Given a cipher context, and a context number, get the base address of the
 197 * context page.
 198 *
 199 * Returns the address of the context page where the key/context may
 200 * be written.
 201 */
 202static inline void __iomem *spacc_ctx_page_addr(struct spacc_generic_ctx *ctx,
 203                                                unsigned indx,
 204                                                bool is_cipher_ctx)
 205{
 206        return is_cipher_ctx ? ctx->engine->cipher_ctx_base +
 207                        (indx * ctx->engine->cipher_pg_sz) :
 208                ctx->engine->hash_key_base + (indx * ctx->engine->hash_pg_sz);
 209}
 210
 211/* The context pages can only be written with 32-bit accesses. */
 212static inline void memcpy_toio32(u32 __iomem *dst, const void *src,
 213                                 unsigned count)
 214{
 215        const u32 *src32 = (const u32 *) src;
 216
 217        while (count--)
 218                writel(*src32++, dst++);
 219}
 220
 221static void spacc_cipher_write_ctx(struct spacc_generic_ctx *ctx,
 222                                   void __iomem *page_addr, const u8 *key,
 223                                   size_t key_len, const u8 *iv, size_t iv_len)
 224{
 225        void __iomem *key_ptr = page_addr + ctx->key_offs;
 226        void __iomem *iv_ptr = page_addr + ctx->iv_offs;
 227
 228        memcpy_toio32(key_ptr, key, key_len / 4);
 229        memcpy_toio32(iv_ptr, iv, iv_len / 4);
 230}
 231
 232/*
 233 * Load a context into the engines context memory.
 234 *
 235 * Returns the index of the context page where the context was loaded.
 236 */
static unsigned spacc_load_ctx(struct spacc_generic_ctx *ctx,
			       const u8 *ciph_key, size_t ciph_len,
			       const u8 *iv, size_t ivlen, const u8 *hash_key,
			       size_t hash_len)
{
	/* Claim the next context page; the index wraps at fifo_sz below. */
	unsigned indx = ctx->engine->next_ctx++;
	void __iomem *ciph_page_addr, *hash_page_addr;

	ciph_page_addr = spacc_ctx_page_addr(ctx, indx, 1);
	hash_page_addr = spacc_ctx_page_addr(ctx, indx, 0);

	/* fifo_sz is a power of two (32 or 128), so this masks the wrap. */
	ctx->engine->next_ctx &= ctx->engine->fifo_sz - 1;
	spacc_cipher_write_ctx(ctx, ciph_page_addr, ciph_key, ciph_len, iv,
			       ivlen);
	/* Tell the engine the cipher key length and which context owns it. */
	writel(ciph_len | (indx << SPA_KEY_SZ_CTX_INDEX_OFFSET) |
	       (1 << SPA_KEY_SZ_CIPHER_OFFSET),
	       ctx->engine->regs + SPA_KEY_SZ_REG_OFFSET);

	if (hash_key) {
		/* hash_len is bytes; memcpy_toio32 copies 32-bit words. */
		memcpy_toio32(hash_page_addr, hash_key, hash_len / 4);
		writel(hash_len | (indx << SPA_KEY_SZ_CTX_INDEX_OFFSET),
		       ctx->engine->regs + SPA_KEY_SZ_REG_OFFSET);
	}

	return indx;
}
 263
 264static inline void ddt_set(struct spacc_ddt *ddt, dma_addr_t phys, size_t len)
 265{
 266        ddt->p = phys;
 267        ddt->len = len;
 268}
 269
 270/*
 271 * Take a crypto request and scatterlists for the data and turn them into DDTs
 272 * for passing to the crypto engines. This also DMA maps the data so that the
 273 * crypto engines can DMA to/from them.
 274 */
 275static struct spacc_ddt *spacc_sg_to_ddt(struct spacc_engine *engine,
 276                                         struct scatterlist *payload,
 277                                         unsigned nbytes,
 278                                         enum dma_data_direction dir,
 279                                         dma_addr_t *ddt_phys)
 280{
 281        unsigned mapped_ents;
 282        struct scatterlist *cur;
 283        struct spacc_ddt *ddt;
 284        int i;
 285        int nents;
 286
 287        nents = sg_nents_for_len(payload, nbytes);
 288        if (nents < 0) {
 289                dev_err(engine->dev, "Invalid numbers of SG.\n");
 290                return NULL;
 291        }
 292        mapped_ents = dma_map_sg(engine->dev, payload, nents, dir);
 293
 294        if (mapped_ents + 1 > MAX_DDT_LEN)
 295                goto out;
 296
 297        ddt = dma_pool_alloc(engine->req_pool, GFP_ATOMIC, ddt_phys);
 298        if (!ddt)
 299                goto out;
 300
 301        for_each_sg(payload, cur, mapped_ents, i)
 302                ddt_set(&ddt[i], sg_dma_address(cur), sg_dma_len(cur));
 303        ddt_set(&ddt[mapped_ents], 0, 0);
 304
 305        return ddt;
 306
 307out:
 308        dma_unmap_sg(engine->dev, payload, nents, dir);
 309        return NULL;
 310}
 311
 312static int spacc_aead_make_ddts(struct aead_request *areq)
 313{
 314        struct crypto_aead *aead = crypto_aead_reqtfm(areq);
 315        struct spacc_req *req = aead_request_ctx(areq);
 316        struct spacc_engine *engine = req->engine;
 317        struct spacc_ddt *src_ddt, *dst_ddt;
 318        unsigned total;
 319        int src_nents, dst_nents;
 320        struct scatterlist *cur;
 321        int i, dst_ents, src_ents;
 322
 323        total = areq->assoclen + areq->cryptlen;
 324        if (req->is_encrypt)
 325                total += crypto_aead_authsize(aead);
 326
 327        src_nents = sg_nents_for_len(areq->src, total);
 328        if (src_nents < 0) {
 329                dev_err(engine->dev, "Invalid numbers of src SG.\n");
 330                return src_nents;
 331        }
 332        if (src_nents + 1 > MAX_DDT_LEN)
 333                return -E2BIG;
 334
 335        dst_nents = 0;
 336        if (areq->src != areq->dst) {
 337                dst_nents = sg_nents_for_len(areq->dst, total);
 338                if (dst_nents < 0) {
 339                        dev_err(engine->dev, "Invalid numbers of dst SG.\n");
 340                        return dst_nents;
 341                }
 342                if (src_nents + 1 > MAX_DDT_LEN)
 343                        return -E2BIG;
 344        }
 345
 346        src_ddt = dma_pool_alloc(engine->req_pool, GFP_ATOMIC, &req->src_addr);
 347        if (!src_ddt)
 348                goto err;
 349
 350        dst_ddt = dma_pool_alloc(engine->req_pool, GFP_ATOMIC, &req->dst_addr);
 351        if (!dst_ddt)
 352                goto err_free_src;
 353
 354        req->src_ddt = src_ddt;
 355        req->dst_ddt = dst_ddt;
 356
 357        if (dst_nents) {
 358                src_ents = dma_map_sg(engine->dev, areq->src, src_nents,
 359                                      DMA_TO_DEVICE);
 360                if (!src_ents)
 361                        goto err_free_dst;
 362
 363                dst_ents = dma_map_sg(engine->dev, areq->dst, dst_nents,
 364                                      DMA_FROM_DEVICE);
 365
 366                if (!dst_ents) {
 367                        dma_unmap_sg(engine->dev, areq->src, src_nents,
 368                                     DMA_TO_DEVICE);
 369                        goto err_free_dst;
 370                }
 371        } else {
 372                src_ents = dma_map_sg(engine->dev, areq->src, src_nents,
 373                                      DMA_BIDIRECTIONAL);
 374                if (!src_ents)
 375                        goto err_free_dst;
 376                dst_ents = src_ents;
 377        }
 378
 379        /*
 380         * Now map in the payload for the source and destination and terminate
 381         * with the NULL pointers.
 382         */
 383        for_each_sg(areq->src, cur, src_ents, i)
 384                ddt_set(src_ddt++, sg_dma_address(cur), sg_dma_len(cur));
 385
 386        /* For decryption we need to skip the associated data. */
 387        total = req->is_encrypt ? 0 : areq->assoclen;
 388        for_each_sg(areq->dst, cur, dst_ents, i) {
 389                unsigned len = sg_dma_len(cur);
 390
 391                if (len <= total) {
 392                        total -= len;
 393                        continue;
 394                }
 395
 396                ddt_set(dst_ddt++, sg_dma_address(cur) + total, len - total);
 397        }
 398
 399        ddt_set(src_ddt, 0, 0);
 400        ddt_set(dst_ddt, 0, 0);
 401
 402        return 0;
 403
 404err_free_dst:
 405        dma_pool_free(engine->req_pool, dst_ddt, req->dst_addr);
 406err_free_src:
 407        dma_pool_free(engine->req_pool, src_ddt, req->src_addr);
 408err:
 409        return -ENOMEM;
 410}
 411
/* Undo spacc_aead_make_ddts(): unmap the data and free both DDT lists. */
static void spacc_aead_free_ddts(struct spacc_req *req)
{
	struct aead_request *areq = container_of(req->req, struct aead_request,
						 base);
	struct crypto_aead *aead = crypto_aead_reqtfm(areq);
	/* Must mirror the length computed when the DDTs were built. */
	unsigned total = areq->assoclen + areq->cryptlen +
			 (req->is_encrypt ? crypto_aead_authsize(aead) : 0);
	struct spacc_aead_ctx *aead_ctx = crypto_aead_ctx(aead);
	struct spacc_engine *engine = aead_ctx->generic.engine;
	int nents = sg_nents_for_len(areq->src, total);

	/* sg_nents_for_len should not fail since it works when mapping sg */
	if (unlikely(nents < 0)) {
		dev_err(engine->dev, "Invalid numbers of src SG.\n");
		return;
	}

	if (areq->src != areq->dst) {
		dma_unmap_sg(engine->dev, areq->src, nents, DMA_TO_DEVICE);
		nents = sg_nents_for_len(areq->dst, total);
		if (unlikely(nents < 0)) {
			dev_err(engine->dev, "Invalid numbers of dst SG.\n");
			return;
		}
		dma_unmap_sg(engine->dev, areq->dst, nents, DMA_FROM_DEVICE);
	} else
		/* In-place operation used a single bidirectional mapping. */
		dma_unmap_sg(engine->dev, areq->src, nents, DMA_BIDIRECTIONAL);

	dma_pool_free(engine->req_pool, req->src_ddt, req->src_addr);
	dma_pool_free(engine->req_pool, req->dst_ddt, req->dst_addr);
}
 443
 444static void spacc_free_ddt(struct spacc_req *req, struct spacc_ddt *ddt,
 445                           dma_addr_t ddt_addr, struct scatterlist *payload,
 446                           unsigned nbytes, enum dma_data_direction dir)
 447{
 448        int nents = sg_nents_for_len(payload, nbytes);
 449
 450        if (nents < 0) {
 451                dev_err(req->engine->dev, "Invalid numbers of SG.\n");
 452                return;
 453        }
 454
 455        dma_unmap_sg(req->engine->dev, payload, nents, dir);
 456        dma_pool_free(req->engine->req_pool, ddt, ddt_addr);
 457}
 458
/*
 * Set the authenc() key for an AEAD transform. Programs the software
 * fallback first, then splits the blob into cipher and hash keys for the
 * hardware context.
 */
static int spacc_aead_setkey(struct crypto_aead *tfm, const u8 *key,
			     unsigned int keylen)
{
	struct spacc_aead_ctx *ctx = crypto_aead_ctx(tfm);
	struct crypto_authenc_keys keys;
	int err;

	/* Keep the fallback's request flags in sync with ours. */
	crypto_aead_clear_flags(ctx->sw_cipher, CRYPTO_TFM_REQ_MASK);
	crypto_aead_set_flags(ctx->sw_cipher, crypto_aead_get_flags(tfm) &
					      CRYPTO_TFM_REQ_MASK);
	err = crypto_aead_setkey(ctx->sw_cipher, key, keylen);
	if (err)
		return err;

	/* Split the authenc() blob into enc and auth halves. */
	if (crypto_authenc_extractkeys(&keys, key, keylen) != 0)
		goto badkey;

	if (keys.enckeylen > AES_MAX_KEY_SIZE)
		goto badkey;

	if (keys.authkeylen > sizeof(ctx->hash_ctx))
		goto badkey;

	memcpy(ctx->cipher_key, keys.enckey, keys.enckeylen);
	ctx->cipher_key_len = keys.enckeylen;

	memcpy(ctx->hash_ctx, keys.authkey, keys.authkeylen);
	ctx->hash_key_len = keys.authkeylen;

	/* Scrub the split key material from the stack. */
	memzero_explicit(&keys, sizeof(keys));
	return 0;

badkey:
	memzero_explicit(&keys, sizeof(keys));
	return -EINVAL;
}
 495
 496static int spacc_aead_setauthsize(struct crypto_aead *tfm,
 497                                  unsigned int authsize)
 498{
 499        struct spacc_aead_ctx *ctx = crypto_tfm_ctx(crypto_aead_tfm(tfm));
 500
 501        return crypto_aead_setauthsize(ctx->sw_cipher, authsize);
 502}
 503
 504/*
 505 * Check if an AEAD request requires a fallback operation. Some requests can't
 506 * be completed in hardware because the hardware may not support certain key
 507 * sizes. In these cases we need to complete the request in software.
 508 */
 509static int spacc_aead_need_fallback(struct aead_request *aead_req)
 510{
 511        struct crypto_aead *aead = crypto_aead_reqtfm(aead_req);
 512        struct aead_alg *alg = crypto_aead_alg(aead);
 513        struct spacc_aead *spacc_alg = to_spacc_aead(alg);
 514        struct spacc_aead_ctx *ctx = crypto_aead_ctx(aead);
 515
 516        /*
 517         * If we have a non-supported key-length, then we need to do a
 518         * software fallback.
 519         */
 520        if ((spacc_alg->ctrl_default & SPACC_CRYPTO_ALG_MASK) ==
 521            SPA_CTRL_CIPH_ALG_AES &&
 522            ctx->cipher_key_len != AES_KEYSIZE_128 &&
 523            ctx->cipher_key_len != AES_KEYSIZE_256)
 524                return 1;
 525
 526        return 0;
 527}
 528
 529static int spacc_aead_do_fallback(struct aead_request *req, unsigned alg_type,
 530                                  bool is_encrypt)
 531{
 532        struct crypto_tfm *old_tfm = crypto_aead_tfm(crypto_aead_reqtfm(req));
 533        struct spacc_aead_ctx *ctx = crypto_tfm_ctx(old_tfm);
 534        struct aead_request *subreq = aead_request_ctx(req);
 535
 536        aead_request_set_tfm(subreq, ctx->sw_cipher);
 537        aead_request_set_callback(subreq, req->base.flags,
 538                                  req->base.complete, req->base.data);
 539        aead_request_set_crypt(subreq, req->src, req->dst, req->cryptlen,
 540                               req->iv);
 541        aead_request_set_ad(subreq, req->assoclen);
 542
 543        return is_encrypt ? crypto_aead_encrypt(subreq) :
 544                            crypto_aead_decrypt(subreq);
 545}
 546
/* AEAD completion: release DMA resources, then notify the original caller. */
static void spacc_aead_complete(struct spacc_req *req)
{
	spacc_aead_free_ddts(req);
	req->req->complete(req->req, req->result);
}
 552
/*
 * Program the hardware for one AEAD request and kick off processing.
 * Always returns -EINPROGRESS; the result is delivered via the completion
 * path. NOTE(review): register writes appear to assume the caller serializes
 * hardware access (engine->hw_lock) — confirm.
 */
static int spacc_aead_submit(struct spacc_req *req)
{
	struct aead_request *aead_req =
		container_of(req->req, struct aead_request, base);
	struct crypto_aead *aead = crypto_aead_reqtfm(aead_req);
	unsigned int authsize = crypto_aead_authsize(aead);
	struct spacc_aead_ctx *ctx = crypto_aead_ctx(aead);
	struct aead_alg *alg = crypto_aead_alg(aead);
	struct spacc_aead *spacc_alg = to_spacc_aead(alg);
	struct spacc_engine *engine = ctx->generic.engine;
	u32 ctrl, proc_len, assoc_len;

	req->result = -EINPROGRESS;
	/* Load key/IV and hash key into a hardware context page. */
	req->ctx_id = spacc_load_ctx(&ctx->generic, ctx->cipher_key,
		ctx->cipher_key_len, aead_req->iv, crypto_aead_ivsize(aead),
		ctx->hash_ctx, ctx->hash_key_len);

	/* Set the source and destination DDT pointers. */
	writel(req->src_addr, engine->regs + SPA_SRC_PTR_REG_OFFSET);
	writel(req->dst_addr, engine->regs + SPA_DST_PTR_REG_OFFSET);
	writel(0, engine->regs + SPA_OFFSET_REG_OFFSET);

	assoc_len = aead_req->assoclen;
	proc_len = aead_req->cryptlen + assoc_len;

	/*
	 * If we are decrypting, we need to take the length of the ICV out of
	 * the processing length.
	 */
	if (!req->is_encrypt)
		proc_len -= authsize;

	writel(proc_len, engine->regs + SPA_PROC_LEN_REG_OFFSET);
	writel(assoc_len, engine->regs + SPA_AAD_LEN_REG_OFFSET);
	writel(authsize, engine->regs + SPA_ICV_LEN_REG_OFFSET);
	writel(0, engine->regs + SPA_ICV_OFFSET_REG_OFFSET);
	writel(0, engine->regs + SPA_AUX_INFO_REG_OFFSET);

	/* Build the control word: algorithm defaults + context + ICV append. */
	ctrl = spacc_alg->ctrl_default | (req->ctx_id << SPA_CTRL_CTX_IDX) |
		(1 << SPA_CTRL_ICV_APPEND);
	if (req->is_encrypt)
		ctrl |= (1 << SPA_CTRL_ENCRYPT_IDX) | (1 << SPA_CTRL_AAD_COPY);
	else
		ctrl |= (1 << SPA_CTRL_KEY_EXP);

	/* Arm the watchdog before starting the packet. */
	mod_timer(&engine->packet_timeout, jiffies + PACKET_TIMEOUT);

	/* Writing CTRL starts processing. */
	writel(ctrl, engine->regs + SPA_CTRL_REG_OFFSET);

	return -EINPROGRESS;
}
 604
 605static int spacc_req_submit(struct spacc_req *req);
 606
/*
 * Drain the pending list into the hardware while there is FIFO capacity.
 * NOTE(review): does no locking of its own — callers appear to hold
 * engine->hw_lock (see spacc_aead_setup); confirm for all call sites.
 */
static void spacc_push(struct spacc_engine *engine)
{
	struct spacc_req *req;

	while (!list_empty(&engine->pending) &&
	       engine->in_flight + 1 <= engine->fifo_sz) {

		++engine->in_flight;
		req = list_first_entry(&engine->pending, struct spacc_req,
				       list);
		list_move_tail(&req->list, &engine->in_progress);

		/* Stash the submit result for the completion path. */
		req->result = spacc_req_submit(req);
	}
}
 622
 623/*
 624 * Setup an AEAD request for processing. This will configure the engine, load
 625 * the context and then start the packet processing.
 626 */
/*
 * Setup an AEAD request for processing. This will configure the engine, load
 * the context and then start the packet processing.
 *
 * Returns -EINPROGRESS on success, -EBUSY when the engine is full and the
 * request may not be backlogged, the fallback's return value when the
 * hardware cannot handle the request, or a negative error.
 */
static int spacc_aead_setup(struct aead_request *req,
			    unsigned alg_type, bool is_encrypt)
{
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	struct aead_alg *alg = crypto_aead_alg(aead);
	struct spacc_engine *engine = to_spacc_aead(alg)->engine;
	struct spacc_req *dev_req = aead_request_ctx(req);
	int err;
	unsigned long flags;

	dev_req->req		= &req->base;
	dev_req->is_encrypt	= is_encrypt;
	dev_req->result		= -EBUSY;
	dev_req->engine		= engine;
	dev_req->complete	= spacc_aead_complete;

	/*
	 * Unsupported key size, or scatterlists too long for the hardware
	 * DDTs (-E2BIG): do the whole operation in software instead.
	 */
	if (unlikely(spacc_aead_need_fallback(req) ||
		     ((err = spacc_aead_make_ddts(req)) == -E2BIG)))
		return spacc_aead_do_fallback(req, alg_type, is_encrypt);

	if (err)
		goto out;

	err = -EINPROGRESS;
	spin_lock_irqsave(&engine->hw_lock, flags);
	if (unlikely(spacc_fifo_cmd_full(engine)) ||
	    engine->in_flight + 1 > engine->fifo_sz) {
		/* Engine full: backlog if allowed, otherwise reject. */
		if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) {
			err = -EBUSY;
			spin_unlock_irqrestore(&engine->hw_lock, flags);
			goto out_free_ddts;
		}
		list_add_tail(&dev_req->list, &engine->pending);
	} else {
		list_add_tail(&dev_req->list, &engine->pending);
		spacc_push(engine);
	}
	spin_unlock_irqrestore(&engine->hw_lock, flags);

	goto out;

out_free_ddts:
	spacc_aead_free_ddts(dev_req);
out:
	return err;
}
 673
 674static int spacc_aead_encrypt(struct aead_request *req)
 675{
 676        struct crypto_aead *aead = crypto_aead_reqtfm(req);
 677        struct spacc_aead *alg = to_spacc_aead(crypto_aead_alg(aead));
 678
 679        return spacc_aead_setup(req, alg->type, 1);
 680}
 681
 682static int spacc_aead_decrypt(struct aead_request *req)
 683{
 684        struct crypto_aead *aead = crypto_aead_reqtfm(req);
 685        struct spacc_aead  *alg = to_spacc_aead(crypto_aead_alg(aead));
 686
 687        return spacc_aead_setup(req, alg->type, 0);
 688}
 689
 690/*
 691 * Initialise a new AEAD context. This is responsible for allocating the
 692 * fallback cipher and initialising the context.
 693 */
/*
 * Initialise a new AEAD context. This is responsible for allocating the
 * fallback cipher and initialising the context.
 */
static int spacc_aead_cra_init(struct crypto_aead *tfm)
{
	struct spacc_aead_ctx *ctx = crypto_aead_ctx(tfm);
	struct aead_alg *alg = crypto_aead_alg(tfm);
	struct spacc_aead *spacc_alg = to_spacc_aead(alg);
	struct spacc_engine *engine = spacc_alg->engine;

	ctx->generic.flags = spacc_alg->type;
	ctx->generic.engine = engine;
	/* Allocate a software implementation of the same algorithm. */
	ctx->sw_cipher = crypto_alloc_aead(alg->base.cra_name, 0,
					   CRYPTO_ALG_NEED_FALLBACK);
	if (IS_ERR(ctx->sw_cipher))
		return PTR_ERR(ctx->sw_cipher);
	ctx->generic.key_offs = spacc_alg->key_offs;
	ctx->generic.iv_offs = spacc_alg->iv_offs;

	/*
	 * The request context must be able to hold either our spacc_req or
	 * a fallback sub-request, whichever is larger.
	 */
	crypto_aead_set_reqsize(
		tfm,
		max(sizeof(struct spacc_req),
		    sizeof(struct aead_request) +
		    crypto_aead_reqsize(ctx->sw_cipher)));

	return 0;
}
 718
 719/*
 720 * Destructor for an AEAD context. This is called when the transform is freed
 721 * and must free the fallback cipher.
 722 */
/*
 * Destructor for an AEAD context. This is called when the transform is freed
 * and must free the fallback cipher.
 */
static void spacc_aead_cra_exit(struct crypto_aead *tfm)
{
	struct spacc_aead_ctx *ctx = crypto_aead_ctx(tfm);

	crypto_free_aead(ctx->sw_cipher);
}
 729
 730/*
 731 * Set the DES key for a block cipher transform. This also performs weak key
 732 * checking if the transform has requested it.
 733 */
 734static int spacc_des_setkey(struct crypto_skcipher *cipher, const u8 *key,
 735                            unsigned int len)
 736{
 737        struct spacc_ablk_ctx *ctx = crypto_skcipher_ctx(cipher);
 738        int err;
 739
 740        err = verify_skcipher_des_key(cipher, key);
 741        if (err)
 742                return err;
 743
 744        memcpy(ctx->key, key, len);
 745        ctx->key_len = len;
 746
 747        return 0;
 748}
 749
 750/*
 751 * Set the 3DES key for a block cipher transform. This also performs weak key
 752 * checking if the transform has requested it.
 753 */
 754static int spacc_des3_setkey(struct crypto_skcipher *cipher, const u8 *key,
 755                             unsigned int len)
 756{
 757        struct spacc_ablk_ctx *ctx = crypto_skcipher_ctx(cipher);
 758        int err;
 759
 760        err = verify_skcipher_des3_key(cipher, key);
 761        if (err)
 762                return err;
 763
 764        memcpy(ctx->key, key, len);
 765        ctx->key_len = len;
 766
 767        return 0;
 768}
 769
 770/*
 771 * Set the key for an AES block cipher. Some key lengths are not supported in
 772 * hardware so this must also check whether a fallback is needed.
 773 */
 774static int spacc_aes_setkey(struct crypto_skcipher *cipher, const u8 *key,
 775                            unsigned int len)
 776{
 777        struct crypto_tfm *tfm = crypto_skcipher_tfm(cipher);
 778        struct spacc_ablk_ctx *ctx = crypto_tfm_ctx(tfm);
 779        int err = 0;
 780
 781        if (len > AES_MAX_KEY_SIZE)
 782                return -EINVAL;
 783
 784        /*
 785         * IPSec engine only supports 128 and 256 bit AES keys. If we get a
 786         * request for any other size (192 bits) then we need to do a software
 787         * fallback.
 788         */
 789        if (len != AES_KEYSIZE_128 && len != AES_KEYSIZE_256) {
 790                if (!ctx->sw_cipher)
 791                        return -EINVAL;
 792
 793                /*
 794                 * Set the fallback transform to use the same request flags as
 795                 * the hardware transform.
 796                 */
 797                crypto_skcipher_clear_flags(ctx->sw_cipher,
 798                                            CRYPTO_TFM_REQ_MASK);
 799                crypto_skcipher_set_flags(ctx->sw_cipher,
 800                                          cipher->base.crt_flags &
 801                                          CRYPTO_TFM_REQ_MASK);
 802
 803                err = crypto_skcipher_setkey(ctx->sw_cipher, key, len);
 804                if (err)
 805                        goto sw_setkey_failed;
 806        }
 807
 808        memcpy(ctx->key, key, len);
 809        ctx->key_len = len;
 810
 811sw_setkey_failed:
 812        return err;
 813}
 814
 815static int spacc_kasumi_f8_setkey(struct crypto_skcipher *cipher,
 816                                  const u8 *key, unsigned int len)
 817{
 818        struct crypto_tfm *tfm = crypto_skcipher_tfm(cipher);
 819        struct spacc_ablk_ctx *ctx = crypto_tfm_ctx(tfm);
 820        int err = 0;
 821
 822        if (len > AES_MAX_KEY_SIZE) {
 823                err = -EINVAL;
 824                goto out;
 825        }
 826
 827        memcpy(ctx->key, key, len);
 828        ctx->key_len = len;
 829
 830out:
 831        return err;
 832}
 833
 834static int spacc_ablk_need_fallback(struct spacc_req *req)
 835{
 836        struct skcipher_request *ablk_req = skcipher_request_cast(req->req);
 837        struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(ablk_req);
 838        struct spacc_alg *spacc_alg = to_spacc_skcipher(crypto_skcipher_alg(tfm));
 839        struct spacc_ablk_ctx *ctx;
 840
 841        ctx = crypto_skcipher_ctx(tfm);
 842
 843        return (spacc_alg->ctrl_default & SPACC_CRYPTO_ALG_MASK) ==
 844                        SPA_CTRL_CIPH_ALG_AES &&
 845                        ctx->key_len != AES_KEYSIZE_128 &&
 846                        ctx->key_len != AES_KEYSIZE_256;
 847}
 848
 849static void spacc_ablk_complete(struct spacc_req *req)
 850{
 851        struct skcipher_request *ablk_req = skcipher_request_cast(req->req);
 852
 853        if (ablk_req->src != ablk_req->dst) {
 854                spacc_free_ddt(req, req->src_ddt, req->src_addr, ablk_req->src,
 855                               ablk_req->cryptlen, DMA_TO_DEVICE);
 856                spacc_free_ddt(req, req->dst_ddt, req->dst_addr, ablk_req->dst,
 857                               ablk_req->cryptlen, DMA_FROM_DEVICE);
 858        } else
 859                spacc_free_ddt(req, req->dst_ddt, req->dst_addr, ablk_req->dst,
 860                               ablk_req->cryptlen, DMA_BIDIRECTIONAL);
 861
 862        req->req->complete(req->req, req->result);
 863}
 864
/*
 * Push a block-cipher request into the engine. Called with the engine's
 * hw_lock held (via spacc_push()); always returns -EINPROGRESS as the
 * operation completes asynchronously via the STAT interrupt/timeout path.
 */
static int spacc_ablk_submit(struct spacc_req *req)
{
	struct skcipher_request *ablk_req = skcipher_request_cast(req->req);
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(ablk_req);
	struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
	struct spacc_alg *spacc_alg = to_spacc_skcipher(alg);
	struct spacc_ablk_ctx *ctx = crypto_skcipher_ctx(tfm);
	struct spacc_engine *engine = ctx->generic.engine;
	u32 ctrl;

	/* Load key + IV into a hardware context; no hash key for ciphers. */
	req->ctx_id = spacc_load_ctx(&ctx->generic, ctx->key,
		ctx->key_len, ablk_req->iv, alg->ivsize,
		NULL, 0);

	/* Program the source/destination DDT pointers and offsets. */
	writel(req->src_addr, engine->regs + SPA_SRC_PTR_REG_OFFSET);
	writel(req->dst_addr, engine->regs + SPA_DST_PTR_REG_OFFSET);
	writel(0, engine->regs + SPA_OFFSET_REG_OFFSET);

	/* Pure cipher operation: no ICV, no AAD. */
	writel(ablk_req->cryptlen, engine->regs + SPA_PROC_LEN_REG_OFFSET);
	writel(0, engine->regs + SPA_ICV_OFFSET_REG_OFFSET);
	writel(0, engine->regs + SPA_AUX_INFO_REG_OFFSET);
	writel(0, engine->regs + SPA_AAD_LEN_REG_OFFSET);

	/*
	 * Decrypt requests set KEY_EXP so the engine expands the key schedule
	 * for the reverse direction; encrypt requests set the ENCRYPT bit.
	 */
	ctrl = spacc_alg->ctrl_default | (req->ctx_id << SPA_CTRL_CTX_IDX) |
		(req->is_encrypt ? (1 << SPA_CTRL_ENCRYPT_IDX) :
		 (1 << SPA_CTRL_KEY_EXP));

	/* Arm the PDU timeout in case the STAT_CNT threshold isn't reached. */
	mod_timer(&engine->packet_timeout, jiffies + PACKET_TIMEOUT);

	/* Writing CTRL kicks off processing — must be the last register write. */
	writel(ctrl, engine->regs + SPA_CTRL_REG_OFFSET);

	return -EINPROGRESS;
}
 898
 899static int spacc_ablk_do_fallback(struct skcipher_request *req,
 900                                  unsigned alg_type, bool is_encrypt)
 901{
 902        struct crypto_tfm *old_tfm =
 903            crypto_skcipher_tfm(crypto_skcipher_reqtfm(req));
 904        struct spacc_ablk_ctx *ctx = crypto_tfm_ctx(old_tfm);
 905        struct spacc_req *dev_req = skcipher_request_ctx(req);
 906        int err;
 907
 908        /*
 909         * Change the request to use the software fallback transform, and once
 910         * the ciphering has completed, put the old transform back into the
 911         * request.
 912         */
 913        skcipher_request_set_tfm(&dev_req->fallback_req, ctx->sw_cipher);
 914        skcipher_request_set_callback(&dev_req->fallback_req, req->base.flags,
 915                                      req->base.complete, req->base.data);
 916        skcipher_request_set_crypt(&dev_req->fallback_req, req->src, req->dst,
 917                                   req->cryptlen, req->iv);
 918        err = is_encrypt ? crypto_skcipher_encrypt(&dev_req->fallback_req) :
 919                           crypto_skcipher_decrypt(&dev_req->fallback_req);
 920
 921        return err;
 922}
 923
/*
 * Common setup path for block-cipher encrypt/decrypt: builds the DMA DDTs
 * for the request and queues it on the engine. Returns -EINPROGRESS on
 * successful queueing, -EBUSY if the engine is full and the request may not
 * be backlogged, -ENOMEM on DDT allocation failure, or the fallback's
 * return value when software fallback is required.
 */
static int spacc_ablk_setup(struct skcipher_request *req, unsigned alg_type,
			    bool is_encrypt)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
	struct spacc_engine *engine = to_spacc_skcipher(alg)->engine;
	struct spacc_req *dev_req = skcipher_request_ctx(req);
	unsigned long flags;
	int err = -ENOMEM;

	dev_req->req		= &req->base;
	dev_req->is_encrypt	= is_encrypt;
	dev_req->engine		= engine;
	dev_req->complete	= spacc_ablk_complete;
	dev_req->result		= -EINPROGRESS;

	/* Unsupported key size (192-bit AES): hand off to software. */
	if (unlikely(spacc_ablk_need_fallback(dev_req)))
		return spacc_ablk_do_fallback(req, alg_type, is_encrypt);

	/*
	 * Create the DDT's for the engine. If we share the same source and
	 * destination then we can optimize by reusing the DDT's.
	 */
	if (req->src != req->dst) {
		dev_req->src_ddt = spacc_sg_to_ddt(engine, req->src,
			req->cryptlen, DMA_TO_DEVICE, &dev_req->src_addr);
		if (!dev_req->src_ddt)
			goto out;

		dev_req->dst_ddt = spacc_sg_to_ddt(engine, req->dst,
			req->cryptlen, DMA_FROM_DEVICE, &dev_req->dst_addr);
		if (!dev_req->dst_ddt)
			goto out_free_src;
	} else {
		/* In-place: one bidirectional DDT serves as both src and dst. */
		dev_req->dst_ddt = spacc_sg_to_ddt(engine, req->dst,
			req->cryptlen, DMA_BIDIRECTIONAL, &dev_req->dst_addr);
		if (!dev_req->dst_ddt)
			goto out;

		dev_req->src_ddt = NULL;
		dev_req->src_addr = dev_req->dst_addr;
	}

	err = -EINPROGRESS;
	spin_lock_irqsave(&engine->hw_lock, flags);
	/*
	 * Check if the engine will accept the operation now. If it won't then
	 * we either stick it on the end of a pending list if we can backlog,
	 * or bailout with an error if not.
	 */
	if (unlikely(spacc_fifo_cmd_full(engine)) ||
	    engine->in_flight + 1 > engine->fifo_sz) {
		if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) {
			err = -EBUSY;
			spin_unlock_irqrestore(&engine->hw_lock, flags);
			goto out_free_ddts;
		}
		list_add_tail(&dev_req->list, &engine->pending);
	} else {
		list_add_tail(&dev_req->list, &engine->pending);
		spacc_push(engine);
	}
	spin_unlock_irqrestore(&engine->hw_lock, flags);

	goto out;

out_free_ddts:
	/* Direction must match how the dst DDT was mapped above. */
	spacc_free_ddt(dev_req, dev_req->dst_ddt, dev_req->dst_addr, req->dst,
		       req->cryptlen, req->src == req->dst ?
		       DMA_BIDIRECTIONAL : DMA_FROM_DEVICE);
out_free_src:
	if (req->src != req->dst)
		spacc_free_ddt(dev_req, dev_req->src_ddt, dev_req->src_addr,
			       req->src, req->cryptlen, DMA_TO_DEVICE);
out:
	return err;
}
1001
1002static int spacc_ablk_init_tfm(struct crypto_skcipher *tfm)
1003{
1004        struct spacc_ablk_ctx *ctx = crypto_skcipher_ctx(tfm);
1005        struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
1006        struct spacc_alg *spacc_alg = to_spacc_skcipher(alg);
1007        struct spacc_engine *engine = spacc_alg->engine;
1008
1009        ctx->generic.flags = spacc_alg->type;
1010        ctx->generic.engine = engine;
1011        if (alg->base.cra_flags & CRYPTO_ALG_NEED_FALLBACK) {
1012                ctx->sw_cipher = crypto_alloc_skcipher(alg->base.cra_name, 0,
1013                                                       CRYPTO_ALG_NEED_FALLBACK);
1014                if (IS_ERR(ctx->sw_cipher)) {
1015                        dev_warn(engine->dev, "failed to allocate fallback for %s\n",
1016                                 alg->base.cra_name);
1017                        return PTR_ERR(ctx->sw_cipher);
1018                }
1019                crypto_skcipher_set_reqsize(tfm, sizeof(struct spacc_req) +
1020                                                 crypto_skcipher_reqsize(ctx->sw_cipher));
1021        } else {
1022                /* take the size without the fallback skcipher_request at the end */
1023                crypto_skcipher_set_reqsize(tfm, offsetof(struct spacc_req,
1024                                                          fallback_req));
1025        }
1026
1027        ctx->generic.key_offs = spacc_alg->key_offs;
1028        ctx->generic.iv_offs = spacc_alg->iv_offs;
1029
1030        return 0;
1031}
1032
/*
 * Transform teardown: release the software fallback cipher, if any.
 * crypto_free_skcipher() tolerates a NULL/unset pointer for algorithms
 * allocated without a fallback — presumably sw_cipher is zeroed by the
 * crypto core's ctx allocation; TODO confirm.
 */
static void spacc_ablk_exit_tfm(struct crypto_skcipher *tfm)
{
	struct spacc_ablk_ctx *ctx = crypto_skcipher_ctx(tfm);

	crypto_free_skcipher(ctx->sw_cipher);
}
1039
1040static int spacc_ablk_encrypt(struct skcipher_request *req)
1041{
1042        struct crypto_skcipher *cipher = crypto_skcipher_reqtfm(req);
1043        struct skcipher_alg *alg = crypto_skcipher_alg(cipher);
1044        struct spacc_alg *spacc_alg = to_spacc_skcipher(alg);
1045
1046        return spacc_ablk_setup(req, spacc_alg->type, 1);
1047}
1048
1049static int spacc_ablk_decrypt(struct skcipher_request *req)
1050{
1051        struct crypto_skcipher *cipher = crypto_skcipher_reqtfm(req);
1052        struct skcipher_alg *alg = crypto_skcipher_alg(cipher);
1053        struct spacc_alg *spacc_alg = to_spacc_skcipher(alg);
1054
1055        return spacc_ablk_setup(req, spacc_alg->type, 0);
1056}
1057
1058static inline int spacc_fifo_stat_empty(struct spacc_engine *engine)
1059{
1060        return readl(engine->regs + SPA_FIFO_STAT_REG_OFFSET) &
1061                SPA_FIFO_STAT_EMPTY;
1062}
1063
/*
 * Drain the engine's status FIFO: move each finished request from the
 * in_progress list to the completed list, translate its hardware status to
 * a POSIX error code, and schedule the completion tasklet. Runs from IRQ
 * and timer context; serialised by hw_lock.
 */
static void spacc_process_done(struct spacc_engine *engine)
{
	struct spacc_req *req;
	unsigned long flags;

	spin_lock_irqsave(&engine->hw_lock, flags);

	while (!spacc_fifo_stat_empty(engine)) {
		/* Status FIFO pops in submission order, so the oldest
		 * in-progress request is the one that just finished. */
		req = list_first_entry(&engine->in_progress, struct spacc_req,
				       list);
		list_move_tail(&req->list, &engine->completed);
		--engine->in_flight;

		/* POP the status register. */
		writel(~0, engine->regs + SPA_STAT_POP_REG_OFFSET);
		req->result = (readl(engine->regs + SPA_STATUS_REG_OFFSET) &
		     SPA_STATUS_RES_CODE_MASK) >> SPA_STATUS_RES_CODE_OFFSET;

		/*
		 * Convert the SPAcc error status into the standard POSIX error
		 * codes.
		 */
		if (unlikely(req->result)) {
			switch (req->result) {
			case SPA_STATUS_ICV_FAIL:
				req->result = -EBADMSG;
				break;

			case SPA_STATUS_MEMORY_ERROR:
				dev_warn(engine->dev,
					 "memory error triggered\n");
				req->result = -EFAULT;
				break;

			case SPA_STATUS_BLOCK_ERROR:
				dev_warn(engine->dev,
					 "block error triggered\n");
				req->result = -EIO;
				break;
			}
		}
	}

	/* Completion callbacks run from the tasklet, outside hw_lock. */
	tasklet_schedule(&engine->complete);

	spin_unlock_irqrestore(&engine->hw_lock, flags);
}
1111
1112static irqreturn_t spacc_spacc_irq(int irq, void *dev)
1113{
1114        struct spacc_engine *engine = (struct spacc_engine *)dev;
1115        u32 spacc_irq_stat = readl(engine->regs + SPA_IRQ_STAT_REG_OFFSET);
1116
1117        writel(spacc_irq_stat, engine->regs + SPA_IRQ_STAT_REG_OFFSET);
1118        spacc_process_done(engine);
1119
1120        return IRQ_HANDLED;
1121}
1122
/*
 * PDU timeout (see PACKET_TIMEOUT): fires when packets are in flight but
 * below the STAT_IRQ threshold, so no completion interrupt would be raised.
 * Reap whatever has finished.
 */
static void spacc_packet_timeout(struct timer_list *t)
{
	struct spacc_engine *engine = from_timer(engine, t, packet_timeout);

	spacc_process_done(engine);
}
1129
1130static int spacc_req_submit(struct spacc_req *req)
1131{
1132        struct crypto_alg *alg = req->req->tfm->__crt_alg;
1133
1134        if (CRYPTO_ALG_TYPE_AEAD == (CRYPTO_ALG_TYPE_MASK & alg->cra_flags))
1135                return spacc_aead_submit(req);
1136        else
1137                return spacc_ablk_submit(req);
1138}
1139
/*
 * Completion tasklet: splice the completed list out under hw_lock, refill
 * the engine from the pending queue, then invoke the completion callbacks
 * with the lock dropped (callbacks may resubmit requests).
 */
static void spacc_spacc_complete(unsigned long data)
{
	struct spacc_engine *engine = (struct spacc_engine *)data;
	struct spacc_req *req, *tmp;
	unsigned long flags;
	LIST_HEAD(completed);

	spin_lock_irqsave(&engine->hw_lock, flags);

	list_splice_init(&engine->completed, &completed);
	spacc_push(engine);
	/* Re-arm the PDU timer while work remains below the IRQ threshold. */
	if (engine->in_flight)
		mod_timer(&engine->packet_timeout, jiffies + PACKET_TIMEOUT);

	spin_unlock_irqrestore(&engine->hw_lock, flags);

	list_for_each_entry_safe(req, tmp, &completed, list) {
		list_del(&req->list);
		req->complete(req);
	}
}
1161
1162#ifdef CONFIG_PM
/* System-sleep suspend callback: gate the engine clock. */
static int spacc_suspend(struct device *dev)
{
	struct spacc_engine *engine = dev_get_drvdata(dev);

	/*
	 * We only support standby mode. All we have to do is gate the clock to
	 * the spacc. The hardware will preserve state until we turn it back
	 * on again.
	 */
	clk_disable(engine->clk);

	return 0;
}
1176
/* System-sleep resume callback: ungate the engine clock. */
static int spacc_resume(struct device *dev)
{
	struct spacc_engine *engine = dev_get_drvdata(dev);

	return clk_enable(engine->clk);
}
1183
/* PM ops: standby-only support via clock gating (see spacc_suspend). */
static const struct dev_pm_ops spacc_pm_ops = {
	.suspend	= spacc_suspend,
	.resume		= spacc_resume,
};
1188#endif /* CONFIG_PM */
1189
1190static inline struct spacc_engine *spacc_dev_to_engine(struct device *dev)
1191{
1192        return dev ? dev_get_drvdata(dev) : NULL;
1193}
1194
1195static ssize_t spacc_stat_irq_thresh_show(struct device *dev,
1196                                          struct device_attribute *attr,
1197                                          char *buf)
1198{
1199        struct spacc_engine *engine = spacc_dev_to_engine(dev);
1200
1201        return snprintf(buf, PAGE_SIZE, "%u\n", engine->stat_irq_thresh);
1202}
1203
/*
 * sysfs store for stat_irq_thresh: parse and clamp the new threshold to
 * [1, fifo_sz - 1], then program it into the IRQ control register.
 * Returns -EINVAL on unparseable input, otherwise the consumed length.
 */
static ssize_t spacc_stat_irq_thresh_store(struct device *dev,
					   struct device_attribute *attr,
					   const char *buf, size_t len)
{
	struct spacc_engine *engine = spacc_dev_to_engine(dev);
	unsigned long thresh;

	if (kstrtoul(buf, 0, &thresh))
		return -EINVAL;

	/* 0 would never fire; fifo_sz or more could never be reached. */
	thresh = clamp(thresh, 1UL, engine->fifo_sz - 1);

	engine->stat_irq_thresh = thresh;
	writel(engine->stat_irq_thresh << SPA_IRQ_CTRL_STAT_CNT_OFFSET,
	       engine->regs + SPA_IRQ_CTRL_REG_OFFSET);

	return len;
}
/* Writable (0644) sysfs knob for the STAT_CNT interrupt threshold. */
static DEVICE_ATTR(stat_irq_thresh, 0644, spacc_stat_irq_thresh_show,
		   spacc_stat_irq_thresh_store);
1224
/*
 * Skcipher algorithms provided by the IPSec variant of the engine.
 * key_offs/iv_offs give the byte offsets of key and IV within the hardware
 * context page. The AES entries need CRYPTO_ALG_NEED_FALLBACK because the
 * hardware only supports 128/256-bit keys (see spacc_aes_setkey()).
 */
static struct spacc_alg ipsec_engine_algs[] = {
	/* AES-CBC */
	{
		.ctrl_default = SPA_CTRL_CIPH_ALG_AES | SPA_CTRL_CIPH_MODE_CBC,
		.key_offs = 0,
		.iv_offs = AES_MAX_KEY_SIZE,
		.alg = {
			.base.cra_name		= "cbc(aes)",
			.base.cra_driver_name	= "cbc-aes-picoxcell",
			.base.cra_priority	= SPACC_CRYPTO_ALG_PRIORITY,
			.base.cra_flags		= CRYPTO_ALG_KERN_DRIVER_ONLY |
						  CRYPTO_ALG_ASYNC |
						  CRYPTO_ALG_ALLOCATES_MEMORY |
						  CRYPTO_ALG_NEED_FALLBACK,
			.base.cra_blocksize	= AES_BLOCK_SIZE,
			.base.cra_ctxsize	= sizeof(struct spacc_ablk_ctx),
			.base.cra_module	= THIS_MODULE,

			.setkey			= spacc_aes_setkey,
			.encrypt		= spacc_ablk_encrypt,
			.decrypt		= spacc_ablk_decrypt,
			.min_keysize		= AES_MIN_KEY_SIZE,
			.max_keysize		= AES_MAX_KEY_SIZE,
			.ivsize			= AES_BLOCK_SIZE,
			.init			= spacc_ablk_init_tfm,
			.exit			= spacc_ablk_exit_tfm,
		},
	},
	/* AES-ECB (no IV, hence no .ivsize) */
	{
		.key_offs = 0,
		.iv_offs = AES_MAX_KEY_SIZE,
		.ctrl_default = SPA_CTRL_CIPH_ALG_AES | SPA_CTRL_CIPH_MODE_ECB,
		.alg = {
			.base.cra_name		= "ecb(aes)",
			.base.cra_driver_name	= "ecb-aes-picoxcell",
			.base.cra_priority	= SPACC_CRYPTO_ALG_PRIORITY,
			.base.cra_flags		= CRYPTO_ALG_KERN_DRIVER_ONLY |
						  CRYPTO_ALG_ASYNC |
						  CRYPTO_ALG_ALLOCATES_MEMORY |
						  CRYPTO_ALG_NEED_FALLBACK,
			.base.cra_blocksize	= AES_BLOCK_SIZE,
			.base.cra_ctxsize	= sizeof(struct spacc_ablk_ctx),
			.base.cra_module	= THIS_MODULE,

			.setkey			= spacc_aes_setkey,
			.encrypt		= spacc_ablk_encrypt,
			.decrypt		= spacc_ablk_decrypt,
			.min_keysize		= AES_MIN_KEY_SIZE,
			.max_keysize		= AES_MAX_KEY_SIZE,
			.init			= spacc_ablk_init_tfm,
			.exit			= spacc_ablk_exit_tfm,
		},
	},
	/* DES-CBC */
	{
		.key_offs = DES_BLOCK_SIZE,
		.iv_offs = 0,
		.ctrl_default = SPA_CTRL_CIPH_ALG_DES | SPA_CTRL_CIPH_MODE_CBC,
		.alg = {
			.base.cra_name		= "cbc(des)",
			.base.cra_driver_name	= "cbc-des-picoxcell",
			.base.cra_priority	= SPACC_CRYPTO_ALG_PRIORITY,
			.base.cra_flags		= CRYPTO_ALG_KERN_DRIVER_ONLY |
						  CRYPTO_ALG_ASYNC |
						  CRYPTO_ALG_ALLOCATES_MEMORY,
			.base.cra_blocksize	= DES_BLOCK_SIZE,
			.base.cra_ctxsize	= sizeof(struct spacc_ablk_ctx),
			.base.cra_module	= THIS_MODULE,

			.setkey			= spacc_des_setkey,
			.encrypt		= spacc_ablk_encrypt,
			.decrypt		= spacc_ablk_decrypt,
			.min_keysize		= DES_KEY_SIZE,
			.max_keysize		= DES_KEY_SIZE,
			.ivsize			= DES_BLOCK_SIZE,
			.init			= spacc_ablk_init_tfm,
			.exit			= spacc_ablk_exit_tfm,
		},
	},
	/* DES-ECB */
	{
		.key_offs = DES_BLOCK_SIZE,
		.iv_offs = 0,
		.ctrl_default = SPA_CTRL_CIPH_ALG_DES | SPA_CTRL_CIPH_MODE_ECB,
		.alg = {
			.base.cra_name		= "ecb(des)",
			.base.cra_driver_name	= "ecb-des-picoxcell",
			.base.cra_priority	= SPACC_CRYPTO_ALG_PRIORITY,
			.base.cra_flags		= CRYPTO_ALG_KERN_DRIVER_ONLY |
						  CRYPTO_ALG_ASYNC |
						  CRYPTO_ALG_ALLOCATES_MEMORY,
			.base.cra_blocksize	= DES_BLOCK_SIZE,
			.base.cra_ctxsize	= sizeof(struct spacc_ablk_ctx),
			.base.cra_module	= THIS_MODULE,

			.setkey			= spacc_des_setkey,
			.encrypt		= spacc_ablk_encrypt,
			.decrypt		= spacc_ablk_decrypt,
			.min_keysize		= DES_KEY_SIZE,
			.max_keysize		= DES_KEY_SIZE,
			.init			= spacc_ablk_init_tfm,
			.exit			= spacc_ablk_exit_tfm,
		},
	},
	/* 3DES-CBC (same hardware DES engine, larger key) */
	{
		.key_offs = DES_BLOCK_SIZE,
		.iv_offs = 0,
		.ctrl_default = SPA_CTRL_CIPH_ALG_DES | SPA_CTRL_CIPH_MODE_CBC,
		.alg = {
			.base.cra_name		= "cbc(des3_ede)",
			.base.cra_driver_name	= "cbc-des3-ede-picoxcell",
			.base.cra_priority	= SPACC_CRYPTO_ALG_PRIORITY,
			.base.cra_flags		= CRYPTO_ALG_ASYNC |
						  CRYPTO_ALG_ALLOCATES_MEMORY |
						  CRYPTO_ALG_KERN_DRIVER_ONLY,
			.base.cra_blocksize	= DES3_EDE_BLOCK_SIZE,
			.base.cra_ctxsize	= sizeof(struct spacc_ablk_ctx),
			.base.cra_module	= THIS_MODULE,

			.setkey			= spacc_des3_setkey,
			.encrypt		= spacc_ablk_encrypt,
			.decrypt		= spacc_ablk_decrypt,
			.min_keysize		= DES3_EDE_KEY_SIZE,
			.max_keysize		= DES3_EDE_KEY_SIZE,
			.ivsize			= DES3_EDE_BLOCK_SIZE,
			.init			= spacc_ablk_init_tfm,
			.exit			= spacc_ablk_exit_tfm,
		},
	},
	/* 3DES-ECB */
	{
		.key_offs = DES_BLOCK_SIZE,
		.iv_offs = 0,
		.ctrl_default = SPA_CTRL_CIPH_ALG_DES | SPA_CTRL_CIPH_MODE_ECB,
		.alg = {
			.base.cra_name		= "ecb(des3_ede)",
			.base.cra_driver_name	= "ecb-des3-ede-picoxcell",
			.base.cra_priority	= SPACC_CRYPTO_ALG_PRIORITY,
			.base.cra_flags		= CRYPTO_ALG_ASYNC |
						  CRYPTO_ALG_ALLOCATES_MEMORY |
						  CRYPTO_ALG_KERN_DRIVER_ONLY,
			.base.cra_blocksize	= DES3_EDE_BLOCK_SIZE,
			.base.cra_ctxsize	= sizeof(struct spacc_ablk_ctx),
			.base.cra_module	= THIS_MODULE,

			.setkey			= spacc_des3_setkey,
			.encrypt		= spacc_ablk_encrypt,
			.decrypt		= spacc_ablk_decrypt,
			.min_keysize		= DES3_EDE_KEY_SIZE,
			.max_keysize		= DES3_EDE_KEY_SIZE,
			.init			= spacc_ablk_init_tfm,
			.exit			= spacc_ablk_exit_tfm,
		},
	},
};
1376
1377static struct spacc_aead ipsec_engine_aeads[] = {
1378        {
1379                .ctrl_default = SPA_CTRL_CIPH_ALG_AES |
1380                                SPA_CTRL_CIPH_MODE_CBC |
1381                                SPA_CTRL_HASH_ALG_SHA |
1382                                SPA_CTRL_HASH_MODE_HMAC,
1383                .key_offs = 0,
1384                .iv_offs = AES_MAX_KEY_SIZE,
1385                .alg = {
1386                        .base = {
1387                                .cra_name = "authenc(hmac(sha1),cbc(aes))",
1388                                .cra_driver_name = "authenc-hmac-sha1-"
1389                                                   "cbc-aes-picoxcell",
1390                                .cra_priority = SPACC_CRYPTO_ALG_PRIORITY,
1391                                .cra_flags = CRYPTO_ALG_ASYNC |
1392                                             CRYPTO_ALG_ALLOCATES_MEMORY |
1393                                             CRYPTO_ALG_NEED_FALLBACK |
1394                                             CRYPTO_ALG_KERN_DRIVER_ONLY,
1395                                .cra_blocksize = AES_BLOCK_SIZE,
1396                                .cra_ctxsize = sizeof(struct spacc_aead_ctx),
1397                                .cra_module = THIS_MODULE,
1398                        },
1399                        .setkey = spacc_aead_setkey,
1400                        .setauthsize = spacc_aead_setauthsize,
1401                        .encrypt = spacc_aead_encrypt,
1402                        .decrypt = spacc_aead_decrypt,
1403                        .ivsize = AES_BLOCK_SIZE,
1404                        .maxauthsize = SHA1_DIGEST_SIZE,
1405                        .init = spacc_aead_cra_init,
1406                        .exit = spacc_aead_cra_exit,
1407                },
1408        },
1409        {
1410                .ctrl_default = SPA_CTRL_CIPH_ALG_AES |
1411                                SPA_CTRL_CIPH_MODE_CBC |
1412                                SPA_CTRL_HASH_ALG_SHA256 |
1413                                SPA_CTRL_HASH_MODE_HMAC,
1414                .key_offs = 0,
1415                .iv_offs = AES_MAX_KEY_SIZE,
1416                .alg = {
1417                        .base = {
1418                                .cra_name = "authenc(hmac(sha256),cbc(aes))",
1419                                .cra_driver_name = "authenc-hmac-sha256-"
1420                                                   "cbc-aes-picoxcell",
1421                                .cra_priority = SPACC_CRYPTO_ALG_PRIORITY,
1422                                .cra_flags = CRYPTO_ALG_ASYNC |
1423                                             CRYPTO_ALG_ALLOCATES_MEMORY |
1424                                             CRYPTO_ALG_NEED_FALLBACK |
1425                                             CRYPTO_ALG_KERN_DRIVER_ONLY,
1426                                .cra_blocksize = AES_BLOCK_SIZE,
1427                                .cra_ctxsize = sizeof(struct spacc_aead_ctx),
1428                                .cra_module = THIS_MODULE,
1429                        },
1430                        .setkey = spacc_aead_setkey,
1431                        .setauthsize = spacc_aead_setauthsize,
1432                        .encrypt = spacc_aead_encrypt,
1433                        .decrypt = spacc_aead_decrypt,
1434                        .ivsize = AES_BLOCK_SIZE,
1435                        .maxauthsize = SHA256_DIGEST_SIZE,
1436                        .init = spacc_aead_cra_init,
1437                        .exit = spacc_aead_cra_exit,
1438                },
1439        },
1440        {
1441                .key_offs = 0,
1442                .iv_offs = AES_MAX_KEY_SIZE,
1443                .ctrl_default = SPA_CTRL_CIPH_ALG_AES |
1444                                SPA_CTRL_CIPH_MODE_CBC |
1445                                SPA_CTRL_HASH_ALG_MD5 |
1446                                SPA_CTRL_HASH_MODE_HMAC,
1447                .alg = {
1448                        .base = {
1449                                .cra_name = "authenc(hmac(md5),cbc(aes))",
1450                                .cra_driver_name = "authenc-hmac-md5-"
1451                                                   "cbc-aes-picoxcell",
1452                                .cra_priority = SPACC_CRYPTO_ALG_PRIORITY,
1453                                .cra_flags = CRYPTO_ALG_ASYNC |
1454                                             CRYPTO_ALG_ALLOCATES_MEMORY |
1455                                             CRYPTO_ALG_NEED_FALLBACK |
1456                                             CRYPTO_ALG_KERN_DRIVER_ONLY,
1457                                .cra_blocksize = AES_BLOCK_SIZE,
1458                                .cra_ctxsize = sizeof(struct spacc_aead_ctx),
1459                                .cra_module = THIS_MODULE,
1460                        },
1461                        .setkey = spacc_aead_setkey,
1462                        .setauthsize = spacc_aead_setauthsize,
1463                        .encrypt = spacc_aead_encrypt,
1464                        .decrypt = spacc_aead_decrypt,
1465                        .ivsize = AES_BLOCK_SIZE,
1466                        .maxauthsize = MD5_DIGEST_SIZE,
1467                        .init = spacc_aead_cra_init,
1468                        .exit = spacc_aead_cra_exit,
1469                },
1470        },
1471        {
1472                .key_offs = DES_BLOCK_SIZE,
1473                .iv_offs = 0,
1474                .ctrl_default = SPA_CTRL_CIPH_ALG_DES |
1475                                SPA_CTRL_CIPH_MODE_CBC |
1476                                SPA_CTRL_HASH_ALG_SHA |
1477                                SPA_CTRL_HASH_MODE_HMAC,
1478                .alg = {
1479                        .base = {
1480                                .cra_name = "authenc(hmac(sha1),cbc(des3_ede))",
1481                                .cra_driver_name = "authenc-hmac-sha1-"
1482                                                   "cbc-3des-picoxcell",
1483                                .cra_priority = SPACC_CRYPTO_ALG_PRIORITY,
1484                                .cra_flags = CRYPTO_ALG_ASYNC |
1485                                             CRYPTO_ALG_ALLOCATES_MEMORY |
1486                                             CRYPTO_ALG_NEED_FALLBACK |
1487                                             CRYPTO_ALG_KERN_DRIVER_ONLY,
1488                                .cra_blocksize = DES3_EDE_BLOCK_SIZE,
1489                                .cra_ctxsize = sizeof(struct spacc_aead_ctx),
1490                                .cra_module = THIS_MODULE,
1491                        },
1492                        .setkey = spacc_aead_setkey,
1493                        .setauthsize = spacc_aead_setauthsize,
1494                        .encrypt = spacc_aead_encrypt,
1495                        .decrypt = spacc_aead_decrypt,
1496                        .ivsize = DES3_EDE_BLOCK_SIZE,
1497                        .maxauthsize = SHA1_DIGEST_SIZE,
1498                        .init = spacc_aead_cra_init,
1499                        .exit = spacc_aead_cra_exit,
1500                },
1501        },
1502        {
1503                .key_offs = DES_BLOCK_SIZE,
1504                .iv_offs = 0,
1505                .ctrl_default = SPA_CTRL_CIPH_ALG_AES |
1506                                SPA_CTRL_CIPH_MODE_CBC |
1507                                SPA_CTRL_HASH_ALG_SHA256 |
1508                                SPA_CTRL_HASH_MODE_HMAC,
1509                .alg = {
1510                        .base = {
1511                                .cra_name = "authenc(hmac(sha256),"
1512                                            "cbc(des3_ede))",
1513                                .cra_driver_name = "authenc-hmac-sha256-"
1514                                                   "cbc-3des-picoxcell",
1515                                .cra_priority = SPACC_CRYPTO_ALG_PRIORITY,
1516                                .cra_flags = CRYPTO_ALG_ASYNC |
1517                                             CRYPTO_ALG_ALLOCATES_MEMORY |
1518                                             CRYPTO_ALG_NEED_FALLBACK |
1519                                             CRYPTO_ALG_KERN_DRIVER_ONLY,
1520                                .cra_blocksize = DES3_EDE_BLOCK_SIZE,
1521                                .cra_ctxsize = sizeof(struct spacc_aead_ctx),
1522                                .cra_module = THIS_MODULE,
1523                        },
1524                        .setkey = spacc_aead_setkey,
1525                        .setauthsize = spacc_aead_setauthsize,
1526                        .encrypt = spacc_aead_encrypt,
1527                        .decrypt = spacc_aead_decrypt,
1528                        .ivsize = DES3_EDE_BLOCK_SIZE,
1529                        .maxauthsize = SHA256_DIGEST_SIZE,
1530                        .init = spacc_aead_cra_init,
1531                        .exit = spacc_aead_cra_exit,
1532                },
1533        },
1534        {
1535                .key_offs = DES_BLOCK_SIZE,
1536                .iv_offs = 0,
1537                .ctrl_default = SPA_CTRL_CIPH_ALG_DES |
1538                                SPA_CTRL_CIPH_MODE_CBC |
1539                                SPA_CTRL_HASH_ALG_MD5 |
1540                                SPA_CTRL_HASH_MODE_HMAC,
1541                .alg = {
1542                        .base = {
1543                                .cra_name = "authenc(hmac(md5),cbc(des3_ede))",
1544                                .cra_driver_name = "authenc-hmac-md5-"
1545                                                   "cbc-3des-picoxcell",
1546                                .cra_priority = SPACC_CRYPTO_ALG_PRIORITY,
1547                                .cra_flags = CRYPTO_ALG_ASYNC |
1548                                             CRYPTO_ALG_ALLOCATES_MEMORY |
1549                                             CRYPTO_ALG_NEED_FALLBACK |
1550                                             CRYPTO_ALG_KERN_DRIVER_ONLY,
1551                                .cra_blocksize = DES3_EDE_BLOCK_SIZE,
1552                                .cra_ctxsize = sizeof(struct spacc_aead_ctx),
1553                                .cra_module = THIS_MODULE,
1554                        },
1555                        .setkey = spacc_aead_setkey,
1556                        .setauthsize = spacc_aead_setauthsize,
1557                        .encrypt = spacc_aead_encrypt,
1558                        .decrypt = spacc_aead_decrypt,
1559                        .ivsize = DES3_EDE_BLOCK_SIZE,
1560                        .maxauthsize = MD5_DIGEST_SIZE,
1561                        .init = spacc_aead_cra_init,
1562                        .exit = spacc_aead_cra_exit,
1563                },
1564        },
1565};
1566
/*
 * Algorithm table for the "picochip,spacc-l2" engine variant: a single
 * KASUMI f8 skcipher.
 */
static struct spacc_alg l2_engine_algs[] = {
	{
		/* Key written at offset 0; IV follows the 16-byte F8 key. */
		.key_offs = 0,
		.iv_offs = SPACC_CRYPTO_KASUMI_F8_KEY_LEN,
		.ctrl_default = SPA_CTRL_CIPH_ALG_KASUMI |
				SPA_CTRL_CIPH_MODE_F8,
		.alg = {
			.base.cra_name		= "f8(kasumi)",
			.base.cra_driver_name	= "f8-kasumi-picoxcell",
			.base.cra_priority	= SPACC_CRYPTO_ALG_PRIORITY,
			.base.cra_flags		= CRYPTO_ALG_ASYNC |
						  CRYPTO_ALG_ALLOCATES_MEMORY |
						  CRYPTO_ALG_KERN_DRIVER_ONLY,
			.base.cra_blocksize	= 8,
			.base.cra_ctxsize	= sizeof(struct spacc_ablk_ctx),
			.base.cra_module	= THIS_MODULE,

			.setkey			= spacc_kasumi_f8_setkey,
			.encrypt		= spacc_ablk_encrypt,
			.decrypt		= spacc_ablk_decrypt,
			/* KASUMI F8 uses a fixed 128-bit key and 64-bit IV. */
			.min_keysize		= 16,
			.max_keysize		= 16,
			.ivsize			= 8,
			.init			= spacc_ablk_init_tfm,
			.exit			= spacc_ablk_exit_tfm,
		},
	},
};
1595
#ifdef CONFIG_OF
/*
 * Device-tree match table: the two supported engine flavours, selected in
 * spacc_probe() via of_device_is_compatible().
 */
static const struct of_device_id spacc_of_id_table[] = {
	{ .compatible = "picochip,spacc-ipsec" },
	{ .compatible = "picochip,spacc-l2" },
	{}
};
MODULE_DEVICE_TABLE(of, spacc_of_id_table);
#endif /* CONFIG_OF */
1604
/*
 * devm_add_action() callback registered in spacc_probe(): ensure the
 * completion tasklet has finished and cannot be rescheduled before the
 * device goes away.
 */
static void spacc_tasklet_kill(void *data)
{
	struct tasklet_struct *tasklet = data;

	tasklet_kill(tasklet);
}
1609
1610static int spacc_probe(struct platform_device *pdev)
1611{
1612        int i, err, ret;
1613        struct resource *irq;
1614        struct device_node *np = pdev->dev.of_node;
1615        struct spacc_engine *engine = devm_kzalloc(&pdev->dev, sizeof(*engine),
1616                                                   GFP_KERNEL);
1617        if (!engine)
1618                return -ENOMEM;
1619
1620        if (of_device_is_compatible(np, "picochip,spacc-ipsec")) {
1621                engine->max_ctxs        = SPACC_CRYPTO_IPSEC_MAX_CTXS;
1622                engine->cipher_pg_sz    = SPACC_CRYPTO_IPSEC_CIPHER_PG_SZ;
1623                engine->hash_pg_sz      = SPACC_CRYPTO_IPSEC_HASH_PG_SZ;
1624                engine->fifo_sz         = SPACC_CRYPTO_IPSEC_FIFO_SZ;
1625                engine->algs            = ipsec_engine_algs;
1626                engine->num_algs        = ARRAY_SIZE(ipsec_engine_algs);
1627                engine->aeads           = ipsec_engine_aeads;
1628                engine->num_aeads       = ARRAY_SIZE(ipsec_engine_aeads);
1629        } else if (of_device_is_compatible(np, "picochip,spacc-l2")) {
1630                engine->max_ctxs        = SPACC_CRYPTO_L2_MAX_CTXS;
1631                engine->cipher_pg_sz    = SPACC_CRYPTO_L2_CIPHER_PG_SZ;
1632                engine->hash_pg_sz      = SPACC_CRYPTO_L2_HASH_PG_SZ;
1633                engine->fifo_sz         = SPACC_CRYPTO_L2_FIFO_SZ;
1634                engine->algs            = l2_engine_algs;
1635                engine->num_algs        = ARRAY_SIZE(l2_engine_algs);
1636        } else {
1637                return -EINVAL;
1638        }
1639
1640        engine->name = dev_name(&pdev->dev);
1641
1642        engine->regs = devm_platform_ioremap_resource(pdev, 0);
1643        if (IS_ERR(engine->regs))
1644                return PTR_ERR(engine->regs);
1645
1646        irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
1647        if (!irq) {
1648                dev_err(&pdev->dev, "no memory/irq resource for engine\n");
1649                return -ENXIO;
1650        }
1651
1652        tasklet_init(&engine->complete, spacc_spacc_complete,
1653                     (unsigned long)engine);
1654
1655        ret = devm_add_action(&pdev->dev, spacc_tasklet_kill,
1656                              &engine->complete);
1657        if (ret)
1658                return ret;
1659
1660        if (devm_request_irq(&pdev->dev, irq->start, spacc_spacc_irq, 0,
1661                             engine->name, engine)) {
1662                dev_err(engine->dev, "failed to request IRQ\n");
1663                return -EBUSY;
1664        }
1665
1666        engine->dev             = &pdev->dev;
1667        engine->cipher_ctx_base = engine->regs + SPA_CIPH_KEY_BASE_REG_OFFSET;
1668        engine->hash_key_base   = engine->regs + SPA_HASH_KEY_BASE_REG_OFFSET;
1669
1670        engine->req_pool = dmam_pool_create(engine->name, engine->dev,
1671                MAX_DDT_LEN * sizeof(struct spacc_ddt), 8, SZ_64K);
1672        if (!engine->req_pool)
1673                return -ENOMEM;
1674
1675        spin_lock_init(&engine->hw_lock);
1676
1677        engine->clk = clk_get(&pdev->dev, "ref");
1678        if (IS_ERR(engine->clk)) {
1679                dev_info(&pdev->dev, "clk unavailable\n");
1680                return PTR_ERR(engine->clk);
1681        }
1682
1683        if (clk_prepare_enable(engine->clk)) {
1684                dev_info(&pdev->dev, "unable to prepare/enable clk\n");
1685                ret = -EIO;
1686                goto err_clk_put;
1687        }
1688
1689        /*
1690         * Use an IRQ threshold of 50% as a default. This seems to be a
1691         * reasonable trade off of latency against throughput but can be
1692         * changed at runtime.
1693         */
1694        engine->stat_irq_thresh = (engine->fifo_sz / 2);
1695
1696        ret = device_create_file(&pdev->dev, &dev_attr_stat_irq_thresh);
1697        if (ret)
1698                goto err_clk_disable;
1699
1700        /*
1701         * Configure the interrupts. We only use the STAT_CNT interrupt as we
1702         * only submit a new packet for processing when we complete another in
1703         * the queue. This minimizes time spent in the interrupt handler.
1704         */
1705        writel(engine->stat_irq_thresh << SPA_IRQ_CTRL_STAT_CNT_OFFSET,
1706               engine->regs + SPA_IRQ_CTRL_REG_OFFSET);
1707        writel(SPA_IRQ_EN_STAT_EN | SPA_IRQ_EN_GLBL_EN,
1708               engine->regs + SPA_IRQ_EN_REG_OFFSET);
1709
1710        timer_setup(&engine->packet_timeout, spacc_packet_timeout, 0);
1711
1712        INIT_LIST_HEAD(&engine->pending);
1713        INIT_LIST_HEAD(&engine->completed);
1714        INIT_LIST_HEAD(&engine->in_progress);
1715        engine->in_flight = 0;
1716
1717        platform_set_drvdata(pdev, engine);
1718
1719        ret = -EINVAL;
1720        INIT_LIST_HEAD(&engine->registered_algs);
1721        for (i = 0; i < engine->num_algs; ++i) {
1722                engine->algs[i].engine = engine;
1723                err = crypto_register_skcipher(&engine->algs[i].alg);
1724                if (!err) {
1725                        list_add_tail(&engine->algs[i].entry,
1726                                      &engine->registered_algs);
1727                        ret = 0;
1728                }
1729                if (err)
1730                        dev_err(engine->dev, "failed to register alg \"%s\"\n",
1731                                engine->algs[i].alg.base.cra_name);
1732                else
1733                        dev_dbg(engine->dev, "registered alg \"%s\"\n",
1734                                engine->algs[i].alg.base.cra_name);
1735        }
1736
1737        INIT_LIST_HEAD(&engine->registered_aeads);
1738        for (i = 0; i < engine->num_aeads; ++i) {
1739                engine->aeads[i].engine = engine;
1740                err = crypto_register_aead(&engine->aeads[i].alg);
1741                if (!err) {
1742                        list_add_tail(&engine->aeads[i].entry,
1743                                      &engine->registered_aeads);
1744                        ret = 0;
1745                }
1746                if (err)
1747                        dev_err(engine->dev, "failed to register alg \"%s\"\n",
1748                                engine->aeads[i].alg.base.cra_name);
1749                else
1750                        dev_dbg(engine->dev, "registered alg \"%s\"\n",
1751                                engine->aeads[i].alg.base.cra_name);
1752        }
1753
1754        if (!ret)
1755                return 0;
1756
1757        del_timer_sync(&engine->packet_timeout);
1758        device_remove_file(&pdev->dev, &dev_attr_stat_irq_thresh);
1759err_clk_disable:
1760        clk_disable_unprepare(engine->clk);
1761err_clk_put:
1762        clk_put(engine->clk);
1763
1764        return ret;
1765}
1766
1767static int spacc_remove(struct platform_device *pdev)
1768{
1769        struct spacc_aead *aead, *an;
1770        struct spacc_alg *alg, *next;
1771        struct spacc_engine *engine = platform_get_drvdata(pdev);
1772
1773        del_timer_sync(&engine->packet_timeout);
1774        device_remove_file(&pdev->dev, &dev_attr_stat_irq_thresh);
1775
1776        list_for_each_entry_safe(aead, an, &engine->registered_aeads, entry) {
1777                list_del(&aead->entry);
1778                crypto_unregister_aead(&aead->alg);
1779        }
1780
1781        list_for_each_entry_safe(alg, next, &engine->registered_algs, entry) {
1782                list_del(&alg->entry);
1783                crypto_unregister_skcipher(&alg->alg);
1784        }
1785
1786        clk_disable_unprepare(engine->clk);
1787        clk_put(engine->clk);
1788
1789        return 0;
1790}
1791
/*
 * Platform driver glue. PM ops are only wired up when CONFIG_PM is set;
 * the OF match table is likewise conditional via of_match_ptr().
 */
static struct platform_driver spacc_driver = {
	.probe		= spacc_probe,
	.remove		= spacc_remove,
	.driver		= {
		.name	= "picochip,spacc",
#ifdef CONFIG_PM
		.pm	= &spacc_pm_ops,
#endif /* CONFIG_PM */
		.of_match_table = of_match_ptr(spacc_of_id_table),
	},
};

module_platform_driver(spacc_driver);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Jamie Iles");
1808