linux/drivers/crypto/n2_core.c
   1// SPDX-License-Identifier: GPL-2.0-only
   2/* n2_core.c: Niagara2 Stream Processing Unit (SPU) crypto support.
   3 *
   4 * Copyright (C) 2010, 2011 David S. Miller <davem@davemloft.net>
   5 */
   6
   7#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
   8
   9#include <linux/kernel.h>
  10#include <linux/module.h>
  11#include <linux/of.h>
  12#include <linux/of_device.h>
  13#include <linux/cpumask.h>
  14#include <linux/slab.h>
  15#include <linux/interrupt.h>
  16#include <linux/crypto.h>
  17#include <crypto/md5.h>
  18#include <crypto/sha.h>
  19#include <crypto/aes.h>
  20#include <crypto/internal/des.h>
  21#include <linux/mutex.h>
  22#include <linux/delay.h>
  23#include <linux/sched.h>
  24
  25#include <crypto/internal/hash.h>
  26#include <crypto/internal/skcipher.h>
  27#include <crypto/scatterwalk.h>
  28#include <crypto/algapi.h>
  29
  30#include <asm/hypervisor.h>
  31#include <asm/mdesc.h>
  32
  33#include "n2_core.h"
  34
  35#define DRV_MODULE_NAME         "n2_crypto"
  36#define DRV_MODULE_VERSION      "0.2"
  37#define DRV_MODULE_RELDATE      "July 28, 2011"
  38
  39static const char version[] =
  40        DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
  41
  42MODULE_AUTHOR("David S. Miller (davem@davemloft.net)");
  43MODULE_DESCRIPTION("Niagara2 Crypto driver");
  44MODULE_LICENSE("GPL");
  45MODULE_VERSION(DRV_MODULE_VERSION);
  46
  47#define N2_CRA_PRIORITY         200
  48
  49static DEFINE_MUTEX(spu_lock);
  50
  51struct spu_queue {
  52        cpumask_t               sharing;
  53        unsigned long           qhandle;
  54
  55        spinlock_t              lock;
  56        u8                      q_type;
  57        void                    *q;
  58        unsigned long           head;
  59        unsigned long           tail;
  60        struct list_head        jobs;
  61
  62        unsigned long           devino;
  63
  64        char                    irq_name[32];
  65        unsigned int            irq;
  66
  67        struct list_head        list;
  68};
  69
  70struct spu_qreg {
  71        struct spu_queue        *queue;
  72        unsigned long           type;
  73};
  74
  75static struct spu_queue **cpu_to_cwq;
  76static struct spu_queue **cpu_to_mau;
  77
  78static unsigned long spu_next_offset(struct spu_queue *q, unsigned long off)
  79{
  80        if (q->q_type == HV_NCS_QTYPE_MAU) {
  81                off += MAU_ENTRY_SIZE;
  82                if (off == (MAU_ENTRY_SIZE * MAU_NUM_ENTRIES))
  83                        off = 0;
  84        } else {
  85                off += CWQ_ENTRY_SIZE;
  86                if (off == (CWQ_ENTRY_SIZE * CWQ_NUM_ENTRIES))
  87                        off = 0;
  88        }
  89        return off;
  90}
  91
  92struct n2_request_common {
  93        struct list_head        entry;
  94        unsigned int            offset;
  95};
  96#define OFFSET_NOT_RUNNING      (~(unsigned int)0)
  97
   98/* An async job request records the final tail value it used in
   99 * n2_request_common->offset.  Test whether that offset lies in the
  100 * range (old_head, new_head], accounting for queue wrap-around.
  101 */
 102static inline bool job_finished(struct spu_queue *q, unsigned int offset,
 103                                unsigned long old_head, unsigned long new_head)
 104{
 105        if (old_head <= new_head) {
 106                if (offset > old_head && offset <= new_head)
 107                        return true;
 108        } else {
 109                if (offset > old_head || offset <= new_head)
 110                        return true;
 111        }
 112        return false;
 113}
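
     /* Worked example (illustrative values): assume a queue of four
      * 64-byte entries, so offsets wrap at 256.  If old_head == 192 and
      * new_head == 64, the head wrapped past the end of the queue; a
      * job that recorded offset 0 or 64 has completed, while one that
      * recorded offset 128 has not:
      *
      *      job_finished(q, 64, 192, 64)  -> true
      *      job_finished(q, 128, 192, 64) -> false
      */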
 114
 115/* When the HEAD marker is unequal to the actual HEAD, we get
 116 * a virtual device INO interrupt.  We should process the
 117 * completed CWQ entries and adjust the HEAD marker to clear
 118 * the IRQ.
 119 */
 120static irqreturn_t cwq_intr(int irq, void *dev_id)
 121{
 122        unsigned long off, new_head, hv_ret;
 123        struct spu_queue *q = dev_id;
 124
 125        pr_err("CPU[%d]: Got CWQ interrupt for qhdl[%lx]\n",
 126               smp_processor_id(), q->qhandle);
 127
 128        spin_lock(&q->lock);
 129
 130        hv_ret = sun4v_ncs_gethead(q->qhandle, &new_head);
 131
 132        pr_err("CPU[%d]: CWQ gethead[%lx] hv_ret[%lu]\n",
 133               smp_processor_id(), new_head, hv_ret);
 134
 135        for (off = q->head; off != new_head; off = spu_next_offset(q, off)) {
 136                /* XXX ... XXX */
 137        }
 138
 139        hv_ret = sun4v_ncs_sethead_marker(q->qhandle, new_head);
 140        if (hv_ret == HV_EOK)
 141                q->head = new_head;
 142
 143        spin_unlock(&q->lock);
 144
 145        return IRQ_HANDLED;
 146}
 147
 148static irqreturn_t mau_intr(int irq, void *dev_id)
 149{
 150        struct spu_queue *q = dev_id;
 151        unsigned long head, hv_ret;
 152
 153        spin_lock(&q->lock);
 154
 155        pr_err("CPU[%d]: Got MAU interrupt for qhdl[%lx]\n",
 156               smp_processor_id(), q->qhandle);
 157
 158        hv_ret = sun4v_ncs_gethead(q->qhandle, &head);
 159
 160        pr_err("CPU[%d]: MAU gethead[%lx] hv_ret[%lu]\n",
 161               smp_processor_id(), head, hv_ret);
 162
 163        sun4v_ncs_sethead_marker(q->qhandle, head);
 164
 165        spin_unlock(&q->lock);
 166
 167        return IRQ_HANDLED;
 168}
 169
 170static void *spu_queue_next(struct spu_queue *q, void *cur)
 171{
 172        return q->q + spu_next_offset(q, cur - q->q);
 173}
 174
 175static int spu_queue_num_free(struct spu_queue *q)
 176{
 177        unsigned long head = q->head;
 178        unsigned long tail = q->tail;
 179        unsigned long end = (CWQ_ENTRY_SIZE * CWQ_NUM_ENTRIES);
 180        unsigned long diff;
 181
 182        if (head > tail)
 183                diff = head - tail;
 184        else
 185                diff = (end - tail) + head;
 186
 187        return (diff / CWQ_ENTRY_SIZE) - 1;
 188}
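
     /* Example of the computation above (hypothetical queue of four
      * 64-byte entries, so 'end' == 256): with head == tail the queue
      * is empty and three slots are reported free -- one slot is always
      * kept unused so that a full queue can be told apart from an empty
      * one.  With head == 128 and tail == 64, three entries are still
      * outstanding and no slots are free.
      */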
 189
 190static void *spu_queue_alloc(struct spu_queue *q, int num_entries)
 191{
 192        int avail = spu_queue_num_free(q);
 193
 194        if (avail >= num_entries)
 195                return q->q + q->tail;
 196
 197        return NULL;
 198}
 199
 200static unsigned long spu_queue_submit(struct spu_queue *q, void *last)
 201{
 202        unsigned long hv_ret, new_tail;
 203
 204        new_tail = spu_next_offset(q, last - q->q);
 205
 206        hv_ret = sun4v_ncs_settail(q->qhandle, new_tail);
 207        if (hv_ret == HV_EOK)
 208                q->tail = new_tail;
 209        return hv_ret;
 210}
 211
 212static u64 control_word_base(unsigned int len, unsigned int hmac_key_len,
 213                             int enc_type, int auth_type,
 214                             unsigned int hash_len,
 215                             bool sfas, bool sob, bool eob, bool encrypt,
 216                             int opcode)
 217{
 218        u64 word = (len - 1) & CONTROL_LEN;
 219
 220        word |= ((u64) opcode << CONTROL_OPCODE_SHIFT);
 221        word |= ((u64) enc_type << CONTROL_ENC_TYPE_SHIFT);
 222        word |= ((u64) auth_type << CONTROL_AUTH_TYPE_SHIFT);
 223        if (sfas)
 224                word |= CONTROL_STORE_FINAL_AUTH_STATE;
 225        if (sob)
 226                word |= CONTROL_START_OF_BLOCK;
 227        if (eob)
 228                word |= CONTROL_END_OF_BLOCK;
 229        if (encrypt)
 230                word |= CONTROL_ENCRYPT;
 231        if (hmac_key_len)
 232                word |= ((u64) (hmac_key_len - 1)) << CONTROL_HMAC_KEY_LEN_SHIFT;
 233        if (hash_len)
 234                word |= ((u64) (hash_len - 1)) << CONTROL_HASH_LEN_SHIFT;
 235
 236        return word;
 237}
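
     /* For reference, the first CWQ entry of a plain hash operation
      * (see n2_do_async_digest() below) builds its control word roughly
      * as:
      *
      *      control_word_base(nbytes, 0, 0, auth_type, digest_size,
      *                        false, true, false, false,
      *                        OPCODE_INPLACE_BIT | OPCODE_AUTH_MAC);
      *
      * i.e. start-of-block set, no HMAC key and no encryption; the last
      * descriptor of the job then ORs in CONTROL_END_OF_BLOCK.
      */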
 238
 239#if 0
 240static inline bool n2_should_run_async(struct spu_queue *qp, int this_len)
 241{
 242        if (this_len >= 64 ||
 243            qp->head != qp->tail)
 244                return true;
 245        return false;
 246}
 247#endif
 248
 249struct n2_ahash_alg {
 250        struct list_head        entry;
 251        const u8                *hash_zero;
 252        const u32               *hash_init;
 253        u8                      hw_op_hashsz;
 254        u8                      digest_size;
 255        u8                      auth_type;
 256        u8                      hmac_type;
 257        struct ahash_alg        alg;
 258};
 259
 260static inline struct n2_ahash_alg *n2_ahash_alg(struct crypto_tfm *tfm)
 261{
 262        struct crypto_alg *alg = tfm->__crt_alg;
 263        struct ahash_alg *ahash_alg;
 264
 265        ahash_alg = container_of(alg, struct ahash_alg, halg.base);
 266
 267        return container_of(ahash_alg, struct n2_ahash_alg, alg);
 268}
 269
 270struct n2_hmac_alg {
 271        const char              *child_alg;
 272        struct n2_ahash_alg     derived;
 273};
 274
 275static inline struct n2_hmac_alg *n2_hmac_alg(struct crypto_tfm *tfm)
 276{
 277        struct crypto_alg *alg = tfm->__crt_alg;
 278        struct ahash_alg *ahash_alg;
 279
 280        ahash_alg = container_of(alg, struct ahash_alg, halg.base);
 281
 282        return container_of(ahash_alg, struct n2_hmac_alg, derived.alg);
 283}
 284
 285struct n2_hash_ctx {
 286        struct crypto_ahash             *fallback_tfm;
 287};
 288
 289#define N2_HASH_KEY_MAX                 32 /* HW limit for all HMAC requests */
 290
 291struct n2_hmac_ctx {
 292        struct n2_hash_ctx              base;
 293
 294        struct crypto_shash             *child_shash;
 295
 296        int                             hash_key_len;
 297        unsigned char                   hash_key[N2_HASH_KEY_MAX];
 298};
 299
 300struct n2_hash_req_ctx {
 301        union {
 302                struct md5_state        md5;
 303                struct sha1_state       sha1;
 304                struct sha256_state     sha256;
 305        } u;
 306
 307        struct ahash_request            fallback_req;
 308};
 309
 310static int n2_hash_async_init(struct ahash_request *req)
 311{
 312        struct n2_hash_req_ctx *rctx = ahash_request_ctx(req);
 313        struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
 314        struct n2_hash_ctx *ctx = crypto_ahash_ctx(tfm);
 315
 316        ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback_tfm);
 317        rctx->fallback_req.base.flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP;
 318
 319        return crypto_ahash_init(&rctx->fallback_req);
 320}
 321
 322static int n2_hash_async_update(struct ahash_request *req)
 323{
 324        struct n2_hash_req_ctx *rctx = ahash_request_ctx(req);
 325        struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
 326        struct n2_hash_ctx *ctx = crypto_ahash_ctx(tfm);
 327
 328        ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback_tfm);
 329        rctx->fallback_req.base.flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP;
 330        rctx->fallback_req.nbytes = req->nbytes;
 331        rctx->fallback_req.src = req->src;
 332
 333        return crypto_ahash_update(&rctx->fallback_req);
 334}
 335
 336static int n2_hash_async_final(struct ahash_request *req)
 337{
 338        struct n2_hash_req_ctx *rctx = ahash_request_ctx(req);
 339        struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
 340        struct n2_hash_ctx *ctx = crypto_ahash_ctx(tfm);
 341
 342        ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback_tfm);
 343        rctx->fallback_req.base.flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP;
 344        rctx->fallback_req.result = req->result;
 345
 346        return crypto_ahash_final(&rctx->fallback_req);
 347}
 348
 349static int n2_hash_async_finup(struct ahash_request *req)
 350{
 351        struct n2_hash_req_ctx *rctx = ahash_request_ctx(req);
 352        struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
 353        struct n2_hash_ctx *ctx = crypto_ahash_ctx(tfm);
 354
 355        ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback_tfm);
 356        rctx->fallback_req.base.flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP;
 357        rctx->fallback_req.nbytes = req->nbytes;
 358        rctx->fallback_req.src = req->src;
 359        rctx->fallback_req.result = req->result;
 360
 361        return crypto_ahash_finup(&rctx->fallback_req);
 362}
 363
 364static int n2_hash_async_noimport(struct ahash_request *req, const void *in)
 365{
 366        return -ENOSYS;
 367}
 368
 369static int n2_hash_async_noexport(struct ahash_request *req, void *out)
 370{
 371        return -ENOSYS;
 372}
 373
 374static int n2_hash_cra_init(struct crypto_tfm *tfm)
 375{
 376        const char *fallback_driver_name = crypto_tfm_alg_name(tfm);
 377        struct crypto_ahash *ahash = __crypto_ahash_cast(tfm);
 378        struct n2_hash_ctx *ctx = crypto_ahash_ctx(ahash);
 379        struct crypto_ahash *fallback_tfm;
 380        int err;
 381
 382        fallback_tfm = crypto_alloc_ahash(fallback_driver_name, 0,
 383                                          CRYPTO_ALG_NEED_FALLBACK);
 384        if (IS_ERR(fallback_tfm)) {
 385                pr_warn("Fallback driver '%s' could not be loaded!\n",
 386                        fallback_driver_name);
 387                err = PTR_ERR(fallback_tfm);
 388                goto out;
 389        }
 390
 391        crypto_ahash_set_reqsize(ahash, (sizeof(struct n2_hash_req_ctx) +
 392                                         crypto_ahash_reqsize(fallback_tfm)));
 393
 394        ctx->fallback_tfm = fallback_tfm;
 395        return 0;
 396
 397out:
 398        return err;
 399}
 400
 401static void n2_hash_cra_exit(struct crypto_tfm *tfm)
 402{
 403        struct crypto_ahash *ahash = __crypto_ahash_cast(tfm);
 404        struct n2_hash_ctx *ctx = crypto_ahash_ctx(ahash);
 405
 406        crypto_free_ahash(ctx->fallback_tfm);
 407}
 408
 409static int n2_hmac_cra_init(struct crypto_tfm *tfm)
 410{
 411        const char *fallback_driver_name = crypto_tfm_alg_name(tfm);
 412        struct crypto_ahash *ahash = __crypto_ahash_cast(tfm);
 413        struct n2_hmac_ctx *ctx = crypto_ahash_ctx(ahash);
 414        struct n2_hmac_alg *n2alg = n2_hmac_alg(tfm);
 415        struct crypto_ahash *fallback_tfm;
 416        struct crypto_shash *child_shash;
 417        int err;
 418
 419        fallback_tfm = crypto_alloc_ahash(fallback_driver_name, 0,
 420                                          CRYPTO_ALG_NEED_FALLBACK);
 421        if (IS_ERR(fallback_tfm)) {
 422                pr_warn("Fallback driver '%s' could not be loaded!\n",
 423                        fallback_driver_name);
 424                err = PTR_ERR(fallback_tfm);
 425                goto out;
 426        }
 427
 428        child_shash = crypto_alloc_shash(n2alg->child_alg, 0, 0);
 429        if (IS_ERR(child_shash)) {
 430                pr_warn("Child shash '%s' could not be loaded!\n",
 431                        n2alg->child_alg);
 432                err = PTR_ERR(child_shash);
 433                goto out_free_fallback;
 434        }
 435
 436        crypto_ahash_set_reqsize(ahash, (sizeof(struct n2_hash_req_ctx) +
 437                                         crypto_ahash_reqsize(fallback_tfm)));
 438
 439        ctx->child_shash = child_shash;
 440        ctx->base.fallback_tfm = fallback_tfm;
 441        return 0;
 442
 443out_free_fallback:
 444        crypto_free_ahash(fallback_tfm);
 445
 446out:
 447        return err;
 448}
 449
 450static void n2_hmac_cra_exit(struct crypto_tfm *tfm)
 451{
 452        struct crypto_ahash *ahash = __crypto_ahash_cast(tfm);
 453        struct n2_hmac_ctx *ctx = crypto_ahash_ctx(ahash);
 454
 455        crypto_free_ahash(ctx->base.fallback_tfm);
 456        crypto_free_shash(ctx->child_shash);
 457}
 458
 459static int n2_hmac_async_setkey(struct crypto_ahash *tfm, const u8 *key,
 460                                unsigned int keylen)
 461{
 462        struct n2_hmac_ctx *ctx = crypto_ahash_ctx(tfm);
 463        struct crypto_shash *child_shash = ctx->child_shash;
 464        struct crypto_ahash *fallback_tfm;
 465        int err, bs, ds;
 466
 467        fallback_tfm = ctx->base.fallback_tfm;
 468        err = crypto_ahash_setkey(fallback_tfm, key, keylen);
 469        if (err)
 470                return err;
 471
 472        bs = crypto_shash_blocksize(child_shash);
 473        ds = crypto_shash_digestsize(child_shash);
 474        BUG_ON(ds > N2_HASH_KEY_MAX);
 475        if (keylen > bs) {
 476                err = crypto_shash_tfm_digest(child_shash, key, keylen,
 477                                              ctx->hash_key);
 478                if (err)
 479                        return err;
 480                keylen = ds;
 481        } else if (keylen <= N2_HASH_KEY_MAX)
 482                memcpy(ctx->hash_key, key, keylen);
 483
 484        ctx->hash_key_len = keylen;
 485
 486        return err;
 487}
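
     /* HMAC keys longer than the child hash's block size are digested
      * down first; e.g. for hmac(sha256) (64-byte block size) a
      * 100-byte key is replaced by its 32-byte SHA-256 digest before
      * being copied into ctx->hash_key.  A key that fits in the block
      * size but exceeds N2_HASH_KEY_MAX is not copied at all;
      * n2_hmac_async_digest() sends such requests to the fallback tfm.
      */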
 488
 489static unsigned long wait_for_tail(struct spu_queue *qp)
 490{
 491        unsigned long head, hv_ret;
 492
 493        do {
 494                hv_ret = sun4v_ncs_gethead(qp->qhandle, &head);
 495                if (hv_ret != HV_EOK) {
 496                        pr_err("Hypervisor error on gethead\n");
 497                        break;
 498                }
 499                if (head == qp->tail) {
 500                        qp->head = head;
 501                        break;
 502                }
 503        } while (1);
 504        return hv_ret;
 505}
 506
 507static unsigned long submit_and_wait_for_tail(struct spu_queue *qp,
 508                                              struct cwq_initial_entry *ent)
 509{
 510        unsigned long hv_ret = spu_queue_submit(qp, ent);
 511
 512        if (hv_ret == HV_EOK)
 513                hv_ret = wait_for_tail(qp);
 514
 515        return hv_ret;
 516}
 517
 518static int n2_do_async_digest(struct ahash_request *req,
 519                              unsigned int auth_type, unsigned int digest_size,
 520                              unsigned int result_size, void *hash_loc,
 521                              unsigned long auth_key, unsigned int auth_key_len)
 522{
 523        struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
 524        struct cwq_initial_entry *ent;
 525        struct crypto_hash_walk walk;
 526        struct spu_queue *qp;
 527        unsigned long flags;
 528        int err = -ENODEV;
 529        int nbytes, cpu;
 530
 531        /* The total effective length of the operation may not
 532         * exceed 2^16.
 533         */
 534        if (unlikely(req->nbytes > (1 << 16))) {
 535                struct n2_hash_req_ctx *rctx = ahash_request_ctx(req);
 536                struct n2_hash_ctx *ctx = crypto_ahash_ctx(tfm);
 537
 538                ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback_tfm);
 539                rctx->fallback_req.base.flags =
 540                        req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP;
 541                rctx->fallback_req.nbytes = req->nbytes;
 542                rctx->fallback_req.src = req->src;
 543                rctx->fallback_req.result = req->result;
 544
 545                return crypto_ahash_digest(&rctx->fallback_req);
 546        }
 547
 548        nbytes = crypto_hash_walk_first(req, &walk);
 549
 550        cpu = get_cpu();
 551        qp = cpu_to_cwq[cpu];
 552        if (!qp)
 553                goto out;
 554
 555        spin_lock_irqsave(&qp->lock, flags);
 556
 557        /* XXX can do better, improve this later by doing a by-hand scatterlist
 558         * XXX walk, etc.
 559         */
 560        ent = qp->q + qp->tail;
 561
 562        ent->control = control_word_base(nbytes, auth_key_len, 0,
 563                                         auth_type, digest_size,
 564                                         false, true, false, false,
 565                                         OPCODE_INPLACE_BIT |
 566                                         OPCODE_AUTH_MAC);
 567        ent->src_addr = __pa(walk.data);
 568        ent->auth_key_addr = auth_key;
 569        ent->auth_iv_addr = __pa(hash_loc);
 570        ent->final_auth_state_addr = 0UL;
 571        ent->enc_key_addr = 0UL;
 572        ent->enc_iv_addr = 0UL;
 573        ent->dest_addr = __pa(hash_loc);
 574
 575        nbytes = crypto_hash_walk_done(&walk, 0);
 576        while (nbytes > 0) {
 577                ent = spu_queue_next(qp, ent);
 578
 579                ent->control = (nbytes - 1);
 580                ent->src_addr = __pa(walk.data);
 581                ent->auth_key_addr = 0UL;
 582                ent->auth_iv_addr = 0UL;
 583                ent->final_auth_state_addr = 0UL;
 584                ent->enc_key_addr = 0UL;
 585                ent->enc_iv_addr = 0UL;
 586                ent->dest_addr = 0UL;
 587
 588                nbytes = crypto_hash_walk_done(&walk, 0);
 589        }
 590        ent->control |= CONTROL_END_OF_BLOCK;
 591
 592        if (submit_and_wait_for_tail(qp, ent) != HV_EOK)
 593                err = -EINVAL;
 594        else
 595                err = 0;
 596
 597        spin_unlock_irqrestore(&qp->lock, flags);
 598
 599        if (!err)
 600                memcpy(req->result, hash_loc, result_size);
 601out:
 602        put_cpu();
 603
 604        return err;
 605}
 606
 607static int n2_hash_async_digest(struct ahash_request *req)
 608{
 609        struct n2_ahash_alg *n2alg = n2_ahash_alg(req->base.tfm);
 610        struct n2_hash_req_ctx *rctx = ahash_request_ctx(req);
 611        int ds;
 612
 613        ds = n2alg->digest_size;
 614        if (unlikely(req->nbytes == 0)) {
 615                memcpy(req->result, n2alg->hash_zero, ds);
 616                return 0;
 617        }
 618        memcpy(&rctx->u, n2alg->hash_init, n2alg->hw_op_hashsz);
 619
 620        return n2_do_async_digest(req, n2alg->auth_type,
 621                                  n2alg->hw_op_hashsz, ds,
 622                                  &rctx->u, 0UL, 0);
 623}
 624
 625static int n2_hmac_async_digest(struct ahash_request *req)
 626{
 627        struct n2_hmac_alg *n2alg = n2_hmac_alg(req->base.tfm);
 628        struct n2_hash_req_ctx *rctx = ahash_request_ctx(req);
 629        struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
 630        struct n2_hmac_ctx *ctx = crypto_ahash_ctx(tfm);
 631        int ds;
 632
 633        ds = n2alg->derived.digest_size;
 634        if (unlikely(req->nbytes == 0) ||
 635            unlikely(ctx->hash_key_len > N2_HASH_KEY_MAX)) {
 636                struct n2_hash_req_ctx *rctx = ahash_request_ctx(req);
 637                struct n2_hash_ctx *ctx = crypto_ahash_ctx(tfm);
 638
 639                ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback_tfm);
 640                rctx->fallback_req.base.flags =
 641                        req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP;
 642                rctx->fallback_req.nbytes = req->nbytes;
 643                rctx->fallback_req.src = req->src;
 644                rctx->fallback_req.result = req->result;
 645
 646                return crypto_ahash_digest(&rctx->fallback_req);
 647        }
 648        memcpy(&rctx->u, n2alg->derived.hash_init,
 649               n2alg->derived.hw_op_hashsz);
 650
 651        return n2_do_async_digest(req, n2alg->derived.hmac_type,
 652                                  n2alg->derived.hw_op_hashsz, ds,
 653                                  &rctx->u,
 654                                  __pa(&ctx->hash_key),
 655                                  ctx->hash_key_len);
 656}
 657
 658struct n2_skcipher_context {
 659        int                     key_len;
 660        int                     enc_type;
 661        union {
 662                u8              aes[AES_MAX_KEY_SIZE];
 663                u8              des[DES_KEY_SIZE];
 664                u8              des3[3 * DES_KEY_SIZE];
 665                u8              arc4[258]; /* S-box, X, Y */
 666        } key;
 667};
 668
 669#define N2_CHUNK_ARR_LEN        16
 670
 671struct n2_crypto_chunk {
 672        struct list_head        entry;
 673        unsigned long           iv_paddr : 44;
 674        unsigned long           arr_len : 20;
 675        unsigned long           dest_paddr;
 676        unsigned long           dest_final;
 677        struct {
 678                unsigned long   src_paddr : 44;
 679                unsigned long   src_len : 20;
 680        } arr[N2_CHUNK_ARR_LEN];
 681};
 682
 683struct n2_request_context {
 684        struct skcipher_walk    walk;
 685        struct list_head        chunk_list;
 686        struct n2_crypto_chunk  chunk;
 687        u8                      temp_iv[16];
 688};
 689
 690/* The SPU allows some level of flexibility for partial cipher blocks
 691 * being specified in a descriptor.
 692 *
 693 * It merely requires that every descriptor's length field is at least
 694 * as large as the cipher block size.  This means that a cipher block
 695 * can span at most 2 descriptors.  However, this does not allow a
 696 * partial block to span into the final descriptor as that would
  697 * violate the rule (since every descriptor's length must be at least
 698 * the block size).  So, for example, assuming an 8 byte block size:
 699 *
 700 *      0xe --> 0xa --> 0x8
 701 *
 702 * is a valid length sequence, whereas:
 703 *
 704 *      0xe --> 0xb --> 0x7
 705 *
 706 * is not a valid sequence.
 707 */
 708
 709struct n2_skcipher_alg {
 710        struct list_head        entry;
 711        u8                      enc_type;
 712        struct skcipher_alg     skcipher;
 713};
 714
 715static inline struct n2_skcipher_alg *n2_skcipher_alg(struct crypto_skcipher *tfm)
 716{
 717        struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
 718
 719        return container_of(alg, struct n2_skcipher_alg, skcipher);
 720}
 721
 722struct n2_skcipher_request_context {
 723        struct skcipher_walk    walk;
 724};
 725
 726static int n2_aes_setkey(struct crypto_skcipher *skcipher, const u8 *key,
 727                         unsigned int keylen)
 728{
 729        struct crypto_tfm *tfm = crypto_skcipher_tfm(skcipher);
 730        struct n2_skcipher_context *ctx = crypto_tfm_ctx(tfm);
 731        struct n2_skcipher_alg *n2alg = n2_skcipher_alg(skcipher);
 732
 733        ctx->enc_type = (n2alg->enc_type & ENC_TYPE_CHAINING_MASK);
 734
 735        switch (keylen) {
 736        case AES_KEYSIZE_128:
 737                ctx->enc_type |= ENC_TYPE_ALG_AES128;
 738                break;
 739        case AES_KEYSIZE_192:
 740                ctx->enc_type |= ENC_TYPE_ALG_AES192;
 741                break;
 742        case AES_KEYSIZE_256:
 743                ctx->enc_type |= ENC_TYPE_ALG_AES256;
 744                break;
 745        default:
 746                return -EINVAL;
 747        }
 748
 749        ctx->key_len = keylen;
 750        memcpy(ctx->key.aes, key, keylen);
 751        return 0;
 752}
 753
 754static int n2_des_setkey(struct crypto_skcipher *skcipher, const u8 *key,
 755                         unsigned int keylen)
 756{
 757        struct crypto_tfm *tfm = crypto_skcipher_tfm(skcipher);
 758        struct n2_skcipher_context *ctx = crypto_tfm_ctx(tfm);
 759        struct n2_skcipher_alg *n2alg = n2_skcipher_alg(skcipher);
 760        int err;
 761
 762        err = verify_skcipher_des_key(skcipher, key);
 763        if (err)
 764                return err;
 765
 766        ctx->enc_type = n2alg->enc_type;
 767
 768        ctx->key_len = keylen;
 769        memcpy(ctx->key.des, key, keylen);
 770        return 0;
 771}
 772
 773static int n2_3des_setkey(struct crypto_skcipher *skcipher, const u8 *key,
 774                          unsigned int keylen)
 775{
 776        struct crypto_tfm *tfm = crypto_skcipher_tfm(skcipher);
 777        struct n2_skcipher_context *ctx = crypto_tfm_ctx(tfm);
 778        struct n2_skcipher_alg *n2alg = n2_skcipher_alg(skcipher);
 779        int err;
 780
 781        err = verify_skcipher_des3_key(skcipher, key);
 782        if (err)
 783                return err;
 784
 785        ctx->enc_type = n2alg->enc_type;
 786
 787        ctx->key_len = keylen;
 788        memcpy(ctx->key.des3, key, keylen);
 789        return 0;
 790}
 791
 792static int n2_arc4_setkey(struct crypto_skcipher *skcipher, const u8 *key,
 793                          unsigned int keylen)
 794{
 795        struct crypto_tfm *tfm = crypto_skcipher_tfm(skcipher);
 796        struct n2_skcipher_context *ctx = crypto_tfm_ctx(tfm);
 797        struct n2_skcipher_alg *n2alg = n2_skcipher_alg(skcipher);
 798        u8 *s = ctx->key.arc4;
 799        u8 *x = s + 256;
 800        u8 *y = x + 1;
 801        int i, j, k;
 802
 803        ctx->enc_type = n2alg->enc_type;
 804
 805        j = k = 0;
 806        *x = 0;
 807        *y = 0;
 808        for (i = 0; i < 256; i++)
 809                s[i] = i;
 810        for (i = 0; i < 256; i++) {
 811                u8 a = s[i];
 812                j = (j + key[k] + a) & 0xff;
 813                s[i] = s[j];
 814                s[j] = a;
 815                if (++k >= keylen)
 816                        k = 0;
 817        }
 818
 819        return 0;
 820}
 821
 822static inline int skcipher_descriptor_len(int nbytes, unsigned int block_size)
 823{
 824        int this_len = nbytes;
 825
 826        this_len -= (nbytes & (block_size - 1));
 827        return this_len > (1 << 16) ? (1 << 16) : this_len;
 828}
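
     /* skcipher_descriptor_len() rounds the remaining byte count down
      * to a whole number of cipher blocks and caps it at the 2^16
      * descriptor limit; e.g. 0x1007 bytes with a 16-byte block size
      * yields 0x1000, while 0x23000 bytes is clamped to 0x10000.
      */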
 829
 830static int __n2_crypt_chunk(struct crypto_skcipher *skcipher,
 831                            struct n2_crypto_chunk *cp,
 832                            struct spu_queue *qp, bool encrypt)
 833{
 834        struct n2_skcipher_context *ctx = crypto_skcipher_ctx(skcipher);
 835        struct cwq_initial_entry *ent;
 836        bool in_place;
 837        int i;
 838
 839        ent = spu_queue_alloc(qp, cp->arr_len);
 840        if (!ent) {
 841                pr_info("queue_alloc() of %d fails\n",
 842                        cp->arr_len);
 843                return -EBUSY;
 844        }
 845
 846        in_place = (cp->dest_paddr == cp->arr[0].src_paddr);
 847
 848        ent->control = control_word_base(cp->arr[0].src_len,
 849                                         0, ctx->enc_type, 0, 0,
 850                                         false, true, false, encrypt,
 851                                         OPCODE_ENCRYPT |
 852                                         (in_place ? OPCODE_INPLACE_BIT : 0));
 853        ent->src_addr = cp->arr[0].src_paddr;
 854        ent->auth_key_addr = 0UL;
 855        ent->auth_iv_addr = 0UL;
 856        ent->final_auth_state_addr = 0UL;
 857        ent->enc_key_addr = __pa(&ctx->key);
 858        ent->enc_iv_addr = cp->iv_paddr;
 859        ent->dest_addr = (in_place ? 0UL : cp->dest_paddr);
 860
 861        for (i = 1; i < cp->arr_len; i++) {
 862                ent = spu_queue_next(qp, ent);
 863
 864                ent->control = cp->arr[i].src_len - 1;
 865                ent->src_addr = cp->arr[i].src_paddr;
 866                ent->auth_key_addr = 0UL;
 867                ent->auth_iv_addr = 0UL;
 868                ent->final_auth_state_addr = 0UL;
 869                ent->enc_key_addr = 0UL;
 870                ent->enc_iv_addr = 0UL;
 871                ent->dest_addr = 0UL;
 872        }
 873        ent->control |= CONTROL_END_OF_BLOCK;
 874
 875        return (spu_queue_submit(qp, ent) != HV_EOK) ? -EINVAL : 0;
 876}
 877
 878static int n2_compute_chunks(struct skcipher_request *req)
 879{
 880        struct n2_request_context *rctx = skcipher_request_ctx(req);
 881        struct skcipher_walk *walk = &rctx->walk;
 882        struct n2_crypto_chunk *chunk;
 883        unsigned long dest_prev;
 884        unsigned int tot_len;
 885        bool prev_in_place;
 886        int err, nbytes;
 887
 888        err = skcipher_walk_async(walk, req);
 889        if (err)
 890                return err;
 891
 892        INIT_LIST_HEAD(&rctx->chunk_list);
 893
 894        chunk = &rctx->chunk;
 895        INIT_LIST_HEAD(&chunk->entry);
 896
 897        chunk->iv_paddr = 0UL;
 898        chunk->arr_len = 0;
 899        chunk->dest_paddr = 0UL;
 900
 901        prev_in_place = false;
 902        dest_prev = ~0UL;
 903        tot_len = 0;
 904
 905        while ((nbytes = walk->nbytes) != 0) {
 906                unsigned long dest_paddr, src_paddr;
 907                bool in_place;
 908                int this_len;
 909
 910                src_paddr = (page_to_phys(walk->src.phys.page) +
 911                             walk->src.phys.offset);
 912                dest_paddr = (page_to_phys(walk->dst.phys.page) +
 913                              walk->dst.phys.offset);
 914                in_place = (src_paddr == dest_paddr);
 915                this_len = skcipher_descriptor_len(nbytes, walk->blocksize);
 916
 917                if (chunk->arr_len != 0) {
 918                        if (in_place != prev_in_place ||
 919                            (!prev_in_place &&
 920                             dest_paddr != dest_prev) ||
 921                            chunk->arr_len == N2_CHUNK_ARR_LEN ||
 922                            tot_len + this_len > (1 << 16)) {
 923                                chunk->dest_final = dest_prev;
 924                                list_add_tail(&chunk->entry,
 925                                              &rctx->chunk_list);
 926                                chunk = kzalloc(sizeof(*chunk), GFP_ATOMIC);
 927                                if (!chunk) {
 928                                        err = -ENOMEM;
 929                                        break;
 930                                }
 931                                INIT_LIST_HEAD(&chunk->entry);
 932                        }
 933                }
 934                if (chunk->arr_len == 0) {
 935                        chunk->dest_paddr = dest_paddr;
 936                        tot_len = 0;
 937                }
 938                chunk->arr[chunk->arr_len].src_paddr = src_paddr;
 939                chunk->arr[chunk->arr_len].src_len = this_len;
 940                chunk->arr_len++;
 941
 942                dest_prev = dest_paddr + this_len;
 943                prev_in_place = in_place;
 944                tot_len += this_len;
 945
 946                err = skcipher_walk_done(walk, nbytes - this_len);
 947                if (err)
 948                        break;
 949        }
 950        if (!err && chunk->arr_len != 0) {
 951                chunk->dest_final = dest_prev;
 952                list_add_tail(&chunk->entry, &rctx->chunk_list);
 953        }
 954
 955        return err;
 956}
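
     /* A new chunk is started above whenever the in-place property
      * changes, an out-of-place destination stops being physically
      * contiguous with the previous segment, the per-chunk descriptor
      * array fills up (N2_CHUNK_ARR_LEN entries), or the running length
      * would exceed the 2^16 byte limit.
      */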
 957
 958static void n2_chunk_complete(struct skcipher_request *req, void *final_iv)
 959{
 960        struct n2_request_context *rctx = skcipher_request_ctx(req);
 961        struct n2_crypto_chunk *c, *tmp;
 962
 963        if (final_iv)
 964                memcpy(rctx->walk.iv, final_iv, rctx->walk.blocksize);
 965
 966        list_for_each_entry_safe(c, tmp, &rctx->chunk_list, entry) {
 967                list_del(&c->entry);
 968                if (unlikely(c != &rctx->chunk))
 969                        kfree(c);
 970        }
 971
 972}
 973
 974static int n2_do_ecb(struct skcipher_request *req, bool encrypt)
 975{
 976        struct n2_request_context *rctx = skcipher_request_ctx(req);
 977        struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
 978        int err = n2_compute_chunks(req);
 979        struct n2_crypto_chunk *c, *tmp;
 980        unsigned long flags, hv_ret;
 981        struct spu_queue *qp;
 982
 983        if (err)
 984                return err;
 985
 986        qp = cpu_to_cwq[get_cpu()];
 987        err = -ENODEV;
 988        if (!qp)
 989                goto out;
 990
 991        spin_lock_irqsave(&qp->lock, flags);
 992
 993        list_for_each_entry_safe(c, tmp, &rctx->chunk_list, entry) {
 994                err = __n2_crypt_chunk(tfm, c, qp, encrypt);
 995                if (err)
 996                        break;
 997                list_del(&c->entry);
 998                if (unlikely(c != &rctx->chunk))
 999                        kfree(c);
1000        }
1001        if (!err) {
1002                hv_ret = wait_for_tail(qp);
1003                if (hv_ret != HV_EOK)
1004                        err = -EINVAL;
1005        }
1006
1007        spin_unlock_irqrestore(&qp->lock, flags);
1008
1009out:
1010        put_cpu();
1011
1012        n2_chunk_complete(req, NULL);
1013        return err;
1014}
1015
1016static int n2_encrypt_ecb(struct skcipher_request *req)
1017{
1018        return n2_do_ecb(req, true);
1019}
1020
1021static int n2_decrypt_ecb(struct skcipher_request *req)
1022{
1023        return n2_do_ecb(req, false);
1024}
1025
1026static int n2_do_chaining(struct skcipher_request *req, bool encrypt)
1027{
1028        struct n2_request_context *rctx = skcipher_request_ctx(req);
1029        struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
1030        unsigned long flags, hv_ret, iv_paddr;
1031        int err = n2_compute_chunks(req);
1032        struct n2_crypto_chunk *c, *tmp;
1033        struct spu_queue *qp;
1034        void *final_iv_addr;
1035
1036        final_iv_addr = NULL;
1037
1038        if (err)
1039                return err;
1040
1041        qp = cpu_to_cwq[get_cpu()];
1042        err = -ENODEV;
1043        if (!qp)
1044                goto out;
1045
1046        spin_lock_irqsave(&qp->lock, flags);
1047
1048        if (encrypt) {
1049                iv_paddr = __pa(rctx->walk.iv);
1050                list_for_each_entry_safe(c, tmp, &rctx->chunk_list,
1051                                         entry) {
1052                        c->iv_paddr = iv_paddr;
1053                        err = __n2_crypt_chunk(tfm, c, qp, true);
1054                        if (err)
1055                                break;
1056                        iv_paddr = c->dest_final - rctx->walk.blocksize;
1057                        list_del(&c->entry);
1058                        if (unlikely(c != &rctx->chunk))
1059                                kfree(c);
1060                }
1061                final_iv_addr = __va(iv_paddr);
1062        } else {
1063                list_for_each_entry_safe_reverse(c, tmp, &rctx->chunk_list,
1064                                                 entry) {
1065                        if (c == &rctx->chunk) {
1066                                iv_paddr = __pa(rctx->walk.iv);
1067                        } else {
1068                                iv_paddr = (tmp->arr[tmp->arr_len-1].src_paddr +
1069                                            tmp->arr[tmp->arr_len-1].src_len -
1070                                            rctx->walk.blocksize);
1071                        }
1072                        if (!final_iv_addr) {
1073                                unsigned long pa;
1074
1075                                pa = (c->arr[c->arr_len-1].src_paddr +
1076                                      c->arr[c->arr_len-1].src_len -
1077                                      rctx->walk.blocksize);
1078                                final_iv_addr = rctx->temp_iv;
1079                                memcpy(rctx->temp_iv, __va(pa),
1080                                       rctx->walk.blocksize);
1081                        }
1082                        c->iv_paddr = iv_paddr;
1083                        err = __n2_crypt_chunk(tfm, c, qp, false);
1084                        if (err)
1085                                break;
1086                        list_del(&c->entry);
1087                        if (unlikely(c != &rctx->chunk))
1088                                kfree(c);
1089                }
1090        }
1091        if (!err) {
1092                hv_ret = wait_for_tail(qp);
1093                if (hv_ret != HV_EOK)
1094                        err = -EINVAL;
1095        }
1096
1097        spin_unlock_irqrestore(&qp->lock, flags);
1098
1099out:
1100        put_cpu();
1101
1102        n2_chunk_complete(req, err ? NULL : final_iv_addr);
1103        return err;
1104}
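
     /* For CBC/CFB encryption the IV of each chunk is the last output
      * block of the previous chunk, so chunks are submitted in order.
      * For decryption the IV is the last *ciphertext* block of the
      * previous chunk, so the list is walked in reverse and the final
      * ciphertext block is saved into rctx->temp_iv before an in-place
      * operation can overwrite it.
      */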
1105
1106static int n2_encrypt_chaining(struct skcipher_request *req)
1107{
1108        return n2_do_chaining(req, true);
1109}
1110
1111static int n2_decrypt_chaining(struct skcipher_request *req)
1112{
1113        return n2_do_chaining(req, false);
1114}
1115
1116struct n2_skcipher_tmpl {
1117        const char              *name;
1118        const char              *drv_name;
1119        u8                      block_size;
1120        u8                      enc_type;
1121        struct skcipher_alg     skcipher;
1122};
1123
1124static const struct n2_skcipher_tmpl skcipher_tmpls[] = {
1125        /* ARC4: only ECB is supported (chaining bits ignored) */
1126        {       .name           = "ecb(arc4)",
1127                .drv_name       = "ecb-arc4",
1128                .block_size     = 1,
1129                .enc_type       = (ENC_TYPE_ALG_RC4_STREAM |
1130                                   ENC_TYPE_CHAINING_ECB),
1131                .skcipher       = {
1132                        .min_keysize    = 1,
1133                        .max_keysize    = 256,
1134                        .setkey         = n2_arc4_setkey,
1135                        .encrypt        = n2_encrypt_ecb,
1136                        .decrypt        = n2_decrypt_ecb,
1137                },
1138        },
1139
 1140        /* DES: ECB, CBC and CFB are supported */
1141        {       .name           = "ecb(des)",
1142                .drv_name       = "ecb-des",
1143                .block_size     = DES_BLOCK_SIZE,
1144                .enc_type       = (ENC_TYPE_ALG_DES |
1145                                   ENC_TYPE_CHAINING_ECB),
1146                .skcipher       = {
1147                        .min_keysize    = DES_KEY_SIZE,
1148                        .max_keysize    = DES_KEY_SIZE,
1149                        .setkey         = n2_des_setkey,
1150                        .encrypt        = n2_encrypt_ecb,
1151                        .decrypt        = n2_decrypt_ecb,
1152                },
1153        },
1154        {       .name           = "cbc(des)",
1155                .drv_name       = "cbc-des",
1156                .block_size     = DES_BLOCK_SIZE,
1157                .enc_type       = (ENC_TYPE_ALG_DES |
1158                                   ENC_TYPE_CHAINING_CBC),
1159                .skcipher       = {
1160                        .ivsize         = DES_BLOCK_SIZE,
1161                        .min_keysize    = DES_KEY_SIZE,
1162                        .max_keysize    = DES_KEY_SIZE,
1163                        .setkey         = n2_des_setkey,
1164                        .encrypt        = n2_encrypt_chaining,
1165                        .decrypt        = n2_decrypt_chaining,
1166                },
1167        },
1168        {       .name           = "cfb(des)",
1169                .drv_name       = "cfb-des",
1170                .block_size     = DES_BLOCK_SIZE,
1171                .enc_type       = (ENC_TYPE_ALG_DES |
1172                                   ENC_TYPE_CHAINING_CFB),
1173                .skcipher       = {
1174                        .min_keysize    = DES_KEY_SIZE,
1175                        .max_keysize    = DES_KEY_SIZE,
1176                        .setkey         = n2_des_setkey,
1177                        .encrypt        = n2_encrypt_chaining,
1178                        .decrypt        = n2_decrypt_chaining,
1179                },
1180        },
1181
 1182        /* 3DES: ECB, CBC and CFB are supported */
1183        {       .name           = "ecb(des3_ede)",
1184                .drv_name       = "ecb-3des",
1185                .block_size     = DES_BLOCK_SIZE,
1186                .enc_type       = (ENC_TYPE_ALG_3DES |
1187                                   ENC_TYPE_CHAINING_ECB),
1188                .skcipher       = {
1189                        .min_keysize    = 3 * DES_KEY_SIZE,
1190                        .max_keysize    = 3 * DES_KEY_SIZE,
1191                        .setkey         = n2_3des_setkey,
1192                        .encrypt        = n2_encrypt_ecb,
1193                        .decrypt        = n2_decrypt_ecb,
1194                },
1195        },
1196        {       .name           = "cbc(des3_ede)",
1197                .drv_name       = "cbc-3des",
1198                .block_size     = DES_BLOCK_SIZE,
1199                .enc_type       = (ENC_TYPE_ALG_3DES |
1200                                   ENC_TYPE_CHAINING_CBC),
1201                .skcipher       = {
1202                        .ivsize         = DES_BLOCK_SIZE,
1203                        .min_keysize    = 3 * DES_KEY_SIZE,
1204                        .max_keysize    = 3 * DES_KEY_SIZE,
1205                        .setkey         = n2_3des_setkey,
1206                        .encrypt        = n2_encrypt_chaining,
1207                        .decrypt        = n2_decrypt_chaining,
1208                },
1209        },
1210        {       .name           = "cfb(des3_ede)",
1211                .drv_name       = "cfb-3des",
1212                .block_size     = DES_BLOCK_SIZE,
1213                .enc_type       = (ENC_TYPE_ALG_3DES |
1214                                   ENC_TYPE_CHAINING_CFB),
1215                .skcipher       = {
1216                        .min_keysize    = 3 * DES_KEY_SIZE,
1217                        .max_keysize    = 3 * DES_KEY_SIZE,
1218                        .setkey         = n2_3des_setkey,
1219                        .encrypt        = n2_encrypt_chaining,
1220                        .decrypt        = n2_decrypt_chaining,
1221                },
1222        },
 1223        /* AES: ECB, CBC and CTR are supported */
1224        {       .name           = "ecb(aes)",
1225                .drv_name       = "ecb-aes",
1226                .block_size     = AES_BLOCK_SIZE,
1227                .enc_type       = (ENC_TYPE_ALG_AES128 |
1228                                   ENC_TYPE_CHAINING_ECB),
1229                .skcipher       = {
1230                        .min_keysize    = AES_MIN_KEY_SIZE,
1231                        .max_keysize    = AES_MAX_KEY_SIZE,
1232                        .setkey         = n2_aes_setkey,
1233                        .encrypt        = n2_encrypt_ecb,
1234                        .decrypt        = n2_decrypt_ecb,
1235                },
1236        },
1237        {       .name           = "cbc(aes)",
1238                .drv_name       = "cbc-aes",
1239                .block_size     = AES_BLOCK_SIZE,
1240                .enc_type       = (ENC_TYPE_ALG_AES128 |
1241                                   ENC_TYPE_CHAINING_CBC),
1242                .skcipher       = {
1243                        .ivsize         = AES_BLOCK_SIZE,
1244                        .min_keysize    = AES_MIN_KEY_SIZE,
1245                        .max_keysize    = AES_MAX_KEY_SIZE,
1246                        .setkey         = n2_aes_setkey,
1247                        .encrypt        = n2_encrypt_chaining,
1248                        .decrypt        = n2_decrypt_chaining,
1249                },
1250        },
1251        {       .name           = "ctr(aes)",
1252                .drv_name       = "ctr-aes",
1253                .block_size     = AES_BLOCK_SIZE,
1254                .enc_type       = (ENC_TYPE_ALG_AES128 |
1255                                   ENC_TYPE_CHAINING_COUNTER),
1256                .skcipher       = {
1257                        .ivsize         = AES_BLOCK_SIZE,
1258                        .min_keysize    = AES_MIN_KEY_SIZE,
1259                        .max_keysize    = AES_MAX_KEY_SIZE,
1260                        .setkey         = n2_aes_setkey,
1261                        .encrypt        = n2_encrypt_chaining,
1262                        .decrypt        = n2_encrypt_chaining,
1263                },
1264        },
1265
1266};
1267#define NUM_CIPHER_TMPLS ARRAY_SIZE(skcipher_tmpls)
1268
1269static LIST_HEAD(skcipher_algs);
1270
1271struct n2_hash_tmpl {
1272        const char      *name;
1273        const u8        *hash_zero;
1274        const u32       *hash_init;
1275        u8              hw_op_hashsz;
1276        u8              digest_size;
1277        u8              block_size;
1278        u8              auth_type;
1279        u8              hmac_type;
1280};
1281
1282static const u32 n2_md5_init[MD5_HASH_WORDS] = {
1283        cpu_to_le32(MD5_H0),
1284        cpu_to_le32(MD5_H1),
1285        cpu_to_le32(MD5_H2),
1286        cpu_to_le32(MD5_H3),
1287};
1288static const u32 n2_sha1_init[SHA1_DIGEST_SIZE / 4] = {
1289        SHA1_H0, SHA1_H1, SHA1_H2, SHA1_H3, SHA1_H4,
1290};
1291static const u32 n2_sha256_init[SHA256_DIGEST_SIZE / 4] = {
1292        SHA256_H0, SHA256_H1, SHA256_H2, SHA256_H3,
1293        SHA256_H4, SHA256_H5, SHA256_H6, SHA256_H7,
1294};
1295static const u32 n2_sha224_init[SHA256_DIGEST_SIZE / 4] = {
1296        SHA224_H0, SHA224_H1, SHA224_H2, SHA224_H3,
1297        SHA224_H4, SHA224_H5, SHA224_H6, SHA224_H7,
1298};
1299
1300static const struct n2_hash_tmpl hash_tmpls[] = {
1301        { .name         = "md5",
1302          .hash_zero    = md5_zero_message_hash,
1303          .hash_init    = n2_md5_init,
1304          .auth_type    = AUTH_TYPE_MD5,
1305          .hmac_type    = AUTH_TYPE_HMAC_MD5,
1306          .hw_op_hashsz = MD5_DIGEST_SIZE,
1307          .digest_size  = MD5_DIGEST_SIZE,
1308          .block_size   = MD5_HMAC_BLOCK_SIZE },
1309        { .name         = "sha1",
1310          .hash_zero    = sha1_zero_message_hash,
1311          .hash_init    = n2_sha1_init,
1312          .auth_type    = AUTH_TYPE_SHA1,
1313          .hmac_type    = AUTH_TYPE_HMAC_SHA1,
1314          .hw_op_hashsz = SHA1_DIGEST_SIZE,
1315          .digest_size  = SHA1_DIGEST_SIZE,
1316          .block_size   = SHA1_BLOCK_SIZE },
1317        { .name         = "sha256",
1318          .hash_zero    = sha256_zero_message_hash,
1319          .hash_init    = n2_sha256_init,
1320          .auth_type    = AUTH_TYPE_SHA256,
1321          .hmac_type    = AUTH_TYPE_HMAC_SHA256,
1322          .hw_op_hashsz = SHA256_DIGEST_SIZE,
1323          .digest_size  = SHA256_DIGEST_SIZE,
1324          .block_size   = SHA256_BLOCK_SIZE },
1325        { .name         = "sha224",
1326          .hash_zero    = sha224_zero_message_hash,
1327          .hash_init    = n2_sha224_init,
1328          .auth_type    = AUTH_TYPE_SHA256,
1329          .hmac_type    = AUTH_TYPE_RESERVED,
1330          .hw_op_hashsz = SHA256_DIGEST_SIZE,
1331          .digest_size  = SHA224_DIGEST_SIZE,
1332          .block_size   = SHA224_BLOCK_SIZE },
1333};
1334#define NUM_HASH_TMPLS ARRAY_SIZE(hash_tmpls)
1335
1336static LIST_HEAD(ahash_algs);
1337static LIST_HEAD(hmac_algs);
1338
1339static int algs_registered;
1340
1341static void __n2_unregister_algs(void)
1342{
1343        struct n2_skcipher_alg *skcipher, *skcipher_tmp;
1344        struct n2_ahash_alg *alg, *alg_tmp;
1345        struct n2_hmac_alg *hmac, *hmac_tmp;
1346
1347        list_for_each_entry_safe(skcipher, skcipher_tmp, &skcipher_algs, entry) {
1348                crypto_unregister_skcipher(&skcipher->skcipher);
1349                list_del(&skcipher->entry);
1350                kfree(skcipher);
1351        }
1352        list_for_each_entry_safe(hmac, hmac_tmp, &hmac_algs, derived.entry) {
1353                crypto_unregister_ahash(&hmac->derived.alg);
1354                list_del(&hmac->derived.entry);
1355                kfree(hmac);
1356        }
1357        list_for_each_entry_safe(alg, alg_tmp, &ahash_algs, entry) {
1358                crypto_unregister_ahash(&alg->alg);
1359                list_del(&alg->entry);
1360                kfree(alg);
1361        }
1362}
1363
1364static int n2_skcipher_init_tfm(struct crypto_skcipher *tfm)
1365{
1366        crypto_skcipher_set_reqsize(tfm, sizeof(struct n2_request_context));
1367        return 0;
1368}
1369
1370static int __n2_register_one_skcipher(const struct n2_skcipher_tmpl *tmpl)
1371{
1372        struct n2_skcipher_alg *p = kzalloc(sizeof(*p), GFP_KERNEL);
1373        struct skcipher_alg *alg;
1374        int err;
1375
1376        if (!p)
1377                return -ENOMEM;
1378
1379        alg = &p->skcipher;
1380        *alg = tmpl->skcipher;
1381
1382        snprintf(alg->base.cra_name, CRYPTO_MAX_ALG_NAME, "%s", tmpl->name);
1383        snprintf(alg->base.cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s-n2", tmpl->drv_name);
1384        alg->base.cra_priority = N2_CRA_PRIORITY;
1385        alg->base.cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY | CRYPTO_ALG_ASYNC |
1386                              CRYPTO_ALG_ALLOCATES_MEMORY;
1387        alg->base.cra_blocksize = tmpl->block_size;
1388        p->enc_type = tmpl->enc_type;
1389        alg->base.cra_ctxsize = sizeof(struct n2_skcipher_context);
1390        alg->base.cra_module = THIS_MODULE;
1391        alg->init = n2_skcipher_init_tfm;
1392
1393        list_add(&p->entry, &skcipher_algs);
1394        err = crypto_register_skcipher(alg);
1395        if (err) {
1396                pr_err("%s alg registration failed\n", alg->base.cra_name);
1397                list_del(&p->entry);
1398                kfree(p);
1399        } else {
1400                pr_info("%s alg registered\n", alg->base.cra_name);
1401        }
1402        return err;
1403}
1404
1405static int __n2_register_one_hmac(struct n2_ahash_alg *n2ahash)
1406{
1407        struct n2_hmac_alg *p = kzalloc(sizeof(*p), GFP_KERNEL);
1408        struct ahash_alg *ahash;
1409        struct crypto_alg *base;
1410        int err;
1411
1412        if (!p)
1413                return -ENOMEM;
1414
1415        p->child_alg = n2ahash->alg.halg.base.cra_name;
1416        memcpy(&p->derived, n2ahash, sizeof(struct n2_ahash_alg));
1417        INIT_LIST_HEAD(&p->derived.entry);
1418
1419        ahash = &p->derived.alg;
1420        ahash->digest = n2_hmac_async_digest;
1421        ahash->setkey = n2_hmac_async_setkey;
1422
1423        base = &ahash->halg.base;
1424        snprintf(base->cra_name, CRYPTO_MAX_ALG_NAME, "hmac(%s)", p->child_alg);
1425        snprintf(base->cra_driver_name, CRYPTO_MAX_ALG_NAME, "hmac-%s-n2", p->child_alg);
1426
1427        base->cra_ctxsize = sizeof(struct n2_hmac_ctx);
1428        base->cra_init = n2_hmac_cra_init;
1429        base->cra_exit = n2_hmac_cra_exit;
1430
1431        list_add(&p->derived.entry, &hmac_algs);
1432        err = crypto_register_ahash(ahash);
1433        if (err) {
1434                pr_err("%s alg registration failed\n", base->cra_name);
1435                list_del(&p->derived.entry);
1436                kfree(p);
1437        } else {
1438                pr_info("%s alg registered\n", base->cra_name);
1439        }
1440        return err;
1441}
1442
1443static int __n2_register_one_ahash(const struct n2_hash_tmpl *tmpl)
1444{
1445        struct n2_ahash_alg *p = kzalloc(sizeof(*p), GFP_KERNEL);
1446        struct hash_alg_common *halg;
1447        struct crypto_alg *base;
1448        struct ahash_alg *ahash;
1449        int err;
1450
1451        if (!p)
1452                return -ENOMEM;
1453
1454        p->hash_zero = tmpl->hash_zero;
1455        p->hash_init = tmpl->hash_init;
1456        p->auth_type = tmpl->auth_type;
1457        p->hmac_type = tmpl->hmac_type;
1458        p->hw_op_hashsz = tmpl->hw_op_hashsz;
1459        p->digest_size = tmpl->digest_size;
1460
1461        ahash = &p->alg;
1462        ahash->init = n2_hash_async_init;
1463        ahash->update = n2_hash_async_update;
1464        ahash->final = n2_hash_async_final;
1465        ahash->finup = n2_hash_async_finup;
1466        ahash->digest = n2_hash_async_digest;
1467        ahash->export = n2_hash_async_noexport;
1468        ahash->import = n2_hash_async_noimport;
1469
1470        halg = &ahash->halg;
1471        halg->digestsize = tmpl->digest_size;
1472
1473        base = &halg->base;
1474        snprintf(base->cra_name, CRYPTO_MAX_ALG_NAME, "%s", tmpl->name);
1475        snprintf(base->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s-n2", tmpl->name);
1476        base->cra_priority = N2_CRA_PRIORITY;
1477        base->cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY |
1478                          CRYPTO_ALG_NEED_FALLBACK;
1479        base->cra_blocksize = tmpl->block_size;
1480        base->cra_ctxsize = sizeof(struct n2_hash_ctx);
1481        base->cra_module = THIS_MODULE;
1482        base->cra_init = n2_hash_cra_init;
1483        base->cra_exit = n2_hash_cra_exit;
1484
1485        list_add(&p->entry, &ahash_algs);
1486        err = crypto_register_ahash(ahash);
1487        if (err) {
1488                pr_err("%s alg registration failed\n", base->cra_name);
1489                list_del(&p->entry);
1490                kfree(p);
1491        } else {
1492                pr_info("%s alg registered\n", base->cra_name);
1493        }
1494        if (!err && p->hmac_type != AUTH_TYPE_RESERVED)
1495                err = __n2_register_one_hmac(p);
1496        return err;
1497}
1498
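/* Algorithm registration is reference counted under spu_lock: only the
 * first n2cp device to probe registers the ahash, hmac and skcipher
 * algorithms with the crypto API; later probes just take a reference.
 * n2_unregister_algs() drops the reference and unregisters everything
 * once the last device is removed.
 */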
1499static int n2_register_algs(void)
1500{
1501        int i, err = 0;
1502
1503        mutex_lock(&spu_lock);
1504        if (algs_registered++)
1505                goto out;
1506
1507        for (i = 0; i < NUM_HASH_TMPLS; i++) {
1508                err = __n2_register_one_ahash(&hash_tmpls[i]);
1509                if (err) {
1510                        __n2_unregister_algs();
1511                        goto out;
1512                }
1513        }
1514        for (i = 0; i < NUM_CIPHER_TMPLS; i++) {
1515                err = __n2_register_one_skcipher(&skcipher_tmpls[i]);
1516                if (err) {
1517                        __n2_unregister_algs();
1518                        goto out;
1519                }
1520        }
1521
1522out:
        if (err)
                algs_registered--;
1523        mutex_unlock(&spu_lock);
1524        return err;
1525}
1526
1527static void n2_unregister_algs(void)
1528{
1529        mutex_lock(&spu_lock);
1530        if (!--algs_registered)
1531                __n2_unregister_algs();
1532        mutex_unlock(&spu_lock);
1533}
1534
1535/* To map CWQ queues to interrupt sources, the hypervisor API provides
1536 * a devino.  This isn't very useful to us because all of the
1537 * interrupts listed in the device_node have been translated to
1538 * Linux virtual IRQ cookie numbers.
1539 *
1540 * So we have to back-translate, going through the 'intr' and 'ino'
1541 * property tables of the n2cp MDESC node, matching them with the OF
1542 * 'interrupts' property entries, in order to figure out which
1543 * devino goes to which already-translated IRQ.
1544 */
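/* For example (values are illustrative only): with an MDESC 'ino'
 * property of { 0x10, 0x11 }, get_irq_props() builds ino_table[] as
 * { {intr=1, ino=0x10}, {intr=2, ino=0x11} }.  A queue whose devino is
 * 0x11 therefore maps to intr 2, and the position of the value 2 in the
 * OF 'interrupts' property is the index into archdata.irqs[] that
 * spu_map_ino() hands to request_irq().
 */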
1545static int find_devino_index(struct platform_device *dev, struct spu_mdesc_info *ip,
1546                             unsigned long dev_ino)
1547{
1548        const unsigned int *dev_intrs;
1549        unsigned int intr;
1550        int i;
1551
1552        for (i = 0; i < ip->num_intrs; i++) {
1553                if (ip->ino_table[i].ino == dev_ino)
1554                        break;
1555        }
1556        if (i == ip->num_intrs)
1557                return -ENODEV;
1558
1559        intr = ip->ino_table[i].intr;
1560
1561        dev_intrs = of_get_property(dev->dev.of_node, "interrupts", NULL);
1562        if (!dev_intrs)
1563                return -ENODEV;
1564
1565        for (i = 0; i < dev->archdata.num_irqs; i++) {
1566                if (dev_intrs[i] == intr)
1567                        return i;
1568        }
1569
1570        return -ENODEV;
1571}
1572
1573static int spu_map_ino(struct platform_device *dev, struct spu_mdesc_info *ip,
1574                       const char *irq_name, struct spu_queue *p,
1575                       irq_handler_t handler)
1576{
1577        unsigned long herr;
1578        int index;
1579
1580        herr = sun4v_ncs_qhandle_to_devino(p->qhandle, &p->devino);
1581        if (herr)
1582                return -EINVAL;
1583
1584        index = find_devino_index(dev, ip, p->devino);
1585        if (index < 0)
1586                return index;
1587
1588        p->irq = dev->archdata.irqs[index];
1589
1590        sprintf(p->irq_name, "%s-%d", irq_name, index);
1591
1592        return request_irq(p->irq, handler, 0, p->irq_name, p);
1593}
1594
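/* Each SPU queue is one physically contiguous block of
 * MAU_NUM_ENTRIES * MAU_ENTRY_SIZE (or CWQ_NUM_ENTRIES * CWQ_ENTRY_SIZE)
 * bytes, aligned to the entry size, carved from a dedicated kmem cache.
 * queue_cache[] is indexed by HV_NCS_QTYPE_{MAU,CWQ} - 1.
 */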
1595static struct kmem_cache *queue_cache[2];
1596
1597static void *new_queue(unsigned long q_type)
1598{
1599        return kmem_cache_zalloc(queue_cache[q_type - 1], GFP_KERNEL);
1600}
1601
1602static void free_queue(void *p, unsigned long q_type)
1603{
1604        kmem_cache_free(queue_cache[q_type - 1], p);
1605}
1606
1607static int queue_cache_init(void)
1608{
1609        if (!queue_cache[HV_NCS_QTYPE_MAU - 1])
1610                queue_cache[HV_NCS_QTYPE_MAU - 1] =
1611                        kmem_cache_create("mau_queue",
1612                                          (MAU_NUM_ENTRIES *
1613                                           MAU_ENTRY_SIZE),
1614                                          MAU_ENTRY_SIZE, 0, NULL);
1615        if (!queue_cache[HV_NCS_QTYPE_MAU - 1])
1616                return -ENOMEM;
1617
1618        if (!queue_cache[HV_NCS_QTYPE_CWQ - 1])
1619                queue_cache[HV_NCS_QTYPE_CWQ - 1] =
1620                        kmem_cache_create("cwq_queue",
1621                                          (CWQ_NUM_ENTRIES *
1622                                           CWQ_ENTRY_SIZE),
1623                                          CWQ_ENTRY_SIZE, 0, NULL);
1624        if (!queue_cache[HV_NCS_QTYPE_CWQ - 1]) {
1625                kmem_cache_destroy(queue_cache[HV_NCS_QTYPE_MAU - 1]);
1626                queue_cache[HV_NCS_QTYPE_MAU - 1] = NULL;
1627                return -ENOMEM;
1628        }
1629        return 0;
1630}
1631
1632static void queue_cache_destroy(void)
1633{
1634        kmem_cache_destroy(queue_cache[HV_NCS_QTYPE_MAU - 1]);
1635        kmem_cache_destroy(queue_cache[HV_NCS_QTYPE_CWQ - 1]);
1636        queue_cache[HV_NCS_QTYPE_MAU - 1] = NULL;
1637        queue_cache[HV_NCS_QTYPE_CWQ - 1] = NULL;
1638}
1639
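/* Queue configuration is run on one of the online CPUs in the queue's
 * sharing mask (hence the work_on_cpu_safe() indirection in
 * spu_queue_register()); the workfn issues the NCS qconf hypercall and
 * then resets the queue's head marker.
 */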
1640static long spu_queue_register_workfn(void *arg)
1641{
1642        struct spu_qreg *qr = arg;
1643        struct spu_queue *p = qr->queue;
1644        unsigned long q_type = qr->type;
1645        unsigned long hv_ret;
1646
1647        hv_ret = sun4v_ncs_qconf(q_type, __pa(p->q),
1648                                 CWQ_NUM_ENTRIES, &p->qhandle);
1649        if (!hv_ret)
1650                sun4v_ncs_sethead_marker(p->qhandle, 0);
1651
1652        return hv_ret ? -EINVAL : 0;
1653}
1654
1655static int spu_queue_register(struct spu_queue *p, unsigned long q_type)
1656{
1657        int cpu = cpumask_any_and(&p->sharing, cpu_online_mask);
1658        struct spu_qreg qr = { .queue = p, .type = q_type };
1659
1660        return work_on_cpu_safe(cpu, spu_queue_register_workfn, &qr);
1661}
1662
1663static int spu_queue_setup(struct spu_queue *p)
1664{
1665        int err;
1666
1667        p->q = new_queue(p->q_type);
1668        if (!p->q)
1669                return -ENOMEM;
1670
1671        err = spu_queue_register(p, p->q_type);
1672        if (err) {
1673                free_queue(p->q, p->q_type);
1674                p->q = NULL;
1675        }
1676
1677        return err;
1678}
1679
1680static void spu_queue_destroy(struct spu_queue *p)
1681{
1682        unsigned long hv_ret;
1683
1684        if (!p->q)
1685                return;
1686
1687        hv_ret = sun4v_ncs_qconf(p->q_type, p->qhandle, 0, &p->qhandle);
1688
1689        if (!hv_ret)
1690                free_queue(p->q, p->q_type);
1691}
1692
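/* Tear down every queue on @list: clear any per-cpu lookup table
 * entries pointing at it, release its IRQ, unconfigure it with the
 * hypervisor and free it.
 */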
1693static void spu_list_destroy(struct list_head *list)
1694{
1695        struct spu_queue *p, *n;
1696
1697        list_for_each_entry_safe(p, n, list, list) {
1698                int i;
1699
1700                for (i = 0; i < NR_CPUS; i++) {
1701                        if (cpu_to_cwq[i] == p)
1702                                cpu_to_cwq[i] = NULL;
                        if (cpu_to_mau[i] == p)
                                cpu_to_mau[i] = NULL;
1703                }
1704
1705                if (p->irq) {
1706                        free_irq(p->irq, p);
1707                        p->irq = 0;
1708                }
1709                spu_queue_destroy(p);
1710                list_del(&p->list);
1711                kfree(p);
1712        }
1713}
1714
1715/* Walk the backward arcs of a CWQ or MAU 'exec-unit' node,
1716 * gathering cpu membership information.
1717 */
1718static int spu_mdesc_walk_arcs(struct mdesc_handle *mdesc,
1719                               struct platform_device *dev,
1720                               u64 node, struct spu_queue *p,
1721                               struct spu_queue **table)
1722{
1723        u64 arc;
1724
1725        mdesc_for_each_arc(arc, mdesc, node, MDESC_ARC_TYPE_BACK) {
1726                u64 tgt = mdesc_arc_target(mdesc, arc);
1727                const char *name = mdesc_node_name(mdesc, tgt);
1728                const u64 *id;
1729
1730                if (strcmp(name, "cpu"))
1731                        continue;
1732                id = mdesc_get_property(mdesc, tgt, "id", NULL);
                if (!id || *id >= NR_CPUS) {
                        dev_err(&dev->dev, "%pOF: SPU cpu arc has invalid id.\n",
                                dev->dev.of_node);
                        return -EINVAL;
                }
1733                if (table[*id] != NULL) {
1734                        dev_err(&dev->dev, "%pOF: SPU cpu slot already set.\n",
1735                                dev->dev.of_node);
1736                        return -EINVAL;
1737                }
1738                cpumask_set_cpu(*id, &p->sharing);
1739                table[*id] = p;
1740        }
1741        return 0;
1742}
1743
1744/* Process an 'exec-unit' MDESC node of type 'cwq' or 'mau'.  */
1745static int handle_exec_unit(struct spu_mdesc_info *ip, struct list_head *list,
1746                            struct platform_device *dev, struct mdesc_handle *mdesc,
1747                            u64 node, const char *iname, unsigned long q_type,
1748                            irq_handler_t handler, struct spu_queue **table)
1749{
1750        struct spu_queue *p;
1751        int err;
1752
1753        p = kzalloc(sizeof(struct spu_queue), GFP_KERNEL);
1754        if (!p) {
1755                dev_err(&dev->dev, "%pOF: Could not allocate SPU queue.\n",
1756                        dev->dev.of_node);
1757                return -ENOMEM;
1758        }
1759
1760        cpumask_clear(&p->sharing);
1761        spin_lock_init(&p->lock);
1762        p->q_type = q_type;
1763        INIT_LIST_HEAD(&p->jobs);
1764        list_add(&p->list, list);
1765
1766        err = spu_mdesc_walk_arcs(mdesc, dev, node, p, table);
1767        if (err)
1768                return err;
1769
1770        err = spu_queue_setup(p);
1771        if (err)
1772                return err;
1773
1774        return spu_map_ino(dev, ip, iname, p, handler);
1775}
1776
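/* Scan the MDESC for every 'exec-unit' node whose 'type' matches
 * @exec_name ("cwq" or "mau") and bring up one spu_queue for each,
 * wiring it to its CPUs, its hypervisor queue and its interrupt handler.
 */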
1777static int spu_mdesc_scan(struct mdesc_handle *mdesc, struct platform_device *dev,
1778                          struct spu_mdesc_info *ip, struct list_head *list,
1779                          const char *exec_name, unsigned long q_type,
1780                          irq_handler_t handler, struct spu_queue **table)
1781{
1782        int err = 0;
1783        u64 node;
1784
1785        mdesc_for_each_node_by_name(mdesc, node, "exec-unit") {
1786                const char *type;
1787
1788                type = mdesc_get_property(mdesc, node, "type", NULL);
1789                if (!type || strcmp(type, exec_name))
1790                        continue;
1791
1792                err = handle_exec_unit(ip, list, dev, mdesc, node,
1793                                       exec_name, q_type, handler, table);
1794                if (err) {
1795                        spu_list_destroy(list);
1796                        break;
1797                }
1798        }
1799
1800        return err;
1801}
1802
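/* Build ip->ino_table from the 'ino' property of the given MDESC node.
 * Each entry gets a 1-based 'intr' number that find_devino_index()
 * later looks up in the OF 'interrupts' property.
 */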
1803static int get_irq_props(struct mdesc_handle *mdesc, u64 node,
1804                         struct spu_mdesc_info *ip)
1805{
1806        const u64 *ino;
1807        int ino_len;
1808        int i;
1809
1810        ino = mdesc_get_property(mdesc, node, "ino", &ino_len);
1811        if (!ino) {
1812                pr_err("NO 'ino'\n");
1813                return -ENODEV;
1814        }
1815
1816        ip->num_intrs = ino_len / sizeof(u64);
1817        ip->ino_table = kzalloc((sizeof(struct ino_blob) *
1818                                 ip->num_intrs),
1819                                GFP_KERNEL);
1820        if (!ip->ino_table)
1821                return -ENOMEM;
1822
1823        for (i = 0; i < ip->num_intrs; i++) {
1824                struct ino_blob *b = &ip->ino_table[i];
1825                b->intr = i + 1;
1826                b->ino = ino[i];
1827        }
1828
1829        return 0;
1830}
1831
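/* Locate the 'virtual-device' MDESC node that corresponds to this
 * platform device by matching its 'cfg-handle' against the first cell
 * of the OF 'reg' property, then collect its interrupt properties.
 */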
1832static int grab_mdesc_irq_props(struct mdesc_handle *mdesc,
1833                                struct platform_device *dev,
1834                                struct spu_mdesc_info *ip,
1835                                const char *node_name)
1836{
1837        const unsigned int *reg;
1838        u64 node;
1839
1840        reg = of_get_property(dev->dev.of_node, "reg", NULL);
1841        if (!reg)
1842                return -ENODEV;
1843
1844        mdesc_for_each_node_by_name(mdesc, node, "virtual-device") {
1845                const char *name;
1846                const u64 *chdl;
1847
1848                name = mdesc_get_property(mdesc, node, "name", NULL);
1849                if (!name || strcmp(name, node_name))
1850                        continue;
1851                chdl = mdesc_get_property(mdesc, node, "cfg-handle", NULL);
1852                if (!chdl || (*chdl != *reg))
1853                        continue;
1854                ip->cfg_handle = *chdl;
1855                return get_irq_props(mdesc, node, ip);
1856        }
1857
1858        return -ENODEV;
1859}
1860
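/* The driver negotiates major version 2 of the NCS hypervisor API group
 * (HV_GRP_NCS); the minor version actually granted is reported back in
 * n2_spu_hvapi_minor.
 */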
1861static unsigned long n2_spu_hvapi_major;
1862static unsigned long n2_spu_hvapi_minor;
1863
1864static int n2_spu_hvapi_register(void)
1865{
1866        int err;
1867
1868        n2_spu_hvapi_major = 2;
1869        n2_spu_hvapi_minor = 0;
1870
1871        err = sun4v_hvapi_register(HV_GRP_NCS,
1872                                   n2_spu_hvapi_major,
1873                                   &n2_spu_hvapi_minor);
1874
1875        if (!err)
1876                pr_info("Registered NCS HVAPI version %lu.%lu\n",
1877                        n2_spu_hvapi_major,
1878                        n2_spu_hvapi_minor);
1879
1880        return err;
1881}
1882
1883static void n2_spu_hvapi_unregister(void)
1884{
1885        sun4v_hvapi_unregister(HV_GRP_NCS);
1886}
1887
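/* State shared by all n2cp/ncp devices: the NCS HVAPI registration, the
 * queue kmem caches and the per-cpu queue lookup tables.  It is set up
 * by the first probe and torn down when the last device goes away,
 * tracked by global_ref under spu_lock.
 */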
1888static int global_ref;
1889
1890static int grab_global_resources(void)
1891{
1892        int err = 0;
1893
1894        mutex_lock(&spu_lock);
1895
1896        if (global_ref++)
1897                goto out;
1898
1899        err = n2_spu_hvapi_register();
1900        if (err)
1901                goto out;
1902
1903        err = queue_cache_init();
1904        if (err)
1905                goto out_hvapi_release;
1906
1907        err = -ENOMEM;
1908        cpu_to_cwq = kcalloc(NR_CPUS, sizeof(struct spu_queue *),
1909                             GFP_KERNEL);
1910        if (!cpu_to_cwq)
1911                goto out_queue_cache_destroy;
1912
1913        cpu_to_mau = kcalloc(NR_CPUS, sizeof(struct spu_queue *),
1914                             GFP_KERNEL);
1915        if (!cpu_to_mau)
1916                goto out_free_cwq_table;
1917
1918        err = 0;
1919
1920out:
1921        if (err)
1922                global_ref--;
1923        mutex_unlock(&spu_lock);
1924        return err;
1925
1926out_free_cwq_table:
1927        kfree(cpu_to_cwq);
1928        cpu_to_cwq = NULL;
1929
1930out_queue_cache_destroy:
1931        queue_cache_destroy();
1932
1933out_hvapi_release:
1934        n2_spu_hvapi_unregister();
1935        goto out;
1936}
1937
1938static void release_global_resources(void)
1939{
1940        mutex_lock(&spu_lock);
1941        if (!--global_ref) {
1942                kfree(cpu_to_cwq);
1943                cpu_to_cwq = NULL;
1944
1945                kfree(cpu_to_mau);
1946                cpu_to_mau = NULL;
1947
1948                queue_cache_destroy();
1949                n2_spu_hvapi_unregister();
1950        }
1951        mutex_unlock(&spu_lock);
1952}
1953
1954static struct n2_crypto *alloc_n2cp(void)
1955{
1956        struct n2_crypto *np = kzalloc(sizeof(struct n2_crypto), GFP_KERNEL);
1957
1958        if (np)
1959                INIT_LIST_HEAD(&np->cwq_list);
1960
1961        return np;
1962}
1963
1964static void free_n2cp(struct n2_crypto *np)
1965{
1966        kfree(np->cwq_info.ino_table);
1967        np->cwq_info.ino_table = NULL;
1968
1969        kfree(np);
1970}
1971
1972static void n2_spu_driver_version(void)
1973{
1974        static int n2_spu_version_printed;
1975
1976        if (n2_spu_version_printed++ == 0)
1977                pr_info("%s", version);
1978}
1979
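/* Probe one n2cp (control word queue) device: grab the shared global
 * state, find the matching MDESC node, bring up a CWQ per 'exec-unit'
 * and finally register the crypto algorithms.
 */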
1980static int n2_crypto_probe(struct platform_device *dev)
1981{
1982        struct mdesc_handle *mdesc;
1983        struct n2_crypto *np;
1984        int err;
1985
1986        n2_spu_driver_version();
1987
1988        pr_info("Found N2CP at %pOF\n", dev->dev.of_node);
1989
1990        np = alloc_n2cp();
1991        if (!np) {
1992                dev_err(&dev->dev, "%pOF: Unable to allocate n2cp.\n",
1993                        dev->dev.of_node);
1994                return -ENOMEM;
1995        }
1996
1997        err = grab_global_resources();
1998        if (err) {
1999                dev_err(&dev->dev, "%pOF: Unable to grab global resources.\n",
2000                        dev->dev.of_node);
2001                goto out_free_n2cp;
2002        }
2003
2004        mdesc = mdesc_grab();
2005
2006        if (!mdesc) {
2007                dev_err(&dev->dev, "%pOF: Unable to grab MDESC.\n",
2008                        dev->dev.of_node);
2009                err = -ENODEV;
2010                goto out_free_global;
2011        }
2012        err = grab_mdesc_irq_props(mdesc, dev, &np->cwq_info, "n2cp");
2013        if (err) {
2014                dev_err(&dev->dev, "%pOF: Unable to grab IRQ props.\n",
2015                        dev->dev.of_node);
2016                mdesc_release(mdesc);
2017                goto out_free_global;
2018        }
2019
2020        err = spu_mdesc_scan(mdesc, dev, &np->cwq_info, &np->cwq_list,
2021                             "cwq", HV_NCS_QTYPE_CWQ, cwq_intr,
2022                             cpu_to_cwq);
2023        mdesc_release(mdesc);
2024
2025        if (err) {
2026                dev_err(&dev->dev, "%pOF: CWQ MDESC scan failed.\n",
2027                        dev->dev.of_node);
2028                goto out_free_global;
2029        }
2030
2031        err = n2_register_algs();
2032        if (err) {
2033                dev_err(&dev->dev, "%pOF: Unable to register algorithms.\n",
2034                        dev->dev.of_node);
2035                goto out_free_spu_list;
2036        }
2037
2038        dev_set_drvdata(&dev->dev, np);
2039
2040        return 0;
2041
2042out_free_spu_list:
2043        spu_list_destroy(&np->cwq_list);
2044
2045out_free_global:
2046        release_global_resources();
2047
2048out_free_n2cp:
2049        free_n2cp(np);
2050
2051        return err;
2052}
2053
2054static int n2_crypto_remove(struct platform_device *dev)
2055{
2056        struct n2_crypto *np = dev_get_drvdata(&dev->dev);
2057
2058        n2_unregister_algs();
2059
2060        spu_list_destroy(&np->cwq_list);
2061
2062        release_global_resources();
2063
2064        free_n2cp(np);
2065
2066        return 0;
2067}
2068
2069static struct n2_mau *alloc_ncp(void)
2070{
2071        struct n2_mau *mp = kzalloc(sizeof(struct n2_mau), GFP_KERNEL);
2072
2073        if (mp)
2074                INIT_LIST_HEAD(&mp->mau_list);
2075
2076        return mp;
2077}
2078
2079static void free_ncp(struct n2_mau *mp)
2080{
2081        kfree(mp->mau_info.ino_table);
2082        mp->mau_info.ino_table = NULL;
2083
2084        kfree(mp);
2085}
2086
2087static int n2_mau_probe(struct platform_device *dev)
2088{
2089        struct mdesc_handle *mdesc;
2090        struct n2_mau *mp;
2091        int err;
2092
2093        n2_spu_driver_version();
2094
2095        pr_info("Found NCP at %pOF\n", dev->dev.of_node);
2096
2097        mp = alloc_ncp();
2098        if (!mp) {
2099                dev_err(&dev->dev, "%pOF: Unable to allocate ncp.\n",
2100                        dev->dev.of_node);
2101                return -ENOMEM;
2102        }
2103
2104        err = grab_global_resources();
2105        if (err) {
2106                dev_err(&dev->dev, "%pOF: Unable to grab global resources.\n",
2107                        dev->dev.of_node);
2108                goto out_free_ncp;
2109        }
2110
2111        mdesc = mdesc_grab();
2112
2113        if (!mdesc) {
2114                dev_err(&dev->dev, "%pOF: Unable to grab MDESC.\n",
2115                        dev->dev.of_node);
2116                err = -ENODEV;
2117                goto out_free_global;
2118        }
2119
2120        err = grab_mdesc_irq_props(mdesc, dev, &mp->mau_info, "ncp");
2121        if (err) {
2122                dev_err(&dev->dev, "%pOF: Unable to grab IRQ props.\n",
2123                        dev->dev.of_node);
2124                mdesc_release(mdesc);
2125                goto out_free_global;
2126        }
2127
2128        err = spu_mdesc_scan(mdesc, dev, &mp->mau_info, &mp->mau_list,
2129                             "mau", HV_NCS_QTYPE_MAU, mau_intr,
2130                             cpu_to_mau);
2131        mdesc_release(mdesc);
2132
2133        if (err) {
2134                dev_err(&dev->dev, "%pOF: MAU MDESC scan failed.\n",
2135                        dev->dev.of_node);
2136                goto out_free_global;
2137        }
2138
2139        dev_set_drvdata(&dev->dev, mp);
2140
2141        return 0;
2142
2143out_free_global:
2144        release_global_resources();
2145
2146out_free_ncp:
2147        free_ncp(mp);
2148
2149        return err;
2150}
2151
2152static int n2_mau_remove(struct platform_device *dev)
2153{
2154        struct n2_mau *mp = dev_get_drvdata(&dev->dev);
2155
2156        spu_list_destroy(&mp->mau_list);
2157
2158        release_global_resources();
2159
2160        free_ncp(mp);
2161
2162        return 0;
2163}
2164
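/* The n2cp and ncp device nodes come in three flavours: SUNW,n2-*
 * (UltraSPARC T2 / Niagara-2), SUNW,vf-* (Victoria Falls, i.e. T2 Plus)
 * and SUNW,kt-* (the KT follow-on, i.e. SPARC T3).
 */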
2165static const struct of_device_id n2_crypto_match[] = {
2166        {
2167                .name = "n2cp",
2168                .compatible = "SUNW,n2-cwq",
2169        },
2170        {
2171                .name = "n2cp",
2172                .compatible = "SUNW,vf-cwq",
2173        },
2174        {
2175                .name = "n2cp",
2176                .compatible = "SUNW,kt-cwq",
2177        },
2178        {},
2179};
2180
2181MODULE_DEVICE_TABLE(of, n2_crypto_match);
2182
2183static struct platform_driver n2_crypto_driver = {
2184        .driver = {
2185                .name           =       "n2cp",
2186                .of_match_table =       n2_crypto_match,
2187        },
2188        .probe          =       n2_crypto_probe,
2189        .remove         =       n2_crypto_remove,
2190};
2191
2192static const struct of_device_id n2_mau_match[] = {
2193        {
2194                .name = "ncp",
2195                .compatible = "SUNW,n2-mau",
2196        },
2197        {
2198                .name = "ncp",
2199                .compatible = "SUNW,vf-mau",
2200        },
2201        {
2202                .name = "ncp",
2203                .compatible = "SUNW,kt-mau",
2204        },
2205        {},
2206};
2207
2208MODULE_DEVICE_TABLE(of, n2_mau_match);
2209
2210static struct platform_driver n2_mau_driver = {
2211        .driver = {
2212                .name           =       "ncp",
2213                .of_match_table =       n2_mau_match,
2214        },
2215        .probe          =       n2_mau_probe,
2216        .remove         =       n2_mau_remove,
2217};
2218
2219static struct platform_driver * const drivers[] = {
2220        &n2_crypto_driver,
2221        &n2_mau_driver,
2222};
2223
2224static int __init n2_init(void)
2225{
2226        return platform_register_drivers(drivers, ARRAY_SIZE(drivers));
2227}
2228
2229static void __exit n2_exit(void)
2230{
2231        platform_unregister_drivers(drivers, ARRAY_SIZE(drivers));
2232}
2233
2234module_init(n2_init);
2235module_exit(n2_exit);
2236