linux/drivers/crypto/n2_core.c
   1// SPDX-License-Identifier: GPL-2.0-only
   2/* n2_core.c: Niagara2 Stream Processing Unit (SPU) crypto support.
   3 *
   4 * Copyright (C) 2010, 2011 David S. Miller <davem@davemloft.net>
   5 */
   6
   7#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
   8
   9#include <linux/kernel.h>
  10#include <linux/module.h>
  11#include <linux/of.h>
  12#include <linux/of_device.h>
  13#include <linux/cpumask.h>
  14#include <linux/slab.h>
  15#include <linux/interrupt.h>
  16#include <linux/crypto.h>
  17#include <crypto/md5.h>
  18#include <crypto/sha.h>
  19#include <crypto/aes.h>
  20#include <crypto/internal/des.h>
  21#include <linux/mutex.h>
  22#include <linux/delay.h>
  23#include <linux/sched.h>
  24
  25#include <crypto/internal/hash.h>
  26#include <crypto/internal/skcipher.h>
  27#include <crypto/scatterwalk.h>
  28#include <crypto/algapi.h>
  29
  30#include <asm/hypervisor.h>
  31#include <asm/mdesc.h>
  32
  33#include "n2_core.h"
  34
  35#define DRV_MODULE_NAME         "n2_crypto"
  36#define DRV_MODULE_VERSION      "0.2"
  37#define DRV_MODULE_RELDATE      "July 28, 2011"
  38
  39static const char version[] =
  40        DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
  41
  42MODULE_AUTHOR("David S. Miller (davem@davemloft.net)");
  43MODULE_DESCRIPTION("Niagara2 Crypto driver");
  44MODULE_LICENSE("GPL");
  45MODULE_VERSION(DRV_MODULE_VERSION);
  46
  47#define N2_CRA_PRIORITY         200
  48
  49static DEFINE_MUTEX(spu_lock);
  50
  51struct spu_queue {
  52        cpumask_t               sharing;
  53        unsigned long           qhandle;
  54
  55        spinlock_t              lock;
  56        u8                      q_type;
  57        void                    *q;
  58        unsigned long           head;
  59        unsigned long           tail;
  60        struct list_head        jobs;
  61
  62        unsigned long           devino;
  63
  64        char                    irq_name[32];
  65        unsigned int            irq;
  66
  67        struct list_head        list;
  68};
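
    /* Each spu_queue describes one hardware work queue: a CWQ (Control Word
     * Queue, crypto ops) or a MAU (Modular Arithmetic Unit) queue.  'q' is
     * the real in-memory queue and 'head'/'tail' are byte offsets into it;
     * 'sharing' records which CPUs share the unit and 'lock' serializes the
     * head/tail updates and hypervisor calls.  The cpu_to_cwq[] and
     * cpu_to_mau[] arrays below map each CPU to the queue serving it.
     */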
  69
  70struct spu_qreg {
  71        struct spu_queue        *queue;
  72        unsigned long           type;
  73};
  74
  75static struct spu_queue **cpu_to_cwq;
  76static struct spu_queue **cpu_to_mau;
  77
  78static unsigned long spu_next_offset(struct spu_queue *q, unsigned long off)
  79{
  80        if (q->q_type == HV_NCS_QTYPE_MAU) {
  81                off += MAU_ENTRY_SIZE;
  82                if (off == (MAU_ENTRY_SIZE * MAU_NUM_ENTRIES))
  83                        off = 0;
  84        } else {
  85                off += CWQ_ENTRY_SIZE;
  86                if (off == (CWQ_ENTRY_SIZE * CWQ_NUM_ENTRIES))
  87                        off = 0;
  88        }
  89        return off;
  90}
  91
  92struct n2_request_common {
  93        struct list_head        entry;
  94        unsigned int            offset;
  95};
  96#define OFFSET_NOT_RUNNING      (~(unsigned int)0)
  97
  98/* An async job request records the final tail value it used in
  99 * n2_request_common->offset; test whether that offset lies in the
 100 * range (old_head, new_head], allowing for queue wrap-around.
 101 */
 102static inline bool job_finished(struct spu_queue *q, unsigned int offset,
 103                                unsigned long old_head, unsigned long new_head)
 104{
 105        if (old_head <= new_head) {
 106                if (offset > old_head && offset <= new_head)
 107                        return true;
 108        } else {
 109                if (offset > old_head || offset <= new_head)
 110                        return true;
 111        }
 112        return false;
 113}
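
    /* Worked example (illustrative offsets only): with old_head == 0x180 and
     * new_head == 0x080 the queue has wrapped, so an offset of 0x1c0 or 0x040
     * counts as finished, while 0x100 does not, since it lies in the still
     * outstanding (new_head, old_head] range.
     */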
 114
 115/* When the HEAD marker is unequal to the actual HEAD, we get
 116 * a virtual device INO interrupt.  We should process the
 117 * completed CWQ entries and adjust the HEAD marker to clear
 118 * the IRQ.
 119 */
 120static irqreturn_t cwq_intr(int irq, void *dev_id)
 121{
 122        unsigned long off, new_head, hv_ret;
 123        struct spu_queue *q = dev_id;
 124
 125        pr_err("CPU[%d]: Got CWQ interrupt for qhdl[%lx]\n",
 126               smp_processor_id(), q->qhandle);
 127
 128        spin_lock(&q->lock);
 129
 130        hv_ret = sun4v_ncs_gethead(q->qhandle, &new_head);
 131
 132        pr_err("CPU[%d]: CWQ gethead[%lx] hv_ret[%lu]\n",
 133               smp_processor_id(), new_head, hv_ret);
 134
 135        for (off = q->head; off != new_head; off = spu_next_offset(q, off)) {
 136                /* XXX ... XXX */
 137        }
 138
 139        hv_ret = sun4v_ncs_sethead_marker(q->qhandle, new_head);
 140        if (hv_ret == HV_EOK)
 141                q->head = new_head;
 142
 143        spin_unlock(&q->lock);
 144
 145        return IRQ_HANDLED;
 146}
 147
 148static irqreturn_t mau_intr(int irq, void *dev_id)
 149{
 150        struct spu_queue *q = dev_id;
 151        unsigned long head, hv_ret;
 152
 153        spin_lock(&q->lock);
 154
 155        pr_err("CPU[%d]: Got MAU interrupt for qhdl[%lx]\n",
 156               smp_processor_id(), q->qhandle);
 157
 158        hv_ret = sun4v_ncs_gethead(q->qhandle, &head);
 159
 160        pr_err("CPU[%d]: MAU gethead[%lx] hv_ret[%lu]\n",
 161               smp_processor_id(), head, hv_ret);
 162
 163        sun4v_ncs_sethead_marker(q->qhandle, head);
 164
 165        spin_unlock(&q->lock);
 166
 167        return IRQ_HANDLED;
 168}
 169
 170static void *spu_queue_next(struct spu_queue *q, void *cur)
 171{
 172        return q->q + spu_next_offset(q, cur - q->q);
 173}
 174
 175static int spu_queue_num_free(struct spu_queue *q)
 176{
 177        unsigned long head = q->head;
 178        unsigned long tail = q->tail;
 179        unsigned long end = (CWQ_ENTRY_SIZE * CWQ_NUM_ENTRIES);
 180        unsigned long diff;
 181
 182        if (head > tail)
 183                diff = head - tail;
 184        else
 185                diff = (end - tail) + head;
 186
 187        return (diff / CWQ_ENTRY_SIZE) - 1;
 188}
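
    /* Note: when head == tail the queue is empty and the computation above
     * reports NUM_ENTRIES - 1 free slots, i.e. one entry is always left
     * unused; that is the usual ring-buffer trick that keeps a completely
     * full queue (tail one entry behind head) distinguishable from an empty
     * one.  Illustrative numbers, assuming 64-byte entries: head == 0x100
     * and tail == 0x0c0 give diff == 0x40 and zero free entries.
     */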
 189
 190static void *spu_queue_alloc(struct spu_queue *q, int num_entries)
 191{
 192        int avail = spu_queue_num_free(q);
 193
 194        if (avail >= num_entries)
 195                return q->q + q->tail;
 196
 197        return NULL;
 198}
 199
 200static unsigned long spu_queue_submit(struct spu_queue *q, void *last)
 201{
 202        unsigned long hv_ret, new_tail;
 203
 204        new_tail = spu_next_offset(q, last - q->q);
 205
 206        hv_ret = sun4v_ncs_settail(q->qhandle, new_tail);
 207        if (hv_ret == HV_EOK)
 208                q->tail = new_tail;
 209        return hv_ret;
 210}
 211
 212static u64 control_word_base(unsigned int len, unsigned int hmac_key_len,
 213                             int enc_type, int auth_type,
 214                             unsigned int hash_len,
 215                             bool sfas, bool sob, bool eob, bool encrypt,
 216                             int opcode)
 217{
 218        u64 word = (len - 1) & CONTROL_LEN;
 219
 220        word |= ((u64) opcode << CONTROL_OPCODE_SHIFT);
 221        word |= ((u64) enc_type << CONTROL_ENC_TYPE_SHIFT);
 222        word |= ((u64) auth_type << CONTROL_AUTH_TYPE_SHIFT);
 223        if (sfas)
 224                word |= CONTROL_STORE_FINAL_AUTH_STATE;
 225        if (sob)
 226                word |= CONTROL_START_OF_BLOCK;
 227        if (eob)
 228                word |= CONTROL_END_OF_BLOCK;
 229        if (encrypt)
 230                word |= CONTROL_ENCRYPT;
 231        if (hmac_key_len)
 232                word |= ((u64) (hmac_key_len - 1)) << CONTROL_HMAC_KEY_LEN_SHIFT;
 233        if (hash_len)
 234                word |= ((u64) (hash_len - 1)) << CONTROL_HASH_LEN_SHIFT;
 235
 236        return word;
 237}
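
    /* For reference, the auth-only path in n2_do_async_digest() below builds
     * the control word of its first descriptor as
     *
     *     control_word_base(nbytes, auth_key_len, 0, auth_type, digest_size,
     *                       false, true, false, false,
     *                       OPCODE_INPLACE_BIT | OPCODE_AUTH_MAC);
     *
     * i.e. start-of-block set and no encryption fields, with
     * CONTROL_END_OF_BLOCK OR'ed in separately on the last descriptor of the
     * chain.
     */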
 238
 239#if 0
 240static inline bool n2_should_run_async(struct spu_queue *qp, int this_len)
 241{
 242        if (this_len >= 64 ||
 243            qp->head != qp->tail)
 244                return true;
 245        return false;
 246}
 247#endif
 248
 249struct n2_ahash_alg {
 250        struct list_head        entry;
 251        const u8                *hash_zero;
 252        const u8                *hash_init;
 253        u8                      hw_op_hashsz;
 254        u8                      digest_size;
 255        u8                      auth_type;
 256        u8                      hmac_type;
 257        struct ahash_alg        alg;
 258};
 259
 260static inline struct n2_ahash_alg *n2_ahash_alg(struct crypto_tfm *tfm)
 261{
 262        struct crypto_alg *alg = tfm->__crt_alg;
 263        struct ahash_alg *ahash_alg;
 264
 265        ahash_alg = container_of(alg, struct ahash_alg, halg.base);
 266
 267        return container_of(ahash_alg, struct n2_ahash_alg, alg);
 268}
 269
 270struct n2_hmac_alg {
 271        const char              *child_alg;
 272        struct n2_ahash_alg     derived;
 273};
 274
 275static inline struct n2_hmac_alg *n2_hmac_alg(struct crypto_tfm *tfm)
 276{
 277        struct crypto_alg *alg = tfm->__crt_alg;
 278        struct ahash_alg *ahash_alg;
 279
 280        ahash_alg = container_of(alg, struct ahash_alg, halg.base);
 281
 282        return container_of(ahash_alg, struct n2_hmac_alg, derived.alg);
 283}
 284
 285struct n2_hash_ctx {
 286        struct crypto_ahash             *fallback_tfm;
 287};
 288
 289#define N2_HASH_KEY_MAX                 32 /* HW limit for all HMAC requests */
 290
 291struct n2_hmac_ctx {
 292        struct n2_hash_ctx              base;
 293
 294        struct crypto_shash             *child_shash;
 295
 296        int                             hash_key_len;
 297        unsigned char                   hash_key[N2_HASH_KEY_MAX];
 298};
 299
 300struct n2_hash_req_ctx {
 301        union {
 302                struct md5_state        md5;
 303                struct sha1_state       sha1;
 304                struct sha256_state     sha256;
 305        } u;
 306
 307        struct ahash_request            fallback_req;
 308};
 309
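    /* This driver only uses the SPU for complete, one-shot digests.  The
     * init/update/final/finup entry points below simply delegate to the
     * software fallback tfm allocated in n2_hash_cra_init(), and partial
     * state export/import is not supported (-ENOSYS).  Only digest() goes to
     * the hardware, via n2_do_async_digest(), and even that punts to the
     * fallback for requests the SPU cannot handle (see there).
     */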
 310static int n2_hash_async_init(struct ahash_request *req)
 311{
 312        struct n2_hash_req_ctx *rctx = ahash_request_ctx(req);
 313        struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
 314        struct n2_hash_ctx *ctx = crypto_ahash_ctx(tfm);
 315
 316        ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback_tfm);
 317        rctx->fallback_req.base.flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP;
 318
 319        return crypto_ahash_init(&rctx->fallback_req);
 320}
 321
 322static int n2_hash_async_update(struct ahash_request *req)
 323{
 324        struct n2_hash_req_ctx *rctx = ahash_request_ctx(req);
 325        struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
 326        struct n2_hash_ctx *ctx = crypto_ahash_ctx(tfm);
 327
 328        ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback_tfm);
 329        rctx->fallback_req.base.flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP;
 330        rctx->fallback_req.nbytes = req->nbytes;
 331        rctx->fallback_req.src = req->src;
 332
 333        return crypto_ahash_update(&rctx->fallback_req);
 334}
 335
 336static int n2_hash_async_final(struct ahash_request *req)
 337{
 338        struct n2_hash_req_ctx *rctx = ahash_request_ctx(req);
 339        struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
 340        struct n2_hash_ctx *ctx = crypto_ahash_ctx(tfm);
 341
 342        ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback_tfm);
 343        rctx->fallback_req.base.flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP;
 344        rctx->fallback_req.result = req->result;
 345
 346        return crypto_ahash_final(&rctx->fallback_req);
 347}
 348
 349static int n2_hash_async_finup(struct ahash_request *req)
 350{
 351        struct n2_hash_req_ctx *rctx = ahash_request_ctx(req);
 352        struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
 353        struct n2_hash_ctx *ctx = crypto_ahash_ctx(tfm);
 354
 355        ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback_tfm);
 356        rctx->fallback_req.base.flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP;
 357        rctx->fallback_req.nbytes = req->nbytes;
 358        rctx->fallback_req.src = req->src;
 359        rctx->fallback_req.result = req->result;
 360
 361        return crypto_ahash_finup(&rctx->fallback_req);
 362}
 363
 364static int n2_hash_async_noimport(struct ahash_request *req, const void *in)
 365{
 366        return -ENOSYS;
 367}
 368
 369static int n2_hash_async_noexport(struct ahash_request *req, void *out)
 370{
 371        return -ENOSYS;
 372}
 373
 374static int n2_hash_cra_init(struct crypto_tfm *tfm)
 375{
 376        const char *fallback_driver_name = crypto_tfm_alg_name(tfm);
 377        struct crypto_ahash *ahash = __crypto_ahash_cast(tfm);
 378        struct n2_hash_ctx *ctx = crypto_ahash_ctx(ahash);
 379        struct crypto_ahash *fallback_tfm;
 380        int err;
 381
 382        fallback_tfm = crypto_alloc_ahash(fallback_driver_name, 0,
 383                                          CRYPTO_ALG_NEED_FALLBACK);
 384        if (IS_ERR(fallback_tfm)) {
 385                pr_warn("Fallback driver '%s' could not be loaded!\n",
 386                        fallback_driver_name);
 387                err = PTR_ERR(fallback_tfm);
 388                goto out;
 389        }
 390
 391        crypto_ahash_set_reqsize(ahash, (sizeof(struct n2_hash_req_ctx) +
 392                                         crypto_ahash_reqsize(fallback_tfm)));
 393
 394        ctx->fallback_tfm = fallback_tfm;
 395        return 0;
 396
 397out:
 398        return err;
 399}
 400
 401static void n2_hash_cra_exit(struct crypto_tfm *tfm)
 402{
 403        struct crypto_ahash *ahash = __crypto_ahash_cast(tfm);
 404        struct n2_hash_ctx *ctx = crypto_ahash_ctx(ahash);
 405
 406        crypto_free_ahash(ctx->fallback_tfm);
 407}
 408
 409static int n2_hmac_cra_init(struct crypto_tfm *tfm)
 410{
 411        const char *fallback_driver_name = crypto_tfm_alg_name(tfm);
 412        struct crypto_ahash *ahash = __crypto_ahash_cast(tfm);
 413        struct n2_hmac_ctx *ctx = crypto_ahash_ctx(ahash);
 414        struct n2_hmac_alg *n2alg = n2_hmac_alg(tfm);
 415        struct crypto_ahash *fallback_tfm;
 416        struct crypto_shash *child_shash;
 417        int err;
 418
 419        fallback_tfm = crypto_alloc_ahash(fallback_driver_name, 0,
 420                                          CRYPTO_ALG_NEED_FALLBACK);
 421        if (IS_ERR(fallback_tfm)) {
 422                pr_warn("Fallback driver '%s' could not be loaded!\n",
 423                        fallback_driver_name);
 424                err = PTR_ERR(fallback_tfm);
 425                goto out;
 426        }
 427
 428        child_shash = crypto_alloc_shash(n2alg->child_alg, 0, 0);
 429        if (IS_ERR(child_shash)) {
 430                pr_warn("Child shash '%s' could not be loaded!\n",
 431                        n2alg->child_alg);
 432                err = PTR_ERR(child_shash);
 433                goto out_free_fallback;
 434        }
 435
 436        crypto_ahash_set_reqsize(ahash, (sizeof(struct n2_hash_req_ctx) +
 437                                         crypto_ahash_reqsize(fallback_tfm)));
 438
 439        ctx->child_shash = child_shash;
 440        ctx->base.fallback_tfm = fallback_tfm;
 441        return 0;
 442
 443out_free_fallback:
 444        crypto_free_ahash(fallback_tfm);
 445
 446out:
 447        return err;
 448}
 449
 450static void n2_hmac_cra_exit(struct crypto_tfm *tfm)
 451{
 452        struct crypto_ahash *ahash = __crypto_ahash_cast(tfm);
 453        struct n2_hmac_ctx *ctx = crypto_ahash_ctx(ahash);
 454
 455        crypto_free_ahash(ctx->base.fallback_tfm);
 456        crypto_free_shash(ctx->child_shash);
 457}
 458
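    /* Standard HMAC key preparation: the key is handed unchanged to the
     * fallback tfm, and a key longer than the child hash's block size is
     * digested down to its digest size before being cached in hash_key[].
     * A key that is longer than N2_HASH_KEY_MAX but no longer than the block
     * size cannot be cached; n2_hmac_async_digest() notices the oversized
     * hash_key_len and routes such requests to the software fallback.
     */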
 459static int n2_hmac_async_setkey(struct crypto_ahash *tfm, const u8 *key,
 460                                unsigned int keylen)
 461{
 462        struct n2_hmac_ctx *ctx = crypto_ahash_ctx(tfm);
 463        struct crypto_shash *child_shash = ctx->child_shash;
 464        struct crypto_ahash *fallback_tfm;
 465        int err, bs, ds;
 466
 467        fallback_tfm = ctx->base.fallback_tfm;
 468        err = crypto_ahash_setkey(fallback_tfm, key, keylen);
 469        if (err)
 470                return err;
 471
 472        bs = crypto_shash_blocksize(child_shash);
 473        ds = crypto_shash_digestsize(child_shash);
 474        BUG_ON(ds > N2_HASH_KEY_MAX);
 475        if (keylen > bs) {
 476                err = crypto_shash_tfm_digest(child_shash, key, keylen,
 477                                              ctx->hash_key);
 478                if (err)
 479                        return err;
 480                keylen = ds;
 481        } else if (keylen <= N2_HASH_KEY_MAX)
 482                memcpy(ctx->hash_key, key, keylen);
 483
 484        ctx->hash_key_len = keylen;
 485
 486        return err;
 487}
 488
 489static unsigned long wait_for_tail(struct spu_queue *qp)
 490{
 491        unsigned long head, hv_ret;
 492
 493        do {
 494                hv_ret = sun4v_ncs_gethead(qp->qhandle, &head);
 495                if (hv_ret != HV_EOK) {
 496                        pr_err("Hypervisor error on gethead\n");
 497                        break;
 498                }
 499                if (head == qp->tail) {
 500                        qp->head = head;
 501                        break;
 502                }
 503        } while (1);
 504        return hv_ret;
 505}
 506
 507static unsigned long submit_and_wait_for_tail(struct spu_queue *qp,
 508                                              struct cwq_initial_entry *ent)
 509{
 510        unsigned long hv_ret = spu_queue_submit(qp, ent);
 511
 512        if (hv_ret == HV_EOK)
 513                hv_ret = wait_for_tail(qp);
 514
 515        return hv_ret;
 516}
 517
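    /* Core hardware digest path: walk the request with crypto_hash_walk and
     * emit one CWQ descriptor per contiguous piece.  The first descriptor
     * carries the full control word plus the hash state address and the
     * result address (both hash_loc); the continuation descriptors carry
     * only a length and a source address.  The final descriptor is tagged
     * with CONTROL_END_OF_BLOCK, then the chain is submitted and we spin
     * until the hardware head catches up with our tail.  Requests above the
     * 2^16-byte limit noted below are handed to the software fallback
     * instead.
     */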
 518static int n2_do_async_digest(struct ahash_request *req,
 519                              unsigned int auth_type, unsigned int digest_size,
 520                              unsigned int result_size, void *hash_loc,
 521                              unsigned long auth_key, unsigned int auth_key_len)
 522{
 523        struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
 524        struct cwq_initial_entry *ent;
 525        struct crypto_hash_walk walk;
 526        struct spu_queue *qp;
 527        unsigned long flags;
 528        int err = -ENODEV;
 529        int nbytes, cpu;
 530
 531        /* The total effective length of the operation may not
 532         * exceed 2^16.
 533         */
 534        if (unlikely(req->nbytes > (1 << 16))) {
 535                struct n2_hash_req_ctx *rctx = ahash_request_ctx(req);
 536                struct n2_hash_ctx *ctx = crypto_ahash_ctx(tfm);
 537
 538                ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback_tfm);
 539                rctx->fallback_req.base.flags =
 540                        req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP;
 541                rctx->fallback_req.nbytes = req->nbytes;
 542                rctx->fallback_req.src = req->src;
 543                rctx->fallback_req.result = req->result;
 544
 545                return crypto_ahash_digest(&rctx->fallback_req);
 546        }
 547
 548        nbytes = crypto_hash_walk_first(req, &walk);
 549
 550        cpu = get_cpu();
 551        qp = cpu_to_cwq[cpu];
 552        if (!qp)
 553                goto out;
 554
 555        spin_lock_irqsave(&qp->lock, flags);
 556
 557        /* XXX can do better, improve this later by doing a by-hand scatterlist
 558         * XXX walk, etc.
 559         */
 560        ent = qp->q + qp->tail;
 561
 562        ent->control = control_word_base(nbytes, auth_key_len, 0,
 563                                         auth_type, digest_size,
 564                                         false, true, false, false,
 565                                         OPCODE_INPLACE_BIT |
 566                                         OPCODE_AUTH_MAC);
 567        ent->src_addr = __pa(walk.data);
 568        ent->auth_key_addr = auth_key;
 569        ent->auth_iv_addr = __pa(hash_loc);
 570        ent->final_auth_state_addr = 0UL;
 571        ent->enc_key_addr = 0UL;
 572        ent->enc_iv_addr = 0UL;
 573        ent->dest_addr = __pa(hash_loc);
 574
 575        nbytes = crypto_hash_walk_done(&walk, 0);
 576        while (nbytes > 0) {
 577                ent = spu_queue_next(qp, ent);
 578
 579                ent->control = (nbytes - 1);
 580                ent->src_addr = __pa(walk.data);
 581                ent->auth_key_addr = 0UL;
 582                ent->auth_iv_addr = 0UL;
 583                ent->final_auth_state_addr = 0UL;
 584                ent->enc_key_addr = 0UL;
 585                ent->enc_iv_addr = 0UL;
 586                ent->dest_addr = 0UL;
 587
 588                nbytes = crypto_hash_walk_done(&walk, 0);
 589        }
 590        ent->control |= CONTROL_END_OF_BLOCK;
 591
 592        if (submit_and_wait_for_tail(qp, ent) != HV_EOK)
 593                err = -EINVAL;
 594        else
 595                err = 0;
 596
 597        spin_unlock_irqrestore(&qp->lock, flags);
 598
 599        if (!err)
 600                memcpy(req->result, hash_loc, result_size);
 601out:
 602        put_cpu();
 603
 604        return err;
 605}
 606
 607static int n2_hash_async_digest(struct ahash_request *req)
 608{
 609        struct n2_ahash_alg *n2alg = n2_ahash_alg(req->base.tfm);
 610        struct n2_hash_req_ctx *rctx = ahash_request_ctx(req);
 611        int ds;
 612
 613        ds = n2alg->digest_size;
 614        if (unlikely(req->nbytes == 0)) {
 615                memcpy(req->result, n2alg->hash_zero, ds);
 616                return 0;
 617        }
 618        memcpy(&rctx->u, n2alg->hash_init, n2alg->hw_op_hashsz);
 619
 620        return n2_do_async_digest(req, n2alg->auth_type,
 621                                  n2alg->hw_op_hashsz, ds,
 622                                  &rctx->u, 0UL, 0);
 623}
 624
 625static int n2_hmac_async_digest(struct ahash_request *req)
 626{
 627        struct n2_hmac_alg *n2alg = n2_hmac_alg(req->base.tfm);
 628        struct n2_hash_req_ctx *rctx = ahash_request_ctx(req);
 629        struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
 630        struct n2_hmac_ctx *ctx = crypto_ahash_ctx(tfm);
 631        int ds;
 632
 633        ds = n2alg->derived.digest_size;
 634        if (unlikely(req->nbytes == 0) ||
 635            unlikely(ctx->hash_key_len > N2_HASH_KEY_MAX)) {
 636                struct n2_hash_req_ctx *rctx = ahash_request_ctx(req);
 637                struct n2_hash_ctx *ctx = crypto_ahash_ctx(tfm);
 638
 639                ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback_tfm);
 640                rctx->fallback_req.base.flags =
 641                        req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP;
 642                rctx->fallback_req.nbytes = req->nbytes;
 643                rctx->fallback_req.src = req->src;
 644                rctx->fallback_req.result = req->result;
 645
 646                return crypto_ahash_digest(&rctx->fallback_req);
 647        }
 648        memcpy(&rctx->u, n2alg->derived.hash_init,
 649               n2alg->derived.hw_op_hashsz);
 650
 651        return n2_do_async_digest(req, n2alg->derived.hmac_type,
 652                                  n2alg->derived.hw_op_hashsz, ds,
 653                                  &rctx->u,
 654                                  __pa(&ctx->hash_key),
 655                                  ctx->hash_key_len);
 656}
 657
 658struct n2_skcipher_context {
 659        int                     key_len;
 660        int                     enc_type;
 661        union {
 662                u8              aes[AES_MAX_KEY_SIZE];
 663                u8              des[DES_KEY_SIZE];
 664                u8              des3[3 * DES_KEY_SIZE];
 665        } key;
 666};
 667
 668#define N2_CHUNK_ARR_LEN        16
 669
 670struct n2_crypto_chunk {
 671        struct list_head        entry;
 672        unsigned long           iv_paddr : 44;
 673        unsigned long           arr_len : 20;
 674        unsigned long           dest_paddr;
 675        unsigned long           dest_final;
 676        struct {
 677                unsigned long   src_paddr : 44;
 678                unsigned long   src_len : 20;
 679        } arr[N2_CHUNK_ARR_LEN];
 680};
 681
 682struct n2_request_context {
 683        struct skcipher_walk    walk;
 684        struct list_head        chunk_list;
 685        struct n2_crypto_chunk  chunk;
 686        u8                      temp_iv[16];
 687};
 688
 689/* The SPU allows some level of flexibility for partial cipher blocks
 690 * being specified in a descriptor.
 691 *
 692 * It merely requires that every descriptor's length field is at least
 693 * as large as the cipher block size.  This means that a cipher block
 694 * can span at most 2 descriptors.  However, this does not allow a
 695 * partial block to span into the final descriptor as that would
 696 * violate the rule (since every descriptor's length must be at least
 697 * the block size).  So, for example, assuming an 8 byte block size:
 698 *
 699 *      0xe --> 0xa --> 0x8
 700 *
 701 * is a valid length sequence, whereas:
 702 *
 703 *      0xe --> 0xb --> 0x7
 704 *
 705 * is not a valid sequence.
 706 */
 707
 708struct n2_skcipher_alg {
 709        struct list_head        entry;
 710        u8                      enc_type;
 711        struct skcipher_alg     skcipher;
 712};
 713
 714static inline struct n2_skcipher_alg *n2_skcipher_alg(struct crypto_skcipher *tfm)
 715{
 716        struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
 717
 718        return container_of(alg, struct n2_skcipher_alg, skcipher);
 719}
 720
 721struct n2_skcipher_request_context {
 722        struct skcipher_walk    walk;
 723};
 724
 725static int n2_aes_setkey(struct crypto_skcipher *skcipher, const u8 *key,
 726                         unsigned int keylen)
 727{
 728        struct crypto_tfm *tfm = crypto_skcipher_tfm(skcipher);
 729        struct n2_skcipher_context *ctx = crypto_tfm_ctx(tfm);
 730        struct n2_skcipher_alg *n2alg = n2_skcipher_alg(skcipher);
 731
 732        ctx->enc_type = (n2alg->enc_type & ENC_TYPE_CHAINING_MASK);
 733
 734        switch (keylen) {
 735        case AES_KEYSIZE_128:
 736                ctx->enc_type |= ENC_TYPE_ALG_AES128;
 737                break;
 738        case AES_KEYSIZE_192:
 739                ctx->enc_type |= ENC_TYPE_ALG_AES192;
 740                break;
 741        case AES_KEYSIZE_256:
 742                ctx->enc_type |= ENC_TYPE_ALG_AES256;
 743                break;
 744        default:
 745                return -EINVAL;
 746        }
 747
 748        ctx->key_len = keylen;
 749        memcpy(ctx->key.aes, key, keylen);
 750        return 0;
 751}
 752
 753static int n2_des_setkey(struct crypto_skcipher *skcipher, const u8 *key,
 754                         unsigned int keylen)
 755{
 756        struct crypto_tfm *tfm = crypto_skcipher_tfm(skcipher);
 757        struct n2_skcipher_context *ctx = crypto_tfm_ctx(tfm);
 758        struct n2_skcipher_alg *n2alg = n2_skcipher_alg(skcipher);
 759        int err;
 760
 761        err = verify_skcipher_des_key(skcipher, key);
 762        if (err)
 763                return err;
 764
 765        ctx->enc_type = n2alg->enc_type;
 766
 767        ctx->key_len = keylen;
 768        memcpy(ctx->key.des, key, keylen);
 769        return 0;
 770}
 771
 772static int n2_3des_setkey(struct crypto_skcipher *skcipher, const u8 *key,
 773                          unsigned int keylen)
 774{
 775        struct crypto_tfm *tfm = crypto_skcipher_tfm(skcipher);
 776        struct n2_skcipher_context *ctx = crypto_tfm_ctx(tfm);
 777        struct n2_skcipher_alg *n2alg = n2_skcipher_alg(skcipher);
 778        int err;
 779
 780        err = verify_skcipher_des3_key(skcipher, key);
 781        if (err)
 782                return err;
 783
 784        ctx->enc_type = n2alg->enc_type;
 785
 786        ctx->key_len = keylen;
 787        memcpy(ctx->key.des3, key, keylen);
 788        return 0;
 789}
 790
 791static inline int skcipher_descriptor_len(int nbytes, unsigned int block_size)
 792{
 793        int this_len = nbytes;
 794
 795        this_len -= (nbytes & (block_size - 1));
 796        return this_len > (1 << 16) ? (1 << 16) : this_len;
 797}
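
    /* Worked example (illustrative numbers): nbytes == 0x1001d with a
     * 16-byte AES block is first rounded down to the block multiple 0x10010
     * and then capped to the 1 << 16 maximum a single descriptor may cover;
     * nbytes == 0x3f8 with an 8-byte DES block is already a block multiple
     * below the cap and is returned unchanged.
     */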
 798
 799static int __n2_crypt_chunk(struct crypto_skcipher *skcipher,
 800                            struct n2_crypto_chunk *cp,
 801                            struct spu_queue *qp, bool encrypt)
 802{
 803        struct n2_skcipher_context *ctx = crypto_skcipher_ctx(skcipher);
 804        struct cwq_initial_entry *ent;
 805        bool in_place;
 806        int i;
 807
 808        ent = spu_queue_alloc(qp, cp->arr_len);
 809        if (!ent) {
 810                pr_info("queue_alloc() of %d fails\n",
 811                        cp->arr_len);
 812                return -EBUSY;
 813        }
 814
 815        in_place = (cp->dest_paddr == cp->arr[0].src_paddr);
 816
 817        ent->control = control_word_base(cp->arr[0].src_len,
 818                                         0, ctx->enc_type, 0, 0,
 819                                         false, true, false, encrypt,
 820                                         OPCODE_ENCRYPT |
 821                                         (in_place ? OPCODE_INPLACE_BIT : 0));
 822        ent->src_addr = cp->arr[0].src_paddr;
 823        ent->auth_key_addr = 0UL;
 824        ent->auth_iv_addr = 0UL;
 825        ent->final_auth_state_addr = 0UL;
 826        ent->enc_key_addr = __pa(&ctx->key);
 827        ent->enc_iv_addr = cp->iv_paddr;
 828        ent->dest_addr = (in_place ? 0UL : cp->dest_paddr);
 829
 830        for (i = 1; i < cp->arr_len; i++) {
 831                ent = spu_queue_next(qp, ent);
 832
 833                ent->control = cp->arr[i].src_len - 1;
 834                ent->src_addr = cp->arr[i].src_paddr;
 835                ent->auth_key_addr = 0UL;
 836                ent->auth_iv_addr = 0UL;
 837                ent->final_auth_state_addr = 0UL;
 838                ent->enc_key_addr = 0UL;
 839                ent->enc_iv_addr = 0UL;
 840                ent->dest_addr = 0UL;
 841        }
 842        ent->control |= CONTROL_END_OF_BLOCK;
 843
 844        return (spu_queue_submit(qp, ent) != HV_EOK) ? -EINVAL : 0;
 845}
 846
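    /* Walk the request and group its scatter/gather pieces into
     * n2_crypto_chunk records.  A new chunk is started whenever one of the
     * per-chunk limits would otherwise be exceeded: more than
     * N2_CHUNK_ARR_LEN source descriptors, more than 2^16 bytes in total, a
     * switch between in-place and copying operation, or (when copying) a
     * destination that is not physically contiguous with the previous piece.
     * The first chunk is embedded in the request context; any further chunks
     * are kzalloc'd and freed once the request has been processed.
     */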
 847static int n2_compute_chunks(struct skcipher_request *req)
 848{
 849        struct n2_request_context *rctx = skcipher_request_ctx(req);
 850        struct skcipher_walk *walk = &rctx->walk;
 851        struct n2_crypto_chunk *chunk;
 852        unsigned long dest_prev;
 853        unsigned int tot_len;
 854        bool prev_in_place;
 855        int err, nbytes;
 856
 857        err = skcipher_walk_async(walk, req);
 858        if (err)
 859                return err;
 860
 861        INIT_LIST_HEAD(&rctx->chunk_list);
 862
 863        chunk = &rctx->chunk;
 864        INIT_LIST_HEAD(&chunk->entry);
 865
 866        chunk->iv_paddr = 0UL;
 867        chunk->arr_len = 0;
 868        chunk->dest_paddr = 0UL;
 869
 870        prev_in_place = false;
 871        dest_prev = ~0UL;
 872        tot_len = 0;
 873
 874        while ((nbytes = walk->nbytes) != 0) {
 875                unsigned long dest_paddr, src_paddr;
 876                bool in_place;
 877                int this_len;
 878
 879                src_paddr = (page_to_phys(walk->src.phys.page) +
 880                             walk->src.phys.offset);
 881                dest_paddr = (page_to_phys(walk->dst.phys.page) +
 882                              walk->dst.phys.offset);
 883                in_place = (src_paddr == dest_paddr);
 884                this_len = skcipher_descriptor_len(nbytes, walk->blocksize);
 885
 886                if (chunk->arr_len != 0) {
 887                        if (in_place != prev_in_place ||
 888                            (!prev_in_place &&
 889                             dest_paddr != dest_prev) ||
 890                            chunk->arr_len == N2_CHUNK_ARR_LEN ||
 891                            tot_len + this_len > (1 << 16)) {
 892                                chunk->dest_final = dest_prev;
 893                                list_add_tail(&chunk->entry,
 894                                              &rctx->chunk_list);
 895                                chunk = kzalloc(sizeof(*chunk), GFP_ATOMIC);
 896                                if (!chunk) {
 897                                        err = -ENOMEM;
 898                                        break;
 899                                }
 900                                INIT_LIST_HEAD(&chunk->entry);
 901                        }
 902                }
 903                if (chunk->arr_len == 0) {
 904                        chunk->dest_paddr = dest_paddr;
 905                        tot_len = 0;
 906                }
 907                chunk->arr[chunk->arr_len].src_paddr = src_paddr;
 908                chunk->arr[chunk->arr_len].src_len = this_len;
 909                chunk->arr_len++;
 910
 911                dest_prev = dest_paddr + this_len;
 912                prev_in_place = in_place;
 913                tot_len += this_len;
 914
 915                err = skcipher_walk_done(walk, nbytes - this_len);
 916                if (err)
 917                        break;
 918        }
 919        if (!err && chunk->arr_len != 0) {
 920                chunk->dest_final = dest_prev;
 921                list_add_tail(&chunk->entry, &rctx->chunk_list);
 922        }
 923
 924        return err;
 925}
 926
 927static void n2_chunk_complete(struct skcipher_request *req, void *final_iv)
 928{
 929        struct n2_request_context *rctx = skcipher_request_ctx(req);
 930        struct n2_crypto_chunk *c, *tmp;
 931
 932        if (final_iv)
 933                memcpy(rctx->walk.iv, final_iv, rctx->walk.blocksize);
 934
 935        list_for_each_entry_safe(c, tmp, &rctx->chunk_list, entry) {
 936                list_del(&c->entry);
 937                if (unlikely(c != &rctx->chunk))
 938                        kfree(c);
 939        }
 940
 941}
 942
 943static int n2_do_ecb(struct skcipher_request *req, bool encrypt)
 944{
 945        struct n2_request_context *rctx = skcipher_request_ctx(req);
 946        struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
 947        int err = n2_compute_chunks(req);
 948        struct n2_crypto_chunk *c, *tmp;
 949        unsigned long flags, hv_ret;
 950        struct spu_queue *qp;
 951
 952        if (err)
 953                return err;
 954
 955        qp = cpu_to_cwq[get_cpu()];
 956        err = -ENODEV;
 957        if (!qp)
 958                goto out;
 959
 960        spin_lock_irqsave(&qp->lock, flags);
 961
 962        list_for_each_entry_safe(c, tmp, &rctx->chunk_list, entry) {
 963                err = __n2_crypt_chunk(tfm, c, qp, encrypt);
 964                if (err)
 965                        break;
 966                list_del(&c->entry);
 967                if (unlikely(c != &rctx->chunk))
 968                        kfree(c);
 969        }
 970        if (!err) {
 971                hv_ret = wait_for_tail(qp);
 972                if (hv_ret != HV_EOK)
 973                        err = -EINVAL;
 974        }
 975
 976        spin_unlock_irqrestore(&qp->lock, flags);
 977
 978out:
 979        put_cpu();
 980
 981        n2_chunk_complete(req, NULL);
 982        return err;
 983}
 984
 985static int n2_encrypt_ecb(struct skcipher_request *req)
 986{
 987        return n2_do_ecb(req, true);
 988}
 989
 990static int n2_decrypt_ecb(struct skcipher_request *req)
 991{
 992        return n2_do_ecb(req, false);
 993}
 994
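    /* IV handling for the chaining modes.  When encrypting, chunks are
     * processed in order and each chunk's IV is the last output block of the
     * chunk before it (c->dest_final - blocksize), with the request IV
     * seeding the first chunk.  When decrypting (CBC/CFB), the chunks are
     * walked in reverse so that each chunk's IV, the last ciphertext block
     * of the preceding chunk, is still intact when an in-place operation
     * would otherwise have overwritten it; the IV to hand back to the caller
     * is copied into rctx->temp_iv up front for the same reason.
     */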
 995static int n2_do_chaining(struct skcipher_request *req, bool encrypt)
 996{
 997        struct n2_request_context *rctx = skcipher_request_ctx(req);
 998        struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
 999        unsigned long flags, hv_ret, iv_paddr;
1000        int err = n2_compute_chunks(req);
1001        struct n2_crypto_chunk *c, *tmp;
1002        struct spu_queue *qp;
1003        void *final_iv_addr;
1004
1005        final_iv_addr = NULL;
1006
1007        if (err)
1008                return err;
1009
1010        qp = cpu_to_cwq[get_cpu()];
1011        err = -ENODEV;
1012        if (!qp)
1013                goto out;
1014
1015        spin_lock_irqsave(&qp->lock, flags);
1016
1017        if (encrypt) {
1018                iv_paddr = __pa(rctx->walk.iv);
1019                list_for_each_entry_safe(c, tmp, &rctx->chunk_list,
1020                                         entry) {
1021                        c->iv_paddr = iv_paddr;
1022                        err = __n2_crypt_chunk(tfm, c, qp, true);
1023                        if (err)
1024                                break;
1025                        iv_paddr = c->dest_final - rctx->walk.blocksize;
1026                        list_del(&c->entry);
1027                        if (unlikely(c != &rctx->chunk))
1028                                kfree(c);
1029                }
1030                final_iv_addr = __va(iv_paddr);
1031        } else {
1032                list_for_each_entry_safe_reverse(c, tmp, &rctx->chunk_list,
1033                                                 entry) {
1034                        if (c == &rctx->chunk) {
1035                                iv_paddr = __pa(rctx->walk.iv);
1036                        } else {
1037                                iv_paddr = (tmp->arr[tmp->arr_len-1].src_paddr +
1038                                            tmp->arr[tmp->arr_len-1].src_len -
1039                                            rctx->walk.blocksize);
1040                        }
1041                        if (!final_iv_addr) {
1042                                unsigned long pa;
1043
1044                                pa = (c->arr[c->arr_len-1].src_paddr +
1045                                      c->arr[c->arr_len-1].src_len -
1046                                      rctx->walk.blocksize);
1047                                final_iv_addr = rctx->temp_iv;
1048                                memcpy(rctx->temp_iv, __va(pa),
1049                                       rctx->walk.blocksize);
1050                        }
1051                        c->iv_paddr = iv_paddr;
1052                        err = __n2_crypt_chunk(tfm, c, qp, false);
1053                        if (err)
1054                                break;
1055                        list_del(&c->entry);
1056                        if (unlikely(c != &rctx->chunk))
1057                                kfree(c);
1058                }
1059        }
1060        if (!err) {
1061                hv_ret = wait_for_tail(qp);
1062                if (hv_ret != HV_EOK)
1063                        err = -EINVAL;
1064        }
1065
1066        spin_unlock_irqrestore(&qp->lock, flags);
1067
1068out:
1069        put_cpu();
1070
1071        n2_chunk_complete(req, err ? NULL : final_iv_addr);
1072        return err;
1073}
1074
1075static int n2_encrypt_chaining(struct skcipher_request *req)
1076{
1077        return n2_do_chaining(req, true);
1078}
1079
1080static int n2_decrypt_chaining(struct skcipher_request *req)
1081{
1082        return n2_do_chaining(req, false);
1083}
1084
1085struct n2_skcipher_tmpl {
1086        const char              *name;
1087        const char              *drv_name;
1088        u8                      block_size;
1089        u8                      enc_type;
1090        struct skcipher_alg     skcipher;
1091};
1092
1093static const struct n2_skcipher_tmpl skcipher_tmpls[] = {
1094        /* DES: ECB, CBC and CFB are supported */
1095        {       .name           = "ecb(des)",
1096                .drv_name       = "ecb-des",
1097                .block_size     = DES_BLOCK_SIZE,
1098                .enc_type       = (ENC_TYPE_ALG_DES |
1099                                   ENC_TYPE_CHAINING_ECB),
1100                .skcipher       = {
1101                        .min_keysize    = DES_KEY_SIZE,
1102                        .max_keysize    = DES_KEY_SIZE,
1103                        .setkey         = n2_des_setkey,
1104                        .encrypt        = n2_encrypt_ecb,
1105                        .decrypt        = n2_decrypt_ecb,
1106                },
1107        },
1108        {       .name           = "cbc(des)",
1109                .drv_name       = "cbc-des",
1110                .block_size     = DES_BLOCK_SIZE,
1111                .enc_type       = (ENC_TYPE_ALG_DES |
1112                                   ENC_TYPE_CHAINING_CBC),
1113                .skcipher       = {
1114                        .ivsize         = DES_BLOCK_SIZE,
1115                        .min_keysize    = DES_KEY_SIZE,
1116                        .max_keysize    = DES_KEY_SIZE,
1117                        .setkey         = n2_des_setkey,
1118                        .encrypt        = n2_encrypt_chaining,
1119                        .decrypt        = n2_decrypt_chaining,
1120                },
1121        },
1122        {       .name           = "cfb(des)",
1123                .drv_name       = "cfb-des",
1124                .block_size     = DES_BLOCK_SIZE,
1125                .enc_type       = (ENC_TYPE_ALG_DES |
1126                                   ENC_TYPE_CHAINING_CFB),
1127                .skcipher       = {
1128                        .min_keysize    = DES_KEY_SIZE,
1129                        .max_keysize    = DES_KEY_SIZE,
1130                        .setkey         = n2_des_setkey,
1131                        .encrypt        = n2_encrypt_chaining,
1132                        .decrypt        = n2_decrypt_chaining,
1133                },
1134        },
1135
1136        /* 3DES: ECB, CBC and CFB are supported */
1137        {       .name           = "ecb(des3_ede)",
1138                .drv_name       = "ecb-3des",
1139                .block_size     = DES_BLOCK_SIZE,
1140                .enc_type       = (ENC_TYPE_ALG_3DES |
1141                                   ENC_TYPE_CHAINING_ECB),
1142                .skcipher       = {
1143                        .min_keysize    = 3 * DES_KEY_SIZE,
1144                        .max_keysize    = 3 * DES_KEY_SIZE,
1145                        .setkey         = n2_3des_setkey,
1146                        .encrypt        = n2_encrypt_ecb,
1147                        .decrypt        = n2_decrypt_ecb,
1148                },
1149        },
1150        {       .name           = "cbc(des3_ede)",
1151                .drv_name       = "cbc-3des",
1152                .block_size     = DES_BLOCK_SIZE,
1153                .enc_type       = (ENC_TYPE_ALG_3DES |
1154                                   ENC_TYPE_CHAINING_CBC),
1155                .skcipher       = {
1156                        .ivsize         = DES_BLOCK_SIZE,
1157                        .min_keysize    = 3 * DES_KEY_SIZE,
1158                        .max_keysize    = 3 * DES_KEY_SIZE,
1159                        .setkey         = n2_3des_setkey,
1160                        .encrypt        = n2_encrypt_chaining,
1161                        .decrypt        = n2_decrypt_chaining,
1162                },
1163        },
1164        {       .name           = "cfb(des3_ede)",
1165                .drv_name       = "cfb-3des",
1166                .block_size     = DES_BLOCK_SIZE,
1167                .enc_type       = (ENC_TYPE_ALG_3DES |
1168                                   ENC_TYPE_CHAINING_CFB),
1169                .skcipher       = {
1170                        .min_keysize    = 3 * DES_KEY_SIZE,
1171                        .max_keysize    = 3 * DES_KEY_SIZE,
1172                        .setkey         = n2_3des_setkey,
1173                        .encrypt        = n2_encrypt_chaining,
1174                        .decrypt        = n2_decrypt_chaining,
1175                },
1176        },
1177        /* AES: ECB, CBC and CTR are supported */
1178        {       .name           = "ecb(aes)",
1179                .drv_name       = "ecb-aes",
1180                .block_size     = AES_BLOCK_SIZE,
1181                .enc_type       = (ENC_TYPE_ALG_AES128 |
1182                                   ENC_TYPE_CHAINING_ECB),
1183                .skcipher       = {
1184                        .min_keysize    = AES_MIN_KEY_SIZE,
1185                        .max_keysize    = AES_MAX_KEY_SIZE,
1186                        .setkey         = n2_aes_setkey,
1187                        .encrypt        = n2_encrypt_ecb,
1188                        .decrypt        = n2_decrypt_ecb,
1189                },
1190        },
1191        {       .name           = "cbc(aes)",
1192                .drv_name       = "cbc-aes",
1193                .block_size     = AES_BLOCK_SIZE,
1194                .enc_type       = (ENC_TYPE_ALG_AES128 |
1195                                   ENC_TYPE_CHAINING_CBC),
1196                .skcipher       = {
1197                        .ivsize         = AES_BLOCK_SIZE,
1198                        .min_keysize    = AES_MIN_KEY_SIZE,
1199                        .max_keysize    = AES_MAX_KEY_SIZE,
1200                        .setkey         = n2_aes_setkey,
1201                        .encrypt        = n2_encrypt_chaining,
1202                        .decrypt        = n2_decrypt_chaining,
1203                },
1204        },
1205        {       .name           = "ctr(aes)",
1206                .drv_name       = "ctr-aes",
1207                .block_size     = AES_BLOCK_SIZE,
1208                .enc_type       = (ENC_TYPE_ALG_AES128 |
1209                                   ENC_TYPE_CHAINING_COUNTER),
1210                .skcipher       = {
1211                        .ivsize         = AES_BLOCK_SIZE,
1212                        .min_keysize    = AES_MIN_KEY_SIZE,
1213                        .max_keysize    = AES_MAX_KEY_SIZE,
1214                        .setkey         = n2_aes_setkey,
1215                        .encrypt        = n2_encrypt_chaining,
1216                        .decrypt        = n2_encrypt_chaining,
1217                },
1218        },
1219
1220};
1221#define NUM_CIPHER_TMPLS ARRAY_SIZE(skcipher_tmpls)
1222
1223static LIST_HEAD(skcipher_algs);
1224
1225struct n2_hash_tmpl {
1226        const char      *name;
1227        const u8        *hash_zero;
1228        const u8        *hash_init;
1229        u8              hw_op_hashsz;
1230        u8              digest_size;
1231        u8              block_size;
1232        u8              auth_type;
1233        u8              hmac_type;
1234};
1235
1236static const __le32 n2_md5_init[MD5_HASH_WORDS] = {
1237        cpu_to_le32(MD5_H0),
1238        cpu_to_le32(MD5_H1),
1239        cpu_to_le32(MD5_H2),
1240        cpu_to_le32(MD5_H3),
1241};
1242static const u32 n2_sha1_init[SHA1_DIGEST_SIZE / 4] = {
1243        SHA1_H0, SHA1_H1, SHA1_H2, SHA1_H3, SHA1_H4,
1244};
1245static const u32 n2_sha256_init[SHA256_DIGEST_SIZE / 4] = {
1246        SHA256_H0, SHA256_H1, SHA256_H2, SHA256_H3,
1247        SHA256_H4, SHA256_H5, SHA256_H6, SHA256_H7,
1248};
1249static const u32 n2_sha224_init[SHA256_DIGEST_SIZE / 4] = {
1250        SHA224_H0, SHA224_H1, SHA224_H2, SHA224_H3,
1251        SHA224_H4, SHA224_H5, SHA224_H6, SHA224_H7,
1252};
1253
1254static const struct n2_hash_tmpl hash_tmpls[] = {
1255        { .name         = "md5",
1256          .hash_zero    = md5_zero_message_hash,
1257          .hash_init    = (u8 *)n2_md5_init,
1258          .auth_type    = AUTH_TYPE_MD5,
1259          .hmac_type    = AUTH_TYPE_HMAC_MD5,
1260          .hw_op_hashsz = MD5_DIGEST_SIZE,
1261          .digest_size  = MD5_DIGEST_SIZE,
1262          .block_size   = MD5_HMAC_BLOCK_SIZE },
1263        { .name         = "sha1",
1264          .hash_zero    = sha1_zero_message_hash,
1265          .hash_init    = (u8 *)n2_sha1_init,
1266          .auth_type    = AUTH_TYPE_SHA1,
1267          .hmac_type    = AUTH_TYPE_HMAC_SHA1,
1268          .hw_op_hashsz = SHA1_DIGEST_SIZE,
1269          .digest_size  = SHA1_DIGEST_SIZE,
1270          .block_size   = SHA1_BLOCK_SIZE },
1271        { .name         = "sha256",
1272          .hash_zero    = sha256_zero_message_hash,
1273          .hash_init    = (u8 *)n2_sha256_init,
1274          .auth_type    = AUTH_TYPE_SHA256,
1275          .hmac_type    = AUTH_TYPE_HMAC_SHA256,
1276          .hw_op_hashsz = SHA256_DIGEST_SIZE,
1277          .digest_size  = SHA256_DIGEST_SIZE,
1278          .block_size   = SHA256_BLOCK_SIZE },
1279        { .name         = "sha224",
1280          .hash_zero    = sha224_zero_message_hash,
1281          .hash_init    = (u8 *)n2_sha224_init,
1282          .auth_type    = AUTH_TYPE_SHA256,
1283          .hmac_type    = AUTH_TYPE_RESERVED,
1284          .hw_op_hashsz = SHA256_DIGEST_SIZE,
1285          .digest_size  = SHA224_DIGEST_SIZE,
1286          .block_size   = SHA224_BLOCK_SIZE },
1287};
1288#define NUM_HASH_TMPLS ARRAY_SIZE(hash_tmpls)
1289
1290static LIST_HEAD(ahash_algs);
1291static LIST_HEAD(hmac_algs);
1292
1293static int algs_registered;
1294
1295static void __n2_unregister_algs(void)
1296{
1297        struct n2_skcipher_alg *skcipher, *skcipher_tmp;
1298        struct n2_ahash_alg *alg, *alg_tmp;
1299        struct n2_hmac_alg *hmac, *hmac_tmp;
1300
1301        list_for_each_entry_safe(skcipher, skcipher_tmp, &skcipher_algs, entry) {
1302                crypto_unregister_skcipher(&skcipher->skcipher);
1303                list_del(&skcipher->entry);
1304                kfree(skcipher);
1305        }
1306        list_for_each_entry_safe(hmac, hmac_tmp, &hmac_algs, derived.entry) {
1307                crypto_unregister_ahash(&hmac->derived.alg);
1308                list_del(&hmac->derived.entry);
1309                kfree(hmac);
1310        }
1311        list_for_each_entry_safe(alg, alg_tmp, &ahash_algs, entry) {
1312                crypto_unregister_ahash(&alg->alg);
1313                list_del(&alg->entry);
1314                kfree(alg);
1315        }
1316}
1317
1318static int n2_skcipher_init_tfm(struct crypto_skcipher *tfm)
1319{
1320        crypto_skcipher_set_reqsize(tfm, sizeof(struct n2_request_context));
1321        return 0;
1322}
1323
1324static int __n2_register_one_skcipher(const struct n2_skcipher_tmpl *tmpl)
1325{
1326        struct n2_skcipher_alg *p = kzalloc(sizeof(*p), GFP_KERNEL);
1327        struct skcipher_alg *alg;
1328        int err;
1329
1330        if (!p)
1331                return -ENOMEM;
1332
1333        alg = &p->skcipher;
1334        *alg = tmpl->skcipher;
1335
1336        snprintf(alg->base.cra_name, CRYPTO_MAX_ALG_NAME, "%s", tmpl->name);
1337        snprintf(alg->base.cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s-n2", tmpl->drv_name);
1338        alg->base.cra_priority = N2_CRA_PRIORITY;
1339        alg->base.cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY | CRYPTO_ALG_ASYNC |
1340                              CRYPTO_ALG_ALLOCATES_MEMORY;
1341        alg->base.cra_blocksize = tmpl->block_size;
1342        p->enc_type = tmpl->enc_type;
1343        alg->base.cra_ctxsize = sizeof(struct n2_skcipher_context);
1344        alg->base.cra_module = THIS_MODULE;
1345        alg->init = n2_skcipher_init_tfm;
1346
1347        list_add(&p->entry, &skcipher_algs);
1348        err = crypto_register_skcipher(alg);
1349        if (err) {
1350                pr_err("%s alg registration failed\n", alg->base.cra_name);
1351                list_del(&p->entry);
1352                kfree(p);
1353        } else {
1354                pr_info("%s alg registered\n", alg->base.cra_name);
1355        }
1356        return err;
1357}
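
    /* With the templates above this yields cra_names such as "cbc(des)" and
     * driver names such as "cbc-des-n2", registered at N2_CRA_PRIORITY (200),
     * which is intended to make them preferred over the generic software
     * implementations.
     */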
1358
1359static int __n2_register_one_hmac(struct n2_ahash_alg *n2ahash)
1360{
1361        struct n2_hmac_alg *p = kzalloc(sizeof(*p), GFP_KERNEL);
1362        struct ahash_alg *ahash;
1363        struct crypto_alg *base;
1364        int err;
1365
1366        if (!p)
1367                return -ENOMEM;
1368
1369        p->child_alg = n2ahash->alg.halg.base.cra_name;
1370        memcpy(&p->derived, n2ahash, sizeof(struct n2_ahash_alg));
1371        INIT_LIST_HEAD(&p->derived.entry);
1372
1373        ahash = &p->derived.alg;
1374        ahash->digest = n2_hmac_async_digest;
1375        ahash->setkey = n2_hmac_async_setkey;
1376
1377        base = &ahash->halg.base;
1378        snprintf(base->cra_name, CRYPTO_MAX_ALG_NAME, "hmac(%s)", p->child_alg);
1379        snprintf(base->cra_driver_name, CRYPTO_MAX_ALG_NAME, "hmac-%s-n2", p->child_alg);
1380
1381        base->cra_ctxsize = sizeof(struct n2_hmac_ctx);
1382        base->cra_init = n2_hmac_cra_init;
1383        base->cra_exit = n2_hmac_cra_exit;
1384
1385        list_add(&p->derived.entry, &hmac_algs);
1386        err = crypto_register_ahash(ahash);
1387        if (err) {
1388                pr_err("%s alg registration failed\n", base->cra_name);
1389                list_del(&p->derived.entry);
1390                kfree(p);
1391        } else {
1392                pr_info("%s alg registered\n", base->cra_name);
1393        }
1394        return err;
1395}
1396
1397static int __n2_register_one_ahash(const struct n2_hash_tmpl *tmpl)
1398{
1399        struct n2_ahash_alg *p = kzalloc(sizeof(*p), GFP_KERNEL);
1400        struct hash_alg_common *halg;
1401        struct crypto_alg *base;
1402        struct ahash_alg *ahash;
1403        int err;
1404
1405        if (!p)
1406                return -ENOMEM;
1407
1408        p->hash_zero = tmpl->hash_zero;
1409        p->hash_init = tmpl->hash_init;
1410        p->auth_type = tmpl->auth_type;
1411        p->hmac_type = tmpl->hmac_type;
1412        p->hw_op_hashsz = tmpl->hw_op_hashsz;
1413        p->digest_size = tmpl->digest_size;
1414
1415        ahash = &p->alg;
1416        ahash->init = n2_hash_async_init;
1417        ahash->update = n2_hash_async_update;
1418        ahash->final = n2_hash_async_final;
1419        ahash->finup = n2_hash_async_finup;
1420        ahash->digest = n2_hash_async_digest;
1421        ahash->export = n2_hash_async_noexport;
1422        ahash->import = n2_hash_async_noimport;
1423
1424        halg = &ahash->halg;
1425        halg->digestsize = tmpl->digest_size;
1426
1427        base = &halg->base;
1428        snprintf(base->cra_name, CRYPTO_MAX_ALG_NAME, "%s", tmpl->name);
1429        snprintf(base->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s-n2", tmpl->name);
1430        base->cra_priority = N2_CRA_PRIORITY;
1431        base->cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY |
1432                          CRYPTO_ALG_NEED_FALLBACK;
1433        base->cra_blocksize = tmpl->block_size;
1434        base->cra_ctxsize = sizeof(struct n2_hash_ctx);
1435        base->cra_module = THIS_MODULE;
1436        base->cra_init = n2_hash_cra_init;
1437        base->cra_exit = n2_hash_cra_exit;
1438
1439        list_add(&p->entry, &ahash_algs);
1440        err = crypto_register_ahash(ahash);
1441        if (err) {
1442                pr_err("%s alg registration failed\n", base->cra_name);
1443                list_del(&p->entry);
1444                kfree(p);
1445        } else {
1446                pr_info("%s alg registered\n", base->cra_name);
1447        }
1448        if (!err && p->hmac_type != AUTH_TYPE_RESERVED)
1449                err = __n2_register_one_hmac(p);
1450        return err;
1451}
1452
1453static int n2_register_algs(void)
1454{
1455        int i, err = 0;
1456
1457        mutex_lock(&spu_lock);
1458        if (algs_registered++)
1459                goto out;
1460
1461        for (i = 0; i < NUM_HASH_TMPLS; i++) {
1462                err = __n2_register_one_ahash(&hash_tmpls[i]);
1463                if (err) {
1464                        __n2_unregister_algs();
1465                        goto out;
1466                }
1467        }
1468        for (i = 0; i < NUM_CIPHER_TMPLS; i++) {
1469                err = __n2_register_one_skcipher(&skcipher_tmpls[i]);
1470                if (err) {
1471                        __n2_unregister_algs();
1472                        goto out;
1473                }
1474        }
1475
1476out:
1477        mutex_unlock(&spu_lock);
1478        return err;
1479}
1480
1481static void n2_unregister_algs(void)
1482{
1483        mutex_lock(&spu_lock);
1484        if (!--algs_registered)
1485                __n2_unregister_algs();
1486        mutex_unlock(&spu_lock);
1487}
1488
1489/* To map CWQ queues to interrupt sources, the hypervisor API provides
1490 * a devino.  This isn't very useful to us because all of the
1491 * interrupts listed in the device_node have been translated to
1492 * Linux virtual IRQ cookie numbers.
1493 *
1494 * So we have to back-translate, going through the 'intr' and 'ino'
1495 * property tables of the n2cp MDESC node, matching them against the OF
1496 * 'interrupts' property entries, in order to figure out which
1497 * devino goes to which already-translated IRQ.
1498 */
1499static int find_devino_index(struct platform_device *dev, struct spu_mdesc_info *ip,
1500                             unsigned long dev_ino)
1501{
1502        const unsigned int *dev_intrs;
1503        unsigned int intr;
1504        int i;
1505
1506        for (i = 0; i < ip->num_intrs; i++) {
1507                if (ip->ino_table[i].ino == dev_ino)
1508                        break;
1509        }
1510        if (i == ip->num_intrs)
1511                return -ENODEV;
1512
1513        intr = ip->ino_table[i].intr;
1514
1515        dev_intrs = of_get_property(dev->dev.of_node, "interrupts", NULL);
1516        if (!dev_intrs)
1517                return -ENODEV;
1518
1519        for (i = 0; i < dev->archdata.num_irqs; i++) {
1520                if (dev_intrs[i] == intr)
1521                        return i;
1522        }
1523
1524        return -ENODEV;
1525}
1526
1527static int spu_map_ino(struct platform_device *dev, struct spu_mdesc_info *ip,
1528                       const char *irq_name, struct spu_queue *p,
1529                       irq_handler_t handler)
1530{
1531        unsigned long herr;
1532        int index;
1533
1534        herr = sun4v_ncs_qhandle_to_devino(p->qhandle, &p->devino);
1535        if (herr)
1536                return -EINVAL;
1537
1538        index = find_devino_index(dev, ip, p->devino);
1539        if (index < 0)
1540                return index;
1541
1542        p->irq = dev->archdata.irqs[index];
1543
1544        snprintf(p->irq_name, sizeof(p->irq_name), "%s-%d", irq_name, index);
1545
1546        return request_irq(p->irq, handler, 0, p->irq_name, p);
1547}
1548
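/* Backing storage for the hardware queues comes from two dedicated slab
 * caches, indexed by q_type - 1 (MAU vs. CWQ).  Each object is a whole
 * queue (NUM_ENTRIES * ENTRY_SIZE bytes), aligned to the entry size, as
 * set up in queue_cache_init() below.
 */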
1549static struct kmem_cache *queue_cache[2];
1550
1551static void *new_queue(unsigned long q_type)
1552{
1553        return kmem_cache_zalloc(queue_cache[q_type - 1], GFP_KERNEL);
1554}
1555
1556static void free_queue(void *p, unsigned long q_type)
1557{
1558        kmem_cache_free(queue_cache[q_type - 1], p);
1559}
1560
1561static int queue_cache_init(void)
1562{
1563        if (!queue_cache[HV_NCS_QTYPE_MAU - 1])
1564                queue_cache[HV_NCS_QTYPE_MAU - 1] =
1565                        kmem_cache_create("mau_queue",
1566                                          (MAU_NUM_ENTRIES *
1567                                           MAU_ENTRY_SIZE),
1568                                          MAU_ENTRY_SIZE, 0, NULL);
1569        if (!queue_cache[HV_NCS_QTYPE_MAU - 1])
1570                return -ENOMEM;
1571
1572        if (!queue_cache[HV_NCS_QTYPE_CWQ - 1])
1573                queue_cache[HV_NCS_QTYPE_CWQ - 1] =
1574                        kmem_cache_create("cwq_queue",
1575                                          (CWQ_NUM_ENTRIES *
1576                                           CWQ_ENTRY_SIZE),
1577                                          CWQ_ENTRY_SIZE, 0, NULL);
1578        if (!queue_cache[HV_NCS_QTYPE_CWQ - 1]) {
1579                kmem_cache_destroy(queue_cache[HV_NCS_QTYPE_MAU - 1]);
1580                queue_cache[HV_NCS_QTYPE_MAU - 1] = NULL;
1581                return -ENOMEM;
1582        }
1583        return 0;
1584}
1585
1586static void queue_cache_destroy(void)
1587{
1588        kmem_cache_destroy(queue_cache[HV_NCS_QTYPE_MAU - 1]);
1589        kmem_cache_destroy(queue_cache[HV_NCS_QTYPE_CWQ - 1]);
1590        queue_cache[HV_NCS_QTYPE_MAU - 1] = NULL;
1591        queue_cache[HV_NCS_QTYPE_CWQ - 1] = NULL;
1592}
1593
1594static long spu_queue_register_workfn(void *arg)
1595{
1596        struct spu_qreg *qr = arg;
1597        struct spu_queue *p = qr->queue;
1598        unsigned long q_type = qr->type;
1599        unsigned long hv_ret;
1600
1601        hv_ret = sun4v_ncs_qconf(q_type, __pa(p->q),
1602                                 CWQ_NUM_ENTRIES, &p->qhandle);
1603        if (!hv_ret)
1604                sun4v_ncs_sethead_marker(p->qhandle, 0);
1605
1606        return hv_ret ? -EINVAL : 0;
1607}
1608
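/* Queue registration is pushed onto one of the CPUs that actually share
 * the queue (via work_on_cpu_safe()), presumably because the ncs_qconf
 * hypercall binds the queue to the crypto unit of the CPU issuing it.
 */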
1609static int spu_queue_register(struct spu_queue *p, unsigned long q_type)
1610{
1611        int cpu = cpumask_any_and(&p->sharing, cpu_online_mask);
1612        struct spu_qreg qr = { .queue = p, .type = q_type };
1613
1614        return work_on_cpu_safe(cpu, spu_queue_register_workfn, &qr);
1615}
1616
1617static int spu_queue_setup(struct spu_queue *p)
1618{
1619        int err;
1620
1621        p->q = new_queue(p->q_type);
1622        if (!p->q)
1623                return -ENOMEM;
1624
1625        err = spu_queue_register(p, p->q_type);
1626        if (err) {
1627                free_queue(p->q, p->q_type);
1628                p->q = NULL;
1629        }
1630
1631        return err;
1632}
1633
1634static void spu_queue_destroy(struct spu_queue *p)
1635{
1636        unsigned long hv_ret;
1637
1638        if (!p->q)
1639                return;
1640
1641        hv_ret = sun4v_ncs_qconf(p->q_type, p->qhandle, 0, &p->qhandle);
1642
1643        if (!hv_ret)
1644                free_queue(p->q, p->q_type);
1645}
1646
1647static void spu_list_destroy(struct list_head *list)
1648{
1649        struct spu_queue *p, *n;
1650
1651        list_for_each_entry_safe(p, n, list, list) {
1652                int i;
1653
1654                for (i = 0; i < NR_CPUS; i++) {
1655                        if (cpu_to_cwq[i] == p)
1656                                cpu_to_cwq[i] = NULL;
                        if (cpu_to_mau[i] == p)
                                cpu_to_mau[i] = NULL;
1657                }
1658
1659                if (p->irq) {
1660                        free_irq(p->irq, p);
1661                        p->irq = 0;
1662                }
1663                spu_queue_destroy(p);
1664                list_del(&p->list);
1665                kfree(p);
1666        }
1667}
1668
1669/* Walk the backward arcs of an 'exec-unit' node (CWQ or MAU),
1670 * gathering cpu membership information.
1671 */
1672static int spu_mdesc_walk_arcs(struct mdesc_handle *mdesc,
1673                               struct platform_device *dev,
1674                               u64 node, struct spu_queue *p,
1675                               struct spu_queue **table)
1676{
1677        u64 arc;
1678
1679        mdesc_for_each_arc(arc, mdesc, node, MDESC_ARC_TYPE_BACK) {
1680                u64 tgt = mdesc_arc_target(mdesc, arc);
1681                const char *name = mdesc_node_name(mdesc, tgt);
1682                const u64 *id;
1683
1684                if (strcmp(name, "cpu"))
1685                        continue;
1686                id = mdesc_get_property(mdesc, tgt, "id", NULL);
                if (!id) {
                        dev_err(&dev->dev, "%pOF: cpu node has no 'id' property.\n",
                                dev->dev.of_node);
                        return -EINVAL;
                }
1687                if (table[*id] != NULL) {
1688                        dev_err(&dev->dev, "%pOF: SPU cpu slot already set.\n",
1689                                dev->dev.of_node);
1690                        return -EINVAL;
1691                }
1692                cpumask_set_cpu(*id, &p->sharing);
1693                table[*id] = p;
1694        }
1695        return 0;
1696}
1697
1698/* Process an 'exec-unit' MDESC node of type 'cwq' or 'mau'.  */
1699static int handle_exec_unit(struct spu_mdesc_info *ip, struct list_head *list,
1700                            struct platform_device *dev, struct mdesc_handle *mdesc,
1701                            u64 node, const char *iname, unsigned long q_type,
1702                            irq_handler_t handler, struct spu_queue **table)
1703{
1704        struct spu_queue *p;
1705        int err;
1706
1707        p = kzalloc(sizeof(struct spu_queue), GFP_KERNEL);
1708        if (!p) {
1709                dev_err(&dev->dev, "%pOF: Could not allocate SPU queue.\n",
1710                        dev->dev.of_node);
1711                return -ENOMEM;
1712        }
1713
1714        cpumask_clear(&p->sharing);
1715        spin_lock_init(&p->lock);
1716        p->q_type = q_type;
1717        INIT_LIST_HEAD(&p->jobs);
1718        list_add(&p->list, list);
1719
1720        err = spu_mdesc_walk_arcs(mdesc, dev, node, p, table);
1721        if (err)
1722                return err;
1723
1724        err = spu_queue_setup(p);
1725        if (err)
1726                return err;
1727
1728        return spu_map_ino(dev, ip, iname, p, handler);
1729}
1730
1731static int spu_mdesc_scan(struct mdesc_handle *mdesc, struct platform_device *dev,
1732                          struct spu_mdesc_info *ip, struct list_head *list,
1733                          const char *exec_name, unsigned long q_type,
1734                          irq_handler_t handler, struct spu_queue **table)
1735{
1736        int err = 0;
1737        u64 node;
1738
1739        mdesc_for_each_node_by_name(mdesc, node, "exec-unit") {
1740                const char *type;
1741
1742                type = mdesc_get_property(mdesc, node, "type", NULL);
1743                if (!type || strcmp(type, exec_name))
1744                        continue;
1745
1746                err = handle_exec_unit(ip, list, dev, mdesc, node,
1747                                       exec_name, q_type, handler, table);
1748                if (err) {
1749                        spu_list_destroy(list);
1750                        break;
1751                }
1752        }
1753
1754        return err;
1755}
1756
1757static int get_irq_props(struct mdesc_handle *mdesc, u64 node,
1758                         struct spu_mdesc_info *ip)
1759{
1760        const u64 *ino;
1761        int ino_len;
1762        int i;
1763
1764        ino = mdesc_get_property(mdesc, node, "ino", &ino_len);
1765        if (!ino) {
1766                pr_err("MDESC node has no 'ino' property\n");
1767                return -ENODEV;
1768        }
1769
1770        ip->num_intrs = ino_len / sizeof(u64);
1771        ip->ino_table = kzalloc((sizeof(struct ino_blob) *
1772                                 ip->num_intrs),
1773                                GFP_KERNEL);
1774        if (!ip->ino_table)
1775                return -ENOMEM;
1776
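        /*
         * No 'intr' property is read here; the OF 'interrupts' entries of
         * these virtual devices appear to be 1-based indexes into the MDESC
         * 'ino' list, so intr is synthesized as i + 1 to match what
         * find_devino_index() compares against dev_intrs[].
         */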
1777        for (i = 0; i < ip->num_intrs; i++) {
1778                struct ino_blob *b = &ip->ino_table[i];
1779                b->intr = i + 1;
1780                b->ino = ino[i];
1781        }
1782
1783        return 0;
1784}
1785
1786static int grab_mdesc_irq_props(struct mdesc_handle *mdesc,
1787                                struct platform_device *dev,
1788                                struct spu_mdesc_info *ip,
1789                                const char *node_name)
1790{
1791        const unsigned int *reg;
1792        u64 node;
1793
1794        reg = of_get_property(dev->dev.of_node, "reg", NULL);
1795        if (!reg)
1796                return -ENODEV;
1797
1798        mdesc_for_each_node_by_name(mdesc, node, "virtual-device") {
1799                const char *name;
1800                const u64 *chdl;
1801
1802                name = mdesc_get_property(mdesc, node, "name", NULL);
1803                if (!name || strcmp(name, node_name))
1804                        continue;
1805                chdl = mdesc_get_property(mdesc, node, "cfg-handle", NULL);
1806                if (!chdl || (*chdl != *reg))
1807                        continue;
1808                ip->cfg_handle = *chdl;
1809                return get_irq_props(mdesc, node, ip);
1810        }
1811
1812        return -ENODEV;
1813}
1814
1815static unsigned long n2_spu_hvapi_major;
1816static unsigned long n2_spu_hvapi_minor;
1817
1818static int n2_spu_hvapi_register(void)
1819{
1820        int err;
1821
1822        n2_spu_hvapi_major = 2;
1823        n2_spu_hvapi_minor = 0;
1824
1825        err = sun4v_hvapi_register(HV_GRP_NCS,
1826                                   n2_spu_hvapi_major,
1827                                   &n2_spu_hvapi_minor);
1828
1829        if (!err)
1830                pr_info("Registered NCS HVAPI version %lu.%lu\n",
1831                        n2_spu_hvapi_major,
1832                        n2_spu_hvapi_minor);
1833
1834        return err;
1835}
1836
1837static void n2_spu_hvapi_unregister(void)
1838{
1839        sun4v_hvapi_unregister(HV_GRP_NCS);
1840}
1841
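/* Resources shared by all n2cp/ncp instances - the NCS HVAPI
 * registration, the queue slab caches and the per-cpu queue tables -
 * are reference counted here: the first probe allocates them, the last
 * remove releases them.
 */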
1842static int global_ref;
1843
1844static int grab_global_resources(void)
1845{
1846        int err = 0;
1847
1848        mutex_lock(&spu_lock);
1849
1850        if (global_ref++)
1851                goto out;
1852
1853        err = n2_spu_hvapi_register();
1854        if (err)
1855                goto out;
1856
1857        err = queue_cache_init();
1858        if (err)
1859                goto out_hvapi_release;
1860
1861        err = -ENOMEM;
1862        cpu_to_cwq = kcalloc(NR_CPUS, sizeof(struct spu_queue *),
1863                             GFP_KERNEL);
1864        if (!cpu_to_cwq)
1865                goto out_queue_cache_destroy;
1866
1867        cpu_to_mau = kcalloc(NR_CPUS, sizeof(struct spu_queue *),
1868                             GFP_KERNEL);
1869        if (!cpu_to_mau)
1870                goto out_free_cwq_table;
1871
1872        err = 0;
1873
1874out:
1875        if (err)
1876                global_ref--;
1877        mutex_unlock(&spu_lock);
1878        return err;
1879
1880out_free_cwq_table:
1881        kfree(cpu_to_cwq);
1882        cpu_to_cwq = NULL;
1883
1884out_queue_cache_destroy:
1885        queue_cache_destroy();
1886
1887out_hvapi_release:
1888        n2_spu_hvapi_unregister();
1889        goto out;
1890}
1891
1892static void release_global_resources(void)
1893{
1894        mutex_lock(&spu_lock);
1895        if (!--global_ref) {
1896                kfree(cpu_to_cwq);
1897                cpu_to_cwq = NULL;
1898
1899                kfree(cpu_to_mau);
1900                cpu_to_mau = NULL;
1901
1902                queue_cache_destroy();
1903                n2_spu_hvapi_unregister();
1904        }
1905        mutex_unlock(&spu_lock);
1906}
1907
1908static struct n2_crypto *alloc_n2cp(void)
1909{
1910        struct n2_crypto *np = kzalloc(sizeof(struct n2_crypto), GFP_KERNEL);
1911
1912        if (np)
1913                INIT_LIST_HEAD(&np->cwq_list);
1914
1915        return np;
1916}
1917
1918static void free_n2cp(struct n2_crypto *np)
1919{
1920        kfree(np->cwq_info.ino_table);
1921        np->cwq_info.ino_table = NULL;
1922
1923        kfree(np);
1924}
1925
1926static void n2_spu_driver_version(void)
1927{
1928        static int n2_spu_version_printed;
1929
1930        if (n2_spu_version_printed++ == 0)
1931                pr_info("%s", version);
1932}
1933
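/* n2cp probe sequence: print the driver banner, allocate the per-device
 * state, take a reference on the shared global resources, grab the MDESC
 * to recover the devino/IRQ mapping, scan the 'cwq' exec-units into
 * per-cpu queues, and finally register the crypto algorithms.
 */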
1934static int n2_crypto_probe(struct platform_device *dev)
1935{
1936        struct mdesc_handle *mdesc;
1937        struct n2_crypto *np;
1938        int err;
1939
1940        n2_spu_driver_version();
1941
1942        pr_info("Found N2CP at %pOF\n", dev->dev.of_node);
1943
1944        np = alloc_n2cp();
1945        if (!np) {
1946                dev_err(&dev->dev, "%pOF: Unable to allocate n2cp.\n",
1947                        dev->dev.of_node);
1948                return -ENOMEM;
1949        }
1950
1951        err = grab_global_resources();
1952        if (err) {
1953                dev_err(&dev->dev, "%pOF: Unable to grab global resources.\n",
1954                        dev->dev.of_node);
1955                goto out_free_n2cp;
1956        }
1957
1958        mdesc = mdesc_grab();
1959
1960        if (!mdesc) {
1961                dev_err(&dev->dev, "%pOF: Unable to grab MDESC.\n",
1962                        dev->dev.of_node);
1963                err = -ENODEV;
1964                goto out_free_global;
1965        }
1966        err = grab_mdesc_irq_props(mdesc, dev, &np->cwq_info, "n2cp");
1967        if (err) {
1968                dev_err(&dev->dev, "%pOF: Unable to grab IRQ props.\n",
1969                        dev->dev.of_node);
1970                mdesc_release(mdesc);
1971                goto out_free_global;
1972        }
1973
1974        err = spu_mdesc_scan(mdesc, dev, &np->cwq_info, &np->cwq_list,
1975                             "cwq", HV_NCS_QTYPE_CWQ, cwq_intr,
1976                             cpu_to_cwq);
1977        mdesc_release(mdesc);
1978
1979        if (err) {
1980                dev_err(&dev->dev, "%pOF: CWQ MDESC scan failed.\n",
1981                        dev->dev.of_node);
1982                goto out_free_global;
1983        }
1984
1985        err = n2_register_algs();
1986        if (err) {
1987                dev_err(&dev->dev, "%pOF: Unable to register algorithms.\n",
1988                        dev->dev.of_node);
1989                goto out_free_spu_list;
1990        }
1991
1992        dev_set_drvdata(&dev->dev, np);
1993
1994        return 0;
1995
1996out_free_spu_list:
1997        spu_list_destroy(&np->cwq_list);
1998
1999out_free_global:
2000        release_global_resources();
2001
2002out_free_n2cp:
2003        free_n2cp(np);
2004
2005        return err;
2006}
2007
2008static int n2_crypto_remove(struct platform_device *dev)
2009{
2010        struct n2_crypto *np = dev_get_drvdata(&dev->dev);
2011
2012        n2_unregister_algs();
2013
2014        spu_list_destroy(&np->cwq_list);
2015
2016        release_global_resources();
2017
2018        free_n2cp(np);
2019
2020        return 0;
2021}
2022
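/* The ncp (MAU, modular arithmetic unit) device is handled the same way
 * as n2cp, except that no crypto algorithms are registered here; only
 * the MAU work queues and their interrupts are set up.
 */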
2023static struct n2_mau *alloc_ncp(void)
2024{
2025        struct n2_mau *mp = kzalloc(sizeof(struct n2_mau), GFP_KERNEL);
2026
2027        if (mp)
2028                INIT_LIST_HEAD(&mp->mau_list);
2029
2030        return mp;
2031}
2032
2033static void free_ncp(struct n2_mau *mp)
2034{
2035        kfree(mp->mau_info.ino_table);
2036        mp->mau_info.ino_table = NULL;
2037
2038        kfree(mp);
2039}
2040
2041static int n2_mau_probe(struct platform_device *dev)
2042{
2043        struct mdesc_handle *mdesc;
2044        struct n2_mau *mp;
2045        int err;
2046
2047        n2_spu_driver_version();
2048
2049        pr_info("Found NCP at %pOF\n", dev->dev.of_node);
2050
2051        mp = alloc_ncp();
2052        if (!mp) {
2053                dev_err(&dev->dev, "%pOF: Unable to allocate ncp.\n",
2054                        dev->dev.of_node);
2055                return -ENOMEM;
2056        }
2057
2058        err = grab_global_resources();
2059        if (err) {
2060                dev_err(&dev->dev, "%pOF: Unable to grab global resources.\n",
2061                        dev->dev.of_node);
2062                goto out_free_ncp;
2063        }
2064
2065        mdesc = mdesc_grab();
2066
2067        if (!mdesc) {
2068                dev_err(&dev->dev, "%pOF: Unable to grab MDESC.\n",
2069                        dev->dev.of_node);
2070                err = -ENODEV;
2071                goto out_free_global;
2072        }
2073
2074        err = grab_mdesc_irq_props(mdesc, dev, &mp->mau_info, "ncp");
2075        if (err) {
2076                dev_err(&dev->dev, "%pOF: Unable to grab IRQ props.\n",
2077                        dev->dev.of_node);
2078                mdesc_release(mdesc);
2079                goto out_free_global;
2080        }
2081
2082        err = spu_mdesc_scan(mdesc, dev, &mp->mau_info, &mp->mau_list,
2083                             "mau", HV_NCS_QTYPE_MAU, mau_intr,
2084                             cpu_to_mau);
2085        mdesc_release(mdesc);
2086
2087        if (err) {
2088                dev_err(&dev->dev, "%pOF: MAU MDESC scan failed.\n",
2089                        dev->dev.of_node);
2090                goto out_free_global;
2091        }
2092
2093        dev_set_drvdata(&dev->dev, mp);
2094
2095        return 0;
2096
2097out_free_global:
2098        release_global_resources();
2099
2100out_free_ncp:
2101        free_ncp(mp);
2102
2103        return err;
2104}
2105
2106static int n2_mau_remove(struct platform_device *dev)
2107{
2108        struct n2_mau *mp = dev_get_drvdata(&dev->dev);
2109
2110        spu_list_destroy(&mp->mau_list);
2111
2112        release_global_resources();
2113
2114        free_ncp(mp);
2115
2116        return 0;
2117}
2118
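/* The match tables below cover the CWQ and MAU nodes across the chip
 * generations these units shipped in; going by the compatible strings,
 * 'n2', 'vf' and 'kt' correspond to the UltraSPARC T2, T2+ (Victoria
 * Falls) and T3 (KT) parts respectively.
 */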
2119static const struct of_device_id n2_crypto_match[] = {
2120        {
2121                .name = "n2cp",
2122                .compatible = "SUNW,n2-cwq",
2123        },
2124        {
2125                .name = "n2cp",
2126                .compatible = "SUNW,vf-cwq",
2127        },
2128        {
2129                .name = "n2cp",
2130                .compatible = "SUNW,kt-cwq",
2131        },
2132        {},
2133};
2134
2135MODULE_DEVICE_TABLE(of, n2_crypto_match);
2136
2137static struct platform_driver n2_crypto_driver = {
2138        .driver = {
2139                .name           =       "n2cp",
2140                .of_match_table =       n2_crypto_match,
2141        },
2142        .probe          =       n2_crypto_probe,
2143        .remove         =       n2_crypto_remove,
2144};
2145
2146static const struct of_device_id n2_mau_match[] = {
2147        {
2148                .name = "ncp",
2149                .compatible = "SUNW,n2-mau",
2150        },
2151        {
2152                .name = "ncp",
2153                .compatible = "SUNW,vf-mau",
2154        },
2155        {
2156                .name = "ncp",
2157                .compatible = "SUNW,kt-mau",
2158        },
2159        {},
2160};
2161
2162MODULE_DEVICE_TABLE(of, n2_mau_match);
2163
2164static struct platform_driver n2_mau_driver = {
2165        .driver = {
2166                .name           =       "ncp",
2167                .of_match_table =       n2_mau_match,
2168        },
2169        .probe          =       n2_mau_probe,
2170        .remove         =       n2_mau_remove,
2171};
2172
2173static struct platform_driver * const drivers[] = {
2174        &n2_crypto_driver,
2175        &n2_mau_driver,
2176};
2177
2178static int __init n2_init(void)
2179{
2180        return platform_register_drivers(drivers, ARRAY_SIZE(drivers));
2181}
2182
2183static void __exit n2_exit(void)
2184{
2185        platform_unregister_drivers(drivers, ARRAY_SIZE(drivers));
2186}
2187
2188module_init(n2_init);
2189module_exit(n2_exit);
2190