linux/drivers/crypto/ccp/ccp-dev-v5.c
/*
 * AMD Cryptographic Coprocessor (CCP) driver
 *
 * Copyright (C) 2016,2017 Advanced Micro Devices, Inc.
 *
 * Author: Gary R Hook <gary.hook@amd.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/kthread.h>
#include <linux/debugfs.h>
#include <linux/dma-mapping.h>
#include <linux/interrupt.h>
#include <linux/compiler.h>
#include <linux/ccp.h>

#include "ccp-dev.h"

/* Allocate the requested number of contiguous LSB slots
 * from the LSB bitmap. Look in the private range for this
 * queue first; failing that, check the public area.
 * If no space is available, block until some is freed.
 * Return: first slot number, or 0 if the wait was interrupted
 */
static u32 ccp_lsb_alloc(struct ccp_cmd_queue *cmd_q, unsigned int count)
{
        struct ccp_device *ccp;
        int start;

        /* First look at the map for the queue */
        if (cmd_q->lsb >= 0) {
                start = (u32)bitmap_find_next_zero_area(cmd_q->lsbmap,
                                                        LSB_SIZE,
                                                        0, count, 0);
                if (start < LSB_SIZE) {
                        bitmap_set(cmd_q->lsbmap, start, count);
                        return start + cmd_q->lsb * LSB_SIZE;
                }
        }

        /* No joy; try to get an entry from the shared blocks */
        ccp = cmd_q->ccp;
        for (;;) {
                mutex_lock(&ccp->sb_mutex);

                start = (u32)bitmap_find_next_zero_area(ccp->lsbmap,
                                                        MAX_LSB_CNT * LSB_SIZE,
                                                        0,
                                                        count, 0);
                if (start < MAX_LSB_CNT * LSB_SIZE) {
                        bitmap_set(ccp->lsbmap, start, count);

                        mutex_unlock(&ccp->sb_mutex);
                        return start;
                }

                ccp->sb_avail = 0;

                mutex_unlock(&ccp->sb_mutex);

                /* Wait for KSB entries to become available */
                if (wait_event_interruptible(ccp->sb_queue, ccp->sb_avail))
                        return 0;
        }
}
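
/* Worked example of the slot numbering above (illustrative; assumes
 * LSB_SIZE == 16 slots per segment, per ccp-dev.h): a queue whose
 * private segment is cmd_q->lsb == 2 and which finds free space at
 * in-segment offset 4 returns the global slot number 4 + 2 * 16 == 36.
 * Shared allocations index ccp->lsbmap globally, so the value returned
 * from that path needs no conversion.
 */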

/* Free a number of LSB slots from the bitmap, starting at
 * the indicated starting slot number.
 */
static void ccp_lsb_free(struct ccp_cmd_queue *cmd_q, unsigned int start,
                         unsigned int count)
{
        if (!start)
                return;

        if (cmd_q->lsb == start / LSB_SIZE) {
                /* An entry from the private LSB; convert the global
                 * slot number back to an offset within this segment
                 */
                bitmap_clear(cmd_q->lsbmap, start % LSB_SIZE, count);
        } else {
                /* From the shared LSBs */
                struct ccp_device *ccp = cmd_q->ccp;

                mutex_lock(&ccp->sb_mutex);
                bitmap_clear(ccp->lsbmap, start, count);
                ccp->sb_avail = 1;
                mutex_unlock(&ccp->sb_mutex);
                wake_up_interruptible_all(&ccp->sb_queue);
        }
}

/* CCP version 5: Union to define the function field (cmd_reg1/dword0) */
union ccp_function {
        struct {
                u16 size:7;
                u16 encrypt:1;
                u16 mode:5;
                u16 type:2;
        } aes;
        struct {
                u16 size:7;
                u16 encrypt:1;
                u16 rsvd:5;
                u16 type:2;
        } aes_xts;
        struct {
                u16 size:7;
                u16 encrypt:1;
                u16 mode:5;
                u16 type:2;
        } des3;
        struct {
                u16 rsvd1:10;
                u16 type:4;
                u16 rsvd2:1;
        } sha;
        struct {
                u16 mode:3;
                u16 size:12;
        } rsa;
        struct {
                u16 byteswap:2;
                u16 bitwise:3;
                u16 reflect:2;
                u16 rsvd:8;
        } pt;
        struct {
                u16 rsvd:13;
        } zlib;
        struct {
                u16 size:10;
                u16 type:2;
                u16 mode:3;
        } ecc;
        u16 raw;
};
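
/* Example of building a function word (a sketch; the enum values come
 * from <linux/ccp.h>, and the bitfield layout is relied upon to match
 * the hardware's cmd_reg1 definition). For an AES-256 CBC encrypt:
 *
 *      union ccp_function function;
 *
 *      function.raw = 0;
 *      function.aes.encrypt = 1;
 *      function.aes.mode = CCP_AES_MODE_CBC;
 *      function.aes.type = CCP_AES_TYPE_256;
 *
 * function.raw then holds the value programmed via CCP5_CMD_FUNCTION().
 */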

#define CCP_AES_SIZE(p)         ((p)->aes.size)
#define CCP_AES_ENCRYPT(p)      ((p)->aes.encrypt)
#define CCP_AES_MODE(p)         ((p)->aes.mode)
#define CCP_AES_TYPE(p)         ((p)->aes.type)
#define CCP_XTS_SIZE(p)         ((p)->aes_xts.size)
#define CCP_XTS_TYPE(p)         ((p)->aes_xts.type)
#define CCP_XTS_ENCRYPT(p)      ((p)->aes_xts.encrypt)
#define CCP_DES3_SIZE(p)        ((p)->des3.size)
#define CCP_DES3_ENCRYPT(p)     ((p)->des3.encrypt)
#define CCP_DES3_MODE(p)        ((p)->des3.mode)
#define CCP_DES3_TYPE(p)        ((p)->des3.type)
#define CCP_SHA_TYPE(p)         ((p)->sha.type)
#define CCP_RSA_SIZE(p)         ((p)->rsa.size)
#define CCP_PT_BYTESWAP(p)      ((p)->pt.byteswap)
#define CCP_PT_BITWISE(p)       ((p)->pt.bitwise)
#define CCP_ECC_MODE(p)         ((p)->ecc.mode)
#define CCP_ECC_AFFINE(p)       ((p)->ecc.one)

/* Word 0 */
#define CCP5_CMD_DW0(p)         ((p)->dw0)
#define CCP5_CMD_SOC(p)         (CCP5_CMD_DW0(p).soc)
#define CCP5_CMD_IOC(p)         (CCP5_CMD_DW0(p).ioc)
#define CCP5_CMD_INIT(p)        (CCP5_CMD_DW0(p).init)
#define CCP5_CMD_EOM(p)         (CCP5_CMD_DW0(p).eom)
#define CCP5_CMD_FUNCTION(p)    (CCP5_CMD_DW0(p).function)
#define CCP5_CMD_ENGINE(p)      (CCP5_CMD_DW0(p).engine)
#define CCP5_CMD_PROT(p)        (CCP5_CMD_DW0(p).prot)

/* Word 1 */
#define CCP5_CMD_DW1(p)         ((p)->length)
#define CCP5_CMD_LEN(p)         (CCP5_CMD_DW1(p))

/* Word 2 */
#define CCP5_CMD_DW2(p)         ((p)->src_lo)
#define CCP5_CMD_SRC_LO(p)      (CCP5_CMD_DW2(p))

/* Word 3 */
#define CCP5_CMD_DW3(p)         ((p)->dw3)
#define CCP5_CMD_SRC_MEM(p)     ((p)->dw3.src_mem)
#define CCP5_CMD_SRC_HI(p)      ((p)->dw3.src_hi)
#define CCP5_CMD_LSB_ID(p)      ((p)->dw3.lsb_cxt_id)
#define CCP5_CMD_FIX_SRC(p)     ((p)->dw3.fixed)

/* Words 4/5 */
#define CCP5_CMD_DW4(p)         ((p)->dw4)
#define CCP5_CMD_DST_LO(p)      (CCP5_CMD_DW4(p).dst_lo)
#define CCP5_CMD_DW5(p)         ((p)->dw5.fields.dst_hi)
#define CCP5_CMD_DST_HI(p)      (CCP5_CMD_DW5(p))
#define CCP5_CMD_DST_MEM(p)     ((p)->dw5.fields.dst_mem)
#define CCP5_CMD_FIX_DST(p)     ((p)->dw5.fields.fixed)
#define CCP5_CMD_SHA_LO(p)      ((p)->dw4.sha_len_lo)
#define CCP5_CMD_SHA_HI(p)      ((p)->dw5.sha_len_hi)

/* Words 6/7 */
#define CCP5_CMD_DW6(p)         ((p)->key_lo)
#define CCP5_CMD_KEY_LO(p)      (CCP5_CMD_DW6(p))
#define CCP5_CMD_DW7(p)         ((p)->dw7)
#define CCP5_CMD_KEY_HI(p)      ((p)->dw7.key_hi)
#define CCP5_CMD_KEY_MEM(p)     ((p)->dw7.key_mem)

static inline u32 low_address(unsigned long addr)
{
        return (u64)addr & 0xffffffff;
}

static inline u32 high_address(unsigned long addr)
{
        return ((u64)addr >> 32) & 0x0000ffff;
}

static unsigned int ccp5_get_free_slots(struct ccp_cmd_queue *cmd_q)
{
        unsigned int head_idx, n;
        u32 head_lo, queue_start;

        queue_start = low_address(cmd_q->qdma_tail);
        head_lo = ioread32(cmd_q->reg_head_lo);
        head_idx = (head_lo - queue_start) / sizeof(struct ccp5_desc);

        n = head_idx + COMMANDS_PER_QUEUE - cmd_q->qidx - 1;

        return n % COMMANDS_PER_QUEUE; /* Always one unused spot */
}
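
/* Worked example of the arithmetic above (illustrative; assume
 * COMMANDS_PER_QUEUE == 16): with the hardware head at index 5 and the
 * driver tail at qidx == 9, n = (5 + 16 - 9 - 1) % 16 == 11, so eleven
 * more descriptors fit. One slot is always kept unused so that
 * head == tail unambiguously means the queue is empty rather than full.
 */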

static int ccp5_do_cmd(struct ccp5_desc *desc,
                       struct ccp_cmd_queue *cmd_q)
{
        __le32 *mP;
        u32 *dP;
        u32 tail;
        int i;
        int ret = 0;

        cmd_q->total_ops++;

        if (CCP5_CMD_SOC(desc)) {
                CCP5_CMD_IOC(desc) = 1;
                CCP5_CMD_SOC(desc) = 0;
        }
        mutex_lock(&cmd_q->q_mutex);

        /* The queue lives in DMA memory that the engine reads as
         * little-endian, so it carries the __le32 annotation; convert
         * each CPU-endian word of the on-stack descriptor as it is
         * copied in
         */
        mP = (__le32 *) &cmd_q->qbase[cmd_q->qidx];
        dP = (u32 *) desc;
        for (i = 0; i < 8; i++)
                mP[i] = cpu_to_le32(dP[i]); /* handle endianness */

        cmd_q->qidx = (cmd_q->qidx + 1) % COMMANDS_PER_QUEUE;

        /* The data used by this command must be flushed to memory */
        wmb();

        /* Write the new tail address back to the queue register */
        tail = low_address(cmd_q->qdma_tail + cmd_q->qidx * Q_DESC_SIZE);
        iowrite32(tail, cmd_q->reg_tail_lo);

        /* Turn the queue back on using our cached control register */
        iowrite32(cmd_q->qcontrol | CMD5_Q_RUN, cmd_q->reg_control);
        mutex_unlock(&cmd_q->q_mutex);

        if (CCP5_CMD_IOC(desc)) {
                /* Wait for the job to complete */
                ret = wait_event_interruptible(cmd_q->int_queue,
                                               cmd_q->int_rcvd);
                if (ret || cmd_q->cmd_error) {
                        /* Log the error and flush the queue by
                         * moving the head pointer
                         */
                        if (cmd_q->cmd_error)
                                ccp_log_error(cmd_q->ccp,
                                              cmd_q->cmd_error);
                        iowrite32(tail, cmd_q->reg_head_lo);
                        if (!ret)
                                ret = -EIO;
                }
                cmd_q->int_rcvd = 0;
        }

        return ret;
}

static int ccp5_perform_aes(struct ccp_op *op)
{
        struct ccp5_desc desc;
        union ccp_function function;
        u32 key_addr = op->sb_key * LSB_ITEM_SIZE;

        op->cmd_q->total_aes_ops++;

        /* Zero out all the fields of the command desc */
        memset(&desc, 0, Q_DESC_SIZE);

        CCP5_CMD_ENGINE(&desc) = CCP_ENGINE_AES;

        CCP5_CMD_SOC(&desc) = op->soc;
        CCP5_CMD_IOC(&desc) = 1;
        CCP5_CMD_INIT(&desc) = op->init;
        CCP5_CMD_EOM(&desc) = op->eom;
        CCP5_CMD_PROT(&desc) = 0;

        function.raw = 0;
        CCP_AES_ENCRYPT(&function) = op->u.aes.action;
        CCP_AES_MODE(&function) = op->u.aes.mode;
        CCP_AES_TYPE(&function) = op->u.aes.type;
        CCP_AES_SIZE(&function) = op->u.aes.size;

        CCP5_CMD_FUNCTION(&desc) = function.raw;

        CCP5_CMD_LEN(&desc) = op->src.u.dma.length;

        CCP5_CMD_SRC_LO(&desc) = ccp_addr_lo(&op->src.u.dma);
        CCP5_CMD_SRC_HI(&desc) = ccp_addr_hi(&op->src.u.dma);
        CCP5_CMD_SRC_MEM(&desc) = CCP_MEMTYPE_SYSTEM;

        CCP5_CMD_DST_LO(&desc) = ccp_addr_lo(&op->dst.u.dma);
        CCP5_CMD_DST_HI(&desc) = ccp_addr_hi(&op->dst.u.dma);
        CCP5_CMD_DST_MEM(&desc) = CCP_MEMTYPE_SYSTEM;

        CCP5_CMD_KEY_LO(&desc) = lower_32_bits(key_addr);
        CCP5_CMD_KEY_HI(&desc) = 0;
        CCP5_CMD_KEY_MEM(&desc) = CCP_MEMTYPE_SB;
        CCP5_CMD_LSB_ID(&desc) = op->sb_ctx;

        return ccp5_do_cmd(&desc, op->cmd_q);
}
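
/* Example of the LSB byte addressing used above (illustrative; assumes
 * LSB_ITEM_SIZE == 32 bytes per slot, per ccp-dev.h): if ccp_lsb_alloc()
 * handed this queue sb_key == 36, the key material lives at byte offset
 * 36 * 32 == 1152 within the LSB, which is exactly the key_addr value
 * written into the descriptor's key words.
 */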

static int ccp5_perform_xts_aes(struct ccp_op *op)
{
        struct ccp5_desc desc;
        union ccp_function function;
        u32 key_addr = op->sb_key * LSB_ITEM_SIZE;

        op->cmd_q->total_xts_aes_ops++;

        /* Zero out all the fields of the command desc */
        memset(&desc, 0, Q_DESC_SIZE);

        CCP5_CMD_ENGINE(&desc) = CCP_ENGINE_XTS_AES_128;

        CCP5_CMD_SOC(&desc) = op->soc;
        CCP5_CMD_IOC(&desc) = 1;
        CCP5_CMD_INIT(&desc) = op->init;
        CCP5_CMD_EOM(&desc) = op->eom;
        CCP5_CMD_PROT(&desc) = 0;

        function.raw = 0;
        CCP_XTS_TYPE(&function) = op->u.xts.type;
        CCP_XTS_ENCRYPT(&function) = op->u.xts.action;
        CCP_XTS_SIZE(&function) = op->u.xts.unit_size;
        CCP5_CMD_FUNCTION(&desc) = function.raw;

        CCP5_CMD_LEN(&desc) = op->src.u.dma.length;

        CCP5_CMD_SRC_LO(&desc) = ccp_addr_lo(&op->src.u.dma);
        CCP5_CMD_SRC_HI(&desc) = ccp_addr_hi(&op->src.u.dma);
        CCP5_CMD_SRC_MEM(&desc) = CCP_MEMTYPE_SYSTEM;

        CCP5_CMD_DST_LO(&desc) = ccp_addr_lo(&op->dst.u.dma);
        CCP5_CMD_DST_HI(&desc) = ccp_addr_hi(&op->dst.u.dma);
        CCP5_CMD_DST_MEM(&desc) = CCP_MEMTYPE_SYSTEM;

        CCP5_CMD_KEY_LO(&desc) = lower_32_bits(key_addr);
        CCP5_CMD_KEY_HI(&desc) = 0;
        CCP5_CMD_KEY_MEM(&desc) = CCP_MEMTYPE_SB;
        CCP5_CMD_LSB_ID(&desc) = op->sb_ctx;

        return ccp5_do_cmd(&desc, op->cmd_q);
}

static int ccp5_perform_sha(struct ccp_op *op)
{
        struct ccp5_desc desc;
        union ccp_function function;

        op->cmd_q->total_sha_ops++;

        /* Zero out all the fields of the command desc */
        memset(&desc, 0, Q_DESC_SIZE);

        CCP5_CMD_ENGINE(&desc) = CCP_ENGINE_SHA;

        CCP5_CMD_SOC(&desc) = op->soc;
        CCP5_CMD_IOC(&desc) = 1;
        CCP5_CMD_INIT(&desc) = 1;
        CCP5_CMD_EOM(&desc) = op->eom;
        CCP5_CMD_PROT(&desc) = 0;

        function.raw = 0;
        CCP_SHA_TYPE(&function) = op->u.sha.type;
        CCP5_CMD_FUNCTION(&desc) = function.raw;

        CCP5_CMD_LEN(&desc) = op->src.u.dma.length;

        CCP5_CMD_SRC_LO(&desc) = ccp_addr_lo(&op->src.u.dma);
        CCP5_CMD_SRC_HI(&desc) = ccp_addr_hi(&op->src.u.dma);
        CCP5_CMD_SRC_MEM(&desc) = CCP_MEMTYPE_SYSTEM;

        CCP5_CMD_LSB_ID(&desc) = op->sb_ctx;

        if (op->eom) {
                CCP5_CMD_SHA_LO(&desc) = lower_32_bits(op->u.sha.msg_bits);
                CCP5_CMD_SHA_HI(&desc) = upper_32_bits(op->u.sha.msg_bits);
        } else {
                CCP5_CMD_SHA_LO(&desc) = 0;
                CCP5_CMD_SHA_HI(&desc) = 0;
        }

        return ccp5_do_cmd(&desc, op->cmd_q);
}
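
/* Illustrative final-pass example: hashing a 192-byte message ends with
 * an EOM descriptor whose sha_len words carry op->u.sha.msg_bits ==
 * 1536 (192 * 8), split into low and high halves. Intermediate passes
 * leave both words zero; the running digest is carried between passes
 * in the LSB context slot named by CCP5_CMD_LSB_ID().
 */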

static int ccp5_perform_des3(struct ccp_op *op)
{
        struct ccp5_desc desc;
        union ccp_function function;
        u32 key_addr = op->sb_key * LSB_ITEM_SIZE;

        op->cmd_q->total_3des_ops++;

        /* Zero out all the fields of the command desc */
        memset(&desc, 0, Q_DESC_SIZE);

        CCP5_CMD_ENGINE(&desc) = CCP_ENGINE_DES3;

        CCP5_CMD_SOC(&desc) = op->soc;
        CCP5_CMD_IOC(&desc) = 1;
        CCP5_CMD_INIT(&desc) = op->init;
        CCP5_CMD_EOM(&desc) = op->eom;
        CCP5_CMD_PROT(&desc) = 0;

        function.raw = 0;
        CCP_DES3_ENCRYPT(&function) = op->u.des3.action;
        CCP_DES3_MODE(&function) = op->u.des3.mode;
        CCP_DES3_TYPE(&function) = op->u.des3.type;
        CCP5_CMD_FUNCTION(&desc) = function.raw;

        CCP5_CMD_LEN(&desc) = op->src.u.dma.length;

        CCP5_CMD_SRC_LO(&desc) = ccp_addr_lo(&op->src.u.dma);
        CCP5_CMD_SRC_HI(&desc) = ccp_addr_hi(&op->src.u.dma);
        CCP5_CMD_SRC_MEM(&desc) = CCP_MEMTYPE_SYSTEM;

        CCP5_CMD_DST_LO(&desc) = ccp_addr_lo(&op->dst.u.dma);
        CCP5_CMD_DST_HI(&desc) = ccp_addr_hi(&op->dst.u.dma);
        CCP5_CMD_DST_MEM(&desc) = CCP_MEMTYPE_SYSTEM;

        CCP5_CMD_KEY_LO(&desc) = lower_32_bits(key_addr);
        CCP5_CMD_KEY_HI(&desc) = 0;
        CCP5_CMD_KEY_MEM(&desc) = CCP_MEMTYPE_SB;
        CCP5_CMD_LSB_ID(&desc) = op->sb_ctx;

        return ccp5_do_cmd(&desc, op->cmd_q);
}

static int ccp5_perform_rsa(struct ccp_op *op)
{
        struct ccp5_desc desc;
        union ccp_function function;

        op->cmd_q->total_rsa_ops++;

        /* Zero out all the fields of the command desc */
        memset(&desc, 0, Q_DESC_SIZE);

        CCP5_CMD_ENGINE(&desc) = CCP_ENGINE_RSA;

        CCP5_CMD_SOC(&desc) = op->soc;
        CCP5_CMD_IOC(&desc) = 1;
        CCP5_CMD_INIT(&desc) = 0;
        CCP5_CMD_EOM(&desc) = 1;
        CCP5_CMD_PROT(&desc) = 0;

        function.raw = 0;
        CCP_RSA_SIZE(&function) = (op->u.rsa.mod_size + 7) >> 3;
        CCP5_CMD_FUNCTION(&desc) = function.raw;

        CCP5_CMD_LEN(&desc) = op->u.rsa.input_len;

        /* Source is from external memory */
        CCP5_CMD_SRC_LO(&desc) = ccp_addr_lo(&op->src.u.dma);
        CCP5_CMD_SRC_HI(&desc) = ccp_addr_hi(&op->src.u.dma);
        CCP5_CMD_SRC_MEM(&desc) = CCP_MEMTYPE_SYSTEM;

        /* Destination is in external memory */
        CCP5_CMD_DST_LO(&desc) = ccp_addr_lo(&op->dst.u.dma);
        CCP5_CMD_DST_HI(&desc) = ccp_addr_hi(&op->dst.u.dma);
        CCP5_CMD_DST_MEM(&desc) = CCP_MEMTYPE_SYSTEM;

        /* Key (Exponent) is in external memory */
        CCP5_CMD_KEY_LO(&desc) = ccp_addr_lo(&op->exp.u.dma);
        CCP5_CMD_KEY_HI(&desc) = ccp_addr_hi(&op->exp.u.dma);
        CCP5_CMD_KEY_MEM(&desc) = CCP_MEMTYPE_SYSTEM;

        return ccp5_do_cmd(&desc, op->cmd_q);
}

static int ccp5_perform_passthru(struct ccp_op *op)
{
        struct ccp5_desc desc;
        union ccp_function function;
        struct ccp_dma_info *saddr = &op->src.u.dma;
        struct ccp_dma_info *daddr = &op->dst.u.dma;

        op->cmd_q->total_pt_ops++;

        memset(&desc, 0, Q_DESC_SIZE);

        CCP5_CMD_ENGINE(&desc) = CCP_ENGINE_PASSTHRU;

        CCP5_CMD_SOC(&desc) = 0;
        CCP5_CMD_IOC(&desc) = 1;
        CCP5_CMD_INIT(&desc) = 0;
        CCP5_CMD_EOM(&desc) = op->eom;
        CCP5_CMD_PROT(&desc) = 0;

        function.raw = 0;
        CCP_PT_BYTESWAP(&function) = op->u.passthru.byte_swap;
        CCP_PT_BITWISE(&function) = op->u.passthru.bit_mod;
        CCP5_CMD_FUNCTION(&desc) = function.raw;

        /* Take the length from whichever side resides in system memory */
        if (op->src.type == CCP_MEMTYPE_SYSTEM)
                CCP5_CMD_LEN(&desc) = saddr->length;
        else
                CCP5_CMD_LEN(&desc) = daddr->length;

        if (op->src.type == CCP_MEMTYPE_SYSTEM) {
                CCP5_CMD_SRC_LO(&desc) = ccp_addr_lo(&op->src.u.dma);
                CCP5_CMD_SRC_HI(&desc) = ccp_addr_hi(&op->src.u.dma);
                CCP5_CMD_SRC_MEM(&desc) = CCP_MEMTYPE_SYSTEM;

                if (op->u.passthru.bit_mod != CCP_PASSTHRU_BITWISE_NOOP)
                        CCP5_CMD_LSB_ID(&desc) = op->sb_key;
        } else {
                u32 key_addr = op->src.u.sb * CCP_SB_BYTES;

                CCP5_CMD_SRC_LO(&desc) = lower_32_bits(key_addr);
                CCP5_CMD_SRC_HI(&desc) = 0;
                CCP5_CMD_SRC_MEM(&desc) = CCP_MEMTYPE_SB;
        }

        if (op->dst.type == CCP_MEMTYPE_SYSTEM) {
                CCP5_CMD_DST_LO(&desc) = ccp_addr_lo(&op->dst.u.dma);
                CCP5_CMD_DST_HI(&desc) = ccp_addr_hi(&op->dst.u.dma);
                CCP5_CMD_DST_MEM(&desc) = CCP_MEMTYPE_SYSTEM;
        } else {
                u32 key_addr = op->dst.u.sb * CCP_SB_BYTES;

                CCP5_CMD_DST_LO(&desc) = lower_32_bits(key_addr);
                CCP5_CMD_DST_HI(&desc) = 0;
                CCP5_CMD_DST_MEM(&desc) = CCP_MEMTYPE_SB;
        }

        return ccp5_do_cmd(&desc, op->cmd_q);
}
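
/* Sketch of a typical passthru setup elsewhere in the driver
 * (hypothetical values): loading a key into the LSB pairs a
 * system-memory source with an SB destination and a 256-bit byteswap:
 *
 *      op.src.type = CCP_MEMTYPE_SYSTEM;
 *      op.dst.type = CCP_MEMTYPE_SB;
 *      op.u.passthru.byte_swap = CCP_PASSTHRU_BYTESWAP_256BIT;
 *      op.u.passthru.bit_mod = CCP_PASSTHRU_BITWISE_NOOP;
 *
 * The SB destination is then addressed as op.dst.u.sb * CCP_SB_BYTES.
 */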

static int ccp5_perform_ecc(struct ccp_op *op)
{
        struct ccp5_desc desc;
        union ccp_function function;

        op->cmd_q->total_ecc_ops++;

        /* Zero out all the fields of the command desc */
        memset(&desc, 0, Q_DESC_SIZE);

        CCP5_CMD_ENGINE(&desc) = CCP_ENGINE_ECC;

        CCP5_CMD_SOC(&desc) = 0;
        CCP5_CMD_IOC(&desc) = 1;
        CCP5_CMD_INIT(&desc) = 0;
        CCP5_CMD_EOM(&desc) = 1;
        CCP5_CMD_PROT(&desc) = 0;

        function.raw = 0;
        function.ecc.mode = op->u.ecc.function;
        CCP5_CMD_FUNCTION(&desc) = function.raw;

        CCP5_CMD_LEN(&desc) = op->src.u.dma.length;

        CCP5_CMD_SRC_LO(&desc) = ccp_addr_lo(&op->src.u.dma);
        CCP5_CMD_SRC_HI(&desc) = ccp_addr_hi(&op->src.u.dma);
        CCP5_CMD_SRC_MEM(&desc) = CCP_MEMTYPE_SYSTEM;

        CCP5_CMD_DST_LO(&desc) = ccp_addr_lo(&op->dst.u.dma);
        CCP5_CMD_DST_HI(&desc) = ccp_addr_hi(&op->dst.u.dma);
        CCP5_CMD_DST_MEM(&desc) = CCP_MEMTYPE_SYSTEM;

        return ccp5_do_cmd(&desc, op->cmd_q);
}

static int ccp_find_lsb_regions(struct ccp_cmd_queue *cmd_q, u64 status)
{
        int q_mask = 1 << cmd_q->id;
        int queues = 0;
        int j;

        /* Build a bit mask to know which LSBs this queue has access to.
         * Don't bother with segment 0 as it has special privileges.
         */
        for (j = 1; j < MAX_LSB_CNT; j++) {
                if (status & q_mask)
                        bitmap_set(cmd_q->lsbmask, j, 1);
                status >>= LSB_REGION_WIDTH;
        }
        queues = bitmap_weight(cmd_q->lsbmask, MAX_LSB_CNT);
        dev_dbg(cmd_q->ccp->dev, "Queue %d can access %d LSB regions\n",
                cmd_q->id, queues);

        return queues ? 0 : -EINVAL;
}
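
/* Illustrative decoding note (assumes LSB_REGION_WIDTH == 5 from
 * ccp-dev.h): the status word is a sequence of 5-bit fields, one per
 * LSB segment, in which bit N grants queue N access to that segment.
 * Each pass of the loop above tests this queue's bit in the current
 * field, records the accessible segment in cmd_q->lsbmask, and shifts
 * the next field into position.
 */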

static int ccp_find_and_assign_lsb_to_q(struct ccp_device *ccp,
                                        int lsb_cnt, int n_lsbs,
                                        unsigned long *lsb_pub)
{
        DECLARE_BITMAP(qlsb, MAX_LSB_CNT);
        int bitno;
        int qlsb_wgt;
        int i;

        /* For each queue:
         * If the count of potential LSBs available to a queue matches the
         * ordinal given to us in lsb_cnt:
         * Copy the mask of possible LSBs for this queue into "qlsb";
         * For each bit in qlsb, see if the corresponding bit in the
         * aggregation mask is set; if so, we have a match.
         *     If we have a match, clear the bit in the aggregation to
         *     mark it as no longer available.
         *     If there is no match, clear the bit in qlsb and keep looking.
         */
        for (i = 0; i < ccp->cmd_q_count; i++) {
                struct ccp_cmd_queue *cmd_q = &ccp->cmd_q[i];

                qlsb_wgt = bitmap_weight(cmd_q->lsbmask, MAX_LSB_CNT);

                if (qlsb_wgt == lsb_cnt) {
                        bitmap_copy(qlsb, cmd_q->lsbmask, MAX_LSB_CNT);

                        bitno = find_first_bit(qlsb, MAX_LSB_CNT);
                        while (bitno < MAX_LSB_CNT) {
                                if (test_bit(bitno, lsb_pub)) {
                                        /* We found an available LSB
                                         * that this queue can access
                                         */
                                        cmd_q->lsb = bitno;
                                        bitmap_clear(lsb_pub, bitno, 1);
                                        dev_dbg(ccp->dev,
                                                "Queue %d gets LSB %d\n",
                                                i, bitno);
                                        break;
                                }
                                bitmap_clear(qlsb, bitno, 1);
                                bitno = find_first_bit(qlsb, MAX_LSB_CNT);
                        }
                        if (bitno >= MAX_LSB_CNT)
                                return -EINVAL;
                        n_lsbs--;
                }
        }
        return n_lsbs;
}

/* For each queue, from the most- to least-constrained:
 * find an LSB that can be assigned to the queue. If there are N queues that
 * can only use M LSBs, where N > M, fail; otherwise, every queue will get a
 * dedicated LSB. Remaining LSB regions become a shared resource.
 * If we have fewer LSBs than queues, all LSB regions become shared resources.
 */
static int ccp_assign_lsbs(struct ccp_device *ccp)
{
        DECLARE_BITMAP(lsb_pub, MAX_LSB_CNT);
        DECLARE_BITMAP(qlsb, MAX_LSB_CNT);
        int n_lsbs = 0;
        int bitno;
        int i, lsb_cnt;
        int rc = 0;

        bitmap_zero(lsb_pub, MAX_LSB_CNT);

        /* Create an aggregate bitmap to get a total count of available LSBs */
        for (i = 0; i < ccp->cmd_q_count; i++)
                bitmap_or(lsb_pub,
                          lsb_pub, ccp->cmd_q[i].lsbmask,
                          MAX_LSB_CNT);

        n_lsbs = bitmap_weight(lsb_pub, MAX_LSB_CNT);

        if (n_lsbs >= ccp->cmd_q_count) {
                /* We have enough LSBs to give every queue a private LSB.
                 * Brute force search to start with the queues that are more
                 * constrained in LSB choice. When an LSB is privately
                 * assigned, it is removed from the public mask.
                 * This is an ugly N squared algorithm with some optimization.
                 */
                for (lsb_cnt = 1;
                     n_lsbs && (lsb_cnt <= MAX_LSB_CNT);
                     lsb_cnt++) {
                        rc = ccp_find_and_assign_lsb_to_q(ccp, lsb_cnt, n_lsbs,
                                                          lsb_pub);
                        if (rc < 0)
                                return -EINVAL;
                        n_lsbs = rc;
                }
        }

        rc = 0;
        /* What's left of the LSBs, according to the public mask, now become
         * shared. Any zero bits in the lsb_pub mask represent an LSB region
         * that can't be used as a shared resource, so mark the LSB slots for
         * them as "in use".
         */
        bitmap_copy(qlsb, lsb_pub, MAX_LSB_CNT);

        bitno = find_first_zero_bit(qlsb, MAX_LSB_CNT);
        while (bitno < MAX_LSB_CNT) {
                bitmap_set(ccp->lsbmap, bitno * LSB_SIZE, LSB_SIZE);
                bitmap_set(qlsb, bitno, 1);
                bitno = find_first_zero_bit(qlsb, MAX_LSB_CNT);
        }

        return rc;
}
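
/* Worked example (illustrative): five queues whose lsbmask union covers
 * seven segments gives n_lsbs (7) >= cmd_q_count (5), so every queue is
 * handed a private segment, most-constrained queues first, and the two
 * segments left in lsb_pub become the shared pool. With only three
 * visible segments, the private pass is skipped and all three segments
 * are marked shared.
 */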

static void ccp5_disable_queue_interrupts(struct ccp_device *ccp)
{
        unsigned int i;

        for (i = 0; i < ccp->cmd_q_count; i++)
                iowrite32(0x0, ccp->cmd_q[i].reg_int_enable);
}

static void ccp5_enable_queue_interrupts(struct ccp_device *ccp)
{
        unsigned int i;

        for (i = 0; i < ccp->cmd_q_count; i++)
                iowrite32(SUPPORTED_INTERRUPTS, ccp->cmd_q[i].reg_int_enable);
}

static void ccp5_irq_bh(unsigned long data)
{
        struct ccp_device *ccp = (struct ccp_device *)data;
        u32 status;
        unsigned int i;

        for (i = 0; i < ccp->cmd_q_count; i++) {
                struct ccp_cmd_queue *cmd_q = &ccp->cmd_q[i];

                status = ioread32(cmd_q->reg_interrupt_status);

                if (status) {
                        cmd_q->int_status = status;
                        cmd_q->q_status = ioread32(cmd_q->reg_status);
                        cmd_q->q_int_status = ioread32(cmd_q->reg_int_status);

                        /* On error, only save the first error value */
                        if ((status & INT_ERROR) && !cmd_q->cmd_error)
                                cmd_q->cmd_error = CMD_Q_ERROR(cmd_q->q_status);

                        cmd_q->int_rcvd = 1;

                        /* Acknowledge the interrupt and wake the kthread */
                        iowrite32(status, cmd_q->reg_interrupt_status);
                        wake_up_interruptible(&cmd_q->int_queue);
                }
        }
        ccp5_enable_queue_interrupts(ccp);
}

static irqreturn_t ccp5_irq_handler(int irq, void *data)
{
        struct ccp_device *ccp = (struct ccp_device *)data;

        ccp5_disable_queue_interrupts(ccp);
        ccp->total_interrupts++;
        if (ccp->use_tasklet)
                tasklet_schedule(&ccp->irq_tasklet);
        else
                ccp5_irq_bh((unsigned long)ccp);
        return IRQ_HANDLED;
}
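
/* Note on the split above: the hard-IRQ handler only masks the queue
 * interrupts and bumps the counter; all register reads, error capture
 * and waiter wake-ups happen in ccp5_irq_bh(), either inline or from
 * the tasklet, which re-enables the interrupts once every queue has
 * been serviced.
 */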

static int ccp5_init(struct ccp_device *ccp)
{
        struct device *dev = ccp->dev;
        struct ccp_cmd_queue *cmd_q;
        struct dma_pool *dma_pool;
        char dma_pool_name[MAX_DMAPOOL_NAME_LEN];
        unsigned int qmr, i;
        u64 status;
        u32 status_lo, status_hi;
        int ret;

        /* Find available queues */
        qmr = ioread32(ccp->io_regs + Q_MASK_REG);
        for (i = 0; i < MAX_HW_QUEUES; i++) {
                if (!(qmr & (1 << i)))
                        continue;

                /* Allocate a dma pool for this queue */
                snprintf(dma_pool_name, sizeof(dma_pool_name), "%s_q%d",
                         ccp->name, i);
                dma_pool = dma_pool_create(dma_pool_name, dev,
                                           CCP_DMAPOOL_MAX_SIZE,
                                           CCP_DMAPOOL_ALIGN, 0);
                if (!dma_pool) {
                        dev_err(dev, "unable to allocate dma pool\n");
                        ret = -ENOMEM;
                        goto e_pool;
                }

                cmd_q = &ccp->cmd_q[ccp->cmd_q_count];
                ccp->cmd_q_count++;

                cmd_q->ccp = ccp;
                cmd_q->id = i;
                cmd_q->dma_pool = dma_pool;
                mutex_init(&cmd_q->q_mutex);

                /* Page alignment satisfies our needs for N <= 128 */
                BUILD_BUG_ON(COMMANDS_PER_QUEUE > 128);
                cmd_q->qsize = Q_SIZE(Q_DESC_SIZE);
                cmd_q->qbase = dma_zalloc_coherent(dev, cmd_q->qsize,
                                                   &cmd_q->qbase_dma,
                                                   GFP_KERNEL);
                if (!cmd_q->qbase) {
                        dev_err(dev, "unable to allocate command queue\n");
                        ret = -ENOMEM;
                        goto e_pool;
                }

                cmd_q->qidx = 0;
                /* Preset some register values and masks that are queue
                 * number dependent
                 */
                cmd_q->reg_control = ccp->io_regs +
                                     CMD5_Q_STATUS_INCR * (i + 1);
                cmd_q->reg_tail_lo = cmd_q->reg_control + CMD5_Q_TAIL_LO_BASE;
                cmd_q->reg_head_lo = cmd_q->reg_control + CMD5_Q_HEAD_LO_BASE;
                cmd_q->reg_int_enable = cmd_q->reg_control +
                                        CMD5_Q_INT_ENABLE_BASE;
                cmd_q->reg_interrupt_status = cmd_q->reg_control +
                                              CMD5_Q_INTERRUPT_STATUS_BASE;
                cmd_q->reg_status = cmd_q->reg_control + CMD5_Q_STATUS_BASE;
                cmd_q->reg_int_status = cmd_q->reg_control +
                                        CMD5_Q_INT_STATUS_BASE;
                cmd_q->reg_dma_status = cmd_q->reg_control +
                                        CMD5_Q_DMA_STATUS_BASE;
                cmd_q->reg_dma_read_status = cmd_q->reg_control +
                                             CMD5_Q_DMA_READ_STATUS_BASE;
                cmd_q->reg_dma_write_status = cmd_q->reg_control +
                                              CMD5_Q_DMA_WRITE_STATUS_BASE;

                init_waitqueue_head(&cmd_q->int_queue);

                dev_dbg(dev, "queue #%u available\n", i);
        }

        if (ccp->cmd_q_count == 0) {
                dev_notice(dev, "no command queues available\n");
                ret = -EIO;
                goto e_pool;
        }

        /* Turn off the queues and disable interrupts until ready */
        ccp5_disable_queue_interrupts(ccp);
        for (i = 0; i < ccp->cmd_q_count; i++) {
                cmd_q = &ccp->cmd_q[i];

                cmd_q->qcontrol = 0; /* Start with nothing */
                iowrite32(cmd_q->qcontrol, cmd_q->reg_control);

                ioread32(cmd_q->reg_int_status);
                ioread32(cmd_q->reg_status);

                /* Clear the interrupt status */
                iowrite32(SUPPORTED_INTERRUPTS, cmd_q->reg_interrupt_status);
        }

        dev_dbg(dev, "Requesting an IRQ...\n");
        /* Request an irq */
        ret = sp_request_ccp_irq(ccp->sp, ccp5_irq_handler, ccp->name, ccp);
        if (ret) {
                dev_err(dev, "unable to allocate an IRQ\n");
                goto e_pool;
        }
        /* Initialize the ISR tasklet */
        if (ccp->use_tasklet)
                tasklet_init(&ccp->irq_tasklet, ccp5_irq_bh,
                             (unsigned long)ccp);

        dev_dbg(dev, "Loading LSB map...\n");
        /* Copy the private LSB mask to the public registers */
        status_lo = ioread32(ccp->io_regs + LSB_PRIVATE_MASK_LO_OFFSET);
        status_hi = ioread32(ccp->io_regs + LSB_PRIVATE_MASK_HI_OFFSET);
        iowrite32(status_lo, ccp->io_regs + LSB_PUBLIC_MASK_LO_OFFSET);
        iowrite32(status_hi, ccp->io_regs + LSB_PUBLIC_MASK_HI_OFFSET);
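        /* The low mask register carries six 5-bit segment access fields
         * (30 bits) and the high register the remaining two, so shifting
         * the high half up by 30 rebuilds the full 40-bit map (this
         * assumes LSB_REGION_WIDTH == 5 and MAX_LSB_CNT == 8 from
         * ccp-dev.h).
         */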
        status = ((u64)status_hi << 30) | (u64)status_lo;

        dev_dbg(dev, "Configuring virtual queues...\n");
        /* Configure size of each virtual queue accessible to host */
        for (i = 0; i < ccp->cmd_q_count; i++) {
                u32 dma_addr_lo;
                u32 dma_addr_hi;

                cmd_q = &ccp->cmd_q[i];

                cmd_q->qcontrol &= ~(CMD5_Q_SIZE << CMD5_Q_SHIFT);
                cmd_q->qcontrol |= QUEUE_SIZE_VAL << CMD5_Q_SHIFT;

                cmd_q->qdma_tail = cmd_q->qbase_dma;
                dma_addr_lo = low_address(cmd_q->qdma_tail);
                iowrite32((u32)dma_addr_lo, cmd_q->reg_tail_lo);
                iowrite32((u32)dma_addr_lo, cmd_q->reg_head_lo);

                dma_addr_hi = high_address(cmd_q->qdma_tail);
                cmd_q->qcontrol |= (dma_addr_hi << 16);
                iowrite32(cmd_q->qcontrol, cmd_q->reg_control);

                /* Find the LSB regions accessible to the queue */
                ccp_find_lsb_regions(cmd_q, status);
                cmd_q->lsb = -1; /* Unassigned value */
        }

        dev_dbg(dev, "Assigning LSBs...\n");
        ret = ccp_assign_lsbs(ccp);
        if (ret) {
                dev_err(dev, "Unable to assign LSBs (%d)\n", ret);
                goto e_irq;
        }

        /* Optimization: pre-allocate LSB slots for each queue */
        for (i = 0; i < ccp->cmd_q_count; i++) {
                ccp->cmd_q[i].sb_key = ccp_lsb_alloc(&ccp->cmd_q[i], 2);
                ccp->cmd_q[i].sb_ctx = ccp_lsb_alloc(&ccp->cmd_q[i], 2);
        }

        dev_dbg(dev, "Starting threads...\n");
        /* Create a kthread for each queue */
        for (i = 0; i < ccp->cmd_q_count; i++) {
                struct task_struct *kthread;

                cmd_q = &ccp->cmd_q[i];

                kthread = kthread_create(ccp_cmd_queue_thread, cmd_q,
                                         "%s-q%u", ccp->name, cmd_q->id);
                if (IS_ERR(kthread)) {
                        dev_err(dev, "error creating queue thread (%ld)\n",
                                PTR_ERR(kthread));
                        ret = PTR_ERR(kthread);
                        goto e_kthread;
                }

                cmd_q->kthread = kthread;
                wake_up_process(kthread);
        }

        dev_dbg(dev, "Enabling interrupts...\n");
        ccp5_enable_queue_interrupts(ccp);

        dev_dbg(dev, "Registering device...\n");
        /* Put this on the unit list to make it available */
        ccp_add_device(ccp);

        ret = ccp_register_rng(ccp);
        if (ret)
                goto e_kthread;

        /* Register the DMA engine support */
        ret = ccp_dmaengine_register(ccp);
        if (ret)
                goto e_hwrng;

        /* Set up debugfs entries */
        ccp5_debugfs_setup(ccp);

        return 0;

e_hwrng:
        ccp_unregister_rng(ccp);

e_kthread:
        for (i = 0; i < ccp->cmd_q_count; i++)
                if (ccp->cmd_q[i].kthread)
                        kthread_stop(ccp->cmd_q[i].kthread);

e_irq:
        sp_free_ccp_irq(ccp->sp, ccp);

e_pool:
        for (i = 0; i < ccp->cmd_q_count; i++)
                dma_pool_destroy(ccp->cmd_q[i].dma_pool);

        return ret;
}

static void ccp5_destroy(struct ccp_device *ccp)
{
        struct device *dev = ccp->dev;
        struct ccp_cmd_queue *cmd_q;
        struct ccp_cmd *cmd;
        unsigned int i;

        /* Unregister the DMA engine */
        ccp_dmaengine_unregister(ccp);

        /* Unregister the RNG */
        ccp_unregister_rng(ccp);

        /* Remove this device from the list of available units first */
        ccp_del_device(ccp);

        /* We're in the process of tearing down the entire driver;
         * when all the devices are gone clean up debugfs
         */
        if (ccp_present())
                ccp5_debugfs_destroy();

        /* Disable and clear interrupts */
        ccp5_disable_queue_interrupts(ccp);
        for (i = 0; i < ccp->cmd_q_count; i++) {
                cmd_q = &ccp->cmd_q[i];

                /* Turn off the run bit */
                iowrite32(cmd_q->qcontrol & ~CMD5_Q_RUN, cmd_q->reg_control);

                /* Clear the interrupt status */
                iowrite32(SUPPORTED_INTERRUPTS, cmd_q->reg_interrupt_status);
                ioread32(cmd_q->reg_int_status);
                ioread32(cmd_q->reg_status);
        }

        /* Stop the queue kthreads */
        for (i = 0; i < ccp->cmd_q_count; i++)
                if (ccp->cmd_q[i].kthread)
                        kthread_stop(ccp->cmd_q[i].kthread);

        sp_free_ccp_irq(ccp->sp, ccp);

        for (i = 0; i < ccp->cmd_q_count; i++) {
                cmd_q = &ccp->cmd_q[i];
                dma_free_coherent(dev, cmd_q->qsize, cmd_q->qbase,
                                  cmd_q->qbase_dma);
        }

        /* Flush the cmd and backlog queue */
        while (!list_empty(&ccp->cmd)) {
                /* Invoke the callback directly with an error code */
                cmd = list_first_entry(&ccp->cmd, struct ccp_cmd, entry);
                list_del(&cmd->entry);
                cmd->callback(cmd->data, -ENODEV);
        }
        while (!list_empty(&ccp->backlog)) {
                /* Invoke the callback directly with an error code */
                cmd = list_first_entry(&ccp->backlog, struct ccp_cmd, entry);
                list_del(&cmd->entry);
                cmd->callback(cmd->data, -ENODEV);
        }
}

static void ccp5_config(struct ccp_device *ccp)
{
        /* Public side */
        iowrite32(0x0, ccp->io_regs + CMD5_REQID_CONFIG_OFFSET);
}

static void ccp5other_config(struct ccp_device *ccp)
{
        int i;
        u32 rnd;

        /* We own all of the queues on the NTB CCP */

        iowrite32(0x00012D57, ccp->io_regs + CMD5_TRNG_CTL_OFFSET);
        iowrite32(0x00000003, ccp->io_regs + CMD5_CONFIG_0_OFFSET);
        for (i = 0; i < 12; i++) {
                rnd = ioread32(ccp->io_regs + TRNG_OUT_REG);
                iowrite32(rnd, ccp->io_regs + CMD5_AES_MASK_OFFSET);
        }

        iowrite32(0x0000001F, ccp->io_regs + CMD5_QUEUE_MASK_OFFSET);
        iowrite32(0x00005B6D, ccp->io_regs + CMD5_QUEUE_PRIO_OFFSET);
        iowrite32(0x00000000, ccp->io_regs + CMD5_CMD_TIMEOUT_OFFSET);

        iowrite32(0x3FFFFFFF, ccp->io_regs + LSB_PRIVATE_MASK_LO_OFFSET);
        iowrite32(0x000003FF, ccp->io_regs + LSB_PRIVATE_MASK_HI_OFFSET);

        iowrite32(0x00108823, ccp->io_regs + CMD5_CLK_GATE_CTL_OFFSET);

        ccp5_config(ccp);
}

/* Version 5 adds some function, but is essentially the same as v3 */
static const struct ccp_actions ccp5_actions = {
        .aes = ccp5_perform_aes,
        .xts_aes = ccp5_perform_xts_aes,
        .sha = ccp5_perform_sha,
        .des3 = ccp5_perform_des3,
        .rsa = ccp5_perform_rsa,
        .passthru = ccp5_perform_passthru,
        .ecc = ccp5_perform_ecc,
        .sballoc = ccp_lsb_alloc,
        .sbfree = ccp_lsb_free,
        .init = ccp5_init,
        .destroy = ccp5_destroy,
        .get_free_slots = ccp5_get_free_slots,
};

const struct ccp_vdata ccpv5a = {
        .version = CCP_VERSION(5, 0),
        .setup = ccp5_config,
        .perform = &ccp5_actions,
        .offset = 0x0,
        .rsamax = CCP5_RSA_MAX_WIDTH,
};

const struct ccp_vdata ccpv5b = {
        .version = CCP_VERSION(5, 0),
        .dma_chan_attr = DMA_PRIVATE,
        .setup = ccp5other_config,
        .perform = &ccp5_actions,
        .offset = 0x0,
        .rsamax = CCP5_RSA_MAX_WIDTH,
};