linux/drivers/crypto/ccp/ccp-dev-v5.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * AMD Cryptographic Coprocessor (CCP) driver
 *
 * Copyright (C) 2016,2019 Advanced Micro Devices, Inc.
 *
 * Author: Gary R Hook <gary.hook@amd.com>
 */

#include <linux/kernel.h>
#include <linux/kthread.h>
#include <linux/dma-mapping.h>
#include <linux/interrupt.h>
#include <linux/compiler.h>
#include <linux/ccp.h>

#include "ccp-dev.h"

/* Allocate the requested number of contiguous LSB slots
 * from the LSB bitmap. Look in the private range for this
 * queue first; failing that, check the public area.
 * If no space is available, wait around.
 * Return: first slot number
 */
static u32 ccp_lsb_alloc(struct ccp_cmd_queue *cmd_q, unsigned int count)
{
        struct ccp_device *ccp;
        int start;

        /* First look at the map for the queue */
        if (cmd_q->lsb >= 0) {
                start = (u32)bitmap_find_next_zero_area(cmd_q->lsbmap,
                                                        LSB_SIZE,
                                                        0, count, 0);
                if (start < LSB_SIZE) {
                        bitmap_set(cmd_q->lsbmap, start, count);
                        return start + cmd_q->lsb * LSB_SIZE;
                }
        }

        /* No joy; try to get an entry from the shared blocks */
        ccp = cmd_q->ccp;
        for (;;) {
                mutex_lock(&ccp->sb_mutex);

                start = (u32)bitmap_find_next_zero_area(ccp->lsbmap,
                                                        MAX_LSB_CNT * LSB_SIZE,
                                                        0,
                                                        count, 0);
                if (start < MAX_LSB_CNT * LSB_SIZE) {
                        bitmap_set(ccp->lsbmap, start, count);

                        mutex_unlock(&ccp->sb_mutex);
                        return start;
                }

                ccp->sb_avail = 0;

                mutex_unlock(&ccp->sb_mutex);

                /* Wait for KSB entries to become available */
                if (wait_event_interruptible(ccp->sb_queue, ccp->sb_avail))
                        return 0;
        }
}

/* Free a number of LSB slots from the bitmap, starting at
 * the indicated starting slot number.
 */
static void ccp_lsb_free(struct ccp_cmd_queue *cmd_q, unsigned int start,
                         unsigned int count)
{
        if (!start)
                return;

        if (cmd_q->lsb == start) {
                /* An entry from the private LSB */
                bitmap_clear(cmd_q->lsbmap, start, count);
        } else {
                /* From the shared LSBs */
                struct ccp_device *ccp = cmd_q->ccp;

                mutex_lock(&ccp->sb_mutex);
                bitmap_clear(ccp->lsbmap, start, count);
                ccp->sb_avail = 1;
                mutex_unlock(&ccp->sb_mutex);
                wake_up_interruptible_all(&ccp->sb_queue);
        }
}
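
/*
 * Illustrative sketch (editor's note, not part of the driver): callers
 * pair these helpers roughly as follows, where a return of 0 from
 * ccp_lsb_alloc() can be treated as "interrupted while waiting", since
 * segment 0 is never handed out:
 *
 *      u32 slot = ccp_lsb_alloc(cmd_q, 2);     // two contiguous slots
 *      if (!slot)
 *              return -EINTR;
 *      ...use LSB entries slot and slot + 1...
 *      ccp_lsb_free(cmd_q, slot, 2);
 */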

/* CCP version 5: Union to define the function field (cmd_reg1/dword0) */
union ccp_function {
        struct {
                u16 size:7;
                u16 encrypt:1;
                u16 mode:5;
                u16 type:2;
        } aes;
        struct {
                u16 size:7;
                u16 encrypt:1;
                u16 rsvd:5;
                u16 type:2;
        } aes_xts;
        struct {
                u16 size:7;
                u16 encrypt:1;
                u16 mode:5;
                u16 type:2;
        } des3;
        struct {
                u16 rsvd1:10;
                u16 type:4;
                u16 rsvd2:1;
        } sha;
        struct {
                u16 mode:3;
                u16 size:12;
        } rsa;
        struct {
                u16 byteswap:2;
                u16 bitwise:3;
                u16 reflect:2;
                u16 rsvd:8;
        } pt;
        struct {
                u16 rsvd:13;
        } zlib;
        struct {
                u16 size:10;
                u16 type:2;
                u16 mode:3;
        } ecc;
        u16 raw;
};
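
/*
 * Sketch of how the union is meant to be used (editor's illustration;
 * the values are arbitrary): zero the raw value, set the
 * engine-specific fields, then copy the packed result into the
 * descriptor's function field:
 *
 *      union ccp_function function;
 *
 *      function.raw = 0;
 *      CCP_AES_ENCRYPT(&function) = CCP_AES_ACTION_ENCRYPT;
 *      CCP_AES_MODE(&function) = CCP_AES_MODE_CBC;
 *      CCP_AES_TYPE(&function) = CCP_AES_TYPE_128;
 *      CCP5_CMD_FUNCTION(&desc) = function.raw;
 *
 * The enum values come from <linux/ccp.h>; each ccp5_perform_*()
 * routine below follows this pattern.
 */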

#define CCP_AES_SIZE(p)         ((p)->aes.size)
#define CCP_AES_ENCRYPT(p)      ((p)->aes.encrypt)
#define CCP_AES_MODE(p)         ((p)->aes.mode)
#define CCP_AES_TYPE(p)         ((p)->aes.type)
#define CCP_XTS_SIZE(p)         ((p)->aes_xts.size)
#define CCP_XTS_TYPE(p)         ((p)->aes_xts.type)
#define CCP_XTS_ENCRYPT(p)      ((p)->aes_xts.encrypt)
#define CCP_DES3_SIZE(p)        ((p)->des3.size)
#define CCP_DES3_ENCRYPT(p)     ((p)->des3.encrypt)
#define CCP_DES3_MODE(p)        ((p)->des3.mode)
#define CCP_DES3_TYPE(p)        ((p)->des3.type)
#define CCP_SHA_TYPE(p)         ((p)->sha.type)
#define CCP_RSA_SIZE(p)         ((p)->rsa.size)
#define CCP_PT_BYTESWAP(p)      ((p)->pt.byteswap)
#define CCP_PT_BITWISE(p)       ((p)->pt.bitwise)
#define CCP_ECC_MODE(p)         ((p)->ecc.mode)
#define CCP_ECC_AFFINE(p)       ((p)->ecc.one)

/* Word 0 */
#define CCP5_CMD_DW0(p)         ((p)->dw0)
#define CCP5_CMD_SOC(p)         (CCP5_CMD_DW0(p).soc)
#define CCP5_CMD_IOC(p)         (CCP5_CMD_DW0(p).ioc)
#define CCP5_CMD_INIT(p)        (CCP5_CMD_DW0(p).init)
#define CCP5_CMD_EOM(p)         (CCP5_CMD_DW0(p).eom)
#define CCP5_CMD_FUNCTION(p)    (CCP5_CMD_DW0(p).function)
#define CCP5_CMD_ENGINE(p)      (CCP5_CMD_DW0(p).engine)
#define CCP5_CMD_PROT(p)        (CCP5_CMD_DW0(p).prot)

/* Word 1 */
#define CCP5_CMD_DW1(p)         ((p)->length)
#define CCP5_CMD_LEN(p)         (CCP5_CMD_DW1(p))

/* Word 2 */
#define CCP5_CMD_DW2(p)         ((p)->src_lo)
#define CCP5_CMD_SRC_LO(p)      (CCP5_CMD_DW2(p))

/* Word 3 */
#define CCP5_CMD_DW3(p)         ((p)->dw3)
#define CCP5_CMD_SRC_MEM(p)     ((p)->dw3.src_mem)
#define CCP5_CMD_SRC_HI(p)      ((p)->dw3.src_hi)
#define CCP5_CMD_LSB_ID(p)      ((p)->dw3.lsb_cxt_id)
#define CCP5_CMD_FIX_SRC(p)     ((p)->dw3.fixed)

/* Words 4/5 */
#define CCP5_CMD_DW4(p)         ((p)->dw4)
#define CCP5_CMD_DST_LO(p)      (CCP5_CMD_DW4(p).dst_lo)
#define CCP5_CMD_DW5(p)         ((p)->dw5.fields.dst_hi)
#define CCP5_CMD_DST_HI(p)      (CCP5_CMD_DW5(p))
#define CCP5_CMD_DST_MEM(p)     ((p)->dw5.fields.dst_mem)
#define CCP5_CMD_FIX_DST(p)     ((p)->dw5.fields.fixed)
#define CCP5_CMD_SHA_LO(p)      ((p)->dw4.sha_len_lo)
#define CCP5_CMD_SHA_HI(p)      ((p)->dw5.sha_len_hi)

/* Word 6/7 */
#define CCP5_CMD_DW6(p)         ((p)->key_lo)
#define CCP5_CMD_KEY_LO(p)      (CCP5_CMD_DW6(p))
#define CCP5_CMD_DW7(p)         ((p)->dw7)
#define CCP5_CMD_KEY_HI(p)      ((p)->dw7.key_hi)
#define CCP5_CMD_KEY_MEM(p)     ((p)->dw7.key_mem)

static inline u32 low_address(unsigned long addr)
{
        return (u64)addr & 0x0ffffffff;
}

static inline u32 high_address(unsigned long addr)
{
        return ((u64)addr >> 32) & 0x00000ffff;
}
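
/*
 * Worked example (editor's note): for addr = 0x1_2345_6789,
 * low_address() yields 0x23456789 and high_address() yields 0x1. The
 * high half is masked to 16 bits because it is stored in the upper 16
 * bits of the 32-bit queue control register rather than in a register
 * of its own (see ccp5_init() below).
 */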

static unsigned int ccp5_get_free_slots(struct ccp_cmd_queue *cmd_q)
{
        unsigned int head_idx, n;
        u32 head_lo, queue_start;

        queue_start = low_address(cmd_q->qdma_tail);
        head_lo = ioread32(cmd_q->reg_head_lo);
        head_idx = (head_lo - queue_start) / sizeof(struct ccp5_desc);

        n = head_idx + COMMANDS_PER_QUEUE - cmd_q->qidx - 1;

        return n % COMMANDS_PER_QUEUE; /* Always one unused spot */
}

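/*
 * Worked example for ccp5_get_free_slots() (editor's illustration):
 * with COMMANDS_PER_QUEUE = 8, head_idx = 2 and qidx = 6 it returns
 * (2 + 8 - 6 - 1) % 8 = 3. One slot is always left unused so that a
 * full ring can be told apart from an empty one.
 */

/* Copy a prepared descriptor into the next free slot of the queue's
 * command ring, advance the tail pointer and re-arm the queue. If the
 * descriptor asked for an interrupt on completion, sleep until the
 * interrupt handler reports completion or an error.
 */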
static int ccp5_do_cmd(struct ccp5_desc *desc,
                       struct ccp_cmd_queue *cmd_q)
{
        __le32 *mP;
        u32 *dP;
        u32 tail;
        int i;
        int ret = 0;

        cmd_q->total_ops++;

        if (CCP5_CMD_SOC(desc)) {
                CCP5_CMD_IOC(desc) = 1;
                CCP5_CMD_SOC(desc) = 0;
        }
        mutex_lock(&cmd_q->q_mutex);

        mP = (__le32 *)&cmd_q->qbase[cmd_q->qidx];
        dP = (u32 *)desc;
        for (i = 0; i < 8; i++)
                mP[i] = cpu_to_le32(dP[i]); /* handle endianness */

        cmd_q->qidx = (cmd_q->qidx + 1) % COMMANDS_PER_QUEUE;

        /* The data used by this command must be flushed to memory */
        wmb();

        /* Write the new tail address back to the queue register */
        tail = low_address(cmd_q->qdma_tail + cmd_q->qidx * Q_DESC_SIZE);
        iowrite32(tail, cmd_q->reg_tail_lo);

        /* Turn the queue back on using our cached control register */
        iowrite32(cmd_q->qcontrol | CMD5_Q_RUN, cmd_q->reg_control);
        mutex_unlock(&cmd_q->q_mutex);

        if (CCP5_CMD_IOC(desc)) {
                /* Wait for the job to complete */
                ret = wait_event_interruptible(cmd_q->int_queue,
                                               cmd_q->int_rcvd);
                if (ret || cmd_q->cmd_error) {
                        /* Log the error and flush the queue by
                         * moving the head pointer
                         */
                        if (cmd_q->cmd_error)
                                ccp_log_error(cmd_q->ccp,
                                              cmd_q->cmd_error);
                        iowrite32(tail, cmd_q->reg_head_lo);
                        if (!ret)
                                ret = -EIO;
                }
                cmd_q->int_rcvd = 0;
        }

        return ret;
}

static int ccp5_perform_aes(struct ccp_op *op)
{
        struct ccp5_desc desc;
        union ccp_function function;
        u32 key_addr = op->sb_key * LSB_ITEM_SIZE;

        op->cmd_q->total_aes_ops++;

        /* Zero out all the fields of the command desc */
        memset(&desc, 0, Q_DESC_SIZE);

        CCP5_CMD_ENGINE(&desc) = CCP_ENGINE_AES;

        CCP5_CMD_SOC(&desc) = op->soc;
        CCP5_CMD_IOC(&desc) = 1;
        CCP5_CMD_INIT(&desc) = op->init;
        CCP5_CMD_EOM(&desc) = op->eom;
        CCP5_CMD_PROT(&desc) = 0;

        function.raw = 0;
        CCP_AES_ENCRYPT(&function) = op->u.aes.action;
        CCP_AES_MODE(&function) = op->u.aes.mode;
        CCP_AES_TYPE(&function) = op->u.aes.type;
        CCP_AES_SIZE(&function) = op->u.aes.size;

        CCP5_CMD_FUNCTION(&desc) = function.raw;

        CCP5_CMD_LEN(&desc) = op->src.u.dma.length;

        CCP5_CMD_SRC_LO(&desc) = ccp_addr_lo(&op->src.u.dma);
        CCP5_CMD_SRC_HI(&desc) = ccp_addr_hi(&op->src.u.dma);
        CCP5_CMD_SRC_MEM(&desc) = CCP_MEMTYPE_SYSTEM;

        CCP5_CMD_DST_LO(&desc) = ccp_addr_lo(&op->dst.u.dma);
        CCP5_CMD_DST_HI(&desc) = ccp_addr_hi(&op->dst.u.dma);
        CCP5_CMD_DST_MEM(&desc) = CCP_MEMTYPE_SYSTEM;

        CCP5_CMD_KEY_LO(&desc) = lower_32_bits(key_addr);
        CCP5_CMD_KEY_HI(&desc) = 0;
        CCP5_CMD_KEY_MEM(&desc) = CCP_MEMTYPE_SB;
        CCP5_CMD_LSB_ID(&desc) = op->sb_ctx;

        return ccp5_do_cmd(&desc, op->cmd_q);
}

static int ccp5_perform_xts_aes(struct ccp_op *op)
{
        struct ccp5_desc desc;
        union ccp_function function;
        u32 key_addr = op->sb_key * LSB_ITEM_SIZE;

        op->cmd_q->total_xts_aes_ops++;

        /* Zero out all the fields of the command desc */
        memset(&desc, 0, Q_DESC_SIZE);

        CCP5_CMD_ENGINE(&desc) = CCP_ENGINE_XTS_AES_128;

        CCP5_CMD_SOC(&desc) = op->soc;
        CCP5_CMD_IOC(&desc) = 1;
        CCP5_CMD_INIT(&desc) = op->init;
        CCP5_CMD_EOM(&desc) = op->eom;
        CCP5_CMD_PROT(&desc) = 0;

        function.raw = 0;
        CCP_XTS_TYPE(&function) = op->u.xts.type;
        CCP_XTS_ENCRYPT(&function) = op->u.xts.action;
        CCP_XTS_SIZE(&function) = op->u.xts.unit_size;
        CCP5_CMD_FUNCTION(&desc) = function.raw;

        CCP5_CMD_LEN(&desc) = op->src.u.dma.length;

        CCP5_CMD_SRC_LO(&desc) = ccp_addr_lo(&op->src.u.dma);
        CCP5_CMD_SRC_HI(&desc) = ccp_addr_hi(&op->src.u.dma);
        CCP5_CMD_SRC_MEM(&desc) = CCP_MEMTYPE_SYSTEM;

        CCP5_CMD_DST_LO(&desc) = ccp_addr_lo(&op->dst.u.dma);
        CCP5_CMD_DST_HI(&desc) = ccp_addr_hi(&op->dst.u.dma);
        CCP5_CMD_DST_MEM(&desc) = CCP_MEMTYPE_SYSTEM;

        CCP5_CMD_KEY_LO(&desc) = lower_32_bits(key_addr);
        CCP5_CMD_KEY_HI(&desc) = 0;
        CCP5_CMD_KEY_MEM(&desc) = CCP_MEMTYPE_SB;
        CCP5_CMD_LSB_ID(&desc) = op->sb_ctx;

        return ccp5_do_cmd(&desc, op->cmd_q);
}

static int ccp5_perform_sha(struct ccp_op *op)
{
        struct ccp5_desc desc;
        union ccp_function function;

        op->cmd_q->total_sha_ops++;

        /* Zero out all the fields of the command desc */
        memset(&desc, 0, Q_DESC_SIZE);

        CCP5_CMD_ENGINE(&desc) = CCP_ENGINE_SHA;

        CCP5_CMD_SOC(&desc) = op->soc;
        CCP5_CMD_IOC(&desc) = 1;
        CCP5_CMD_INIT(&desc) = 1;
        CCP5_CMD_EOM(&desc) = op->eom;
        CCP5_CMD_PROT(&desc) = 0;

        function.raw = 0;
        CCP_SHA_TYPE(&function) = op->u.sha.type;
        CCP5_CMD_FUNCTION(&desc) = function.raw;

        CCP5_CMD_LEN(&desc) = op->src.u.dma.length;

        CCP5_CMD_SRC_LO(&desc) = ccp_addr_lo(&op->src.u.dma);
        CCP5_CMD_SRC_HI(&desc) = ccp_addr_hi(&op->src.u.dma);
        CCP5_CMD_SRC_MEM(&desc) = CCP_MEMTYPE_SYSTEM;

        CCP5_CMD_LSB_ID(&desc) = op->sb_ctx;

        if (op->eom) {
                CCP5_CMD_SHA_LO(&desc) = lower_32_bits(op->u.sha.msg_bits);
                CCP5_CMD_SHA_HI(&desc) = upper_32_bits(op->u.sha.msg_bits);
        } else {
                CCP5_CMD_SHA_LO(&desc) = 0;
                CCP5_CMD_SHA_HI(&desc) = 0;
        }

        return ccp5_do_cmd(&desc, op->cmd_q);
}

static int ccp5_perform_des3(struct ccp_op *op)
{
        struct ccp5_desc desc;
        union ccp_function function;
        u32 key_addr = op->sb_key * LSB_ITEM_SIZE;

        op->cmd_q->total_3des_ops++;

        /* Zero out all the fields of the command desc */
        memset(&desc, 0, sizeof(struct ccp5_desc));

        CCP5_CMD_ENGINE(&desc) = CCP_ENGINE_DES3;

        CCP5_CMD_SOC(&desc) = op->soc;
        CCP5_CMD_IOC(&desc) = 1;
        CCP5_CMD_INIT(&desc) = op->init;
        CCP5_CMD_EOM(&desc) = op->eom;
        CCP5_CMD_PROT(&desc) = 0;

        function.raw = 0;
        CCP_DES3_ENCRYPT(&function) = op->u.des3.action;
        CCP_DES3_MODE(&function) = op->u.des3.mode;
        CCP_DES3_TYPE(&function) = op->u.des3.type;
        CCP5_CMD_FUNCTION(&desc) = function.raw;

        CCP5_CMD_LEN(&desc) = op->src.u.dma.length;

        CCP5_CMD_SRC_LO(&desc) = ccp_addr_lo(&op->src.u.dma);
        CCP5_CMD_SRC_HI(&desc) = ccp_addr_hi(&op->src.u.dma);
        CCP5_CMD_SRC_MEM(&desc) = CCP_MEMTYPE_SYSTEM;

        CCP5_CMD_DST_LO(&desc) = ccp_addr_lo(&op->dst.u.dma);
        CCP5_CMD_DST_HI(&desc) = ccp_addr_hi(&op->dst.u.dma);
        CCP5_CMD_DST_MEM(&desc) = CCP_MEMTYPE_SYSTEM;

        CCP5_CMD_KEY_LO(&desc) = lower_32_bits(key_addr);
        CCP5_CMD_KEY_HI(&desc) = 0;
        CCP5_CMD_KEY_MEM(&desc) = CCP_MEMTYPE_SB;
        CCP5_CMD_LSB_ID(&desc) = op->sb_ctx;

        return ccp5_do_cmd(&desc, op->cmd_q);
}

static int ccp5_perform_rsa(struct ccp_op *op)
{
        struct ccp5_desc desc;
        union ccp_function function;

        op->cmd_q->total_rsa_ops++;

        /* Zero out all the fields of the command desc */
        memset(&desc, 0, Q_DESC_SIZE);

        CCP5_CMD_ENGINE(&desc) = CCP_ENGINE_RSA;

        CCP5_CMD_SOC(&desc) = op->soc;
        CCP5_CMD_IOC(&desc) = 1;
        CCP5_CMD_INIT(&desc) = 0;
        CCP5_CMD_EOM(&desc) = 1;
        CCP5_CMD_PROT(&desc) = 0;

        function.raw = 0;
        CCP_RSA_SIZE(&function) = (op->u.rsa.mod_size + 7) >> 3;
        CCP5_CMD_FUNCTION(&desc) = function.raw;

        CCP5_CMD_LEN(&desc) = op->u.rsa.input_len;

        /* Source is from external memory */
        CCP5_CMD_SRC_LO(&desc) = ccp_addr_lo(&op->src.u.dma);
        CCP5_CMD_SRC_HI(&desc) = ccp_addr_hi(&op->src.u.dma);
        CCP5_CMD_SRC_MEM(&desc) = CCP_MEMTYPE_SYSTEM;

        /* Destination is in external memory */
        CCP5_CMD_DST_LO(&desc) = ccp_addr_lo(&op->dst.u.dma);
        CCP5_CMD_DST_HI(&desc) = ccp_addr_hi(&op->dst.u.dma);
        CCP5_CMD_DST_MEM(&desc) = CCP_MEMTYPE_SYSTEM;

        /* Key (Exponent) is in external memory */
        CCP5_CMD_KEY_LO(&desc) = ccp_addr_lo(&op->exp.u.dma);
        CCP5_CMD_KEY_HI(&desc) = ccp_addr_hi(&op->exp.u.dma);
        CCP5_CMD_KEY_MEM(&desc) = CCP_MEMTYPE_SYSTEM;

        return ccp5_do_cmd(&desc, op->cmd_q);
}
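
/*
 * Note on CCP_RSA_SIZE above: op->u.rsa.mod_size is in bits, while the
 * v5 function field wants the operand size in bytes, hence
 * (mod_size + 7) >> 3; a 2048-bit modulus, for instance, packs as 256.
 */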

static int ccp5_perform_passthru(struct ccp_op *op)
{
        struct ccp5_desc desc;
        union ccp_function function;
        struct ccp_dma_info *saddr = &op->src.u.dma;
        struct ccp_dma_info *daddr = &op->dst.u.dma;

        op->cmd_q->total_pt_ops++;

        memset(&desc, 0, Q_DESC_SIZE);

        CCP5_CMD_ENGINE(&desc) = CCP_ENGINE_PASSTHRU;

        CCP5_CMD_SOC(&desc) = 0;
        CCP5_CMD_IOC(&desc) = 1;
        CCP5_CMD_INIT(&desc) = 0;
        CCP5_CMD_EOM(&desc) = op->eom;
        CCP5_CMD_PROT(&desc) = 0;

        function.raw = 0;
        CCP_PT_BYTESWAP(&function) = op->u.passthru.byte_swap;
        CCP_PT_BITWISE(&function) = op->u.passthru.bit_mod;
        CCP5_CMD_FUNCTION(&desc) = function.raw;

        /* Take the length from whichever side lives in system memory */
        if (op->src.type == CCP_MEMTYPE_SYSTEM)
                CCP5_CMD_LEN(&desc) = saddr->length;
        else
                CCP5_CMD_LEN(&desc) = daddr->length;

        if (op->src.type == CCP_MEMTYPE_SYSTEM) {
                CCP5_CMD_SRC_LO(&desc) = ccp_addr_lo(&op->src.u.dma);
                CCP5_CMD_SRC_HI(&desc) = ccp_addr_hi(&op->src.u.dma);
                CCP5_CMD_SRC_MEM(&desc) = CCP_MEMTYPE_SYSTEM;

                if (op->u.passthru.bit_mod != CCP_PASSTHRU_BITWISE_NOOP)
                        CCP5_CMD_LSB_ID(&desc) = op->sb_key;
        } else {
                u32 key_addr = op->src.u.sb * CCP_SB_BYTES;

                CCP5_CMD_SRC_LO(&desc) = lower_32_bits(key_addr);
                CCP5_CMD_SRC_HI(&desc) = 0;
                CCP5_CMD_SRC_MEM(&desc) = CCP_MEMTYPE_SB;
        }

        if (op->dst.type == CCP_MEMTYPE_SYSTEM) {
                CCP5_CMD_DST_LO(&desc) = ccp_addr_lo(&op->dst.u.dma);
                CCP5_CMD_DST_HI(&desc) = ccp_addr_hi(&op->dst.u.dma);
                CCP5_CMD_DST_MEM(&desc) = CCP_MEMTYPE_SYSTEM;
        } else {
                u32 key_addr = op->dst.u.sb * CCP_SB_BYTES;

                CCP5_CMD_DST_LO(&desc) = lower_32_bits(key_addr);
                CCP5_CMD_DST_HI(&desc) = 0;
                CCP5_CMD_DST_MEM(&desc) = CCP_MEMTYPE_SB;
        }

        return ccp5_do_cmd(&desc, op->cmd_q);
}
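
/*
 * Sketch of a typical pass-through use (editor's illustration, not a
 * path taken verbatim in this file): copying a key from system memory
 * into an LSB slot with the byte order reversed for the hardware,
 * assuming op.src is set up as CCP_MEMTYPE_SYSTEM and op.dst as an SB
 * slot:
 *
 *      op.u.passthru.byte_swap = CCP_PASSTHRU_BYTESWAP_256BIT;
 *      op.u.passthru.bit_mod = CCP_PASSTHRU_BITWISE_NOOP;
 *      ret = ccp5_perform_passthru(&op);
 */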

static int ccp5_perform_ecc(struct ccp_op *op)
{
        struct ccp5_desc desc;
        union ccp_function function;

        op->cmd_q->total_ecc_ops++;

        /* Zero out all the fields of the command desc */
        memset(&desc, 0, Q_DESC_SIZE);

        CCP5_CMD_ENGINE(&desc) = CCP_ENGINE_ECC;

        CCP5_CMD_SOC(&desc) = 0;
        CCP5_CMD_IOC(&desc) = 1;
        CCP5_CMD_INIT(&desc) = 0;
        CCP5_CMD_EOM(&desc) = 1;
        CCP5_CMD_PROT(&desc) = 0;

        function.raw = 0;
        function.ecc.mode = op->u.ecc.function;
        CCP5_CMD_FUNCTION(&desc) = function.raw;

        CCP5_CMD_LEN(&desc) = op->src.u.dma.length;

        CCP5_CMD_SRC_LO(&desc) = ccp_addr_lo(&op->src.u.dma);
        CCP5_CMD_SRC_HI(&desc) = ccp_addr_hi(&op->src.u.dma);
        CCP5_CMD_SRC_MEM(&desc) = CCP_MEMTYPE_SYSTEM;

        CCP5_CMD_DST_LO(&desc) = ccp_addr_lo(&op->dst.u.dma);
        CCP5_CMD_DST_HI(&desc) = ccp_addr_hi(&op->dst.u.dma);
        CCP5_CMD_DST_MEM(&desc) = CCP_MEMTYPE_SYSTEM;

        return ccp5_do_cmd(&desc, op->cmd_q);
}

static int ccp_find_lsb_regions(struct ccp_cmd_queue *cmd_q, u64 status)
{
        int q_mask = 1 << cmd_q->id;
        int queues = 0;
        int j;

        /* Build a bit mask to know which LSBs this queue has access to.
         * Don't bother with segment 0 as it has special privileges.
         */
        for (j = 1; j < MAX_LSB_CNT; j++) {
                status >>= LSB_REGION_WIDTH;
                if (status & q_mask)
                        bitmap_set(cmd_q->lsbmask, j, 1);
        }
        queues = bitmap_weight(cmd_q->lsbmask, MAX_LSB_CNT);
        dev_dbg(cmd_q->ccp->dev, "Queue %d can access %d LSB regions\n",
                cmd_q->id, queues);

        return queues ? 0 : -EINVAL;
}
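
/*
 * Illustrative decode (editor's note): each LSB region contributes
 * LSB_REGION_WIDTH status bits, one per queue. After the first shift
 * the low bits hold region 1's field, so queue 2 (q_mask = 0x4) tests
 * bit 2 of region 1, then of region 2, and so on; each hit records the
 * region in cmd_q->lsbmask.
 */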

static int ccp_find_and_assign_lsb_to_q(struct ccp_device *ccp,
                                        int lsb_cnt, int n_lsbs,
                                        unsigned long *lsb_pub)
{
        DECLARE_BITMAP(qlsb, MAX_LSB_CNT);
        int bitno;
        int qlsb_wgt;
        int i;

        /* For each queue:
         * If the count of potential LSBs available to a queue matches the
         * ordinal given to us in lsb_cnt:
         * Copy the mask of possible LSBs for this queue into "qlsb";
         * For each bit in qlsb, see if the corresponding bit in the
         * aggregation mask is set; if so, we have a match.
         *     If we have a match, clear the bit in the aggregation to
         *     mark it as no longer available.
         *     If there is no match, clear the bit in qlsb and keep looking.
         */
        for (i = 0; i < ccp->cmd_q_count; i++) {
                struct ccp_cmd_queue *cmd_q = &ccp->cmd_q[i];

                qlsb_wgt = bitmap_weight(cmd_q->lsbmask, MAX_LSB_CNT);

                if (qlsb_wgt == lsb_cnt) {
                        bitmap_copy(qlsb, cmd_q->lsbmask, MAX_LSB_CNT);

                        bitno = find_first_bit(qlsb, MAX_LSB_CNT);
                        while (bitno < MAX_LSB_CNT) {
                                if (test_bit(bitno, lsb_pub)) {
                                        /* We found an available LSB
                                         * that this queue can access
                                         */
                                        cmd_q->lsb = bitno;
                                        bitmap_clear(lsb_pub, bitno, 1);
                                        dev_dbg(ccp->dev,
                                                "Queue %d gets LSB %d\n",
                                                i, bitno);
                                        break;
                                }
                                bitmap_clear(qlsb, bitno, 1);
                                bitno = find_first_bit(qlsb, MAX_LSB_CNT);
                        }
                        if (bitno >= MAX_LSB_CNT)
                                return -EINVAL;
                        n_lsbs--;
                }
        }
        return n_lsbs;
}

/* For each queue, from the most- to least-constrained:
 * find an LSB that can be assigned to the queue. If there are N queues that
 * can only use M LSBs, where N > M, fail; otherwise, every queue will get a
 * dedicated LSB. Remaining LSB regions become a shared resource.
 * If we have fewer LSBs than queues, all LSB regions become shared resources.
 */
static int ccp_assign_lsbs(struct ccp_device *ccp)
{
        DECLARE_BITMAP(lsb_pub, MAX_LSB_CNT);
        DECLARE_BITMAP(qlsb, MAX_LSB_CNT);
        int n_lsbs = 0;
        int bitno;
        int i, lsb_cnt;
        int rc = 0;

        bitmap_zero(lsb_pub, MAX_LSB_CNT);

        /* Create an aggregate bitmap to get a total count of available LSBs */
        for (i = 0; i < ccp->cmd_q_count; i++)
                bitmap_or(lsb_pub,
                          lsb_pub, ccp->cmd_q[i].lsbmask,
                          MAX_LSB_CNT);

        n_lsbs = bitmap_weight(lsb_pub, MAX_LSB_CNT);

        if (n_lsbs >= ccp->cmd_q_count) {
                /* We have enough LSBS to give every queue a private LSB.
                 * Brute force search to start with the queues that are more
                 * constrained in LSB choice. When an LSB is privately
                 * assigned, it is removed from the public mask.
                 * This is an ugly N squared algorithm with some optimization.
                 */
                for (lsb_cnt = 1;
                     n_lsbs && (lsb_cnt <= MAX_LSB_CNT);
                     lsb_cnt++) {
                        rc = ccp_find_and_assign_lsb_to_q(ccp, lsb_cnt, n_lsbs,
                                                          lsb_pub);
                        if (rc < 0)
                                return -EINVAL;
                        n_lsbs = rc;
                }
        }

        rc = 0;
        /* What's left of the LSBs, according to the public mask, now become
         * shared. Any zero bits in the lsb_pub mask represent an LSB region
         * that can't be used as a shared resource, so mark the LSB slots for
         * them as "in use".
         */
        bitmap_copy(qlsb, lsb_pub, MAX_LSB_CNT);

        bitno = find_first_zero_bit(qlsb, MAX_LSB_CNT);
        while (bitno < MAX_LSB_CNT) {
                bitmap_set(ccp->lsbmap, bitno * LSB_SIZE, LSB_SIZE);
                bitmap_set(qlsb, bitno, 1);
                bitno = find_first_zero_bit(qlsb, MAX_LSB_CNT);
        }

        return rc;
}
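
/*
 * Worked example (editor's illustration): suppose two queues, where
 * queue 0 can see LSBs {1, 2} and queue 1 only {1}. The aggregate
 * public mask is {1, 2}, so n_lsbs = 2 >= 2 queues. The lsb_cnt = 1
 * pass gives queue 1 its only choice, LSB 1; the lsb_cnt = 2 pass then
 * gives queue 0 LSB 2. Nothing remains public, so every shared-map
 * slot is marked "in use".
 */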

static void ccp5_disable_queue_interrupts(struct ccp_device *ccp)
{
        unsigned int i;

        for (i = 0; i < ccp->cmd_q_count; i++)
                iowrite32(0x0, ccp->cmd_q[i].reg_int_enable);
}

static void ccp5_enable_queue_interrupts(struct ccp_device *ccp)
{
        unsigned int i;

        for (i = 0; i < ccp->cmd_q_count; i++)
                iowrite32(SUPPORTED_INTERRUPTS, ccp->cmd_q[i].reg_int_enable);
}

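/* Bottom half of interrupt handling: the hard handler below masks the
 * queue interrupts and defers the work here (directly, or via tasklet),
 * so the per-queue status registers can be read and acknowledged before
 * the interrupts are unmasked again.
 */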
static void ccp5_irq_bh(unsigned long data)
{
        struct ccp_device *ccp = (struct ccp_device *)data;
        u32 status;
        unsigned int i;

        for (i = 0; i < ccp->cmd_q_count; i++) {
                struct ccp_cmd_queue *cmd_q = &ccp->cmd_q[i];

                status = ioread32(cmd_q->reg_interrupt_status);

                if (status) {
                        cmd_q->int_status = status;
                        cmd_q->q_status = ioread32(cmd_q->reg_status);
                        cmd_q->q_int_status = ioread32(cmd_q->reg_int_status);

                        /* On error, only save the first error value */
                        if ((status & INT_ERROR) && !cmd_q->cmd_error)
                                cmd_q->cmd_error = CMD_Q_ERROR(cmd_q->q_status);

                        cmd_q->int_rcvd = 1;

                        /* Acknowledge the interrupt and wake the kthread */
                        iowrite32(status, cmd_q->reg_interrupt_status);
                        wake_up_interruptible(&cmd_q->int_queue);
                }
        }
        ccp5_enable_queue_interrupts(ccp);
}

static irqreturn_t ccp5_irq_handler(int irq, void *data)
{
        struct ccp_device *ccp = (struct ccp_device *)data;

        ccp5_disable_queue_interrupts(ccp);
        ccp->total_interrupts++;
        if (ccp->use_tasklet)
                tasklet_schedule(&ccp->irq_tasklet);
        else
                ccp5_irq_bh((unsigned long)ccp);
        return IRQ_HANDLED;
}

static int ccp5_init(struct ccp_device *ccp)
{
        struct device *dev = ccp->dev;
        struct ccp_cmd_queue *cmd_q;
        struct dma_pool *dma_pool;
        char dma_pool_name[MAX_DMAPOOL_NAME_LEN];
        unsigned int qmr, i;
        u64 status;
        u32 status_lo, status_hi;
        int ret;

        /* Find available queues */
        qmr = ioread32(ccp->io_regs + Q_MASK_REG);
        /*
         * Check for access to the registers.  If this read returns
         * 0xffffffff, it's likely that the system is running a broken
         * BIOS which disallows access to the device. Stop here and fail
         * the initialization (but not the load, as the PSP could get
         * properly initialized).
         */
        if (qmr == 0xffffffff) {
                dev_notice(dev, "ccp: unable to access the device: you might be running a broken BIOS.\n");
                return 1;
        }

        for (i = 0; (i < MAX_HW_QUEUES) && (ccp->cmd_q_count < ccp->max_q_count); i++) {
                if (!(qmr & (1 << i)))
                        continue;

                /* Allocate a dma pool for this queue */
                snprintf(dma_pool_name, sizeof(dma_pool_name), "%s_q%d",
                         ccp->name, i);
                dma_pool = dma_pool_create(dma_pool_name, dev,
                                           CCP_DMAPOOL_MAX_SIZE,
                                           CCP_DMAPOOL_ALIGN, 0);
                if (!dma_pool) {
                        dev_err(dev, "unable to allocate dma pool\n");
                        ret = -ENOMEM;
                        goto e_pool;
                }

                cmd_q = &ccp->cmd_q[ccp->cmd_q_count];
                ccp->cmd_q_count++;

                cmd_q->ccp = ccp;
                cmd_q->id = i;
                cmd_q->dma_pool = dma_pool;
                mutex_init(&cmd_q->q_mutex);

                /* Page alignment satisfies our needs for N <= 128 */
                BUILD_BUG_ON(COMMANDS_PER_QUEUE > 128);
                cmd_q->qsize = Q_SIZE(Q_DESC_SIZE);
                cmd_q->qbase = dmam_alloc_coherent(dev, cmd_q->qsize,
                                                   &cmd_q->qbase_dma,
                                                   GFP_KERNEL);
                if (!cmd_q->qbase) {
                        dev_err(dev, "unable to allocate command queue\n");
                        ret = -ENOMEM;
                        goto e_pool;
                }

                cmd_q->qidx = 0;
                /* Preset some register values and masks that are queue
                 * number dependent
                 */
                cmd_q->reg_control = ccp->io_regs +
                                     CMD5_Q_STATUS_INCR * (i + 1);
                cmd_q->reg_tail_lo = cmd_q->reg_control + CMD5_Q_TAIL_LO_BASE;
                cmd_q->reg_head_lo = cmd_q->reg_control + CMD5_Q_HEAD_LO_BASE;
                cmd_q->reg_int_enable = cmd_q->reg_control +
                                        CMD5_Q_INT_ENABLE_BASE;
                cmd_q->reg_interrupt_status = cmd_q->reg_control +
                                              CMD5_Q_INTERRUPT_STATUS_BASE;
                cmd_q->reg_status = cmd_q->reg_control + CMD5_Q_STATUS_BASE;
                cmd_q->reg_int_status = cmd_q->reg_control +
                                        CMD5_Q_INT_STATUS_BASE;
                cmd_q->reg_dma_status = cmd_q->reg_control +
                                        CMD5_Q_DMA_STATUS_BASE;
                cmd_q->reg_dma_read_status = cmd_q->reg_control +
                                             CMD5_Q_DMA_READ_STATUS_BASE;
                cmd_q->reg_dma_write_status = cmd_q->reg_control +
                                              CMD5_Q_DMA_WRITE_STATUS_BASE;

                init_waitqueue_head(&cmd_q->int_queue);

                dev_dbg(dev, "queue #%u available\n", i);
        }

        if (ccp->cmd_q_count == 0) {
                dev_notice(dev, "no command queues available\n");
                ret = 1;
                goto e_pool;
        }

        /* Turn off the queues and disable interrupts until ready */
        ccp5_disable_queue_interrupts(ccp);
        for (i = 0; i < ccp->cmd_q_count; i++) {
                cmd_q = &ccp->cmd_q[i];

                cmd_q->qcontrol = 0; /* Start with nothing */
                iowrite32(cmd_q->qcontrol, cmd_q->reg_control);

                ioread32(cmd_q->reg_int_status);
                ioread32(cmd_q->reg_status);

                /* Clear the interrupt status */
                iowrite32(SUPPORTED_INTERRUPTS, cmd_q->reg_interrupt_status);
        }

        dev_dbg(dev, "Requesting an IRQ...\n");
        /* Request an irq */
        ret = sp_request_ccp_irq(ccp->sp, ccp5_irq_handler, ccp->name, ccp);
        if (ret) {
                dev_err(dev, "unable to allocate an IRQ\n");
                goto e_pool;
        }
        /* Initialize the ISR tasklet */
        if (ccp->use_tasklet)
                tasklet_init(&ccp->irq_tasklet, ccp5_irq_bh,
                             (unsigned long)ccp);

        dev_dbg(dev, "Loading LSB map...\n");
        /* Copy the private LSB mask to the public registers */
        status_lo = ioread32(ccp->io_regs + LSB_PRIVATE_MASK_LO_OFFSET);
        status_hi = ioread32(ccp->io_regs + LSB_PRIVATE_MASK_HI_OFFSET);
        iowrite32(status_lo, ccp->io_regs + LSB_PUBLIC_MASK_LO_OFFSET);
        iowrite32(status_hi, ccp->io_regs + LSB_PUBLIC_MASK_HI_OFFSET);
        status = ((u64)status_hi<<30) | (u64)status_lo;

        dev_dbg(dev, "Configuring virtual queues...\n");
        /* Configure size of each virtual queue accessible to host */
        for (i = 0; i < ccp->cmd_q_count; i++) {
                u32 dma_addr_lo;
                u32 dma_addr_hi;

                cmd_q = &ccp->cmd_q[i];

                cmd_q->qcontrol &= ~(CMD5_Q_SIZE << CMD5_Q_SHIFT);
                cmd_q->qcontrol |= QUEUE_SIZE_VAL << CMD5_Q_SHIFT;

                cmd_q->qdma_tail = cmd_q->qbase_dma;
                dma_addr_lo = low_address(cmd_q->qdma_tail);
                iowrite32((u32)dma_addr_lo, cmd_q->reg_tail_lo);
                iowrite32((u32)dma_addr_lo, cmd_q->reg_head_lo);

                dma_addr_hi = high_address(cmd_q->qdma_tail);
                cmd_q->qcontrol |= (dma_addr_hi << 16);
                iowrite32(cmd_q->qcontrol, cmd_q->reg_control);

                /* Find the LSB regions accessible to the queue */
                ccp_find_lsb_regions(cmd_q, status);
                cmd_q->lsb = -1; /* Unassigned value */
        }

        dev_dbg(dev, "Assigning LSBs...\n");
        ret = ccp_assign_lsbs(ccp);
        if (ret) {
                dev_err(dev, "Unable to assign LSBs (%d)\n", ret);
                goto e_irq;
        }

        /* Optimization: pre-allocate LSB slots for each queue */
        for (i = 0; i < ccp->cmd_q_count; i++) {
                ccp->cmd_q[i].sb_key = ccp_lsb_alloc(&ccp->cmd_q[i], 2);
                ccp->cmd_q[i].sb_ctx = ccp_lsb_alloc(&ccp->cmd_q[i], 2);
        }

        dev_dbg(dev, "Starting threads...\n");
        /* Create a kthread for each queue */
        for (i = 0; i < ccp->cmd_q_count; i++) {
                struct task_struct *kthread;

                cmd_q = &ccp->cmd_q[i];

                kthread = kthread_create(ccp_cmd_queue_thread, cmd_q,
                                         "%s-q%u", ccp->name, cmd_q->id);
                if (IS_ERR(kthread)) {
                        dev_err(dev, "error creating queue thread (%ld)\n",
                                PTR_ERR(kthread));
                        ret = PTR_ERR(kthread);
                        goto e_kthread;
                }

                cmd_q->kthread = kthread;
                wake_up_process(kthread);
        }

        dev_dbg(dev, "Enabling interrupts...\n");
        ccp5_enable_queue_interrupts(ccp);

        dev_dbg(dev, "Registering device...\n");
        /* Put this on the unit list to make it available */
        ccp_add_device(ccp);

        ret = ccp_register_rng(ccp);
        if (ret)
                goto e_kthread;

        /* Register the DMA engine support */
        ret = ccp_dmaengine_register(ccp);
        if (ret)
                goto e_hwrng;

#ifdef CONFIG_CRYPTO_DEV_CCP_DEBUGFS
        /* Set up debugfs entries */
        ccp5_debugfs_setup(ccp);
#endif

        return 0;

e_hwrng:
        ccp_unregister_rng(ccp);

e_kthread:
        for (i = 0; i < ccp->cmd_q_count; i++)
                if (ccp->cmd_q[i].kthread)
                        kthread_stop(ccp->cmd_q[i].kthread);

e_irq:
        sp_free_ccp_irq(ccp->sp, ccp);

e_pool:
        for (i = 0; i < ccp->cmd_q_count; i++)
                dma_pool_destroy(ccp->cmd_q[i].dma_pool);

        return ret;
}

static void ccp5_destroy(struct ccp_device *ccp)
{
        struct ccp_cmd_queue *cmd_q;
        struct ccp_cmd *cmd;
        unsigned int i;

        /* Unregister the DMA engine */
        ccp_dmaengine_unregister(ccp);

        /* Unregister the RNG */
        ccp_unregister_rng(ccp);

        /* Remove this device from the list of available units first */
        ccp_del_device(ccp);

#ifdef CONFIG_CRYPTO_DEV_CCP_DEBUGFS
        /* We're in the process of tearing down the entire driver;
         * when all the devices are gone clean up debugfs
         */
        if (!ccp_present())
                ccp5_debugfs_destroy();
#endif

        /* Disable and clear interrupts */
        ccp5_disable_queue_interrupts(ccp);
        for (i = 0; i < ccp->cmd_q_count; i++) {
                cmd_q = &ccp->cmd_q[i];

                /* Turn off the run bit */
                iowrite32(cmd_q->qcontrol & ~CMD5_Q_RUN, cmd_q->reg_control);

                /* Clear the interrupt status */
                iowrite32(SUPPORTED_INTERRUPTS, cmd_q->reg_interrupt_status);
                ioread32(cmd_q->reg_int_status);
                ioread32(cmd_q->reg_status);
        }

        /* Stop the queue kthreads */
        for (i = 0; i < ccp->cmd_q_count; i++)
                if (ccp->cmd_q[i].kthread)
                        kthread_stop(ccp->cmd_q[i].kthread);

        sp_free_ccp_irq(ccp->sp, ccp);

        /* Flush the cmd and backlog queue */
        while (!list_empty(&ccp->cmd)) {
                /* Invoke the callback directly with an error code */
                cmd = list_first_entry(&ccp->cmd, struct ccp_cmd, entry);
                list_del(&cmd->entry);
                cmd->callback(cmd->data, -ENODEV);
        }
        while (!list_empty(&ccp->backlog)) {
                /* Invoke the callback directly with an error code */
                cmd = list_first_entry(&ccp->backlog, struct ccp_cmd, entry);
                list_del(&cmd->entry);
                cmd->callback(cmd->data, -ENODEV);
        }
}

static void ccp5_config(struct ccp_device *ccp)
{
        /* Public side */
        iowrite32(0x0, ccp->io_regs + CMD5_REQID_CONFIG_OFFSET);
}

static void ccp5other_config(struct ccp_device *ccp)
{
        int i;
        u32 rnd;

        /* We own all of the queues on the NTB CCP */

        iowrite32(0x00012D57, ccp->io_regs + CMD5_TRNG_CTL_OFFSET);
        iowrite32(0x00000003, ccp->io_regs + CMD5_CONFIG_0_OFFSET);
        for (i = 0; i < 12; i++) {
                rnd = ioread32(ccp->io_regs + TRNG_OUT_REG);
                iowrite32(rnd, ccp->io_regs + CMD5_AES_MASK_OFFSET);
        }

        iowrite32(0x0000001F, ccp->io_regs + CMD5_QUEUE_MASK_OFFSET);
        iowrite32(0x00005B6D, ccp->io_regs + CMD5_QUEUE_PRIO_OFFSET);
        iowrite32(0x00000000, ccp->io_regs + CMD5_CMD_TIMEOUT_OFFSET);

        iowrite32(0x3FFFFFFF, ccp->io_regs + LSB_PRIVATE_MASK_LO_OFFSET);
        iowrite32(0x000003FF, ccp->io_regs + LSB_PRIVATE_MASK_HI_OFFSET);

        iowrite32(0x00108823, ccp->io_regs + CMD5_CLK_GATE_CTL_OFFSET);

        ccp5_config(ccp);
}

/* Version 5 adds some function, but is essentially the same as v3 */
static const struct ccp_actions ccp5_actions = {
        .aes = ccp5_perform_aes,
        .xts_aes = ccp5_perform_xts_aes,
        .sha = ccp5_perform_sha,
        .des3 = ccp5_perform_des3,
        .rsa = ccp5_perform_rsa,
        .passthru = ccp5_perform_passthru,
        .ecc = ccp5_perform_ecc,
        .sballoc = ccp_lsb_alloc,
        .sbfree = ccp_lsb_free,
        .init = ccp5_init,
        .destroy = ccp5_destroy,
        .get_free_slots = ccp5_get_free_slots,
};

const struct ccp_vdata ccpv5a = {
        .version = CCP_VERSION(5, 0),
        .setup = ccp5_config,
        .perform = &ccp5_actions,
        .offset = 0x0,
        .rsamax = CCP5_RSA_MAX_WIDTH,
};

const struct ccp_vdata ccpv5b = {
        .version = CCP_VERSION(5, 0),
        .dma_chan_attr = DMA_PRIVATE,
        .setup = ccp5other_config,
        .perform = &ccp5_actions,
        .offset = 0x0,
        .rsamax = CCP5_RSA_MAX_WIDTH,
};
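
/*
 * Editor's note: nothing in this file references ccpv5a/ccpv5b
 * directly; the bus glue (e.g. sp-pci.c in this directory) selects one
 * per device, and the common CCP code dispatches through ->perform.
 */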