linux/drivers/crypto/ccp/ccp-dev-v3.c
/*
 * AMD Cryptographic Coprocessor (CCP) driver
 *
 * Copyright (C) 2013,2016 Advanced Micro Devices, Inc.
 *
 * Author: Tom Lendacky <thomas.lendacky@amd.com>
 * Author: Gary R Hook <gary.hook@amd.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/kthread.h>
#include <linux/interrupt.h>
#include <linux/ccp.h>

#include "ccp-dev.h"

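/*
 * ccp_alloc_ksb - reserve 'count' contiguous entries in the device's
 * key storage block (KSB) bitmap.  Sleeps until enough entries are
 * free and returns the first entry index biased by KSB_START, or 0 if
 * the wait was interrupted by a signal.
 */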
static u32 ccp_alloc_ksb(struct ccp_cmd_queue *cmd_q, unsigned int count)
{
        int start;
        struct ccp_device *ccp = cmd_q->ccp;

        for (;;) {
                mutex_lock(&ccp->sb_mutex);

                start = (u32)bitmap_find_next_zero_area(ccp->sb,
                                                        ccp->sb_count,
                                                        ccp->sb_start,
                                                        count, 0);
                if (start <= ccp->sb_count) {
                        bitmap_set(ccp->sb, start, count);

                        mutex_unlock(&ccp->sb_mutex);
                        break;
                }

                ccp->sb_avail = 0;

                mutex_unlock(&ccp->sb_mutex);

                /* Wait for KSB entries to become available */
                if (wait_event_interruptible(ccp->sb_queue, ccp->sb_avail))
                        return 0;
        }

        return KSB_START + start;
}

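/*
 * ccp_free_ksb - release KSB entries reserved by ccp_alloc_ksb() and
 * wake any allocators waiting for free entries.  A 'start' of 0 (a
 * failed allocation) is ignored.
 */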
static void ccp_free_ksb(struct ccp_cmd_queue *cmd_q, unsigned int start,
                         unsigned int count)
{
        struct ccp_device *ccp = cmd_q->ccp;

        if (!start)
                return;

        mutex_lock(&ccp->sb_mutex);

        bitmap_clear(ccp->sb, start - KSB_START, count);

        ccp->sb_avail = 1;

        mutex_unlock(&ccp->sb_mutex);

        wake_up_interruptible_all(&ccp->sb_queue);
}

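/* Read the queue status register and return the free command slot count. */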
static unsigned int ccp_get_free_slots(struct ccp_cmd_queue *cmd_q)
{
        return CMD_Q_DEPTH(ioread32(cmd_q->reg_status));
}

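/*
 * ccp_do_cmd - submit one operation through the shared CMD_REQ
 * registers.  The CMD_REQ1..CMD_REQx values in 'cr' are written under
 * the request mutex and the command is kicked off by writing CMD_REQ0.
 * When an interrupt on completion was requested, wait for it; on an
 * error or a signal, delete the related jobs from the queue and return
 * a negative error code.
 */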
static int ccp_do_cmd(struct ccp_op *op, u32 *cr, unsigned int cr_count)
{
        struct ccp_cmd_queue *cmd_q = op->cmd_q;
        struct ccp_device *ccp = cmd_q->ccp;
        void __iomem *cr_addr;
        u32 cr0, cmd;
        unsigned int i;
        int ret = 0;

        /* We could read a status register to see how many free slots
         * are actually available, but reading that register resets it
         * and you could lose some error information.
         */
        cmd_q->free_slots--;

        cr0 = (cmd_q->id << REQ0_CMD_Q_SHIFT)
              | (op->jobid << REQ0_JOBID_SHIFT)
              | REQ0_WAIT_FOR_WRITE;

        if (op->soc)
                cr0 |= REQ0_STOP_ON_COMPLETE
                       | REQ0_INT_ON_COMPLETE;

        if (op->ioc || !cmd_q->free_slots)
                cr0 |= REQ0_INT_ON_COMPLETE;

        /* Start at CMD_REQ1 */
        cr_addr = ccp->io_regs + CMD_REQ0 + CMD_REQ_INCR;

        mutex_lock(&ccp->req_mutex);

        /* Write CMD_REQ1 through CMD_REQx first */
        for (i = 0; i < cr_count; i++, cr_addr += CMD_REQ_INCR)
                iowrite32(*(cr + i), cr_addr);

        /* Tell the CCP to start */
        wmb();
        iowrite32(cr0, ccp->io_regs + CMD_REQ0);

        mutex_unlock(&ccp->req_mutex);

        if (cr0 & REQ0_INT_ON_COMPLETE) {
                /* Wait for the job to complete */
                ret = wait_event_interruptible(cmd_q->int_queue,
                                               cmd_q->int_rcvd);
                if (ret || cmd_q->cmd_error) {
                        /* On error delete all related jobs from the queue */
                        cmd = (cmd_q->id << DEL_Q_ID_SHIFT)
                              | op->jobid;
                        if (cmd_q->cmd_error)
                                ccp_log_error(cmd_q->ccp,
                                              cmd_q->cmd_error);

                        iowrite32(cmd, ccp->io_regs + DEL_CMD_Q_JOB);

                        if (!ret)
                                ret = -EIO;
                } else if (op->soc) {
                        /* Delete just head job from the queue on SoC */
                        cmd = DEL_Q_ACTIVE
                              | (cmd_q->id << DEL_Q_ID_SHIFT)
                              | op->jobid;

                        iowrite32(cmd, ccp->io_regs + DEL_CMD_Q_JOB);
                }

                cmd_q->free_slots = CMD_Q_DEPTH(cmd_q->q_status);

                cmd_q->int_rcvd = 0;
        }

        return ret;
}

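/*
 * The ccp_perform_*() helpers below each fill out the six CMD_REQ
 * register values (REQ1 through REQ6) describing one engine operation
 * and submit them via ccp_do_cmd().
 */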
static int ccp_perform_aes(struct ccp_op *op)
{
        u32 cr[6];

        /* Fill out the register contents for REQ1 through REQ6 */
        cr[0] = (CCP_ENGINE_AES << REQ1_ENGINE_SHIFT)
                | (op->u.aes.type << REQ1_AES_TYPE_SHIFT)
                | (op->u.aes.mode << REQ1_AES_MODE_SHIFT)
                | (op->u.aes.action << REQ1_AES_ACTION_SHIFT)
                | (op->sb_key << REQ1_KEY_KSB_SHIFT);
        cr[1] = op->src.u.dma.length - 1;
        cr[2] = ccp_addr_lo(&op->src.u.dma);
        cr[3] = (op->sb_ctx << REQ4_KSB_SHIFT)
                | (CCP_MEMTYPE_SYSTEM << REQ4_MEMTYPE_SHIFT)
                | ccp_addr_hi(&op->src.u.dma);
        cr[4] = ccp_addr_lo(&op->dst.u.dma);
        cr[5] = (CCP_MEMTYPE_SYSTEM << REQ6_MEMTYPE_SHIFT)
                | ccp_addr_hi(&op->dst.u.dma);

        if (op->u.aes.mode == CCP_AES_MODE_CFB)
                cr[0] |= ((0x7f) << REQ1_AES_CFB_SIZE_SHIFT);

        if (op->eom)
                cr[0] |= REQ1_EOM;

        if (op->init)
                cr[0] |= REQ1_INIT;

        return ccp_do_cmd(op, cr, ARRAY_SIZE(cr));
}

static int ccp_perform_xts_aes(struct ccp_op *op)
{
        u32 cr[6];

        /* Fill out the register contents for REQ1 through REQ6 */
        cr[0] = (CCP_ENGINE_XTS_AES_128 << REQ1_ENGINE_SHIFT)
                | (op->u.xts.action << REQ1_AES_ACTION_SHIFT)
                | (op->u.xts.unit_size << REQ1_XTS_AES_SIZE_SHIFT)
                | (op->sb_key << REQ1_KEY_KSB_SHIFT);
        cr[1] = op->src.u.dma.length - 1;
        cr[2] = ccp_addr_lo(&op->src.u.dma);
        cr[3] = (op->sb_ctx << REQ4_KSB_SHIFT)
                | (CCP_MEMTYPE_SYSTEM << REQ4_MEMTYPE_SHIFT)
                | ccp_addr_hi(&op->src.u.dma);
        cr[4] = ccp_addr_lo(&op->dst.u.dma);
        cr[5] = (CCP_MEMTYPE_SYSTEM << REQ6_MEMTYPE_SHIFT)
                | ccp_addr_hi(&op->dst.u.dma);

        if (op->eom)
                cr[0] |= REQ1_EOM;

        if (op->init)
                cr[0] |= REQ1_INIT;

        return ccp_do_cmd(op, cr, ARRAY_SIZE(cr));
}

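/*
 * SHA: the total message length in bits (REQ5/REQ6) is supplied only
 * on the final pass, when the end-of-message flag is set.
 */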
static int ccp_perform_sha(struct ccp_op *op)
{
        u32 cr[6];

        /* Fill out the register contents for REQ1 through REQ6 */
        cr[0] = (CCP_ENGINE_SHA << REQ1_ENGINE_SHIFT)
                | (op->u.sha.type << REQ1_SHA_TYPE_SHIFT)
                | REQ1_INIT;
        cr[1] = op->src.u.dma.length - 1;
        cr[2] = ccp_addr_lo(&op->src.u.dma);
        cr[3] = (op->sb_ctx << REQ4_KSB_SHIFT)
                | (CCP_MEMTYPE_SYSTEM << REQ4_MEMTYPE_SHIFT)
                | ccp_addr_hi(&op->src.u.dma);

        if (op->eom) {
                cr[0] |= REQ1_EOM;
                cr[4] = lower_32_bits(op->u.sha.msg_bits);
                cr[5] = upper_32_bits(op->u.sha.msg_bits);
        } else {
                cr[4] = 0;
                cr[5] = 0;
        }

        return ccp_do_cmd(op, cr, ARRAY_SIZE(cr));
}

static int ccp_perform_rsa(struct ccp_op *op)
{
        u32 cr[6];

        /* Fill out the register contents for REQ1 through REQ6 */
        cr[0] = (CCP_ENGINE_RSA << REQ1_ENGINE_SHIFT)
                | (op->u.rsa.mod_size << REQ1_RSA_MOD_SIZE_SHIFT)
                | (op->sb_key << REQ1_KEY_KSB_SHIFT)
                | REQ1_EOM;
        cr[1] = op->u.rsa.input_len - 1;
        cr[2] = ccp_addr_lo(&op->src.u.dma);
        cr[3] = (op->sb_ctx << REQ4_KSB_SHIFT)
                | (CCP_MEMTYPE_SYSTEM << REQ4_MEMTYPE_SHIFT)
                | ccp_addr_hi(&op->src.u.dma);
        cr[4] = ccp_addr_lo(&op->dst.u.dma);
        cr[5] = (CCP_MEMTYPE_SYSTEM << REQ6_MEMTYPE_SHIFT)
                | ccp_addr_hi(&op->dst.u.dma);

        return ccp_do_cmd(op, cr, ARRAY_SIZE(cr));
}

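/*
 * Pass-through: copy data, optionally applying a byte swap or bit
 * mask; the source and destination may each be system memory or the
 * local storage block.
 */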
static int ccp_perform_passthru(struct ccp_op *op)
{
        u32 cr[6];

        /* Fill out the register contents for REQ1 through REQ6 */
        cr[0] = (CCP_ENGINE_PASSTHRU << REQ1_ENGINE_SHIFT)
                | (op->u.passthru.bit_mod << REQ1_PT_BW_SHIFT)
                | (op->u.passthru.byte_swap << REQ1_PT_BS_SHIFT);

        if (op->src.type == CCP_MEMTYPE_SYSTEM)
                cr[1] = op->src.u.dma.length - 1;
        else
                cr[1] = op->dst.u.dma.length - 1;

        if (op->src.type == CCP_MEMTYPE_SYSTEM) {
                cr[2] = ccp_addr_lo(&op->src.u.dma);
                cr[3] = (CCP_MEMTYPE_SYSTEM << REQ4_MEMTYPE_SHIFT)
                        | ccp_addr_hi(&op->src.u.dma);

                if (op->u.passthru.bit_mod != CCP_PASSTHRU_BITWISE_NOOP)
                        cr[3] |= (op->sb_key << REQ4_KSB_SHIFT);
        } else {
                cr[2] = op->src.u.sb * CCP_SB_BYTES;
                cr[3] = (CCP_MEMTYPE_SB << REQ4_MEMTYPE_SHIFT);
        }

        if (op->dst.type == CCP_MEMTYPE_SYSTEM) {
                cr[4] = ccp_addr_lo(&op->dst.u.dma);
                cr[5] = (CCP_MEMTYPE_SYSTEM << REQ6_MEMTYPE_SHIFT)
                        | ccp_addr_hi(&op->dst.u.dma);
        } else {
                cr[4] = op->dst.u.sb * CCP_SB_BYTES;
                cr[5] = (CCP_MEMTYPE_SB << REQ6_MEMTYPE_SHIFT);
        }

        if (op->eom)
                cr[0] |= REQ1_EOM;

        return ccp_do_cmd(op, cr, ARRAY_SIZE(cr));
}

static int ccp_perform_ecc(struct ccp_op *op)
{
        u32 cr[6];

        /* Fill out the register contents for REQ1 through REQ6 */
        cr[0] = REQ1_ECC_AFFINE_CONVERT
                | (CCP_ENGINE_ECC << REQ1_ENGINE_SHIFT)
                | (op->u.ecc.function << REQ1_ECC_FUNCTION_SHIFT)
                | REQ1_EOM;
        cr[1] = op->src.u.dma.length - 1;
        cr[2] = ccp_addr_lo(&op->src.u.dma);
        cr[3] = (CCP_MEMTYPE_SYSTEM << REQ4_MEMTYPE_SHIFT)
                | ccp_addr_hi(&op->src.u.dma);
        cr[4] = ccp_addr_lo(&op->dst.u.dma);
        cr[5] = (CCP_MEMTYPE_SYSTEM << REQ6_MEMTYPE_SHIFT)
                | ccp_addr_hi(&op->dst.u.dma);

        return ccp_do_cmd(op, cr, ARRAY_SIZE(cr));
}

static void ccp_disable_queue_interrupts(struct ccp_device *ccp)
{
        iowrite32(0x00, ccp->io_regs + IRQ_MASK_REG);
}

static void ccp_enable_queue_interrupts(struct ccp_device *ccp)
{
        iowrite32(ccp->qim, ccp->io_regs + IRQ_MASK_REG);
}

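/*
 * Interrupt bottom half: for each queue that raised an interrupt,
 * capture the queue status (recording only the first error seen),
 * acknowledge the interrupt and wake the waiter, then re-enable the
 * queue interrupts.
 */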
static void ccp_irq_bh(unsigned long data)
{
        struct ccp_device *ccp = (struct ccp_device *)data;
        struct ccp_cmd_queue *cmd_q;
        u32 q_int, status;
        unsigned int i;

        status = ioread32(ccp->io_regs + IRQ_STATUS_REG);

        for (i = 0; i < ccp->cmd_q_count; i++) {
                cmd_q = &ccp->cmd_q[i];

                q_int = status & (cmd_q->int_ok | cmd_q->int_err);
                if (q_int) {
                        cmd_q->int_status = status;
                        cmd_q->q_status = ioread32(cmd_q->reg_status);
                        cmd_q->q_int_status = ioread32(cmd_q->reg_int_status);

                        /* On error, only save the first error value */
                        if ((q_int & cmd_q->int_err) && !cmd_q->cmd_error)
                                cmd_q->cmd_error = CMD_Q_ERROR(cmd_q->q_status);

                        cmd_q->int_rcvd = 1;

                        /* Acknowledge the interrupt and wake the kthread */
                        iowrite32(q_int, ccp->io_regs + IRQ_STATUS_REG);
                        wake_up_interruptible(&cmd_q->int_queue);
                }
        }
        ccp_enable_queue_interrupts(ccp);
}

static irqreturn_t ccp_irq_handler(int irq, void *data)
{
        struct device *dev = data;
        struct ccp_device *ccp = dev_get_drvdata(dev);

        ccp_disable_queue_interrupts(ccp);
        if (ccp->use_tasklet)
                tasklet_schedule(&ccp->irq_tasklet);
        else
                ccp_irq_bh((unsigned long)ccp);

        return IRQ_HANDLED;
}

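/*
 * ccp_init - bring up a v3 CCP: discover the available command queues,
 * set up their DMA pools, KSB reservations and register addresses,
 * request the IRQ, start one kthread per queue, then enable interrupts
 * and register the device along with its hwrng and DMA engine support.
 */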
static int ccp_init(struct ccp_device *ccp)
{
        struct device *dev = ccp->dev;
        struct ccp_cmd_queue *cmd_q;
        struct dma_pool *dma_pool;
        char dma_pool_name[MAX_DMAPOOL_NAME_LEN];
        unsigned int qmr, i;
        int ret;

        /* Find available queues */
        ccp->qim = 0;
        qmr = ioread32(ccp->io_regs + Q_MASK_REG);
        for (i = 0; i < MAX_HW_QUEUES; i++) {
                if (!(qmr & (1 << i)))
                        continue;

                /* Allocate a dma pool for this queue */
                snprintf(dma_pool_name, sizeof(dma_pool_name), "%s_q%d",
                         ccp->name, i);
                dma_pool = dma_pool_create(dma_pool_name, dev,
                                           CCP_DMAPOOL_MAX_SIZE,
                                           CCP_DMAPOOL_ALIGN, 0);
                if (!dma_pool) {
                        dev_err(dev, "unable to allocate dma pool\n");
                        ret = -ENOMEM;
                        goto e_pool;
                }

                cmd_q = &ccp->cmd_q[ccp->cmd_q_count];
                ccp->cmd_q_count++;

                cmd_q->ccp = ccp;
                cmd_q->id = i;
                cmd_q->dma_pool = dma_pool;

                /* Reserve 2 KSB regions for the queue */
                cmd_q->sb_key = KSB_START + ccp->sb_start++;
                cmd_q->sb_ctx = KSB_START + ccp->sb_start++;
                ccp->sb_count -= 2;

                /* Preset some register values and masks that are queue
                 * number dependent
                 */
                cmd_q->reg_status = ccp->io_regs + CMD_Q_STATUS_BASE +
                                    (CMD_Q_STATUS_INCR * i);
                cmd_q->reg_int_status = ccp->io_regs + CMD_Q_INT_STATUS_BASE +
                                        (CMD_Q_STATUS_INCR * i);
                cmd_q->int_ok = 1 << (i * 2);
                cmd_q->int_err = 1 << ((i * 2) + 1);

                cmd_q->free_slots = ccp_get_free_slots(cmd_q);

                init_waitqueue_head(&cmd_q->int_queue);

                /* Build queue interrupt mask (two interrupts per queue) */
                ccp->qim |= cmd_q->int_ok | cmd_q->int_err;

#ifdef CONFIG_ARM64
                /* For arm64 set the recommended queue cache settings */
                iowrite32(ccp->axcache, ccp->io_regs + CMD_Q_CACHE_BASE +
                          (CMD_Q_CACHE_INC * i));
#endif

                dev_dbg(dev, "queue #%u available\n", i);
        }
        if (ccp->cmd_q_count == 0) {
                dev_notice(dev, "no command queues available\n");
                ret = -EIO;
                goto e_pool;
        }
        dev_notice(dev, "%u command queues available\n", ccp->cmd_q_count);

        /* Disable and clear interrupts until ready */
        ccp_disable_queue_interrupts(ccp);
        for (i = 0; i < ccp->cmd_q_count; i++) {
                cmd_q = &ccp->cmd_q[i];

                ioread32(cmd_q->reg_int_status);
                ioread32(cmd_q->reg_status);
        }
        iowrite32(ccp->qim, ccp->io_regs + IRQ_STATUS_REG);

        /* Request an irq */
        ret = ccp->get_irq(ccp);
        if (ret) {
                dev_err(dev, "unable to allocate an IRQ\n");
                goto e_pool;
        }

        /* Initialize the ISR tasklet? */
        if (ccp->use_tasklet)
                tasklet_init(&ccp->irq_tasklet, ccp_irq_bh,
                             (unsigned long)ccp);

        dev_dbg(dev, "Starting threads...\n");
        /* Create a kthread for each queue */
        for (i = 0; i < ccp->cmd_q_count; i++) {
                struct task_struct *kthread;

                cmd_q = &ccp->cmd_q[i];

                kthread = kthread_create(ccp_cmd_queue_thread, cmd_q,
                                         "%s-q%u", ccp->name, cmd_q->id);
                if (IS_ERR(kthread)) {
                        dev_err(dev, "error creating queue thread (%ld)\n",
                                PTR_ERR(kthread));
                        ret = PTR_ERR(kthread);
                        goto e_kthread;
                }

                cmd_q->kthread = kthread;
                wake_up_process(kthread);
        }

        dev_dbg(dev, "Enabling interrupts...\n");
        /* Enable interrupts */
        ccp_enable_queue_interrupts(ccp);

        dev_dbg(dev, "Registering device...\n");
        ccp_add_device(ccp);

        ret = ccp_register_rng(ccp);
        if (ret)
                goto e_kthread;

        /* Register the DMA engine support */
        ret = ccp_dmaengine_register(ccp);
        if (ret)
                goto e_hwrng;

        return 0;

e_hwrng:
        ccp_unregister_rng(ccp);

e_kthread:
        for (i = 0; i < ccp->cmd_q_count; i++)
                if (ccp->cmd_q[i].kthread)
                        kthread_stop(ccp->cmd_q[i].kthread);

        ccp->free_irq(ccp);

e_pool:
        for (i = 0; i < ccp->cmd_q_count; i++)
                dma_pool_destroy(ccp->cmd_q[i].dma_pool);

        return ret;
}

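/*
 * ccp_destroy - tear down what ccp_init() set up and complete any
 * commands still on the cmd or backlog lists with -ENODEV.
 */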
static void ccp_destroy(struct ccp_device *ccp)
{
        struct ccp_cmd_queue *cmd_q;
        struct ccp_cmd *cmd;
        unsigned int i;

        /* Unregister the DMA engine */
        ccp_dmaengine_unregister(ccp);

        /* Unregister the RNG */
        ccp_unregister_rng(ccp);

        /* Remove this device from the list of available units */
        ccp_del_device(ccp);

        /* Disable and clear interrupts */
        ccp_disable_queue_interrupts(ccp);
        for (i = 0; i < ccp->cmd_q_count; i++) {
                cmd_q = &ccp->cmd_q[i];

                ioread32(cmd_q->reg_int_status);
                ioread32(cmd_q->reg_status);
        }
        iowrite32(ccp->qim, ccp->io_regs + IRQ_STATUS_REG);

        /* Stop the queue kthreads */
        for (i = 0; i < ccp->cmd_q_count; i++)
                if (ccp->cmd_q[i].kthread)
                        kthread_stop(ccp->cmd_q[i].kthread);

        ccp->free_irq(ccp);

        for (i = 0; i < ccp->cmd_q_count; i++)
                dma_pool_destroy(ccp->cmd_q[i].dma_pool);

        /* Flush the cmd and backlog queue */
        while (!list_empty(&ccp->cmd)) {
                /* Invoke the callback directly with an error code */
                cmd = list_first_entry(&ccp->cmd, struct ccp_cmd, entry);
                list_del(&cmd->entry);
                cmd->callback(cmd->data, -ENODEV);
        }
        while (!list_empty(&ccp->backlog)) {
                /* Invoke the callback directly with an error code */
                cmd = list_first_entry(&ccp->backlog, struct ccp_cmd, entry);
                list_del(&cmd->entry);
                cmd->callback(cmd->data, -ENODEV);
        }
}

static const struct ccp_actions ccp3_actions = {
        .aes = ccp_perform_aes,
        .xts_aes = ccp_perform_xts_aes,
        .des3 = NULL,
        .sha = ccp_perform_sha,
        .rsa = ccp_perform_rsa,
        .passthru = ccp_perform_passthru,
        .ecc = ccp_perform_ecc,
        .sballoc = ccp_alloc_ksb,
        .sbfree = ccp_free_ksb,
        .init = ccp_init,
        .destroy = ccp_destroy,
        .get_free_slots = ccp_get_free_slots,
        .irqhandler = ccp_irq_handler,
};

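/*
 * Device data for version 3.0 CCPs: registers are in BAR 2 at offset
 * 0x20000, and the action table above supplies the engine callbacks
 * (.des3 is left NULL since this action table has no 3DES handler).
 */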
const struct ccp_vdata ccpv3 = {
        .version = CCP_VERSION(3, 0),
        .setup = NULL,
        .perform = &ccp3_actions,
        .bar = 2,
        .offset = 0x20000,
};