linux/drivers/crypto/ccp/ccp-dev.h
/*
 * AMD Cryptographic Coprocessor (CCP) driver
 *
 * Copyright (C) 2013,2016 Advanced Micro Devices, Inc.
 *
 * Author: Tom Lendacky <thomas.lendacky@amd.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#ifndef __CCP_DEV_H__
#define __CCP_DEV_H__

#include <linux/device.h>
#include <linux/pci.h>
#include <linux/spinlock.h>
#include <linux/mutex.h>
#include <linux/list.h>
#include <linux/wait.h>
#include <linux/dmapool.h>
#include <linux/hw_random.h>
#include <linux/bitops.h>
#include <linux/interrupt.h>
#include <linux/irqreturn.h>
#include <linux/dmaengine.h>

#define MAX_CCP_NAME_LEN		16
#define MAX_DMAPOOL_NAME_LEN		32

#define MAX_HW_QUEUES			5
#define MAX_CMD_QLEN			100

#define TRNG_RETRIES			10

#define CACHE_NONE			0x00
#define CACHE_WB_NO_ALLOC		0xb7

/****** Register Mappings ******/
#define Q_MASK_REG			0x000
#define TRNG_OUT_REG			0x00c
#define IRQ_MASK_REG			0x040
#define IRQ_STATUS_REG			0x200

#define DEL_CMD_Q_JOB			0x124
#define DEL_Q_ACTIVE			0x00000200
#define DEL_Q_ID_SHIFT			6

#define CMD_REQ0			0x180
#define CMD_REQ_INCR			0x04

#define CMD_Q_STATUS_BASE		0x210
#define CMD_Q_INT_STATUS_BASE		0x214
#define CMD_Q_STATUS_INCR		0x20

#define CMD_Q_CACHE_BASE		0x228
#define CMD_Q_CACHE_INC			0x20

#define CMD_Q_ERROR(__qs)		((__qs) & 0x0000003f)
#define CMD_Q_DEPTH(__qs)		(((__qs) >> 12) & 0x0000000f)

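/*
 * Illustrative sketch (not part of the upstream header): how the status
 * decode macros above could be applied to a raw CMD_Q_STATUS word read
 * from a queue's status register.  The helper name and the ioread32()
 * call site are assumptions made for the example only.
 */
#if 0
static inline unsigned int ccp_example_q_depth(void __iomem *reg_status)
{
	u32 qs = ioread32(reg_status);	/* raw CMD_Q_STATUS value */

	if (CMD_Q_ERROR(qs))		/* low 6 bits carry an error code */
		return 0;

	return CMD_Q_DEPTH(qs);		/* bits 12-15 report the queue depth */
}
#endif
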
/****** REQ0 Related Values ******/
#define REQ0_WAIT_FOR_WRITE		0x00000004
#define REQ0_INT_ON_COMPLETE		0x00000002
#define REQ0_STOP_ON_COMPLETE		0x00000001

#define REQ0_CMD_Q_SHIFT		9
#define REQ0_JOBID_SHIFT		3

/****** REQ1 Related Values ******/
#define REQ1_PROTECT_SHIFT		27
#define REQ1_ENGINE_SHIFT		23
#define REQ1_KEY_KSB_SHIFT		2

#define REQ1_EOM			0x00000002
#define REQ1_INIT			0x00000001

/* AES Related Values */
#define REQ1_AES_TYPE_SHIFT		21
#define REQ1_AES_MODE_SHIFT		18
#define REQ1_AES_ACTION_SHIFT		17
#define REQ1_AES_CFB_SIZE_SHIFT		10

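/*
 * Illustrative sketch (not part of the upstream header): how the REQ1
 * shift values above might be combined into a single control word for an
 * AES operation.  CCP_ENGINE_AES comes from <linux/ccp.h>; the helper and
 * parameter names are assumptions made for the example only.
 */
#if 0
static inline u32 ccp_example_aes_req1(u32 type, u32 mode, u32 action,
				       u32 key_ksb)
{
	return (CCP_ENGINE_AES << REQ1_ENGINE_SHIFT)	/* engine select */
	       | (type << REQ1_AES_TYPE_SHIFT)		/* AES key size */
	       | (mode << REQ1_AES_MODE_SHIFT)		/* ECB/CBC/... */
	       | (action << REQ1_AES_ACTION_SHIFT)	/* encrypt/decrypt */
	       | (key_ksb << REQ1_KEY_KSB_SHIFT);	/* KSB slot holding the key */
}
#endif
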
/* XTS-AES Related Values */
#define REQ1_XTS_AES_SIZE_SHIFT		10

/* SHA Related Values */
#define REQ1_SHA_TYPE_SHIFT		21

/* RSA Related Values */
#define REQ1_RSA_MOD_SIZE_SHIFT		10

/* Pass-Through Related Values */
#define REQ1_PT_BW_SHIFT		12
#define REQ1_PT_BS_SHIFT		10

/* ECC Related Values */
#define REQ1_ECC_AFFINE_CONVERT		0x00200000
#define REQ1_ECC_FUNCTION_SHIFT		18

/****** REQ4 Related Values ******/
#define REQ4_KSB_SHIFT			18
#define REQ4_MEMTYPE_SHIFT		16

/****** REQ6 Related Values ******/
#define REQ6_MEMTYPE_SHIFT		16

/****** Key Storage Block ******/
#define KSB_START			77
#define KSB_END				127
#define KSB_COUNT			(KSB_END - KSB_START + 1)
#define CCP_KSB_BITS			256
#define CCP_KSB_BYTES			32

#define CCP_JOBID_MASK			0x0000003f

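/*
 * Illustrative sketch (not part of the upstream header): job-ids handed to
 * the hardware fit in the low six bits, so a free-running counter can be
 * masked with CCP_JOBID_MASK.  The helper name is an assumption made for
 * the example only.
 */
#if 0
static inline u32 ccp_example_gen_jobid(atomic_t *counter)
{
	return atomic_inc_return(counter) & CCP_JOBID_MASK;	/* 0..63 */
}
#endif
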
#define CCP_DMAPOOL_MAX_SIZE		64
#define CCP_DMAPOOL_ALIGN		BIT(5)

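/*
 * Illustrative sketch (not part of the upstream header): the two defines
 * above describe a DMA pool of small, 32-byte aligned buffers.  A pool
 * with those properties could be created as shown; the pool name and the
 * helper name are assumptions made for the example only.
 */
#if 0
static inline struct dma_pool *ccp_example_create_pool(struct device *dev)
{
	/* 64-byte blocks, aligned to CCP_DMAPOOL_ALIGN (BIT(5) == 32) */
	return dma_pool_create("ccp-example-pool", dev,
			       CCP_DMAPOOL_MAX_SIZE, CCP_DMAPOOL_ALIGN, 0);
}
#endif
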
#define CCP_REVERSE_BUF_SIZE		64

#define CCP_AES_KEY_KSB_COUNT		1
#define CCP_AES_CTX_KSB_COUNT		1

#define CCP_XTS_AES_KEY_KSB_COUNT	1
#define CCP_XTS_AES_CTX_KSB_COUNT	1

#define CCP_SHA_KSB_COUNT		1

#define CCP_RSA_MAX_WIDTH		4096

#define CCP_PASSTHRU_BLOCKSIZE		256
#define CCP_PASSTHRU_MASKSIZE		32
#define CCP_PASSTHRU_KSB_COUNT		1

#define CCP_ECC_MODULUS_BYTES		48	/* 384-bits */
#define CCP_ECC_MAX_OPERANDS		6
#define CCP_ECC_MAX_OUTPUTS		3
#define CCP_ECC_SRC_BUF_SIZE		448
#define CCP_ECC_DST_BUF_SIZE		192
#define CCP_ECC_OPERAND_SIZE		64
#define CCP_ECC_OUTPUT_SIZE		64
#define CCP_ECC_RESULT_OFFSET		60
#define CCP_ECC_RESULT_SUCCESS		0x0001

struct ccp_op;

/* Structure for computation functions that are device-specific */
struct ccp_actions {
	int (*perform_aes)(struct ccp_op *);
	int (*perform_xts_aes)(struct ccp_op *);
	int (*perform_sha)(struct ccp_op *);
	int (*perform_rsa)(struct ccp_op *);
	int (*perform_passthru)(struct ccp_op *);
	int (*perform_ecc)(struct ccp_op *);
	int (*init)(struct ccp_device *);
	void (*destroy)(struct ccp_device *);
	irqreturn_t (*irqhandler)(int, void *);
};

/* Structure to hold CCP version-specific values */
struct ccp_vdata {
	unsigned int version;
	const struct ccp_actions *perform;
};

extern struct ccp_vdata ccpv3;

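/*
 * Illustrative sketch (not part of the upstream header): the vdata table
 * lets common code stay version-agnostic by calling through the per-version
 * ccp_actions pointers; ccpv3 declared above is the v3 table.  The helper
 * name is an assumption made for the example only.
 */
#if 0
static inline int ccp_example_dispatch_aes(const struct ccp_vdata *vdata,
					   struct ccp_op *op)
{
	/* e.g. vdata == &ccpv3 selects the CCP v3 implementation */
	return vdata->perform->perform_aes(op);
}
#endif
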
struct ccp_device;
struct ccp_cmd;

struct ccp_dma_cmd {
	struct list_head entry;

	struct ccp_cmd ccp_cmd;
};

struct ccp_dma_desc {
	struct list_head entry;

	struct ccp_device *ccp;

	struct list_head pending;
	struct list_head active;

	enum dma_status status;
	struct dma_async_tx_descriptor tx_desc;
	size_t len;
};

struct ccp_dma_chan {
	struct ccp_device *ccp;

	spinlock_t lock;
	struct list_head pending;
	struct list_head active;
	struct list_head complete;

	struct tasklet_struct cleanup_tasklet;

	enum dma_status status;
	struct dma_chan dma_chan;
};

struct ccp_cmd_queue {
	struct ccp_device *ccp;

	/* Queue identifier */
	u32 id;

	/* Queue dma pool */
	struct dma_pool *dma_pool;

	/* Queue reserved KSB regions */
	u32 ksb_key;
	u32 ksb_ctx;

	/* Queue processing thread */
	struct task_struct *kthread;
	unsigned int active;
	unsigned int suspended;

	/* Number of free command slots available */
	unsigned int free_slots;

	/* Interrupt masks */
	u32 int_ok;
	u32 int_err;

	/* Register addresses for queue */
	void __iomem *reg_status;
	void __iomem *reg_int_status;

	/* Status values from job */
	u32 int_status;
	u32 q_status;
	u32 q_int_status;
	u32 cmd_error;

	/* Interrupt wait queue */
	wait_queue_head_t int_queue;
	unsigned int int_rcvd;
} ____cacheline_aligned;

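/*
 * Illustrative sketch (not part of the upstream header): the per-queue
 * reg_status/reg_int_status pointers could be derived from the mapped
 * register base and the queue id using the CMD_Q_STATUS_BASE,
 * CMD_Q_INT_STATUS_BASE and CMD_Q_STATUS_INCR values defined earlier.
 * The helper name is an assumption made for the example only.
 */
#if 0
static inline void ccp_example_map_queue_regs(struct ccp_cmd_queue *cmd_q,
					      void __iomem *io_regs)
{
	cmd_q->reg_status = io_regs + CMD_Q_STATUS_BASE +
			    cmd_q->id * CMD_Q_STATUS_INCR;
	cmd_q->reg_int_status = io_regs + CMD_Q_INT_STATUS_BASE +
				cmd_q->id * CMD_Q_STATUS_INCR;
}
#endif
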
struct ccp_device {
	struct list_head entry;

	struct ccp_vdata *vdata;
	unsigned int ord;
	char name[MAX_CCP_NAME_LEN];
	char rngname[MAX_CCP_NAME_LEN];

	struct device *dev;

	/*
	 * Bus specific device information
	 */
	void *dev_specific;
	int (*get_irq)(struct ccp_device *ccp);
	void (*free_irq)(struct ccp_device *ccp);
	unsigned int irq;

	/*
	 * I/O area used for device communication. The register mapping
	 * starts at an offset into the mapped bar.
	 *   The CMD_REQx registers and the Delete_Cmd_Queue_Job register
	 *   need to be protected while a command queue thread is accessing
	 *   them.
	 */
	struct mutex req_mutex ____cacheline_aligned;
	void __iomem *io_map;
	void __iomem *io_regs;

	/*
	 * Master lists that all cmds are queued on. Because there can be
	 * more than one CCP command queue that can process a cmd, a separate
	 * backlog list is needed so that the backlog completion call
	 * completes before the cmd is available for execution.
	 */
	spinlock_t cmd_lock ____cacheline_aligned;
	unsigned int cmd_count;
	struct list_head cmd;
	struct list_head backlog;

	/*
	 * The command queues. These represent the queues available on the
	 * CCP for processing cmds.
	 */
	struct ccp_cmd_queue cmd_q[MAX_HW_QUEUES];
	unsigned int cmd_q_count;

	/*
	 * Support for the CCP True RNG
	 */
	struct hwrng hwrng;
	unsigned int hwrng_retries;

	/*
	 * Support for the CCP DMA capabilities
	 */
	struct dma_device dma_dev;
	struct ccp_dma_chan *ccp_dma_chan;
	struct kmem_cache *dma_cmd_cache;
	struct kmem_cache *dma_desc_cache;

	/*
	 * A counter used to generate job-ids for cmds submitted to the CCP
	 */
	atomic_t current_id ____cacheline_aligned;

	/*
	 * The CCP uses key storage blocks (KSB) to maintain context for
	 * certain operations. To prevent multiple cmds from using the same
	 * KSB range, a command queue reserves a KSB range for the duration
	 * of the cmd. Each queue will, however, reserve 2 KSB blocks for
	 * operations that only require single KSB entries (e.g. AES
	 * context/iv and key) in order to avoid allocation contention.
	 * This will reserve at most 10 KSB entries, leaving 40 KSB entries
	 * available for dynamic allocation.
	 * (An illustrative reservation sketch follows this structure.)
	 */
	struct mutex ksb_mutex ____cacheline_aligned;
	DECLARE_BITMAP(ksb, KSB_COUNT);
	wait_queue_head_t ksb_queue;
	unsigned int ksb_avail;
	unsigned int ksb_count;
	u32 ksb_start;

	/* Suspend support */
	unsigned int suspending;
	wait_queue_head_t suspend_queue;

	/* DMA caching attribute support */
	unsigned int axcache;
};

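/*
 * Illustrative sketch (not part of the upstream header): one way the ksb
 * bitmap, ksb_start and ksb_count fields above could cooperate to reserve
 * a contiguous range of KSB entries.  The locking, the failure handling
 * (a real caller might sleep on ksb_queue instead) and the helper name are
 * simplified assumptions made for the example only.
 */
#if 0
static inline int ccp_example_reserve_ksb(struct ccp_device *ccp,
					  unsigned int count)
{
	unsigned int start;

	mutex_lock(&ccp->ksb_mutex);

	/* search the dynamic region of the bitmap for 'count' free entries */
	start = bitmap_find_next_zero_area(ccp->ksb, ccp->ksb_count,
					   ccp->ksb_start, count, 0);
	if (start < ccp->ksb_count)
		bitmap_set(ccp->ksb, start, count);

	mutex_unlock(&ccp->ksb_mutex);

	if (start >= ccp->ksb_count)
		return -ENOSPC;

	return KSB_START + start;	/* absolute KSB index (sketch assumption) */
}
#endif
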
enum ccp_memtype {
	CCP_MEMTYPE_SYSTEM = 0,
	CCP_MEMTYPE_KSB,
	CCP_MEMTYPE_LOCAL,
	CCP_MEMTYPE__LAST,
};

struct ccp_dma_info {
	dma_addr_t address;
	unsigned int offset;
	unsigned int length;
	enum dma_data_direction dir;
};

struct ccp_dm_workarea {
	struct device *dev;
	struct dma_pool *dma_pool;
	unsigned int length;

	u8 *address;
	struct ccp_dma_info dma;
};

struct ccp_sg_workarea {
	struct scatterlist *sg;
	int nents;

	struct scatterlist *dma_sg;
	struct device *dma_dev;
	unsigned int dma_count;
	enum dma_data_direction dma_dir;

	unsigned int sg_used;

	u64 bytes_left;
};

struct ccp_data {
	struct ccp_sg_workarea sg_wa;
	struct ccp_dm_workarea dm_wa;
};

struct ccp_mem {
	enum ccp_memtype type;
	union {
		struct ccp_dma_info dma;
		u32 ksb;
	} u;
};

struct ccp_aes_op {
	enum ccp_aes_type type;
	enum ccp_aes_mode mode;
	enum ccp_aes_action action;
};

struct ccp_xts_aes_op {
	enum ccp_aes_action action;
	enum ccp_xts_aes_unit_size unit_size;
};

struct ccp_sha_op {
	enum ccp_sha_type type;
	u64 msg_bits;
};

struct ccp_rsa_op {
	u32 mod_size;
	u32 input_len;
};

struct ccp_passthru_op {
	enum ccp_passthru_bitwise bit_mod;
	enum ccp_passthru_byteswap byte_swap;
};

struct ccp_ecc_op {
	enum ccp_ecc_function function;
};

struct ccp_op {
	struct ccp_cmd_queue *cmd_q;

	u32 jobid;
	u32 ioc;
	u32 soc;
	u32 ksb_key;
	u32 ksb_ctx;
	u32 init;
	u32 eom;

	struct ccp_mem src;
	struct ccp_mem dst;

	union {
		struct ccp_aes_op aes;
		struct ccp_xts_aes_op xts;
		struct ccp_sha_op sha;
		struct ccp_rsa_op rsa;
		struct ccp_passthru_op passthru;
		struct ccp_ecc_op ecc;
	} u;
};

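/*
 * The helpers below split a dma_addr_t (plus offset) into the 32-bit low
 * word and the high word the device expects; only the low 16 bits of the
 * upper word are kept, i.e. an effective 48-bit address.
 */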
static inline u32 ccp_addr_lo(struct ccp_dma_info *info)
{
	return lower_32_bits(info->address + info->offset);
}

static inline u32 ccp_addr_hi(struct ccp_dma_info *info)
{
	return upper_32_bits(info->address + info->offset) & 0x0000ffff;
}

int ccp_pci_init(void);
void ccp_pci_exit(void);

int ccp_platform_init(void);
void ccp_platform_exit(void);

void ccp_add_device(struct ccp_device *ccp);
void ccp_del_device(struct ccp_device *ccp);

struct ccp_device *ccp_alloc_struct(struct device *dev);
bool ccp_queues_suspended(struct ccp_device *ccp);
int ccp_cmd_queue_thread(void *data);

int ccp_run_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd);

int ccp_dmaengine_register(struct ccp_device *ccp);
void ccp_dmaengine_unregister(struct ccp_device *ccp);

#endif