linux/drivers/crypto/hisilicon/qm.h
/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright (c) 2019 HiSilicon Limited. */
#ifndef HISI_ACC_QM_H
#define HISI_ACC_QM_H

#include <linux/bitfield.h>
#include <linux/debugfs.h>
#include <linux/iopoll.h>
#include <linux/module.h>
#include <linux/pci.h>

#define QM_QNUM_V1                      4096
#define QM_QNUM_V2                      1024
#define QM_MAX_VFS_NUM_V2               63

/* qm user domain */
#define QM_ARUSER_M_CFG_1               0x100088
#define AXUSER_SNOOP_ENABLE             BIT(30)
#define AXUSER_CMD_TYPE                 GENMASK(14, 12)
#define AXUSER_CMD_SMMU_NORMAL          1
#define AXUSER_NS                       BIT(6)
#define AXUSER_NO                       BIT(5)
#define AXUSER_FP                       BIT(4)
#define AXUSER_SSV                      BIT(0)
#define AXUSER_BASE                     (AXUSER_SNOOP_ENABLE |          \
                                        FIELD_PREP(AXUSER_CMD_TYPE,     \
                                        AXUSER_CMD_SMMU_NORMAL) |       \
                                        AXUSER_NS | AXUSER_NO | AXUSER_FP)
#define QM_ARUSER_M_CFG_ENABLE          0x100090
#define ARUSER_M_CFG_ENABLE             0xfffffffe
#define QM_AWUSER_M_CFG_1               0x100098
#define QM_AWUSER_M_CFG_ENABLE          0x1000a0
#define AWUSER_M_CFG_ENABLE             0xfffffffe
#define QM_WUSER_M_CFG_ENABLE           0x1000a8
#define WUSER_M_CFG_ENABLE              0xffffffff

/* qm cache */
#define QM_CACHE_CTL                    0x100050
#define SQC_CACHE_ENABLE                BIT(0)
#define CQC_CACHE_ENABLE                BIT(1)
#define SQC_CACHE_WB_ENABLE             BIT(4)
#define SQC_CACHE_WB_THRD               GENMASK(10, 5)
#define CQC_CACHE_WB_ENABLE             BIT(11)
#define CQC_CACHE_WB_THRD               GENMASK(17, 12)
#define QM_AXI_M_CFG                    0x1000ac
#define AXI_M_CFG                       0xffff
#define QM_AXI_M_CFG_ENABLE             0x1000b0
#define AM_CFG_SINGLE_PORT_MAX_TRANS    0x300014
#define AXI_M_CFG_ENABLE                0xffffffff
#define QM_PEH_AXUSER_CFG               0x1000cc
#define QM_PEH_AXUSER_CFG_ENABLE        0x1000d0
#define PEH_AXUSER_CFG                  0x401001
#define PEH_AXUSER_CFG_ENABLE           0xffffffff

#define QM_AXI_RRESP                    BIT(0)
#define QM_AXI_BRESP                    BIT(1)
#define QM_ECC_MBIT                     BIT(2)
#define QM_ECC_1BIT                     BIT(3)
#define QM_ACC_GET_TASK_TIMEOUT         BIT(4)
#define QM_ACC_DO_TASK_TIMEOUT          BIT(5)
#define QM_ACC_WB_NOT_READY_TIMEOUT     BIT(6)
#define QM_SQ_CQ_VF_INVALID             BIT(7)
#define QM_CQ_VF_INVALID                BIT(8)
#define QM_SQ_VF_INVALID                BIT(9)
#define QM_DB_TIMEOUT                   BIT(10)
#define QM_OF_FIFO_OF                   BIT(11)
#define QM_DB_RANDOM_INVALID            BIT(12)
#define QM_MAILBOX_TIMEOUT              BIT(13)
#define QM_FLR_TIMEOUT                  BIT(14)

#define QM_BASE_NFE     (QM_AXI_RRESP | QM_AXI_BRESP | QM_ECC_MBIT | \
                         QM_ACC_GET_TASK_TIMEOUT | QM_DB_TIMEOUT | \
                         QM_OF_FIFO_OF | QM_DB_RANDOM_INVALID | \
                         QM_MAILBOX_TIMEOUT | QM_FLR_TIMEOUT)
#define QM_BASE_CE                      QM_ECC_1BIT

#define QM_Q_DEPTH                      1024
#define QM_MIN_QNUM                     2
#define HISI_ACC_SGL_SGE_NR_MAX         255
#define QM_SHAPER_CFG                   0x100164
#define QM_SHAPER_ENABLE                BIT(30)
#define QM_SHAPER_TYPE1_OFFSET          10

/* page number for queue file region */
#define QM_DOORBELL_PAGE_NR             1

/* uacce mode of the driver */
#define UACCE_MODE_NOUACCE              0 /* don't use uacce */
#define UACCE_MODE_SVA                  1 /* use uacce sva mode */
#define UACCE_MODE_DESC "0(default) means only register to crypto, 1 means register to both crypto and uacce"

enum qm_stop_reason {
        QM_NORMAL,
        QM_SOFT_RESET,
        QM_FLR,
};

enum qm_state {
        QM_INIT = 0,
        QM_START,
        QM_CLOSE,
        QM_STOP,
};

enum qp_state {
        QP_INIT = 1,
        QP_START,
        QP_STOP,
        QP_CLOSE,
};

enum qm_hw_ver {
        QM_HW_UNKNOWN = -1,
        QM_HW_V1 = 0x20,
        QM_HW_V2 = 0x21,
        QM_HW_V3 = 0x30,
};

enum qm_fun_type {
        QM_HW_PF,
        QM_HW_VF,
};

enum qm_debug_file {
        CURRENT_QM,
        CURRENT_Q,
        CLEAR_ENABLE,
        DEBUG_FILE_NUM,
};

struct qm_dfx {
        atomic64_t err_irq_cnt;
        atomic64_t aeq_irq_cnt;
        atomic64_t abnormal_irq_cnt;
        atomic64_t create_qp_err_cnt;
        atomic64_t mb_err_cnt;
};

struct debugfs_file {
        enum qm_debug_file index;
        struct mutex lock;
        struct qm_debug *debug;
};

struct qm_debug {
        u32 curr_qm_qp_num;
        u32 sqe_mask_offset;
        u32 sqe_mask_len;
        struct qm_dfx dfx;
        struct dentry *debug_root;
        struct dentry *qm_d;
        struct debugfs_file files[DEBUG_FILE_NUM];
};

struct qm_shaper_factor {
        u32 func_qos;
        u64 cir_b;
        u64 cir_u;
        u64 cir_s;
        u64 cbs_s;
};

struct qm_dma {
        void *va;
        dma_addr_t dma;
        size_t size;
};

struct hisi_qm_status {
        u32 eq_head;
        bool eqc_phase;
        u32 aeq_head;
        bool aeqc_phase;
        atomic_t flags;
        int stop_reason;
};

struct hisi_qm;

struct hisi_qm_err_info {
        char *acpi_rst;
        u32 msi_wr_port;
        u32 ecc_2bits_mask;
        u32 dev_ce_mask;
        u32 ce;
        u32 nfe;
        u32 fe;
};

struct hisi_qm_err_status {
        u32 is_qm_ecc_mbit;
        u32 is_dev_ecc_mbit;
};

struct hisi_qm_err_ini {
        int (*hw_init)(struct hisi_qm *qm);
        void (*hw_err_enable)(struct hisi_qm *qm);
        void (*hw_err_disable)(struct hisi_qm *qm);
        u32 (*get_dev_hw_err_status)(struct hisi_qm *qm);
        void (*clear_dev_hw_err_status)(struct hisi_qm *qm, u32 err_sts);
        void (*open_axi_master_ooo)(struct hisi_qm *qm);
        void (*close_axi_master_ooo)(struct hisi_qm *qm);
        void (*open_sva_prefetch)(struct hisi_qm *qm);
        void (*close_sva_prefetch)(struct hisi_qm *qm);
        void (*log_dev_hw_err)(struct hisi_qm *qm, u32 err_sts);
        void (*err_info_init)(struct hisi_qm *qm);
};

struct hisi_qm_list {
        struct mutex lock;
        struct list_head list;
        int (*register_to_crypto)(struct hisi_qm *qm);
        void (*unregister_from_crypto)(struct hisi_qm *qm);
};

struct hisi_qm {
        enum qm_hw_ver ver;
        enum qm_fun_type fun_type;
        const char *dev_name;
        struct pci_dev *pdev;
        void __iomem *io_base;
        void __iomem *db_io_base;
        u32 sqe_size;
        u32 qp_base;
        u32 qp_num;
        u32 qp_in_used;
        u32 ctrl_qp_num;
        u32 max_qp_num;
        u32 vfs_num;
        u32 db_interval;
        struct list_head list;
        struct hisi_qm_list *qm_list;

        struct qm_dma qdma;
        struct qm_sqc *sqc;
        struct qm_cqc *cqc;
        struct qm_eqe *eqe;
        struct qm_aeqe *aeqe;
        dma_addr_t sqc_dma;
        dma_addr_t cqc_dma;
        dma_addr_t eqe_dma;
        dma_addr_t aeqe_dma;

        struct hisi_qm_status status;
        const struct hisi_qm_err_ini *err_ini;
        struct hisi_qm_err_info err_info;
        struct hisi_qm_err_status err_status;
        unsigned long misc_ctl; /* driver removal and reset scheduling */

        struct rw_semaphore qps_lock;
        struct idr qp_idr;
        struct hisi_qp *qp_array;

        struct mutex mailbox_lock;

        const struct hisi_qm_hw_ops *ops;

        struct qm_debug debug;

        u32 error_mask;

        struct workqueue_struct *wq;
        struct work_struct work;
        struct work_struct rst_work;
        struct work_struct cmd_process;

        const char *algs;
        bool use_sva;
        bool is_frozen;

        /* doorbell isolation enable */
        bool use_db_isolation;
        resource_size_t phys_base;
        resource_size_t db_phys_base;
        struct uacce_device *uacce;
        int mode;
        struct qm_shaper_factor *factor;
        u32 mb_qos;
        u32 type_rate;
};
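
/*
 * Example sketch (not part of the original header), assuming a device driver
 * implements the err_info_init() hook of struct hisi_qm_err_ini: the generic
 * QM_BASE_CE/QM_BASE_NFE masks seed qm->err_info, and a real driver ORs its
 * device-specific error bits on top. The function name is hypothetical.
 */
static inline void example_qm_err_info_init(struct hisi_qm *qm)
{
        qm->err_info.ce = QM_BASE_CE;   /* correctable errors */
        qm->err_info.nfe = QM_BASE_NFE; /* non-fatal errors */
        qm->err_info.fe = 0;            /* no fatal errors by default */
}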

struct hisi_qp_status {
        atomic_t used;
        u16 sq_tail;
        u16 cq_head;
        bool cqc_phase;
        atomic_t flags;
};

struct hisi_qp_ops {
        int (*fill_sqe)(void *sqe, void *q_parm, void *d_parm);
};

struct hisi_qp {
        u32 qp_id;
        u8 alg_type;
        u8 req_type;

        struct qm_dma qdma;
        void *sqe;
        struct qm_cqe *cqe;
        dma_addr_t sqe_dma;
        dma_addr_t cqe_dma;

        struct hisi_qp_status qp_status;
        struct hisi_qp_ops *hw_ops;
        void *qp_ctx;
        void (*req_cb)(struct hisi_qp *qp, void *data);
        void (*event_cb)(struct hisi_qp *qp);

        struct hisi_qm *qm;
        bool is_resetting;
        bool is_in_kernel;
        u16 pasid;
        struct uacce_queue *uacce_q;
};

static inline int q_num_set(const char *val, const struct kernel_param *kp,
                            unsigned int device)
{
        struct pci_dev *pdev = pci_get_device(PCI_VENDOR_ID_HUAWEI,
                                              device, NULL);
        u32 n, q_num;
        int ret;

        if (!val)
                return -EINVAL;

        if (!pdev) {
                q_num = min_t(u32, QM_QNUM_V1, QM_QNUM_V2);
                pr_info("No device found currently, assuming queue number is %u\n",
                        q_num);
        } else {
                if (pdev->revision == QM_HW_V1)
                        q_num = QM_QNUM_V1;
                else
                        q_num = QM_QNUM_V2;

                /* Drop the reference taken by pci_get_device(). */
                pci_dev_put(pdev);
        }

        ret = kstrtou32(val, 10, &n);
        if (ret || n < QM_MIN_QNUM || n > q_num)
                return -EINVAL;

        return param_set_int(val, kp);
}
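
/*
 * Example sketch (not part of the original header): how a driver's .c file
 * typically wires q_num_set() into a module parameter. The wrapper name,
 * the 0xa250 PCI device ID and the pf_q_num variable are illustrative
 * placeholders only.
 */
static inline int example_pf_q_num_set(const char *val,
                                       const struct kernel_param *kp)
{
        return q_num_set(val, kp, 0xa250);
}

static const struct kernel_param_ops example_pf_q_num_ops = {
        .set = example_pf_q_num_set,
        .get = param_get_int,
};

/*
 * In the driver source this would be followed by something like:
 *
 *      static u32 pf_q_num = QM_QNUM_V2;
 *      module_param_cb(pf_q_num, &example_pf_q_num_ops, &pf_q_num, 0444);
 */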

static inline int vfs_num_set(const char *val, const struct kernel_param *kp)
{
        u32 n;
        int ret;

        if (!val)
                return -EINVAL;

        ret = kstrtou32(val, 10, &n);
        if (ret < 0)
                return ret;

        if (n > QM_MAX_VFS_NUM_V2)
                return -EINVAL;

        return param_set_int(val, kp);
}

static inline int mode_set(const char *val, const struct kernel_param *kp)
{
        u32 n;
        int ret;

        if (!val)
                return -EINVAL;

        ret = kstrtou32(val, 10, &n);
        if (ret != 0 || (n != UACCE_MODE_SVA &&
                         n != UACCE_MODE_NOUACCE))
                return -EINVAL;

        return param_set_int(val, kp);
}

static inline int uacce_mode_set(const char *val, const struct kernel_param *kp)
{
        return mode_set(val, kp);
}
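
/*
 * Example sketch (not part of the original header): vfs_num_set() and
 * uacce_mode_set() are meant to be hooked up the same way, e.g. in a
 * driver's .c file (variable and ops names here are illustrative):
 *
 *      static const struct kernel_param_ops vfs_ops = {
 *              .set = vfs_num_set,
 *              .get = param_get_int,
 *      };
 *      static u32 vfs_num;
 *      module_param_cb(vfs_num, &vfs_ops, &vfs_num, 0444);
 *
 *      static const struct kernel_param_ops uacce_mode_ops = {
 *              .set = uacce_mode_set,
 *              .get = param_get_int,
 *      };
 *      static int uacce_mode = UACCE_MODE_NOUACCE;
 *      module_param_cb(uacce_mode, &uacce_mode_ops, &uacce_mode, 0444);
 *      MODULE_PARM_DESC(uacce_mode, UACCE_MODE_DESC);
 */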

static inline void hisi_qm_init_list(struct hisi_qm_list *qm_list)
{
        INIT_LIST_HEAD(&qm_list->list);
        mutex_init(&qm_list->lock);
}
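
/*
 * Example sketch (not part of the original header): a driver normally keeps
 * one static hisi_qm_list per accelerator type and fills in the crypto
 * registration hooks before any device is probed. The helper name and
 * callback parameters below are hypothetical.
 */
static inline void example_setup_qm_list(struct hisi_qm_list *qm_list,
                                         int (*reg)(struct hisi_qm *qm),
                                         void (*unreg)(struct hisi_qm *qm))
{
        hisi_qm_init_list(qm_list);
        /* Used by hisi_qm_alg_register()/hisi_qm_alg_unregister() below. */
        qm_list->register_to_crypto = reg;
        qm_list->unregister_from_crypto = unreg;
}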

int hisi_qm_init(struct hisi_qm *qm);
void hisi_qm_uninit(struct hisi_qm *qm);
int hisi_qm_start(struct hisi_qm *qm);
int hisi_qm_stop(struct hisi_qm *qm, enum qm_stop_reason r);
struct hisi_qp *hisi_qm_create_qp(struct hisi_qm *qm, u8 alg_type);
int hisi_qm_start_qp(struct hisi_qp *qp, unsigned long arg);
int hisi_qm_stop_qp(struct hisi_qp *qp);
void hisi_qm_release_qp(struct hisi_qp *qp);
int hisi_qp_send(struct hisi_qp *qp, const void *msg);
int hisi_qm_get_free_qp_num(struct hisi_qm *qm);
int hisi_qm_get_vft(struct hisi_qm *qm, u32 *base, u32 *number);
void hisi_qm_debug_init(struct hisi_qm *qm);
enum qm_hw_ver hisi_qm_get_hw_version(struct pci_dev *pdev);
void hisi_qm_debug_regs_clear(struct hisi_qm *qm);
int hisi_qm_sriov_enable(struct pci_dev *pdev, int max_vfs);
int hisi_qm_sriov_disable(struct pci_dev *pdev, bool is_frozen);
int hisi_qm_sriov_configure(struct pci_dev *pdev, int num_vfs);
void hisi_qm_dev_err_init(struct hisi_qm *qm);
void hisi_qm_dev_err_uninit(struct hisi_qm *qm);
pci_ers_result_t hisi_qm_dev_err_detected(struct pci_dev *pdev,
                                          pci_channel_state_t state);
pci_ers_result_t hisi_qm_dev_slot_reset(struct pci_dev *pdev);
void hisi_qm_reset_prepare(struct pci_dev *pdev);
void hisi_qm_reset_done(struct pci_dev *pdev);
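
/*
 * Example sketch (not part of the original header), assuming the caller
 * already holds an initialised and started struct hisi_qm and a device
 * specific SQE buffer. Completion of the request is reported through
 * qp->req_cb; the helper name below is hypothetical.
 */
static inline int example_send_one_sqe(struct hisi_qm *qm, u8 alg_type,
                                       const void *sqe)
{
        struct hisi_qp *qp;
        int ret;

        qp = hisi_qm_create_qp(qm, alg_type);
        if (IS_ERR(qp))
                return PTR_ERR(qp);

        ret = hisi_qm_start_qp(qp, 0);
        if (ret)
                goto out_release;

        /* Queue one request; completion is delivered asynchronously. */
        ret = hisi_qp_send(qp, sqe);

        hisi_qm_stop_qp(qp);
out_release:
        hisi_qm_release_qp(qp);
        return ret;
}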

struct hisi_acc_sgl_pool;
struct hisi_acc_hw_sgl *hisi_acc_sg_buf_map_to_hw_sgl(struct device *dev,
        struct scatterlist *sgl, struct hisi_acc_sgl_pool *pool,
        u32 index, dma_addr_t *hw_sgl_dma);
void hisi_acc_sg_buf_unmap(struct device *dev, struct scatterlist *sgl,
                           struct hisi_acc_hw_sgl *hw_sgl);
struct hisi_acc_sgl_pool *hisi_acc_create_sgl_pool(struct device *dev,
                                                   u32 count, u32 sge_nr);
void hisi_acc_free_sgl_pool(struct device *dev,
                            struct hisi_acc_sgl_pool *pool);
int hisi_qm_alloc_qps_node(struct hisi_qm_list *qm_list, int qp_num,
                           u8 alg_type, int node, struct hisi_qp **qps);
void hisi_qm_free_qps(struct hisi_qp **qps, int qp_num);
void hisi_qm_dev_shutdown(struct pci_dev *pdev);
void hisi_qm_wait_task_finish(struct hisi_qm *qm, struct hisi_qm_list *qm_list);
int hisi_qm_alg_register(struct hisi_qm *qm, struct hisi_qm_list *qm_list);
void hisi_qm_alg_unregister(struct hisi_qm *qm, struct hisi_qm_list *qm_list);
int hisi_qm_resume(struct device *dev);
int hisi_qm_suspend(struct device *dev);
void hisi_qm_pm_uninit(struct hisi_qm *qm);
void hisi_qm_pm_init(struct hisi_qm *qm);
int hisi_qm_get_dfx_access(struct hisi_qm *qm);
void hisi_qm_put_dfx_access(struct hisi_qm *qm);
void hisi_qm_regs_dump(struct seq_file *s, struct debugfs_regset32 *regset);
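
/*
 * Example sketch (not part of the original header): batch allocation of
 * queue pairs close to a given NUMA node, as the accelerator drivers do
 * when creating a context. The helper name and the alg_type value of 0
 * are illustrative only.
 */
static inline int example_alloc_ctx_qps(struct hisi_qm_list *qm_list,
                                        struct hisi_qp **qps, int qp_num,
                                        int node)
{
        int ret;

        ret = hisi_qm_alloc_qps_node(qm_list, qp_num, 0, node, qps);
        if (ret)
                return ret;

        /*
         * ... start and use the qps here; on teardown the same batch is
         * returned with hisi_qm_free_qps().
         */
        hisi_qm_free_qps(qps, qp_num);
        return 0;
}
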
#endif