linux/drivers/crypto/hisilicon/hpre/hpre_main.c
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2018-2019 HiSilicon Limited. */
#include <linux/acpi.h>
#include <linux/aer.h>
#include <linux/bitops.h>
#include <linux/debugfs.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/pm_runtime.h>
#include <linux/topology.h>
#include <linux/uacce.h>
#include "hpre.h"

#define HPRE_QM_ABNML_INT_MASK          0x100004
#define HPRE_CTRL_CNT_CLR_CE_BIT        BIT(0)
#define HPRE_COMM_CNT_CLR_CE            0x0
#define HPRE_CTRL_CNT_CLR_CE            0x301000
#define HPRE_FSM_MAX_CNT                0x301008
#define HPRE_VFG_AXQOS                  0x30100c
#define HPRE_VFG_AXCACHE                0x301010
#define HPRE_RDCHN_INI_CFG              0x301014
#define HPRE_AWUSR_FP_CFG               0x301018
#define HPRE_BD_ENDIAN                  0x301020
#define HPRE_ECC_BYPASS                 0x301024
#define HPRE_RAS_WIDTH_CFG              0x301028
#define HPRE_POISON_BYPASS              0x30102c
#define HPRE_BD_ARUSR_CFG               0x301030
#define HPRE_BD_AWUSR_CFG               0x301034
#define HPRE_TYPES_ENB                  0x301038
#define HPRE_RSA_ENB                    BIT(0)
#define HPRE_ECC_ENB                    BIT(1)
#define HPRE_DATA_RUSER_CFG             0x30103c
#define HPRE_DATA_WUSER_CFG             0x301040
#define HPRE_INT_MASK                   0x301400
#define HPRE_INT_STATUS                 0x301800
#define HPRE_CORE_INT_ENABLE            0
#define HPRE_CORE_INT_DISABLE           GENMASK(21, 0)
#define HPRE_RDCHN_INI_ST               0x301a00
#define HPRE_CLSTR_BASE                 0x302000
#define HPRE_CORE_EN_OFFSET             0x04
#define HPRE_CORE_INI_CFG_OFFSET        0x20
#define HPRE_CORE_INI_STATUS_OFFSET     0x80
#define HPRE_CORE_HTBT_WARN_OFFSET      0x8c
#define HPRE_CORE_IS_SCHD_OFFSET        0x90

#define HPRE_RAS_CE_ENB                 0x301410
#define HPRE_HAC_RAS_CE_ENABLE          (BIT(0) | BIT(22) | BIT(23))
#define HPRE_RAS_NFE_ENB                0x301414
#define HPRE_HAC_RAS_NFE_ENABLE         0x3ffffe
#define HPRE_RAS_FE_ENB                 0x301418
#define HPRE_OOO_SHUTDOWN_SEL           0x301a3c
#define HPRE_HAC_RAS_FE_ENABLE          0

#define HPRE_CORE_ENB           (HPRE_CLSTR_BASE + HPRE_CORE_EN_OFFSET)
#define HPRE_CORE_INI_CFG       (HPRE_CLSTR_BASE + HPRE_CORE_INI_CFG_OFFSET)
#define HPRE_CORE_INI_STATUS    (HPRE_CLSTR_BASE + HPRE_CORE_INI_STATUS_OFFSET)
#define HPRE_HAC_ECC1_CNT               0x301a04
#define HPRE_HAC_ECC2_CNT               0x301a08
#define HPRE_HAC_SOURCE_INT             0x301600
#define HPRE_CLSTR_ADDR_INTRVL          0x1000
#define HPRE_CLUSTER_INQURY             0x100
#define HPRE_CLSTR_ADDR_INQRY_RSLT      0x104
#define HPRE_TIMEOUT_ABNML_BIT          6
#define HPRE_PASID_EN_BIT               9
#define HPRE_REG_RD_INTVRL_US           10
#define HPRE_REG_RD_TMOUT_US            1000
#define HPRE_DBGFS_VAL_MAX_LEN          20
#define HPRE_PCI_DEVICE_ID              0xa258
#define HPRE_PCI_VF_DEVICE_ID           0xa259
#define HPRE_QM_USR_CFG_MASK            GENMASK(31, 1)
#define HPRE_QM_AXI_CFG_MASK            GENMASK(15, 0)
#define HPRE_QM_VFG_AX_MASK             GENMASK(7, 0)
#define HPRE_BD_USR_MASK                GENMASK(1, 0)
#define HPRE_CLUSTER_CORE_MASK_V2       GENMASK(3, 0)
#define HPRE_CLUSTER_CORE_MASK_V3       GENMASK(7, 0)
#define HPRE_PREFETCH_CFG               0x301130
#define HPRE_SVA_PREFTCH_DFX            0x30115C
#define HPRE_PREFETCH_ENABLE            (~(BIT(0) | BIT(30)))
#define HPRE_PREFETCH_DISABLE           BIT(30)
#define HPRE_SVA_DISABLE_READY          (BIT(4) | BIT(8))

/* clock gate */
#define HPRE_CLKGATE_CTL                0x301a10
#define HPRE_PEH_CFG_AUTO_GATE          0x301a2c
#define HPRE_CLUSTER_DYN_CTL            0x302010
#define HPRE_CORE_SHB_CFG               0x302088
#define HPRE_CLKGATE_CTL_EN             BIT(0)
#define HPRE_PEH_CFG_AUTO_GATE_EN       BIT(0)
#define HPRE_CLUSTER_DYN_CTL_EN         BIT(0)
#define HPRE_CORE_GATE_EN               (BIT(30) | BIT(31))

#define HPRE_AM_OOO_SHUTDOWN_ENB        0x301044
#define HPRE_AM_OOO_SHUTDOWN_ENABLE     BIT(0)
#define HPRE_WR_MSI_PORT                BIT(2)

#define HPRE_CORE_ECC_2BIT_ERR          BIT(1)
#define HPRE_OOO_ECC_2BIT_ERR           BIT(5)

#define HPRE_QM_BME_FLR                 BIT(7)
#define HPRE_QM_PM_FLR                  BIT(11)
#define HPRE_QM_SRIOV_FLR               BIT(12)

#define HPRE_SHAPER_TYPE_RATE           128
#define HPRE_VIA_MSI_DSM                1
#define HPRE_SQE_MASK_OFFSET            8
#define HPRE_SQE_MASK_LEN               24

static const char hpre_name[] = "hisi_hpre";
static struct dentry *hpre_debugfs_root;
static const struct pci_device_id hpre_dev_ids[] = {
        { PCI_DEVICE(PCI_VENDOR_ID_HUAWEI, HPRE_PCI_DEVICE_ID) },
        { PCI_DEVICE(PCI_VENDOR_ID_HUAWEI, HPRE_PCI_VF_DEVICE_ID) },
        { 0, }
};

MODULE_DEVICE_TABLE(pci, hpre_dev_ids);

struct hpre_hw_error {
        u32 int_msk;
        const char *msg;
};

static struct hisi_qm_list hpre_devices = {
        .register_to_crypto     = hpre_algs_register,
        .unregister_from_crypto = hpre_algs_unregister,
};

static const char * const hpre_debug_file_name[] = {
        [HPRE_CLEAR_ENABLE] = "rdclr_en",
        [HPRE_CLUSTER_CTRL] = "cluster_ctrl",
};

static const struct hpre_hw_error hpre_hw_errors[] = {
        {
                .int_msk = BIT(0),
                .msg = "core_ecc_1bit_err_int_set"
        }, {
                .int_msk = BIT(1),
                .msg = "core_ecc_2bit_err_int_set"
        }, {
                .int_msk = BIT(2),
                .msg = "dat_wb_poison_int_set"
        }, {
                .int_msk = BIT(3),
                .msg = "dat_rd_poison_int_set"
        }, {
                .int_msk = BIT(4),
                .msg = "bd_rd_poison_int_set"
        }, {
                .int_msk = BIT(5),
                .msg = "ooo_ecc_2bit_err_int_set"
        }, {
                .int_msk = BIT(6),
                .msg = "cluster1_shb_timeout_int_set"
        }, {
                .int_msk = BIT(7),
                .msg = "cluster2_shb_timeout_int_set"
        }, {
                .int_msk = BIT(8),
                .msg = "cluster3_shb_timeout_int_set"
        }, {
                .int_msk = BIT(9),
                .msg = "cluster4_shb_timeout_int_set"
        }, {
                .int_msk = GENMASK(15, 10),
                .msg = "ooo_rdrsp_err_int_set"
        }, {
                .int_msk = GENMASK(21, 16),
                .msg = "ooo_wrrsp_err_int_set"
        }, {
                .int_msk = BIT(22),
                .msg = "pt_rng_timeout_int_set"
        }, {
                .int_msk = BIT(23),
                .msg = "sva_fsm_timeout_int_set"
        }, {
                /* sentinel */
        }
};

static const u64 hpre_cluster_offsets[] = {
        [HPRE_CLUSTER0] =
                HPRE_CLSTR_BASE + HPRE_CLUSTER0 * HPRE_CLSTR_ADDR_INTRVL,
        [HPRE_CLUSTER1] =
                HPRE_CLSTR_BASE + HPRE_CLUSTER1 * HPRE_CLSTR_ADDR_INTRVL,
        [HPRE_CLUSTER2] =
                HPRE_CLSTR_BASE + HPRE_CLUSTER2 * HPRE_CLSTR_ADDR_INTRVL,
        [HPRE_CLUSTER3] =
                HPRE_CLSTR_BASE + HPRE_CLUSTER3 * HPRE_CLSTR_ADDR_INTRVL,
};

static const struct debugfs_reg32 hpre_cluster_dfx_regs[] = {
        {"CORES_EN_STATUS     ",  HPRE_CORE_EN_OFFSET},
        {"CORES_INI_CFG       ",  HPRE_CORE_INI_CFG_OFFSET},
        {"CORES_INI_STATUS    ",  HPRE_CORE_INI_STATUS_OFFSET},
        {"CORES_HTBT_WARN     ",  HPRE_CORE_HTBT_WARN_OFFSET},
        {"CORES_IS_SCHD       ",  HPRE_CORE_IS_SCHD_OFFSET},
};

static const struct debugfs_reg32 hpre_com_dfx_regs[] = {
        {"READ_CLR_EN         ",  HPRE_CTRL_CNT_CLR_CE},
        {"AXQOS               ",  HPRE_VFG_AXQOS},
        {"AWUSR_CFG           ",  HPRE_AWUSR_FP_CFG},
        {"QM_ARUSR_MCFG1      ",  QM_ARUSER_M_CFG_1},
        {"QM_AWUSR_MCFG1      ",  QM_AWUSER_M_CFG_1},
        {"BD_ENDIAN           ",  HPRE_BD_ENDIAN},
        {"ECC_CHECK_CTRL      ",  HPRE_ECC_BYPASS},
        {"RAS_INT_WIDTH       ",  HPRE_RAS_WIDTH_CFG},
        {"POISON_BYPASS       ",  HPRE_POISON_BYPASS},
        {"BD_ARUSER           ",  HPRE_BD_ARUSR_CFG},
        {"BD_AWUSER           ",  HPRE_BD_AWUSR_CFG},
        {"DATA_ARUSER         ",  HPRE_DATA_RUSER_CFG},
        {"DATA_AWUSER         ",  HPRE_DATA_WUSER_CFG},
        {"INT_STATUS          ",  HPRE_INT_STATUS},
};

static const char *hpre_dfx_files[HPRE_DFX_FILE_NUM] = {
        "send_cnt",
        "recv_cnt",
        "send_fail_cnt",
        "send_busy_cnt",
        "over_thrhld_cnt",
        "overtime_thrhld",
        "invalid_req_cnt"
};

static const struct kernel_param_ops hpre_uacce_mode_ops = {
        .set = uacce_mode_set,
        .get = param_get_int,
};

/*
 * uacce_mode = 0 means hpre only registers to crypto;
 * uacce_mode = 1 means hpre registers to both crypto and uacce.
 */
static u32 uacce_mode = UACCE_MODE_NOUACCE;
module_param_cb(uacce_mode, &hpre_uacce_mode_ops, &uacce_mode, 0444);
MODULE_PARM_DESC(uacce_mode, UACCE_MODE_DESC);

static int pf_q_num_set(const char *val, const struct kernel_param *kp)
{
        return q_num_set(val, kp, HPRE_PCI_DEVICE_ID);
}

static const struct kernel_param_ops hpre_pf_q_num_ops = {
        .set = pf_q_num_set,
        .get = param_get_int,
};

static u32 pf_q_num = HPRE_PF_DEF_Q_NUM;
module_param_cb(pf_q_num, &hpre_pf_q_num_ops, &pf_q_num, 0444);
MODULE_PARM_DESC(pf_q_num, "Number of queues in PF of HPRE(2-1024)");

static const struct kernel_param_ops vfs_num_ops = {
        .set = vfs_num_set,
        .get = param_get_int,
};

static u32 vfs_num;
module_param_cb(vfs_num, &vfs_num_ops, &vfs_num, 0444);
MODULE_PARM_DESC(vfs_num, "Number of VFs to enable(1-63), 0(default)");

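/*
 * Example module load (the values are illustrative, not defaults): bind
 * the PF with 256 queues, enable two VFs and the uacce interface:
 *
 *   modprobe hisi_hpre pf_q_num=256 vfs_num=2 uacce_mode=1
 *
 * All three parameters are 0444, so they can only be set at load time
 * and are read-only through sysfs afterwards.
 */
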
static inline int hpre_cluster_num(struct hisi_qm *qm)
{
        return (qm->ver >= QM_HW_V3) ? HPRE_CLUSTERS_NUM_V3 :
                HPRE_CLUSTERS_NUM_V2;
}

static inline int hpre_cluster_core_mask(struct hisi_qm *qm)
{
        return (qm->ver >= QM_HW_V3) ?
                HPRE_CLUSTER_CORE_MASK_V3 : HPRE_CLUSTER_CORE_MASK_V2;
}

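/*
 * hpre_create_qp() - allocate one queue pair for the given algorithm type.
 *
 * The qp is taken from a device on the NUMA node of the calling CPU when
 * possible. Returns the qp on success, or NULL if the type is unsupported
 * or no free queue is available.
 */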
struct hisi_qp *hpre_create_qp(u8 type)
{
        int node = cpu_to_node(smp_processor_id());
        struct hisi_qp *qp = NULL;
        int ret;

        if (type != HPRE_V2_ALG_TYPE && type != HPRE_V3_ECC_ALG_TYPE)
                return NULL;

        /*
         * type: 0 - RSA/DH algorithms supported in V2,
         *       1 - ECC algorithms supported in V3.
         */
        ret = hisi_qm_alloc_qps_node(&hpre_devices, 1, type, node, &qp);
        if (!ret)
                return qp;

        return NULL;
}

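/*
 * Set or clear the PASID enable bit in the data-buffer user configuration
 * registers according to qm->use_sva. Only Kunpeng 920 (HW V2) needs this;
 * HW V3 and later return early.
 */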
static void hpre_config_pasid(struct hisi_qm *qm)
{
        u32 val1, val2;

        if (qm->ver >= QM_HW_V3)
                return;

        val1 = readl_relaxed(qm->io_base + HPRE_DATA_RUSER_CFG);
        val2 = readl_relaxed(qm->io_base + HPRE_DATA_WUSER_CFG);
        if (qm->use_sva) {
                val1 |= BIT(HPRE_PASID_EN_BIT);
                val2 |= BIT(HPRE_PASID_EN_BIT);
        } else {
                val1 &= ~BIT(HPRE_PASID_EN_BIT);
                val2 &= ~BIT(HPRE_PASID_EN_BIT);
        }
        writel_relaxed(val1, qm->io_base + HPRE_DATA_RUSER_CFG);
        writel_relaxed(val2, qm->io_base + HPRE_DATA_WUSER_CFG);
}

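/*
 * Ask platform firmware to switch the device over to MSI by evaluating
 * the _DSM method (function HPRE_VIA_MSI_DSM) against the HPRE GUID.
 */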
static int hpre_cfg_by_dsm(struct hisi_qm *qm)
{
        struct device *dev = &qm->pdev->dev;
        union acpi_object *obj;
        guid_t guid;

        if (guid_parse("b06b81ab-0134-4a45-9b0c-483447b95fa7", &guid)) {
                dev_err(dev, "failed to parse HPRE GUID!\n");
                return -EINVAL;
        }

        /* Switch over to MSI handling due to non-standard PCI implementation */
        obj = acpi_evaluate_dsm(ACPI_HANDLE(dev), &guid,
                                0, HPRE_VIA_MSI_DSM, NULL);
        if (!obj) {
                dev_err(dev, "ACPI _DSM evaluation failed!\n");
                return -EIO;
        }

        ACPI_FREE(obj);

        return 0;
}

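/*
 * Bring up every compute cluster: enable the cores named in the cluster
 * core mask, kick off initialization, then poll the INI status register
 * until all enabled cores report ready or the poll times out.
 */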
static int hpre_set_cluster(struct hisi_qm *qm)
{
        u32 cluster_core_mask = hpre_cluster_core_mask(qm);
        u8 clusters_num = hpre_cluster_num(qm);
        struct device *dev = &qm->pdev->dev;
        unsigned long offset;
        u32 val = 0;
        int ret, i;

        for (i = 0; i < clusters_num; i++) {
                offset = i * HPRE_CLSTR_ADDR_INTRVL;

                /* initiate the cores in this cluster */
                writel(cluster_core_mask,
                       qm->io_base + offset + HPRE_CORE_ENB);
                writel(0x1, qm->io_base + offset + HPRE_CORE_INI_CFG);
                ret = readl_relaxed_poll_timeout(qm->io_base + offset +
                                        HPRE_CORE_INI_STATUS, val,
                                        ((val & cluster_core_mask) ==
                                        cluster_core_mask),
                                        HPRE_REG_RD_INTVRL_US,
                                        HPRE_REG_RD_TMOUT_US);
                if (ret) {
                        dev_err(dev,
                                "cluster %d init status timeout!\n", i);
                        return -ETIMEDOUT;
                }
        }

        return 0;
}

/*
 * For Kunpeng 920, disable FLR triggered by hardware (BME/PM/SRIOV).
 * Otherwise, the device may stay in D3 state when we bind and unbind
 * hpre quickly, as it performs a hardware-triggered FLR.
 */
static void disable_flr_of_bme(struct hisi_qm *qm)
{
        u32 val;

        val = readl(qm->io_base + QM_PEH_AXUSER_CFG);
        val &= ~(HPRE_QM_BME_FLR | HPRE_QM_SRIOV_FLR);
        val |= HPRE_QM_PM_FLR;
        writel(val, qm->io_base + QM_PEH_AXUSER_CFG);
        writel(PEH_AXUSER_CFG_ENABLE, qm->io_base + QM_PEH_AXUSER_CFG_ENABLE);
}

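/*
 * The SVA prefetch open/close helpers below only apply to HW V3 and later.
 * Opening clears the prefetch-disable bits and waits for the hardware to
 * acknowledge; closing sets the disable bit and polls the DFX register
 * until the prefetch logic is idle.
 */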
static void hpre_open_sva_prefetch(struct hisi_qm *qm)
{
        u32 val;
        int ret;

        if (qm->ver < QM_HW_V3)
                return;

        /* Enable prefetch */
        val = readl_relaxed(qm->io_base + HPRE_PREFETCH_CFG);
        val &= HPRE_PREFETCH_ENABLE;
        writel(val, qm->io_base + HPRE_PREFETCH_CFG);

        ret = readl_relaxed_poll_timeout(qm->io_base + HPRE_PREFETCH_CFG,
                                         val, !(val & HPRE_PREFETCH_DISABLE),
                                         HPRE_REG_RD_INTVRL_US,
                                         HPRE_REG_RD_TMOUT_US);
        if (ret)
                pci_err(qm->pdev, "failed to open sva prefetch\n");
}

static void hpre_close_sva_prefetch(struct hisi_qm *qm)
{
        u32 val;
        int ret;

        if (qm->ver < QM_HW_V3)
                return;

        val = readl_relaxed(qm->io_base + HPRE_PREFETCH_CFG);
        val |= HPRE_PREFETCH_DISABLE;
        writel(val, qm->io_base + HPRE_PREFETCH_CFG);

        ret = readl_relaxed_poll_timeout(qm->io_base + HPRE_SVA_PREFTCH_DFX,
                                         val, !(val & HPRE_SVA_DISABLE_READY),
                                         HPRE_REG_RD_INTVRL_US,
                                         HPRE_REG_RD_TMOUT_US);
        if (ret)
                pci_err(qm->pdev, "failed to close sva prefetch\n");
}

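/*
 * Dynamic clock gating (HW V3 and later): four controls are toggled as a
 * group - the top-level gate, the PEH auto-gate, the per-cluster dynamic
 * gate and the core shb gate. The enable and disable helpers below are
 * intentionally symmetrical.
 */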
static void hpre_enable_clock_gate(struct hisi_qm *qm)
{
        u32 val;

        if (qm->ver < QM_HW_V3)
                return;

        val = readl(qm->io_base + HPRE_CLKGATE_CTL);
        val |= HPRE_CLKGATE_CTL_EN;
        writel(val, qm->io_base + HPRE_CLKGATE_CTL);

        val = readl(qm->io_base + HPRE_PEH_CFG_AUTO_GATE);
        val |= HPRE_PEH_CFG_AUTO_GATE_EN;
        writel(val, qm->io_base + HPRE_PEH_CFG_AUTO_GATE);

        val = readl(qm->io_base + HPRE_CLUSTER_DYN_CTL);
        val |= HPRE_CLUSTER_DYN_CTL_EN;
        writel(val, qm->io_base + HPRE_CLUSTER_DYN_CTL);

        val = readl_relaxed(qm->io_base + HPRE_CORE_SHB_CFG);
        val |= HPRE_CORE_GATE_EN;
        writel(val, qm->io_base + HPRE_CORE_SHB_CFG);
}

static void hpre_disable_clock_gate(struct hisi_qm *qm)
{
        u32 val;

        if (qm->ver < QM_HW_V3)
                return;

        val = readl(qm->io_base + HPRE_CLKGATE_CTL);
        val &= ~HPRE_CLKGATE_CTL_EN;
        writel(val, qm->io_base + HPRE_CLKGATE_CTL);

        val = readl(qm->io_base + HPRE_PEH_CFG_AUTO_GATE);
        val &= ~HPRE_PEH_CFG_AUTO_GATE_EN;
        writel(val, qm->io_base + HPRE_PEH_CFG_AUTO_GATE);

        val = readl(qm->io_base + HPRE_CLUSTER_DYN_CTL);
        val &= ~HPRE_CLUSTER_DYN_CTL_EN;
        writel(val, qm->io_base + HPRE_CLUSTER_DYN_CTL);

        val = readl_relaxed(qm->io_base + HPRE_CORE_SHB_CFG);
        val &= ~HPRE_CORE_GATE_EN;
        writel(val, qm->io_base + HPRE_CORE_SHB_CFG);
}

static int hpre_set_user_domain_and_cache(struct hisi_qm *qm)
{
        struct device *dev = &qm->pdev->dev;
        u32 val;
        int ret;

        /* disable dynamic clock gating before SRAM init */
        hpre_disable_clock_gate(qm);

        writel(HPRE_QM_USR_CFG_MASK, qm->io_base + QM_ARUSER_M_CFG_ENABLE);
        writel(HPRE_QM_USR_CFG_MASK, qm->io_base + QM_AWUSER_M_CFG_ENABLE);
        writel_relaxed(HPRE_QM_AXI_CFG_MASK, qm->io_base + QM_AXI_M_CFG);

        /* HPRE needs more time, so disable this timeout interrupt */
        val = readl_relaxed(qm->io_base + HPRE_QM_ABNML_INT_MASK);
        val |= BIT(HPRE_TIMEOUT_ABNML_BIT);
        writel_relaxed(val, qm->io_base + HPRE_QM_ABNML_INT_MASK);

        if (qm->ver >= QM_HW_V3)
                writel(HPRE_RSA_ENB | HPRE_ECC_ENB,
                       qm->io_base + HPRE_TYPES_ENB);
        else
                writel(HPRE_RSA_ENB, qm->io_base + HPRE_TYPES_ENB);

        writel(HPRE_QM_VFG_AX_MASK, qm->io_base + HPRE_VFG_AXCACHE);
        writel(0x0, qm->io_base + HPRE_BD_ENDIAN);
        writel(0x0, qm->io_base + HPRE_INT_MASK);
        writel(0x0, qm->io_base + HPRE_POISON_BYPASS);
        writel(0x0, qm->io_base + HPRE_COMM_CNT_CLR_CE);
        writel(0x0, qm->io_base + HPRE_ECC_BYPASS);

        writel(HPRE_BD_USR_MASK, qm->io_base + HPRE_BD_ARUSR_CFG);
        writel(HPRE_BD_USR_MASK, qm->io_base + HPRE_BD_AWUSR_CFG);
        writel(0x1, qm->io_base + HPRE_RDCHN_INI_CFG);
        ret = readl_relaxed_poll_timeout(qm->io_base + HPRE_RDCHN_INI_ST, val,
                        val & BIT(0),
                        HPRE_REG_RD_INTVRL_US,
                        HPRE_REG_RD_TMOUT_US);
        if (ret) {
                dev_err(dev, "read channel init timeout!\n");
                return -ETIMEDOUT;
        }

        ret = hpre_set_cluster(qm);
        if (ret)
                return -ETIMEDOUT;

        /* This setting is only needed by Kunpeng 920. */
        if (qm->ver == QM_HW_V2) {
                ret = hpre_cfg_by_dsm(qm);
                if (ret)
                        return ret;

                disable_flr_of_bme(qm);
        }

        /* Config the data buffer PASID needed by Kunpeng 920 */
        hpre_config_pasid(qm);

        hpre_enable_clock_gate(qm);

        return ret;
}

static void hpre_cnt_regs_clear(struct hisi_qm *qm)
{
        u8 clusters_num = hpre_cluster_num(qm);
        unsigned long offset;
        int i;

        /* clear clusterX/cluster_ctrl */
        for (i = 0; i < clusters_num; i++) {
                offset = HPRE_CLSTR_BASE + i * HPRE_CLSTR_ADDR_INTRVL;
                writel(0x0, qm->io_base + offset + HPRE_CLUSTER_INQURY);
        }

        /* clear rdclr_en */
        writel(0x0, qm->io_base + HPRE_CTRL_CNT_CLR_CE);

        hisi_qm_debug_regs_clear(qm);
}

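/*
 * When enabled, a non-fatal error (NFE) makes the AM block shut down the
 * master out-of-order path; on HW V3 the NFE sources that trigger the
 * shutdown are also selected via HPRE_OOO_SHUTDOWN_SEL.
 */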
static void hpre_master_ooo_ctrl(struct hisi_qm *qm, bool enable)
{
        u32 val1, val2;

        val1 = readl(qm->io_base + HPRE_AM_OOO_SHUTDOWN_ENB);
        if (enable) {
                val1 |= HPRE_AM_OOO_SHUTDOWN_ENABLE;
                val2 = HPRE_HAC_RAS_NFE_ENABLE;
        } else {
                val1 &= ~HPRE_AM_OOO_SHUTDOWN_ENABLE;
                val2 = 0x0;
        }

        if (qm->ver > QM_HW_V2)
                writel(val2, qm->io_base + HPRE_OOO_SHUTDOWN_SEL);

        writel(val1, qm->io_base + HPRE_AM_OOO_SHUTDOWN_ENB);
}

static void hpre_hw_error_disable(struct hisi_qm *qm)
{
        /* disable hpre hw error interrupts */
        writel(HPRE_CORE_INT_DISABLE, qm->io_base + HPRE_INT_MASK);

        /* disable HPRE block master OOO when nfe occurs on Kunpeng930 */
        hpre_master_ooo_ctrl(qm, false);
}

static void hpre_hw_error_enable(struct hisi_qm *qm)
{
        /* clear HPRE hw error source if any */
        writel(HPRE_CORE_INT_DISABLE, qm->io_base + HPRE_HAC_SOURCE_INT);

        /* configure error type */
        writel(HPRE_HAC_RAS_CE_ENABLE, qm->io_base + HPRE_RAS_CE_ENB);
        writel(HPRE_HAC_RAS_NFE_ENABLE, qm->io_base + HPRE_RAS_NFE_ENB);
        writel(HPRE_HAC_RAS_FE_ENABLE, qm->io_base + HPRE_RAS_FE_ENB);

        /* enable HPRE block master OOO when nfe occurs on Kunpeng930 */
        hpre_master_ooo_ctrl(qm, true);

        /* enable hpre hw error interrupts */
        writel(HPRE_CORE_INT_ENABLE, qm->io_base + HPRE_INT_MASK);
}

static inline struct hisi_qm *hpre_file_to_qm(struct hpre_debugfs_file *file)
{
        struct hpre *hpre = container_of(file->debug, struct hpre, debug);

        return &hpre->qm;
}

static u32 hpre_clear_enable_read(struct hpre_debugfs_file *file)
{
        struct hisi_qm *qm = hpre_file_to_qm(file);

        return readl(qm->io_base + HPRE_CTRL_CNT_CLR_CE) &
               HPRE_CTRL_CNT_CLR_CE_BIT;
}

static int hpre_clear_enable_write(struct hpre_debugfs_file *file, u32 val)
{
        struct hisi_qm *qm = hpre_file_to_qm(file);
        u32 tmp;

        if (val != 1 && val != 0)
                return -EINVAL;

        tmp = (readl(qm->io_base + HPRE_CTRL_CNT_CLR_CE) &
               ~HPRE_CTRL_CNT_CLR_CE_BIT) | val;
        writel(tmp, qm->io_base + HPRE_CTRL_CNT_CLR_CE);

        return 0;
}

static u32 hpre_cluster_inqry_read(struct hpre_debugfs_file *file)
{
        struct hisi_qm *qm = hpre_file_to_qm(file);
        int cluster_index = file->index - HPRE_CLUSTER_CTRL;
        unsigned long offset = HPRE_CLSTR_BASE +
                               cluster_index * HPRE_CLSTR_ADDR_INTRVL;

        return readl(qm->io_base + offset + HPRE_CLSTR_ADDR_INQRY_RSLT);
}

static int hpre_cluster_inqry_write(struct hpre_debugfs_file *file, u32 val)
{
        struct hisi_qm *qm = hpre_file_to_qm(file);
        int cluster_index = file->index - HPRE_CLUSTER_CTRL;
        unsigned long offset = HPRE_CLSTR_BASE + cluster_index *
                               HPRE_CLSTR_ADDR_INTRVL;

        writel(val, qm->io_base + offset + HPRE_CLUSTER_INQURY);

        return 0;
}

static ssize_t hpre_ctrl_debug_read(struct file *filp, char __user *buf,
                                    size_t count, loff_t *pos)
{
        struct hpre_debugfs_file *file = filp->private_data;
        struct hisi_qm *qm = hpre_file_to_qm(file);
        char tbuf[HPRE_DBGFS_VAL_MAX_LEN];
        u32 val;
        int ret;

        ret = hisi_qm_get_dfx_access(qm);
        if (ret)
                return ret;

        spin_lock_irq(&file->lock);
        switch (file->type) {
        case HPRE_CLEAR_ENABLE:
                val = hpre_clear_enable_read(file);
                break;
        case HPRE_CLUSTER_CTRL:
                val = hpre_cluster_inqry_read(file);
                break;
        default:
                goto err_input;
        }
        spin_unlock_irq(&file->lock);

        hisi_qm_put_dfx_access(qm);
        ret = snprintf(tbuf, HPRE_DBGFS_VAL_MAX_LEN, "%u\n", val);
        return simple_read_from_buffer(buf, count, pos, tbuf, ret);

err_input:
        spin_unlock_irq(&file->lock);
        hisi_qm_put_dfx_access(qm);
        return -EINVAL;
}

static ssize_t hpre_ctrl_debug_write(struct file *filp, const char __user *buf,
                                     size_t count, loff_t *pos)
{
        struct hpre_debugfs_file *file = filp->private_data;
        struct hisi_qm *qm = hpre_file_to_qm(file);
        char tbuf[HPRE_DBGFS_VAL_MAX_LEN];
        unsigned long val;
        int len, ret;

        if (*pos != 0)
                return 0;

        if (count >= HPRE_DBGFS_VAL_MAX_LEN)
                return -ENOSPC;

        len = simple_write_to_buffer(tbuf, HPRE_DBGFS_VAL_MAX_LEN - 1,
                                     pos, buf, count);
        if (len < 0)
                return len;

        tbuf[len] = '\0';
        if (kstrtoul(tbuf, 0, &val))
                return -EINVAL;

        ret = hisi_qm_get_dfx_access(qm);
        if (ret)
                return ret;

        spin_lock_irq(&file->lock);
        switch (file->type) {
        case HPRE_CLEAR_ENABLE:
                ret = hpre_clear_enable_write(file, val);
                if (ret)
                        goto err_input;
                break;
        case HPRE_CLUSTER_CTRL:
                ret = hpre_cluster_inqry_write(file, val);
                if (ret)
                        goto err_input;
                break;
        default:
                ret = -EINVAL;
                goto err_input;
        }

        ret = count;

err_input:
        spin_unlock_irq(&file->lock);
        hisi_qm_put_dfx_access(qm);
        return ret;
}

static const struct file_operations hpre_ctrl_debug_fops = {
        .owner = THIS_MODULE,
        .open = simple_open,
        .read = hpre_ctrl_debug_read,
        .write = hpre_ctrl_debug_write,
};

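/*
 * Example debugfs interaction (the PCI address 0000:79:00.0 is
 * hypothetical and debugfs is assumed mounted at /sys/kernel/debug):
 *
 *   echo 1 > /sys/kernel/debug/hisi_hpre/0000:79:00.0/rdclr_en
 *   cat /sys/kernel/debug/hisi_hpre/0000:79:00.0/cluster0/cluster_ctrl
 */
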
static int hpre_debugfs_atomic64_get(void *data, u64 *val)
{
        struct hpre_dfx *dfx_item = data;

        *val = atomic64_read(&dfx_item->value);

        return 0;
}

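/*
 * Writing is only meaningful in two cases: overtime_thrhld accepts any
 * value (and resets over_thrhld_cnt as a side effect), while every other
 * counter only accepts 0, i.e. a reset.
 */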
static int hpre_debugfs_atomic64_set(void *data, u64 val)
{
        struct hpre_dfx *dfx_item = data;
        struct hpre_dfx *hpre_dfx = NULL;

        if (dfx_item->type == HPRE_OVERTIME_THRHLD) {
                hpre_dfx = dfx_item - HPRE_OVERTIME_THRHLD;
                atomic64_set(&hpre_dfx[HPRE_OVER_THRHLD_CNT].value, 0);
        } else if (val) {
                return -EINVAL;
        }

        atomic64_set(&dfx_item->value, val);

        return 0;
}

DEFINE_DEBUGFS_ATTRIBUTE(hpre_atomic64_ops, hpre_debugfs_atomic64_get,
                         hpre_debugfs_atomic64_set, "%llu\n");

static int hpre_com_regs_show(struct seq_file *s, void *unused)
{
        hisi_qm_regs_dump(s, s->private);

        return 0;
}

DEFINE_SHOW_ATTRIBUTE(hpre_com_regs);

static int hpre_cluster_regs_show(struct seq_file *s, void *unused)
{
        hisi_qm_regs_dump(s, s->private);

        return 0;
}

DEFINE_SHOW_ATTRIBUTE(hpre_cluster_regs);

static int hpre_create_debugfs_file(struct hisi_qm *qm, struct dentry *dir,
                                    enum hpre_ctrl_dbgfs_file type, int indx)
{
        struct hpre *hpre = container_of(qm, struct hpre, qm);
        struct hpre_debug *dbg = &hpre->debug;
        struct dentry *file_dir;

        if (dir)
                file_dir = dir;
        else
                file_dir = qm->debug.debug_root;

        if (type >= HPRE_DEBUG_FILE_NUM)
                return -EINVAL;

        spin_lock_init(&dbg->files[indx].lock);
        dbg->files[indx].debug = dbg;
        dbg->files[indx].type = type;
        dbg->files[indx].index = indx;
        debugfs_create_file(hpre_debug_file_name[type], 0600, file_dir,
                            dbg->files + indx, &hpre_ctrl_debug_fops);

        return 0;
}

static int hpre_pf_comm_regs_debugfs_init(struct hisi_qm *qm)
{
        struct device *dev = &qm->pdev->dev;
        struct debugfs_regset32 *regset;

        regset = devm_kzalloc(dev, sizeof(*regset), GFP_KERNEL);
        if (!regset)
                return -ENOMEM;

        regset->regs = hpre_com_dfx_regs;
        regset->nregs = ARRAY_SIZE(hpre_com_dfx_regs);
        regset->base = qm->io_base;
        regset->dev = dev;

        debugfs_create_file("regs", 0444, qm->debug.debug_root,
                            regset, &hpre_com_regs_fops);

        return 0;
}

static int hpre_cluster_debugfs_init(struct hisi_qm *qm)
{
        u8 clusters_num = hpre_cluster_num(qm);
        struct device *dev = &qm->pdev->dev;
        char buf[HPRE_DBGFS_VAL_MAX_LEN];
        struct debugfs_regset32 *regset;
        struct dentry *tmp_d;
        int i, ret;

        for (i = 0; i < clusters_num; i++) {
                ret = snprintf(buf, HPRE_DBGFS_VAL_MAX_LEN, "cluster%d", i);
                if (ret < 0)
                        return -EINVAL;
                tmp_d = debugfs_create_dir(buf, qm->debug.debug_root);

                regset = devm_kzalloc(dev, sizeof(*regset), GFP_KERNEL);
                if (!regset)
                        return -ENOMEM;

                regset->regs = hpre_cluster_dfx_regs;
                regset->nregs = ARRAY_SIZE(hpre_cluster_dfx_regs);
                regset->base = qm->io_base + hpre_cluster_offsets[i];
                regset->dev = dev;

                debugfs_create_file("regs", 0444, tmp_d, regset,
                                    &hpre_cluster_regs_fops);
                ret = hpre_create_debugfs_file(qm, tmp_d, HPRE_CLUSTER_CTRL,
                                               i + HPRE_CLUSTER_CTRL);
                if (ret)
                        return ret;
        }

        return 0;
}

static int hpre_ctrl_debug_init(struct hisi_qm *qm)
{
        int ret;

        ret = hpre_create_debugfs_file(qm, NULL, HPRE_CLEAR_ENABLE,
                                       HPRE_CLEAR_ENABLE);
        if (ret)
                return ret;

        ret = hpre_pf_comm_regs_debugfs_init(qm);
        if (ret)
                return ret;

        return hpre_cluster_debugfs_init(qm);
}

static void hpre_dfx_debug_init(struct hisi_qm *qm)
{
        struct hpre *hpre = container_of(qm, struct hpre, qm);
        struct hpre_dfx *dfx = hpre->debug.dfx;
        struct dentry *parent;
        int i;

        parent = debugfs_create_dir("hpre_dfx", qm->debug.debug_root);
        for (i = 0; i < HPRE_DFX_FILE_NUM; i++) {
                dfx[i].type = i;
                debugfs_create_file(hpre_dfx_files[i], 0644, parent, &dfx[i],
                                    &hpre_atomic64_ops);
        }
}

static int hpre_debugfs_init(struct hisi_qm *qm)
{
        struct device *dev = &qm->pdev->dev;
        int ret;

        qm->debug.debug_root = debugfs_create_dir(dev_name(dev),
                                                  hpre_debugfs_root);

        qm->debug.sqe_mask_offset = HPRE_SQE_MASK_OFFSET;
        qm->debug.sqe_mask_len = HPRE_SQE_MASK_LEN;
        hisi_qm_debug_init(qm);

        if (qm->pdev->device == HPRE_PCI_DEVICE_ID) {
                ret = hpre_ctrl_debug_init(qm);
                if (ret)
                        goto failed_to_create;
        }

        hpre_dfx_debug_init(qm);

        return 0;

failed_to_create:
        debugfs_remove_recursive(qm->debug.debug_root);
        return ret;
}

static void hpre_debugfs_exit(struct hisi_qm *qm)
{
        debugfs_remove_recursive(qm->debug.debug_root);
}

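/*
 * Fill in the common QM fields from the PCI device: HW V1 is rejected,
 * the advertised algorithm list depends on the silicon revision, and the
 * PF additionally gets its queue range and a slot in hpre_devices.
 */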
static int hpre_qm_init(struct hisi_qm *qm, struct pci_dev *pdev)
{
        if (pdev->revision == QM_HW_V1) {
                pci_warn(pdev, "HPRE version 1 is not supported!\n");
                return -EINVAL;
        }

        if (pdev->revision >= QM_HW_V3)
                qm->algs = "rsa\ndh\necdh\nx25519\nx448\necdsa\nsm2";
        else
                qm->algs = "rsa\ndh";
        qm->mode = uacce_mode;
        qm->pdev = pdev;
        qm->ver = pdev->revision;
        qm->sqe_size = HPRE_SQE_SIZE;
        qm->dev_name = hpre_name;

        qm->fun_type = (pdev->device == HPRE_PCI_DEVICE_ID) ?
                        QM_HW_PF : QM_HW_VF;
        if (qm->fun_type == QM_HW_PF) {
                qm->qp_base = HPRE_PF_DEF_Q_BASE;
                qm->qp_num = pf_q_num;
                qm->debug.curr_qm_qp_num = pf_q_num;
                qm->qm_list = &hpre_devices;
        }

        return hisi_qm_init(qm);
}

static void hpre_log_hw_error(struct hisi_qm *qm, u32 err_sts)
{
        const struct hpre_hw_error *err = hpre_hw_errors;
        struct device *dev = &qm->pdev->dev;

        while (err->msg) {
                if (err->int_msk & err_sts)
                        dev_warn(dev, "%s [error status=0x%x] found\n",
                                 err->msg, err->int_msk);
                err++;
        }
}

static u32 hpre_get_hw_err_status(struct hisi_qm *qm)
{
        return readl(qm->io_base + HPRE_INT_STATUS);
}

static void hpre_clear_hw_err_status(struct hisi_qm *qm, u32 err_sts)
{
        writel(err_sts, qm->io_base + HPRE_HAC_SOURCE_INT);
}

static void hpre_open_axi_master_ooo(struct hisi_qm *qm)
{
        u32 value;

        value = readl(qm->io_base + HPRE_AM_OOO_SHUTDOWN_ENB);
        writel(value & ~HPRE_AM_OOO_SHUTDOWN_ENABLE,
               qm->io_base + HPRE_AM_OOO_SHUTDOWN_ENB);
        writel(value | HPRE_AM_OOO_SHUTDOWN_ENABLE,
               qm->io_base + HPRE_AM_OOO_SHUTDOWN_ENB);
}

static void hpre_err_info_init(struct hisi_qm *qm)
{
        struct hisi_qm_err_info *err_info = &qm->err_info;

        err_info->ce = QM_BASE_CE;
        err_info->fe = 0;
        err_info->ecc_2bits_mask = HPRE_CORE_ECC_2BIT_ERR |
                                   HPRE_OOO_ECC_2BIT_ERR;
        err_info->dev_ce_mask = HPRE_HAC_RAS_CE_ENABLE;
        err_info->msi_wr_port = HPRE_WR_MSI_PORT;
        err_info->acpi_rst = "HRST";
        err_info->nfe = QM_BASE_NFE | QM_ACC_DO_TASK_TIMEOUT;
}

static const struct hisi_qm_err_ini hpre_err_ini = {
        .hw_init                = hpre_set_user_domain_and_cache,
        .hw_err_enable          = hpre_hw_error_enable,
        .hw_err_disable         = hpre_hw_error_disable,
        .get_dev_hw_err_status  = hpre_get_hw_err_status,
        .clear_dev_hw_err_status = hpre_clear_hw_err_status,
        .log_dev_hw_err         = hpre_log_hw_error,
        .open_axi_master_ooo    = hpre_open_axi_master_ooo,
        .open_sva_prefetch      = hpre_open_sva_prefetch,
        .close_sva_prefetch     = hpre_close_sva_prefetch,
        .err_info_init          = hpre_err_info_init,
};

static int hpre_pf_probe_init(struct hpre *hpre)
{
        struct hisi_qm *qm = &hpre->qm;
        int ret;

        ret = hpre_set_user_domain_and_cache(qm);
        if (ret)
                return ret;

        hpre_open_sva_prefetch(qm);

        qm->err_ini = &hpre_err_ini;
        qm->err_ini->err_info_init(qm);
        hisi_qm_dev_err_init(qm);

        return 0;
}

static int hpre_probe_init(struct hpre *hpre)
{
        u32 type_rate = HPRE_SHAPER_TYPE_RATE;
        struct hisi_qm *qm = &hpre->qm;
        int ret;

        if (qm->fun_type == QM_HW_PF) {
                ret = hpre_pf_probe_init(hpre);
                if (ret)
                        return ret;
                /* Enable shaper type 0 */
                if (qm->ver >= QM_HW_V3) {
                        type_rate |= QM_SHAPER_ENABLE;
                        qm->type_rate = type_rate;
                }
        }

        return 0;
}

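/*
 * Probe sets up the QM, the error-reporting hooks, debugfs, the crypto
 * algorithms, optional uacce and SR-IOV, in that order; the error labels
 * below unwind in exactly the reverse order.
 */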
static int hpre_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
        struct hisi_qm *qm;
        struct hpre *hpre;
        int ret;

        hpre = devm_kzalloc(&pdev->dev, sizeof(*hpre), GFP_KERNEL);
        if (!hpre)
                return -ENOMEM;

        qm = &hpre->qm;
        ret = hpre_qm_init(qm, pdev);
        if (ret) {
                pci_err(pdev, "Failed to init HPRE QM (%d)!\n", ret);
                return ret;
        }

        ret = hpre_probe_init(hpre);
        if (ret) {
                pci_err(pdev, "Failed to probe (%d)!\n", ret);
                goto err_with_qm_init;
        }

        ret = hisi_qm_start(qm);
        if (ret)
                goto err_with_err_init;

        ret = hpre_debugfs_init(qm);
        if (ret)
                pci_warn(pdev, "failed to init debugfs!\n");

        ret = hisi_qm_alg_register(qm, &hpre_devices);
        if (ret < 0) {
                pci_err(pdev, "failed to register algs to crypto!\n");
                goto err_with_qm_start;
        }

        if (qm->uacce) {
                ret = uacce_register(qm->uacce);
                if (ret) {
                        pci_err(pdev, "failed to register uacce (%d)!\n", ret);
                        goto err_with_alg_register;
                }
        }

        if (qm->fun_type == QM_HW_PF && vfs_num) {
                ret = hisi_qm_sriov_enable(pdev, vfs_num);
                if (ret < 0)
                        goto err_with_alg_register;
        }

        hisi_qm_pm_init(qm);

        return 0;

err_with_alg_register:
        hisi_qm_alg_unregister(qm, &hpre_devices);

err_with_qm_start:
        hpre_debugfs_exit(qm);
        hisi_qm_stop(qm, QM_NORMAL);

err_with_err_init:
        hisi_qm_dev_err_uninit(qm);

err_with_qm_init:
        hisi_qm_uninit(qm);

        return ret;
}

static void hpre_remove(struct pci_dev *pdev)
{
        struct hisi_qm *qm = pci_get_drvdata(pdev);
        int ret;

        hisi_qm_pm_uninit(qm);
        hisi_qm_wait_task_finish(qm, &hpre_devices);
        hisi_qm_alg_unregister(qm, &hpre_devices);
        if (qm->fun_type == QM_HW_PF && qm->vfs_num) {
                ret = hisi_qm_sriov_disable(pdev, true);
                if (ret) {
                        pci_err(pdev, "failed to disable SRIOV!\n");
                        return;
                }
        }

        hpre_debugfs_exit(qm);
        hisi_qm_stop(qm, QM_NORMAL);

        if (qm->fun_type == QM_HW_PF) {
                hpre_cnt_regs_clear(qm);
                qm->debug.curr_qm_qp_num = 0;
                hisi_qm_dev_err_uninit(qm);
        }

        hisi_qm_uninit(qm);
}

static const struct dev_pm_ops hpre_pm_ops = {
        SET_RUNTIME_PM_OPS(hisi_qm_suspend, hisi_qm_resume, NULL)
};

static const struct pci_error_handlers hpre_err_handler = {
        .error_detected         = hisi_qm_dev_err_detected,
        .slot_reset             = hisi_qm_dev_slot_reset,
        .reset_prepare          = hisi_qm_reset_prepare,
        .reset_done             = hisi_qm_reset_done,
};

static struct pci_driver hpre_pci_driver = {
        .name                   = hpre_name,
        .id_table               = hpre_dev_ids,
        .probe                  = hpre_probe,
        .remove                 = hpre_remove,
        .sriov_configure        = IS_ENABLED(CONFIG_PCI_IOV) ?
                                  hisi_qm_sriov_configure : NULL,
        .err_handler            = &hpre_err_handler,
        .shutdown               = hisi_qm_dev_shutdown,
        .driver.pm              = &hpre_pm_ops,
};

static void hpre_register_debugfs(void)
{
        if (!debugfs_initialized())
                return;

        hpre_debugfs_root = debugfs_create_dir(hpre_name, NULL);
}

static void hpre_unregister_debugfs(void)
{
        debugfs_remove_recursive(hpre_debugfs_root);
}

static int __init hpre_init(void)
{
        int ret;

        hisi_qm_init_list(&hpre_devices);
        hpre_register_debugfs();

        ret = pci_register_driver(&hpre_pci_driver);
        if (ret) {
                hpre_unregister_debugfs();
                pr_err("hpre: can't register hisi hpre driver.\n");
        }

        return ret;
}

static void __exit hpre_exit(void)
{
        pci_unregister_driver(&hpre_pci_driver);
        hpre_unregister_debugfs();
}

module_init(hpre_init);
module_exit(hpre_exit);

MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Zaibo Xu <xuzaibo@huawei.com>");
MODULE_AUTHOR("Meng Yu <yumeng18@huawei.com>");
MODULE_DESCRIPTION("Driver for HiSilicon HPRE accelerator");