linux/drivers/crypto/hisilicon/sec2/sec_main.c
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2019 HiSilicon Limited. */

#include <linux/acpi.h>
#include <linux/aer.h>
#include <linux/bitfield.h>
#include <linux/bitops.h>
#include <linux/debugfs.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/seq_file.h>
#include <linux/topology.h>

#include "sec.h"

#define SEC_VF_NUM                      63
#define SEC_QUEUE_NUM_V1                4096
#define SEC_QUEUE_NUM_V2                1024
#define SEC_PF_PCI_DEVICE_ID            0xa255
#define SEC_VF_PCI_DEVICE_ID            0xa256

#define SEC_XTS_MIV_ENABLE_REG          0x301384
#define SEC_XTS_MIV_ENABLE_MSK          0x7FFFFFFF
#define SEC_XTS_MIV_DISABLE_MSK         0xFFFFFFFF
#define SEC_BD_ERR_CHK_EN1              0xfffff7fd
#define SEC_BD_ERR_CHK_EN2              0xffffbfff

#define SEC_SQE_SIZE                    128
#define SEC_SQ_SIZE                     (SEC_SQE_SIZE * QM_Q_DEPTH)
#define SEC_PF_DEF_Q_NUM                64
#define SEC_PF_DEF_Q_BASE               0
#define SEC_CTX_Q_NUM_DEF               24
#define SEC_CTX_Q_NUM_MAX               32

#define SEC_CTRL_CNT_CLR_CE             0x301120
#define SEC_CTRL_CNT_CLR_CE_BIT         BIT(0)
#define SEC_ENGINE_PF_CFG_OFF           0x300000
#define SEC_ACC_COMMON_REG_OFF          0x1000
#define SEC_CORE_INT_SOURCE             0x301010
#define SEC_CORE_INT_MASK               0x301000
#define SEC_CORE_INT_STATUS             0x301008
#define SEC_CORE_SRAM_ECC_ERR_INFO      0x301C14
#define SEC_ECC_NUM(err)                (((err) >> 16) & 0xFF)
#define SEC_ECC_ADDR(err)               ((err) >> 0)
#define SEC_CORE_INT_DISABLE            0x0
#define SEC_CORE_INT_ENABLE             0x1ff

#define SEC_RAS_CE_REG                  0x50
#define SEC_RAS_FE_REG                  0x54
#define SEC_RAS_NFE_REG                 0x58
#define SEC_RAS_CE_ENB_MSK              0x88
#define SEC_RAS_FE_ENB_MSK              0x0
#define SEC_RAS_NFE_ENB_MSK             0x177
#define SEC_RAS_DISABLE                 0x0
#define SEC_MEM_START_INIT_REG          0x0100
#define SEC_MEM_INIT_DONE_REG           0x0104
#define SEC_QM_ABNORMAL_INT_MASK        0x100004

#define SEC_CONTROL_REG                 0x0200
#define SEC_TRNG_EN_SHIFT               8
#define SEC_CLK_GATE_ENABLE             BIT(3)
#define SEC_CLK_GATE_DISABLE            (~BIT(3))
#define SEC_AXI_SHUTDOWN_ENABLE         BIT(12)
#define SEC_AXI_SHUTDOWN_DISABLE        0xFFFFEFFF

#define SEC_INTERFACE_USER_CTRL0_REG    0x0220
#define SEC_INTERFACE_USER_CTRL1_REG    0x0224
#define SEC_BD_ERR_CHK_EN_REG1          0x0384
#define SEC_BD_ERR_CHK_EN_REG2          0x038c

#define SEC_USER0_SMMU_NORMAL           (BIT(23) | BIT(15))
#define SEC_USER1_SMMU_NORMAL           (BIT(31) | BIT(23) | BIT(15) | BIT(7))
#define SEC_CORE_INT_STATUS_M_ECC       BIT(2)

#define SEC_DELAY_10_US                 10
#define SEC_POLL_TIMEOUT_US             1000
#define SEC_VF_CNT_MASK                 0xffffffc0
#define SEC_DBGFS_VAL_MAX_LEN           20

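/*
 * Engine (non-QM) registers live behind the PF engine configuration
 * window: SEC_ADDR() turns an engine-relative offset into an address in
 * the mapped BAR (io_base + SEC_ENGINE_PF_CFG_OFF + SEC_ACC_COMMON_REG_OFF).
 */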
#define SEC_ADDR(qm, offset) ((qm)->io_base + (offset) + \
                             SEC_ENGINE_PF_CFG_OFF + SEC_ACC_COMMON_REG_OFF)

struct sec_hw_error {
        u32 int_msk;
        const char *msg;
};

static const char sec_name[] = "hisi_sec2";
static struct dentry *sec_debugfs_root;
static LIST_HEAD(sec_list);
static DEFINE_MUTEX(sec_list_lock);

static const struct sec_hw_error sec_hw_errors[] = {
        {.int_msk = BIT(0), .msg = "sec_axi_rresp_err_rint"},
        {.int_msk = BIT(1), .msg = "sec_axi_bresp_err_rint"},
        {.int_msk = BIT(2), .msg = "sec_ecc_2bit_err_rint"},
        {.int_msk = BIT(3), .msg = "sec_ecc_1bit_err_rint"},
        {.int_msk = BIT(4), .msg = "sec_req_trng_timeout_rint"},
        {.int_msk = BIT(5), .msg = "sec_fsm_hbeat_rint"},
        {.int_msk = BIT(6), .msg = "sec_channel_req_rng_timeout_rint"},
        {.int_msk = BIT(7), .msg = "sec_bd_err_rint"},
        {.int_msk = BIT(8), .msg = "sec_chain_buff_err_rint"},
        { /* sentinel */ }
};

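/*
 * sec_find_device() - pick the SEC device "closest" to @node.
 *
 * Walk the global device list and return the device with the smallest
 * NUMA distance to @node that still has at least ctx_q_num free queue
 * pairs, or NULL if no device qualifies.
 */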
struct sec_dev *sec_find_device(int node)
{
#define SEC_NUMA_MAX_DISTANCE   100
        int min_distance = SEC_NUMA_MAX_DISTANCE;
        int dev_node = 0, free_qp_num = 0;
        struct sec_dev *sec, *ret = NULL;
        struct hisi_qm *qm;
        struct device *dev;

        mutex_lock(&sec_list_lock);
        list_for_each_entry(sec, &sec_list, list) {
                qm = &sec->qm;
                dev = &qm->pdev->dev;
#ifdef CONFIG_NUMA
                dev_node = dev->numa_node;
                if (dev_node < 0)
                        dev_node = 0;
#endif
                if (node_distance(dev_node, node) < min_distance) {
                        free_qp_num = hisi_qm_get_free_qp_num(qm);
                        if (free_qp_num >= sec->ctx_q_num) {
                                ret = sec;
                                min_distance = node_distance(dev_node, node);
                        }
                }
        }
        mutex_unlock(&sec_list_lock);

        return ret;
}

static const char * const sec_dbg_file_name[] = {
        [SEC_CURRENT_QM] = "current_qm",
        [SEC_CLEAR_ENABLE] = "clear_enable",
};

static struct debugfs_reg32 sec_dfx_regs[] = {
        {"SEC_PF_ABNORMAL_INT_SOURCE    ",  0x301010},
        {"SEC_SAA_EN                    ",  0x301270},
        {"SEC_BD_LATENCY_MIN            ",  0x301600},
        {"SEC_BD_LATENCY_MAX            ",  0x301608},
        {"SEC_BD_LATENCY_AVG            ",  0x30160C},
        {"SEC_BD_NUM_IN_SAA0            ",  0x301670},
        {"SEC_BD_NUM_IN_SAA1            ",  0x301674},
        {"SEC_BD_NUM_IN_SEC             ",  0x301680},
        {"SEC_ECC_1BIT_CNT              ",  0x301C00},
        {"SEC_ECC_1BIT_INFO             ",  0x301C04},
        {"SEC_ECC_2BIT_CNT              ",  0x301C10},
        {"SEC_ECC_2BIT_INFO             ",  0x301C14},
        {"SEC_BD_SAA0                   ",  0x301C20},
        {"SEC_BD_SAA1                   ",  0x301C24},
        {"SEC_BD_SAA2                   ",  0x301C28},
        {"SEC_BD_SAA3                   ",  0x301C2C},
        {"SEC_BD_SAA4                   ",  0x301C30},
        {"SEC_BD_SAA5                   ",  0x301C34},
        {"SEC_BD_SAA6                   ",  0x301C38},
        {"SEC_BD_SAA7                   ",  0x301C3C},
        {"SEC_BD_SAA8                   ",  0x301C40},
};

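/*
 * pf_q_num is validated against the queue capacity of the hardware
 * revision actually present; when no PF device is found at parse time,
 * the smaller of the two revisions' limits is assumed.
 */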
static int sec_pf_q_num_set(const char *val, const struct kernel_param *kp)
{
        struct pci_dev *pdev;
        u32 n, q_num;
        u8 rev_id;
        int ret;

        if (!val)
                return -EINVAL;

        pdev = pci_get_device(PCI_VENDOR_ID_HUAWEI,
                              SEC_PF_PCI_DEVICE_ID, NULL);
        if (!pdev) {
                q_num = min_t(u32, SEC_QUEUE_NUM_V1, SEC_QUEUE_NUM_V2);
                pr_info("No device found, assuming queue number is %u!\n",
                        q_num);
        } else {
                rev_id = pdev->revision;

                switch (rev_id) {
                case QM_HW_V1:
                        q_num = SEC_QUEUE_NUM_V1;
                        break;
                case QM_HW_V2:
                        q_num = SEC_QUEUE_NUM_V2;
                        break;
                default:
                        return -EINVAL;
                }
        }

        ret = kstrtou32(val, 10, &n);
        if (ret || !n || n > q_num)
                return -EINVAL;

        return param_set_int(val, kp);
}

static const struct kernel_param_ops sec_pf_q_num_ops = {
        .set = sec_pf_q_num_set,
        .get = param_get_int,
};
static u32 pf_q_num = SEC_PF_DEF_Q_NUM;
module_param_cb(pf_q_num, &sec_pf_q_num_ops, &pf_q_num, 0444);
MODULE_PARM_DESC(pf_q_num, "Number of queues in PF(v1 1-4096, v2 1-1024)");

static int sec_ctx_q_num_set(const char *val, const struct kernel_param *kp)
{
        u32 ctx_q_num;
        int ret;

        if (!val)
                return -EINVAL;

        ret = kstrtou32(val, 10, &ctx_q_num);
        if (ret)
                return -EINVAL;

        if (!ctx_q_num || ctx_q_num > SEC_CTX_Q_NUM_MAX || ctx_q_num & 0x1) {
                pr_err("ctx queue num[%u] is invalid!\n", ctx_q_num);
                return -EINVAL;
        }

        return param_set_int(val, kp);
}

static const struct kernel_param_ops sec_ctx_q_num_ops = {
        .set = sec_ctx_q_num_set,
        .get = param_get_int,
};
static u32 ctx_q_num = SEC_CTX_Q_NUM_DEF;
module_param_cb(ctx_q_num, &sec_ctx_q_num_ops, &ctx_q_num, 0444);
MODULE_PARM_DESC(ctx_q_num, "Queue num in ctx (24 default, 2, 4, ..., 32)");
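
/*
 * Both module parameters are read-only at runtime (0444) and must be
 * given at load time, for example (illustrative values):
 *
 *   modprobe hisi_sec2 pf_q_num=256 ctx_q_num=32
 */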

static const struct pci_device_id sec_dev_ids[] = {
        { PCI_DEVICE(PCI_VENDOR_ID_HUAWEI, SEC_PF_PCI_DEVICE_ID) },
        { PCI_DEVICE(PCI_VENDOR_ID_HUAWEI, SEC_VF_PCI_DEVICE_ID) },
        { 0, }
};
MODULE_DEVICE_TABLE(pci, sec_dev_ids);

static inline void sec_add_to_list(struct sec_dev *sec)
{
        mutex_lock(&sec_list_lock);
        list_add_tail(&sec->list, &sec_list);
        mutex_unlock(&sec_list_lock);
}

static inline void sec_remove_from_list(struct sec_dev *sec)
{
        mutex_lock(&sec_list_lock);
        list_del(&sec->list);
        mutex_unlock(&sec_list_lock);
}

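/*
 * The low two bits of SEC_CONTROL_REG encode the BD (buffer descriptor)
 * endianness: bit 0 clear means little endian, bit 0 set with bit 1
 * clear means 32-bit big endian, and both bits set means 64-bit big
 * endian.
 */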
static u8 sec_get_endian(struct sec_dev *sec)
{
        struct hisi_qm *qm = &sec->qm;
        u32 reg;

        /*
         * A VF cannot read the engine registers, so the endian setting
         * cannot be obtained this way; fall back to little endian.
         */
        if (qm->pdev->is_virtfn) {
                dev_err_ratelimited(&qm->pdev->dev,
                                    "cannot access a register in VF!\n");
                return SEC_LE;
        }

        reg = readl_relaxed(qm->io_base + SEC_ENGINE_PF_CFG_OFF +
                            SEC_ACC_COMMON_REG_OFF + SEC_CONTROL_REG);

        /* BD little endian mode */
        if (!(reg & BIT(0)))
                return SEC_LE;

        /* BD 32-bit big endian mode */
        else if (!(reg & BIT(1)))
                return SEC_32BE;

        /* BD 64-bit big endian mode */
        else
                return SEC_64BE;
}

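/*
 * One-time engine bring-up: disable clock gating, trigger the internal
 * memory init and poll for completion, enable the TRNG, set the SMMU
 * user bits, program the BD error-check masks, re-enable clock gating,
 * then latch the BD endianness and the SM4-XTS multiple-IV setting.
 */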
static int sec_engine_init(struct sec_dev *sec)
{
        struct hisi_qm *qm = &sec->qm;
        int ret;
        u32 reg;

        /* disable clock gate control */
        reg = readl_relaxed(SEC_ADDR(qm, SEC_CONTROL_REG));
        reg &= SEC_CLK_GATE_DISABLE;
        writel_relaxed(reg, SEC_ADDR(qm, SEC_CONTROL_REG));

        writel_relaxed(0x1, SEC_ADDR(qm, SEC_MEM_START_INIT_REG));

        ret = readl_relaxed_poll_timeout(SEC_ADDR(qm, SEC_MEM_INIT_DONE_REG),
                                         reg, reg & 0x1, SEC_DELAY_10_US,
                                         SEC_POLL_TIMEOUT_US);
        if (ret) {
                dev_err(&qm->pdev->dev, "failed to init sec mem\n");
                return ret;
        }

        reg = readl_relaxed(SEC_ADDR(qm, SEC_CONTROL_REG));
        reg |= (0x1 << SEC_TRNG_EN_SHIFT);
        writel_relaxed(reg, SEC_ADDR(qm, SEC_CONTROL_REG));

        reg = readl_relaxed(SEC_ADDR(qm, SEC_INTERFACE_USER_CTRL0_REG));
        reg |= SEC_USER0_SMMU_NORMAL;
        writel_relaxed(reg, SEC_ADDR(qm, SEC_INTERFACE_USER_CTRL0_REG));

        reg = readl_relaxed(SEC_ADDR(qm, SEC_INTERFACE_USER_CTRL1_REG));
        reg |= SEC_USER1_SMMU_NORMAL;
        writel_relaxed(reg, SEC_ADDR(qm, SEC_INTERFACE_USER_CTRL1_REG));

        writel_relaxed(SEC_BD_ERR_CHK_EN1,
                       SEC_ADDR(qm, SEC_BD_ERR_CHK_EN_REG1));
        writel_relaxed(SEC_BD_ERR_CHK_EN2,
                       SEC_ADDR(qm, SEC_BD_ERR_CHK_EN_REG2));

        /* enable clock gate control */
        reg = readl_relaxed(SEC_ADDR(qm, SEC_CONTROL_REG));
        reg |= SEC_CLK_GATE_ENABLE;
        writel_relaxed(reg, SEC_ADDR(qm, SEC_CONTROL_REG));

        /* config endian */
        reg = readl_relaxed(SEC_ADDR(qm, SEC_CONTROL_REG));
        reg |= sec_get_endian(sec);
        writel_relaxed(reg, SEC_ADDR(qm, SEC_CONTROL_REG));

        /* enable SM4 XTS multiple-IV mode */
        writel_relaxed(SEC_XTS_MIV_ENABLE_MSK,
                       qm->io_base + SEC_XTS_MIV_ENABLE_REG);

        return 0;
}

static int sec_set_user_domain_and_cache(struct sec_dev *sec)
{
        struct hisi_qm *qm = &sec->qm;

        /* qm user domain */
        writel(AXUSER_BASE, qm->io_base + QM_ARUSER_M_CFG_1);
        writel(ARUSER_M_CFG_ENABLE, qm->io_base + QM_ARUSER_M_CFG_ENABLE);
        writel(AXUSER_BASE, qm->io_base + QM_AWUSER_M_CFG_1);
        writel(AWUSER_M_CFG_ENABLE, qm->io_base + QM_AWUSER_M_CFG_ENABLE);
        writel(WUSER_M_CFG_ENABLE, qm->io_base + QM_WUSER_M_CFG_ENABLE);

        /* qm cache */
        writel(AXI_M_CFG, qm->io_base + QM_AXI_M_CFG);
        writel(AXI_M_CFG_ENABLE, qm->io_base + QM_AXI_M_CFG_ENABLE);

        /* disable FLR triggered by BME (bus master enable) */
        writel(PEH_AXUSER_CFG, qm->io_base + QM_PEH_AXUSER_CFG);
        writel(PEH_AXUSER_CFG_ENABLE, qm->io_base + QM_PEH_AXUSER_CFG_ENABLE);

        /* enable SQC, CQC writeback */
        writel(SQC_CACHE_ENABLE | CQC_CACHE_ENABLE | SQC_CACHE_WB_ENABLE |
               CQC_CACHE_WB_ENABLE | FIELD_PREP(SQC_CACHE_WB_THRD, 1) |
               FIELD_PREP(CQC_CACHE_WB_THRD, 1), qm->io_base + QM_CACHE_CTL);

        return sec_engine_init(sec);
}

/* sec_debug_regs_clear() - clear the sec debug regs */
static void sec_debug_regs_clear(struct hisi_qm *qm)
{
        /* clear current_qm */
        writel(0x0, qm->io_base + QM_DFX_MB_CNT_VF);
        writel(0x0, qm->io_base + QM_DFX_DB_CNT_VF);

        /* clear rdclr_en */
        writel(0x0, qm->io_base + SEC_CTRL_CNT_CLR_CE);

        hisi_qm_debug_regs_clear(qm);
}

static void sec_hw_error_enable(struct sec_dev *sec)
{
        struct hisi_qm *qm = &sec->qm;
        u32 val;

        if (qm->ver == QM_HW_V1) {
                writel(SEC_CORE_INT_DISABLE, qm->io_base + SEC_CORE_INT_MASK);
                dev_info(&qm->pdev->dev,
                         "V1 does not support hw error handling!\n");
                return;
        }

        val = readl(qm->io_base + SEC_CONTROL_REG);

        /* clear SEC hw error source if any */
        writel(SEC_CORE_INT_DISABLE, qm->io_base + SEC_CORE_INT_SOURCE);

        /* enable SEC hw error interrupts */
        writel(SEC_CORE_INT_ENABLE, qm->io_base + SEC_CORE_INT_MASK);

        /* enable RAS int */
        writel(SEC_RAS_CE_ENB_MSK, qm->io_base + SEC_RAS_CE_REG);
        writel(SEC_RAS_FE_ENB_MSK, qm->io_base + SEC_RAS_FE_REG);
        writel(SEC_RAS_NFE_ENB_MSK, qm->io_base + SEC_RAS_NFE_REG);

        /* enable SEC block master OOO when a multi-bit error occurs */
        val = val | SEC_AXI_SHUTDOWN_ENABLE;

        writel(val, qm->io_base + SEC_CONTROL_REG);
}

static void sec_hw_error_disable(struct sec_dev *sec)
{
        struct hisi_qm *qm = &sec->qm;
        u32 val;

        val = readl(qm->io_base + SEC_CONTROL_REG);

        /* disable RAS int */
        writel(SEC_RAS_DISABLE, qm->io_base + SEC_RAS_CE_REG);
        writel(SEC_RAS_DISABLE, qm->io_base + SEC_RAS_FE_REG);
        writel(SEC_RAS_DISABLE, qm->io_base + SEC_RAS_NFE_REG);

        /* disable SEC hw error interrupts */
        writel(SEC_CORE_INT_DISABLE, qm->io_base + SEC_CORE_INT_MASK);

        /* disable SEC block master OOO when a multi-bit error occurs */
        val = val & SEC_AXI_SHUTDOWN_DISABLE;

        writel(val, qm->io_base + SEC_CONTROL_REG);
}

static void sec_hw_error_init(struct sec_dev *sec)
{
        if (sec->qm.fun_type == QM_HW_VF)
                return;

        hisi_qm_hw_error_init(&sec->qm, QM_BASE_CE,
                              QM_BASE_NFE | QM_ACC_DO_TASK_TIMEOUT
                              | QM_ACC_WB_NOT_READY_TIMEOUT, 0,
                              QM_DB_RANDOM_INVALID);
        sec_hw_error_enable(sec);
}

static void sec_hw_error_uninit(struct sec_dev *sec)
{
        if (sec->qm.fun_type == QM_HW_VF)
                return;

        sec_hw_error_disable(sec);
        writel(GENMASK(12, 0), sec->qm.io_base + SEC_QM_ABNORMAL_INT_MASK);
}

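/*
 * The "current_qm" debugfs file selects whose counters the QM DFX
 * registers report: 0 is the PF itself, 1..num_vfs selects a VF.
 */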
static u32 sec_current_qm_read(struct sec_debug_file *file)
{
        struct hisi_qm *qm = file->qm;

        return readl(qm->io_base + QM_DFX_MB_CNT_VF);
}

static int sec_current_qm_write(struct sec_debug_file *file, u32 val)
{
        struct hisi_qm *qm = file->qm;
        struct sec_dev *sec = container_of(qm, struct sec_dev, qm);
        u32 vfq_num;
        u32 tmp;

        if (val > sec->num_vfs)
                return -EINVAL;

        /* calculate curr_qm_qp_num for the selected PF/VF and store it */
        if (!val) {
                qm->debug.curr_qm_qp_num = qm->qp_num;
        } else {
                vfq_num = (qm->ctrl_qp_num - qm->qp_num) / sec->num_vfs;

                /* the last VF also carries the remainder of the queues */
                if (val == sec->num_vfs)
                        qm->debug.curr_qm_qp_num =
                                qm->ctrl_qp_num - qm->qp_num -
                                (sec->num_vfs - 1) * vfq_num;
                else
                        qm->debug.curr_qm_qp_num = vfq_num;
        }

        writel(val, qm->io_base + QM_DFX_MB_CNT_VF);
        writel(val, qm->io_base + QM_DFX_DB_CNT_VF);

        tmp = val |
              (readl(qm->io_base + QM_DFX_SQE_CNT_VF_SQN) & CURRENT_Q_MASK);
        writel(tmp, qm->io_base + QM_DFX_SQE_CNT_VF_SQN);

        tmp = val |
              (readl(qm->io_base + QM_DFX_CQE_CNT_VF_CQN) & CURRENT_Q_MASK);
        writel(tmp, qm->io_base + QM_DFX_CQE_CNT_VF_CQN);

        return 0;
}

static u32 sec_clear_enable_read(struct sec_debug_file *file)
{
        struct hisi_qm *qm = file->qm;

        return readl(qm->io_base + SEC_CTRL_CNT_CLR_CE) &
                        SEC_CTRL_CNT_CLR_CE_BIT;
}

static int sec_clear_enable_write(struct sec_debug_file *file, u32 val)
{
        struct hisi_qm *qm = file->qm;
        u32 tmp;

        /* only 0 and 1 are valid */
        if (val != 1 && val != 0)
                return -EINVAL;

        tmp = (readl(qm->io_base + SEC_CTRL_CNT_CLR_CE) &
               ~SEC_CTRL_CNT_CLR_CE_BIT) | val;
        writel(tmp, qm->io_base + SEC_CTRL_CNT_CLR_CE);

        return 0;
}

static ssize_t sec_debug_read(struct file *filp, char __user *buf,
                              size_t count, loff_t *pos)
{
        struct sec_debug_file *file = filp->private_data;
        char tbuf[SEC_DBGFS_VAL_MAX_LEN];
        u32 val;
        int ret;

        spin_lock_irq(&file->lock);

        switch (file->index) {
        case SEC_CURRENT_QM:
                val = sec_current_qm_read(file);
                break;
        case SEC_CLEAR_ENABLE:
                val = sec_clear_enable_read(file);
                break;
        default:
                spin_unlock_irq(&file->lock);
                return -EINVAL;
        }

        spin_unlock_irq(&file->lock);
        ret = snprintf(tbuf, SEC_DBGFS_VAL_MAX_LEN, "%u\n", val);

        return simple_read_from_buffer(buf, count, pos, tbuf, ret);
}

static ssize_t sec_debug_write(struct file *filp, const char __user *buf,
                               size_t count, loff_t *pos)
{
        struct sec_debug_file *file = filp->private_data;
        char tbuf[SEC_DBGFS_VAL_MAX_LEN];
        unsigned long val;
        int len, ret;

        if (*pos != 0)
                return 0;

        if (count >= SEC_DBGFS_VAL_MAX_LEN)
                return -ENOSPC;

        len = simple_write_to_buffer(tbuf, SEC_DBGFS_VAL_MAX_LEN - 1,
                                     pos, buf, count);
        if (len < 0)
                return len;

        tbuf[len] = '\0';
        if (kstrtoul(tbuf, 0, &val))
                return -EINVAL;

        spin_lock_irq(&file->lock);

        switch (file->index) {
        case SEC_CURRENT_QM:
                ret = sec_current_qm_write(file, val);
                if (ret)
                        goto err_input;
                break;
        case SEC_CLEAR_ENABLE:
                ret = sec_clear_enable_write(file, val);
                if (ret)
                        goto err_input;
                break;
        default:
                ret = -EINVAL;
                goto err_input;
        }

        spin_unlock_irq(&file->lock);

        return count;

err_input:
        spin_unlock_irq(&file->lock);
        return ret;
}

static const struct file_operations sec_dbg_fops = {
        .owner = THIS_MODULE,
        .open = simple_open,
        .read = sec_debug_read,
        .write = sec_debug_write,
};

static int sec_debugfs_atomic64_get(void *data, u64 *val)
{
        *val = atomic64_read((atomic64_t *)data);
        return 0;
}
DEFINE_DEBUGFS_ATTRIBUTE(sec_atomic64_ops, sec_debugfs_atomic64_get,
                         NULL, "%lld\n");

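/*
 * Per-device DFX debugfs: a "sec_dfx" directory exposing a snapshot of
 * the registers in sec_dfx_regs plus the atomic64 send/recv counters
 * updated by the request path.
 */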
static int sec_core_debug_init(struct sec_dev *sec)
{
        struct hisi_qm *qm = &sec->qm;
        struct device *dev = &qm->pdev->dev;
        struct sec_dfx *dfx = &sec->debug.dfx;
        struct debugfs_regset32 *regset;
        struct dentry *tmp_d;

        tmp_d = debugfs_create_dir("sec_dfx", sec->qm.debug.debug_root);

        regset = devm_kzalloc(dev, sizeof(*regset), GFP_KERNEL);
        if (!regset)
                return -ENOMEM;

        regset->regs = sec_dfx_regs;
        regset->nregs = ARRAY_SIZE(sec_dfx_regs);
        regset->base = qm->io_base;

        debugfs_create_regset32("regs", 0444, tmp_d, regset);

        debugfs_create_file("send_cnt", 0444, tmp_d,
                            &dfx->send_cnt, &sec_atomic64_ops);

        debugfs_create_file("recv_cnt", 0444, tmp_d,
                            &dfx->recv_cnt, &sec_atomic64_ops);

        return 0;
}

static int sec_debug_init(struct sec_dev *sec)
{
        int i;

        for (i = SEC_CURRENT_QM; i < SEC_DEBUG_FILE_NUM; i++) {
                spin_lock_init(&sec->debug.files[i].lock);
                sec->debug.files[i].index = i;
                sec->debug.files[i].qm = &sec->qm;

                debugfs_create_file(sec_dbg_file_name[i], 0600,
                                    sec->qm.debug.debug_root,
                                    sec->debug.files + i,
                                    &sec_dbg_fops);
        }

        return sec_core_debug_init(sec);
}

static int sec_debugfs_init(struct sec_dev *sec)
{
        struct hisi_qm *qm = &sec->qm;
        struct device *dev = &qm->pdev->dev;
        int ret;

        qm->debug.debug_root = debugfs_create_dir(dev_name(dev),
                                                  sec_debugfs_root);
        ret = hisi_qm_debug_init(qm);
        if (ret)
                goto failed_to_create;

        if (qm->pdev->device == SEC_PF_PCI_DEVICE_ID) {
                ret = sec_debug_init(sec);
                if (ret)
                        goto failed_to_create;
        }

        return 0;

failed_to_create:
        /* only tear down this device's directory, not the module root */
        debugfs_remove_recursive(qm->debug.debug_root);

        return ret;
}

static void sec_debugfs_exit(struct sec_dev *sec)
{
        debugfs_remove_recursive(sec->qm.debug.debug_root);
}

static int sec_pf_probe_init(struct sec_dev *sec)
{
        struct hisi_qm *qm = &sec->qm;
        int ret;

        switch (qm->ver) {
        case QM_HW_V1:
                qm->ctrl_qp_num = SEC_QUEUE_NUM_V1;
                break;
        case QM_HW_V2:
                qm->ctrl_qp_num = SEC_QUEUE_NUM_V2;
                break;
        default:
                return -EINVAL;
        }

        ret = sec_set_user_domain_and_cache(sec);
        if (ret)
                return ret;

        sec_hw_error_init(sec);
        sec_debug_regs_clear(qm);

        return 0;
}

static int sec_qm_init(struct hisi_qm *qm, struct pci_dev *pdev)
{
        enum qm_hw_ver rev_id;

        rev_id = hisi_qm_get_hw_version(pdev);
        if (rev_id == QM_HW_UNKNOWN)
                return -ENODEV;

        qm->pdev = pdev;
        qm->ver = rev_id;

        qm->sqe_size = SEC_SQE_SIZE;
        qm->dev_name = sec_name;
        qm->fun_type = (pdev->device == SEC_PF_PCI_DEVICE_ID) ?
                        QM_HW_PF : QM_HW_VF;
        qm->use_dma_api = true;

        return hisi_qm_init(qm);
}

static void sec_qm_uninit(struct hisi_qm *qm)
{
        hisi_qm_uninit(qm);
}

static int sec_probe_init(struct hisi_qm *qm, struct sec_dev *sec)
{
        if (qm->fun_type == QM_HW_PF) {
                qm->qp_base = SEC_PF_DEF_Q_BASE;
                qm->qp_num = pf_q_num;
                qm->debug.curr_qm_qp_num = pf_q_num;

                return sec_pf_probe_init(sec);
        } else if (qm->fun_type == QM_HW_VF) {
                /*
                 * On v1 hardware a VF (e.g. in a VM) has no way to query
                 * its QM configuration, so the PF is forced to use
                 * SEC_PF_DEF_Q_NUM queues, only one VF is supported, and
                 * that VF takes all of the remaining queues. v2 hardware
                 * has no such problem.
                 */
                if (qm->ver == QM_HW_V1) {
                        qm->qp_base = SEC_PF_DEF_Q_NUM;
                        qm->qp_num = SEC_QUEUE_NUM_V1 - SEC_PF_DEF_Q_NUM;
                } else if (qm->ver == QM_HW_V2) {
                        /* from v2 on, the VFT can be queried via mailbox */
                        return hisi_qm_get_vft(qm, &qm->qp_base, &qm->qp_num);
                }
        } else {
                return -ENODEV;
        }

        return 0;
}

static void sec_probe_uninit(struct sec_dev *sec)
{
        sec_hw_error_uninit(sec);
}

static int sec_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
        struct sec_dev *sec;
        struct hisi_qm *qm;
        int ret;

        sec = devm_kzalloc(&pdev->dev, sizeof(*sec), GFP_KERNEL);
        if (!sec)
                return -ENOMEM;

        pci_set_drvdata(pdev, sec);

        sec->ctx_q_num = ctx_q_num;

        qm = &sec->qm;

        ret = sec_qm_init(qm, pdev);
        if (ret) {
                pci_err(pdev, "Failed to pre init qm!\n");
                return ret;
        }

        ret = sec_probe_init(qm, sec);
        if (ret) {
                pci_err(pdev, "Failed to probe!\n");
                goto err_qm_uninit;
        }

        ret = hisi_qm_start(qm);
        if (ret) {
                pci_err(pdev, "Failed to start sec qm!\n");
                goto err_probe_uninit;
        }

        ret = sec_debugfs_init(sec);
        if (ret)
                pci_warn(pdev, "Failed to init debugfs!\n");

        sec_add_to_list(sec);

        ret = sec_register_to_crypto();
        if (ret < 0) {
                pr_err("Failed to register driver to crypto.\n");
                goto err_remove_from_list;
        }

        return 0;

err_remove_from_list:
        sec_remove_from_list(sec);
        sec_debugfs_exit(sec);
        hisi_qm_stop(qm);

err_probe_uninit:
        sec_probe_uninit(sec);

err_qm_uninit:
        sec_qm_uninit(qm);

        return ret;
}

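/*
 * VF queue assignment example (illustrative numbers): with ctrl_qp_num =
 * 1024, a PF using qp_num = 64 and num_vfs = 7, each VF is assigned
 * (1024 - 64) / 7 = 137 queue pairs and the last VF also takes the
 * remainder, 137 + 1 = 138, so every hardware queue is accounted for.
 */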
/* currently only equal assignment of queues among VFs is supported */
static int sec_vf_q_assign(struct sec_dev *sec, u32 num_vfs)
{
        struct hisi_qm *qm = &sec->qm;
        u32 qp_num = qm->qp_num;
        u32 q_base = qp_num;
        u32 q_num, remain_q_num;
        int i, j, ret;

        if (!num_vfs)
                return -EINVAL;

        remain_q_num = qm->ctrl_qp_num - qp_num;
        q_num = remain_q_num / num_vfs;

        for (i = 1; i <= num_vfs; i++) {
                /* the last VF also takes the remainder */
                if (i == num_vfs)
                        q_num += remain_q_num % num_vfs;
                ret = hisi_qm_set_vft(qm, i, q_base, q_num);
                if (ret) {
                        /* roll back the VFTs configured so far */
                        for (j = i; j > 0; j--)
                                hisi_qm_set_vft(qm, j, 0, 0);
                        return ret;
                }
                q_base += q_num;
        }

        return 0;
}

static int sec_clear_vft_config(struct sec_dev *sec)
{
        struct hisi_qm *qm = &sec->qm;
        u32 num_vfs = sec->num_vfs;
        int ret;
        u32 i;

        for (i = 1; i <= num_vfs; i++) {
                ret = hisi_qm_set_vft(qm, i, 0, 0);
                if (ret)
                        return ret;
        }

        sec->num_vfs = 0;

        return 0;
}

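/*
 * SR-IOV enable: queues must be carved out of the VFT before the VFs
 * are turned on, and the VFT is cleared again if pci_enable_sriov()
 * fails. Returns the number of VFs actually enabled, or 0 if VFs were
 * already enabled.
 */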
static int sec_sriov_enable(struct pci_dev *pdev, int max_vfs)
{
        struct sec_dev *sec = pci_get_drvdata(pdev);
        int pre_existing_vfs, ret;
        u32 num_vfs;

        pre_existing_vfs = pci_num_vf(pdev);

        if (pre_existing_vfs) {
                pci_err(pdev,
                        "Can't enable VFs, please disable the existing VFs first!\n");
                return 0;
        }

        num_vfs = min_t(u32, max_vfs, SEC_VF_NUM);

        ret = sec_vf_q_assign(sec, num_vfs);
        if (ret) {
                pci_err(pdev, "Can't assign queues for VF!\n");
                return ret;
        }

        sec->num_vfs = num_vfs;

        ret = pci_enable_sriov(pdev, num_vfs);
        if (ret) {
                pci_err(pdev, "Can't enable VF!\n");
                sec_clear_vft_config(sec);
                return ret;
        }

        return num_vfs;
}

static int sec_sriov_disable(struct pci_dev *pdev)
{
        struct sec_dev *sec = pci_get_drvdata(pdev);

        if (pci_vfs_assigned(pdev)) {
                pci_err(pdev, "Can't disable VFs while VFs are assigned!\n");
                return -EPERM;
        }

        /* the remove callback of sec_pci_driver frees the VF resources */
        pci_disable_sriov(pdev);

        return sec_clear_vft_config(sec);
}

static int sec_sriov_configure(struct pci_dev *pdev, int num_vfs)
{
        if (num_vfs)
                return sec_sriov_enable(pdev, num_vfs);
        else
                return sec_sriov_disable(pdev);
}

static void sec_remove(struct pci_dev *pdev)
{
        struct sec_dev *sec = pci_get_drvdata(pdev);
        struct hisi_qm *qm = &sec->qm;

        sec_unregister_from_crypto();

        sec_remove_from_list(sec);

        if (qm->fun_type == QM_HW_PF && sec->num_vfs)
                (void)sec_sriov_disable(pdev);

        sec_debugfs_exit(sec);

        (void)hisi_qm_stop(qm);

        if (qm->fun_type == QM_HW_PF)
                sec_debug_regs_clear(qm);

        sec_probe_uninit(sec);

        sec_qm_uninit(qm);
}

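/*
 * Walk the sec_hw_errors table and log every error bit set in the
 * interrupt status; on a multi-bit ECC error the SRAM ECC info register
 * is also decoded into a count and an address.
 */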
static void sec_log_hw_error(struct sec_dev *sec, u32 err_sts)
{
        const struct sec_hw_error *errs = sec_hw_errors;
        struct device *dev = &sec->qm.pdev->dev;
        u32 err_val;

        while (errs->msg) {
                if (errs->int_msk & err_sts) {
                        dev_err(dev, "%s [error status=0x%x] found\n",
                                errs->msg, errs->int_msk);

                        if (SEC_CORE_INT_STATUS_M_ECC & err_sts) {
                                err_val = readl(sec->qm.io_base +
                                                SEC_CORE_SRAM_ECC_ERR_INFO);
                                dev_err(dev, "multi ecc sram num=0x%x\n",
                                        SEC_ECC_NUM(err_val));
                                dev_err(dev, "multi ecc sram addr=0x%x\n",
                                        SEC_ECC_ADDR(err_val));
                        }
                }
                errs++;
        }
}

static pci_ers_result_t sec_hw_error_handle(struct sec_dev *sec)
{
        u32 err_sts;

        /* read error status */
        err_sts = readl(sec->qm.io_base + SEC_CORE_INT_STATUS);
        if (err_sts) {
                sec_log_hw_error(sec, err_sts);

                /* clear error interrupts */
                writel(err_sts, sec->qm.io_base + SEC_CORE_INT_SOURCE);

                return PCI_ERS_RESULT_NEED_RESET;
        }

        return PCI_ERS_RESULT_RECOVERED;
}

static pci_ers_result_t sec_process_hw_error(struct pci_dev *pdev)
{
        struct sec_dev *sec = pci_get_drvdata(pdev);
        pci_ers_result_t qm_ret, sec_ret;

        if (!sec) {
                pci_err(pdev, "Can't recover error during device init\n");
                return PCI_ERS_RESULT_NONE;
        }

        /* log qm error */
        qm_ret = hisi_qm_hw_error_handle(&sec->qm);

        /* log sec error */
        sec_ret = sec_hw_error_handle(sec);

        return (qm_ret == PCI_ERS_RESULT_NEED_RESET ||
                sec_ret == PCI_ERS_RESULT_NEED_RESET) ?
                PCI_ERS_RESULT_NEED_RESET : PCI_ERS_RESULT_RECOVERED;
}

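/*
 * AER error_detected callback. VFs report no error state of their own
 * (recovery is left to the PF); a permanently failed channel is
 * disconnected, anything else is triaged via the QM and SEC error
 * status registers.
 */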
static pci_ers_result_t sec_error_detected(struct pci_dev *pdev,
                                           pci_channel_state_t state)
{
        if (pdev->is_virtfn)
                return PCI_ERS_RESULT_NONE;

        pci_info(pdev, "PCI error detected, state = %d!\n", state);
        if (state == pci_channel_io_perm_failure)
                return PCI_ERS_RESULT_DISCONNECT;

        return sec_process_hw_error(pdev);
}

static const struct pci_error_handlers sec_err_handler = {
        .error_detected = sec_error_detected,
};

static struct pci_driver sec_pci_driver = {
        .name = "hisi_sec2",
        .id_table = sec_dev_ids,
        .probe = sec_probe,
        .remove = sec_remove,
        .err_handler = &sec_err_handler,
        .sriov_configure = sec_sriov_configure,
};

static void sec_register_debugfs(void)
{
        if (!debugfs_initialized())
                return;

        sec_debugfs_root = debugfs_create_dir("hisi_sec2", NULL);
}

static void sec_unregister_debugfs(void)
{
        debugfs_remove_recursive(sec_debugfs_root);
}

static int __init sec_init(void)
{
        int ret;

        sec_register_debugfs();

        ret = pci_register_driver(&sec_pci_driver);
        if (ret < 0) {
                sec_unregister_debugfs();
                pr_err("Failed to register pci driver.\n");
                return ret;
        }

        return 0;
}

static void __exit sec_exit(void)
{
        pci_unregister_driver(&sec_pci_driver);
        sec_unregister_debugfs();
}

module_init(sec_init);
module_exit(sec_exit);

MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Zaibo Xu <xuzaibo@huawei.com>");
MODULE_AUTHOR("Longfang Liu <liulongfang@huawei.com>");
MODULE_AUTHOR("Wei Zhang <zhangwei375@huawei.com>");
MODULE_DESCRIPTION("Driver for HiSilicon SEC accelerator");