// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2019 HiSilicon Limited. */

#include <linux/acpi.h>
#include <linux/aer.h>
#include <linux/bitops.h>
#include <linux/debugfs.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/iommu.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/seq_file.h>
#include <linux/topology.h>

#include "sec.h"

#define SEC_VF_NUM 63
#define SEC_QUEUE_NUM_V1 4096
#define SEC_QUEUE_NUM_V2 1024
#define SEC_PF_PCI_DEVICE_ID 0xa255
#define SEC_VF_PCI_DEVICE_ID 0xa256

#define SEC_BD_ERR_CHK_EN0 0xEFFFFFFF
#define SEC_BD_ERR_CHK_EN1 0x7ffff7fd
#define SEC_BD_ERR_CHK_EN3 0xffffbfff

#define SEC_SQE_SIZE 128
#define SEC_SQ_SIZE (SEC_SQE_SIZE * QM_Q_DEPTH)
#define SEC_PF_DEF_Q_NUM 256
#define SEC_PF_DEF_Q_BASE 0
#define SEC_CTX_Q_NUM_DEF 2
#define SEC_CTX_Q_NUM_MAX 32

#define SEC_CTRL_CNT_CLR_CE 0x301120
#define SEC_CTRL_CNT_CLR_CE_BIT BIT(0)
#define SEC_ENGINE_PF_CFG_OFF 0x300000
#define SEC_ACC_COMMON_REG_OFF 0x1000
#define SEC_CORE_INT_SOURCE 0x301010
#define SEC_CORE_INT_MASK 0x301000
#define SEC_CORE_INT_STATUS 0x301008
#define SEC_CORE_SRAM_ECC_ERR_INFO 0x301C14
#define SEC_ECC_NUM(err) (((err) >> 16) & 0xFF)
#define SEC_ECC_ADDR(err) ((err) >> 0)
#define SEC_CORE_INT_DISABLE 0x0
#define SEC_CORE_INT_ENABLE 0x1ff
#define SEC_CORE_INT_CLEAR 0x1ff
#define SEC_SAA_ENABLE 0x17f

#define SEC_RAS_CE_REG 0x301050
#define SEC_RAS_FE_REG 0x301054
#define SEC_RAS_NFE_REG 0x301058
#define SEC_RAS_CE_ENB_MSK 0x88
#define SEC_RAS_FE_ENB_MSK 0x0
#define SEC_RAS_NFE_ENB_MSK 0x177
#define SEC_RAS_DISABLE 0x0
#define SEC_MEM_START_INIT_REG 0x0100
#define SEC_MEM_INIT_DONE_REG 0x0104

#define SEC_CONTROL_REG 0x0200
#define SEC_TRNG_EN_SHIFT 8
#define SEC_CLK_GATE_ENABLE BIT(3)
#define SEC_CLK_GATE_DISABLE (~BIT(3))
#define SEC_AXI_SHUTDOWN_ENABLE BIT(12)
#define SEC_AXI_SHUTDOWN_DISABLE 0xFFFFEFFF

#define SEC_INTERFACE_USER_CTRL0_REG 0x0220
#define SEC_INTERFACE_USER_CTRL1_REG 0x0224
#define SEC_SAA_EN_REG 0x0270
#define SEC_BD_ERR_CHK_EN_REG0 0x0380
#define SEC_BD_ERR_CHK_EN_REG1 0x0384
#define SEC_BD_ERR_CHK_EN_REG3 0x038c

#define SEC_USER0_SMMU_NORMAL (BIT(23) | BIT(15))
#define SEC_USER1_SMMU_NORMAL (BIT(31) | BIT(23) | BIT(15) | BIT(7))
#define SEC_CORE_INT_STATUS_M_ECC BIT(2)

#define SEC_DELAY_10_US 10
#define SEC_POLL_TIMEOUT_US 1000
#define SEC_DBGFS_VAL_MAX_LEN 20
#define SEC_SINGLE_PORT_MAX_TRANS 0x2060

#define SEC_SQE_MASK_OFFSET 64
#define SEC_SQE_MASK_LEN 48

#define SEC_ADDR(qm, offset) ((qm)->io_base + (offset) + \
			      SEC_ENGINE_PF_CFG_OFF + SEC_ACC_COMMON_REG_OFF)
struct sec_hw_error {
	u32 int_msk;
	const char *msg;
};

struct sec_dfx_item {
	const char *name;
	u32 offset;
};

static const char sec_name[] = "hisi_sec2";
static struct dentry *sec_debugfs_root;
static struct hisi_qm_list sec_devices;

static const struct sec_hw_error sec_hw_errors[] = {
	{.int_msk = BIT(0), .msg = "sec_axi_rresp_err_rint"},
	{.int_msk = BIT(1), .msg = "sec_axi_bresp_err_rint"},
	{.int_msk = BIT(2), .msg = "sec_ecc_2bit_err_rint"},
	{.int_msk = BIT(3), .msg = "sec_ecc_1bit_err_rint"},
	{.int_msk = BIT(4), .msg = "sec_req_trng_timeout_rint"},
	{.int_msk = BIT(5), .msg = "sec_fsm_hbeat_rint"},
	{.int_msk = BIT(6), .msg = "sec_channel_req_rng_timeout_rint"},
	{.int_msk = BIT(7), .msg = "sec_bd_err_rint"},
	{.int_msk = BIT(8), .msg = "sec_chain_buff_err_rint"},
	{ }
};

static const char * const sec_dbg_file_name[] = {
	[SEC_CURRENT_QM] = "current_qm",
	[SEC_CLEAR_ENABLE] = "clear_enable",
};

static struct sec_dfx_item sec_dfx_labels[] = {
	{"send_cnt", offsetof(struct sec_dfx, send_cnt)},
	{"recv_cnt", offsetof(struct sec_dfx, recv_cnt)},
	{"send_busy_cnt", offsetof(struct sec_dfx, send_busy_cnt)},
	{"recv_busy_cnt", offsetof(struct sec_dfx, recv_busy_cnt)},
	{"err_bd_cnt", offsetof(struct sec_dfx, err_bd_cnt)},
	{"invalid_req_cnt", offsetof(struct sec_dfx, invalid_req_cnt)},
	{"done_flag_cnt", offsetof(struct sec_dfx, done_flag_cnt)},
};

static const struct debugfs_reg32 sec_dfx_regs[] = {
	{"SEC_PF_ABNORMAL_INT_SOURCE ", 0x301010},
	{"SEC_SAA_EN ", 0x301270},
	{"SEC_BD_LATENCY_MIN ", 0x301600},
	{"SEC_BD_LATENCY_MAX ", 0x301608},
	{"SEC_BD_LATENCY_AVG ", 0x30160C},
	{"SEC_BD_NUM_IN_SAA0 ", 0x301670},
	{"SEC_BD_NUM_IN_SAA1 ", 0x301674},
	{"SEC_BD_NUM_IN_SEC ", 0x301680},
	{"SEC_ECC_1BIT_CNT ", 0x301C00},
	{"SEC_ECC_1BIT_INFO ", 0x301C04},
	{"SEC_ECC_2BIT_CNT ", 0x301C10},
	{"SEC_ECC_2BIT_INFO ", 0x301C14},
	{"SEC_BD_SAA0 ", 0x301C20},
	{"SEC_BD_SAA1 ", 0x301C24},
	{"SEC_BD_SAA2 ", 0x301C28},
	{"SEC_BD_SAA3 ", 0x301C2C},
	{"SEC_BD_SAA4 ", 0x301C30},
	{"SEC_BD_SAA5 ", 0x301C34},
	{"SEC_BD_SAA6 ", 0x301C38},
	{"SEC_BD_SAA7 ", 0x301C3C},
	{"SEC_BD_SAA8 ", 0x301C40},
};

static int sec_pf_q_num_set(const char *val, const struct kernel_param *kp)
{
	return q_num_set(val, kp, SEC_PF_PCI_DEVICE_ID);
}

static const struct kernel_param_ops sec_pf_q_num_ops = {
	.set = sec_pf_q_num_set,
	.get = param_get_int,
};

static u32 pf_q_num = SEC_PF_DEF_Q_NUM;
module_param_cb(pf_q_num, &sec_pf_q_num_ops, &pf_q_num, 0444);
MODULE_PARM_DESC(pf_q_num, "Number of queues in PF(v1 0-4096, v2 0-1024)");

static int sec_ctx_q_num_set(const char *val, const struct kernel_param *kp)
{
	u32 ctx_q_num;
	int ret;

	if (!val)
		return -EINVAL;

	ret = kstrtou32(val, 10, &ctx_q_num);
	if (ret)
		return -EINVAL;

	if (!ctx_q_num || ctx_q_num > SEC_CTX_Q_NUM_MAX || ctx_q_num & 0x1) {
		pr_err("ctx queue num[%u] is invalid!\n", ctx_q_num);
		return -EINVAL;
	}

	return param_set_int(val, kp);
}

static const struct kernel_param_ops sec_ctx_q_num_ops = {
	.set = sec_ctx_q_num_set,
	.get = param_get_int,
};
static u32 ctx_q_num = SEC_CTX_Q_NUM_DEF;
module_param_cb(ctx_q_num, &sec_ctx_q_num_ops, &ctx_q_num, 0444);
MODULE_PARM_DESC(ctx_q_num, "Queue num in ctx (2 default, 2, 4, ..., 32)");

static const struct kernel_param_ops vfs_num_ops = {
	.set = vfs_num_set,
	.get = param_get_int,
};

static u32 vfs_num;
module_param_cb(vfs_num, &vfs_num_ops, &vfs_num, 0444);
MODULE_PARM_DESC(vfs_num, "Number of VFs to enable(1-63), 0(default)");

void sec_destroy_qps(struct hisi_qp **qps, int qp_num)
{
	hisi_qm_free_qps(qps, qp_num);
	kfree(qps);
}

struct hisi_qp **sec_create_qps(void)
{
	int node = cpu_to_node(smp_processor_id());
	u32 ctx_num = ctx_q_num;
	struct hisi_qp **qps;
	int ret;

	qps = kcalloc(ctx_num, sizeof(struct hisi_qp *), GFP_KERNEL);
	if (!qps)
		return NULL;

	ret = hisi_qm_alloc_qps_node(&sec_devices, ctx_num, 0, node, qps);
	if (!ret)
		return qps;

	kfree(qps);
	return NULL;
}

static const struct pci_device_id sec_dev_ids[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_HUAWEI, SEC_PF_PCI_DEVICE_ID) },
	{ PCI_DEVICE(PCI_VENDOR_ID_HUAWEI, SEC_VF_PCI_DEVICE_ID) },
	{ 0, }
};
MODULE_DEVICE_TABLE(pci, sec_dev_ids);

static u8 sec_get_endian(struct hisi_qm *qm)
{
	u32 reg;
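
	/*
	 * A VF cannot read SEC_CONTROL_REG in the PF engine
	 * configuration space, so default to little endian there.
	 */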
	if (qm->pdev->is_virtfn) {
		dev_err_ratelimited(&qm->pdev->dev,
				    "cannot access a register in VF!\n");
		return SEC_LE;
	}

	reg = readl_relaxed(qm->io_base + SEC_ENGINE_PF_CFG_OFF +
			    SEC_ACC_COMMON_REG_OFF + SEC_CONTROL_REG);

	/* BD little endian mode */
	if (!(reg & BIT(0)))
		return SEC_LE;

	/* BD 32-bits big endian mode */
	else if (!(reg & BIT(1)))
		return SEC_32BE;

	/* BD 64-bits big endian mode */
	else
		return SEC_64BE;
}

static int sec_engine_init(struct hisi_qm *qm)
{
	int ret;
	u32 reg;
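
	/* disable clock gate control */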
	reg = readl_relaxed(SEC_ADDR(qm, SEC_CONTROL_REG));
	reg &= SEC_CLK_GATE_DISABLE;
	writel_relaxed(reg, SEC_ADDR(qm, SEC_CONTROL_REG));

	writel_relaxed(0x1, SEC_ADDR(qm, SEC_MEM_START_INIT_REG));

	ret = readl_relaxed_poll_timeout(SEC_ADDR(qm, SEC_MEM_INIT_DONE_REG),
					 reg, reg & 0x1, SEC_DELAY_10_US,
					 SEC_POLL_TIMEOUT_US);
	if (ret) {
		pci_err(qm->pdev, "failed to init sec mem\n");
		return ret;
	}

	reg = readl_relaxed(SEC_ADDR(qm, SEC_CONTROL_REG));
	reg |= (0x1 << SEC_TRNG_EN_SHIFT);
	writel_relaxed(reg, SEC_ADDR(qm, SEC_CONTROL_REG));

	reg = readl_relaxed(SEC_ADDR(qm, SEC_INTERFACE_USER_CTRL0_REG));
	reg |= SEC_USER0_SMMU_NORMAL;
	writel_relaxed(reg, SEC_ADDR(qm, SEC_INTERFACE_USER_CTRL0_REG));

	reg = readl_relaxed(SEC_ADDR(qm, SEC_INTERFACE_USER_CTRL1_REG));
	reg |= SEC_USER1_SMMU_NORMAL;
	writel_relaxed(reg, SEC_ADDR(qm, SEC_INTERFACE_USER_CTRL1_REG));

	writel(SEC_SINGLE_PORT_MAX_TRANS,
	       qm->io_base + AM_CFG_SINGLE_PORT_MAX_TRANS);

	writel(SEC_SAA_ENABLE, SEC_ADDR(qm, SEC_SAA_EN_REG));
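
	/* enable checking for abnormal BD fields */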
	writel_relaxed(SEC_BD_ERR_CHK_EN0,
		       SEC_ADDR(qm, SEC_BD_ERR_CHK_EN_REG0));
	writel_relaxed(SEC_BD_ERR_CHK_EN1,
		       SEC_ADDR(qm, SEC_BD_ERR_CHK_EN_REG1));
	writel_relaxed(SEC_BD_ERR_CHK_EN3,
		       SEC_ADDR(qm, SEC_BD_ERR_CHK_EN_REG3));
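
	/* config endian */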
	reg = readl_relaxed(SEC_ADDR(qm, SEC_CONTROL_REG));
	reg |= sec_get_endian(qm);
	writel_relaxed(reg, SEC_ADDR(qm, SEC_CONTROL_REG));

	return 0;
}

static int sec_set_user_domain_and_cache(struct hisi_qm *qm)
{
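	/* qm user domain */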
	writel(AXUSER_BASE, qm->io_base + QM_ARUSER_M_CFG_1);
	writel(ARUSER_M_CFG_ENABLE, qm->io_base + QM_ARUSER_M_CFG_ENABLE);
	writel(AXUSER_BASE, qm->io_base + QM_AWUSER_M_CFG_1);
	writel(AWUSER_M_CFG_ENABLE, qm->io_base + QM_AWUSER_M_CFG_ENABLE);
	writel(WUSER_M_CFG_ENABLE, qm->io_base + QM_WUSER_M_CFG_ENABLE);
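
	/* qm cache */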
	writel(AXI_M_CFG, qm->io_base + QM_AXI_M_CFG);
	writel(AXI_M_CFG_ENABLE, qm->io_base + QM_AXI_M_CFG_ENABLE);
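
	/* disable FLR triggered by BME(bus master enable) */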
	writel(PEH_AXUSER_CFG, qm->io_base + QM_PEH_AXUSER_CFG);
	writel(PEH_AXUSER_CFG_ENABLE, qm->io_base + QM_PEH_AXUSER_CFG_ENABLE);
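
	/* enable sqc, cqc writeback */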
	writel(SQC_CACHE_ENABLE | CQC_CACHE_ENABLE | SQC_CACHE_WB_ENABLE |
	       CQC_CACHE_WB_ENABLE | FIELD_PREP(SQC_CACHE_WB_THRD, 1) |
	       FIELD_PREP(CQC_CACHE_WB_THRD, 1), qm->io_base + QM_CACHE_CTL);

	return sec_engine_init(qm);
}
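/* sec_debug_regs_clear() - clear the sec debug regs */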
static void sec_debug_regs_clear(struct hisi_qm *qm)
{
	int i;
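
	/* clear current_qm */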
	writel(0x0, qm->io_base + QM_DFX_MB_CNT_VF);
	writel(0x0, qm->io_base + QM_DFX_DB_CNT_VF);
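
	/* clear sec dfx regs */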
	writel(0x1, qm->io_base + SEC_CTRL_CNT_CLR_CE);
	for (i = 0; i < ARRAY_SIZE(sec_dfx_regs); i++)
		readl(qm->io_base + sec_dfx_regs[i].offset);
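
	/* turn read-clear mode back off */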
	writel(0x0, qm->io_base + SEC_CTRL_CNT_CLR_CE);

	hisi_qm_debug_regs_clear(qm);
}

static void sec_hw_error_enable(struct hisi_qm *qm)
{
	u32 val;

	if (qm->ver == QM_HW_V1) {
		writel(SEC_CORE_INT_DISABLE, qm->io_base + SEC_CORE_INT_MASK);
		pci_info(qm->pdev, "V1 does not support hw error handling\n");
		return;
	}

	val = readl(SEC_ADDR(qm, SEC_CONTROL_REG));
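
	/* clear any pending SEC hw error source */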
	writel(SEC_CORE_INT_CLEAR, qm->io_base + SEC_CORE_INT_SOURCE);
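
	/* enable SEC hw error interrupts */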
	writel(SEC_CORE_INT_ENABLE, qm->io_base + SEC_CORE_INT_MASK);
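
	/* enable RAS int */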
	writel(SEC_RAS_CE_ENB_MSK, qm->io_base + SEC_RAS_CE_REG);
	writel(SEC_RAS_FE_ENB_MSK, qm->io_base + SEC_RAS_FE_REG);
	writel(SEC_RAS_NFE_ENB_MSK, qm->io_base + SEC_RAS_NFE_REG);
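
	/* enable SEC block master OOO when m-bit error occurs */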
	val = val | SEC_AXI_SHUTDOWN_ENABLE;

	writel(val, SEC_ADDR(qm, SEC_CONTROL_REG));
}

static void sec_hw_error_disable(struct hisi_qm *qm)
{
	u32 val;

	val = readl(SEC_ADDR(qm, SEC_CONTROL_REG));
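
	/* disable RAS int */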
	writel(SEC_RAS_DISABLE, qm->io_base + SEC_RAS_CE_REG);
	writel(SEC_RAS_DISABLE, qm->io_base + SEC_RAS_FE_REG);
	writel(SEC_RAS_DISABLE, qm->io_base + SEC_RAS_NFE_REG);
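
	/* disable SEC hw error interrupts */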
	writel(SEC_CORE_INT_DISABLE, qm->io_base + SEC_CORE_INT_MASK);
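
	/* disable SEC block master OOO when m-bit error occurs */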
	val = val & SEC_AXI_SHUTDOWN_DISABLE;

	writel(val, SEC_ADDR(qm, SEC_CONTROL_REG));
}

static u32 sec_current_qm_read(struct sec_debug_file *file)
{
	struct hisi_qm *qm = file->qm;

	return readl(qm->io_base + QM_DFX_MB_CNT_VF);
}

static int sec_current_qm_write(struct sec_debug_file *file, u32 val)
{
	struct hisi_qm *qm = file->qm;
	u32 vfq_num;
	u32 tmp;

	if (val > qm->vfs_num)
		return -EINVAL;
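
	/* work out the queue count owned by the selected function */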
	if (!val) {
		qm->debug.curr_qm_qp_num = qm->qp_num;
	} else {
		vfq_num = (qm->ctrl_qp_num - qm->qp_num) / qm->vfs_num;

		if (val == qm->vfs_num)
			qm->debug.curr_qm_qp_num =
				qm->ctrl_qp_num - qm->qp_num -
				(qm->vfs_num - 1) * vfq_num;
		else
			qm->debug.curr_qm_qp_num = vfq_num;
	}

	writel(val, qm->io_base + QM_DFX_MB_CNT_VF);
	writel(val, qm->io_base + QM_DFX_DB_CNT_VF);

	tmp = val |
	      (readl(qm->io_base + QM_DFX_SQE_CNT_VF_SQN) & CURRENT_Q_MASK);
	writel(tmp, qm->io_base + QM_DFX_SQE_CNT_VF_SQN);

	tmp = val |
	      (readl(qm->io_base + QM_DFX_CQE_CNT_VF_CQN) & CURRENT_Q_MASK);
	writel(tmp, qm->io_base + QM_DFX_CQE_CNT_VF_CQN);

	return 0;
}

static u32 sec_clear_enable_read(struct sec_debug_file *file)
{
	struct hisi_qm *qm = file->qm;

	return readl(qm->io_base + SEC_CTRL_CNT_CLR_CE) &
	       SEC_CTRL_CNT_CLR_CE_BIT;
}

static int sec_clear_enable_write(struct sec_debug_file *file, u32 val)
{
	struct hisi_qm *qm = file->qm;
	u32 tmp;

	if (val != 1 && val != 0)
		return -EINVAL;

	tmp = (readl(qm->io_base + SEC_CTRL_CNT_CLR_CE) &
	       ~SEC_CTRL_CNT_CLR_CE_BIT) | val;
	writel(tmp, qm->io_base + SEC_CTRL_CNT_CLR_CE);

	return 0;
}

static ssize_t sec_debug_read(struct file *filp, char __user *buf,
			      size_t count, loff_t *pos)
{
	struct sec_debug_file *file = filp->private_data;
	char tbuf[SEC_DBGFS_VAL_MAX_LEN];
	u32 val;
	int ret;

	spin_lock_irq(&file->lock);

	switch (file->index) {
	case SEC_CURRENT_QM:
		val = sec_current_qm_read(file);
		break;
	case SEC_CLEAR_ENABLE:
		val = sec_clear_enable_read(file);
		break;
	default:
		spin_unlock_irq(&file->lock);
		return -EINVAL;
	}

	spin_unlock_irq(&file->lock);
	ret = snprintf(tbuf, SEC_DBGFS_VAL_MAX_LEN, "%u\n", val);

	return simple_read_from_buffer(buf, count, pos, tbuf, ret);
}

static ssize_t sec_debug_write(struct file *filp, const char __user *buf,
			       size_t count, loff_t *pos)
{
	struct sec_debug_file *file = filp->private_data;
	char tbuf[SEC_DBGFS_VAL_MAX_LEN];
	unsigned long val;
	int len, ret;

	if (*pos != 0)
		return 0;

	if (count >= SEC_DBGFS_VAL_MAX_LEN)
		return -ENOSPC;

	len = simple_write_to_buffer(tbuf, SEC_DBGFS_VAL_MAX_LEN - 1,
				     pos, buf, count);
	if (len < 0)
		return len;

	tbuf[len] = '\0';
	if (kstrtoul(tbuf, 0, &val))
		return -EFAULT;

	spin_lock_irq(&file->lock);

	switch (file->index) {
	case SEC_CURRENT_QM:
		ret = sec_current_qm_write(file, val);
		if (ret)
			goto err_input;
		break;
	case SEC_CLEAR_ENABLE:
		ret = sec_clear_enable_write(file, val);
		if (ret)
			goto err_input;
		break;
	default:
		ret = -EINVAL;
		goto err_input;
	}

	spin_unlock_irq(&file->lock);

	return count;

err_input:
	spin_unlock_irq(&file->lock);
	return ret;
}

static const struct file_operations sec_dbg_fops = {
	.owner = THIS_MODULE,
	.open = simple_open,
	.read = sec_debug_read,
	.write = sec_debug_write,
};

static int sec_debugfs_atomic64_get(void *data, u64 *val)
{
	*val = atomic64_read((atomic64_t *)data);

	return 0;
}

static int sec_debugfs_atomic64_set(void *data, u64 val)
{
	if (val)
		return -EINVAL;

	atomic64_set((atomic64_t *)data, 0);

	return 0;
}

DEFINE_DEBUGFS_ATTRIBUTE(sec_atomic64_ops, sec_debugfs_atomic64_get,
			 sec_debugfs_atomic64_set, "%lld\n");

static int sec_core_debug_init(struct hisi_qm *qm)
{
	struct sec_dev *sec = container_of(qm, struct sec_dev, qm);
	struct device *dev = &qm->pdev->dev;
	struct sec_dfx *dfx = &sec->debug.dfx;
	struct debugfs_regset32 *regset;
	struct dentry *tmp_d;
	int i;

	tmp_d = debugfs_create_dir("sec_dfx", qm->debug.debug_root);

	regset = devm_kzalloc(dev, sizeof(*regset), GFP_KERNEL);
	if (!regset)
		return -ENOMEM;

	regset->regs = sec_dfx_regs;
	regset->nregs = ARRAY_SIZE(sec_dfx_regs);
	regset->base = qm->io_base;

	if (qm->pdev->device == SEC_PF_PCI_DEVICE_ID)
		debugfs_create_regset32("regs", 0444, tmp_d, regset);

	for (i = 0; i < ARRAY_SIZE(sec_dfx_labels); i++) {
		atomic64_t *data = (atomic64_t *)((uintptr_t)dfx +
						  sec_dfx_labels[i].offset);
		debugfs_create_file(sec_dfx_labels[i].name, 0644,
				    tmp_d, data, &sec_atomic64_ops);
	}

	return 0;
}

static int sec_debug_init(struct hisi_qm *qm)
{
	struct sec_dev *sec = container_of(qm, struct sec_dev, qm);
	int i;

	if (qm->pdev->device == SEC_PF_PCI_DEVICE_ID) {
		for (i = SEC_CURRENT_QM; i < SEC_DEBUG_FILE_NUM; i++) {
			spin_lock_init(&sec->debug.files[i].lock);
			sec->debug.files[i].index = i;
			sec->debug.files[i].qm = qm;

			debugfs_create_file(sec_dbg_file_name[i], 0600,
					    qm->debug.debug_root,
					    sec->debug.files + i,
					    &sec_dbg_fops);
		}
	}

	return sec_core_debug_init(qm);
}

static int sec_debugfs_init(struct hisi_qm *qm)
{
	struct device *dev = &qm->pdev->dev;
	int ret;

	qm->debug.debug_root = debugfs_create_dir(dev_name(dev),
						  sec_debugfs_root);
	qm->debug.sqe_mask_offset = SEC_SQE_MASK_OFFSET;
	qm->debug.sqe_mask_len = SEC_SQE_MASK_LEN;
	ret = hisi_qm_debug_init(qm);
	if (ret)
		goto failed_to_create;

	ret = sec_debug_init(qm);
	if (ret)
		goto failed_to_create;

	return 0;

failed_to_create:
	debugfs_remove_recursive(sec_debugfs_root);

	return ret;
}

static void sec_debugfs_exit(struct hisi_qm *qm)
{
	debugfs_remove_recursive(qm->debug.debug_root);
}

static void sec_log_hw_error(struct hisi_qm *qm, u32 err_sts)
{
	const struct sec_hw_error *errs = sec_hw_errors;
	struct device *dev = &qm->pdev->dev;
	u32 err_val;

	while (errs->msg) {
		if (errs->int_msk & err_sts) {
			dev_err(dev, "%s [error status=0x%x] found\n",
				errs->msg, errs->int_msk);

			if (SEC_CORE_INT_STATUS_M_ECC & errs->int_msk) {
				err_val = readl(qm->io_base +
						SEC_CORE_SRAM_ECC_ERR_INFO);
				dev_err(dev, "multi ecc sram num=0x%x\n",
					SEC_ECC_NUM(err_val));
			}
		}
		errs++;
	}
}

static u32 sec_get_hw_err_status(struct hisi_qm *qm)
{
	return readl(qm->io_base + SEC_CORE_INT_STATUS);
}

static void sec_clear_hw_err_status(struct hisi_qm *qm, u32 err_sts)
{
	writel(err_sts, qm->io_base + SEC_CORE_INT_SOURCE);
}

static void sec_open_axi_master_ooo(struct hisi_qm *qm)
{
	u32 val;

	val = readl(SEC_ADDR(qm, SEC_CONTROL_REG));
	writel(val & SEC_AXI_SHUTDOWN_DISABLE, SEC_ADDR(qm, SEC_CONTROL_REG));
	writel(val | SEC_AXI_SHUTDOWN_ENABLE, SEC_ADDR(qm, SEC_CONTROL_REG));
}

static const struct hisi_qm_err_ini sec_err_ini = {
	.hw_init = sec_set_user_domain_and_cache,
	.hw_err_enable = sec_hw_error_enable,
	.hw_err_disable = sec_hw_error_disable,
	.get_dev_hw_err_status = sec_get_hw_err_status,
	.clear_dev_hw_err_status = sec_clear_hw_err_status,
	.log_dev_hw_err = sec_log_hw_error,
	.open_axi_master_ooo = sec_open_axi_master_ooo,
	.err_info = {
		.ce = QM_BASE_CE,
		.nfe = QM_BASE_NFE | QM_ACC_DO_TASK_TIMEOUT |
		       QM_ACC_WB_NOT_READY_TIMEOUT,
		.fe = 0,
		.ecc_2bits_mask = SEC_CORE_INT_STATUS_M_ECC,
		.msi_wr_port = BIT(0),
		.acpi_rst = "SRST",
	}
};

static int sec_pf_probe_init(struct sec_dev *sec)
{
	struct hisi_qm *qm = &sec->qm;
	int ret;

	if (qm->ver == QM_HW_V1)
		qm->ctrl_qp_num = SEC_QUEUE_NUM_V1;
	else
		qm->ctrl_qp_num = SEC_QUEUE_NUM_V2;

	qm->err_ini = &sec_err_ini;

	ret = sec_set_user_domain_and_cache(qm);
	if (ret)
		return ret;

	hisi_qm_dev_err_init(qm);
	sec_debug_regs_clear(qm);

	return 0;
}

static int sec_qm_init(struct hisi_qm *qm, struct pci_dev *pdev)
{
	int ret;

	qm->pdev = pdev;
	qm->ver = pdev->revision;
	qm->sqe_size = SEC_SQE_SIZE;
	qm->dev_name = sec_name;

	qm->fun_type = (pdev->device == SEC_PF_PCI_DEVICE_ID) ?
			QM_HW_PF : QM_HW_VF;
	if (qm->fun_type == QM_HW_PF) {
		qm->qp_base = SEC_PF_DEF_Q_BASE;
		qm->qp_num = pf_q_num;
		qm->debug.curr_qm_qp_num = pf_q_num;
		qm->qm_list = &sec_devices;
	} else if (qm->fun_type == QM_HW_VF && qm->ver == QM_HW_V1) {
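		/*
		 * On V1 hardware a VF has no way to query its queue
		 * configuration from the QM, so fall back to a fixed
		 * split: the PF keeps the first SEC_PF_DEF_Q_NUM queues
		 * and a single VF owns the remainder.
		 */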
		qm->qp_base = SEC_PF_DEF_Q_NUM;
		qm->qp_num = SEC_QUEUE_NUM_V1 - SEC_PF_DEF_Q_NUM;
	}

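	/*
	 * WQ_HIGHPRI: SEC requests must be handled with low latency,
	 * so use a high-priority workqueue.
	 * WQ_UNBOUND: SEC tasks are not bound to a specific CPU.
	 */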
	qm->wq = alloc_workqueue("%s", WQ_HIGHPRI | WQ_MEM_RECLAIM |
				 WQ_UNBOUND, num_online_cpus(),
				 pci_name(qm->pdev));
	if (!qm->wq) {
		pci_err(qm->pdev, "failed to alloc workqueue\n");
		return -ENOMEM;
	}

	ret = hisi_qm_init(qm);
	if (ret)
		destroy_workqueue(qm->wq);

	return ret;
}

static void sec_qm_uninit(struct hisi_qm *qm)
{
	hisi_qm_uninit(qm);
}

static int sec_probe_init(struct sec_dev *sec)
{
	struct hisi_qm *qm = &sec->qm;
	int ret;

	if (qm->fun_type == QM_HW_PF) {
		ret = sec_pf_probe_init(sec);
		if (ret)
			return ret;
	}

	return 0;
}

static void sec_probe_uninit(struct hisi_qm *qm)
{
	hisi_qm_dev_err_uninit(qm);

	destroy_workqueue(qm->wq);
}

static void sec_iommu_used_check(struct sec_dev *sec)
{
	struct iommu_domain *domain;
	struct device *dev = &sec->qm.pdev->dev;

	domain = iommu_get_domain_for_dev(dev);

	/* Check if iommu is used */
	sec->iommu_used = false;
	if (domain) {
		if (domain->type & __IOMMU_DOMAIN_PAGING)
			sec->iommu_used = true;
		dev_info(dev, "SMMU opened, iommu type = %u\n",
			 domain->type);
	}
}

static int sec_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	struct sec_dev *sec;
	struct hisi_qm *qm;
	int ret;

	sec = devm_kzalloc(&pdev->dev, sizeof(*sec), GFP_KERNEL);
	if (!sec)
		return -ENOMEM;

	qm = &sec->qm;
	ret = sec_qm_init(qm, pdev);
	if (ret) {
		pci_err(pdev, "Failed to init SEC QM (%d)!\n", ret);
		return ret;
	}

	sec->ctx_q_num = ctx_q_num;
	sec_iommu_used_check(sec);

	ret = sec_probe_init(sec);
	if (ret) {
		pci_err(pdev, "Failed to probe!\n");
		goto err_qm_uninit;
	}

	ret = hisi_qm_start(qm);
	if (ret) {
		pci_err(pdev, "Failed to start sec qm!\n");
		goto err_probe_uninit;
	}

	ret = sec_debugfs_init(qm);
	if (ret)
		pci_warn(pdev, "Failed to init debugfs!\n");

	hisi_qm_add_to_list(qm, &sec_devices);

	ret = sec_register_to_crypto();
	if (ret < 0) {
		pr_err("Failed to register driver to crypto.\n");
		goto err_remove_from_list;
	}

	if (qm->fun_type == QM_HW_PF && vfs_num) {
		ret = hisi_qm_sriov_enable(pdev, vfs_num);
		if (ret < 0)
			goto err_crypto_unregister;
	}

	return 0;

err_crypto_unregister:
	sec_unregister_from_crypto();

err_remove_from_list:
	hisi_qm_del_from_list(qm, &sec_devices);
	sec_debugfs_exit(qm);
	hisi_qm_stop(qm);

err_probe_uninit:
	sec_probe_uninit(qm);

err_qm_uninit:
	sec_qm_uninit(qm);

	return ret;
}

static void sec_remove(struct pci_dev *pdev)
{
	struct sec_dev *sec = pci_get_drvdata(pdev);
	struct hisi_qm *qm = &sec->qm;

	sec_unregister_from_crypto();

	hisi_qm_del_from_list(qm, &sec_devices);

	if (qm->fun_type == QM_HW_PF && qm->vfs_num)
		hisi_qm_sriov_disable(pdev);

	sec_debugfs_exit(qm);

	(void)hisi_qm_stop(qm);

	if (qm->fun_type == QM_HW_PF)
		sec_debug_regs_clear(qm);

	sec_probe_uninit(qm);

	sec_qm_uninit(qm);
}

static const struct pci_error_handlers sec_err_handler = {
	.error_detected = hisi_qm_dev_err_detected,
	.slot_reset = hisi_qm_dev_slot_reset,
	.reset_prepare = hisi_qm_reset_prepare,
	.reset_done = hisi_qm_reset_done,
};

static struct pci_driver sec_pci_driver = {
	.name = "hisi_sec2",
	.id_table = sec_dev_ids,
	.probe = sec_probe,
	.remove = sec_remove,
	.err_handler = &sec_err_handler,
	.sriov_configure = hisi_qm_sriov_configure,
};

static void sec_register_debugfs(void)
{
	if (!debugfs_initialized())
		return;

	sec_debugfs_root = debugfs_create_dir("hisi_sec2", NULL);
}

static void sec_unregister_debugfs(void)
{
	debugfs_remove_recursive(sec_debugfs_root);
}

static int __init sec_init(void)
{
	int ret;

	hisi_qm_init_list(&sec_devices);
	sec_register_debugfs();

	ret = pci_register_driver(&sec_pci_driver);
	if (ret < 0) {
		sec_unregister_debugfs();
		pr_err("Failed to register pci driver.\n");
		return ret;
	}

	return 0;
}

static void __exit sec_exit(void)
{
	pci_unregister_driver(&sec_pci_driver);
	sec_unregister_debugfs();
}

module_init(sec_init);
module_exit(sec_exit);

MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Zaibo Xu <xuzaibo@huawei.com>");
MODULE_AUTHOR("Longfang Liu <liulongfang@huawei.com>");
MODULE_AUTHOR("Kai Ye <yekai13@huawei.com>");
MODULE_AUTHOR("Wei Zhang <zhangwei375@huawei.com>");
MODULE_DESCRIPTION("Driver for HiSilicon SEC accelerator");