// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2019 HiSilicon Limited. */
#include <asm/page.h>
#include <linux/acpi.h>
#include <linux/aer.h>
#include <linux/bitmap.h>
#include <linux/debugfs.h>
#include <linux/dma-mapping.h>
#include <linux/idr.h>
#include <linux/io.h>
#include <linux/irqreturn.h>
#include <linux/log2.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/uacce.h>
#include <linux/uaccess.h>
#include <uapi/misc/uacce/hisi_qm.h>
#include "qm.h"

#define QM_VF_AEQ_INT_SOURCE 0x0
#define QM_VF_AEQ_INT_MASK 0x4
#define QM_VF_EQ_INT_SOURCE 0x8
#define QM_VF_EQ_INT_MASK 0xc
#define QM_IRQ_NUM_V1 1
#define QM_IRQ_NUM_PF_V2 4
#define QM_IRQ_NUM_VF_V2 2

#define QM_EQ_EVENT_IRQ_VECTOR 0
#define QM_AEQ_EVENT_IRQ_VECTOR 1
#define QM_ABNORMAL_EVENT_IRQ_VECTOR 3

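/* mailbox */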
#define QM_MB_CMD_SQC 0x0
#define QM_MB_CMD_CQC 0x1
#define QM_MB_CMD_EQC 0x2
#define QM_MB_CMD_AEQC 0x3
#define QM_MB_CMD_SQC_BT 0x4
#define QM_MB_CMD_CQC_BT 0x5
#define QM_MB_CMD_SQC_VFT_V2 0x6

#define QM_MB_CMD_SEND_BASE 0x300
#define QM_MB_EVENT_SHIFT 8
#define QM_MB_BUSY_SHIFT 13
#define QM_MB_OP_SHIFT 14
#define QM_MB_CMD_DATA_ADDR_L 0x304
#define QM_MB_CMD_DATA_ADDR_H 0x308

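/* sqc shift */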
#define QM_SQ_HOP_NUM_SHIFT 0
#define QM_SQ_PAGE_SIZE_SHIFT 4
#define QM_SQ_BUF_SIZE_SHIFT 8
#define QM_SQ_SQE_SIZE_SHIFT 12
#define QM_SQ_PRIORITY_SHIFT 0
#define QM_SQ_ORDERS_SHIFT 4
#define QM_SQ_TYPE_SHIFT 8

#define QM_SQ_TYPE_MASK GENMASK(3, 0)
#define QM_SQ_TAIL_IDX(sqc) ((le16_to_cpu((sqc)->w11) >> 6) & 0x1)

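/* cqc shift */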
#define QM_CQ_HOP_NUM_SHIFT 0
#define QM_CQ_PAGE_SIZE_SHIFT 4
#define QM_CQ_BUF_SIZE_SHIFT 8
#define QM_CQ_CQE_SIZE_SHIFT 12
#define QM_CQ_PHASE_SHIFT 0
#define QM_CQ_FLAG_SHIFT 1

#define QM_CQE_PHASE(cqe) (le16_to_cpu((cqe)->w7) & 0x1)
#define QM_QC_CQE_SIZE 4
#define QM_CQ_TAIL_IDX(cqc) ((le16_to_cpu((cqc)->w11) >> 6) & 0x1)

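/* eqc shift */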
#define QM_EQE_AEQE_SIZE (2UL << 12)
#define QM_EQC_PHASE_SHIFT 16

#define QM_EQE_PHASE(eqe) ((le32_to_cpu((eqe)->dw0) >> 16) & 0x1)
#define QM_EQE_CQN_MASK GENMASK(15, 0)

#define QM_AEQE_PHASE(aeqe) ((le32_to_cpu((aeqe)->dw0) >> 16) & 0x1)
#define QM_AEQE_TYPE_SHIFT 17

#define QM_DOORBELL_CMD_SQ 0
#define QM_DOORBELL_CMD_CQ 1
#define QM_DOORBELL_CMD_EQ 2
#define QM_DOORBELL_CMD_AEQ 3

#define QM_DOORBELL_BASE_V1 0x340
#define QM_DB_CMD_SHIFT_V1 16
#define QM_DB_INDEX_SHIFT_V1 32
#define QM_DB_PRIORITY_SHIFT_V1 48
#define QM_DOORBELL_SQ_CQ_BASE_V2 0x1000
#define QM_DOORBELL_EQ_AEQ_BASE_V2 0x2000
#define QM_DB_CMD_SHIFT_V2 12
#define QM_DB_RAND_SHIFT_V2 16
#define QM_DB_INDEX_SHIFT_V2 32
#define QM_DB_PRIORITY_SHIFT_V2 48

#define QM_MEM_START_INIT 0x100040
#define QM_MEM_INIT_DONE 0x100044
#define QM_VFT_CFG_RDY 0x10006c
#define QM_VFT_CFG_OP_WR 0x100058
#define QM_VFT_CFG_TYPE 0x10005c
#define QM_SQC_VFT 0x0
#define QM_CQC_VFT 0x1
#define QM_VFT_CFG 0x100060
#define QM_VFT_CFG_OP_ENABLE 0x100054

#define QM_VFT_CFG_DATA_L 0x100064
#define QM_VFT_CFG_DATA_H 0x100068
#define QM_SQC_VFT_BUF_SIZE (7ULL << 8)
#define QM_SQC_VFT_SQC_SIZE (5ULL << 12)
#define QM_SQC_VFT_INDEX_NUMBER (1ULL << 16)
#define QM_SQC_VFT_START_SQN_SHIFT 28
#define QM_SQC_VFT_VALID (1ULL << 44)
#define QM_SQC_VFT_SQN_SHIFT 45
#define QM_CQC_VFT_BUF_SIZE (7ULL << 8)
#define QM_CQC_VFT_SQC_SIZE (5ULL << 12)
#define QM_CQC_VFT_INDEX_NUMBER (1ULL << 16)
#define QM_CQC_VFT_VALID (1ULL << 28)

#define QM_SQC_VFT_BASE_SHIFT_V2 28
#define QM_SQC_VFT_BASE_MASK_V2 GENMASK(5, 0)
#define QM_SQC_VFT_NUM_SHIFT_V2 45
#define QM_SQC_VFT_NUM_MASK_v2 GENMASK(9, 0)

#define QM_DFX_CNT_CLR_CE 0x100118

#define QM_ABNORMAL_INT_SOURCE 0x100000
#define QM_ABNORMAL_INT_SOURCE_CLR GENMASK(12, 0)
#define QM_ABNORMAL_INT_MASK 0x100004
#define QM_ABNORMAL_INT_MASK_VALUE 0x1fff
#define QM_ABNORMAL_INT_STATUS 0x100008
#define QM_ABNORMAL_INT_SET 0x10000c
#define QM_ABNORMAL_INF00 0x100010
#define QM_FIFO_OVERFLOW_TYPE 0xc0
#define QM_FIFO_OVERFLOW_TYPE_SHIFT 6
#define QM_FIFO_OVERFLOW_VF 0x3f
#define QM_ABNORMAL_INF01 0x100014
#define QM_DB_TIMEOUT_TYPE 0xc0
#define QM_DB_TIMEOUT_TYPE_SHIFT 6
#define QM_DB_TIMEOUT_VF 0x3f
#define QM_RAS_CE_ENABLE 0x1000ec
#define QM_RAS_FE_ENABLE 0x1000f0
#define QM_RAS_NFE_ENABLE 0x1000f4
#define QM_RAS_CE_THRESHOLD 0x1000f8
#define QM_RAS_CE_TIMES_PER_IRQ 1
#define QM_RAS_MSI_INT_SEL 0x1040f4

#define QM_DEV_RESET_FLAG 0
#define QM_RESET_WAIT_TIMEOUT 400
#define QM_PEH_VENDOR_ID 0x1000d8
#define ACC_VENDOR_ID_VALUE 0x5a5a
#define QM_PEH_DFX_INFO0 0x1000fc
#define ACC_PEH_SRIOV_CTRL_VF_MSE_SHIFT 3
#define ACC_PEH_MSI_DISABLE GENMASK(31, 0)
#define ACC_MASTER_GLOBAL_CTRL_SHUTDOWN 0x1
#define ACC_MASTER_TRANS_RETURN_RW 3
#define ACC_MASTER_TRANS_RETURN 0x300150
#define ACC_MASTER_GLOBAL_CTRL 0x300000
#define ACC_AM_CFG_PORT_WR_EN 0x30001c
#define QM_RAS_NFE_MBIT_DISABLE ~QM_ECC_MBIT
#define ACC_AM_ROB_ECC_INT_STS 0x300104
#define ACC_ROB_ECC_ERR_MULTPL BIT(1)

#define POLL_PERIOD 10
#define POLL_TIMEOUT 1000
#define WAIT_PERIOD_US_MAX 200
#define WAIT_PERIOD_US_MIN 100
#define MAX_WAIT_COUNTS 1000
#define QM_CACHE_WB_START 0x204
#define QM_CACHE_WB_DONE 0x208

#define PCI_BAR_2 2
#define QM_SQE_DATA_ALIGN_MASK GENMASK(6, 0)
#define QMC_ALIGN(sz) ALIGN(sz, 32)

#define QM_DBG_READ_LEN 256
#define QM_DBG_WRITE_LEN 1024
#define QM_DBG_TMP_BUF_LEN 22
#define QM_PCI_COMMAND_INVALID ~0

#define WAIT_PERIOD 20
#define REMOVE_WAIT_DELAY 10
#define QM_SQE_ADDR_MASK GENMASK(7, 0)
#define QM_EQ_DEPTH (1024 * 2)

#define QM_MK_CQC_DW3_V1(hop_num, pg_sz, buf_sz, cqe_sz) \
	(((hop_num) << QM_CQ_HOP_NUM_SHIFT) | \
	((pg_sz) << QM_CQ_PAGE_SIZE_SHIFT) | \
	((buf_sz) << QM_CQ_BUF_SIZE_SHIFT) | \
	((cqe_sz) << QM_CQ_CQE_SIZE_SHIFT))

#define QM_MK_CQC_DW3_V2(cqe_sz) \
	((QM_Q_DEPTH - 1) | ((cqe_sz) << QM_CQ_CQE_SIZE_SHIFT))

#define QM_MK_SQC_W13(priority, orders, alg_type) \
	(((priority) << QM_SQ_PRIORITY_SHIFT) | \
	((orders) << QM_SQ_ORDERS_SHIFT) | \
	(((alg_type) & QM_SQ_TYPE_MASK) << QM_SQ_TYPE_SHIFT))

#define QM_MK_SQC_DW3_V1(hop_num, pg_sz, buf_sz, sqe_sz) \
	(((hop_num) << QM_SQ_HOP_NUM_SHIFT) | \
	((pg_sz) << QM_SQ_PAGE_SIZE_SHIFT) | \
	((buf_sz) << QM_SQ_BUF_SIZE_SHIFT) | \
	((u32)ilog2(sqe_sz) << QM_SQ_SQE_SIZE_SHIFT))

#define QM_MK_SQC_DW3_V2(sqe_sz) \
	((QM_Q_DEPTH - 1) | ((u32)ilog2(sqe_sz) << QM_SQ_SQE_SIZE_SHIFT))

#define INIT_QC_COMMON(qc, base, pasid) do { \
	(qc)->head = 0; \
	(qc)->tail = 0; \
	(qc)->base_l = cpu_to_le32(lower_32_bits(base)); \
	(qc)->base_h = cpu_to_le32(upper_32_bits(base)); \
	(qc)->dw3 = 0; \
	(qc)->w8 = 0; \
	(qc)->rsvd0 = 0; \
	(qc)->pasid = cpu_to_le16(pasid); \
	(qc)->w11 = 0; \
	(qc)->rsvd1 = 0; \
} while (0)

enum vft_type {
	SQC_VFT = 0,
	CQC_VFT,
};

enum acc_err_result {
	ACC_ERR_NONE,
	ACC_ERR_NEED_RESET,
	ACC_ERR_RECOVERED,
};

struct qm_cqe {
	__le32 rsvd0;
	__le16 cmd_id;
	__le16 rsvd1;
	__le16 sq_head;
	__le16 sq_num;
	__le16 rsvd2;
	__le16 w7;
};

struct qm_eqe {
	__le32 dw0;
};

struct qm_aeqe {
	__le32 dw0;
};

struct qm_sqc {
	__le16 head;
	__le16 tail;
	__le32 base_l;
	__le32 base_h;
	__le32 dw3;
	__le16 w8;
	__le16 rsvd0;
	__le16 pasid;
	__le16 w11;
	__le16 cq_num;
	__le16 w13;
	__le32 rsvd1;
};

struct qm_cqc {
	__le16 head;
	__le16 tail;
	__le32 base_l;
	__le32 base_h;
	__le32 dw3;
	__le16 w8;
	__le16 rsvd0;
	__le16 pasid;
	__le16 w11;
	__le32 dw6;
	__le32 rsvd1;
};

struct qm_eqc {
	__le16 head;
	__le16 tail;
	__le32 base_l;
	__le32 base_h;
	__le32 dw3;
	__le32 rsvd[2];
	__le32 dw6;
};

struct qm_aeqc {
	__le16 head;
	__le16 tail;
	__le32 base_l;
	__le32 base_h;
	__le32 dw3;
	__le32 rsvd[2];
	__le32 dw6;
};

struct qm_mailbox {
	__le16 w0;
	__le16 queue_num;
	__le32 base_l;
	__le32 base_h;
	__le32 rsvd;
};

struct qm_doorbell {
	__le16 queue_num;
	__le16 cmd;
	__le16 index;
	__le16 priority;
};

struct hisi_qm_resource {
	struct hisi_qm *qm;
	int distance;
	struct list_head list;
};

struct hisi_qm_hw_ops {
	int (*get_vft)(struct hisi_qm *qm, u32 *base, u32 *number);
	void (*qm_db)(struct hisi_qm *qm, u16 qn,
		      u8 cmd, u16 index, u8 priority);
	u32 (*get_irq_num)(struct hisi_qm *qm);
	int (*debug_init)(struct hisi_qm *qm);
	void (*hw_error_init)(struct hisi_qm *qm, u32 ce, u32 nfe, u32 fe);
	void (*hw_error_uninit)(struct hisi_qm *qm);
	enum acc_err_result (*hw_error_handle)(struct hisi_qm *qm);
};

struct qm_dfx_item {
	const char *name;
	u32 offset;
};

static struct qm_dfx_item qm_dfx_files[] = {
	{"err_irq", offsetof(struct qm_dfx, err_irq_cnt)},
	{"aeq_irq", offsetof(struct qm_dfx, aeq_irq_cnt)},
	{"abnormal_irq", offsetof(struct qm_dfx, abnormal_irq_cnt)},
	{"create_qp_err", offsetof(struct qm_dfx, create_qp_err_cnt)},
	{"mb_err", offsetof(struct qm_dfx, mb_err_cnt)},
};

static const char * const qm_debug_file_name[] = {
	[CURRENT_Q] = "current_q",
	[CLEAR_ENABLE] = "clear_enable",
};

struct hisi_qm_hw_error {
	u32 int_msk;
	const char *msg;
};

static const struct hisi_qm_hw_error qm_hw_error[] = {
	{ .int_msk = BIT(0), .msg = "qm_axi_rresp" },
	{ .int_msk = BIT(1), .msg = "qm_axi_bresp" },
	{ .int_msk = BIT(2), .msg = "qm_ecc_mbit" },
	{ .int_msk = BIT(3), .msg = "qm_ecc_1bit" },
	{ .int_msk = BIT(4), .msg = "qm_acc_get_task_timeout" },
	{ .int_msk = BIT(5), .msg = "qm_acc_do_task_timeout" },
	{ .int_msk = BIT(6), .msg = "qm_acc_wb_not_ready_timeout" },
	{ .int_msk = BIT(7), .msg = "qm_sq_cq_vf_invalid" },
	{ .int_msk = BIT(8), .msg = "qm_cq_vf_invalid" },
	{ .int_msk = BIT(9), .msg = "qm_sq_vf_invalid" },
	{ .int_msk = BIT(10), .msg = "qm_db_timeout" },
	{ .int_msk = BIT(11), .msg = "qm_of_fifo_of" },
	{ .int_msk = BIT(12), .msg = "qm_db_random_invalid" },
	{ }
};

static const char * const qm_db_timeout[] = {
	"sq", "cq", "eq", "aeq",
};

static const char * const qm_fifo_overflow[] = {
	"cq", "eq", "aeq",
};

static const char * const qm_s[] = {
	"init", "start", "close", "stop",
};

static const char * const qp_s[] = {
	"none", "init", "start", "stop", "close",
};

static bool qm_avail_state(struct hisi_qm *qm, enum qm_state new)
{
	enum qm_state curr = atomic_read(&qm->status.flags);
	bool avail = false;

	switch (curr) {
	case QM_INIT:
		if (new == QM_START || new == QM_CLOSE)
			avail = true;
		break;
	case QM_START:
		if (new == QM_STOP)
			avail = true;
		break;
	case QM_STOP:
		if (new == QM_CLOSE || new == QM_START)
			avail = true;
		break;
	default:
		break;
	}

	dev_dbg(&qm->pdev->dev, "change qm state from %s to %s\n",
		qm_s[curr], qm_s[new]);

	if (!avail)
		dev_warn(&qm->pdev->dev, "Can not change qm state from %s to %s\n",
			 qm_s[curr], qm_s[new]);

	return avail;
}

static bool qm_qp_avail_state(struct hisi_qm *qm, struct hisi_qp *qp,
			      enum qp_state new)
{
	enum qm_state qm_curr = atomic_read(&qm->status.flags);
	enum qp_state qp_curr = 0;
	bool avail = false;

	if (qp)
		qp_curr = atomic_read(&qp->qp_status.flags);

	switch (new) {
	case QP_INIT:
		if (qm_curr == QM_START || qm_curr == QM_INIT)
			avail = true;
		break;
	case QP_START:
		if ((qm_curr == QM_START && qp_curr == QP_INIT) ||
		    (qm_curr == QM_START && qp_curr == QP_STOP))
			avail = true;
		break;
	case QP_STOP:
		if ((qm_curr == QM_START && qp_curr == QP_START) ||
		    (qp_curr == QP_INIT))
			avail = true;
		break;
	case QP_CLOSE:
		if ((qm_curr == QM_START && qp_curr == QP_INIT) ||
		    (qm_curr == QM_START && qp_curr == QP_STOP) ||
		    (qm_curr == QM_STOP && qp_curr == QP_STOP) ||
		    (qm_curr == QM_STOP && qp_curr == QP_INIT))
			avail = true;
		break;
	default:
		break;
	}

	dev_dbg(&qm->pdev->dev, "change qp state from %s to %s in QM %s\n",
		qp_s[qp_curr], qp_s[new], qm_s[qm_curr]);

	if (!avail)
		dev_warn(&qm->pdev->dev,
			 "Can not change qp state from %s to %s in QM %s\n",
			 qp_s[qp_curr], qp_s[new], qm_s[qm_curr]);

	return avail;
}

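/* return 0 if the mailbox is ready, or -ETIMEDOUT if polling times out */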
static int qm_wait_mb_ready(struct hisi_qm *qm)
{
	u32 val;

	return readl_relaxed_poll_timeout(qm->io_base + QM_MB_CMD_SEND_BASE,
					  val, !((val >> QM_MB_BUSY_SHIFT) &
					  0x1), 10, 1000);
}

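/*
 * The 16-byte mailbox must reach the hardware in a single 128-bit write;
 * on arm64 this is done with an ldp/stp pair, other architectures fall
 * back to memcpy_toio().
 */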
static void qm_mb_write(struct hisi_qm *qm, const void *src)
{
	void __iomem *fun_base = qm->io_base + QM_MB_CMD_SEND_BASE;
	unsigned long tmp0 = 0, tmp1 = 0;

	if (!IS_ENABLED(CONFIG_ARM64)) {
		memcpy_toio(fun_base, src, 16);
		wmb();
		return;
	}

	asm volatile("ldp %0, %1, %3\n"
		     "stp %0, %1, %2\n"
		     "dsb sy\n"
		     : "=&r" (tmp0),
		       "=&r" (tmp1),
		       "+Q" (*((char __iomem *)fun_base))
		     : "Q" (*((char *)src))
		     : "memory");
}

static int qm_mb(struct hisi_qm *qm, u8 cmd, dma_addr_t dma_addr, u16 queue,
		 bool op)
{
	struct qm_mailbox mailbox;
	int ret = 0;

	dev_dbg(&qm->pdev->dev, "QM mailbox request to q%u: %u-%llx\n",
		queue, cmd, (unsigned long long)dma_addr);

	mailbox.w0 = cpu_to_le16(cmd |
		     (op ? 0x1 << QM_MB_OP_SHIFT : 0) |
		     (0x1 << QM_MB_BUSY_SHIFT));
	mailbox.queue_num = cpu_to_le16(queue);
	mailbox.base_l = cpu_to_le32(lower_32_bits(dma_addr));
	mailbox.base_h = cpu_to_le32(upper_32_bits(dma_addr));
	mailbox.rsvd = 0;

	mutex_lock(&qm->mailbox_lock);

	if (unlikely(qm_wait_mb_ready(qm))) {
		ret = -EBUSY;
		dev_err(&qm->pdev->dev, "QM mailbox is busy to start!\n");
		goto busy_unlock;
	}

	qm_mb_write(qm, &mailbox);

	if (unlikely(qm_wait_mb_ready(qm))) {
		ret = -EBUSY;
		dev_err(&qm->pdev->dev, "QM mailbox operation timeout!\n");
		goto busy_unlock;
	}

busy_unlock:
	mutex_unlock(&qm->mailbox_lock);

	if (ret)
		atomic64_inc(&qm->debug.dfx.mb_err_cnt);
	return ret;
}

static void qm_db_v1(struct hisi_qm *qm, u16 qn, u8 cmd, u16 index, u8 priority)
{
	u64 doorbell;

	doorbell = qn | ((u64)cmd << QM_DB_CMD_SHIFT_V1) |
		   ((u64)index << QM_DB_INDEX_SHIFT_V1) |
		   ((u64)priority << QM_DB_PRIORITY_SHIFT_V1);

	writeq(doorbell, qm->io_base + QM_DOORBELL_BASE_V1);
}

static void qm_db_v2(struct hisi_qm *qm, u16 qn, u8 cmd, u16 index, u8 priority)
{
	u64 doorbell;
	u64 dbase;
	u16 randata = 0;

	if (cmd == QM_DOORBELL_CMD_SQ || cmd == QM_DOORBELL_CMD_CQ)
		dbase = QM_DOORBELL_SQ_CQ_BASE_V2;
	else
		dbase = QM_DOORBELL_EQ_AEQ_BASE_V2;

	doorbell = qn | ((u64)cmd << QM_DB_CMD_SHIFT_V2) |
		   ((u64)randata << QM_DB_RAND_SHIFT_V2) |
		   ((u64)index << QM_DB_INDEX_SHIFT_V2) |
		   ((u64)priority << QM_DB_PRIORITY_SHIFT_V2);

	writeq(doorbell, qm->io_base + dbase);
}

static void qm_db(struct hisi_qm *qm, u16 qn, u8 cmd, u16 index, u8 priority)
{
	dev_dbg(&qm->pdev->dev, "QM doorbell request: qn=%u, cmd=%u, index=%u\n",
		qn, cmd, index);

	qm->ops->qm_db(qm, qn, cmd, index, priority);
}

static int qm_dev_mem_reset(struct hisi_qm *qm)
{
	u32 val;

	writel(0x1, qm->io_base + QM_MEM_START_INIT);
	return readl_relaxed_poll_timeout(qm->io_base + QM_MEM_INIT_DONE, val,
					  val & BIT(0), 10, 1000);
}

static u32 qm_get_irq_num_v1(struct hisi_qm *qm)
{
	return QM_IRQ_NUM_V1;
}

static u32 qm_get_irq_num_v2(struct hisi_qm *qm)
{
	if (qm->fun_type == QM_HW_PF)
		return QM_IRQ_NUM_PF_V2;
	else
		return QM_IRQ_NUM_VF_V2;
}

static struct hisi_qp *qm_to_hisi_qp(struct hisi_qm *qm, struct qm_eqe *eqe)
{
	u16 cqn = le32_to_cpu(eqe->dw0) & QM_EQE_CQN_MASK;

	return &qm->qp_array[cqn];
}

static void qm_cq_head_update(struct hisi_qp *qp)
{
	if (qp->qp_status.cq_head == QM_Q_DEPTH - 1) {
		qp->qp_status.cqc_phase = !qp->qp_status.cqc_phase;
		qp->qp_status.cq_head = 0;
	} else {
		qp->qp_status.cq_head++;
	}
}

static void qm_poll_qp(struct hisi_qp *qp, struct hisi_qm *qm)
{
	if (qp->event_cb) {
		qp->event_cb(qp);
		return;
	}

	if (qp->req_cb) {
		struct qm_cqe *cqe = qp->cqe + qp->qp_status.cq_head;

		while (QM_CQE_PHASE(cqe) == qp->qp_status.cqc_phase) {
			dma_rmb();
			qp->req_cb(qp, qp->sqe + qm->sqe_size *
				   le16_to_cpu(cqe->sq_head));
			qm_cq_head_update(qp);
			cqe = qp->cqe + qp->qp_status.cq_head;
			qm_db(qm, qp->qp_id, QM_DOORBELL_CMD_CQ,
			      qp->qp_status.cq_head, 0);
			atomic_dec(&qp->qp_status.used);
		}

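		/* set c_flag */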
		qm_db(qm, qp->qp_id, QM_DOORBELL_CMD_CQ,
		      qp->qp_status.cq_head, 1);
	}
}

static void qm_work_process(struct work_struct *work)
{
	struct hisi_qm *qm = container_of(work, struct hisi_qm, work);
	struct qm_eqe *eqe = qm->eqe + qm->status.eq_head;
	struct hisi_qp *qp;
	int eqe_num = 0;

	while (QM_EQE_PHASE(eqe) == qm->status.eqc_phase) {
		eqe_num++;
		qp = qm_to_hisi_qp(qm, eqe);
		qm_poll_qp(qp, qm);

		if (qm->status.eq_head == QM_EQ_DEPTH - 1) {
			qm->status.eqc_phase = !qm->status.eqc_phase;
			eqe = qm->eqe;
			qm->status.eq_head = 0;
		} else {
			eqe++;
			qm->status.eq_head++;
		}

		if (eqe_num == QM_EQ_DEPTH / 2 - 1) {
			eqe_num = 0;
			qm_db(qm, 0, QM_DOORBELL_CMD_EQ, qm->status.eq_head, 0);
		}
	}

	qm_db(qm, 0, QM_DOORBELL_CMD_EQ, qm->status.eq_head, 0);
}

static irqreturn_t do_qm_irq(int irq, void *data)
{
	struct hisi_qm *qm = (struct hisi_qm *)data;

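	/* prefer the workqueue created by the device driver of this QM */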
	if (qm->wq)
		queue_work(qm->wq, &qm->work);
	else
		schedule_work(&qm->work);

	return IRQ_HANDLED;
}

static irqreturn_t qm_irq(int irq, void *data)
{
	struct hisi_qm *qm = data;

	if (readl(qm->io_base + QM_VF_EQ_INT_SOURCE))
		return do_qm_irq(irq, data);

	atomic64_inc(&qm->debug.dfx.err_irq_cnt);
	dev_err(&qm->pdev->dev, "invalid int source\n");
	qm_db(qm, 0, QM_DOORBELL_CMD_EQ, qm->status.eq_head, 0);

	return IRQ_NONE;
}

static irqreturn_t qm_aeq_irq(int irq, void *data)
{
	struct hisi_qm *qm = data;
	struct qm_aeqe *aeqe = qm->aeqe + qm->status.aeq_head;
	u32 type;

	atomic64_inc(&qm->debug.dfx.aeq_irq_cnt);
	if (!readl(qm->io_base + QM_VF_AEQ_INT_SOURCE))
		return IRQ_NONE;

	while (QM_AEQE_PHASE(aeqe) == qm->status.aeqc_phase) {
		type = le32_to_cpu(aeqe->dw0) >> QM_AEQE_TYPE_SHIFT;
		if (type < ARRAY_SIZE(qm_fifo_overflow))
			dev_err(&qm->pdev->dev, "%s overflow\n",
				qm_fifo_overflow[type]);
		else
			dev_err(&qm->pdev->dev, "unknown error type %d\n",
				type);

		if (qm->status.aeq_head == QM_Q_DEPTH - 1) {
			qm->status.aeqc_phase = !qm->status.aeqc_phase;
			aeqe = qm->aeqe;
			qm->status.aeq_head = 0;
		} else {
			aeqe++;
			qm->status.aeq_head++;
		}

		qm_db(qm, 0, QM_DOORBELL_CMD_AEQ, qm->status.aeq_head, 0);
	}

	return IRQ_HANDLED;
}

static void qm_irq_unregister(struct hisi_qm *qm)
{
	struct pci_dev *pdev = qm->pdev;

	free_irq(pci_irq_vector(pdev, QM_EQ_EVENT_IRQ_VECTOR), qm);

	if (qm->ver == QM_HW_V1)
		return;

	free_irq(pci_irq_vector(pdev, QM_AEQ_EVENT_IRQ_VECTOR), qm);

	if (qm->fun_type == QM_HW_PF)
		free_irq(pci_irq_vector(pdev,
			 QM_ABNORMAL_EVENT_IRQ_VECTOR), qm);
}

static void qm_init_qp_status(struct hisi_qp *qp)
{
	struct hisi_qp_status *qp_status = &qp->qp_status;

	qp_status->sq_tail = 0;
	qp_status->cq_head = 0;
	qp_status->cqc_phase = true;
	atomic_set(&qp_status->used, 0);
}

static void qm_vft_data_cfg(struct hisi_qm *qm, enum vft_type type, u32 base,
			    u32 number)
{
	u64 tmp = 0;

	if (number > 0) {
		switch (type) {
		case SQC_VFT:
			if (qm->ver == QM_HW_V1) {
				tmp = QM_SQC_VFT_BUF_SIZE |
				      QM_SQC_VFT_SQC_SIZE |
				      QM_SQC_VFT_INDEX_NUMBER |
				      QM_SQC_VFT_VALID |
				      (u64)base << QM_SQC_VFT_START_SQN_SHIFT;
			} else {
				tmp = (u64)base << QM_SQC_VFT_START_SQN_SHIFT |
				      QM_SQC_VFT_VALID |
				      (u64)(number - 1) << QM_SQC_VFT_SQN_SHIFT;
			}
			break;
		case CQC_VFT:
			if (qm->ver == QM_HW_V1) {
				tmp = QM_CQC_VFT_BUF_SIZE |
				      QM_CQC_VFT_SQC_SIZE |
				      QM_CQC_VFT_INDEX_NUMBER |
				      QM_CQC_VFT_VALID;
			} else {
				tmp = QM_CQC_VFT_VALID;
			}
			break;
		}
	}

	writel(lower_32_bits(tmp), qm->io_base + QM_VFT_CFG_DATA_L);
	writel(upper_32_bits(tmp), qm->io_base + QM_VFT_CFG_DATA_H);
}

static int qm_set_vft_common(struct hisi_qm *qm, enum vft_type type,
			     u32 fun_num, u32 base, u32 number)
{
	unsigned int val;
	int ret;

	ret = readl_relaxed_poll_timeout(qm->io_base + QM_VFT_CFG_RDY, val,
					 val & BIT(0), 10, 1000);
	if (ret)
		return ret;

	writel(0x0, qm->io_base + QM_VFT_CFG_OP_WR);
	writel(type, qm->io_base + QM_VFT_CFG_TYPE);
	writel(fun_num, qm->io_base + QM_VFT_CFG);

	qm_vft_data_cfg(qm, type, base, number);

	writel(0x0, qm->io_base + QM_VFT_CFG_RDY);
	writel(0x1, qm->io_base + QM_VFT_CFG_OP_ENABLE);

	return readl_relaxed_poll_timeout(qm->io_base + QM_VFT_CFG_RDY, val,
					  val & BIT(0), 10, 1000);
}

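/* the VFT configuration should be done after qm_dev_mem_reset() */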
static int qm_set_sqc_cqc_vft(struct hisi_qm *qm, u32 fun_num, u32 base,
			      u32 number)
{
	int ret, i;

	for (i = SQC_VFT; i <= CQC_VFT; i++) {
		ret = qm_set_vft_common(qm, i, fun_num, base, number);
		if (ret)
			return ret;
	}

	return 0;
}

static int qm_get_vft_v2(struct hisi_qm *qm, u32 *base, u32 *number)
{
	u64 sqc_vft;
	int ret;

	ret = qm_mb(qm, QM_MB_CMD_SQC_VFT_V2, 0, 0, 1);
	if (ret)
		return ret;

	sqc_vft = readl(qm->io_base + QM_MB_CMD_DATA_ADDR_L) |
		  ((u64)readl(qm->io_base + QM_MB_CMD_DATA_ADDR_H) << 32);
	*base = QM_SQC_VFT_BASE_MASK_V2 & (sqc_vft >> QM_SQC_VFT_BASE_SHIFT_V2);
	*number = (QM_SQC_VFT_NUM_MASK_v2 &
		   (sqc_vft >> QM_SQC_VFT_NUM_SHIFT_V2)) + 1;

	return 0;
}

static struct hisi_qm *file_to_qm(struct debugfs_file *file)
{
	struct qm_debug *debug = file->debug;

	return container_of(debug, struct hisi_qm, debug);
}

static u32 current_q_read(struct debugfs_file *file)
{
	struct hisi_qm *qm = file_to_qm(file);

	return readl(qm->io_base + QM_DFX_SQE_CNT_VF_SQN) >> QM_DFX_QN_SHIFT;
}

static int current_q_write(struct debugfs_file *file, u32 val)
{
	struct hisi_qm *qm = file_to_qm(file);
	u32 tmp;

	if (val >= qm->debug.curr_qm_qp_num)
		return -EINVAL;

	tmp = val << QM_DFX_QN_SHIFT |
	      (readl(qm->io_base + QM_DFX_SQE_CNT_VF_SQN) & CURRENT_FUN_MASK);
	writel(tmp, qm->io_base + QM_DFX_SQE_CNT_VF_SQN);

	tmp = val << QM_DFX_QN_SHIFT |
	      (readl(qm->io_base + QM_DFX_CQE_CNT_VF_CQN) & CURRENT_FUN_MASK);
	writel(tmp, qm->io_base + QM_DFX_CQE_CNT_VF_CQN);

	return 0;
}

static u32 clear_enable_read(struct debugfs_file *file)
{
	struct hisi_qm *qm = file_to_qm(file);

	return readl(qm->io_base + QM_DFX_CNT_CLR_CE);
}

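/* rd_clr_ctrl: 1 enables read-clear on the DFX counters, 0 disables it */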
static int clear_enable_write(struct debugfs_file *file, u32 rd_clr_ctrl)
{
	struct hisi_qm *qm = file_to_qm(file);

	if (rd_clr_ctrl > 1)
		return -EINVAL;

	writel(rd_clr_ctrl, qm->io_base + QM_DFX_CNT_CLR_CE);

	return 0;
}

static ssize_t qm_debug_read(struct file *filp, char __user *buf,
			     size_t count, loff_t *pos)
{
	struct debugfs_file *file = filp->private_data;
	enum qm_debug_file index = file->index;
	char tbuf[QM_DBG_TMP_BUF_LEN];
	u32 val;
	int ret;

	mutex_lock(&file->lock);
	switch (index) {
	case CURRENT_Q:
		val = current_q_read(file);
		break;
	case CLEAR_ENABLE:
		val = clear_enable_read(file);
		break;
	default:
		mutex_unlock(&file->lock);
		return -EINVAL;
	}
	mutex_unlock(&file->lock);
	ret = sprintf(tbuf, "%u\n", val);
	return simple_read_from_buffer(buf, count, pos, tbuf, ret);
}

static ssize_t qm_debug_write(struct file *filp, const char __user *buf,
			      size_t count, loff_t *pos)
{
	struct debugfs_file *file = filp->private_data;
	enum qm_debug_file index = file->index;
	unsigned long val;
	char tbuf[QM_DBG_TMP_BUF_LEN];
	int len, ret;

	if (*pos != 0)
		return 0;

	if (count >= QM_DBG_TMP_BUF_LEN)
		return -ENOSPC;

	len = simple_write_to_buffer(tbuf, QM_DBG_TMP_BUF_LEN - 1, pos, buf,
				     count);
	if (len < 0)
		return len;

	tbuf[len] = '\0';
	if (kstrtoul(tbuf, 0, &val))
		return -EFAULT;

	mutex_lock(&file->lock);
	switch (index) {
	case CURRENT_Q:
		ret = current_q_write(file, val);
		if (ret)
			goto err_input;
		break;
	case CLEAR_ENABLE:
		ret = clear_enable_write(file, val);
		if (ret)
			goto err_input;
		break;
	default:
		ret = -EINVAL;
		goto err_input;
	}
	mutex_unlock(&file->lock);

	return count;

err_input:
	mutex_unlock(&file->lock);
	return ret;
}

static const struct file_operations qm_debug_fops = {
	.owner = THIS_MODULE,
	.open = simple_open,
	.read = qm_debug_read,
	.write = qm_debug_write,
};

struct qm_dfx_registers {
	char *reg_name;
	u64 reg_offset;
};

#define CNT_CYC_REGS_NUM 10
static struct qm_dfx_registers qm_dfx_regs[] = {
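	/* the first CNT_CYC_REGS_NUM entries are counter registers */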
1000 {"QM_ECC_1BIT_CNT ", 0x104000ull},
1001 {"QM_ECC_MBIT_CNT ", 0x104008ull},
1002 {"QM_DFX_MB_CNT ", 0x104018ull},
1003 {"QM_DFX_DB_CNT ", 0x104028ull},
1004 {"QM_DFX_SQE_CNT ", 0x104038ull},
1005 {"QM_DFX_CQE_CNT ", 0x104048ull},
1006 {"QM_DFX_SEND_SQE_TO_ACC_CNT ", 0x104050ull},
1007 {"QM_DFX_WB_SQE_FROM_ACC_CNT ", 0x104058ull},
1008 {"QM_DFX_ACC_FINISH_CNT ", 0x104060ull},
1009 {"QM_DFX_CQE_ERR_CNT ", 0x1040b4ull},
1010 {"QM_DFX_FUNS_ACTIVE_ST ", 0x200ull},
1011 {"QM_ECC_1BIT_INF ", 0x104004ull},
1012 {"QM_ECC_MBIT_INF ", 0x10400cull},
1013 {"QM_DFX_ACC_RDY_VLD0 ", 0x1040a0ull},
1014 {"QM_DFX_ACC_RDY_VLD1 ", 0x1040a4ull},
1015 {"QM_DFX_AXI_RDY_VLD ", 0x1040a8ull},
1016 {"QM_DFX_FF_ST0 ", 0x1040c8ull},
1017 {"QM_DFX_FF_ST1 ", 0x1040ccull},
1018 {"QM_DFX_FF_ST2 ", 0x1040d0ull},
1019 {"QM_DFX_FF_ST3 ", 0x1040d4ull},
1020 {"QM_DFX_FF_ST4 ", 0x1040d8ull},
1021 {"QM_DFX_FF_ST5 ", 0x1040dcull},
1022 {"QM_DFX_FF_ST6 ", 0x1040e0ull},
1023 {"QM_IN_IDLE_ST ", 0x1040e4ull},
1024 { NULL, 0}
1025};
1026
1027static struct qm_dfx_registers qm_vf_dfx_regs[] = {
1028 {"QM_DFX_FUNS_ACTIVE_ST ", 0x200ull},
1029 { NULL, 0}
1030};
1031
1032static int qm_regs_show(struct seq_file *s, void *unused)
1033{
1034 struct hisi_qm *qm = s->private;
1035 struct qm_dfx_registers *regs;
1036 u32 val;
1037
1038 if (qm->fun_type == QM_HW_PF)
1039 regs = qm_dfx_regs;
1040 else
1041 regs = qm_vf_dfx_regs;
1042
1043 while (regs->reg_name) {
1044 val = readl(qm->io_base + regs->reg_offset);
1045 seq_printf(s, "%s= 0x%08x\n", regs->reg_name, val);
1046 regs++;
1047 }
1048
1049 return 0;
1050}
1051
1052DEFINE_SHOW_ATTRIBUTE(qm_regs);
1053
1054static ssize_t qm_cmd_read(struct file *filp, char __user *buffer,
1055 size_t count, loff_t *pos)
1056{
1057 char buf[QM_DBG_READ_LEN];
1058 int len;
1059
1060 len = scnprintf(buf, QM_DBG_READ_LEN, "%s\n",
1061 "Please echo help to cmd to get help information");
1062
1063 return simple_read_from_buffer(buffer, count, pos, buf, len);
1064}
1065
1066static void *qm_ctx_alloc(struct hisi_qm *qm, size_t ctx_size,
1067 dma_addr_t *dma_addr)
1068{
1069 struct device *dev = &qm->pdev->dev;
1070 void *ctx_addr;
1071
1072 ctx_addr = kzalloc(ctx_size, GFP_KERNEL);
1073 if (!ctx_addr)
1074 return ERR_PTR(-ENOMEM);
1075
1076 *dma_addr = dma_map_single(dev, ctx_addr, ctx_size, DMA_FROM_DEVICE);
1077 if (dma_mapping_error(dev, *dma_addr)) {
1078 dev_err(dev, "DMA mapping error!\n");
1079 kfree(ctx_addr);
1080 return ERR_PTR(-ENOMEM);
1081 }
1082
1083 return ctx_addr;
1084}
1085
1086static void qm_ctx_free(struct hisi_qm *qm, size_t ctx_size,
1087 const void *ctx_addr, dma_addr_t *dma_addr)
1088{
1089 struct device *dev = &qm->pdev->dev;
1090
1091 dma_unmap_single(dev, *dma_addr, ctx_size, DMA_FROM_DEVICE);
1092 kfree(ctx_addr);
1093}
1094
1095static int dump_show(struct hisi_qm *qm, void *info,
1096 unsigned int info_size, char *info_name)
1097{
1098 struct device *dev = &qm->pdev->dev;
1099 u8 *info_buf, *info_curr = info;
1100 u32 i;
1101#define BYTE_PER_DW 4
1102
1103 info_buf = kzalloc(info_size, GFP_KERNEL);
1104 if (!info_buf)
1105 return -ENOMEM;
1106
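	/* reverse the bytes in each dword so the dump prints the MSB first */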
	for (i = 0; i < info_size; i++, info_curr++) {
		if (i % BYTE_PER_DW == 0)
			info_buf[i + 3UL] = *info_curr;
		else if (i % BYTE_PER_DW == 1)
			info_buf[i + 1UL] = *info_curr;
		else if (i % BYTE_PER_DW == 2)
			info_buf[i - 1] = *info_curr;
		else if (i % BYTE_PER_DW == 3)
			info_buf[i - 3] = *info_curr;
	}

	dev_info(dev, "%s DUMP\n", info_name);
	for (i = 0; i < info_size; i += BYTE_PER_DW) {
		pr_info("DW%d: %02X%02X %02X%02X\n", i / BYTE_PER_DW,
			info_buf[i], info_buf[i + 1UL],
			info_buf[i + 2UL], info_buf[i + 3UL]);
	}

	kfree(info_buf);

	return 0;
}

static int qm_dump_sqc_raw(struct hisi_qm *qm, dma_addr_t dma_addr, u16 qp_id)
{
	return qm_mb(qm, QM_MB_CMD_SQC, dma_addr, qp_id, 1);
}

static int qm_dump_cqc_raw(struct hisi_qm *qm, dma_addr_t dma_addr, u16 qp_id)
{
	return qm_mb(qm, QM_MB_CMD_CQC, dma_addr, qp_id, 1);
}

static int qm_sqc_dump(struct hisi_qm *qm, const char *s)
{
	struct device *dev = &qm->pdev->dev;
	struct qm_sqc *sqc, *sqc_curr;
	dma_addr_t sqc_dma;
	u32 qp_id;
	int ret;

	if (!s)
		return -EINVAL;

	ret = kstrtou32(s, 0, &qp_id);
	if (ret || qp_id >= qm->qp_num) {
		dev_err(dev, "Please input qp num (0-%d)", qm->qp_num - 1);
		return -EINVAL;
	}

	sqc = qm_ctx_alloc(qm, sizeof(*sqc), &sqc_dma);
	if (IS_ERR(sqc))
		return PTR_ERR(sqc);

	ret = qm_dump_sqc_raw(qm, sqc_dma, qp_id);
	if (ret) {
		down_read(&qm->qps_lock);
		if (qm->sqc) {
			sqc_curr = qm->sqc + qp_id;

			ret = dump_show(qm, sqc_curr, sizeof(*sqc),
					"SOFT SQC");
			if (ret)
				dev_info(dev, "Show soft sqc failed!\n");
		}
		up_read(&qm->qps_lock);

		goto err_free_ctx;
	}

	ret = dump_show(qm, sqc, sizeof(*sqc), "SQC");
	if (ret)
		dev_info(dev, "Show hw sqc failed!\n");

err_free_ctx:
	qm_ctx_free(qm, sizeof(*sqc), sqc, &sqc_dma);
	return ret;
}

static int qm_cqc_dump(struct hisi_qm *qm, const char *s)
{
	struct device *dev = &qm->pdev->dev;
	struct qm_cqc *cqc, *cqc_curr;
	dma_addr_t cqc_dma;
	u32 qp_id;
	int ret;

	if (!s)
		return -EINVAL;

	ret = kstrtou32(s, 0, &qp_id);
	if (ret || qp_id >= qm->qp_num) {
		dev_err(dev, "Please input qp num (0-%d)", qm->qp_num - 1);
		return -EINVAL;
	}

	cqc = qm_ctx_alloc(qm, sizeof(*cqc), &cqc_dma);
	if (IS_ERR(cqc))
		return PTR_ERR(cqc);

	ret = qm_dump_cqc_raw(qm, cqc_dma, qp_id);
	if (ret) {
		down_read(&qm->qps_lock);
		if (qm->cqc) {
			cqc_curr = qm->cqc + qp_id;

			ret = dump_show(qm, cqc_curr, sizeof(*cqc),
					"SOFT CQC");
			if (ret)
				dev_info(dev, "Show soft cqc failed!\n");
		}
		up_read(&qm->qps_lock);

		goto err_free_ctx;
	}

	ret = dump_show(qm, cqc, sizeof(*cqc), "CQC");
	if (ret)
		dev_info(dev, "Show hw cqc failed!\n");

err_free_ctx:
	qm_ctx_free(qm, sizeof(*cqc), cqc, &cqc_dma);
	return ret;
}

static int qm_eqc_aeqc_dump(struct hisi_qm *qm, char *s, size_t size,
			    int cmd, char *name)
{
	struct device *dev = &qm->pdev->dev;
	dma_addr_t xeqc_dma;
	void *xeqc;
	int ret;

	if (strsep(&s, " ")) {
		dev_err(dev, "Please do not input extra characters!\n");
		return -EINVAL;
	}

	xeqc = qm_ctx_alloc(qm, size, &xeqc_dma);
	if (IS_ERR(xeqc))
		return PTR_ERR(xeqc);

	ret = qm_mb(qm, cmd, xeqc_dma, 0, 1);
	if (ret)
		goto err_free_ctx;

	ret = dump_show(qm, xeqc, size, name);
	if (ret)
		dev_info(dev, "Show hw %s failed!\n", name);

err_free_ctx:
	qm_ctx_free(qm, size, xeqc, &xeqc_dma);
	return ret;
}

static int q_dump_param_parse(struct hisi_qm *qm, char *s,
			      u32 *e_id, u32 *q_id)
{
	struct device *dev = &qm->pdev->dev;
	unsigned int qp_num = qm->qp_num;
	char *presult;
	int ret;

	presult = strsep(&s, " ");
	if (!presult) {
		dev_err(dev, "Please input qp number!\n");
		return -EINVAL;
	}

	ret = kstrtou32(presult, 0, q_id);
	if (ret || *q_id >= qp_num) {
		dev_err(dev, "Please input qp num (0-%d)", qp_num - 1);
		return -EINVAL;
	}

	presult = strsep(&s, " ");
	if (!presult) {
		dev_err(dev, "Please input sqe number!\n");
		return -EINVAL;
	}

	ret = kstrtou32(presult, 0, e_id);
	if (ret || *e_id >= QM_Q_DEPTH) {
		dev_err(dev, "Please input sqe num (0-%d)", QM_Q_DEPTH - 1);
		return -EINVAL;
	}

	if (strsep(&s, " ")) {
		dev_err(dev, "Please do not input extra characters!\n");
		return -EINVAL;
	}

	return 0;
}

static int qm_sq_dump(struct hisi_qm *qm, char *s)
{
	struct device *dev = &qm->pdev->dev;
	void *sqe, *sqe_curr;
	struct hisi_qp *qp;
	u32 qp_id, sqe_id;
	int ret;

	ret = q_dump_param_parse(qm, s, &sqe_id, &qp_id);
	if (ret)
		return ret;

	sqe = kzalloc(qm->sqe_size * QM_Q_DEPTH, GFP_KERNEL);
	if (!sqe)
		return -ENOMEM;

	qp = &qm->qp_array[qp_id];
	memcpy(sqe, qp->sqe, qm->sqe_size * QM_Q_DEPTH);
	sqe_curr = sqe + (u32)(sqe_id * qm->sqe_size);
	memset(sqe_curr + qm->debug.sqe_mask_offset, QM_SQE_ADDR_MASK,
	       qm->debug.sqe_mask_len);

	ret = dump_show(qm, sqe_curr, qm->sqe_size, "SQE");
	if (ret)
		dev_info(dev, "Show sqe failed!\n");

	kfree(sqe);

	return ret;
}

static int qm_cq_dump(struct hisi_qm *qm, char *s)
{
	struct device *dev = &qm->pdev->dev;
	struct qm_cqe *cqe_curr;
	struct hisi_qp *qp;
	u32 qp_id, cqe_id;
	int ret;

	ret = q_dump_param_parse(qm, s, &cqe_id, &qp_id);
	if (ret)
		return ret;

	qp = &qm->qp_array[qp_id];
	cqe_curr = qp->cqe + cqe_id;
	ret = dump_show(qm, cqe_curr, sizeof(struct qm_cqe), "CQE");
	if (ret)
		dev_info(dev, "Show cqe failed!\n");

	return ret;
}

static int qm_eq_aeq_dump(struct hisi_qm *qm, const char *s,
			  size_t size, char *name)
{
	struct device *dev = &qm->pdev->dev;
	void *xeqe;
	u32 xeqe_id;
	int ret;

	if (!s)
		return -EINVAL;

	ret = kstrtou32(s, 0, &xeqe_id);
	if (ret)
		return -EINVAL;

	if (!strcmp(name, "EQE") && xeqe_id >= QM_EQ_DEPTH) {
		dev_err(dev, "Please input eqe num (0-%d)", QM_EQ_DEPTH - 1);
		return -EINVAL;
	} else if (!strcmp(name, "AEQE") && xeqe_id >= QM_Q_DEPTH) {
		dev_err(dev, "Please input aeqe num (0-%d)", QM_Q_DEPTH - 1);
		return -EINVAL;
	}

	down_read(&qm->qps_lock);

	if (qm->eqe && !strcmp(name, "EQE")) {
		xeqe = qm->eqe + xeqe_id;
	} else if (qm->aeqe && !strcmp(name, "AEQE")) {
		xeqe = qm->aeqe + xeqe_id;
	} else {
		ret = -EINVAL;
		goto err_unlock;
	}

	ret = dump_show(qm, xeqe, size, name);
	if (ret)
		dev_info(dev, "Show %s failed!\n", name);

err_unlock:
	up_read(&qm->qps_lock);
	return ret;
}

static int qm_dbg_help(struct hisi_qm *qm, char *s)
{
	struct device *dev = &qm->pdev->dev;

	if (strsep(&s, " ")) {
		dev_err(dev, "Please do not input extra characters!\n");
		return -EINVAL;
	}

	dev_info(dev, "available commands:\n");
	dev_info(dev, "sqc <num>\n");
	dev_info(dev, "cqc <num>\n");
	dev_info(dev, "eqc\n");
	dev_info(dev, "aeqc\n");
	dev_info(dev, "sq <num> <e>\n");
	dev_info(dev, "cq <num> <e>\n");
	dev_info(dev, "eq <e>\n");
	dev_info(dev, "aeq <e>\n");

	return 0;
}

static int qm_cmd_write_dump(struct hisi_qm *qm, const char *cmd_buf)
{
	struct device *dev = &qm->pdev->dev;
	char *presult, *s, *s_tmp;
	int ret;

	s = kstrdup(cmd_buf, GFP_KERNEL);
	if (!s)
		return -ENOMEM;

	s_tmp = s;
	presult = strsep(&s, " ");
	if (!presult) {
		ret = -EINVAL;
		goto err_buffer_free;
	}

	if (!strcmp(presult, "sqc"))
		ret = qm_sqc_dump(qm, s);
	else if (!strcmp(presult, "cqc"))
		ret = qm_cqc_dump(qm, s);
	else if (!strcmp(presult, "eqc"))
		ret = qm_eqc_aeqc_dump(qm, s, sizeof(struct qm_eqc),
				       QM_MB_CMD_EQC, "EQC");
	else if (!strcmp(presult, "aeqc"))
		ret = qm_eqc_aeqc_dump(qm, s, sizeof(struct qm_aeqc),
				       QM_MB_CMD_AEQC, "AEQC");
	else if (!strcmp(presult, "sq"))
		ret = qm_sq_dump(qm, s);
	else if (!strcmp(presult, "cq"))
		ret = qm_cq_dump(qm, s);
	else if (!strcmp(presult, "eq"))
		ret = qm_eq_aeq_dump(qm, s, sizeof(struct qm_eqe), "EQE");
	else if (!strcmp(presult, "aeq"))
		ret = qm_eq_aeq_dump(qm, s, sizeof(struct qm_aeqe), "AEQE");
	else if (!strcmp(presult, "help"))
		ret = qm_dbg_help(qm, s);
	else
		ret = -EINVAL;

	if (ret)
		dev_info(dev, "Please echo help\n");

err_buffer_free:
	kfree(s_tmp);

	return ret;
}

static ssize_t qm_cmd_write(struct file *filp, const char __user *buffer,
			    size_t count, loff_t *pos)
{
	struct hisi_qm *qm = filp->private_data;
	char *cmd_buf, *cmd_buf_tmp;
	int ret;

	if (*pos)
		return 0;

	if (unlikely(atomic_read(&qm->status.flags) == QM_STOP))
		return 0;

	if (count > QM_DBG_WRITE_LEN)
		return -ENOSPC;

	cmd_buf = kzalloc(count + 1, GFP_KERNEL);
	if (!cmd_buf)
		return -ENOMEM;

	if (copy_from_user(cmd_buf, buffer, count)) {
		kfree(cmd_buf);
		return -EFAULT;
	}

	cmd_buf[count] = '\0';

	cmd_buf_tmp = strchr(cmd_buf, '\n');
	if (cmd_buf_tmp) {
		*cmd_buf_tmp = '\0';
		count = cmd_buf_tmp - cmd_buf + 1;
	}

	ret = qm_cmd_write_dump(qm, cmd_buf);
	if (ret) {
		kfree(cmd_buf);
		return ret;
	}

	kfree(cmd_buf);

	return count;
}

static const struct file_operations qm_cmd_fops = {
	.owner = THIS_MODULE,
	.open = simple_open,
	.read = qm_cmd_read,
	.write = qm_cmd_write,
};

static int qm_create_debugfs_file(struct hisi_qm *qm, enum qm_debug_file index)
{
	struct dentry *qm_d = qm->debug.qm_d;
	struct debugfs_file *file = qm->debug.files + index;

	debugfs_create_file(qm_debug_file_name[index], 0600, qm_d, file,
			    &qm_debug_fops);

	file->index = index;
	mutex_init(&file->lock);
	file->debug = &qm->debug;

	return 0;
}

static void qm_hw_error_init_v1(struct hisi_qm *qm, u32 ce, u32 nfe, u32 fe)
{
	writel(QM_ABNORMAL_INT_MASK_VALUE, qm->io_base + QM_ABNORMAL_INT_MASK);
}

static void qm_hw_error_init_v2(struct hisi_qm *qm, u32 ce, u32 nfe, u32 fe)
{
	u32 irq_enable = ce | nfe | fe;
	u32 irq_unmask = ~irq_enable;

	qm->error_mask = ce | nfe | fe;

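	/* clear QM hw residual error source */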
	writel(QM_ABNORMAL_INT_SOURCE_CLR,
	       qm->io_base + QM_ABNORMAL_INT_SOURCE);

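	/* configure error type */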
	writel(ce, qm->io_base + QM_RAS_CE_ENABLE);
	writel(QM_RAS_CE_TIMES_PER_IRQ, qm->io_base + QM_RAS_CE_THRESHOLD);
	writel(nfe, qm->io_base + QM_RAS_NFE_ENABLE);
	writel(fe, qm->io_base + QM_RAS_FE_ENABLE);

	irq_unmask &= readl(qm->io_base + QM_ABNORMAL_INT_MASK);
	writel(irq_unmask, qm->io_base + QM_ABNORMAL_INT_MASK);
}

static void qm_hw_error_uninit_v2(struct hisi_qm *qm)
{
	writel(QM_ABNORMAL_INT_MASK_VALUE, qm->io_base + QM_ABNORMAL_INT_MASK);
}

static void qm_log_hw_error(struct hisi_qm *qm, u32 error_status)
{
	const struct hisi_qm_hw_error *err;
	struct device *dev = &qm->pdev->dev;
	u32 reg_val, type, vf_num;
	int i;

	for (i = 0; i < ARRAY_SIZE(qm_hw_error); i++) {
		err = &qm_hw_error[i];
		if (!(err->int_msk & error_status))
			continue;

		dev_err(dev, "%s [error status=0x%x] found\n",
			err->msg, err->int_msk);

		if (err->int_msk & QM_DB_TIMEOUT) {
			reg_val = readl(qm->io_base + QM_ABNORMAL_INF01);
			type = (reg_val & QM_DB_TIMEOUT_TYPE) >>
			       QM_DB_TIMEOUT_TYPE_SHIFT;
			vf_num = reg_val & QM_DB_TIMEOUT_VF;
			dev_err(dev, "qm %s doorbell timeout in function %u\n",
				qm_db_timeout[type], vf_num);
		} else if (err->int_msk & QM_OF_FIFO_OF) {
			reg_val = readl(qm->io_base + QM_ABNORMAL_INF00);
			type = (reg_val & QM_FIFO_OVERFLOW_TYPE) >>
			       QM_FIFO_OVERFLOW_TYPE_SHIFT;
			vf_num = reg_val & QM_FIFO_OVERFLOW_VF;

			if (type < ARRAY_SIZE(qm_fifo_overflow))
				dev_err(dev, "qm %s fifo overflow in function %u\n",
					qm_fifo_overflow[type], vf_num);
			else
				dev_err(dev, "unknown error type\n");
		}
	}
}

static enum acc_err_result qm_hw_error_handle_v2(struct hisi_qm *qm)
{
	u32 error_status, tmp;

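	/* read the error status and keep only the enabled error bits */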
	tmp = readl(qm->io_base + QM_ABNORMAL_INT_STATUS);
	error_status = qm->error_mask & tmp;

	if (error_status) {
		if (error_status & QM_ECC_MBIT)
			qm->err_status.is_qm_ecc_mbit = true;

		qm_log_hw_error(qm, error_status);
		if (error_status == QM_DB_RANDOM_INVALID) {
			writel(error_status, qm->io_base +
			       QM_ABNORMAL_INT_SOURCE);
			return ACC_ERR_RECOVERED;
		}

		return ACC_ERR_NEED_RESET;
	}

	return ACC_ERR_RECOVERED;
}

static const struct hisi_qm_hw_ops qm_hw_ops_v1 = {
	.qm_db = qm_db_v1,
	.get_irq_num = qm_get_irq_num_v1,
	.hw_error_init = qm_hw_error_init_v1,
};

static const struct hisi_qm_hw_ops qm_hw_ops_v2 = {
	.get_vft = qm_get_vft_v2,
	.qm_db = qm_db_v2,
	.get_irq_num = qm_get_irq_num_v2,
	.hw_error_init = qm_hw_error_init_v2,
	.hw_error_uninit = qm_hw_error_uninit_v2,
	.hw_error_handle = qm_hw_error_handle_v2,
};

static void *qm_get_avail_sqe(struct hisi_qp *qp)
{
	struct hisi_qp_status *qp_status = &qp->qp_status;
	u16 sq_tail = qp_status->sq_tail;

	if (unlikely(atomic_read(&qp->qp_status.used) == QM_Q_DEPTH - 1))
		return NULL;

	return qp->sqe + sq_tail * qp->qm->sqe_size;
}

static struct hisi_qp *qm_create_qp_nolock(struct hisi_qm *qm, u8 alg_type)
{
	struct device *dev = &qm->pdev->dev;
	struct hisi_qp *qp;
	int qp_id;

	if (!qm_qp_avail_state(qm, NULL, QP_INIT))
		return ERR_PTR(-EPERM);

	if (qm->qp_in_used == qm->qp_num) {
		dev_info_ratelimited(dev, "All %u queues of QM are busy!\n",
				     qm->qp_num);
		atomic64_inc(&qm->debug.dfx.create_qp_err_cnt);
		return ERR_PTR(-EBUSY);
	}

	qp_id = idr_alloc_cyclic(&qm->qp_idr, NULL, 0, qm->qp_num, GFP_ATOMIC);
	if (qp_id < 0) {
		dev_info_ratelimited(dev, "All %u queues of QM are busy!\n",
				     qm->qp_num);
		atomic64_inc(&qm->debug.dfx.create_qp_err_cnt);
		return ERR_PTR(-EBUSY);
	}

	qp = &qm->qp_array[qp_id];

	memset(qp->cqe, 0, sizeof(struct qm_cqe) * QM_Q_DEPTH);

	qp->event_cb = NULL;
	qp->req_cb = NULL;
	qp->qp_id = qp_id;
	qp->alg_type = alg_type;
	qm->qp_in_used++;
	atomic_set(&qp->qp_status.flags, QP_INIT);

	return qp;
}

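/**
 * hisi_qm_create_qp() - Create a queue pair from a qm.
 * @qm: The qm to create a qp from.
 * @alg_type: Accelerator specific algorithm type in sqc.
 *
 * Return the created qp on success, or an ERR_PTR on failure.
 */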
struct hisi_qp *hisi_qm_create_qp(struct hisi_qm *qm, u8 alg_type)
{
	struct hisi_qp *qp;

	down_write(&qm->qps_lock);
	qp = qm_create_qp_nolock(qm, alg_type);
	up_write(&qm->qps_lock);

	return qp;
}
EXPORT_SYMBOL_GPL(hisi_qm_create_qp);

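/**
 * hisi_qm_release_qp() - Release a qp back to its qm.
 * @qp: The qp to release.
 *
 * This function releases the resource of a qp.
 */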
void hisi_qm_release_qp(struct hisi_qp *qp)
{
	struct hisi_qm *qm = qp->qm;

	down_write(&qm->qps_lock);

	if (!qm_qp_avail_state(qm, qp, QP_CLOSE)) {
		up_write(&qm->qps_lock);
		return;
	}

	qm->qp_in_used--;
	idr_remove(&qm->qp_idr, qp->qp_id);

	up_write(&qm->qps_lock);
}
EXPORT_SYMBOL_GPL(hisi_qm_release_qp);

static int qm_qp_ctx_cfg(struct hisi_qp *qp, int qp_id, u32 pasid)
{
	struct hisi_qm *qm = qp->qm;
	struct device *dev = &qm->pdev->dev;
	enum qm_hw_ver ver = qm->ver;
	struct qm_sqc *sqc;
	struct qm_cqc *cqc;
	dma_addr_t sqc_dma;
	dma_addr_t cqc_dma;
	int ret;

	qm_init_qp_status(qp);

	sqc = kzalloc(sizeof(struct qm_sqc), GFP_KERNEL);
	if (!sqc)
		return -ENOMEM;
	sqc_dma = dma_map_single(dev, sqc, sizeof(struct qm_sqc),
				 DMA_TO_DEVICE);
	if (dma_mapping_error(dev, sqc_dma)) {
		kfree(sqc);
		return -ENOMEM;
	}

	INIT_QC_COMMON(sqc, qp->sqe_dma, pasid);
	if (ver == QM_HW_V1) {
		sqc->dw3 = cpu_to_le32(QM_MK_SQC_DW3_V1(0, 0, 0, qm->sqe_size));
		sqc->w8 = cpu_to_le16(QM_Q_DEPTH - 1);
	} else {
		sqc->dw3 = cpu_to_le32(QM_MK_SQC_DW3_V2(qm->sqe_size));
		sqc->w8 = 0;
	}
	sqc->cq_num = cpu_to_le16(qp_id);
	sqc->w13 = cpu_to_le16(QM_MK_SQC_W13(0, 1, qp->alg_type));

	ret = qm_mb(qm, QM_MB_CMD_SQC, sqc_dma, qp_id, 0);
	dma_unmap_single(dev, sqc_dma, sizeof(struct qm_sqc), DMA_TO_DEVICE);
	kfree(sqc);
	if (ret)
		return ret;

	cqc = kzalloc(sizeof(struct qm_cqc), GFP_KERNEL);
	if (!cqc)
		return -ENOMEM;
	cqc_dma = dma_map_single(dev, cqc, sizeof(struct qm_cqc),
				 DMA_TO_DEVICE);
	if (dma_mapping_error(dev, cqc_dma)) {
		kfree(cqc);
		return -ENOMEM;
	}

	INIT_QC_COMMON(cqc, qp->cqe_dma, pasid);
	if (ver == QM_HW_V1) {
		cqc->dw3 = cpu_to_le32(QM_MK_CQC_DW3_V1(0, 0, 0, 4));
		cqc->w8 = cpu_to_le16(QM_Q_DEPTH - 1);
	} else {
		cqc->dw3 = cpu_to_le32(QM_MK_CQC_DW3_V2(4));
		cqc->w8 = 0;
	}
	cqc->dw6 = cpu_to_le32(1 << QM_CQ_PHASE_SHIFT | 1 << QM_CQ_FLAG_SHIFT);

	ret = qm_mb(qm, QM_MB_CMD_CQC, cqc_dma, qp_id, 0);
	dma_unmap_single(dev, cqc_dma, sizeof(struct qm_cqc), DMA_TO_DEVICE);
	kfree(cqc);

	return ret;
}

static int qm_start_qp_nolock(struct hisi_qp *qp, unsigned long arg)
{
	struct hisi_qm *qm = qp->qm;
	struct device *dev = &qm->pdev->dev;
	int qp_id = qp->qp_id;
	u32 pasid = arg;
	int ret;

	if (!qm_qp_avail_state(qm, qp, QP_START))
		return -EPERM;

	ret = qm_qp_ctx_cfg(qp, qp_id, pasid);
	if (ret)
		return ret;

	atomic_set(&qp->qp_status.flags, QP_START);
	dev_dbg(dev, "queue %d started\n", qp_id);

	return 0;
}

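/**
 * hisi_qm_start_qp() - Start a qp into running.
 * @qp: The qp to start.
 * @arg: Accelerator specific argument (the pasid).
 *
 * After this function, the qp can receive requests from the user. Return 0
 * on success, or a negative error code on failure.
 */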
int hisi_qm_start_qp(struct hisi_qp *qp, unsigned long arg)
{
	struct hisi_qm *qm = qp->qm;
	int ret;

	down_write(&qm->qps_lock);
	ret = qm_start_qp_nolock(qp, arg);
	up_write(&qm->qps_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(hisi_qm_start_qp);

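/**
 * qm_drain_qp() - Drain a qp.
 * @qp: The qp to drain.
 *
 * Determine whether the queue is cleared by judging the tail pointers of
 * sq and cq.
 */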
static int qm_drain_qp(struct hisi_qp *qp)
{
	size_t size = sizeof(struct qm_sqc) + sizeof(struct qm_cqc);
	struct hisi_qm *qm = qp->qm;
	struct device *dev = &qm->pdev->dev;
	struct qm_sqc *sqc;
	struct qm_cqc *cqc;
	dma_addr_t dma_addr;
	int ret = 0, i = 0;
	void *addr;

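	/* no need to judge the tail pointers if an ECC multi-bit error occurred */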
	if (qm->err_status.is_qm_ecc_mbit || qm->err_status.is_dev_ecc_mbit)
		return 0;

	addr = qm_ctx_alloc(qm, size, &dma_addr);
	if (IS_ERR(addr)) {
		dev_err(dev, "Failed to alloc ctx for sqc and cqc!\n");
		return -ENOMEM;
	}

	while (++i) {
		ret = qm_dump_sqc_raw(qm, dma_addr, qp->qp_id);
		if (ret) {
			dev_err_ratelimited(dev, "Failed to dump sqc!\n");
			break;
		}
		sqc = addr;

		ret = qm_dump_cqc_raw(qm, (dma_addr + sizeof(struct qm_sqc)),
				      qp->qp_id);
		if (ret) {
			dev_err_ratelimited(dev, "Failed to dump cqc!\n");
			break;
		}
		cqc = addr + sizeof(struct qm_sqc);

		if ((sqc->tail == cqc->tail) &&
		    (QM_SQ_TAIL_IDX(sqc) == QM_CQ_TAIL_IDX(cqc)))
			break;

		if (i == MAX_WAIT_COUNTS) {
			dev_err(dev, "Fail to empty queue %u!\n", qp->qp_id);
			ret = -EBUSY;
			break;
		}

		usleep_range(WAIT_PERIOD_US_MIN, WAIT_PERIOD_US_MAX);
	}

	qm_ctx_free(qm, size, addr, &dma_addr);

	return ret;
}

static int qm_stop_qp_nolock(struct hisi_qp *qp)
{
	struct device *dev = &qp->qm->pdev->dev;
	int ret;

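	/*
	 * Stopping and releasing a qp during reset is allowed. If the qp was
	 * already stopped by the reset path, clear is_resetting here so that
	 * the qp will not be restarted after the reset completes.
	 */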
	if (atomic_read(&qp->qp_status.flags) == QP_STOP) {
		qp->is_resetting = false;
		return 0;
	}

	if (!qm_qp_avail_state(qp->qm, qp, QP_STOP))
		return -EPERM;

	atomic_set(&qp->qp_status.flags, QP_STOP);

	ret = qm_drain_qp(qp);
	if (ret)
		dev_err(dev, "Failed to drain out data for stopping!\n");

	if (qp->qm->wq)
		flush_workqueue(qp->qm->wq);
	else
		flush_work(&qp->qm->work);

	dev_dbg(dev, "stop queue %u!", qp->qp_id);

	return 0;
}

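/**
 * hisi_qm_stop_qp() - Stop a qp in a qm.
 * @qp: The qp to stop.
 *
 * This function is the reverse of hisi_qm_start_qp.
 */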
int hisi_qm_stop_qp(struct hisi_qp *qp)
{
	int ret;

	down_write(&qp->qm->qps_lock);
	ret = qm_stop_qp_nolock(qp);
	up_write(&qp->qm->qps_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(hisi_qm_stop_qp);

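/**
 * hisi_qp_send() - Queue up a task in the hardware queue.
 * @qp: The qp in which to put the message.
 * @msg: The message.
 *
 * Return -EBUSY if the qp is currently full, and -EAGAIN if the qp or its
 * qm is stopped or resetting.
 */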
int hisi_qp_send(struct hisi_qp *qp, const void *msg)
{
	struct hisi_qp_status *qp_status = &qp->qp_status;
	u16 sq_tail = qp_status->sq_tail;
	u16 sq_tail_next = (sq_tail + 1) % QM_Q_DEPTH;
	void *sqe = qm_get_avail_sqe(qp);

	if (unlikely(atomic_read(&qp->qp_status.flags) == QP_STOP ||
		     atomic_read(&qp->qm->status.flags) == QM_STOP ||
		     qp->is_resetting)) {
		dev_info_ratelimited(&qp->qm->pdev->dev, "QP is stopped or resetting\n");
		return -EAGAIN;
	}

	if (!sqe)
		return -EBUSY;

	memcpy(sqe, msg, qp->qm->sqe_size);

	qm_db(qp->qm, qp->qp_id, QM_DOORBELL_CMD_SQ, sq_tail_next, 0);
	atomic_inc(&qp->qp_status.used);
	qp_status->sq_tail = sq_tail_next;

	return 0;
}
EXPORT_SYMBOL_GPL(hisi_qp_send);

static void hisi_qm_cache_wb(struct hisi_qm *qm)
{
	unsigned int val;

	if (qm->ver == QM_HW_V1)
		return;

	writel(0x1, qm->io_base + QM_CACHE_WB_START);
	if (readl_relaxed_poll_timeout(qm->io_base + QM_CACHE_WB_DONE,
				       val, val & BIT(0), 10, 1000))
		dev_err(&qm->pdev->dev, "QM writeback sqc cache fail!\n");
}

static void qm_qp_event_notifier(struct hisi_qp *qp)
{
	wake_up_interruptible(&qp->uacce_q->wait);
}

static int hisi_qm_get_available_instances(struct uacce_device *uacce)
{
	return hisi_qm_get_free_qp_num(uacce->priv);
}

static int hisi_qm_uacce_get_queue(struct uacce_device *uacce,
				   unsigned long arg,
				   struct uacce_queue *q)
{
	struct hisi_qm *qm = uacce->priv;
	struct hisi_qp *qp;
	u8 alg_type = 0;

	qp = hisi_qm_create_qp(qm, alg_type);
	if (IS_ERR(qp))
		return PTR_ERR(qp);

	q->priv = qp;
	q->uacce = uacce;
	qp->uacce_q = q;
	qp->event_cb = qm_qp_event_notifier;
	qp->pasid = arg;

	return 0;
}

static void hisi_qm_uacce_put_queue(struct uacce_queue *q)
{
	struct hisi_qp *qp = q->priv;

	hisi_qm_cache_wb(qp->qm);
	hisi_qm_release_qp(qp);
}

static int hisi_qm_uacce_mmap(struct uacce_queue *q,
			      struct vm_area_struct *vma,
			      struct uacce_qfile_region *qfr)
{
	struct hisi_qp *qp = q->priv;
	struct hisi_qm *qm = qp->qm;
	size_t sz = vma->vm_end - vma->vm_start;
	struct pci_dev *pdev = qm->pdev;
	struct device *dev = &pdev->dev;
	unsigned long vm_pgoff;
	int ret;

	switch (qfr->type) {
	case UACCE_QFRT_MMIO:
		if (qm->ver == QM_HW_V1) {
			if (sz > PAGE_SIZE * QM_DOORBELL_PAGE_NR)
				return -EINVAL;
		} else {
			if (sz > PAGE_SIZE * (QM_DOORBELL_PAGE_NR +
			    QM_DOORBELL_SQ_CQ_BASE_V2 / PAGE_SIZE))
				return -EINVAL;
		}

		vma->vm_flags |= VM_IO;

		return remap_pfn_range(vma, vma->vm_start,
				       qm->phys_base >> PAGE_SHIFT,
				       sz, pgprot_noncached(vma->vm_page_prot));
	case UACCE_QFRT_DUS:
		if (sz != qp->qdma.size)
			return -EINVAL;

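		/*
		 * dma_mmap_coherent() requires vm_pgoff to be 0; save the
		 * initial value and restore it afterwards.
		 */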
		vm_pgoff = vma->vm_pgoff;
		vma->vm_pgoff = 0;
		ret = dma_mmap_coherent(dev, vma, qp->qdma.va,
					qp->qdma.dma, sz);
		vma->vm_pgoff = vm_pgoff;
		return ret;

	default:
		return -EINVAL;
	}
}

static int hisi_qm_uacce_start_queue(struct uacce_queue *q)
{
	struct hisi_qp *qp = q->priv;

	return hisi_qm_start_qp(qp, qp->pasid);
}

static void hisi_qm_uacce_stop_queue(struct uacce_queue *q)
{
	hisi_qm_stop_qp(q->priv);
}

static int qm_set_sqctype(struct uacce_queue *q, u16 type)
{
	struct hisi_qm *qm = q->uacce->priv;
	struct hisi_qp *qp = q->priv;

	down_write(&qm->qps_lock);
	qp->alg_type = type;
	up_write(&qm->qps_lock);

	return 0;
}

static long hisi_qm_uacce_ioctl(struct uacce_queue *q, unsigned int cmd,
				unsigned long arg)
{
	struct hisi_qp *qp = q->priv;
	struct hisi_qp_ctx qp_ctx;

	if (cmd == UACCE_CMD_QM_SET_QP_CTX) {
		if (copy_from_user(&qp_ctx, (void __user *)arg,
				   sizeof(struct hisi_qp_ctx)))
			return -EFAULT;

		if (qp_ctx.qc_type != 0 && qp_ctx.qc_type != 1)
			return -EINVAL;

		qm_set_sqctype(q, qp_ctx.qc_type);
		qp_ctx.id = qp->qp_id;

		if (copy_to_user((void __user *)arg, &qp_ctx,
				 sizeof(struct hisi_qp_ctx)))
			return -EFAULT;
	} else {
		return -EINVAL;
	}

	return 0;
}

static const struct uacce_ops uacce_qm_ops = {
	.get_available_instances = hisi_qm_get_available_instances,
	.get_queue = hisi_qm_uacce_get_queue,
	.put_queue = hisi_qm_uacce_put_queue,
	.start_queue = hisi_qm_uacce_start_queue,
	.stop_queue = hisi_qm_uacce_stop_queue,
	.mmap = hisi_qm_uacce_mmap,
	.ioctl = hisi_qm_uacce_ioctl,
};

static int qm_alloc_uacce(struct hisi_qm *qm)
{
	struct pci_dev *pdev = qm->pdev;
	struct uacce_device *uacce;
	unsigned long mmio_page_nr;
	unsigned long dus_page_nr;
	struct uacce_interface interface = {
		.flags = UACCE_DEV_SVA,
		.ops = &uacce_qm_ops,
	};
	int ret;

	ret = strscpy(interface.name, pdev->driver->name,
		      sizeof(interface.name));
	if (ret < 0)
		return -ENAMETOOLONG;

	uacce = uacce_alloc(&pdev->dev, &interface);
	if (IS_ERR(uacce))
		return PTR_ERR(uacce);

	if (uacce->flags & UACCE_DEV_SVA) {
		qm->use_sva = true;
	} else {
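		/* only the SVA case is supported; roll back otherwise */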
		uacce_remove(uacce);
		qm->uacce = NULL;
		return -EINVAL;
	}

	uacce->is_vf = pdev->is_virtfn;
	uacce->priv = qm;
	uacce->algs = qm->algs;

	if (qm->ver == QM_HW_V1) {
		mmio_page_nr = QM_DOORBELL_PAGE_NR;
		uacce->api_ver = HISI_QM_API_VER_BASE;
	} else {
		mmio_page_nr = QM_DOORBELL_PAGE_NR +
			QM_DOORBELL_SQ_CQ_BASE_V2 / PAGE_SIZE;
		uacce->api_ver = HISI_QM_API_VER2_BASE;
	}

	dus_page_nr = (PAGE_SIZE - 1 + qm->sqe_size * QM_Q_DEPTH +
		       sizeof(struct qm_cqe) * QM_Q_DEPTH) >> PAGE_SHIFT;

	uacce->qf_pg_num[UACCE_QFRT_MMIO] = mmio_page_nr;
	uacce->qf_pg_num[UACCE_QFRT_DUS] = dus_page_nr;

	qm->uacce = uacce;

	return 0;
}

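/**
 * qm_frozen() - Try to freeze the QM to cut off continuous queue requests.
 * @qm: The qm to freeze.
 *
 * If no user holds a queue on this QM, mark all queues as used and freeze
 * it so SR-IOV can be disabled; otherwise return -EBUSY.
 */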
static int qm_frozen(struct hisi_qm *qm)
{
	down_write(&qm->qps_lock);

	if (qm->is_frozen) {
		up_write(&qm->qps_lock);
		return 0;
	}

	if (!qm->qp_in_used) {
		qm->qp_in_used = qm->qp_num;
		qm->is_frozen = true;
		up_write(&qm->qps_lock);
		return 0;
	}

	up_write(&qm->qps_lock);

	return -EBUSY;
}

static int qm_try_frozen_vfs(struct pci_dev *pdev,
			     struct hisi_qm_list *qm_list)
{
	struct hisi_qm *qm, *vf_qm;
	struct pci_dev *dev;
	int ret = 0;

	if (!qm_list || !pdev)
		return -EINVAL;

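	/* try to freeze all the VFs which belong to the given PF */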
	mutex_lock(&qm_list->lock);
	list_for_each_entry(qm, &qm_list->list, list) {
		dev = qm->pdev;
		if (dev == pdev)
			continue;
		if (pci_physfn(dev) == pdev) {
			vf_qm = pci_get_drvdata(dev);
			ret = qm_frozen(vf_qm);
			if (ret)
				goto frozen_fail;
		}
	}

frozen_fail:
	mutex_unlock(&qm_list->lock);

	return ret;
}

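/**
 * hisi_qm_wait_task_finish() - Wait for all tasks on a qm (and, for a PF,
 * on all of its VFs) to finish before the device is removed.
 * @qm: The qm whose tasks are waited on.
 * @qm_list: The list of all available devices.
 */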
void hisi_qm_wait_task_finish(struct hisi_qm *qm, struct hisi_qm_list *qm_list)
{
	while (qm_frozen(qm) ||
	       ((qm->fun_type == QM_HW_PF) &&
	       qm_try_frozen_vfs(qm->pdev, qm_list))) {
		msleep(WAIT_PERIOD);
	}

	udelay(REMOVE_WAIT_DELAY);
}
EXPORT_SYMBOL_GPL(hisi_qm_wait_task_finish);

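/**
 * hisi_qm_get_free_qp_num() - Get the number of free qps in a qm.
 * @qm: The qm to be queried.
 *
 * Returns the number of queue pairs that are not yet in use.
 */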
int hisi_qm_get_free_qp_num(struct hisi_qm *qm)
{
	int ret;

	down_read(&qm->qps_lock);
	ret = qm->qp_num - qm->qp_in_used;
	up_read(&qm->qps_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(hisi_qm_get_free_qp_num);

static void hisi_qp_memory_uninit(struct hisi_qm *qm, int num)
{
	struct device *dev = &qm->pdev->dev;
	struct qm_dma *qdma;
	int i;

	for (i = num - 1; i >= 0; i--) {
		qdma = &qm->qp_array[i].qdma;
		dma_free_coherent(dev, qdma->size, qdma->va, qdma->dma);
	}

	kfree(qm->qp_array);
}

static int hisi_qp_memory_init(struct hisi_qm *qm, size_t dma_size, int id)
{
	struct device *dev = &qm->pdev->dev;
	size_t off = qm->sqe_size * QM_Q_DEPTH;
	struct hisi_qp *qp;

	qp = &qm->qp_array[id];
	qp->qdma.va = dma_alloc_coherent(dev, dma_size, &qp->qdma.dma,
					 GFP_KERNEL);
	if (!qp->qdma.va)
		return -ENOMEM;

	qp->sqe = qp->qdma.va;
	qp->sqe_dma = qp->qdma.dma;
	qp->cqe = qp->qdma.va + off;
	qp->cqe_dma = qp->qdma.dma + off;
	qp->qdma.size = dma_size;
	qp->qm = qm;
	qp->qp_id = id;

	return 0;
}

static int hisi_qm_memory_init(struct hisi_qm *qm)
{
	struct device *dev = &qm->pdev->dev;
	size_t qp_dma_size, off = 0;
	int i, ret = 0;

#define QM_INIT_BUF(qm, type, num) do { \
	(qm)->type = ((qm)->qdma.va + (off)); \
	(qm)->type##_dma = (qm)->qdma.dma + (off); \
	off += QMC_ALIGN(sizeof(struct qm_##type) * (num)); \
} while (0)

	idr_init(&qm->qp_idr);
	qm->qdma.size = QMC_ALIGN(sizeof(struct qm_eqe) * QM_EQ_DEPTH) +
			QMC_ALIGN(sizeof(struct qm_aeqe) * QM_Q_DEPTH) +
			QMC_ALIGN(sizeof(struct qm_sqc) * qm->qp_num) +
			QMC_ALIGN(sizeof(struct qm_cqc) * qm->qp_num);
	qm->qdma.va = dma_alloc_coherent(dev, qm->qdma.size, &qm->qdma.dma,
					 GFP_ATOMIC);
	dev_dbg(dev, "allocate qm dma buf size=%zx\n", qm->qdma.size);
	if (!qm->qdma.va)
		return -ENOMEM;

	QM_INIT_BUF(qm, eqe, QM_EQ_DEPTH);
	QM_INIT_BUF(qm, aeqe, QM_Q_DEPTH);
	QM_INIT_BUF(qm, sqc, qm->qp_num);
	QM_INIT_BUF(qm, cqc, qm->qp_num);

	qm->qp_array = kcalloc(qm->qp_num, sizeof(struct hisi_qp), GFP_KERNEL);
	if (!qm->qp_array) {
		ret = -ENOMEM;
		goto err_alloc_qp_array;
	}

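	/* each qp owns one contiguous DMA region: SQEs first, then CQEs */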
	qp_dma_size = qm->sqe_size * QM_Q_DEPTH +
		      sizeof(struct qm_cqe) * QM_Q_DEPTH;
	qp_dma_size = PAGE_ALIGN(qp_dma_size);
	for (i = 0; i < qm->qp_num; i++) {
		ret = hisi_qp_memory_init(qm, qp_dma_size, i);
		if (ret)
			goto err_init_qp_mem;

		dev_dbg(dev, "allocate qp dma buf size=%zx\n", qp_dma_size);
	}

	return ret;

err_init_qp_mem:
	hisi_qp_memory_uninit(qm, i);
err_alloc_qp_array:
	dma_free_coherent(dev, qm->qdma.size, qm->qdma.va, qm->qdma.dma);

	return ret;
}

static void hisi_qm_pre_init(struct hisi_qm *qm)
{
	struct pci_dev *pdev = qm->pdev;

	if (qm->ver == QM_HW_V1)
		qm->ops = &qm_hw_ops_v1;
	else
		qm->ops = &qm_hw_ops_v2;

	pci_set_drvdata(pdev, qm);
	mutex_init(&qm->mailbox_lock);
	init_rwsem(&qm->qps_lock);
	qm->qp_in_used = 0;
	qm->is_frozen = false;
}

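/**
 * hisi_qm_uninit() - Uninitialize qm.
 * @qm: The qm to be uninitialized.
 *
 * This function releases the resources of the qm: queue memory, DMA
 * buffers, irqs and PCI resources.
 */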
void hisi_qm_uninit(struct hisi_qm *qm)
{
	struct pci_dev *pdev = qm->pdev;
	struct device *dev = &pdev->dev;

	down_write(&qm->qps_lock);

	if (!qm_avail_state(qm, QM_CLOSE)) {
		up_write(&qm->qps_lock);
		return;
	}

	uacce_remove(qm->uacce);
	qm->uacce = NULL;

	hisi_qp_memory_uninit(qm, qm->qp_num);
	idr_destroy(&qm->qp_idr);

	if (qm->qdma.va) {
		hisi_qm_cache_wb(qm);
		dma_free_coherent(dev, qm->qdma.size,
				  qm->qdma.va, qm->qdma.dma);
		memset(&qm->qdma, 0, sizeof(qm->qdma));
	}

	qm_irq_unregister(qm);
	pci_free_irq_vectors(pdev);
	iounmap(qm->io_base);
	pci_release_mem_regions(pdev);
	pci_disable_device(pdev);

	up_write(&qm->qps_lock);
}
EXPORT_SYMBOL_GPL(hisi_qm_uninit);

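/**
 * hisi_qm_get_vft() - Get vft from a qm.
 * @qm: The qm we want to get the vft of.
 * @base: The base number of queues in the vft.
 * @number: The number of queues in the vft.
 *
 * Multiple queues can be allocated to a qm by configuring the virtual
 * function table. This function reads that configuration; it is normally
 * called in the VF driver to learn which queues it owns.
 *
 * qm hw v1 does not support this interface.
 */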
int hisi_qm_get_vft(struct hisi_qm *qm, u32 *base, u32 *number)
{
	if (!base || !number)
		return -EINVAL;

	if (!qm->ops->get_vft) {
		dev_err(&qm->pdev->dev, "QM doesn't support VFT read!\n");
		return -EINVAL;
	}

	return qm->ops->get_vft(qm, base, number);
}
EXPORT_SYMBOL_GPL(hisi_qm_get_vft);

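/*
 * hisi_qm_set_vft() - Set vft for a function. This is always called in the
 * PF driver: it assigns "number" queues starting at "base" to the function
 * "fun_num" (0 is the PF itself, 1~n are its VFs). For example, assigning
 * queues A~B to a function: hisi_qm_set_vft(qm, fun_num, A, B - A + 1).
 */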
static int hisi_qm_set_vft(struct hisi_qm *qm, u32 fun_num, u32 base,
			   u32 number)
{
	u32 max_q_num = qm->ctrl_qp_num;

	if (base >= max_q_num || number > max_q_num ||
	    (base + number) > max_q_num)
		return -EINVAL;

	return qm_set_sqc_cqc_vft(qm, fun_num, base, number);
}

static void qm_init_eq_aeq_status(struct hisi_qm *qm)
{
	struct hisi_qm_status *status = &qm->status;

	status->eq_head = 0;
	status->aeq_head = 0;
	status->eqc_phase = true;
	status->aeqc_phase = true;
}

static int qm_eq_ctx_cfg(struct hisi_qm *qm)
{
	struct device *dev = &qm->pdev->dev;
	struct qm_eqc *eqc;
	struct qm_aeqc *aeqc;
	dma_addr_t eqc_dma;
	dma_addr_t aeqc_dma;
	int ret;

	qm_init_eq_aeq_status(qm);

	eqc = kzalloc(sizeof(struct qm_eqc), GFP_KERNEL);
	if (!eqc)
		return -ENOMEM;
	eqc_dma = dma_map_single(dev, eqc, sizeof(struct qm_eqc),
				 DMA_TO_DEVICE);
	if (dma_mapping_error(dev, eqc_dma)) {
		kfree(eqc);
		return -ENOMEM;
	}

	eqc->base_l = cpu_to_le32(lower_32_bits(qm->eqe_dma));
	eqc->base_h = cpu_to_le32(upper_32_bits(qm->eqe_dma));
	if (qm->ver == QM_HW_V1)
		eqc->dw3 = cpu_to_le32(QM_EQE_AEQE_SIZE);
	eqc->dw6 = cpu_to_le32((QM_EQ_DEPTH - 1) | (1 << QM_EQC_PHASE_SHIFT));
	ret = qm_mb(qm, QM_MB_CMD_EQC, eqc_dma, 0, 0);
	dma_unmap_single(dev, eqc_dma, sizeof(struct qm_eqc), DMA_TO_DEVICE);
	kfree(eqc);
	if (ret)
		return ret;

	aeqc = kzalloc(sizeof(struct qm_aeqc), GFP_KERNEL);
	if (!aeqc)
		return -ENOMEM;
	aeqc_dma = dma_map_single(dev, aeqc, sizeof(struct qm_aeqc),
				  DMA_TO_DEVICE);
	if (dma_mapping_error(dev, aeqc_dma)) {
		kfree(aeqc);
		return -ENOMEM;
	}

	aeqc->base_l = cpu_to_le32(lower_32_bits(qm->aeqe_dma));
	aeqc->base_h = cpu_to_le32(upper_32_bits(qm->aeqe_dma));
	aeqc->dw6 = cpu_to_le32((QM_Q_DEPTH - 1) | (1 << QM_EQC_PHASE_SHIFT));

	ret = qm_mb(qm, QM_MB_CMD_AEQC, aeqc_dma, 0, 0);
	dma_unmap_single(dev, aeqc_dma, sizeof(struct qm_aeqc), DMA_TO_DEVICE);
	kfree(aeqc);

	return ret;
}

static int __hisi_qm_start(struct hisi_qm *qm)
{
	int ret;

	WARN_ON(!qm->qdma.dma);

	if (qm->fun_type == QM_HW_PF) {
		ret = qm_dev_mem_reset(qm);
		if (ret)
			return ret;

		ret = hisi_qm_set_vft(qm, 0, qm->qp_base, qm->qp_num);
		if (ret)
			return ret;
	}

	ret = qm_eq_ctx_cfg(qm);
	if (ret)
		return ret;

	ret = qm_mb(qm, QM_MB_CMD_SQC_BT, qm->sqc_dma, 0, 0);
	if (ret)
		return ret;

	ret = qm_mb(qm, QM_MB_CMD_CQC_BT, qm->cqc_dma, 0, 0);
	if (ret)
		return ret;

	writel(0x0, qm->io_base + QM_VF_EQ_INT_MASK);
	writel(0x0, qm->io_base + QM_VF_AEQ_INT_MASK);

	return 0;
}

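/**
 * hisi_qm_start() - Start a qm.
 * @qm: The qm to be started.
 *
 * This function starts a qm; afterwards qps can be allocated from it.
 */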
int hisi_qm_start(struct hisi_qm *qm)
{
	struct device *dev = &qm->pdev->dev;
	int ret = 0;

	down_write(&qm->qps_lock);

	if (!qm_avail_state(qm, QM_START)) {
		up_write(&qm->qps_lock);
		return -EPERM;
	}

	dev_dbg(dev, "qm start with %d queue pairs\n", qm->qp_num);

	if (!qm->qp_num) {
		dev_err(dev, "qp_num should not be 0\n");
		ret = -EINVAL;
		goto err_unlock;
	}

	ret = __hisi_qm_start(qm);
	if (!ret)
		atomic_set(&qm->status.flags, QM_START);

err_unlock:
	up_write(&qm->qps_lock);
	return ret;
}
EXPORT_SYMBOL_GPL(hisi_qm_start);

static int qm_restart(struct hisi_qm *qm)
{
	struct device *dev = &qm->pdev->dev;
	struct hisi_qp *qp;
	int ret, i;

	ret = hisi_qm_start(qm);
	if (ret < 0)
		return ret;

	down_write(&qm->qps_lock);
	for (i = 0; i < qm->qp_num; i++) {
		qp = &qm->qp_array[i];
		if (atomic_read(&qp->qp_status.flags) == QP_STOP &&
		    qp->is_resetting) {
			ret = qm_start_qp_nolock(qp, 0);
			if (ret < 0) {
				dev_err(dev, "Failed to start qp%d!\n", i);

				up_write(&qm->qps_lock);
				return ret;
			}
			qp->is_resetting = false;
		}
	}
	up_write(&qm->qps_lock);

	return 0;
}

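/* Stop started qps in the reset flow */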
static int qm_stop_started_qp(struct hisi_qm *qm)
{
	struct device *dev = &qm->pdev->dev;
	struct hisi_qp *qp;
	int i, ret;

	for (i = 0; i < qm->qp_num; i++) {
		qp = &qm->qp_array[i];
		if (qp && atomic_read(&qp->qp_status.flags) == QP_START) {
			qp->is_resetting = true;
			ret = qm_stop_qp_nolock(qp);
			if (ret < 0) {
				dev_err(dev, "Failed to stop qp%d!\n", i);
				return ret;
			}
		}
	}

	return 0;
}

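/*
 * qm_clear_queues() - Clear all queues memory in a qm.
 * @qm: The qm in which the queues will be cleared.
 *
 * This function clears all queues memory in a qm. A reset of the
 * accelerator can use this to clear queues.
 */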
static void qm_clear_queues(struct hisi_qm *qm)
{
	struct hisi_qp *qp;
	int i;

	for (i = 0; i < qm->qp_num; i++) {
		qp = &qm->qp_array[i];
		if (qp->is_resetting)
			memset(qp->qdma.va, 0, qp->qdma.size);
	}

	memset(qm->qdma.va, 0, qm->qdma.size);
}

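/**
 * hisi_qm_stop() - Stop a qm.
 * @qm: The qm which will be stopped.
 * @r: The reason to stop the qm.
 *
 * This function stops the qm and its qps; after that the qm cannot accept
 * new requests. The resources are not released, so a stopped qm can be
 * restarted with hisi_qm_start().
 */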
int hisi_qm_stop(struct hisi_qm *qm, enum qm_stop_reason r)
{
	struct device *dev = &qm->pdev->dev;
	int ret = 0;

	down_write(&qm->qps_lock);

	qm->status.stop_reason = r;
	if (!qm_avail_state(qm, QM_STOP)) {
		ret = -EPERM;
		goto err_unlock;
	}

	if (qm->status.stop_reason == QM_SOFT_RESET ||
	    qm->status.stop_reason == QM_FLR) {
		ret = qm_stop_started_qp(qm);
		if (ret < 0) {
			dev_err(dev, "Failed to stop started qp!\n");
			goto err_unlock;
		}
	}

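	/* Mask eq and aeq irq */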
	writel(0x1, qm->io_base + QM_VF_EQ_INT_MASK);
	writel(0x1, qm->io_base + QM_VF_AEQ_INT_MASK);

	if (qm->fun_type == QM_HW_PF) {
		ret = hisi_qm_set_vft(qm, 0, 0, 0);
		if (ret < 0) {
			dev_err(dev, "Failed to set vft!\n");
			ret = -EBUSY;
			goto err_unlock;
		}
	}

	qm_clear_queues(qm);
	atomic_set(&qm->status.flags, QM_STOP);

err_unlock:
	up_write(&qm->qps_lock);
	return ret;
}
EXPORT_SYMBOL_GPL(hisi_qm_stop);

static ssize_t qm_status_read(struct file *filp, char __user *buffer,
			      size_t count, loff_t *pos)
{
	struct hisi_qm *qm = filp->private_data;
	char buf[QM_DBG_READ_LEN];
	int val, len;

	val = atomic_read(&qm->status.flags);
	len = scnprintf(buf, QM_DBG_READ_LEN, "%s\n", qm_s[val]);

	return simple_read_from_buffer(buffer, count, pos, buf, len);
}

static const struct file_operations qm_status_fops = {
	.owner = THIS_MODULE,
	.open = simple_open,
	.read = qm_status_read,
};

static int qm_debugfs_atomic64_set(void *data, u64 val)
{
	if (val)
		return -EINVAL;

	atomic64_set((atomic64_t *)data, 0);

	return 0;
}

static int qm_debugfs_atomic64_get(void *data, u64 *val)
{
	*val = atomic64_read((atomic64_t *)data);

	return 0;
}

DEFINE_DEBUGFS_ATTRIBUTE(qm_atomic64_ops, qm_debugfs_atomic64_get,
			 qm_debugfs_atomic64_set, "%llu\n");

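/**
 * hisi_qm_debug_init() - Initialize qm related debugfs files.
 * @qm: The qm for which we want to add debugfs files.
 *
 * Create qm related debugfs files.
 */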
int hisi_qm_debug_init(struct hisi_qm *qm)
{
	struct qm_dfx *dfx = &qm->debug.dfx;
	struct dentry *qm_d;
	void *data;
	int i, ret;

	qm_d = debugfs_create_dir("qm", qm->debug.debug_root);
	qm->debug.qm_d = qm_d;

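	/* these debugfs files are only created for the PF */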
	if (qm->fun_type == QM_HW_PF)
		for (i = CURRENT_Q; i < DEBUG_FILE_NUM; i++)
			if (qm_create_debugfs_file(qm, i)) {
				ret = -ENOENT;
				goto failed_to_create;
			}

	debugfs_create_file("regs", 0444, qm->debug.qm_d, qm, &qm_regs_fops);

	debugfs_create_file("cmd", 0444, qm->debug.qm_d, qm, &qm_cmd_fops);

	debugfs_create_file("status", 0444, qm->debug.qm_d, qm,
			    &qm_status_fops);
	for (i = 0; i < ARRAY_SIZE(qm_dfx_files); i++) {
		data = (atomic64_t *)((uintptr_t)dfx + qm_dfx_files[i].offset);
		debugfs_create_file(qm_dfx_files[i].name,
				    0644,
				    qm_d,
				    data,
				    &qm_atomic64_ops);
	}

	return 0;

failed_to_create:
	debugfs_remove_recursive(qm_d);
	return ret;
}
EXPORT_SYMBOL_GPL(hisi_qm_debug_init);

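/**
 * hisi_qm_debug_regs_clear() - Clear qm debug related registers.
 * @qm: The qm for which we want to clear its debug registers.
 */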
void hisi_qm_debug_regs_clear(struct hisi_qm *qm)
{
	struct qm_dfx_registers *regs;
	int i;

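	/* clear current_q */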
	writel(0x0, qm->io_base + QM_DFX_SQE_CNT_VF_SQN);
	writel(0x0, qm->io_base + QM_DFX_CQE_CNT_VF_CQN);

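	/*
	 * these registers are read-and-clear, so clear them by reading
	 * them back with clearing enabled
	 */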
	writel(0x1, qm->io_base + QM_DFX_CNT_CLR_CE);

	regs = qm_dfx_regs;
	for (i = 0; i < CNT_CYC_REGS_NUM; i++) {
		readl(qm->io_base + regs->reg_offset);
		regs++;
	}

	writel(0x0, qm->io_base + QM_DFX_CNT_CLR_CE);
}
EXPORT_SYMBOL_GPL(hisi_qm_debug_regs_clear);

static void qm_hw_error_init(struct hisi_qm *qm)
{
	const struct hisi_qm_err_info *err_info = &qm->err_ini->err_info;

	if (!qm->ops->hw_error_init) {
		dev_err(&qm->pdev->dev, "QM doesn't support hw error handling!\n");
		return;
	}

	qm->ops->hw_error_init(qm, err_info->ce, err_info->nfe, err_info->fe);
}

static void qm_hw_error_uninit(struct hisi_qm *qm)
{
	if (!qm->ops->hw_error_uninit) {
		dev_err(&qm->pdev->dev, "Unexpected QM hw error uninit!\n");
		return;
	}

	qm->ops->hw_error_uninit(qm);
}

static enum acc_err_result qm_hw_error_handle(struct hisi_qm *qm)
{
	if (!qm->ops->hw_error_handle) {
		dev_err(&qm->pdev->dev, "QM doesn't support hw error report!\n");
		return ACC_ERR_NONE;
	}

	return qm->ops->hw_error_handle(qm);
}

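/**
 * hisi_qm_dev_err_init() - Initialize device error configuration.
 * @qm: The qm for which we want to do error initialization.
 *
 * Initialize QM and device error related configuration.
 */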
void hisi_qm_dev_err_init(struct hisi_qm *qm)
{
	if (qm->fun_type == QM_HW_VF)
		return;

	qm_hw_error_init(qm);

	if (!qm->err_ini->hw_err_enable) {
		dev_err(&qm->pdev->dev, "Device doesn't support hw error init!\n");
		return;
	}
	qm->err_ini->hw_err_enable(qm);
}
EXPORT_SYMBOL_GPL(hisi_qm_dev_err_init);

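/**
 * hisi_qm_dev_err_uninit() - Uninitialize device error configuration.
 * @qm: The qm for which we want to do error uninitialization.
 *
 * Uninitialize QM and device error related configuration.
 */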
void hisi_qm_dev_err_uninit(struct hisi_qm *qm)
{
	if (qm->fun_type == QM_HW_VF)
		return;

	qm_hw_error_uninit(qm);

	if (!qm->err_ini->hw_err_disable) {
		dev_err(&qm->pdev->dev, "Unexpected device hw error uninit!\n");
		return;
	}
	qm->err_ini->hw_err_disable(qm);
}
EXPORT_SYMBOL_GPL(hisi_qm_dev_err_uninit);

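/**
 * hisi_qm_free_qps() - Free multiple queue pairs.
 * @qps: The qps to be freed.
 * @qp_num: The number of qps to be freed.
 */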
void hisi_qm_free_qps(struct hisi_qp **qps, int qp_num)
{
	int i;

	if (!qps || qp_num <= 0)
		return;

	for (i = qp_num - 1; i >= 0; i--)
		hisi_qm_release_qp(qps[i]);
}
EXPORT_SYMBOL_GPL(hisi_qm_free_qps);

static void free_list(struct list_head *head)
{
	struct hisi_qm_resource *res, *tmp;

	list_for_each_entry_safe(res, tmp, head, list) {
		list_del(&res->list);
		kfree(res);
	}
}

static int hisi_qm_sort_devices(int node, struct list_head *head,
				struct hisi_qm_list *qm_list)
{
	struct hisi_qm_resource *res, *tmp;
	struct hisi_qm *qm;
	struct list_head *n;
	struct device *dev;
	int dev_node = 0;

	list_for_each_entry(qm, &qm_list->list, list) {
		dev = &qm->pdev->dev;

		if (IS_ENABLED(CONFIG_NUMA)) {
			dev_node = dev_to_node(dev);
			if (dev_node < 0)
				dev_node = 0;
		}

		res = kzalloc(sizeof(*res), GFP_KERNEL);
		if (!res)
			return -ENOMEM;

		res->qm = qm;
		res->distance = node_distance(dev_node, node);
		n = head;
		list_for_each_entry(tmp, head, list) {
			if (res->distance < tmp->distance) {
				n = &tmp->list;
				break;
			}
		}
		list_add_tail(&res->list, n);
	}

	return 0;
}

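/**
 * hisi_qm_alloc_qps_node() - Create multiple queue pairs on a numa node.
 * @qm_list: The list of all available devices.
 * @qp_num: The number of queue pairs to be created.
 * @alg_type: The algorithm type.
 * @node: The numa node.
 * @qps: Output array for the created queue pairs.
 *
 * This function sorts all available devices by numa distance to @node,
 * then tries to create all the queue pairs on a single device; if no
 * device can satisfy the whole request, an error is returned.
 */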
int hisi_qm_alloc_qps_node(struct hisi_qm_list *qm_list, int qp_num,
			   u8 alg_type, int node, struct hisi_qp **qps)
{
	struct hisi_qm_resource *tmp;
	int ret = -ENODEV;
	LIST_HEAD(head);
	int i;

	if (!qps || !qm_list || qp_num <= 0)
		return -EINVAL;

	mutex_lock(&qm_list->lock);
	if (hisi_qm_sort_devices(node, &head, qm_list)) {
		mutex_unlock(&qm_list->lock);
		goto err;
	}

	list_for_each_entry(tmp, &head, list) {
		for (i = 0; i < qp_num; i++) {
			qps[i] = hisi_qm_create_qp(tmp->qm, alg_type);
			if (IS_ERR(qps[i])) {
				hisi_qm_free_qps(qps, i);
				break;
			}
		}

		if (i == qp_num) {
			ret = 0;
			break;
		}
	}

	mutex_unlock(&qm_list->lock);
	if (ret)
		pr_info("Failed to create qps, node[%d], alg[%d], qp[%d]!\n",
			node, alg_type, qp_num);

err:
	free_list(&head);
	return ret;
}
EXPORT_SYMBOL_GPL(hisi_qm_alloc_qps_node);

static int qm_vf_q_assign(struct hisi_qm *qm, u32 num_vfs)
{
	u32 remain_q_num, q_num, i, j;
	u32 q_base = qm->qp_num;
	int ret;

	if (!num_vfs)
		return -EINVAL;

	remain_q_num = qm->ctrl_qp_num - qm->qp_num;

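	/* the remaining queues must be enough to give each VF at least one */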
	if (qm->ctrl_qp_num < qm->qp_num || remain_q_num < num_vfs)
		return -EINVAL;

	q_num = remain_q_num / num_vfs;
	for (i = 1; i <= num_vfs; i++) {
		if (i == num_vfs)
			q_num += remain_q_num % num_vfs;
		ret = hisi_qm_set_vft(qm, i, q_base, q_num);
		if (ret) {
			for (j = i; j > 0; j--)
				hisi_qm_set_vft(qm, j, 0, 0);
			return ret;
		}
		q_base += q_num;
	}

	return 0;
}

static int qm_clear_vft_config(struct hisi_qm *qm)
{
	int ret;
	u32 i;

	for (i = 1; i <= qm->vfs_num; i++) {
		ret = hisi_qm_set_vft(qm, i, 0, 0);
		if (ret)
			return ret;
	}
	qm->vfs_num = 0;

	return 0;
}

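/**
 * hisi_qm_sriov_enable() - Enable virtual functions.
 * @pdev: The PCIe device.
 * @max_vfs: The number of virtual functions to enable.
 *
 * Returns the number of enabled VFs on success, a negative error code
 * on failure.
 */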
int hisi_qm_sriov_enable(struct pci_dev *pdev, int max_vfs)
{
	struct hisi_qm *qm = pci_get_drvdata(pdev);
	int pre_existing_vfs, num_vfs, total_vfs, ret;

	total_vfs = pci_sriov_get_totalvfs(pdev);
	pre_existing_vfs = pci_num_vf(pdev);
	if (pre_existing_vfs) {
		pci_err(pdev, "%d VFs already enabled. Please disable pre-enabled VFs!\n",
			pre_existing_vfs);
		return 0;
	}

	num_vfs = min_t(int, max_vfs, total_vfs);
	ret = qm_vf_q_assign(qm, num_vfs);
	if (ret) {
		pci_err(pdev, "Can't assign queues for VF!\n");
		return ret;
	}

	qm->vfs_num = num_vfs;

	ret = pci_enable_sriov(pdev, num_vfs);
	if (ret) {
		pci_err(pdev, "Can't enable VF!\n");
		qm_clear_vft_config(qm);
		return ret;
	}

	pci_info(pdev, "VF enabled, vfs_num(=%d)!\n", num_vfs);

	return num_vfs;
}
EXPORT_SYMBOL_GPL(hisi_qm_sriov_enable);

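/**
 * hisi_qm_sriov_disable() - Disable virtual functions.
 * @pdev: The PCI device.
 * @is_frozen: True when all the VFs are already frozen.
 *
 * Fails if any VF is still assigned to a guest or in use.
 */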
int hisi_qm_sriov_disable(struct pci_dev *pdev, bool is_frozen)
{
	struct hisi_qm *qm = pci_get_drvdata(pdev);

	if (pci_vfs_assigned(pdev)) {
		pci_err(pdev, "Failed to disable VFs as VFs are assigned!\n");
		return -EPERM;
	}

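	/* While a task is still using a VF, SR-IOV cannot be disabled */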
	if (!is_frozen && qm_try_frozen_vfs(pdev, qm->qm_list)) {
		pci_err(pdev, "Task is using its VF!\n");
		return -EBUSY;
	}

	pci_disable_sriov(pdev);
	return qm_clear_vft_config(qm);
}
EXPORT_SYMBOL_GPL(hisi_qm_sriov_disable);

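/**
 * hisi_qm_sriov_configure() - Configure the number of VFs.
 * @pdev: The PCI device.
 * @num_vfs: The number of VFs to be enabled.
 *
 * Enable SR-IOV according to num_vfs; 0 means disable.
 */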
int hisi_qm_sriov_configure(struct pci_dev *pdev, int num_vfs)
{
	if (num_vfs == 0)
		return hisi_qm_sriov_disable(pdev, false);
	else
		return hisi_qm_sriov_enable(pdev, num_vfs);
}
EXPORT_SYMBOL_GPL(hisi_qm_sriov_configure);

static enum acc_err_result qm_dev_err_handle(struct hisi_qm *qm)
{
	u32 err_sts;

	if (!qm->err_ini->get_dev_hw_err_status) {
		dev_err(&qm->pdev->dev, "Device doesn't support get hw error status!\n");
		return ACC_ERR_NONE;
	}

	err_sts = qm->err_ini->get_dev_hw_err_status(qm);
	if (err_sts) {
		if (err_sts & qm->err_ini->err_info.ecc_2bits_mask)
			qm->err_status.is_dev_ecc_mbit = true;

		if (!qm->err_ini->log_dev_hw_err) {
			dev_err(&qm->pdev->dev, "Device doesn't support log hw error!\n");
			return ACC_ERR_NEED_RESET;
		}

		qm->err_ini->log_dev_hw_err(qm, err_sts);
		return ACC_ERR_NEED_RESET;
	}

	return ACC_ERR_RECOVERED;
}

static enum acc_err_result qm_process_dev_error(struct hisi_qm *qm)
{
	enum acc_err_result qm_ret, dev_ret;

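	/* log qm error */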
	qm_ret = qm_hw_error_handle(qm);

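	/* log device error */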
	dev_ret = qm_dev_err_handle(qm);

	return (qm_ret == ACC_ERR_NEED_RESET ||
		dev_ret == ACC_ERR_NEED_RESET) ?
		ACC_ERR_NEED_RESET : ACC_ERR_RECOVERED;
}

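/**
 * hisi_qm_dev_err_detected() - Get device and qm error status, then log it.
 * @pdev: The PCI device which needs to report the error.
 * @state: The connectivity between CPU and device.
 *
 * This function is registered in the PCIe AER handlers. It reports the
 * device or qm hardware error status when an error occurs and decides
 * whether a reset is needed.
 */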
pci_ers_result_t hisi_qm_dev_err_detected(struct pci_dev *pdev,
					  pci_channel_state_t state)
{
	struct hisi_qm *qm = pci_get_drvdata(pdev);
	enum acc_err_result ret;

	if (pdev->is_virtfn)
		return PCI_ERS_RESULT_NONE;

	pci_info(pdev, "PCI error detected, state(=%d)!\n", state);
	if (state == pci_channel_io_perm_failure)
		return PCI_ERS_RESULT_DISCONNECT;

	ret = qm_process_dev_error(qm);
	if (ret == ACC_ERR_NEED_RESET)
		return PCI_ERS_RESULT_NEED_RESET;

	return PCI_ERS_RESULT_RECOVERED;
}
EXPORT_SYMBOL_GPL(hisi_qm_dev_err_detected);

static int qm_get_hw_error_status(struct hisi_qm *qm)
{
	return readl(qm->io_base + QM_ABNORMAL_INT_STATUS);
}

static int qm_check_req_recv(struct hisi_qm *qm)
{
	struct pci_dev *pdev = qm->pdev;
	int ret;
	u32 val;

	writel(ACC_VENDOR_ID_VALUE, qm->io_base + QM_PEH_VENDOR_ID);
	ret = readl_relaxed_poll_timeout(qm->io_base + QM_PEH_VENDOR_ID, val,
					 (val == ACC_VENDOR_ID_VALUE),
					 POLL_PERIOD, POLL_TIMEOUT);
	if (ret) {
		dev_err(&pdev->dev, "Failed to read QM reg!\n");
		return ret;
	}

	writel(PCI_VENDOR_ID_HUAWEI, qm->io_base + QM_PEH_VENDOR_ID);
	ret = readl_relaxed_poll_timeout(qm->io_base + QM_PEH_VENDOR_ID, val,
					 (val == PCI_VENDOR_ID_HUAWEI),
					 POLL_PERIOD, POLL_TIMEOUT);
	if (ret)
		dev_err(&pdev->dev, "Failed to read QM reg the second time!\n");

	return ret;
}

static int qm_set_pf_mse(struct hisi_qm *qm, bool set)
{
	struct pci_dev *pdev = qm->pdev;
	u16 cmd;
	int i;

	pci_read_config_word(pdev, PCI_COMMAND, &cmd);
	if (set)
		cmd |= PCI_COMMAND_MEMORY;
	else
		cmd &= ~PCI_COMMAND_MEMORY;

	pci_write_config_word(pdev, PCI_COMMAND, cmd);
	for (i = 0; i < MAX_WAIT_COUNTS; i++) {
		pci_read_config_word(pdev, PCI_COMMAND, &cmd);
		if (set == ((cmd & PCI_COMMAND_MEMORY) >> 1))
			return 0;

		udelay(1);
	}

	return -ETIMEDOUT;
}

static int qm_set_vf_mse(struct hisi_qm *qm, bool set)
{
	struct pci_dev *pdev = qm->pdev;
	u16 sriov_ctrl;
	int pos;
	int i;

	pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_SRIOV);
	pci_read_config_word(pdev, pos + PCI_SRIOV_CTRL, &sriov_ctrl);
	if (set)
		sriov_ctrl |= PCI_SRIOV_CTRL_MSE;
	else
		sriov_ctrl &= ~PCI_SRIOV_CTRL_MSE;
	pci_write_config_word(pdev, pos + PCI_SRIOV_CTRL, sriov_ctrl);

	for (i = 0; i < MAX_WAIT_COUNTS; i++) {
		pci_read_config_word(pdev, pos + PCI_SRIOV_CTRL, &sriov_ctrl);
		if (set == (sriov_ctrl & PCI_SRIOV_CTRL_MSE) >>
		    ACC_PEH_SRIOV_CTRL_VF_MSE_SHIFT)
			return 0;

		udelay(1);
	}

	return -ETIMEDOUT;
}

static int qm_set_msi(struct hisi_qm *qm, bool set)
{
	struct pci_dev *pdev = qm->pdev;

	if (set) {
		pci_write_config_dword(pdev, pdev->msi_cap + PCI_MSI_MASK_64,
				       0);
	} else {
		pci_write_config_dword(pdev, pdev->msi_cap + PCI_MSI_MASK_64,
				       ACC_PEH_MSI_DISABLE);
		if (qm->err_status.is_qm_ecc_mbit ||
		    qm->err_status.is_dev_ecc_mbit)
			return 0;

		mdelay(1);
		if (readl(qm->io_base + QM_PEH_DFX_INFO0))
			return -EFAULT;
	}

	return 0;
}

static int qm_vf_reset_prepare(struct hisi_qm *qm,
			       enum qm_stop_reason stop_reason)
{
	struct hisi_qm_list *qm_list = qm->qm_list;
	struct pci_dev *pdev = qm->pdev;
	struct pci_dev *virtfn;
	struct hisi_qm *vf_qm;
	int ret = 0;

	mutex_lock(&qm_list->lock);
	list_for_each_entry(vf_qm, &qm_list->list, list) {
		virtfn = vf_qm->pdev;
		if (virtfn == pdev)
			continue;

		if (pci_physfn(virtfn) == pdev) {
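			/* save the VF's PCI state before stopping it */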
			pci_save_state(virtfn);

			ret = hisi_qm_stop(vf_qm, stop_reason);
			if (ret)
				goto stop_fail;
		}
	}

stop_fail:
	mutex_unlock(&qm_list->lock);
	return ret;
}

static int qm_reset_prepare_ready(struct hisi_qm *qm)
{
	struct pci_dev *pdev = qm->pdev;
	struct hisi_qm *pf_qm = pci_get_drvdata(pci_physfn(pdev));
	int delay = 0;

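	/* all reset requests need to be queued for processing */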
	while (test_and_set_bit(QM_DEV_RESET_FLAG, &pf_qm->reset_flag)) {
		msleep(++delay);
		if (delay > QM_RESET_WAIT_TIMEOUT)
			return -EBUSY;
	}

	return 0;
}

static int qm_controller_reset_prepare(struct hisi_qm *qm)
{
	struct pci_dev *pdev = qm->pdev;
	int ret;

	ret = qm_reset_prepare_ready(qm);
	if (ret) {
		pci_err(pdev, "Controller reset not ready!\n");
		return ret;
	}

	if (qm->vfs_num) {
		ret = qm_vf_reset_prepare(qm, QM_SOFT_RESET);
		if (ret) {
			pci_err(pdev, "Failed to stop VFs!\n");
			return ret;
		}
	}

	ret = hisi_qm_stop(qm, QM_SOFT_RESET);
	if (ret) {
		pci_err(pdev, "Failed to stop QM!\n");
		return ret;
	}

	return 0;
}

static void qm_dev_ecc_mbit_handle(struct hisi_qm *qm)
{
	u32 nfe_enb = 0;

	if (!qm->err_status.is_dev_ecc_mbit &&
	    qm->err_status.is_qm_ecc_mbit &&
	    qm->err_ini->close_axi_master_ooo) {
		qm->err_ini->close_axi_master_ooo(qm);
	} else if (qm->err_status.is_dev_ecc_mbit &&
		   !qm->err_status.is_qm_ecc_mbit &&
		   !qm->err_ini->close_axi_master_ooo) {
		nfe_enb = readl(qm->io_base + QM_RAS_NFE_ENABLE);
		writel(nfe_enb & QM_RAS_NFE_MBIT_DISABLE,
		       qm->io_base + QM_RAS_NFE_ENABLE);
		writel(QM_ECC_MBIT, qm->io_base + QM_ABNORMAL_INT_SET);
	}
}

static int qm_soft_reset(struct hisi_qm *qm)
{
	struct pci_dev *pdev = qm->pdev;
	int ret;
	u32 val;

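	/* ensure all doorbells and mailboxes have been received by the QM */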
	ret = qm_check_req_recv(qm);
	if (ret)
		return ret;

	if (qm->vfs_num) {
		ret = qm_set_vf_mse(qm, false);
		if (ret) {
			pci_err(pdev, "Failed to disable vf MSE bit.\n");
			return ret;
		}
	}

	ret = qm_set_msi(qm, false);
	if (ret) {
		pci_err(pdev, "Failed to disable PEH MSI bit.\n");
		return ret;
	}

	qm_dev_ecc_mbit_handle(qm);

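	/* OOO register set and check */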
	writel(ACC_MASTER_GLOBAL_CTRL_SHUTDOWN,
	       qm->io_base + ACC_MASTER_GLOBAL_CTRL);

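	/* if the bus is locked, the chip must be reset by the system */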
	ret = readl_relaxed_poll_timeout(qm->io_base + ACC_MASTER_TRANS_RETURN,
					 val,
					 (val == ACC_MASTER_TRANS_RETURN_RW),
					 POLL_PERIOD, POLL_TIMEOUT);
	if (ret) {
		pci_emerg(pdev, "Bus lock! Please reset system.\n");
		return ret;
	}

	ret = qm_set_pf_mse(qm, false);
	if (ret) {
		pci_err(pdev, "Failed to disable pf MSE bit.\n");
		return ret;
	}

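	/* the reset related sub-control registers are not in the PCI BAR */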
	if (ACPI_HANDLE(&pdev->dev)) {
		unsigned long long value = 0;
		acpi_status s;

		s = acpi_evaluate_integer(ACPI_HANDLE(&pdev->dev),
					  qm->err_ini->err_info.acpi_rst,
					  NULL, &value);
		if (ACPI_FAILURE(s)) {
			pci_err(pdev, "NO controller reset method!\n");
			return -EIO;
		}

		if (value) {
			pci_err(pdev, "Reset step %llu failed!\n", value);
			return -EIO;
		}
	} else {
		pci_err(pdev, "No reset method!\n");
		return -EINVAL;
	}

	return 0;
}

static int qm_vf_reset_done(struct hisi_qm *qm)
{
	struct hisi_qm_list *qm_list = qm->qm_list;
	struct pci_dev *pdev = qm->pdev;
	struct pci_dev *virtfn;
	struct hisi_qm *vf_qm;
	int ret = 0;

	mutex_lock(&qm_list->lock);
	list_for_each_entry(vf_qm, &qm_list->list, list) {
		virtfn = vf_qm->pdev;
		if (virtfn == pdev)
			continue;

		if (pci_physfn(virtfn) == pdev) {
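			/* restore the VF's PCI state after the reset */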
			pci_restore_state(virtfn);

			ret = qm_restart(vf_qm);
			if (ret)
				goto restart_fail;
		}
	}

restart_fail:
	mutex_unlock(&qm_list->lock);
	return ret;
}

static int qm_get_dev_err_status(struct hisi_qm *qm)
{
	return qm->err_ini->get_dev_hw_err_status(qm);
}

static int qm_dev_hw_init(struct hisi_qm *qm)
{
	return qm->err_ini->hw_init(qm);
}

static void qm_restart_prepare(struct hisi_qm *qm)
{
	u32 value;

	if (!qm->err_status.is_qm_ecc_mbit &&
	    !qm->err_status.is_dev_ecc_mbit)
		return;

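	/* temporarily close the OOO port used for PEH to write out MSI */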
	value = readl(qm->io_base + ACC_AM_CFG_PORT_WR_EN);
	writel(value & ~qm->err_ini->err_info.msi_wr_port,
	       qm->io_base + ACC_AM_CFG_PORT_WR_EN);

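	/* clear the device ECC 2bit error source if present */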
	value = qm_get_dev_err_status(qm) &
		qm->err_ini->err_info.ecc_2bits_mask;
	if (value && qm->err_ini->clear_dev_hw_err_status)
		qm->err_ini->clear_dev_hw_err_status(qm, value);

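	/* clear the QM ECC mbit error source */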
	writel(QM_ECC_MBIT, qm->io_base + QM_ABNORMAL_INT_SOURCE);

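	/* clear the AM Reorder Buffer ECC mbit source */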
	writel(ACC_ROB_ECC_ERR_MULTPL, qm->io_base + ACC_AM_ROB_ECC_INT_STS);

	if (qm->err_ini->open_axi_master_ooo)
		qm->err_ini->open_axi_master_ooo(qm);
}

static void qm_restart_done(struct hisi_qm *qm)
{
	u32 value;

	if (!qm->err_status.is_qm_ecc_mbit &&
	    !qm->err_status.is_dev_ecc_mbit)
		return;

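	/* open the OOO port for PEH to write out MSI */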
	value = readl(qm->io_base + ACC_AM_CFG_PORT_WR_EN);
	value |= qm->err_ini->err_info.msi_wr_port;
	writel(value, qm->io_base + ACC_AM_CFG_PORT_WR_EN);

	qm->err_status.is_qm_ecc_mbit = false;
	qm->err_status.is_dev_ecc_mbit = false;
}

static int qm_controller_reset_done(struct hisi_qm *qm)
{
	struct pci_dev *pdev = qm->pdev;
	int ret;

	ret = qm_set_msi(qm, true);
	if (ret) {
		pci_err(pdev, "Failed to enable PEH MSI bit!\n");
		return ret;
	}

	ret = qm_set_pf_mse(qm, true);
	if (ret) {
		pci_err(pdev, "Failed to enable pf MSE bit!\n");
		return ret;
	}

	if (qm->vfs_num) {
		ret = qm_set_vf_mse(qm, true);
		if (ret) {
			pci_err(pdev, "Failed to enable vf MSE bit!\n");
			return ret;
		}
	}

	ret = qm_dev_hw_init(qm);
	if (ret) {
		pci_err(pdev, "Failed to init device\n");
		return ret;
	}

	qm_restart_prepare(qm);

	ret = qm_restart(qm);
	if (ret) {
		pci_err(pdev, "Failed to start QM!\n");
		return ret;
	}

	if (qm->vfs_num) {
		ret = qm_vf_q_assign(qm, qm->vfs_num);
		if (ret) {
			pci_err(pdev, "Failed to assign queue!\n");
			return ret;
		}
	}

	ret = qm_vf_reset_done(qm);
	if (ret) {
		pci_err(pdev, "Failed to start VFs!\n");
		return -EPERM;
	}

	hisi_qm_dev_err_init(qm);
	qm_restart_done(qm);

	clear_bit(QM_DEV_RESET_FLAG, &qm->reset_flag);

	return 0;
}

static int qm_controller_reset(struct hisi_qm *qm)
{
	struct pci_dev *pdev = qm->pdev;
	int ret;

	pci_info(pdev, "Controller resetting...\n");

	ret = qm_controller_reset_prepare(qm);
	if (ret)
		return ret;

	ret = qm_soft_reset(qm);
	if (ret) {
		pci_err(pdev, "Controller reset failed (%d)\n", ret);
		return ret;
	}

	ret = qm_controller_reset_done(qm);
	if (ret)
		return ret;

	pci_info(pdev, "Controller reset complete\n");

	return 0;
}

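/**
 * hisi_qm_dev_slot_reset() - Slot reset.
 * @pdev: The PCIe device.
 *
 * This function offers a QM related PCIe device reset interface. Drivers
 * which use the QM can use it as the slot_reset callback in their
 * struct pci_error_handlers.
 */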
pci_ers_result_t hisi_qm_dev_slot_reset(struct pci_dev *pdev)
{
	struct hisi_qm *qm = pci_get_drvdata(pdev);
	int ret;

	if (pdev->is_virtfn)
		return PCI_ERS_RESULT_RECOVERED;

	pci_aer_clear_nonfatal_status(pdev);

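	/* reset the PCIe device controller */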
	ret = qm_controller_reset(qm);
	if (ret) {
		pci_err(pdev, "Controller reset failed (%d)\n", ret);
		return PCI_ERS_RESULT_DISCONNECT;
	}

	return PCI_ERS_RESULT_RECOVERED;
}
EXPORT_SYMBOL_GPL(hisi_qm_dev_slot_reset);

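/* check whether the pending hardware error is an ECC mbit error */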
static int qm_check_dev_error(struct hisi_qm *qm)
{
	int ret;

	if (qm->fun_type == QM_HW_VF)
		return 0;

	ret = qm_get_hw_error_status(qm) & QM_ECC_MBIT;
	if (ret)
		return ret;

	return (qm_get_dev_err_status(qm) &
		qm->err_ini->err_info.ecc_2bits_mask);
}

void hisi_qm_reset_prepare(struct pci_dev *pdev)
{
	struct hisi_qm *pf_qm = pci_get_drvdata(pci_physfn(pdev));
	struct hisi_qm *qm = pci_get_drvdata(pdev);
	u32 delay = 0;
	int ret;

	hisi_qm_dev_err_uninit(pf_qm);

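	/*
	 * If an ECC mbit error has occurred, wait for the soft reset to fix
	 * it before going on with the FLR.
	 */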
	while (qm_check_dev_error(pf_qm)) {
		msleep(++delay);
		if (delay > QM_RESET_WAIT_TIMEOUT)
			return;
	}

	ret = qm_reset_prepare_ready(qm);
	if (ret) {
		pci_err(pdev, "FLR not ready!\n");
		return;
	}

	if (qm->vfs_num) {
		ret = qm_vf_reset_prepare(qm, QM_FLR);
		if (ret) {
			pci_err(pdev, "Failed to prepare reset, ret = %d.\n",
				ret);
			return;
		}
	}

	ret = hisi_qm_stop(qm, QM_FLR);
	if (ret) {
		pci_err(pdev, "Failed to stop QM, ret = %d.\n", ret);
		return;
	}

	pci_info(pdev, "FLR resetting...\n");
}
EXPORT_SYMBOL_GPL(hisi_qm_reset_prepare);

static bool qm_flr_reset_complete(struct pci_dev *pdev)
{
	struct pci_dev *pf_pdev = pci_physfn(pdev);
	struct hisi_qm *qm = pci_get_drvdata(pf_pdev);
	u32 id;

	pci_read_config_dword(qm->pdev, PCI_COMMAND, &id);
	if (id == QM_PCI_COMMAND_INVALID) {
		pci_err(pdev, "Device can not be used!\n");
		return false;
	}

	clear_bit(QM_DEV_RESET_FLAG, &qm->reset_flag);

	return true;
}

void hisi_qm_reset_done(struct pci_dev *pdev)
{
	struct hisi_qm *pf_qm = pci_get_drvdata(pci_physfn(pdev));
	struct hisi_qm *qm = pci_get_drvdata(pdev);
	int ret;

	hisi_qm_dev_err_init(pf_qm);

	ret = qm_restart(qm);
	if (ret) {
		pci_err(pdev, "Failed to start QM, ret = %d.\n", ret);
		goto flr_done;
	}

	if (qm->fun_type == QM_HW_PF) {
		ret = qm_dev_hw_init(qm);
		if (ret) {
			pci_err(pdev, "Failed to init PF, ret = %d.\n", ret);
			goto flr_done;
		}

		if (!qm->vfs_num)
			goto flr_done;

		ret = qm_vf_q_assign(qm, qm->vfs_num);
		if (ret) {
			pci_err(pdev, "Failed to assign VFs, ret = %d.\n", ret);
			goto flr_done;
		}

		ret = qm_vf_reset_done(qm);
		if (ret) {
			pci_err(pdev, "Failed to start VFs, ret = %d.\n", ret);
			goto flr_done;
		}
	}

flr_done:
	if (qm_flr_reset_complete(pdev))
		pci_info(pdev, "FLR reset complete\n");
}
EXPORT_SYMBOL_GPL(hisi_qm_reset_done);

static irqreturn_t qm_abnormal_irq(int irq, void *data)
{
	struct hisi_qm *qm = data;
	enum acc_err_result ret;

	atomic64_inc(&qm->debug.dfx.abnormal_irq_cnt);
	ret = qm_process_dev_error(qm);
	if (ret == ACC_ERR_NEED_RESET)
		schedule_work(&qm->rst_work);

	return IRQ_HANDLED;
}

static int qm_irq_register(struct hisi_qm *qm)
{
	struct pci_dev *pdev = qm->pdev;
	int ret;

	ret = request_irq(pci_irq_vector(pdev, QM_EQ_EVENT_IRQ_VECTOR),
			  qm_irq, IRQF_SHARED, qm->dev_name, qm);
	if (ret)
		return ret;

	if (qm->ver != QM_HW_V1) {
		ret = request_irq(pci_irq_vector(pdev, QM_AEQ_EVENT_IRQ_VECTOR),
				  qm_aeq_irq, IRQF_SHARED, qm->dev_name, qm);
		if (ret)
			goto err_aeq_irq;

		if (qm->fun_type == QM_HW_PF) {
			ret = request_irq(pci_irq_vector(pdev,
					  QM_ABNORMAL_EVENT_IRQ_VECTOR),
					  qm_abnormal_irq, IRQF_SHARED,
					  qm->dev_name, qm);
			if (ret)
				goto err_abnormal_irq;
		}
	}

	return 0;

err_abnormal_irq:
	free_irq(pci_irq_vector(pdev, QM_AEQ_EVENT_IRQ_VECTOR), qm);
err_aeq_irq:
	free_irq(pci_irq_vector(pdev, QM_EQ_EVENT_IRQ_VECTOR), qm);
	return ret;
}

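/**
 * hisi_qm_dev_shutdown() - Shutdown the device.
 * @pdev: The device to be shut down.
 *
 * This function stops the qm when the OS shuts down or reboots.
 */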
void hisi_qm_dev_shutdown(struct pci_dev *pdev)
{
	struct hisi_qm *qm = pci_get_drvdata(pdev);
	int ret;

	ret = hisi_qm_stop(qm, QM_NORMAL);
	if (ret)
		dev_err(&pdev->dev, "Failed to stop qm in shutdown!\n");
}
EXPORT_SYMBOL_GPL(hisi_qm_dev_shutdown);

static void hisi_qm_controller_reset(struct work_struct *rst_work)
{
	struct hisi_qm *qm = container_of(rst_work, struct hisi_qm, rst_work);
	int ret;

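	/* reset the PCIe device controller */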
	ret = qm_controller_reset(qm);
	if (ret)
		dev_err(&qm->pdev->dev, "controller reset failed (%d)\n", ret);
}

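/**
 * hisi_qm_alg_register() - Register an alg to crypto and add the qm to
 * qm_list.
 * @qm: The qm to be added.
 * @qm_list: The qm list.
 *
 * This function adds the qm to the qm list; if the list was empty, it also
 * registers the algorithm to the crypto subsystem.
 */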
int hisi_qm_alg_register(struct hisi_qm *qm, struct hisi_qm_list *qm_list)
{
	int flag = 0;
	int ret = 0;

	mutex_lock(&qm_list->lock);
	if (list_empty(&qm_list->list))
		flag = 1;
	list_add_tail(&qm->list, &qm_list->list);
	mutex_unlock(&qm_list->lock);

	if (flag) {
		ret = qm_list->register_to_crypto();
		if (ret) {
			mutex_lock(&qm_list->lock);
			list_del(&qm->list);
			mutex_unlock(&qm_list->lock);
		}
	}

	return ret;
}
EXPORT_SYMBOL_GPL(hisi_qm_alg_register);

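/**
 * hisi_qm_alg_unregister() - Unregister an alg from crypto and delete the
 * qm from qm_list.
 * @qm: The qm to be deleted.
 * @qm_list: The qm list.
 *
 * This function deletes the qm from the qm list; if the list becomes empty,
 * it also unregisters the algorithm from the crypto subsystem.
 */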
void hisi_qm_alg_unregister(struct hisi_qm *qm, struct hisi_qm_list *qm_list)
{
	mutex_lock(&qm_list->lock);
	list_del(&qm->list);
	mutex_unlock(&qm_list->lock);

	if (list_empty(&qm_list->list))
		qm_list->unregister_from_crypto();
}
EXPORT_SYMBOL_GPL(hisi_qm_alg_unregister);

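/**
 * hisi_qm_init() - Initialize a qm and its queue structures.
 * @qm: The qm to be initialized.
 *
 * This function initializes the qm; afterwards hisi_qm_start() can be
 * called to put the qm to work.
 */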
int hisi_qm_init(struct hisi_qm *qm)
{
	struct pci_dev *pdev = qm->pdev;
	struct device *dev = &pdev->dev;
	unsigned int num_vec;
	int ret;

	hisi_qm_pre_init(qm);

	ret = qm_alloc_uacce(qm);
	if (ret < 0)
		dev_warn(&pdev->dev, "fail to alloc uacce (%d)\n", ret);

	ret = pci_enable_device_mem(pdev);
	if (ret < 0) {
		dev_err(&pdev->dev, "Failed to enable device mem!\n");
		goto err_remove_uacce;
	}

	ret = pci_request_mem_regions(pdev, qm->dev_name);
	if (ret < 0) {
		dev_err(&pdev->dev, "Failed to request mem regions!\n");
		goto err_disable_pcidev;
	}

	qm->phys_base = pci_resource_start(pdev, PCI_BAR_2);
	qm->phys_size = pci_resource_len(qm->pdev, PCI_BAR_2);
	qm->io_base = ioremap(qm->phys_base, qm->phys_size);
	if (!qm->io_base) {
		ret = -EIO;
		goto err_release_mem_regions;
	}

	ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64));
	if (ret < 0)
		goto err_iounmap;
	pci_set_master(pdev);

	if (!qm->ops->get_irq_num) {
		ret = -EOPNOTSUPP;
		goto err_iounmap;
	}
	num_vec = qm->ops->get_irq_num(qm);
	ret = pci_alloc_irq_vectors(pdev, num_vec, num_vec, PCI_IRQ_MSI);
	if (ret < 0) {
		dev_err(dev, "Failed to enable MSI vectors!\n");
		goto err_iounmap;
	}

	ret = qm_irq_register(qm);
	if (ret)
		goto err_free_irq_vectors;

	if (qm->fun_type == QM_HW_VF && qm->ver != QM_HW_V1) {
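		/* hardware v2 and later can read the VFT via mailbox */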
		ret = hisi_qm_get_vft(qm, &qm->qp_base, &qm->qp_num);
		if (ret)
			goto err_irq_unregister;
	}

	ret = hisi_qm_memory_init(qm);
	if (ret)
		goto err_irq_unregister;

	INIT_WORK(&qm->work, qm_work_process);
	if (qm->fun_type == QM_HW_PF)
		INIT_WORK(&qm->rst_work, hisi_qm_controller_reset);

	atomic_set(&qm->status.flags, QM_INIT);

	return 0;

err_irq_unregister:
	qm_irq_unregister(qm);
err_free_irq_vectors:
	pci_free_irq_vectors(pdev);
err_iounmap:
	iounmap(qm->io_base);
err_release_mem_regions:
	pci_release_mem_regions(pdev);
err_disable_pcidev:
	pci_disable_device(pdev);
err_remove_uacce:
	uacce_remove(qm->uacce);
	qm->uacce = NULL;
	return ret;
}
EXPORT_SYMBOL_GPL(hisi_qm_init);

MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Zhou Wang <wangzhou1@hisilicon.com>");
MODULE_DESCRIPTION("HiSilicon Accelerator queue manager driver");